perf/core: Simplify perf_pmu_register()

Using the previously introduced perf_pmu_free() and a new IDR helper,
simplify the perf_pmu_register() error paths.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20241104135518.198937277@infradead.org
parent 8f4c4963d2
commit 6c8b0b835f

2 changed files with 46 additions and 42 deletions
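The simplification leans on the scope-based cleanup helpers from
<linux/cleanup.h>: DEFINE_FREE() ties a destructor to a pointer variable
declared with __free(), and no_free_ptr() disarms that destructor once
ownership has been handed off. A minimal sketch of the idiom; struct foo,
foo_free and foo_create() are hypothetical names for illustration only:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct foo {
	int id;				/* hypothetical payload */
};

/* Free the object when the variable leaves scope, unless ownership moved. */
DEFINE_FREE(foo_free, struct foo *, if (_T) kfree(_T))

static struct foo *foo_create(void)
{
	struct foo *foo __free(foo_free) = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;

	/* Any early 'return NULL;' from here on frees @foo automatically. */

	return no_free_ptr(foo);	/* success: caller now owns @foo */
}

This is the shape perf_pmu_register() takes in the diff below: the pmu is
armed for perf_pmu_free() on entry and released with no_free_ptr() only
once every step has succeeded.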
			
include/linux/idr.h

@@ -15,6 +15,7 @@
 #include <linux/radix-tree.h>
 #include <linux/gfp.h>
 #include <linux/percpu.h>
+#include <linux/cleanup.h>
 
 struct idr {
 	struct radix_tree_root	idr_rt;
@@ -124,6 +125,22 @@ void *idr_get_next_ul(struct idr *, unsigned long *nextid);
 void *idr_replace(struct idr *, void *, unsigned long id);
 void idr_destroy(struct idr *);
 
+struct __class_idr {
+	struct idr *idr;
+	int id;
+};
+
+#define idr_null ((struct __class_idr){ NULL, -1 })
+#define take_idr_id(id) __get_and_null(id, idr_null)
+
+DEFINE_CLASS(idr_alloc, struct __class_idr,
+	     if (_T.id >= 0) idr_remove(_T.idr, _T.id),
+	     ((struct __class_idr){
+		.idr = idr,
+		.id = idr_alloc(idr, ptr, start, end, gfp),
+	     }),
+	     struct idr *idr, void *ptr, int start, int end, gfp_t gfp);
+
 /**
  * idr_init_base() - Initialise an IDR.
  * @idr: IDR handle.
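The new CLASS(idr_alloc, ...) pairs idr_alloc() with an automatic
idr_remove() that runs if the scope is left before take_idr_id() claims
the ID. A minimal usage sketch of the helper added above; example_idr
and example_install() are hypothetical names for illustration only:

#include <linux/cleanup.h>
#include <linux/idr.h>

static DEFINE_IDR(example_idr);

static int example_install(void *obj, int *out_id)
{
	/*
	 * The constructor runs idr_alloc(); leaving this scope without
	 * calling take_idr_id() runs idr_remove() on the allocated ID.
	 */
	CLASS(idr_alloc, id)(&example_idr, obj, 0, 0, GFP_KERNEL);

	if (id.id < 0)
		return id.id;		/* allocation failed, nothing to undo */

	/* ... further setup may fail with a plain 'return err;' ... */

	*out_id = id.id;
	take_idr_id(id);		/* success: keep the ID, disarm cleanup */
	return 0;
}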
kernel/events/core.c

@@ -11914,52 +11914,49 @@ static void perf_pmu_free(struct pmu *pmu)
 	free_percpu(pmu->cpu_pmu_context);
 }
 
-int perf_pmu_register(struct pmu *pmu, const char *name, int type)
+DEFINE_FREE(pmu_unregister, struct pmu *, if (_T) perf_pmu_free(_T))
+
+int perf_pmu_register(struct pmu *_pmu, const char *name, int type)
 {
-	int cpu, ret, max = PERF_TYPE_MAX;
+	int cpu, max = PERF_TYPE_MAX;
 
-	pmu->type = -1;
+	struct pmu *pmu __free(pmu_unregister) = _pmu;
+	guard(mutex)(&pmus_lock);
 
-	mutex_lock(&pmus_lock);
-	ret = -ENOMEM;
 	pmu->pmu_disable_count = alloc_percpu(int);
 	if (!pmu->pmu_disable_count)
-		goto unlock;
+		return -ENOMEM;
 
-	if (WARN_ONCE(!name, "Can not register anonymous pmu.\n")) {
-		ret = -EINVAL;
-		goto free;
-	}
+	if (WARN_ONCE(!name, "Can not register anonymous pmu.\n"))
+		return -EINVAL;
 
-	if (WARN_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE, "Can not register a pmu with an invalid scope.\n")) {
-		ret = -EINVAL;
-		goto free;
-	}
+	if (WARN_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE,
+		      "Can not register a pmu with an invalid scope.\n"))
+		return -EINVAL;
 
 	pmu->name = name;
 
 	if (type >= 0)
 		max = type;
 
-	ret = idr_alloc(&pmu_idr, NULL, max, 0, GFP_KERNEL);
-	if (ret < 0)
-		goto free;
+	CLASS(idr_alloc, pmu_type)(&pmu_idr, NULL, max, 0, GFP_KERNEL);
+	if (pmu_type.id < 0)
+		return pmu_type.id;
 
-	WARN_ON(type >= 0 && ret != type);
+	WARN_ON(type >= 0 && pmu_type.id != type);
 
-	pmu->type = ret;
+	pmu->type = pmu_type.id;
 	atomic_set(&pmu->exclusive_cnt, 0);
 
 	if (pmu_bus_running && !pmu->dev) {
-		ret = pmu_dev_alloc(pmu);
+		int ret = pmu_dev_alloc(pmu);
 		if (ret)
-			goto free;
+			return ret;
 	}
 
-	ret = -ENOMEM;
 	pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context);
 	if (!pmu->cpu_pmu_context)
-		goto free;
+		return -ENOMEM;
 
 	for_each_possible_cpu(cpu) {
 		struct perf_cpu_pmu_context *cpc;
@@ -12000,32 +11997,22 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
 	/*
 	 * Now that the PMU is complete, make it visible to perf_try_init_event().
 	 */
-	if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu)) {
-		ret = -EINVAL;
-		goto free;
-	}
+	if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu))
+		return -EINVAL;
 	list_add_rcu(&pmu->entry, &pmus);
 
-	ret = 0;
-unlock:
-	mutex_unlock(&pmus_lock);
-
-	return ret;
-
-free:
-	if (pmu->type >= 0)
-		idr_remove(&pmu_idr, pmu->type);
-	perf_pmu_free(pmu);
-	goto unlock;
+	take_idr_id(pmu_type);
+	_pmu = no_free_ptr(pmu); // let it rip
+	return 0;
 }
 EXPORT_SYMBOL_GPL(perf_pmu_register);
 
 void perf_pmu_unregister(struct pmu *pmu)
 {
-	mutex_lock(&pmus_lock);
-	list_del_rcu(&pmu->entry);
-	idr_remove(&pmu_idr, pmu->type);
-	mutex_unlock(&pmus_lock);
+	scoped_guard (mutex, &pmus_lock) {
+		list_del_rcu(&pmu->entry);
+		idr_remove(&pmu_idr, pmu->type);
+	}
 
 	/*
 	 * We dereference the pmu list under both SRCU and regular RCU, so
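perf_pmu_unregister() now uses scoped_guard() so pmus_lock is held for
exactly the braced block and dropped on every path out of it. A
standalone sketch of that pattern; example_lock and example_remove()
are hypothetical names for illustration only:

#include <linux/cleanup.h>
#include <linux/list.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static void example_remove(struct list_head *entry)
{
	/* The mutex is acquired here and released when the block ends. */
	scoped_guard(mutex, &example_lock) {
		list_del(entry);
	}

	/* Work that must run without the lock held goes after the block. */
}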