perf/core: Simplify perf_pmu_register()

Using the previously introduced perf_pmu_free() and a new IDR helper,
simplify the perf_pmu_register error paths.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20241104135518.198937277@infradead.org
commit 6c8b0b835f
parent 8f4c4963d2

2 changed files with 46 additions and 42 deletions
include/linux/idr.h
@@ -15,6 +15,7 @@
 #include <linux/radix-tree.h>
 #include <linux/gfp.h>
 #include <linux/percpu.h>
+#include <linux/cleanup.h>
 
 struct idr {
 	struct radix_tree_root	idr_rt;
@@ -124,6 +125,22 @@ void *idr_get_next_ul(struct idr *, unsigned long *nextid);
 void *idr_replace(struct idr *, void *, unsigned long id);
 void idr_destroy(struct idr *);
 
+struct __class_idr {
+	struct idr *idr;
+	int id;
+};
+
+#define idr_null ((struct __class_idr){ NULL, -1 })
+#define take_idr_id(id) __get_and_null(id, idr_null)
+
+DEFINE_CLASS(idr_alloc, struct __class_idr,
+	     if (_T.id >= 0) idr_remove(_T.idr, _T.id),
+	     ((struct __class_idr){
+		.idr = idr,
+		.id = idr_alloc(idr, ptr, start, end, gfp),
+	     }),
+	     struct idr *idr, void *ptr, int start, int end, gfp_t gfp);
+
 /**
  * idr_init_base() - Initialise an IDR.
  * @idr: IDR handle.
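The DEFINE_CLASS(idr_alloc, ...) above creates a guard object whose
constructor calls idr_alloc() and whose destructor calls idr_remove(),
unless the id has been claimed with take_idr_id(). A runnable userspace
simulation of those semantics follows; the toy bitmap "idr" and
register_thing() are invented stand-ins, and only the guard behaviour
mirrors the patch:

#include <stdbool.h>
#include <stdio.h>

/* Toy "idr": a bitmap of 8 ids, invented for illustration. */
struct idr { bool used[8]; };

static int idr_alloc(struct idr *idr, int start)
{
	for (int i = start; i < 8; i++) {
		if (!idr->used[i]) {
			idr->used[i] = true;
			return i;
		}
	}
	return -1;
}

static void idr_remove(struct idr *idr, int id)
{
	idr->used[id] = false;
}

struct __class_idr { struct idr *idr; int id; };

/* Exit handler: mirrors DEFINE_CLASS's "if (_T.id >= 0) idr_remove(...)". */
static void idr_guard_exit(struct __class_idr *g)
{
	if (g->id >= 0)
		idr_remove(g->idr, g->id);
}

/* Rough stand-in for CLASS(idr_alloc, var)(...). */
#define IDR_GUARD(var, _idr, start) \
	struct __class_idr var __attribute__((cleanup(idr_guard_exit))) = \
		{ .idr = (_idr), .id = idr_alloc((_idr), (start)) }

/* Mirrors take_idr_id(): claim the id so the exit handler does nothing. */
#define take_idr_id(g) ({ int __id = (g).id; (g).id = -1; __id; })

static int register_thing(struct idr *idr, bool fail_later)
{
	IDR_GUARD(t, idr, 0);

	if (t.id < 0)
		return t.id;
	if (fail_later)
		return -22;		/* id auto-removed on this early return */
	return take_idr_id(t);		/* success: keep the id */
}

int main(void)
{
	struct idr idr = { .used = { false } };

	printf("fail path -> %d\n", register_thing(&idr, true));	/* -22 */
	printf("ok path   -> %d\n", register_thing(&idr, false));	/* 0 */
	return 0;
}

This is the shape perf_pmu_register() takes in the diff below: allocate
the id up front, let every error return undo it automatically, and claim
it only once the PMU is fully visible.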
kernel/events/core.c
@@ -11914,52 +11914,49 @@ static void perf_pmu_free(struct pmu *pmu)
 	free_percpu(pmu->cpu_pmu_context);
 }
 
-int perf_pmu_register(struct pmu *pmu, const char *name, int type)
+DEFINE_FREE(pmu_unregister, struct pmu *, if (_T) perf_pmu_free(_T))
+
+int perf_pmu_register(struct pmu *_pmu, const char *name, int type)
 {
-	int cpu, ret, max = PERF_TYPE_MAX;
+	int cpu, max = PERF_TYPE_MAX;
 
-	pmu->type = -1;
+	struct pmu *pmu __free(pmu_unregister) = _pmu;
+	guard(mutex)(&pmus_lock);
 
-	mutex_lock(&pmus_lock);
-	ret = -ENOMEM;
 	pmu->pmu_disable_count = alloc_percpu(int);
 	if (!pmu->pmu_disable_count)
-		goto unlock;
+		return -ENOMEM;
 
-	if (WARN_ONCE(!name, "Can not register anonymous pmu.\n")) {
-		ret = -EINVAL;
-		goto free;
-	}
+	if (WARN_ONCE(!name, "Can not register anonymous pmu.\n"))
+		return -EINVAL;
 
-	if (WARN_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE, "Can not register a pmu with an invalid scope.\n")) {
-		ret = -EINVAL;
-		goto free;
-	}
+	if (WARN_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE,
+		      "Can not register a pmu with an invalid scope.\n"))
+		return -EINVAL;
 
 	pmu->name = name;
 
 	if (type >= 0)
 		max = type;
 
-	ret = idr_alloc(&pmu_idr, NULL, max, 0, GFP_KERNEL);
-	if (ret < 0)
-		goto free;
+	CLASS(idr_alloc, pmu_type)(&pmu_idr, NULL, max, 0, GFP_KERNEL);
+	if (pmu_type.id < 0)
+		return pmu_type.id;
 
-	WARN_ON(type >= 0 && ret != type);
+	WARN_ON(type >= 0 && pmu_type.id != type);
 
-	pmu->type = ret;
+	pmu->type = pmu_type.id;
 	atomic_set(&pmu->exclusive_cnt, 0);
 
 	if (pmu_bus_running && !pmu->dev) {
-		ret = pmu_dev_alloc(pmu);
+		int ret = pmu_dev_alloc(pmu);
 		if (ret)
-			goto free;
+			return ret;
 	}
 
-	ret = -ENOMEM;
 	pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context);
 	if (!pmu->cpu_pmu_context)
-		goto free;
+		return -ENOMEM;
 
 	for_each_possible_cpu(cpu) {
 		struct perf_cpu_pmu_context *cpc;
@@ -12000,32 +11997,22 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
 	/*
 	 * Now that the PMU is complete, make it visible to perf_try_init_event().
 	 */
-	if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu)) {
-		ret = -EINVAL;
-		goto free;
-	}
+	if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu))
+		return -EINVAL;
 	list_add_rcu(&pmu->entry, &pmus);
 
-	ret = 0;
-unlock:
-	mutex_unlock(&pmus_lock);
-
-	return ret;
-
-free:
-	if (pmu->type >= 0)
-		idr_remove(&pmu_idr, pmu->type);
-	perf_pmu_free(pmu);
-	goto unlock;
+	take_idr_id(pmu_type);
+	_pmu = no_free_ptr(pmu); // let it rip
+	return 0;
 }
 EXPORT_SYMBOL_GPL(perf_pmu_register);
 
 void perf_pmu_unregister(struct pmu *pmu)
 {
-	mutex_lock(&pmus_lock);
-	list_del_rcu(&pmu->entry);
-	idr_remove(&pmu_idr, pmu->type);
-	mutex_unlock(&pmus_lock);
+	scoped_guard (mutex, &pmus_lock) {
+		list_del_rcu(&pmu->entry);
+		idr_remove(&pmu_idr, pmu->type);
+	}
 
 	/*
 	 * We dereference the pmu list under both SRCU and regular RCU, so
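One more cleanup.h idiom appears above: perf_pmu_register() now holds
pmus_lock via guard(mutex)(&pmus_lock) for the whole function, and
perf_pmu_unregister() uses scoped_guard() for its critical section, so
the unlock happens automatically on every return path. A minimal
pthread-based sketch of the same pattern; MUTEX_GUARD is an invented
stand-in for the kernel's guard() macro:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_guard(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Invented stand-in for guard(mutex)(...): lock now, unlock at scope exit. */
#define MUTEX_GUARD(m) \
	pthread_mutex_t *__guard __attribute__((cleanup(unlock_guard))) = \
		(pthread_mutex_lock(m), (m))

static int critical(int fail)
{
	MUTEX_GUARD(&lock);

	if (fail)
		return -1;	/* lock released here... */
	return 0;		/* ...and here, with no unlock boilerplate */
}

int main(void)
{
	printf("%d %d\n", critical(1), critical(0));
	return 0;
}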