mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	percpu-refcount: require percpu_ref to be exited explicitly
Currently, a percpu_ref undoes percpu_ref_init() automatically by freeing the allocated percpu area when the percpu_ref is killed. While seemingly convenient, this has the following niggles. * It's impossible to re-init a released reference counter without going through re-allocation. * In a similar vein, it's impossible to initialize a percpu_ref counter with static percpu variables. * We need and have an explicit destructor anyway for failure paths - percpu_ref_cancel_init(). This patch removes the automatic percpu counter freeing in percpu_ref_kill_rcu() and repurposes percpu_ref_cancel_init() into a generic destructor now named percpu_ref_exit(). percpu_ref_destroy() is considered but it gets confusing with percpu_ref_kill() while "exit" clearly indicates that it's the counterpart of percpu_ref_init(). All percpu_ref_cancel_init() users are updated to invoke percpu_ref_exit() instead and explicit percpu_ref_exit() calls are added to the destruction path of all percpu_ref users. Signed-off-by: Tejun Heo <tj@kernel.org> Acked-by: Benjamin LaHaise <bcrl@kvack.org> Cc: Kent Overstreet <kmo@daterainc.com> Cc: Christoph Lameter <cl@linux-foundation.org> Cc: Benjamin LaHaise <bcrl@kvack.org> Cc: Nicholas A. Bellinger <nab@linux-iscsi.org> Cc: Li Zefan <lizefan@huawei.com>
This commit is contained in:
		
							parent
							
								
									7d74207512
								
							
						
					
					
						commit
						9a1049da9b
					
				
					 5 changed files with 24 additions and 33 deletions
				
			
		| 
						 | 
				
			
			@ -825,7 +825,7 @@ int core_tpg_add_lun(
 | 
			
		|||
 | 
			
		||||
	ret = core_dev_export(dev, tpg, lun);
 | 
			
		||||
	if (ret < 0) {
 | 
			
		||||
		percpu_ref_cancel_init(&lun->lun_ref);
 | 
			
		||||
		percpu_ref_exit(&lun->lun_ref);
 | 
			
		||||
		return ret;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -880,5 +880,7 @@ int core_tpg_post_dellun(
 | 
			
		|||
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
 | 
			
		||||
	spin_unlock(&tpg->tpg_lun_lock);
 | 
			
		||||
 | 
			
		||||
	percpu_ref_exit(&lun->lun_ref);
 | 
			
		||||
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
							
								
								
									
										6
									
								
								fs/aio.c
									
									
									
									
									
								
							
							
						
						
									
										6
									
								
								fs/aio.c
									
									
									
									
									
								
							| 
						 | 
				
			
			@ -506,6 +506,8 @@ static void free_ioctx(struct work_struct *work)
 | 
			
		|||
 | 
			
		||||
	aio_free_ring(ctx);
 | 
			
		||||
	free_percpu(ctx->cpu);
 | 
			
		||||
	percpu_ref_exit(&ctx->reqs);
 | 
			
		||||
	percpu_ref_exit(&ctx->users);
 | 
			
		||||
	kmem_cache_free(kioctx_cachep, ctx);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -715,8 +717,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 | 
			
		|||
err:
 | 
			
		||||
	mutex_unlock(&ctx->ring_lock);
 | 
			
		||||
	free_percpu(ctx->cpu);
 | 
			
		||||
	percpu_ref_cancel_init(&ctx->reqs);
 | 
			
		||||
	percpu_ref_cancel_init(&ctx->users);
 | 
			
		||||
	percpu_ref_exit(&ctx->reqs);
 | 
			
		||||
	percpu_ref_exit(&ctx->users);
 | 
			
		||||
	kmem_cache_free(kioctx_cachep, ctx);
 | 
			
		||||
	pr_debug("error allocating ioctx %d\n", err);
 | 
			
		||||
	return ERR_PTR(err);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -57,9 +57,7 @@ struct percpu_ref {
 | 
			
		|||
	atomic_t		count;
 | 
			
		||||
	/*
 | 
			
		||||
	 * The low bit of the pointer indicates whether the ref is in percpu
 | 
			
		||||
	 * mode; if set, then get/put will manipulate the atomic_t (this is a
 | 
			
		||||
	 * hack because we need to keep the pointer around for
 | 
			
		||||
	 * percpu_ref_kill_rcu())
 | 
			
		||||
	 * mode; if set, then get/put will manipulate the atomic_t.
 | 
			
		||||
	 */
 | 
			
		||||
	unsigned long		pcpu_count_ptr;
 | 
			
		||||
	percpu_ref_func_t	*release;
 | 
			
		||||
| 
						 | 
				
			
			@ -69,7 +67,7 @@ struct percpu_ref {
 | 
			
		|||
 | 
			
		||||
int __must_check percpu_ref_init(struct percpu_ref *ref,
 | 
			
		||||
				 percpu_ref_func_t *release);
 | 
			
		||||
void percpu_ref_cancel_init(struct percpu_ref *ref);
 | 
			
		||||
void percpu_ref_exit(struct percpu_ref *ref);
 | 
			
		||||
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 | 
			
		||||
				 percpu_ref_func_t *confirm_kill);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1638,7 +1638,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
 | 
			
		|||
exit_root_id:
 | 
			
		||||
	cgroup_exit_root_id(root);
 | 
			
		||||
cancel_ref:
 | 
			
		||||
	percpu_ref_cancel_init(&root_cgrp->self.refcnt);
 | 
			
		||||
	percpu_ref_exit(&root_cgrp->self.refcnt);
 | 
			
		||||
out:
 | 
			
		||||
	free_cgrp_cset_links(&tmp_links);
 | 
			
		||||
	return ret;
 | 
			
		||||
| 
						 | 
				
			
			@ -4133,6 +4133,8 @@ static void css_free_work_fn(struct work_struct *work)
 | 
			
		|||
		container_of(work, struct cgroup_subsys_state, destroy_work);
 | 
			
		||||
	struct cgroup *cgrp = css->cgroup;
 | 
			
		||||
 | 
			
		||||
	percpu_ref_exit(&css->refcnt);
 | 
			
		||||
 | 
			
		||||
	if (css->ss) {
 | 
			
		||||
		/* css free path */
 | 
			
		||||
		if (css->parent)
 | 
			
		||||
| 
						 | 
				
			
			@ -4330,7 +4332,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 | 
			
		|||
err_free_id:
 | 
			
		||||
	cgroup_idr_remove(&ss->css_idr, css->id);
 | 
			
		||||
err_free_percpu_ref:
 | 
			
		||||
	percpu_ref_cancel_init(&css->refcnt);
 | 
			
		||||
	percpu_ref_exit(&css->refcnt);
 | 
			
		||||
err_free_css:
 | 
			
		||||
	call_rcu(&css->rcu_head, css_free_rcu_fn);
 | 
			
		||||
	return err;
 | 
			
		||||
| 
						 | 
				
			
			@ -4441,7 +4443,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
 | 
			
		|||
out_free_id:
 | 
			
		||||
	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
 | 
			
		||||
out_cancel_ref:
 | 
			
		||||
	percpu_ref_cancel_init(&cgrp->self.refcnt);
 | 
			
		||||
	percpu_ref_exit(&cgrp->self.refcnt);
 | 
			
		||||
out_free_cgrp:
 | 
			
		||||
	kfree(cgrp);
 | 
			
		||||
out_unlock:
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -61,36 +61,25 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
 | 
			
		|||
EXPORT_SYMBOL_GPL(percpu_ref_init);
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
 * percpu_ref_cancel_init - cancel percpu_ref_init()
 | 
			
		||||
 * @ref: percpu_ref to cancel init for
 | 
			
		||||
 * percpu_ref_exit - undo percpu_ref_init()
 | 
			
		||||
 * @ref: percpu_ref to exit
 | 
			
		||||
 *
 | 
			
		||||
 * Once a percpu_ref is initialized, its destruction is initiated by
 | 
			
		||||
 * percpu_ref_kill() and completes asynchronously, which can be painful to
 | 
			
		||||
 * do when destroying a half-constructed object in init failure path.
 | 
			
		||||
 *
 | 
			
		||||
 * This function destroys @ref without invoking @ref->release and the
 | 
			
		||||
 * memory area containing it can be freed immediately on return.  To
 | 
			
		||||
 * prevent accidental misuse, it's required that @ref has finished
 | 
			
		||||
 * percpu_ref_init(), whether successful or not, but never used.
 | 
			
		||||
 *
 | 
			
		||||
 * The weird name and usage restriction are to prevent people from using
 | 
			
		||||
 * this function by mistake for normal shutdown instead of
 | 
			
		||||
 * percpu_ref_kill().
 | 
			
		||||
 * This function exits @ref.  The caller is responsible for ensuring that
 | 
			
		||||
 * @ref is no longer in active use.  The usual places to invoke this
 | 
			
		||||
 * function from are the @ref->release() callback or in init failure path
 | 
			
		||||
 * where percpu_ref_init() succeeded but other parts of the initialization
 | 
			
		||||
 * of the embedding object failed.
 | 
			
		||||
 */
 | 
			
		||||
void percpu_ref_cancel_init(struct percpu_ref *ref)
 | 
			
		||||
void percpu_ref_exit(struct percpu_ref *ref)
 | 
			
		||||
{
 | 
			
		||||
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 | 
			
		||||
	int cpu;
 | 
			
		||||
 | 
			
		||||
	WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);
 | 
			
		||||
 | 
			
		||||
	if (pcpu_count) {
 | 
			
		||||
		for_each_possible_cpu(cpu)
 | 
			
		||||
			WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
 | 
			
		||||
		free_percpu(pcpu_count);
 | 
			
		||||
		ref->pcpu_count_ptr = PCPU_REF_DEAD;
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
 | 
			
		||||
EXPORT_SYMBOL_GPL(percpu_ref_exit);
 | 
			
		||||
 | 
			
		||||
static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 | 
			
		||||
{
 | 
			
		||||
| 
						 | 
				
			
			@ -102,8 +91,6 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 | 
			
		|||
	for_each_possible_cpu(cpu)
 | 
			
		||||
		count += *per_cpu_ptr(pcpu_count, cpu);
 | 
			
		||||
 | 
			
		||||
	free_percpu(pcpu_count);
 | 
			
		||||
 | 
			
		||||
	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue