	async: Split async_schedule_node_domain()
In preparation for subsequent changes, split async_schedule_node_domain() in two pieces so as to allow the bottom part of it to be called from a somewhat different code path.

No functional impact.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
Tested-by: Youngmin Nam <youngmin.nam@samsung.com>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
commit 6aa09a5bcc
parent dadce3fbaf

1 changed file with 34 additions and 22 deletions
kernel/async.c

@@ -145,6 +145,39 @@ static void async_run_entry_fn(struct work_struct *work)
 	wake_up(&async_done);
 }
 
+static async_cookie_t __async_schedule_node_domain(async_func_t func,
+						   void *data, int node,
+						   struct async_domain *domain,
+						   struct async_entry *entry)
+{
+	async_cookie_t newcookie;
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&entry->domain_list);
+	INIT_LIST_HEAD(&entry->global_list);
+	INIT_WORK(&entry->work, async_run_entry_fn);
+	entry->func = func;
+	entry->data = data;
+	entry->domain = domain;
+
+	spin_lock_irqsave(&async_lock, flags);
+
+	/* allocate cookie and queue */
+	newcookie = entry->cookie = next_cookie++;
+
+	list_add_tail(&entry->domain_list, &domain->pending);
+	if (domain->registered)
+		list_add_tail(&entry->global_list, &async_global_pending);
+
+	atomic_inc(&entry_count);
+	spin_unlock_irqrestore(&async_lock, flags);
+
+	/* schedule for execution */
+	queue_work_node(node, system_unbound_wq, &entry->work);
+
+	return newcookie;
+}
+
 /**
  * async_schedule_node_domain - NUMA specific version of async_schedule_domain
  * @func: function to execute asynchronously
@@ -186,29 +219,8 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
 		func(data, newcookie);
 		return newcookie;
 	}
-	INIT_LIST_HEAD(&entry->domain_list);
-	INIT_LIST_HEAD(&entry->global_list);
-	INIT_WORK(&entry->work, async_run_entry_fn);
-	entry->func = func;
-	entry->data = data;
-	entry->domain = domain;
-
-	spin_lock_irqsave(&async_lock, flags);
-
-	/* allocate cookie and queue */
-	newcookie = entry->cookie = next_cookie++;
-
-	list_add_tail(&entry->domain_list, &domain->pending);
-	if (domain->registered)
-		list_add_tail(&entry->global_list, &async_global_pending);
-
-	atomic_inc(&entry_count);
-	spin_unlock_irqrestore(&async_lock, flags);
-
-	/* schedule for execution */
-	queue_work_node(node, system_unbound_wq, &entry->work);
-
-	return newcookie;
+
+	return __async_schedule_node_domain(func, data, node, domain, entry);
 }
 EXPORT_SYMBOL_GPL(async_schedule_node_domain);
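
Note: the split makes the locking, cookie allocation and queuing logic in __async_schedule_node_domain() reachable from code that allocates the struct async_entry itself and wants a different fallback than running func() synchronously. The sketch below only illustrates that shape and is not part of this commit; the function name async_schedule_dev_sketch(), the GFP_KERNEL allocation and the bool return convention are assumptions, and it is written as if it lived in kernel/async.c next to the existing helpers.

/*
 * Illustrative sketch only -- not part of this commit. A hypothetical
 * caller in kernel/async.c that pre-allocates the entry and reports
 * failure instead of falling back to synchronous execution.
 */
static bool async_schedule_dev_sketch(async_func_t func, struct device *dev)
{
	struct async_entry *entry;

	entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);

	/* Too little memory or too much pending work: let the caller decide. */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		return false;
	}

	/* The bottom half added by this commit does the actual queuing. */
	__async_schedule_node_domain(func, dev, dev_to_node(dev),
				     &async_dfl_domain, entry);
	return true;
}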