c90aeb9482
Make the get_random_once() helper generic enough so that functions in general would only be called once, where one user of this is then net_get_random_once(). The only implementation-specific call is to get_random_bytes(); all the rest of this *_once() facility would otherwise be duplicated among different subsystems. The new DO_ONCE() helper will be used by prandom() later on, but might also be useful for other scenarios/subsystems where a one-time initialization in often-called, possibly fast-path code could occur.

Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
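To illustrate the intended usage pattern described above, here is a minimal sketch of a caller. The subsystem names foo_hashfn() and foo_hash_secret are made up for illustration; only get_random_once(), which this patch builds on top of DO_ONCE(), is part of the facility itself:

#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/once.h>

/* Hypothetical per-subsystem secret, seeded lazily on first use. */
static u32 foo_hash_secret __read_mostly;

static u32 foo_hashfn(const void *data, u32 len)
{
	/* Seeds the secret on the very first call only. Later calls hit a
	 * static-key branch that gets patched out once initialization has
	 * completed, keeping this fast path cheap.
	 */
	get_random_once(&foo_hash_secret, sizeof(foo_hash_secret));

	return jhash(data, len, foo_hash_secret);
}

net_get_random_once(), mentioned in the commit message, then becomes a thin networking-side wrapper around the same get_random_once()/DO_ONCE() machinery instead of carrying its own copy of this logic.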
		
			
				
	
	
		
62 lines · 1.3 KiB · C
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/once.h>
#include <linux/random.h>

struct once_work {
	struct work_struct work;
	struct static_key *key;
};

static void once_deferred(struct work_struct *w)
{
	struct once_work *work;

	work = container_of(w, struct once_work, work);
	BUG_ON(!static_key_enabled(work->key));
	static_key_slow_dec(work->key);
	kfree(work);
}

static void once_disable_jump(struct static_key *key)
{
	struct once_work *w;

	w = kmalloc(sizeof(*w), GFP_ATOMIC);
	if (!w)
		return;

	INIT_WORK(&w->work, once_deferred);
	w->key = key;
	schedule_work(&w->work);
}

static DEFINE_SPINLOCK(once_lock);

bool __do_once_start(bool *done, unsigned long *flags)
	__acquires(once_lock)
{
	spin_lock_irqsave(&once_lock, *flags);
	if (*done) {
		spin_unlock_irqrestore(&once_lock, *flags);
		/* Keep sparse happy by restoring an even lock count on
		 * this lock. In case we return here, we don't call into
		 * __do_once_done but return early in the DO_ONCE() macro.
		 */
		__acquire(once_lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(__do_once_start);

void __do_once_done(bool *done, struct static_key *once_key,
		    unsigned long *flags)
	__releases(once_lock)
{
	*done = true;
	spin_unlock_irqrestore(&once_lock, *flags);
	once_disable_jump(once_key);
}
EXPORT_SYMBOL(__do_once_done);
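For context, the two exported helpers above are only meant to be called from the DO_ONCE() macro that the same patch adds to include/linux/once.h, which is not shown on this page. A rough sketch of its shape, reconstructed from the helpers' contracts (internal identifier names and exact layout are an approximation, not the verbatim header):

#define DO_ONCE(func, ...)						\
	({								\
		bool ___ret = false;					\
		static bool ___done = false;				\
		static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \
		/* Key starts enabled, so callers take the slow path */ \
		if (static_key_true(&___once_key)) {			\
			unsigned long ___flags;				\
			/* Racing first callers serialize on once_lock */ \
			___ret = __do_once_start(&___done, &___flags);	\
			if (unlikely(___ret)) {				\
				func(__VA_ARGS__);			\
				/* Marks *done, then defers the key flip */ \
				__do_once_done(&___done, &___once_key,	\
					       &___flags);		\
			}						\
		}							\
		___ret;							\
	})

The design choice worth noting is that the static-key disable is pushed to a workqueue (once_disable_jump()/once_deferred() above) rather than done inline: DO_ONCE() may run in atomic or IRQ-disabled context, while static_key_slow_dec() can sleep when it patches the branch. The *done flag taken under once_lock is what guarantees func() runs exactly once even before the key flip has taken effect.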