mirror of
				https://github.com/torvalds/linux.git
				synced 2025-10-31 16:48:26 +02:00 
			
		
		
		
	bpf: Prepare bpf_prog_put() to be called from irq context.
Currently bpf_prog_put() is called from the task context only. With addition of bpf timers the timer related helpers will start calling bpf_prog_put() from irq-saved region and in rare cases might drop the refcnt to zero. To address this case, first, convert bpf_prog_free_id() to be irq-save (this is similar to bpf_map_free_id), and, second, defer non irq appropriate calls into work queue. For example: bpf_audit_prog() is calling kmalloc and wake_up_interruptible, bpf_prog_kallsyms_del_all()->bpf_ksym_del()->spin_unlock_bh(). They are not safe with irqs disabled. Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Martin KaFai Lau <kafai@fb.com> Acked-by: Andrii Nakryiko <andrii@kernel.org> Acked-by: Toke Høiland-Jørgensen <toke@redhat.com> Link: https://lore.kernel.org/bpf/20210715005417.78572-2-alexei.starovoitov@gmail.com
This commit is contained in:
		
							parent
							
								
									de587d564f
								
							
						
					
					
						commit
						d809e134be
					
				
					 1 changed file with 26 additions and 6 deletions
				
			
		|  | @ -1699,6 +1699,8 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog) | |||
| 
 | ||||
| void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) | ||||
| { | ||||
| 	unsigned long flags; | ||||
| 
 | ||||
| 	/* cBPF to eBPF migrations are currently not in the idr store.
 | ||||
| 	 * Offloaded programs are removed from the store when their device | ||||
| 	 * disappears - even if someone grabs an fd to them they are unusable, | ||||
|  | @ -1708,7 +1710,7 @@ void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) | |||
| 		return; | ||||
| 
 | ||||
| 	if (do_idr_lock) | ||||
| 		spin_lock_bh(&prog_idr_lock); | ||||
| 		spin_lock_irqsave(&prog_idr_lock, flags); | ||||
| 	else | ||||
| 		__acquire(&prog_idr_lock); | ||||
| 
 | ||||
|  | @ -1716,7 +1718,7 @@ void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) | |||
| 	prog->aux->id = 0; | ||||
| 
 | ||||
| 	if (do_idr_lock) | ||||
| 		spin_unlock_bh(&prog_idr_lock); | ||||
| 		spin_unlock_irqrestore(&prog_idr_lock, flags); | ||||
| 	else | ||||
| 		__release(&prog_idr_lock); | ||||
| } | ||||
|  | @ -1752,14 +1754,32 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) | |||
| 	} | ||||
| } | ||||
| 
 | ||||
| static void bpf_prog_put_deferred(struct work_struct *work) | ||||
| { | ||||
| 	struct bpf_prog_aux *aux; | ||||
| 	struct bpf_prog *prog; | ||||
| 
 | ||||
| 	aux = container_of(work, struct bpf_prog_aux, work); | ||||
| 	prog = aux->prog; | ||||
| 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); | ||||
| 	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); | ||||
| 	__bpf_prog_put_noref(prog, true); | ||||
| } | ||||
| 
 | ||||
| static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) | ||||
| { | ||||
| 	if (atomic64_dec_and_test(&prog->aux->refcnt)) { | ||||
| 		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); | ||||
| 		bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); | ||||
| 	struct bpf_prog_aux *aux = prog->aux; | ||||
| 
 | ||||
| 	if (atomic64_dec_and_test(&aux->refcnt)) { | ||||
| 		/* bpf_prog_free_id() must be called first */ | ||||
| 		bpf_prog_free_id(prog, do_idr_lock); | ||||
| 		__bpf_prog_put_noref(prog, true); | ||||
| 
 | ||||
| 		if (in_irq() || irqs_disabled()) { | ||||
| 			INIT_WORK(&aux->work, bpf_prog_put_deferred); | ||||
| 			schedule_work(&aux->work); | ||||
| 		} else { | ||||
| 			bpf_prog_put_deferred(&aux->work); | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
|  |  | |||
		Loading…
	
		Reference in a new issue
	
	 Alexei Starovoitov
						Alexei Starovoitov