mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-03 18:20:25 +02:00 
			
		
		
		
	bpf: Streamline allowed helpers between tracing and base sets
Many conditional checks in switch-case are redundant with bpf_base_func_proto and should be removed. Regarding the permission checks in bpf_base_func_proto: The permission checks in bpf_prog_load (as outlined below) ensure that the tracing program has both CAP_BPF and CAP_PERFMON capabilities, thus enabling the use of corresponding prototypes in bpf_base_func_proto without adverse effects. bpf_prog_load ...... bpf_cap = bpf_token_capable(token, CAP_BPF); ...... if (type != BPF_PROG_TYPE_SOCKET_FILTER && type != BPF_PROG_TYPE_CGROUP_SKB && !bpf_cap) goto put_token; ...... if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON)) goto put_token; ...... Signed-off-by: Feng Yang <yangfeng@kylinos.cn> Signed-off-by: Andrii Nakryiko <andrii@kernel.org> Acked-by: Song Liu <song@kernel.org> Link: https://lore.kernel.org/bpf/20250423073151.297103-1-yangfeng59949@163.com
This commit is contained in:
		
							parent
							
								
									53ebef53a6
								
							
						
					
					
						commit
						6aca583f90
					
				
					 1 changed files with 0 additions and 72 deletions
				
			
		| 
						 | 
				
			
			@ -1430,56 +1430,14 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 | 
			
		|||
	const struct bpf_func_proto *func_proto;
 | 
			
		||||
 | 
			
		||||
	switch (func_id) {
 | 
			
		||||
	case BPF_FUNC_map_lookup_elem:
 | 
			
		||||
		return &bpf_map_lookup_elem_proto;
 | 
			
		||||
	case BPF_FUNC_map_update_elem:
 | 
			
		||||
		return &bpf_map_update_elem_proto;
 | 
			
		||||
	case BPF_FUNC_map_delete_elem:
 | 
			
		||||
		return &bpf_map_delete_elem_proto;
 | 
			
		||||
	case BPF_FUNC_map_push_elem:
 | 
			
		||||
		return &bpf_map_push_elem_proto;
 | 
			
		||||
	case BPF_FUNC_map_pop_elem:
 | 
			
		||||
		return &bpf_map_pop_elem_proto;
 | 
			
		||||
	case BPF_FUNC_map_peek_elem:
 | 
			
		||||
		return &bpf_map_peek_elem_proto;
 | 
			
		||||
	case BPF_FUNC_map_lookup_percpu_elem:
 | 
			
		||||
		return &bpf_map_lookup_percpu_elem_proto;
 | 
			
		||||
	case BPF_FUNC_ktime_get_ns:
 | 
			
		||||
		return &bpf_ktime_get_ns_proto;
 | 
			
		||||
	case BPF_FUNC_ktime_get_boot_ns:
 | 
			
		||||
		return &bpf_ktime_get_boot_ns_proto;
 | 
			
		||||
	case BPF_FUNC_tail_call:
 | 
			
		||||
		return &bpf_tail_call_proto;
 | 
			
		||||
	case BPF_FUNC_get_current_task:
 | 
			
		||||
		return &bpf_get_current_task_proto;
 | 
			
		||||
	case BPF_FUNC_get_current_task_btf:
 | 
			
		||||
		return &bpf_get_current_task_btf_proto;
 | 
			
		||||
	case BPF_FUNC_task_pt_regs:
 | 
			
		||||
		return &bpf_task_pt_regs_proto;
 | 
			
		||||
	case BPF_FUNC_get_current_uid_gid:
 | 
			
		||||
		return &bpf_get_current_uid_gid_proto;
 | 
			
		||||
	case BPF_FUNC_get_current_comm:
 | 
			
		||||
		return &bpf_get_current_comm_proto;
 | 
			
		||||
	case BPF_FUNC_trace_printk:
 | 
			
		||||
		return bpf_get_trace_printk_proto();
 | 
			
		||||
	case BPF_FUNC_get_smp_processor_id:
 | 
			
		||||
		return &bpf_get_smp_processor_id_proto;
 | 
			
		||||
	case BPF_FUNC_get_numa_node_id:
 | 
			
		||||
		return &bpf_get_numa_node_id_proto;
 | 
			
		||||
	case BPF_FUNC_perf_event_read:
 | 
			
		||||
		return &bpf_perf_event_read_proto;
 | 
			
		||||
	case BPF_FUNC_get_prandom_u32:
 | 
			
		||||
		return &bpf_get_prandom_u32_proto;
 | 
			
		||||
	case BPF_FUNC_probe_read_user:
 | 
			
		||||
		return &bpf_probe_read_user_proto;
 | 
			
		||||
	case BPF_FUNC_probe_read_kernel:
 | 
			
		||||
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
 | 
			
		||||
		       NULL : &bpf_probe_read_kernel_proto;
 | 
			
		||||
	case BPF_FUNC_probe_read_user_str:
 | 
			
		||||
		return &bpf_probe_read_user_str_proto;
 | 
			
		||||
	case BPF_FUNC_probe_read_kernel_str:
 | 
			
		||||
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
 | 
			
		||||
		       NULL : &bpf_probe_read_kernel_str_proto;
 | 
			
		||||
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 | 
			
		||||
	case BPF_FUNC_probe_read:
 | 
			
		||||
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
 | 
			
		||||
| 
						 | 
				
			
			@ -1489,10 +1447,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 | 
			
		|||
		       NULL : &bpf_probe_read_compat_str_proto;
 | 
			
		||||
#endif
 | 
			
		||||
#ifdef CONFIG_CGROUPS
 | 
			
		||||
	case BPF_FUNC_cgrp_storage_get:
 | 
			
		||||
		return &bpf_cgrp_storage_get_proto;
 | 
			
		||||
	case BPF_FUNC_cgrp_storage_delete:
 | 
			
		||||
		return &bpf_cgrp_storage_delete_proto;
 | 
			
		||||
	case BPF_FUNC_current_task_under_cgroup:
 | 
			
		||||
		return &bpf_current_task_under_cgroup_proto;
 | 
			
		||||
#endif
 | 
			
		||||
| 
						 | 
				
			
			@ -1500,20 +1454,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 | 
			
		|||
		return &bpf_send_signal_proto;
 | 
			
		||||
	case BPF_FUNC_send_signal_thread:
 | 
			
		||||
		return &bpf_send_signal_thread_proto;
 | 
			
		||||
	case BPF_FUNC_perf_event_read_value:
 | 
			
		||||
		return &bpf_perf_event_read_value_proto;
 | 
			
		||||
	case BPF_FUNC_ringbuf_output:
 | 
			
		||||
		return &bpf_ringbuf_output_proto;
 | 
			
		||||
	case BPF_FUNC_ringbuf_reserve:
 | 
			
		||||
		return &bpf_ringbuf_reserve_proto;
 | 
			
		||||
	case BPF_FUNC_ringbuf_submit:
 | 
			
		||||
		return &bpf_ringbuf_submit_proto;
 | 
			
		||||
	case BPF_FUNC_ringbuf_discard:
 | 
			
		||||
		return &bpf_ringbuf_discard_proto;
 | 
			
		||||
	case BPF_FUNC_ringbuf_query:
 | 
			
		||||
		return &bpf_ringbuf_query_proto;
 | 
			
		||||
	case BPF_FUNC_jiffies64:
 | 
			
		||||
		return &bpf_jiffies64_proto;
 | 
			
		||||
	case BPF_FUNC_get_task_stack:
 | 
			
		||||
		return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
 | 
			
		||||
				       : &bpf_get_task_stack_proto;
 | 
			
		||||
| 
						 | 
				
			
			@ -1521,12 +1461,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 | 
			
		|||
		return &bpf_copy_from_user_proto;
 | 
			
		||||
	case BPF_FUNC_copy_from_user_task:
 | 
			
		||||
		return &bpf_copy_from_user_task_proto;
 | 
			
		||||
	case BPF_FUNC_snprintf_btf:
 | 
			
		||||
		return &bpf_snprintf_btf_proto;
 | 
			
		||||
	case BPF_FUNC_per_cpu_ptr:
 | 
			
		||||
		return &bpf_per_cpu_ptr_proto;
 | 
			
		||||
	case BPF_FUNC_this_cpu_ptr:
 | 
			
		||||
		return &bpf_this_cpu_ptr_proto;
 | 
			
		||||
	case BPF_FUNC_task_storage_get:
 | 
			
		||||
		if (bpf_prog_check_recur(prog))
 | 
			
		||||
			return &bpf_task_storage_get_recur_proto;
 | 
			
		||||
| 
						 | 
				
			
			@ -1535,18 +1469,12 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 | 
			
		|||
		if (bpf_prog_check_recur(prog))
 | 
			
		||||
			return &bpf_task_storage_delete_recur_proto;
 | 
			
		||||
		return &bpf_task_storage_delete_proto;
 | 
			
		||||
	case BPF_FUNC_for_each_map_elem:
 | 
			
		||||
		return &bpf_for_each_map_elem_proto;
 | 
			
		||||
	case BPF_FUNC_snprintf:
 | 
			
		||||
		return &bpf_snprintf_proto;
 | 
			
		||||
	case BPF_FUNC_get_func_ip:
 | 
			
		||||
		return &bpf_get_func_ip_proto_tracing;
 | 
			
		||||
	case BPF_FUNC_get_branch_snapshot:
 | 
			
		||||
		return &bpf_get_branch_snapshot_proto;
 | 
			
		||||
	case BPF_FUNC_find_vma:
 | 
			
		||||
		return &bpf_find_vma_proto;
 | 
			
		||||
	case BPF_FUNC_trace_vprintk:
 | 
			
		||||
		return bpf_get_trace_vprintk_proto();
 | 
			
		||||
	default:
 | 
			
		||||
		break;
 | 
			
		||||
	}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue