	libbpf: Further decouple feature checking logic from bpf_object

Add a feat_supported() helper that accepts a feature cache instead of a
bpf_object. This allows low-level code in bpf.c to not know or care about
the higher-level concept of a bpf_object, yet still be able to use custom
feature checking in cases where a BPF token might influence the outcome.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20240124022127.2379740-23-andrii@kernel.org
parent ea4d587354
commit d6dd1d4936

3 changed files with 22 additions and 11 deletions
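Before the per-file hunks, the shape of the change in isolation. The following is a minimal standalone sketch, not libbpf code; every type, function, and feature name in it is an invented stand-in. It illustrates the pattern this commit introduces: a feature check that takes an optional cache pointer, falls back to a process-wide cache when given NULL, and probes each feature lazily on first lookup.

#include <stdbool.h>
#include <stdio.h>

/* invented stand-ins mirroring the shape of libbpf's feature cache */
enum feat_result { FEAT_UNKNOWN = 0, FEAT_SUPPORTED, FEAT_MISSING };
enum feat_id { FEAT_A = 0, FEAT_B, FEAT_CNT };

struct feature_cache {
	enum feat_result res[FEAT_CNT];
};

static struct feature_cache global_cache; /* zeroed: everything FEAT_UNKNOWN */

/* stand-in probe; the real code issues a syscall and inspects the result */
static bool probe_feature(enum feat_id id)
{
	return id == FEAT_A;
}

static bool feature_supported(struct feature_cache *cache, enum feat_id id)
{
	/* assume the global cache unless a custom one is provided */
	if (!cache)
		cache = &global_cache;

	/* probe lazily, once per cache, and remember the answer */
	if (cache->res[id] == FEAT_UNKNOWN)
		cache->res[id] = probe_feature(id) ? FEAT_SUPPORTED : FEAT_MISSING;

	return cache->res[id] == FEAT_SUPPORTED;
}

int main(void)
{
	struct feature_cache custom = { .res = { FEAT_UNKNOWN } }; /* e.g. a per-token cache */

	printf("A via global cache: %d\n", feature_supported(NULL, FEAT_A));
	printf("B via custom cache: %d\n", feature_supported(&custom, FEAT_B));
	return 0;
}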
tools/lib/bpf/bpf.c

@@ -146,7 +146,7 @@ int bump_rlimit_memlock(void)
 	struct rlimit rlim;
 
 	/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
-	if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
+	if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT))
 		return 0;
 
 	memlock_bumped = true;
@@ -181,7 +181,7 @@ int bpf_map_create(enum bpf_map_type map_type,
 		return libbpf_err(-EINVAL);
 
 	attr.map_type = map_type;
-	if (map_name && kernel_supports(NULL, FEAT_PROG_NAME))
+	if (map_name && feat_supported(NULL, FEAT_PROG_NAME))
 		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
 	attr.key_size = key_size;
 	attr.value_size = value_size;
@@ -266,7 +266,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
 	attr.kern_version = OPTS_GET(opts, kern_version, 0);
 	attr.prog_token_fd = OPTS_GET(opts, token_fd, 0);
 
-	if (prog_name && kernel_supports(NULL, FEAT_PROG_NAME))
+	if (prog_name && feat_supported(NULL, FEAT_PROG_NAME))
 		libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
 	attr.license = ptr_to_u64(license);
 
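All three bpf.c call sites pass NULL for the cache, so their behavior is unchanged: a NULL argument selects the same global feature cache that kernel_supports(NULL, ...) consulted. What the extra parameter buys, per the commit message, is that this low-level code can later supply a custom cache where a BPF token might influence the probe outcome, without ever referencing a bpf_object.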
tools/lib/bpf/libbpf.c

@@ -5033,17 +5033,14 @@ static struct kern_feature_desc {
 	},
 };
 
-bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
+bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
 {
 	struct kern_feature_desc *feat = &feature_probes[feat_id];
-	struct kern_feature_cache *cache = &feature_cache;
 	int ret;
 
-	if (obj && obj->gen_loader)
-		/* To generate loader program assume the latest kernel
-		 * to avoid doing extra prog_load, map_create syscalls.
-		 */
-		return true;
+	/* assume global feature cache, unless custom one is provided */
+	if (!cache)
+		cache = &feature_cache;
 
 	if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
 		ret = feat->probe();
@@ -5060,6 +5057,17 @@ bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
 	return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
 }
 
+bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
+{
+	if (obj && obj->gen_loader)
+		/* To generate loader program assume the latest kernel
+		 * to avoid doing extra prog_load, map_create syscalls.
+		 */
+		return true;
+
+	return feat_supported(NULL, feat_id);
+}
+
 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
 {
 	struct bpf_map_info map_info;
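After the split, kernel_supports() retains only the bpf_object-specific concern (the gen_loader short-circuit) and delegates everything else to feat_supported() with the global cache. Below is a minimal standalone sketch of that thin-wrapper layering, using invented stand-in types rather than the real libbpf ones.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* invented stand-ins; only the layering matters here */
struct feature_cache { int unused; };
struct object { bool gen_loader; };

/* low-level check: knows about caches, not about objects */
static bool feature_supported(struct feature_cache *cache, int feat_id)
{
	(void)cache;
	(void)feat_id;
	return false;	/* pretend the probe reported "unsupported" */
}

/* high-level wrapper: keeps the object-only special case out of the helper */
static bool object_supports(const struct object *obj, int feat_id)
{
	if (obj && obj->gen_loader)
		/* when generating a loader program, assume the newest kernel */
		return true;

	return feature_supported(NULL, feat_id);
}

int main(void)
{
	struct object gen = { .gen_loader = true };

	printf("gen_loader object: %d\n", object_supports(&gen, 0));
	printf("no object:         %d\n", object_supports(NULL, 0));
	return 0;
}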
tools/lib/bpf/libbpf_internal.h

@@ -361,8 +361,11 @@ enum kern_feature_id {
 	__FEAT_CNT,
 };
 
-int probe_memcg_account(void);
+struct kern_feature_cache;
+bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id);
 bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
+
+int probe_memcg_account(void);
 int bump_rlimit_memlock(void);
 
 int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
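Note that the header adds only a forward declaration, struct kern_feature_cache;, so consumers such as bpf.c can accept and pass cache pointers without seeing the struct layout. A small self-contained illustration of that opaque-pointer idiom follows (names invented; in libbpf the definition lives in a separate translation unit).

#include <stdbool.h>
#include <stdio.h>

/* forward declaration: callers may hold and pass the pointer,
 * but cannot inspect or allocate the struct themselves */
struct cache;
static bool query(struct cache *c, int id);

/* the full definition lives "elsewhere" (in libbpf, another .c file) */
struct cache { bool supported[4]; };
static struct cache the_cache = { { true, false, true, false } };

static bool query(struct cache *c, int id)
{
	if (!c)
		c = &the_cache;
	return c->supported[id];
}

int main(void)
{
	printf("feature 2: %d\n", query(NULL, 2));
	return 0;
}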