	Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:

====================
pull-request: bpf 2020-03-12

The following pull-request contains BPF updates for your *net* tree.

We've added 12 non-merge commits during the last 8 day(s) which contain
a total of 12 files changed, 161 insertions(+), 15 deletions(-).

The main changes are:

1) Andrii fixed two bugs in cgroup-bpf.

2) John fixed sockmap.

3) Luke fixed x32 jit.

4) Martin fixed two issues in struct_ops.

5) Yonghong fixed bpf_send_signal.

6) Yoshiki fixed BTF enum.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 242a6df688

12 changed files with 161 additions and 15 deletions

(File paths on the per-file headers below are inferred from the hunk contexts and the changelog.)

.mailmap
@@ -225,6 +225,7 @@ Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
 Praveen BP <praveenbp@ti.com>
 Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
 Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
+Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
 Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
 Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
 Rajesh Shah <rajesh.shah@intel.com>

arch/x86/net/bpf_jit_comp32.c
@@ -2039,10 +2039,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			}
 			/* and dreg_lo,sreg_lo */
 			EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
-			/* and dreg_hi,sreg_hi */
-			EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
-			/* or dreg_lo,dreg_hi */
-			EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
+			if (is_jmp64) {
+				/* and dreg_hi,sreg_hi */
+				EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
+				/* or dreg_lo,dreg_hi */
+				EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
+			}
 			goto emit_cond_jmp;
 		}
 		case BPF_JMP | BPF_JSET | BPF_K:
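
This is the x32 jit fix from the changelog: the emitter ANDed the high words and ORed them into the branch condition unconditionally, so BPF_JMP32 | BPF_JSET | BPF_X also branched on bits 32-63. The fix guards the high-word instructions with is_jmp64. A standalone sketch of the semantics the JIT has to preserve (illustrative C, not kernel code):

	#include <stdint.h>
	#include <stdbool.h>

	/* BPF_JMP | BPF_JSET: branch if any of the 64 bits intersect. */
	static bool jset64_taken(uint64_t dst, uint64_t src)
	{
		return (dst & src) != 0;
	}

	/* BPF_JMP32 | BPF_JSET: only the low 32 bits may be tested. */
	static bool jset32_taken(uint64_t dst, uint64_t src)
	{
		return ((uint32_t)dst & (uint32_t)src) != 0;
	}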

kernel/bpf/bpf_struct_ops.c
@@ -482,13 +482,21 @@ static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
 	prev_state = cmpxchg(&st_map->kvalue.state,
 			     BPF_STRUCT_OPS_STATE_INUSE,
 			     BPF_STRUCT_OPS_STATE_TOBEFREE);
-	if (prev_state == BPF_STRUCT_OPS_STATE_INUSE) {
+	switch (prev_state) {
+	case BPF_STRUCT_OPS_STATE_INUSE:
 		st_map->st_ops->unreg(&st_map->kvalue.data);
 		if (refcount_dec_and_test(&st_map->kvalue.refcnt))
 			bpf_map_put(map);
-	}
-
-	return 0;
+		return 0;
+	case BPF_STRUCT_OPS_STATE_TOBEFREE:
+		return -EINPROGRESS;
+	case BPF_STRUCT_OPS_STATE_INIT:
+		return -ENOENT;
+	default:
+		WARN_ON_ONCE(1);
+		/* Should never happen.  Treat it as not found. */
+		return -ENOENT;
+	}
 }
 
 static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
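
The cmpxchg transitions the kvalue state from INUSE to TOBEFREE exactly once, yet the old code returned 0 regardless of which state it found. With the switch, the caller learns what actually happened: INUSE means this call performed the unregistration and succeeded, TOBEFREE means a concurrent delete already started the teardown (-EINPROGRESS), and INIT means nothing was ever registered (-ENOENT).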

kernel/bpf/btf.c
@@ -2418,7 +2418,7 @@ static int btf_enum_check_member(struct btf_verifier_env *env,
 
 	struct_size = struct_type->size;
 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
-	if (struct_size - bytes_offset < sizeof(int)) {
+	if (struct_size - bytes_offset < member_type->size) {
 		btf_verifier_log_member(env, struct_type, member,
 					"Member exceeds struct_size");
 		return -EINVAL;
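
btf_enum_check_member() required sizeof(int) bytes of room after the member's offset, but an enum packed into fewer bytes only needs member_type->size. The selftest added later in this pull encodes exactly the layout the old check rejected; in plain C it corresponds to (illustrative, mirrors the test below):

	enum E { E0, E1 } __attribute__((packed));	/* 1 byte under GCC/Clang */

	struct A {
		char m;		/* offset 0 */
		enum E n;	/* offset 1; fits in the 2-byte struct, yet
				 * 2 - 1 < sizeof(int) failed the old check */
	};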

kernel/bpf/cgroup.c
@@ -227,6 +227,9 @@ int cgroup_bpf_inherit(struct cgroup *cgrp)
 	for (i = 0; i < NR; i++)
 		bpf_prog_array_free(arrays[i]);
 
+	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
+		cgroup_bpf_put(p);
+
 	percpu_ref_exit(&cgrp->bpf.refcnt);
 
 	return -ENOMEM;
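
First of Andrii's two cgroup-bpf fixes: cgroup_bpf_inherit() takes a reference on each ancestor cgroup while building the effective program arrays, and on the out-of-memory error path those references were never dropped. The added loop walks the ancestors again and releases them before returning -ENOMEM.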

@@ -302,8 +305,8 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
 	struct list_head *progs = &cgrp->bpf.progs[type];
 	struct bpf_prog *old_prog = NULL;
-	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
-		*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
+	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
+	struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
 	struct bpf_prog_list *pl, *replace_pl = NULL;
 	enum bpf_cgroup_storage_type stype;
 	int err;
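
Second cgroup-bpf fix: in the old declaration only old_storage had an initializer, so storage[] was left uninitialized and the error path could free garbage pointers out of it. The underlying C subtlety is that an initializer in a multi-declarator declaration binds to a single declarator, not to all of them (illustrative sketch; = {} is the GNU empty initializer the kernel uses):

	int *a[4], *b[4] = {NULL};	/* b[] is all NULL, a[] is indeterminate */
	int *c[4] = {}, *d[4] = {};	/* both fully zero-initialized */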

kernel/bpf/syscall.c
@@ -1510,6 +1510,11 @@ static int map_freeze(const union bpf_attr *attr)
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
+	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
+		fdput(f);
+		return -ENOTSUPP;
+	}
+
 	mutex_lock(&map->freeze_mutex);
 
 	if (map->writecnt) {
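
Martin's second struct_ops fix: a struct_ops map's value keeps being written by the kernel after registration (the kvalue.state transitions shown in the delete_elem hunk above, for example), so freezing such a map would be misleading; map_freeze() now rejects it with -ENOTSUPP, with the fdput(f) balancing the fd reference on the early return.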

kernel/trace/bpf_trace.c
@@ -732,7 +732,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
 	if (unlikely(!nmi_uaccess_okay()))
 		return -EPERM;
 
-	if (in_nmi()) {
+	if (irqs_disabled()) {
 		/* Do an early check on signal validity. Otherwise,
 		 * the error is lost in deferred irq_work.
 		 */
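
Yonghong's fix: signal delivery takes the task's sighand lock, which is not safe when the program fires with interrupts disabled, e.g. from a tracepoint under the scheduler's rq_lock, where it can deadlock. Checking in_nmi() alone missed that case; irqs_disabled() covers both, routing delivery through the deferred irq_work path. That deferral helper in bpf_trace.c looks roughly like this (abridged sketch, not the verbatim source):

	struct send_signal_irq_work {
		struct irq_work irq_work;
		struct task_struct *task;
		u32 sig;
		enum pid_type type;
	};

	static void do_bpf_send_signal(struct irq_work *entry)
	{
		struct send_signal_irq_work *work;

		/* Runs from irq_work context, after the offending lock
		 * has been released, where signaling is safe.
		 */
		work = container_of(entry, struct send_signal_irq_work, irq_work);
		group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
	}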

net/core/sock_map.c
@@ -233,8 +233,11 @@ static void sock_map_free(struct bpf_map *map)
 	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
 	int i;
 
+	/* After the sync no updates or deletes will be in-flight so it
+	 * is safe to walk map and remove entries without risking a race
+	 * in EEXIST update case.
+	 */
 	synchronize_rcu();
-	raw_spin_lock_bh(&stab->lock);
 	for (i = 0; i < stab->map.max_entries; i++) {
 		struct sock **psk = &stab->sks[i];
 		struct sock *sk;
@@ -248,7 +251,6 @@ static void sock_map_free(struct bpf_map *map)
 			release_sock(sk);
 		}
 	}
-	raw_spin_unlock_bh(&stab->lock);
 
 	/* wait for psock readers accessing its map link */
 	synchronize_rcu();
@@ -863,10 +865,13 @@ static void sock_hash_free(struct bpf_map *map)
 	struct hlist_node *node;
 	int i;
 
+	/* After the sync no updates or deletes will be in-flight so it
+	 * is safe to walk map and remove entries without risking a race
+	 * in EEXIST update case.
+	 */
 	synchronize_rcu();
 	for (i = 0; i < htab->buckets_num; i++) {
 		bucket = sock_hash_select_bucket(htab, i);
-		raw_spin_lock_bh(&bucket->lock);
 		hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
 			hlist_del_rcu(&elem->node);
 			lock_sock(elem->sk);
@@ -875,7 +880,6 @@ static void sock_hash_free(struct bpf_map *map)
 			rcu_read_unlock();
 			release_sock(elem->sk);
 		}
-		raw_spin_unlock_bh(&bucket->lock);
 	}
 
 	/* wait for psock readers accessing its map link */
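
John's sockmap fix: the free paths run only after synchronize_rcu(), so no updates or deletes can still be in flight (the added comments document this), which makes stab->lock and bucket->lock unnecessary around the walk. Dropping them also removes a real bug: lock_sock() can sleep and must not be called under raw_spin_lock_bh().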

tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c (new file)
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "test_send_signal_kern.skel.h"
+
+static void sigusr1_handler(int signum)
+{
+}
+
+#define THREAD_COUNT 100
+
+static void *worker(void *p)
+{
+	int i;
+
+	for (i = 0; i < 1000; i++)
+		usleep(1);
+
+	return NULL;
+}
+
+void test_send_signal_sched_switch(void)
+{
+	struct test_send_signal_kern *skel;
+	pthread_t threads[THREAD_COUNT];
+	u32 duration = 0;
+	int i, err;
+
+	signal(SIGUSR1, sigusr1_handler);
+
+	skel = test_send_signal_kern__open_and_load();
+	if (CHECK(!skel, "skel_open_and_load", "skeleton open_and_load failed\n"))
+		return;
+
+	skel->bss->pid = getpid();
+	skel->bss->sig = SIGUSR1;
+
+	err = test_send_signal_kern__attach(skel);
+	if (CHECK(err, "skel_attach", "skeleton attach failed\n"))
+		goto destroy_skel;
+
+	for (i = 0; i < THREAD_COUNT; i++) {
+		err = pthread_create(threads + i, NULL, worker, NULL);
+		if (CHECK(err, "pthread_create", "Error creating thread, %s\n",
+			  strerror(errno)))
+			goto destroy_skel;
+	}
+
+	for (i = 0; i < THREAD_COUNT; i++)
+		pthread_join(threads[i], NULL);
+
destroy_skel:
+	test_send_signal_kern__destroy(skel);
+}
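
The new test spawns 100 threads that do nothing but usleep(), generating a steady stream of sched_switch events while the attached program calls bpf_send_signal() from that tracepoint, i.e. with the rq_lock held and irqs disabled, which is exactly the deadlock scenario the bpf_trace.c change above fixes. Assuming the standard selftest runner, it can be run selectively with something like:

	./test_progs -t send_signal_sched_switch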

tools/testing/selftests/bpf/progs/test_send_signal_kern.c
@@ -31,6 +31,12 @@ int send_signal_tp(void *ctx)
 	return bpf_send_signal_test(ctx);
 }
 
+SEC("tracepoint/sched/sched_switch")
+int send_signal_tp_sched(void *ctx)
+{
+	return bpf_send_signal_test(ctx);
+}
+
 SEC("perf_event")
 int send_signal_perf(void *ctx)
 {
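
The BPF side simply reuses bpf_send_signal_test() from a new sched_switch tracepoint section; the program body is identical to the existing tracepoint and perf_event variants, only the attach point differs.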

tools/testing/selftests/bpf/test_btf.c
@@ -1062,6 +1062,48 @@ static struct btf_raw_test raw_tests[] = {
 	.err_str = "Member exceeds struct_size",
 },
 
+/* Test a member that does not exceed the size of the struct
+ *
+ * enum E {
+ *     E0,
+ *     E1,
+ * };
+ *
+ * struct A {
+ *     char m;
+ *     enum E __attribute__((packed)) n;
+ * };
+ */
+{
+	.descr = "size check test #5",
+	.raw_types = {
+		/* int */			/* [1] */
+		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, sizeof(int)),
+		/* char */			/* [2] */
+		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1),
+		/* enum E { */			/* [3] */
+		BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 1),
+		BTF_ENUM_ENC(NAME_TBD, 0),
+		BTF_ENUM_ENC(NAME_TBD, 1),
+		/* } */
+		/* struct A { */		/* [4] */
+		BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 2),
+		BTF_MEMBER_ENC(NAME_TBD, 2, 0),	/* char m; */
+		BTF_MEMBER_ENC(NAME_TBD, 3, 8),	/* enum E __attribute__((packed)) n; */
+		/* } */
+		BTF_END_RAW,
+	},
+	.str_sec = "\0E\0E0\0E1\0A\0m\0n",
+	.str_sec_size = sizeof("\0E\0E0\0E1\0A\0m\0n"),
+	.map_type = BPF_MAP_TYPE_ARRAY,
+	.map_name = "size_check5_map",
+	.key_size = sizeof(int),
+	.value_size = 2,
+	.key_type_id = 1,
+	.value_type_id = 4,
+	.max_entries = 4,
+},
+
 /* typedef const void * const_void_ptr;
  * struct A {
  *	const_void_ptr m;
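
Here value_size = 2 matches struct A exactly: one byte for char m plus one byte for the packed enum member at bit offset 8. Before the btf.c fix above, loading this BTF failed with "Member exceeds struct_size" because the verifier demanded sizeof(int) bytes for the enum member; since no load-error expectation is set, the test now expects the load to succeed.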

tools/testing/selftests/bpf/verifier/jset.c
@@ -61,6 +61,21 @@
 	},
 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+	"jset32: ignores upper bits",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_LD_IMM64(BPF_REG_7, 0x8000000000000000),
+	BPF_LD_IMM64(BPF_REG_8, 0x8000000000000000),
+	BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+	BPF_EXIT_INSN(),
+	BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 2,
+},
 {
 	"jset32: min/max deduction",
 	.insns = {
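
Walking through "jset32: ignores upper bits": R7 and R8 share only bit 63, so the 64-bit JSET is taken and skips the first exit; the 32-bit JSET then sees 0 & 0 in the low halves and falls through to set R0 = 2. A JIT with the x32 bug above would take the JMP32 branch as well, skipping the final move and returning 0 instead of the expected retval of 2.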