forked from mirrors/linux
		
	net_sched: fix an OOB access in cls_tcindex
As Eric noticed, tcindex_alloc_perfect_hash() uses cp->hash
to compute the size of the memory allocation, but cp->hash is
set again after the allocation, which caused an out-of-bounds
access.
So we have to move all cp->hash initialization and computation
before the memory allocation. Move cp->mask and cp->shift together,
as cp->hash may need them for the computation too.
Reported-and-tested-by: syzbot+35d4dea36c387813ed31@syzkaller.appspotmail.com
Fixes: 331b72922c ("net: sched: RCU cls_tcindex")
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Cc: Jiri Pirko <jiri@resnulli.us>
Cc: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
			
			
This commit is contained in:
		
							parent
							
								
									83b4304530
								
							
						
					
					
						commit
						599be01ee5
					
				
					 1 changed file with 20 additions and 20 deletions
				
			
		| 
						 | 
				
			
			@ -333,12 +333,31 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 | 
			
		|||
	cp->fall_through = p->fall_through;
 | 
			
		||||
	cp->tp = tp;
 | 
			
		||||
 | 
			
		||||
	if (tb[TCA_TCINDEX_HASH])
 | 
			
		||||
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
 | 
			
		||||
 | 
			
		||||
	if (tb[TCA_TCINDEX_MASK])
 | 
			
		||||
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
 | 
			
		||||
 | 
			
		||||
	if (tb[TCA_TCINDEX_SHIFT])
 | 
			
		||||
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
 | 
			
		||||
 | 
			
		||||
	if (!cp->hash) {
 | 
			
		||||
		/* Hash not specified, use perfect hash if the upper limit
 | 
			
		||||
		 * of the hashing index is below the threshold.
 | 
			
		||||
		 */
 | 
			
		||||
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
 | 
			
		||||
			cp->hash = (cp->mask >> cp->shift) + 1;
 | 
			
		||||
		else
 | 
			
		||||
			cp->hash = DEFAULT_HASH_SIZE;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (p->perfect) {
 | 
			
		||||
		int i;
 | 
			
		||||
 | 
			
		||||
		if (tcindex_alloc_perfect_hash(net, cp) < 0)
 | 
			
		||||
			goto errout;
 | 
			
		||||
		for (i = 0; i < cp->hash; i++)
 | 
			
		||||
		for (i = 0; i < min(cp->hash, p->hash); i++)
 | 
			
		||||
			cp->perfect[i].res = p->perfect[i].res;
 | 
			
		||||
		balloc = 1;
 | 
			
		||||
	}
 | 
			
		||||
| 
						 | 
				
			
			@ -350,15 +369,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 | 
			
		|||
	if (old_r)
 | 
			
		||||
		cr = r->res;
 | 
			
		||||
 | 
			
		||||
	if (tb[TCA_TCINDEX_HASH])
 | 
			
		||||
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
 | 
			
		||||
 | 
			
		||||
	if (tb[TCA_TCINDEX_MASK])
 | 
			
		||||
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
 | 
			
		||||
 | 
			
		||||
	if (tb[TCA_TCINDEX_SHIFT])
 | 
			
		||||
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
 | 
			
		||||
 | 
			
		||||
	err = -EBUSY;
 | 
			
		||||
 | 
			
		||||
	/* Hash already allocated, make sure that we still meet the
 | 
			
		||||
| 
						 | 
				
			
			@ -376,16 +386,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 | 
			
		|||
	if (tb[TCA_TCINDEX_FALL_THROUGH])
 | 
			
		||||
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
 | 
			
		||||
 | 
			
		||||
	if (!cp->hash) {
 | 
			
		||||
		/* Hash not specified, use perfect hash if the upper limit
 | 
			
		||||
		 * of the hashing index is below the threshold.
 | 
			
		||||
		 */
 | 
			
		||||
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
 | 
			
		||||
			cp->hash = (cp->mask >> cp->shift) + 1;
 | 
			
		||||
		else
 | 
			
		||||
			cp->hash = DEFAULT_HASH_SIZE;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (!cp->perfect && !cp->h)
 | 
			
		||||
		cp->alloc_hash = cp->hash;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue