mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	net_sched: Flush gso_skb list too during ->change()
Previously, when reducing a qdisc's limit via the ->change() operation, only the main skb queue was trimmed, potentially leaving packets in the gso_skb list. This could result in a NULL pointer dereference when we only check sch->limit against sch->q.qlen. This patch introduces a new helper, qdisc_dequeue_internal(), which ensures both the gso_skb list and the main queue are properly flushed when trimming excess packets. All relevant qdiscs (codel, fq, fq_codel, fq_pie, hhf, pie) are updated to use this helper in their ->change() routines. Fixes: 76e3cc126b ("codel: Controlled Delay AQM") Fixes: 4b549a2ef4 ("fq_codel: Fair Queue Codel AQM") Fixes: afe4fd0624 ("pkt_sched: fq: Fair Queue packet scheduler") Fixes: ec97ecf1eb ("net: sched: add Flow Queue PIE packet scheduler") Fixes: 10239edf86 ("net-qdisc-hhf: Heavy-Hitter Filter (HHF) qdisc") Fixes: d4b36210c2 ("net: pkt_sched: PIE AQM scheme") Reported-by: Will <willsroot@protonmail.com> Reported-by: Savy <savy@syst3mfailure.io> Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
		
							parent
							
								
									6b3ab7f2cb
								
							
						
					
					
						commit
						2d3cbfd6d5
					
				
					 7 changed files with 21 additions and 6 deletions
				
			
		| 
						 | 
				
			
			@ -1031,6 +1031,21 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
 | 
			
		|||
	return skb;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/* Dequeue one packet for ->change() limit-trimming, draining any
 * requeued/peeked packet from the gso_skb list before touching the
 * main queue.  Packets sitting on gso_skb are counted in q.qlen but
 * live outside sch->q, so flushing only sch->q can leave qlen out of
 * sync with the real queue contents.
 *
 * @sch:    qdisc being reconfigured
 * @direct: true for qdiscs whose packets live directly on sch->q
 *          (codel, pie); false for qdiscs with their own ->dequeue()
 *          bookkeeping (fq, fq_codel, fq_pie, hhf)
 *
 * Returns the dequeued skb, or NULL when nothing is queued.  Callers
 * remain responsible for backlog/stats accounting on the returned skb.
 */
static inline struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch, bool direct)
{
	struct sk_buff *held = __skb_dequeue(&sch->gso_skb);

	if (held) {
		/* gso_skb entries are included in q.qlen but are not on
		 * sch->q, so only the counter needs adjusting here.
		 */
		sch->q.qlen--;
		return held;
	}

	return direct ? __qdisc_dequeue_head(&sch->q) : sch->dequeue(sch);
}
 | 
			
		||||
 | 
			
		||||
static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
 | 
			
		||||
{
 | 
			
		||||
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -144,7 +144,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt,
 | 
			
		|||
 | 
			
		||||
	qlen = sch->q.qlen;
 | 
			
		||||
	while (sch->q.qlen > sch->limit) {
 | 
			
		||||
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
 | 
			
		||||
		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
 | 
			
		||||
 | 
			
		||||
		dropped += qdisc_pkt_len(skb);
 | 
			
		||||
		qdisc_qstats_backlog_dec(sch, skb);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1136,7 +1136,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
 | 
			
		|||
		sch_tree_lock(sch);
 | 
			
		||||
	}
 | 
			
		||||
	while (sch->q.qlen > sch->limit) {
 | 
			
		||||
		struct sk_buff *skb = fq_dequeue(sch);
 | 
			
		||||
		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 | 
			
		||||
 | 
			
		||||
		if (!skb)
 | 
			
		||||
			break;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -441,7 +441,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
 | 
			
		|||
 | 
			
		||||
	while (sch->q.qlen > sch->limit ||
 | 
			
		||||
	       q->memory_usage > q->memory_limit) {
 | 
			
		||||
		struct sk_buff *skb = fq_codel_dequeue(sch);
 | 
			
		||||
		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 | 
			
		||||
 | 
			
		||||
		q->cstats.drop_len += qdisc_pkt_len(skb);
 | 
			
		||||
		rtnl_kfree_skbs(skb, skb);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -366,7 +366,7 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
 | 
			
		|||
 | 
			
		||||
	/* Drop excess packets if new limit is lower */
 | 
			
		||||
	while (sch->q.qlen > sch->limit) {
 | 
			
		||||
		struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);
 | 
			
		||||
		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 | 
			
		||||
 | 
			
		||||
		len_dropped += qdisc_pkt_len(skb);
 | 
			
		||||
		num_dropped += 1;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -564,7 +564,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
 | 
			
		|||
	qlen = sch->q.qlen;
 | 
			
		||||
	prev_backlog = sch->qstats.backlog;
 | 
			
		||||
	while (sch->q.qlen > sch->limit) {
 | 
			
		||||
		struct sk_buff *skb = hhf_dequeue(sch);
 | 
			
		||||
		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 | 
			
		||||
 | 
			
		||||
		rtnl_kfree_skbs(skb, skb);
 | 
			
		||||
	}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -195,7 +195,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
 | 
			
		|||
	/* Drop excess packets if new limit is lower */
 | 
			
		||||
	qlen = sch->q.qlen;
 | 
			
		||||
	while (sch->q.qlen > sch->limit) {
 | 
			
		||||
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
 | 
			
		||||
		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
 | 
			
		||||
 | 
			
		||||
		dropped += qdisc_pkt_len(skb);
 | 
			
		||||
		qdisc_qstats_backlog_dec(sch, skb);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue