net: sched: gred: dynamically allocate tc_gred_qopt_offload

The tc_gred_qopt_offload structure has grown too big to be on the
stack for 32-bit architectures after recent changes.

net/sched/sch_gred.c:903:13: error: stack frame size (1180) exceeds limit (1024) in 'gred_destroy' [-Werror,-Wframe-larger-than]
net/sched/sch_gred.c:310:13: error: stack frame size (1212) exceeds limit (1024) in 'gred_offload' [-Werror,-Wframe-larger-than]

Use dynamic allocation per qdisc to avoid this.

Fixes: 50dc9a8572 ("net: sched: Merge Qdisc::bstats and Qdisc::cpu_bstats data types")
Fixes: 67c9e6270f ("net: sched: Protect Qdisc::bstats with u64_stats")
Suggested-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lore.kernel.org/r/20211026100711.nalhttf6mbe6sudx@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
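The change follows a common pattern for structures too large for the kernel stack: allocate the buffer once per object (here, per qdisc) at init time, reuse it on every call, and free it together with the object. A minimal sketch of that pattern, with illustrative names rather than the actual sch_gred.c code:

	#include <linux/slab.h>
	#include <linux/string.h>

	/* Stand-in for tc_gred_qopt_offload: too big for a 1 KiB stack frame. */
	struct big_offload_req {
		char params[1024];
	};

	struct my_sched {
		struct big_offload_req *req;	/* allocated once in init, reused afterwards */
	};

	static int my_init(struct my_sched *p)
	{
		p->req = kzalloc(sizeof(*p->req), GFP_KERNEL);
		if (!p->req)
			return -ENOMEM;
		return 0;
	}

	static void my_offload(struct my_sched *p)
	{
		/* Reset before each use, as gred_offload() now does with memset(). */
		memset(p->req, 0, sizeof(*p->req));
		/* ... fill in p->req and hand it to the driver ... */
	}

	static void my_destroy(struct my_sched *p)
	{
		kfree(p->req);	/* kfree(NULL) is a no-op, so skipping the allocation is safe */
	}

In the actual patch below, the allocation is done only when the device has an ndo_setup_tc callback, which matches the early return in gred_offload(), so the pointer is never dereferenced while NULL.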
parent 4796e2518a
commit f25c0515c5

1 changed file with 30 additions and 20 deletions
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -56,6 +56,7 @@ struct gred_sched {
 	u32		DPs;
 	u32		def;
 	struct red_vars wred_set;
+	struct tc_gred_qopt_offload *opt;
 };
 
 static inline int gred_wred_mode(struct gred_sched *table)
@@ -311,42 +312,43 @@ static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
 {
 	struct gred_sched *table = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
-	struct tc_gred_qopt_offload opt = {
-		.command	= command,
-		.handle		= sch->handle,
-		.parent		= sch->parent,
-	};
+	struct tc_gred_qopt_offload *opt = table->opt;
 
 	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
 		return;
 
+	memset(opt, 0, sizeof(*opt));
+	opt->command = command;
+	opt->handle = sch->handle;
+	opt->parent = sch->parent;
+
 	if (command == TC_GRED_REPLACE) {
 		unsigned int i;
 
-		opt.set.grio_on = gred_rio_mode(table);
-		opt.set.wred_on = gred_wred_mode(table);
-		opt.set.dp_cnt = table->DPs;
-		opt.set.dp_def = table->def;
+		opt->set.grio_on = gred_rio_mode(table);
+		opt->set.wred_on = gred_wred_mode(table);
+		opt->set.dp_cnt = table->DPs;
+		opt->set.dp_def = table->def;
 
 		for (i = 0; i < table->DPs; i++) {
 			struct gred_sched_data *q = table->tab[i];
 
 			if (!q)
 				continue;
-			opt.set.tab[i].present = true;
-			opt.set.tab[i].limit = q->limit;
-			opt.set.tab[i].prio = q->prio;
-			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
-			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
-			opt.set.tab[i].is_ecn = gred_use_ecn(q);
-			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
-			opt.set.tab[i].probability = q->parms.max_P;
-			opt.set.tab[i].backlog = &q->backlog;
+			opt->set.tab[i].present = true;
+			opt->set.tab[i].limit = q->limit;
+			opt->set.tab[i].prio = q->prio;
+			opt->set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
+			opt->set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
+			opt->set.tab[i].is_ecn = gred_use_ecn(q);
+			opt->set.tab[i].is_harddrop = gred_use_harddrop(q);
+			opt->set.tab[i].probability = q->parms.max_P;
+			opt->set.tab[i].backlog = &q->backlog;
 		}
-		opt.set.qstats = &sch->qstats;
+		opt->set.qstats = &sch->qstats;
 	}
 
-	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
+	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, opt);
 }
 
 static int gred_offload_dump_stats(struct Qdisc *sch)
@@ -731,6 +733,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
 static int gred_init(struct Qdisc *sch, struct nlattr *opt,
 		     struct netlink_ext_ack *extack)
 {
+	struct gred_sched *table = qdisc_priv(sch);
 	struct nlattr *tb[TCA_GRED_MAX + 1];
 	int err;
 
@@ -754,6 +757,12 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt,
 		sch->limit = qdisc_dev(sch)->tx_queue_len
 		             * psched_mtu(qdisc_dev(sch));
 
+	if (qdisc_dev(sch)->netdev_ops->ndo_setup_tc) {
+		table->opt = kzalloc(sizeof(*table->opt), GFP_KERNEL);
+		if (!table->opt)
+			return -ENOMEM;
+	}
+
 	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
 }
 
@@ -910,6 +919,7 @@ static void gred_destroy(struct Qdisc *sch)
 			gred_destroy_vq(table->tab[i]);
 	}
 	gred_offload(sch, TC_GRED_DESTROY);
+	kfree(table->opt);
 }
 
 static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
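For context, the offload object built in gred_offload() is handed to the driver through ndo_setup_tc() with TC_SETUP_QDISC_GRED. A hedged sketch of what a driver-side handler might look like; foo_setup_tc and the hardware-programming comments are hypothetical placeholders, and in-tree consumers do considerably more work:

	#include <linux/netdevice.h>
	#include <net/pkt_cls.h>

	/* Hypothetical driver hook; only the dispatch on opt->command is shown. */
	static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
				void *type_data)
	{
		struct tc_gred_qopt_offload *opt;

		if (type != TC_SETUP_QDISC_GRED)
			return -EOPNOTSUPP;

		opt = type_data;
		switch (opt->command) {
		case TC_GRED_REPLACE:
			/* program opt->set.dp_cnt virtual queues from opt->set.tab[] */
			return 0;
		case TC_GRED_DESTROY:
			/* tear down the offloaded qdisc state */
			return 0;
		case TC_GRED_STATS:
			/* fill opt->stats from hardware counters */
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}

Because the qdisc now passes a heap pointer instead of the address of a stack variable, nothing changes for the driver side; the struct layout and call signature are the same, only the caller's storage moved.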