mirror of
				https://github.com/torvalds/linux.git
				synced 2025-10-31 16:48:26 +02:00 
			
		
		
		
	sched: Make multiple runqueue task counters 32-bit
Make the following per-runqueue task counters 32-bit: struct dl_rq::dl_nr_migratory, struct dl_rq::dl_nr_running, struct rt_rq::rt_nr_boosted, struct rt_rq::rt_nr_migratory, struct rt_rq::rt_nr_total, and struct rq::nr_uninterruptible. If the total number of tasks can't exceed 2**32 (and it is less in practice due to futex pid limits), then per-runqueue counters can't either. This patchset has been sponsored by REX Prefix Eradication Society. Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com> Signed-off-by: Ingo Molnar <mingo@kernel.org> Link: https://lore.kernel.org/r/20210422200228.1423391-4-adobriyan@gmail.com
This commit is contained in:
		
							parent
							
								
									8fc2858e57
								
							
						
					
					
						commit
						e6fe3f422b
					
				
					 2 changed files with 7 additions and 7 deletions
				
			
		|  | @ -81,7 +81,7 @@ long calc_load_fold_active(struct rq *this_rq, long adjust) | ||||||
| 	long nr_active, delta = 0; | 	long nr_active, delta = 0; | ||||||
| 
 | 
 | ||||||
| 	nr_active = this_rq->nr_running - adjust; | 	nr_active = this_rq->nr_running - adjust; | ||||||
| 	nr_active += (long)this_rq->nr_uninterruptible; | 	nr_active += (int)this_rq->nr_uninterruptible; | ||||||
| 
 | 
 | ||||||
| 	if (nr_active != this_rq->calc_load_active) { | 	if (nr_active != this_rq->calc_load_active) { | ||||||
| 		delta = nr_active - this_rq->calc_load_active; | 		delta = nr_active - this_rq->calc_load_active; | ||||||
|  |  | ||||||
|  | @ -636,8 +636,8 @@ struct rt_rq { | ||||||
| 	} highest_prio; | 	} highest_prio; | ||||||
| #endif | #endif | ||||||
| #ifdef CONFIG_SMP | #ifdef CONFIG_SMP | ||||||
| 	unsigned long		rt_nr_migratory; | 	unsigned int		rt_nr_migratory; | ||||||
| 	unsigned long		rt_nr_total; | 	unsigned int		rt_nr_total; | ||||||
| 	int			overloaded; | 	int			overloaded; | ||||||
| 	struct plist_head	pushable_tasks; | 	struct plist_head	pushable_tasks; | ||||||
| 
 | 
 | ||||||
|  | @ -651,7 +651,7 @@ struct rt_rq { | ||||||
| 	raw_spinlock_t		rt_runtime_lock; | 	raw_spinlock_t		rt_runtime_lock; | ||||||
| 
 | 
 | ||||||
| #ifdef CONFIG_RT_GROUP_SCHED | #ifdef CONFIG_RT_GROUP_SCHED | ||||||
| 	unsigned long		rt_nr_boosted; | 	unsigned int		rt_nr_boosted; | ||||||
| 
 | 
 | ||||||
| 	struct rq		*rq; | 	struct rq		*rq; | ||||||
| 	struct task_group	*tg; | 	struct task_group	*tg; | ||||||
|  | @ -668,7 +668,7 @@ struct dl_rq { | ||||||
| 	/* runqueue is an rbtree, ordered by deadline */ | 	/* runqueue is an rbtree, ordered by deadline */ | ||||||
| 	struct rb_root_cached	root; | 	struct rb_root_cached	root; | ||||||
| 
 | 
 | ||||||
| 	unsigned long		dl_nr_running; | 	unsigned int		dl_nr_running; | ||||||
| 
 | 
 | ||||||
| #ifdef CONFIG_SMP | #ifdef CONFIG_SMP | ||||||
| 	/*
 | 	/*
 | ||||||
|  | @ -682,7 +682,7 @@ struct dl_rq { | ||||||
| 		u64		next; | 		u64		next; | ||||||
| 	} earliest_dl; | 	} earliest_dl; | ||||||
| 
 | 
 | ||||||
| 	unsigned long		dl_nr_migratory; | 	unsigned int		dl_nr_migratory; | ||||||
| 	int			overloaded; | 	int			overloaded; | ||||||
| 
 | 
 | ||||||
| 	/*
 | 	/*
 | ||||||
|  | @ -960,7 +960,7 @@ struct rq { | ||||||
| 	 * one CPU and if it got migrated afterwards it may decrease | 	 * one CPU and if it got migrated afterwards it may decrease | ||||||
| 	 * it on another CPU. Always updated under the runqueue lock: | 	 * it on another CPU. Always updated under the runqueue lock: | ||||||
| 	 */ | 	 */ | ||||||
| 	unsigned long		nr_uninterruptible; | 	unsigned int		nr_uninterruptible; | ||||||
| 
 | 
 | ||||||
| 	struct task_struct __rcu	*curr; | 	struct task_struct __rcu	*curr; | ||||||
| 	struct task_struct	*idle; | 	struct task_struct	*idle; | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Alexey Dobriyan
						Alexey Dobriyan