mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	sched: normalize_rt_tasks(): Don't use _irqsave for tasklist_lock, use task_rq_lock()
1. read_lock(tasklist_lock) does not need to disable irqs.

2. ->mm != NULL is a common mistake, use PF_KTHREAD.

3. The second ->mm check can be simply removed.

4. task_rq_lock() looks better than raw_spin_lock(&p->pi_lock) + __task_rq_lock().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20140921193338.GA28621@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
		
							parent
							
								
									8651c65844
								
							
						
					
					
						commit
						3472eaa1f1
					
				
					 1 changed file with 6 additions and 10 deletions
				
			
		| 
						 | 
					@ -7220,12 +7220,12 @@ void normalize_rt_tasks(void)
 | 
				
			||||||
	unsigned long flags;
 | 
						unsigned long flags;
 | 
				
			||||||
	struct rq *rq;
 | 
						struct rq *rq;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	read_lock_irqsave(&tasklist_lock, flags);
 | 
						read_lock(&tasklist_lock);
 | 
				
			||||||
	for_each_process_thread(g, p) {
 | 
						for_each_process_thread(g, p) {
 | 
				
			||||||
		/*
 | 
							/*
 | 
				
			||||||
		 * Only normalize user tasks:
 | 
							 * Only normalize user tasks:
 | 
				
			||||||
		 */
 | 
							 */
 | 
				
			||||||
		if (!p->mm)
 | 
							if (p->flags & PF_KTHREAD)
 | 
				
			||||||
			continue;
 | 
								continue;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		p->se.exec_start		= 0;
 | 
							p->se.exec_start		= 0;
 | 
				
			||||||
| 
						 | 
					@ -7240,20 +7240,16 @@ void normalize_rt_tasks(void)
 | 
				
			||||||
			 * Renice negative nice level userspace
 | 
								 * Renice negative nice level userspace
 | 
				
			||||||
			 * tasks back to 0:
 | 
								 * tasks back to 0:
 | 
				
			||||||
			 */
 | 
								 */
 | 
				
			||||||
			if (task_nice(p) < 0 && p->mm)
 | 
								if (task_nice(p) < 0)
 | 
				
			||||||
				set_user_nice(p, 0);
 | 
									set_user_nice(p, 0);
 | 
				
			||||||
			continue;
 | 
								continue;
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		raw_spin_lock(&p->pi_lock);
 | 
							rq = task_rq_lock(p, &flags);
 | 
				
			||||||
		rq = __task_rq_lock(p);
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
		normalize_task(rq, p);
 | 
							normalize_task(rq, p);
 | 
				
			||||||
 | 
							task_rq_unlock(rq, p, &flags);
 | 
				
			||||||
		__task_rq_unlock(rq);
 | 
					 | 
				
			||||||
		raw_spin_unlock(&p->pi_lock);
 | 
					 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	read_unlock_irqrestore(&tasklist_lock, flags);
 | 
						read_unlock(&tasklist_lock);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#endif /* CONFIG_MAGIC_SYSRQ */
 | 
					#endif /* CONFIG_MAGIC_SYSRQ */
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue