mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 02:30:34 +02:00)
	sched_ext: Don't hold scx_tasks_lock for too long
While enabling and disabling a BPF scheduler, every task is iterated a couple of times by walking scx_tasks. Except for one, all iterations keep holding scx_tasks_lock. On multi-socket systems under heavy rq lock contention and a high number of threads, this can lead to RCU and other stalls.

The following is triggered on a 2 x AMD EPYC 7642 system (192 logical CPUs) running `stress-ng --workload 150 --workload-threads 10` with >400k idle threads and the RCU stall period reduced to 5s:

  rcu: INFO: rcu_preempt detected stalls on CPUs/tasks:
  rcu:     91-...!: (10 ticks this GP) idle=0754/1/0x4000000000000000 softirq=18204/18206 fqs=17
  rcu:     186-...!: (17 ticks this GP) idle=ec54/1/0x4000000000000000 softirq=25863/25866 fqs=17
  rcu:     (detected by 80, t=10042 jiffies, g=89305, q=33 ncpus=192)
  Sending NMI from CPU 80 to CPUs 91:
  NMI backtrace for cpu 91
  CPU: 91 UID: 0 PID: 284038 Comm: sched_ext_ops_h Kdump: loaded Not tainted 6.12.0-rc2-work-g6bf5681f7ee2-dirty #471
  Hardware name: Supermicro Super Server/H11DSi, BIOS 2.8 12/14/2023
  Sched_ext: simple (disabling+all)
  RIP: 0010:queued_spin_lock_slowpath+0x17b/0x2f0
  Code: 02 c0 10 03 00 83 79 08 00 75 08 f3 90 83 79 08 00 74 f8 48 8b 11 48 85 d2 74 09 0f 0d 0a eb 0a 31 d2 eb 06 31 d2 eb 02 f3 90 <8b> 07 66 85 c0 75 f7 39 d8 75 0d be 01 00 00 00 89 d8 f0 0f b1 37
  RSP: 0018:ffffc9000fadfcb8 EFLAGS: 00000002
  RAX: 0000000001700001 RBX: 0000000001700000 RCX: ffff88bfcaaf10c0
  RDX: 0000000000000000 RSI: 0000000000000101 RDI: ffff88bfca8f0080
  RBP: 0000000001700000 R08: 0000000000000090 R09: ffffffffffffffff
  R10: ffff88a74761b268 R11: 0000000000000000 R12: ffff88a6b6765460
  R13: ffffc9000fadfd60 R14: ffff88bfca8f0080 R15: ffff88bfcaac0000
  FS:  0000000000000000(0000) GS:ffff88bfcaac0000(0000) knlGS:0000000000000000
  CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
  CR2: 00007f5c55f526a0 CR3: 0000000afd474000 CR4: 0000000000350eb0
  Call Trace:
   <NMI>
   </NMI>
   <TASK>
   do_raw_spin_lock+0x9c/0xb0
   task_rq_lock+0x50/0x190
   scx_task_iter_next_locked+0x157/0x170
   scx_ops_disable_workfn+0x2c2/0xbf0
   kthread_worker_fn+0x108/0x2a0
   kthread+0xeb/0x110
   ret_from_fork+0x36/0x40
   ret_from_fork_asm+0x1a/0x30
   </TASK>
  Sending NMI from CPU 80 to CPUs 186:
  NMI backtrace for cpu 186
  CPU: 186 UID: 0 PID: 51248 Comm: fish Kdump: loaded Not tainted 6.12.0-rc2-work-g6bf5681f7ee2-dirty #471

scx_task_iter can safely drop locks while iterating. Make scx_task_iter_next() drop scx_tasks_lock every 32 iterations to avoid stalls.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Vernet <void@manifault.com>
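The fix applies a general pattern: when a long shared list is walked under a spinlock, release and re-acquire the lock every N elements so that other lockers, IPIs, and RCU can make forward progress. Below is a minimal userspace sketch of that pattern, not kernel code: pthread_mutex_t stands in for scx_tasks_lock, sched_yield() for cond_resched(), and ITER_BATCH for SCX_OPS_TASK_ITER_BATCH; all names in it are illustrative.

	/* Batched lock dropping while walking a shared list (userspace sketch). */
	#include <pthread.h>
	#include <sched.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define ITER_BATCH 32		/* mirrors SCX_OPS_TASK_ITER_BATCH */

	struct node {
		struct node *next;
		int id;
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *head;

	/*
	 * Visit every node, dropping list_lock every ITER_BATCH iterations
	 * so threads contending on it are not starved. Resuming from a bare
	 * node pointer across the unlock is only safe because nodes are
	 * never freed here; the kernel iterator instead keeps a cursor node
	 * linked into scx_tasks so the walk can resume safely after
	 * relocking.
	 */
	static void walk_all(void (*visit)(struct node *))
	{
		unsigned int cnt = 0;
		struct node *n;

		pthread_mutex_lock(&list_lock);
		for (n = head; n; n = n->next) {
			if (!(++cnt % ITER_BATCH)) {
				pthread_mutex_unlock(&list_lock);
				sched_yield();	/* roughly cond_resched() */
				pthread_mutex_lock(&list_lock);
			}
			visit(n);
		}
		pthread_mutex_unlock(&list_lock);
	}

	static void print_node(struct node *n)
	{
		printf("node %d\n", n->id);
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 100; i++) {
			struct node *n = malloc(sizeof(*n));

			n->id = i;
			n->next = head;
			head = n;
		}
		walk_all(print_node);
		return 0;
	}

The batch size trades lock-acquisition overhead against worst-case hold time; 32 keeps the common path cheap while bounding how long any single acquisition of the lock lasts.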
This commit is contained in:

parent 967da57832
commit b07996c7ab

1 changed file with 17 additions and 2 deletions
kernel/sched/ext.c:

@@ -18,6 +18,12 @@ enum scx_consts {
 	SCX_EXIT_DUMP_DFL_LEN		= 32768,
 
 	SCX_CPUPERF_ONE			= SCHED_CAPACITY_SCALE,
+
+	/*
+	 * Iterating all tasks may take a while. Periodically drop
+	 * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls.
+	 */
+	SCX_OPS_TASK_ITER_BATCH		= 32,
 };
 
 enum scx_exit_kind {
@@ -1273,6 +1279,7 @@ struct scx_task_iter {
 	struct task_struct		*locked;
 	struct rq			*rq;
 	struct rq_flags			rf;
+	u32				cnt;
 };
 
 /**
@@ -1301,6 +1308,7 @@ static void scx_task_iter_start(struct scx_task_iter *iter)
 	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
 	list_add(&iter->cursor.tasks_node, &scx_tasks);
 	iter->locked = NULL;
+	iter->cnt = 0;
 }
 
 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
@@ -1355,14 +1363,21 @@ static void scx_task_iter_stop(struct scx_task_iter *iter)
  * scx_task_iter_next - Next task
  * @iter: iterator to walk
  *
- * Visit the next task. See scx_task_iter_start() for details.
+ * Visit the next task. See scx_task_iter_start() for details. Locks are dropped
+ * and re-acquired every %SCX_OPS_TASK_ITER_BATCH iterations to avoid causing
+ * stalls by holding scx_tasks_lock for too long.
  */
 static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
 {
 	struct list_head *cursor = &iter->cursor.tasks_node;
 	struct sched_ext_entity *pos;
 
-	lockdep_assert_held(&scx_tasks_lock);
+	if (!(++iter->cnt % SCX_OPS_TASK_ITER_BATCH)) {
+		scx_task_iter_unlock(iter);
+		cpu_relax();
+		cond_resched();
+		scx_task_iter_relock(iter);
+	}
 
 	list_for_each_entry(pos, cursor, tasks_node) {
 		if (&pos->tasks_node == &scx_tasks)
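For context on why the batching lives inside the iterator rather than in its callers: the enable and disable paths all walk tasks through the same small API, roughly as in the sketch below (a simplified illustration of the caller pattern visible in the stall backtrace, e.g. scx_ops_disable_workfn(); not the verbatim kernel code).

	struct scx_task_iter sti;
	struct task_struct *p;

	scx_task_iter_start(&sti);
	while ((p = scx_task_iter_next_locked(&sti))) {
		/*
		 * p's rq lock is held here; e.g. the disable path
		 * switches p back to its original sched class.
		 */
	}
	scx_task_iter_stop(&sti);

Since scx_task_iter_next_locked() is built on scx_task_iter_next(), folding the unlock/cpu_relax()/cond_resched()/relock sequence into scx_task_iter_next() gives every such loop the batching without touching any caller.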