mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	scx_move_task() is called from sched_move_task() and tells the BPF scheduler
that cgroup migration is being committed. sched_move_task() is used by both
cgroup and autogroup migrations and scx_move_task() tried to filter out
autogroup migrations by testing the destination cgroup and PF_EXITING but
this is not enough. In fact, without explicitly tagging the thread which is
doing the cgroup migration, there is no good way to tell apart
scx_move_task() invocations for a racing migration to the root cgroup and an
autogroup migration.
This led to scx_move_task() incorrectly ignoring a migration from non-root
cgroup to an autogroup of the root cgroup triggering the following warning:
  WARNING: CPU: 7 PID: 1 at kernel/sched/ext.c:3725 scx_cgroup_can_attach+0x196/0x340
  ...
  Call Trace:
  <TASK>
    cgroup_migrate_execute+0x5b1/0x700
    cgroup_attach_task+0x296/0x400
    __cgroup_procs_write+0x128/0x140
    cgroup_procs_write+0x17/0x30
    kernfs_fop_write_iter+0x141/0x1f0
    vfs_write+0x31d/0x4a0
    __x64_sys_write+0x72/0xf0
    do_syscall_64+0x82/0x160
    entry_SYSCALL_64_after_hwframe+0x76/0x7e
Fix it by adding an argument to sched_move_task() that indicates whether the
moving is for a cgroup or autogroup migration. After the change,
scx_move_task() is called only for cgroup migrations and renamed to
scx_cgroup_move_task().
Link: https://github.com/sched-ext/scx/issues/370
Fixes: 8195136669 ("sched_ext: Add cgroup support")
Cc: stable@vger.kernel.org # v6.12+
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
		
	
			
		
			
				
	
	
		
			91 lines
		
	
	
	
		
			3.5 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			91 lines
		
	
	
	
		
			3.5 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#ifdef CONFIG_SCHED_CLASS_EXT

/* core hooks called from kernel/sched when sched_ext is built in */
void scx_tick(struct rq *rq);
void init_scx_entity(struct sched_ext_entity *scx);
void scx_pre_fork(struct task_struct *p);
int scx_fork(struct task_struct *p);
void scx_post_fork(struct task_struct *p);
void scx_cancel_fork(struct task_struct *p);
bool scx_can_stop_tick(struct rq *rq);
void scx_rq_activate(struct rq *rq);
void scx_rq_deactivate(struct rq *rq);
int scx_check_setscheduler(struct task_struct *p, int policy);
bool task_should_scx(int policy);
void init_sched_ext_class(void);
						|
/*
 * Report the cpuperf target currently requested by the BPF scheduler for
 * @cpu. Reads rq->scx.cpuperf_target when scx is enabled; 0 otherwise.
 */
static inline u32 scx_cpuperf_target(s32 cpu)
{
	return scx_enabled() ? cpu_rq(cpu)->scx.cpuperf_target : 0;
}
static inline bool task_on_scx(const struct task_struct *p)
 | 
						|
{
 | 
						|
	return scx_enabled() && p->sched_class == &ext_sched_class;
 | 
						|
}
 | 
						|
 | 
						|
#ifdef CONFIG_SCHED_CORE
/* core-sched priority comparison hook for scx tasks */
bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
		   bool in_fi);
#endif
#else	/* CONFIG_SCHED_CLASS_EXT */
 | 
						|
 | 
						|
static inline void scx_tick(struct rq *rq) {}
 | 
						|
static inline void scx_pre_fork(struct task_struct *p) {}
 | 
						|
static inline int scx_fork(struct task_struct *p) { return 0; }
 | 
						|
static inline void scx_post_fork(struct task_struct *p) {}
 | 
						|
static inline void scx_cancel_fork(struct task_struct *p) {}
 | 
						|
static inline u32 scx_cpuperf_target(s32 cpu) { return 0; }
 | 
						|
static inline bool scx_can_stop_tick(struct rq *rq) { return true; }
 | 
						|
static inline void scx_rq_activate(struct rq *rq) {}
 | 
						|
static inline void scx_rq_deactivate(struct rq *rq) {}
 | 
						|
static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
 | 
						|
static inline bool task_on_scx(const struct task_struct *p) { return false; }
 | 
						|
static inline void init_sched_ext_class(void) {}
 | 
						|
 | 
						|
#endif	/* CONFIG_SCHED_CLASS_EXT */
 | 
						|
 | 
						|
#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
 | 
						|
void __scx_update_idle(struct rq *rq, bool idle, bool do_notify);
 | 
						|
 | 
						|
static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify)
 | 
						|
{
 | 
						|
	if (scx_enabled())
 | 
						|
		__scx_update_idle(rq, idle, do_notify);
 | 
						|
}
 | 
						|
#else
 | 
						|
static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) {}
 | 
						|
#endif
 | 
						|
 | 
						|
#ifdef CONFIG_CGROUP_SCHED
#ifdef CONFIG_EXT_GROUP_SCHED
/* cgroup hooks; scx_cgroup_move_task() is invoked for cgroup (not autogroup)
 * migrations only — see sched_move_task() */
int scx_tg_online(struct task_group *tg);
void scx_tg_offline(struct task_group *tg);
int scx_cgroup_can_attach(struct cgroup_taskset *tset);
void scx_cgroup_move_task(struct task_struct *p);
void scx_cgroup_finish_attach(void);
void scx_cgroup_cancel_attach(struct cgroup_taskset *tset);
void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
void scx_group_set_idle(struct task_group *tg, bool idle);
#else	/* CONFIG_EXT_GROUP_SCHED */
static inline int scx_tg_online(struct task_group *tg) { return 0; }
static inline void scx_tg_offline(struct task_group *tg) {}
static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }
static inline void scx_cgroup_move_task(struct task_struct *p) {}
static inline void scx_cgroup_finish_attach(void) {}
static inline void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) {}
static inline void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight) {}
static inline void scx_group_set_idle(struct task_group *tg, bool idle) {}
#endif	/* CONFIG_EXT_GROUP_SCHED */
#endif	/* CONFIG_CGROUP_SCHED */