mirror of https://github.com/torvalds/linux.git, synced 2025-10-31 16:48:26 +02:00
	sched_ext: Add scx_cgroup_enabled to gate cgroup operations and fix scx_tg_online()
If the BPF scheduler does not implement ops.cgroup_init(), scx_tg_online() didn't set SCX_TG_INITED, which meant that ops.cgroup_exit(), even if implemented, wouldn't be called from scx_tg_offline().

This is because SCX_HAS_OP(cgroup_init) was used to test both whether SCX cgroup operations are enabled and whether ops.cgroup_init() exists. Fix it by introducing a separate bool scx_cgroup_enabled to gate cgroup operations and using SCX_HAS_OP(cgroup_init) only to test whether ops.cgroup_init() exists. Make all cgroup operations consistently use scx_cgroup_enabled to test whether cgroup operations are enabled. scx_cgroup_enabled is added instead of using scx_enabled() to ease planned locking updates.

Signed-off-by: Tejun Heo <tj@kernel.org>
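To make the failure mode concrete, below is a minimal userspace sketch of the flag-gating logic described above. It is not kernel code: the mock_tg struct, the helper names, and the flag values are invented for illustration only. With the old logic, a scheduler that implements ops.cgroup_exit() but not ops.cgroup_init() never gets SCX_TG_INITED set, so the exit callback is silently skipped on offline; with the new gating, the flag is set whenever cgroup operations are enabled.

/*
 * Userspace mock of the SCX_TG_INITED gating bug (illustrative only; names
 * and flag values are invented and do not match kernel/sched/ext internals).
 */
#include <stdbool.h>
#include <stdio.h>

#define TG_ONLINE	(1U << 0)	/* stands in for SCX_TG_ONLINE */
#define TG_INITED	(1U << 1)	/* stands in for SCX_TG_INITED */

struct mock_tg {
	unsigned int flags;
};

/* Old logic: SCX_HAS_OP(cgroup_init) doubled as the "cgroup ops enabled" test. */
static void tg_online_old(struct mock_tg *tg, bool has_cgroup_init)
{
	if (has_cgroup_init)
		tg->flags |= TG_ONLINE | TG_INITED;
	else
		tg->flags |= TG_ONLINE;		/* TG_INITED is never set */
}

/* New logic: scx_cgroup_enabled gates the operation; the init op is optional. */
static void tg_online_new(struct mock_tg *tg, bool cgroup_enabled,
			  bool has_cgroup_init)
{
	if (cgroup_enabled) {
		if (has_cgroup_init)
			;	/* ops.cgroup_init() would be called here */
		tg->flags |= TG_ONLINE | TG_INITED;
	} else {
		tg->flags |= TG_ONLINE;
	}
}

/* ops.cgroup_exit() is only invoked for groups that were marked inited. */
static bool offline_calls_exit(const struct mock_tg *tg, bool has_cgroup_exit)
{
	return has_cgroup_exit && (tg->flags & TG_INITED);
}

int main(void)
{
	struct mock_tg old_tg = { 0 }, new_tg = { 0 };

	/* Scheduler implements ops.cgroup_exit() but not ops.cgroup_init(). */
	tg_online_old(&old_tg, false);
	tg_online_new(&new_tg, true, false);

	printf("old: cgroup_exit() runs on offline? %s\n",
	       offline_calls_exit(&old_tg, true) ? "yes" : "no");	/* no */
	printf("new: cgroup_exit() runs on offline? %s\n",
	       offline_calls_exit(&new_tg, true) ? "yes" : "no");	/* yes */
	return 0;
}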
parent 4269c603cc
commit 568894edbe
1 changed file with 22 additions and 13 deletions
@@ -3706,6 +3706,7 @@ bool scx_can_stop_tick(struct rq *rq)
 #ifdef CONFIG_EXT_GROUP_SCHED
 
 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
+static bool scx_cgroup_enabled;
 static bool cgroup_warned_missing_weight;
 static bool cgroup_warned_missing_idle;
 
@@ -3725,8 +3726,7 @@ static void scx_cgroup_warn_missing_weight(struct task_group *tg)
 
 static void scx_cgroup_warn_missing_idle(struct task_group *tg)
 {
-	if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
-	    cgroup_warned_missing_idle)
+	if (!scx_cgroup_enabled || cgroup_warned_missing_idle)
 		return;
 
 	if (!tg->idle)
@@ -3747,15 +3747,18 @@ int scx_tg_online(struct task_group *tg)
 
 	scx_cgroup_warn_missing_weight(tg);
 
-	if (SCX_HAS_OP(cgroup_init)) {
-		struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
+	if (scx_cgroup_enabled) {
+		if (SCX_HAS_OP(cgroup_init)) {
+			struct scx_cgroup_init_args args =
+				{ .weight = tg->scx_weight };
 
-		ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
-				      tg->css.cgroup, &args);
-		if (!ret)
+			ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
+					      tg->css.cgroup, &args);
+			if (ret)
+				ret = ops_sanitize_err("cgroup_init", ret);
+		}
+		if (ret == 0)
 			tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED;
-		else
-			ret = ops_sanitize_err("cgroup_init", ret);
 	} else {
 		tg->scx_flags |= SCX_TG_ONLINE;
 	}
@@ -3786,7 +3789,7 @@ int scx_cgroup_can_attach(struct cgroup_taskset *tset)
 	/* released in scx_finish/cancel_attach() */
 	percpu_down_read(&scx_cgroup_rwsem);
 
-	if (!scx_enabled())
+	if (!scx_cgroup_enabled)
 		return 0;
 
 	cgroup_taskset_for_each(p, css, tset) {
@@ -3829,7 +3832,7 @@ int scx_cgroup_can_attach(struct cgroup_taskset *tset)
 
 void scx_move_task(struct task_struct *p)
 {
-	if (!scx_enabled())
+	if (!scx_cgroup_enabled)
 		return;
 
 	/*
@@ -3865,7 +3868,7 @@ void scx_cgroup_cancel_attach(struct cgroup_taskset *tset)
 	struct cgroup_subsys_state *css;
 	struct task_struct *p;
 
-	if (!scx_enabled())
+	if (!scx_cgroup_enabled)
 		goto out_unlock;
 
 	cgroup_taskset_for_each(p, css, tset) {
@@ -3882,7 +3885,7 @@ void scx_group_set_weight(struct task_group *tg, unsigned long weight)
 {
 	percpu_down_read(&scx_cgroup_rwsem);
 
-	if (tg->scx_weight != weight) {
+	if (scx_cgroup_enabled && tg->scx_weight != weight) {
 		if (SCX_HAS_OP(cgroup_set_weight))
 			SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_set_weight,
 				    tg_cgrp(tg), weight);
@@ -4054,6 +4057,9 @@ static void scx_cgroup_exit(void)
 
 	percpu_rwsem_assert_held(&scx_cgroup_rwsem);
 
+	WARN_ON_ONCE(!scx_cgroup_enabled);
+	scx_cgroup_enabled = false;
+
 	/*
 	 * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
 	 * cgroups and exit all the inited ones, all online cgroups are exited.
@@ -4129,6 +4135,9 @@ static int scx_cgroup_init(void)
 	}
 	rcu_read_unlock();
 
+	WARN_ON_ONCE(scx_cgroup_enabled);
+	scx_cgroup_enabled = true;
+
 	return 0;
 }
 