mirror of https://github.com/torvalds/linux.git
sched: Add RT_GROUP WARN checks for non-root task_groups
With CONFIG_RT_GROUP_SCHED built in but RT_GROUPs disabled at runtime, we expect only the root task_group to exist, and all rt_sched_entity'ies should be queued on the root's rt_rq. If we encounter a non-root RT_GROUP, something went wrong.

Signed-off-by: Michal Koutný <mkoutny@suse.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250310170442.504716-9-mkoutny@suse.com
This commit is contained in:
parent d6809c2f60
commit 87f1fb77d8

1 changed file with 12 additions and 2 deletions
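For context: the rt_group_sched_enabled() predicate that all of the new WARNs rely on is not introduced by this commit; it comes from earlier patches in the same series, which allow RT group scheduling to be disabled at runtime even when CONFIG_RT_GROUP_SCHED is built in. A minimal sketch of the assumed shape of that helper, using the kernel's static-key API (the key name and its default polarity here are assumptions, not quoted from the series):

	#include <linux/jump_label.h>

	/*
	 * Sketch only: assumed definition of the predicate used by the
	 * WARNs below.  The real series defines the key and its boot-time
	 * toggling elsewhere.
	 */
	DECLARE_STATIC_KEY_FALSE(rt_group_sched);

	static inline bool rt_group_sched_enabled(void)
	{
		/* false: RT_GROUP_SCHED is built in but groups are disabled */
		return static_branch_unlikely(&rt_group_sched);
	}

With that predicate in place, every WARN below asserts the same invariant: when RT groups are disabled at runtime, the only task_group the RT scheduler may ever see is &root_task_group.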
kernel/sched/rt.c
@@ -176,11 +176,14 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
+	/* Cannot fold with non-CONFIG_RT_GROUP_SCHED version, layout */
+	WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
 	return rt_rq->rq;
 }
 
 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 {
+	WARN_ON(!rt_group_sched_enabled() && rt_se->rt_rq->tg != &root_task_group);
 	return rt_se->rt_rq;
 }
 
@@ -188,6 +191,7 @@ static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_se->rt_rq;
 
+	WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
 	return rt_rq->rq;
 }
 
@@ -504,8 +508,10 @@ typedef struct task_group *rt_rq_iter_t;
 
 static inline struct task_group *next_task_group(struct task_group *tg)
 {
-	if (!rt_group_sched_enabled())
+	if (!rt_group_sched_enabled()) {
+		WARN_ON(tg != &root_task_group);
 		return NULL;
+	}
 
 	do {
 		tg = list_entry_rcu(tg->list.next,
@@ -2607,8 +2613,9 @@ static int task_is_throttled_rt(struct task_struct *p, int cpu)
 {
 	struct rt_rq *rt_rq;
 
-#ifdef CONFIG_RT_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED // XXX maybe add task_rt_rq(), see also sched_rt_period_rt_rq
 	rt_rq = task_group(p)->rt_rq[cpu];
+	WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
 #else
 	rt_rq = &cpu_rq(cpu)->rt;
 #endif
@@ -2718,6 +2725,9 @@ static int tg_rt_schedulable(struct task_group *tg, void *data)
 		tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
 		return -EBUSY;
 
+	if (WARN_ON(!rt_group_sched_enabled() && tg != &root_task_group))
+		return -EBUSY;
+
 	total = to_ratio(period, runtime);
 
 	/*
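Note on the tg_rt_schedulable() hunk: unlike the other call sites, whose WARNs are observation-only, this one also changes behaviour. WARN_ON(cond) evaluates to the truth value of cond, so a single expression both logs the violation and makes the schedulability check fail for the bogus group:

	/* WARN_ON() returns its condition, so it can report and reject at once */
	if (WARN_ON(!rt_group_sched_enabled() && tg != &root_task_group))
		return -EBUSY;

The other WARNs deliberately leave the return values untouched, so a violated invariant is reported without changing scheduler behaviour.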