Mirror of https://github.com/torvalds/linux.git (synced 2025-11-03 18:20:25 +02:00)
sched/topology: Rename 'DIE' domain to 'PKG'
While reworking the x86 topology code Thomas tripped over creating a 'DIE'
domain for the package mask. :-)

Since these names are CONFIG_SCHED_DEBUG=y only, rename them to make the
name less ambiguous.

[ Shrikanth Hegde: rename on s390 as well. ]
[ Valentin Schneider: also rename it in the comments. ]
[ mingo: port to recent kernels & find all remaining occurrences. ]

Reported-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Valentin Schneider <vschneid@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Acked-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20230712141056.GI3100107@hirez.programming.kicks-ass.net
parent 3657680f38
commit f577cd57bf

5 changed files with 10 additions and 10 deletions
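For context on the changelog's point that these names are CONFIG_SCHED_DEBUG=y only: a scheduler topology level gets its name through the SD_INIT_NAME() macro, which only populates the ->name field when scheduler debugging is enabled. A minimal sketch of the relevant definitions, paraphrased from include/linux/sched/topology.h as it looks around the time of this commit (field layout and comments are illustrative, not authoritative):

	/* include/linux/sched/topology.h (abridged sketch) */

	struct sched_domain_topology_level {
		sched_domain_mask_f	 mask;		/* cpumask for this level, e.g. cpu_cpu_mask */
		sched_domain_flags_f	 sd_flags;	/* optional per-level SD_* flags callback */
		int			 flags;
		int			 numa_level;
		struct sd_data		 data;
	#ifdef CONFIG_SCHED_DEBUG
		char			*name;		/* "SMT", "MC", "PKG", ... -- debug builds only */
	#endif
	};

	#ifdef CONFIG_SCHED_DEBUG
	# define SD_INIT_NAME(type)	.name = #type
	#else
	# define SD_INIT_NAME(type)
	#endif

With debugging enabled, this name is what the scheduler's debugfs domain files report (e.g. /sys/kernel/debug/sched/domains/cpu*/domain*/name on recent kernels), so the package-wide level now reads 'PKG' instead of 'DIE'; without CONFIG_SCHED_DEBUG the field does not exist at all, which is why the rename carries no functional risk.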
arch/powerpc/kernel/smp.c

@@ -1051,7 +1051,7 @@ static struct sched_domain_topology_level powerpc_topology[] = {
 #endif
 	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
 	{ cpu_mc_mask, SD_INIT_NAME(MC) },
-	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
+	{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
 	{ NULL, },
 };
 
@@ -1595,7 +1595,7 @@ static void add_cpu_to_masks(int cpu)
 	/* Skip all CPUs already part of current CPU core mask */
 	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
 
-	/* If chip_id is -1; limit the cpu_core_mask to within DIE*/
+	/* If chip_id is -1; limit the cpu_core_mask to within PKG */
 	if (chip_id == -1)
 		cpumask_and(mask, mask, cpu_cpu_mask(cpu));
 
arch/s390/kernel/topology.c

@@ -522,7 +522,7 @@ static struct sched_domain_topology_level s390_topology[] = {
 	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
 	{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
-	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
+	{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
 	{ NULL, },
 };
 
arch/x86/kernel/smpboot.c

@@ -641,13 +641,13 @@ static void __init build_sched_topology(void)
 	};
 #endif
 	/*
-	 * When there is NUMA topology inside the package skip the DIE domain
+	 * When there is NUMA topology inside the package skip the PKG domain
 	 * since the NUMA domains will auto-magically create the right spanning
 	 * domains based on the SLIT.
 	 */
 	if (!x86_has_numa_in_package) {
 		x86_topology[i++] = (struct sched_domain_topology_level){
-			cpu_cpu_mask, x86_die_flags, SD_INIT_NAME(DIE)
+			cpu_cpu_mask, x86_die_flags, SD_INIT_NAME(PKG)
 		};
 	}
 
kernel/sched/fair.c

@@ -9555,7 +9555,7 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
  * can only do it if @group is an SMT group and has exactly on busy CPU. Larger
  * imbalances in the number of CPUS are dealt with in find_busiest_group().
  *
- * If we are balancing load within an SMT core, or at DIE domain level, always
+ * If we are balancing load within an SMT core, or at PKG domain level, always
  * proceed.
  *
  * Return: true if @env::dst_cpu can do with asym_packing load balance. False
kernel/sched/topology.c

@@ -1119,7 +1119,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
  *
  * - Simultaneous multithreading (SMT)
  * - Multi-Core Cache (MC)
- * - Package (DIE)
+ * - Package (PKG)
  *
  * Where the last one more or less denotes everything up to a NUMA node.
  *
@@ -1141,13 +1141,13 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
  *
  * CPU   0   1   2   3   4   5   6   7
  *
- * DIE  [                             ]
+ * PKG  [                             ]
  * MC   [             ] [             ]
  * SMT  [     ] [     ] [     ] [     ]
  *
  *  - or -
  *
- * DIE  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
+ * PKG  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
  * MC   0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
  * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
  *
@@ -1681,7 +1681,7 @@ static struct sched_domain_topology_level default_topology[] = {
 #ifdef CONFIG_SCHED_MC
 	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 #endif
-	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
+	{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
 	{ NULL, },
 };
 
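Tying the five files together: each table touched above (powerpc_topology, s390_topology, x86_topology and the generic default_topology) is a NULL-terminated array of these topology levels that arch code installs via set_sched_topology(). Below is a minimal sketch of such a table in the post-rename shape; the example_topology name is made up, and the SMT helpers (cpu_smt_mask, cpu_smt_flags) and set_sched_topology() are assumed from the surrounding kernel code rather than taken from this diff.

	#include <linux/sched/topology.h>

	/*
	 * Illustrative sketch only: a topology table shaped like the ones in
	 * this commit. Each level's mask must contain the levels below it; the
	 * list is walked bottom-up (SMT -> MC -> PKG) and ends with an empty
	 * entry.
	 */
	static struct sched_domain_topology_level example_topology[] = {
	#ifdef CONFIG_SCHED_SMT
		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	#endif
	#ifdef CONFIG_SCHED_MC
		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	#endif
		{ cpu_cpu_mask, SD_INIT_NAME(PKG) },	/* package-wide level, formerly 'DIE' */
		{ NULL, },
	};

	/* Arch init code would register it with: set_sched_topology(example_topology); */

Since only the debug name string changes, the only thing that can notice the DIE -> PKG switch is tooling that parses the scheduler's debug domain names.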