mirror of
https://github.com/torvalds/linux.git
synced 2026-01-25 15:03:52 +08:00
smpboot: introduce SDTL_INIT() helper to tidy sched topology setup

Define a small SDTL_INIT(maskfn, flagsfn, name) macro and use it to build the
sched_domain_topology_level array. Purely a cleanup; behaviour is unchanged.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Li Chen <chenl311@chinatelecom.cn>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: K Prateek Nayak <kprateek.nayak@amd.com>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lore.kernel.org/r/20250710105715.66594-2-me@linux.beauty
This commit is contained in:
@@ -1700,28 +1700,23 @@ static void __init build_sched_topology(void)
|
||||
#ifdef CONFIG_SCHED_SMT
|
||||
if (has_big_cores) {
|
||||
pr_info("Big cores detected but using small core scheduling\n");
|
||||
powerpc_topology[i++] = (struct sched_domain_topology_level){
|
||||
smallcore_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
|
||||
};
|
||||
powerpc_topology[i++] =
|
||||
SDTL_INIT(smallcore_smt_mask, powerpc_smt_flags, SMT);
|
||||
} else {
|
||||
powerpc_topology[i++] = (struct sched_domain_topology_level){
|
||||
cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
|
||||
};
|
||||
powerpc_topology[i++] = SDTL_INIT(cpu_smt_mask, powerpc_smt_flags, SMT);
|
||||
}
|
||||
#endif
|
||||
if (shared_caches) {
|
||||
powerpc_topology[i++] = (struct sched_domain_topology_level){
|
||||
shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE)
|
||||
};
|
||||
powerpc_topology[i++] =
|
||||
SDTL_INIT(shared_cache_mask, powerpc_shared_cache_flags, CACHE);
|
||||
}
|
||||
|
||||
if (has_coregroup_support()) {
|
||||
powerpc_topology[i++] = (struct sched_domain_topology_level){
|
||||
cpu_mc_mask, powerpc_shared_proc_flags, SD_INIT_NAME(MC)
|
||||
};
|
||||
powerpc_topology[i++] =
|
||||
SDTL_INIT(cpu_mc_mask, powerpc_shared_proc_flags, MC);
|
||||
}
|
||||
powerpc_topology[i++] = (struct sched_domain_topology_level){
|
||||
cpu_cpu_mask, powerpc_shared_proc_flags, SD_INIT_NAME(PKG)
|
||||
};
|
||||
|
||||
powerpc_topology[i++] = SDTL_INIT(cpu_cpu_mask, powerpc_shared_proc_flags, PKG);
|
||||
|
||||
/* There must be one trailing NULL entry left. */
|
||||
BUG_ON(i >= ARRAY_SIZE(powerpc_topology) - 1);
|
||||
|
||||
@@ -531,11 +531,11 @@ static const struct cpumask *cpu_drawer_mask(int cpu)
|
||||
}
|
||||
|
||||
static struct sched_domain_topology_level s390_topology[] = {
|
||||
{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
|
||||
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
|
||||
{ cpu_book_mask, SD_INIT_NAME(BOOK) },
|
||||
{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
|
||||
{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
|
||||
SDTL_INIT(cpu_thread_mask, cpu_smt_flags, SMT),
|
||||
SDTL_INIT(cpu_coregroup_mask, cpu_core_flags, MC),
|
||||
SDTL_INIT(cpu_book_mask, NULL, BOOK),
|
||||
SDTL_INIT(cpu_drawer_mask, NULL, DRAWER),
|
||||
SDTL_INIT(cpu_cpu_mask, NULL, PKG),
|
||||
{ NULL, },
|
||||
};
|
||||
|
||||
|
||||
@@ -485,35 +485,26 @@ static void __init build_sched_topology(void)
|
||||
int i = 0;
|
||||
|
||||
#ifdef CONFIG_SCHED_SMT
|
||||
x86_topology[i++] = (struct sched_domain_topology_level){
|
||||
cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT)
|
||||
};
|
||||
x86_topology[i++] = SDTL_INIT(cpu_smt_mask, cpu_smt_flags, SMT);
|
||||
#endif
|
||||
#ifdef CONFIG_SCHED_CLUSTER
|
||||
x86_topology[i++] = (struct sched_domain_topology_level){
|
||||
cpu_clustergroup_mask, x86_cluster_flags, SD_INIT_NAME(CLS)
|
||||
};
|
||||
x86_topology[i++] = SDTL_INIT(cpu_clustergroup_mask, x86_cluster_flags, CLS);
|
||||
#endif
|
||||
#ifdef CONFIG_SCHED_MC
|
||||
x86_topology[i++] = (struct sched_domain_topology_level){
|
||||
cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC)
|
||||
};
|
||||
x86_topology[i++] = SDTL_INIT(cpu_coregroup_mask, x86_core_flags, MC);
|
||||
#endif
|
||||
/*
|
||||
* When there is NUMA topology inside the package skip the PKG domain
|
||||
* since the NUMA domains will auto-magically create the right spanning
|
||||
* domains based on the SLIT.
|
||||
*/
|
||||
if (!x86_has_numa_in_package) {
|
||||
x86_topology[i++] = (struct sched_domain_topology_level){
|
||||
cpu_cpu_mask, x86_sched_itmt_flags, SD_INIT_NAME(PKG)
|
||||
};
|
||||
}
|
||||
if (!x86_has_numa_in_package)
|
||||
x86_topology[i++] = SDTL_INIT(cpu_cpu_mask, x86_sched_itmt_flags, PKG);
|
||||
|
||||
/*
|
||||
* There must be one trailing NULL entry left.
|
||||
*/
|
||||
BUG_ON(i >= ARRAY_SIZE(x86_topology)-1);
|
||||
BUG_ON(i >= ARRAY_SIZE(x86_topology) - 1);
|
||||
|
||||
set_sched_topology(x86_topology);
|
||||
}
|
||||
|
||||
@@ -196,8 +196,8 @@ struct sched_domain_topology_level {
|
||||
extern void __init set_sched_topology(struct sched_domain_topology_level *tl);
|
||||
extern void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio);
|
||||
|
||||
|
||||
# define SD_INIT_NAME(type) .name = #type
|
||||
#define SDTL_INIT(maskfn, flagsfn, dname) ((struct sched_domain_topology_level) \
|
||||
{ .mask = maskfn, .sd_flags = flagsfn, .name = #dname })
|
||||
|
||||
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
|
||||
extern void rebuild_sched_domains_energy(void);
|
||||
|
||||
@@ -1737,17 +1737,17 @@ sd_init(struct sched_domain_topology_level *tl,
|
||||
*/
|
||||
static struct sched_domain_topology_level default_topology[] = {
|
||||
#ifdef CONFIG_SCHED_SMT
|
||||
{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
|
||||
SDTL_INIT(cpu_smt_mask, cpu_smt_flags, SMT),
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SCHED_CLUSTER
|
||||
{ cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) },
|
||||
SDTL_INIT(cpu_clustergroup_mask, cpu_cluster_flags, CLS),
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SCHED_MC
|
||||
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
|
||||
SDTL_INIT(cpu_coregroup_mask, cpu_core_flags, MC),
|
||||
#endif
|
||||
{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
|
||||
SDTL_INIT(cpu_cpu_mask, NULL, PKG),
|
||||
{ NULL, },
|
||||
};
|
||||
|
||||
@@ -2008,23 +2008,15 @@ void sched_init_numa(int offline_node)
|
||||
/*
|
||||
* Add the NUMA identity distance, aka single NODE.
|
||||
*/
|
||||
tl[i++] = (struct sched_domain_topology_level){
|
||||
.mask = sd_numa_mask,
|
||||
.numa_level = 0,
|
||||
SD_INIT_NAME(NODE)
|
||||
};
|
||||
tl[i++] = SDTL_INIT(sd_numa_mask, NULL, NODE);
|
||||
|
||||
/*
|
||||
* .. and append 'j' levels of NUMA goodness.
|
||||
*/
|
||||
for (j = 1; j < nr_levels; i++, j++) {
|
||||
tl[i] = (struct sched_domain_topology_level){
|
||||
.mask = sd_numa_mask,
|
||||
.sd_flags = cpu_numa_flags,
|
||||
.flags = SDTL_OVERLAP,
|
||||
.numa_level = j,
|
||||
SD_INIT_NAME(NUMA)
|
||||
};
|
||||
tl[i] = SDTL_INIT(sd_numa_mask, cpu_numa_flags, NUMA);
|
||||
tl[i].numa_level = j;
|
||||
tl[i].flags = SDTL_OVERLAP;
|
||||
}
|
||||
|
||||
sched_domain_topology_saved = sched_domain_topology;
|
||||
|
||||
Reference in New Issue
Block a user