mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	cacheinfo: Fix shared_cpu_map to handle shared caches at different levels
The cacheinfo sets up the shared_cpu_map by checking whether the caches with the same index are shared between CPUs. However, this will trigger a slab-out-of-bounds access if the CPUs do not have the same cache hierarchy.

Another problem is a mismatched shared_cpu_map when the shared cache does not have the same index between CPUs:

	CPU0	I	D	L3
	index	0	1	2	x
		^	^	^	^
	index	0	1	2	3
	CPU1	I	D	L2	L3

This patch checks whether each cache is shared with all caches on the other CPUs, instead of comparing only the cache at the same index.

Reviewed-by: Pierre Gondois <pierre.gondois@arm.com>
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
Link: https://lore.kernel.org/r/20230117105133.4445-2-yongxuan.wang@sifive.com
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
This commit is contained in:
		
							parent
							
								
									5944ce092b
								
							
						
					
					
						commit
						198102c910
					
				
					 1 changed file with 17 additions and 10 deletions
				
			
		| 
						 | 
					@ -319,7 +319,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 | 
						struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 | 
				
			||||||
	struct cacheinfo *this_leaf, *sib_leaf;
 | 
						struct cacheinfo *this_leaf, *sib_leaf;
 | 
				
			||||||
	unsigned int index;
 | 
						unsigned int index, sib_index;
 | 
				
			||||||
	int ret = 0;
 | 
						int ret = 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (this_cpu_ci->cpu_map_populated)
 | 
						if (this_cpu_ci->cpu_map_populated)
 | 
				
			||||||
| 
						 | 
					@ -347,11 +347,13 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
			if (i == cpu || !sib_cpu_ci->info_list)
 | 
								if (i == cpu || !sib_cpu_ci->info_list)
 | 
				
			||||||
				continue;/* skip if itself or no cacheinfo */
 | 
									continue;/* skip if itself or no cacheinfo */
 | 
				
			||||||
 | 
								for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
 | 
				
			||||||
			sib_leaf = per_cpu_cacheinfo_idx(i, index);
 | 
									sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
 | 
				
			||||||
				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
 | 
									if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
 | 
				
			||||||
					cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
 | 
										cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
 | 
				
			||||||
					cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
 | 
										cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
 | 
				
			||||||
 | 
										break;
 | 
				
			||||||
 | 
									}
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		/* record the maximum cache line size */
 | 
							/* record the maximum cache line size */
 | 
				
			||||||
| 
						 | 
					@ -365,7 +367,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
 | 
				
			||||||
static void cache_shared_cpu_map_remove(unsigned int cpu)
 | 
					static void cache_shared_cpu_map_remove(unsigned int cpu)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct cacheinfo *this_leaf, *sib_leaf;
 | 
						struct cacheinfo *this_leaf, *sib_leaf;
 | 
				
			||||||
	unsigned int sibling, index;
 | 
						unsigned int sibling, index, sib_index;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	for (index = 0; index < cache_leaves(cpu); index++) {
 | 
						for (index = 0; index < cache_leaves(cpu); index++) {
 | 
				
			||||||
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
 | 
							this_leaf = per_cpu_cacheinfo_idx(cpu, index);
 | 
				
			||||||
| 
						 | 
					@ -376,9 +378,14 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
 | 
				
			||||||
			if (sibling == cpu || !sib_cpu_ci->info_list)
 | 
								if (sibling == cpu || !sib_cpu_ci->info_list)
 | 
				
			||||||
				continue;/* skip if itself or no cacheinfo */
 | 
									continue;/* skip if itself or no cacheinfo */
 | 
				
			||||||
 | 
					
 | 
				
			||||||
			sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
 | 
								for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
 | 
				
			||||||
 | 
									sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
 | 
				
			||||||
 | 
									if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
 | 
				
			||||||
					cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
 | 
										cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
 | 
				
			||||||
					cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
 | 
										cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
 | 
				
			||||||
 | 
										break;
 | 
				
			||||||
 | 
									}
 | 
				
			||||||
 | 
								}
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue