	percpu: add __percpu sparse annotations to core kernel subsystems
Add __percpu sparse annotations to core subsystems.

These annotations are to make sparse consider percpu variables to be
in a different address space and warn if accessed without going
through percpu accessors.  This patch doesn't affect normal builds.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: linux-mm@kvack.org
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Dipankar Sarma <dipankar@in.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Eric Biederman <ebiederm@xmission.com>
commit 43cf38eb5c
parent ab386128f2

11 changed files with 22 additions and 20 deletions
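Before the diff, a minimal sketch (illustrative only, not taken from this commit) of what the annotation buys in practice. The struct and function names below are invented; alloc_percpu(), get_cpu_ptr()/put_cpu_ptr(), per_cpu_ptr(), for_each_possible_cpu() and free_percpu() are the standard kernel percpu accessors that the commit message refers to, and they perform the address-space conversion sparse expects, so only a direct dereference of the annotated pointer gets flagged.

/*
 * Illustrative sketch only (not part of this commit); struct and
 * function names are made up.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct foo_stats {
	unsigned long events;
};

struct foo {
	struct foo_stats __percpu *stats;	/* one instance per CPU */
};

static int foo_init(struct foo *f)
{
	f->stats = alloc_percpu(struct foo_stats);
	if (!f->stats)
		return -ENOMEM;
	return 0;
}

static void foo_account_event(struct foo *f)
{
	/*
	 * get_cpu_ptr() disables preemption and yields this CPU's copy;
	 * it also strips the __percpu address space for sparse.
	 */
	struct foo_stats *s = get_cpu_ptr(f->stats);

	s->events++;
	put_cpu_ptr(f->stats);
}

static unsigned long foo_total_events(struct foo *f)
{
	unsigned long sum = 0;
	int cpu;

	/* Summing a statistic is the usual reason to walk every CPU's copy. */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(f->stats, cpu)->events;
	return sum;
}

static void foo_exit(struct foo *f)
{
	free_percpu(f->stats);
}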
				
			
@@ -150,8 +150,8 @@ struct blk_user_trace_setup {
 struct blk_trace {
 	int trace_state;
 	struct rchan *rchan;
-	unsigned long *sequence;
-	unsigned char *msg_data;
+	unsigned long __percpu *sequence;
+	unsigned char __percpu *msg_data;
 	u16 act_mask;
 	u64 start_lba;
 	u64 end_lba;
@@ -101,7 +101,7 @@ struct hd_struct {
 	unsigned long stamp;
 	int in_flight[2];
 #ifdef	CONFIG_SMP
-	struct disk_stats *dkstats;
+	struct disk_stats __percpu *dkstats;
 #else
 	struct disk_stats dkstats;
 #endif
@@ -199,7 +199,7 @@ extern struct kimage *kexec_crash_image;
  */
 extern struct resource crashk_res;
 typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4];
-extern note_buf_t *crash_notes;
+extern note_buf_t __percpu *crash_notes;
 extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
 extern size_t vmcoreinfo_size;
 extern size_t vmcoreinfo_max_size;
@@ -301,7 +301,7 @@ struct zone {
 	unsigned long		min_unmapped_pages;
 	unsigned long		min_slab_pages;
 #endif
-	struct per_cpu_pageset	*pageset;
+	struct per_cpu_pageset __percpu *pageset;
 	/*
 	 * free areas of different sizes
 	 */
@@ -365,7 +365,7 @@ struct module
 
 	struct module_ref {
 		int count;
-	} *refptr;
+	} __percpu *refptr;
 #endif
 
 #ifdef CONFIG_CONSTRUCTORS
@@ -21,7 +21,7 @@ struct percpu_counter {
 #ifdef CONFIG_HOTPLUG_CPU
 	struct list_head list;	/* All percpu_counters are on a list */
 #endif
-	s32 *counters;
+	s32 __percpu *counters;
 };
 
 extern int percpu_counter_batch;
@@ -33,7 +33,7 @@ struct srcu_struct_array {
 
 struct srcu_struct {
 	int completed;
-	struct srcu_struct_array *per_cpu_ref;
+	struct srcu_struct_array __percpu *per_cpu_ref;
 	struct mutex mutex;
 };
 
@@ -41,7 +41,7 @@
 #include <asm/sections.h>
 
 /* Per cpu memory for storing cpu states in case of system crash. */
-note_buf_t* crash_notes;
+note_buf_t __percpu *crash_notes;
 
 /* vmcoreinfo stuff */
 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
@@ -1566,7 +1566,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static __read_mostly unsigned long *update_shares_data;
+static __read_mostly unsigned long __percpu *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -10683,7 +10683,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 struct cpuacct {
 	struct cgroup_subsys_state css;
 	/* cpuusage holds pointer to a u64-type object on every cpu */
-	u64 *cpuusage;
+	u64 __percpu *cpuusage;
 	struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
 	struct cpuacct *parent;
 };
@@ -45,7 +45,7 @@ static int refcount;
 static struct workqueue_struct *stop_machine_wq;
 static struct stop_machine_data active, idle;
 static const struct cpumask *active_cpus;
-static void *stop_machine_work;
+static void __percpu *stop_machine_work;
 
 static void set_state(enum stopmachine_state newstate)
 {
							
								
								
									
mm/percpu.c (18 changed lines)
@@ -80,13 +80,15 @@
 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
 #ifndef __addr_to_pcpu_ptr
 #define __addr_to_pcpu_ptr(addr)					\
-	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
-		 + (unsigned long)__per_cpu_start)
+	(void __percpu *)((unsigned long)(addr) -			\
+			  (unsigned long)pcpu_base_addr	+		\
+			  (unsigned long)__per_cpu_start)
 #endif
 #ifndef __pcpu_ptr_to_addr
 #define __pcpu_ptr_to_addr(ptr)						\
-	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
-		 - (unsigned long)__per_cpu_start)
+	(void __force *)((unsigned long)(ptr) +				\
+			 (unsigned long)pcpu_base_addr -		\
+			 (unsigned long)__per_cpu_start)
 #endif
 
 struct pcpu_chunk {
@@ -1065,7 +1067,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-static void *pcpu_alloc(size_t size, size_t align, bool reserved)
+static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 {
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
@@ -1194,7 +1196,7 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_percpu(size_t size, size_t align)
+void __percpu *__alloc_percpu(size_t size, size_t align)
 {
 	return pcpu_alloc(size, align, false);
 }
@@ -1215,7 +1217,7 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_reserved_percpu(size_t size, size_t align)
+void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 {
 	return pcpu_alloc(size, align, true);
 }
@@ -1267,7 +1269,7 @@ static void pcpu_reclaim(struct work_struct *work)
  * CONTEXT:
  * Can be called from atomic context.
  */
-void free_percpu(void *ptr)
+void free_percpu(void __percpu *ptr)
 {
 	void *addr;
 	struct pcpu_chunk *chunk;
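For background on the mm/percpu.c hunks above: __addr_to_pcpu_ptr() now casts to void __percpu * because it manufactures a percpu pointer from a linear address, while __pcpu_ptr_to_addr() uses a __force cast because it deliberately crosses back out of the percpu address space. The sketch below shows what the annotation means to sparse; it is an assumption-level simplification of the checker-only definitions in include/linux/compiler.h and is not part of this patch.

/*
 * Simplified sketch, assumed to approximate the checker-only definitions
 * in include/linux/compiler.h.  In a normal build both macros are empty,
 * which is why this patch doesn't affect normal builds.
 */
#ifdef __CHECKER__
# define __percpu	__attribute__((noderef, address_space(3)))
# define __force	__attribute__((force))
#else
# define __percpu
# define __force
#endif

extern unsigned long __percpu *sequence;	/* hypothetical annotated pointer */

unsigned long bad(void)
{
	return *sequence;	/* sparse: "dereference of noderef expression" */
}

unsigned long ok(void)
{
	/*
	 * A __force cast silences sparse; the real percpu accessors combine
	 * such a cast with per-CPU offset arithmetic (as __pcpu_ptr_to_addr()
	 * does in the hunk above), so ordinary code never does this by hand.
	 */
	return *(unsigned long __force *)sequence;
}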