mirror of https://github.com/torvalds/linux.git
rcu: Refactor kvfree_call_rcu() and high-level helpers

Currently kvfree_call_rcu() takes an offset within a structure as its second parameter, so a helper such as kvfree_rcu_arg_2() has to convert the rcu_head and the pointer being freed into an offset in order to pass it. That leads to an extra conversion on macro entry.

Instead of converting, refactor the code so that the pointer to be freed is passed directly to kvfree_call_rcu().

This patch makes no functional change and is transparent to all kvfree_rcu() users.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 1b929c02af
commit 04a522b7da

5 changed files with 24 additions and 33 deletions
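From a caller's point of view nothing changes: the kvfree_rcu() macros keep their existing arguments, only their expansion now passes the object pointer instead of an offset. A minimal sketch of a typical user, assuming an illustrative struct that is not part of this patch:

#include <linux/rcupdate.h>

struct foo {
	int data;
	struct rcu_head rh;	/* embedded head for the double-argument form */
};

static void release_foo(struct foo *p)
{
	/* Double-argument form: after this patch it expands to
	 * kvfree_call_rcu(&p->rh, (void *) p) rather than passing
	 * offsetof(struct foo, rh) as the second argument.
	 */
	kvfree_rcu(p, rh);
}

static void release_foo_headless(struct foo *p)
{
	/* Single-argument (head-less) form: may sleep and expands to
	 * kvfree_call_rcu(NULL, (void *) p).
	 */
	kvfree_rcu(p);
}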
				
			
include/linux/rcupdate.h

@@ -1011,8 +1011,7 @@ do {									\
 									\
 	if (___p) {									\
 		BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf)));	\
-		kvfree_call_rcu(&((___p)->rhf), (rcu_callback_t)(unsigned long)	\
-			(offsetof(typeof(*(ptr)), rhf)));				\
+		kvfree_call_rcu(&((___p)->rhf), (void *) (___p));			\
 	}										\
 } while (0)
 
@@ -1021,7 +1020,7 @@ do {								\
 	typeof(ptr) ___p = (ptr);				\
 								\
 	if (___p)						\
-		kvfree_call_rcu(NULL, (rcu_callback_t) (___p));	\
+		kvfree_call_rcu(NULL, (void *) (___p));		\
 } while (0)
 
 /*
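The old and the new second argument carry the same information: given the address of the embedded rcu_head and the address of the enclosing object, the offset is simply their difference, which is exactly what the callee-side hunks below recompute. A standalone illustration in plain userspace C (the struct is made up for the example; the kernel does the same arithmetic on void * pointers, a GCC extension):

#include <assert.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's struct rcu_head. */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

struct foo {
	long data[4];
	struct rcu_head rh;
};

int main(void)
{
	struct foo f;
	struct rcu_head *head = &f.rh;	/* first argument to kvfree_call_rcu() */
	void *ptr = &f;			/* new second argument: the object itself */

	/* Old second argument: the offset of the rcu_head inside the object. */
	unsigned long offset = offsetof(struct foo, rh);

	/* Either encoding can be recovered from the other plus head. */
	assert((unsigned long)((char *)head - (char *)ptr) == offset);
	assert((void *)((char *)head - offset) == ptr);
	return 0;
}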

include/linux/rcutiny.h

@@ -98,25 +98,25 @@ static inline void synchronize_rcu_expedited(void)
  */
 extern void kvfree(const void *addr);
 
-static inline void __kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
 	if (head) {
-		call_rcu(head, func);
+		call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
 		return;
 	}
 
 	// kvfree_rcu(one_arg) call.
 	might_sleep();
 	synchronize_rcu();
-	kvfree((void *) func);
+	kvfree(ptr);
 }
 
 #ifdef CONFIG_KASAN_GENERIC
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
 #else
-static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
-	__kvfree_call_rcu(head, func);
+	__kvfree_call_rcu(head, ptr);
 }
 #endif
 
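Note the design choice in the rcutiny.h hunk above: for the double-argument case the queued path still hands call_rcu() an offset encoded in the callback slot, it just re-derives that offset as (void *) head - ptr. That keeps the existing reclaim logic working, which tells a genuine callback pointer apart from a small kvfree offset via __is_kvfree_rcu_offset(). A simplified paraphrase of that reclaim-side check (it is not part of this diff and is not the exact kernel code):

/* An "offset" smaller than one page cannot be a valid kernel function
 * address, which is how the real __is_kvfree_rcu_offset() separates the
 * two cases.
 */
static bool is_kvfree_rcu_offset_sketch(unsigned long offset)
{
	return offset < 4096;
}

static void reclaim_sketch(struct rcu_head *head)
{
	unsigned long offset = (unsigned long) head->func;

	if (is_kvfree_rcu_offset_sketch(offset))
		kvfree((void *) head - offset);	/* kvfree_rcu() object: free it */
	else
		head->func(head);		/* ordinary call_rcu() callback */
}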

include/linux/rcutree.h

@@ -33,7 +33,7 @@ static inline void rcu_virt_note_context_switch(void)
 }
 
 void synchronize_rcu_expedited(void);
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
 
 void rcu_barrier(void);
 bool rcu_eqs_special_set(int cpu);

kernel/rcu/tiny.c

@@ -246,15 +246,12 @@ bool poll_state_synchronize_rcu(unsigned long oldstate)
 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
 
 #ifdef CONFIG_KASAN_GENERIC
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
-	if (head) {
-		void *ptr = (void *) head - (unsigned long) func;
-
+	if (head)
 		kasan_record_aux_stack_noalloc(ptr);
-	}
 
-	__kvfree_call_rcu(head, func);
+	__kvfree_call_rcu(head, ptr);
 }
 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
 #endif

kernel/rcu/tree.c

@@ -3103,8 +3103,8 @@ static void kfree_rcu_work(struct work_struct *work)
 	 * This list is named "Channel 3".
 	 */
 	for (; head; head = next) {
-		unsigned long offset = (unsigned long)head->func;
-		void *ptr = (void *)head - offset;
+		void *ptr = (void *) head->func;
+		unsigned long offset = (void *) head - ptr;
 
 		next = head->next;
 		debug_rcu_head_unqueue((struct rcu_head *)ptr);
@@ -3342,26 +3342,21 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
  * be free'd in workqueue context. This allows us to: batch requests together to
  * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
  */
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
 	unsigned long flags;
 	struct kfree_rcu_cpu *krcp;
 	bool success;
-	void *ptr;
 
-	if (head) {
-		ptr = (void *) head - (unsigned long) func;
-	} else {
-		/*
-		 * Please note there is a limitation for the head-less
-		 * variant, that is why there is a clear rule for such
-		 * objects: it can be used from might_sleep() context
-		 * only. For other places please embed an rcu_head to
-		 * your data.
-		 */
+	/*
+	 * Please note there is a limitation for the head-less
+	 * variant, that is why there is a clear rule for such
+	 * objects: it can be used from might_sleep() context
+	 * only. For other places please embed an rcu_head to
+	 * your data.
+	 */
+	if (!head)
 		might_sleep();
-		ptr = (unsigned long *) func;
-	}
 
 	// Queue the object but don't yet schedule the batch.
 	if (debug_rcu_head_queue(ptr)) {
@@ -3382,7 +3377,7 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 			// Inline if kvfree_rcu(one_arg) call.
 			goto unlock_return;
 
-		head->func = func;
+		head->func = ptr;
 		head->next = krcp->head;
 		krcp->head = head;
 		success = true;