mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	net: avoid one atomic operation in skb_clone()
Fast clone cloning can actually avoid an atomic_inc(), if we guarantee prior clone_ref value is 1. This requires a change to kfree_skbmem(), to perform the atomic_dec_and_test() on clone_ref before setting fclone to SKB_FCLONE_UNAVAILABLE. Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
		
							parent
							
								
									e500f488c2
								
							
						
					
					
						commit
						ce1a4ea3f1
					
				
					 1 changed file with 17 additions and 6 deletions
				
			
		| 
						 | 
					@ -541,13 +541,20 @@ static void kfree_skbmem(struct sk_buff *skb)
 | 
				
			||||||
	case SKB_FCLONE_CLONE:
 | 
						case SKB_FCLONE_CLONE:
 | 
				
			||||||
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 | 
							fclones = container_of(skb, struct sk_buff_fclones, skb2);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
							/* Warning : We must perform the atomic_dec_and_test() before
 | 
				
			||||||
 | 
							 * setting skb->fclone back to SKB_FCLONE_UNAVAILABLE, otherwise
 | 
				
			||||||
 | 
							 * skb_clone() could set clone_ref to 2 before our decrement.
 | 
				
			||||||
 | 
							 * Anyway, if we are going to free the structure, no need to
 | 
				
			||||||
 | 
							 * rewrite skb->fclone.
 | 
				
			||||||
 | 
							 */
 | 
				
			||||||
 | 
							if (atomic_dec_and_test(&fclones->fclone_ref)) {
 | 
				
			||||||
 | 
								kmem_cache_free(skbuff_fclone_cache, fclones);
 | 
				
			||||||
 | 
							} else {
 | 
				
			||||||
			/* The clone portion is available for
 | 
								/* The clone portion is available for
 | 
				
			||||||
			 * fast-cloning again.
 | 
								 * fast-cloning again.
 | 
				
			||||||
			 */
 | 
								 */
 | 
				
			||||||
			skb->fclone = SKB_FCLONE_UNAVAILABLE;
 | 
								skb->fclone = SKB_FCLONE_UNAVAILABLE;
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
		if (atomic_dec_and_test(&fclones->fclone_ref))
 | 
					 | 
				
			||||||
			kmem_cache_free(skbuff_fclone_cache, fclones);
 | 
					 | 
				
			||||||
		break;
 | 
							break;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
| 
						 | 
					@ -869,7 +876,11 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 | 
				
			||||||
	if (skb->fclone == SKB_FCLONE_ORIG &&
 | 
						if (skb->fclone == SKB_FCLONE_ORIG &&
 | 
				
			||||||
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
 | 
						    n->fclone == SKB_FCLONE_UNAVAILABLE) {
 | 
				
			||||||
		n->fclone = SKB_FCLONE_CLONE;
 | 
							n->fclone = SKB_FCLONE_CLONE;
 | 
				
			||||||
		atomic_inc(&fclones->fclone_ref);
 | 
							/* As our fastclone was free, clone_ref must be 1 at this point.
 | 
				
			||||||
 | 
							 * We could use atomic_inc() here, but it is faster
 | 
				
			||||||
 | 
							 * to set the final value.
 | 
				
			||||||
 | 
							 */
 | 
				
			||||||
 | 
							atomic_set(&fclones->fclone_ref, 2);
 | 
				
			||||||
	} else {
 | 
						} else {
 | 
				
			||||||
		if (skb_pfmemalloc(skb))
 | 
							if (skb_pfmemalloc(skb))
 | 
				
			||||||
			gfp_mask |= __GFP_MEMALLOC;
 | 
								gfp_mask |= __GFP_MEMALLOC;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue