mirror of https://github.com/torvalds/linux.git
	ipv4: Use synchronize_rcu() during trie_rebalance()
During trie_rebalance() we free memory after resizing with call_rcu(),
but large updates, especially with PREEMPT_NONE configs, can cause
memory stresses, so this patch calls synchronize_rcu() in
tnode_free_flush() after each sync_pages worth of queued memory to
guarantee such freeing (especially before resizing the root node).

The value of sync_pages = 128 is based on Pawel Staszewski's tests as
the lowest which doesn't hinder updating times. (For testing purposes
there was a sysfs module parameter to change it on demand, but it's
removed until we're sure it could be really useful.)

The patch is based on suggestions by Paul E. McKenney
<paulmck@linux.vnet.ibm.com>.

Reported-by: Pawel Staszewski <pstaszewski@itcare.pl>
Tested-by: Pawel Staszewski <pstaszewski@itcare.pl>
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit c3059477fc
parent 2e477c9bd2

1 changed file with 15 additions and 0 deletions
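The pattern the patch applies generalizes beyond fib_trie: when objects are
freed through call_rcu(), track how much memory is pending and force a grace
period with synchronize_rcu() once the backlog crosses a threshold, so the
queued callbacks can run before more memory piles up. Below is a minimal
kernel-style sketch of that pattern, not the patch itself: struct my_obj,
pending_bytes, and the my_obj_* helpers are invented for the example, and the
unlocked counter mirrors the single-writer (RTNL) assumption the real code
relies on. The hunks that follow show the same shape applied in
tnode_free_flush().

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/mm.h>

struct my_obj {
	struct rcu_head rcu;
	/* ... payload ... */
};

/* Bytes queued for deferred freeing; a single-writer context is
 * assumed (the patch relies on RTNL for this). */
static size_t pending_bytes;
static const int sync_pages = 128;

static void my_obj_rcu_free(struct rcu_head *head)
{
	kfree(container_of(head, struct my_obj, rcu));
}

static void my_obj_free_deferred(struct my_obj *obj)
{
	pending_bytes += sizeof(*obj);
	call_rcu(&obj->rcu, my_obj_rcu_free);

	/* Once enough memory is queued, wait for a grace period so
	 * the pending callbacks become runnable before queueing more. */
	if (pending_bytes >= PAGE_SIZE * sync_pages) {
		pending_bytes = 0;
		synchronize_rcu();
	}
}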
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -164,6 +164,14 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn);
 static struct tnode *halve(struct trie *t, struct tnode *tn);
 /* tnodes to free after resize(); protected by RTNL */
 static struct tnode *tnode_free_head;
+static size_t tnode_free_size;
+
+/*
+ * synchronize_rcu after call_rcu for that many pages; it should be especially
+ * useful before resizing the root node with PREEMPT_NONE configs; the value was
+ * obtained experimentally, aiming to avoid visible slowdown.
+ */
+static const int sync_pages = 128;
 
 static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct kmem_cache *trie_leaf_kmem __read_mostly;
@@ -393,6 +401,8 @@ static void tnode_free_safe(struct tnode *tn)
 	BUG_ON(IS_LEAF(tn));
 	tn->tnode_free = tnode_free_head;
 	tnode_free_head = tn;
+	tnode_free_size += sizeof(struct tnode) +
+			   (sizeof(struct node *) << tn->bits);
 }
 
 static void tnode_free_flush(void)
@@ -404,6 +414,11 @@ static void tnode_free_flush(void)
 		tn->tnode_free = NULL;
 		tnode_free(tn);
 	}
+
+	if (tnode_free_size >= PAGE_SIZE * sync_pages) {
+		tnode_free_size = 0;
+		synchronize_rcu();
+	}
 }
 
 static struct leaf *leaf_new(void)
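As a rough sense of scale: with a 4 KiB PAGE_SIZE (typical on x86, though it
varies by architecture), the PAGE_SIZE * sync_pages threshold works out to
512 KiB of queued tnode memory before tnode_free_flush() forces a grace
period.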