author     Jarek Poplawski <jarkao2@gmail.com>    2009-07-14 04:33:08 -0400
committer  David S. Miller <davem@davemloft.net>  2009-07-20 10:39:25 -0400
commit     c3059477fce2d956a0bb3e04357324780c5d8eeb (patch)
tree       c08cd660873667268132d556c2b66d7f9725a173 /net/ipv4
parent     2e477c9bd2bb6a1606e498adb53ba913378ecdf2 (diff)
ipv4: Use synchronize_rcu() during trie_rebalance()
During trie_rebalance() we free memory after resizing with call_rcu(), but
large updates, especially with PREEMPT_NONE configs, can cause memory
stresses, so this patch calls synchronize_rcu() in tnode_free_flush() after
each sync_pages to guarantee such freeing (especially before resizing the
root node).

The value of sync_pages = 128 is based on Pawel Staszewski's tests as the
lowest which doesn't hinder updating times. (For testing purposes there was
a sysfs module parameter to change it on demand, but it's removed until
we're sure it could be really useful.)

The patch is based on suggestions by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

Reported-by: Pawel Staszewski <pstaszewski@itcare.pl>
Tested-by: Pawel Staszewski <pstaszewski@itcare.pl>
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
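The underlying pattern is a general RCU memory-pressure throttle: objects queued with call_rcu() are not reclaimed until a grace period elapses, so a burst of updates can pile up a large backlog of unreclaimed memory (particularly with PREEMPT_NONE, where grace periods can be slow to complete). Tracking how much memory has been queued and blocking in synchronize_rcu() once a threshold is crossed bounds that backlog. The following is a minimal illustrative sketch of the idea, not the fib_trie code itself (which appears in the diff below); the deferred_obj type, pending_bytes counter, and free_throttle_pages constant are hypothetical names introduced here for illustration.

/* Illustrative sketch only -- hypothetical names, not the fib_trie patch. */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct deferred_obj {
        struct rcu_head rcu;
        /* payload ... */
};

static size_t pending_bytes;                    /* memory queued via call_rcu() */
static const int free_throttle_pages = 128;     /* flush threshold, in pages */

static void deferred_obj_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct deferred_obj, rcu));
}

/*
 * Queue an object for freeing after a grace period, but block in
 * synchronize_rcu() once enough memory is pending, so a large update
 * batch cannot outrun reclamation.
 */
static void deferred_obj_free(struct deferred_obj *obj)
{
        pending_bytes += sizeof(*obj);
        call_rcu(&obj->rcu, deferred_obj_free_rcu);

        if (pending_bytes >= PAGE_SIZE * free_throttle_pages) {
                pending_bytes = 0;
                synchronize_rcu();
        }
}

With the usual 4 KiB pages, the sync_pages = 128 threshold in the actual patch corresponds to roughly 512 KiB of queued tnode memory before a synchronous grace period is forced.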
Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/fib_trie.c   15
1 file changed, 15 insertions, 0 deletions
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 63c2fa7b68c4..58ba9f4f2c92 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -164,6 +164,14 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn);
 static struct tnode *halve(struct trie *t, struct tnode *tn);
 /* tnodes to free after resize(); protected by RTNL */
 static struct tnode *tnode_free_head;
+static size_t tnode_free_size;
+
+/*
+ * synchronize_rcu after call_rcu for that many pages; it should be especially
+ * useful before resizing the root node with PREEMPT_NONE configs; the value was
+ * obtained experimentally, aiming to avoid visible slowdown.
+ */
+static const int sync_pages = 128;
 
 static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct kmem_cache *trie_leaf_kmem __read_mostly;
@@ -393,6 +401,8 @@ static void tnode_free_safe(struct tnode *tn)
 	BUG_ON(IS_LEAF(tn));
 	tn->tnode_free = tnode_free_head;
 	tnode_free_head = tn;
+	tnode_free_size += sizeof(struct tnode) +
+			   (sizeof(struct node *) << tn->bits);
 }
 
 static void tnode_free_flush(void)
@@ -404,6 +414,11 @@ static void tnode_free_flush(void)
 		tn->tnode_free = NULL;
 		tnode_free(tn);
 	}
+
+	if (tnode_free_size >= PAGE_SIZE * sync_pages) {
+		tnode_free_size = 0;
+		synchronize_rcu();
+	}
 }
 
 static struct leaf *leaf_new(void)