author	Stephen Hemminger <shemminger@vyatta.com>	2008-04-10 05:56:38 -0400
committer	David S. Miller <davem@davemloft.net>	2008-04-10 05:56:38 -0400
commit	15be75cdb5db442d0e33d37b20832b88f3ccd383 (patch)
tree	83fc9261da859ecf3d6dcad29dadc78f481f7d7f
parent	5c06f510a25153ff79e8c2dca312b732a367c5bb (diff)
IPV4: fib_trie use vmalloc for large tnodes
Use vmalloc rather than alloc_pages to avoid wasting memory. The problem is
that the tnode structure has a power-of-2 sized array plus a header, so the
current code wastes almost half the memory allocated because it always needs
the next bigger size to hold that small header.

This is similar to an earlier patch by Eric, but instead of a list and lock,
I used a workqueue to handle the fact that vfree can't be done in interrupt
context.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	net/ipv4/fib_trie.c	| 25 +++++++++++++++----------
1 file changed, 15 insertions(+), 10 deletions(-)
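The "wastes almost half the memory" claim in the commit message is easy to check with back-of-the-envelope arithmetic. Below is a standalone userspace sketch, not part of the patch; the header and pointer sizes are assumptions for a typical 64-bit build, and pow2_pages() stands in for the rounding that alloc_pages(get_order(size)) effectively performs.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Round a size up to a power-of-2 number of pages, roughly what
 * alloc_pages(GFP_KERNEL, get_order(size)) hands back. */
static unsigned long pow2_pages(unsigned long size)
{
	unsigned long alloc = PAGE_SIZE;

	while (alloc < size)
		alloc <<= 1;
	return alloc;
}

int main(void)
{
	unsigned long header = 40;                     /* assumed tnode header size           */
	unsigned long child  = 8;                      /* sizeof(struct node *) on 64-bit     */
	unsigned long need   = header + (child << 9);  /* 512-child tnode: 4136 bytes         */
	unsigned long got    = pow2_pages(need);       /* rounds up to 8192 bytes             */

	printf("need %lu B, alloc_pages gives %lu B, %lu%% wasted\n",
	       need, got, (got - need) * 100 / got);
	return 0;
}

A 512-child tnode needs 4136 bytes but the power-of-2 path returns 8192, leaving roughly 49% of the allocation unused; __vmalloc() rounds up only to whole pages, so the waste is capped at under one page.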
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 9e491e70e855..64a274282042 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -122,7 +122,10 @@ struct tnode {
 	unsigned char bits;		/* 2log(KEYLENGTH) bits needed */
 	unsigned int full_children;	/* KEYLENGTH bits needed */
 	unsigned int empty_children;	/* KEYLENGTH bits needed */
-	struct rcu_head rcu;
+	union {
+		struct rcu_head rcu;
+		struct work_struct work;
+	};
 	struct node *child[0];
 };
 
@@ -346,16 +349,16 @@ static inline void free_leaf_info(struct leaf_info *leaf)
 
 static struct tnode *tnode_alloc(size_t size)
 {
-	struct page *pages;
-
 	if (size <= PAGE_SIZE)
 		return kzalloc(size, GFP_KERNEL);
+	else
+		return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+}
 
-	pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
-	if (!pages)
-		return NULL;
-
-	return page_address(pages);
+static void __tnode_vfree(struct work_struct *arg)
+{
+	struct tnode *tn = container_of(arg, struct tnode, work);
+	vfree(tn);
 }
 
 static void __tnode_free_rcu(struct rcu_head *head)
@@ -366,8 +369,10 @@ static void __tnode_free_rcu(struct rcu_head *head)
 
 	if (size <= PAGE_SIZE)
 		kfree(tn);
-	else
-		free_pages((unsigned long)tn, get_order(size));
+	else {
+		INIT_WORK(&tn->work, __tnode_vfree);
+		schedule_work(&tn->work);
+	}
 }
 
 static inline void tnode_free(struct tnode *tn)
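For readability, here is the allocation and free path condensed into one place as it looks after the patch. This is a sketch, not a drop-in copy of fib_trie.c: the big_node naming and the explicit size field are illustrative, and it assumes kernel headers of the same era (three-argument __vmalloc()). The point it shows is the union trick: the rcu_head is only needed until the RCU callback fires, after which the same bytes are reused as a work_struct so that vfree(), which may sleep, can be deferred to process context.

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>

struct big_node {
	size_t size;                      /* illustrative; fib_trie derives the size from tn->bits */
	union {
		struct rcu_head rcu;      /* used while waiting for the RCU grace period */
		struct work_struct work;  /* reused afterwards to defer vfree()          */
	};
	void *payload[0];
};

static struct big_node *big_node_alloc(size_t size)
{
	/* Small objects come from the slab; large ones from vmalloc so the
	 * allocation is only rounded up to whole pages, not a power of 2. */
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
}

static void big_node_vfree(struct work_struct *arg)
{
	/* Runs from the system workqueue in process context, where vfree()
	 * is allowed to sleep. */
	vfree(container_of(arg, struct big_node, work));
}

static void big_node_free_rcu(struct rcu_head *head)
{
	struct big_node *bn = container_of(head, struct big_node, rcu);

	if (bn->size <= PAGE_SIZE) {
		kfree(bn);		/* kfree() is safe in softirq context */
	} else {
		/* The rcu_head is finished; reuse its storage as a
		 * work_struct and let the workqueue do the vfree(). */
		INIT_WORK(&bn->work, big_node_vfree);
		schedule_work(&bn->work);
	}
}

static void big_node_free(struct big_node *bn)
{
	call_rcu(&bn->rcu, big_node_free_rcu);
}

Compared with the list-and-lock approach mentioned in the commit message, each object carries its own deferred-free state, so no global bookkeeping or extra locking is needed; the system workqueue supplies the process context.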