aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Miller <davem@davemloft.net>2011-07-24 20:01:22 -0400
committerDavid S. Miller <davem@davemloft.net>2011-11-30 18:46:43 -0500
commit5b8b0060cbd6332ae5d1fa0bec0e8e211248d0e7 (patch)
tree40ba4f43e875c830aefc3aef42fc05510b624922
parent1026fec8739663621d64216ba939c23bc1d089b7 (diff)
neigh: Get rid of neigh_table->kmem_cachep
We are going to allocate device-specific private areas for neighbour entries, and in order to do that we have to move away from the fixed allocation size enforced by using neigh_table->kmem_cachep. As a nice side effect we can now use kfree_rcu(). Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/net/neighbour.h1
-rw-r--r--net/core/neighbour.c18
2 files changed, 2 insertions, 17 deletions
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 87c0e5ce6492..e31f0a86f9b7 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -173,7 +173,6 @@ struct neigh_table {
173 atomic_t entries; 173 atomic_t entries;
174 rwlock_t lock; 174 rwlock_t lock;
175 unsigned long last_rand; 175 unsigned long last_rand;
176 struct kmem_cache *kmem_cachep;
177 struct neigh_statistics __percpu *stats; 176 struct neigh_statistics __percpu *stats;
178 struct neigh_hash_table __rcu *nht; 177 struct neigh_hash_table __rcu *nht;
179 struct pneigh_entry **phash_buckets; 178 struct pneigh_entry **phash_buckets;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 27d3fefeaa13..661ad12e0cc9 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -288,7 +288,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
288 goto out_entries; 288 goto out_entries;
289 } 289 }
290 290
291 n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC); 291 n = kzalloc(tbl->entry_size, GFP_ATOMIC);
292 if (!n) 292 if (!n)
293 goto out_entries; 293 goto out_entries;
294 294
@@ -678,12 +678,6 @@ static inline void neigh_parms_put(struct neigh_parms *parms)
678 neigh_parms_destroy(parms); 678 neigh_parms_destroy(parms);
679} 679}
680 680
681static void neigh_destroy_rcu(struct rcu_head *head)
682{
683 struct neighbour *neigh = container_of(head, struct neighbour, rcu);
684
685 kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
686}
687/* 681/*
688 * neighbour must already be out of the table; 682 * neighbour must already be out of the table;
689 * 683 *
@@ -711,7 +705,7 @@ void neigh_destroy(struct neighbour *neigh)
711 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh); 705 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
712 706
713 atomic_dec(&neigh->tbl->entries); 707 atomic_dec(&neigh->tbl->entries);
714 call_rcu(&neigh->rcu, neigh_destroy_rcu); 708 kfree_rcu(neigh, rcu);
715} 709}
716EXPORT_SYMBOL(neigh_destroy); 710EXPORT_SYMBOL(neigh_destroy);
717 711
@@ -1486,11 +1480,6 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
1486 tbl->parms.reachable_time = 1480 tbl->parms.reachable_time =
1487 neigh_rand_reach_time(tbl->parms.base_reachable_time); 1481 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1488 1482
1489 if (!tbl->kmem_cachep)
1490 tbl->kmem_cachep =
1491 kmem_cache_create(tbl->id, tbl->entry_size, 0,
1492 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1493 NULL);
1494 tbl->stats = alloc_percpu(struct neigh_statistics); 1483 tbl->stats = alloc_percpu(struct neigh_statistics);
1495 if (!tbl->stats) 1484 if (!tbl->stats)
1496 panic("cannot create neighbour cache statistics"); 1485 panic("cannot create neighbour cache statistics");
@@ -1575,9 +1564,6 @@ int neigh_table_clear(struct neigh_table *tbl)
1575 free_percpu(tbl->stats); 1564 free_percpu(tbl->stats);
1576 tbl->stats = NULL; 1565 tbl->stats = NULL;
1577 1566
1578 kmem_cache_destroy(tbl->kmem_cachep);
1579 tbl->kmem_cachep = NULL;
1580
1581 return 0; 1567 return 0;
1582} 1568}
1583EXPORT_SYMBOL(neigh_table_clear); 1569EXPORT_SYMBOL(neigh_table_clear);