about summary refs log tree commit diff stats
path: root/net/core
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2011-07-14 10:53:20 -0400
committerDavid S. Miller <davem@davemloft.net>2011-07-14 10:53:20 -0400
commitf6b72b6217f8c24f2a54988e58af858b4e66024d (patch)
treec59d5adcf9bb226db6f602c5078106052524cfea /net/core
parent390fd0b388e4f85549e5d60bdeb21364b344d9b9 (diff)
net: Embed hh_cache inside of struct neighbour.
Now that there is a one-to-one correspondence between neighbour and hh_cache entries, we no longer need: 1) dynamic allocation, 2) attachment to dst->hh, 3) refcounting. Initialization of the hh_cache entry is indicated by hh_len being non-zero, and such initialization is always done with the neighbour's lock held as a writer. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--net/core/dst.c7
-rw-r--r--net/core/neighbour.c81
2 files changed, 22 insertions, 66 deletions
diff --git a/net/core/dst.c b/net/core/dst.c
index 6135f3671692..4aacc14936a0 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -172,7 +172,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
172 dst->expires = 0UL; 172 dst->expires = 0UL;
173 dst->path = dst; 173 dst->path = dst;
174 dst->neighbour = NULL; 174 dst->neighbour = NULL;
175 dst->hh = NULL;
176#ifdef CONFIG_XFRM 175#ifdef CONFIG_XFRM
177 dst->xfrm = NULL; 176 dst->xfrm = NULL;
178#endif 177#endif
@@ -226,19 +225,13 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
226{ 225{
227 struct dst_entry *child; 226 struct dst_entry *child;
228 struct neighbour *neigh; 227 struct neighbour *neigh;
229 struct hh_cache *hh;
230 228
231 smp_rmb(); 229 smp_rmb();
232 230
233again: 231again:
234 neigh = dst->neighbour; 232 neigh = dst->neighbour;
235 hh = dst->hh;
236 child = dst->child; 233 child = dst->child;
237 234
238 dst->hh = NULL;
239 if (hh)
240 hh_cache_put(hh);
241
242 if (neigh) { 235 if (neigh) {
243 dst->neighbour = NULL; 236 dst->neighbour = NULL;
244 neigh_release(neigh); 237 neigh_release(neigh);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f879bb552994..77a399f2ad03 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -297,6 +297,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
297 n->updated = n->used = now; 297 n->updated = n->used = now;
298 n->nud_state = NUD_NONE; 298 n->nud_state = NUD_NONE;
299 n->output = neigh_blackhole; 299 n->output = neigh_blackhole;
300 seqlock_init(&n->hh.hh_lock);
300 n->parms = neigh_parms_clone(&tbl->parms); 301 n->parms = neigh_parms_clone(&tbl->parms);
301 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n); 302 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
302 303
@@ -702,14 +703,11 @@ void neigh_destroy(struct neighbour *neigh)
702 if (neigh_del_timer(neigh)) 703 if (neigh_del_timer(neigh))
703 printk(KERN_WARNING "Impossible event.\n"); 704 printk(KERN_WARNING "Impossible event.\n");
704 705
705 hh = neigh->hh; 706 hh = &neigh->hh;
706 if (hh) { 707 if (hh->hh_len) {
707 neigh->hh = NULL;
708
709 write_seqlock_bh(&hh->hh_lock); 708 write_seqlock_bh(&hh->hh_lock);
710 hh->hh_output = neigh_blackhole; 709 hh->hh_output = neigh_blackhole;
711 write_sequnlock_bh(&hh->hh_lock); 710 write_sequnlock_bh(&hh->hh_lock);
712 hh_cache_put(hh);
713 } 711 }
714 712
715 skb_queue_purge(&neigh->arp_queue); 713 skb_queue_purge(&neigh->arp_queue);
@@ -737,8 +735,8 @@ static void neigh_suspect(struct neighbour *neigh)
737 735
738 neigh->output = neigh->ops->output; 736 neigh->output = neigh->ops->output;
739 737
740 hh = neigh->hh; 738 hh = &neigh->hh;
741 if (hh) 739 if (hh->hh_len)
742 hh->hh_output = neigh->ops->output; 740 hh->hh_output = neigh->ops->output;
743} 741}
744 742
@@ -755,8 +753,8 @@ static void neigh_connect(struct neighbour *neigh)
755 753
756 neigh->output = neigh->ops->connected_output; 754 neigh->output = neigh->ops->connected_output;
757 755
758 hh = neigh->hh; 756 hh = &neigh->hh;
759 if (hh) 757 if (hh->hh_len)
760 hh->hh_output = neigh->ops->hh_output; 758 hh->hh_output = neigh->ops->hh_output;
761} 759}
762 760
@@ -1017,7 +1015,7 @@ out_unlock_bh:
1017} 1015}
1018EXPORT_SYMBOL(__neigh_event_send); 1016EXPORT_SYMBOL(__neigh_event_send);
1019 1017
1020static void neigh_update_hhs(const struct neighbour *neigh) 1018static void neigh_update_hhs(struct neighbour *neigh)
1021{ 1019{
1022 struct hh_cache *hh; 1020 struct hh_cache *hh;
1023 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *) 1021 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
@@ -1027,8 +1025,8 @@ static void neigh_update_hhs(const struct neighbour *neigh)
1027 update = neigh->dev->header_ops->cache_update; 1025 update = neigh->dev->header_ops->cache_update;
1028 1026
1029 if (update) { 1027 if (update) {
1030 hh = neigh->hh; 1028 hh = &neigh->hh;
1031 if (hh) { 1029 if (hh->hh_len) {
1032 write_seqlock_bh(&hh->hh_lock); 1030 write_seqlock_bh(&hh->hh_lock);
1033 update(hh, neigh->dev, neigh->ha); 1031 update(hh, neigh->dev, neigh->ha);
1034 write_sequnlock_bh(&hh->hh_lock); 1032 write_sequnlock_bh(&hh->hh_lock);
@@ -1214,62 +1212,29 @@ struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1214} 1212}
1215EXPORT_SYMBOL(neigh_event_ns); 1213EXPORT_SYMBOL(neigh_event_ns);
1216 1214
1217static inline bool neigh_hh_lookup(struct neighbour *n, struct dst_entry *dst)
1218{
1219 struct hh_cache *hh;
1220
1221 smp_rmb(); /* paired with smp_wmb() in neigh_hh_init() */
1222 hh = n->hh;
1223 if (hh) {
1224 atomic_inc(&hh->hh_refcnt);
1225 if (unlikely(cmpxchg(&dst->hh, NULL, hh) != NULL))
1226 hh_cache_put(hh);
1227 return true;
1228 }
1229 return false;
1230}
1231
1232/* called with read_lock_bh(&n->lock); */ 1215/* called with read_lock_bh(&n->lock); */
1233static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, 1216static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
1234 __be16 protocol)
1235{ 1217{
1236 struct hh_cache *hh;
1237 struct net_device *dev = dst->dev; 1218 struct net_device *dev = dst->dev;
1238 1219 __be16 prot = dst->ops->protocol;
1239 if (likely(neigh_hh_lookup(n, dst))) 1220 struct hh_cache *hh = &n->hh;
1240 return;
1241
1242 /* slow path */
1243 hh = kzalloc(sizeof(*hh), GFP_ATOMIC);
1244 if (!hh)
1245 return;
1246
1247 seqlock_init(&hh->hh_lock);
1248 atomic_set(&hh->hh_refcnt, 2);
1249
1250 if (dev->header_ops->cache(n, hh, protocol)) {
1251 kfree(hh);
1252 return;
1253 }
1254 1221
1255 write_lock_bh(&n->lock); 1222 write_lock_bh(&n->lock);
1256 1223
1257 /* must check if another thread already did the insert */ 1224 /* Only one thread can come in here and initialize the
1258 if (neigh_hh_lookup(n, dst)) { 1225 * hh_cache entry.
1259 kfree(hh); 1226 */
1227 if (hh->hh_len)
1228 goto end;
1229
1230 if (dev->header_ops->cache(n, hh, prot))
1260 goto end; 1231 goto end;
1261 }
1262 1232
1263 if (n->nud_state & NUD_CONNECTED) 1233 if (n->nud_state & NUD_CONNECTED)
1264 hh->hh_output = n->ops->hh_output; 1234 hh->hh_output = n->ops->hh_output;
1265 else 1235 else
1266 hh->hh_output = n->ops->output; 1236 hh->hh_output = n->ops->output;
1267 1237
1268 smp_wmb(); /* paired with smp_rmb() in neigh_hh_lookup() */
1269 n->hh = hh;
1270
1271 if (unlikely(cmpxchg(&dst->hh, NULL, hh) != NULL))
1272 hh_cache_put(hh);
1273end: 1238end:
1274 write_unlock_bh(&n->lock); 1239 write_unlock_bh(&n->lock);
1275} 1240}
@@ -1312,10 +1277,8 @@ int neigh_resolve_output(struct sk_buff *skb)
1312 struct net_device *dev = neigh->dev; 1277 struct net_device *dev = neigh->dev;
1313 unsigned int seq; 1278 unsigned int seq;
1314 1279
1315 if (dev->header_ops->cache && 1280 if (dev->header_ops->cache && !neigh->hh.hh_len)
1316 !dst->hh && 1281 neigh_hh_init(neigh, dst);
1317 !(dst->flags & DST_NOCACHE))
1318 neigh_hh_init(neigh, dst, dst->ops->protocol);
1319 1282
1320 do { 1283 do {
1321 seq = read_seqbegin(&neigh->ha_lock); 1284 seq = read_seqbegin(&neigh->ha_lock);