 include/linux/netdevice.h |  6
 net/core/dst.c            |  4
 net/core/neighbour.c      | 99
 3 files changed, 69 insertions(+), 40 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6abcef67b178..4160db3721ba 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -281,6 +281,12 @@ struct hh_cache {
 	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
 };
 
+static inline void hh_cache_put(struct hh_cache *hh)
+{
+	if (atomic_dec_and_test(&hh->hh_refcnt))
+		kfree(hh);
+}
+
 /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
  * Alternative is:
  *   dev->hard_header_len ? (dev->hard_header_len +
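
The new hh_cache_put() helper centralizes the "drop a reference, free on zero" pattern that was previously open-coded at each release site. For reference, an abridged sketch of struct hh_cache with the fields this patch relies on (reconstructed from the context above and this era's include/linux/netdevice.h; other members and the exact layout may differ):

struct hh_cache {
	struct hh_cache	*hh_next;	/* per-neighbour list, keyed by protocol */
	atomic_t	hh_refcnt;	/* one ref per holder: the n->hh list, each dst->hh */
	__be16		hh_type;	/* protocol type, e.g. htons(ETH_P_IP) */
	u16		hh_len;		/* length of the cached hard header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;	/* protects hh_data/hh_len against concurrent readers */
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};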
diff --git a/net/core/dst.c b/net/core/dst.c
index 6c41b1fac3db..978a1ee1f7d0 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -228,8 +228,8 @@ again:
 	child = dst->child;
 
 	dst->hh = NULL;
-	if (hh && atomic_dec_and_test(&hh->hh_refcnt))
-		kfree(hh);
+	if (hh)
+		hh_cache_put(hh);
 
 	if (neigh) {
 		dst->neighbour = NULL;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 3ffafaa0414c..2044906ecd1a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -709,8 +709,7 @@ void neigh_destroy(struct neighbour *neigh)
 		write_seqlock_bh(&hh->hh_lock);
 		hh->hh_output = neigh_blackhole;
 		write_sequnlock_bh(&hh->hh_lock);
-		if (atomic_dec_and_test(&hh->hh_refcnt))
-			kfree(hh);
+		hh_cache_put(hh);
 	}
 
 	skb_queue_purge(&neigh->arp_queue);
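
Blackholing hh_output under write_seqlock_bh() before the final hh_cache_put() works because readers of a cached header run inside a seqlock retry loop and only call hh_output once the sequence is stable, so surviving dst->hh holders fall through to neigh_blackhole() rather than a stale output path. A sketch of that reader side, roughly what the in-tree fast path (cf. neigh_hh_output() in include/net/neighbour.h) does; it is written here as a standalone helper, so the name and details are illustrative:

/* Copy the cached hard header in front of the payload, then emit.
 * Retries if a writer updated hh_data/hh_len concurrently; after
 * neigh_destroy() the final call lands in neigh_blackhole() and the
 * skb is dropped instead of going out with stale neighbour state.
 */
static inline int hh_emit(struct hh_cache *hh, struct sk_buff *skb)
{
	unsigned int seq;
	int hh_len;

	do {
		int hh_alen;

		seq = read_seqbegin(&hh->hh_lock);
		hh_len = hh->hh_len;
		hh_alen = HH_DATA_ALIGN(hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
	} while (read_seqretry(&hh->hh_lock, seq));

	skb_push(skb, hh_len);
	return hh->hh_output(skb);
}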
@@ -1210,39 +1209,67 @@ struct neighbour *neigh_event_ns(struct neigh_table *tbl,
 }
 EXPORT_SYMBOL(neigh_event_ns);
 
+static inline bool neigh_hh_lookup(struct neighbour *n, struct dst_entry *dst,
+				   __be16 protocol)
+{
+	struct hh_cache *hh;
+
+	for (hh = n->hh; hh; hh = hh->hh_next) {
+		if (hh->hh_type == protocol) {
+			atomic_inc(&hh->hh_refcnt);
+			if (unlikely(cmpxchg(&dst->hh, NULL, hh) != NULL))
+				hh_cache_put(hh);
+			return true;
+		}
+	}
+	return false;
+}
+
+/* called with read_lock_bh(&n->lock); */
 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
 			  __be16 protocol)
 {
 	struct hh_cache *hh;
 	struct net_device *dev = dst->dev;
 
-	for (hh = n->hh; hh; hh = hh->hh_next)
-		if (hh->hh_type == protocol)
-			break;
+	if (likely(neigh_hh_lookup(n, dst, protocol)))
+		return;
 
-	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
-		seqlock_init(&hh->hh_lock);
-		hh->hh_type = protocol;
-		atomic_set(&hh->hh_refcnt, 0);
-		hh->hh_next = NULL;
+	/* slow path */
+	hh = kzalloc(sizeof(*hh), GFP_ATOMIC);
+	if (!hh)
+		return;
 
-		if (dev->header_ops->cache(n, hh)) {
-			kfree(hh);
-			hh = NULL;
-		} else {
-			atomic_inc(&hh->hh_refcnt);
-			hh->hh_next = n->hh;
-			n->hh = hh;
-			if (n->nud_state & NUD_CONNECTED)
-				hh->hh_output = n->ops->hh_output;
-			else
-				hh->hh_output = n->ops->output;
-		}
+	seqlock_init(&hh->hh_lock);
+	hh->hh_type = protocol;
+	atomic_set(&hh->hh_refcnt, 2);
+
+	if (dev->header_ops->cache(n, hh)) {
+		kfree(hh);
+		return;
 	}
-	if (hh) {
-		atomic_inc(&hh->hh_refcnt);
-		dst->hh = hh;
+	read_unlock(&n->lock);
+	write_lock(&n->lock);
+
+	/* must check if another thread already did the insert */
+	if (neigh_hh_lookup(n, dst, protocol)) {
+		kfree(hh);
+		goto end;
 	}
+
+	if (n->nud_state & NUD_CONNECTED)
+		hh->hh_output = n->ops->hh_output;
+	else
+		hh->hh_output = n->ops->output;
+
+	hh->hh_next = n->hh;
+	n->hh = hh;
+
+	if (unlikely(cmpxchg(&dst->hh, NULL, hh) != NULL))
+		hh_cache_put(hh);
+end:
+	write_unlock(&n->lock);
+	read_lock(&n->lock);
 }
 
 /* This function can be used in contexts, where only old dev_queue_xmit
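
Two points in the rewritten neigh_hh_init() deserve a note: the fresh entry starts at a refcount of 2, and the caller's read lock is dropped and re-taken as a write lock, which is why the lookup has to be repeated. A condensed annotation of the accounting (comments only, describing the code in the hunk above):

/*
 * atomic_set(&hh->hh_refcnt, 2)
 *	- one reference for the n->hh list, dropped with hh_cache_put()
 *	  in neigh_destroy()
 *	- one reference intended for dst->hh, dropped with hh_cache_put()
 *	  in dst_destroy() (see the net/core/dst.c hunk above)
 *
 * read_unlock(&n->lock); write_lock(&n->lock);
 *	- the caller holds read_lock_bh(&n->lock), so BH stays disabled,
 *	  but the upgrade is not atomic: another CPU may have inserted an
 *	  entry for this protocol in the window, hence the second
 *	  neigh_hh_lookup() under the write lock (plain kfree() is fine for
 *	  the loser, since its hh was never published anywhere)
 *
 * cmpxchg(&dst->hh, NULL, hh)
 *	- publishes the entry to the dst without any dst-side lock; if
 *	  another CPU already set dst->hh, the extra reference is returned
 *	  immediately with hh_cache_put()
 */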
@@ -1281,21 +1308,17 @@ int neigh_resolve_output(struct sk_buff *skb)
 	if (!neigh_event_send(neigh, skb)) {
 		int err;
 		struct net_device *dev = neigh->dev;
+
+		read_lock_bh(&neigh->lock);
 		if (dev->header_ops->cache &&
 		    !dst->hh &&
-		    !(dst->flags & DST_NOCACHE)) {
-			write_lock_bh(&neigh->lock);
-			if (!dst->hh)
-				neigh_hh_init(neigh, dst, dst->ops->protocol);
-			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
-					      neigh->ha, NULL, skb->len);
-			write_unlock_bh(&neigh->lock);
-		} else {
-			read_lock_bh(&neigh->lock);
-			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
-					      neigh->ha, NULL, skb->len);
-			read_unlock_bh(&neigh->lock);
-		}
+		    !(dst->flags & DST_NOCACHE))
+			neigh_hh_init(neigh, dst, dst->ops->protocol);
+
+		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+				      neigh->ha, NULL, skb->len);
+		read_unlock_bh(&neigh->lock);
+
 		if (err >= 0)
 			rc = neigh->ops->queue_xmit(skb);
 		else
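
With this, the resolved-neighbour path in neigh_resolve_output() only ever takes the neighbour read lock; the write lock is confined to neigh_hh_init()'s slow path, which runs once per (dst, protocol). A sketch of the resulting lock sequence (annotation only; the _bh variant is needed just at the outermost level, since BH stays disabled across the inner upgrade):

/*
 * neigh_resolve_output()
 *	read_lock_bh(&neigh->lock);
 *	neigh_hh_init()			only if the device caches headers,
 *					dst->hh is still NULL and !DST_NOCACHE
 *		neigh_hh_lookup()	common case: hit, return
 *		-- slow path only --
 *		read_unlock(&n->lock);
 *		write_lock(&n->lock);
 *		... re-check, insert into n->hh, publish via cmpxchg() ...
 *		write_unlock(&n->lock);
 *		read_lock(&n->lock);
 *	dev_hard_header(..., neigh->ha, ...);	ha[] is stable under the lock
 *	read_unlock_bh(&neigh->lock);
 */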
