path: root/net/core/dev.c
author	Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:55:21 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:55:21 -0400
commit	bbb20089a3275a19e475dbc21320c3742e3ca423
tree	216fdc1cbef450ca688135c5b8969169482d9a48
parent	3e48e656903e9fd8bc805c6a2c4264d7808d315b
parent	657a77fa7284d8ae28dfa48f1dc5d919bf5b2843
Merge branch 'dmaengine' into async-tx-next
Conflicts:
	crypto/async_tx/async_xor.c
	drivers/dma/ioat/dma_v2.h
	drivers/dma/ioat/pci.c
	drivers/md/raid5.c
Diffstat (limited to 'net/core/dev.c')
 net/core/dev.c | 688 ++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 509 insertions(+), 179 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index e2e9e4af3ace..60b572812278 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -126,6 +126,7 @@
 #include <linux/in.h>
 #include <linux/jhash.h>
 #include <linux/random.h>
+#include <trace/events/napi.h>
 
 #include "net-sysfs.h"
 
@@ -268,7 +269,8 @@ static const unsigned short netdev_lock_type[] =
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
-	 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};
+	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY,
+	 ARPHRD_VOID, ARPHRD_NONE};
 
 static const char *netdev_lock_name[] =
 	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
@@ -285,7 +287,8 @@ static const char *netdev_lock_name[] =
285 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", 287 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
286 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211", 288 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
287 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", 289 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
288 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"}; 290 "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY",
291 "_xmit_VOID", "_xmit_NONE"};
289 292
290static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; 293static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
291static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; 294static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -1047,7 +1050,7 @@ void dev_load(struct net *net, const char *name)
 int dev_open(struct net_device *dev)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
-	int ret = 0;
+	int ret;
 
 	ASSERT_RTNL();
 
@@ -1064,6 +1067,11 @@ int dev_open(struct net_device *dev)
 	if (!netif_device_present(dev))
 		return -ENODEV;
 
+	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
+	ret = notifier_to_errno(ret);
+	if (ret)
+		return ret;
+
 	/*
 	 *	Call device private open method
 	 */
@@ -1688,7 +1696,16 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 				goto gso;
 		}
 
+		/*
+		 * If device doesnt need skb->dst, release it right now while
+		 * its hot in this cpu cache
+		 */
+		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+			skb_dst_drop(skb);
+
 		rc = ops->ndo_start_xmit(skb, dev);
+		if (rc == 0)
+			txq_trans_update(txq);
 		/*
 		 * TODO: if skb_orphan() was called by
 		 * dev->hard_start_xmit() (for example, the unmodified
@@ -1718,6 +1735,7 @@ gso:
 			skb->next = nskb;
 			return rc;
 		}
+		txq_trans_update(txq);
 		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
 			return NETDEV_TX_BUSY;
 	} while (skb->next);
@@ -1735,8 +1753,12 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 {
 	u32 hash;
 
-	if (skb_rx_queue_recorded(skb))
-		return skb_get_rx_queue(skb) % dev->real_num_tx_queues;
+	if (skb_rx_queue_recorded(skb)) {
+		hash = skb_get_rx_queue(skb);
+		while (unlikely(hash >= dev->real_num_tx_queues))
+			hash -= dev->real_num_tx_queues;
+		return hash;
+	}
 
 	if (skb->sk && skb->sk->sk_hash)
 		hash = skb->sk->sk_hash;
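The recorded rx queue index is normally already below real_num_tx_queues, so the subtraction loop above usually executes zero iterations and avoids an integer divide on the hot path. A standalone sketch of the same folding (names are illustrative, not from the kernel):

#include <stdint.h>

/* Equivalent to idx % n for any idx (n > 0), but costs only compares
 * and subtractions; when idx < 2 * n it loops at most once.
 */
static uint32_t fold_queue_index(uint32_t idx, uint32_t n)
{
	while (idx >= n)
		idx -= n;
	return idx;
}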
@@ -1800,7 +1822,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 	if (netif_needs_gso(dev, skb))
 		goto gso;
 
-	if (skb_shinfo(skb)->frag_list &&
+	if (skb_has_frags(skb) &&
 	    !(dev->features & NETIF_F_FRAGLIST) &&
 	    __skb_linearize(skb))
 		goto out_kfree_skb;
@@ -2049,11 +2071,13 @@ static inline int deliver_skb(struct sk_buff *skb,
 }
 
 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
-/* These hooks defined here for ATM */
-struct net_bridge;
-struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
-						unsigned char *addr);
-void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
+
+#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+/* This hook is defined here for ATM LANE */
+int (*br_fdb_test_addr_hook)(struct net_device *dev,
+			     unsigned char *addr) __read_mostly;
+EXPORT_SYMBOL(br_fdb_test_addr_hook);
+#endif
 
 /*
  * If bridge module is loaded call bridging hook.
@@ -2061,6 +2085,8 @@ void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
  */
 struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
 					struct sk_buff *skb) __read_mostly;
+EXPORT_SYMBOL(br_handle_frame_hook);
+
 static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
 					    struct packet_type **pt_prev, int *ret,
 					    struct net_device *orig_dev)
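Both hook pointers stay NULL until the bridge module loads and assigns them, so a caller such as ATM LANE has to test the pointer before every call. A hedged sketch of that pattern (simplified, not the literal drivers/atm/lec.c code):

/* Sketch: probing an optional hook exported by another module. */
static int example_is_bridged_addr(struct net_device *dev,
				   unsigned char *addr)
{
	if (!br_fdb_test_addr_hook)	/* bridge module not loaded */
		return 0;
	return br_fdb_test_addr_hook(dev, addr);
}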
@@ -2284,8 +2310,6 @@ ncls:
 	if (!skb)
 		goto out;
 
-	skb_orphan(skb);
-
 	type = skb->protocol;
 	list_for_each_entry_rcu(ptype,
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
@@ -2374,26 +2398,6 @@ void napi_gro_flush(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
-void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
-{
-	unsigned int offset = skb_gro_offset(skb);
-
-	hlen += offset;
-	if (hlen <= skb_headlen(skb))
-		return skb->data + offset;
-
-	if (unlikely(!skb_shinfo(skb)->nr_frags ||
-		     skb_shinfo(skb)->frags[0].size <=
-		     hlen - skb_headlen(skb) ||
-		     PageHighMem(skb_shinfo(skb)->frags[0].page)))
-		return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
-
-	return page_address(skb_shinfo(skb)->frags[0].page) +
-	       skb_shinfo(skb)->frags[0].page_offset +
-	       offset - skb_headlen(skb);
-}
-EXPORT_SYMBOL(skb_gro_header);
-
 int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
@@ -2407,7 +2411,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
 
-	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
+	if (skb_is_gso(skb) || skb_has_frags(skb))
 		goto normal;
 
 	rcu_read_lock();
@@ -2456,10 +2460,25 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 		ret = GRO_HELD;
 
 pull:
-	if (unlikely(!pskb_may_pull(skb, skb_gro_offset(skb)))) {
-		if (napi->gro_list == skb)
-			napi->gro_list = skb->next;
-		ret = GRO_DROP;
+	if (skb_headlen(skb) < skb_gro_offset(skb)) {
+		int grow = skb_gro_offset(skb) - skb_headlen(skb);
+
+		BUG_ON(skb->end - skb->tail < grow);
+
+		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
+
+		skb->tail += grow;
+		skb->data_len -= grow;
+
+		skb_shinfo(skb)->frags[0].page_offset += grow;
+		skb_shinfo(skb)->frags[0].size -= grow;
+
+		if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
+			put_page(skb_shinfo(skb)->frags[0].page);
+			memmove(skb_shinfo(skb)->frags,
+				skb_shinfo(skb)->frags + 1,
+				--skb_shinfo(skb)->nr_frags);
+		}
 	}
 
 ok:
@@ -2509,6 +2528,22 @@ int napi_skb_finish(int ret, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_skb_finish);
 
+void skb_gro_reset_offset(struct sk_buff *skb)
+{
+	NAPI_GRO_CB(skb)->data_offset = 0;
+	NAPI_GRO_CB(skb)->frag0 = NULL;
+	NAPI_GRO_CB(skb)->frag0_len = 0;
+
+	if (skb->mac_header == skb->tail &&
+	    !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
+		NAPI_GRO_CB(skb)->frag0 =
+			page_address(skb_shinfo(skb)->frags[0].page) +
+			skb_shinfo(skb)->frags[0].page_offset;
+		NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
+	}
+}
+EXPORT_SYMBOL(skb_gro_reset_offset);
+
 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	skb_gro_reset_offset(skb);
@@ -2526,16 +2561,10 @@ void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_reuse_skb);
 
-struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
-				  struct napi_gro_fraginfo *info)
+struct sk_buff *napi_get_frags(struct napi_struct *napi)
 {
 	struct net_device *dev = napi->dev;
 	struct sk_buff *skb = napi->skb;
-	struct ethhdr *eth;
-	skb_frag_t *frag;
-	int i;
-
-	napi->skb = NULL;
 
 	if (!skb) {
 		skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
@@ -2543,47 +2572,14 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
 			goto out;
 
 		skb_reserve(skb, NET_IP_ALIGN);
-	}
-
-	BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
-	frag = info->frags;
-
-	for (i = 0; i < info->nr_frags; i++) {
-		skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
-				   frag->size);
-		frag++;
-	}
-	skb_shinfo(skb)->nr_frags = info->nr_frags;
-
-	skb->data_len = info->len;
-	skb->len += info->len;
-	skb->truesize += info->len;
 
-	skb_reset_mac_header(skb);
-	skb_gro_reset_offset(skb);
-
-	eth = skb_gro_header(skb, sizeof(*eth));
-	if (!eth) {
-		napi_reuse_skb(napi, skb);
-		skb = NULL;
-		goto out;
+		napi->skb = skb;
 	}
 
-	skb_gro_pull(skb, sizeof(*eth));
-
-	/*
-	 * This works because the only protocols we care about don't require
-	 * special handling.  We'll fix it up properly at the end.
-	 */
-	skb->protocol = eth->h_proto;
-
-	skb->ip_summed = info->ip_summed;
-	skb->csum = info->csum;
-
 out:
 	return skb;
 }
-EXPORT_SYMBOL(napi_fraginfo_skb);
+EXPORT_SYMBOL(napi_get_frags);
 
 int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
 {
@@ -2613,9 +2609,46 @@ int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
 }
 EXPORT_SYMBOL(napi_frags_finish);
 
-int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+struct sk_buff *napi_frags_skb(struct napi_struct *napi)
 {
-	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+	struct sk_buff *skb = napi->skb;
+	struct ethhdr *eth;
+	unsigned int hlen;
+	unsigned int off;
+
+	napi->skb = NULL;
+
+	skb_reset_mac_header(skb);
+	skb_gro_reset_offset(skb);
+
+	off = skb_gro_offset(skb);
+	hlen = off + sizeof(*eth);
+	eth = skb_gro_header_fast(skb, off);
+	if (skb_gro_header_hard(skb, hlen)) {
+		eth = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!eth)) {
+			napi_reuse_skb(napi, skb);
+			skb = NULL;
+			goto out;
+		}
+	}
+
+	skb_gro_pull(skb, sizeof(*eth));
+
+	/*
+	 * This works because the only protocols we care about don't require
+	 * special handling.  We'll fix it up properly at the end.
+	 */
+	skb->protocol = eth->h_proto;
+
+out:
+	return skb;
+}
+EXPORT_SYMBOL(napi_frags_skb);
+
+int napi_gro_frags(struct napi_struct *napi)
+{
+	struct sk_buff *skb = napi_frags_skb(napi);
 
 	if (!skb)
 		return NET_RX_DROP;
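Together, napi_get_frags() and napi_gro_frags() let a page-based driver feed GRO without building headers itself; napi_frags_skb() then pulls the Ethernet header straight out of frag 0 via the frag0 fast path. A hedged sketch of a driver receive path using the pair (the function and its parameters are invented stand-ins for hardware-specific code):

/* Sketch: rx path of a hypothetical frag-based driver. Error handling
 * is minimal; real drivers also recycle pages and set checksum state.
 */
static void example_rx_frag(struct napi_struct *napi,
			    struct page *page, unsigned int off,
			    unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;	/* out of memory: drop */

	/* Attach the received page as frag 0; GRO reads the Ethernet
	 * header directly out of it (skb_gro_header_fast).
	 */
	skb_fill_page_desc(skb, 0, page, off, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	napi_gro_frags(napi);	/* consumes napi->skb */
}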
@@ -2719,7 +2752,7 @@ void netif_napi_del(struct napi_struct *napi)
 	struct sk_buff *skb, *next;
 
 	list_del_init(&napi->dev_list);
-	kfree_skb(napi->skb);
+	napi_free_frags(napi);
 
 	for (skb = napi->gro_list; skb; skb = next) {
 		next = skb->next;
@@ -2773,8 +2806,10 @@ static void net_rx_action(struct softirq_action *h)
 		 * accidently calling ->poll() when NAPI is not scheduled.
 		 */
 		work = 0;
-		if (test_bit(NAPI_STATE_SCHED, &n->state))
+		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
 			work = n->poll(n, weight);
+			trace_napi_poll(n);
+		}
 
 		WARN_ON_ONCE(work > weight);
 
@@ -3424,10 +3459,10 @@ void __dev_set_rx_mode(struct net_device *dev)
 	/* Unicast addresses changes may only happen under the rtnl,
 	 * therefore calling __dev_set_promiscuity here is safe.
 	 */
-	if (dev->uc_count > 0 && !dev->uc_promisc) {
+	if (dev->uc.count > 0 && !dev->uc_promisc) {
 		__dev_set_promiscuity(dev, 1);
 		dev->uc_promisc = 1;
-	} else if (dev->uc_count == 0 && dev->uc_promisc) {
+	} else if (dev->uc.count == 0 && dev->uc_promisc) {
 		__dev_set_promiscuity(dev, -1);
 		dev->uc_promisc = 0;
 	}
@@ -3444,6 +3479,316 @@ void dev_set_rx_mode(struct net_device *dev)
 	netif_addr_unlock_bh(dev);
 }
 
+/* hw addresses list handling functions */
+
+static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
+			 int addr_len, unsigned char addr_type)
+{
+	struct netdev_hw_addr *ha;
+	int alloc_size;
+
+	if (addr_len > MAX_ADDR_LEN)
+		return -EINVAL;
+
+	list_for_each_entry(ha, &list->list, list) {
+		if (!memcmp(ha->addr, addr, addr_len) &&
+		    ha->type == addr_type) {
+			ha->refcount++;
+			return 0;
+		}
+	}
+
+
+	alloc_size = sizeof(*ha);
+	if (alloc_size < L1_CACHE_BYTES)
+		alloc_size = L1_CACHE_BYTES;
+	ha = kmalloc(alloc_size, GFP_ATOMIC);
+	if (!ha)
+		return -ENOMEM;
+	memcpy(ha->addr, addr, addr_len);
+	ha->type = addr_type;
+	ha->refcount = 1;
+	ha->synced = false;
+	list_add_tail_rcu(&ha->list, &list->list);
+	list->count++;
+	return 0;
+}
+
+static void ha_rcu_free(struct rcu_head *head)
+{
+	struct netdev_hw_addr *ha;
+
+	ha = container_of(head, struct netdev_hw_addr, rcu_head);
+	kfree(ha);
+}
+
+static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
+			 int addr_len, unsigned char addr_type)
+{
+	struct netdev_hw_addr *ha;
+
+	list_for_each_entry(ha, &list->list, list) {
+		if (!memcmp(ha->addr, addr, addr_len) &&
+		    (ha->type == addr_type || !addr_type)) {
+			if (--ha->refcount)
+				return 0;
+			list_del_rcu(&ha->list);
+			call_rcu(&ha->rcu_head, ha_rcu_free);
+			list->count--;
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+				  struct netdev_hw_addr_list *from_list,
+				  int addr_len,
+				  unsigned char addr_type)
+{
+	int err;
+	struct netdev_hw_addr *ha, *ha2;
+	unsigned char type;
+
+	list_for_each_entry(ha, &from_list->list, list) {
+		type = addr_type ? addr_type : ha->type;
+		err = __hw_addr_add(to_list, ha->addr, addr_len, type);
+		if (err)
+			goto unroll;
+	}
+	return 0;
+
+unroll:
+	list_for_each_entry(ha2, &from_list->list, list) {
+		if (ha2 == ha)
+			break;
+		type = addr_type ? addr_type : ha2->type;
+		__hw_addr_del(to_list, ha2->addr, addr_len, type);
+	}
+	return err;
+}
+
+static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+				   struct netdev_hw_addr_list *from_list,
+				   int addr_len,
+				   unsigned char addr_type)
+{
+	struct netdev_hw_addr *ha;
+	unsigned char type;
+
+	list_for_each_entry(ha, &from_list->list, list) {
+		type = addr_type ? addr_type : ha->type;
+		__hw_addr_del(to_list, ha->addr, addr_len, addr_type);
+	}
+}
+
+static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+			  struct netdev_hw_addr_list *from_list,
+			  int addr_len)
+{
+	int err = 0;
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
+		if (!ha->synced) {
+			err = __hw_addr_add(to_list, ha->addr,
+					    addr_len, ha->type);
+			if (err)
+				break;
+			ha->synced = true;
+			ha->refcount++;
+		} else if (ha->refcount == 1) {
+			__hw_addr_del(to_list, ha->addr, addr_len, ha->type);
+			__hw_addr_del(from_list, ha->addr, addr_len, ha->type);
+		}
+	}
+	return err;
+}
+
+static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+			     struct netdev_hw_addr_list *from_list,
+			     int addr_len)
+{
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
+		if (ha->synced) {
+			__hw_addr_del(to_list, ha->addr,
+				      addr_len, ha->type);
+			ha->synced = false;
+			__hw_addr_del(from_list, ha->addr,
+				      addr_len, ha->type);
+		}
+	}
+}
+
+static void __hw_addr_flush(struct netdev_hw_addr_list *list)
+{
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe(ha, tmp, &list->list, list) {
+		list_del_rcu(&ha->list);
+		call_rcu(&ha->rcu_head, ha_rcu_free);
+	}
+	list->count = 0;
+}
+
+static void __hw_addr_init(struct netdev_hw_addr_list *list)
+{
+	INIT_LIST_HEAD(&list->list);
+	list->count = 0;
+}
+
+/* Device addresses handling functions */
+
+static void dev_addr_flush(struct net_device *dev)
+{
+	/* rtnl_mutex must be held here */
+
+	__hw_addr_flush(&dev->dev_addrs);
+	dev->dev_addr = NULL;
+}
+
+static int dev_addr_init(struct net_device *dev)
+{
+	unsigned char addr[MAX_ADDR_LEN];
+	struct netdev_hw_addr *ha;
+	int err;
+
+	/* rtnl_mutex must be held here */
+
+	__hw_addr_init(&dev->dev_addrs);
+	memset(addr, 0, sizeof(addr));
+	err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
+			    NETDEV_HW_ADDR_T_LAN);
+	if (!err) {
+		/*
+		 * Get the first (previously created) address from the list
+		 * and set dev_addr pointer to this location.
+		 */
+		ha = list_first_entry(&dev->dev_addrs.list,
+				      struct netdev_hw_addr, list);
+		dev->dev_addr = ha->addr;
+	}
+	return err;
+}
+
+/**
+ *	dev_addr_add	- Add a device address
+ *	@dev: device
+ *	@addr: address to add
+ *	@addr_type: address type
+ *
+ *	Add a device address to the device or increase the reference count if
+ *	it already exists.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add(struct net_device *dev, unsigned char *addr,
+		 unsigned char addr_type)
+{
+	int err;
+
+	ASSERT_RTNL();
+
+	err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
+	if (!err)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_addr_add);
+
+/**
+ *	dev_addr_del	- Release a device address.
+ *	@dev: device
+ *	@addr: address to delete
+ *	@addr_type: address type
+ *
+ *	Release reference to a device address and remove it from the device
+ *	if the reference count drops to zero.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del(struct net_device *dev, unsigned char *addr,
+		 unsigned char addr_type)
+{
+	int err;
+	struct netdev_hw_addr *ha;
+
+	ASSERT_RTNL();
+
+	/*
+	 * We can not remove the first address from the list because
+	 * dev->dev_addr points to that.
+	 */
+	ha = list_first_entry(&dev->dev_addrs.list,
+			      struct netdev_hw_addr, list);
+	if (ha->addr == dev->dev_addr && ha->refcount == 1)
+		return -ENOENT;
+
+	err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
+			    addr_type);
+	if (!err)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_addr_del);
+
+/**
+ *	dev_addr_add_multiple	- Add device addresses from another device
+ *	@to_dev: device to which addresses will be added
+ *	@from_dev: device from which addresses will be added
+ *	@addr_type: address type - 0 means type will be used from from_dev
+ *
+ *	Add device addresses of the one device to another.
+ **
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add_multiple(struct net_device *to_dev,
+			  struct net_device *from_dev,
+			  unsigned char addr_type)
+{
+	int err;
+
+	ASSERT_RTNL();
+
+	if (from_dev->addr_len != to_dev->addr_len)
+		return -EINVAL;
+	err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
+				     to_dev->addr_len, addr_type);
+	if (!err)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_addr_add_multiple);
+
+/**
+ *	dev_addr_del_multiple	- Delete device addresses by another device
+ *	@to_dev: device where the addresses will be deleted
+ *	@from_dev: device by which addresses the addresses will be deleted
+ *	@addr_type: address type - 0 means type will used from from_dev
+ *
+ *	Deletes addresses in to device by the list of addresses in from device.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del_multiple(struct net_device *to_dev,
+			  struct net_device *from_dev,
+			  unsigned char addr_type)
+{
+	ASSERT_RTNL();
+
+	if (from_dev->addr_len != to_dev->addr_len)
+		return -EINVAL;
+	__hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
+			       to_dev->addr_len, addr_type);
+	call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+	return 0;
+}
+EXPORT_SYMBOL(dev_addr_del_multiple);
+
+/* multicast addresses handling functions */
+
 int __dev_addr_delete(struct dev_addr_list **list, int *count,
 		      void *addr, int alen, int glbl)
 {
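The new dev_addr helpers are refcounted and RTNL-protected; a caller takes and drops references like this. A minimal hedged sketch (the address bytes are arbitrary, and the function itself is hypothetical):

/* Sketch: take and drop a reference on a secondary device address. */
static int example_toggle_secondary_addr(struct net_device *dev)
{
	unsigned char addr[MAX_ADDR_LEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	int err;

	rtnl_lock();
	err = dev_addr_add(dev, addr, NETDEV_HW_ADDR_T_LAN);
	if (!err)
		err = dev_addr_del(dev, addr, NETDEV_HW_ADDR_T_LAN);
	rtnl_unlock();
	return err;
}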
@@ -3506,24 +3851,22 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
  *	dev_unicast_delete	- Release secondary unicast address.
  *	@dev: device
  *	@addr: address to delete
- *	@alen: length of @addr
  *
  *	Release reference to a secondary unicast address and remove it
  *	from the device if the reference count drops to zero.
  *
  *	The caller must hold the rtnl_mutex.
  */
-int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
+int dev_unicast_delete(struct net_device *dev, void *addr)
 {
 	int err;
 
 	ASSERT_RTNL();
 
-	netif_addr_lock_bh(dev);
-	err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+	err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
+			    NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
-	netif_addr_unlock_bh(dev);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_delete);
@@ -3532,24 +3875,22 @@ EXPORT_SYMBOL(dev_unicast_delete);
  *	dev_unicast_add	- add a secondary unicast address
  *	@dev: device
  *	@addr: address to add
- *	@alen: length of @addr
  *
  *	Add a secondary unicast address to the device or increase
  *	the reference count if it already exists.
  *
  *	The caller must hold the rtnl_mutex.
  */
-int dev_unicast_add(struct net_device *dev, void *addr, int alen)
+int dev_unicast_add(struct net_device *dev, void *addr)
 {
 	int err;
 
 	ASSERT_RTNL();
 
-	netif_addr_lock_bh(dev);
-	err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+	err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
+			    NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
-	netif_addr_unlock_bh(dev);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_add);
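Stacked drivers use dev_unicast_add() to get their own MAC accepted by the lower device's filter; with this change the calls rely on the RTNL instead of the address lock, and the length argument comes from dev->addr_len. A hedged sketch in the spirit of macvlan's open path (the lower-device lookup is a made-up stub):

/* Sketch: a virtual device registering its MAC with its lower device.
 * ndo_open runs under RTNL, which is what dev_unicast_add() now asserts.
 */
static int example_vdev_open(struct net_device *vdev)
{
	struct net_device *lowerdev = example_get_lowerdev(vdev); /* hypothetical */
	int err;

	err = dev_unicast_add(lowerdev, vdev->dev_addr);
	if (err)
		return err;

	netif_start_queue(vdev);
	return 0;
}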
@@ -3606,8 +3947,7 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
  *	@from: source device
  *
  *	Add newly added addresses to the destination device and release
- *	addresses that have no users left. The source device must be
- *	locked by netif_tx_lock_bh.
+ *	addresses that have no users left.
  *
  *	This function is intended to be called from the dev->set_rx_mode
  *	function of layered software devices.
@@ -3616,12 +3956,14 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
 {
 	int err = 0;
 
-	netif_addr_lock_bh(to);
-	err = __dev_addr_sync(&to->uc_list, &to->uc_count,
-			      &from->uc_list, &from->uc_count);
+	ASSERT_RTNL();
+
+	if (to->addr_len != from->addr_len)
+		return -EINVAL;
+
+	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
-	netif_addr_unlock_bh(to);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_sync);
@@ -3637,18 +3979,31 @@ EXPORT_SYMBOL(dev_unicast_sync);
  */
 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
 {
-	netif_addr_lock_bh(from);
-	netif_addr_lock(to);
+	ASSERT_RTNL();
 
-	__dev_addr_unsync(&to->uc_list, &to->uc_count,
-			  &from->uc_list, &from->uc_count);
-	__dev_set_rx_mode(to);
+	if (to->addr_len != from->addr_len)
+		return;
 
-	netif_addr_unlock(to);
-	netif_addr_unlock_bh(from);
+	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
+	__dev_set_rx_mode(to);
 }
 EXPORT_SYMBOL(dev_unicast_unsync);
 
+static void dev_unicast_flush(struct net_device *dev)
+{
+	/* rtnl_mutex must be held here */
+
+	__hw_addr_flush(&dev->uc);
+}
+
+static void dev_unicast_init(struct net_device *dev)
+{
+	/* rtnl_mutex must be held here */
+
+	__hw_addr_init(&dev->uc);
+}
+
+
 static void __dev_addr_discard(struct dev_addr_list **list)
 {
 	struct dev_addr_list *tmp;
@@ -3667,9 +4022,6 @@ static void dev_addr_discard(struct net_device *dev)
 {
 	netif_addr_lock_bh(dev);
 
-	__dev_addr_discard(&dev->uc_list);
-	dev->uc_count = 0;
-
 	__dev_addr_discard(&dev->mc_list);
 	dev->mc_count = 0;
 
@@ -3853,7 +4205,7 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
 
 	switch (cmd) {
 	case SIOCGIFFLAGS:	/* Get interface flags */
-		ifr->ifr_flags = dev_get_flags(dev);
+		ifr->ifr_flags = (short) dev_get_flags(dev);
 		return 0;
 
 	case SIOCGIFMETRIC:	/* Get the metric on the interface
@@ -4262,6 +4614,7 @@ static void rollback_registered(struct net_device *dev)
 	/*
 	 *	Flush the unicast and multicast chains
 	 */
+	dev_unicast_flush(dev);
 	dev_addr_discard(dev);
 
 	if (dev->netdev_ops->ndo_uninit)
@@ -4333,39 +4686,6 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
 }
 EXPORT_SYMBOL(netdev_fix_features);
 
-/* Some devices need to (re-)set their netdev_ops inside
- * ->init() or similar.  If that happens, we have to setup
- * the compat pointers again.
- */
-void netdev_resync_ops(struct net_device *dev)
-{
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	const struct net_device_ops *ops = dev->netdev_ops;
-
-	dev->init = ops->ndo_init;
-	dev->uninit = ops->ndo_uninit;
-	dev->open = ops->ndo_open;
-	dev->change_rx_flags = ops->ndo_change_rx_flags;
-	dev->set_rx_mode = ops->ndo_set_rx_mode;
-	dev->set_multicast_list = ops->ndo_set_multicast_list;
-	dev->set_mac_address = ops->ndo_set_mac_address;
-	dev->validate_addr = ops->ndo_validate_addr;
-	dev->do_ioctl = ops->ndo_do_ioctl;
-	dev->set_config = ops->ndo_set_config;
-	dev->change_mtu = ops->ndo_change_mtu;
-	dev->neigh_setup = ops->ndo_neigh_setup;
-	dev->tx_timeout = ops->ndo_tx_timeout;
-	dev->get_stats = ops->ndo_get_stats;
-	dev->vlan_rx_register = ops->ndo_vlan_rx_register;
-	dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
-	dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = ops->ndo_poll_controller;
-#endif
-#endif
-}
-EXPORT_SYMBOL(netdev_resync_ops);
-
 /**
  *	register_netdevice	- register a network device
  *	@dev: device to register
@@ -4405,23 +4725,6 @@ int register_netdevice(struct net_device *dev)
 
 	dev->iflink = -1;
 
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	/* Netdevice_ops API compatibility support.
-	 * This is temporary until all network devices are converted.
-	 */
-	if (dev->netdev_ops) {
-		netdev_resync_ops(dev);
-	} else {
-		char drivername[64];
-		pr_info("%s (%s): not using net_device_ops yet\n",
-			dev->name, netdev_drivername(dev, drivername, 64));
-
-		/* This works only because net_device_ops and the
-		   compatibility structure are the same. */
-		dev->netdev_ops = (void *) &(dev->init);
-	}
-#endif
-
 	/* Init, if this function is available */
 	if (dev->netdev_ops->ndo_init) {
 		ret = dev->netdev_ops->ndo_init(dev);
@@ -4707,13 +5010,30 @@ void netdev_run_todo(void)
  * the internal statistics structure is used.
  */
 const struct net_device_stats *dev_get_stats(struct net_device *dev)
- {
+{
 	const struct net_device_ops *ops = dev->netdev_ops;
 
 	if (ops->ndo_get_stats)
 		return ops->ndo_get_stats(dev);
-	else
-		return &dev->stats;
+	else {
+		unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
+		struct net_device_stats *stats = &dev->stats;
+		unsigned int i;
+		struct netdev_queue *txq;
+
+		for (i = 0; i < dev->num_tx_queues; i++) {
+			txq = netdev_get_tx_queue(dev, i);
+			tx_bytes += txq->tx_bytes;
+			tx_packets += txq->tx_packets;
+			tx_dropped += txq->tx_dropped;
+		}
+		if (tx_bytes || tx_packets || tx_dropped) {
+			stats->tx_bytes = tx_bytes;
+			stats->tx_packets = tx_packets;
+			stats->tx_dropped = tx_dropped;
+		}
+		return stats;
+	}
 }
 EXPORT_SYMBOL(dev_get_stats);
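The fallback above only reports numbers if the driver accounts into the per-queue counters on its transmit path. A hedged sketch of an ndo_start_xmit doing that (the hardware submission helper is a made-up stub):

/* Sketch: feeding txq->tx_bytes/tx_packets/tx_dropped so the
 * dev_get_stats() fallback can aggregate them; example_hw_submit()
 * is hypothetical.
 */
static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	if (example_hw_submit(dev, skb) < 0) {
		txq->tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq->tx_bytes += skb->len;
	txq->tx_packets++;
	return NETDEV_TX_OK;
}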
4719 5039
 	struct netdev_queue *tx;
 	struct net_device *dev;
 	size_t alloc_size;
-	void *p;
+	struct net_device *p;
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
 	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
 		/* ensure 32-byte alignment of private area */
-		alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
+		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
 		alloc_size += sizeof_priv;
 	}
 	/* ensure 32-byte alignment of whole construct */
-	alloc_size += NETDEV_ALIGN_CONST;
+	alloc_size += NETDEV_ALIGN - 1;
 
 	p = kzalloc(alloc_size, GFP_KERNEL);
 	if (!p) {
@@ -4771,13 +5091,17 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	if (!tx) {
 		printk(KERN_ERR "alloc_netdev: Unable to allocate "
 		       "tx qdiscs.\n");
-		kfree(p);
-		return NULL;
+		goto free_p;
 	}
 
-	dev = (struct net_device *)
-		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
+	dev = PTR_ALIGN(p, NETDEV_ALIGN);
 	dev->padded = (char *)dev - (char *)p;
+
+	if (dev_addr_init(dev))
+		goto free_tx;
+
+	dev_unicast_init(dev);
+
 	dev_net_set(dev, &init_net);
 
 	dev->_tx = tx;
@@ -4789,9 +5113,17 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	netdev_init_queues(dev);
 
 	INIT_LIST_HEAD(&dev->napi_list);
+	dev->priv_flags = IFF_XMIT_DST_RELEASE;
 	setup(dev);
 	strcpy(dev->name, name);
 	return dev;
+
+free_tx:
+	kfree(tx);
+
+free_p:
+	kfree(p);
+	return NULL;
 }
 EXPORT_SYMBOL(alloc_netdev_mq);
 
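PTR_ALIGN() replaces the open-coded mask arithmetic above; the allocation is padded by NETDEV_ALIGN - 1 bytes so an aligned net_device always fits, and dev->padded remembers the offset for free_netdev(). A standalone sketch of the same idiom (names are illustrative):

#include <stdint.h>

#define EXAMPLE_ALIGN 32	/* stand-in for NETDEV_ALIGN */

/* Round p up to the next EXAMPLE_ALIGN boundary, as PTR_ALIGN() does;
 * the caller must allocate size + EXAMPLE_ALIGN - 1 bytes and keep the
 * original pointer (or the padding offset) for the eventual free().
 */
static void *example_ptr_align(void *p)
{
	uintptr_t v = (uintptr_t)p;

	return (void *)((v + EXAMPLE_ALIGN - 1) & ~(uintptr_t)(EXAMPLE_ALIGN - 1));
}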
@@ -4811,6 +5143,9 @@ void free_netdev(struct net_device *dev)
 
 	kfree(dev->_tx);
 
+	/* Flush device addresses */
+	dev_addr_flush(dev);
+
 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
 		netif_napi_del(p);
 
@@ -4970,6 +5305,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 	/*
 	 *	Flush the unicast and multicast chains
 	 */
+	dev_unicast_flush(dev);
 	dev_addr_discard(dev);
 
 	netdev_unregister_kobject(dev);
@@ -5325,12 +5661,6 @@ EXPORT_SYMBOL(net_enable_timestamp);
 EXPORT_SYMBOL(net_disable_timestamp);
 EXPORT_SYMBOL(dev_get_flags);
 
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
-EXPORT_SYMBOL(br_handle_frame_hook);
-EXPORT_SYMBOL(br_fdb_get_hook);
-EXPORT_SYMBOL(br_fdb_put_hook);
-#endif
-
 EXPORT_SYMBOL(dev_load);
 
 EXPORT_PER_CPU_SYMBOL(softnet_data);