Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c  688
1 file changed, 513 insertions, 175 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index e2e9e4af3ace..576a61574a93 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -126,6 +126,7 @@
 #include <linux/in.h>
 #include <linux/jhash.h>
 #include <linux/random.h>
+#include <trace/events/napi.h>
 
 #include "net-sysfs.h"
 
@@ -268,7 +269,8 @@ static const unsigned short netdev_lock_type[] =
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
-	 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};
+	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY,
+	 ARPHRD_VOID, ARPHRD_NONE};
 
 static const char *netdev_lock_name[] =
 	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
@@ -285,7 +287,8 @@ static const char *netdev_lock_name[] =
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
-	 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};
+	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY",
+	 "_xmit_VOID", "_xmit_NONE"};
 
 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -1047,7 +1050,7 @@ void dev_load(struct net *net, const char *name)
 int dev_open(struct net_device *dev)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
-	int ret = 0;
+	int ret;
 
 	ASSERT_RTNL();
 
@@ -1064,6 +1067,11 @@ int dev_open(struct net_device *dev)
 	if (!netif_device_present(dev))
 		return -ENODEV;
 
+	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
+	ret = notifier_to_errno(ret);
+	if (ret)
+		return ret;
+
 	/*
 	 *	Call device private open method
 	 */
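
With this hunk, dev_open() consults the netdevice notifier chain before calling the driver's open method, so a NETDEV_PRE_UP handler can veto interface bring-up. A minimal sketch of such a handler, registered via register_netdevice_notifier(); the policy check some_policy_forbids() is a hypothetical placeholder, not part of this patch:

static int my_pre_up_handler(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* in this kernel, ptr is the netdev */

	/* some_policy_forbids() stands in for any veto condition */
	if (event == NETDEV_PRE_UP && some_policy_forbids(dev))
		return notifier_from_errno(-EPERM);	/* dev_open() returns -EPERM */

	return NOTIFY_DONE;
}
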
@@ -1688,7 +1696,16 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 		goto gso;
 	}
 
+	/*
+	 * If device doesn't need skb->dst, release it right now while
+	 * it's hot in this cpu cache
+	 */
+	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+		skb_dst_drop(skb);
+
 	rc = ops->ndo_start_xmit(skb, dev);
+	if (rc == 0)
+		txq_trans_update(txq);
 	/*
 	 * TODO: if skb_orphan() was called by
 	 * dev->hard_start_xmit() (for example, the unmodified
@@ -1718,6 +1735,7 @@ gso:
 			skb->next = nskb;
 			return rc;
 		}
+		txq_trans_update(txq);
 		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
 			return NETDEV_TX_BUSY;
 	} while (skb->next);
@@ -1735,8 +1753,12 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 {
 	u32 hash;
 
-	if (skb_rx_queue_recorded(skb))
-		return skb_get_rx_queue(skb) % dev->real_num_tx_queues;
+	if (skb_rx_queue_recorded(skb)) {
+		hash = skb_get_rx_queue(skb);
+		while (unlikely(hash >= dev->real_num_tx_queues))
+			hash -= dev->real_num_tx_queues;
+		return hash;
+	}
 
 	if (skb->sk && skb->sk->sk_hash)
 		hash = skb->sk->sk_hash;
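
The recorded rx queue number is almost always already below real_num_tx_queues, so the open-coded loop above usually runs zero iterations and spares an integer division on this hot path, while still producing the same residue as '%'. A standalone sketch of the idea:

/* Equivalent to hash % nqueues, but division-free when hash is small. */
static u32 txq_residue(u32 hash, u32 nqueues)
{
	while (unlikely(hash >= nqueues))
		hash -= nqueues;
	return hash;
}
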
@@ -1800,7 +1822,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 	if (netif_needs_gso(dev, skb))
 		goto gso;
 
-	if (skb_shinfo(skb)->frag_list &&
+	if (skb_has_frags(skb) &&
 	    !(dev->features & NETIF_F_FRAGLIST) &&
 	    __skb_linearize(skb))
 		goto out_kfree_skb;
@@ -2049,11 +2071,13 @@ static inline int deliver_skb(struct sk_buff *skb,
 }
 
 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
-/* These hooks defined here for ATM */
-struct net_bridge;
-struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
-						unsigned char *addr);
-void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
+
+#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+/* This hook is defined here for ATM LANE */
+int (*br_fdb_test_addr_hook)(struct net_device *dev,
+			     unsigned char *addr) __read_mostly;
+EXPORT_SYMBOL(br_fdb_test_addr_hook);
+#endif
 
 /*
  * If bridge module is loaded call bridging hook.
@@ -2061,6 +2085,8 @@ void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
  */
 struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
 					struct sk_buff *skb) __read_mostly;
+EXPORT_SYMBOL(br_handle_frame_hook);
+
 static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
 					    struct packet_type **pt_prev, int *ret,
 					    struct net_device *orig_dev)
@@ -2374,26 +2400,6 @@ void napi_gro_flush(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
-void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
-{
-	unsigned int offset = skb_gro_offset(skb);
-
-	hlen += offset;
-	if (hlen <= skb_headlen(skb))
-		return skb->data + offset;
-
-	if (unlikely(!skb_shinfo(skb)->nr_frags ||
-		     skb_shinfo(skb)->frags[0].size <=
-		       hlen - skb_headlen(skb) ||
-		     PageHighMem(skb_shinfo(skb)->frags[0].page)))
-		return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
-
-	return page_address(skb_shinfo(skb)->frags[0].page) +
-	       skb_shinfo(skb)->frags[0].page_offset +
-	       offset - skb_headlen(skb);
-}
-EXPORT_SYMBOL(skb_gro_header);
-
 int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
@@ -2407,7 +2413,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
 
-	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
+	if (skb_is_gso(skb) || skb_has_frags(skb))
 		goto normal;
 
 	rcu_read_lock();
@@ -2456,10 +2462,25 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 		ret = GRO_HELD;
 
 pull:
-	if (unlikely(!pskb_may_pull(skb, skb_gro_offset(skb)))) {
-		if (napi->gro_list == skb)
-			napi->gro_list = skb->next;
-		ret = GRO_DROP;
+	if (skb_headlen(skb) < skb_gro_offset(skb)) {
+		int grow = skb_gro_offset(skb) - skb_headlen(skb);
+
+		BUG_ON(skb->end - skb->tail < grow);
+
+		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
+
+		skb->tail += grow;
+		skb->data_len -= grow;
+
+		skb_shinfo(skb)->frags[0].page_offset += grow;
+		skb_shinfo(skb)->frags[0].size -= grow;
+
+		if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
+			put_page(skb_shinfo(skb)->frags[0].page);
+			memmove(skb_shinfo(skb)->frags,
+				skb_shinfo(skb)->frags + 1,
+				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
+		}
 	}
 
 ok:
@@ -2509,6 +2530,22 @@ int napi_skb_finish(int ret, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_skb_finish);
 
+void skb_gro_reset_offset(struct sk_buff *skb)
+{
+	NAPI_GRO_CB(skb)->data_offset = 0;
+	NAPI_GRO_CB(skb)->frag0 = NULL;
+	NAPI_GRO_CB(skb)->frag0_len = 0;
+
+	if (skb->mac_header == skb->tail &&
+	    !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
+		NAPI_GRO_CB(skb)->frag0 =
+			page_address(skb_shinfo(skb)->frags[0].page) +
+			skb_shinfo(skb)->frags[0].page_offset;
+		NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
+	}
+}
+EXPORT_SYMBOL(skb_gro_reset_offset);
+
 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	skb_gro_reset_offset(skb);
@@ -2526,16 +2563,10 @@ void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_reuse_skb);
 
-struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
-				  struct napi_gro_fraginfo *info)
+struct sk_buff *napi_get_frags(struct napi_struct *napi)
 {
 	struct net_device *dev = napi->dev;
 	struct sk_buff *skb = napi->skb;
-	struct ethhdr *eth;
-	skb_frag_t *frag;
-	int i;
-
-	napi->skb = NULL;
 
 	if (!skb) {
 		skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
@@ -2543,47 +2574,14 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
 			goto out;
 
 		skb_reserve(skb, NET_IP_ALIGN);
-	}
 
-	BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
-	frag = info->frags;
-
-	for (i = 0; i < info->nr_frags; i++) {
-		skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
-				   frag->size);
-		frag++;
-	}
-	skb_shinfo(skb)->nr_frags = info->nr_frags;
-
-	skb->data_len = info->len;
-	skb->len += info->len;
-	skb->truesize += info->len;
-
-	skb_reset_mac_header(skb);
-	skb_gro_reset_offset(skb);
-
-	eth = skb_gro_header(skb, sizeof(*eth));
-	if (!eth) {
-		napi_reuse_skb(napi, skb);
-		skb = NULL;
-		goto out;
+		napi->skb = skb;
 	}
 
-	skb_gro_pull(skb, sizeof(*eth));
-
-	/*
-	 * This works because the only protocols we care about don't require
-	 * special handling.  We'll fix it up properly at the end.
-	 */
-	skb->protocol = eth->h_proto;
-
-	skb->ip_summed = info->ip_summed;
-	skb->csum = info->csum;
-
 out:
 	return skb;
 }
-EXPORT_SYMBOL(napi_fraginfo_skb);
+EXPORT_SYMBOL(napi_get_frags);
 
 int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
 {
@@ -2613,9 +2611,46 @@ int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
 }
 EXPORT_SYMBOL(napi_frags_finish);
 
-int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+struct sk_buff *napi_frags_skb(struct napi_struct *napi)
 {
-	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+	struct sk_buff *skb = napi->skb;
+	struct ethhdr *eth;
+	unsigned int hlen;
+	unsigned int off;
+
+	napi->skb = NULL;
+
+	skb_reset_mac_header(skb);
+	skb_gro_reset_offset(skb);
+
+	off = skb_gro_offset(skb);
+	hlen = off + sizeof(*eth);
+	eth = skb_gro_header_fast(skb, off);
+	if (skb_gro_header_hard(skb, hlen)) {
+		eth = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!eth)) {
+			napi_reuse_skb(napi, skb);
+			skb = NULL;
+			goto out;
+		}
+	}
+
+	skb_gro_pull(skb, sizeof(*eth));
+
+	/*
+	 * This works because the only protocols we care about don't require
+	 * special handling.  We'll fix it up properly at the end.
+	 */
+	skb->protocol = eth->h_proto;
+
+out:
+	return skb;
+}
+EXPORT_SYMBOL(napi_frags_skb);
+
+int napi_gro_frags(struct napi_struct *napi)
+{
+	struct sk_buff *skb = napi_frags_skb(napi);
 
 	if (!skb)
 		return NET_RX_DROP;
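
Taken together, napi_get_frags() and napi_gro_frags() replace the old napi_gro_fraginfo interface: the driver fills page fragments into a napi-owned skb instead of passing a descriptor structure. A hedged sketch of the new driver-side pattern; the ring-buffer details (rx_page, rx_off, rx_len) are hypothetical:

/* Called from a driver's ->poll() for each completed rx descriptor. */
static void my_driver_rx_frag(struct napi_struct *napi, struct page *rx_page,
			      unsigned int rx_off, unsigned int rx_len)
{
	struct sk_buff *skb = napi_get_frags(napi);	/* napi-owned skb */

	if (!skb)
		return;		/* allocation failed; frame is dropped */

	skb_fill_page_desc(skb, 0, rx_page, rx_off, rx_len);
	skb->len += rx_len;
	skb->data_len += rx_len;
	skb->truesize += rx_len;

	/* Hands the skb to GRO; headers are read via the frag0 fast path. */
	napi_gro_frags(napi);
}
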
@@ -2719,7 +2754,7 @@ void netif_napi_del(struct napi_struct *napi)
 	struct sk_buff *skb, *next;
 
 	list_del_init(&napi->dev_list);
-	kfree_skb(napi->skb);
+	napi_free_frags(napi);
 
 	for (skb = napi->gro_list; skb; skb = next) {
 		next = skb->next;
@@ -2773,8 +2808,10 @@ static void net_rx_action(struct softirq_action *h)
 		 * accidentally calling ->poll() when NAPI is not scheduled.
 		 */
 		work = 0;
-		if (test_bit(NAPI_STATE_SCHED, &n->state))
+		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
 			work = n->poll(n, weight);
+			trace_napi_poll(n);
+		}
 
 		WARN_ON_ONCE(work > weight);
 
@@ -3444,6 +3481,319 @@ void dev_set_rx_mode(struct net_device *dev)
 	netif_addr_unlock_bh(dev);
 }
 
+/* hw addresses list handling functions */
+
+static int __hw_addr_add(struct list_head *list, int *delta,
+			 unsigned char *addr, int addr_len,
+			 unsigned char addr_type)
+{
+	struct netdev_hw_addr *ha;
+	int alloc_size;
+
+	if (addr_len > MAX_ADDR_LEN)
+		return -EINVAL;
+
+	list_for_each_entry(ha, list, list) {
+		if (!memcmp(ha->addr, addr, addr_len) &&
+		    ha->type == addr_type) {
+			ha->refcount++;
+			return 0;
+		}
+	}
+
+
+	alloc_size = sizeof(*ha);
+	if (alloc_size < L1_CACHE_BYTES)
+		alloc_size = L1_CACHE_BYTES;
+	ha = kmalloc(alloc_size, GFP_ATOMIC);
+	if (!ha)
+		return -ENOMEM;
+	memcpy(ha->addr, addr, addr_len);
+	ha->type = addr_type;
+	ha->refcount = 1;
+	ha->synced = false;
+	list_add_tail_rcu(&ha->list, list);
+	if (delta)
+		(*delta)++;
+	return 0;
+}
+
+static void ha_rcu_free(struct rcu_head *head)
+{
+	struct netdev_hw_addr *ha;
+
+	ha = container_of(head, struct netdev_hw_addr, rcu_head);
+	kfree(ha);
+}
+
+static int __hw_addr_del(struct list_head *list, int *delta,
+			 unsigned char *addr, int addr_len,
+			 unsigned char addr_type)
+{
+	struct netdev_hw_addr *ha;
+
+	list_for_each_entry(ha, list, list) {
+		if (!memcmp(ha->addr, addr, addr_len) &&
+		    (ha->type == addr_type || !addr_type)) {
+			if (--ha->refcount)
+				return 0;
+			list_del_rcu(&ha->list);
+			call_rcu(&ha->rcu_head, ha_rcu_free);
+			if (delta)
+				(*delta)--;
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+static int __hw_addr_add_multiple(struct list_head *to_list, int *to_delta,
+				  struct list_head *from_list, int addr_len,
+				  unsigned char addr_type)
+{
+	int err;
+	struct netdev_hw_addr *ha, *ha2;
+	unsigned char type;
+
+	list_for_each_entry(ha, from_list, list) {
+		type = addr_type ? addr_type : ha->type;
+		err = __hw_addr_add(to_list, to_delta, ha->addr,
+				    addr_len, type);
+		if (err)
+			goto unroll;
+	}
+	return 0;
+
+unroll:
+	list_for_each_entry(ha2, from_list, list) {
+		if (ha2 == ha)
+			break;
+		type = addr_type ? addr_type : ha2->type;
+		__hw_addr_del(to_list, to_delta, ha2->addr,
+			      addr_len, type);
+	}
+	return err;
+}
+
+static void __hw_addr_del_multiple(struct list_head *to_list, int *to_delta,
+				   struct list_head *from_list, int addr_len,
+				   unsigned char addr_type)
+{
+	struct netdev_hw_addr *ha;
+	unsigned char type;
+
+	list_for_each_entry(ha, from_list, list) {
+		type = addr_type ? addr_type : ha->type;
+		__hw_addr_del(to_list, to_delta, ha->addr,
+			      addr_len, type);
+	}
+}
+
+static int __hw_addr_sync(struct list_head *to_list, int *to_delta,
+			  struct list_head *from_list, int *from_delta,
+			  int addr_len)
+{
+	int err = 0;
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe(ha, tmp, from_list, list) {
+		if (!ha->synced) {
+			err = __hw_addr_add(to_list, to_delta, ha->addr,
+					    addr_len, ha->type);
+			if (err)
+				break;
+			ha->synced = true;
+			ha->refcount++;
+		} else if (ha->refcount == 1) {
+			__hw_addr_del(to_list, to_delta, ha->addr,
+				      addr_len, ha->type);
+			__hw_addr_del(from_list, from_delta, ha->addr,
+				      addr_len, ha->type);
+		}
+	}
+	return err;
+}
+
+static void __hw_addr_unsync(struct list_head *to_list, int *to_delta,
+			     struct list_head *from_list, int *from_delta,
+			     int addr_len)
+{
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe(ha, tmp, from_list, list) {
+		if (ha->synced) {
+			__hw_addr_del(to_list, to_delta, ha->addr,
+				      addr_len, ha->type);
+			ha->synced = false;
+			__hw_addr_del(from_list, from_delta, ha->addr,
+				      addr_len, ha->type);
+		}
+	}
+}
+
+
+static void __hw_addr_flush(struct list_head *list)
+{
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe(ha, tmp, list, list) {
+		list_del_rcu(&ha->list);
+		call_rcu(&ha->rcu_head, ha_rcu_free);
+	}
+}
+
+/* Device addresses handling functions */
+
+static void dev_addr_flush(struct net_device *dev)
+{
+	/* rtnl_mutex must be held here */
+
+	__hw_addr_flush(&dev->dev_addr_list);
+	dev->dev_addr = NULL;
+}
+
+static int dev_addr_init(struct net_device *dev)
+{
+	unsigned char addr[MAX_ADDR_LEN];
+	struct netdev_hw_addr *ha;
+	int err;
+
+	/* rtnl_mutex must be held here */
+
+	INIT_LIST_HEAD(&dev->dev_addr_list);
+	memset(addr, 0, sizeof(addr));
+	err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(addr),
+			    NETDEV_HW_ADDR_T_LAN);
+	if (!err) {
+		/*
+		 * Get the first (previously created) address from the list
+		 * and set dev_addr pointer to this location.
+		 */
+		ha = list_first_entry(&dev->dev_addr_list,
+				      struct netdev_hw_addr, list);
+		dev->dev_addr = ha->addr;
+	}
+	return err;
+}
+
+/**
+ *	dev_addr_add	- Add a device address
+ *	@dev: device
+ *	@addr: address to add
+ *	@addr_type: address type
+ *
+ *	Add a device address to the device or increase the reference count if
+ *	it already exists.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add(struct net_device *dev, unsigned char *addr,
+		 unsigned char addr_type)
+{
+	int err;
+
+	ASSERT_RTNL();
+
+	err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, dev->addr_len,
+			    addr_type);
+	if (!err)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_addr_add);
+
+/**
+ *	dev_addr_del	- Release a device address.
+ *	@dev: device
+ *	@addr: address to delete
+ *	@addr_type: address type
+ *
+ *	Release reference to a device address and remove it from the device
+ *	if the reference count drops to zero.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del(struct net_device *dev, unsigned char *addr,
+		 unsigned char addr_type)
+{
+	int err;
+	struct netdev_hw_addr *ha;
+
+	ASSERT_RTNL();
+
+	/*
+	 * We can not remove the first address from the list because
+	 * dev->dev_addr points to that.
+	 */
+	ha = list_first_entry(&dev->dev_addr_list, struct netdev_hw_addr, list);
+	if (ha->addr == dev->dev_addr && ha->refcount == 1)
+		return -ENOENT;
+
+	err = __hw_addr_del(&dev->dev_addr_list, NULL, addr, dev->addr_len,
+			    addr_type);
+	if (!err)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_addr_del);
+
+/**
+ *	dev_addr_add_multiple	- Add device addresses from another device
+ *	@to_dev: device to which addresses will be added
+ *	@from_dev: device from which addresses will be added
+ *	@addr_type: address type - 0 means the type will be taken from from_dev
+ *
+ *	Add the device addresses of one device to another.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add_multiple(struct net_device *to_dev,
+			  struct net_device *from_dev,
+			  unsigned char addr_type)
+{
+	int err;
+
+	ASSERT_RTNL();
+
+	if (from_dev->addr_len != to_dev->addr_len)
+		return -EINVAL;
+	err = __hw_addr_add_multiple(&to_dev->dev_addr_list, NULL,
+				     &from_dev->dev_addr_list,
+				     to_dev->addr_len, addr_type);
+	if (!err)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_addr_add_multiple);
+
+/**
+ *	dev_addr_del_multiple	- Delete device addresses by another device
+ *	@to_dev: device where the addresses will be deleted
+ *	@from_dev: device supplying the addresses to be deleted
+ *	@addr_type: address type - 0 means the type will be taken from from_dev
+ *
+ *	Deletes the addresses listed in from_dev from to_dev.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del_multiple(struct net_device *to_dev,
+			  struct net_device *from_dev,
+			  unsigned char addr_type)
+{
+	ASSERT_RTNL();
+
+	if (from_dev->addr_len != to_dev->addr_len)
+		return -EINVAL;
+	__hw_addr_del_multiple(&to_dev->dev_addr_list, NULL,
+			       &from_dev->dev_addr_list,
+			       to_dev->addr_len, addr_type);
+	call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+	return 0;
+}
+EXPORT_SYMBOL(dev_addr_del_multiple);
+
+/* unicast and multicast addresses handling functions */
+
 int __dev_addr_delete(struct dev_addr_list **list, int *count,
 		      void *addr, int alen, int glbl)
 {
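
A brief usage sketch for the new dev_addr_add()/dev_addr_del() API added above. Both calls require the rtnl lock, as the kernel-doc states; the MAC value is an arbitrary example and the function is hypothetical:

static int example_secondary_mac(struct net_device *dev)
{
	/* example address only; must be dev->addr_len bytes long */
	unsigned char mac[ETH_ALEN] = {0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc};
	int err;

	rtnl_lock();
	err = dev_addr_add(dev, mac, NETDEV_HW_ADDR_T_LAN);
	if (!err)
		err = dev_addr_del(dev, mac, NETDEV_HW_ADDR_T_LAN);	/* drops the ref */
	rtnl_unlock();
	return err;
}
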
@@ -3506,24 +3856,22 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
  *	dev_unicast_delete	- Release secondary unicast address.
  *	@dev: device
  *	@addr: address to delete
- *	@alen: length of @addr
  *
  *	Release reference to a secondary unicast address and remove it
  *	from the device if the reference count drops to zero.
  *
  *	The caller must hold the rtnl_mutex.
  */
-int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
+int dev_unicast_delete(struct net_device *dev, void *addr)
 {
 	int err;
 
 	ASSERT_RTNL();
 
-	netif_addr_lock_bh(dev);
-	err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+	err = __hw_addr_del(&dev->uc_list, &dev->uc_count, addr,
+			    dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
-	netif_addr_unlock_bh(dev);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_delete);
@@ -3532,24 +3880,22 @@ EXPORT_SYMBOL(dev_unicast_delete);
  *	dev_unicast_add		- add a secondary unicast address
  *	@dev: device
  *	@addr: address to add
- *	@alen: length of @addr
  *
  *	Add a secondary unicast address to the device or increase
  *	the reference count if it already exists.
  *
  *	The caller must hold the rtnl_mutex.
  */
-int dev_unicast_add(struct net_device *dev, void *addr, int alen)
+int dev_unicast_add(struct net_device *dev, void *addr)
 {
 	int err;
 
 	ASSERT_RTNL();
 
-	netif_addr_lock_bh(dev);
-	err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+	err = __hw_addr_add(&dev->uc_list, &dev->uc_count, addr,
+			    dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
-	netif_addr_unlock_bh(dev);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_add);
@@ -3606,8 +3952,7 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
  *	@from: source device
  *
  *	Add newly added addresses to the destination device and release
- *	addresses that have no users left. The source device must be
- *	locked by netif_tx_lock_bh.
+ *	addresses that have no users left.
  *
  *	This function is intended to be called from the dev->set_rx_mode
  *	function of layered software devices.
@@ -3616,12 +3961,15 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
 {
 	int err = 0;
 
-	netif_addr_lock_bh(to);
-	err = __dev_addr_sync(&to->uc_list, &to->uc_count,
-			      &from->uc_list, &from->uc_count);
+	ASSERT_RTNL();
+
+	if (to->addr_len != from->addr_len)
+		return -EINVAL;
+
+	err = __hw_addr_sync(&to->uc_list, &to->uc_count,
+			     &from->uc_list, &from->uc_count, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
-	netif_addr_unlock_bh(to);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_sync);
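
With this hunk dev_unicast_sync() runs under RTNL instead of the address lock, and it now checks that both devices use the same address length. A sketch of the caller pattern the kernel-doc describes for layered devices; the lowerdev/priv plumbing is illustrative only:

/* Propagating secondary unicast addresses in a hypothetical stacked driver */
static void my_stacked_sync_addresses(struct net_device *dev)
{
	struct net_device *lowerdev = my_priv(dev)->lowerdev;	/* hypothetical */

	/* Returns -EINVAL if dev and lowerdev disagree on addr_len. */
	dev_unicast_sync(lowerdev, dev);
}
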
@@ -3637,18 +3985,33 @@ EXPORT_SYMBOL(dev_unicast_sync);
  */
 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
 {
-	netif_addr_lock_bh(from);
-	netif_addr_lock(to);
+	ASSERT_RTNL();
 
-	__dev_addr_unsync(&to->uc_list, &to->uc_count,
-			  &from->uc_list, &from->uc_count);
-	__dev_set_rx_mode(to);
+	if (to->addr_len != from->addr_len)
+		return;
 
-	netif_addr_unlock(to);
-	netif_addr_unlock_bh(from);
+	__hw_addr_unsync(&to->uc_list, &to->uc_count,
+			 &from->uc_list, &from->uc_count, to->addr_len);
+	__dev_set_rx_mode(to);
 }
 EXPORT_SYMBOL(dev_unicast_unsync);
 
+static void dev_unicast_flush(struct net_device *dev)
+{
+	/* rtnl_mutex must be held here */
+
+	__hw_addr_flush(&dev->uc_list);
+	dev->uc_count = 0;
+}
+
+static void dev_unicast_init(struct net_device *dev)
+{
+	/* rtnl_mutex must be held here */
+
+	INIT_LIST_HEAD(&dev->uc_list);
+}
+
+
 static void __dev_addr_discard(struct dev_addr_list **list)
 {
 	struct dev_addr_list *tmp;
@@ -3667,9 +4030,6 @@ static void dev_addr_discard(struct net_device *dev)
 {
 	netif_addr_lock_bh(dev);
 
-	__dev_addr_discard(&dev->uc_list);
-	dev->uc_count = 0;
-
 	__dev_addr_discard(&dev->mc_list);
 	dev->mc_count = 0;
 
@@ -3853,7 +4213,7 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
 
 	switch (cmd) {
 	case SIOCGIFFLAGS:	/* Get interface flags */
-		ifr->ifr_flags = dev_get_flags(dev);
+		ifr->ifr_flags = (short) dev_get_flags(dev);
 		return 0;
 
 	case SIOCGIFMETRIC:	/* Get the metric on the interface
@@ -4262,6 +4622,7 @@ static void rollback_registered(struct net_device *dev)
 	/*
 	 *	Flush the unicast and multicast chains
 	 */
+	dev_unicast_flush(dev);
 	dev_addr_discard(dev);
 
 	if (dev->netdev_ops->ndo_uninit)
@@ -4333,39 +4694,6 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
 }
 EXPORT_SYMBOL(netdev_fix_features);
 
-/* Some devices need to (re-)set their netdev_ops inside
- * ->init() or similar.  If that happens, we have to setup
- * the compat pointers again.
- */
-void netdev_resync_ops(struct net_device *dev)
-{
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	const struct net_device_ops *ops = dev->netdev_ops;
-
-	dev->init = ops->ndo_init;
-	dev->uninit = ops->ndo_uninit;
-	dev->open = ops->ndo_open;
-	dev->change_rx_flags = ops->ndo_change_rx_flags;
-	dev->set_rx_mode = ops->ndo_set_rx_mode;
-	dev->set_multicast_list = ops->ndo_set_multicast_list;
-	dev->set_mac_address = ops->ndo_set_mac_address;
-	dev->validate_addr = ops->ndo_validate_addr;
-	dev->do_ioctl = ops->ndo_do_ioctl;
-	dev->set_config = ops->ndo_set_config;
-	dev->change_mtu = ops->ndo_change_mtu;
-	dev->neigh_setup = ops->ndo_neigh_setup;
-	dev->tx_timeout = ops->ndo_tx_timeout;
-	dev->get_stats = ops->ndo_get_stats;
-	dev->vlan_rx_register = ops->ndo_vlan_rx_register;
-	dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
-	dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = ops->ndo_poll_controller;
-#endif
-#endif
-}
-EXPORT_SYMBOL(netdev_resync_ops);
-
 /**
  *	register_netdevice	- register a network device
  *	@dev: device to register
@@ -4405,23 +4733,6 @@ int register_netdevice(struct net_device *dev)
 
 	dev->iflink = -1;
 
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	/* Netdevice_ops API compatibility support.
-	 * This is temporary until all network devices are converted.
-	 */
-	if (dev->netdev_ops) {
-		netdev_resync_ops(dev);
-	} else {
-		char drivername[64];
-		pr_info("%s (%s): not using net_device_ops yet\n",
-			dev->name, netdev_drivername(dev, drivername, 64));
-
-		/* This works only because net_device_ops and the
-		   compatibility structure are the same. */
-		dev->netdev_ops = (void *) &(dev->init);
-	}
-#endif
-
 	/* Init, if this function is available */
 	if (dev->netdev_ops->ndo_init) {
 		ret = dev->netdev_ops->ndo_init(dev);
@@ -4707,13 +5018,30 @@ void netdev_run_todo(void)
  * the internal statistics structure is used.
  */
 const struct net_device_stats *dev_get_stats(struct net_device *dev)
- {
+{
 	const struct net_device_ops *ops = dev->netdev_ops;
 
 	if (ops->ndo_get_stats)
 		return ops->ndo_get_stats(dev);
-	else
-		return &dev->stats;
+	else {
+		unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
+		struct net_device_stats *stats = &dev->stats;
+		unsigned int i;
+		struct netdev_queue *txq;
+
+		for (i = 0; i < dev->num_tx_queues; i++) {
+			txq = netdev_get_tx_queue(dev, i);
+			tx_bytes += txq->tx_bytes;
+			tx_packets += txq->tx_packets;
+			tx_dropped += txq->tx_dropped;
+		}
+		if (tx_bytes || tx_packets || tx_dropped) {
+			stats->tx_bytes = tx_bytes;
+			stats->tx_packets = tx_packets;
+			stats->tx_dropped = tx_dropped;
+		}
+		return stats;
+	}
 }
 EXPORT_SYMBOL(dev_get_stats);
 
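
The fallback branch above folds the new per-queue counters into dev->stats, so a multiqueue driver can simply bump its own netdev_queue counters at transmit time and skip ndo_get_stats entirely. A hedged sketch of that driver side (hardware handoff elided):

/* Inside a hypothetical driver's ->ndo_start_xmit() */
static int my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ... hand the packet to hardware ... */

	txq->tx_packets++;		/* dev_get_stats() folds these */
	txq->tx_bytes += skb->len;
	return NETDEV_TX_OK;
}
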
@@ -4748,18 +5076,18 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	struct netdev_queue *tx;
 	struct net_device *dev;
 	size_t alloc_size;
-	void *p;
+	struct net_device *p;
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
 	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
 		/* ensure 32-byte alignment of private area */
-		alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
+		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
 		alloc_size += sizeof_priv;
 	}
 	/* ensure 32-byte alignment of whole construct */
-	alloc_size += NETDEV_ALIGN_CONST;
+	alloc_size += NETDEV_ALIGN - 1;
 
 	p = kzalloc(alloc_size, GFP_KERNEL);
 	if (!p) {
@@ -4771,13 +5099,17 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	if (!tx) {
 		printk(KERN_ERR "alloc_netdev: Unable to allocate "
 		       "tx qdiscs.\n");
-		kfree(p);
-		return NULL;
+		goto free_p;
 	}
 
-	dev = (struct net_device *)
-	      (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
+	dev = PTR_ALIGN(p, NETDEV_ALIGN);
 	dev->padded = (char *)dev - (char *)p;
+
+	if (dev_addr_init(dev))
+		goto free_tx;
+
+	dev_unicast_init(dev);
+
 	dev_net_set(dev, &init_net);
 
 	dev->_tx = tx;
@@ -4789,9 +5121,17 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	netdev_init_queues(dev);
 
 	INIT_LIST_HEAD(&dev->napi_list);
+	dev->priv_flags = IFF_XMIT_DST_RELEASE;
 	setup(dev);
 	strcpy(dev->name, name);
 	return dev;
+
+free_tx:
+	kfree(tx);
+
+free_p:
+	kfree(p);
+	return NULL;
 }
 EXPORT_SYMBOL(alloc_netdev_mq);
 
@@ -4811,6 +5151,9 @@ void free_netdev(struct net_device *dev)
 
 	kfree(dev->_tx);
 
+	/* Flush device addresses */
+	dev_addr_flush(dev);
+
 	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
 		netif_napi_del(p);
 
@@ -4970,6 +5313,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
 	/*
 	 *	Flush the unicast and multicast chains
 	 */
+	dev_unicast_flush(dev);
 	dev_addr_discard(dev);
 
 	netdev_unregister_kobject(dev);
@@ -5325,12 +5669,6 @@ EXPORT_SYMBOL(net_enable_timestamp);
 EXPORT_SYMBOL(net_disable_timestamp);
 EXPORT_SYMBOL(dev_get_flags);
 
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
-EXPORT_SYMBOL(br_handle_frame_hook);
-EXPORT_SYMBOL(br_fdb_get_hook);
-EXPORT_SYMBOL(br_fdb_put_hook);
-#endif
-
 EXPORT_SYMBOL(dev_load);
 
 EXPORT_PER_CPU_SYMBOL(softnet_data);