Diffstat (limited to 'net/core/dev.c')

 net/core/dev.c | 692 ++++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 515 insertions(+), 177 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 343883f65ea7..baf2dc13a34a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -126,6 +126,7 @@
 #include <linux/in.h>
 #include <linux/jhash.h>
 #include <linux/random.h>
+#include <trace/events/napi.h>
 
 #include "net-sysfs.h"
 
@@ -268,7 +269,8 @@ static const unsigned short netdev_lock_type[] =
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
-	 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};
+	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY,
+	 ARPHRD_VOID, ARPHRD_NONE};
 
 static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
@@ -285,7 +287,8 @@ static const char *netdev_lock_name[] =
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
-	 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};
+	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY",
+	 "_xmit_VOID", "_xmit_NONE"};
 
 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -1047,7 +1050,7 @@ void dev_load(struct net *net, const char *name)
 int dev_open(struct net_device *dev)
 {
	const struct net_device_ops *ops = dev->netdev_ops;
-	int ret = 0;
+	int ret;
 
	ASSERT_RTNL();
 
@@ -1064,6 +1067,11 @@ int dev_open(struct net_device *dev)
	if (!netif_device_present(dev))
		return -ENODEV;
 
+	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
+	ret = notifier_to_errno(ret);
+	if (ret)
+		return ret;
+
	/*
	 *	Call device private open method
	 */
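Note: NETDEV_PRE_UP gives netdevice-notifier users a chance to veto dev_open() before ndo_open() runs; any error carried back through notifier_to_errno() aborts the bring-up. A minimal sketch of such a veto — the dev_is_blocked() predicate and all names here are hypothetical; in this era the notifier's void *ptr is the struct net_device itself:

	static int example_pre_up(struct notifier_block *nb,
				  unsigned long event, void *ptr)
	{
		struct net_device *dev = ptr;

		/* returning notifier_from_errno(-EBUSY) makes dev_open()
		 * fail with -EBUSY before ndo_open() is ever called */
		if (event == NETDEV_PRE_UP && dev_is_blocked(dev))
			return notifier_from_errno(-EBUSY);
		return NOTIFY_DONE;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_pre_up,
	};
	/* registered via register_netdevice_notifier(&example_nb) */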
@@ -1336,7 +1344,12 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 {
	struct packet_type *ptype;
 
+#ifdef CONFIG_NET_CLS_ACT
+	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
+		net_timestamp(skb);
+#else
	net_timestamp(skb);
+#endif
 
	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
@@ -1683,7 +1696,16 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			goto gso;
	}
 
+	/*
+	 * If device doesn't need skb->dst, release it right now while
+	 * it's hot in this cpu cache
+	 */
+	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+		skb_dst_drop(skb);
+
	rc = ops->ndo_start_xmit(skb, dev);
+	if (rc == 0)
+		txq_trans_update(txq);
	/*
	 * TODO: if skb_orphan() was called by
	 * dev->hard_start_xmit() (for example, the unmodified
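Note: with multiqueue devices the tx-watchdog timestamp moves from dev->trans_start to the per-queue netdev_queue, so successful transmissions now refresh it via txq_trans_update(). Roughly what that helper does in this era (a sketch, not the authoritative definition):

	static inline void txq_trans_update(struct netdev_queue *txq)
	{
		/* only touch the timestamp when we hold the xmit lock */
		if (txq->xmit_lock_owner != -1)
			txq->trans_start = jiffies;
	}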
@@ -1713,6 +1735,7 @@ gso:
			skb->next = nskb;
			return rc;
		}
+		txq_trans_update(txq);
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);
@@ -1732,9 +1755,14 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 
	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
-	} else if (skb->sk && skb->sk->sk_hash) {
+		while (unlikely(hash >= dev->real_num_tx_queues))
+			hash -= dev->real_num_tx_queues;
+		return hash;
+	}
+
+	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
-	} else
+	else
		hash = skb->protocol;
 
	hash = jhash_1word(hash, skb_tx_hashrnd);
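Note: the new early return reuses a recorded rx-queue index directly as the tx queue, folding it into range by repeated subtraction rather than a modulo; the recorded index is usually already below real_num_tx_queues, so the loop body rarely executes and an integer divide is kept off the hot path. An illustration of the fold (not kernel code):

	static u16 fold_queue(u16 hash, u16 nq)
	{
		while (hash >= nq)
			hash -= nq;	/* same result as hash % nq */
		return hash;
	}
	/* fold_queue(6, 4) == 2: recorded rx queue 6 lands on tx queue 2 */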
@@ -1794,7 +1822,7 @@ int dev_queue_xmit(struct sk_buff *skb)
	if (netif_needs_gso(dev, skb))
		goto gso;
 
-	if (skb_shinfo(skb)->frag_list &&
+	if (skb_has_frags(skb) &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;
@@ -2043,11 +2071,13 @@ static inline int deliver_skb(struct sk_buff *skb,
 }
 
 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
-/* These hooks defined here for ATM */
-struct net_bridge;
-struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
-						unsigned char *addr);
-void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
+
+#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+/* This hook is defined here for ATM LANE */
+int (*br_fdb_test_addr_hook)(struct net_device *dev,
+			     unsigned char *addr) __read_mostly;
+EXPORT_SYMBOL(br_fdb_test_addr_hook);
+#endif
 
 /*
  * If bridge module is loaded call bridging hook.
@@ -2055,6 +2085,8 @@ void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
  */
 struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
+EXPORT_SYMBOL(br_handle_frame_hook);
+
 static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
@@ -2368,26 +2400,6 @@ void napi_gro_flush(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
-void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
-{
-	unsigned int offset = skb_gro_offset(skb);
-
-	hlen += offset;
-	if (hlen <= skb_headlen(skb))
-		return skb->data + offset;
-
-	if (unlikely(!skb_shinfo(skb)->nr_frags ||
-		     skb_shinfo(skb)->frags[0].size <=
-		     hlen - skb_headlen(skb) ||
-		     PageHighMem(skb_shinfo(skb)->frags[0].page)))
-		return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
-
-	return page_address(skb_shinfo(skb)->frags[0].page) +
-	       skb_shinfo(skb)->frags[0].page_offset +
-	       offset - skb_headlen(skb);
-}
-EXPORT_SYMBOL(skb_gro_header);
-
 int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
	struct sk_buff **pp = NULL;
@@ -2401,7 +2413,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;
 
-	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
+	if (skb_is_gso(skb) || skb_has_frags(skb))
		goto normal;
 
	rcu_read_lock();
@@ -2450,10 +2462,25 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
	ret = GRO_HELD;
 
 pull:
-	if (unlikely(!pskb_may_pull(skb, skb_gro_offset(skb)))) {
-		if (napi->gro_list == skb)
-			napi->gro_list = skb->next;
-		ret = GRO_DROP;
+	if (skb_headlen(skb) < skb_gro_offset(skb)) {
+		int grow = skb_gro_offset(skb) - skb_headlen(skb);
+
+		BUG_ON(skb->end - skb->tail < grow);
+
+		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
+
+		skb->tail += grow;
+		skb->data_len -= grow;
+
+		skb_shinfo(skb)->frags[0].page_offset += grow;
+		skb_shinfo(skb)->frags[0].size -= grow;
+
+		if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
+			put_page(skb_shinfo(skb)->frags[0].page);
+			memmove(skb_shinfo(skb)->frags,
+				skb_shinfo(skb)->frags + 1,
+				--skb_shinfo(skb)->nr_frags);
+		}
	}
 
 ok:
@@ -2503,6 +2530,22 @@ int napi_skb_finish(int ret, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_skb_finish);
 
+void skb_gro_reset_offset(struct sk_buff *skb)
+{
+	NAPI_GRO_CB(skb)->data_offset = 0;
+	NAPI_GRO_CB(skb)->frag0 = NULL;
+	NAPI_GRO_CB(skb)->frag0_len = 0;
+
+	if (skb->mac_header == skb->tail &&
+	    !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
+		NAPI_GRO_CB(skb)->frag0 =
+			page_address(skb_shinfo(skb)->frags[0].page) +
+			skb_shinfo(skb)->frags[0].page_offset;
+		NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
+	}
+}
+EXPORT_SYMBOL(skb_gro_reset_offset);
+
 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
	skb_gro_reset_offset(skb);
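Note: skb_gro_reset_offset() primes the frag0 fast path. When the driver handed over a purely paged skb (mac_header == tail means the linear area is empty) and the first fragment is not in highmem, GRO can parse protocol headers straight out of that page instead of pulling them into the linear area. The accessors that consume frag0 look roughly like this in this era's include/linux/netdevice.h (a sketch):

	static inline void *skb_gro_header_fast(struct sk_buff *skb,
						unsigned int offset)
	{
		return NAPI_GRO_CB(skb)->frag0 + offset;	/* no copy */
	}

	static inline int skb_gro_header_hard(struct sk_buff *skb,
					      unsigned int hlen)
	{
		/* true when the fast path cannot cover hlen bytes */
		return NAPI_GRO_CB(skb)->frag0_len < hlen;
	}

	static inline void *skb_gro_header_slow(struct sk_buff *skb,
						unsigned int hlen,
						unsigned int offset)
	{
		NAPI_GRO_CB(skb)->frag0 = NULL;
		NAPI_GRO_CB(skb)->frag0_len = 0;
		return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
	}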
@@ -2520,16 +2563,10 @@ void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(napi_reuse_skb);
 
-struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
-				  struct napi_gro_fraginfo *info)
+struct sk_buff *napi_get_frags(struct napi_struct *napi)
 {
	struct net_device *dev = napi->dev;
	struct sk_buff *skb = napi->skb;
-	struct ethhdr *eth;
-	skb_frag_t *frag;
-	int i;
-
-	napi->skb = NULL;
 
	if (!skb) {
		skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
@@ -2537,47 +2574,14 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
			goto out;
 
		skb_reserve(skb, NET_IP_ALIGN);
-	}
-
-	BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
-	frag = &info->frags[info->nr_frags - 1];
-
-	for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) {
-		skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
-				   frag->size);
-		frag++;
-	}
-	skb_shinfo(skb)->nr_frags = info->nr_frags;
 
-	skb->data_len = info->len;
-	skb->len += info->len;
-	skb->truesize += info->len;
-
-	skb_reset_mac_header(skb);
-	skb_gro_reset_offset(skb);
-
-	eth = skb_gro_header(skb, sizeof(*eth));
-	if (!eth) {
-		napi_reuse_skb(napi, skb);
-		skb = NULL;
-		goto out;
+		napi->skb = skb;
	}
 
-	skb_gro_pull(skb, sizeof(*eth));
-
-	/*
-	 * This works because the only protocols we care about don't require
-	 * special handling.  We'll fix it up properly at the end.
-	 */
-	skb->protocol = eth->h_proto;
-
-	skb->ip_summed = info->ip_summed;
-	skb->csum = info->csum;
-
 out:
	return skb;
 }
-EXPORT_SYMBOL(napi_fraginfo_skb);
+EXPORT_SYMBOL(napi_get_frags);
 
 int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
 {
@@ -2607,9 +2611,46 @@ int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
 }
 EXPORT_SYMBOL(napi_frags_finish);
 
-int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+struct sk_buff *napi_frags_skb(struct napi_struct *napi)
+{
+	struct sk_buff *skb = napi->skb;
+	struct ethhdr *eth;
+	unsigned int hlen;
+	unsigned int off;
+
+	napi->skb = NULL;
+
+	skb_reset_mac_header(skb);
+	skb_gro_reset_offset(skb);
+
+	off = skb_gro_offset(skb);
+	hlen = off + sizeof(*eth);
+	eth = skb_gro_header_fast(skb, off);
+	if (skb_gro_header_hard(skb, hlen)) {
+		eth = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!eth)) {
+			napi_reuse_skb(napi, skb);
+			skb = NULL;
+			goto out;
+		}
+	}
+
+	skb_gro_pull(skb, sizeof(*eth));
+
+	/*
+	 * This works because the only protocols we care about don't require
+	 * special handling.  We'll fix it up properly at the end.
+	 */
+	skb->protocol = eth->h_proto;
+
+out:
+	return skb;
+}
+EXPORT_SYMBOL(napi_frags_skb);
+
+int napi_gro_frags(struct napi_struct *napi)
 {
-	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+	struct sk_buff *skb = napi_frags_skb(napi);
 
	if (!skb)
		return NET_RX_DROP;
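Note: the napi_gro_fraginfo interface is replaced by a two-call sequence. The driver borrows a preallocated skb with napi_get_frags(), attaches its page fragments, and hands it back with napi_gro_frags(), which now takes no fraginfo argument. A hypothetical driver rx path (page/offset/len stand in for real descriptor fields):

	static void mydrv_rx_one(struct napi_struct *napi, struct page *page,
				 unsigned int offset, unsigned int len)
	{
		struct sk_buff *skb = napi_get_frags(napi);

		if (!skb)
			return;		/* allocation failed: drop */

		skb_fill_page_desc(skb, 0, page, offset, len);
		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;

		napi_gro_frags(napi);	/* consumes napi->skb */
	}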
@@ -2713,7 +2754,7 @@ void netif_napi_del(struct napi_struct *napi)
	struct sk_buff *skb, *next;
 
	list_del_init(&napi->dev_list);
-	kfree_skb(napi->skb);
+	napi_free_frags(napi);
 
	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
@@ -2767,8 +2808,10 @@ static void net_rx_action(struct softirq_action *h)
		 * accidently calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
-		if (test_bit(NAPI_STATE_SCHED, &n->state))
+		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
+			trace_napi_poll(n);
+		}
 
		WARN_ON_ONCE(work > weight);
 
@@ -3418,10 +3461,10 @@ void __dev_set_rx_mode(struct net_device *dev)
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
-		if (dev->uc_count > 0 && !dev->uc_promisc) {
+		if (dev->uc.count > 0 && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1);
			dev->uc_promisc = 1;
-		} else if (dev->uc_count == 0 && dev->uc_promisc) {
+		} else if (dev->uc.count == 0 && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1);
			dev->uc_promisc = 0;
		}
@@ -3438,6 +3481,316 @@ void dev_set_rx_mode(struct net_device *dev)
	netif_addr_unlock_bh(dev);
 }
 
+/* hw addresses list handling functions */
+
+static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
+			 int addr_len, unsigned char addr_type)
+{
+	struct netdev_hw_addr *ha;
+	int alloc_size;
+
+	if (addr_len > MAX_ADDR_LEN)
+		return -EINVAL;
+
+	list_for_each_entry(ha, &list->list, list) {
+		if (!memcmp(ha->addr, addr, addr_len) &&
+		    ha->type == addr_type) {
+			ha->refcount++;
+			return 0;
+		}
+	}
+
+
+	alloc_size = sizeof(*ha);
+	if (alloc_size < L1_CACHE_BYTES)
+		alloc_size = L1_CACHE_BYTES;
+	ha = kmalloc(alloc_size, GFP_ATOMIC);
+	if (!ha)
+		return -ENOMEM;
+	memcpy(ha->addr, addr, addr_len);
+	ha->type = addr_type;
+	ha->refcount = 1;
+	ha->synced = false;
+	list_add_tail_rcu(&ha->list, &list->list);
+	list->count++;
+	return 0;
+}
+
+static void ha_rcu_free(struct rcu_head *head)
+{
+	struct netdev_hw_addr *ha;
+
+	ha = container_of(head, struct netdev_hw_addr, rcu_head);
+	kfree(ha);
+}
+
+static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
+			 int addr_len, unsigned char addr_type)
+{
+	struct netdev_hw_addr *ha;
+
+	list_for_each_entry(ha, &list->list, list) {
+		if (!memcmp(ha->addr, addr, addr_len) &&
+		    (ha->type == addr_type || !addr_type)) {
+			if (--ha->refcount)
+				return 0;
+			list_del_rcu(&ha->list);
+			call_rcu(&ha->rcu_head, ha_rcu_free);
+			list->count--;
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+				  struct netdev_hw_addr_list *from_list,
+				  int addr_len,
+				  unsigned char addr_type)
+{
+	int err;
+	struct netdev_hw_addr *ha, *ha2;
+	unsigned char type;
+
+	list_for_each_entry(ha, &from_list->list, list) {
+		type = addr_type ? addr_type : ha->type;
+		err = __hw_addr_add(to_list, ha->addr, addr_len, type);
+		if (err)
+			goto unroll;
+	}
+	return 0;
+
+unroll:
+	list_for_each_entry(ha2, &from_list->list, list) {
+		if (ha2 == ha)
+			break;
+		type = addr_type ? addr_type : ha2->type;
+		__hw_addr_del(to_list, ha2->addr, addr_len, type);
+	}
+	return err;
+}
+
+static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+				   struct netdev_hw_addr_list *from_list,
+				   int addr_len,
+				   unsigned char addr_type)
+{
+	struct netdev_hw_addr *ha;
+	unsigned char type;
+
+	list_for_each_entry(ha, &from_list->list, list) {
+		type = addr_type ? addr_type : ha->type;
+		__hw_addr_del(to_list, ha->addr, addr_len, type);
+	}
+}
+
+static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+			  struct netdev_hw_addr_list *from_list,
+			  int addr_len)
+{
+	int err = 0;
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
+		if (!ha->synced) {
+			err = __hw_addr_add(to_list, ha->addr,
+					    addr_len, ha->type);
+			if (err)
+				break;
+			ha->synced = true;
+			ha->refcount++;
+		} else if (ha->refcount == 1) {
+			__hw_addr_del(to_list, ha->addr, addr_len, ha->type);
+			__hw_addr_del(from_list, ha->addr, addr_len, ha->type);
+		}
+	}
+	return err;
+}
+
+static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+			     struct netdev_hw_addr_list *from_list,
+			     int addr_len)
+{
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
+		if (ha->synced) {
+			__hw_addr_del(to_list, ha->addr,
+				      addr_len, ha->type);
+			ha->synced = false;
+			__hw_addr_del(from_list, ha->addr,
+				      addr_len, ha->type);
+		}
+	}
+}
+
+static void __hw_addr_flush(struct netdev_hw_addr_list *list)
+{
+	struct netdev_hw_addr *ha, *tmp;
+
+	list_for_each_entry_safe(ha, tmp, &list->list, list) {
+		list_del_rcu(&ha->list);
+		call_rcu(&ha->rcu_head, ha_rcu_free);
+	}
+	list->count = 0;
+}
+
+static void __hw_addr_init(struct netdev_hw_addr_list *list)
+{
+	INIT_LIST_HEAD(&list->list);
+	list->count = 0;
+}
+
+/* Device addresses handling functions */
+
+static void dev_addr_flush(struct net_device *dev)
+{
+	/* rtnl_mutex must be held here */
+
+	__hw_addr_flush(&dev->dev_addrs);
+	dev->dev_addr = NULL;
+}
+
+static int dev_addr_init(struct net_device *dev)
+{
+	unsigned char addr[MAX_ADDR_LEN];
+	struct netdev_hw_addr *ha;
+	int err;
+
+	/* rtnl_mutex must be held here */
+
+	__hw_addr_init(&dev->dev_addrs);
+	memset(addr, 0, sizeof(addr));
+	err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
+			    NETDEV_HW_ADDR_T_LAN);
+	if (!err) {
+		/*
+		 * Get the first (previously created) address from the list
+		 * and set dev_addr pointer to this location.
+		 */
+		ha = list_first_entry(&dev->dev_addrs.list,
+				      struct netdev_hw_addr, list);
+		dev->dev_addr = ha->addr;
+	}
+	return err;
+}
+
+/**
+ *	dev_addr_add	- Add a device address
+ *	@dev: device
+ *	@addr: address to add
+ *	@addr_type: address type
+ *
+ *	Add a device address to the device or increase the reference count if
+ *	it already exists.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add(struct net_device *dev, unsigned char *addr,
+		 unsigned char addr_type)
+{
+	int err;
+
+	ASSERT_RTNL();
+
+	err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
+	if (!err)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_addr_add);
+
+/**
+ *	dev_addr_del	- Release a device address.
+ *	@dev: device
+ *	@addr: address to delete
+ *	@addr_type: address type
+ *
+ *	Release reference to a device address and remove it from the device
+ *	if the reference count drops to zero.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del(struct net_device *dev, unsigned char *addr,
+		 unsigned char addr_type)
+{
+	int err;
+	struct netdev_hw_addr *ha;
+
+	ASSERT_RTNL();
+
+	/*
+	 * We can not remove the first address from the list because
+	 * dev->dev_addr points to that.
+	 */
+	ha = list_first_entry(&dev->dev_addrs.list,
+			      struct netdev_hw_addr, list);
+	if (ha->addr == dev->dev_addr && ha->refcount == 1)
+		return -ENOENT;
+
+	err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
+			    addr_type);
+	if (!err)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_addr_del);
+
+/**
+ *	dev_addr_add_multiple	- Add device addresses from another device
+ *	@to_dev: device to which addresses will be added
+ *	@from_dev: device from which addresses will be added
+ *	@addr_type: address type - 0 means the type will be taken from from_dev
+ *
+ *	Add device addresses from one device to another.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add_multiple(struct net_device *to_dev,
+			  struct net_device *from_dev,
+			  unsigned char addr_type)
+{
+	int err;
+
+	ASSERT_RTNL();
+
+	if (from_dev->addr_len != to_dev->addr_len)
+		return -EINVAL;
+	err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
+				     to_dev->addr_len, addr_type);
+	if (!err)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_addr_add_multiple);
+
+/**
+ *	dev_addr_del_multiple	- Delete device addresses by another device
+ *	@to_dev: device where the addresses will be deleted
+ *	@from_dev: device supplying the addresses to be deleted
+ *	@addr_type: address type - 0 means the type will be taken from from_dev
+ *
+ *	Deletes addresses in to device by the list of addresses in from device.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del_multiple(struct net_device *to_dev,
+			  struct net_device *from_dev,
+			  unsigned char addr_type)
+{
+	ASSERT_RTNL();
+
+	if (from_dev->addr_len != to_dev->addr_len)
+		return -EINVAL;
+	__hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
+			       to_dev->addr_len, addr_type);
+	call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+	return 0;
+}
+EXPORT_SYMBOL(dev_addr_del_multiple);
+
+/* multicast addresses handling functions */
+
 int __dev_addr_delete(struct dev_addr_list **list, int *count,
		      void *addr, int alen, int glbl)
 {
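Note: the new netdev_hw_addr list is reference counted per (address, type) pair, so stacked users can install the same address independently and remove it without disturbing each other. An illustrative sequence under RTNL (the address bytes are arbitrary):

	static void example_dev_addr_refcount(struct net_device *dev)
	{
		unsigned char a[ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};

		rtnl_lock();
		dev_addr_add(dev, a, NETDEV_HW_ADDR_T_SAN);
		dev_addr_add(dev, a, NETDEV_HW_ADDR_T_SAN);	/* refcount -> 2 */
		dev_addr_del(dev, a, NETDEV_HW_ADDR_T_SAN);	/* still installed */
		dev_addr_del(dev, a, NETDEV_HW_ADDR_T_SAN);	/* now removed */
		rtnl_unlock();
	}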
@@ -3500,24 +3853,22 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
 *	dev_unicast_delete	- Release secondary unicast address.
 *	@dev: device
 *	@addr: address to delete
- *	@alen: length of @addr
 *
 *	Release reference to a secondary unicast address and remove it
 *	from the device if the reference count drops to zero.
 *
 *	The caller must hold the rtnl_mutex.
 */
-int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
+int dev_unicast_delete(struct net_device *dev, void *addr)
 {
	int err;
 
	ASSERT_RTNL();
 
-	netif_addr_lock_bh(dev);
-	err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+	err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
+			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
-	netif_addr_unlock_bh(dev);
	return err;
 }
 EXPORT_SYMBOL(dev_unicast_delete);
@@ -3526,24 +3877,22 @@ EXPORT_SYMBOL(dev_unicast_delete);
 *	dev_unicast_add	- add a secondary unicast address
 *	@dev: device
 *	@addr: address to add
- *	@alen: length of @addr
 *
 *	Add a secondary unicast address to the device or increase
 *	the reference count if it already exists.
 *
 *	The caller must hold the rtnl_mutex.
 */
-int dev_unicast_add(struct net_device *dev, void *addr, int alen)
+int dev_unicast_add(struct net_device *dev, void *addr)
 {
	int err;
 
	ASSERT_RTNL();
 
-	netif_addr_lock_bh(dev);
-	err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+	err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
+			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
-	netif_addr_unlock_bh(dev);
	return err;
 }
 EXPORT_SYMBOL(dev_unicast_add);
@@ -3600,8 +3949,7 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
 *	@from: source device
 *
 *	Add newly added addresses to the destination device and release
- *	addresses that have no users left. The source device must be
- *	locked by netif_tx_lock_bh.
+ *	addresses that have no users left.
 *
 *	This function is intended to be called from the dev->set_rx_mode
 *	function of layered software devices.
@@ -3610,12 +3958,14 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
 {
	int err = 0;
 
-	netif_addr_lock_bh(to);
-	err = __dev_addr_sync(&to->uc_list, &to->uc_count,
-			      &from->uc_list, &from->uc_count);
+	ASSERT_RTNL();
+
+	if (to->addr_len != from->addr_len)
+		return -EINVAL;
+
+	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
-	netif_addr_unlock_bh(to);
	return err;
 }
 EXPORT_SYMBOL(dev_unicast_sync);
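Note: dev_unicast_sync() now walks the hw-addr lists under RTNL rather than under netif_addr_lock_bh(), so a layered driver simply propagates its secondary unicast addresses to the lower device from its rx-mode callback. A hypothetical layered device (mydev_priv and lowerdev are illustrative names):

	struct mydev_priv {
		struct net_device *lowerdev;
	};

	static void mydev_set_rx_mode(struct net_device *dev)
	{
		struct mydev_priv *priv = netdev_priv(dev);

		/* push dev's secondary unicast list down to the real device */
		dev_unicast_sync(priv->lowerdev, dev);
	}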
@@ -3631,18 +3981,31 @@ EXPORT_SYMBOL(dev_unicast_sync);
 */
 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
 {
-	netif_addr_lock_bh(from);
-	netif_addr_lock(to);
+	ASSERT_RTNL();
 
-	__dev_addr_unsync(&to->uc_list, &to->uc_count,
-			  &from->uc_list, &from->uc_count);
-	__dev_set_rx_mode(to);
+	if (to->addr_len != from->addr_len)
+		return;
 
-	netif_addr_unlock(to);
-	netif_addr_unlock_bh(from);
+	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
+	__dev_set_rx_mode(to);
 }
 EXPORT_SYMBOL(dev_unicast_unsync);
 
+static void dev_unicast_flush(struct net_device *dev)
+{
+	/* rtnl_mutex must be held here */
+
+	__hw_addr_flush(&dev->uc);
+}
+
+static void dev_unicast_init(struct net_device *dev)
+{
+	/* rtnl_mutex must be held here */
+
+	__hw_addr_init(&dev->uc);
+}
+
+
 static void __dev_addr_discard(struct dev_addr_list **list)
 {
	struct dev_addr_list *tmp;
@@ -3661,9 +4024,6 @@ static void dev_addr_discard(struct net_device *dev)
 {
	netif_addr_lock_bh(dev);
 
-	__dev_addr_discard(&dev->uc_list);
-	dev->uc_count = 0;
-
	__dev_addr_discard(&dev->mc_list);
	dev->mc_count = 0;
 
@@ -3847,7 +4207,7 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
 
	switch (cmd) {
		case SIOCGIFFLAGS:	/* Get interface flags */
-			ifr->ifr_flags = dev_get_flags(dev);
+			ifr->ifr_flags = (short) dev_get_flags(dev);
			return 0;
 
		case SIOCGIFMETRIC:	/* Get the metric on the interface
@@ -4256,6 +4616,7 @@ static void rollback_registered(struct net_device *dev)
	/*
	 *	Flush the unicast and multicast chains
	 */
+	dev_unicast_flush(dev);
	dev_addr_discard(dev);
 
	if (dev->netdev_ops->ndo_uninit)
@@ -4327,39 +4688,6 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
 }
 EXPORT_SYMBOL(netdev_fix_features);
 
-/* Some devices need to (re-)set their netdev_ops inside
- * ->init() or similar.  If that happens, we have to setup
- * the compat pointers again.
- */
-void netdev_resync_ops(struct net_device *dev)
-{
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	const struct net_device_ops *ops = dev->netdev_ops;
-
-	dev->init = ops->ndo_init;
-	dev->uninit = ops->ndo_uninit;
-	dev->open = ops->ndo_open;
-	dev->change_rx_flags = ops->ndo_change_rx_flags;
-	dev->set_rx_mode = ops->ndo_set_rx_mode;
-	dev->set_multicast_list = ops->ndo_set_multicast_list;
-	dev->set_mac_address = ops->ndo_set_mac_address;
-	dev->validate_addr = ops->ndo_validate_addr;
-	dev->do_ioctl = ops->ndo_do_ioctl;
-	dev->set_config = ops->ndo_set_config;
-	dev->change_mtu = ops->ndo_change_mtu;
-	dev->neigh_setup = ops->ndo_neigh_setup;
-	dev->tx_timeout = ops->ndo_tx_timeout;
-	dev->get_stats = ops->ndo_get_stats;
-	dev->vlan_rx_register = ops->ndo_vlan_rx_register;
-	dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
-	dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = ops->ndo_poll_controller;
-#endif
-#endif
-}
-EXPORT_SYMBOL(netdev_resync_ops);
-
 /**
 *	register_netdevice	- register a network device
 *	@dev: device to register
@@ -4399,23 +4727,6 @@ int register_netdevice(struct net_device *dev)
 
	dev->iflink = -1;
 
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
-	/* Netdevice_ops API compatibility support.
-	 * This is temporary until all network devices are converted.
-	 */
-	if (dev->netdev_ops) {
-		netdev_resync_ops(dev);
-	} else {
-		char drivername[64];
-		pr_info("%s (%s): not using net_device_ops yet\n",
-			dev->name, netdev_drivername(dev, drivername, 64));
-
-		/* This works only because net_device_ops and the
-		   compatibility structure are the same. */
-		dev->netdev_ops = (void *) &(dev->init);
-	}
-#endif
-
	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
@@ -4701,13 +5012,30 @@ void netdev_run_todo(void)
 *	the internal statistics structure is used.
 */
 const struct net_device_stats *dev_get_stats(struct net_device *dev)
 {
	const struct net_device_ops *ops = dev->netdev_ops;
 
	if (ops->ndo_get_stats)
		return ops->ndo_get_stats(dev);
-	else
-		return &dev->stats;
+	else {
+		unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
+		struct net_device_stats *stats = &dev->stats;
+		unsigned int i;
+		struct netdev_queue *txq;
+
+		for (i = 0; i < dev->num_tx_queues; i++) {
+			txq = netdev_get_tx_queue(dev, i);
+			tx_bytes += txq->tx_bytes;
+			tx_packets += txq->tx_packets;
+			tx_dropped += txq->tx_dropped;
+		}
+		if (tx_bytes || tx_packets || tx_dropped) {
+			stats->tx_bytes = tx_bytes;
+			stats->tx_packets = tx_packets;
+			stats->tx_dropped = tx_dropped;
+		}
+		return stats;
+	}
 }
 EXPORT_SYMBOL(dev_get_stats);
 
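Note: for the fallback path above to report useful tx counters, a multiqueue driver without its own ndo_get_stats can account per queue from its xmit routine; dev_get_stats() then sums the per-queue values. A hypothetical driver sketch:

	static int mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		txq->tx_packets++;
		txq->tx_bytes += skb->len;

		/* ... hand the packet to hardware ... */
		return NETDEV_TX_OK;
	}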
@@ -4742,18 +5070,18 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
	struct netdev_queue *tx;
	struct net_device *dev;
	size_t alloc_size;
-	void *p;
+	struct net_device *p;
 
	BUG_ON(strlen(name) >= sizeof(dev->name));
 
	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
-		alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
+		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
-	alloc_size += NETDEV_ALIGN_CONST;
+	alloc_size += NETDEV_ALIGN - 1;
 
	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
@@ -4765,13 +5093,17 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
	if (!tx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "tx qdiscs.\n");
-		kfree(p);
-		return NULL;
+		goto free_p;
	}
 
-	dev = (struct net_device *)
-		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
+	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;
+
+	if (dev_addr_init(dev))
+		goto free_tx;
+
+	dev_unicast_init(dev);
+
	dev_net_set(dev, &init_net);
 
	dev->_tx = tx;
@@ -4783,9 +5115,17 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
	netdev_init_queues(dev);
 
	INIT_LIST_HEAD(&dev->napi_list);
+	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);
	strcpy(dev->name, name);
	return dev;
+
+free_tx:
+	kfree(tx);
+
+free_p:
+	kfree(p);
+	return NULL;
 }
 EXPORT_SYMBOL(alloc_netdev_mq);
 
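Note: the device is still carved out of a single kzalloc'd block, but the open-coded mask arithmetic becomes PTR_ALIGN()/ALIGN(), and dev->padded records the offset back to the raw pointer so free_netdev() can kfree() it. Roughly how netdev_priv() of this era locates the private area in the same layout (a sketch, shadowing the real helper):

	/* one allocation:  p ... dev = PTR_ALIGN(p, NETDEV_ALIGN) ... priv */
	static inline void *netdev_priv(const struct net_device *dev)
	{
		return (char *)dev + ALIGN(sizeof(struct net_device),
					   NETDEV_ALIGN);
	}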
@@ -4805,6 +5145,9 @@ void free_netdev(struct net_device *dev)
 
	kfree(dev->_tx);
 
+	/* Flush device addresses */
+	dev_addr_flush(dev);
+
	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);
 
@@ -4964,6 +5307,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
	/*
	 *	Flush the unicast and multicast chains
	 */
+	dev_unicast_flush(dev);
	dev_addr_discard(dev);
 
	netdev_unregister_kobject(dev);
@@ -5319,12 +5663,6 @@ EXPORT_SYMBOL(net_enable_timestamp);
 EXPORT_SYMBOL(net_disable_timestamp);
 EXPORT_SYMBOL(dev_get_flags);
 
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
-EXPORT_SYMBOL(br_handle_frame_hook);
-EXPORT_SYMBOL(br_fdb_get_hook);
-EXPORT_SYMBOL(br_fdb_put_hook);
-#endif
-
 EXPORT_SYMBOL(dev_load);
 
 EXPORT_PER_CPU_SYMBOL(softnet_data);