Diffstat (limited to 'net/core/dev.c')

 net/core/dev.c | 387
 1 file changed, 245 insertions(+), 142 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 1f466e82ac33..e1c1cdcc2bb0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -101,8 +101,6 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/stat.h>
-#include <linux/if_bridge.h>
-#include <linux/if_macvlan.h>
 #include <net/dst.h>
 #include <net/pkt_sched.h>
 #include <net/checksum.h>
@@ -803,35 +801,31 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
 EXPORT_SYMBOL(dev_getfirstbyhwtype);
 
 /**
- *	dev_get_by_flags - find any device with given flags
+ *	dev_get_by_flags_rcu - find any device with given flags
  *	@net: the applicable net namespace
  *	@if_flags: IFF_* values
  *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
- *	is not found or a pointer to the device. The device returned has
- *	had a reference added and the pointer is safe until the user calls
- *	dev_put to indicate they have finished with it.
+ *	is not found or a pointer to the device. Must be called inside
+ *	rcu_read_lock(), and result refcount is unchanged.
 */
 
-struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
+struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
 				    unsigned short mask)
 {
 	struct net_device *dev, *ret;
 
 	ret = NULL;
-	rcu_read_lock();
 	for_each_netdev_rcu(net, dev) {
 		if (((dev->flags ^ if_flags) & mask) == 0) {
-			dev_hold(dev);
 			ret = dev;
 			break;
 		}
 	}
-	rcu_read_unlock();
 	return ret;
 }
-EXPORT_SYMBOL(dev_get_by_flags);
+EXPORT_SYMBOL(dev_get_by_flags_rcu);
 
 /**
 *	dev_valid_name - check if name is okay for network device
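
The _rcu suffix shifts the locking burden to the caller, which must also take its own reference if the device is used outside the read-side critical section. A minimal sketch of the new calling convention (the IFF_UP lookup and helper name are illustrative, not from this patch):

    /* Hypothetical helper: grab any interface that is administratively UP. */
    static struct net_device *get_any_up_dev(struct net *net)
    {
    	struct net_device *dev;

    	rcu_read_lock();
    	dev = dev_get_by_flags_rcu(net, IFF_UP, IFF_UP);
    	if (dev)
    		dev_hold(dev);	/* keep it valid past rcu_read_unlock() */
    	rcu_read_unlock();

    	return dev;		/* caller releases with dev_put() */
    }
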
@@ -1542,7 +1536,8 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 				if (net_ratelimit())
 					printk(KERN_CRIT "protocol %04x is "
 					       "buggy, dev %s\n",
-					       skb2->protocol, dev->name);
+					       ntohs(skb2->protocol),
+					       dev->name);
 				skb_reset_network_header(skb2);
 			}
 
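
skb->protocol is a __be16 held in network byte order, so printing it raw with %04x swaps the bytes on little-endian hosts (ETH_P_IP 0x0800 renders as 0008); hence the added ntohs(). A standalone userspace demonstration:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
    	uint16_t proto = htons(0x0800);	/* ETH_P_IP as it sits in skb->protocol */

    	printf("raw:     %04x\n", proto);		/* 0008 on little-endian */
    	printf("ntohs(): %04x\n", ntohs(proto));	/* 0800 everywhere */
    	return 0;
    }
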
@@ -1924,6 +1919,22 @@ static inline void skb_orphan_try(struct sk_buff *skb)
 	}
 }
 
+/*
+ * Returns true if either:
+ *	1. skb has frag_list and the device doesn't support FRAGLIST, or
+ *	2. skb is fragmented and the device does not support SG, or if
+ *	   at least one of fragments is in highmem and device does not
+ *	   support DMA from it.
+ */
+static inline int skb_needs_linearize(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	return skb_is_nonlinear(skb) &&
+	       ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+		(skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
+					       illegal_highdma(dev, skb))));
+}
+
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
 {
@@ -1948,6 +1959,22 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			goto out_kfree_skb;
 		if (skb->next)
 			goto gso;
+	} else {
+		if (skb_needs_linearize(skb, dev) &&
+		    __skb_linearize(skb))
+			goto out_kfree_skb;
+
+		/* If packet is not checksummed and device does not
+		 * support checksumming for this protocol, complete
+		 * checksumming here.
+		 */
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			skb_set_transport_header(skb, skb->csum_start -
+						      skb_headroom(skb));
+			if (!dev_can_checksum(dev, skb) &&
+			    skb_checksum_help(skb))
+				goto out_kfree_skb;
+		}
 	}
 
 	rc = ops->ndo_start_xmit(skb, dev);
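
With this hunk and the one above, the linearize and checksum fixups run only on the non-GSO path, immediately before ndo_start_xmit(), instead of for every packet entering dev_queue_xmit(). skb_checksum_help() is the software fallback that completes a CHECKSUM_PARTIAL packet when the NIC cannot; the arithmetic behind it is the RFC 1071 ones'-complement sum, shown here as a self-contained userspace sketch (not code from this patch):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* RFC 1071 ones'-complement checksum over a buffer. */
    static uint16_t csum_fold_buf(const uint8_t *buf, size_t len)
    {
    	uint32_t sum = 0;
    	size_t i;

    	for (i = 0; i + 1 < len; i += 2)
    		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
    	if (len & 1)			/* pad an odd trailing byte */
    		sum += (uint32_t)buf[len - 1] << 8;
    	while (sum >> 16)		/* fold carries back in */
    		sum = (sum & 0xffff) + (sum >> 16);
    	return (uint16_t)~sum;
    }

    int main(void)
    {
    	const uint8_t data[] = { 0x45, 0x00, 0x00, 0x54 };

    	printf("%04x\n", csum_fold_buf(data, sizeof(data)));
    	return 0;
    }
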
@@ -2063,14 +2090,24 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 				 struct netdev_queue *txq)
 {
 	spinlock_t *root_lock = qdisc_lock(q);
+	bool contended = qdisc_is_running(q);
 	int rc;
 
+	/*
+	 * Heuristic to force contended enqueues to serialize on a
+	 * separate lock before trying to get qdisc main lock.
+	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
+	 * and dequeue packets faster.
+	 */
+	if (unlikely(contended))
+		spin_lock(&q->busylock);
+
 	spin_lock(root_lock);
 	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
 		kfree_skb(skb);
 		rc = NET_XMIT_DROP;
 	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
-		   !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
+		   qdisc_run_begin(q)) {
 		/*
 		 * This is a work-conserving queue; there are no old skbs
 		 * waiting to be sent out; and the qdisc is not running -
@@ -2079,37 +2116,33 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
 			skb_dst_force(skb);
 		__qdisc_update_bstats(q, skb->len);
-		if (sch_direct_xmit(skb, q, dev, txq, root_lock))
+		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
+			if (unlikely(contended)) {
+				spin_unlock(&q->busylock);
+				contended = false;
+			}
 			__qdisc_run(q);
-		else
-			clear_bit(__QDISC_STATE_RUNNING, &q->state);
+		} else
+			qdisc_run_end(q);
 
 		rc = NET_XMIT_SUCCESS;
 	} else {
 		skb_dst_force(skb);
 		rc = qdisc_enqueue_root(skb, q);
-		qdisc_run(q);
+		if (qdisc_run_begin(q)) {
+			if (unlikely(contended)) {
+				spin_unlock(&q->busylock);
+				contended = false;
+			}
+			__qdisc_run(q);
+		}
 	}
 	spin_unlock(root_lock);
-
+	if (unlikely(contended))
+		spin_unlock(&q->busylock);
 	return rc;
 }
 
-/*
- * Returns true if either:
- *	1. skb has frag_list and the device doesn't support FRAGLIST, or
- *	2. skb is fragmented and the device does not support SG, or if
- *	   at least one of fragments is in highmem and device does not
- *	   support DMA from it.
- */
-static inline int skb_needs_linearize(struct sk_buff *skb,
-				      struct net_device *dev)
-{
-	return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
-	       (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
-					      illegal_highdma(dev, skb)));
-}
-
 /**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
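
While one CPU owns __QDISC_STATE_RUNNING and dequeues, competing senders now serialize first on busylock, so the owner reacquires the root lock with less contention. qdisc_run_begin()/qdisc_run_end() replace the open-coded test_and_set_bit()/clear_bit(); a rough sketch of those accessors (an assumption inferred from the calls above — the real definitions live in include/net/sch_generic.h and may differ in detail):

    /* Only one CPU may dequeue from a qdisc at a time. */
    static inline bool qdisc_is_running(struct Qdisc *qdisc)
    {
    	return test_bit(__QDISC_STATE_RUNNING, &qdisc->state);
    }

    static inline bool qdisc_run_begin(struct Qdisc *qdisc)
    {
    	/* true if this caller became the running owner */
    	return !test_and_set_bit(__QDISC_STATE_RUNNING, &qdisc->state);
    }

    static inline void qdisc_run_end(struct Qdisc *qdisc)
    {
    	clear_bit(__QDISC_STATE_RUNNING, &qdisc->state);
    }
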
@@ -2142,25 +2175,6 @@ int dev_queue_xmit(struct sk_buff *skb)
 	struct Qdisc *q;
 	int rc = -ENOMEM;
 
-	/* GSO will handle the following emulations directly. */
-	if (netif_needs_gso(dev, skb))
-		goto gso;
-
-	/* Convert a paged skb to linear, if required */
-	if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
-		goto out_kfree_skb;
-
-	/* If packet is not checksummed and device does not support
-	 * checksumming for this protocol, complete checksumming here.
-	 */
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		skb_set_transport_header(skb, skb->csum_start -
-					      skb_headroom(skb));
-		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
-			goto out_kfree_skb;
-	}
-
-gso:
 	/* Disable soft irqs for various locks below. Also
 	 * stops preemption for RCU.
 	 */
@@ -2219,7 +2233,6 @@ gso:
 	rc = -ENETDOWN;
 	rcu_read_unlock_bh();
 
-out_kfree_skb:
 	kfree_skb(skb);
 	return rc;
 out:
@@ -2604,70 +2617,14 @@ static inline int deliver_skb(struct sk_buff *skb,
 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
-#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
-
-#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
+#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
+    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
 /* This hook is defined here for ATM LANE */
 int (*br_fdb_test_addr_hook)(struct net_device *dev,
 			     unsigned char *addr) __read_mostly;
 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
 #endif
 
-/*
- * If bridge module is loaded call bridging hook.
- *  returns NULL if packet was consumed.
- */
-struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
-					struct sk_buff *skb) __read_mostly;
-EXPORT_SYMBOL_GPL(br_handle_frame_hook);
-
-static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
-					    struct packet_type **pt_prev, int *ret,
-					    struct net_device *orig_dev)
-{
-	struct net_bridge_port *port;
-
-	if (skb->pkt_type == PACKET_LOOPBACK ||
-	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
-		return skb;
-
-	if (*pt_prev) {
-		*ret = deliver_skb(skb, *pt_prev, orig_dev);
-		*pt_prev = NULL;
-	}
-
-	return br_handle_frame_hook(port, skb);
-}
-#else
-#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
-#endif
-
-#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
-struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p,
-					     struct sk_buff *skb) __read_mostly;
-EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
-
-static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
-					     struct packet_type **pt_prev,
-					     int *ret,
-					     struct net_device *orig_dev)
-{
-	struct macvlan_port *port;
-
-	port = rcu_dereference(skb->dev->macvlan_port);
-	if (!port)
-		return skb;
-
-	if (*pt_prev) {
-		*ret = deliver_skb(skb, *pt_prev, orig_dev);
-		*pt_prev = NULL;
-	}
-	return macvlan_handle_frame_hook(port, skb);
-}
-#else
-#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
-#endif
-
 #ifdef CONFIG_NET_CLS_ACT
 /* TODO: Maybe we should just force sch_ingress to be compiled in
  * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
@@ -2685,10 +2642,10 @@ static int ing_filter(struct sk_buff *skb)
 	int result = TC_ACT_OK;
 	struct Qdisc *q;
 
-	if (MAX_RED_LOOP < ttl++) {
-		printk(KERN_WARNING
-		       "Redir loop detected Dropping packet (%d->%d)\n",
-		       skb->skb_iif, dev->ifindex);
+	if (unlikely(MAX_RED_LOOP < ttl++)) {
+		if (net_ratelimit())
+			pr_warning("Redir loop detected Dropping packet (%d->%d)\n",
+				   skb->skb_iif, dev->ifindex);
 		return TC_ACT_SHOT;
 	}
 
@@ -2718,9 +2675,6 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 	if (*pt_prev) {
 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
 		*pt_prev = NULL;
-	} else {
-		/* Huh? Why does turning on AF_PACKET affect this? */
-		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
 	}
 
 	switch (ing_filter(skb)) {
@@ -2763,6 +2717,51 @@ void netif_nit_deliver(struct sk_buff *skb)
 	rcu_read_unlock();
 }
 
+/**
+ *	netdev_rx_handler_register - register receive handler
+ *	@dev: device to register a handler for
+ *	@rx_handler: receive handler to register
+ *	@rx_handler_data: data pointer that is used by rx handler
+ *
+ *	Register a receive handler for a device. This handler will then be
+ *	called from __netif_receive_skb. A negative errno code is returned
+ *	on a failure.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+int netdev_rx_handler_register(struct net_device *dev,
+			       rx_handler_func_t *rx_handler,
+			       void *rx_handler_data)
+{
+	ASSERT_RTNL();
+
+	if (dev->rx_handler)
+		return -EBUSY;
+
+	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
+	rcu_assign_pointer(dev->rx_handler, rx_handler);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
+
+/**
+ *	netdev_rx_handler_unregister - unregister receive handler
+ *	@dev: device to unregister a handler from
+ *
+ *	Unregister a receive handler from a device.
+ *
+ *	The caller must hold the rtnl_mutex.
+ */
+void netdev_rx_handler_unregister(struct net_device *dev)
+{
+
+	ASSERT_RTNL();
+	rcu_assign_pointer(dev->rx_handler, NULL);
+	rcu_assign_pointer(dev->rx_handler_data, NULL);
+}
+EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+
 static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
 					      struct net_device *master)
 {
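
These two entry points replace the dedicated bridge and macvlan hooks removed earlier in this patch with one generic per-device hook. A hypothetical sketch of how a stacked driver would attach itself (my_handle_frame, my_port and my_port_attach are invented names, not part of this patch):

    struct my_port;			/* illustrative private state */

    static struct sk_buff *my_handle_frame(struct sk_buff *skb)
    {
    	/* Return NULL if the skb was consumed, or return an skb
    	 * (possibly modified) to continue normal delivery.
    	 */
    	return skb;
    }

    static int my_port_attach(struct net_device *dev, struct my_port *port)
    {
    	int err;

    	rtnl_lock();
    	err = netdev_rx_handler_register(dev, my_handle_frame, port);
    	rtnl_unlock();

    	return err;	/* -EBUSY if dev already has a handler */
    }
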
@@ -2784,7 +2783,8 @@ int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
 	if (master->priv_flags & IFF_MASTER_ARPMON)
 		dev->last_rx = jiffies;
 
-	if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
+	if ((master->priv_flags & IFF_MASTER_ALB) &&
+	    (master->priv_flags & IFF_BRIDGE_PORT)) {
 		/* Do address unmangle. The local destination address
 		 * will be always the one master has. Provides the right
 		 * functionality in a bridge.
@@ -2815,6 +2815,7 @@ EXPORT_SYMBOL(__skb_bond_should_drop);
 static int __netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
+	rx_handler_func_t *rx_handler;
 	struct net_device *orig_dev;
 	struct net_device *master;
 	struct net_device *null_or_orig;
@@ -2856,8 +2857,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 		skb->dev = master;
 	}
 
-	__get_cpu_var(softnet_data).processed++;
-
+	__this_cpu_inc(softnet_data.processed);
 	skb_reset_network_header(skb);
 	skb_reset_transport_header(skb);
 	skb->mac_len = skb->network_header - skb->mac_header;
@@ -2889,12 +2889,17 @@ static int __netif_receive_skb(struct sk_buff *skb)
 ncls:
 #endif
 
-	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
-	if (!skb)
-		goto out;
-	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
-	if (!skb)
-		goto out;
+	/* Handle special case of bridge or macvlan */
+	rx_handler = rcu_dereference(skb->dev->rx_handler);
+	if (rx_handler) {
+		if (pt_prev) {
+			ret = deliver_skb(skb, pt_prev, orig_dev);
+			pt_prev = NULL;
+		}
+		skb = rx_handler(skb);
+		if (!skb)
+			goto out;
+	}
 
 	/*
 	 * Make sure frames received on VLAN interfaces stacked on
@@ -2955,6 +2960,9 @@ int netif_receive_skb(struct sk_buff *skb)
 	if (netdev_tstamp_prequeue)
 		net_timestamp_check(skb);
 
+	if (skb_defer_rx_timestamp(skb))
+		return NET_RX_SUCCESS;
+
 #ifdef CONFIG_RPS
 	{
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
@@ -3719,10 +3727,11 @@ void dev_seq_stop(struct seq_file *seq, void *v)
 
 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
 {
-	const struct net_device_stats *stats = dev_get_stats(dev);
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 
-	seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
-		   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
+	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
+		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
 		   dev->name, stats->rx_bytes, stats->rx_packets,
 		   stats->rx_errors,
 		   stats->rx_dropped + stats->rx_missed_errors,
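
The format string switches from %lu to %llu because rtnl_link_stats64 counters are 64-bit even on 32-bit kernels, where handing a u64 to %lu would misread the varargs. The same rule in portable userspace C:

    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
    	uint64_t rx_bytes = 5000000000ULL;	/* > 2^32, overflows a 32-bit long */

    	/* cast for %llu, or use the PRIu64 macro from <inttypes.h> */
    	printf("%llu\n", (unsigned long long)rx_bytes);
    	printf("%" PRIu64 "\n", rx_bytes);
    	return 0;
    }
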
@@ -5271,20 +5280,22 @@ void netdev_run_todo(void)
 /**
 *	dev_txq_stats_fold - fold tx_queues stats
 *	@dev: device to get statistics from
- *	@stats: struct net_device_stats to hold results
+ *	@stats: struct rtnl_link_stats64 to hold results
 */
 void dev_txq_stats_fold(const struct net_device *dev,
-			struct net_device_stats *stats)
+			struct rtnl_link_stats64 *stats)
 {
-	unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
+	u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
 	unsigned int i;
 	struct netdev_queue *txq;
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		txq = netdev_get_tx_queue(dev, i);
+		spin_lock_bh(&txq->_xmit_lock);
 		tx_bytes += txq->tx_bytes;
 		tx_packets += txq->tx_packets;
 		tx_dropped += txq->tx_dropped;
+		spin_unlock_bh(&txq->_xmit_lock);
 	}
 	if (tx_bytes || tx_packets || tx_dropped) {
 		stats->tx_bytes = tx_bytes;
@@ -5294,23 +5305,53 @@ void dev_txq_stats_fold(const struct net_device *dev,
 }
 EXPORT_SYMBOL(dev_txq_stats_fold);
 
+/* Convert net_device_stats to rtnl_link_stats64. They have the same
+ * fields in the same order, with only the type differing.
+ */
+static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+				    const struct net_device_stats *netdev_stats)
+{
+#if BITS_PER_LONG == 64
+	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
+	memcpy(stats64, netdev_stats, sizeof(*stats64));
+#else
+	size_t i, n = sizeof(*stats64) / sizeof(u64);
+	const unsigned long *src = (const unsigned long *)netdev_stats;
+	u64 *dst = (u64 *)stats64;
+
+	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
+		     sizeof(*stats64) / sizeof(u64));
+	for (i = 0; i < n; i++)
+		dst[i] = src[i];
+#endif
+}
+
 /**
 *	dev_get_stats - get network device statistics
 *	@dev: device to get statistics from
+ *	@storage: place to store stats
 *
- *	Get network statistics from device. The device driver may provide
- *	its own method by setting dev->netdev_ops->get_stats; otherwise
- *	the internal statistics structure is used.
+ *	Get network statistics from device. Return @storage.
+ *	The device driver may provide its own method by setting
+ *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
+ *	otherwise the internal statistics structure is used.
 */
-const struct net_device_stats *dev_get_stats(struct net_device *dev)
+struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+					struct rtnl_link_stats64 *storage)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 
-	if (ops->ndo_get_stats)
-		return ops->ndo_get_stats(dev);
-
-	dev_txq_stats_fold(dev, &dev->stats);
-	return &dev->stats;
+	if (ops->ndo_get_stats64) {
+		memset(storage, 0, sizeof(*storage));
+		return ops->ndo_get_stats64(dev, storage);
+	}
+	if (ops->ndo_get_stats) {
+		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
+		return storage;
+	}
+	netdev_stats_to_stats64(storage, &dev->stats);
+	dev_txq_stats_fold(dev, storage);
+	return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
 
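
netdev_stats_to_stats64() leans on both structs declaring the same fields in the same order: on 64-bit the layouts coincide byte for byte, and on 32-bit each unsigned long widens to the u64 at the same index. The same trick demonstrated on a hypothetical two-field struct pair, runnable in userspace:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <limits.h>

    /* Same field names, same order, only the integer width differs. */
    struct stats_long { unsigned long rx_packets, rx_bytes; };
    struct stats_u64  { uint64_t rx_packets, rx_bytes; };

    static void stats_to_u64(struct stats_u64 *dst, const struct stats_long *src)
    {
    #if ULONG_MAX == UINT64_MAX	/* 64-bit: layouts match, plain copy */
    	memcpy(dst, src, sizeof(*dst));
    #else				/* 32-bit: widen field by field */
    	const unsigned long *s = (const unsigned long *)src;
    	uint64_t *d = (uint64_t *)dst;
    	size_t i, n = sizeof(*dst) / sizeof(uint64_t);

    	for (i = 0; i < n; i++)
    		d[i] = s[i];
    #endif
    }

    int main(void)
    {
    	struct stats_long in = { 42, 123456789UL };
    	struct stats_u64 out;

    	stats_to_u64(&out, &in);
    	printf("%llu %llu\n", (unsigned long long)out.rx_packets,
    	       (unsigned long long)out.rx_bytes);
    	return 0;
    }
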
@@ -5815,6 +5856,68 @@ char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
 	return buffer;
 }
 
+static int __netdev_printk(const char *level, const struct net_device *dev,
+			   struct va_format *vaf)
+{
+	int r;
+
+	if (dev && dev->dev.parent)
+		r = dev_printk(level, dev->dev.parent, "%s: %pV",
+			       netdev_name(dev), vaf);
+	else if (dev)
+		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
+	else
+		r = printk("%s(NULL net_device): %pV", level, vaf);
+
+	return r;
+}
+
+int netdev_printk(const char *level, const struct net_device *dev,
+		  const char *format, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	int r;
+
+	va_start(args, format);
+
+	vaf.fmt = format;
+	vaf.va = &args;
+
+	r = __netdev_printk(level, dev, &vaf);
+	va_end(args);
+
+	return r;
+}
+EXPORT_SYMBOL(netdev_printk);
+
+#define define_netdev_printk_level(func, level)			\
+int func(const struct net_device *dev, const char *fmt, ...)		\
+{									\
+	int r;								\
+	struct va_format vaf;						\
+	va_list args;							\
+									\
+	va_start(args, fmt);						\
+									\
+	vaf.fmt = fmt;							\
+	vaf.va = &args;							\
+									\
+	r = __netdev_printk(level, dev, &vaf);				\
+	va_end(args);							\
+									\
+	return r;							\
+}									\
+EXPORT_SYMBOL(func);
+
+define_netdev_printk_level(netdev_emerg, KERN_EMERG);
+define_netdev_printk_level(netdev_alert, KERN_ALERT);
+define_netdev_printk_level(netdev_crit, KERN_CRIT);
+define_netdev_printk_level(netdev_err, KERN_ERR);
+define_netdev_printk_level(netdev_warn, KERN_WARNING);
+define_netdev_printk_level(netdev_notice, KERN_NOTICE);
+define_netdev_printk_level(netdev_info, KERN_INFO);
+
 static void __net_exit netdev_exit(struct net *net)
 {
 	kfree(net->dev_name_head);
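
The generated wrappers give drivers one-line logging with the interface name and parent bus device prefixed automatically. A hypothetical driver snippet showing typical use (try_acquire_hw is an invented helper, not part of this patch):

    static int my_open(struct net_device *dev)
    {
    	if (!try_acquire_hw(dev)) {		/* illustrative only */
    		netdev_err(dev, "failed to acquire hardware\n");
    		return -EIO;
    	}
    	netdev_info(dev, "link up, %d Mbps\n", 1000);
    	return 0;
    }
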