Diffstat (limited to 'net/core/dev.c')
 net/core/dev.c | 72 ++++++++++++++++++++++++++++++++++++--------------------------------
 1 file changed, 38 insertions(+), 34 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 8d13a9b9f1df..600bb23c4c2e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1796,7 +1796,7 @@ gso:
 	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
 #endif
 	if (q->enqueue) {
-		spinlock_t *root_lock = qdisc_root_lock(q);
+		spinlock_t *root_lock = qdisc_lock(q);
 
 		spin_lock(root_lock);
 
@@ -1805,7 +1805,6 @@ gso:
 
 		spin_unlock(root_lock);
 
-		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
 		goto out;
 	}
 
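
Two things change in this pair of dev_queue_xmit() hunks. First, the transmit path now takes qdisc_lock(q) rather than qdisc_root_lock(q): q is already the root qdisc attached to the tx queue here, so dereferencing back to the root was a redundant indirection. Second, the explicit NET_XMIT_BYPASS-to-NET_XMIT_SUCCESS remap disappears from the caller; in this kernel series the bypass status became an internal flag that the enqueue wrappers mask off themselves. A minimal sketch of the helpers involved, reconstructed from memory of 2.6.27-era include/net/sch_generic.h rather than quoted verbatim:

/* qdisc_lock() hands back the qdisc's own queue lock... */
static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

/* ...and qdisc_enqueue_root() masks internal __NET_XMIT_* status bits
 * (the old BYPASS case among them) before the result reaches callers,
 * which is what makes the deleted remap above unnecessary. */
static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
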
@@ -1909,7 +1908,6 @@ int netif_rx(struct sk_buff *skb)
 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
 		if (queue->input_pkt_queue.qlen) {
 enqueue:
-			dev_hold(skb->dev);
 			__skb_queue_tail(&queue->input_pkt_queue, skb);
 			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
@@ -1941,22 +1939,6 @@ int netif_rx_ni(struct sk_buff *skb)
 
 EXPORT_SYMBOL(netif_rx_ni);
 
-static inline struct net_device *skb_bond(struct sk_buff *skb)
-{
-	struct net_device *dev = skb->dev;
-
-	if (dev->master) {
-		if (skb_bond_should_drop(skb)) {
-			kfree_skb(skb);
-			return NULL;
-		}
-		skb->dev = dev->master;
-	}
-
-	return dev;
-}
-
-
 static void net_tx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
@@ -1995,7 +1977,7 @@ static void net_tx_action(struct softirq_action *h)
 			smp_mb__before_clear_bit();
 			clear_bit(__QDISC_STATE_SCHED, &q->state);
 
-			root_lock = qdisc_root_lock(q);
+			root_lock = qdisc_lock(q);
 			if (spin_trylock(root_lock)) {
 				qdisc_run(q);
 				spin_unlock(root_lock);
@@ -2100,7 +2082,7 @@ static int ing_filter(struct sk_buff *skb)
 	rxq = &dev->rx_queue;
 
 	q = rxq->qdisc;
-	if (q) {
+	if (q != &noop_qdisc) {
 		spin_lock(qdisc_lock(q));
 		result = qdisc_enqueue_root(skb, q);
 		spin_unlock(qdisc_lock(q));
@@ -2113,7 +2095,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 					 struct packet_type **pt_prev,
 					 int *ret, struct net_device *orig_dev)
 {
-	if (!skb->dev->rx_queue.qdisc)
+	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
 		goto out;
 
 	if (*pt_prev) {
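
Both ingress hunks track a companion scheduler change: an unconfigured dev->rx_queue.qdisc is no longer a NULL pointer but points at the static noop_qdisc, so "no ingress qdisc attached" is now detected by comparing against &noop_qdisc. A hedged sketch of the initialization that guarantees this, modelled on 2.6.27-era net/sched/sch_generic.c rather than quoted verbatim:

/* sketch: dev_init_scheduler() parks each queue on the no-op qdisc, so
 * readers such as ing_filter() and handle_ing() never see NULL */
static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	dev_queue->qdisc = qdisc;
	dev_queue->qdisc_sleeping = qdisc;
}
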
@@ -2183,6 +2165,7 @@ int netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
 	struct net_device *orig_dev;
+	struct net_device *null_or_orig;
 	int ret = NET_RX_DROP;
 	__be16 type;
 
@@ -2196,10 +2179,14 @@ int netif_receive_skb(struct sk_buff *skb)
 	if (!skb->iif)
 		skb->iif = skb->dev->ifindex;
 
-	orig_dev = skb_bond(skb);
-
-	if (!orig_dev)
-		return NET_RX_DROP;
+	null_or_orig = NULL;
+	orig_dev = skb->dev;
+	if (orig_dev->master) {
+		if (skb_bond_should_drop(skb))
+			null_or_orig = orig_dev; /* deliver only exact match */
+		else
+			skb->dev = orig_dev->master;
+	}
 
 	__get_cpu_var(netdev_rx_stat).total++;
 
@@ -2223,7 +2210,8 @@ int netif_receive_skb(struct sk_buff *skb)
 #endif
 
 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
-		if (!ptype->dev || ptype->dev == skb->dev) {
+		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+		    ptype->dev == orig_dev) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
@@ -2248,7 +2236,8 @@ ncls:
 	list_for_each_entry_rcu(ptype,
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
 		if (ptype->type == type &&
-		    (!ptype->dev || ptype->dev == skb->dev)) {
+		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+		     ptype->dev == orig_dev)) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
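
The deleted skb_bond() helper freed the skb outright whenever skb_bond_should_drop() fired, so packet taps bound to an inactive bond slave never saw its traffic. The rewrite keeps the skb and records the slave in null_or_orig instead, and the comparison against null_or_orig in both delivery loops does double duty: when it is NULL (the normal case) it reproduces the old "ptype->dev == NULL means wildcard" test, and when it holds the slave device it restricts delivery to exact-match handlers, as the /* deliver only exact match */ comment says. A standalone, runnable demonstration of the predicate (hypothetical device names, not kernel code):

#include <stdio.h>

struct net_device { const char *name; };

static struct net_device bond0 = { "bond0" };	/* bonding master */
static struct net_device eth1  = { "eth1" };	/* inactive slave */

/* mirrors the condition in the two list_for_each_entry_rcu() loops */
static int delivers(struct net_device *ptype_dev, struct net_device *skb_dev,
		    struct net_device *orig_dev, struct net_device *null_or_orig)
{
	return ptype_dev == null_or_orig || ptype_dev == skb_dev ||
	       ptype_dev == orig_dev;
}

int main(void)
{
	/* active slave: skb->dev was re-pointed at the master and
	 * null_or_orig stays NULL, so a wildcard tap (dev == NULL) matches */
	printf("%d\n", delivers(NULL, &bond0, &eth1, NULL));	/* 1 */

	/* inactive slave: null_or_orig == orig_dev == skb->dev == eth1,
	 * so the wildcard tap is skipped... */
	printf("%d\n", delivers(NULL, &eth1, &eth1, &eth1));	/* 0 */

	/* ...but a tap bound exactly to eth1 still sees the frame */
	printf("%d\n", delivers(&eth1, &eth1, &eth1, &eth1));	/* 1 */
	return 0;
}
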
@@ -2270,6 +2259,20 @@ out:
 	return ret;
 }
 
+/* Network device is going away, flush any packets still pending */
+static void flush_backlog(void *arg)
+{
+	struct net_device *dev = arg;
+	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+		if (skb->dev == dev) {
+			__skb_unlink(skb, &queue->input_pkt_queue);
+			kfree_skb(skb);
+		}
+}
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
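
flush_backlog() is the other half of the dev_hold()/dev_put() removal in netif_rx() and process_backlog(): a queued skb no longer pins its device with a reference, so whatever is still sitting in the per-CPU backlogs when a device unregisters must be purged explicitly before netdev_wait_allrefs() runs, which is what the netdev_run_todo() hunk further down arranges. A hedged sketch of the resulting teardown ordering (example_unregister_tail is a made-up name; the real code lives inline in netdev_run_todo()):

static void example_unregister_tail(struct net_device *dev)
{
	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);

	/* on_each_cpu() runs flush_backlog with IRQs disabled on every
	 * CPU, so afterwards no input_pkt_queue can reference dev */
	on_each_cpu(flush_backlog, dev, 1);

	/* safe now: the backlog holds neither skbs for dev nor dev refs */
	netdev_wait_allrefs(dev);
}
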
@@ -2279,7 +2282,6 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	napi->weight = weight_p;
 	do {
 		struct sk_buff *skb;
-		struct net_device *dev;
 
 		local_irq_disable();
 		skb = __skb_dequeue(&queue->input_pkt_queue);
@@ -2288,14 +2290,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
 			local_irq_enable();
 			break;
 		}
-
 		local_irq_enable();
 
-		dev = skb->dev;
-
 		netif_receive_skb(skb);
-
-		dev_put(dev);
 	} while (++work < quota && jiffies == start_time);
 
 	return work;
@@ -3988,6 +3985,10 @@ int register_netdevice(struct net_device *dev)
 		}
 	}
 
+	/* Enable software GSO if SG is supported. */
+	if (dev->features & NETIF_F_SG)
+		dev->features |= NETIF_F_GSO;
+
 	netdev_initialize_kobject(dev);
 	ret = netdev_register_kobject(dev);
 	if (ret)
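
The register_netdevice() hunk enables software GSO for every device that advertises scatter-gather. The dependency runs that way around because skb_segment() builds the segment skbs out of paged fragments of the original, output a non-SG driver could not transmit. The flag only comes into play when the hardware cannot segment a given skb itself; roughly, on the transmit path (a simplified fragment in the style of 2.6.x dev_hard_start_xmit(), not a verbatim quote):

	/* hardware can't segment this GSO skb: fall back to software
	 * GSO (skb_segment) before handing packets to the driver */
	if (netif_needs_gso(dev, skb)) {
		if (unlikely(dev_gso_segment(skb)))
			goto out_kfree_skb;
	}
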
@@ -4165,6 +4166,8 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
+		on_each_cpu(flush_backlog, dev, 1);
+
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */
@@ -4200,6 +4203,7 @@ static void netdev_init_queues(struct net_device *dev)
 {
 	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+	spin_lock_init(&dev->tx_global_lock);
 }
 
 /**
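
The last hunk initializes the new dev->tx_global_lock. It gives code that must freeze all TX queues of a multiqueue device at once a single outer lock to take, instead of holding every per-queue xmit lock simultaneously, which lockdep rejected. Roughly how the consumer looks, a hedged reconstruction of the 2.6.27-era netif_tx_lock() from include/linux/netdevice.h rather than a verbatim quote:

static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);	/* the lock initialized above */
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* Mark each queue frozen, then release its lock again:
		 * holders of a single queue lock test the frozen bit
		 * rather than contending on all queues at once. */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}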