Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c  121
1 file changed, 68 insertions, 53 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 8d13a9b9f1df..60c51f765887 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1339,19 +1339,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 }
 
 
-void __netif_schedule(struct Qdisc *q)
+static inline void __netif_reschedule(struct Qdisc *q)
 {
-	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
-		struct softnet_data *sd;
-		unsigned long flags;
+	struct softnet_data *sd;
+	unsigned long flags;
 
-		local_irq_save(flags);
-		sd = &__get_cpu_var(softnet_data);
-		q->next_sched = sd->output_queue;
-		sd->output_queue = q;
-		raise_softirq_irqoff(NET_TX_SOFTIRQ);
-		local_irq_restore(flags);
-	}
+	local_irq_save(flags);
+	sd = &__get_cpu_var(softnet_data);
+	q->next_sched = sd->output_queue;
+	sd->output_queue = q;
+	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+	local_irq_restore(flags);
+}
+
+void __netif_schedule(struct Qdisc *q)
+{
+	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+		__netif_reschedule(q);
 }
 EXPORT_SYMBOL(__netif_schedule);
 
@@ -1796,16 +1800,19 @@ gso:
 	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
 #endif
 	if (q->enqueue) {
-		spinlock_t *root_lock = qdisc_root_lock(q);
+		spinlock_t *root_lock = qdisc_lock(q);
 
 		spin_lock(root_lock);
 
-		rc = qdisc_enqueue_root(skb, q);
-		qdisc_run(q);
-
+		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+			kfree_skb(skb);
+			rc = NET_XMIT_DROP;
+		} else {
+			rc = qdisc_enqueue_root(skb, q);
+			qdisc_run(q);
+		}
 		spin_unlock(root_lock);
 
-		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
 		goto out;
 	}
 
@@ -1909,7 +1916,6 @@ int netif_rx(struct sk_buff *skb)
 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
 		if (queue->input_pkt_queue.qlen) {
 enqueue:
-			dev_hold(skb->dev);
 			__skb_queue_tail(&queue->input_pkt_queue, skb);
 			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
@@ -1941,22 +1947,6 @@ int netif_rx_ni(struct sk_buff *skb)
 
 EXPORT_SYMBOL(netif_rx_ni);
 
-static inline struct net_device *skb_bond(struct sk_buff *skb)
-{
-	struct net_device *dev = skb->dev;
-
-	if (dev->master) {
-		if (skb_bond_should_drop(skb)) {
-			kfree_skb(skb);
-			return NULL;
-		}
-		skb->dev = dev->master;
-	}
-
-	return dev;
-}
-
-
 static void net_tx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
@@ -1992,15 +1982,17 @@ static void net_tx_action(struct softirq_action *h)
 
 			head = head->next_sched;
 
-			smp_mb__before_clear_bit();
-			clear_bit(__QDISC_STATE_SCHED, &q->state);
-
-			root_lock = qdisc_root_lock(q);
+			root_lock = qdisc_lock(q);
 			if (spin_trylock(root_lock)) {
+				smp_mb__before_clear_bit();
+				clear_bit(__QDISC_STATE_SCHED,
+					  &q->state);
 				qdisc_run(q);
 				spin_unlock(root_lock);
 			} else {
-				__netif_schedule(q);
+				if (!test_bit(__QDISC_STATE_DEACTIVATED,
+					      &q->state))
+					__netif_reschedule(q);
 			}
 		}
 	}
@@ -2100,9 +2092,10 @@ static int ing_filter(struct sk_buff *skb)
 	rxq = &dev->rx_queue;
 
 	q = rxq->qdisc;
-	if (q) {
+	if (q != &noop_qdisc) {
 		spin_lock(qdisc_lock(q));
-		result = qdisc_enqueue_root(skb, q);
+		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+			result = qdisc_enqueue_root(skb, q);
 		spin_unlock(qdisc_lock(q));
 	}
 
@@ -2113,7 +2106,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 					 struct packet_type **pt_prev,
 					 int *ret, struct net_device *orig_dev)
 {
-	if (!skb->dev->rx_queue.qdisc)
+	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
 		goto out;
 
 	if (*pt_prev) {
@@ -2183,6 +2176,7 @@ int netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
 	struct net_device *orig_dev;
+	struct net_device *null_or_orig;
 	int ret = NET_RX_DROP;
 	__be16 type;
 
@@ -2196,10 +2190,14 @@ int netif_receive_skb(struct sk_buff *skb)
 	if (!skb->iif)
 		skb->iif = skb->dev->ifindex;
 
-	orig_dev = skb_bond(skb);
-
-	if (!orig_dev)
-		return NET_RX_DROP;
+	null_or_orig = NULL;
+	orig_dev = skb->dev;
+	if (orig_dev->master) {
+		if (skb_bond_should_drop(skb))
+			null_or_orig = orig_dev; /* deliver only exact match */
+		else
+			skb->dev = orig_dev->master;
+	}
 
 	__get_cpu_var(netdev_rx_stat).total++;
 
@@ -2223,7 +2221,8 @@ int netif_receive_skb(struct sk_buff *skb)
 #endif
 
 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
-		if (!ptype->dev || ptype->dev == skb->dev) {
+		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+		    ptype->dev == orig_dev) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
@@ -2248,7 +2247,8 @@ ncls:
 	list_for_each_entry_rcu(ptype,
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
 		if (ptype->type == type &&
-		    (!ptype->dev || ptype->dev == skb->dev)) {
+		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+		     ptype->dev == orig_dev)) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
@@ -2270,6 +2270,20 @@ out:
 	return ret;
 }
 
+/* Network device is going away, flush any packets still pending */
+static void flush_backlog(void *arg)
+{
+	struct net_device *dev = arg;
+	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+		if (skb->dev == dev) {
+			__skb_unlink(skb, &queue->input_pkt_queue);
+			kfree_skb(skb);
+		}
+}
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
@@ -2279,7 +2293,6 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	napi->weight = weight_p;
 	do {
 		struct sk_buff *skb;
-		struct net_device *dev;
 
 		local_irq_disable();
 		skb = __skb_dequeue(&queue->input_pkt_queue);
@@ -2288,14 +2301,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
 			local_irq_enable();
 			break;
 		}
-
 		local_irq_enable();
 
-		dev = skb->dev;
-
 		netif_receive_skb(skb);
-
-		dev_put(dev);
 	} while (++work < quota && jiffies == start_time);
 
 	return work;
@@ -3988,6 +3996,10 @@ int register_netdevice(struct net_device *dev)
 		}
 	}
 
+	/* Enable software GSO if SG is supported. */
+	if (dev->features & NETIF_F_SG)
+		dev->features |= NETIF_F_GSO;
+
 	netdev_initialize_kobject(dev);
 	ret = netdev_register_kobject(dev);
 	if (ret)
@@ -4165,6 +4177,8 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
+		on_each_cpu(flush_backlog, dev, 1);
+
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */
@@ -4200,6 +4214,7 @@ static void netdev_init_queues(struct net_device *dev)
 {
 	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+	spin_lock_init(&dev->tx_global_lock);
 }
 
 /**