author		Ingo Molnar <mingo@elte.hu>	2008-09-14 12:24:00 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-14 12:24:00 -0400
commit		83bd6998b04fc1bb7280b14f16b2bdbdc07c914b (patch)
tree		af1bbe2bac5752b69b1ac58bf5d822c735da439b /net/core/dev.c
parent		e7250b8ae3870f37f660c2f65cafcaba85e3bfd3 (diff)
parent		adee14b2e1557d0a8559f29681732d05a89dfc35 (diff)
Merge commit 'v2.6.27-rc6' into timers/hpet
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	126
1 file changed, 73 insertions(+), 53 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 8d13a9b9f1df..e719ed29310f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1339,19 +1339,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 }
 
 
-void __netif_schedule(struct Qdisc *q)
+static inline void __netif_reschedule(struct Qdisc *q)
 {
-	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
-		struct softnet_data *sd;
-		unsigned long flags;
+	struct softnet_data *sd;
+	unsigned long flags;
 
-		local_irq_save(flags);
-		sd = &__get_cpu_var(softnet_data);
-		q->next_sched = sd->output_queue;
-		sd->output_queue = q;
-		raise_softirq_irqoff(NET_TX_SOFTIRQ);
-		local_irq_restore(flags);
-	}
+	local_irq_save(flags);
+	sd = &__get_cpu_var(softnet_data);
+	q->next_sched = sd->output_queue;
+	sd->output_queue = q;
+	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+	local_irq_restore(flags);
+}
+
+void __netif_schedule(struct Qdisc *q)
+{
+	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+		__netif_reschedule(q);
 }
 EXPORT_SYMBOL(__netif_schedule);
 
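
The refactor above splits __netif_schedule() into the test_and_set_bit() guard and an unconditional __netif_reschedule(), so net_tx_action() can later requeue a qdisc whose __QDISC_STATE_SCHED bit is already set (see the net_tx_action() hunk below). A minimal userspace sketch of the underlying "schedule at most once" idiom, using C11 atomics instead of the kernel's bit operations; all names here are illustrative, not kernel API:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-ins for struct Qdisc and the per-CPU output queue. */
struct item {
	atomic_flag scheduled;		/* plays the role of __QDISC_STATE_SCHED */
	struct item *next;		/* plays the role of q->next_sched */
};

static struct item *pending;		/* plays the role of sd->output_queue */

/* Unconditional requeue: the caller already owns the scheduled flag. */
static void reschedule(struct item *it)
{
	it->next = pending;
	pending = it;
}

/* Schedule at most once: only the caller that flips the flag from
 * clear to set gets to link the item onto the pending list. */
static void schedule_item(struct item *it)
{
	if (!atomic_flag_test_and_set(&it->scheduled))
		reschedule(it);
}

int main(void)
{
	struct item it = { .scheduled = ATOMIC_FLAG_INIT };

	schedule_item(&it);	/* links it onto 'pending' */
	schedule_item(&it);	/* no-op: flag is already set */
	printf("queued exactly once: %d\n", pending == &it && it.next == NULL);
	return 0;
}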
@@ -1796,16 +1800,19 @@ gso:
 	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
 #endif
 	if (q->enqueue) {
-		spinlock_t *root_lock = qdisc_root_lock(q);
+		spinlock_t *root_lock = qdisc_lock(q);
 
 		spin_lock(root_lock);
 
-		rc = qdisc_enqueue_root(skb, q);
-		qdisc_run(q);
-
+		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+			kfree_skb(skb);
+			rc = NET_XMIT_DROP;
+		} else {
+			rc = qdisc_enqueue_root(skb, q);
+			qdisc_run(q);
+		}
 		spin_unlock(root_lock);
 
-		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
 		goto out;
 	}
 
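
The new guard closes a race with qdisc teardown: __QDISC_STATE_DEACTIVATED is tested under the same root lock that deactivation takes, so a packet can no longer be enqueued to a qdisc after it has been shut down; it is dropped with NET_XMIT_DROP instead. A hedged pthread sketch of the same check-under-lock pattern, with invented names:

#include <pthread.h>
#include <stdbool.h>

/* Illustrative queue guarded by one lock plus a teardown flag, mirroring
 * the root-lock + __QDISC_STATE_DEACTIVATED pairing in the hunk above. */
struct queue {
	pthread_mutex_t lock;
	bool deactivated;
	/* ... queued packets ... */
};

/* Returns false when the queue is being torn down; the flag is only
 * read or written with the lock held, so an enqueue can never slip in
 * after deactivation has completed. */
static bool enqueue(struct queue *q, void *pkt)
{
	bool ok;

	(void)pkt;			/* a real queue would link it in */
	pthread_mutex_lock(&q->lock);
	if (q->deactivated)
		ok = false;		/* caller drops the packet */
	else
		ok = true;		/* ... enqueue and kick processing ... */
	pthread_mutex_unlock(&q->lock);
	return ok;
}

static void deactivate(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->deactivated = true;		/* later enqueues see this */
	pthread_mutex_unlock(&q->lock);
}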
@@ -1909,7 +1916,6 @@ int netif_rx(struct sk_buff *skb)
 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
 		if (queue->input_pkt_queue.qlen) {
 enqueue:
-			dev_hold(skb->dev);
 			__skb_queue_tail(&queue->input_pkt_queue, skb);
 			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
@@ -1941,22 +1947,6 @@ int netif_rx_ni(struct sk_buff *skb)
 
 EXPORT_SYMBOL(netif_rx_ni);
 
-static inline struct net_device *skb_bond(struct sk_buff *skb)
-{
-	struct net_device *dev = skb->dev;
-
-	if (dev->master) {
-		if (skb_bond_should_drop(skb)) {
-			kfree_skb(skb);
-			return NULL;
-		}
-		skb->dev = dev->master;
-	}
-
-	return dev;
-}
-
-
 static void net_tx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
@@ -1992,15 +1982,22 @@ static void net_tx_action(struct softirq_action *h)
 
 			head = head->next_sched;
 
-			smp_mb__before_clear_bit();
-			clear_bit(__QDISC_STATE_SCHED, &q->state);
-
-			root_lock = qdisc_root_lock(q);
+			root_lock = qdisc_lock(q);
 			if (spin_trylock(root_lock)) {
+				smp_mb__before_clear_bit();
+				clear_bit(__QDISC_STATE_SCHED,
+					  &q->state);
 				qdisc_run(q);
 				spin_unlock(root_lock);
 			} else {
-				__netif_schedule(q);
+				if (!test_bit(__QDISC_STATE_DEACTIVATED,
+					      &q->state)) {
+					__netif_reschedule(q);
+				} else {
+					smp_mb__before_clear_bit();
+					clear_bit(__QDISC_STATE_SCHED,
+						  &q->state);
+				}
 			}
 		}
 	}
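
Note the reordering above: the __QDISC_STATE_SCHED bit is now cleared only once the root lock is held (or, on contention, after confirming the qdisc is deactivated), and a contended qdisc is handed back via __netif_reschedule() rather than __netif_schedule(), whose test_and_set_bit() would see the still-set bit and silently drop the request. A small pthread sketch of the trylock-or-requeue shape; names are invented for illustration:

#include <pthread.h>

struct work {
	pthread_mutex_t lock;
	/* ... */
};

/* Either run the work now or hand it back to be retried later; a
 * softirq-like context must never sit spinning on a contended lock. */
static void run_or_requeue(struct work *w,
			   void (*run)(struct work *),
			   void (*requeue)(struct work *))
{
	if (pthread_mutex_trylock(&w->lock) == 0) {
		run(w);			/* got the lock: do the work now */
		pthread_mutex_unlock(&w->lock);
	} else {
		requeue(w);		/* contended: try again later */
	}
}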
@@ -2100,9 +2097,10 @@ static int ing_filter(struct sk_buff *skb)
 	rxq = &dev->rx_queue;
 
 	q = rxq->qdisc;
-	if (q) {
+	if (q != &noop_qdisc) {
 		spin_lock(qdisc_lock(q));
-		result = qdisc_enqueue_root(skb, q);
+		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+			result = qdisc_enqueue_root(skb, q);
 		spin_unlock(qdisc_lock(q));
 	}
 
@@ -2113,7 +2111,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 					 struct packet_type **pt_prev,
 					 int *ret, struct net_device *orig_dev)
 {
-	if (!skb->dev->rx_queue.qdisc)
+	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
 		goto out;
 
 	if (*pt_prev) {
@@ -2183,6 +2181,7 @@ int netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
 	struct net_device *orig_dev;
+	struct net_device *null_or_orig;
 	int ret = NET_RX_DROP;
 	__be16 type;
 
@@ -2196,10 +2195,14 @@ int netif_receive_skb(struct sk_buff *skb)
 	if (!skb->iif)
 		skb->iif = skb->dev->ifindex;
 
-	orig_dev = skb_bond(skb);
-
-	if (!orig_dev)
-		return NET_RX_DROP;
+	null_or_orig = NULL;
+	orig_dev = skb->dev;
+	if (orig_dev->master) {
+		if (skb_bond_should_drop(skb))
+			null_or_orig = orig_dev; /* deliver only exact match */
+		else
+			skb->dev = orig_dev->master;
+	}
 
 	__get_cpu_var(netdev_rx_stat).total++;
 
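
The open-coded replacement for skb_bond() no longer frees the skb; it reuses the delivery loops' device test instead. Normally a packet_type with a NULL dev is a wildcard that matches every device; when skb_bond_should_drop() fires, null_or_orig is pointed at the slave device, so the NULL wildcard stops matching and only handlers bound exactly to that device still see the packet. The predicate the two loops below share, written out as a standalone function (illustrative, not kernel API):

#include <stddef.h>
#include <assert.h>

struct net_device;	/* opaque here; only pointer identity matters */

/* Mirrors 'ptype->dev == null_or_orig || ptype->dev == skb->dev ||
 * ptype->dev == orig_dev' from the hunks that follow. With
 * null_or_orig == NULL, a NULL ptype_dev (wildcard) matches; with
 * null_or_orig == orig_dev, wildcards are excluded and only exact
 * device matches deliver. */
static int deliver_ok(const struct net_device *ptype_dev,
		      const struct net_device *null_or_orig,
		      const struct net_device *skb_dev,
		      const struct net_device *orig_dev)
{
	return ptype_dev == null_or_orig ||
	       ptype_dev == skb_dev ||
	       ptype_dev == orig_dev;
}

int main(void)
{
	/* Dummy non-NULL device identity for the test below. */
	struct net_device *slave = (struct net_device *)0x1;

	/* Normal case: wildcard (NULL) handler matches. */
	assert(deliver_ok(NULL, NULL, slave, slave));
	/* Drop case: null_or_orig set to the slave device, so the
	 * wildcard no longer matches but an exact match still does. */
	assert(!deliver_ok(NULL, slave, slave, slave));
	assert(deliver_ok(slave, slave, slave, slave));
	return 0;
}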
@@ -2223,7 +2226,8 @@ int netif_receive_skb(struct sk_buff *skb)
 #endif
 
 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
-		if (!ptype->dev || ptype->dev == skb->dev) {
+		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+		    ptype->dev == orig_dev) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
@@ -2248,7 +2252,8 @@ ncls:
 	list_for_each_entry_rcu(ptype,
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
 		if (ptype->type == type &&
-		    (!ptype->dev || ptype->dev == skb->dev)) {
+		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+		     ptype->dev == orig_dev)) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
@@ -2270,6 +2275,20 @@ out:
 	return ret;
 }
 
+/* Network device is going away, flush any packets still pending */
+static void flush_backlog(void *arg)
+{
+	struct net_device *dev = arg;
+	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+		if (skb->dev == dev) {
+			__skb_unlink(skb, &queue->input_pkt_queue);
+			kfree_skb(skb);
+		}
+}
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
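
flush_backlog() pairs with the dev_hold()/dev_put() removals in the netif_rx() hunk above and the process_backlog() hunks below: instead of pinning the device for every queued skb, any skbs still sitting in a CPU's backlog are purged when the device unregisters (see the on_each_cpu() call added to netdev_run_todo() further down). skb_queue_walk_safe() caches the successor before the body runs, so the current entry may be unlinked and freed mid-walk. A userspace sketch of that safe-walk-and-unlink pattern, with a hand-rolled list and invented names:

#include <assert.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int owner;			/* stands in for skb->dev */
};

/* Like skb_queue_walk_safe(): 'tmp' holds the successor so the body
 * may free 'cur' without derailing the iteration. */
#define walk_safe(head, cur, tmp) \
	for ((cur) = (head); (cur) && (((tmp) = (cur)->next), 1); (cur) = (tmp))

/* Remove and free every node belonging to 'owner'. */
static void flush_owner(struct node **head, int owner)
{
	struct node **link = head;
	struct node *cur, *tmp;

	walk_safe(*head, cur, tmp) {
		if (cur->owner == owner) {
			*link = tmp;	/* unlink; 'link' stays valid */
			free(cur);
		} else {
			link = &cur->next;
		}
	}
}

int main(void)
{
	struct node *c = malloc(sizeof(*c));
	struct node *b = malloc(sizeof(*b));
	struct node *a = malloc(sizeof(*a));

	*c = (struct node){ NULL, 2 };
	*b = (struct node){ c, 1 };
	*a = (struct node){ b, 1 };

	flush_owner(&a, 1);		/* frees a and b, keeps c */
	assert(a == c && a->owner == 2 && a->next == NULL);
	free(a);
	return 0;
}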
@@ -2279,7 +2298,6 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	napi->weight = weight_p;
 	do {
 		struct sk_buff *skb;
-		struct net_device *dev;
 
 		local_irq_disable();
 		skb = __skb_dequeue(&queue->input_pkt_queue);
@@ -2288,14 +2306,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
 			local_irq_enable();
 			break;
 		}
-
 		local_irq_enable();
 
-		dev = skb->dev;
-
 		netif_receive_skb(skb);
-
-		dev_put(dev);
 	} while (++work < quota && jiffies == start_time);
 
 	return work;
@@ -3988,6 +4001,10 @@ int register_netdevice(struct net_device *dev)
 		}
 	}
 
+	/* Enable software GSO if SG is supported. */
+	if (dev->features & NETIF_F_SG)
+		dev->features |= NETIF_F_GSO;
+
 	netdev_initialize_kobject(dev);
 	ret = netdev_register_kobject(dev);
 	if (ret)
@@ -4165,6 +4182,8 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
+		on_each_cpu(flush_backlog, dev, 1);
+
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */
@@ -4200,6 +4219,7 @@ static void netdev_init_queues(struct net_device *dev)
 {
 	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+	spin_lock_init(&dev->tx_global_lock);
 }
 
 /**