Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c    87
-rw-r--r--  net/core/dev.c        126
-rw-r--r--  net/core/neighbour.c   13
-rw-r--r--  net/core/netpoll.c      1
-rw-r--r--  net/core/pktgen.c      83
-rw-r--r--  net/core/skbuff.c      15
6 files changed, 248 insertions, 77 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index dd61dcad6019..52f577a0f544 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -339,6 +339,93 @@ fault:
 	return -EFAULT;
 }
 
+/**
+ *	skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
+ *	@skb: buffer to copy
+ *	@offset: offset in the buffer to start copying to
+ *	@from: io vector to copy from
+ *	@len: amount of data to copy to buffer from iovec
+ *
+ *	Returns 0 or -EFAULT.
+ *	Note: the iovec is modified during the copy.
+ */
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+				 struct iovec *from, int len)
+{
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
+
+	/* Copy header. */
+	if (copy > 0) {
+		if (copy > len)
+			copy = len;
+		if (memcpy_fromiovec(skb->data + offset, from, copy))
+			goto fault;
+		if ((len -= copy) == 0)
+			return 0;
+		offset += copy;
+	}
+
+	/* Copy paged appendix. Hmm... why does this look so complicated? */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
+		if ((copy = end - offset) > 0) {
+			int err;
+			u8 *vaddr;
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			struct page *page = frag->page;
+
+			if (copy > len)
+				copy = len;
+			vaddr = kmap(page);
+			err = memcpy_fromiovec(vaddr + frag->page_offset +
+					       offset - start, from, copy);
+			kunmap(page);
+			if (err)
+				goto fault;
+
+			if (!(len -= copy))
+				return 0;
+			offset += copy;
+		}
+		start = end;
+	}
+
+	if (skb_shinfo(skb)->frag_list) {
+		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+		for (; list; list = list->next) {
+			int end;
+
+			WARN_ON(start > offset + len);
+
+			end = start + list->len;
+			if ((copy = end - offset) > 0) {
+				if (copy > len)
+					copy = len;
+				if (skb_copy_datagram_from_iovec(list,
+								 offset - start,
+								 from, copy))
+					goto fault;
+				if ((len -= copy) == 0)
+					return 0;
+				offset += copy;
+			}
+			start = end;
+		}
+	}
+	if (!len)
+		return 0;
+
+fault:
+	return -EFAULT;
+}
+EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
+
 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				      u8 __user *to, int len,
 				      __wsum *csump)
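The new skb_copy_datagram_from_iovec() above is the fill-direction counterpart of skb_copy_datagram_iovec(): it walks the linear header, the paged frags and any frag_list, pulling bytes out of a user iovec into the skb. A minimal caller sketch, assuming a sendmsg()-style path; example_fill_skb() and its error handling are illustrative only and are not part of this diff.

/* Hypothetical caller sketch: copy a user iovec into the linear area of a
 * freshly allocated skb, the way a sendmsg() implementation might use the
 * new helper.  sock_alloc_send_skb(), skb_put() and kfree_skb() are the
 * usual kernel primitives; example_fill_skb() itself is made up. */
static int example_fill_skb(struct sock *sk, struct iovec *iov, int len)
{
	struct sk_buff *skb;
	int err;

	skb = sock_alloc_send_skb(sk, len, 0, &err);
	if (!skb)
		return err;

	skb_put(skb, len);	/* make room in the linear data area */

	/* Note: the iovec is advanced by the copy, as documented above. */
	err = skb_copy_datagram_from_iovec(skb, 0, iov, len);
	if (err) {		/* -EFAULT on a bad user pointer */
		kfree_skb(skb);
		return err;
	}

	/* ... hand skb to the transmit path here ... */
	kfree_skb(skb);		/* placeholder so the sketch stays self-contained */
	return 0;
}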
diff --git a/net/core/dev.c b/net/core/dev.c
index 8d13a9b9f1df..e719ed29310f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1339,19 +1339,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 }
 
 
-void __netif_schedule(struct Qdisc *q)
-{
-	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
-		struct softnet_data *sd;
-		unsigned long flags;
-
-		local_irq_save(flags);
-		sd = &__get_cpu_var(softnet_data);
-		q->next_sched = sd->output_queue;
-		sd->output_queue = q;
-		raise_softirq_irqoff(NET_TX_SOFTIRQ);
-		local_irq_restore(flags);
-	}
-}
+static inline void __netif_reschedule(struct Qdisc *q)
+{
+	struct softnet_data *sd;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	sd = &__get_cpu_var(softnet_data);
+	q->next_sched = sd->output_queue;
+	sd->output_queue = q;
+	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+	local_irq_restore(flags);
+}
+
+void __netif_schedule(struct Qdisc *q)
+{
+	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+		__netif_reschedule(q);
+}
 EXPORT_SYMBOL(__netif_schedule);
 
@@ -1796,16 +1800,19 @@ gso:
 	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
 #endif
 	if (q->enqueue) {
-		spinlock_t *root_lock = qdisc_root_lock(q);
+		spinlock_t *root_lock = qdisc_lock(q);
 
 		spin_lock(root_lock);
 
-		rc = qdisc_enqueue_root(skb, q);
-		qdisc_run(q);
-
+		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+			kfree_skb(skb);
+			rc = NET_XMIT_DROP;
+		} else {
+			rc = qdisc_enqueue_root(skb, q);
+			qdisc_run(q);
+		}
 		spin_unlock(root_lock);
 
-		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
 		goto out;
 	}
 
@@ -1909,7 +1916,6 @@ int netif_rx(struct sk_buff *skb)
 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
 		if (queue->input_pkt_queue.qlen) {
 enqueue:
-			dev_hold(skb->dev);
 			__skb_queue_tail(&queue->input_pkt_queue, skb);
 			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
@@ -1941,22 +1947,6 @@ int netif_rx_ni(struct sk_buff *skb)
 
 EXPORT_SYMBOL(netif_rx_ni);
 
-static inline struct net_device *skb_bond(struct sk_buff *skb)
-{
-	struct net_device *dev = skb->dev;
-
-	if (dev->master) {
-		if (skb_bond_should_drop(skb)) {
-			kfree_skb(skb);
-			return NULL;
-		}
-		skb->dev = dev->master;
-	}
-
-	return dev;
-}
-
-
 static void net_tx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
@@ -1992,15 +1982,22 @@ static void net_tx_action(struct softirq_action *h)
 
 			head = head->next_sched;
 
-			smp_mb__before_clear_bit();
-			clear_bit(__QDISC_STATE_SCHED, &q->state);
-
-			root_lock = qdisc_root_lock(q);
+			root_lock = qdisc_lock(q);
 			if (spin_trylock(root_lock)) {
+				smp_mb__before_clear_bit();
+				clear_bit(__QDISC_STATE_SCHED,
+					  &q->state);
 				qdisc_run(q);
 				spin_unlock(root_lock);
 			} else {
-				__netif_schedule(q);
+				if (!test_bit(__QDISC_STATE_DEACTIVATED,
+					      &q->state)) {
+					__netif_reschedule(q);
+				} else {
+					smp_mb__before_clear_bit();
+					clear_bit(__QDISC_STATE_SCHED,
+						  &q->state);
+				}
 			}
 		}
 	}
@@ -2100,9 +2097,10 @@ static int ing_filter(struct sk_buff *skb)
 	rxq = &dev->rx_queue;
 
 	q = rxq->qdisc;
-	if (q) {
+	if (q != &noop_qdisc) {
 		spin_lock(qdisc_lock(q));
-		result = qdisc_enqueue_root(skb, q);
+		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+			result = qdisc_enqueue_root(skb, q);
 		spin_unlock(qdisc_lock(q));
 	}
 
@@ -2113,7 +2111,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 					 struct packet_type **pt_prev,
 					 int *ret, struct net_device *orig_dev)
 {
-	if (!skb->dev->rx_queue.qdisc)
+	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
 		goto out;
 
 	if (*pt_prev) {
@@ -2183,6 +2181,7 @@ int netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
 	struct net_device *orig_dev;
+	struct net_device *null_or_orig;
 	int ret = NET_RX_DROP;
 	__be16 type;
 
@@ -2196,10 +2195,14 @@ int netif_receive_skb(struct sk_buff *skb)
 	if (!skb->iif)
 		skb->iif = skb->dev->ifindex;
 
-	orig_dev = skb_bond(skb);
-
-	if (!orig_dev)
-		return NET_RX_DROP;
+	null_or_orig = NULL;
+	orig_dev = skb->dev;
+	if (orig_dev->master) {
+		if (skb_bond_should_drop(skb))
+			null_or_orig = orig_dev; /* deliver only exact match */
+		else
+			skb->dev = orig_dev->master;
+	}
 
 	__get_cpu_var(netdev_rx_stat).total++;
 
@@ -2223,7 +2226,8 @@ int netif_receive_skb(struct sk_buff *skb)
 #endif
 
 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
-		if (!ptype->dev || ptype->dev == skb->dev) {
+		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+		    ptype->dev == orig_dev) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
@@ -2248,7 +2252,8 @@ ncls:
 	list_for_each_entry_rcu(ptype,
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
 		if (ptype->type == type &&
-		    (!ptype->dev || ptype->dev == skb->dev)) {
+		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+		     ptype->dev == orig_dev)) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
@@ -2270,6 +2275,20 @@ out:
 	return ret;
 }
 
+/* Network device is going away, flush any packets still pending */
+static void flush_backlog(void *arg)
+{
+	struct net_device *dev = arg;
+	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+		if (skb->dev == dev) {
+			__skb_unlink(skb, &queue->input_pkt_queue);
+			kfree_skb(skb);
+		}
+}
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
@@ -2279,7 +2298,6 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	napi->weight = weight_p;
 	do {
 		struct sk_buff *skb;
-		struct net_device *dev;
 
 		local_irq_disable();
 		skb = __skb_dequeue(&queue->input_pkt_queue);
@@ -2288,14 +2306,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
 			local_irq_enable();
 			break;
 		}
-
 		local_irq_enable();
 
-		dev = skb->dev;
-
 		netif_receive_skb(skb);
-
-		dev_put(dev);
 	} while (++work < quota && jiffies == start_time);
 
 	return work;
@@ -3988,6 +4001,10 @@ int register_netdevice(struct net_device *dev)
 		}
 	}
 
+	/* Enable software GSO if SG is supported. */
+	if (dev->features & NETIF_F_SG)
+		dev->features |= NETIF_F_GSO;
+
 	netdev_initialize_kobject(dev);
 	ret = netdev_register_kobject(dev);
 	if (ret)
@@ -4165,6 +4182,8 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
+		on_each_cpu(flush_backlog, dev, 1);
+
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */
@@ -4200,6 +4219,7 @@ static void netdev_init_queues(struct net_device *dev)
 {
 	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+	spin_lock_init(&dev->tx_global_lock);
 }
 
 /**
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f62c8af85d38..9d92e41826e7 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2281,6 +2281,7 @@ static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
 	struct neighbour *n = neigh_get_first(seq);
 
 	if (n) {
+		--(*pos);
 		while (*pos) {
 			n = neigh_get_next(seq, n, pos);
 			if (!n)
@@ -2341,6 +2342,7 @@ static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
 	struct pneigh_entry *pn = pneigh_get_first(seq);
 
 	if (pn) {
+		--(*pos);
 		while (*pos) {
 			pn = pneigh_get_next(seq, pn, pos);
 			if (!pn)
@@ -2354,10 +2356,11 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
 {
 	struct neigh_seq_state *state = seq->private;
 	void *rc;
+	loff_t idxpos = *pos;
 
-	rc = neigh_get_idx(seq, pos);
+	rc = neigh_get_idx(seq, &idxpos);
 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
-		rc = pneigh_get_idx(seq, pos);
+		rc = pneigh_get_idx(seq, &idxpos);
 
 	return rc;
 }
@@ -2366,7 +2369,6 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 	__acquires(tbl->lock)
 {
 	struct neigh_seq_state *state = seq->private;
-	loff_t pos_minus_one;
 
 	state->tbl = tbl;
 	state->bucket = 0;
@@ -2374,8 +2376,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 
 	read_lock_bh(&tbl->lock);
 
-	pos_minus_one = *pos - 1;
-	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
+	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
 }
 EXPORT_SYMBOL(neigh_seq_start);
 
@@ -2385,7 +2386,7 @@ void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	void *rc;
 
 	if (v == SEQ_START_TOKEN) {
-		rc = neigh_get_idx(seq, pos);
+		rc = neigh_get_first(seq);
 		goto out;
 	}
 
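The neighbour.c changes all chase the same seq_file contract: ->start() receives the element index in *pos (index 0 is conventionally mapped to SEQ_START_TOKEN for a header line) and must not clobber *pos, while ->next() returns the following element and bumps *pos by exactly one. A generic sketch of that contract follows; the example_* helpers are illustrative stand-ins for the neigh/pneigh lookup family, not real kernel functions.

/* Illustrative lookup helpers; in neighbour.c these correspond to the
 * neigh_get_first()/neigh_get_next()/neigh_get_idx() family. */
static void *example_get_idx(struct seq_file *seq, loff_t idx);
static void *example_get_first(struct seq_file *seq);
static void *example_get_after(struct seq_file *seq, void *v);

/* Generic seq_file iterator skeleton (illustrative names only). */
static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* pos == 0 yields the header token; pos N yields element N - 1 */
	return *pos ? example_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *e;

	if (v == SEQ_START_TOKEN)
		e = example_get_first(seq);	/* first real element */
	else
		e = example_get_after(seq, v);	/* element following v */
	++*pos;					/* advance by exactly one */
	return e;
}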
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c12720895ecf..6c7af390be0a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -70,6 +70,7 @@ static void queue_process(struct work_struct *work)
 		local_irq_save(flags);
 		__netif_tx_lock(txq, smp_processor_id());
 		if (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq) ||
 		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			__netif_tx_unlock(txq);
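netif_tx_queue_frozen() reports a queue that is temporarily frozen while netif_tx_lock() holds the new dev->tx_global_lock, which is distinct from a queue the driver has stopped for lack of descriptors. Transmit paths that bypass the qdisc, netpoll here and pktgen below, now back off in both cases. A condensed sketch of the gate they converge on; the helper name is illustrative only.

/* Sketch: the check a qdisc-bypassing transmit path makes before calling
 * hard_start_xmit() directly.  example_can_xmit() is not a kernel API. */
static inline bool example_can_xmit(struct netdev_queue *txq)
{
	return !netif_tx_queue_stopped(txq) &&	/* driver ran out of descriptors */
	       !netif_tx_queue_frozen(txq);	/* frozen under netif_tx_lock() */
}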
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c7d484f7e1c4..a756847e3814 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -168,7 +168,7 @@
 #include <asm/div64.h>		/* do_div */
 #include <asm/timex.h>
 
-#define VERSION "pktgen v2.69: Packet Generator for packet performance testing.\n"
+#define VERSION "pktgen v2.70: Packet Generator for packet performance testing.\n"
 
 #define IP_NAME_SZ 32
 #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
@@ -189,6 +189,7 @@
 #define F_FLOW_SEQ    (1<<11)	/* Sequential flows */
 #define F_IPSEC_ON    (1<<12)	/* ipsec on for flows */
 #define F_QUEUE_MAP_RND (1<<13)	/* queue map Random */
+#define F_QUEUE_MAP_CPU (1<<14)	/* queue map mirrors smp_processor_id() */
 
 /* Thread control flag bits */
 #define T_TERMINATE   (1<<0)
@@ -621,6 +622,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 	if (pkt_dev->flags & F_QUEUE_MAP_RND)
 		seq_printf(seq, "QUEUE_MAP_RND ");
 
+	if (pkt_dev->flags & F_QUEUE_MAP_CPU)
+		seq_printf(seq, "QUEUE_MAP_CPU ");
+
 	if (pkt_dev->cflows) {
 		if (pkt_dev->flags & F_FLOW_SEQ)
 			seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/
@@ -1134,6 +1138,12 @@ static ssize_t pktgen_if_write(struct file *file,
 
 		else if (strcmp(f, "!QUEUE_MAP_RND") == 0)
 			pkt_dev->flags &= ~F_QUEUE_MAP_RND;
+
+		else if (strcmp(f, "QUEUE_MAP_CPU") == 0)
+			pkt_dev->flags |= F_QUEUE_MAP_CPU;
+
+		else if (strcmp(f, "!QUEUE_MAP_CPU") == 0)
+			pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
 #ifdef CONFIG_XFRM
 		else if (strcmp(f, "IPSEC") == 0)
 			pkt_dev->flags |= F_IPSEC_ON;
@@ -1895,6 +1905,23 @@ static int pktgen_device_event(struct notifier_block *unused,
 	return NOTIFY_DONE;
 }
 
+static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev, const char *ifname)
+{
+	char b[IFNAMSIZ+5];
+	int i = 0;
+
+	for(i=0; ifname[i] != '@'; i++) {
+		if(i == IFNAMSIZ)
+			break;
+
+		b[i] = ifname[i];
+	}
+	b[i] = 0;
+
+	return dev_get_by_name(&init_net, b);
+}
+
+
 /* Associate pktgen_dev with a device. */
 
 static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
@@ -1908,7 +1935,7 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
 		pkt_dev->odev = NULL;
 	}
 
-	odev = dev_get_by_name(&init_net, ifname);
+	odev = pktgen_dev_get_by_name(pkt_dev, ifname);
 	if (!odev) {
 		printk(KERN_ERR "pktgen: no such netdevice: \"%s\"\n", ifname);
 		return -ENODEV;
@@ -1934,6 +1961,8 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
  */
 static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 {
+	int ntxq;
+
 	if (!pkt_dev->odev) {
 		printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
 		       "setup_inject.\n");
@@ -1942,6 +1971,33 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 		return;
 	}
 
+	/* make sure that we don't pick a non-existing transmit queue */
+	ntxq = pkt_dev->odev->real_num_tx_queues;
+	if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
+		printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
+		       "disabled because CPU count (%d) exceeds number ",
+		       num_online_cpus());
+		printk(KERN_WARNING "pktgen: WARNING: of tx queues "
+		       "(%d) on %s \n", ntxq, pkt_dev->odev->name);
+		pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
+	}
+	if (ntxq <= pkt_dev->queue_map_min) {
+		printk(KERN_WARNING "pktgen: WARNING: Requested "
+		       "queue_map_min (%d) exceeds number of tx\n",
+		       pkt_dev->queue_map_min);
+		printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+		       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+		pkt_dev->queue_map_min = ntxq - 1;
+	}
+	if (ntxq <= pkt_dev->queue_map_max) {
+		printk(KERN_WARNING "pktgen: WARNING: Requested "
+		       "queue_map_max (%d) exceeds number of tx\n",
+		       pkt_dev->queue_map_max);
+		printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+		       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+		pkt_dev->queue_map_max = ntxq - 1;
+	}
+
 	/* Default to the interface's mac if not explicitly set. */
 
 	if (is_zero_ether_addr(pkt_dev->src_mac))
@@ -2085,15 +2141,19 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
 		if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
 			/* reset time */
 			pkt_dev->flows[flow].count = 0;
+			pkt_dev->flows[flow].flags = 0;
 			pkt_dev->curfl += 1;
 			if (pkt_dev->curfl >= pkt_dev->cflows)
 				pkt_dev->curfl = 0; /*reset */
 		}
 	} else {
 		flow = random32() % pkt_dev->cflows;
+		pkt_dev->curfl = flow;
 
-		if (pkt_dev->flows[flow].count > pkt_dev->lflow)
+		if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
 			pkt_dev->flows[flow].count = 0;
+			pkt_dev->flows[flow].flags = 0;
+		}
 	}
 
 	return pkt_dev->curfl;
@@ -2125,7 +2185,11 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
 #endif
 static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
 {
-	if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
+
+	if (pkt_dev->flags & F_QUEUE_MAP_CPU)
+		pkt_dev->cur_queue_map = smp_processor_id();
+
+	else if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
 		__u16 t;
 		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
 			t = random32() %
@@ -2162,7 +2226,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 			mc = random32() % pkt_dev->src_mac_count;
 		else {
 			mc = pkt_dev->cur_src_mac_offset++;
-			if (pkt_dev->cur_src_mac_offset >
+			if (pkt_dev->cur_src_mac_offset >=
 			    pkt_dev->src_mac_count)
 				pkt_dev->cur_src_mac_offset = 0;
 		}
@@ -2189,7 +2253,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 
 		else {
 			mc = pkt_dev->cur_dst_mac_offset++;
-			if (pkt_dev->cur_dst_mac_offset >
+			if (pkt_dev->cur_dst_mac_offset >=
 			    pkt_dev->dst_mac_count) {
 				pkt_dev->cur_dst_mac_offset = 0;
 			}
@@ -3305,6 +3369,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 	txq = netdev_get_tx_queue(odev, queue_map);
 	if (netif_tx_queue_stopped(txq) ||
+	    netif_tx_queue_frozen(txq) ||
 	    need_resched()) {
 		idle_start = getCurUs();
 
@@ -3320,7 +3385,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 		pkt_dev->idle_acc += getCurUs() - idle_start;
 
-		if (netif_tx_queue_stopped(txq)) {
+		if (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq)) {
 			pkt_dev->next_tx_us = getCurUs();	/* TODO */
 			pkt_dev->next_tx_ns = 0;
 			goto out;	/* Try the next interface */
@@ -3352,7 +3418,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	txq = netdev_get_tx_queue(odev, queue_map);
 
 	__netif_tx_lock_bh(txq);
-	if (!netif_tx_queue_stopped(txq)) {
+	if (!netif_tx_queue_stopped(txq) &&
+	    !netif_tx_queue_frozen(txq)) {
 
 		atomic_inc(&(pkt_dev->skb->users));
 	retry_now:
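Taken together, the pktgen changes above let one physical NIC be driven from several pktgen threads: a device name may carry an '@' suffix (everything after the '@' is stripped before the dev_get_by_name() lookup, so eth0@0 and eth0@1 both resolve to eth0), and the new QUEUE_MAP_CPU flag makes each pktgen thread transmit on the tx queue whose index matches its CPU. A small userspace sketch of that workflow through the /proc/net/pktgen interface; the interface name, counts and file layout are placeholders and assume the pktgen module is loaded with at least two worker threads.

#include <stdio.h>
#include <stdlib.h>

/* Write one pktgen command to a /proc/net/pktgen file (sketch only). */
static void pgset(const char *path, const char *cmd)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(1);
	}
	fprintf(f, "%s\n", cmd);
	fclose(f);
}

int main(void)
{
	/* Bind two pktgen devices to the same NIC, one per kernel thread. */
	pgset("/proc/net/pktgen/kpktgend_0", "add_device eth0@0");
	pgset("/proc/net/pktgen/kpktgend_1", "add_device eth0@1");

	/* Let each thread pick the tx queue that matches its CPU. */
	pgset("/proc/net/pktgen/eth0@0", "flag QUEUE_MAP_CPU");
	pgset("/proc/net/pktgen/eth0@1", "flag QUEUE_MAP_CPU");

	pgset("/proc/net/pktgen/eth0@0", "count 100000");
	pgset("/proc/net/pktgen/eth0@1", "count 100000");

	/* Start transmitting on all threads. */
	pgset("/proc/net/pktgen/pgctrl", "start");
	return 0;
}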
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4e0c92274189..ca1ccdf1ef76 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -485,6 +485,9 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	C(head);
 	C(data);
 	C(truesize);
+#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
+	C(do_not_encrypt);
+#endif
 	atomic_set(&n->users, 1);
 
 	atomic_inc(&(skb_shinfo(skb)->dataref));
@@ -2253,14 +2256,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 		segs = nskb;
 		tail = nskb;
 
-		nskb->dev = skb->dev;
-		skb_copy_queue_mapping(nskb, skb);
-		nskb->priority = skb->priority;
-		nskb->protocol = skb->protocol;
-		nskb->vlan_tci = skb->vlan_tci;
-		nskb->dst = dst_clone(skb->dst);
-		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
-		nskb->pkt_type = skb->pkt_type;
+		__copy_skb_header(nskb, skb);
 		nskb->mac_len = skb->mac_len;
 
 		skb_reserve(nskb, headroom);
@@ -2271,6 +2267,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 		skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
 					  doffset);
 		if (!sg) {
+			nskb->ip_summed = CHECKSUM_NONE;
 			nskb->csum = skb_copy_and_csum_bits(skb, offset,
 							    skb_put(nskb, len),
 							    len, 0);
@@ -2280,8 +2277,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 		frag = skb_shinfo(nskb)->frags;
 		k = 0;
 
-		nskb->ip_summed = CHECKSUM_PARTIAL;
-		nskb->csum = skb->csum;
 		skb_copy_from_linear_data_offset(skb, offset,
 						 skb_put(nskb, hsize), hsize);
 