Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c            | 270
-rw-r--r--  net/core/dev_mcast.c      | 129
-rw-r--r--  net/core/gen_estimator.c  |   3
-rw-r--r--  net/core/netpoll.c        |  18
-rw-r--r--  net/core/pktgen.c         | 249
-rw-r--r--  net/core/rtnetlink.c      | 470
-rw-r--r--  net/core/skbuff.c         |  13
-rw-r--r--  net/core/sock.c           |  42
8 files changed, 932 insertions(+), 262 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 835202fb34c4..96443055324e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -955,7 +955,7 @@ int dev_open(struct net_device *dev)
955 /* 955 /*
956 * Initialize multicasting status 956 * Initialize multicasting status
957 */ 957 */
958 dev_mc_upload(dev); 958 dev_set_rx_mode(dev);
959 959
960 /* 960 /*
961 * Wakeup transmit queue engine 961 * Wakeup transmit queue engine
@@ -1442,7 +1442,9 @@ gso:
1442 skb->next = nskb; 1442 skb->next = nskb;
1443 return rc; 1443 return rc;
1444 } 1444 }
1445 if (unlikely(netif_queue_stopped(dev) && skb->next)) 1445 if (unlikely((netif_queue_stopped(dev) ||
1446 netif_subqueue_stopped(dev, skb->queue_mapping)) &&
1447 skb->next))
1446 return NETDEV_TX_BUSY; 1448 return NETDEV_TX_BUSY;
1447 } while (skb->next); 1449 } while (skb->next);
1448 1450
@@ -1523,8 +1525,10 @@ int dev_queue_xmit(struct sk_buff *skb)
1523 skb_headroom(skb)); 1525 skb_headroom(skb));
1524 1526
1525 if (!(dev->features & NETIF_F_GEN_CSUM) && 1527 if (!(dev->features & NETIF_F_GEN_CSUM) &&
1526 (!(dev->features & NETIF_F_IP_CSUM) || 1528 !((dev->features & NETIF_F_IP_CSUM) &&
1527 skb->protocol != htons(ETH_P_IP))) 1529 skb->protocol == htons(ETH_P_IP)) &&
1530 !((dev->features & NETIF_F_IPV6_CSUM) &&
1531 skb->protocol == htons(ETH_P_IPV6)))
1528 if (skb_checksum_help(skb)) 1532 if (skb_checksum_help(skb))
1529 goto out_kfree_skb; 1533 goto out_kfree_skb;
1530 } 1534 }
@@ -1558,6 +1562,8 @@ gso:
1558 spin_lock(&dev->queue_lock); 1562 spin_lock(&dev->queue_lock);
1559 q = dev->qdisc; 1563 q = dev->qdisc;
1560 if (q->enqueue) { 1564 if (q->enqueue) {
1565 /* reset queue_mapping to zero */
1566 skb->queue_mapping = 0;
1561 rc = q->enqueue(skb, q); 1567 rc = q->enqueue(skb, q);
1562 qdisc_run(dev); 1568 qdisc_run(dev);
1563 spin_unlock(&dev->queue_lock); 1569 spin_unlock(&dev->queue_lock);
@@ -1587,7 +1593,8 @@ gso:
1587 1593
1588 HARD_TX_LOCK(dev, cpu); 1594 HARD_TX_LOCK(dev, cpu);
1589 1595
1590 if (!netif_queue_stopped(dev)) { 1596 if (!netif_queue_stopped(dev) &&
1597 !netif_subqueue_stopped(dev, skb->queue_mapping)) {
1591 rc = 0; 1598 rc = 0;
1592 if (!dev_hard_start_xmit(skb, dev)) { 1599 if (!dev_hard_start_xmit(skb, dev)) {
1593 HARD_TX_UNLOCK(dev); 1600 HARD_TX_UNLOCK(dev);
@@ -2510,17 +2517,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
2510 return 0; 2517 return 0;
2511} 2518}
2512 2519
2513/** 2520static void __dev_set_promiscuity(struct net_device *dev, int inc)
2514 * dev_set_promiscuity - update promiscuity count on a device
2515 * @dev: device
2516 * @inc: modifier
2517 *
2518 * Add or remove promiscuity from a device. While the count in the device
2519 * remains above zero the interface remains promiscuous. Once it hits zero
2520 * the device reverts back to normal filtering operation. A negative inc
2521 * value is used to drop promiscuity on the device.
2522 */
2523void dev_set_promiscuity(struct net_device *dev, int inc)
2524{ 2521{
2525 unsigned short old_flags = dev->flags; 2522 unsigned short old_flags = dev->flags;
2526 2523
@@ -2529,7 +2526,6 @@ void dev_set_promiscuity(struct net_device *dev, int inc)
2529 else 2526 else
2530 dev->flags |= IFF_PROMISC; 2527 dev->flags |= IFF_PROMISC;
2531 if (dev->flags != old_flags) { 2528 if (dev->flags != old_flags) {
2532 dev_mc_upload(dev);
2533 printk(KERN_INFO "device %s %s promiscuous mode\n", 2529 printk(KERN_INFO "device %s %s promiscuous mode\n",
2534 dev->name, (dev->flags & IFF_PROMISC) ? "entered" : 2530 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2535 "left"); 2531 "left");
@@ -2543,6 +2539,25 @@ void dev_set_promiscuity(struct net_device *dev, int inc)
2543} 2539}
2544 2540
2545/** 2541/**
2542 * dev_set_promiscuity - update promiscuity count on a device
2543 * @dev: device
2544 * @inc: modifier
2545 *
2546 * Add or remove promiscuity from a device. While the count in the device
2547 * remains above zero the interface remains promiscuous. Once it hits zero
2548 * the device reverts back to normal filtering operation. A negative inc
2549 * value is used to drop promiscuity on the device.
2550 */
2551void dev_set_promiscuity(struct net_device *dev, int inc)
2552{
2553 unsigned short old_flags = dev->flags;
2554
2555 __dev_set_promiscuity(dev, inc);
2556 if (dev->flags != old_flags)
2557 dev_set_rx_mode(dev);
2558}
2559
2560/**
2546 * dev_set_allmulti - update allmulti count on a device 2561 * dev_set_allmulti - update allmulti count on a device
2547 * @dev: device 2562 * @dev: device
2548 * @inc: modifier 2563 * @inc: modifier
@@ -2562,7 +2577,176 @@ void dev_set_allmulti(struct net_device *dev, int inc)
2562 if ((dev->allmulti += inc) == 0) 2577 if ((dev->allmulti += inc) == 0)
2563 dev->flags &= ~IFF_ALLMULTI; 2578 dev->flags &= ~IFF_ALLMULTI;
2564 if (dev->flags ^ old_flags) 2579 if (dev->flags ^ old_flags)
2565 dev_mc_upload(dev); 2580 dev_set_rx_mode(dev);
2581}
2582
2583/*
2584 * Upload unicast and multicast address lists to device and
2585 * configure RX filtering. When the device doesn't support unicast
 2586 * filtering it is put in promiscuous mode while unicast addresses
2587 * are present.
2588 */
2589void __dev_set_rx_mode(struct net_device *dev)
2590{
2591 /* dev_open will call this function so the list will stay sane. */
2592 if (!(dev->flags&IFF_UP))
2593 return;
2594
2595 if (!netif_device_present(dev))
2596 return;
2597
2598 if (dev->set_rx_mode)
2599 dev->set_rx_mode(dev);
2600 else {
2601 /* Unicast addresses changes may only happen under the rtnl,
2602 * therefore calling __dev_set_promiscuity here is safe.
2603 */
2604 if (dev->uc_count > 0 && !dev->uc_promisc) {
2605 __dev_set_promiscuity(dev, 1);
2606 dev->uc_promisc = 1;
2607 } else if (dev->uc_count == 0 && dev->uc_promisc) {
2608 __dev_set_promiscuity(dev, -1);
2609 dev->uc_promisc = 0;
2610 }
2611
2612 if (dev->set_multicast_list)
2613 dev->set_multicast_list(dev);
2614 }
2615}
2616
2617void dev_set_rx_mode(struct net_device *dev)
2618{
2619 netif_tx_lock_bh(dev);
2620 __dev_set_rx_mode(dev);
2621 netif_tx_unlock_bh(dev);
2622}
2623
2624int __dev_addr_delete(struct dev_addr_list **list, int *count,
2625 void *addr, int alen, int glbl)
2626{
2627 struct dev_addr_list *da;
2628
2629 for (; (da = *list) != NULL; list = &da->next) {
2630 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2631 alen == da->da_addrlen) {
2632 if (glbl) {
2633 int old_glbl = da->da_gusers;
2634 da->da_gusers = 0;
2635 if (old_glbl == 0)
2636 break;
2637 }
2638 if (--da->da_users)
2639 return 0;
2640
2641 *list = da->next;
2642 kfree(da);
2643 (*count)--;
2644 return 0;
2645 }
2646 }
2647 return -ENOENT;
2648}
2649
2650int __dev_addr_add(struct dev_addr_list **list, int *count,
2651 void *addr, int alen, int glbl)
2652{
2653 struct dev_addr_list *da;
2654
2655 for (da = *list; da != NULL; da = da->next) {
2656 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2657 da->da_addrlen == alen) {
2658 if (glbl) {
2659 int old_glbl = da->da_gusers;
2660 da->da_gusers = 1;
2661 if (old_glbl)
2662 return 0;
2663 }
2664 da->da_users++;
2665 return 0;
2666 }
2667 }
2668
2669 da = kmalloc(sizeof(*da), GFP_ATOMIC);
2670 if (da == NULL)
2671 return -ENOMEM;
2672 memcpy(da->da_addr, addr, alen);
2673 da->da_addrlen = alen;
2674 da->da_users = 1;
2675 da->da_gusers = glbl ? 1 : 0;
2676 da->next = *list;
2677 *list = da;
2678 (*count)++;
2679 return 0;
2680}
2681
2682void __dev_addr_discard(struct dev_addr_list **list)
2683{
2684 struct dev_addr_list *tmp;
2685
2686 while (*list != NULL) {
2687 tmp = *list;
2688 *list = tmp->next;
2689 if (tmp->da_users > tmp->da_gusers)
2690 printk("__dev_addr_discard: address leakage! "
2691 "da_users=%d\n", tmp->da_users);
2692 kfree(tmp);
2693 }
2694}
2695
2696/**
2697 * dev_unicast_delete - Release secondary unicast address.
2698 * @dev: device
2699 *
2700 * Release reference to a secondary unicast address and remove it
2701 * from the device if the reference count drop to zero.
2702 *
2703 * The caller must hold the rtnl_mutex.
2704 */
2705int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
2706{
2707 int err;
2708
2709 ASSERT_RTNL();
2710
2711 netif_tx_lock_bh(dev);
2712 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2713 if (!err)
2714 __dev_set_rx_mode(dev);
2715 netif_tx_unlock_bh(dev);
2716 return err;
2717}
2718EXPORT_SYMBOL(dev_unicast_delete);
2719
2720/**
2721 * dev_unicast_add - add a secondary unicast address
2722 * @dev: device
2723 *
2724 * Add a secondary unicast address to the device or increase
2725 * the reference count if it already exists.
2726 *
2727 * The caller must hold the rtnl_mutex.
2728 */
2729int dev_unicast_add(struct net_device *dev, void *addr, int alen)
2730{
2731 int err;
2732
2733 ASSERT_RTNL();
2734
2735 netif_tx_lock_bh(dev);
2736 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2737 if (!err)
2738 __dev_set_rx_mode(dev);
2739 netif_tx_unlock_bh(dev);
2740 return err;
2741}
2742EXPORT_SYMBOL(dev_unicast_add);
2743
2744static void dev_unicast_discard(struct net_device *dev)
2745{
2746 netif_tx_lock_bh(dev);
2747 __dev_addr_discard(&dev->uc_list);
2748 dev->uc_count = 0;
2749 netif_tx_unlock_bh(dev);
2566} 2750}
2567 2751
2568unsigned dev_get_flags(const struct net_device *dev) 2752unsigned dev_get_flags(const struct net_device *dev)
@@ -2608,7 +2792,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
2608 * Load in the correct multicast list now the flags have changed. 2792 * Load in the correct multicast list now the flags have changed.
2609 */ 2793 */
2610 2794
2611 dev_mc_upload(dev); 2795 dev_set_rx_mode(dev);
2612 2796
2613 /* 2797 /*
2614 * Have we downed the interface. We handle IFF_UP ourselves 2798 * Have we downed the interface. We handle IFF_UP ourselves
@@ -2621,7 +2805,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
2621 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev); 2805 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
2622 2806
2623 if (!ret) 2807 if (!ret)
2624 dev_mc_upload(dev); 2808 dev_set_rx_mode(dev);
2625 } 2809 }
2626 2810
2627 if (dev->flags & IFF_UP && 2811 if (dev->flags & IFF_UP &&
@@ -3121,6 +3305,22 @@ int register_netdevice(struct net_device *dev)
3121 } 3305 }
3122 } 3306 }
3123 3307
3308 /* Fix illegal checksum combinations */
3309 if ((dev->features & NETIF_F_HW_CSUM) &&
3310 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3311 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3312 dev->name);
3313 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3314 }
3315
3316 if ((dev->features & NETIF_F_NO_CSUM) &&
3317 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3318 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3319 dev->name);
3320 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3321 }
3322
3323
3124 /* Fix illegal SG+CSUM combinations. */ 3324 /* Fix illegal SG+CSUM combinations. */
3125 if ((dev->features & NETIF_F_SG) && 3325 if ((dev->features & NETIF_F_SG) &&
3126 !(dev->features & NETIF_F_ALL_CSUM)) { 3326 !(dev->features & NETIF_F_ALL_CSUM)) {
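Viewed from a driver, the rule these new fixups enforce is: advertise either the generic NETIF_F_HW_CSUM or the protocol-specific NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM bits, never both. A minimal sketch (the myeth names are hypothetical):

#include <linux/netdevice.h>

static int myeth_register(struct net_device *dev, int hw_csum_is_generic)
{
	/* Pick one checksum style; mixing them now gets the
	 * protocol-specific bits stripped with a KERN_NOTICE.
	 */
	if (hw_csum_is_generic)
		dev->features |= NETIF_F_HW_CSUM;
	else
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	return register_netdevice(dev);	/* caller holds the RTNL */
}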
@@ -3357,16 +3557,18 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
3357} 3557}
3358 3558
3359/** 3559/**
3360 * alloc_netdev - allocate network device 3560 * alloc_netdev_mq - allocate network device
3361 * @sizeof_priv: size of private data to allocate space for 3561 * @sizeof_priv: size of private data to allocate space for
3362 * @name: device name format string 3562 * @name: device name format string
3363 * @setup: callback to initialize device 3563 * @setup: callback to initialize device
3564 * @queue_count: the number of subqueues to allocate
3364 * 3565 *
3365 * Allocates a struct net_device with private data area for driver use 3566 * Allocates a struct net_device with private data area for driver use
 3366 * and performs basic initialization. 3567 * and performs basic initialization. Also allocates subqueue structs
3568 * for each queue on the device at the end of the netdevice.
3367 */ 3569 */
3368struct net_device *alloc_netdev(int sizeof_priv, const char *name, 3570struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
3369 void (*setup)(struct net_device *)) 3571 void (*setup)(struct net_device *), unsigned int queue_count)
3370{ 3572{
3371 void *p; 3573 void *p;
3372 struct net_device *dev; 3574 struct net_device *dev;
@@ -3375,7 +3577,9 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
3375 BUG_ON(strlen(name) >= sizeof(dev->name)); 3577 BUG_ON(strlen(name) >= sizeof(dev->name));
3376 3578
3377 /* ensure 32-byte alignment of both the device and private area */ 3579 /* ensure 32-byte alignment of both the device and private area */
3378 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST; 3580 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST +
3581 (sizeof(struct net_device_subqueue) * queue_count)) &
3582 ~NETDEV_ALIGN_CONST;
3379 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST; 3583 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
3380 3584
3381 p = kzalloc(alloc_size, GFP_KERNEL); 3585 p = kzalloc(alloc_size, GFP_KERNEL);
@@ -3388,15 +3592,22 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
3388 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); 3592 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
3389 dev->padded = (char *)dev - (char *)p; 3593 dev->padded = (char *)dev - (char *)p;
3390 3594
3391 if (sizeof_priv) 3595 if (sizeof_priv) {
3392 dev->priv = netdev_priv(dev); 3596 dev->priv = ((char *)dev +
3597 ((sizeof(struct net_device) +
3598 (sizeof(struct net_device_subqueue) *
3599 queue_count) + NETDEV_ALIGN_CONST)
3600 & ~NETDEV_ALIGN_CONST));
3601 }
3602
3603 dev->egress_subqueue_count = queue_count;
3393 3604
3394 dev->get_stats = internal_stats; 3605 dev->get_stats = internal_stats;
3395 setup(dev); 3606 setup(dev);
3396 strcpy(dev->name, name); 3607 strcpy(dev->name, name);
3397 return dev; 3608 return dev;
3398} 3609}
3399EXPORT_SYMBOL(alloc_netdev); 3610EXPORT_SYMBOL(alloc_netdev_mq);
3400 3611
3401/** 3612/**
3402 * free_netdev - free network device 3613 * free_netdev - free network device
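A sketch of how a multiqueue driver would use the renamed allocator, assuming the netif_*_subqueue helpers from the same multiqueue series; the myeth names and ring helpers are hypothetical:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct myeth_priv {
	int dummy;	/* per-ring state would live here */
};

static struct net_device *myeth_alloc(void)
{
	/* One net_device_subqueue per TX ring is laid out between the
	 * struct net_device and the private area.
	 */
	return alloc_netdev_mq(sizeof(struct myeth_priv), "myeth%d",
			       ether_setup, 4);
}

static int myeth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 ring = skb->queue_mapping;

	if (myeth_ring_full(dev, ring)) {	/* hypothetical */
		/* flow-control only the congested ring, not the device */
		netif_stop_subqueue(dev, ring);
		return NETDEV_TX_BUSY;
	}
	return myeth_queue_on_ring(dev, skb, ring);	/* hypothetical */
}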
@@ -3485,8 +3696,9 @@ void unregister_netdevice(struct net_device *dev)
3485 raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); 3696 raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
3486 3697
3487 /* 3698 /*
3488 * Flush the multicast chain 3699 * Flush the unicast and multicast chains
3489 */ 3700 */
3701 dev_unicast_discard(dev);
3490 dev_mc_discard(dev); 3702 dev_mc_discard(dev);
3491 3703
3492 if (dev->uninit) 3704 if (dev->uninit)
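What the new secondary unicast list buys a stacked driver, as a sketch (hypothetical names; per the kernel-doc above, both calls require the RTNL): an extra MAC address becomes visible on a parent device, and __dev_set_rx_mode() only falls back to promiscuous mode when the hardware has no set_rx_mode hook.

#include <linux/if_ether.h>
#include <linux/netdevice.h>

static int child_attach(struct net_device *parent, struct net_device *child)
{
	ASSERT_RTNL();
	/* Reference counted: re-adding the same address only bumps
	 * da_users; the RX filter is reprogrammed when the list changes.
	 */
	return dev_unicast_add(parent, child->dev_addr, ETH_ALEN);
}

static void child_detach(struct net_device *parent, struct net_device *child)
{
	ASSERT_RTNL();
	dev_unicast_delete(parent, child->dev_addr, ETH_ALEN);
}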
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 5a54053386c8..aa38100601fb 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -64,85 +64,24 @@
64 */ 64 */
65 65
66/* 66/*
67 * Update the multicast list into the physical NIC controller.
68 */
69
70static void __dev_mc_upload(struct net_device *dev)
71{
72 /* Don't do anything till we up the interface
73 * [dev_open will call this function so the list will
74 * stay sane]
75 */
76
77 if (!(dev->flags&IFF_UP))
78 return;
79
80 /*
81 * Devices with no set multicast or which have been
82 * detached don't get set.
83 */
84
85 if (dev->set_multicast_list == NULL ||
86 !netif_device_present(dev))
87 return;
88
89 dev->set_multicast_list(dev);
90}
91
92void dev_mc_upload(struct net_device *dev)
93{
94 netif_tx_lock_bh(dev);
95 __dev_mc_upload(dev);
96 netif_tx_unlock_bh(dev);
97}
98
99/*
100 * Delete a device level multicast 67 * Delete a device level multicast
101 */ 68 */
102 69
103int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl) 70int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
104{ 71{
105 int err = 0; 72 int err;
106 struct dev_mc_list *dmi, **dmip;
107 73
108 netif_tx_lock_bh(dev); 74 netif_tx_lock_bh(dev);
109 75 err = __dev_addr_delete(&dev->mc_list, &dev->mc_count,
110 for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) { 76 addr, alen, glbl);
77 if (!err) {
111 /* 78 /*
112 * Find the entry we want to delete. The device could 79 * We have altered the list, so the card
113 * have variable length entries so check these too. 80 * loaded filter is now wrong. Fix it
114 */ 81 */
115 if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
116 alen == dmi->dmi_addrlen) {
117 if (glbl) {
118 int old_glbl = dmi->dmi_gusers;
119 dmi->dmi_gusers = 0;
120 if (old_glbl == 0)
121 break;
122 }
123 if (--dmi->dmi_users)
124 goto done;
125 82
126 /* 83 __dev_set_rx_mode(dev);
127 * Last user. So delete the entry.
128 */
129 *dmip = dmi->next;
130 dev->mc_count--;
131
132 kfree(dmi);
133
134 /*
135 * We have altered the list, so the card
136 * loaded filter is now wrong. Fix it
137 */
138 __dev_mc_upload(dev);
139
140 netif_tx_unlock_bh(dev);
141 return 0;
142 }
143 } 84 }
144 err = -ENOENT;
145done:
146 netif_tx_unlock_bh(dev); 85 netif_tx_unlock_bh(dev);
147 return err; 86 return err;
148} 87}
@@ -153,46 +92,13 @@ done:
153 92
154int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl) 93int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
155{ 94{
156 int err = 0; 95 int err;
157 struct dev_mc_list *dmi, *dmi1;
158
159 dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
160 96
161 netif_tx_lock_bh(dev); 97 netif_tx_lock_bh(dev);
162 for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) { 98 err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
163 if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 && 99 if (!err)
164 dmi->dmi_addrlen == alen) { 100 __dev_set_rx_mode(dev);
165 if (glbl) {
166 int old_glbl = dmi->dmi_gusers;
167 dmi->dmi_gusers = 1;
168 if (old_glbl)
169 goto done;
170 }
171 dmi->dmi_users++;
172 goto done;
173 }
174 }
175
176 if ((dmi = dmi1) == NULL) {
177 netif_tx_unlock_bh(dev);
178 return -ENOMEM;
179 }
180 memcpy(dmi->dmi_addr, addr, alen);
181 dmi->dmi_addrlen = alen;
182 dmi->next = dev->mc_list;
183 dmi->dmi_users = 1;
184 dmi->dmi_gusers = glbl ? 1 : 0;
185 dev->mc_list = dmi;
186 dev->mc_count++;
187
188 __dev_mc_upload(dev);
189
190 netif_tx_unlock_bh(dev);
191 return 0;
192
193done:
194 netif_tx_unlock_bh(dev); 101 netif_tx_unlock_bh(dev);
195 kfree(dmi1);
196 return err; 102 return err;
197} 103}
198 104
@@ -203,16 +109,8 @@ done:
203void dev_mc_discard(struct net_device *dev) 109void dev_mc_discard(struct net_device *dev)
204{ 110{
205 netif_tx_lock_bh(dev); 111 netif_tx_lock_bh(dev);
206 112 __dev_addr_discard(&dev->mc_list);
207 while (dev->mc_list != NULL) {
208 struct dev_mc_list *tmp = dev->mc_list;
209 dev->mc_list = tmp->next;
210 if (tmp->dmi_users > tmp->dmi_gusers)
211 printk("dev_mc_discard: multicast leakage! dmi_users=%d\n", tmp->dmi_users);
212 kfree(tmp);
213 }
214 dev->mc_count = 0; 113 dev->mc_count = 0;
215
216 netif_tx_unlock_bh(dev); 114 netif_tx_unlock_bh(dev);
217} 115}
218 116
@@ -244,7 +142,7 @@ static void dev_mc_seq_stop(struct seq_file *seq, void *v)
244 142
245static int dev_mc_seq_show(struct seq_file *seq, void *v) 143static int dev_mc_seq_show(struct seq_file *seq, void *v)
246{ 144{
247 struct dev_mc_list *m; 145 struct dev_addr_list *m;
248 struct net_device *dev = v; 146 struct net_device *dev = v;
249 147
250 netif_tx_lock_bh(dev); 148 netif_tx_lock_bh(dev);
@@ -292,4 +190,3 @@ void __init dev_mcast_init(void)
292 190
293EXPORT_SYMBOL(dev_mc_add); 191EXPORT_SYMBOL(dev_mc_add);
294EXPORT_SYMBOL(dev_mc_delete); 192EXPORT_SYMBOL(dev_mc_delete);
295EXPORT_SYMBOL(dev_mc_upload);
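The rewritten entry points keep their old reference-counting semantics on top of the shared helpers; a sketch of what a caller observes (the group address is arbitrary):

#include <linux/if_ether.h>
#include <linux/netdevice.h>

static u8 grp[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

static void mc_refcount_demo(struct net_device *dev)
{
	dev_mc_add(dev, grp, ETH_ALEN, 0);	/* entry created, da_users == 1 */
	dev_mc_add(dev, grp, ETH_ALEN, 0);	/* same entry, da_users == 2 */
	dev_mc_delete(dev, grp, ETH_ALEN, 0);	/* entry kept */
	dev_mc_delete(dev, grp, ETH_ALEN, 0);	/* freed, filter reloaded */
}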
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 17daf4c9f793..cc84d8d8a3c7 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -128,7 +128,8 @@ static void est_timer(unsigned long arg)
128 spin_unlock(e->stats_lock); 128 spin_unlock(e->stats_lock);
129 } 129 }
130 130
131 mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4)); 131 if (elist[idx].list != NULL)
132 mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
132 read_unlock(&est_lock); 133 read_unlock(&est_lock);
133} 134}
134 135
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a0efdd7a6b37..d1264e9a50a8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -66,8 +66,9 @@ static void queue_process(struct work_struct *work)
66 66
67 local_irq_save(flags); 67 local_irq_save(flags);
68 netif_tx_lock(dev); 68 netif_tx_lock(dev);
69 if (netif_queue_stopped(dev) || 69 if ((netif_queue_stopped(dev) ||
70 dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { 70 netif_subqueue_stopped(dev, skb->queue_mapping)) ||
71 dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
71 skb_queue_head(&npinfo->txq, skb); 72 skb_queue_head(&npinfo->txq, skb);
72 netif_tx_unlock(dev); 73 netif_tx_unlock(dev);
73 local_irq_restore(flags); 74 local_irq_restore(flags);
@@ -123,6 +124,13 @@ static void poll_napi(struct netpoll *np)
123 if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) && 124 if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
124 npinfo->poll_owner != smp_processor_id() && 125 npinfo->poll_owner != smp_processor_id() &&
125 spin_trylock(&npinfo->poll_lock)) { 126 spin_trylock(&npinfo->poll_lock)) {
127 /* When calling dev->poll from poll_napi, we may end up in
128 * netif_rx_complete. However, only the CPU to which the
129 * device was queued is allowed to remove it from poll_list.
130 * Setting POLL_LIST_FROZEN tells netif_rx_complete
131 * to leave the NAPI state alone.
132 */
133 set_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
126 npinfo->rx_flags |= NETPOLL_RX_DROP; 134 npinfo->rx_flags |= NETPOLL_RX_DROP;
127 atomic_inc(&trapped); 135 atomic_inc(&trapped);
128 136
@@ -130,6 +138,7 @@ static void poll_napi(struct netpoll *np)
130 138
131 atomic_dec(&trapped); 139 atomic_dec(&trapped);
132 npinfo->rx_flags &= ~NETPOLL_RX_DROP; 140 npinfo->rx_flags &= ~NETPOLL_RX_DROP;
141 clear_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
133 spin_unlock(&npinfo->poll_lock); 142 spin_unlock(&npinfo->poll_lock);
134 } 143 }
135} 144}
@@ -254,7 +263,8 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
254 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; 263 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
255 tries > 0; --tries) { 264 tries > 0; --tries) {
256 if (netif_tx_trylock(dev)) { 265 if (netif_tx_trylock(dev)) {
257 if (!netif_queue_stopped(dev)) 266 if (!netif_queue_stopped(dev) &&
267 !netif_subqueue_stopped(dev, skb->queue_mapping))
258 status = dev->hard_start_xmit(skb, dev); 268 status = dev->hard_start_xmit(skb, dev);
259 netif_tx_unlock(dev); 269 netif_tx_unlock(dev);
260 270
@@ -781,7 +791,6 @@ void netpoll_cleanup(struct netpoll *np)
781 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 791 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
782 } 792 }
783 793
784 np->dev->npinfo = NULL;
785 if (atomic_dec_and_test(&npinfo->refcnt)) { 794 if (atomic_dec_and_test(&npinfo->refcnt)) {
786 skb_queue_purge(&npinfo->arp_tx); 795 skb_queue_purge(&npinfo->arp_tx);
787 skb_queue_purge(&npinfo->txq); 796 skb_queue_purge(&npinfo->txq);
@@ -794,6 +803,7 @@ void netpoll_cleanup(struct netpoll *np)
794 kfree_skb(skb); 803 kfree_skb(skb);
795 } 804 }
796 kfree(npinfo); 805 kfree(npinfo);
806 np->dev->npinfo = NULL;
797 } 807 }
798 } 808 }
799 809
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 9cd3a1cb60ef..75215331b045 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -152,6 +152,9 @@
152#include <net/checksum.h> 152#include <net/checksum.h>
153#include <net/ipv6.h> 153#include <net/ipv6.h>
154#include <net/addrconf.h> 154#include <net/addrconf.h>
155#ifdef CONFIG_XFRM
156#include <net/xfrm.h>
157#endif
155#include <asm/byteorder.h> 158#include <asm/byteorder.h>
156#include <linux/rcupdate.h> 159#include <linux/rcupdate.h>
157#include <asm/bitops.h> 160#include <asm/bitops.h>
@@ -181,6 +184,8 @@
181#define F_MPLS_RND (1<<8) /* Random MPLS labels */ 184#define F_MPLS_RND (1<<8) /* Random MPLS labels */
182#define F_VID_RND (1<<9) /* Random VLAN ID */ 185#define F_VID_RND (1<<9) /* Random VLAN ID */
183#define F_SVID_RND (1<<10) /* Random SVLAN ID */ 186#define F_SVID_RND (1<<10) /* Random SVLAN ID */
187#define F_FLOW_SEQ (1<<11) /* Sequential flows */
188#define F_IPSEC_ON (1<<12) /* ipsec on for flows */
184 189
185/* Thread control flag bits */ 190/* Thread control flag bits */
186#define T_TERMINATE (1<<0) 191#define T_TERMINATE (1<<0)
@@ -207,8 +212,15 @@ static struct proc_dir_entry *pg_proc_dir = NULL;
207struct flow_state { 212struct flow_state {
208 __be32 cur_daddr; 213 __be32 cur_daddr;
209 int count; 214 int count;
215#ifdef CONFIG_XFRM
216 struct xfrm_state *x;
217#endif
218 __u32 flags;
210}; 219};
211 220
221/* flow flag bits */
222#define F_INIT (1<<0) /* flow has been initialized */
223
212struct pktgen_dev { 224struct pktgen_dev {
213 /* 225 /*
214 * Try to keep frequent/infrequent used vars. separated. 226 * Try to keep frequent/infrequent used vars. separated.
@@ -228,6 +240,7 @@ struct pktgen_dev {
228 240
229 int min_pkt_size; /* = ETH_ZLEN; */ 241 int min_pkt_size; /* = ETH_ZLEN; */
230 int max_pkt_size; /* = ETH_ZLEN; */ 242 int max_pkt_size; /* = ETH_ZLEN; */
243 int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */
231 int nfrags; 244 int nfrags;
232 __u32 delay_us; /* Default delay */ 245 __u32 delay_us; /* Default delay */
233 __u32 delay_ns; 246 __u32 delay_ns;
@@ -341,7 +354,11 @@ struct pktgen_dev {
341 unsigned cflows; /* Concurrent flows (config) */ 354 unsigned cflows; /* Concurrent flows (config) */
342 unsigned lflow; /* Flow length (config) */ 355 unsigned lflow; /* Flow length (config) */
343 unsigned nflows; /* accumulated flows (stats) */ 356 unsigned nflows; /* accumulated flows (stats) */
344 357 unsigned curfl; /* current sequenced flow (state)*/
358#ifdef CONFIG_XFRM
359 __u8 ipsmode; /* IPSEC mode (config) */
360 __u8 ipsproto; /* IPSEC type (config) */
361#endif
345 char result[512]; 362 char result[512];
346}; 363};
347 364
@@ -690,6 +707,18 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
690 if (pkt_dev->flags & F_MPLS_RND) 707 if (pkt_dev->flags & F_MPLS_RND)
691 seq_printf(seq, "MPLS_RND "); 708 seq_printf(seq, "MPLS_RND ");
692 709
710 if (pkt_dev->cflows) {
711 if (pkt_dev->flags & F_FLOW_SEQ)
712 seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/
713 else
714 seq_printf(seq, "FLOW_RND ");
715 }
716
717#ifdef CONFIG_XFRM
718 if (pkt_dev->flags & F_IPSEC_ON)
719 seq_printf(seq, "IPSEC ");
720#endif
721
693 if (pkt_dev->flags & F_MACSRC_RND) 722 if (pkt_dev->flags & F_MACSRC_RND)
694 seq_printf(seq, "MACSRC_RND "); 723 seq_printf(seq, "MACSRC_RND ");
695 724
@@ -1181,6 +1210,14 @@ static ssize_t pktgen_if_write(struct file *file,
1181 else if (strcmp(f, "!SVID_RND") == 0) 1210 else if (strcmp(f, "!SVID_RND") == 0)
1182 pkt_dev->flags &= ~F_SVID_RND; 1211 pkt_dev->flags &= ~F_SVID_RND;
1183 1212
1213 else if (strcmp(f, "FLOW_SEQ") == 0)
1214 pkt_dev->flags |= F_FLOW_SEQ;
1215
1216#ifdef CONFIG_XFRM
1217 else if (strcmp(f, "IPSEC") == 0)
1218 pkt_dev->flags |= F_IPSEC_ON;
1219#endif
1220
1184 else if (strcmp(f, "!IPV6") == 0) 1221 else if (strcmp(f, "!IPV6") == 0)
1185 pkt_dev->flags &= ~F_IPV6; 1222 pkt_dev->flags &= ~F_IPV6;
1186 1223
@@ -1189,7 +1226,7 @@ static ssize_t pktgen_if_write(struct file *file,
1189 "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", 1226 "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
1190 f, 1227 f,
1191 "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, " 1228 "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, "
1192 "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND\n"); 1229 "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC\n");
1193 return count; 1230 return count;
1194 } 1231 }
1195 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); 1232 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
@@ -2075,6 +2112,70 @@ static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us)
2075 pkt_dev->idle_acc += now - start; 2112 pkt_dev->idle_acc += now - start;
2076} 2113}
2077 2114
2115static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
2116{
2117 pkt_dev->pkt_overhead = 0;
2118 pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
2119 pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
2120 pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
2121}
2122
2123static inline int f_seen(struct pktgen_dev *pkt_dev, int flow)
2124{
2125
2126 if (pkt_dev->flows[flow].flags & F_INIT)
2127 return 1;
2128 else
2129 return 0;
2130}
2131
2132static inline int f_pick(struct pktgen_dev *pkt_dev)
2133{
2134 int flow = pkt_dev->curfl;
2135
2136 if (pkt_dev->flags & F_FLOW_SEQ) {
2137 if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
2138 /* reset time */
2139 pkt_dev->flows[flow].count = 0;
2140 pkt_dev->curfl += 1;
2141 if (pkt_dev->curfl >= pkt_dev->cflows)
2142 pkt_dev->curfl = 0; /*reset */
2143 }
2144 } else {
2145 flow = random32() % pkt_dev->cflows;
 2146 pkt_dev->curfl = flow;
2147 if (pkt_dev->flows[flow].count > pkt_dev->lflow)
2148 pkt_dev->flows[flow].count = 0;
2149 }
2150
2151 return pkt_dev->curfl;
2152}
2153
2154
2155#ifdef CONFIG_XFRM
2156/* If there was already an IPSEC SA, we keep it as is, else
2157 * we go look for it ...
2158*/
 2159 static inline
2160void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
2161{
2162 struct xfrm_state *x = pkt_dev->flows[flow].x;
2163 if (!x) {
 2164 /* slow path: we don't already have an xfrm_state */
2165 x = xfrm_stateonly_find((xfrm_address_t *)&pkt_dev->cur_daddr,
2166 (xfrm_address_t *)&pkt_dev->cur_saddr,
2167 AF_INET,
2168 pkt_dev->ipsmode,
2169 pkt_dev->ipsproto, 0);
2170 if (x) {
2171 pkt_dev->flows[flow].x = x;
2172 set_pkt_overhead(pkt_dev);
 2173 pkt_dev->pkt_overhead += x->props.header_len;
2174 }
2175
2176 }
2177}
2178#endif
2078/* Increment/randomize headers according to flags and current values 2179/* Increment/randomize headers according to flags and current values
2079 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst 2180 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst
2080 */ 2181 */
@@ -2084,12 +2185,8 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2084 __u32 imx; 2185 __u32 imx;
2085 int flow = 0; 2186 int flow = 0;
2086 2187
2087 if (pkt_dev->cflows) { 2188 if (pkt_dev->cflows)
2088 flow = random32() % pkt_dev->cflows; 2189 flow = f_pick(pkt_dev);
2089
2090 if (pkt_dev->flows[flow].count > pkt_dev->lflow)
2091 pkt_dev->flows[flow].count = 0;
2092 }
2093 2190
2094 /* Deal with source MAC */ 2191 /* Deal with source MAC */
2095 if (pkt_dev->src_mac_count > 1) { 2192 if (pkt_dev->src_mac_count > 1) {
@@ -2205,7 +2302,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2205 pkt_dev->cur_saddr = htonl(t); 2302 pkt_dev->cur_saddr = htonl(t);
2206 } 2303 }
2207 2304
2208 if (pkt_dev->cflows && pkt_dev->flows[flow].count != 0) { 2305 if (pkt_dev->cflows && f_seen(pkt_dev, flow)) {
2209 pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr; 2306 pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr;
2210 } else { 2307 } else {
2211 imn = ntohl(pkt_dev->daddr_min); 2308 imn = ntohl(pkt_dev->daddr_min);
@@ -2235,8 +2332,13 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2235 } 2332 }
2236 } 2333 }
2237 if (pkt_dev->cflows) { 2334 if (pkt_dev->cflows) {
2335 pkt_dev->flows[flow].flags |= F_INIT;
2238 pkt_dev->flows[flow].cur_daddr = 2336 pkt_dev->flows[flow].cur_daddr =
2239 pkt_dev->cur_daddr; 2337 pkt_dev->cur_daddr;
2338#ifdef CONFIG_XFRM
2339 if (pkt_dev->flags & F_IPSEC_ON)
2340 get_ipsec_sa(pkt_dev, flow);
2341#endif
2240 pkt_dev->nflows++; 2342 pkt_dev->nflows++;
2241 } 2343 }
2242 } 2344 }
@@ -2277,6 +2379,91 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2277 pkt_dev->flows[flow].count++; 2379 pkt_dev->flows[flow].count++;
2278} 2380}
2279 2381
2382
2383#ifdef CONFIG_XFRM
2384static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2385{
2386 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2387 int err = 0;
2388 struct iphdr *iph;
2389
2390 if (!x)
2391 return 0;
 2392 /* XXX: we don't support tunnel mode for now until
2393 * we resolve the dst issue */
2394 if (x->props.mode != XFRM_MODE_TRANSPORT)
2395 return 0;
2396
2397 spin_lock(&x->lock);
2398 iph = ip_hdr(skb);
2399
2400 err = x->mode->output(x, skb);
2401 if (err)
2402 goto error;
2403 err = x->type->output(x, skb);
2404 if (err)
2405 goto error;
2406
 2407 x->curlft.bytes += skb->len;
 2408 x->curlft.packets++;
 2409 spin_unlock(&x->lock);
 2410 return 0;
 2411 error:
 2412 spin_unlock(&x->lock);
 2413 return err;
2414}
2415
2416static inline void free_SAs(struct pktgen_dev *pkt_dev)
2417{
2418 if (pkt_dev->cflows) {
2419 /* let go of the SAs if we have them */
 2420 int i;
 2421 for (i = 0; i < pkt_dev->cflows; i++) {
2422 struct xfrm_state *x = pkt_dev->flows[i].x;
2423 if (x) {
2424 xfrm_state_put(x);
2425 pkt_dev->flows[i].x = NULL;
2426 }
2427 }
2428 }
2429}
2430
2431static inline int process_ipsec(struct pktgen_dev *pkt_dev,
2432 struct sk_buff *skb, __be16 protocol)
2433{
2434 if (pkt_dev->flags & F_IPSEC_ON) {
2435 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2436 int nhead = 0;
2437 if (x) {
2438 int ret;
2439 __u8 *eth;
2440 nhead = x->props.header_len - skb_headroom(skb);
 2441 if (nhead > 0) {
2442 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
2443 if (ret < 0) {
 2444 printk("Error expanding ipsec packet %d\n", ret);
2445 return 0;
2446 }
2447 }
2448
2449 /* ipsec is not expecting ll header */
2450 skb_pull(skb, ETH_HLEN);
2451 ret = pktgen_output_ipsec(skb, pkt_dev);
2452 if (ret) {
 2453 printk("Error creating ipsec packet %d\n", ret);
2454 kfree_skb(skb);
2455 return 0;
2456 }
2457 /* restore ll */
2458 eth = (__u8 *) skb_push(skb, ETH_HLEN);
2459 memcpy(eth, pkt_dev->hh, 12);
 2460 *(u16 *)&eth[12] = protocol;
2461 }
2462 }
2463 return 1;
2464}
2465#endif
2466
2280static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) 2467static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
2281{ 2468{
2282 unsigned i; 2469 unsigned i;
@@ -2323,9 +2510,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2323 2510
2324 datalen = (odev->hard_header_len + 16) & ~0xf; 2511 datalen = (odev->hard_header_len + 16) & ~0xf;
2325 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen + 2512 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen +
2326 pkt_dev->nr_labels*sizeof(u32) + 2513 pkt_dev->pkt_overhead, GFP_ATOMIC);
2327 VLAN_TAG_SIZE(pkt_dev) + SVLAN_TAG_SIZE(pkt_dev),
2328 GFP_ATOMIC);
2329 if (!skb) { 2514 if (!skb) {
2330 sprintf(pkt_dev->result, "No memory"); 2515 sprintf(pkt_dev->result, "No memory");
2331 return NULL; 2516 return NULL;
@@ -2368,7 +2553,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2368 2553
2369 /* Eth + IPh + UDPh + mpls */ 2554 /* Eth + IPh + UDPh + mpls */
2370 datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - 2555 datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 -
2371 pkt_dev->nr_labels*sizeof(u32) - VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev); 2556 pkt_dev->pkt_overhead;
2372 if (datalen < sizeof(struct pktgen_hdr)) 2557 if (datalen < sizeof(struct pktgen_hdr))
2373 datalen = sizeof(struct pktgen_hdr); 2558 datalen = sizeof(struct pktgen_hdr);
2374 2559
@@ -2391,8 +2576,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2391 iph->check = ip_fast_csum((void *)iph, iph->ihl); 2576 iph->check = ip_fast_csum((void *)iph, iph->ihl);
2392 skb->protocol = protocol; 2577 skb->protocol = protocol;
2393 skb->mac_header = (skb->network_header - ETH_HLEN - 2578 skb->mac_header = (skb->network_header - ETH_HLEN -
2394 pkt_dev->nr_labels * sizeof(u32) - 2579 pkt_dev->pkt_overhead);
2395 VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev));
2396 skb->dev = odev; 2580 skb->dev = odev;
2397 skb->pkt_type = PACKET_HOST; 2581 skb->pkt_type = PACKET_HOST;
2398 2582
@@ -2463,6 +2647,11 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2463 pgh->tv_usec = htonl(timestamp.tv_usec); 2647 pgh->tv_usec = htonl(timestamp.tv_usec);
2464 } 2648 }
2465 2649
2650#ifdef CONFIG_XFRM
2651 if (!process_ipsec(pkt_dev, skb, protocol))
2652 return NULL;
2653#endif
2654
2466 return skb; 2655 return skb;
2467} 2656}
2468 2657
@@ -2662,9 +2851,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2662 mod_cur_headers(pkt_dev); 2851 mod_cur_headers(pkt_dev);
2663 2852
2664 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + 2853 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 +
2665 pkt_dev->nr_labels*sizeof(u32) + 2854 pkt_dev->pkt_overhead, GFP_ATOMIC);
2666 VLAN_TAG_SIZE(pkt_dev) + SVLAN_TAG_SIZE(pkt_dev),
2667 GFP_ATOMIC);
2668 if (!skb) { 2855 if (!skb) {
2669 sprintf(pkt_dev->result, "No memory"); 2856 sprintf(pkt_dev->result, "No memory");
2670 return NULL; 2857 return NULL;
@@ -2708,7 +2895,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2708 /* Eth + IPh + UDPh + mpls */ 2895 /* Eth + IPh + UDPh + mpls */
2709 datalen = pkt_dev->cur_pkt_size - 14 - 2896 datalen = pkt_dev->cur_pkt_size - 14 -
2710 sizeof(struct ipv6hdr) - sizeof(struct udphdr) - 2897 sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
2711 pkt_dev->nr_labels*sizeof(u32) - VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev); 2898 pkt_dev->pkt_overhead;
2712 2899
2713 if (datalen < sizeof(struct pktgen_hdr)) { 2900 if (datalen < sizeof(struct pktgen_hdr)) {
2714 datalen = sizeof(struct pktgen_hdr); 2901 datalen = sizeof(struct pktgen_hdr);
@@ -2738,8 +2925,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2738 ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr); 2925 ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr);
2739 2926
2740 skb->mac_header = (skb->network_header - ETH_HLEN - 2927 skb->mac_header = (skb->network_header - ETH_HLEN -
2741 pkt_dev->nr_labels * sizeof(u32) - 2928 pkt_dev->pkt_overhead);
2742 VLAN_TAG_SIZE(pkt_dev) - SVLAN_TAG_SIZE(pkt_dev));
2743 skb->protocol = protocol; 2929 skb->protocol = protocol;
2744 skb->dev = odev; 2930 skb->dev = odev;
2745 skb->pkt_type = PACKET_HOST; 2931 skb->pkt_type = PACKET_HOST;
@@ -2857,6 +3043,7 @@ static void pktgen_run(struct pktgen_thread *t)
2857 pkt_dev->started_at = getCurUs(); 3043 pkt_dev->started_at = getCurUs();
2858 pkt_dev->next_tx_us = getCurUs(); /* Transmit immediately */ 3044 pkt_dev->next_tx_us = getCurUs(); /* Transmit immediately */
2859 pkt_dev->next_tx_ns = 0; 3045 pkt_dev->next_tx_ns = 0;
3046 set_pkt_overhead(pkt_dev);
2860 3047
2861 strcpy(pkt_dev->result, "Starting"); 3048 strcpy(pkt_dev->result, "Starting");
2862 started++; 3049 started++;
@@ -3139,7 +3326,9 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3139 } 3326 }
3140 } 3327 }
3141 3328
3142 if (netif_queue_stopped(odev) || need_resched()) { 3329 if ((netif_queue_stopped(odev) ||
3330 netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
3331 need_resched()) {
3143 idle_start = getCurUs(); 3332 idle_start = getCurUs();
3144 3333
3145 if (!netif_running(odev)) { 3334 if (!netif_running(odev)) {
@@ -3154,7 +3343,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3154 3343
3155 pkt_dev->idle_acc += getCurUs() - idle_start; 3344 pkt_dev->idle_acc += getCurUs() - idle_start;
3156 3345
3157 if (netif_queue_stopped(odev)) { 3346 if (netif_queue_stopped(odev) ||
3347 netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
3158 pkt_dev->next_tx_us = getCurUs(); /* TODO */ 3348 pkt_dev->next_tx_us = getCurUs(); /* TODO */
3159 pkt_dev->next_tx_ns = 0; 3349 pkt_dev->next_tx_ns = 0;
3160 goto out; /* Try the next interface */ 3350 goto out; /* Try the next interface */
@@ -3181,7 +3371,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
3181 } 3371 }
3182 3372
3183 netif_tx_lock_bh(odev); 3373 netif_tx_lock_bh(odev);
3184 if (!netif_queue_stopped(odev)) { 3374 if (!netif_queue_stopped(odev) &&
3375 !netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
3185 3376
3186 atomic_inc(&(pkt_dev->skb->users)); 3377 atomic_inc(&(pkt_dev->skb->users));
3187 retry_now: 3378 retry_now:
@@ -3446,11 +3637,18 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3446 } 3637 }
3447 pkt_dev->entry->proc_fops = &pktgen_if_fops; 3638 pkt_dev->entry->proc_fops = &pktgen_if_fops;
3448 pkt_dev->entry->data = pkt_dev; 3639 pkt_dev->entry->data = pkt_dev;
3640#ifdef CONFIG_XFRM
3641 pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
3642 pkt_dev->ipsproto = IPPROTO_ESP;
3643#endif
3449 3644
3450 return add_dev_to_thread(t, pkt_dev); 3645 return add_dev_to_thread(t, pkt_dev);
3451out2: 3646out2:
3452 dev_put(pkt_dev->odev); 3647 dev_put(pkt_dev->odev);
3453out1: 3648out1:
3649#ifdef CONFIG_XFRM
3650 free_SAs(pkt_dev);
3651#endif
3454 if (pkt_dev->flows) 3652 if (pkt_dev->flows)
3455 vfree(pkt_dev->flows); 3653 vfree(pkt_dev->flows);
3456 kfree(pkt_dev); 3654 kfree(pkt_dev);
@@ -3545,6 +3743,9 @@ static int pktgen_remove_device(struct pktgen_thread *t,
3545 if (pkt_dev->entry) 3743 if (pkt_dev->entry)
3546 remove_proc_entry(pkt_dev->entry->name, pg_proc_dir); 3744 remove_proc_entry(pkt_dev->entry->name, pg_proc_dir);
3547 3745
3746#ifdef CONFIG_XFRM
3747 free_SAs(pkt_dev);
3748#endif
3548 if (pkt_dev->flows) 3749 if (pkt_dev->flows)
3549 vfree(pkt_dev->flows); 3750 vfree(pkt_dev->flows);
3550 kfree(pkt_dev); 3751 kfree(pkt_dev);
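User space drives the new knobs through pktgen's existing one-command-per-write proc interface. A minimal sketch (eth1 is a hypothetical interface; flows and flowlen are pre-existing commands):

#include <stdio.h>

static void pgset(const char *path, const char *cmd)
{
	FILE *f = fopen(path, "w");

	if (f) {
		fprintf(f, "%s\n", cmd);
		fclose(f);
	}
}

int main(void)
{
	const char *ifp = "/proc/net/pktgen/eth1";

	pgset(ifp, "flows 16");		/* 16 concurrent flows */
	pgset(ifp, "flowlen 100");	/* 100 packets per flow */
	pgset(ifp, "flag FLOW_SEQ");	/* walk flows in sequence */
	pgset(ifp, "flag IPSEC");	/* transform flows with matching SAs */
	return 0;
}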
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 02e8bf084277..864cbdf31ed7 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -97,6 +97,19 @@ int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len)
97 return 0; 97 return 0;
98} 98}
99 99
100int __rtattr_parse_nested_compat(struct rtattr *tb[], int maxattr,
101 struct rtattr *rta, int len)
102{
103 if (RTA_PAYLOAD(rta) < len)
104 return -1;
105 if (RTA_PAYLOAD(rta) >= RTA_ALIGN(len) + sizeof(struct rtattr)) {
106 rta = RTA_DATA(rta) + RTA_ALIGN(len);
107 return rtattr_parse_nested(tb, maxattr, rta);
108 }
109 memset(tb, 0, sizeof(struct rtattr *) * maxattr);
110 return 0;
111}
112
100static struct rtnl_link *rtnl_msg_handlers[NPROTO]; 113static struct rtnl_link *rtnl_msg_handlers[NPROTO];
101 114
102static inline int rtm_msgindex(int msgtype) 115static inline int rtm_msgindex(int msgtype)
@@ -243,6 +256,150 @@ void rtnl_unregister_all(int protocol)
243 256
244EXPORT_SYMBOL_GPL(rtnl_unregister_all); 257EXPORT_SYMBOL_GPL(rtnl_unregister_all);
245 258
259static LIST_HEAD(link_ops);
260
261/**
262 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
263 * @ops: struct rtnl_link_ops * to register
264 *
265 * The caller must hold the rtnl_mutex. This function should be used
266 * by drivers that create devices during module initialization. It
267 * must be called before registering the devices.
268 *
269 * Returns 0 on success or a negative error code.
270 */
271int __rtnl_link_register(struct rtnl_link_ops *ops)
272{
273 if (!ops->dellink)
274 ops->dellink = unregister_netdevice;
275
276 list_add_tail(&ops->list, &link_ops);
277 return 0;
278}
279
280EXPORT_SYMBOL_GPL(__rtnl_link_register);
281
282/**
283 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
284 * @ops: struct rtnl_link_ops * to register
285 *
286 * Returns 0 on success or a negative error code.
287 */
288int rtnl_link_register(struct rtnl_link_ops *ops)
289{
290 int err;
291
292 rtnl_lock();
293 err = __rtnl_link_register(ops);
294 rtnl_unlock();
295 return err;
296}
297
298EXPORT_SYMBOL_GPL(rtnl_link_register);
299
300/**
301 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
302 * @ops: struct rtnl_link_ops * to unregister
303 *
304 * The caller must hold the rtnl_mutex.
305 */
306void __rtnl_link_unregister(struct rtnl_link_ops *ops)
307{
308 struct net_device *dev, *n;
309
310 for_each_netdev_safe(dev, n) {
311 if (dev->rtnl_link_ops == ops)
312 ops->dellink(dev);
313 }
314 list_del(&ops->list);
315}
316
317EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
318
319/**
320 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
321 * @ops: struct rtnl_link_ops * to unregister
322 */
323void rtnl_link_unregister(struct rtnl_link_ops *ops)
324{
325 rtnl_lock();
326 __rtnl_link_unregister(ops);
327 rtnl_unlock();
328}
329
330EXPORT_SYMBOL_GPL(rtnl_link_unregister);
331
332static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
333{
334 const struct rtnl_link_ops *ops;
335
336 list_for_each_entry(ops, &link_ops, list) {
337 if (!strcmp(ops->kind, kind))
338 return ops;
339 }
340 return NULL;
341}
342
343static size_t rtnl_link_get_size(const struct net_device *dev)
344{
345 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
346 size_t size;
347
348 if (!ops)
349 return 0;
350
351 size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
352 nlmsg_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
353
354 if (ops->get_size)
355 /* IFLA_INFO_DATA + nested data */
356 size += nlmsg_total_size(sizeof(struct nlattr)) +
357 ops->get_size(dev);
358
359 if (ops->get_xstats_size)
360 size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */
361
362 return size;
363}
364
365static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
366{
367 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
368 struct nlattr *linkinfo, *data;
369 int err = -EMSGSIZE;
370
371 linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
372 if (linkinfo == NULL)
373 goto out;
374
375 if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
376 goto err_cancel_link;
377 if (ops->fill_xstats) {
378 err = ops->fill_xstats(skb, dev);
379 if (err < 0)
380 goto err_cancel_link;
381 }
382 if (ops->fill_info) {
383 data = nla_nest_start(skb, IFLA_INFO_DATA);
384 if (data == NULL)
385 goto err_cancel_link;
386 err = ops->fill_info(skb, dev);
387 if (err < 0)
388 goto err_cancel_data;
389 nla_nest_end(skb, data);
390 }
391
392 nla_nest_end(skb, linkinfo);
393 return 0;
394
395err_cancel_data:
396 nla_nest_cancel(skb, data);
397err_cancel_link:
398 nla_nest_cancel(skb, linkinfo);
399out:
400 return err;
401}
402
246static const int rtm_min[RTM_NR_FAMILIES] = 403static const int rtm_min[RTM_NR_FAMILIES] =
247{ 404{
248 [RTM_FAM(RTM_NEWLINK)] = NLMSG_LENGTH(sizeof(struct ifinfomsg)), 405 [RTM_FAM(RTM_NEWLINK)] = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
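A sketch of the driver side of the new ops, assuming the declarations land in <net/rtnetlink.h> (the myvlan names are hypothetical; a kind with no newlink hook is simply registered via register_netdevice() after the IFLA_* attributes are applied):

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <net/rtnetlink.h>

struct myvlan_priv {
	int dummy;	/* hypothetical private state */
};

static struct rtnl_link_ops myvlan_link_ops = {
	.kind		= "myvlan",
	.priv_size	= sizeof(struct myvlan_priv),
	.setup		= ether_setup,
	/* .dellink defaults to unregister_netdevice, see above */
};

static int __init myvlan_init(void)
{
	return rtnl_link_register(&myvlan_link_ops);
}

static void __exit myvlan_exit(void)
{
	rtnl_link_unregister(&myvlan_link_ops);
}

module_init(myvlan_init);
module_exit(myvlan_exit);
/* matches the request_module("rtnl-link-%s") probe in rtnl_newlink */
MODULE_ALIAS("rtnl-link-myvlan");
MODULE_LICENSE("GPL");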
@@ -437,7 +594,7 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
437 a->tx_compressed = b->tx_compressed; 594 a->tx_compressed = b->tx_compressed;
438}; 595};
439 596
440static inline size_t if_nlmsg_size(void) 597static inline size_t if_nlmsg_size(const struct net_device *dev)
441{ 598{
442 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 599 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
443 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 600 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
@@ -452,7 +609,8 @@ static inline size_t if_nlmsg_size(void)
452 + nla_total_size(4) /* IFLA_LINK */ 609 + nla_total_size(4) /* IFLA_LINK */
453 + nla_total_size(4) /* IFLA_MASTER */ 610 + nla_total_size(4) /* IFLA_MASTER */
454 + nla_total_size(1) /* IFLA_OPERSTATE */ 611 + nla_total_size(1) /* IFLA_OPERSTATE */
455 + nla_total_size(1); /* IFLA_LINKMODE */ 612 + nla_total_size(1) /* IFLA_LINKMODE */
613 + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
456} 614}
457 615
458static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, 616static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
@@ -522,6 +680,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
522 } 680 }
523 } 681 }
524 682
683 if (dev->rtnl_link_ops) {
684 if (rtnl_link_fill(skb, dev) < 0)
685 goto nla_put_failure;
686 }
687
525 return nlmsg_end(skb, nlh); 688 return nlmsg_end(skb, nlh);
526 689
527nla_put_failure: 690nla_put_failure:
@@ -553,6 +716,8 @@ cont:
553 716
554static const struct nla_policy ifla_policy[IFLA_MAX+1] = { 717static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
555 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 }, 718 [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
719 [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
720 [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
556 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) }, 721 [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
557 [IFLA_MTU] = { .type = NLA_U32 }, 722 [IFLA_MTU] = { .type = NLA_U32 },
558 [IFLA_TXQLEN] = { .type = NLA_U32 }, 723 [IFLA_TXQLEN] = { .type = NLA_U32 },
@@ -561,44 +726,16 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
561 [IFLA_LINKMODE] = { .type = NLA_U8 }, 726 [IFLA_LINKMODE] = { .type = NLA_U8 },
562}; 727};
563 728
564static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 729static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
565{ 730 [IFLA_INFO_KIND] = { .type = NLA_STRING },
566 struct ifinfomsg *ifm; 731 [IFLA_INFO_DATA] = { .type = NLA_NESTED },
567 struct net_device *dev; 732};
568 int err, send_addr_notify = 0, modified = 0;
569 struct nlattr *tb[IFLA_MAX+1];
570 char ifname[IFNAMSIZ];
571
572 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
573 if (err < 0)
574 goto errout;
575
576 if (tb[IFLA_IFNAME])
577 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
578 else
579 ifname[0] = '\0';
580
581 err = -EINVAL;
582 ifm = nlmsg_data(nlh);
583 if (ifm->ifi_index > 0)
584 dev = dev_get_by_index(ifm->ifi_index);
585 else if (tb[IFLA_IFNAME])
586 dev = dev_get_by_name(ifname);
587 else
588 goto errout;
589
590 if (dev == NULL) {
591 err = -ENODEV;
592 goto errout;
593 }
594
595 if (tb[IFLA_ADDRESS] &&
596 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
597 goto errout_dev;
598 733
599 if (tb[IFLA_BROADCAST] && 734static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
600 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) 735 struct nlattr **tb, char *ifname, int modified)
601 goto errout_dev; 736{
737 int send_addr_notify = 0;
738 int err;
602 739
603 if (tb[IFLA_MAP]) { 740 if (tb[IFLA_MAP]) {
604 struct rtnl_link_ifmap *u_map; 741 struct rtnl_link_ifmap *u_map;
@@ -606,12 +743,12 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
606 743
607 if (!dev->set_config) { 744 if (!dev->set_config) {
608 err = -EOPNOTSUPP; 745 err = -EOPNOTSUPP;
609 goto errout_dev; 746 goto errout;
610 } 747 }
611 748
612 if (!netif_device_present(dev)) { 749 if (!netif_device_present(dev)) {
613 err = -ENODEV; 750 err = -ENODEV;
614 goto errout_dev; 751 goto errout;
615 } 752 }
616 753
617 u_map = nla_data(tb[IFLA_MAP]); 754 u_map = nla_data(tb[IFLA_MAP]);
@@ -624,7 +761,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
624 761
625 err = dev->set_config(dev, &k_map); 762 err = dev->set_config(dev, &k_map);
626 if (err < 0) 763 if (err < 0)
627 goto errout_dev; 764 goto errout;
628 765
629 modified = 1; 766 modified = 1;
630 } 767 }
@@ -635,19 +772,19 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
635 772
636 if (!dev->set_mac_address) { 773 if (!dev->set_mac_address) {
637 err = -EOPNOTSUPP; 774 err = -EOPNOTSUPP;
638 goto errout_dev; 775 goto errout;
639 } 776 }
640 777
641 if (!netif_device_present(dev)) { 778 if (!netif_device_present(dev)) {
642 err = -ENODEV; 779 err = -ENODEV;
643 goto errout_dev; 780 goto errout;
644 } 781 }
645 782
646 len = sizeof(sa_family_t) + dev->addr_len; 783 len = sizeof(sa_family_t) + dev->addr_len;
647 sa = kmalloc(len, GFP_KERNEL); 784 sa = kmalloc(len, GFP_KERNEL);
648 if (!sa) { 785 if (!sa) {
649 err = -ENOMEM; 786 err = -ENOMEM;
650 goto errout_dev; 787 goto errout;
651 } 788 }
652 sa->sa_family = dev->type; 789 sa->sa_family = dev->type;
653 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), 790 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
@@ -655,7 +792,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
655 err = dev->set_mac_address(dev, sa); 792 err = dev->set_mac_address(dev, sa);
656 kfree(sa); 793 kfree(sa);
657 if (err) 794 if (err)
658 goto errout_dev; 795 goto errout;
659 send_addr_notify = 1; 796 send_addr_notify = 1;
660 modified = 1; 797 modified = 1;
661 } 798 }
@@ -663,7 +800,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
663 if (tb[IFLA_MTU]) { 800 if (tb[IFLA_MTU]) {
664 err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU])); 801 err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
665 if (err < 0) 802 if (err < 0)
666 goto errout_dev; 803 goto errout;
667 modified = 1; 804 modified = 1;
668 } 805 }
669 806
@@ -675,7 +812,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
675 if (ifm->ifi_index > 0 && ifname[0]) { 812 if (ifm->ifi_index > 0 && ifname[0]) {
676 err = dev_change_name(dev, ifname); 813 err = dev_change_name(dev, ifname);
677 if (err < 0) 814 if (err < 0)
678 goto errout_dev; 815 goto errout;
679 modified = 1; 816 modified = 1;
680 } 817 }
681 818
@@ -684,7 +821,6 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
684 send_addr_notify = 1; 821 send_addr_notify = 1;
685 } 822 }
686 823
687
688 if (ifm->ifi_flags || ifm->ifi_change) { 824 if (ifm->ifi_flags || ifm->ifi_change) {
689 unsigned int flags = ifm->ifi_flags; 825 unsigned int flags = ifm->ifi_flags;
690 826
@@ -712,7 +848,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
712 848
713 err = 0; 849 err = 0;
714 850
715errout_dev: 851errout:
716 if (err < 0 && modified && net_ratelimit()) 852 if (err < 0 && modified && net_ratelimit())
717 printk(KERN_WARNING "A link change request failed with " 853 printk(KERN_WARNING "A link change request failed with "
718 "some changes comitted already. Interface %s may " 854 "some changes comitted already. Interface %s may "
@@ -721,12 +857,239 @@ errout_dev:
721 857
722 if (send_addr_notify) 858 if (send_addr_notify)
723 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 859 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
860 return err;
861}
862
863static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
864{
865 struct ifinfomsg *ifm;
866 struct net_device *dev;
867 int err;
868 struct nlattr *tb[IFLA_MAX+1];
869 char ifname[IFNAMSIZ];
870
871 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
872 if (err < 0)
873 goto errout;
874
875 if (tb[IFLA_IFNAME])
876 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
877 else
878 ifname[0] = '\0';
879
880 err = -EINVAL;
881 ifm = nlmsg_data(nlh);
882 if (ifm->ifi_index > 0)
883 dev = dev_get_by_index(ifm->ifi_index);
884 else if (tb[IFLA_IFNAME])
885 dev = dev_get_by_name(ifname);
886 else
887 goto errout;
888
889 if (dev == NULL) {
890 err = -ENODEV;
891 goto errout;
892 }
724 893
894 if (tb[IFLA_ADDRESS] &&
895 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
896 goto errout_dev;
897
898 if (tb[IFLA_BROADCAST] &&
899 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
900 goto errout_dev;
901
902 err = do_setlink(dev, ifm, tb, ifname, 0);
903errout_dev:
725 dev_put(dev); 904 dev_put(dev);
726errout: 905errout:
727 return err; 906 return err;
728} 907}
729 908
909static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
910{
911 const struct rtnl_link_ops *ops;
912 struct net_device *dev;
913 struct ifinfomsg *ifm;
914 char ifname[IFNAMSIZ];
915 struct nlattr *tb[IFLA_MAX+1];
916 int err;
917
918 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
919 if (err < 0)
920 return err;
921
922 if (tb[IFLA_IFNAME])
923 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
924
925 ifm = nlmsg_data(nlh);
926 if (ifm->ifi_index > 0)
927 dev = __dev_get_by_index(ifm->ifi_index);
928 else if (tb[IFLA_IFNAME])
929 dev = __dev_get_by_name(ifname);
930 else
931 return -EINVAL;
932
933 if (!dev)
934 return -ENODEV;
935
936 ops = dev->rtnl_link_ops;
937 if (!ops)
938 return -EOPNOTSUPP;
939
940 ops->dellink(dev);
941 return 0;
942}
943
944static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
945{
946 const struct rtnl_link_ops *ops;
947 struct net_device *dev;
948 struct ifinfomsg *ifm;
949 char kind[MODULE_NAME_LEN];
950 char ifname[IFNAMSIZ];
951 struct nlattr *tb[IFLA_MAX+1];
952 struct nlattr *linkinfo[IFLA_INFO_MAX+1];
953 int err;
954
955replay:
956 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
957 if (err < 0)
958 return err;
959
960 if (tb[IFLA_IFNAME])
961 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
962 else
963 ifname[0] = '\0';
964
965 ifm = nlmsg_data(nlh);
966 if (ifm->ifi_index > 0)
967 dev = __dev_get_by_index(ifm->ifi_index);
968 else if (ifname[0])
969 dev = __dev_get_by_name(ifname);
970 else
971 dev = NULL;
972
973 if (tb[IFLA_LINKINFO]) {
974 err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
975 tb[IFLA_LINKINFO], ifla_info_policy);
976 if (err < 0)
977 return err;
978 } else
979 memset(linkinfo, 0, sizeof(linkinfo));
980
981 if (linkinfo[IFLA_INFO_KIND]) {
982 nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
983 ops = rtnl_link_ops_get(kind);
984 } else {
985 kind[0] = '\0';
986 ops = NULL;
987 }
988
989 if (1) {
990 struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL;
991
992 if (ops) {
993 if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
994 err = nla_parse_nested(attr, ops->maxtype,
995 linkinfo[IFLA_INFO_DATA],
996 ops->policy);
997 if (err < 0)
998 return err;
999 data = attr;
1000 }
1001 if (ops->validate) {
1002 err = ops->validate(tb, data);
1003 if (err < 0)
1004 return err;
1005 }
1006 }
1007
1008 if (dev) {
1009 int modified = 0;
1010
1011 if (nlh->nlmsg_flags & NLM_F_EXCL)
1012 return -EEXIST;
1013 if (nlh->nlmsg_flags & NLM_F_REPLACE)
1014 return -EOPNOTSUPP;
1015
1016 if (linkinfo[IFLA_INFO_DATA]) {
1017 if (!ops || ops != dev->rtnl_link_ops ||
1018 !ops->changelink)
1019 return -EOPNOTSUPP;
1020
1021 err = ops->changelink(dev, tb, data);
1022 if (err < 0)
1023 return err;
1024 modified = 1;
1025 }
1026
1027 return do_setlink(dev, ifm, tb, ifname, modified);
1028 }
1029
1030 if (!(nlh->nlmsg_flags & NLM_F_CREATE))
1031 return -ENODEV;
1032
1033 if (ifm->ifi_index || ifm->ifi_flags || ifm->ifi_change)
1034 return -EOPNOTSUPP;
1035 if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
1036 return -EOPNOTSUPP;
1037
1038 if (!ops) {
1039#ifdef CONFIG_KMOD
1040 if (kind[0]) {
1041 __rtnl_unlock();
1042 request_module("rtnl-link-%s", kind);
1043 rtnl_lock();
1044 ops = rtnl_link_ops_get(kind);
1045 if (ops)
1046 goto replay;
1047 }
1048#endif
1049 return -EOPNOTSUPP;
1050 }
1051
1052 if (!ifname[0])
1053 snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
1054 dev = alloc_netdev(ops->priv_size, ifname, ops->setup);
1055 if (!dev)
1056 return -ENOMEM;
1057
1058 if (strchr(dev->name, '%')) {
1059 err = dev_alloc_name(dev, dev->name);
1060 if (err < 0)
1061 goto err_free;
1062 }
1063 dev->rtnl_link_ops = ops;
1064
1065 if (tb[IFLA_MTU])
1066 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
1067 if (tb[IFLA_ADDRESS])
1068 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
1069 nla_len(tb[IFLA_ADDRESS]));
1070 if (tb[IFLA_BROADCAST])
1071 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
1072 nla_len(tb[IFLA_BROADCAST]));
1073 if (tb[IFLA_TXQLEN])
1074 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
1075 if (tb[IFLA_WEIGHT])
1076 dev->weight = nla_get_u32(tb[IFLA_WEIGHT]);
1077 if (tb[IFLA_OPERSTATE])
1078 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
1079 if (tb[IFLA_LINKMODE])
1080 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
1081
1082 if (ops->newlink)
1083 err = ops->newlink(dev, tb, data);
1084 else
1085 err = register_netdevice(dev);
1086err_free:
1087 if (err < 0)
1088 free_netdev(dev);
1089 return err;
1090 }
1091}
1092
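rtnl_newlink() drives the remaining rtnl_link_ops hooks: ops->maxtype and ops->policy validate the attributes nested under IFLA_INFO_DATA (only those reach data[]), ops->validate() rejects bad requests before any state changes, and when ops->newlink() is present it must register the device itself, since register_netdevice() above is only the fallback. A sketch extending the "sample" ops with these hooks (the IFLA_SAMPLE_* attribute space is assumed, not from this patch):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>

enum {
        IFLA_SAMPLE_UNSPEC,
        IFLA_SAMPLE_RATE,
        __IFLA_SAMPLE_MAX,
};
#define IFLA_SAMPLE_MAX (__IFLA_SAMPLE_MAX - 1)

static const struct nla_policy sample_policy[IFLA_SAMPLE_MAX + 1] = {
        [IFLA_SAMPLE_RATE]      = { .type = NLA_U32 },
};

static void sample_setup(struct net_device *dev)
{
        ether_setup(dev);
}

static int sample_validate(struct nlattr *tb[], struct nlattr *data[])
{
        /* data is NULL when no IFLA_INFO_DATA was supplied */
        if (data && data[IFLA_SAMPLE_RATE] &&
            nla_get_u32(data[IFLA_SAMPLE_RATE]) == 0)
                return -EINVAL;
        return 0;
}

static int sample_newlink(struct net_device *dev,
                          struct nlattr *tb[], struct nlattr *data[])
{
        /* consume data[] into netdev_priv(dev), then register ourselves */
        return register_netdevice(dev);
}

static int sample_changelink(struct net_device *dev,
                             struct nlattr *tb[], struct nlattr *data[])
{
        /* update private state from data[]; the generic IFLA_*
         * attributes are applied afterwards by do_setlink() */
        return 0;
}

static struct rtnl_link_ops sample_link_ops __read_mostly = {
        .kind           = "sample",
        .maxtype        = IFLA_SAMPLE_MAX,
        .policy         = sample_policy,
        .setup          = sample_setup,
        .validate       = sample_validate,
        .newlink        = sample_newlink,
        .changelink     = sample_changelink,
};
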
730static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 1093static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
731{ 1094{
732 struct ifinfomsg *ifm; 1095 struct ifinfomsg *ifm;
@@ -747,7 +1110,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
747 } else 1110 } else
748 return -EINVAL; 1111 return -EINVAL;
749 1112
750 nskb = nlmsg_new(if_nlmsg_size(), GFP_KERNEL); 1113 nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
751 if (nskb == NULL) { 1114 if (nskb == NULL) {
752 err = -ENOBUFS; 1115 err = -ENOBUFS;
753 goto errout; 1116 goto errout;
@@ -797,7 +1160,7 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
797 struct sk_buff *skb; 1160 struct sk_buff *skb;
798 int err = -ENOBUFS; 1161 int err = -ENOBUFS;
799 1162
800 skb = nlmsg_new(if_nlmsg_size(), GFP_KERNEL); 1163 skb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
801 if (skb == NULL) 1164 if (skb == NULL)
802 goto errout; 1165 goto errout;
803 1166
@@ -952,6 +1315,8 @@ void __init rtnetlink_init(void)
952 1315
953 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, rtnl_dump_ifinfo); 1316 rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, rtnl_dump_ifinfo);
954 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL); 1317 rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL);
1318 rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL);
1319 rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL);
955 1320
956 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all); 1321 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all);
957 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all); 1322 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all);
@@ -960,6 +1325,7 @@ void __init rtnetlink_init(void)
960EXPORT_SYMBOL(__rta_fill); 1325EXPORT_SYMBOL(__rta_fill);
961EXPORT_SYMBOL(rtattr_strlcpy); 1326EXPORT_SYMBOL(rtattr_strlcpy);
962EXPORT_SYMBOL(rtattr_parse); 1327EXPORT_SYMBOL(rtattr_parse);
1328EXPORT_SYMBOL(__rtattr_parse_nested_compat);
963EXPORT_SYMBOL(rtnetlink_put_metrics); 1329EXPORT_SYMBOL(rtnetlink_put_metrics);
964EXPORT_SYMBOL(rtnl_lock); 1330EXPORT_SYMBOL(rtnl_lock);
965EXPORT_SYMBOL(rtnl_trylock); 1331EXPORT_SYMBOL(rtnl_trylock);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3943c3ad9145..0583e8498f13 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -415,9 +415,11 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
415 C(csum); 415 C(csum);
416 C(local_df); 416 C(local_df);
417 n->cloned = 1; 417 n->cloned = 1;
418 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
418 n->nohdr = 0; 419 n->nohdr = 0;
419 C(pkt_type); 420 C(pkt_type);
420 C(ip_summed); 421 C(ip_summed);
422 skb_copy_queue_mapping(n, skb);
421 C(priority); 423 C(priority);
422#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) 424#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
423 C(ipvs_property); 425 C(ipvs_property);
@@ -426,6 +428,10 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
426 n->destructor = NULL; 428 n->destructor = NULL;
427 C(mark); 429 C(mark);
428 __nf_copy(n, skb); 430 __nf_copy(n, skb);
431#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
432 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
433 C(nf_trace);
434#endif
429#ifdef CONFIG_NET_SCHED 435#ifdef CONFIG_NET_SCHED
430 C(tc_index); 436 C(tc_index);
431#ifdef CONFIG_NET_CLS_ACT 437#ifdef CONFIG_NET_CLS_ACT
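skb_clone() above, and copy_skb_header() and skb_segment() below, now propagate the transmit subqueue chosen for the original skb; otherwise a clone or GSO segment would silently fall back to subqueue 0 after multiqueue classification had already happened. The helper itself is assumed to be a one-line setter along these lines (a sketch, not quoted from this patch):

/* include/linux/skbuff.h (sketch): propagate the TX subqueue choice */
static inline void skb_copy_queue_mapping(struct sk_buff *to,
                                          const struct sk_buff *from)
{
        to->queue_mapping = from->queue_mapping;
}
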
@@ -459,6 +465,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
459#endif 465#endif
460 new->sk = NULL; 466 new->sk = NULL;
461 new->dev = old->dev; 467 new->dev = old->dev;
468 skb_copy_queue_mapping(new, old);
462 new->priority = old->priority; 469 new->priority = old->priority;
463 new->protocol = old->protocol; 470 new->protocol = old->protocol;
464 new->dst = dst_clone(old->dst); 471 new->dst = dst_clone(old->dst);
@@ -482,6 +489,10 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
482 new->destructor = NULL; 489 new->destructor = NULL;
483 new->mark = old->mark; 490 new->mark = old->mark;
484 __nf_copy(new, old); 491 __nf_copy(new, old);
492#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
493 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
494 new->nf_trace = old->nf_trace;
495#endif
485#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) 496#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
486 new->ipvs_property = old->ipvs_property; 497 new->ipvs_property = old->ipvs_property;
487#endif 498#endif
@@ -676,6 +687,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
676 skb->network_header += off; 687 skb->network_header += off;
677 skb->mac_header += off; 688 skb->mac_header += off;
678 skb->cloned = 0; 689 skb->cloned = 0;
690 skb->hdr_len = 0;
679 skb->nohdr = 0; 691 skb->nohdr = 0;
680 atomic_set(&skb_shinfo(skb)->dataref, 1); 692 atomic_set(&skb_shinfo(skb)->dataref, 1);
681 return 0; 693 return 0;
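After pskb_expand_head() the skb owns a private copy of its data, so the clone bookkeeping is cleared above (cloned, nohdr, and now hdr_len, plus a fresh dataref). That bookkeeping feeds header-writability tests of the following shape, matching skb_header_cloned() in <linux/skbuff.h>: dataref packs two counters, the low half counting all references to the buffer and the high half counting payload-only (nohdr) references, so the header is exclusively ours when the difference is exactly one.

static inline int skb_header_cloned(const struct sk_buff *skb)
{
        int dataref;

        if (!skb->cloned)
                return 0;

        dataref = atomic_read(&skb_shinfo(skb)->dataref);
        dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
        return dataref != 1;
}
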
@@ -1930,6 +1942,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
1930 tail = nskb; 1942 tail = nskb;
1931 1943
1932 nskb->dev = skb->dev; 1944 nskb->dev = skb->dev;
1945 skb_copy_queue_mapping(nskb, skb);
1933 nskb->priority = skb->priority; 1946 nskb->priority = skb->priority;
1934 nskb->protocol = skb->protocol; 1947 nskb->protocol = skb->protocol;
1935 nskb->dst = dst_clone(skb->dst); 1948 nskb->dst = dst_clone(skb->dst);
diff --git a/net/core/sock.c b/net/core/sock.c
index c14ce0198d25..091032a250c7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -210,7 +210,8 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
210 return -EDOM; 210 return -EDOM;
211 211
212 if (tv.tv_sec < 0) { 212 if (tv.tv_sec < 0) {
213 static int warned = 0; 213 static int warned __read_mostly;
214
214 *timeo_p = 0; 215 *timeo_p = 0;
215 if (warned < 10 && net_ratelimit()) 216 if (warned < 10 && net_ratelimit())
216 warned++; 217 warned++;
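The warned counter is written at most ten times over the system's lifetime but consulted whenever a negative timeout is rejected, so __read_mostly groups it with other rarely-written data, keeping it off hot, frequently-dirtied cache lines. On most architectures the annotation is just a section attribute (a sketch of the common definition):

#define __read_mostly __attribute__((__section__(".data.read_mostly")))
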
@@ -1851,46 +1852,15 @@ void proto_unregister(struct proto *prot)
1851EXPORT_SYMBOL(proto_unregister); 1852EXPORT_SYMBOL(proto_unregister);
1852 1853
1853#ifdef CONFIG_PROC_FS 1854#ifdef CONFIG_PROC_FS
1854static inline struct proto *__proto_head(void)
1855{
1856 return list_entry(proto_list.next, struct proto, node);
1857}
1858
1859static inline struct proto *proto_head(void)
1860{
1861 return list_empty(&proto_list) ? NULL : __proto_head();
1862}
1863
1864static inline struct proto *proto_next(struct proto *proto)
1865{
1866 return proto->node.next == &proto_list ? NULL :
1867 list_entry(proto->node.next, struct proto, node);
1868}
1869
1870static inline struct proto *proto_get_idx(loff_t pos)
1871{
1872 struct proto *proto;
1873 loff_t i = 0;
1874
1875 list_for_each_entry(proto, &proto_list, node)
1876 if (i++ == pos)
1877 goto out;
1878
1879 proto = NULL;
1880out:
1881 return proto;
1882}
1883
1884static void *proto_seq_start(struct seq_file *seq, loff_t *pos) 1855static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
1885{ 1856{
1886 read_lock(&proto_list_lock); 1857 read_lock(&proto_list_lock);
1887 return *pos ? proto_get_idx(*pos - 1) : SEQ_START_TOKEN; 1858 return seq_list_start_head(&proto_list, *pos);
1888} 1859}
1889 1860
1890static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1861static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1891{ 1862{
1892 ++*pos; 1863 return seq_list_next(v, &proto_list, pos);
1893 return v == SEQ_START_TOKEN ? proto_head() : proto_next(v);
1894} 1864}
1895 1865
1896static void proto_seq_stop(struct seq_file *seq, void *v) 1866static void proto_seq_stop(struct seq_file *seq, void *v)
@@ -1938,7 +1908,7 @@ static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
1938 1908
1939static int proto_seq_show(struct seq_file *seq, void *v) 1909static int proto_seq_show(struct seq_file *seq, void *v)
1940{ 1910{
1941 if (v == SEQ_START_TOKEN) 1911 if (v == &proto_list)
1942 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s", 1912 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
1943 "protocol", 1913 "protocol",
1944 "size", 1914 "size",
@@ -1950,7 +1920,7 @@ static int proto_seq_show(struct seq_file *seq, void *v)
1950 "module", 1920 "module",
1951 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n"); 1921 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
1952 else 1922 else
1953 proto_seq_printf(seq, v); 1923 proto_seq_printf(seq, list_entry(v, struct proto, node));
1954 return 0; 1924 return 0;
1955} 1925}
1956 1926
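The proto_list conversion above adopts the generic seq_file list helpers in place of the hand-rolled iterators. The same pattern in isolation, for an assumed sample_list protected by sample_lock (all names illustrative): seq_list_start_head() returns the list head itself at position 0 so that ->show() can emit a header row, which is why proto_seq_show() now compares v against &proto_list.

#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

struct sample {
        struct list_head node;
        int value;
};

static LIST_HEAD(sample_list);
static DEFINE_SPINLOCK(sample_lock);

static void *sample_seq_start(struct seq_file *seq, loff_t *pos)
{
        spin_lock(&sample_lock);
        return seq_list_start_head(&sample_list, *pos);
}

static void *sample_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_list_next(v, &sample_list, pos);
}

static void sample_seq_stop(struct seq_file *seq, void *v)
{
        spin_unlock(&sample_lock);
}

static int sample_seq_show(struct seq_file *seq, void *v)
{
        if (v == &sample_list)          /* head token: print column titles */
                seq_puts(seq, "value\n");
        else
                seq_printf(seq, "%d\n",
                           list_entry(v, struct sample, node)->value);
        return 0;
}

static const struct seq_operations sample_seq_ops = {
        .start  = sample_seq_start,
        .next   = sample_seq_next,
        .stop   = sample_seq_stop,
        .show   = sample_seq_show,
};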