Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c     |   1
-rw-r--r--  net/core/dev.c          | 408
-rw-r--r--  net/core/drop_monitor.c |   2
-rw-r--r--  net/core/ethtool.c      |  81
-rw-r--r--  net/core/filter.c       |   6
-rw-r--r--  net/core/gen_stats.c    |   8
-rw-r--r--  net/core/net-sysfs.c    |  55
-rw-r--r--  net/core/pktgen.c       |  16
-rw-r--r--  net/core/rtnetlink.c    | 179
-rw-r--r--  net/core/skbuff.c       |   3
-rw-r--r--  net/core/sock.c         |  58
11 files changed, 523 insertions(+), 294 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 4ade3011bb3..95c2e0840d0 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -271,6 +271,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
 	}
 
 	kfree_skb(skb);
+	atomic_inc(&sk->sk_drops);
 	sk_mem_reclaim_partial(sk);
 
 	return err;
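The hunk above makes skb_kill_datagram() account discarded datagrams in the per-socket drop counter. For orientation, a minimal sketch of how such a counter is typically read back (hypothetical helper, not part of this diff; sk_drops is an atomic_t in struct sock):

	/* Hypothetical reader for the counter bumped above; protocols expose
	 * it, for example, in their /proc/net seq_file output.
	 */
	static inline int sock_dropped(const struct sock *sk)
	{
		return atomic_read(&sk->sk_drops);
	}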
diff --git a/net/core/dev.c b/net/core/dev.c
index fe10551d367..d867522290b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -79,6 +79,7 @@
 #include <linux/cpu.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/hash.h>
 #include <linux/sched.h>
 #include <linux/mutex.h>
 #include <linux/string.h>
@@ -175,7 +176,7 @@ static struct list_head ptype_all __read_mostly; /* Taps */
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
  *
- * Pure readers hold dev_base_lock for reading.
+ * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
  *
  * Writers must hold the rtnl semaphore while they loop through the
  * dev_base_head list, and hold dev_base_lock for writing when they do the
@@ -193,18 +194,15 @@ static struct list_head ptype_all __read_mostly; /* Taps */
 DEFINE_RWLOCK(dev_base_lock);
 EXPORT_SYMBOL(dev_base_lock);
 
-#define NETDEV_HASHBITS	8
-#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
-
 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
 {
 	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
-	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
+	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
 }
 
 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 {
-	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
+	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
 }
 
 /* Device list insertion */
@@ -215,23 +213,26 @@ static int list_netdevice(struct net_device *dev)
 	ASSERT_RTNL();
 
 	write_lock_bh(&dev_base_lock);
-	list_add_tail(&dev->dev_list, &net->dev_base_head);
-	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
-	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
+	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
+	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
+	hlist_add_head_rcu(&dev->index_hlist,
+			   dev_index_hash(net, dev->ifindex));
 	write_unlock_bh(&dev_base_lock);
 	return 0;
 }
 
-/* Device list removal */
+/* Device list removal
+ * caller must respect a RCU grace period before freeing/reusing dev
+ */
 static void unlist_netdevice(struct net_device *dev)
 {
 	ASSERT_RTNL();
 
 	/* Unlink dev from the device chain */
 	write_lock_bh(&dev_base_lock);
-	list_del(&dev->dev_list);
-	hlist_del(&dev->name_hlist);
-	hlist_del(&dev->index_hlist);
+	list_del_rcu(&dev->dev_list);
+	hlist_del_rcu(&dev->name_hlist);
+	hlist_del_rcu(&dev->index_hlist);
 	write_unlock_bh(&dev_base_lock);
 }
 
@@ -587,18 +588,44 @@ __setup("netdev=", netdev_boot_setup);
 struct net_device *__dev_get_by_name(struct net *net, const char *name)
 {
 	struct hlist_node *p;
+	struct net_device *dev;
+	struct hlist_head *head = dev_name_hash(net, name);
 
-	hlist_for_each(p, dev_name_hash(net, name)) {
-		struct net_device *dev
-			= hlist_entry(p, struct net_device, name_hlist);
+	hlist_for_each_entry(dev, p, head, name_hlist)
 		if (!strncmp(dev->name, name, IFNAMSIZ))
 			return dev;
-	}
+
 	return NULL;
 }
 EXPORT_SYMBOL(__dev_get_by_name);
 
 /**
+ * dev_get_by_name_rcu - find a device by its name
+ * @net: the applicable net namespace
+ * @name: name to find
+ *
+ * Find an interface by name.
+ * If the name is found a pointer to the device is returned.
+ * If the name is not found then %NULL is returned.
+ * The reference counters are not incremented so the caller must be
+ * careful with locks. The caller must hold RCU lock.
+ */
+
+struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
+{
+	struct hlist_node *p;
+	struct net_device *dev;
+	struct hlist_head *head = dev_name_hash(net, name);
+
+	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
+		if (!strncmp(dev->name, name, IFNAMSIZ))
+			return dev;
+
+	return NULL;
+}
+EXPORT_SYMBOL(dev_get_by_name_rcu);
+
+/**
  * dev_get_by_name - find a device by its name
  * @net: the applicable net namespace
  * @name: name to find
@@ -614,11 +641,11 @@ struct net_device *dev_get_by_name(struct net *net, const char *name)
 {
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
-	dev = __dev_get_by_name(net, name);
+	rcu_read_lock();
+	dev = dev_get_by_name_rcu(net, name);
 	if (dev)
 		dev_hold(dev);
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 	return dev;
 }
 EXPORT_SYMBOL(dev_get_by_name);
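The payoff of the new _rcu lookups is on the reader side: hot paths can resolve a name or ifindex without dev_base_lock and without touching the refcount. A minimal usage sketch under the stated locking rule (illustrative only):

	int ifindex = 0;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, "eth0");
	if (dev)
		ifindex = dev->ifindex;	/* dev is valid only inside this section */
	rcu_read_unlock();
	/* to keep using dev past the unlock, call dev_hold(dev) before leaving */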
@@ -638,17 +665,42 @@ EXPORT_SYMBOL(dev_get_by_name);
 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
 {
 	struct hlist_node *p;
+	struct net_device *dev;
+	struct hlist_head *head = dev_index_hash(net, ifindex);
 
-	hlist_for_each(p, dev_index_hash(net, ifindex)) {
-		struct net_device *dev
-			= hlist_entry(p, struct net_device, index_hlist);
+	hlist_for_each_entry(dev, p, head, index_hlist)
 		if (dev->ifindex == ifindex)
 			return dev;
-	}
+
 	return NULL;
 }
 EXPORT_SYMBOL(__dev_get_by_index);
 
+/**
+ * dev_get_by_index_rcu - find a device by its ifindex
+ * @net: the applicable net namespace
+ * @ifindex: index of device
+ *
+ * Search for an interface by index. Returns %NULL if the device
+ * is not found or a pointer to the device. The device has not
+ * had its reference counter increased so the caller must be careful
+ * about locking. The caller must hold RCU lock.
+ */
+
+struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
+{
+	struct hlist_node *p;
+	struct net_device *dev;
+	struct hlist_head *head = dev_index_hash(net, ifindex);
+
+	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
+		if (dev->ifindex == ifindex)
+			return dev;
+
+	return NULL;
+}
+EXPORT_SYMBOL(dev_get_by_index_rcu);
+
 
 /**
  * dev_get_by_index - find a device by its ifindex
@@ -665,11 +717,11 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
 {
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
-	dev = __dev_get_by_index(net, ifindex);
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(net, ifindex);
 	if (dev)
 		dev_hold(dev);
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 	return dev;
 }
 EXPORT_SYMBOL(dev_get_by_index);
@@ -748,15 +800,15 @@ struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
 	struct net_device *dev, *ret;
 
 	ret = NULL;
-	read_lock(&dev_base_lock);
-	for_each_netdev(net, dev) {
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
 		if (((dev->flags ^ if_flags) & mask) == 0) {
 			dev_hold(dev);
 			ret = dev;
 			break;
 		}
 	}
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 	return ret;
 }
 EXPORT_SYMBOL(dev_get_by_flags);
@@ -935,7 +987,12 @@ rollback:
 
 	write_lock_bh(&dev_base_lock);
 	hlist_del(&dev->name_hlist);
-	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
+	write_unlock_bh(&dev_base_lock);
+
+	synchronize_rcu();
+
+	write_lock_bh(&dev_base_lock);
+	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
 	write_unlock_bh(&dev_base_lock);
 
 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
@@ -1038,9 +1095,9 @@ void dev_load(struct net *net, const char *name)
 {
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
-	dev = __dev_get_by_name(net, name);
-	read_unlock(&dev_base_lock);
+	rcu_read_lock();
+	dev = dev_get_by_name_rcu(net, name);
+	rcu_read_unlock();
 
 	if (!dev && capable(CAP_NET_ADMIN))
 		request_module("%s", name);
@@ -1701,7 +1758,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
-	int rc;
+	int rc = NETDEV_TX_OK;
 
 	if (likely(!skb->next)) {
 		if (!list_empty(&ptype_all))
@@ -1749,6 +1806,8 @@ gso:
 		nskb->next = NULL;
 		rc = ops->ndo_start_xmit(nskb, dev);
 		if (unlikely(rc != NETDEV_TX_OK)) {
+			if (rc & ~NETDEV_TX_MASK)
+				goto out_kfree_gso_skb;
 			nskb->next = skb->next;
 			skb->next = nskb;
 			return rc;
@@ -1758,11 +1817,12 @@ gso:
 		return NETDEV_TX_BUSY;
 	} while (skb->next);
 
-	skb->destructor = DEV_GSO_CB(skb)->destructor;
-
+out_kfree_gso_skb:
+	if (likely(skb->next == NULL))
+		skb->destructor = DEV_GSO_CB(skb)->destructor;
 out_kfree_skb:
 	kfree_skb(skb);
-	return NETDEV_TX_OK;
+	return rc;
 }
 
 static u32 skb_tx_hashrnd;
@@ -1789,16 +1849,43 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 }
 EXPORT_SYMBOL(skb_tx_hash);
 
+static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
+{
+	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
+		if (net_ratelimit()) {
+			WARN(1, "%s selects TX queue %d, but "
+			     "real number of TX queues is %d\n",
+			     dev->name, queue_index,
+			     dev->real_num_tx_queues);
+		}
+		return 0;
+	}
+	return queue_index;
+}
+
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
-	const struct net_device_ops *ops = dev->netdev_ops;
-	u16 queue_index = 0;
+	u16 queue_index;
+	struct sock *sk = skb->sk;
+
+	if (sk_tx_queue_recorded(sk)) {
+		queue_index = sk_tx_queue_get(sk);
+	} else {
+		const struct net_device_ops *ops = dev->netdev_ops;
+
+		if (ops->ndo_select_queue) {
+			queue_index = ops->ndo_select_queue(dev, skb);
+			queue_index = dev_cap_txqueue(dev, queue_index);
+		} else {
+			queue_index = 0;
+			if (dev->real_num_tx_queues > 1)
+				queue_index = skb_tx_hash(dev, skb);
 
-	if (ops->ndo_select_queue)
-		queue_index = ops->ndo_select_queue(dev, skb);
-	else if (dev->real_num_tx_queues > 1)
-		queue_index = skb_tx_hash(dev, skb);
+			if (sk && sk->sk_dst_cache)
+				sk_tx_queue_set(sk, queue_index);
+		}
+	}
 
 	skb_set_queue_mapping(skb, queue_index);
 	return netdev_get_tx_queue(dev, queue_index);
@@ -1935,8 +2022,8 @@ gso:
 	HARD_TX_LOCK(dev, txq, cpu);
 
 	if (!netif_tx_queue_stopped(txq)) {
-		rc = NET_XMIT_SUCCESS;
-		if (!dev_hard_start_xmit(skb, dev, txq)) {
+		rc = dev_hard_start_xmit(skb, dev, txq);
+		if (dev_xmit_complete(rc)) {
 			HARD_TX_UNLOCK(dev, txq);
 			goto out;
 		}
@@ -2292,7 +2379,7 @@ int netif_receive_skb(struct sk_buff *skb)
 	if (!skb->tstamp.tv64)
 		net_timestamp(skb);
 
-	if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
+	if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
 		return NET_RX_SUCCESS;
 
 	/* if we've gotten here through NAPI, check netpoll */
@@ -2440,7 +2527,7 @@ void napi_gro_flush(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
-int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff **pp = NULL;
 	struct packet_type *ptype;
@@ -2448,7 +2535,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
 	int same_flow;
 	int mac_len;
-	int ret;
+	enum gro_result ret;
 
 	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
@@ -2532,7 +2619,8 @@ normal:
 }
 EXPORT_SYMBOL(dev_gro_receive);
 
-static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+static gro_result_t
+__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff *p;
 
@@ -2549,24 +2637,25 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	return dev_gro_receive(napi, skb);
 }
 
-int napi_skb_finish(int ret, struct sk_buff *skb)
+gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 {
-	int err = NET_RX_SUCCESS;
-
 	switch (ret) {
 	case GRO_NORMAL:
-		return netif_receive_skb(skb);
+		if (netif_receive_skb(skb))
+			ret = GRO_DROP;
+		break;
 
 	case GRO_DROP:
-		err = NET_RX_DROP;
-		/* fall through */
-
 	case GRO_MERGED_FREE:
 		kfree_skb(skb);
 		break;
+
+	case GRO_HELD:
+	case GRO_MERGED:
+		break;
 	}
 
-	return err;
+	return ret;
 }
 EXPORT_SYMBOL(napi_skb_finish);
 
@@ -2586,7 +2675,7 @@ void skb_gro_reset_offset(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(skb_gro_reset_offset);
 
-int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	skb_gro_reset_offset(skb);
 
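Since napi_gro_receive() now returns a gro_result_t rather than an NET_RX_* int, a driver can react to the exact GRO outcome. A hypothetical driver fragment (the mydrv_* names are invented for illustration):

	/* Hypothetical RX completion handler in a NAPI driver. */
	static void mydrv_rx_skb(struct mydrv_ring *ring, struct sk_buff *skb)
	{
		if (napi_gro_receive(&ring->napi, skb) == GRO_DROP)
			ring->netdev->stats.rx_dropped++;
	}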
@@ -2605,49 +2694,41 @@ EXPORT_SYMBOL(napi_reuse_skb);
 
 struct sk_buff *napi_get_frags(struct napi_struct *napi)
 {
-	struct net_device *dev = napi->dev;
 	struct sk_buff *skb = napi->skb;
 
 	if (!skb) {
-		skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
-		if (!skb)
-			goto out;
-
-		skb_reserve(skb, NET_IP_ALIGN);
-
-		napi->skb = skb;
+		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
+		if (skb)
+			napi->skb = skb;
 	}
-
-out:
 	return skb;
 }
 EXPORT_SYMBOL(napi_get_frags);
 
-int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
+gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
+			       gro_result_t ret)
 {
-	int err = NET_RX_SUCCESS;
-
 	switch (ret) {
 	case GRO_NORMAL:
 	case GRO_HELD:
 		skb->protocol = eth_type_trans(skb, napi->dev);
 
-		if (ret == GRO_NORMAL)
-			return netif_receive_skb(skb);
-
-		skb_gro_pull(skb, -ETH_HLEN);
+		if (ret == GRO_HELD)
+			skb_gro_pull(skb, -ETH_HLEN);
+		else if (netif_receive_skb(skb))
+			ret = GRO_DROP;
 		break;
 
 	case GRO_DROP:
-		err = NET_RX_DROP;
-		/* fall through */
-
 	case GRO_MERGED_FREE:
 		napi_reuse_skb(napi, skb);
 		break;
+
+	case GRO_MERGED:
+		break;
 	}
 
-	return err;
+	return ret;
 }
 EXPORT_SYMBOL(napi_frags_finish);
 
@@ -2688,12 +2769,12 @@ out:
 }
 EXPORT_SYMBOL(napi_frags_skb);
 
-int napi_gro_frags(struct napi_struct *napi)
+gro_result_t napi_gro_frags(struct napi_struct *napi)
 {
 	struct sk_buff *skb = napi_frags_skb(napi);
 
 	if (!skb)
-		return NET_RX_DROP;
+		return GRO_DROP;
 
 	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
 }
@@ -2938,15 +3019,15 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 		return -EFAULT;
 
-	read_lock(&dev_base_lock);
-	dev = __dev_get_by_index(net, ifr.ifr_ifindex);
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
 	if (!dev) {
-		read_unlock(&dev_base_lock);
+		rcu_read_unlock();
 		return -ENODEV;
 	}
 
 	strcpy(ifr.ifr_name, dev->name);
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 
 	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 		return -EFAULT;
@@ -3016,18 +3097,18 @@ static int dev_ifconf(struct net *net, char __user *arg)
  * in detail.
  */
 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(dev_base_lock)
+	__acquires(RCU)
 {
 	struct net *net = seq_file_net(seq);
 	loff_t off;
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
+	rcu_read_lock();
 	if (!*pos)
 		return SEQ_START_TOKEN;
 
 	off = 1;
-	for_each_netdev(net, dev)
+	for_each_netdev_rcu(net, dev)
 		if (off++ == *pos)
 			return dev;
 
@@ -3036,16 +3117,18 @@ void *dev_seq_start(struct seq_file *seq, loff_t *pos)
 
 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct net *net = seq_file_net(seq);
+	struct net_device *dev = (v == SEQ_START_TOKEN) ?
+				  first_net_device(seq_file_net(seq)) :
+				  next_net_device((struct net_device *)v);
+
 	++*pos;
-	return v == SEQ_START_TOKEN ?
-		first_net_device(net) : next_net_device((struct net_device *)v);
+	return rcu_dereference(dev);
 }
 
 void dev_seq_stop(struct seq_file *seq, void *v)
-	__releases(dev_base_lock)
+	__releases(RCU)
 {
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 }
 
 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
@@ -4254,12 +4337,12 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
 EXPORT_SYMBOL(dev_set_mac_address);
 
 /*
- * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
+ * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
 */
 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
 {
 	int err;
-	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
+	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
 
 	if (!dev)
 		return -ENODEV;
@@ -4491,9 +4574,9 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 	case SIOCGIFINDEX:
 	case SIOCGIFTXQLEN:
 		dev_load(net, ifr.ifr_name);
-		read_lock(&dev_base_lock);
+		rcu_read_lock();
 		ret = dev_ifsioc_locked(net, &ifr, cmd);
-		read_unlock(&dev_base_lock);
+		rcu_read_unlock();
 		if (!ret) {
 			if (colon)
 				*colon = ':';
@@ -4636,59 +4719,76 @@ static void net_set_todo(struct net_device *dev)
 	list_add_tail(&dev->todo_list, &net_todo_list);
 }
 
-static void rollback_registered(struct net_device *dev)
+static void rollback_registered_many(struct list_head *head)
 {
+	struct net_device *dev;
+
 	BUG_ON(dev_boot_phase);
 	ASSERT_RTNL();
 
-	/* Some devices call without registering for initialization unwind. */
-	if (dev->reg_state == NETREG_UNINITIALIZED) {
-		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
-				  "was registered\n", dev->name, dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		/* Some devices call without registering
+		 * for initialization unwind.
+		 */
+		if (dev->reg_state == NETREG_UNINITIALIZED) {
+			pr_debug("unregister_netdevice: device %s/%p never "
+				 "was registered\n", dev->name, dev);
 
-		WARN_ON(1);
-		return;
-	}
+			WARN_ON(1);
+			return;
+		}
 
-	BUG_ON(dev->reg_state != NETREG_REGISTERED);
+		BUG_ON(dev->reg_state != NETREG_REGISTERED);
 
-	/* If device is running, close it first. */
-	dev_close(dev);
+		/* If device is running, close it first. */
+		dev_close(dev);
 
-	/* And unlink it from device chain. */
-	unlist_netdevice(dev);
+		/* And unlink it from device chain. */
+		unlist_netdevice(dev);
 
-	dev->reg_state = NETREG_UNREGISTERING;
+		dev->reg_state = NETREG_UNREGISTERING;
+	}
 
 	synchronize_net();
 
-	/* Shutdown queueing discipline. */
-	dev_shutdown(dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		/* Shutdown queueing discipline. */
+		dev_shutdown(dev);
 
 
-	/* Notify protocols, that we are about to destroy
-	   this device. They should clean all the things.
-	*/
-	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+		/* Notify protocols, that we are about to destroy
+		   this device. They should clean all the things.
+		*/
+		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
 
-	/*
-	 * Flush the unicast and multicast chains
-	 */
-	dev_unicast_flush(dev);
-	dev_addr_discard(dev);
+		/*
+		 * Flush the unicast and multicast chains
+		 */
+		dev_unicast_flush(dev);
+		dev_addr_discard(dev);
 
-	if (dev->netdev_ops->ndo_uninit)
-		dev->netdev_ops->ndo_uninit(dev);
+		if (dev->netdev_ops->ndo_uninit)
+			dev->netdev_ops->ndo_uninit(dev);
 
-	/* Notifier chain MUST detach us from master device. */
-	WARN_ON(dev->master);
+		/* Notifier chain MUST detach us from master device. */
+		WARN_ON(dev->master);
 
-	/* Remove entries from kobject tree */
-	netdev_unregister_kobject(dev);
+		/* Remove entries from kobject tree */
+		netdev_unregister_kobject(dev);
+	}
 
 	synchronize_net();
 
-	dev_put(dev);
+	list_for_each_entry(dev, head, unreg_list)
+		dev_put(dev);
+}
+
+static void rollback_registered(struct net_device *dev)
+{
+	LIST_HEAD(single);
+
+	list_add(&dev->unreg_list, &single);
+	rollback_registered_many(&single);
 }
 
 static void __netdev_init_queue_locks_one(struct net_device *dev,
@@ -4837,6 +4937,12 @@ int register_netdevice(struct net_device *dev)
 		dev->features |= NETIF_F_GSO;
 
 	netdev_initialize_kobject(dev);
+
+	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
+	ret = notifier_to_errno(ret);
+	if (ret)
+		goto err_uninit;
+
 	ret = netdev_register_kobject(dev);
 	if (ret)
 		goto err_uninit;
@@ -5174,6 +5280,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	netdev_init_queues(dev);
 
 	INIT_LIST_HEAD(&dev->napi_list);
+	INIT_LIST_HEAD(&dev->unreg_list);
 	dev->priv_flags = IFF_XMIT_DST_RELEASE;
 	setup(dev);
 	strcpy(dev->name, name);
@@ -5238,25 +5345,48 @@ void synchronize_net(void)
 EXPORT_SYMBOL(synchronize_net);
 
 /**
- * unregister_netdevice - remove device from the kernel
+ * unregister_netdevice_queue - remove device from the kernel
  * @dev: device
- *
+ * @head: list
+ *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
+ * If head not NULL, device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore.  You may want
 * unregister_netdev() instead of this.
 */
 
-void unregister_netdevice(struct net_device *dev)
+void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
 {
 	ASSERT_RTNL();
 
-	rollback_registered(dev);
-	/* Finish processing unregister after unlock */
-	net_set_todo(dev);
+	if (head) {
+		list_move_tail(&dev->unreg_list, head);
+	} else {
+		rollback_registered(dev);
+		/* Finish processing unregister after unlock */
+		net_set_todo(dev);
+	}
+}
+EXPORT_SYMBOL(unregister_netdevice_queue);
+
+/**
+ * unregister_netdevice_many - unregister many devices
+ * @head: list of devices
+ *
+ */
+void unregister_netdevice_many(struct list_head *head)
+{
+	struct net_device *dev;
+
+	if (!list_empty(head)) {
+		rollback_registered_many(head);
+		list_for_each_entry(dev, head, unreg_list)
+			net_set_todo(dev);
+	}
 }
-EXPORT_SYMBOL(unregister_netdevice);
+EXPORT_SYMBOL(unregister_netdevice_many);
 
 /**
 * unregister_netdev - remove device from the kernel
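unregister_netdevice_queue() and unregister_netdevice_many() exist so callers can batch teardown: the costly synchronize_net() calls in rollback_registered_many() are then paid once per batch rather than once per device. The rtnetlink.c hunk later in this diff uses exactly this pattern; a condensed sketch:

	LIST_HEAD(list_kill);

	for_each_netdev(net, dev)
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);	/* only queues the device */
	unregister_netdevice_many(&list_kill);		/* one rollback for the batch */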
@@ -5484,7 +5614,7 @@ unsigned long netdev_increment_features(unsigned long all, unsigned long one,
 		one |= NETIF_F_ALL_CSUM;
 
 	one |= all & NETIF_F_ONE_FOR_ALL;
-	all &= one | NETIF_F_LLTX | NETIF_F_GSO;
+	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
 	all |= one & mask & NETIF_F_ONE_FOR_ALL;
 
 	return all;
@@ -5583,7 +5713,7 @@ restart:
 
 		/* Delete virtual devices */
 		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
-			dev->rtnl_link_ops->dellink(dev);
+			dev->rtnl_link_ops->dellink(dev, NULL);
 			goto restart;
 		}
 
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 0a113f26bc9..b8e9d3a8688 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -41,7 +41,7 @@ static void send_dm_alert(struct work_struct *unused);
 * netlink alerts
 */
 static int trace_state = TRACE_OFF;
-static spinlock_t trace_state_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(trace_state_lock);
 
 struct per_cpu_dm_data {
 	struct work_struct dm_alert_work;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 4c12ddb5f5e..d8aee584e8d 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -198,13 +198,6 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
 		rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS);
 		if (rc >= 0)
 			info.n_priv_flags = rc;
-	} else {
-		/* code path for obsolete hooks */
-
-		if (ops->self_test_count)
-			info.testinfo_len = ops->self_test_count(dev);
-		if (ops->get_stats_count)
-			info.n_stats = ops->get_stats_count(dev);
 	}
 	if (ops->get_regs_len)
 		info.regdump_len = ops->get_regs_len(dev);
@@ -309,6 +302,26 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
 	return ret;
 }
 
+static int ethtool_reset(struct net_device *dev, char __user *useraddr)
+{
+	struct ethtool_value reset;
+	int ret;
+
+	if (!dev->ethtool_ops->reset)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&reset, useraddr, sizeof(reset)))
+		return -EFAULT;
+
+	ret = dev->ethtool_ops->reset(dev, &reset.data);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(useraddr, &reset, sizeof(reset)))
+		return -EFAULT;
+	return 0;
+}
+
 static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
 {
 	struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
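ETHTOOL_RESET is driven from userspace through the usual SIOCETHTOOL ioctl; on return, reset.data carries whatever flags the driver did not act on. A hedged userspace sketch (it assumes the ETH_RESET_ALL flag from the companion include/linux/ethtool.h change, which is not shown in this diff):

	struct ethtool_value reset = { .cmd = ETHTOOL_RESET, .data = ETH_RESET_ALL };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&reset;
	if (ioctl(sock_fd, SIOCETHTOOL, &ifr) == 0)
		printf("flags not reset: 0x%x\n", reset.data);	/* 0 means everything was reset */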
@@ -684,16 +697,10 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
 	u64 *data;
 	int ret, test_len;
 
-	if (!ops->self_test)
-		return -EOPNOTSUPP;
-	if (!ops->get_sset_count && !ops->self_test_count)
+	if (!ops->self_test || !ops->get_sset_count)
 		return -EOPNOTSUPP;
 
-	if (ops->get_sset_count)
-		test_len = ops->get_sset_count(dev, ETH_SS_TEST);
-	else
-		/* code path for obsolete hook */
-		test_len = ops->self_test_count(dev);
+	test_len = ops->get_sset_count(dev, ETH_SS_TEST);
 	if (test_len < 0)
 		return test_len;
 	WARN_ON(test_len == 0);
@@ -728,36 +735,17 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
 	u8 *data;
 	int ret;
 
-	if (!ops->get_strings)
+	if (!ops->get_strings || !ops->get_sset_count)
 		return -EOPNOTSUPP;
 
 	if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
 		return -EFAULT;
 
-	if (ops->get_sset_count) {
-		ret = ops->get_sset_count(dev, gstrings.string_set);
-		if (ret < 0)
-			return ret;
-
-		gstrings.len = ret;
-	} else {
-		/* code path for obsolete hooks */
-
-		switch (gstrings.string_set) {
-		case ETH_SS_TEST:
-			if (!ops->self_test_count)
-				return -EOPNOTSUPP;
-			gstrings.len = ops->self_test_count(dev);
-			break;
-		case ETH_SS_STATS:
-			if (!ops->get_stats_count)
-				return -EOPNOTSUPP;
-			gstrings.len = ops->get_stats_count(dev);
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
+	ret = ops->get_sset_count(dev, gstrings.string_set);
+	if (ret < 0)
+		return ret;
+
+	gstrings.len = ret;
 
 	data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
 	if (!data)
@@ -798,16 +786,10 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
 	u64 *data;
 	int ret, n_stats;
 
-	if (!ops->get_ethtool_stats)
-		return -EOPNOTSUPP;
-	if (!ops->get_sset_count && !ops->get_stats_count)
+	if (!ops->get_ethtool_stats || !ops->get_sset_count)
 		return -EOPNOTSUPP;
 
-	if (ops->get_sset_count)
-		n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
-	else
-		/* code path for obsolete hook */
-		n_stats = ops->get_stats_count(dev);
+	n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
 	if (n_stats < 0)
 		return n_stats;
 	WARN_ON(n_stats == 0);
@@ -1127,6 +1109,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	case ETHTOOL_FLASHDEV:
 		rc = ethtool_flash_device(dev, useraddr);
 		break;
+	case ETHTOOL_RESET:
+		rc = ethtool_reset(dev, useraddr);
+		break;
 	default:
 		rc = -EOPNOTSUPP;
 	}
diff --git a/net/core/filter.c b/net/core/filter.c
index d1d779ca096..08db7b9143a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -303,6 +303,12 @@ load_b:
 		case SKF_AD_IFINDEX:
 			A = skb->dev->ifindex;
 			continue;
+		case SKF_AD_MARK:
+			A = skb->mark;
+			continue;
+		case SKF_AD_QUEUE:
+			A = skb->queue_mapping;
+			continue;
 		case SKF_AD_NLATTR: {
 			struct nlattr *nla;
 
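SKF_AD_MARK and SKF_AD_QUEUE extend the classic-BPF ancillary load area, letting a socket filter key on skb->mark or the recorded TX queue. A hypothetical filter that accepts only packets carrying mark 1:

	struct sock_filter code[] = {
		/* A = skb->mark (ancillary load via the negative offset space) */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_MARK),
		/* if (A == 1) accept, else drop */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 1, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),
		BPF_STMT(BPF_RET | BPF_K, 0),
	};
	struct sock_fprog prog = { .len = 4, .filter = code };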
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 8569310268a..393b1d8618e 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -127,6 +127,7 @@ gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b)
 /**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
+ * @b: basic statistics
 * @r: rate estimator statistics
 *
 * Appends the rate estimator statistics to the top level TLV created by
@@ -136,8 +137,13 @@ gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b)
 * if the room in the socket buffer was not sufficient.
 */
 int
-gnet_stats_copy_rate_est(struct gnet_dump *d, struct gnet_stats_rate_est *r)
+gnet_stats_copy_rate_est(struct gnet_dump *d,
+			 const struct gnet_stats_basic_packed *b,
+			 struct gnet_stats_rate_est *r)
 {
+	if (b && !gen_estimator_active(b, r))
+		return 0;
+
 	if (d->compat_tc_stats) {
 		d->tc_stats.bps = r->bps;
 		d->tc_stats.pps = r->pps;
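Callers now pass the basic statistics block alongside the estimator, so the helper can check gen_estimator_active() and silently skip estimators that were never started. A sketch of the updated call-site shape (hypothetical qdisc dump; the q names are illustrative):

	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
		goto nla_put_failure;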
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 427ded84122..157645c0da7 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -130,6 +130,48 @@ static ssize_t show_carrier(struct device *dev,
 	return -EINVAL;
 }
 
+static ssize_t show_speed(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+	int ret = -EINVAL;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	if (netif_running(netdev) &&
+	    netdev->ethtool_ops &&
+	    netdev->ethtool_ops->get_settings) {
+		struct ethtool_cmd cmd = { ETHTOOL_GSET };
+
+		if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
+			ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
+	}
+	rtnl_unlock();
+	return ret;
+}
+
+static ssize_t show_duplex(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+	int ret = -EINVAL;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	if (netif_running(netdev) &&
+	    netdev->ethtool_ops &&
+	    netdev->ethtool_ops->get_settings) {
+		struct ethtool_cmd cmd = { ETHTOOL_GSET };
+
+		if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
+			ret = sprintf(buf, "%s\n", cmd.duplex ? "full" : "half");
+	}
+	rtnl_unlock();
+	return ret;
+}
+
 static ssize_t show_dormant(struct device *dev,
 			    struct device_attribute *attr, char *buf)
 {
@@ -259,6 +301,8 @@ static struct device_attribute net_class_attributes[] = {
 	__ATTR(address, S_IRUGO, show_address, NULL),
 	__ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
 	__ATTR(carrier, S_IRUGO, show_carrier, NULL),
+	__ATTR(speed, S_IRUGO, show_speed, NULL),
+	__ATTR(duplex, S_IRUGO, show_duplex, NULL),
 	__ATTR(dormant, S_IRUGO, show_dormant, NULL),
 	__ATTR(operstate, S_IRUGO, show_operstate, NULL),
 	__ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
@@ -500,12 +544,19 @@ int netdev_register_kobject(struct net_device *net)
 	dev_set_name(dev, "%s", net->name);
 
 #ifdef CONFIG_SYSFS
-	*groups++ = &netstat_group;
+	/* Allow for a device specific group */
+	if (*groups)
+		groups++;
 
+	*groups++ = &netstat_group;
 #ifdef CONFIG_WIRELESS_EXT_SYSFS
-	if (net->wireless_handlers || net->ieee80211_ptr)
+	if (net->ieee80211_ptr)
+		*groups++ = &wireless_group;
+#ifdef CONFIG_WIRELESS_EXT
+	else if (net->wireless_handlers)
 		*groups++ = &wireless_group;
 #endif
+#endif
 #endif /* CONFIG_SYSFS */
 
 	if (dev_net(net) != &init_net)
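The new attributes expose link parameters that previously required an ethtool ioctl: they appear as /sys/class/net/<iface>/speed and /sys/class/net/<iface>/duplex. A trivial userspace sketch:

	FILE *f = fopen("/sys/class/net/eth0/speed", "r");
	int speed;

	if (f) {
		if (fscanf(f, "%d", &speed) == 1)
			printf("link speed: %d Mb/s\n", speed);
		fclose(f);
	}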
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 6eb8d47cbf3..d38470a3279 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -340,6 +340,7 @@ struct pktgen_dev {
 	__u16 cur_udp_src;
 	__u16 cur_queue_map;
 	__u32 cur_pkt_size;
+	__u32 last_pkt_size;
 
 	__u8 hh[14];
 	/* = {
@@ -3434,7 +3435,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 			pkt_dev->clone_count--;	/* back out increment, OOM */
 			return;
 		}
-
+		pkt_dev->last_pkt_size = pkt_dev->skb->len;
 		pkt_dev->allocated_skbs++;
 		pkt_dev->clone_count = 0;	/* reset counter */
 	}
@@ -3446,12 +3447,14 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	txq = netdev_get_tx_queue(odev, queue_map);
 
 	__netif_tx_lock_bh(txq);
-	atomic_inc(&(pkt_dev->skb->users));
 
-	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
+	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) {
 		ret = NETDEV_TX_BUSY;
-	else
-		ret = (*xmit)(pkt_dev->skb, odev);
+		pkt_dev->last_ok = 0;
+		goto unlock;
+	}
+	atomic_inc(&(pkt_dev->skb->users));
+	ret = (*xmit)(pkt_dev->skb, odev);
 
 	switch (ret) {
 	case NETDEV_TX_OK:
@@ -3459,7 +3462,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->last_ok = 1;
 		pkt_dev->sofar++;
 		pkt_dev->seq_num++;
-		pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
+		pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
 		break;
 	default: /* Drivers are not supposed to return other values! */
 		if (net_ratelimit())
@@ -3473,6 +3476,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		atomic_dec(&(pkt_dev->skb->users));
 		pkt_dev->last_ok = 0;
 	}
+unlock:
 	__netif_tx_unlock_bh(txq);
 
 	/* If pkt_dev->count is zero, then run forever */
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index eb42873f2a3..33148a56819 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -38,7 +38,6 @@ | |||
38 | 38 | ||
39 | #include <asm/uaccess.h> | 39 | #include <asm/uaccess.h> |
40 | #include <asm/system.h> | 40 | #include <asm/system.h> |
41 | #include <asm/string.h> | ||
42 | 41 | ||
43 | #include <linux/inet.h> | 42 | #include <linux/inet.h> |
44 | #include <linux/netdevice.h> | 43 | #include <linux/netdevice.h> |
@@ -53,8 +52,7 @@ | |||
53 | #include <net/rtnetlink.h> | 52 | #include <net/rtnetlink.h> |
54 | #include <net/net_namespace.h> | 53 | #include <net/net_namespace.h> |
55 | 54 | ||
56 | struct rtnl_link | 55 | struct rtnl_link { |
57 | { | ||
58 | rtnl_doit_func doit; | 56 | rtnl_doit_func doit; |
59 | rtnl_dumpit_func dumpit; | 57 | rtnl_dumpit_func dumpit; |
60 | }; | 58 | }; |
@@ -65,6 +63,7 @@ void rtnl_lock(void) | |||
65 | { | 63 | { |
66 | mutex_lock(&rtnl_mutex); | 64 | mutex_lock(&rtnl_mutex); |
67 | } | 65 | } |
66 | EXPORT_SYMBOL(rtnl_lock); | ||
68 | 67 | ||
69 | void __rtnl_unlock(void) | 68 | void __rtnl_unlock(void) |
70 | { | 69 | { |
@@ -76,16 +75,19 @@ void rtnl_unlock(void) | |||
76 | /* This fellow will unlock it for us. */ | 75 | /* This fellow will unlock it for us. */ |
77 | netdev_run_todo(); | 76 | netdev_run_todo(); |
78 | } | 77 | } |
78 | EXPORT_SYMBOL(rtnl_unlock); | ||
79 | 79 | ||
80 | int rtnl_trylock(void) | 80 | int rtnl_trylock(void) |
81 | { | 81 | { |
82 | return mutex_trylock(&rtnl_mutex); | 82 | return mutex_trylock(&rtnl_mutex); |
83 | } | 83 | } |
84 | EXPORT_SYMBOL(rtnl_trylock); | ||
84 | 85 | ||
85 | int rtnl_is_locked(void) | 86 | int rtnl_is_locked(void) |
86 | { | 87 | { |
87 | return mutex_is_locked(&rtnl_mutex); | 88 | return mutex_is_locked(&rtnl_mutex); |
88 | } | 89 | } |
90 | EXPORT_SYMBOL(rtnl_is_locked); | ||
89 | 91 | ||
90 | static struct rtnl_link *rtnl_msg_handlers[NPROTO]; | 92 | static struct rtnl_link *rtnl_msg_handlers[NPROTO]; |
91 | 93 | ||
@@ -168,7 +170,6 @@ int __rtnl_register(int protocol, int msgtype, | |||
168 | 170 | ||
169 | return 0; | 171 | return 0; |
170 | } | 172 | } |
171 | |||
172 | EXPORT_SYMBOL_GPL(__rtnl_register); | 173 | EXPORT_SYMBOL_GPL(__rtnl_register); |
173 | 174 | ||
174 | /** | 175 | /** |
@@ -188,7 +189,6 @@ void rtnl_register(int protocol, int msgtype, | |||
188 | "protocol = %d, message type = %d\n", | 189 | "protocol = %d, message type = %d\n", |
189 | protocol, msgtype); | 190 | protocol, msgtype); |
190 | } | 191 | } |
191 | |||
192 | EXPORT_SYMBOL_GPL(rtnl_register); | 192 | EXPORT_SYMBOL_GPL(rtnl_register); |
193 | 193 | ||
194 | /** | 194 | /** |
@@ -213,7 +213,6 @@ int rtnl_unregister(int protocol, int msgtype) | |||
213 | 213 | ||
214 | return 0; | 214 | return 0; |
215 | } | 215 | } |
216 | |||
217 | EXPORT_SYMBOL_GPL(rtnl_unregister); | 216 | EXPORT_SYMBOL_GPL(rtnl_unregister); |
218 | 217 | ||
219 | /** | 218 | /** |
@@ -230,7 +229,6 @@ void rtnl_unregister_all(int protocol) | |||
230 | kfree(rtnl_msg_handlers[protocol]); | 229 | kfree(rtnl_msg_handlers[protocol]); |
231 | rtnl_msg_handlers[protocol] = NULL; | 230 | rtnl_msg_handlers[protocol] = NULL; |
232 | } | 231 | } |
233 | |||
234 | EXPORT_SYMBOL_GPL(rtnl_unregister_all); | 232 | EXPORT_SYMBOL_GPL(rtnl_unregister_all); |
235 | 233 | ||
236 | static LIST_HEAD(link_ops); | 234 | static LIST_HEAD(link_ops); |
@@ -248,12 +246,11 @@ static LIST_HEAD(link_ops); | |||
248 | int __rtnl_link_register(struct rtnl_link_ops *ops) | 246 | int __rtnl_link_register(struct rtnl_link_ops *ops) |
249 | { | 247 | { |
250 | if (!ops->dellink) | 248 | if (!ops->dellink) |
251 | ops->dellink = unregister_netdevice; | 249 | ops->dellink = unregister_netdevice_queue; |
252 | 250 | ||
253 | list_add_tail(&ops->list, &link_ops); | 251 | list_add_tail(&ops->list, &link_ops); |
254 | return 0; | 252 | return 0; |
255 | } | 253 | } |
256 | |||
257 | EXPORT_SYMBOL_GPL(__rtnl_link_register); | 254 | EXPORT_SYMBOL_GPL(__rtnl_link_register); |
258 | 255 | ||
259 | /** | 256 | /** |
@@ -271,19 +268,18 @@ int rtnl_link_register(struct rtnl_link_ops *ops) | |||
271 | rtnl_unlock(); | 268 | rtnl_unlock(); |
272 | return err; | 269 | return err; |
273 | } | 270 | } |
274 | |||
275 | EXPORT_SYMBOL_GPL(rtnl_link_register); | 271 | EXPORT_SYMBOL_GPL(rtnl_link_register); |
276 | 272 | ||
277 | static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) | 273 | static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) |
278 | { | 274 | { |
279 | struct net_device *dev; | 275 | struct net_device *dev; |
280 | restart: | 276 | LIST_HEAD(list_kill); |
277 | |||
281 | for_each_netdev(net, dev) { | 278 | for_each_netdev(net, dev) { |
282 | if (dev->rtnl_link_ops == ops) { | 279 | if (dev->rtnl_link_ops == ops) |
283 | ops->dellink(dev); | 280 | ops->dellink(dev, &list_kill); |
284 | goto restart; | ||
285 | } | ||
286 | } | 281 | } |
282 | unregister_netdevice_many(&list_kill); | ||
287 | } | 283 | } |
288 | 284 | ||
289 | void rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) | 285 | void rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops) |
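
The restart-goto walk above is replaced by collecting every matching device on a local list and tearing them all down with one unregister_netdevice_many() call, paying the expensive unregister synchronization once instead of per device. A minimal sketch of the dellink a link type now provides, assuming a hypothetical "foo" driver (only the queueing call is real API):

#include <linux/netdevice.h>

/* Hypothetical driver: instead of tearing the device down itself,
 * dellink() queues it on the caller's kill list; a NULL list (as in
 * rtnl_dellink() below) makes unregister_netdevice_queue() act at once. */
static void foo_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}
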
@@ -309,7 +305,6 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops) | |||
309 | } | 305 | } |
310 | list_del(&ops->list); | 306 | list_del(&ops->list); |
311 | } | 307 | } |
312 | |||
313 | EXPORT_SYMBOL_GPL(__rtnl_link_unregister); | 308 | EXPORT_SYMBOL_GPL(__rtnl_link_unregister); |
314 | 309 | ||
315 | /** | 310 | /** |
@@ -322,7 +317,6 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops) | |||
322 | __rtnl_link_unregister(ops); | 317 | __rtnl_link_unregister(ops); |
323 | rtnl_unlock(); | 318 | rtnl_unlock(); |
324 | } | 319 | } |
325 | |||
326 | EXPORT_SYMBOL_GPL(rtnl_link_unregister); | 320 | EXPORT_SYMBOL_GPL(rtnl_link_unregister); |
327 | 321 | ||
328 | static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind) | 322 | static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind) |
@@ -427,12 +421,13 @@ void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data | |||
427 | struct rtattr *rta; | 421 | struct rtattr *rta; |
428 | int size = RTA_LENGTH(attrlen); | 422 | int size = RTA_LENGTH(attrlen); |
429 | 423 | ||
430 | rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size)); | 424 | rta = (struct rtattr *)skb_put(skb, RTA_ALIGN(size)); |
431 | rta->rta_type = attrtype; | 425 | rta->rta_type = attrtype; |
432 | rta->rta_len = size; | 426 | rta->rta_len = size; |
433 | memcpy(RTA_DATA(rta), data, attrlen); | 427 | memcpy(RTA_DATA(rta), data, attrlen); |
434 | memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size); | 428 | memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size); |
435 | } | 429 | } |
430 | EXPORT_SYMBOL(__rta_fill); | ||
436 | 431 | ||
437 | int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo) | 432 | int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo) |
438 | { | 433 | { |
@@ -454,6 +449,7 @@ int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid) | |||
454 | 449 | ||
455 | return nlmsg_unicast(rtnl, skb, pid); | 450 | return nlmsg_unicast(rtnl, skb, pid); |
456 | } | 451 | } |
452 | EXPORT_SYMBOL(rtnl_unicast); | ||
457 | 453 | ||
458 | void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, | 454 | void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, |
459 | struct nlmsghdr *nlh, gfp_t flags) | 455 | struct nlmsghdr *nlh, gfp_t flags) |
@@ -466,6 +462,7 @@ void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, | |||
466 | 462 | ||
467 | nlmsg_notify(rtnl, skb, pid, group, report, flags); | 463 | nlmsg_notify(rtnl, skb, pid, group, report, flags); |
468 | } | 464 | } |
465 | EXPORT_SYMBOL(rtnl_notify); | ||
469 | 466 | ||
470 | void rtnl_set_sk_err(struct net *net, u32 group, int error) | 467 | void rtnl_set_sk_err(struct net *net, u32 group, int error) |
471 | { | 468 | { |
@@ -473,6 +470,7 @@ void rtnl_set_sk_err(struct net *net, u32 group, int error) | |||
473 | 470 | ||
474 | netlink_set_err(rtnl, 0, group, error); | 471 | netlink_set_err(rtnl, 0, group, error); |
475 | } | 472 | } |
473 | EXPORT_SYMBOL(rtnl_set_sk_err); | ||
476 | 474 | ||
477 | int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics) | 475 | int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics) |
478 | { | 476 | { |
@@ -501,6 +499,7 @@ nla_put_failure: | |||
501 | nla_nest_cancel(skb, mx); | 499 | nla_nest_cancel(skb, mx); |
502 | return -EMSGSIZE; | 500 | return -EMSGSIZE; |
503 | } | 501 | } |
502 | EXPORT_SYMBOL(rtnetlink_put_metrics); | ||
504 | 503 | ||
505 | int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, | 504 | int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, |
506 | u32 ts, u32 tsage, long expires, u32 error) | 505 | u32 ts, u32 tsage, long expires, u32 error) |
@@ -520,14 +519,13 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, | |||
520 | 519 | ||
521 | return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci); | 520 | return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci); |
522 | } | 521 | } |
523 | |||
524 | EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo); | 522 | EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo); |
525 | 523 | ||
526 | static void set_operstate(struct net_device *dev, unsigned char transition) | 524 | static void set_operstate(struct net_device *dev, unsigned char transition) |
527 | { | 525 | { |
528 | unsigned char operstate = dev->operstate; | 526 | unsigned char operstate = dev->operstate; |
529 | 527 | ||
530 | switch(transition) { | 528 | switch (transition) { |
531 | case IF_OPER_UP: | 529 | case IF_OPER_UP: |
532 | if ((operstate == IF_OPER_DORMANT || | 530 | if ((operstate == IF_OPER_DORMANT || |
533 | operstate == IF_OPER_UNKNOWN) && | 531 | operstate == IF_OPER_UNKNOWN) && |
@@ -682,22 +680,33 @@ nla_put_failure: | |||
682 | static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | 680 | static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) |
683 | { | 681 | { |
684 | struct net *net = sock_net(skb->sk); | 682 | struct net *net = sock_net(skb->sk); |
685 | int idx; | 683 | int h, s_h; |
686 | int s_idx = cb->args[0]; | 684 | int idx = 0, s_idx; |
687 | struct net_device *dev; | 685 | struct net_device *dev; |
688 | 686 | struct hlist_head *head; | |
689 | idx = 0; | 687 | struct hlist_node *node; |
690 | for_each_netdev(net, dev) { | 688 | |
691 | if (idx < s_idx) | 689 | s_h = cb->args[0]; |
692 | goto cont; | 690 | s_idx = cb->args[1]; |
693 | if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, | 691 | |
694 | NETLINK_CB(cb->skb).pid, | 692 | for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { |
695 | cb->nlh->nlmsg_seq, 0, NLM_F_MULTI) <= 0) | 693 | idx = 0; |
696 | break; | 694 | head = &net->dev_index_head[h]; |
695 | hlist_for_each_entry(dev, node, head, index_hlist) { | ||
696 | if (idx < s_idx) | ||
697 | goto cont; | ||
698 | if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, | ||
699 | NETLINK_CB(cb->skb).pid, | ||
700 | cb->nlh->nlmsg_seq, 0, | ||
701 | NLM_F_MULTI) <= 0) | ||
702 | goto out; | ||
697 | cont: | 703 | cont: |
698 | idx++; | 704 | idx++; |
705 | } | ||
699 | } | 706 | } |
700 | cb->args[0] = idx; | 707 | out: |
708 | cb->args[1] = idx; | ||
709 | cb->args[0] = h; | ||
701 | 710 | ||
702 | return skb->len; | 711 | return skb->len; |
703 | } | 712 | } |
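
The dump now walks the ifindex hash table directly, with cb->args[0] holding the bucket and cb->args[1] the position inside it, so an interrupted dump resumes at the right bucket with the inner cursor reset (the s_idx = 0 in the for-loop header). A hedged userspace sketch that drives this path with a raw NETLINK_ROUTE dump request (error handling trimmed):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct { struct nlmsghdr nlh; struct ifinfomsg ifm; } req = {
		.nlh = {
			.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
			.nlmsg_type  = RTM_GETLINK,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.ifm = { .ifi_family = AF_UNSPEC },
	};
	char buf[16384];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	send(fd, &req, req.nlh.nlmsg_len, 0);
	for (;;) {
		ssize_t n = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *h = (struct nlmsghdr *)buf;

		if (n <= 0)
			break;
		for (; NLMSG_OK(h, n); h = NLMSG_NEXT(h, n)) {
			struct ifinfomsg *ifm = NLMSG_DATA(h);

			if (h->nlmsg_type == NLMSG_DONE) {
				close(fd);
				return 0;
			}
			if (h->nlmsg_type == RTM_NEWLINK)
				printf("ifindex %d\n", ifm->ifi_index);
		}
	}
	close(fd);
	return 1;
}
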
@@ -717,12 +726,27 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { | |||
717 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, | 726 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, |
718 | [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, | 727 | [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, |
719 | }; | 728 | }; |
729 | EXPORT_SYMBOL(ifla_policy); | ||
720 | 730 | ||
721 | static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { | 731 | static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { |
722 | [IFLA_INFO_KIND] = { .type = NLA_STRING }, | 732 | [IFLA_INFO_KIND] = { .type = NLA_STRING }, |
723 | [IFLA_INFO_DATA] = { .type = NLA_NESTED }, | 733 | [IFLA_INFO_DATA] = { .type = NLA_NESTED }, |
724 | }; | 734 | }; |
725 | 735 | ||
736 | struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) | ||
737 | { | ||
738 | struct net *net; | ||
739 | /* Examine the link attributes and figure out which | ||
740 | * network namespace we are talking about. | ||
741 | */ | ||
742 | if (tb[IFLA_NET_NS_PID]) | ||
743 | net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); | ||
744 | else | ||
745 | net = get_net(src_net); | ||
746 | return net; | ||
747 | } | ||
748 | EXPORT_SYMBOL(rtnl_link_get_net); | ||
749 | |||
726 | static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) | 750 | static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) |
727 | { | 751 | { |
728 | if (dev) { | 752 | if (dev) { |
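
rtnl_link_get_net() centralizes the IFLA_NET_NS_PID lookup and always returns a namespace with its refcount raised (or an ERR_PTR), so every successful call must be paired with put_net(). A hedged sketch of the caller contract, mirroring what do_setlink() does below; example_change_ns is hypothetical, and the NULL name pattern means a name clash in the target namespace fails rather than renames:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>

static int example_change_ns(struct net_device *dev, struct nlattr *tb[])
{
	struct net *net = rtnl_link_get_net(dev_net(dev), tb);
	int err;

	if (IS_ERR(net))
		return PTR_ERR(net);
	err = dev_change_net_namespace(dev, net, NULL);
	put_net(net);	/* drop the reference rtnl_link_get_net() took */
	return err;
}
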
@@ -746,8 +770,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, | |||
746 | int err; | 770 | int err; |
747 | 771 | ||
748 | if (tb[IFLA_NET_NS_PID]) { | 772 | if (tb[IFLA_NET_NS_PID]) { |
749 | struct net *net; | 773 | struct net *net = rtnl_link_get_net(dev_net(dev), tb); |
750 | net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); | ||
751 | if (IS_ERR(net)) { | 774 | if (IS_ERR(net)) { |
752 | err = PTR_ERR(net); | 775 | err = PTR_ERR(net); |
753 | goto errout; | 776 | goto errout; |
@@ -910,9 +933,9 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
910 | err = -EINVAL; | 933 | err = -EINVAL; |
911 | ifm = nlmsg_data(nlh); | 934 | ifm = nlmsg_data(nlh); |
912 | if (ifm->ifi_index > 0) | 935 | if (ifm->ifi_index > 0) |
913 | dev = dev_get_by_index(net, ifm->ifi_index); | 936 | dev = __dev_get_by_index(net, ifm->ifi_index); |
914 | else if (tb[IFLA_IFNAME]) | 937 | else if (tb[IFLA_IFNAME]) |
915 | dev = dev_get_by_name(net, ifname); | 938 | dev = __dev_get_by_name(net, ifname); |
916 | else | 939 | else |
917 | goto errout; | 940 | goto errout; |
918 | 941 | ||
@@ -921,12 +944,11 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
921 | goto errout; | 944 | goto errout; |
922 | } | 945 | } |
923 | 946 | ||
924 | if ((err = validate_linkmsg(dev, tb)) < 0) | 947 | err = validate_linkmsg(dev, tb); |
925 | goto errout_dev; | 948 | if (err < 0) |
949 | goto errout; | ||
926 | 950 | ||
927 | err = do_setlink(dev, ifm, tb, ifname, 0); | 951 | err = do_setlink(dev, ifm, tb, ifname, 0); |
928 | errout_dev: | ||
929 | dev_put(dev); | ||
930 | errout: | 952 | errout: |
931 | return err; | 953 | return err; |
932 | } | 954 | } |
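
Since rtnl_setlink() runs with the RTNL mutex held, the refcounted dev_get_by_index()/dev_put() pair is overkill; the __ variants return a pointer that stays valid as long as the lock is held, which is why the errout_dev unwind disappears. A minimal sketch of the idiom, under the stated assumption that the caller holds rtnl_lock():

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_set_mtu(struct net *net, int ifindex, int new_mtu)
{
	struct net_device *dev;

	ASSERT_RTNL();		/* __dev_get_by_index() is only safe under RTNL */
	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return -ENODEV;
	return dev_set_mtu(dev, new_mtu);	/* no dev_put() on any path */
}
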
@@ -963,12 +985,12 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
963 | if (!ops) | 985 | if (!ops) |
964 | return -EOPNOTSUPP; | 986 | return -EOPNOTSUPP; |
965 | 987 | ||
966 | ops->dellink(dev); | 988 | ops->dellink(dev, NULL); |
967 | return 0; | 989 | return 0; |
968 | } | 990 | } |
969 | 991 | ||
970 | struct net_device *rtnl_create_link(struct net *net, char *ifname, | 992 | struct net_device *rtnl_create_link(struct net *src_net, struct net *net, |
971 | const struct rtnl_link_ops *ops, struct nlattr *tb[]) | 993 | char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]) |
972 | { | 994 | { |
973 | int err; | 995 | int err; |
974 | struct net_device *dev; | 996 | struct net_device *dev; |
@@ -976,7 +998,8 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname, | |||
976 | unsigned int real_num_queues = 1; | 998 | unsigned int real_num_queues = 1; |
977 | 999 | ||
978 | if (ops->get_tx_queues) { | 1000 | if (ops->get_tx_queues) { |
979 | err = ops->get_tx_queues(net, tb, &num_queues, &real_num_queues); | 1001 | err = ops->get_tx_queues(src_net, tb, &num_queues, |
1002 | &real_num_queues); | ||
980 | if (err) | 1003 | if (err) |
981 | goto err; | 1004 | goto err; |
982 | } | 1005 | } |
@@ -985,16 +1008,16 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname, | |||
985 | if (!dev) | 1008 | if (!dev) |
986 | goto err; | 1009 | goto err; |
987 | 1010 | ||
1011 | dev_net_set(dev, net); | ||
1012 | dev->rtnl_link_ops = ops; | ||
988 | dev->real_num_tx_queues = real_num_queues; | 1013 | dev->real_num_tx_queues = real_num_queues; |
1014 | |||
989 | if (strchr(dev->name, '%')) { | 1015 | if (strchr(dev->name, '%')) { |
990 | err = dev_alloc_name(dev, dev->name); | 1016 | err = dev_alloc_name(dev, dev->name); |
991 | if (err < 0) | 1017 | if (err < 0) |
992 | goto err_free; | 1018 | goto err_free; |
993 | } | 1019 | } |
994 | 1020 | ||
995 | dev_net_set(dev, net); | ||
996 | dev->rtnl_link_ops = ops; | ||
997 | |||
998 | if (tb[IFLA_MTU]) | 1021 | if (tb[IFLA_MTU]) |
999 | dev->mtu = nla_get_u32(tb[IFLA_MTU]); | 1022 | dev->mtu = nla_get_u32(tb[IFLA_MTU]); |
1000 | if (tb[IFLA_ADDRESS]) | 1023 | if (tb[IFLA_ADDRESS]) |
@@ -1017,6 +1040,7 @@ err_free: | |||
1017 | err: | 1040 | err: |
1018 | return ERR_PTR(err); | 1041 | return ERR_PTR(err); |
1019 | } | 1042 | } |
1043 | EXPORT_SYMBOL(rtnl_create_link); | ||
1020 | 1044 | ||
1021 | static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | 1045 | static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) |
1022 | { | 1046 | { |
@@ -1050,7 +1074,8 @@ replay: | |||
1050 | else | 1074 | else |
1051 | dev = NULL; | 1075 | dev = NULL; |
1052 | 1076 | ||
1053 | if ((err = validate_linkmsg(dev, tb)) < 0) | 1077 | err = validate_linkmsg(dev, tb); |
1078 | if (err < 0) | ||
1054 | return err; | 1079 | return err; |
1055 | 1080 | ||
1056 | if (tb[IFLA_LINKINFO]) { | 1081 | if (tb[IFLA_LINKINFO]) { |
@@ -1071,6 +1096,7 @@ replay: | |||
1071 | 1096 | ||
1072 | if (1) { | 1097 | if (1) { |
1073 | struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL; | 1098 | struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL; |
1099 | struct net *dest_net; | ||
1074 | 1100 | ||
1075 | if (ops) { | 1101 | if (ops) { |
1076 | if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { | 1102 | if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) { |
@@ -1135,17 +1161,19 @@ replay: | |||
1135 | if (!ifname[0]) | 1161 | if (!ifname[0]) |
1136 | snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); | 1162 | snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); |
1137 | 1163 | ||
1138 | dev = rtnl_create_link(net, ifname, ops, tb); | 1164 | dest_net = rtnl_link_get_net(net, tb); |
1165 | dev = rtnl_create_link(net, dest_net, ifname, ops, tb); | ||
1139 | 1166 | ||
1140 | if (IS_ERR(dev)) | 1167 | if (IS_ERR(dev)) |
1141 | err = PTR_ERR(dev); | 1168 | err = PTR_ERR(dev); |
1142 | else if (ops->newlink) | 1169 | else if (ops->newlink) |
1143 | err = ops->newlink(dev, tb, data); | 1170 | err = ops->newlink(net, dev, tb, data); |
1144 | else | 1171 | else |
1145 | err = register_netdevice(dev); | 1172 | err = register_netdevice(dev); |
1146 | |||
1147 | if (err < 0 && !IS_ERR(dev)) | 1173 | if (err < 0 && !IS_ERR(dev)) |
1148 | free_netdev(dev); | 1174 | free_netdev(dev); |
1175 | |||
1176 | put_net(dest_net); | ||
1149 | return err; | 1177 | return err; |
1150 | } | 1178 | } |
1151 | } | 1179 | } |
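
ops->newlink() now receives the namespace the request originated in as its first argument, distinct from dest_net where the device itself was created; drivers use src_net to resolve attributes such as IFLA_LINK that name devices in the caller's namespace. A hedged sketch of a link type after the signature change (all "foo" names hypothetical; the matching dellink was shown earlier):

#include <linux/etherdevice.h>
#include <net/rtnetlink.h>

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);	/* hypothetical Ethernet-like link type */
}

static int foo_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	/* src_net is where the request came from, dev_net(dev) is where
	 * rtnl_create_link() placed the device; they can differ. */
	return register_netdevice(dev);
}

static struct rtnl_link_ops foo_link_ops __read_mostly = {
	.kind	 = "foo",
	.setup	 = foo_setup,
	.newlink = foo_newlink,
};
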
@@ -1154,6 +1182,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
1154 | { | 1182 | { |
1155 | struct net *net = sock_net(skb->sk); | 1183 | struct net *net = sock_net(skb->sk); |
1156 | struct ifinfomsg *ifm; | 1184 | struct ifinfomsg *ifm; |
1185 | char ifname[IFNAMSIZ]; | ||
1157 | struct nlattr *tb[IFLA_MAX+1]; | 1186 | struct nlattr *tb[IFLA_MAX+1]; |
1158 | struct net_device *dev = NULL; | 1187 | struct net_device *dev = NULL; |
1159 | struct sk_buff *nskb; | 1188 | struct sk_buff *nskb; |
@@ -1163,19 +1192,23 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
1163 | if (err < 0) | 1192 | if (err < 0) |
1164 | return err; | 1193 | return err; |
1165 | 1194 | ||
1195 | if (tb[IFLA_IFNAME]) | ||
1196 | nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); | ||
1197 | |||
1166 | ifm = nlmsg_data(nlh); | 1198 | ifm = nlmsg_data(nlh); |
1167 | if (ifm->ifi_index > 0) { | 1199 | if (ifm->ifi_index > 0) |
1168 | dev = dev_get_by_index(net, ifm->ifi_index); | 1200 | dev = __dev_get_by_index(net, ifm->ifi_index); |
1169 | if (dev == NULL) | 1201 | else if (tb[IFLA_IFNAME]) |
1170 | return -ENODEV; | 1202 | dev = __dev_get_by_name(net, ifname); |
1171 | } else | 1203 | else |
1172 | return -EINVAL; | 1204 | return -EINVAL; |
1173 | 1205 | ||
1206 | if (dev == NULL) | ||
1207 | return -ENODEV; | ||
1208 | |||
1174 | nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL); | 1209 | nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL); |
1175 | if (nskb == NULL) { | 1210 | if (nskb == NULL) |
1176 | err = -ENOBUFS; | 1211 | return -ENOBUFS; |
1177 | goto errout; | ||
1178 | } | ||
1179 | 1212 | ||
1180 | err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid, | 1213 | err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid, |
1181 | nlh->nlmsg_seq, 0, 0); | 1214 | nlh->nlmsg_seq, 0, 0); |
@@ -1183,11 +1216,8 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
1183 | /* -EMSGSIZE implies BUG in if_nlmsg_size */ | 1216 | /* -EMSGSIZE implies BUG in if_nlmsg_size */ |
1184 | WARN_ON(err == -EMSGSIZE); | 1217 | WARN_ON(err == -EMSGSIZE); |
1185 | kfree_skb(nskb); | 1218 | kfree_skb(nskb); |
1186 | goto errout; | 1219 | } else |
1187 | } | 1220 | err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid); |
1188 | err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid); | ||
1189 | errout: | ||
1190 | dev_put(dev); | ||
1191 | 1221 | ||
1192 | return err; | 1222 | return err; |
1193 | } | 1223 | } |
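
rtnl_getlink() can now resolve the target by IFLA_IFNAME as well as by ifindex, and the __ lookups plus early returns remove the dev_put()/errout unwind. A hedged userspace sketch of the new by-name query, a single (non-dump) RTM_GETLINK carrying an IFLA_IFNAME attribute; reply parsing is as in the dump example above:

#include <string.h>
#include <sys/socket.h>
#include <linux/if.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Assumes strlen(name) < IFNAMSIZ, as the kernel-side policy enforces. */
static int getlink_by_name(int fd, const char *name)
{
	struct {
		struct nlmsghdr nlh;
		struct ifinfomsg ifm;
		char attrbuf[RTA_SPACE(IFNAMSIZ)];
	} req;
	struct rtattr *rta;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nlh.nlmsg_type  = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;	/* single reply, not a dump */

	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = IFLA_IFNAME;
	rta->rta_len  = RTA_LENGTH(strlen(name) + 1);
	strcpy(RTA_DATA(rta), name);
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	return send(fd, &req, req.nlh.nlmsg_len, 0) < 0 ? -1 : 0;
}
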
@@ -1199,7 +1229,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) | |||
1199 | 1229 | ||
1200 | if (s_idx == 0) | 1230 | if (s_idx == 0) |
1201 | s_idx = 1; | 1231 | s_idx = 1; |
1202 | for (idx=1; idx<NPROTO; idx++) { | 1232 | for (idx = 1; idx < NPROTO; idx++) { |
1203 | int type = cb->nlh->nlmsg_type-RTM_BASE; | 1233 | int type = cb->nlh->nlmsg_type-RTM_BASE; |
1204 | if (idx < s_idx || idx == PF_PACKET) | 1234 | if (idx < s_idx || idx == PF_PACKET) |
1205 | continue; | 1235 | continue; |
@@ -1266,7 +1296,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
1266 | if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg))) | 1296 | if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg))) |
1267 | return 0; | 1297 | return 0; |
1268 | 1298 | ||
1269 | family = ((struct rtgenmsg*)NLMSG_DATA(nlh))->rtgen_family; | 1299 | family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family; |
1270 | if (family >= NPROTO) | 1300 | if (family >= NPROTO) |
1271 | return -EAFNOSUPPORT; | 1301 | return -EAFNOSUPPORT; |
1272 | 1302 | ||
@@ -1299,7 +1329,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
1299 | 1329 | ||
1300 | if (nlh->nlmsg_len > min_len) { | 1330 | if (nlh->nlmsg_len > min_len) { |
1301 | int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); | 1331 | int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); |
1302 | struct rtattr *attr = (void*)nlh + NLMSG_ALIGN(min_len); | 1332 | struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); |
1303 | 1333 | ||
1304 | while (RTA_OK(attr, attrlen)) { | 1334 | while (RTA_OK(attr, attrlen)) { |
1305 | unsigned flavor = attr->rta_type; | 1335 | unsigned flavor = attr->rta_type; |
@@ -1405,14 +1435,3 @@ void __init rtnetlink_init(void) | |||
1405 | rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all); | 1435 | rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all); |
1406 | } | 1436 | } |
1407 | 1437 | ||
1408 | EXPORT_SYMBOL(__rta_fill); | ||
1409 | EXPORT_SYMBOL(rtnetlink_put_metrics); | ||
1410 | EXPORT_SYMBOL(rtnl_lock); | ||
1411 | EXPORT_SYMBOL(rtnl_trylock); | ||
1412 | EXPORT_SYMBOL(rtnl_unlock); | ||
1413 | EXPORT_SYMBOL(rtnl_is_locked); | ||
1414 | EXPORT_SYMBOL(rtnl_unicast); | ||
1415 | EXPORT_SYMBOL(rtnl_notify); | ||
1416 | EXPORT_SYMBOL(rtnl_set_sk_err); | ||
1417 | EXPORT_SYMBOL(rtnl_create_link); | ||
1418 | EXPORT_SYMBOL(ifla_policy); | ||
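
This deleted block is the counterpart of the EXPORT_SYMBOL lines added next to each function earlier in the diff; kernel style keeps the export tag immediately under the definition so the two cannot drift apart:

/* preferred: the export sits where the symbol is defined ... */
void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);
/* ... not collected in a list at the bottom of the file. */
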
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 80a96166df3..941bac90748 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -493,6 +493,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size) | |||
493 | { | 493 | { |
494 | struct skb_shared_info *shinfo; | 494 | struct skb_shared_info *shinfo; |
495 | 495 | ||
496 | if (irqs_disabled()) | ||
497 | return 0; | ||
498 | |||
496 | if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) | 499 | if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) |
497 | return 0; | 500 | return 0; |
498 | 501 | ||
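
skb_recycle_check() now bails out when interrupts are disabled, before examining the skb itself, so drivers that complete transmits from hard-IRQ paths fall back to a plain free. A hedged driver-side sketch of the usual recycle pattern; foo_priv and its fields are hypothetical, and the caller is assumed to serialize access to the pool (e.g. NAPI context):

#include <linux/skbuff.h>

struct foo_priv {
	struct sk_buff_head rx_recycle;	/* hypothetical per-device pool */
	unsigned int rx_buf_size;
};

static void foo_tx_complete(struct foo_priv *priv, struct sk_buff *skb)
{
	if (skb_recycle_check(skb, priv->rx_buf_size))
		__skb_queue_head(&priv->rx_recycle, skb);	/* reuse for RX */
	else
		dev_kfree_skb(skb);
}
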
diff --git a/net/core/sock.c b/net/core/sock.c index 7626b6aacd6..76ff58d43e2 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -274,25 +274,27 @@ static void sock_disable_timestamp(struct sock *sk, int flag) | |||
274 | 274 | ||
275 | int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 275 | int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
276 | { | 276 | { |
277 | int err = 0; | 277 | int err; |
278 | int skb_len; | 278 | int skb_len; |
279 | unsigned long flags; | ||
280 | struct sk_buff_head *list = &sk->sk_receive_queue; | ||
279 | 281 | ||
280 | /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces | 282 | /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces |
281 | number of warnings when compiling with -W --ANK | 283 | number of warnings when compiling with -W --ANK |
282 | */ | 284 | */ |
283 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= | 285 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= |
284 | (unsigned)sk->sk_rcvbuf) { | 286 | (unsigned)sk->sk_rcvbuf) { |
285 | err = -ENOMEM; | 287 | atomic_inc(&sk->sk_drops); |
286 | goto out; | 288 | return -ENOMEM; |
287 | } | 289 | } |
288 | 290 | ||
289 | err = sk_filter(sk, skb); | 291 | err = sk_filter(sk, skb); |
290 | if (err) | 292 | if (err) |
291 | goto out; | 293 | return err; |
292 | 294 | ||
293 | if (!sk_rmem_schedule(sk, skb->truesize)) { | 295 | if (!sk_rmem_schedule(sk, skb->truesize)) { |
294 | err = -ENOBUFS; | 296 | atomic_inc(&sk->sk_drops); |
295 | goto out; | 297 | return -ENOBUFS; |
296 | } | 298 | } |
297 | 299 | ||
298 | skb->dev = NULL; | 300 | skb->dev = NULL; |
@@ -305,12 +307,14 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
305 | */ | 307 | */ |
306 | skb_len = skb->len; | 308 | skb_len = skb->len; |
307 | 309 | ||
308 | skb_queue_tail(&sk->sk_receive_queue, skb); | 310 | spin_lock_irqsave(&list->lock, flags); |
311 | skb->dropcount = atomic_read(&sk->sk_drops); | ||
312 | __skb_queue_tail(list, skb); | ||
313 | spin_unlock_irqrestore(&list->lock, flags); | ||
309 | 314 | ||
310 | if (!sock_flag(sk, SOCK_DEAD)) | 315 | if (!sock_flag(sk, SOCK_DEAD)) |
311 | sk->sk_data_ready(sk, skb_len); | 316 | sk->sk_data_ready(sk, skb_len); |
312 | out: | 317 | return 0; |
313 | return err; | ||
314 | } | 318 | } |
315 | EXPORT_SYMBOL(sock_queue_rcv_skb); | 319 | EXPORT_SYMBOL(sock_queue_rcv_skb); |
316 | 320 | ||
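
Drop accounting is folded into the enqueue: the receive-queue lock is taken explicitly so that skb->dropcount snapshots sk_drops atomically with the queueing, and every rejection path bumps the counter. The companion patches expose that snapshot to recvmsg() roughly like this hedged sketch (name and placement assumed):

#include <net/sock.h>

/* Sketch of the consumer side: if the socket opted in via SO_RXQ_OVFL,
 * attach the enqueue-time drop counter to the message as a cmsg. */
static void example_recv_drops(struct sock *sk, struct msghdr *msg,
			       struct sk_buff *skb)
{
	if (sock_flag(sk, SOCK_RXQ_OVFL))
		put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL,
			 sizeof(__u32), &skb->dropcount);
}
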
@@ -348,11 +352,18 @@ discard_and_relse: | |||
348 | } | 352 | } |
349 | EXPORT_SYMBOL(sk_receive_skb); | 353 | EXPORT_SYMBOL(sk_receive_skb); |
350 | 354 | ||
355 | void sk_reset_txq(struct sock *sk) | ||
356 | { | ||
357 | sk_tx_queue_clear(sk); | ||
358 | } | ||
359 | EXPORT_SYMBOL(sk_reset_txq); | ||
360 | |||
351 | struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) | 361 | struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) |
352 | { | 362 | { |
353 | struct dst_entry *dst = sk->sk_dst_cache; | 363 | struct dst_entry *dst = sk->sk_dst_cache; |
354 | 364 | ||
355 | if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { | 365 | if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { |
366 | sk_tx_queue_clear(sk); | ||
356 | sk->sk_dst_cache = NULL; | 367 | sk->sk_dst_cache = NULL; |
357 | dst_release(dst); | 368 | dst_release(dst); |
358 | return NULL; | 369 | return NULL; |
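
sk_tx_queue_clear() invalidates a new per-socket cache of the transmit queue index; it has to be cleared whenever the cached route is dropped (as here in __sk_dst_check()) because the queue choice was derived from that route. A hedged sketch of the accessors this relies on, as introduced in the companion struct-sock patch:

/* sk_tx_queue_mapping caches the TX queue picked for sk's cached dst;
 * -1 means "not cached, reselect on next transmit". */
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	sk->sk_tx_queue_mapping = tx_queue;
}

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = -1;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	return sk ? sk->sk_tx_queue_mapping : -1;
}
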
@@ -406,17 +417,18 @@ static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen) | |||
406 | if (copy_from_user(devname, optval, optlen)) | 417 | if (copy_from_user(devname, optval, optlen)) |
407 | goto out; | 418 | goto out; |
408 | 419 | ||
409 | if (devname[0] == '\0') { | 420 | index = 0; |
410 | index = 0; | 421 | if (devname[0] != '\0') { |
411 | } else { | 422 | struct net_device *dev; |
412 | struct net_device *dev = dev_get_by_name(net, devname); | ||
413 | 423 | ||
424 | rcu_read_lock(); | ||
425 | dev = dev_get_by_name_rcu(net, devname); | ||
426 | if (dev) | ||
427 | index = dev->ifindex; | ||
428 | rcu_read_unlock(); | ||
414 | ret = -ENODEV; | 429 | ret = -ENODEV; |
415 | if (!dev) | 430 | if (!dev) |
416 | goto out; | 431 | goto out; |
417 | |||
418 | index = dev->ifindex; | ||
419 | dev_put(dev); | ||
420 | } | 432 | } |
421 | 433 | ||
422 | lock_sock(sk); | 434 | lock_sock(sk); |
@@ -702,6 +714,12 @@ set_rcvbuf: | |||
702 | 714 | ||
703 | /* We implement the SO_SNDLOWAT etc to | 715 | /* We implement the SO_SNDLOWAT etc to |
704 | not be settable (1003.1g 5.3) */ | 716 | not be settable (1003.1g 5.3) */ |
717 | case SO_RXQ_OVFL: | ||
718 | if (valbool) | ||
719 | sock_set_flag(sk, SOCK_RXQ_OVFL); | ||
720 | else | ||
721 | sock_reset_flag(sk, SOCK_RXQ_OVFL); | ||
722 | break; | ||
705 | default: | 723 | default: |
706 | ret = -ENOPROTOOPT; | 724 | ret = -ENOPROTOOPT; |
707 | break; | 725 | break; |
@@ -901,6 +919,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname, | |||
901 | v.val = sk->sk_mark; | 919 | v.val = sk->sk_mark; |
902 | break; | 920 | break; |
903 | 921 | ||
922 | case SO_RXQ_OVFL: | ||
923 | v.val = !!sock_flag(sk, SOCK_RXQ_OVFL); | ||
924 | break; | ||
925 | |||
904 | default: | 926 | default: |
905 | return -ENOPROTOOPT; | 927 | return -ENOPROTOOPT; |
906 | } | 928 | } |
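
Userspace opts in with the new SO_RXQ_OVFL option and then receives the drop counter as ancillary data on each datagram (the cmsg delivery itself lands in the per-protocol recvmsg patches of this series). A hedged sketch; the fallback option value 40 is an assumption for asm-generic architectures:

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SO_RXQ_OVFL
#define SO_RXQ_OVFL 40	/* assumption: asm-generic value on this kernel */
#endif

static void read_with_drops(int fd)
{
	char data[2048];
	union {
		struct cmsghdr align;	/* keep the control buffer aligned */
		char buf[CMSG_SPACE(sizeof(uint32_t))];
	} u;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg;
	int one = 1;

	setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &one, sizeof(one));
	if (recvmsg(fd, &msg, 0) < 0)
		return;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SO_RXQ_OVFL)
			printf("drops so far: %u\n",
			       *(uint32_t *)CMSG_DATA(cmsg));
}
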
@@ -939,7 +961,8 @@ static void sock_copy(struct sock *nsk, const struct sock *osk) | |||
939 | void *sptr = nsk->sk_security; | 961 | void *sptr = nsk->sk_security; |
940 | #endif | 962 | #endif |
941 | BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) != | 963 | BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) != |
942 | sizeof(osk->sk_node) + sizeof(osk->sk_refcnt)); | 964 | sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) + |
965 | sizeof(osk->sk_tx_queue_mapping)); | ||
943 | memcpy(&nsk->sk_copy_start, &osk->sk_copy_start, | 966 | memcpy(&nsk->sk_copy_start, &osk->sk_copy_start, |
944 | osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start)); | 967 | osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start)); |
945 | #ifdef CONFIG_SECURITY_NETWORK | 968 | #ifdef CONFIG_SECURITY_NETWORK |
@@ -983,6 +1006,7 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, | |||
983 | 1006 | ||
984 | if (!try_module_get(prot->owner)) | 1007 | if (!try_module_get(prot->owner)) |
985 | goto out_free_sec; | 1008 | goto out_free_sec; |
1009 | sk_tx_queue_clear(sk); | ||
986 | } | 1010 | } |
987 | 1011 | ||
988 | return sk; | 1012 | return sk; |