path: root/net/core
author		Olof Johansson <olof@lixom.net>	2012-09-12 02:22:25 -0400
committer	Olof Johansson <olof@lixom.net>	2012-09-12 02:22:25 -0400
commit		f46e374c1ea7fafce70a838f09fbd67de3e4d49f
tree		e191e5316bdb8558bd3e2b0981c715bedd225ca7 /net/core
parent		a4d3621e0d3ecf191961dc9214e4a378a7186780
parent		eb8ca943bae2b84c3fc14dfd7a908cb334465fef

Merge branch 'boards' of git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas into next/boards

* 'boards' of git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas:
  ARM: shmobile: marzen: enable thermal sensor
  ARM: shmobile: marzen: fixup regulator id for smsc911x
  ARM: shmobile: marzen: add SDHI0 support

+ sync to 3.6-rc4
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c			28
-rw-r--r--	net/core/dst.c			10
-rw-r--r--	net/core/netpoll.c		109
-rw-r--r--	net/core/netprio_cgroup.c	30
-rw-r--r--	net/core/scm.c			4
-rw-r--r--	net/core/sock.c			1
6 files changed, 123 insertions(+), 59 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 0cb3fe8d8e72..83988362805e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1055,6 +1055,8 @@ rollback:
  */
 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 {
+	char *new_ifalias;
+
 	ASSERT_RTNL();
 
 	if (len >= IFALIASZ)
@@ -1068,9 +1070,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 		return 0;
 	}
 
-	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
-	if (!dev->ifalias)
+	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
+	if (!new_ifalias)
 		return -ENOMEM;
+	dev->ifalias = new_ifalias;
 
 	strlcpy(dev->ifalias, alias, len+1);
 	return len;
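
The hunk above fixes a classic realloc-family leak: writing krealloc()'s
result straight back into dev->ifalias loses the only reference to the old
buffer when allocation fails. A minimal userspace sketch of the same
pattern, using realloc(3) and illustrative names:

	#include <errno.h>
	#include <stdlib.h>
	#include <string.h>

	static char *ifalias;	/* stands in for dev->ifalias */

	static int set_alias(const char *alias, size_t len)
	{
		char *new_ifalias;

		/* keep the old pointer reachable until realloc succeeds */
		new_ifalias = realloc(ifalias, len + 1);
		if (!new_ifalias)
			return -ENOMEM;	/* old buffer still owned, not leaked */
		ifalias = new_ifalias;

		memcpy(ifalias, alias, len);
		ifalias[len] = '\0';
		return (int)len;
	}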
@@ -1639,6 +1642,19 @@ static inline int deliver_skb(struct sk_buff *skb,
 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
+static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
+{
+	if (ptype->af_packet_priv == NULL)
+		return false;
+
+	if (ptype->id_match)
+		return ptype->id_match(ptype, skb->sk);
+	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
+		return true;
+
+	return false;
+}
+
 /*
  *	Support routine. Sends outgoing frames to any network
  *	taps currently in use.
@@ -1656,8 +1672,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 		 * they originated from - MvS (miquels@drinkel.ow.org)
 		 */
 		if ((ptype->dev == dev || !ptype->dev) &&
-		    (ptype->af_packet_priv == NULL ||
-		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
+		    (!skb_loop_sk(ptype, skb))) {
 			if (pt_prev) {
 				deliver_skb(skb2, pt_prev, skb->dev);
 				pt_prev = ptype;
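
skb_loop_sk() factors the old inline ownership test out of
dev_queue_xmit_nit() and adds an optional id_match() callback, so a packet
socket (for instance one belonging to a fanout group) can supply its own
"did I send this?" test instead of a raw pointer compare. A self-contained
sketch of that dispatch pattern, with simplified stand-in types:

	#include <stdbool.h>
	#include <stddef.h>

	struct sock_demo;

	struct packet_type_demo {
		void *af_packet_priv;	/* NULL: tap wants everything */
		bool (*id_match)(struct packet_type_demo *pt,
				 struct sock_demo *sk);
	};

	static bool loops_back_to_sender(struct packet_type_demo *pt,
					 struct sock_demo *sk)
	{
		if (!pt->af_packet_priv)
			return false;
		if (pt->id_match)			/* custom test wins */
			return pt->id_match(pt, sk);
		return pt->af_packet_priv == sk;	/* default: same socket */
	}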
@@ -2134,6 +2149,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 	__be16 protocol = skb->protocol;
 	netdev_features_t features = skb->dev->features;
 
+	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+		features &= ~NETIF_F_GSO_MASK;
+
 	if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
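
This is the enforcement half of the new per-device gso_max_segs cap: if
segmenting the skb would produce more segments than the driver says its
hardware can take in one request, the GSO feature bits are masked off and
the core segments in software instead. A standalone sketch of the check
(mask value illustrative, not the kernel's):

	#include <stdint.h>

	#define GSO_FEATURE_BITS	0x0000ffff00000000ull	/* illustrative */

	static uint64_t cap_gso_features(uint64_t features,
					 unsigned int gso_segs,
					 unsigned int gso_max_segs)
	{
		if (gso_segs > gso_max_segs)
			features &= ~GSO_FEATURE_BITS;	/* force software GSO */
		return features;
	}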
@@ -5726,6 +5744,7 @@ EXPORT_SYMBOL(netdev_refcnt_read);
 
 /**
  *	netdev_wait_allrefs - wait until all references are gone.
+ *	@dev: target net_device
  *
  *	This is called when unregistering network devices.
  *
@@ -5986,6 +6005,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	dev_net_set(dev, &init_net);
 
 	dev->gso_max_size = GSO_MAX_SIZE;
+	dev->gso_max_segs = GSO_MAX_SEGS;
 
 	INIT_LIST_HEAD(&dev->napi_list);
 	INIT_LIST_HEAD(&dev->unreg_list);
diff --git a/net/core/dst.c b/net/core/dst.c
index 069d51d29414..56d63612e1e4 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -149,7 +149,15 @@ int dst_discard(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dst_discard);
 
-const u32 dst_default_metrics[RTAX_MAX];
+const u32 dst_default_metrics[RTAX_MAX + 1] = {
+	/* This initializer is needed to force linker to place this variable
+	 * into const section. Otherwise it might end into bss section.
+	 * We really want to avoid false sharing on this variable, and catch
+	 * any writes on it.
+	 */
+	[RTAX_MAX] = 0xdeadbeef,
+};
+
 
 void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 		int initial_ref, int initial_obsolete, unsigned short flags)
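
The odd-looking [RTAX_MAX] = 0xdeadbeef element exists purely for section
placement: a const array with no initializer is all-zero data that the
toolchain may put in writable .bss, while any non-zero initializer pins it
in read-only .rodata, so a stray write faults loudly instead of silently
corrupting shared metrics. A userspace illustration (inspect placement
with nm or objdump -t; exact sections are toolchain-dependent):

	#define N 16

	const unsigned int maybe_bss[N];	/* all-zero: may land in .bss */
	const unsigned int pinned_rodata[N + 1] = {
		[N] = 0xdeadbeef,		/* non-zero: forced to .rodata */
	};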
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b4c90e42b443..346b1eb83a1f 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -26,6 +26,7 @@
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/if_vlan.h>
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <asm/unaligned.h>
@@ -54,7 +55,7 @@ static atomic_t trapped;
 		MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
-static void arp_reply(struct sk_buff *skb);
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
 
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
@@ -167,15 +168,24 @@ static void poll_napi(struct net_device *dev)
 	struct napi_struct *napi;
 	int budget = 16;
 
+	WARN_ON_ONCE(!irqs_disabled());
+
 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
+		local_irq_enable();
 		if (napi->poll_owner != smp_processor_id() &&
 		    spin_trylock(&napi->poll_lock)) {
-			budget = poll_one_napi(dev->npinfo, napi, budget);
+			rcu_read_lock_bh();
+			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
+					       napi, budget);
+			rcu_read_unlock_bh();
 			spin_unlock(&napi->poll_lock);
 
-			if (!budget)
+			if (!budget) {
+				local_irq_disable();
 				break;
+			}
 		}
+		local_irq_disable();
 	}
 }
 
@@ -185,13 +195,14 @@ static void service_arp_queue(struct netpoll_info *npi)
 		struct sk_buff *skb;
 
 		while ((skb = skb_dequeue(&npi->arp_tx)))
-			arp_reply(skb);
+			netpoll_arp_reply(skb, npi);
 	}
 }
 
 static void netpoll_poll_dev(struct net_device *dev)
 {
 	const struct net_device_ops *ops;
+	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
 
 	if (!dev || !netif_running(dev))
 		return;
@@ -206,17 +217,18 @@ static void netpoll_poll_dev(struct net_device *dev)
 	poll_napi(dev);
 
 	if (dev->flags & IFF_SLAVE) {
-		if (dev->npinfo) {
+		if (ni) {
 			struct net_device *bond_dev = dev->master;
 			struct sk_buff *skb;
-			while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
+			struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
+			while ((skb = skb_dequeue(&ni->arp_tx))) {
 				skb->dev = bond_dev;
-				skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
+				skb_queue_tail(&bond_ni->arp_tx, skb);
 			}
 		}
 	}
 
-	service_arp_queue(dev->npinfo);
+	service_arp_queue(ni);
 
 	zap_completion_queue();
 }
@@ -302,6 +314,7 @@ static int netpoll_owner_active(struct net_device *dev)
 	return 0;
 }
 
+/* call with IRQ disabled */
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 			     struct net_device *dev)
 {
@@ -309,8 +322,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	unsigned long tries;
 	const struct net_device_ops *ops = dev->netdev_ops;
 	/* It is up to the caller to keep npinfo alive. */
-	struct netpoll_info *npinfo = np->dev->npinfo;
+	struct netpoll_info *npinfo;
+
+	WARN_ON_ONCE(!irqs_disabled());
 
+	npinfo = rcu_dereference_bh(np->dev->npinfo);
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
 		__kfree_skb(skb);
 		return;
@@ -319,16 +335,22 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	/* don't get messages out of order, and no recursion */
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 		struct netdev_queue *txq;
-		unsigned long flags;
 
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
-		local_irq_save(flags);
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
 			if (__netif_tx_trylock(txq)) {
 				if (!netif_xmit_stopped(txq)) {
+					if (vlan_tx_tag_present(skb) &&
+					    !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
+						skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
+						if (unlikely(!skb))
+							break;
+						skb->vlan_tci = 0;
+					}
+
 					status = ops->ndo_start_xmit(skb, dev);
 					if (status == NETDEV_TX_OK)
 						txq_trans_update(txq);
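
The added block handles VLAN-tagged skbs on devices without hardware tag
insertion (NETIF_F_HW_VLAN_TX): __vlan_put_tag() pushes the 4-byte 802.1Q
header into the frame itself, between the source MAC and the EtherType.
A standalone sketch of that in-band insertion over a plain buffer
(illustrative helper; the caller must provide 4 bytes of tailroom):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	#define ETH_ALEN	6
	#define TPID_8021Q	0x8100

	static size_t vlan_insert_tag(uint8_t *frame, size_t len, uint16_t tci)
	{
		/* shift everything after dst+src MACs right by 4 bytes */
		memmove(frame + 2 * ETH_ALEN + 4, frame + 2 * ETH_ALEN,
			len - 2 * ETH_ALEN);
		frame[12] = TPID_8021Q >> 8;	/* TPID, network byte order */
		frame[13] = TPID_8021Q & 0xff;
		frame[14] = tci >> 8;		/* PCP/DEI/VID */
		frame[15] = tci & 0xff;
		return len + 4;
	}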
@@ -347,10 +369,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	}
 
 	WARN_ONCE(!irqs_disabled(),
-		"netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
+		"netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
 		dev->name, ops->ndo_start_xmit);
 
-		local_irq_restore(flags);
 	}
 
 	if (status != NETDEV_TX_OK) {
@@ -423,9 +444,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 }
 EXPORT_SYMBOL(netpoll_send_udp);
 
-static void arp_reply(struct sk_buff *skb)
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
-	struct netpoll_info *npinfo = skb->dev->npinfo;
 	struct arphdr *arp;
 	unsigned char *arp_ptr;
 	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
@@ -543,13 +563,12 @@ static void arp_reply(struct sk_buff *skb)
 	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 }
 
-int __netpoll_rx(struct sk_buff *skb)
+int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
 	int proto, len, ulen;
 	int hits = 0;
 	const struct iphdr *iph;
 	struct udphdr *uh;
-	struct netpoll_info *npinfo = skb->dev->npinfo;
 	struct netpoll *np, *tmp;
 
 	if (list_empty(&npinfo->rx_np))
@@ -565,6 +584,12 @@ int __netpoll_rx(struct sk_buff *skb)
 		return 1;
 	}
 
+	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+		skb = vlan_untag(skb);
+		if (unlikely(!skb))
+			goto out;
+	}
+
 	proto = ntohs(eth_hdr(skb)->h_proto);
 	if (proto != ETH_P_IP)
 		goto out;
@@ -715,7 +740,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 }
 EXPORT_SYMBOL(netpoll_parse_options);
 
-int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 {
 	struct netpoll_info *npinfo;
 	const struct net_device_ops *ops;
@@ -734,7 +759,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 	}
 
 	if (!ndev->npinfo) {
-		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+		npinfo = kmalloc(sizeof(*npinfo), gfp);
 		if (!npinfo) {
 			err = -ENOMEM;
 			goto out;
@@ -752,7 +777,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 
 	ops = np->dev->netdev_ops;
 	if (ops->ndo_netpoll_setup) {
-		err = ops->ndo_netpoll_setup(ndev, npinfo);
+		err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
 		if (err)
 			goto free_npinfo;
 	}
@@ -857,7 +882,7 @@ int netpoll_setup(struct netpoll *np)
 	refill_skbs();
 
 	rtnl_lock();
-	err = __netpoll_setup(np, ndev);
+	err = __netpoll_setup(np, ndev, GFP_KERNEL);
 	rtnl_unlock();
 
 	if (err)
@@ -878,6 +903,24 @@ static int __init netpoll_init(void)
 }
 core_initcall(netpoll_init);
 
+static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
+{
+	struct netpoll_info *npinfo =
+			container_of(rcu_head, struct netpoll_info, rcu);
+
+	skb_queue_purge(&npinfo->arp_tx);
+	skb_queue_purge(&npinfo->txq);
+
+	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
+	cancel_delayed_work(&npinfo->tx_work);
+
+	/* clean after last, unfinished work */
+	__skb_queue_purge(&npinfo->txq);
+	/* now cancel it again */
+	cancel_delayed_work(&npinfo->tx_work);
+	kfree(npinfo);
+}
+
 void __netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
@@ -903,20 +946,24 @@ void __netpoll_cleanup(struct netpoll *np)
 		ops->ndo_netpoll_cleanup(np->dev);
 
 		RCU_INIT_POINTER(np->dev->npinfo, NULL);
+		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
+	}
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-		/* avoid racing with NAPI reading npinfo */
-		synchronize_rcu_bh();
+static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
+{
+	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);
 
-		skb_queue_purge(&npinfo->arp_tx);
-		skb_queue_purge(&npinfo->txq);
-		cancel_delayed_work_sync(&npinfo->tx_work);
+	__netpoll_cleanup(np);
+	kfree(np);
+}
 
-		/* clean after last, unfinished work */
-		__skb_queue_purge(&npinfo->txq);
-		kfree(npinfo);
-	}
-}
-EXPORT_SYMBOL_GPL(__netpoll_cleanup);
+void __netpoll_free_rcu(struct netpoll *np)
+{
+	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
+}
+EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
 
 void netpoll_cleanup(struct netpoll *np)
 {
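
The cleanup rework above is the reason for the gfp_t plumbing and the RCU
annotations in the earlier hunks: __netpoll_cleanup() may now run in atomic
context, so instead of blocking in synchronize_rcu_bh() it unpublishes the
pointer and defers the actual teardown to an RCU callback. A minimal
kernel-style sketch of that defer-free idiom (stand-in structs; compiles
only in-tree):

	struct demo_info {
		struct rcu_head rcu;
	};

	struct demo_dev {
		struct demo_info __rcu *info;
	};

	static void demo_free_rcu(struct rcu_head *head)
	{
		struct demo_info *info =
			container_of(head, struct demo_info, rcu);

		kfree(info);	/* runs once all BH readers have finished */
	}

	static void demo_unpublish(struct demo_dev *dev)
	{
		struct demo_info *info =
			rcu_dereference_protected(dev->info, 1);

		RCU_INIT_POINTER(dev->info, NULL);	/* unpublish first */
		call_rcu_bh(&info->rcu, demo_free_rcu);	/* defer, never block */
	}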
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index ed0c0431fcd8..c75e3f9d060f 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -101,12 +101,10 @@ static int write_update_netdev_table(struct net_device *dev)
 	u32 max_len;
 	struct netprio_map *map;
 
-	rtnl_lock();
 	max_len = atomic_read(&max_prioidx) + 1;
 	map = rtnl_dereference(dev->priomap);
 	if (!map || map->priomap_len < max_len)
 		ret = extend_netdev_table(dev, max_len);
-	rtnl_unlock();
 
 	return ret;
 }
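
The rtnl_lock()/rtnl_unlock() pair is not dropped here but hoisted into
the caller (see the write_priomap() hunk below), so that extending the
table and storing the new priority happen inside one critical section.
A minimal pthread analogue of the lock-hoisting pattern, with
illustrative names:

	#include <pthread.h>

	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
	static int table_len;
	static int priomap[64];

	/* callee now assumes the lock is already held */
	static int update_table_locked(int needed)
	{
		if (table_len < needed)
			table_len = needed;	/* stands in for extend_netdev_table() */
		return 0;
	}

	static void write_priomap_demo(int idx, int prio)
	{
		pthread_mutex_lock(&table_lock);
		if (update_table_locked(idx + 1) == 0)
			priomap[idx] = prio;	/* same critical section: no resize race */
		pthread_mutex_unlock(&table_lock);
	}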
@@ -256,17 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
 	if (!dev)
 		goto out_free_devname;
 
+	rtnl_lock();
 	ret = write_update_netdev_table(dev);
 	if (ret < 0)
 		goto out_put_dev;
 
-	rcu_read_lock();
-	map = rcu_dereference(dev->priomap);
+	map = rtnl_dereference(dev->priomap);
 	if (map)
 		map->priomap[prioidx] = priority;
-	rcu_read_unlock();
 
 out_put_dev:
+	rtnl_unlock();
 	dev_put(dev);
 
 out_free_devname:
@@ -277,12 +275,6 @@ out_free_devname:
 void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct task_struct *p;
-	char *tmp = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL);
-
-	if (!tmp) {
-		pr_warn("Unable to attach cgrp due to alloc failure!\n");
-		return;
-	}
 
 	cgroup_taskset_for_each(p, cgrp, tset) {
 		unsigned int fd;
@@ -296,32 +288,24 @@ void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 			continue;
 		}
 
-		rcu_read_lock();
+		spin_lock(&files->file_lock);
 		fdt = files_fdtable(files);
 		for (fd = 0; fd < fdt->max_fds; fd++) {
-			char *path;
 			struct file *file;
 			struct socket *sock;
-			unsigned long s;
-			int rv, err = 0;
+			int err;
 
 			file = fcheck_files(files, fd);
 			if (!file)
 				continue;
 
-			path = d_path(&file->f_path, tmp, PAGE_SIZE);
-			rv = sscanf(path, "socket:[%lu]", &s);
-			if (rv <= 0)
-				continue;
-
 			sock = sock_from_file(file, &err);
-			if (!err)
+			if (sock)
 				sock_update_netprioidx(sock->sk, p);
 		}
-		rcu_read_unlock();
+		spin_unlock(&files->file_lock);
 		task_unlock(p);
 	}
-	kfree(tmp);
 }
 
 static struct cftype ss_files[] = {
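
Besides switching to files->file_lock, the attach path stops identifying
sockets by formatting the dentry path and sscanf()ing "socket:[%lu]" back
out of it; sock_from_file() asks the file's type directly. The closest
userspace analogue of that change is preferring fstat() over parsing
/proc/self/fd link text:

	#include <stdbool.h>
	#include <sys/stat.h>

	static bool fd_is_socket(int fd)
	{
		struct stat st;

		if (fstat(fd, &st) != 0)
			return false;
		return S_ISSOCK(st.st_mode);	/* type check, no string parsing */
	}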
diff --git a/net/core/scm.c b/net/core/scm.c
index 8f6ccfd68ef4..040cebeed45b 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -265,6 +265,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
 	for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
 	     i++, cmfptr++)
 	{
+		struct socket *sock;
 		int new_fd;
 		err = security_file_receive(fp[i]);
 		if (err)
@@ -281,6 +282,9 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
 		}
 		/* Bump the usage count and install the file. */
 		get_file(fp[i]);
+		sock = sock_from_file(fp[i], &err);
+		if (sock)
+			sock_update_netprioidx(sock->sk, current);
 		fd_install(new_fd, fp[i]);
 	}
 
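
With this hunk, a socket handed across process boundaries via SCM_RIGHTS
is re-tagged with the receiver's net_prio cgroup index at install time.
For context, a minimal userspace receiver of one passed descriptor; the
kernel-side retagging happens inside this recvmsg():

	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	static int recv_fd(int sock)
	{
		char data;
		char ctl[CMSG_SPACE(sizeof(int))];
		struct iovec iov = { .iov_base = &data, .iov_len = 1 };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = ctl, .msg_controllen = sizeof(ctl),
		};
		struct cmsghdr *cm;
		int fd = -1;

		if (recvmsg(sock, &msg, 0) < 0)
			return -1;
		cm = CMSG_FIRSTHDR(&msg);
		if (cm && cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_RIGHTS)
			memcpy(&fd, CMSG_DATA(cm), sizeof(fd));
		return fd;
	}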
diff --git a/net/core/sock.c b/net/core/sock.c
index 6b654b3ddfda..8f67ced8d6a8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1458,6 +1458,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 		} else {
 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 			sk->sk_gso_max_size = dst->dev->gso_max_size;
+			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
 		}
 	}
 }
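
Taken together, the three gso_max_segs hunks form one path: the device
advertises a per-skb segment cap (defaulting to GSO_MAX_SEGS in
alloc_netdev_mqs()), sk_setup_caps() copies it onto the socket when a
route is attached, and netif_skb_features() enforces it at transmit time.
A standalone sketch of that flow with simplified structs:

	#include <stdio.h>

	struct dev_demo  { unsigned int gso_max_segs; };
	struct sock_demo { unsigned int sk_gso_max_segs; };

	int main(void)
	{
		struct dev_demo dev = { .gso_max_segs = 100 };	/* driver's cap */
		struct sock_demo sk = { .sk_gso_max_segs = dev.gso_max_segs };
		unsigned int skb_gso_segs = 150;	/* segments this skb needs */

		/* netif_skb_features(): over the cap => software GSO fallback */
		printf("hardware GSO allowed: %s (cap %u, need %u)\n",
		       skb_gso_segs > sk.sk_gso_max_segs ? "no" : "yes",
		       sk.sk_gso_max_segs, skb_gso_segs);
		return 0;
	}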