author    Johan Hedberg <johan.hedberg@intel.com>    2012-02-16 07:23:04 -0500
committer Johan Hedberg <johan.hedberg@intel.com>    2012-02-16 07:25:34 -0500
commit    46479e698530b8197d601a23317b7c7654195338 (patch)
tree      710b2758ecd7d8a6ada37724c5d4c8027d5f358f /net
parent    7b99b659d90c5d421cb1867295c78a4c0c030734 (diff)
parent    ca994a36f585432458ead9133fcfe05440edbb7b (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next.git
Conflicts:
include/net/bluetooth/l2cap.h
net/bluetooth/hci_conn.c
net/bluetooth/l2cap_core.c
Diffstat (limited to 'net')
41 files changed, 229 insertions, 205 deletions
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 673728add60b..82c57069415f 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -59,8 +59,6 @@ struct cfcnfg *get_cfcnfg(struct net *net)
 {
     struct caif_net *caifn;
     caifn = net_generic(net, caif_net_id);
-    if (!caifn)
-        return NULL;
     return caifn->cfg;
 }
 EXPORT_SYMBOL(get_cfcnfg);
@@ -69,8 +67,6 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
 {
     struct caif_net *caifn;
     caifn = net_generic(net, caif_net_id);
-    if (!caifn)
-        return NULL;
     return &caifn->caifdevs;
 }
 
@@ -99,8 +95,6 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
     struct caif_device_entry *caifd;
 
     caifdevs = caif_device_list(dev_net(dev));
-    if (!caifdevs)
-        return NULL;
 
     caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
     if (!caifd)
@@ -120,8 +114,6 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
     struct caif_device_entry_list *caifdevs =
         caif_device_list(dev_net(dev));
     struct caif_device_entry *caifd;
-    if (!caifdevs)
-        return NULL;
 
     list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
         if (caifd->netdev == dev)
@@ -321,8 +313,6 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
     struct caif_device_entry_list *caifdevs;
 
     caifdevs = caif_device_list(dev_net(dev));
-    if (!cfg || !caifdevs)
-        return;
     caifd = caif_device_alloc(dev);
     if (!caifd)
         return;
@@ -374,8 +364,6 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 
     cfg = get_cfcnfg(dev_net(dev));
     caifdevs = caif_device_list(dev_net(dev));
-    if (!cfg || !caifdevs)
-        return 0;
 
     caifd = caif_get(dev);
     if (caifd == NULL && dev->type != ARPHRD_CAIF)
@@ -507,9 +495,6 @@ static struct notifier_block caif_device_notifier = {
 static int caif_init_net(struct net *net)
 {
     struct caif_net *caifn = net_generic(net, caif_net_id);
-    if (WARN_ON(!caifn))
-        return -EINVAL;
-
     INIT_LIST_HEAD(&caifn->caifdevs.list);
     mutex_init(&caifn->caifdevs.lock);
 
@@ -527,9 +512,6 @@ static void caif_exit_net(struct net *net)
     caif_device_list(net);
     struct cfcnfg *cfg = get_cfcnfg(net);
 
-    if (!cfg || !caifdevs)
-        return;
-
     rtnl_lock();
     mutex_lock(&caifdevs->lock);
 
@@ -569,7 +551,7 @@ static int __init caif_device_init(void)
 {
     int result;
 
-    result = register_pernet_device(&caif_net_ops);
+    result = register_pernet_subsys(&caif_net_ops);
 
     if (result)
         return result;
@@ -582,7 +564,7 @@ static int __init caif_device_init(void)
 
 static void __exit caif_device_exit(void)
 {
-    unregister_pernet_device(&caif_net_ops);
+    unregister_pernet_subsys(&caif_net_ops);
     unregister_netdevice_notifier(&caif_device_notifier);
     dev_remove_pack(&caif_packet_type);
 }
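The caif changes above lean on the difference between register_pernet_device() and register_pernet_subsys(): a subsys is initialized for every namespace before device notifiers can fire, so once registration succeeds net_generic() always returns a valid pointer and the deleted NULL checks are dead code. A minimal sketch of that pernet pattern as a hypothetical demo module (names and fields are illustrative, not from this patch):

    #include <linux/module.h>
    #include <net/net_namespace.h>
    #include <net/netns/generic.h>

    struct demo_net {
            int counter;                    /* hypothetical per-net state */
    };

    static int demo_net_id;

    static int __net_init demo_init_net(struct net *net)
    {
            /* .id/.size below make the core allocate this area in
             * net_alloc_generic(), so the lookup cannot fail here */
            struct demo_net *dn = net_generic(net, demo_net_id);

            dn->counter = 0;
            return 0;
    }

    static struct pernet_operations demo_net_ops = {
            .init = demo_init_net,
            .id   = &demo_net_id,
            .size = sizeof(struct demo_net),
    };

    static int __init demo_init(void)
    {
            /* subsys registration runs .init for all namespaces up front */
            return register_pernet_subsys(&demo_net_ops);
    }
    module_init(demo_init);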
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 598aafb4cb51..ba9cfd47778a 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -309,7 +309,6 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
     int err;
     struct cfctrl_link_param param;
     struct cfcnfg *cfg = get_cfcnfg(net);
-    caif_assert(cfg != NULL);
 
     rcu_read_lock();
     err = caif_connect_req_to_link_param(cfg, conn_req, &param);
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 3a94eae7abe9..b79747c4b645 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -510,10 +510,15 @@ int crush_do_rule(struct crush_map *map,
         switch (rule->steps[step].op) {
         case CRUSH_RULE_TAKE:
             w[0] = rule->steps[step].arg1;
-            if (force_pos >= 0) {
-                BUG_ON(force_context[force_pos] != w[0]);
+
+            /* find position in force_context/hierarchy */
+            while (force_pos >= 0 &&
+                   force_context[force_pos] != w[0])
                 force_pos--;
-            }
+            /* and move past it */
+            if (force_pos >= 0)
+                force_pos--;
+
             wsize = 1;
             break;
 
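In words, the TAKE step no longer assumes the forced item sits exactly at force_pos; it scans down the force_context stack for the bucket being taken and then steps past it. A stand-alone user-space sketch of that scan (hypothetical helper, not the kernel function):

    /* Returns the new force_pos: first find the bucket in the
     * force_context stack, then move past it; -1 means exhausted. */
    static int skip_forced(const int *force_context, int force_pos, int bucket)
    {
            /* find position in force_context/hierarchy */
            while (force_pos >= 0 && force_context[force_pos] != bucket)
                    force_pos--;
            /* and move past it */
            if (force_pos >= 0)
                    force_pos--;
            return force_pos;
    }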
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 85f3bc0a7062..b780cb7947dd 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -15,10 +15,9 @@ int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
                           const struct ceph_crypto_key *src)
 {
     memcpy(dst, src, sizeof(struct ceph_crypto_key));
-    dst->key = kmalloc(src->len, GFP_NOFS);
+    dst->key = kmemdup(src->key, src->len, GFP_NOFS);
     if (!dst->key)
         return -ENOMEM;
-    memcpy(dst->key, src->key, src->len);
     return 0;
 }
 
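kmemdup() is exactly the kmalloc()-plus-memcpy() it replaces, folded into one call with a single failure path. A user-space sketch of the same idiom (hypothetical memdup() helper):

    #include <stdlib.h>
    #include <string.h>

    /* allocate len bytes and copy src into them; NULL on failure */
    static void *memdup(const void *src, size_t len)
    {
            void *p = malloc(len);

            if (p)
                    memcpy(p, src, len);
            return p;
    }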
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index f4f3f58f5234..5e254055c910 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -29,8 +29,8 @@ static void __register_request(struct ceph_osd_client *osdc,
                                struct ceph_osd_request *req);
 static void __unregister_linger_request(struct ceph_osd_client *osdc,
                                         struct ceph_osd_request *req);
-static int __send_request(struct ceph_osd_client *osdc,
+static void __send_request(struct ceph_osd_client *osdc,
                           struct ceph_osd_request *req);
 
 static int op_needs_trail(int op)
 {
@@ -1022,8 +1022,8 @@ out:
 /*
  * caller should hold map_sem (for read) and request_mutex
  */
-static int __send_request(struct ceph_osd_client *osdc,
+static void __send_request(struct ceph_osd_client *osdc,
                           struct ceph_osd_request *req)
 {
     struct ceph_osd_request_head *reqhead;
 
@@ -1041,7 +1041,6 @@ static int __send_request(struct ceph_osd_client *osdc,
     ceph_msg_get(req->r_request); /* send consumes a ref */
     ceph_con_send(&req->r_osd->o_con, req->r_request);
     req->r_sent = req->r_osd->o_incarnation;
-    return 0;
 }
 
 /*
@@ -1726,17 +1725,9 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
             dout("send_request %p no up osds in pg\n", req);
             ceph_monc_request_next_osdmap(&osdc->client->monc);
         } else {
-            rc = __send_request(osdc, req);
-            if (rc) {
-                if (nofail) {
-                    dout("osdc_start_request failed send, "
-                         " will retry %lld\n", req->r_tid);
-                    rc = 0;
-                } else {
-                    __unregister_request(osdc, req);
-                }
-            }
+            __send_request(osdc, req);
         }
+        rc = 0;
     }
 
 out_unlock:
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 921aa2b4b415..369b41894527 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1311,6 +1311,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
     case ETHTOOL_GRXCSUM:
     case ETHTOOL_GTXCSUM:
     case ETHTOOL_GSG:
+    case ETHTOOL_GSSET_INFO:
     case ETHTOOL_GSTRINGS:
     case ETHTOOL_GTSO:
     case ETHTOOL_GPERMADDR:
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 0985b9b14b80..a225089df5b6 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1,4 +1,5 @@
 #include <linux/skbuff.h>
+#include <linux/export.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/if_vlan.h>
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index aefcd7acbffa..0e950fda9a0a 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -30,6 +30,20 @@ EXPORT_SYMBOL(init_net);
 
 #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
 
+static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
+
+static struct net_generic *net_alloc_generic(void)
+{
+    struct net_generic *ng;
+    size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
+
+    ng = kzalloc(generic_size, GFP_KERNEL);
+    if (ng)
+        ng->len = max_gen_ptrs;
+
+    return ng;
+}
+
 static int net_assign_generic(struct net *net, int id, void *data)
 {
     struct net_generic *ng, *old_ng;
@@ -43,8 +57,7 @@ static int net_assign_generic(struct net *net, int id, void *data)
     if (old_ng->len >= id)
         goto assign;
 
-    ng = kzalloc(sizeof(struct net_generic) +
-                 id * sizeof(void *), GFP_KERNEL);
+    ng = net_alloc_generic();
     if (ng == NULL)
         return -ENOMEM;
 
@@ -59,7 +72,6 @@ static int net_assign_generic(struct net *net, int id, void *data)
      * the old copy for kfree after a grace period.
      */
 
-    ng->len = id;
     memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
 
     rcu_assign_pointer(net->gen, ng);
@@ -161,18 +173,6 @@ out_undo:
     goto out;
 }
 
-static struct net_generic *net_alloc_generic(void)
-{
-    struct net_generic *ng;
-    size_t generic_size = sizeof(struct net_generic) +
-                          INITIAL_NET_GEN_PTRS * sizeof(void *);
-
-    ng = kzalloc(generic_size, GFP_KERNEL);
-    if (ng)
-        ng->len = INITIAL_NET_GEN_PTRS;
-
-    return ng;
-}
 
 #ifdef CONFIG_NET_NS
 static struct kmem_cache *net_cachep;
@@ -483,6 +483,7 @@ again:
             }
             return error;
         }
+        max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
     }
     error = __register_pernet_operations(list, ops);
     if (error) {
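The relocated net_alloc_generic() now sizes the allocation as offsetof(struct net_generic, ptr[max_gen_ptrs]); for a structure ending in a flexible array member, the offset of the n-th element is exactly the storage needed for n entries, header included. A user-space sketch of that sizing idiom (hypothetical types):

    #include <stddef.h>
    #include <stdlib.h>

    struct generic {
            unsigned int len;
            void *ptr[];                    /* flexible array member */
    };

    static struct generic *alloc_generic(unsigned int n)
    {
            /* offsetof(..., ptr[n]) == header size + n * sizeof(void *) */
            struct generic *g = calloc(1, offsetof(struct generic, ptr[n]));

            if (g)
                    g->len = n;
            return g;
    }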
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 65f80c7b1656..4d8ce93cd503 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -767,8 +767,8 @@ done:
     return i;
 }
 
-static unsigned long num_arg(const char __user * user_buffer,
-                             unsigned long maxlen, unsigned long *num)
+static long num_arg(const char __user *user_buffer, unsigned long maxlen,
+                    unsigned long *num)
 {
     int i;
     *num = 0;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index dbf2ddafd52d..65aebd450027 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1509,6 +1509,9 @@ errout:
 
     if (send_addr_notify)
         call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+    min_ifinfo_dump_size = max_t(u16, if_nlmsg_size(dev),
+                                 min_ifinfo_dump_size);
+
     return err;
 }
 
@@ -1960,7 +1963,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
     sz_idx = type>>2;
     kind = type&3;
 
-    if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
+    if (kind != 2 && !capable(CAP_NET_ADMIN))
         return -EPERM;
 
     if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
diff --git a/net/core/sock.c b/net/core/sock.c
index 5c5af9988f94..3e81fd2e3c75 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1827,7 +1827,7 @@ suppress_allocation:
     /* Alas. Undo changes. */
     sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
 
-    sk_memory_allocated_sub(sk, amt, parent_status);
+    sk_memory_allocated_sub(sk, amt);
 
     return 0;
 }
@@ -1840,7 +1840,7 @@ EXPORT_SYMBOL(__sk_mem_schedule);
 void __sk_mem_reclaim(struct sock *sk)
 {
     sk_memory_allocated_sub(sk,
-        sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0);
+        sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
     sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
 
     if (sk_under_memory_pressure(sk) &&
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 69975e0bcdea..1531135130db 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -108,7 +108,7 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
     if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
         return;
 
-    if (security_netlink_recv(skb, CAP_NET_ADMIN))
+    if (!capable(CAP_NET_ADMIN))
         RCV_SKB_FAIL(-EPERM);
 
     /* Eventually we might send routing messages too */
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 2e4e24476c4c..19d66cefd7d3 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -123,11 +123,14 @@ again:
                 smallest_size = tb->num_owners;
                 smallest_rover = rover;
                 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
-                    spin_unlock(&head->lock);
                     snum = smallest_rover;
-                    goto have_snum;
+                    goto tb_found;
                 }
             }
+            if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+                snum = rover;
+                goto tb_found;
+            }
             goto next;
         }
         break;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 2b53a1f7abf6..6b3ca5ba4450 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -422,6 +422,10 @@ static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
     if (register_netdevice(dev) < 0)
         goto failed_free;
 
+    /* Can use a lockless transmit, unless we generate output sequences */
+    if (!(nt->parms.o_flags & GRE_SEQ))
+        dev->features |= NETIF_F_LLTX;
+
     dev_hold(dev);
     ipgre_tunnel_link(ign, nt);
     return nt;
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index a057fe64debd..94d45e1f8882 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -431,7 +431,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
     if (type <= IPQM_BASE)
         return;
 
-    if (security_netlink_recv(skb, CAP_NET_ADMIN))
+    if (!capable(CAP_NET_ADMIN))
         RCV_SKB_FAIL(-EPERM);
 
     spin_lock_bh(&queue_lock);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 3569d8ecaeac..6afc807ee2ad 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -216,7 +216,6 @@ static const struct snmp_mib snmp4_net_list[] = {
     SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO),
     SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO),
     SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO),
-    SNMP_MIB_ITEM("TCPLoss", LINUX_MIB_TCPLOSS),
     SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT),
     SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES),
     SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES),
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 4aa7e9dc0cbb..4cb9cd2f2c39 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -814,6 +814,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 
     net->ipv4.sysctl_rt_cache_rebuild_count = 4;
 
+    tcp_init_mem(net);
     limit = nr_free_buffer_pages() / 8;
     limit = max(limit, 128UL);
     net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9bcdec3ad772..06373b4a449a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3216,6 +3216,16 @@ static int __init set_thash_entries(char *str)
 }
 __setup("thash_entries=", set_thash_entries);
 
+void tcp_init_mem(struct net *net)
+{
+    /* Set per-socket limits to no more than 1/128 the pressure threshold */
+    unsigned long limit = nr_free_buffer_pages() / 8;
+    limit = max(limit, 128UL);
+    net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
+    net->ipv4.sysctl_tcp_mem[1] = limit;
+    net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+}
+
 void __init tcp_init(void)
 {
     struct sk_buff *skb = NULL;
@@ -3276,9 +3286,9 @@ void __init tcp_init(void)
     sysctl_tcp_max_orphans = cnt / 2;
     sysctl_max_syn_backlog = max(128, cnt / 256);
 
-    /* Set per-socket limits to no more than 1/128 the pressure threshold */
-    limit = ((unsigned long)init_net.ipv4.sysctl_tcp_mem[1])
-            << (PAGE_SHIFT - 7);
+    tcp_init_mem(&init_net);
+    limit = nr_free_buffer_pages() / 8;
+    limit = max(limit, 128UL);
     max_share = min(4UL*1024*1024, limit);
 
     sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
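tcp_init_mem() derives the three tcp_mem thresholds from the free buffer pages: the pressure threshold is pages/8 (floored at 128), the low threshold is 3/4 of that, and the high threshold is twice the low one. A worked user-space example, assuming 100000 free pages:

    #include <stdio.h>

    int main(void)
    {
            unsigned long limit = 100000UL / 8;  /* nr_free_buffer_pages() / 8 */

            if (limit < 128UL)
                    limit = 128UL;
            printf("tcp_mem[0] = %lu\n", limit / 4 * 3);     /* low: 9375 */
            printf("tcp_mem[1] = %lu\n", limit);             /* pressure: 12500 */
            printf("tcp_mem[2] = %lu\n", limit / 4 * 3 * 2); /* high: 18750 */
            return 0;
    }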
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 6187eb4d1dcf..f45e1c242440 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -63,7 +63,6 @@ static inline void bictcp_reset(struct bictcp *ca)
 {
     ca->cnt = 0;
     ca->last_max_cwnd = 0;
-    ca->loss_cwnd = 0;
     ca->last_cwnd = 0;
     ca->last_time = 0;
     ca->epoch_start = 0;
@@ -72,7 +71,11 @@ static inline void bictcp_reset(struct bictcp *ca)
 
 static void bictcp_init(struct sock *sk)
 {
-    bictcp_reset(inet_csk_ca(sk));
+    struct bictcp *ca = inet_csk_ca(sk);
+
+    bictcp_reset(ca);
+    ca->loss_cwnd = 0;
+
     if (initial_ssthresh)
         tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
 }
@@ -127,7 +130,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
     }
 
     /* if in slow start or link utilization is very low */
-    if (ca->loss_cwnd == 0) {
+    if (ca->last_max_cwnd == 0) {
         if (ca->cnt > 20) /* increase cwnd 5% per RTT */
             ca->cnt = 20;
     }
@@ -185,7 +188,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
 {
     const struct tcp_sock *tp = tcp_sk(sk);
     const struct bictcp *ca = inet_csk_ca(sk);
-    return max(tp->snd_cwnd, ca->last_max_cwnd);
+    return max(tp->snd_cwnd, ca->loss_cwnd);
 }
 
 static void bictcp_state(struct sock *sk, u8 new_state)
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index f376b05cca81..a9077f441cb2 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -107,7 +107,6 @@ static inline void bictcp_reset(struct bictcp *ca)
 {
     ca->cnt = 0;
     ca->last_max_cwnd = 0;
-    ca->loss_cwnd = 0;
     ca->last_cwnd = 0;
     ca->last_time = 0;
     ca->bic_origin_point = 0;
@@ -142,7 +141,10 @@ static inline void bictcp_hystart_reset(struct sock *sk)
 
 static void bictcp_init(struct sock *sk)
 {
-    bictcp_reset(inet_csk_ca(sk));
+    struct bictcp *ca = inet_csk_ca(sk);
+
+    bictcp_reset(ca);
+    ca->loss_cwnd = 0;
 
     if (hystart)
         bictcp_hystart_reset(sk);
@@ -275,7 +277,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
      * The initial growth of cubic function may be too conservative
      * when the available bandwidth is still unknown.
      */
-    if (ca->loss_cwnd == 0 && ca->cnt > 20)
+    if (ca->last_max_cwnd == 0 && ca->cnt > 20)
         ca->cnt = 20; /* increase cwnd 5% per RTT */
 
     /* TCP Friendly */
@@ -342,7 +344,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
 {
     struct bictcp *ca = inet_csk_ca(sk);
 
-    return max(tcp_sk(sk)->snd_cwnd, ca->last_max_cwnd);
+    return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
 }
 
 static void bictcp_state(struct sock *sk, u8 new_state)
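Both the tcp_bic and tcp_cubic hunks draw the same line: last_max_cwnd is per-epoch state that bictcp_reset() may clear, while loss_cwnd has to outlive the reset so that undo_cwnd() can still report the window held at the last loss. A reduced sketch of that split (hypothetical struct, not the kernel's):

    struct bic_state {
            unsigned int last_max_cwnd;     /* cleared on every reset */
            unsigned int loss_cwnd;         /* cleared only at init */
    };

    static void reset_epoch(struct bic_state *ca)
    {
            ca->last_max_cwnd = 0;          /* loss_cwnd left intact */
    }

    static unsigned int undo_cwnd(const struct bic_state *ca,
                                  unsigned int snd_cwnd)
    {
            return snd_cwnd > ca->loss_cwnd ? snd_cwnd : ca->loss_cwnd;
    }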
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2877c3e09587..976034f82320 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -105,7 +105,6 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */
 #define FLAG_DATA_SACKED 0x20 /* New SACK. */
 #define FLAG_ECE 0x40 /* ECE in this ACK */
-#define FLAG_DATA_LOST 0x80 /* SACK detected data lossage. */
 #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */
 #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
@@ -1040,13 +1039,11 @@ static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
  * These 6 states form finite state machine, controlled by the following events:
  * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
  * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
- * 3. Loss detection event of one of three flavors:
+ * 3. Loss detection event of two flavors:
  *    A. Scoreboard estimator decided the packet is lost.
  *       A'. Reno "three dupacks" marks head of queue lost.
- *       A''. Its FACK modfication, head until snd.fack is lost.
- *    B. SACK arrives sacking data transmitted after never retransmitted
- *       hole was sent out.
- *    C. SACK arrives sacking SND.NXT at the moment, when the
+ *       A''. Its FACK modification, head until snd.fack is lost.
+ *    B. SACK arrives sacking SND.NXT at the moment, when the
  *       segment was retransmitted.
  * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
@@ -1153,7 +1150,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
 }
 
 /* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
- * Event "C". Later note: FACK people cheated me again 8), we have to account
+ * Event "B". Later note: FACK people cheated me again 8), we have to account
  * for reordering! Ugly, but should help.
 *
 * Search retransmitted skbs from write_queue that were sent when snd_nxt was
@@ -1844,10 +1841,6 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
         if (found_dup_sack && ((i + 1) == first_sack_index))
             next_dup = &sp[i + 1];
 
-        /* Event "B" in the comment above. */
-        if (after(end_seq, tp->high_seq))
-            state.flag |= FLAG_DATA_LOST;
-
         /* Skip too early cached blocks */
         while (tcp_sack_cache_ok(tp, cache) &&
                !before(start_seq, cache->end_seq))
@@ -2515,8 +2508,11 @@ static void tcp_timeout_skbs(struct sock *sk)
     tcp_verify_left_out(tp);
 }
 
-/* Mark head of queue up as lost. With RFC3517 SACK, the packets is
- * is against sacked "cnt", otherwise it's against facked "cnt"
+/* Detect loss in event "A" above by marking head of queue up as lost.
+ * For FACK or non-SACK(Reno) senders, the first "packets" number of segments
+ * are considered lost. For RFC3517 SACK, a segment is considered lost if it
+ * has at least tp->reordering SACKed seqments above it; "packets" refers to
+ * the maximum SACKed segments to pass before reaching this limit.
  */
 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 {
@@ -2525,6 +2521,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
     int cnt, oldcnt;
     int err;
     unsigned int mss;
+    /* Use SACK to deduce losses of new sequences sent during recovery */
+    const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;
 
     WARN_ON(packets > tp->packets_out);
     if (tp->lost_skb_hint) {
@@ -2546,7 +2544,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
         tp->lost_skb_hint = skb;
         tp->lost_cnt_hint = cnt;
 
-        if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
+        if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
             break;
 
         oldcnt = cnt;
@@ -3033,19 +3031,10 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
     if (tcp_check_sack_reneging(sk, flag))
         return;
 
-    /* C. Process data loss notification, provided it is valid. */
-    if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) &&
-        before(tp->snd_una, tp->high_seq) &&
-        icsk->icsk_ca_state != TCP_CA_Open &&
-        tp->fackets_out > tp->reordering) {
-        tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
-        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
-    }
-
-    /* D. Check consistency of the current state. */
+    /* C. Check consistency of the current state. */
     tcp_verify_left_out(tp);
 
-    /* E. Check state exit conditions. State can be terminated
+    /* D. Check state exit conditions. State can be terminated
      * when high_seq is ACKed. */
     if (icsk->icsk_ca_state == TCP_CA_Open) {
         WARN_ON(tp->retrans_out != 0);
@@ -3077,7 +3066,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
         }
     }
 
-    /* F. Process state. */
+    /* E. Process state. */
     switch (icsk->icsk_ca_state) {
     case TCP_CA_Recovery:
         if (!(flag & FLAG_SND_UNA_ADVANCED)) {
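The loss_high bound added to tcp_mark_head_lost() encodes the comment above it: SACK senders may mark losses up to snd_nxt during recovery, everyone else stops at high_seq. A user-space sketch of that bound over wrapping 32-bit sequence numbers (hypothetical helpers):

    #include <stdbool.h>
    #include <stdint.h>

    /* "a is after b" in wrapping 32-bit sequence space */
    static bool seq_after(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;
    }

    /* true once a segment lies beyond what may be marked lost */
    static bool past_loss_bound(uint32_t end_seq, bool is_sack,
                                uint32_t snd_nxt, uint32_t high_seq)
    {
            uint32_t loss_high = is_sack ? snd_nxt : high_seq;

            return seq_after(end_seq, loss_high);
    }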
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1eb4ad57670e..337ba4cca052 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -631,7 +631,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
     arg.iov[0].iov_len = sizeof(rep.th);
 
 #ifdef CONFIG_TCP_MD5SIG
-    key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
+    key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
     if (key) {
         rep.opt[0] = htonl((TCPOPT_NOP << 24) |
                            (TCPOPT_NOP << 16) |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8c8de2780c7a..4ff3b6dc74fc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1141,11 +1141,9 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
     sk_mem_uncharge(sk, len);
     sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
 
-    /* Any change of skb->len requires recalculation of tso
-     * factor and mss.
-     */
+    /* Any change of skb->len requires recalculation of tso factor. */
     if (tcp_skb_pcount(skb) > 1)
-        tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
+        tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
 
     return 0;
 }
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a225d5ee3c2f..c02280a4d126 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -502,29 +502,31 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
     rcu_read_unlock();
 }
 
-static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
+static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
 {
     struct net *net;
+    int old;
+
+    if (!rtnl_trylock())
+        return restart_syscall();
 
     net = (struct net *)table->extra2;
-    if (p == &net->ipv6.devconf_dflt->forwarding)
-        return 0;
+    old = *p;
+    *p = newf;
 
-    if (!rtnl_trylock()) {
-        /* Restore the original values before restarting */
-        *p = old;
-        return restart_syscall();
+    if (p == &net->ipv6.devconf_dflt->forwarding) {
+        rtnl_unlock();
+        return 0;
     }
 
     if (p == &net->ipv6.devconf_all->forwarding) {
-        __s32 newf = net->ipv6.devconf_all->forwarding;
         net->ipv6.devconf_dflt->forwarding = newf;
         addrconf_forward_change(net, newf);
-    } else if ((!*p) ^ (!old))
+    } else if ((!newf) ^ (!old))
         dev_forward_change((struct inet6_dev *)table->extra1);
     rtnl_unlock();
 
-    if (*p)
+    if (newf)
         rt6_purge_dflt_routers(net);
     return 1;
 }
@@ -4260,9 +4262,17 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
     int *valp = ctl->data;
     int val = *valp;
     loff_t pos = *ppos;
+    ctl_table lctl;
     int ret;
 
-    ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+    /*
+     * ctl->data points to idev->cnf.forwarding, we should
+     * not modify it until we get the rtnl lock.
+     */
+    lctl = *ctl;
+    lctl.data = &val;
+
+    ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
 
     if (write)
         ret = addrconf_fixup_forwarding(ctl, valp, val);
@@ -4300,26 +4310,27 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
     rcu_read_unlock();
 }
 
-static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
+static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
 {
     struct net *net;
+    int old;
+
+    if (!rtnl_trylock())
+        return restart_syscall();
 
     net = (struct net *)table->extra2;
+    old = *p;
+    *p = newf;
 
-    if (p == &net->ipv6.devconf_dflt->disable_ipv6)
+    if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
+        rtnl_unlock();
         return 0;
-
-    if (!rtnl_trylock()) {
-        /* Restore the original values before restarting */
-        *p = old;
-        return restart_syscall();
     }
 
     if (p == &net->ipv6.devconf_all->disable_ipv6) {
-        __s32 newf = net->ipv6.devconf_all->disable_ipv6;
         net->ipv6.devconf_dflt->disable_ipv6 = newf;
         addrconf_disable_change(net, newf);
-    } else if ((!*p) ^ (!old))
+    } else if ((!newf) ^ (!old))
         dev_disable_change((struct inet6_dev *)table->extra1);
 
     rtnl_unlock();
@@ -4333,9 +4344,17 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
     int *valp = ctl->data;
     int val = *valp;
     loff_t pos = *ppos;
+    ctl_table lctl;
    int ret;
 
-    ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+    /*
+     * ctl->data points to idev->cnf.disable_ipv6, we should
+     * not modify it until we get the rtnl lock.
+     */
+    lctl = *ctl;
+    lctl.data = &val;
+
+    ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
 
     if (write)
         ret = addrconf_disable_ipv6(ctl, valp, val);
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index fb80a23c6640..a34c9e4c792c 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -432,7 +432,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
     if (type <= IPQM_BASE)
         return;
 
-    if (security_netlink_recv(skb, CAP_NET_ADMIN))
+    if (!capable(CAP_NET_ADMIN))
         RCV_SKB_FAIL(-EPERM);
 
     spin_lock_bh(&queue_lock);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 906c7ca43542..3edd05ae4388 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1083,7 +1083,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 
 #ifdef CONFIG_TCP_MD5SIG
     if (sk)
-        key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
+        key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr);
 #endif
 
     if (th->ack)
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index d21e7ebd91ca..55670ec3cd0f 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -393,11 +393,6 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
 {
     int rc;
 
-    if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
-        goto drop;
-
-    nf_reset(skb);
-
     /* Charge it to the socket, dropping if the queue is full. */
     rc = sock_queue_rcv_skb(sk, skb);
     if (rc < 0)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index a18e6c3d36e3..b9bef2c75026 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -713,6 +713,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
     struct sk_buff *skb = NULL;
     struct sock *sk = sock->sk;
     struct llc_sock *llc = llc_sk(sk);
+    unsigned long cpu_flags;
     size_t copied = 0;
     u32 peek_seq = 0;
     u32 *seq;
@@ -838,7 +839,9 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
             goto copy_uaddr;
 
         if (!(flags & MSG_PEEK)) {
+            spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
             sk_eat_skb(sk, skb, 0);
+            spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
             *seq = 0;
         }
 
@@ -859,7 +862,9 @@ copy_uaddr:
         llc_cmsg_rcv(msg, skb);
 
     if (!(flags & MSG_PEEK)) {
+        spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
         sk_eat_skb(sk, skb, 0);
+        spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
         *seq = 0;
     }
 
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 831a5bd44fd0..2306d7514fff 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -909,6 +909,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
         wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
                     result);
 
+    ieee80211_led_init(local);
+
     rtnl_lock();
 
     result = ieee80211_init_rate_ctrl_alg(local,
@@ -930,8 +932,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 
     rtnl_unlock();
 
-    ieee80211_led_init(local);
-
     local->network_latency_notifier.notifier_call =
         ieee80211_max_network_latency;
     result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 2b5b1194dfc2..3ab85c02ef04 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -611,7 +611,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
     index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
             tid_agg_rx->buf_size;
     if (!tid_agg_rx->reorder_buf[index] &&
-        tid_agg_rx->stored_mpdu_num > 1) {
+        tid_agg_rx->stored_mpdu_num) {
         /*
          * No buffers ready to be released, but check whether any
          * frames in the reorder buffer have timed out.
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index b4f8d849480c..4d70785b953d 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -130,7 +130,7 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
     const struct nfnetlink_subsystem *ss;
     int type, err;
 
-    if (security_netlink_recv(skb, CAP_NET_ADMIN))
+    if (!capable(CAP_NET_ADMIN))
         return -EPERM;
 
     /* All the messages must at least contain nfgenmsg */
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index a403b618faa5..c29d2568c9e0 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -524,7 +524,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
         return -EOPNOTSUPP;
 
     if ((ops->flags & GENL_ADMIN_PERM) &&
-        security_netlink_recv(skb, CAP_NET_ADMIN))
+        !capable(CAP_NET_ADMIN))
         return -EPERM;
 
     if (nlh->nlmsg_flags & NLM_F_DUMP) {
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index bb6ad81b671d..424ff622ab5f 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -68,7 +68,6 @@ static int rds_release(struct socket *sock)
 {
     struct sock *sk = sock->sk;
     struct rds_sock *rs;
-    unsigned long flags;
 
     if (!sk)
         goto out;
@@ -94,10 +93,10 @@ static int rds_release(struct socket *sock)
     rds_rdma_drop_keys(rs);
     rds_notify_queue_get(rs, NULL);
 
-    spin_lock_irqsave(&rds_sock_lock, flags);
+    spin_lock_bh(&rds_sock_lock);
     list_del_init(&rs->rs_item);
     rds_sock_count--;
-    spin_unlock_irqrestore(&rds_sock_lock, flags);
+    spin_unlock_bh(&rds_sock_lock);
 
     rds_trans_put(rs->rs_transport);
 
@@ -409,7 +408,6 @@ static const struct proto_ops rds_proto_ops = {
 
 static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
 {
-    unsigned long flags;
     struct rds_sock *rs;
 
     sock_init_data(sock, sk);
@@ -426,10 +424,10 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
     spin_lock_init(&rs->rs_rdma_lock);
     rs->rs_rdma_keys = RB_ROOT;
 
-    spin_lock_irqsave(&rds_sock_lock, flags);
+    spin_lock_bh(&rds_sock_lock);
     list_add_tail(&rs->rs_item, &rds_sock_list);
     rds_sock_count++;
-    spin_unlock_irqrestore(&rds_sock_lock, flags);
+    spin_unlock_bh(&rds_sock_lock);
 
     return 0;
 }
@@ -471,12 +469,11 @@
 {
     struct rds_sock *rs;
     struct rds_incoming *inc;
-    unsigned long flags;
     unsigned int total = 0;
 
     len /= sizeof(struct rds_info_message);
 
-    spin_lock_irqsave(&rds_sock_lock, flags);
+    spin_lock_bh(&rds_sock_lock);
 
     list_for_each_entry(rs, &rds_sock_list, rs_item) {
         read_lock(&rs->rs_recv_lock);
@@ -492,7 +489,7 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
         read_unlock(&rs->rs_recv_lock);
     }
 
-    spin_unlock_irqrestore(&rds_sock_lock, flags);
+    spin_unlock_bh(&rds_sock_lock);
 
     lens->nr = total;
     lens->each = sizeof(struct rds_info_message);
@@ -504,11 +501,10 @@ static void rds_sock_info(struct socket *sock, unsigned int len,
 {
     struct rds_info_socket sinfo;
     struct rds_sock *rs;
-    unsigned long flags;
 
     len /= sizeof(struct rds_info_socket);
 
-    spin_lock_irqsave(&rds_sock_lock, flags);
+    spin_lock_bh(&rds_sock_lock);
 
     if (len < rds_sock_count)
         goto out;
@@ -529,7 +525,7 @@ out:
     lens->nr = rds_sock_count;
     lens->each = sizeof(struct rds_info_socket);
 
-    spin_unlock_irqrestore(&rds_sock_lock, flags);
+    spin_unlock_bh(&rds_sock_lock);
 }
 
 static void rds_exit(void)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e7e1d0b57b3d..2776012132ea 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -419,7 +419,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
     cb = netem_skb_cb(skb);
     if (q->gap == 0 ||             /* not doing reordering */
-        q->counter < q->gap ||     /* inside last reordering gap */
+        q->counter < q->gap - 1 || /* inside last reordering gap */
        q->reorder < get_crandom(&q->reorder_cor)) {
         psched_time_t now;
         psched_tdiff_t delay;
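The sch_netem change fixes an off-by-one in the reorder gap: with gap = 5 the old counter < gap test let five packets take the in-order path and reordered every sixth, while counter < gap - 1 reorders every fifth, matching the documented behavior. A small user-space simulation of the fixed test:

    #include <stdio.h>

    int main(void)
    {
            unsigned int gap = 5, counter = 0;

            for (int pkt = 1; pkt <= 12; pkt++) {
                    if (counter < gap - 1) {
                            counter++;              /* in-order path */
                    } else {
                            counter = 0;            /* this one jumps the queue */
                            printf("packet %d reordered\n", pkt);   /* 5, 10 */
                    }
            }
            return 0;
    }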
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 03b56bc3b659..465df9ae1046 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -1641,6 +1641,7 @@ int cache_register_net(struct cache_detail *cd, struct net *net) | |||
1641 | sunrpc_destroy_cache_detail(cd); | 1641 | sunrpc_destroy_cache_detail(cd); |
1642 | return ret; | 1642 | return ret; |
1643 | } | 1643 | } |
1644 | EXPORT_SYMBOL_GPL(cache_register_net); | ||
1644 | 1645 | ||
1645 | int cache_register(struct cache_detail *cd) | 1646 | int cache_register(struct cache_detail *cd) |
1646 | { | 1647 | { |
@@ -1653,6 +1654,7 @@ void cache_unregister_net(struct cache_detail *cd, struct net *net) | |||
1653 | remove_cache_proc_entries(cd, net); | 1654 | remove_cache_proc_entries(cd, net); |
1654 | sunrpc_destroy_cache_detail(cd); | 1655 | sunrpc_destroy_cache_detail(cd); |
1655 | } | 1656 | } |
1657 | EXPORT_SYMBOL_GPL(cache_unregister_net); | ||
1656 | 1658 | ||
1657 | void cache_unregister(struct cache_detail *cd) | 1659 | void cache_unregister(struct cache_detail *cd) |
1658 | { | 1660 | { |
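The two EXPORT_SYMBOL_GPL additions above make the per-network-namespace registration pair usable from modules. A hedged sketch of how a module's pernet ops might call them; the cache_detail shown is a minimal, made-up example and only a subset of the real fields:

	#include <linux/sunrpc/cache.h>
	#include <net/net_namespace.h>

	/* Hypothetical cache; real users also supply a hash table and
	 * the various cache_detail callbacks. */
	static struct cache_detail my_cache_template = {
		.owner		= THIS_MODULE,
		.name		= "my_cache",
		.hash_size	= 64,
	};

	static int __net_init my_cache_net_init(struct net *net)
	{
		/* Creates the per-namespace proc entries for the cache. */
		return cache_register_net(&my_cache_template, net);
	}

	static void __net_exit my_cache_net_exit(struct net *net)
	{
		cache_unregister_net(&my_cache_template, net);
	}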
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 9d01d46b05f3..e4aabc02368b 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c | |||
@@ -167,6 +167,7 @@ svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools) | |||
167 | 167 | ||
168 | fail_free: | 168 | fail_free: |
169 | kfree(m->to_pool); | 169 | kfree(m->to_pool); |
170 | m->to_pool = NULL; | ||
170 | fail: | 171 | fail: |
171 | return -ENOMEM; | 172 | return -ENOMEM; |
172 | } | 173 | } |
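Clearing m->to_pool right after kfree() is the standard defence against double-free: this error path and svc_pool_map_put() (patched in the next hunk) may both run over the same fields, and kfree(NULL) is a harmless no-op. The idiom in isolation, with an illustrative struct:

	#include <linux/slab.h>

	struct pool_map_arrays {
		unsigned int *to_pool;
		unsigned int *pool_to;
	};

	/* Safe to call more than once: each pointer is cleared after
	 * the free, and kfree(NULL) does nothing. */
	static void pool_map_free(struct pool_map_arrays *m)
	{
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
	}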
@@ -285,9 +286,10 @@ svc_pool_map_put(void) | |||
285 | mutex_lock(&svc_pool_map_mutex); | 286 | mutex_lock(&svc_pool_map_mutex); |
286 | 287 | ||
287 | if (!--m->count) { | 288 | if (!--m->count) { |
288 | m->mode = SVC_POOL_DEFAULT; | ||
289 | kfree(m->to_pool); | 289 | kfree(m->to_pool); |
290 | m->to_pool = NULL; | ||
290 | kfree(m->pool_to); | 291 | kfree(m->pool_to); |
292 | m->pool_to = NULL; | ||
291 | m->npools = 0; | 293 | m->npools = 0; |
292 | } | 294 | } |
293 | 295 | ||
@@ -527,17 +529,20 @@ svc_destroy(struct svc_serv *serv) | |||
527 | printk("svc_destroy: no threads for serv=%p!\n", serv); | 529 | printk("svc_destroy: no threads for serv=%p!\n", serv); |
528 | 530 | ||
529 | del_timer_sync(&serv->sv_temptimer); | 531 | del_timer_sync(&serv->sv_temptimer); |
530 | 532 | /* | |
531 | svc_close_all(&serv->sv_tempsocks); | 533 | * The set of xprts (contained in the sv_tempsocks and |
534 | * sv_permsocks lists) is now constant, since it is modified | ||
535 | * only by accepting new sockets (done by service threads in | ||
536 | * svc_recv) or aging old ones (done by sv_temptimer), or | ||
537 | * configuration changes (excluded by whatever locking the | ||
538 | * caller is using--nfsd_mutex in the case of nfsd). So it's | ||
539 | * safe to traverse those lists and shut everything down: | ||
540 | */ | ||
541 | svc_close_all(serv); | ||
532 | 542 | ||
533 | if (serv->sv_shutdown) | 543 | if (serv->sv_shutdown) |
534 | serv->sv_shutdown(serv); | 544 | serv->sv_shutdown(serv); |
535 | 545 | ||
536 | svc_close_all(&serv->sv_permsocks); | ||
537 | |||
538 | BUG_ON(!list_empty(&serv->sv_permsocks)); | ||
539 | BUG_ON(!list_empty(&serv->sv_tempsocks)); | ||
540 | |||
541 | cache_clean_deferred(serv); | 546 | cache_clean_deferred(serv); |
542 | 547 | ||
543 | if (svc_serv_is_pooled(serv)) | 548 | if (svc_serv_is_pooled(serv)) |
@@ -683,8 +688,8 @@ found_pool: | |||
683 | * Create or destroy enough new threads to make the number | 688 | * Create or destroy enough new threads to make the number |
684 | * of threads the given number. If `pool' is non-NULL, applies | 689 | * of threads the given number. If `pool' is non-NULL, applies |
685 | * only to threads in that pool, otherwise round-robins between | 690 | * only to threads in that pool, otherwise round-robins between |
686 | * all pools. Must be called with a svc_get() reference and | 691 | * all pools. Caller must ensure mutual exclusion between this and |
687 | * the BKL or another lock to protect access to svc_serv fields. | 692 | * server startup or shutdown. |
688 | * | 693 | * |
689 | * Destroying threads relies on the service threads filling in | 694 | * Destroying threads relies on the service threads filling in |
690 | * rqstp->rq_task, which only the nfs ones do. Assumes the serv | 695 | * rqstp->rq_task, which only the nfs ones do. Assumes the serv |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 38649cfa4e81..74cb0d8e9ca1 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -22,6 +22,7 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt); | |||
22 | static int svc_deferred_recv(struct svc_rqst *rqstp); | 22 | static int svc_deferred_recv(struct svc_rqst *rqstp); |
23 | static struct cache_deferred_req *svc_defer(struct cache_req *req); | 23 | static struct cache_deferred_req *svc_defer(struct cache_req *req); |
24 | static void svc_age_temp_xprts(unsigned long closure); | 24 | static void svc_age_temp_xprts(unsigned long closure); |
25 | static void svc_delete_xprt(struct svc_xprt *xprt); | ||
25 | 26 | ||
26 | /* apparently the "standard" is that clients close | 27 | /* apparently the "standard" is that clients close |
27 | * idle connections after 5 minutes, servers after | 28 | * idle connections after 5 minutes, servers after |
@@ -147,8 +148,8 @@ EXPORT_SYMBOL_GPL(svc_xprt_put); | |||
147 | * Called by transport drivers to initialize the transport independent | 148 | * Called by transport drivers to initialize the transport independent |
148 | * portion of the transport instance. | 149 | * portion of the transport instance. |
149 | */ | 150 | */ |
150 | void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt, | 151 | void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl, |
151 | struct svc_serv *serv) | 152 | struct svc_xprt *xprt, struct svc_serv *serv) |
152 | { | 153 | { |
153 | memset(xprt, 0, sizeof(*xprt)); | 154 | memset(xprt, 0, sizeof(*xprt)); |
154 | xprt->xpt_class = xcl; | 155 | xprt->xpt_class = xcl; |
@@ -163,7 +164,7 @@ void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt, | |||
163 | spin_lock_init(&xprt->xpt_lock); | 164 | spin_lock_init(&xprt->xpt_lock); |
164 | set_bit(XPT_BUSY, &xprt->xpt_flags); | 165 | set_bit(XPT_BUSY, &xprt->xpt_flags); |
165 | rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending"); | 166 | rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending"); |
166 | xprt->xpt_net = get_net(&init_net); | 167 | xprt->xpt_net = get_net(net); |
167 | } | 168 | } |
168 | EXPORT_SYMBOL_GPL(svc_xprt_init); | 169 | EXPORT_SYMBOL_GPL(svc_xprt_init); |
169 | 170 | ||
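With the new struct net * parameter, svc_xprt_init() pins the namespace the transport was created in instead of unconditionally pinning init_net. Callers derive it from the underlying socket, as the svcsock.c hunks further down do. A hedged sketch of the call-site pattern (the function name is a placeholder; the field accesses follow the svcsock.c changes below):

	#include <linux/sunrpc/svcsock.h>
	#include <net/sock.h>

	/* Illustrative only: initialize the generic xprt portion using
	 * the namespace of the socket backing this transport. */
	static void my_xprt_setup(struct svc_sock *svsk, struct svc_serv *serv,
				  struct svc_xprt_class *xcl)
	{
		svc_xprt_init(sock_net(svsk->sk_sock->sk), xcl,
			      &svsk->sk_xprt, serv);
	}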
@@ -878,7 +879,7 @@ static void call_xpt_users(struct svc_xprt *xprt) | |||
878 | /* | 879 | /* |
879 | * Remove a dead transport | 880 | * Remove a dead transport |
880 | */ | 881 | */ |
881 | void svc_delete_xprt(struct svc_xprt *xprt) | 882 | static void svc_delete_xprt(struct svc_xprt *xprt) |
882 | { | 883 | { |
883 | struct svc_serv *serv = xprt->xpt_server; | 884 | struct svc_serv *serv = xprt->xpt_server; |
884 | struct svc_deferred_req *dr; | 885 | struct svc_deferred_req *dr; |
@@ -893,14 +894,7 @@ void svc_delete_xprt(struct svc_xprt *xprt) | |||
893 | spin_lock_bh(&serv->sv_lock); | 894 | spin_lock_bh(&serv->sv_lock); |
894 | if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags)) | 895 | if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags)) |
895 | list_del_init(&xprt->xpt_list); | 896 | list_del_init(&xprt->xpt_list); |
896 | /* | 897 | BUG_ON(!list_empty(&xprt->xpt_ready)); |
897 | * The only time we're called while xpt_ready is still on a list | ||
898 | * is while the list itself is about to be destroyed (in | ||
899 | * svc_destroy). BUT svc_xprt_enqueue could still be attempting | ||
900 | * to add new entries to the sp_sockets list, so we can't leave | ||
901 | * a freed xprt on it. | ||
902 | */ | ||
903 | list_del_init(&xprt->xpt_ready); | ||
904 | if (test_bit(XPT_TEMP, &xprt->xpt_flags)) | 898 | if (test_bit(XPT_TEMP, &xprt->xpt_flags)) |
905 | serv->sv_tmpcnt--; | 899 | serv->sv_tmpcnt--; |
906 | spin_unlock_bh(&serv->sv_lock); | 900 | spin_unlock_bh(&serv->sv_lock); |
@@ -928,22 +922,48 @@ void svc_close_xprt(struct svc_xprt *xprt) | |||
928 | } | 922 | } |
929 | EXPORT_SYMBOL_GPL(svc_close_xprt); | 923 | EXPORT_SYMBOL_GPL(svc_close_xprt); |
930 | 924 | ||
931 | void svc_close_all(struct list_head *xprt_list) | 925 | static void svc_close_list(struct list_head *xprt_list) |
926 | { | ||
927 | struct svc_xprt *xprt; | ||
928 | |||
929 | list_for_each_entry(xprt, xprt_list, xpt_list) { | ||
930 | set_bit(XPT_CLOSE, &xprt->xpt_flags); | ||
931 | set_bit(XPT_BUSY, &xprt->xpt_flags); | ||
932 | } | ||
933 | } | ||
934 | |||
935 | void svc_close_all(struct svc_serv *serv) | ||
932 | { | 936 | { |
937 | struct svc_pool *pool; | ||
933 | struct svc_xprt *xprt; | 938 | struct svc_xprt *xprt; |
934 | struct svc_xprt *tmp; | 939 | struct svc_xprt *tmp; |
940 | int i; | ||
941 | |||
942 | svc_close_list(&serv->sv_tempsocks); | ||
943 | svc_close_list(&serv->sv_permsocks); | ||
935 | 944 | ||
945 | for (i = 0; i < serv->sv_nrpools; i++) { | ||
946 | pool = &serv->sv_pools[i]; | ||
947 | |||
948 | spin_lock_bh(&pool->sp_lock); | ||
949 | while (!list_empty(&pool->sp_sockets)) { | ||
950 | xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready); | ||
951 | list_del_init(&xprt->xpt_ready); | ||
952 | } | ||
953 | spin_unlock_bh(&pool->sp_lock); | ||
954 | } | ||
936 | /* | 955 | /* |
937 | * The server is shutting down, and no more threads are running. | 956 | * At this point the sp_sockets lists will stay empty, since |
938 | * svc_xprt_enqueue() might still be running, but at worst it | 957 | * svc_xprt_enqueue will not add new entries without taking the |
939 | * will re-add the xprt to sp_sockets, which will soon get | 958 | * sp_lock and checking XPT_BUSY. |
940 | * freed. So we don't bother with any more locking, and don't | ||
941 | * leave the close to the (nonexistent) server threads: | ||
942 | */ | 959 | */ |
943 | list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { | 960 | list_for_each_entry_safe(xprt, tmp, &serv->sv_tempsocks, xpt_list) |
944 | set_bit(XPT_CLOSE, &xprt->xpt_flags); | ||
945 | svc_delete_xprt(xprt); | 961 | svc_delete_xprt(xprt); |
946 | } | 962 | list_for_each_entry_safe(xprt, tmp, &serv->sv_permsocks, xpt_list) |
963 | svc_delete_xprt(xprt); | ||
964 | |||
965 | BUG_ON(!list_empty(&serv->sv_permsocks)); | ||
966 | BUG_ON(!list_empty(&serv->sv_tempsocks)); | ||
947 | } | 967 | } |
948 | 968 | ||
949 | /* | 969 | /* |
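The rewritten svc_close_all() works without holding sv_lock because of its ordering: every xprt is first marked XPT_CLOSE and XPT_BUSY, then the per-pool ready queues are drained under sp_lock, and only then are the transports deleted. The drained sp_sockets lists stay empty because the enqueue side backs off busy transports; a paraphrase of that guard from svc_xprt_enqueue() of this era, wrapped in a hypothetical helper for readability:

	#include <linux/sunrpc/svc_xprt.h>

	/* Paraphrased guard: a transport already marked busy is never
	 * re-added to a pool's sp_sockets list, which is what keeps the
	 * lists drained above empty until svc_delete_xprt() runs. */
	static bool example_try_enqueue(struct svc_xprt *xprt)
	{
		if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
			return false;	/* already enqueued or being closed */
		/* ... link xprt->xpt_ready onto pool->sp_sockets under
		 * pool->sp_lock ... */
		return true;
	}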
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 4653286fcc9e..464570906f80 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -739,7 +739,8 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv) | |||
739 | { | 739 | { |
740 | int err, level, optname, one = 1; | 740 | int err, level, optname, one = 1; |
741 | 741 | ||
742 | svc_xprt_init(&svc_udp_class, &svsk->sk_xprt, serv); | 742 | svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_udp_class, |
743 | &svsk->sk_xprt, serv); | ||
743 | clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); | 744 | clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); |
744 | svsk->sk_sk->sk_data_ready = svc_udp_data_ready; | 745 | svsk->sk_sk->sk_data_ready = svc_udp_data_ready; |
745 | svsk->sk_sk->sk_write_space = svc_write_space; | 746 | svsk->sk_sk->sk_write_space = svc_write_space; |
@@ -1343,7 +1344,8 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv) | |||
1343 | { | 1344 | { |
1344 | struct sock *sk = svsk->sk_sk; | 1345 | struct sock *sk = svsk->sk_sk; |
1345 | 1346 | ||
1346 | svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt, serv); | 1347 | svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_tcp_class, |
1348 | &svsk->sk_xprt, serv); | ||
1347 | set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); | 1349 | set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); |
1348 | if (sk->sk_state == TCP_LISTEN) { | 1350 | if (sk->sk_state == TCP_LISTEN) { |
1349 | dprintk("setting up TCP socket for listening\n"); | 1351 | dprintk("setting up TCP socket for listening\n"); |
@@ -1659,7 +1661,7 @@ static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv, | |||
1659 | return ERR_PTR(-ENOMEM); | 1661 | return ERR_PTR(-ENOMEM); |
1660 | 1662 | ||
1661 | xprt = &svsk->sk_xprt; | 1663 | xprt = &svsk->sk_xprt; |
1662 | svc_xprt_init(&svc_tcp_bc_class, xprt, serv); | 1664 | svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv); |
1663 | 1665 | ||
1664 | serv->sv_bc_xprt = xprt; | 1666 | serv->sv_bc_xprt = xprt; |
1665 | 1667 | ||
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index ba1296d88de0..894cb42db91d 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -453,7 +453,7 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv, | |||
453 | 453 | ||
454 | if (!cma_xprt) | 454 | if (!cma_xprt) |
455 | return NULL; | 455 | return NULL; |
456 | svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv); | 456 | svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv); |
457 | INIT_LIST_HEAD(&cma_xprt->sc_accept_q); | 457 | INIT_LIST_HEAD(&cma_xprt->sc_accept_q); |
458 | INIT_LIST_HEAD(&cma_xprt->sc_dto_q); | 458 | INIT_LIST_HEAD(&cma_xprt->sc_dto_q); |
459 | INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q); | 459 | INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q); |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index aad8fb699989..85d3bb7490aa 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -1918,7 +1918,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1918 | struct sk_buff *skb; | 1918 | struct sk_buff *skb; |
1919 | 1919 | ||
1920 | unix_state_lock(sk); | 1920 | unix_state_lock(sk); |
1921 | skb = skb_dequeue(&sk->sk_receive_queue); | 1921 | skb = skb_peek(&sk->sk_receive_queue); |
1922 | if (skb == NULL) { | 1922 | if (skb == NULL) { |
1923 | unix_sk(sk)->recursion_level = 0; | 1923 | unix_sk(sk)->recursion_level = 0; |
1924 | if (copied >= target) | 1924 | if (copied >= target) |
@@ -1958,11 +1958,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1958 | if (check_creds) { | 1958 | if (check_creds) { |
1959 | /* Never glue messages from different writers */ | 1959 | /* Never glue messages from different writers */ |
1960 | if ((UNIXCB(skb).pid != siocb->scm->pid) || | 1960 | if ((UNIXCB(skb).pid != siocb->scm->pid) || |
1961 | (UNIXCB(skb).cred != siocb->scm->cred)) { | 1961 | (UNIXCB(skb).cred != siocb->scm->cred)) |
1962 | skb_queue_head(&sk->sk_receive_queue, skb); | ||
1963 | sk->sk_data_ready(sk, skb->len); | ||
1964 | break; | 1962 | break; |
1965 | } | ||
1966 | } else { | 1963 | } else { |
1967 | /* Copy credentials */ | 1964 | /* Copy credentials */ |
1968 | scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); | 1965 | scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); |
@@ -1977,8 +1974,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1977 | 1974 | ||
1978 | chunk = min_t(unsigned int, skb->len, size); | 1975 | chunk = min_t(unsigned int, skb->len, size); |
1979 | if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { | 1976 | if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { |
1980 | skb_queue_head(&sk->sk_receive_queue, skb); | ||
1981 | sk->sk_data_ready(sk, skb->len); | ||
1982 | if (copied == 0) | 1977 | if (copied == 0) |
1983 | copied = -EFAULT; | 1978 | copied = -EFAULT; |
1984 | break; | 1979 | break; |
@@ -1993,13 +1988,10 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1993 | if (UNIXCB(skb).fp) | 1988 | if (UNIXCB(skb).fp) |
1994 | unix_detach_fds(siocb->scm, skb); | 1989 | unix_detach_fds(siocb->scm, skb); |
1995 | 1990 | ||
1996 | /* put the skb back if we didn't use it up.. */ | 1991 | if (skb->len) |
1997 | if (skb->len) { | ||
1998 | skb_queue_head(&sk->sk_receive_queue, skb); | ||
1999 | sk->sk_data_ready(sk, skb->len); | ||
2000 | break; | 1992 | break; |
2001 | } | ||
2002 | 1993 | ||
1994 | skb_unlink(skb, &sk->sk_receive_queue); | ||
2003 | consume_skb(skb); | 1995 | consume_skb(skb); |
2004 | 1996 | ||
2005 | if (siocb->scm->fp) | 1997 | if (siocb->scm->fp) |
@@ -2010,9 +2002,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
2010 | if (UNIXCB(skb).fp) | 2002 | if (UNIXCB(skb).fp) |
2011 | siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp); | 2003 | siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp); |
2012 | 2004 | ||
2013 | /* put message back and return */ | ||
2014 | skb_queue_head(&sk->sk_receive_queue, skb); | ||
2015 | sk->sk_data_ready(sk, skb->len); | ||
2016 | break; | 2005 | break; |
2017 | } | 2006 | } |
2018 | } while (size); | 2007 | } while (size); |
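The af_unix change above replaces dequeue-then-requeue with peek-then-unlink: the skb stays on sk_receive_queue while it is being copied and is removed (skb_unlink() plus consume_skb()) only once fully consumed. Every early exit can then simply break out instead of requeueing and re-signalling sk_data_ready. The core pattern, sketched for a generic sk_buff queue (illustrative, not the af_unix code itself; the caller is assumed to hold the lock that serializes readers, as unix_state_lock does here):

	#include <linux/kernel.h>
	#include <linux/skbuff.h>

	/* Consume up to 'len' bytes from the head skb without taking it
	 * off the queue early; unlink it only when fully drained. */
	static int consume_head(struct sk_buff_head *queue, void *buf, int len)
	{
		struct sk_buff *skb;
		int chunk;

		skb = skb_peek(queue);		/* leaves skb on the queue */
		if (!skb)
			return 0;

		chunk = min_t(int, skb->len, len);
		skb_copy_bits(skb, 0, buf, chunk);
		skb_pull(skb, chunk);

		if (!skb->len) {		/* fully consumed: now unlink */
			skb_unlink(skb, queue);
			consume_skb(skb);
		}
		return chunk;
	}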
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 637f11a1e4df..66b84fbf2746 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -2290,7 +2290,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
2290 | link = &xfrm_dispatch[type]; | 2290 | link = &xfrm_dispatch[type]; |
2291 | 2291 | ||
2292 | /* All operations require privileges, even GET */ | 2292 | /* All operations require privileges, even GET */ |
2293 | if (security_netlink_recv(skb, CAP_NET_ADMIN)) | 2293 | if (!capable(CAP_NET_ADMIN)) |
2294 | return -EPERM; | 2294 | return -EPERM; |
2295 | 2295 | ||
2296 | if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || | 2296 | if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || |
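The final hunk swaps the removed LSM hook security_netlink_recv() for a plain capability check on the sending process. The general shape of such a guard at the top of a netlink message handler, sketched with a hypothetical handler name (the signature mirrors xfrm_user_rcv_msg above):

	#include <linux/capability.h>
	#include <linux/netlink.h>

	static int my_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
	{
		/* All operations, including GETs, require net-admin rights. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		/* ... dispatch on nlh->nlmsg_type ... */
		return 0;
	}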