Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_netlink.c                   |  1
-rw-r--r--  net/ceph/osdmap.c                         | 42
-rw-r--r--  net/core/rtnetlink.c                      |  2
-rw-r--r--  net/ipv4/inet_fragment.c                  | 20
-rw-r--r--  net/ipv4/ip_fragment.c                    | 11
-rw-r--r--  net/ipv4/ip_gre.c                         |  5
-rw-r--r--  net/ipv4/tcp_ipv4.c                       | 14
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c   | 12
-rw-r--r--  net/ipv6/reassembly.c                     |  8
-rw-r--r--  net/ipv6/tcp_ipv6.c                       |  7
-rw-r--r--  net/nfc/llcp/llcp.c                       | 62
-rw-r--r--  net/nfc/llcp/sock.c                       |  2
-rw-r--r--  net/openvswitch/actions.c                 |  4
-rw-r--r--  net/openvswitch/datapath.c                |  3
-rw-r--r--  net/openvswitch/flow.c                    |  6
-rw-r--r--  net/openvswitch/vport-netdev.c            |  3
-rw-r--r--  net/openvswitch/vport.c                   |  3
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c         | 12
-rw-r--r--  net/sunrpc/rpc_pipe.c                     |  1
-rw-r--r--  net/sunrpc/xprtsock.c                     | 15
20 files changed, 167 insertions, 66 deletions
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index db12a0fcfe50..299fc5f40a26 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -330,6 +330,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
 	br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
 	br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
 	br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
+	br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
 
 	if (tb[IFLA_BRPORT_COST]) {
 		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 69bc4bf89e3e..4543b9aba40c 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -654,6 +654,24 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
 	return 0;
 }
 
+static int __decode_pgid(void **p, void *end, struct ceph_pg *pg)
+{
+	u8 v;
+
+	ceph_decode_need(p, end, 1+8+4+4, bad);
+	v = ceph_decode_8(p);
+	if (v != 1)
+		goto bad;
+	pg->pool = ceph_decode_64(p);
+	pg->seed = ceph_decode_32(p);
+	*p += 4; /* skip preferred */
+	return 0;
+
+bad:
+	dout("error decoding pgid\n");
+	return -EINVAL;
+}
+
 /*
  * decode a full map.
  */
@@ -745,13 +763,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
 	for (i = 0; i < len; i++) {
 		int n, j;
 		struct ceph_pg pgid;
-		struct ceph_pg_v1 pgid_v1;
 		struct ceph_pg_mapping *pg;
 
-		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
-		ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
-		pgid.pool = le32_to_cpu(pgid_v1.pool);
-		pgid.seed = le16_to_cpu(pgid_v1.ps);
+		err = __decode_pgid(p, end, &pgid);
+		if (err)
+			goto bad;
+		ceph_decode_need(p, end, sizeof(u32), bad);
 		n = ceph_decode_32(p);
 		err = -EINVAL;
 		if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
@@ -818,8 +835,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	u16 version;
 
 	ceph_decode_16_safe(p, end, version, bad);
-	if (version > 6) {
-		pr_warning("got unknown v %d > %d of inc osdmap\n", version, 6);
+	if (version != 6) {
+		pr_warning("got unknown v %d != 6 of inc osdmap\n", version);
 		goto bad;
 	}
 
@@ -963,15 +980,14 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	while (len--) {
 		struct ceph_pg_mapping *pg;
 		int j;
-		struct ceph_pg_v1 pgid_v1;
 		struct ceph_pg pgid;
 		u32 pglen;
-		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
-		ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
-		pgid.pool = le32_to_cpu(pgid_v1.pool);
-		pgid.seed = le16_to_cpu(pgid_v1.ps);
-		pglen = ceph_decode_32(p);
 
+		err = __decode_pgid(p, end, &pgid);
+		if (err)
+			goto bad;
+		ceph_decode_need(p, end, sizeof(u32), bad);
+		pglen = ceph_decode_32(p);
 		if (pglen) {
 			ceph_decode_need(p, end, pglen*sizeof(u32), bad);
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a585d45cc9d9..5fb8d7e47294 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2621,7 +2621,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
 
 		while (RTA_OK(attr, attrlen)) {
-			unsigned int flavor = attr->rta_type;
+			unsigned int flavor = attr->rta_type & NLA_TYPE_MASK;
 			if (flavor) {
 				if (flavor > rta_max[sz_idx])
 					return -EINVAL;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 245ae078a07f..f4fd23de9b13 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -21,6 +21,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/slab.h>
 
+#include <net/sock.h>
 #include <net/inet_frag.h>
 
 static void inet_frag_secret_rebuild(unsigned long dummy)
@@ -277,6 +278,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 	__releases(&f->lock)
 {
 	struct inet_frag_queue *q;
+	int depth = 0;
 
 	hlist_for_each_entry(q, &f->hash[hash], list) {
 		if (q->net == nf && f->match(q, key)) {
@@ -284,9 +286,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 			read_unlock(&f->lock);
 			return q;
 		}
+		depth++;
 	}
 	read_unlock(&f->lock);
 
-	return inet_frag_create(nf, f, key);
+	if (depth <= INETFRAGS_MAXDEPTH)
+		return inet_frag_create(nf, f, key);
+	else
+		return ERR_PTR(-ENOBUFS);
 }
 EXPORT_SYMBOL(inet_frag_find);
+
+void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
+				   const char *prefix)
+{
+	static const char msg[] = "inet_frag_find: Fragment hash bucket"
+		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
+		". Dropping fragment.\n";
+
+	if (PTR_ERR(q) == -ENOBUFS)
+		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
+}
+EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b6d30acb600c..a6445b843ef4 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -292,14 +292,11 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
 	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 
 	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
-	if (q == NULL)
-		goto out_nomem;
-
+	if (IS_ERR_OR_NULL(q)) {
+		inet_frag_maybe_warn_overflow(q, pr_fmt());
+		return NULL;
+	}
 	return container_of(q, struct ipq, q);
-
-out_nomem:
-	LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
-	return NULL;
 }
 
 /* Is the fragment too far ahead to be part of ipq? */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d0ef0e674ec5..91d66dbde9c0 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -798,10 +798,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 
 	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
 		gre_hlen = 0;
-		if (skb->protocol == htons(ETH_P_IP))
-			tiph = (const struct iphdr *)skb->data;
-		else
-			tiph = &tunnel->parms.iph;
+		tiph = (const struct iphdr *)skb->data;
 	} else {
 		gre_hlen = tunnel->hlen;
 		tiph = &tunnel->parms.iph;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 4a8ec457310f..d09203c63264 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -274,13 +274,6 @@ static void tcp_v4_mtu_reduced(struct sock *sk)
 	struct inet_sock *inet = inet_sk(sk);
 	u32 mtu = tcp_sk(sk)->mtu_info;
 
-	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
-	 * send out by Linux are always <576bytes so they should go through
-	 * unfragmented).
-	 */
-	if (sk->sk_state == TCP_LISTEN)
-		return;
-
 	dst = inet_csk_update_pmtu(sk, mtu);
 	if (!dst)
 		return;
@@ -408,6 +401,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		goto out;
 
 	if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
+		/* We are not interested in TCP_LISTEN and open_requests
+		 * (SYN-ACKs send out by Linux are always <576bytes so
+		 * they should go through unfragmented).
+		 */
+		if (sk->sk_state == TCP_LISTEN)
+			goto out;
+
 		tp->mtu_info = info;
 		if (!sock_owned_by_user(sk)) {
 			tcp_v4_mtu_reduced(sk);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 54087e96d7b8..6700069949dd 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -14,6 +14,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) "IPv6-nf: " fmt
+
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -180,13 +182,11 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
 
 	q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
 	local_bh_enable();
-	if (q == NULL)
-		goto oom;
-
+	if (IS_ERR_OR_NULL(q)) {
+		inet_frag_maybe_warn_overflow(q, pr_fmt());
+		return NULL;
+	}
 	return container_of(q, struct frag_queue, q);
-
-oom:
-	return NULL;
 }
 
 
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 3c6a77290c6e..196ab9347ad1 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -26,6 +26,9 @@
  *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
  *				calculate ICV correctly.
  */
+
+#define pr_fmt(fmt) "IPv6: " fmt
+
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -185,9 +188,10 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6
 	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
 
 	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
-	if (q == NULL)
+	if (IS_ERR_OR_NULL(q)) {
+		inet_frag_maybe_warn_overflow(q, pr_fmt());
 		return NULL;
-
+	}
 	return container_of(q, struct frag_queue, q);
 }
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9b6460055df5..f6d629fd6aee 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -389,6 +389,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	}
 
 	if (type == ICMPV6_PKT_TOOBIG) {
+		/* We are not interested in TCP_LISTEN and open_requests
+		 * (SYN-ACKs send out by Linux are always <576bytes so
+		 * they should go through unfragmented).
+		 */
+		if (sk->sk_state == TCP_LISTEN)
+			goto out;
+
 		tp->mtu_info = ntohl(info);
 		if (!sock_owned_by_user(sk))
 			tcp_v6_mtu_reduced(sk);
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 7f8266dd14cb..b530afadd76c 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -68,7 +68,8 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)
 	}
 }
 
-static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
+static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen,
+				    int err)
 {
 	struct sock *sk;
 	struct hlist_node *tmp;
@@ -100,7 +101,10 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
 
 			nfc_llcp_accept_unlink(accept_sk);
 
+			if (err)
+				accept_sk->sk_err = err;
 			accept_sk->sk_state = LLCP_CLOSED;
+			accept_sk->sk_state_change(sk);
 
 			bh_unlock_sock(accept_sk);
 
@@ -123,7 +127,10 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
 			continue;
 		}
 
+		if (err)
+			sk->sk_err = err;
 		sk->sk_state = LLCP_CLOSED;
+		sk->sk_state_change(sk);
 
 		bh_unlock_sock(sk);
 
@@ -133,6 +140,36 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
 	}
 
 	write_unlock(&local->sockets.lock);
+
+	/*
+	 * If we want to keep the listening sockets alive,
+	 * we don't touch the RAW ones.
+	 */
+	if (listen == true)
+		return;
+
+	write_lock(&local->raw_sockets.lock);
+
+	sk_for_each_safe(sk, tmp, &local->raw_sockets.head) {
+		llcp_sock = nfc_llcp_sock(sk);
+
+		bh_lock_sock(sk);
+
+		nfc_llcp_socket_purge(llcp_sock);
+
+		if (err)
+			sk->sk_err = err;
+		sk->sk_state = LLCP_CLOSED;
+		sk->sk_state_change(sk);
+
+		bh_unlock_sock(sk);
+
+		sock_orphan(sk);
+
+		sk_del_node_init(sk);
+	}
+
+	write_unlock(&local->raw_sockets.lock);
 }
 
 struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
@@ -142,20 +179,25 @@ struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
 	return local;
 }
 
-static void local_release(struct kref *ref)
+static void local_cleanup(struct nfc_llcp_local *local, bool listen)
 {
-	struct nfc_llcp_local *local;
-
-	local = container_of(ref, struct nfc_llcp_local, ref);
-
-	list_del(&local->list);
-	nfc_llcp_socket_release(local, false);
+	nfc_llcp_socket_release(local, listen, ENXIO);
 	del_timer_sync(&local->link_timer);
 	skb_queue_purge(&local->tx_queue);
 	cancel_work_sync(&local->tx_work);
 	cancel_work_sync(&local->rx_work);
 	cancel_work_sync(&local->timeout_work);
 	kfree_skb(local->rx_pending);
+}
+
+static void local_release(struct kref *ref)
+{
+	struct nfc_llcp_local *local;
+
+	local = container_of(ref, struct nfc_llcp_local, ref);
+
+	list_del(&local->list);
+	local_cleanup(local, false);
 	kfree(local);
 }
 
@@ -1348,7 +1390,7 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
 		return;
 
 	/* Close and purge all existing sockets */
-	nfc_llcp_socket_release(local, true);
+	nfc_llcp_socket_release(local, true, 0);
 }
 
 void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
@@ -1427,6 +1469,8 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev)
 		return;
 	}
 
+	local_cleanup(local, false);
+
 	nfc_llcp_local_put(local);
 }
 
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 5332751943a9..5c7cdf3f2a83 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -278,6 +278,8 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
 
 		pr_debug("Returning sk state %d\n", sk->sk_state);
 
+		sk_acceptq_removed(parent);
+
 		return sk;
 	}
 
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index ac2defeeba83..d4d5363c7ba7 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -58,7 +58,7 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
 
 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
-					+ ETH_HLEN, VLAN_HLEN, 0));
+					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
 
 	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
 	*current_tci = vhdr->h_vlan_TCI;
@@ -115,7 +115,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla
 
 		if (skb->ip_summed == CHECKSUM_COMPLETE)
 			skb->csum = csum_add(skb->csum, csum_partial(skb->data
-					+ ETH_HLEN, VLAN_HLEN, 0));
+					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
 
 	}
 	__vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e87a26506dba..a4b724708a1a 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -394,6 +394,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
 
 	skb_copy_and_csum_dev(skb, nla_data(nla));
 
+	genlmsg_end(user_skb, upcall);
 	err = genlmsg_unicast(net, user_skb, upcall_info->portid);
 
 out:
@@ -1690,6 +1691,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	if (IS_ERR(vport))
 		goto exit_unlock;
 
+	err = 0;
 	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
 					 OVS_VPORT_CMD_NEW);
 	if (IS_ERR(reply)) {
@@ -1771,6 +1773,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	if (IS_ERR(reply))
 		goto exit_unlock;
 
+	err = 0;
 	ovs_dp_detach_port(vport);
 
 	genl_notify(reply, genl_info_net(info), info->snd_portid,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 20605ecf100b..fe0e4215c73d 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -482,7 +482,11 @@ static __be16 parse_ethertype(struct sk_buff *skb)
 		return htons(ETH_P_802_2);
 
 	__skb_pull(skb, sizeof(struct llc_snap_hdr));
-	return llc->ethertype;
+
+	if (ntohs(llc->ethertype) >= 1536)
+		return llc->ethertype;
+
+	return htons(ETH_P_802_2);
 }
 
 static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 670cbc3518de..2130d61c384a 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -43,8 +43,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 
 	/* Make our own copy of the packet. Otherwise we will mangle the
 	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
-	 * (No one comes after us, since we tell handle_bridge() that we took
-	 * the packet.) */
+	 */
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return;
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index ba717cc038b3..f6b8132ce4cb 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -325,8 +325,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
  * @skb: skb that was received
  *
  * Must be called with rcu_read_lock. The packet cannot be shared and
- * skb->data should point to the Ethernet header. The caller must have already
- * called compute_ip_summed() to initialize the checksumming fields.
+ * skb->data should point to the Ethernet header.
  */
 void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
 {
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index f7d34e7b6f81..5ead60550895 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -447,17 +447,21 @@ static int rsc_parse(struct cache_detail *cd,
 	else {
 		int N, i;
 
+		/*
+		 * NOTE: we skip uid_valid()/gid_valid() checks here:
+		 * instead, * -1 id's are later mapped to the
+		 * (export-specific) anonymous id by nfsd_setuser.
+		 *
+		 * (But supplementary gid's get no such special
+		 * treatment so are checked for validity here.)
+		 */
 		/* uid */
 		rsci.cred.cr_uid = make_kuid(&init_user_ns, id);
-		if (!uid_valid(rsci.cred.cr_uid))
-			goto out;
 
 		/* gid */
 		if (get_int(&mesg, &id))
 			goto out;
 		rsci.cred.cr_gid = make_kgid(&init_user_ns, id);
-		if (!gid_valid(rsci.cred.cr_gid))
-			goto out;
 
 		/* number of additional gid's */
 		if (get_int(&mesg, &N))
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index a0f48a51e14e..a9129f8d7070 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1175,6 +1175,7 @@ static struct file_system_type rpc_pipe_fs_type = {
 	.kill_sb = rpc_kill_sb,
 };
 MODULE_ALIAS_FS("rpc_pipefs");
+MODULE_ALIAS("rpc_pipefs");
 
 static void
 init_once(void *foo)
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index c1d8476b7692..3d02130828da 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -849,6 +849,14 @@ static void xs_tcp_close(struct rpc_xprt *xprt)
 	xs_tcp_shutdown(xprt);
 }
 
+static void xs_local_destroy(struct rpc_xprt *xprt)
+{
+	xs_close(xprt);
+	xs_free_peer_addresses(xprt);
+	xprt_free(xprt);
+	module_put(THIS_MODULE);
+}
+
 /**
  * xs_destroy - prepare to shutdown a transport
  * @xprt: doomed transport
@@ -862,10 +870,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
 
 	cancel_delayed_work_sync(&transport->connect_worker);
 
-	xs_close(xprt);
-	xs_free_peer_addresses(xprt);
-	xprt_free(xprt);
-	module_put(THIS_MODULE);
+	xs_local_destroy(xprt);
 }
 
 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
@@ -2482,7 +2487,7 @@ static struct rpc_xprt_ops xs_local_ops = {
 	.send_request = xs_local_send_request,
 	.set_retrans_timeout = xprt_set_retrans_timeout_def,
 	.close = xs_close,
-	.destroy = xs_destroy,
+	.destroy = xs_local_destroy,
 	.print_stats = xs_local_print_stats,
 };
 