author     Jiri Kosina <jkosina@suse.cz>    2010-06-16 12:08:13 -0400
committer  Jiri Kosina <jkosina@suse.cz>    2010-06-16 12:08:13 -0400
commit     f1bbbb6912662b9f6070c5bfc4ca9eb1f06a9d5b (patch)
tree       c2c130a74be25b0b2dff992e1a195e2728bdaadd /net
parent     fd0961ff67727482bb20ca7e8ea97b83e9de2ddb (diff)
parent     7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)
Merge branch 'master' into for-next
Diffstat (limited to 'net')
 74 files changed, 731 insertions(+), 398 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index bd537fc10254..50f58f5f1c34 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
         return NET_RX_DROP;
 
     if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-        goto drop;
+        skb->deliver_no_wcard = 1;
 
     skb->skb_iif = skb->dev->ifindex;
     __vlan_hwaccel_put_tag(skb, vlan_tci);
@@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
     struct sk_buff *p;
 
     if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-        goto drop;
+        skb->deliver_no_wcard = 1;
 
     skb->skb_iif = skb->dev->ifindex;
     __vlan_hwaccel_put_tag(skb, vlan_tci);
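These two hunks stop the VLAN hardware-accel receive path from dropping frames that arrived on an inactive bonding slave: the frame is only flagged, and the drop decision moves into __netif_receive_skb() (see the net/core/dev.c hunks below), which restricts flagged skbs to exact-match protocol handlers. A minimal kernel-style sketch of how a duplicate-sensitive handler might honor the flag (hypothetical function, not part of the patch, assuming the 2.6.35 packet_type API):

/*
 * Hypothetical sketch: a handler that is sensitive to duplicates
 * can drop flagged skbs itself, since the core no longer drops
 * them on inactive bond slaves.
 */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev)
{
    if (skb->deliver_no_wcard) {
        /* frame arrived on an inactive bonding slave */
        kfree_skb(skb);
        return NET_RX_DROP;
    }
    /* ... normal processing ... */
    return NET_RX_SUCCESS;
}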
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 55be90826f5f..529842677817 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -708,7 +708,8 @@ static int vlan_dev_init(struct net_device *dev)
     netif_carrier_off(dev);
 
     /* IFF_BROADCAST|IFF_MULTICAST; ??? */
-    dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
+    dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+                                     IFF_MASTER | IFF_SLAVE);
     dev->iflink = real_dev->ifindex;
     dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
                                      (1<<__LINK_STATE_DORMANT))) |
diff --git a/net/9p/client.c b/net/9p/client.c
index 0aa79faa9850..37c8da07a80b 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1321,7 +1321,8 @@ static int p9_client_statsize(struct p9_wstat *wst, int proto_version)
     if (wst->muid)
         ret += strlen(wst->muid);
 
-    if (proto_version == p9_proto_2000u) {
+    if ((proto_version == p9_proto_2000u) ||
+        (proto_version == p9_proto_2000L)) {
         ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */
         if (wst->extension)
             ret += strlen(wst->extension);
@@ -1364,3 +1365,70 @@ error:
     return err;
 }
 EXPORT_SYMBOL(p9_client_wstat);
+
+int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
+{
+    int err;
+    struct p9_req_t *req;
+    struct p9_client *clnt;
+
+    err = 0;
+    clnt = fid->clnt;
+
+    P9_DPRINTK(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid);
+
+    req = p9_client_rpc(clnt, P9_TSTATFS, "d", fid->fid);
+    if (IS_ERR(req)) {
+        err = PTR_ERR(req);
+        goto error;
+    }
+
+    err = p9pdu_readf(req->rc, clnt->proto_version, "ddqqqqqqd", &sb->type,
+        &sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail,
+        &sb->files, &sb->ffree, &sb->fsid, &sb->namelen);
+    if (err) {
+        p9pdu_dump(1, req->rc);
+        p9_free_req(clnt, req);
+        goto error;
+    }
+
+    P9_DPRINTK(P9_DEBUG_9P, "<<< RSTATFS fid %d type 0x%lx bsize %ld "
+        "blocks %llu bfree %llu bavail %llu files %llu ffree %llu "
+        "fsid %llu namelen %ld\n",
+        fid->fid, (long unsigned int)sb->type, (long int)sb->bsize,
+        sb->blocks, sb->bfree, sb->bavail, sb->files, sb->ffree,
+        sb->fsid, (long int)sb->namelen);
+
+    p9_free_req(clnt, req);
+error:
+    return err;
+}
+EXPORT_SYMBOL(p9_client_statfs);
+
+int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name)
+{
+    int err;
+    struct p9_req_t *req;
+    struct p9_client *clnt;
+
+    err = 0;
+    clnt = fid->clnt;
+
+    P9_DPRINTK(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n",
+            fid->fid, newdirfid->fid, name);
+
+    req = p9_client_rpc(clnt, P9_TRENAME, "dds", fid->fid,
+            newdirfid->fid, name);
+    if (IS_ERR(req)) {
+        err = PTR_ERR(req);
+        goto error;
+    }
+
+    P9_DPRINTK(P9_DEBUG_9P, "<<< RRENAME fid %d\n", fid->fid);
+
+    p9_free_req(clnt, req);
+error:
+    return err;
+}
+EXPORT_SYMBOL(p9_client_rename);
+
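The two new RPCs back the 9P2000.L statfs and rename operations. A hedged sketch of how a 9p filesystem's ->statfs() might consume p9_client_statfs() (the fid-lookup helper is an assumed name, not part of this patch):

/*
 * Hypothetical caller sketch: translate the RSTATFS reply into the
 * generic struct kstatfs; example_fid_lookup() is assumed.
 */
static int example_statfs(struct dentry *dentry, struct kstatfs *buf)
{
    struct p9_fid *fid = example_fid_lookup(dentry);    /* assumed helper */
    struct p9_rstatfs rs;
    int err;

    err = p9_client_statfs(fid, &rs);
    if (err)
        return err;

    buf->f_type    = rs.type;
    buf->f_bsize   = rs.bsize;
    buf->f_blocks  = rs.blocks;
    buf->f_bfree   = rs.bfree;
    buf->f_bavail  = rs.bavail;
    buf->f_files   = rs.files;
    buf->f_ffree   = rs.ffree;
    buf->f_namelen = rs.namelen;
    return 0;
}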
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index e7541d5b0118..149f82160130 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -341,7 +341,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
             }
             break;
         case '?':
-            if (proto_version != p9_proto_2000u)
+            if ((proto_version != p9_proto_2000u) &&
+                (proto_version != p9_proto_2000L))
                 return 0;
             break;
         default:
@@ -393,7 +394,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
             const char *sptr = va_arg(ap, const char *);
             int16_t len = 0;
             if (sptr)
-                len = MIN(strlen(sptr), USHORT_MAX);
+                len = MIN(strlen(sptr), USHRT_MAX);
 
             errcode = p9pdu_writef(pdu, proto_version,
                             "w", len);
@@ -488,7 +489,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
             }
             break;
         case '?':
-            if (proto_version != p9_proto_2000u)
+            if ((proto_version != p9_proto_2000u) &&
+                (proto_version != p9_proto_2000L))
                 return 0;
             break;
         default:
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 7eb78ecc1618..dcfbe99ff81c 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -137,7 +137,7 @@ static void req_done(struct virtqueue *vq)
 
     P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
 
-    while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) {
+    while ((rc = virtqueue_get_buf(chan->vq, &len)) != NULL) {
         P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
         P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
         req = p9_tag_lookup(chan->client, rc->tag);
@@ -209,13 +209,13 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 
     req->status = REQ_STATUS_SENT;
 
-    if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
+    if (virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
         P9_DPRINTK(P9_DEBUG_TRANS,
             "9p debug: virtio rpc add_buf returned failure");
         return -EIO;
     }
 
-    chan->vq->vq_ops->kick(chan->vq);
+    virtqueue_kick(chan->vq);
 
     P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
     return 0;
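This hunk tracks the 2.6.35 virtio API change that folded the vq_ops indirection into first-class virtqueue_*() calls. A minimal kernel-style sketch of the producer/consumer shape (hypothetical driver functions, assuming the same API as the hunk above):

/* 'token' is whatever cookie the driver wants back on completion. */
static int example_send(struct virtqueue *vq, struct scatterlist *sg,
                        unsigned int out, unsigned int in, void *token)
{
    if (virtqueue_add_buf(vq, sg, out, in, token) < 0)
        return -ENOSPC;        /* ring full: caller retries later */
    virtqueue_kick(vq);        /* notify the device */
    return 0;
}

static void example_reap(struct virtqueue *vq)
{
    unsigned int len;
    void *token;

    /* completed buffers come back with the written byte count in len */
    while ((token = virtqueue_get_buf(vq, &len)) != NULL)
        ; /* ... complete the request identified by token ... */
}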
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index cd1daf6008bd..ed651786f16b 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -2,10 +2,8 @@
 # CAIF net configurations
 #
 
-#menu "CAIF Support"
-comment "CAIF Support"
 menuconfig CAIF
-    tristate "Enable CAIF support"
+    tristate "CAIF support"
     select CRC_CCITT
     default n
     ---help---
@@ -45,4 +43,3 @@ config CAIF_NETDEV
     If unsure say Y.
 
 endif
-#endmenu
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index c3a70c5c893a..3d0e09584fae 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -60,7 +60,7 @@ struct debug_fs_counter {
     atomic_t num_rx_flow_off;
     atomic_t num_rx_flow_on;
 };
-struct debug_fs_counter cnt;
+static struct debug_fs_counter cnt;
 #define dbfs_atomic_inc(v) atomic_inc(v)
 #define dbfs_atomic_dec(v) atomic_dec(v)
 #else
@@ -128,17 +128,17 @@ static void caif_read_unlock(struct sock *sk)
     mutex_unlock(&cf_sk->readlock);
 }
 
-int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
+static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
 {
     /* A quarter of full buffer is used a low water mark */
     return cf_sk->sk.sk_rcvbuf / 4;
 }
 
-void caif_flow_ctrl(struct sock *sk, int mode)
+static void caif_flow_ctrl(struct sock *sk, int mode)
 {
     struct caifsock *cf_sk;
     cf_sk = container_of(sk, struct caifsock, sk);
-    if (cf_sk->layer.dn)
+    if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
         cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
 }
 
@@ -146,7 +146,7 @@ void caif_flow_ctrl(struct sock *sk, int mode)
  * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
  * not dropped, but CAIF is sending flow off instead.
  */
-int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
     int err;
     int skb_len;
@@ -162,9 +162,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
             atomic_read(&cf_sk->sk.sk_rmem_alloc),
             sk_rcvbuf_lowwater(cf_sk));
         set_rx_flow_off(cf_sk);
-        if (cf_sk->layer.dn)
-            cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
-                        CAIF_MODEMCMD_FLOW_OFF_REQ);
+        dbfs_atomic_inc(&cnt.num_rx_flow_off);
+        caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
     }
 
     err = sk_filter(sk, skb);
@@ -175,9 +174,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
         trace_printk("CAIF: %s():"
             " sending flow OFF due to rmem_schedule\n",
             __func__);
-        if (cf_sk->layer.dn)
-            cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
-                        CAIF_MODEMCMD_FLOW_OFF_REQ);
+        dbfs_atomic_inc(&cnt.num_rx_flow_off);
+        caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
     }
     skb->dev = NULL;
     skb_set_owner_r(skb, sk);
@@ -285,65 +283,51 @@ static void caif_check_flow_release(struct sock *sk)
 {
     struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
 
-    if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL)
-        return;
     if (rx_flow_is_on(cf_sk))
         return;
 
     if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
             dbfs_atomic_inc(&cnt.num_rx_flow_on);
             set_rx_flow_on(cf_sk);
-            cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
-                        CAIF_MODEMCMD_FLOW_ON_REQ);
+            caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
     }
 }
+
 /*
- * Copied from sock.c:sock_queue_rcv_skb(), and added check that user buffer
- * has sufficient size.
+ * Copied from unix_dgram_recvmsg, but removed credit checks,
+ * changed locking, address handling and added MSG_TRUNC.
  */
-
 static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
-                struct msghdr *m, size_t buf_len, int flags)
+                struct msghdr *m, size_t len, int flags)
 
 {
     struct sock *sk = sock->sk;
     struct sk_buff *skb;
-    int ret = 0;
-    int len;
+    int ret;
+    int copylen;
 
-    if (unlikely(!buf_len))
-        return -EINVAL;
+    ret = -EOPNOTSUPP;
+    if (m->msg_flags&MSG_OOB)
+        goto read_error;
 
     skb = skb_recv_datagram(sk, flags, 0 , &ret);
     if (!skb)
         goto read_error;
-
-    len = skb->len;
-
-    if (skb && skb->len > buf_len && !(flags & MSG_PEEK)) {
-        len = buf_len;
-        /*
-         * Push skb back on receive queue if buffer too small.
-         * This has a built-in race where multi-threaded receive
-         * may get packet in wrong order, but multiple read does
-         * not really guarantee ordered delivery anyway.
-         * Let's optimize for speed without taking locks.
-         */
-
-        skb_queue_head(&sk->sk_receive_queue, skb);
-        ret = -EMSGSIZE;
-        goto read_error;
+    copylen = skb->len;
+    if (len < copylen) {
+        m->msg_flags |= MSG_TRUNC;
+        copylen = len;
     }
 
-    ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len);
+    ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
     if (ret)
-        goto read_error;
+        goto out_free;
 
+    ret = (flags & MSG_TRUNC) ? skb->len : copylen;
+out_free:
     skb_free_datagram(sk, skb);
-
     caif_check_flow_release(sk);
-
-    return len;
+    return ret;
 
 read_error:
     return ret;
@@ -920,17 +904,17 @@ wait_connect:
     timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
 
     release_sock(sk);
-    err = wait_event_interruptible_timeout(*sk_sleep(sk),
+    err = -ERESTARTSYS;
+    timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
             sk->sk_state != CAIF_CONNECTING,
             timeo);
     lock_sock(sk);
-    if (err < 0)
+    if (timeo < 0)
         goto out; /* -ERESTARTSYS */
-    if (err == 0 && sk->sk_state != CAIF_CONNECTED) {
-        err = -ETIMEDOUT;
-        goto out;
-    }
 
+    err = -ETIMEDOUT;
+    if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
+        goto out;
     if (sk->sk_state != CAIF_CONNECTED) {
         sock->state = SS_UNCONNECTED;
         err = sock_error(sk);
@@ -945,7 +929,6 @@ out:
     return err;
 }
 
-
 /*
  * caif_release() - Disconnect a CAIF Socket
  * Copied and modified af_irda.c:irda_release().
@@ -1019,10 +1002,6 @@ static unsigned int caif_poll(struct file *file,
         (sk->sk_shutdown & RCV_SHUTDOWN))
         mask |= POLLIN | POLLRDNORM;
 
-    /* Connection-based need to check for termination and startup */
-    if (sk->sk_state == CAIF_DISCONNECTED)
-        mask |= POLLHUP;
-
     /*
      * we set writable also when the other side has shut down the
      * connection. This prevents stuck sockets.
@@ -1194,7 +1173,7 @@ static struct net_proto_family caif_family_ops = {
     .owner = THIS_MODULE,
 };
 
-int af_caif_init(void)
+static int af_caif_init(void)
 {
     int err = sock_register(&caif_family_ops);
     if (!err)
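The recvmsg rewrite gives AF_CAIF seqpacket sockets standard datagram truncation semantics: an undersized buffer no longer yields -EMSGSIZE with the skb requeued; instead the tail is discarded, MSG_TRUNC is raised in msg_flags, and passing MSG_TRUNC in flags returns the full datagram length. A small userspace sketch of what callers can now rely on (illustrative helper, not from the patch):

#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

ssize_t read_datagram(int fd, void *buf, size_t buflen, int *truncated)
{
    struct iovec iov = { .iov_base = buf, .iov_len = buflen };
    struct msghdr msg;
    ssize_t n;

    memset(&msg, 0, sizeof(msg));
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    n = recvmsg(fd, &msg, 0);
    if (n >= 0 && truncated)
        *truncated = !!(msg.msg_flags & MSG_TRUNC);  /* tail was dropped */
    return n;
}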
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 0ffe1e1ce901..fcfda98a5e6d 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -44,13 +44,14 @@ struct cflayer *cfctrl_create(void)
     dev_info.id = 0xff;
     memset(this, 0, sizeof(*this));
     cfsrvl_init(&this->serv, 0, &dev_info);
-    spin_lock_init(&this->info_list_lock);
     atomic_set(&this->req_seq_no, 1);
     atomic_set(&this->rsp_seq_no, 1);
     this->serv.layer.receive = cfctrl_recv;
     sprintf(this->serv.layer.name, "ctrl");
     this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
     spin_lock_init(&this->loop_linkid_lock);
+    spin_lock_init(&this->info_list_lock);
+    INIT_LIST_HEAD(&this->list);
     this->loop_linkid = 1;
     return &this->serv.layer;
 }
@@ -112,20 +113,10 @@ bool cfctrl_req_eq(struct cfctrl_request_info *r1,
 void cfctrl_insert_req(struct cfctrl *ctrl,
                struct cfctrl_request_info *req)
 {
-    struct cfctrl_request_info *p;
     spin_lock(&ctrl->info_list_lock);
-    req->next = NULL;
     atomic_inc(&ctrl->req_seq_no);
     req->sequence_no = atomic_read(&ctrl->req_seq_no);
-    if (ctrl->first_req == NULL) {
-        ctrl->first_req = req;
-        spin_unlock(&ctrl->info_list_lock);
-        return;
-    }
-    p = ctrl->first_req;
-    while (p->next != NULL)
-        p = p->next;
-    p->next = req;
+    list_add_tail(&req->list, &ctrl->list);
     spin_unlock(&ctrl->info_list_lock);
 }
 
@@ -133,46 +124,28 @@ void cfctrl_insert_req(struct cfctrl *ctrl,
 struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
                           struct cfctrl_request_info *req)
 {
-    struct cfctrl_request_info *p;
-    struct cfctrl_request_info *ret;
+    struct cfctrl_request_info *p, *tmp, *first;
 
     spin_lock(&ctrl->info_list_lock);
-    if (ctrl->first_req == NULL) {
-        spin_unlock(&ctrl->info_list_lock);
-        return NULL;
-    }
-
-    if (cfctrl_req_eq(req, ctrl->first_req)) {
-        ret = ctrl->first_req;
-        caif_assert(ctrl->first_req);
-        atomic_set(&ctrl->rsp_seq_no,
-                 ctrl->first_req->sequence_no);
-        ctrl->first_req = ctrl->first_req->next;
-        spin_unlock(&ctrl->info_list_lock);
-        return ret;
-    }
+    first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list);
 
-    p = ctrl->first_req;
-
-    while (p->next != NULL) {
-        if (cfctrl_req_eq(req, p->next)) {
-            pr_warning("CAIF: %s(): Requests are not "
+    list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
+        if (cfctrl_req_eq(req, p)) {
+            if (p != first)
+                pr_warning("CAIF: %s(): Requests are not "
                     "received in order\n",
                     __func__);
-            ret = p->next;
-            atomic_set(&ctrl->rsp_seq_no,
-                    p->next->sequence_no);
-            p->next = p->next->next;
-            spin_unlock(&ctrl->info_list_lock);
-            return ret;
+
+            atomic_set(&ctrl->rsp_seq_no,
+                     p->sequence_no);
+            list_del(&p->list);
+            goto out;
         }
-        p = p->next;
     }
+    p = NULL;
+out:
     spin_unlock(&ctrl->info_list_lock);
-
-    pr_warning("CAIF: %s(): Request does not match\n",
-           __func__);
-    return NULL;
+    return p;
 }
 
 struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
@@ -388,31 +361,18 @@ void cfctrl_getstartreason_req(struct cflayer *layer)
 
 void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
 {
-    struct cfctrl_request_info *p, *req;
+    struct cfctrl_request_info *p, *tmp;
     struct cfctrl *ctrl = container_obj(layr);
     spin_lock(&ctrl->info_list_lock);
+    pr_warning("CAIF: %s(): enter\n", __func__);
 
-    if (ctrl->first_req == NULL) {
-        spin_unlock(&ctrl->info_list_lock);
-        return;
-    }
-
-    if (ctrl->first_req->client_layer == adap_layer) {
-
-        req = ctrl->first_req;
-        ctrl->first_req = ctrl->first_req->next;
-        kfree(req);
-    }
-
-    p = ctrl->first_req;
-    while (p != NULL && p->next != NULL) {
-        if (p->next->client_layer == adap_layer) {
-
-            req = p->next;
-            p->next = p->next->next;
-            kfree(p->next);
+    list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
+        if (p->client_layer == adap_layer) {
+            pr_warning("CAIF: %s(): cancel req :%d\n", __func__,
+                    p->sequence_no);
+            list_del(&p->list);
+            kfree(p);
         }
-        p = p->next;
     }
 
     spin_unlock(&ctrl->info_list_lock);
@@ -634,7 +594,7 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
     case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
     case CAIF_CTRLCMD_FLOW_OFF_IND:
         spin_lock(&this->info_list_lock);
-        if (this->first_req != NULL) {
+        if (!list_empty(&this->list)) {
             pr_debug("CAIF: %s(): Received flow off in "
                 "control layer", __func__);
         }
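The cfctrl conversion replaces a hand-rolled singly linked list (which, visibly in the removed cancel path, freed the wrong node: p->next was kfree'd after already being re-pointed past the unlinked entry) with the stock list_head primitives. A minimal kernel-style sketch of the idiom it adopts (illustrative types, not from the patch):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
    int id;
    struct list_head list;        /* linkage embedded in the node */
};

static LIST_HEAD(items);

static void remove_matching(int id)
{
    struct item *p, *tmp;

    /* the _safe variant caches the successor, so deleting and
     * freeing the current node inside the loop is safe */
    list_for_each_entry_safe(p, tmp, &items, list) {
        if (p->id == id) {
            list_del(&p->list);
            kfree(p);
        }
    }
}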
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 7372f27f1d32..80c8d332b258 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -174,10 +174,11 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
     spin_lock(&muxl->receive_lock);
     up = get_up(muxl, id);
     if (up == NULL)
-        return NULL;
+        goto out;
     memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
     list_del(&up->node);
     cfsrvl_put(up);
+out:
     spin_unlock(&muxl->receive_lock);
     return up;
 }
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 83fff2ff6658..a6fdf899741a 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -238,6 +238,7 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
     struct sk_buff *lastskb;
     u8 *to;
     const u8 *data = data2;
+    int ret;
     if (unlikely(is_erronous(pkt)))
         return -EPROTO;
     if (unlikely(skb_headroom(skb) < len)) {
@@ -246,9 +247,10 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
     }
 
     /* Make sure data is writable */
-    if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
+    ret = skb_cow_data(skb, 0, &lastskb);
+    if (unlikely(ret < 0)) {
         PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n");
-        return -EPROTO;
+        return ret;
     }
 
     to = skb_push(skb, len);
@@ -316,6 +318,8 @@ EXPORT_SYMBOL(cfpkt_setlen);
 struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
 {
     struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
+    if (!pkt)
+        return NULL;
     if (unlikely(data != NULL))
         cfpkt_add_body(pkt, data, len);
     return pkt;
@@ -344,12 +348,13 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
 
     if (dst->tail + neededtailspace > dst->end) {
         /* Create a dumplicate of 'dst' with more tail space */
+        struct cfpkt *tmppkt;
         dstlen = skb_headlen(dst);
         createlen = dstlen + neededtailspace;
-        tmp = pkt_to_skb(
-            cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX));
-        if (!tmp)
+        tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX);
+        if (tmppkt == NULL)
             return NULL;
+        tmp = pkt_to_skb(tmppkt);
         skb_set_tail_pointer(tmp, dstlen);
         tmp->len = dstlen;
         memcpy(tmp->data, dst->data, dstlen);
@@ -368,6 +373,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
 {
     struct sk_buff *skb2;
     struct sk_buff *skb = pkt_to_skb(pkt);
+    struct cfpkt *tmppkt;
     u8 *split = skb->data + pos;
     u16 len2nd = skb_tail_pointer(skb) - split;
 
@@ -381,9 +387,12 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
     }
 
     /* Create a new packet for the second part of the data */
-    skb2 = pkt_to_skb(
-        cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
-                 PKT_PREFIX));
+    tmppkt = cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
+                  PKT_PREFIX);
+    if (tmppkt == NULL)
+        return NULL;
+    skb2 = pkt_to_skb(tmppkt);
+
 
     if (skb2 == NULL)
         return NULL;
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index cd2830fec935..fd27b172fb5d 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
     if (!cfsrvl_ready(service, &ret))
         return ret;
 
-    if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+    if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
         pr_err("CAIF: %s():Packet too large - size=%d\n",
             __func__, cfpkt_getlen(pkt));
         return -EOVERFLOW;
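The one-character change here (repeated in cfveil.c below) fixes a classic precedence bug: logical NOT binds tighter than the comparison, so the old guard compared 0 or 1 against the limit and could never fire. A standalone demonstration:

#include <assert.h>

int main(void)
{
    int len = 9000, max = 4096;

    assert((!len > max) == 0);    /* old guard: (!9000) > 4096 -> 0 > 4096, never true */
    assert((len > max) == 1);     /* fixed guard: fires for oversized packets */
    return 0;
}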
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 06029ea2da2f..965c5baace40 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -59,14 +59,18 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
     u8 stx = CFSERL_STX;
     int ret;
     u16 expectlen = 0;
+
     caif_assert(newpkt != NULL);
     spin_lock(&layr->sync);
 
     if (layr->incomplete_frm != NULL) {
-
         layr->incomplete_frm =
             cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
         pkt = layr->incomplete_frm;
+        if (pkt == NULL) {
+            spin_unlock(&layr->sync);
+            return -ENOMEM;
+        }
     } else {
         pkt = newpkt;
     }
@@ -154,7 +158,6 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
         if (layr->usestx) {
             if (tail_pkt != NULL)
                 pkt = cfpkt_append(pkt, tail_pkt, 0);
-
             /* Start search for next STX if frame failed */
             continue;
         } else {
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index aff31f34528f..6e5b7079a684 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -123,6 +123,12 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
             struct caif_payload_info *info;
             u8 flow_off = SRVL_FLOW_OFF;
             pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
+            if (!pkt) {
+                pr_warning("CAIF: %s(): Out of memory\n",
+                    __func__);
+                return -ENOMEM;
+            }
+
             if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
                 pr_err("CAIF: %s(): Packet is erroneous!\n",
                     __func__);
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 0fd827f49491..e04f7d964e83 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
         return ret;
     caif_assert(layr->dn != NULL);
     caif_assert(layr->dn->transmit != NULL);
-    if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+    if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
         pr_warning("CAIF: %s(): Packet too large - size=%d\n",
             __func__, cfpkt_getlen(pkt));
         return -EOVERFLOW;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e0097531417a..f5b6f43a4c2e 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -229,15 +229,17 @@ EXPORT_SYMBOL(skb_free_datagram);
 
 void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
 {
+    bool slow;
+
     if (likely(atomic_read(&skb->users) == 1))
         smp_rmb();
     else if (likely(!atomic_dec_and_test(&skb->users)))
         return;
 
-    lock_sock_bh(sk);
+    slow = lock_sock_fast(sk);
     skb_orphan(skb);
     sk_mem_reclaim_partial(sk);
-    unlock_sock_bh(sk);
+    unlock_sock_fast(sk, slow);
 
     /* skb is now orphaned, can be freed outside of locked section */
     __kfree_skb(skb);
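lock_sock_bh() gives way here to the lock_sock_fast()/unlock_sock_fast() pair introduced in this merge window: the fast path keeps only the spinlock when the socket is not owned by user context, and the returned bool tells the unlock side which variant to undo. A minimal kernel-style sketch of the calling convention (hypothetical function, mirroring the usage in the hunk above):

static void example_reclaim(struct sock *sk)
{
    bool slow = lock_sock_fast(sk);    /* true if the slow path was taken */

    sk_mem_reclaim_partial(sk);        /* short, non-sleeping critical section */
    unlock_sock_fast(sk, slow);        /* must be told which lock to release */
}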
diff --git a/net/core/dev.c b/net/core/dev.c
index d273e4e3ecdc..2b3bf53bc687 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -954,18 +954,22 @@ int dev_alloc_name(struct net_device *dev, const char *name)
 }
 EXPORT_SYMBOL(dev_alloc_name);
 
-static int dev_get_valid_name(struct net *net, const char *name, char *buf,
-                  bool fmt)
+static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
 {
+    struct net *net;
+
+    BUG_ON(!dev_net(dev));
+    net = dev_net(dev);
+
     if (!dev_valid_name(name))
         return -EINVAL;
 
     if (fmt && strchr(name, '%'))
-        return __dev_alloc_name(net, name, buf);
+        return dev_alloc_name(dev, name);
     else if (__dev_get_by_name(net, name))
         return -EEXIST;
-    else if (buf != name)
-        strlcpy(buf, name, IFNAMSIZ);
+    else if (dev->name != name)
+        strlcpy(dev->name, name, IFNAMSIZ);
 
     return 0;
 }
@@ -997,7 +1001,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
 
     memcpy(oldname, dev->name, IFNAMSIZ);
 
-    err = dev_get_valid_name(net, newname, dev->name, 1);
+    err = dev_get_valid_name(dev, newname, 1);
     if (err < 0)
         return err;
 
@@ -2249,11 +2253,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
     if (skb_rx_queue_recorded(skb)) {
         u16 index = skb_get_rx_queue(skb);
         if (unlikely(index >= dev->num_rx_queues)) {
-            if (net_ratelimit()) {
-                pr_warning("%s received packet on queue "
-                    "%u, but number of RX queues is %u\n",
-                    dev->name, index, dev->num_rx_queues);
-            }
+            WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+                "on queue %u, but number of RX queues is %u\n",
+                dev->name, index, dev->num_rx_queues);
             goto done;
         }
         rxqueue = dev->_rx + index;
@@ -2421,10 +2423,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
     if (skb_queue_len(&sd->input_pkt_queue)) {
 enqueue:
         __skb_queue_tail(&sd->input_pkt_queue, skb);
-#ifdef CONFIG_RPS
-        *qtail = sd->input_queue_head +
-                skb_queue_len(&sd->input_pkt_queue);
-#endif
+        input_queue_tail_incr_save(sd, qtail);
         rps_unlock(sd);
         local_irq_restore(flags);
         return NET_RX_SUCCESS;
@@ -2794,7 +2793,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
     struct net_device *orig_dev;
     struct net_device *master;
     struct net_device *null_or_orig;
-    struct net_device *null_or_bond;
+    struct net_device *orig_or_bond;
     int ret = NET_RX_DROP;
     __be16 type;
 
@@ -2811,13 +2810,24 @@ static int __netif_receive_skb(struct sk_buff *skb)
     if (!skb->skb_iif)
         skb->skb_iif = skb->dev->ifindex;
 
+    /*
+     * bonding note: skbs received on inactive slaves should only
+     * be delivered to pkt handlers that are exact matches. Also
+     * the deliver_no_wcard flag will be set. If packet handlers
+     * are sensitive to duplicate packets these skbs will need to
+     * be dropped at the handler. The vlan accel path may have
+     * already set the deliver_no_wcard flag.
+     */
     null_or_orig = NULL;
     orig_dev = skb->dev;
     master = ACCESS_ONCE(orig_dev->master);
-    if (master) {
-        if (skb_bond_should_drop(skb, master))
+    if (skb->deliver_no_wcard)
+        null_or_orig = orig_dev;
+    else if (master) {
+        if (skb_bond_should_drop(skb, master)) {
+            skb->deliver_no_wcard = 1;
             null_or_orig = orig_dev; /* deliver only exact match */
-        else
+        } else
             skb->dev = master;
     }
 
@@ -2867,10 +2877,10 @@ ncls:
      * device that may have registered for a specific ptype. The
      * handler may have to adjust skb->dev and orig_dev.
      */
-    null_or_bond = NULL;
+    orig_or_bond = orig_dev;
     if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
         (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
-        null_or_bond = vlan_dev_real_dev(skb->dev);
+        orig_or_bond = vlan_dev_real_dev(skb->dev);
     }
 
     type = skb->protocol;
@@ -2878,7 +2888,7 @@ ncls:
             &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
         if (ptype->type == type && (ptype->dev == null_or_orig ||
              ptype->dev == skb->dev || ptype->dev == orig_dev ||
-             ptype->dev == null_or_bond)) {
+             ptype->dev == orig_or_bond)) {
             if (pt_prev)
                 ret = deliver_skb(skb, pt_prev, orig_dev);
             pt_prev = ptype;
@@ -2959,7 +2969,7 @@ static void flush_backlog(void *arg)
         if (skb->dev == dev) {
             __skb_unlink(skb, &sd->input_pkt_queue);
             kfree_skb(skb);
-            input_queue_head_add(sd, 1);
+            input_queue_head_incr(sd);
         }
     }
     rps_unlock(sd);
@@ -2968,6 +2978,7 @@ static void flush_backlog(void *arg)
         if (skb->dev == dev) {
             __skb_unlink(skb, &sd->process_queue);
             kfree_skb(skb);
+            input_queue_head_incr(sd);
         }
     }
 }
@@ -3323,18 +3334,20 @@ static int process_backlog(struct napi_struct *napi, int quota)
         while ((skb = __skb_dequeue(&sd->process_queue))) {
             local_irq_enable();
             __netif_receive_skb(skb);
-            if (++work >= quota)
-                return work;
             local_irq_disable();
+            input_queue_head_incr(sd);
+            if (++work >= quota) {
+                local_irq_enable();
+                return work;
+            }
         }
 
         rps_lock(sd);
         qlen = skb_queue_len(&sd->input_pkt_queue);
-        if (qlen) {
-            input_queue_head_add(sd, qlen);
+        if (qlen)
             skb_queue_splice_tail_init(&sd->input_pkt_queue,
                            &sd->process_queue);
-        }
+
         if (qlen < quota - work) {
             /*
              * Inline a custom version of __napi_complete().
@@ -4960,7 +4973,7 @@ int register_netdevice(struct net_device *dev)
         }
     }
 
-    ret = dev_get_valid_name(net, dev->name, dev->name, 0);
+    ret = dev_get_valid_name(dev, dev->name, 0);
     if (ret)
         goto err_uninit;
 
@@ -5558,7 +5571,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
         /* We get here if we can't use the current device name */
         if (!pat)
             goto out;
-        if (dev_get_valid_name(net, pat, dev->name, 1))
+        if (dev_get_valid_name(dev, pat, 1))
             goto out;
     }
 
@@ -5661,12 +5674,14 @@ static int dev_cpu_callback(struct notifier_block *nfb,
     local_irq_enable();
 
     /* Process offline CPU's input_pkt_queue */
-    while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+    while ((skb = __skb_dequeue(&oldsd->process_queue))) {
         netif_rx(skb);
-        input_queue_head_add(oldsd, 1);
+        input_queue_head_incr(oldsd);
    }
-    while ((skb = __skb_dequeue(&oldsd->process_queue)))
+    while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
         netif_rx(skb);
+        input_queue_head_incr(oldsd);
+    }
 
     return NOTIFY_OK;
 }
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index cf208d8042b1..ad41529fb60f 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -172,12 +172,12 @@ out:
     return;
 }
 
-static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
+static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
 {
     trace_drop_common(skb, location);
 }
 
-static void trace_napi_poll_hit(struct napi_struct *napi)
+static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi)
 {
     struct dm_hw_stat_delta *new_stat;
 
@@ -225,12 +225,12 @@ static int set_all_monitor_traces(int state)
 
     switch (state) {
     case TRACE_ON:
-        rc |= register_trace_kfree_skb(trace_kfree_skb_hit);
-        rc |= register_trace_napi_poll(trace_napi_poll_hit);
+        rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
+        rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
         break;
     case TRACE_OFF:
-        rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit);
-        rc |= unregister_trace_napi_poll(trace_napi_poll_hit);
+        rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
+        rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);
 
         tracepoint_synchronize_unregister();
 
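The probe signatures grow a leading void * because the tracepoint API now threads a per-registration data cookie through to each probe; register/unregister take the cookie as a second argument. A minimal kernel-style sketch of the new convention (hypothetical probe, mirroring the calls in this hunk):

static void my_kfree_skb_probe(void *data, struct sk_buff *skb, void *location)
{
    /* 'data' is the cookie passed at registration time (NULL here) */
}

static int my_attach(void)
{
    return register_trace_kfree_skb(my_kfree_skb_probe, NULL);
}

static void my_detach(void)
{
    unregister_trace_kfree_skb(my_kfree_skb_probe, NULL);
    tracepoint_synchronize_unregister();    /* wait out in-flight probes */
}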
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cf8e70392fe0..785e5276a300 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);
 
 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
 
 static void est_timer(unsigned long arg)
 {
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
               struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
     est->last_packets = bstats->packets;
     est->avpps = rate_est->pps<<10;
 
+    spin_lock(&est_tree_lock);
     if (!elist[idx].timer.function) {
         INIT_LIST_HEAD(&elist[idx].list);
         setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
     list_add_rcu(&est->list, &elist[idx].list);
     gen_add_node(est);
+    spin_unlock(&est_tree_lock);
 
     return 0;
 }
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
  *
 * Removes the rate estimator specified by &bstats and &rate_est.
  *
- * NOTE: Called under rtnl_mutex
 */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
             struct gnet_stats_rate_est *rate_est)
 {
     struct gen_estimator *e;
 
+    spin_lock(&est_tree_lock);
     while ((e = gen_find_node(bstats, rate_est))) {
         rb_erase(&e->node, &est_root);
 
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
         list_del_rcu(&e->list);
         call_rcu(&e->e_rcu, __gen_kill_estimator);
     }
+    spin_unlock(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
               const struct gnet_stats_rate_est *rate_est)
 {
+    bool res;
+
     ASSERT_RTNL();
 
-    return gen_find_node(bstats, rate_est) != NULL;
+    spin_lock(&est_tree_lock);
+    res = gen_find_node(bstats, rate_est) != NULL;
+    spin_unlock(&est_tree_lock);
+
+    return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
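est_root was previously protected only by the rtnl mutex (the NOTE comments removed above promised as much); the new est_tree_lock lets the rbtree be walked and mutated from paths that do not hold it. A minimal kernel-style sketch of the pattern (illustrative names; example_find_node is an assumed lookup helper):

static DEFINE_SPINLOCK(example_tree_lock);

static bool example_active(const void *key)
{
    bool res;

    spin_lock(&example_tree_lock);
    res = example_find_node(key) != NULL;    /* rbtree walk under the lock */
    spin_unlock(&example_tree_lock);

    return res;
}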
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index bff37908bd55..6ba1c0eece03 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -934,6 +934,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
                 kfree_skb(buff);
                 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
             }
+            skb_dst_force(skb);
             __skb_queue_tail(&neigh->arp_queue, skb);
         }
         rc = 1;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2ad68da418df..1dacd7ba8dbb 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
     end_time = ktime_now();
 
     pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
-    pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
+    pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
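The pktgen change schedules the next transmit from the intended wake-up time rather than the measured end time, so oversleeping in one period no longer pushes every later deadline back. A tiny standalone illustration of the two policies:

#include <stdint.h>

/* Deriving the next deadline from the intended wake-up time keeps the
 * period stable; deriving it from the measured end time lets every
 * oversleep accumulate as drift. */
uint64_t next_tx(uint64_t spin_until, uint64_t end_time,
                 uint64_t delay_ns, int drift_free)
{
    return (drift_free ? spin_until : end_time) + delay_ns;
}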
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index e4b9870e4706..1a2af24e9e3d 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -650,11 +650,12 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev) | |||
650 | if (dev->dev.parent && dev_is_pci(dev->dev.parent)) { | 650 | if (dev->dev.parent && dev_is_pci(dev->dev.parent)) { |
651 | 651 | ||
652 | int num_vfs = dev_num_vf(dev->dev.parent); | 652 | int num_vfs = dev_num_vf(dev->dev.parent); |
653 | size_t size = nlmsg_total_size(sizeof(struct nlattr)); | 653 | size_t size = nla_total_size(sizeof(struct nlattr)); |
654 | size += nlmsg_total_size(num_vfs * sizeof(struct nlattr)); | 654 | size += nla_total_size(num_vfs * sizeof(struct nlattr)); |
655 | size += num_vfs * (sizeof(struct ifla_vf_mac) + | 655 | size += num_vfs * |
656 | sizeof(struct ifla_vf_vlan) + | 656 | (nla_total_size(sizeof(struct ifla_vf_mac)) + |
657 | sizeof(struct ifla_vf_tx_rate)); | 657 | nla_total_size(sizeof(struct ifla_vf_vlan)) + |
658 | nla_total_size(sizeof(struct ifla_vf_tx_rate))); | ||
658 | return size; | 659 | return size; |
659 | } else | 660 | } else |
660 | return 0; | 661 | return 0; |
@@ -722,14 +723,13 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) | |||
722 | 723 | ||
723 | for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) { | 724 | for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) { |
724 | vf_port = nla_nest_start(skb, IFLA_VF_PORT); | 725 | vf_port = nla_nest_start(skb, IFLA_VF_PORT); |
725 | if (!vf_port) { | 726 | if (!vf_port) |
726 | nla_nest_cancel(skb, vf_ports); | 727 | goto nla_put_failure; |
727 | return -EMSGSIZE; | ||
728 | } | ||
729 | NLA_PUT_U32(skb, IFLA_PORT_VF, vf); | 728 | NLA_PUT_U32(skb, IFLA_PORT_VF, vf); |
730 | err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); | 729 | err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); |
730 | if (err == -EMSGSIZE) | ||
731 | goto nla_put_failure; | ||
731 | if (err) { | 732 | if (err) { |
732 | nla_put_failure: | ||
733 | nla_nest_cancel(skb, vf_port); | 733 | nla_nest_cancel(skb, vf_port); |
734 | continue; | 734 | continue; |
735 | } | 735 | } |
@@ -739,6 +739,10 @@ nla_put_failure: | |||
739 | nla_nest_end(skb, vf_ports); | 739 | nla_nest_end(skb, vf_ports); |
740 | 740 | ||
741 | return 0; | 741 | return 0; |
742 | |||
743 | nla_put_failure: | ||
744 | nla_nest_cancel(skb, vf_ports); | ||
745 | return -EMSGSIZE; | ||
742 | } | 746 | } |
743 | 747 | ||
744 | static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) | 748 | static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) |
@@ -753,7 +757,7 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) | |||
753 | err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb); | 757 | err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb); |
754 | if (err) { | 758 | if (err) { |
755 | nla_nest_cancel(skb, port_self); | 759 | nla_nest_cancel(skb, port_self); |
756 | return err; | 760 | return (err == -EMSGSIZE) ? err : 0; |
757 | } | 761 | } |
758 | 762 | ||
759 | nla_nest_end(skb, port_self); | 763 | nla_nest_end(skb, port_self); |
@@ -1199,8 +1203,10 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, | |||
1199 | struct nlattr *attr; | 1203 | struct nlattr *attr; |
1200 | int rem; | 1204 | int rem; |
1201 | nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { | 1205 | nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { |
1202 | if (nla_type(attr) != IFLA_VF_INFO) | 1206 | if (nla_type(attr) != IFLA_VF_INFO) { |
1207 | err = -EINVAL; | ||
1203 | goto errout; | 1208 | goto errout; |
1209 | } | ||
1204 | err = do_setvfinfo(dev, attr); | 1210 | err = do_setvfinfo(dev, attr); |
1205 | if (err < 0) | 1211 | if (err < 0) |
1206 | goto errout; | 1212 | goto errout; |
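Three related rtnetlink fixes above: the VF-info size estimate now uses nla_total_size(), which pads for an attribute header, instead of nlmsg_total_size(), which pads for a whole netlink message header and undersized the per-VF attributes; rtnl_vf_ports_fill() now cancels the outer vf_ports nest and returns -EMSGSIZE when a nested put fails, instead of cancelling only the inner nest; and do_setlink() reports -EINVAL explicitly when an attribute of the wrong type appears in IFLA_VFINFO_LIST. A runnable restatement of the sizing rule, assuming the usual netlink alignment constants:

    #include <stdio.h>

    /* userspace re-statement of the kernel's attribute padding rules */
    #define NLA_ALIGNTO     4
    #define NLA_ALIGN(len)  (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
    #define NLA_HDRLEN      NLA_ALIGN(4)   /* struct nlattr is 4 bytes */

    static int nla_total_size(int payload)
    {
            return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    int main(void)
    {
            /* each VF attribute needs header + padded payload, not bare payload */
            printf("payload 6 -> reserve %d bytes\n", nla_total_size(6));
            return 0;
    }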
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c543dd252433..9f07e749d7b1 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -482,22 +482,22 @@ EXPORT_SYMBOL(consume_skb); | |||
482 | * reference count dropping and cleans up the skbuff as if it | 482 | * reference count dropping and cleans up the skbuff as if it |
483 | * just came from __alloc_skb(). | 483 | * just came from __alloc_skb(). |
484 | */ | 484 | */ |
485 | int skb_recycle_check(struct sk_buff *skb, int skb_size) | 485 | bool skb_recycle_check(struct sk_buff *skb, int skb_size) |
486 | { | 486 | { |
487 | struct skb_shared_info *shinfo; | 487 | struct skb_shared_info *shinfo; |
488 | 488 | ||
489 | if (irqs_disabled()) | 489 | if (irqs_disabled()) |
490 | return 0; | 490 | return false; |
491 | 491 | ||
492 | if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) | 492 | if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) |
493 | return 0; | 493 | return false; |
494 | 494 | ||
495 | skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); | 495 | skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); |
496 | if (skb_end_pointer(skb) - skb->head < skb_size) | 496 | if (skb_end_pointer(skb) - skb->head < skb_size) |
497 | return 0; | 497 | return false; |
498 | 498 | ||
499 | if (skb_shared(skb) || skb_cloned(skb)) | 499 | if (skb_shared(skb) || skb_cloned(skb)) |
500 | return 0; | 500 | return false; |
501 | 501 | ||
502 | skb_release_head_state(skb); | 502 | skb_release_head_state(skb); |
503 | 503 | ||
@@ -509,7 +509,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size) | |||
509 | skb->data = skb->head + NET_SKB_PAD; | 509 | skb->data = skb->head + NET_SKB_PAD; |
510 | skb_reset_tail_pointer(skb); | 510 | skb_reset_tail_pointer(skb); |
511 | 511 | ||
512 | return 1; | 512 | return true; |
513 | } | 513 | } |
514 | EXPORT_SYMBOL(skb_recycle_check); | 514 | EXPORT_SYMBOL(skb_recycle_check); |
515 | 515 | ||
@@ -1406,12 +1406,13 @@ new_page: | |||
1406 | /* | 1406 | /* |
1407 | * Fill page/offset/length into spd, if it can hold more pages. | 1407 | * Fill page/offset/length into spd, if it can hold more pages. |
1408 | */ | 1408 | */ |
1409 | static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, | 1409 | static inline int spd_fill_page(struct splice_pipe_desc *spd, |
1410 | struct pipe_inode_info *pipe, struct page *page, | ||
1410 | unsigned int *len, unsigned int offset, | 1411 | unsigned int *len, unsigned int offset, |
1411 | struct sk_buff *skb, int linear, | 1412 | struct sk_buff *skb, int linear, |
1412 | struct sock *sk) | 1413 | struct sock *sk) |
1413 | { | 1414 | { |
1414 | if (unlikely(spd->nr_pages == PIPE_BUFFERS)) | 1415 | if (unlikely(spd->nr_pages == pipe->buffers)) |
1415 | return 1; | 1416 | return 1; |
1416 | 1417 | ||
1417 | if (linear) { | 1418 | if (linear) { |
@@ -1447,7 +1448,8 @@ static inline int __splice_segment(struct page *page, unsigned int poff, | |||
1447 | unsigned int plen, unsigned int *off, | 1448 | unsigned int plen, unsigned int *off, |
1448 | unsigned int *len, struct sk_buff *skb, | 1449 | unsigned int *len, struct sk_buff *skb, |
1449 | struct splice_pipe_desc *spd, int linear, | 1450 | struct splice_pipe_desc *spd, int linear, |
1450 | struct sock *sk) | 1451 | struct sock *sk, |
1452 | struct pipe_inode_info *pipe) | ||
1451 | { | 1453 | { |
1452 | if (!*len) | 1454 | if (!*len) |
1453 | return 1; | 1455 | return 1; |
@@ -1470,7 +1472,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff, | |||
1470 | /* the linear region may spread across several pages */ | 1472 | /* the linear region may spread across several pages */ |
1471 | flen = min_t(unsigned int, flen, PAGE_SIZE - poff); | 1473 | flen = min_t(unsigned int, flen, PAGE_SIZE - poff); |
1472 | 1474 | ||
1473 | if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk)) | 1475 | if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk)) |
1474 | return 1; | 1476 | return 1; |
1475 | 1477 | ||
1476 | __segment_seek(&page, &poff, &plen, flen); | 1478 | __segment_seek(&page, &poff, &plen, flen); |
@@ -1485,9 +1487,9 @@ static inline int __splice_segment(struct page *page, unsigned int poff, | |||
1485 | * Map linear and fragment data from the skb to spd. It reports failure if the | 1487 | * Map linear and fragment data from the skb to spd. It reports failure if the |
1486 | * pipe is full or if we already spliced the requested length. | 1488 | * pipe is full or if we already spliced the requested length. |
1487 | */ | 1489 | */ |
1488 | static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, | 1490 | static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, |
1489 | unsigned int *len, struct splice_pipe_desc *spd, | 1491 | unsigned int *offset, unsigned int *len, |
1490 | struct sock *sk) | 1492 | struct splice_pipe_desc *spd, struct sock *sk) |
1491 | { | 1493 | { |
1492 | int seg; | 1494 | int seg; |
1493 | 1495 | ||
@@ -1497,7 +1499,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, | |||
1497 | if (__splice_segment(virt_to_page(skb->data), | 1499 | if (__splice_segment(virt_to_page(skb->data), |
1498 | (unsigned long) skb->data & (PAGE_SIZE - 1), | 1500 | (unsigned long) skb->data & (PAGE_SIZE - 1), |
1499 | skb_headlen(skb), | 1501 | skb_headlen(skb), |
1500 | offset, len, skb, spd, 1, sk)) | 1502 | offset, len, skb, spd, 1, sk, pipe)) |
1501 | return 1; | 1503 | return 1; |
1502 | 1504 | ||
1503 | /* | 1505 | /* |
@@ -1507,7 +1509,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, | |||
1507 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; | 1509 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; |
1508 | 1510 | ||
1509 | if (__splice_segment(f->page, f->page_offset, f->size, | 1511 | if (__splice_segment(f->page, f->page_offset, f->size, |
1510 | offset, len, skb, spd, 0, sk)) | 1512 | offset, len, skb, spd, 0, sk, pipe)) |
1511 | return 1; | 1513 | return 1; |
1512 | } | 1514 | } |
1513 | 1515 | ||
@@ -1524,8 +1526,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
1524 | struct pipe_inode_info *pipe, unsigned int tlen, | 1526 | struct pipe_inode_info *pipe, unsigned int tlen, |
1525 | unsigned int flags) | 1527 | unsigned int flags) |
1526 | { | 1528 | { |
1527 | struct partial_page partial[PIPE_BUFFERS]; | 1529 | struct partial_page partial[PIPE_DEF_BUFFERS]; |
1528 | struct page *pages[PIPE_BUFFERS]; | 1530 | struct page *pages[PIPE_DEF_BUFFERS]; |
1529 | struct splice_pipe_desc spd = { | 1531 | struct splice_pipe_desc spd = { |
1530 | .pages = pages, | 1532 | .pages = pages, |
1531 | .partial = partial, | 1533 | .partial = partial, |
@@ -1535,12 +1537,16 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
1535 | }; | 1537 | }; |
1536 | struct sk_buff *frag_iter; | 1538 | struct sk_buff *frag_iter; |
1537 | struct sock *sk = skb->sk; | 1539 | struct sock *sk = skb->sk; |
1540 | int ret = 0; | ||
1541 | |||
1542 | if (splice_grow_spd(pipe, &spd)) | ||
1543 | return -ENOMEM; | ||
1538 | 1544 | ||
1539 | /* | 1545 | /* |
1540 | * __skb_splice_bits() only fails if the output has no room left, | 1546 | * __skb_splice_bits() only fails if the output has no room left, |
1541 | * so no point in going over the frag_list for the error case. | 1547 | * so no point in going over the frag_list for the error case. |
1542 | */ | 1548 | */ |
1543 | if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk)) | 1549 | if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) |
1544 | goto done; | 1550 | goto done; |
1545 | else if (!tlen) | 1551 | else if (!tlen) |
1546 | goto done; | 1552 | goto done; |
@@ -1551,14 +1557,12 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
1551 | skb_walk_frags(skb, frag_iter) { | 1557 | skb_walk_frags(skb, frag_iter) { |
1552 | if (!tlen) | 1558 | if (!tlen) |
1553 | break; | 1559 | break; |
1554 | if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk)) | 1560 | if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) |
1555 | break; | 1561 | break; |
1556 | } | 1562 | } |
1557 | 1563 | ||
1558 | done: | 1564 | done: |
1559 | if (spd.nr_pages) { | 1565 | if (spd.nr_pages) { |
1560 | int ret; | ||
1561 | |||
1562 | /* | 1566 | /* |
1563 | * Drop the socket lock, otherwise we have reverse | 1567 | * Drop the socket lock, otherwise we have reverse |
1564 | * locking dependencies between sk_lock and i_mutex | 1568 | * locking dependencies between sk_lock and i_mutex |
@@ -1571,10 +1575,10 @@ done: | |||
1571 | release_sock(sk); | 1575 | release_sock(sk); |
1572 | ret = splice_to_pipe(pipe, &spd); | 1576 | ret = splice_to_pipe(pipe, &spd); |
1573 | lock_sock(sk); | 1577 | lock_sock(sk); |
1574 | return ret; | ||
1575 | } | 1578 | } |
1576 | 1579 | ||
1577 | return 0; | 1580 | splice_shrink_spd(pipe, &spd); |
1581 | return ret; | ||
1578 | } | 1582 | } |
1579 | 1583 | ||
1580 | /** | 1584 | /** |
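The splice hunks above track the resizable-pipe rework in this merge window: pipes are no longer fixed at PIPE_BUFFERS (16) slots, so the helpers take the pipe_inode_info and compare against pipe->buffers at run time, while skb_splice_bits() sizes its on-stack arrays for the default (PIPE_DEF_BUFFERS) and grows the descriptor when the pipe is larger. The resulting call pattern, reduced to a sketch (names from the hunks; error handling trimmed):

    /* sketch: grow/use/shrink a splice descriptor for a resizable pipe */
    struct partial_page partial[PIPE_DEF_BUFFERS];
    struct page *pages[PIPE_DEF_BUFFERS];
    struct splice_pipe_desc spd = { .pages = pages, .partial = partial };
    int ret = 0;

    if (splice_grow_spd(pipe, &spd))   /* may allocate bigger arrays */
            return -ENOMEM;
    /* ... fill spd, bounded by pipe->buffers rather than a constant ... */
    splice_shrink_spd(pipe, &spd);     /* release the grown arrays */
    return ret;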
@@ -2718,6 +2722,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2718 | *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); | 2722 | *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); |
2719 | skb_shinfo(nskb)->frag_list = p; | 2723 | skb_shinfo(nskb)->frag_list = p; |
2720 | skb_shinfo(nskb)->gso_size = pinfo->gso_size; | 2724 | skb_shinfo(nskb)->gso_size = pinfo->gso_size; |
2725 | pinfo->gso_size = 0; | ||
2721 | skb_header_release(p); | 2726 | skb_header_release(p); |
2722 | nskb->prev = p; | 2727 | nskb->prev = p; |
2723 | 2728 | ||
@@ -2960,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) | |||
2960 | } | 2965 | } |
2961 | EXPORT_SYMBOL_GPL(skb_cow_data); | 2966 | EXPORT_SYMBOL_GPL(skb_cow_data); |
2962 | 2967 | ||
2968 | static void sock_rmem_free(struct sk_buff *skb) | ||
2969 | { | ||
2970 | struct sock *sk = skb->sk; | ||
2971 | |||
2972 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); | ||
2973 | } | ||
2974 | |||
2975 | /* | ||
2976 | * Note: We dont mem charge error packets (no sk_forward_alloc changes) | ||
2977 | */ | ||
2978 | int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) | ||
2979 | { | ||
2980 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= | ||
2981 | (unsigned)sk->sk_rcvbuf) | ||
2982 | return -ENOMEM; | ||
2983 | |||
2984 | skb_orphan(skb); | ||
2985 | skb->sk = sk; | ||
2986 | skb->destructor = sock_rmem_free; | ||
2987 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); | ||
2988 | |||
2989 | skb_queue_tail(&sk->sk_error_queue, skb); | ||
2990 | if (!sock_flag(sk, SOCK_DEAD)) | ||
2991 | sk->sk_data_ready(sk, skb->len); | ||
2992 | return 0; | ||
2993 | } | ||
2994 | EXPORT_SYMBOL(sock_queue_err_skb); | ||
2995 | |||
2963 | void skb_tstamp_tx(struct sk_buff *orig_skb, | 2996 | void skb_tstamp_tx(struct sk_buff *orig_skb, |
2964 | struct skb_shared_hwtstamps *hwtstamps) | 2997 | struct skb_shared_hwtstamps *hwtstamps) |
2965 | { | 2998 | { |
@@ -2991,7 +3024,9 @@ void skb_tstamp_tx(struct sk_buff *orig_skb, | |||
2991 | memset(serr, 0, sizeof(*serr)); | 3024 | memset(serr, 0, sizeof(*serr)); |
2992 | serr->ee.ee_errno = ENOMSG; | 3025 | serr->ee.ee_errno = ENOMSG; |
2993 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; | 3026 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; |
3027 | |||
2994 | err = sock_queue_err_skb(sk, skb); | 3028 | err = sock_queue_err_skb(sk, skb); |
3029 | |||
2995 | if (err) | 3030 | if (err) |
2996 | kfree_skb(skb); | 3031 | kfree_skb(skb); |
2997 | } | 3032 | } |
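The new sock_queue_err_skb() above charges the error skb against sk_rmem_alloc only, through the sock_rmem_free destructor, and deliberately leaves sk_forward_alloc untouched (per its comment), so it needs no socket lock; it rejects the skb with -ENOMEM when the receive budget is already consumed, and the caller keeps ownership on failure, as skb_tstamp_tx() shows. A usage sketch (the clone-and-fill part is illustrative):

    /* sketch: reporting an error skb to a socket's MSG_ERRQUEUE */
    struct sk_buff *skb = skb_clone(orig_skb, GFP_ATOMIC);
    if (!skb)
            return;
    /* ... fill struct sock_exterr_skb in skb->cb, as skb_tstamp_tx() does ... */
    if (sock_queue_err_skb(sk, skb))
            kfree_skb(skb);   /* queueing failed (-ENOMEM); we still own it */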
diff --git a/net/core/sock.c b/net/core/sock.c index bf88a167c8f2..2cf7f9f7e775 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -123,6 +123,7 @@ | |||
123 | #include <linux/net_tstamp.h> | 123 | #include <linux/net_tstamp.h> |
124 | #include <net/xfrm.h> | 124 | #include <net/xfrm.h> |
125 | #include <linux/ipsec.h> | 125 | #include <linux/ipsec.h> |
126 | #include <net/cls_cgroup.h> | ||
126 | 127 | ||
127 | #include <linux/filter.h> | 128 | #include <linux/filter.h> |
128 | 129 | ||
@@ -217,6 +218,11 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; | |||
217 | int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); | 218 | int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); |
218 | EXPORT_SYMBOL(sysctl_optmem_max); | 219 | EXPORT_SYMBOL(sysctl_optmem_max); |
219 | 220 | ||
221 | #if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP) | ||
222 | int net_cls_subsys_id = -1; | ||
223 | EXPORT_SYMBOL_GPL(net_cls_subsys_id); | ||
224 | #endif | ||
225 | |||
220 | static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) | 226 | static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) |
221 | { | 227 | { |
222 | struct timeval tv; | 228 | struct timeval tv; |
@@ -1050,6 +1056,17 @@ static void sk_prot_free(struct proto *prot, struct sock *sk) | |||
1050 | module_put(owner); | 1056 | module_put(owner); |
1051 | } | 1057 | } |
1052 | 1058 | ||
1059 | #ifdef CONFIG_CGROUPS | ||
1060 | void sock_update_classid(struct sock *sk) | ||
1061 | { | ||
1062 | u32 classid = task_cls_classid(current); | ||
1063 | |||
1064 | if (classid && classid != sk->sk_classid) | ||
1065 | sk->sk_classid = classid; | ||
1066 | } | ||
1067 | EXPORT_SYMBOL(sock_update_classid); | ||
1068 | #endif | ||
1069 | |||
1053 | /** | 1070 | /** |
1054 | * sk_alloc - All socket objects are allocated here | 1071 | * sk_alloc - All socket objects are allocated here |
1055 | * @net: the applicable net namespace | 1072 | * @net: the applicable net namespace |
@@ -1073,6 +1090,8 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, | |||
1073 | sock_lock_init(sk); | 1090 | sock_lock_init(sk); |
1074 | sock_net_set(sk, get_net(net)); | 1091 | sock_net_set(sk, get_net(net)); |
1075 | atomic_set(&sk->sk_wmem_alloc, 1); | 1092 | atomic_set(&sk->sk_wmem_alloc, 1); |
1093 | |||
1094 | sock_update_classid(sk); | ||
1076 | } | 1095 | } |
1077 | 1096 | ||
1078 | return sk; | 1097 | return sk; |
@@ -1988,6 +2007,39 @@ void release_sock(struct sock *sk) | |||
1988 | } | 2007 | } |
1989 | EXPORT_SYMBOL(release_sock); | 2008 | EXPORT_SYMBOL(release_sock); |
1990 | 2009 | ||
2010 | /** | ||
2011 | * lock_sock_fast - fast version of lock_sock | ||
2012 | * @sk: socket | ||
2013 | * | ||
2014 | * This version should be used for very small section, where process wont block | ||
2015 | * return false if fast path is taken | ||
2016 | * sk_lock.slock locked, owned = 0, BH disabled | ||
2017 | * return true if slow path is taken | ||
2018 | * sk_lock.slock unlocked, owned = 1, BH enabled | ||
2019 | */ | ||
2020 | bool lock_sock_fast(struct sock *sk) | ||
2021 | { | ||
2022 | might_sleep(); | ||
2023 | spin_lock_bh(&sk->sk_lock.slock); | ||
2024 | |||
2025 | if (!sk->sk_lock.owned) | ||
2026 | /* | ||
2027 | * Note : We must disable BH | ||
2028 | */ | ||
2029 | return false; | ||
2030 | |||
2031 | __lock_sock(sk); | ||
2032 | sk->sk_lock.owned = 1; | ||
2033 | spin_unlock(&sk->sk_lock.slock); | ||
2034 | /* | ||
2035 | * The sk_lock has mutex_lock() semantics here: | ||
2036 | */ | ||
2037 | mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); | ||
2038 | local_bh_enable(); | ||
2039 | return true; | ||
2040 | } | ||
2041 | EXPORT_SYMBOL(lock_sock_fast); | ||
2042 | |||
1991 | int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) | 2043 | int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) |
1992 | { | 2044 | { |
1993 | struct timeval tv; | 2045 | struct timeval tv; |
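Two additions in the sock.c diff: sk_alloc() now stamps every new socket with the current task's cls_cgroup classid via sock_update_classid(), and lock_sock_fast() gives short critical sections a cheap path: if nobody owns the lock it keeps the spinlock with BHs disabled and returns false, otherwise it takes the full mutex-like path and returns true. The caller must hand that flag back to the matching unlock; a sketch of the counterpart, assuming it mirrors the fast/slow split (the real helper lives in include/net/sock.h):

    /* sketch of the matching unlock; 'slow' is lock_sock_fast()'s return */
    static inline void unlock_sock_fast(struct sock *sk, bool slow)
    {
            if (slow)
                    release_sock(sk);   /* took the slow, mutex-like path */
            else
                    spin_unlock_bh(&sk->sk_lock.slock);  /* fast path */
    }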
diff --git a/net/dccp/input.c b/net/dccp/input.c index 58f7bc156850..6beb6a7d6fba 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
@@ -124,9 +124,9 @@ static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb) | |||
124 | return queued; | 124 | return queued; |
125 | } | 125 | } |
126 | 126 | ||
127 | static u8 dccp_reset_code_convert(const u8 code) | 127 | static u16 dccp_reset_code_convert(const u8 code) |
128 | { | 128 | { |
129 | const u8 error_code[] = { | 129 | const u16 error_code[] = { |
130 | [DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */ | 130 | [DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */ |
131 | [DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */ | 131 | [DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */ |
132 | [DCCP_RESET_CODE_ABORTED] = ECONNRESET, | 132 | [DCCP_RESET_CODE_ABORTED] = ECONNRESET, |
@@ -148,7 +148,7 @@ static u8 dccp_reset_code_convert(const u8 code) | |||
148 | 148 | ||
149 | static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb) | 149 | static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb) |
150 | { | 150 | { |
151 | u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code); | 151 | u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code); |
152 | 152 | ||
153 | sk->sk_err = err; | 153 | sk->sk_err = err; |
154 | 154 | ||
diff --git a/net/dccp/options.c b/net/dccp/options.c index 1b08cae9c65b..07395f861d35 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c | |||
@@ -296,7 +296,7 @@ static inline u8 dccp_ndp_len(const u64 ndp) | |||
296 | { | 296 | { |
297 | if (likely(ndp <= 0xFF)) | 297 | if (likely(ndp <= 0xFF)) |
298 | return 1; | 298 | return 1; |
299 | return likely(ndp <= USHORT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6); | 299 | return likely(ndp <= USHRT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6); |
300 | } | 300 | } |
301 | 301 | ||
302 | int dccp_insert_option(struct sock *sk, struct sk_buff *skb, | 302 | int dccp_insert_option(struct sock *sk, struct sk_buff *skb, |
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c index 3d803a1b9fb6..1627ef2e8522 100644 --- a/net/ieee802154/wpan-class.c +++ b/net/ieee802154/wpan-class.c | |||
@@ -147,13 +147,15 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size) | |||
147 | struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size, | 147 | struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size, |
148 | GFP_KERNEL); | 148 | GFP_KERNEL); |
149 | 149 | ||
150 | if (!phy) | ||
151 | goto out; | ||
150 | mutex_lock(&wpan_phy_mutex); | 152 | mutex_lock(&wpan_phy_mutex); |
151 | phy->idx = wpan_phy_idx++; | 153 | phy->idx = wpan_phy_idx++; |
152 | if (unlikely(!wpan_phy_idx_valid(phy->idx))) { | 154 | if (unlikely(!wpan_phy_idx_valid(phy->idx))) { |
153 | wpan_phy_idx--; | 155 | wpan_phy_idx--; |
154 | mutex_unlock(&wpan_phy_mutex); | 156 | mutex_unlock(&wpan_phy_mutex); |
155 | kfree(phy); | 157 | kfree(phy); |
156 | return NULL; | 158 | goto out; |
157 | } | 159 | } |
158 | mutex_unlock(&wpan_phy_mutex); | 160 | mutex_unlock(&wpan_phy_mutex); |
159 | 161 | ||
@@ -168,6 +170,9 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size) | |||
168 | phy->current_page = 0; /* for compatibility */ | 170 | phy->current_page = 0; /* for compatibility */ |
169 | 171 | ||
170 | return phy; | 172 | return phy; |
173 | |||
174 | out: | ||
175 | return NULL; | ||
171 | } | 176 | } |
172 | EXPORT_SYMBOL(wpan_phy_alloc); | 177 | EXPORT_SYMBOL(wpan_phy_alloc); |
173 | 178 | ||
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 8e3a1fd938ab..7c3a7d191249 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig | |||
@@ -303,7 +303,7 @@ config ARPD | |||
303 | If unsure, say N. | 303 | If unsure, say N. |
304 | 304 | ||
305 | config SYN_COOKIES | 305 | config SYN_COOKIES |
306 | bool "IP: TCP syncookie support (disabled per default)" | 306 | bool "IP: TCP syncookie support" |
307 | ---help--- | 307 | ---help--- |
308 | Normal TCP/IP networking is open to an attack known as "SYN | 308 | Normal TCP/IP networking is open to an attack known as "SYN |
309 | flooding". This denial-of-service attack prevents legitimate remote | 309 | flooding". This denial-of-service attack prevents legitimate remote |
@@ -328,13 +328,13 @@ config SYN_COOKIES | |||
328 | server is really overloaded. If this happens frequently better turn | 328 | server is really overloaded. If this happens frequently better turn |
329 | them off. | 329 | them off. |
330 | 330 | ||
331 | If you say Y here, note that SYN cookies aren't enabled by default; | 331 | If you say Y here, you can disable SYN cookies at run time by |
332 | you can enable them by saying Y to "/proc file system support" and | 332 | saying Y to "/proc file system support" and |
333 | "Sysctl support" below and executing the command | 333 | "Sysctl support" below and executing the command |
334 | 334 | ||
335 | echo 1 >/proc/sys/net/ipv4/tcp_syncookies | 335 | echo 0 > /proc/sys/net/ipv4/tcp_syncookies |
336 | 336 | ||
337 | at boot time after the /proc file system has been mounted. | 337 | after the /proc file system has been mounted. |
338 | 338 | ||
339 | If unsure, say N. | 339 | If unsure, say N. |
340 | 340 | ||
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 45889103b3e2..757f25eb9b4b 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -267,8 +267,10 @@ static void __net_exit ipmr_rules_exit(struct net *net) | |||
267 | { | 267 | { |
268 | struct mr_table *mrt, *next; | 268 | struct mr_table *mrt, *next; |
269 | 269 | ||
270 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) | 270 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { |
271 | list_del(&mrt->list); | ||
271 | kfree(mrt); | 272 | kfree(mrt); |
273 | } | ||
272 | fib_rules_unregister(net->ipv4.mr_rules_ops); | 274 | fib_rules_unregister(net->ipv4.mr_rules_ops); |
273 | } | 275 | } |
274 | #else | 276 | #else |
@@ -1911,7 +1913,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, | |||
1911 | struct rtattr *mp_head; | 1913 | struct rtattr *mp_head; |
1912 | 1914 | ||
1913 | /* If cache is unresolved, don't try to parse IIF and OIF */ | 1915 | /* If cache is unresolved, don't try to parse IIF and OIF */ |
1914 | if (c->mfc_parent > MAXVIFS) | 1916 | if (c->mfc_parent >= MAXVIFS) |
1915 | return -ENOENT; | 1917 | return -ENOENT; |
1916 | 1918 | ||
1917 | if (VIF_EXISTS(mrt, c->mfc_parent)) | 1919 | if (VIF_EXISTS(mrt, c->mfc_parent)) |
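Two ipmr fixes above: table teardown now unlinks each mr_table with list_del() before freeing it (the old loop freed list members while leaving them chained), and the unresolved-cache test becomes >= MAXVIFS. Valid interface slots run 0..MAXVIFS-1 and unresolved entries carry mfc_parent == 65535, so the old '>' test let the boundary value MAXVIFS itself through to an out-of-bounds VIF_EXISTS lookup. A runnable demo of the off-by-one:

    #include <stdio.h>

    #define MAXVIFS 32

    /* valid slots are 0..MAXVIFS-1; unresolved entries use 65535 */
    static int parent_ok(unsigned int parent)
    {
            return parent < MAXVIFS;  /* '> MAXVIFS' as the rejection let 32 through */
    }

    int main(void)
    {
            printf("parent=31 -> %d (last valid slot)\n", parent_ok(31));
            printf("parent=32 -> %d (one past the array)\n", parent_ok(32));
            printf("parent=65535 -> %d (unresolved marker)\n", parent_ok(65535));
            return 0;
    }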
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 63958f3394a5..4b6c5ca610fc 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -336,7 +336,7 @@ ipt_do_table(struct sk_buff *skb, | |||
336 | cpu = smp_processor_id(); | 336 | cpu = smp_processor_id(); |
337 | table_base = private->entries[cpu]; | 337 | table_base = private->entries[cpu]; |
338 | jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; | 338 | jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; |
339 | stackptr = &private->stackptr[cpu]; | 339 | stackptr = per_cpu_ptr(private->stackptr, cpu); |
340 | origptr = *stackptr; | 340 | origptr = *stackptr; |
341 | 341 | ||
342 | e = get_entry(table_base, private->hook_entry[hook]); | 342 | e = get_entry(table_base, private->hook_entry[hook]); |
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 5c24db4a3c91..9f6b22206c52 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -347,7 +347,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
347 | { .sport = th->dest, | 347 | { .sport = th->dest, |
348 | .dport = th->source } } }; | 348 | .dport = th->source } } }; |
349 | security_req_classify_flow(req, &fl); | 349 | security_req_classify_flow(req, &fl); |
350 | if (ip_route_output_key(&init_net, &rt, &fl)) { | 350 | if (ip_route_output_key(sock_net(sk), &rt, &fl)) { |
351 | reqsk_free(req); | 351 | reqsk_free(req); |
352 | goto out; | 352 | goto out; |
353 | } | 353 | } |
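The syncookies hunk is a namespace fix: cookie_v4_check() did its route lookup in init_net regardless of which namespace the listening socket lived in, so SYN-cookie connections inside a non-initial netns could resolve against the wrong routing table. sock_net(sk) fetches the socket's own namespace; its shape is roughly (a sketch; the real accessor compiles down to init_net when CONFIG_NET_NS is off):

    /* sketch: per-socket network namespace accessor */
    static inline struct net *sock_net(const struct sock *sk)
    {
            return read_pnet(&sk->sk_net);
    }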
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c index c209e054a634..377bc9349371 100644 --- a/net/ipv4/tcp_hybla.c +++ b/net/ipv4/tcp_hybla.c | |||
@@ -126,8 +126,8 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
126 | * calculate 2^fract in a <<7 value. | 126 | * calculate 2^fract in a <<7 value. |
127 | */ | 127 | */ |
128 | is_slowstart = 1; | 128 | is_slowstart = 1; |
129 | increment = ((1 << ca->rho) * hybla_fraction(rho_fractions)) | 129 | increment = ((1 << min(ca->rho, 16U)) * |
130 | - 128; | 130 | hybla_fraction(rho_fractions)) - 128; |
131 | } else { | 131 | } else { |
132 | /* | 132 | /* |
133 | * congestion avoidance | 133 | * congestion avoidance |
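The tcp_hybla fix above clamps the slow-start shift: rho grows with the path RTT and is unbounded, so (1 << ca->rho) could shift past 31 bits, which is undefined for a 32-bit quantity and wrecks the <<7 fixed-point increment. Capping the shift at 16 keeps the product with hybla_fraction() inside u32. A runnable illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int rho = 40;  /* plausible on a very long RTT path */

            /* 1u << 40 is undefined behaviour for 32-bit unsigned int;
             * clamping the shift keeps the fixed-point product in range */
            unsigned int mult = 1u << (rho < 16u ? rho : 16u);
            printf("clamped multiplier: %u\n", mult);
            return 0;
    }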
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3e6dafcb1071..548d575e6cc6 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2639,7 +2639,7 @@ static void DBGUNDO(struct sock *sk, const char *msg) | |||
2639 | if (sk->sk_family == AF_INET) { | 2639 | if (sk->sk_family == AF_INET) { |
2640 | printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", | 2640 | printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", |
2641 | msg, | 2641 | msg, |
2642 | &inet->daddr, ntohs(inet->dport), | 2642 | &inet->inet_daddr, ntohs(inet->inet_dport), |
2643 | tp->snd_cwnd, tcp_left_out(tp), | 2643 | tp->snd_cwnd, tcp_left_out(tp), |
2644 | tp->snd_ssthresh, tp->prior_ssthresh, | 2644 | tp->snd_ssthresh, tp->prior_ssthresh, |
2645 | tp->packets_out); | 2645 | tp->packets_out); |
@@ -2649,7 +2649,7 @@ static void DBGUNDO(struct sock *sk, const char *msg) | |||
2649 | struct ipv6_pinfo *np = inet6_sk(sk); | 2649 | struct ipv6_pinfo *np = inet6_sk(sk); |
2650 | printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", | 2650 | printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", |
2651 | msg, | 2651 | msg, |
2652 | &np->daddr, ntohs(inet->dport), | 2652 | &np->daddr, ntohs(inet->inet_dport), |
2653 | tp->snd_cwnd, tcp_left_out(tp), | 2653 | tp->snd_cwnd, tcp_left_out(tp), |
2654 | tp->snd_ssthresh, tp->prior_ssthresh, | 2654 | tp->snd_ssthresh, tp->prior_ssthresh, |
2655 | tp->packets_out); | 2655 | tp->packets_out); |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 202cf09c4cd4..fe193e53af44 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1555,6 +1555,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1555 | #endif | 1555 | #endif |
1556 | 1556 | ||
1557 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ | 1557 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ |
1558 | sock_rps_save_rxhash(sk, skb->rxhash); | ||
1558 | TCP_CHECK_TIMER(sk); | 1559 | TCP_CHECK_TIMER(sk); |
1559 | if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { | 1560 | if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { |
1560 | rsk = sk; | 1561 | rsk = sk; |
@@ -1579,7 +1580,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1579 | } | 1580 | } |
1580 | return 0; | 1581 | return 0; |
1581 | } | 1582 | } |
1582 | } | 1583 | } else |
1584 | sock_rps_save_rxhash(sk, skb->rxhash); | ||
1585 | |||
1583 | 1586 | ||
1584 | TCP_CHECK_TIMER(sk); | 1587 | TCP_CHECK_TIMER(sk); |
1585 | if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) { | 1588 | if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) { |
@@ -1672,8 +1675,6 @@ process: | |||
1672 | 1675 | ||
1673 | skb->dev = NULL; | 1676 | skb->dev = NULL; |
1674 | 1677 | ||
1675 | sock_rps_save_rxhash(sk, skb->rxhash); | ||
1676 | |||
1677 | bh_lock_sock_nested(sk); | 1678 | bh_lock_sock_nested(sk); |
1678 | ret = 0; | 1679 | ret = 0; |
1679 | if (!sock_owned_by_user(sk)) { | 1680 | if (!sock_owned_by_user(sk)) { |
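Moving sock_rps_save_rxhash() out of tcp_v4_rcv() and into tcp_v4_do_rcv() changes which packets refresh sk->sk_rxhash: the established fast path and packets handled by the socket itself still do, but packets that a listening socket hands off to a different (child) socket no longer stamp the listener's hash; presumably this keeps a listener's flow-steering state from being rewritten by every incoming SYN. The helper being relocated looks roughly like this (a sketch, assuming the CONFIG_RPS variant of the era):

    /* sketch: remember the rx hash so RFS can steer this flow */
    static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
    {
    #ifdef CONFIG_RPS
            if (unlikely(sk->sk_rxhash != rxhash)) {
                    sock_rps_reset_flow(sk);  /* retire the stale flow entry */
                    sk->sk_rxhash = rxhash;
            }
    #endif
    }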
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 9de6a698f91d..eec4ff456e33 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -633,9 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) | |||
633 | if (!inet->recverr) { | 633 | if (!inet->recverr) { |
634 | if (!harderr || sk->sk_state != TCP_ESTABLISHED) | 634 | if (!harderr || sk->sk_state != TCP_ESTABLISHED) |
635 | goto out; | 635 | goto out; |
636 | } else { | 636 | } else |
637 | ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); | 637 | ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); |
638 | } | 638 | |
639 | sk->sk_err = err; | 639 | sk->sk_err = err; |
640 | sk->sk_error_report(sk); | 640 | sk->sk_error_report(sk); |
641 | out: | 641 | out: |
@@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk) | |||
1063 | spin_unlock_bh(&rcvq->lock); | 1063 | spin_unlock_bh(&rcvq->lock); |
1064 | 1064 | ||
1065 | if (!skb_queue_empty(&list_kill)) { | 1065 | if (!skb_queue_empty(&list_kill)) { |
1066 | lock_sock_bh(sk); | 1066 | bool slow = lock_sock_fast(sk); |
1067 | |||
1067 | __skb_queue_purge(&list_kill); | 1068 | __skb_queue_purge(&list_kill); |
1068 | sk_mem_reclaim_partial(sk); | 1069 | sk_mem_reclaim_partial(sk); |
1069 | unlock_sock_bh(sk); | 1070 | unlock_sock_fast(sk, slow); |
1070 | } | 1071 | } |
1071 | return res; | 1072 | return res; |
1072 | } | 1073 | } |
@@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1123 | int peeked; | 1124 | int peeked; |
1124 | int err; | 1125 | int err; |
1125 | int is_udplite = IS_UDPLITE(sk); | 1126 | int is_udplite = IS_UDPLITE(sk); |
1127 | bool slow; | ||
1126 | 1128 | ||
1127 | /* | 1129 | /* |
1128 | * Check any passed addresses | 1130 | * Check any passed addresses |
@@ -1197,10 +1199,10 @@ out: | |||
1197 | return err; | 1199 | return err; |
1198 | 1200 | ||
1199 | csum_copy_err: | 1201 | csum_copy_err: |
1200 | lock_sock_bh(sk); | 1202 | slow = lock_sock_fast(sk); |
1201 | if (!skb_kill_datagram(sk, skb, flags)) | 1203 | if (!skb_kill_datagram(sk, skb, flags)) |
1202 | UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | 1204 | UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); |
1203 | unlock_sock_bh(sk); | 1205 | unlock_sock_fast(sk, slow); |
1204 | 1206 | ||
1205 | if (noblock) | 1207 | if (noblock) |
1206 | return -EAGAIN; | 1208 | return -EAGAIN; |
@@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb) | |||
1625 | 1627 | ||
1626 | void udp_destroy_sock(struct sock *sk) | 1628 | void udp_destroy_sock(struct sock *sk) |
1627 | { | 1629 | { |
1628 | lock_sock_bh(sk); | 1630 | bool slow = lock_sock_fast(sk); |
1629 | udp_flush_pending_frames(sk); | 1631 | udp_flush_pending_frames(sk); |
1630 | unlock_sock_bh(sk); | 1632 | unlock_sock_fast(sk, slow); |
1631 | } | 1633 | } |
1632 | 1634 | ||
1633 | /* | 1635 | /* |
@@ -1686,8 +1688,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, | |||
1686 | return -ENOPROTOOPT; | 1688 | return -ENOPROTOOPT; |
1687 | if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ | 1689 | if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ |
1688 | val = 8; | 1690 | val = 8; |
1689 | else if (val > USHORT_MAX) | 1691 | else if (val > USHRT_MAX) |
1690 | val = USHORT_MAX; | 1692 | val = USHRT_MAX; |
1691 | up->pcslen = val; | 1693 | up->pcslen = val; |
1692 | up->pcflag |= UDPLITE_SEND_CC; | 1694 | up->pcflag |= UDPLITE_SEND_CC; |
1693 | break; | 1695 | break; |
@@ -1700,8 +1702,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, | |||
1700 | return -ENOPROTOOPT; | 1702 | return -ENOPROTOOPT; |
1701 | if (val != 0 && val < 8) /* Avoid silly minimal values. */ | 1703 | if (val != 0 && val < 8) /* Avoid silly minimal values. */ |
1702 | val = 8; | 1704 | val = 8; |
1703 | else if (val > USHORT_MAX) | 1705 | else if (val > USHRT_MAX) |
1704 | val = USHORT_MAX; | 1706 | val = USHRT_MAX; |
1705 | up->pcrlen = val; | 1707 | up->pcrlen = val; |
1706 | up->pcflag |= UDPLITE_RECV_CC; | 1708 | up->pcflag |= UDPLITE_RECV_CC; |
1707 | break; | 1709 | break; |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index ce7992982557..03e62f94ff8e 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -483,7 +483,7 @@ route_done: | |||
483 | np->tclass, NULL, &fl, (struct rt6_info*)dst, | 483 | np->tclass, NULL, &fl, (struct rt6_info*)dst, |
484 | MSG_DONTWAIT, np->dontfrag); | 484 | MSG_DONTWAIT, np->dontfrag); |
485 | if (err) { | 485 | if (err) { |
486 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); | 486 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); |
487 | ip6_flush_pending_frames(sk); | 487 | ip6_flush_pending_frames(sk); |
488 | goto out_put; | 488 | goto out_put; |
489 | } | 489 | } |
@@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) | |||
565 | np->dontfrag); | 565 | np->dontfrag); |
566 | 566 | ||
567 | if (err) { | 567 | if (err) { |
568 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); | 568 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); |
569 | ip6_flush_pending_frames(sk); | 569 | ip6_flush_pending_frames(sk); |
570 | goto out_put; | 570 | goto out_put; |
571 | } | 571 | } |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index cd963f64e27c..89425af0684c 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -507,7 +507,7 @@ int ip6_forward(struct sk_buff *skb) | |||
507 | if (mtu < IPV6_MIN_MTU) | 507 | if (mtu < IPV6_MIN_MTU) |
508 | mtu = IPV6_MIN_MTU; | 508 | mtu = IPV6_MIN_MTU; |
509 | 509 | ||
510 | if (skb->len > mtu) { | 510 | if (skb->len > mtu && !skb_is_gso(skb)) { |
511 | /* Again, force OUTPUT device used as source address */ | 511 | /* Again, force OUTPUT device used as source address */ |
512 | skb->dev = dst->dev; | 512 | skb->dev = dst->dev; |
513 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 513 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
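The ip6_forward() hunk exempts GSO packets from the MTU check: such skbs are segmented into gso_size-sized frames further down the output path, so comparing the aggregate skb->len against the link MTU rejected forwarded traffic that would have fit after segmentation. The predicate is essentially (a sketch):

    /* nonzero gso_size marks an skb that will be segmented before xmit */
    static inline int skb_is_gso(const struct sk_buff *skb)
    {
            return skb_shinfo(skb)->gso_size;
    }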
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index bd9e7d3e9c8e..66078dad7fe8 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -120,7 +120,7 @@ static void mroute_clean_tables(struct mr6_table *mrt); | |||
120 | static void ipmr_expire_process(unsigned long arg); | 120 | static void ipmr_expire_process(unsigned long arg); |
121 | 121 | ||
122 | #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES | 122 | #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES |
123 | #define ip6mr_for_each_table(mrt, met) \ | 123 | #define ip6mr_for_each_table(mrt, net) \ |
124 | list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list) | 124 | list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list) |
125 | 125 | ||
126 | static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) | 126 | static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) |
@@ -254,8 +254,10 @@ static void __net_exit ip6mr_rules_exit(struct net *net) | |||
254 | { | 254 | { |
255 | struct mr6_table *mrt, *next; | 255 | struct mr6_table *mrt, *next; |
256 | 256 | ||
257 | list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) | 257 | list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) { |
258 | list_del(&mrt->list); | ||
258 | ip6mr_free_table(mrt); | 259 | ip6mr_free_table(mrt); |
260 | } | ||
259 | fib_rules_unregister(net->ipv6.mr6_rules_ops); | 261 | fib_rules_unregister(net->ipv6.mr6_rules_ops); |
260 | } | 262 | } |
261 | #else | 263 | #else |
@@ -2017,7 +2019,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, | |||
2017 | struct rtattr *mp_head; | 2019 | struct rtattr *mp_head; |
2018 | 2020 | ||
2019 | /* If cache is unresolved, don't try to parse IIF and OIF */ | 2021 | /* If cache is unresolved, don't try to parse IIF and OIF */ |
2020 | if (c->mf6c_parent > MAXMIFS) | 2022 | if (c->mf6c_parent >= MAXMIFS) |
2021 | return -ENOENT; | 2023 | return -ENOENT; |
2022 | 2024 | ||
2023 | if (MIF_EXISTS(mrt, c->mf6c_parent)) | 2025 | if (MIF_EXISTS(mrt, c->mf6c_parent)) |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 59f1881968c7..ab1622d7d409 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -1356,7 +1356,10 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) | |||
1356 | IPV6_TLV_PADN, 0 }; | 1356 | IPV6_TLV_PADN, 0 }; |
1357 | 1357 | ||
1358 | /* we assume size > sizeof(ra) here */ | 1358 | /* we assume size > sizeof(ra) here */ |
1359 | skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err); | 1359 | size += LL_ALLOCATED_SPACE(dev); |
1360 | /* limit our allocations to order-0 page */ | ||
1361 | size = min_t(int, size, SKB_MAX_ORDER(0, 0)); | ||
1362 | skb = sock_alloc_send_skb(sk, size, 1, &err); | ||
1360 | 1363 | ||
1361 | if (!skb) | 1364 | if (!skb) |
1362 | return NULL; | 1365 | return NULL; |
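The mld_newpack() change bounds its allocation: adding LL_ALLOCATED_SPACE(dev) to a large caller-supplied size could turn the skb into a high-order page request, which fails easily on fragmented systems; capping at SKB_MAX_ORDER(0, 0) keeps it within a single order-0 page, and MLD can spread records over multiple reports when the buffer is smaller. The macro is approximately (treat the exact form as an assumption):

    /* data that fits an order-ORDER page once skb_shared_info is carved out */
    #define SKB_MAX_ORDER(X, ORDER) \
            (((PAGE_SIZE << (ORDER)) - (X) - \
              sizeof(struct skb_shared_info)) & ~(SMP_CACHE_BYTES - 1))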
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 6f517bd83692..9d2d68f0e605 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -363,7 +363,7 @@ ip6t_do_table(struct sk_buff *skb, | |||
363 | cpu = smp_processor_id(); | 363 | cpu = smp_processor_id(); |
364 | table_base = private->entries[cpu]; | 364 | table_base = private->entries[cpu]; |
365 | jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; | 365 | jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; |
366 | stackptr = &private->stackptr[cpu]; | 366 | stackptr = per_cpu_ptr(private->stackptr, cpu); |
367 | origptr = *stackptr; | 367 | origptr = *stackptr; |
368 | 368 | ||
369 | e = get_entry(table_base, private->hook_entry[hook]); | 369 | e = get_entry(table_base, private->hook_entry[hook]); |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 294cbe8b0725..252d76199c41 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -814,7 +814,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk, | |||
814 | { | 814 | { |
815 | int flags = 0; | 815 | int flags = 0; |
816 | 816 | ||
817 | if (fl->oif || rt6_need_strict(&fl->fl6_dst)) | 817 | if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst)) |
818 | flags |= RT6_LOOKUP_F_IFACE; | 818 | flags |= RT6_LOOKUP_F_IFACE; |
819 | 819 | ||
820 | if (!ipv6_addr_any(&fl->fl6_src)) | 820 | if (!ipv6_addr_any(&fl->fl6_src)) |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 3d7a2c0b836a..87be58673b55 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -328,6 +328,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
328 | int err; | 328 | int err; |
329 | int is_udplite = IS_UDPLITE(sk); | 329 | int is_udplite = IS_UDPLITE(sk); |
330 | int is_udp4; | 330 | int is_udp4; |
331 | bool slow; | ||
331 | 332 | ||
332 | if (addr_len) | 333 | if (addr_len) |
333 | *addr_len=sizeof(struct sockaddr_in6); | 334 | *addr_len=sizeof(struct sockaddr_in6); |
@@ -424,7 +425,7 @@ out: | |||
424 | return err; | 425 | return err; |
425 | 426 | ||
426 | csum_copy_err: | 427 | csum_copy_err: |
427 | lock_sock_bh(sk); | 428 | slow = lock_sock_fast(sk); |
428 | if (!skb_kill_datagram(sk, skb, flags)) { | 429 | if (!skb_kill_datagram(sk, skb, flags)) { |
429 | if (is_udp4) | 430 | if (is_udp4) |
430 | UDP_INC_STATS_USER(sock_net(sk), | 431 | UDP_INC_STATS_USER(sock_net(sk), |
@@ -433,7 +434,7 @@ csum_copy_err: | |||
433 | UDP6_INC_STATS_USER(sock_net(sk), | 434 | UDP6_INC_STATS_USER(sock_net(sk), |
434 | UDP_MIB_INERRORS, is_udplite); | 435 | UDP_MIB_INERRORS, is_udplite); |
435 | } | 436 | } |
436 | unlock_sock_bh(sk); | 437 | unlock_sock_fast(sk, slow); |
437 | 438 | ||
438 | if (flags & MSG_DONTWAIT) | 439 | if (flags & MSG_DONTWAIT) |
439 | return -EAGAIN; | 440 | return -EAGAIN; |
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index c8b4599a752e..9637e45744fa 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -1619,7 +1619,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) | |||
1619 | save_message: | 1619 | save_message: |
1620 | save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); | 1620 | save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); |
1621 | if (!save_msg) | 1621 | if (!save_msg) |
1622 | return; | 1622 | goto out_unlock; |
1623 | save_msg->path = path; | 1623 | save_msg->path = path; |
1624 | save_msg->msg = *msg; | 1624 | save_msg->msg = *msg; |
1625 | 1625 | ||
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index fd8b28361a64..f28ad2cc8428 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -632,13 +632,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | |||
632 | iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), | 632 | iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), |
633 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 633 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); |
634 | if (!iucv_irq_data[cpu]) | 634 | if (!iucv_irq_data[cpu]) |
635 | return NOTIFY_BAD; | 635 | return notifier_from_errno(-ENOMEM); |
636 | |||
636 | iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), | 637 | iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), |
637 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 638 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); |
638 | if (!iucv_param[cpu]) { | 639 | if (!iucv_param[cpu]) { |
639 | kfree(iucv_irq_data[cpu]); | 640 | kfree(iucv_irq_data[cpu]); |
640 | iucv_irq_data[cpu] = NULL; | 641 | iucv_irq_data[cpu] = NULL; |
641 | return NOTIFY_BAD; | 642 | return notifier_from_errno(-ENOMEM); |
642 | } | 643 | } |
643 | iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), | 644 | iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), |
644 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); | 645 | GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); |
@@ -647,7 +648,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | |||
647 | iucv_param[cpu] = NULL; | 648 | iucv_param[cpu] = NULL; |
648 | kfree(iucv_irq_data[cpu]); | 649 | kfree(iucv_irq_data[cpu]); |
649 | iucv_irq_data[cpu] = NULL; | 650 | iucv_irq_data[cpu] = NULL; |
650 | return NOTIFY_BAD; | 651 | return notifier_from_errno(-ENOMEM); |
651 | } | 652 | } |
652 | break; | 653 | break; |
653 | case CPU_UP_CANCELED: | 654 | case CPU_UP_CANCELED: |
@@ -677,7 +678,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, | |||
677 | cpu_clear(cpu, cpumask); | 678 | cpu_clear(cpu, cpumask); |
678 | if (cpus_empty(cpumask)) | 679 | if (cpus_empty(cpumask)) |
679 | /* Can't offline last IUCV enabled cpu. */ | 680 | /* Can't offline last IUCV enabled cpu. */ |
680 | return NOTIFY_BAD; | 681 | return notifier_from_errno(-EINVAL); |
681 | smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); | 682 | smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); |
682 | if (cpus_empty(iucv_irq_cpumask)) | 683 | if (cpus_empty(iucv_irq_cpumask)) |
683 | smp_call_function_single(first_cpu(iucv_buffer_cpumask), | 684 | smp_call_function_single(first_cpu(iucv_buffer_cpumask), |
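The iucv hunks follow a tree-wide conversion of CPU-hotplug notifiers from the bare NOTIFY_BAD to notifier_from_errno(), which encodes a real errno into the notifier return value so callers can propagate -ENOMEM or -EINVAL instead of a generic failure. The encode/decode pair, as defined in include/linux/notifier.h (shown for reference, as a sketch):

    /* encode an errno into a NOTIFY_STOP-style return value */
    static inline int notifier_from_errno(int err)
    {
            if (err)
                    return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
            return NOTIFY_OK;
    }

    /* recover the (negative) errno on the caller's side */
    static inline int notifier_to_errno(int ret)
    {
            ret &= ~NOTIFY_STOP_MASK;
            return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
    }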
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index c163d0a149f4..98258b7341e3 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -332,14 +332,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
332 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | 332 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); |
333 | 333 | ||
334 | spin_unlock(&local->ampdu_lock); | 334 | spin_unlock(&local->ampdu_lock); |
335 | spin_unlock_bh(&sta->lock); | ||
336 | 335 | ||
337 | /* send an addBA request */ | 336 | /* prepare tid data */ |
338 | sta->ampdu_mlme.dialog_token_allocator++; | 337 | sta->ampdu_mlme.dialog_token_allocator++; |
339 | sta->ampdu_mlme.tid_tx[tid]->dialog_token = | 338 | sta->ampdu_mlme.tid_tx[tid]->dialog_token = |
340 | sta->ampdu_mlme.dialog_token_allocator; | 339 | sta->ampdu_mlme.dialog_token_allocator; |
341 | sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; | 340 | sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; |
342 | 341 | ||
342 | spin_unlock_bh(&sta->lock); | ||
343 | |||
344 | /* send AddBA request */ | ||
343 | ieee80211_send_addba_request(sdata, pubsta->addr, tid, | 345 | ieee80211_send_addba_request(sdata, pubsta->addr, tid, |
344 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, | 346 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, |
345 | sta->ampdu_mlme.tid_tx[tid]->ssn, | 347 | sta->ampdu_mlme.tid_tx[tid]->ssn, |
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 5d218c530a4e..32be11e4c4d9 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c | |||
@@ -5,7 +5,7 @@ | |||
5 | #include <linux/nl80211.h> | 5 | #include <linux/nl80211.h> |
6 | #include "ieee80211_i.h" | 6 | #include "ieee80211_i.h" |
7 | 7 | ||
8 | enum ieee80211_chan_mode | 8 | static enum ieee80211_chan_mode |
9 | __ieee80211_get_channel_mode(struct ieee80211_local *local, | 9 | __ieee80211_get_channel_mode(struct ieee80211_local *local, |
10 | struct ieee80211_sub_if_data *ignore) | 10 | struct ieee80211_sub_if_data *ignore) |
11 | { | 11 | { |
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 4f2271316650..9c1da0809160 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -349,7 +349,7 @@ static inline int drv_get_survey(struct ieee80211_local *local, int idx, | |||
349 | struct survey_info *survey) | 349 | struct survey_info *survey) |
350 | { | 350 | { |
351 | int ret = -EOPNOTSUPP; | 351 | int ret = -EOPNOTSUPP; |
352 | if (local->ops->conf_tx) | 352 | if (local->ops->get_survey) |
353 | ret = local->ops->get_survey(&local->hw, idx, survey); | 353 | ret = local->ops->get_survey(&local->hw, idx, survey); |
354 | /* trace_drv_get_survey(local, idx, survey, ret); */ | 354 | /* trace_drv_get_survey(local, idx, survey, ret); */ |
355 | return ret; | 355 | return ret; |
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 8d4b41787dcf..e8f6e3b252d8 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
@@ -140,7 +140,6 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) | |||
140 | struct ieee80211_sub_if_data, | 140 | struct ieee80211_sub_if_data, |
141 | u.ap); | 141 | u.ap); |
142 | 142 | ||
143 | key->conf.ap_addr = sdata->dev->dev_addr; | ||
144 | ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); | 143 | ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); |
145 | 144 | ||
146 | if (!ret) { | 145 | if (!ret) { |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 0839c4e8fd2e..f803f8b72a93 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1692,14 +1692,52 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1692 | rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); | 1692 | rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); |
1693 | break; | 1693 | break; |
1694 | case IEEE80211_STYPE_ACTION: | 1694 | case IEEE80211_STYPE_ACTION: |
1695 | if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) | 1695 | switch (mgmt->u.action.category) { |
1696 | case WLAN_CATEGORY_BACK: { | ||
1697 | struct ieee80211_local *local = sdata->local; | ||
1698 | int len = skb->len; | ||
1699 | struct sta_info *sta; | ||
1700 | |||
1701 | rcu_read_lock(); | ||
1702 | sta = sta_info_get(sdata, mgmt->sa); | ||
1703 | if (!sta) { | ||
1704 | rcu_read_unlock(); | ||
1705 | break; | ||
1706 | } | ||
1707 | |||
1708 | local_bh_disable(); | ||
1709 | |||
1710 | switch (mgmt->u.action.u.addba_req.action_code) { | ||
1711 | case WLAN_ACTION_ADDBA_REQ: | ||
1712 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1713 | sizeof(mgmt->u.action.u.addba_req))) | ||
1714 | break; | ||
1715 | ieee80211_process_addba_request(local, sta, mgmt, len); | ||
1716 | break; | ||
1717 | case WLAN_ACTION_ADDBA_RESP: | ||
1718 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1719 | sizeof(mgmt->u.action.u.addba_resp))) | ||
1720 | break; | ||
1721 | ieee80211_process_addba_resp(local, sta, mgmt, len); | ||
1722 | break; | ||
1723 | case WLAN_ACTION_DELBA: | ||
1724 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1725 | sizeof(mgmt->u.action.u.delba))) | ||
1726 | break; | ||
1727 | ieee80211_process_delba(sdata, sta, mgmt, len); | ||
1728 | break; | ||
1729 | } | ||
1730 | local_bh_enable(); | ||
1731 | rcu_read_unlock(); | ||
1696 | break; | 1732 | break; |
1697 | 1733 | } | |
1698 | ieee80211_sta_process_chanswitch(sdata, | 1734 | case WLAN_CATEGORY_SPECTRUM_MGMT: |
1699 | &mgmt->u.action.u.chan_switch.sw_elem, | 1735 | ieee80211_sta_process_chanswitch(sdata, |
1700 | (void *)ifmgd->associated->priv, | 1736 | &mgmt->u.action.u.chan_switch.sw_elem, |
1701 | rx_status->mactime); | 1737 | (void *)ifmgd->associated->priv, |
1702 | break; | 1738 | rx_status->mactime); |
1739 | break; | ||
1740 | } | ||
1703 | } | 1741 | } |
1704 | mutex_unlock(&ifmgd->mtx); | 1742 | mutex_unlock(&ifmgd->mtx); |
1705 | 1743 | ||
@@ -1722,9 +1760,45 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1722 | mutex_unlock(&ifmgd->mtx); | 1760 | mutex_unlock(&ifmgd->mtx); |
1723 | 1761 | ||
1724 | if (skb->len >= 24 + 2 /* mgmt + deauth reason */ && | 1762 | if (skb->len >= 24 + 2 /* mgmt + deauth reason */ && |
1725 | (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) | 1763 | (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) { |
1726 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); | 1764 | struct ieee80211_local *local = sdata->local; |
1765 | struct ieee80211_work *wk; | ||
1766 | |||
1767 | mutex_lock(&local->work_mtx); | ||
1768 | list_for_each_entry(wk, &local->work_list, list) { | ||
1769 | if (wk->sdata != sdata) | ||
1770 | continue; | ||
1771 | |||
1772 | if (wk->type != IEEE80211_WORK_ASSOC) | ||
1773 | continue; | ||
1774 | |||
1775 | if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN)) | ||
1776 | continue; | ||
1777 | if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN)) | ||
1778 | continue; | ||
1727 | 1779 | ||
1780 | /* | ||
1781 | * Printing the message only here means we can't | ||
1782 | * spuriously print it, but it also means that it | ||
1783 | * won't be printed when the frame comes in before | ||
1784 | * we even tried to associate or in similar cases. | ||
1785 | * | ||
1786 | * Ultimately, I suspect cfg80211 should print the | ||
1787 | * messages instead. | ||
1788 | */ | ||
1789 | printk(KERN_DEBUG | ||
1790 | "%s: deauthenticated from %pM (Reason: %u)\n", | ||
1791 | sdata->name, mgmt->bssid, | ||
1792 | le16_to_cpu(mgmt->u.deauth.reason_code)); | ||
1793 | |||
1794 | list_del_rcu(&wk->list); | ||
1795 | free_work(wk); | ||
1796 | break; | ||
1797 | } | ||
1798 | mutex_unlock(&local->work_mtx); | ||
1799 | |||
1800 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); | ||
1801 | } | ||
1728 | out: | 1802 | out: |
1729 | kfree_skb(skb); | 1803 | kfree_skb(skb); |
1730 | } | 1804 | } |
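Two behavioural changes in the mlme.c diff: station interfaces now handle block-ack category action frames (ADDBA request/response, DELBA) from the managed-mode work context, looking the peer up under rcu_read_lock() and disabling BHs around the aggregation state machines; and a deauth arriving while an association work item is still pending now unlinks and frees that work item, logging the reason code, before the event reaches cfg80211. The lookup-and-dispatch shape used above, reduced to its pattern (a sketch):

    rcu_read_lock();
    sta = sta_info_get(sdata, mgmt->sa);
    if (sta) {
            local_bh_disable();   /* aggregation state is BH-serialized */
            /* dispatch on mgmt->u.action.u.addba_req.action_code,
             * length-checking each variant before use */
            local_bh_enable();
    }
    rcu_read_unlock();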
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 6e2a7bcd8cb8..be9abc2e6348 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1818,17 +1818,26 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) | |||
1818 | return RX_CONTINUE; | 1818 | return RX_CONTINUE; |
1819 | 1819 | ||
1820 | if (ieee80211_is_back_req(bar->frame_control)) { | 1820 | if (ieee80211_is_back_req(bar->frame_control)) { |
1821 | struct { | ||
1822 | __le16 control, start_seq_num; | ||
1823 | } __packed bar_data; | ||
1824 | |||
1821 | if (!rx->sta) | 1825 | if (!rx->sta) |
1822 | return RX_DROP_MONITOR; | 1826 | return RX_DROP_MONITOR; |
1827 | |||
1828 | if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), | ||
1829 | &bar_data, sizeof(bar_data))) | ||
1830 | return RX_DROP_MONITOR; | ||
1831 | |||
1823 | spin_lock(&rx->sta->lock); | 1832 | spin_lock(&rx->sta->lock); |
1824 | tid = le16_to_cpu(bar->control) >> 12; | 1833 | tid = le16_to_cpu(bar_data.control) >> 12; |
1825 | if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) { | 1834 | if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) { |
1826 | spin_unlock(&rx->sta->lock); | 1835 | spin_unlock(&rx->sta->lock); |
1827 | return RX_DROP_MONITOR; | 1836 | return RX_DROP_MONITOR; |
1828 | } | 1837 | } |
1829 | tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; | 1838 | tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; |
1830 | 1839 | ||
1831 | start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; | 1840 | start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; |
1832 | 1841 | ||
1833 | /* reset session timer */ | 1842 | /* reset session timer */ |
1834 | if (tid_agg_rx->timeout) | 1843 | if (tid_agg_rx->timeout) |
@@ -1935,6 +1944,9 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
1935 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) | 1944 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) |
1936 | break; | 1945 | break; |
1937 | 1946 | ||
1947 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | ||
1948 | return ieee80211_sta_rx_mgmt(sdata, rx->skb); | ||
1949 | |||
1938 | switch (mgmt->u.action.u.addba_req.action_code) { | 1950 | switch (mgmt->u.action.u.addba_req.action_code) { |
1939 | case WLAN_ACTION_ADDBA_REQ: | 1951 | case WLAN_ACTION_ADDBA_REQ: |
1940 | if (len < (IEEE80211_MIN_ACTION_SIZE + | 1952 | if (len < (IEEE80211_MIN_ACTION_SIZE + |
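The BlockAckReq fix above guards against nonlinear skbs: the ieee80211_bar fields may lie beyond the linear head, so dereferencing bar->control directly could read unrelated memory or miss a truncated frame. skb_copy_bits() copies the two fields into a packed stack buffer and fails cleanly when the skb is too short; the pattern generalizes to any field that may span fragments (a sketch):

    /* safely extract fields that may span skb fragments */
    struct {
            __le16 control, start_seq_num;
    } __packed bar_data;

    if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
                      &bar_data, sizeof(bar_data)))
            return RX_DROP_MONITOR;  /* -EFAULT when the skb is too short */

    tid = le16_to_cpu(bar_data.control) >> 12;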
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 730197591ab5..ba9360a475b0 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -259,7 +259,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
259 | skb_queue_head_init(&sta->tx_filtered); | 259 | skb_queue_head_init(&sta->tx_filtered); |
260 | 260 | ||
261 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) | 261 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) |
262 | sta->last_seq_ctrl[i] = cpu_to_le16(USHORT_MAX); | 262 | sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); |
263 | 263 | ||
264 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG | 264 | #ifdef CONFIG_MAC80211_VERBOSE_DEBUG |
265 | printk(KERN_DEBUG "%s: Allocated STA %pM\n", | 265 | printk(KERN_DEBUG "%s: Allocated STA %pM\n", |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 48a5e80957f0..df9d45544ca5 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -145,7 +145,7 @@ enum plink_state { | |||
145 | /** | 145 | /** |
146 | * struct sta_ampdu_mlme - STA aggregation information. | 146 | * struct sta_ampdu_mlme - STA aggregation information. |
147 | * | 147 | * |
148 | * @tid_state_rx: TID's state in Rx session state machine. | 148 | * @tid_active_rx: TID's state in Rx session state machine. |
149 | * @tid_rx: aggregation info for Rx per TID | 149 | * @tid_rx: aggregation info for Rx per TID |
150 | * @tid_state_tx: TID's state in Tx session state machine. | 150 | * @tid_state_tx: TID's state in Tx session state machine. |
151 | * @tid_tx: aggregation info for Tx per TID | 151 | * @tid_tx: aggregation info for Tx per TID |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index b83c530c5e0a..eeeb8bc73982 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -424,6 +424,16 @@ __nf_conntrack_confirm(struct sk_buff *skb) | |||
424 | 424 | ||
425 | spin_lock_bh(&nf_conntrack_lock); | 425 | spin_lock_bh(&nf_conntrack_lock); |
426 | 426 | ||
427 | /* We have to check the DYING flag inside the lock to prevent | ||
428 | a race against nf_ct_get_next_corpse() possibly called from | ||
429 | user context, else we insert an already 'dead' hash, blocking | ||
430 | further use of that particular connection -JM */ | ||
431 | |||
432 | if (unlikely(nf_ct_is_dying(ct))) { | ||
433 | spin_unlock_bh(&nf_conntrack_lock); | ||
434 | return NF_ACCEPT; | ||
435 | } | ||
436 | |||
427 | /* See if there's one in the list already, including reverse: | 437 | /* See if there's one in the list already, including reverse: |
428 | NAT could have grabbed it without realizing, since we're | 438 | NAT could have grabbed it without realizing, since we're |
429 | not in the hash. If there is, we lost race. */ | 439 | not in the hash. If there is, we lost race. */ |
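As the new comment says, the DYING test only works inside nf_conntrack_lock: tested before it, nf_ct_get_next_corpse() could mark the entry dead in the window before insertion, and a dead entry would land in the hash. A minimal userspace sketch of the check-under-lock pattern (pthreads, illustrative names):

#include <pthread.h>
#include <stdbool.h>

struct tracked {
        pthread_mutex_t lock;   /* assumed initialized */
        bool dying;             /* set by a reaper thread under lock */
        bool hashed;
};

static int confirm(struct tracked *t)
{
        pthread_mutex_lock(&t->lock);
        if (t->dying) {                 /* must be tested under the lock */
                pthread_mutex_unlock(&t->lock);
                return 0;               /* accept the packet, skip insert */
        }
        t->hashed = true;               /* reaper cannot interleave here */
        pthread_mutex_unlock(&t->lock);
        return 1;
}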
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index b20f4275893c..53d892210a04 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
@@ -1393,10 +1393,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff, | |||
1393 | 1393 | ||
1394 | nf_ct_refresh(ct, skb, sip_timeout * HZ); | 1394 | nf_ct_refresh(ct, skb, sip_timeout * HZ); |
1395 | 1395 | ||
1396 | if (skb_is_nonlinear(skb)) { | 1396 | if (unlikely(skb_linearize(skb))) |
1397 | pr_debug("Copy of skbuff not supported yet.\n"); | 1397 | return NF_DROP; |
1398 | return NF_ACCEPT; | ||
1399 | } | ||
1400 | 1398 | ||
1401 | dptr = skb->data + dataoff; | 1399 | dptr = skb->data + dataoff; |
1402 | datalen = skb->len - dataoff; | 1400 | datalen = skb->len - dataoff; |
@@ -1455,10 +1453,8 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff, | |||
1455 | 1453 | ||
1456 | nf_ct_refresh(ct, skb, sip_timeout * HZ); | 1454 | nf_ct_refresh(ct, skb, sip_timeout * HZ); |
1457 | 1455 | ||
1458 | if (skb_is_nonlinear(skb)) { | 1456 | if (unlikely(skb_linearize(skb))) |
1459 | pr_debug("Copy of skbuff not supported yet.\n"); | 1457 | return NF_DROP; |
1460 | return NF_ACCEPT; | ||
1461 | } | ||
1462 | 1458 | ||
1463 | dptr = skb->data + dataoff; | 1459 | dptr = skb->data + dataoff; |
1464 | datalen = skb->len - dataoff; | 1460 | datalen = skb->len - dataoff; |
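Both SIP helpers used to accept nonlinear skbs without parsing them at all, silently skipping tracking for such packets. They now linearize, and drop only when linearization fails. A short sketch of the idiom, assuming kernel context (the parse step stands in for the existing SIP parser):

static unsigned int sip_parse_example(struct sk_buff *skb,
                                      unsigned int dataoff)
{
        const char *dptr;
        unsigned int datalen;

        if (unlikely(skb_linearize(skb)))
                return NF_DROP;         /* allocation failed */

        dptr = skb->data + dataoff;     /* valid for the whole packet now */
        datalen = skb->len - dataoff;
        /* ... run the SIP parser over dptr/datalen ... */
        return NF_ACCEPT;
}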
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 445de702b8b7..e34622fa0003 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info) | |||
699 | vfree(info->jumpstack); | 699 | vfree(info->jumpstack); |
700 | else | 700 | else |
701 | kfree(info->jumpstack); | 701 | kfree(info->jumpstack); |
702 | if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE) | 702 | |
703 | vfree(info->stackptr); | 703 | free_percpu(info->stackptr); |
704 | else | ||
705 | kfree(info->stackptr); | ||
706 | 704 | ||
707 | kfree(info); | 705 | kfree(info); |
708 | } | 706 | } |
@@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i) | |||
753 | unsigned int size; | 751 | unsigned int size; |
754 | int cpu; | 752 | int cpu; |
755 | 753 | ||
756 | size = sizeof(unsigned int) * nr_cpu_ids; | 754 | i->stackptr = alloc_percpu(unsigned int); |
757 | if (size > PAGE_SIZE) | ||
758 | i->stackptr = vmalloc(size); | ||
759 | else | ||
760 | i->stackptr = kmalloc(size, GFP_KERNEL); | ||
761 | if (i->stackptr == NULL) | 755 | if (i->stackptr == NULL) |
762 | return -ENOMEM; | 756 | return -ENOMEM; |
763 | memset(i->stackptr, 0, size); | ||
764 | 757 | ||
765 | size = sizeof(void **) * nr_cpu_ids; | 758 | size = sizeof(void **) * nr_cpu_ids; |
766 | if (size > PAGE_SIZE) | 759 | if (size > PAGE_SIZE) |
@@ -844,10 +837,6 @@ struct xt_table *xt_register_table(struct net *net, | |||
844 | struct xt_table_info *private; | 837 | struct xt_table_info *private; |
845 | struct xt_table *t, *table; | 838 | struct xt_table *t, *table; |
846 | 839 | ||
847 | ret = xt_jumpstack_alloc(newinfo); | ||
848 | if (ret < 0) | ||
849 | return ERR_PTR(ret); | ||
850 | |||
851 | /* Don't add one object to multiple lists. */ | 840 | /* Don't add one object to multiple lists. */ |
852 | table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL); | 841 | table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL); |
853 | if (!table) { | 842 | if (!table) { |
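stackptr becomes a per-CPU allocation, which removes the size-dependent kmalloc-versus-vmalloc logic on both the allocation and free paths (alloc_percpu() also returns zeroed memory, so the memset goes away), and xt_register_table() no longer performs the jumpstack allocation itself. A hedged sketch of the per-CPU pattern, with illustrative names:

struct info_example {
        unsigned int *stackptr;         /* one counter per possible CPU */
};

static int example_alloc(struct info_example *i)
{
        i->stackptr = alloc_percpu(unsigned int);  /* zeroed instances */
        if (i->stackptr == NULL)
                return -ENOMEM;
        return 0;
}

static void example_bump(struct info_example *i)
{
        unsigned int *sp = this_cpu_ptr(i->stackptr);
        (*sp)++;                        /* no cross-CPU sharing */
}

static void example_free(struct info_example *i)
{
        free_percpu(i->stackptr);       /* one call, any size */
}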
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c index d7920d9f49e9..859d9fd429c8 100644 --- a/net/netfilter/xt_TEE.c +++ b/net/netfilter/xt_TEE.c | |||
@@ -76,7 +76,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info) | |||
76 | if (ip_route_output_key(net, &rt, &fl) != 0) | 76 | if (ip_route_output_key(net, &rt, &fl) != 0) |
77 | return false; | 77 | return false; |
78 | 78 | ||
79 | dst_release(skb_dst(skb)); | 79 | skb_dst_drop(skb); |
80 | skb_dst_set(skb, &rt->u.dst); | 80 | skb_dst_set(skb, &rt->u.dst); |
81 | skb->dev = rt->u.dst.dev; | 81 | skb->dev = rt->u.dst.dev; |
82 | skb->protocol = htons(ETH_P_IP); | 82 | skb->protocol = htons(ETH_P_IP); |
@@ -157,7 +157,7 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info) | |||
157 | if (dst == NULL) | 157 | if (dst == NULL) |
158 | return false; | 158 | return false; |
159 | 159 | ||
160 | dst_release(skb_dst(skb)); | 160 | skb_dst_drop(skb); |
161 | skb_dst_set(skb, dst); | 161 | skb_dst_set(skb, dst); |
162 | skb->dev = dst->dev; | 162 | skb->dev = dst->dev; |
163 | skb->protocol = htons(ETH_P_IPV6); | 163 | skb->protocol = htons(ETH_P_IPV6); |
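dst_release(skb_dst(skb)) assumes the skb holds a counted reference on its dst; with the noref dst scheme introduced in this cycle that is not guaranteed, so skb_dst_drop() is the correct way to detach whatever is attached before installing the duplicate packet's own route. A minimal sketch, assuming kernel context:

static void retarget_route(struct sk_buff *skb, struct dst_entry *new_dst)
{
        skb_dst_drop(skb);              /* ref-aware drop, clears the field */
        skb_dst_set(skb, new_dst);      /* attach the freshly looked-up route */
        skb->dev = new_dst->dev;
}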
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index af4d38bc3b22..94d72e85a475 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
@@ -626,6 +626,7 @@ static void pep_sock_close(struct sock *sk, long timeout) | |||
626 | struct pep_sock *pn = pep_sk(sk); | 626 | struct pep_sock *pn = pep_sk(sk); |
627 | int ifindex = 0; | 627 | int ifindex = 0; |
628 | 628 | ||
629 | sock_hold(sk); /* keep a reference after sk_common_release() */ | ||
629 | sk_common_release(sk); | 630 | sk_common_release(sk); |
630 | 631 | ||
631 | lock_sock(sk); | 632 | lock_sock(sk); |
@@ -644,6 +645,7 @@ static void pep_sock_close(struct sock *sk, long timeout) | |||
644 | 645 | ||
645 | if (ifindex) | 646 | if (ifindex) |
646 | gprs_detach(sk); | 647 | gprs_detach(sk); |
648 | sock_put(sk); | ||
647 | } | 649 | } |
648 | 650 | ||
649 | static int pep_wait_connreq(struct sock *sk, int noblock) | 651 | static int pep_wait_connreq(struct sock *sk, int noblock) |
@@ -1043,12 +1045,12 @@ static void pep_sock_unhash(struct sock *sk) | |||
1043 | lock_sock(sk); | 1045 | lock_sock(sk); |
1044 | if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) { | 1046 | if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) { |
1045 | skparent = pn->listener; | 1047 | skparent = pn->listener; |
1046 | sk_del_node_init(sk); | ||
1047 | release_sock(sk); | 1048 | release_sock(sk); |
1048 | 1049 | ||
1049 | sk = skparent; | ||
1050 | pn = pep_sk(skparent); | 1050 | pn = pep_sk(skparent); |
1051 | lock_sock(sk); | 1051 | lock_sock(skparent); |
1052 | sk_del_node_init(sk); | ||
1053 | sk = skparent; | ||
1052 | } | 1054 | } |
1053 | /* Unhash a listening sock only when it is closed | 1055 | /* Unhash a listening sock only when it is closed |
1054 | * and all of its active connected pipes are closed. */ | 1056 | * and all of its active connected pipes are closed. */ |
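Two fixes here. pep_sock_close() keeps using sk after sk_common_release(), which can drop the caller's last reference, so a hold/put pair now brackets the tail of the function; and pep_sock_unhash() now takes the parent listener's lock before unhashing the child instead of deleting the node under the child's own lock. A sketch of the first pattern, assuming kernel context:

static void close_example(struct sock *sk)
{
        sock_hold(sk);                  /* keep sk alive past release */
        sk_common_release(sk);

        lock_sock(sk);                  /* safe: we still hold a reference */
        /* ... protocol-specific teardown that dereferences sk ... */
        release_sock(sk);

        sock_put(sk);                   /* drop the temporary reference */
}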
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 10ed0d55f759..f68832798db2 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c | |||
@@ -475,6 +475,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, | |||
475 | err = rds_ib_setup_qp(conn); | 475 | err = rds_ib_setup_qp(conn); |
476 | if (err) { | 476 | if (err) { |
477 | rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err); | 477 | rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err); |
478 | mutex_unlock(&conn->c_cm_lock); | ||
478 | goto out; | 479 | goto out; |
479 | } | 480 | } |
480 | 481 | ||
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c index a9d951b4fbae..b5dd6ac39be8 100644 --- a/net/rds/iw_cm.c +++ b/net/rds/iw_cm.c | |||
@@ -452,6 +452,7 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id, | |||
452 | err = rds_iw_setup_qp(conn); | 452 | err = rds_iw_setup_qp(conn); |
453 | if (err) { | 453 | if (err) { |
454 | rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err); | 454 | rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err); |
455 | mutex_unlock(&conn->c_cm_lock); | ||
455 | goto out; | 456 | goto out; |
456 | } | 457 | } |
457 | 458 | ||
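Both RDS connect handlers (ib_cm.c above and iw_cm.c here) returned through out with c_cm_lock still held when the QP setup failed, leaking the mutex. The rule the fix restores, in sketch form (illustrative names):

static int handle_connect_example(struct mutex *cm_lock,
                                  int (*setup_qp)(void))
{
        int err;

        mutex_lock(cm_lock);
        err = setup_qp();
        if (err) {
                mutex_unlock(cm_lock);  /* previously missing */
                goto out;
        }
        /* ... accept the connection ... */
        mutex_unlock(cm_lock);
out:
        return err;
}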
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index d885ba311564..570949417f38 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
@@ -159,6 +159,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, | |||
159 | iph->daddr = new_addr; | 159 | iph->daddr = new_addr; |
160 | 160 | ||
161 | csum_replace4(&iph->check, addr, new_addr); | 161 | csum_replace4(&iph->check, addr, new_addr); |
162 | } else if ((iph->frag_off & htons(IP_OFFSET)) || | ||
163 | iph->protocol != IPPROTO_ICMP) { | ||
164 | goto out; | ||
162 | } | 165 | } |
163 | 166 | ||
164 | ihl = iph->ihl * 4; | 167 | ihl = iph->ihl * 4; |
@@ -247,6 +250,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, | |||
247 | break; | 250 | break; |
248 | } | 251 | } |
249 | 252 | ||
253 | out: | ||
250 | return action; | 254 | return action; |
251 | 255 | ||
252 | drop: | 256 | drop: |
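When the outer addresses did not match the NAT rule, tcf_nat used to fall through and attempt a layer-4 rewrite anyway. Only a non-fragmented ICMP packet can still be relevant in that case (its embedded inner header may need translating), so everything else now exits through the new out label with the action unchanged. A sketch of the condition, assuming kernel context:

static bool skip_l4_rewrite(const struct iphdr *iph, bool addr_matched)
{
        if (addr_matched)
                return false;
        /* non-first fragments carry no L4 header; non-ICMP packets
         * whose addresses did not match need no inner rewrite */
        return (iph->frag_off & htons(IP_OFFSET)) ||
               iph->protocol != IPPROTO_ICMP;
}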
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index fdbd0b7bd840..50e3d945e1f4 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -125,7 +125,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
125 | { | 125 | { |
126 | struct tcf_pedit *p = a->priv; | 126 | struct tcf_pedit *p = a->priv; |
127 | int i, munged = 0; | 127 | int i, munged = 0; |
128 | u8 *pptr; | 128 | unsigned int off; |
129 | 129 | ||
130 | if (!(skb->tc_verd & TC_OK2MUNGE)) { | 130 | if (!(skb->tc_verd & TC_OK2MUNGE)) { |
131 | /* should we set skb->cloned? */ | 131 | /* should we set skb->cloned? */ |
@@ -134,7 +134,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
134 | } | 134 | } |
135 | } | 135 | } |
136 | 136 | ||
137 | pptr = skb_network_header(skb); | 137 | off = skb_network_offset(skb); |
138 | 138 | ||
139 | spin_lock(&p->tcf_lock); | 139 | spin_lock(&p->tcf_lock); |
140 | 140 | ||
@@ -144,17 +144,17 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
144 | struct tc_pedit_key *tkey = p->tcfp_keys; | 144 | struct tc_pedit_key *tkey = p->tcfp_keys; |
145 | 145 | ||
146 | for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { | 146 | for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { |
147 | u32 *ptr; | 147 | u32 *ptr, _data; |
148 | int offset = tkey->off; | 148 | int offset = tkey->off; |
149 | 149 | ||
150 | if (tkey->offmask) { | 150 | if (tkey->offmask) { |
151 | if (skb->len > tkey->at) { | 151 | char *d, _d; |
152 | char *j = pptr + tkey->at; | 152 | |
153 | offset += ((*j & tkey->offmask) >> | 153 | d = skb_header_pointer(skb, off + tkey->at, 1, |
154 | tkey->shift); | 154 | &_d); |
155 | } else { | 155 | if (!d) |
156 | goto bad; | 156 | goto bad; |
157 | } | 157 | offset += (*d & tkey->offmask) >> tkey->shift; |
158 | } | 158 | } |
159 | 159 | ||
160 | if (offset % 4) { | 160 | if (offset % 4) { |
@@ -169,9 +169,13 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
169 | goto bad; | 169 | goto bad; |
170 | } | 170 | } |
171 | 171 | ||
172 | ptr = (u32 *)(pptr+offset); | 172 | ptr = skb_header_pointer(skb, off + offset, 4, &_data); |
173 | if (!ptr) | ||
174 | goto bad; | ||
173 | /* just do it, baby */ | 175 | /* just do it, baby */ |
174 | *ptr = ((*ptr & tkey->mask) ^ tkey->val); | 176 | *ptr = ((*ptr & tkey->mask) ^ tkey->val); |
177 | if (ptr == &_data) | ||
178 | skb_store_bits(skb, off + offset, ptr, 4); | ||
175 | munged++; | 179 | munged++; |
176 | } | 180 | } |
177 | 181 | ||
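pedit stops dereferencing skb_network_header() directly and resolves every offset through skb_header_pointer(), writing modified words back with skb_store_bits() when they came from the bounce buffer, so rewriting works on nonlinear skbs too. The core read-modify-write idiom, as a sketch assuming kernel context:

static int rewrite_word(struct sk_buff *skb, unsigned int off,
                        u32 mask, u32 val)
{
        u32 *ptr, _data;

        ptr = skb_header_pointer(skb, off, sizeof(_data), &_data);
        if (!ptr)
                return -1;              /* offset beyond the packet */

        *ptr = (*ptr & mask) ^ val;
        if (ptr == &_data)              /* word came from the bounce buffer */
                skb_store_bits(skb, off, ptr, sizeof(_data));
        return 0;
}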
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 221180384fd7..78ef2c5e130b 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c | |||
@@ -16,14 +16,11 @@ | |||
16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
17 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
18 | #include <linux/cgroup.h> | 18 | #include <linux/cgroup.h> |
19 | #include <linux/rcupdate.h> | ||
19 | #include <net/rtnetlink.h> | 20 | #include <net/rtnetlink.h> |
20 | #include <net/pkt_cls.h> | 21 | #include <net/pkt_cls.h> |
21 | 22 | #include <net/sock.h> | |
22 | struct cgroup_cls_state | 23 | #include <net/cls_cgroup.h> |
23 | { | ||
24 | struct cgroup_subsys_state css; | ||
25 | u32 classid; | ||
26 | }; | ||
27 | 24 | ||
28 | static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, | 25 | static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, |
29 | struct cgroup *cgrp); | 26 | struct cgroup *cgrp); |
@@ -112,6 +109,10 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
112 | struct cls_cgroup_head *head = tp->root; | 109 | struct cls_cgroup_head *head = tp->root; |
113 | u32 classid; | 110 | u32 classid; |
114 | 111 | ||
112 | rcu_read_lock(); | ||
113 | classid = task_cls_state(current)->classid; | ||
114 | rcu_read_unlock(); | ||
115 | |||
115 | /* | 116 | /* |
116 | * Due to the nature of the classifier it is required to ignore all | 117 | * Due to the nature of the classifier it is required to ignore all |
117 | * packets originating from softirq context as accessing `current' | 118 | * packets originating from softirq context as accessing `current' |
@@ -122,12 +123,12 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
122 | * calls by looking at the number of nested bh disable calls because | 123 | * calls by looking at the number of nested bh disable calls because |
123 | * softirqs always disables bh. | 124 | * softirqs always disables bh. |
124 | */ | 125 | */ |
125 | if (softirq_count() != SOFTIRQ_OFFSET) | 126 | if (softirq_count() != SOFTIRQ_OFFSET) { |
126 | return -1; | 127 | /* If there is an sk_classid we'll use that. */ |
127 | 128 | if (!skb->sk) | |
128 | rcu_read_lock(); | 129 | return -1; |
129 | classid = task_cls_state(current)->classid; | 130 | classid = skb->sk->sk_classid; |
130 | rcu_read_unlock(); | 131 | } |
131 | 132 | ||
132 | if (!classid) | 133 | if (!classid) |
133 | return -1; | 134 | return -1; |
@@ -289,18 +290,35 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = { | |||
289 | 290 | ||
290 | static int __init init_cgroup_cls(void) | 291 | static int __init init_cgroup_cls(void) |
291 | { | 292 | { |
292 | int ret = register_tcf_proto_ops(&cls_cgroup_ops); | 293 | int ret; |
293 | if (ret) | 294 | |
294 | return ret; | ||
295 | ret = cgroup_load_subsys(&net_cls_subsys); | 295 | ret = cgroup_load_subsys(&net_cls_subsys); |
296 | if (ret) | 296 | if (ret) |
297 | unregister_tcf_proto_ops(&cls_cgroup_ops); | 297 | goto out; |
298 | |||
299 | #ifndef CONFIG_NET_CLS_CGROUP | ||
300 | /* We can't use rcu_assign_pointer because this is an int. */ | ||
301 | smp_wmb(); | ||
302 | net_cls_subsys_id = net_cls_subsys.subsys_id; | ||
303 | #endif | ||
304 | |||
305 | ret = register_tcf_proto_ops(&cls_cgroup_ops); | ||
306 | if (ret) | ||
307 | cgroup_unload_subsys(&net_cls_subsys); | ||
308 | |||
309 | out: | ||
298 | return ret; | 310 | return ret; |
299 | } | 311 | } |
300 | 312 | ||
301 | static void __exit exit_cgroup_cls(void) | 313 | static void __exit exit_cgroup_cls(void) |
302 | { | 314 | { |
303 | unregister_tcf_proto_ops(&cls_cgroup_ops); | 315 | unregister_tcf_proto_ops(&cls_cgroup_ops); |
316 | |||
317 | #ifndef CONFIG_NET_CLS_CGROUP | ||
318 | net_cls_subsys_id = -1; | ||
319 | synchronize_rcu(); | ||
320 | #endif | ||
321 | |||
304 | cgroup_unload_subsys(&net_cls_subsys); | 322 | cgroup_unload_subsys(&net_cls_subsys); |
305 | } | 323 | } |
306 | 324 | ||
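Three related changes: struct cgroup_cls_state moves out to <net/cls_cgroup.h> so other code can use it; the classifier reads the task classid up front and, in softirq context (where current has nothing to do with the packet), falls back to the classid cached on the socket; and module init now loads the cgroup subsystem before registering the classifier ops, publishing net_cls_subsys_id for the modular case. A sketch of the lookup order, assuming kernel context (returns 0 when no classid applies):

static u32 classid_for_skb(struct sk_buff *skb)
{
        u32 classid;

        rcu_read_lock();
        classid = task_cls_state(current)->classid;
        rcu_read_unlock();

        if (softirq_count() != SOFTIRQ_OFFSET) {
                /* current is unrelated; use the socket's cached value */
                if (!skb->sk)
                        return 0;
                classid = skb->sk->sk_classid;
        }
        return classid;
}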
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 96275422c619..4f522143811e 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -98,11 +98,11 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re | |||
98 | { | 98 | { |
99 | struct { | 99 | struct { |
100 | struct tc_u_knode *knode; | 100 | struct tc_u_knode *knode; |
101 | u8 *ptr; | 101 | unsigned int off; |
102 | } stack[TC_U32_MAXDEPTH]; | 102 | } stack[TC_U32_MAXDEPTH]; |
103 | 103 | ||
104 | struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; | 104 | struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; |
105 | u8 *ptr = skb_network_header(skb); | 105 | unsigned int off = skb_network_offset(skb); |
106 | struct tc_u_knode *n; | 106 | struct tc_u_knode *n; |
107 | int sdepth = 0; | 107 | int sdepth = 0; |
108 | int off2 = 0; | 108 | int off2 = 0; |
@@ -134,8 +134,14 @@ next_knode: | |||
134 | #endif | 134 | #endif |
135 | 135 | ||
136 | for (i = n->sel.nkeys; i>0; i--, key++) { | 136 | for (i = n->sel.nkeys; i>0; i--, key++) { |
137 | 137 | unsigned int toff; | |
138 | if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) { | 138 | __be32 *data, _data; |
139 | |||
140 | toff = off + key->off + (off2 & key->offmask); | ||
141 | data = skb_header_pointer(skb, toff, 4, &_data); | ||
142 | if (!data) | ||
143 | goto out; | ||
144 | if ((*data ^ key->val) & key->mask) { | ||
139 | n = n->next; | 145 | n = n->next; |
140 | goto next_knode; | 146 | goto next_knode; |
141 | } | 147 | } |
@@ -174,29 +180,45 @@ check_terminal: | |||
174 | if (sdepth >= TC_U32_MAXDEPTH) | 180 | if (sdepth >= TC_U32_MAXDEPTH) |
175 | goto deadloop; | 181 | goto deadloop; |
176 | stack[sdepth].knode = n; | 182 | stack[sdepth].knode = n; |
177 | stack[sdepth].ptr = ptr; | 183 | stack[sdepth].off = off; |
178 | sdepth++; | 184 | sdepth++; |
179 | 185 | ||
180 | ht = n->ht_down; | 186 | ht = n->ht_down; |
181 | sel = 0; | 187 | sel = 0; |
182 | if (ht->divisor) | 188 | if (ht->divisor) { |
183 | sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift); | 189 | __be32 *data, _data; |
184 | 190 | ||
191 | data = skb_header_pointer(skb, off + n->sel.hoff, 4, | ||
192 | &_data); | ||
193 | if (!data) | ||
194 | goto out; | ||
195 | sel = ht->divisor & u32_hash_fold(*data, &n->sel, | ||
196 | n->fshift); | ||
197 | } | ||
185 | if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT))) | 198 | if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT))) |
186 | goto next_ht; | 199 | goto next_ht; |
187 | 200 | ||
188 | if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) { | 201 | if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) { |
189 | off2 = n->sel.off + 3; | 202 | off2 = n->sel.off + 3; |
190 | if (n->sel.flags&TC_U32_VAROFFSET) | 203 | if (n->sel.flags & TC_U32_VAROFFSET) { |
191 | off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift; | 204 | __be16 *data, _data; |
205 | |||
206 | data = skb_header_pointer(skb, | ||
207 | off + n->sel.offoff, | ||
208 | 2, &_data); | ||
209 | if (!data) | ||
210 | goto out; | ||
211 | off2 += ntohs(n->sel.offmask & *data) >> | ||
212 | n->sel.offshift; | ||
213 | } | ||
192 | off2 &= ~3; | 214 | off2 &= ~3; |
193 | } | 215 | } |
194 | if (n->sel.flags&TC_U32_EAT) { | 216 | if (n->sel.flags&TC_U32_EAT) { |
195 | ptr += off2; | 217 | off += off2; |
196 | off2 = 0; | 218 | off2 = 0; |
197 | } | 219 | } |
198 | 220 | ||
199 | if (ptr < skb_tail_pointer(skb)) | 221 | if (off < skb->len) |
200 | goto next_ht; | 222 | goto next_ht; |
201 | } | 223 | } |
202 | 224 | ||
@@ -204,9 +226,10 @@ check_terminal: | |||
204 | if (sdepth--) { | 226 | if (sdepth--) { |
205 | n = stack[sdepth].knode; | 227 | n = stack[sdepth].knode; |
206 | ht = n->ht_up; | 228 | ht = n->ht_up; |
207 | ptr = stack[sdepth].ptr; | 229 | off = stack[sdepth].off; |
208 | goto check_terminal; | 230 | goto check_terminal; |
209 | } | 231 | } |
232 | out: | ||
210 | return -1; | 233 | return -1; |
211 | 234 | ||
212 | deadloop: | 235 | deadloop: |
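The u32 classifier previously walked raw pointers into the linear skb area and bounded the walk with skb_tail_pointer(); it now tracks offsets and fetches every key word, hash word, and variable-offset field through skb_header_pointer(), bounding with skb->len, so matching also works on paged data. The per-key fetch, as a sketch assuming kernel context:

static bool key_matches(struct sk_buff *skb, unsigned int off,
                        __be32 val, __be32 mask)
{
        __be32 *data, _data;

        data = skb_header_pointer(skb, off, sizeof(_data), &_data);
        if (!data)
                return false;           /* key lies past the packet */
        return ((*data ^ val) & mask) == 0;
}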
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index fe35c1f338c2..b9e8c3b7d406 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -1195,6 +1195,11 @@ nla_put_failure: | |||
1195 | return -1; | 1195 | return -1; |
1196 | } | 1196 | } |
1197 | 1197 | ||
1198 | static bool tc_qdisc_dump_ignore(struct Qdisc *q) | ||
1199 | { | ||
1200 | return (q->flags & TCQ_F_BUILTIN) ? true : false; | ||
1201 | } | ||
1202 | |||
1198 | static int qdisc_notify(struct net *net, struct sk_buff *oskb, | 1203 | static int qdisc_notify(struct net *net, struct sk_buff *oskb, |
1199 | struct nlmsghdr *n, u32 clid, | 1204 | struct nlmsghdr *n, u32 clid, |
1200 | struct Qdisc *old, struct Qdisc *new) | 1205 | struct Qdisc *old, struct Qdisc *new) |
@@ -1206,11 +1211,11 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb, | |||
1206 | if (!skb) | 1211 | if (!skb) |
1207 | return -ENOBUFS; | 1212 | return -ENOBUFS; |
1208 | 1213 | ||
1209 | if (old && old->handle) { | 1214 | if (old && !tc_qdisc_dump_ignore(old)) { |
1210 | if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) | 1215 | if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) |
1211 | goto err_out; | 1216 | goto err_out; |
1212 | } | 1217 | } |
1213 | if (new) { | 1218 | if (new && !tc_qdisc_dump_ignore(new)) { |
1214 | if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) | 1219 | if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) |
1215 | goto err_out; | 1220 | goto err_out; |
1216 | } | 1221 | } |
@@ -1223,11 +1228,6 @@ err_out: | |||
1223 | return -EINVAL; | 1228 | return -EINVAL; |
1224 | } | 1229 | } |
1225 | 1230 | ||
1226 | static bool tc_qdisc_dump_ignore(struct Qdisc *q) | ||
1227 | { | ||
1228 | return (q->flags & TCQ_F_BUILTIN) ? true : false; | ||
1229 | } | ||
1230 | |||
1231 | static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, | 1231 | static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, |
1232 | struct netlink_callback *cb, | 1232 | struct netlink_callback *cb, |
1233 | int *q_idx_p, int s_q_idx) | 1233 | int *q_idx_p, int s_q_idx) |
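qdisc_notify() used old->handle as a stand-in for "not a builtin qdisc", which is not the same predicate the dump code applies; the helper moves above its new first caller so notify and dump share one TCQ_F_BUILTIN test, and new qdiscs are now filtered as well. The gate itself, in sketch form:

static bool dump_ignore_example(const struct Qdisc *q)
{
        /* builtin qdiscs are invisible to dumps and notifications */
        return q->flags & TCQ_F_BUILTIN;
}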
diff --git a/net/socket.c b/net/socket.c index f9f7d0872cac..367d5477d00f 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -94,6 +94,7 @@ | |||
94 | 94 | ||
95 | #include <net/compat.h> | 95 | #include <net/compat.h> |
96 | #include <net/wext.h> | 96 | #include <net/wext.h> |
97 | #include <net/cls_cgroup.h> | ||
97 | 98 | ||
98 | #include <net/sock.h> | 99 | #include <net/sock.h> |
99 | #include <linux/netfilter.h> | 100 | #include <linux/netfilter.h> |
@@ -558,6 +559,8 @@ static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
558 | struct sock_iocb *si = kiocb_to_siocb(iocb); | 559 | struct sock_iocb *si = kiocb_to_siocb(iocb); |
559 | int err; | 560 | int err; |
560 | 561 | ||
562 | sock_update_classid(sock->sk); | ||
563 | |||
561 | si->sock = sock; | 564 | si->sock = sock; |
562 | si->scm = NULL; | 565 | si->scm = NULL; |
563 | si->msg = msg; | 566 | si->msg = msg; |
@@ -684,6 +687,8 @@ static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, | |||
684 | { | 687 | { |
685 | struct sock_iocb *si = kiocb_to_siocb(iocb); | 688 | struct sock_iocb *si = kiocb_to_siocb(iocb); |
686 | 689 | ||
690 | sock_update_classid(sock->sk); | ||
691 | |||
687 | si->sock = sock; | 692 | si->sock = sock; |
688 | si->scm = NULL; | 693 | si->scm = NULL; |
689 | si->msg = msg; | 694 | si->msg = msg; |
@@ -777,6 +782,8 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos, | |||
777 | if (unlikely(!sock->ops->splice_read)) | 782 | if (unlikely(!sock->ops->splice_read)) |
778 | return -EINVAL; | 783 | return -EINVAL; |
779 | 784 | ||
785 | sock_update_classid(sock->sk); | ||
786 | |||
780 | return sock->ops->splice_read(sock, ppos, pipe, len, flags); | 787 | return sock->ops->splice_read(sock, ppos, pipe, len, flags); |
781 | } | 788 | } |
782 | 789 | ||
@@ -3069,6 +3076,8 @@ int kernel_setsockopt(struct socket *sock, int level, int optname, | |||
3069 | int kernel_sendpage(struct socket *sock, struct page *page, int offset, | 3076 | int kernel_sendpage(struct socket *sock, struct page *page, int offset, |
3070 | size_t size, int flags) | 3077 | size_t size, int flags) |
3071 | { | 3078 | { |
3079 | sock_update_classid(sock->sk); | ||
3080 | |||
3072 | if (sock->ops->sendpage) | 3081 | if (sock->ops->sendpage) |
3073 | return sock->ops->sendpage(sock, page, offset, size, flags); | 3082 | return sock->ops->sendpage(sock, page, offset, size, flags); |
3074 | 3083 | ||
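Each send, receive, splice-read, and kernel_sendpage entry point now calls sock_update_classid() so the socket carries the sending task's cgroup classid; this is what the cls_cgroup softirq fallback above reads. A hedged approximation of such a helper (the real body lives elsewhere in this series and may differ):

static void sock_update_classid_sketch(struct sock *sk)
{
        u32 classid;

        rcu_read_lock();
        classid = task_cls_state(current)->classid;  /* assumed source */
        rcu_read_unlock();

        if (classid && classid != sk->sk_classid)
                sk->sk_classid = classid;  /* cache for softirq lookups */
}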
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index c2173ebdb33c..58de76c8540c 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/sunrpc/cache.h> | 34 | #include <linux/sunrpc/cache.h> |
35 | #include <linux/sunrpc/stats.h> | 35 | #include <linux/sunrpc/stats.h> |
36 | #include <linux/sunrpc/rpc_pipe_fs.h> | 36 | #include <linux/sunrpc/rpc_pipe_fs.h> |
37 | #include <linux/smp_lock.h> | ||
37 | 38 | ||
38 | #define RPCDBG_FACILITY RPCDBG_CACHE | 39 | #define RPCDBG_FACILITY RPCDBG_CACHE |
39 | 40 | ||
@@ -1545,12 +1546,18 @@ static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait) | |||
1545 | return cache_poll(filp, wait, cd); | 1546 | return cache_poll(filp, wait, cd); |
1546 | } | 1547 | } |
1547 | 1548 | ||
1548 | static int cache_ioctl_pipefs(struct inode *inode, struct file *filp, | 1549 | static long cache_ioctl_pipefs(struct file *filp, |
1549 | unsigned int cmd, unsigned long arg) | 1550 | unsigned int cmd, unsigned long arg) |
1550 | { | 1551 | { |
1552 | struct inode *inode = filp->f_dentry->d_inode; | ||
1551 | struct cache_detail *cd = RPC_I(inode)->private; | 1553 | struct cache_detail *cd = RPC_I(inode)->private; |
1554 | long ret; | ||
1552 | 1555 | ||
1553 | return cache_ioctl(inode, filp, cmd, arg, cd); | 1556 | lock_kernel(); |
1557 | ret = cache_ioctl(inode, filp, cmd, arg, cd); | ||
1558 | unlock_kernel(); | ||
1559 | |||
1560 | return ret; | ||
1554 | } | 1561 | } |
1555 | 1562 | ||
1556 | static int cache_open_pipefs(struct inode *inode, struct file *filp) | 1563 | static int cache_open_pipefs(struct inode *inode, struct file *filp) |
@@ -1573,7 +1580,7 @@ const struct file_operations cache_file_operations_pipefs = { | |||
1573 | .read = cache_read_pipefs, | 1580 | .read = cache_read_pipefs, |
1574 | .write = cache_write_pipefs, | 1581 | .write = cache_write_pipefs, |
1575 | .poll = cache_poll_pipefs, | 1582 | .poll = cache_poll_pipefs, |
1576 | .ioctl = cache_ioctl_pipefs, /* for FIONREAD */ | 1583 | .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */ |
1577 | .open = cache_open_pipefs, | 1584 | .open = cache_open_pipefs, |
1578 | .release = cache_release_pipefs, | 1585 | .release = cache_release_pipefs, |
1579 | }; | 1586 | }; |
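This is part of the tree-wide removal of the implicit BKL from the .ioctl file operation: the handler becomes .unlocked_ioctl, recovers the inode from the file itself, and takes lock_kernel()/unlock_kernel() explicitly to preserve the old serialization until it can be dropped for real. The shape of the conversion, with do_ioctl standing in for the existing worker:

static long example_unlocked_ioctl(struct file *filp,
                                   unsigned int cmd, unsigned long arg)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        long ret;

        lock_kernel();                  /* keep the old BKL semantics */
        ret = do_ioctl(inode, filp, cmd, arg);
        unlock_kernel();
        return ret;
}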
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 20e30c6f8355..95ccbcf45d3e 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/workqueue.h> | 27 | #include <linux/workqueue.h> |
28 | #include <linux/sunrpc/rpc_pipe_fs.h> | 28 | #include <linux/sunrpc/rpc_pipe_fs.h> |
29 | #include <linux/sunrpc/cache.h> | 29 | #include <linux/sunrpc/cache.h> |
30 | #include <linux/smp_lock.h> | ||
30 | 31 | ||
31 | static struct vfsmount *rpc_mount __read_mostly; | 32 | static struct vfsmount *rpc_mount __read_mostly; |
32 | static int rpc_mount_count; | 33 | static int rpc_mount_count; |
@@ -309,8 +310,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait) | |||
309 | } | 310 | } |
310 | 311 | ||
311 | static int | 312 | static int |
312 | rpc_pipe_ioctl(struct inode *ino, struct file *filp, | 313 | rpc_pipe_ioctl_unlocked(struct file *filp, unsigned int cmd, unsigned long arg) |
313 | unsigned int cmd, unsigned long arg) | ||
314 | { | 314 | { |
315 | struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode); | 315 | struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode); |
316 | int len; | 316 | int len; |
@@ -331,13 +331,25 @@ rpc_pipe_ioctl(struct inode *ino, struct file *filp, | |||
331 | } | 331 | } |
332 | } | 332 | } |
333 | 333 | ||
334 | static long | ||
335 | rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
336 | { | ||
337 | long ret; | ||
338 | |||
339 | lock_kernel(); | ||
340 | ret = rpc_pipe_ioctl_unlocked(filp, cmd, arg); | ||
341 | unlock_kernel(); | ||
342 | |||
343 | return ret; | ||
344 | } | ||
345 | |||
334 | static const struct file_operations rpc_pipe_fops = { | 346 | static const struct file_operations rpc_pipe_fops = { |
335 | .owner = THIS_MODULE, | 347 | .owner = THIS_MODULE, |
336 | .llseek = no_llseek, | 348 | .llseek = no_llseek, |
337 | .read = rpc_pipe_read, | 349 | .read = rpc_pipe_read, |
338 | .write = rpc_pipe_write, | 350 | .write = rpc_pipe_write, |
339 | .poll = rpc_pipe_poll, | 351 | .poll = rpc_pipe_poll, |
340 | .ioctl = rpc_pipe_ioctl, | 352 | .unlocked_ioctl = rpc_pipe_ioctl, |
341 | .open = rpc_pipe_open, | 353 | .open = rpc_pipe_open, |
342 | .release = rpc_pipe_release, | 354 | .release = rpc_pipe_release, |
343 | }; | 355 | }; |
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 121105355f60..dac219a56ae1 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c | |||
@@ -783,7 +783,7 @@ static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p, | |||
783 | port = ntohl(*p); | 783 | port = ntohl(*p); |
784 | dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid, | 784 | dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid, |
785 | task->tk_msg.rpc_proc->p_name, port); | 785 | task->tk_msg.rpc_proc->p_name, port); |
786 | if (unlikely(port > USHORT_MAX)) | 786 | if (unlikely(port > USHRT_MAX)) |
787 | return -EIO; | 787 | return -EIO; |
788 | 788 | ||
789 | rpcb->r_port = port; | 789 | rpcb->r_port = port; |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 3fc325399ee4..dcd0132396ba 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -166,7 +166,6 @@ EXPORT_SYMBOL_GPL(xprt_unregister_transport); | |||
166 | int xprt_load_transport(const char *transport_name) | 166 | int xprt_load_transport(const char *transport_name) |
167 | { | 167 | { |
168 | struct xprt_class *t; | 168 | struct xprt_class *t; |
169 | char module_name[sizeof t->name + 5]; | ||
170 | int result; | 169 | int result; |
171 | 170 | ||
172 | result = 0; | 171 | result = 0; |
@@ -178,9 +177,7 @@ int xprt_load_transport(const char *transport_name) | |||
178 | } | 177 | } |
179 | } | 178 | } |
180 | spin_unlock(&xprt_list_lock); | 179 | spin_unlock(&xprt_list_lock); |
181 | strcpy(module_name, "xprt"); | 180 | result = request_module("xprt%s", transport_name); |
182 | strncat(module_name, transport_name, sizeof t->name); | ||
183 | result = request_module(module_name); | ||
184 | out: | 181 | out: |
185 | return result; | 182 | return result; |
186 | } | 183 | } |
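request_module() takes printf-style arguments, so the fixed-size module_name buffer and the strcpy/strncat assembly (whose sizeof arithmetic was easy to get wrong) are unnecessary; the whole operation reduces to a single formatted call. In sketch form, assuming kernel context:

static int load_xprt_module(const char *transport_name)
{
        /* request_module() formats and bounds the name internally */
        return request_module("xprt%s", transport_name);
}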
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index b7cd8cccbe72..2a9675136c68 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -2293,6 +2293,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) | |||
2293 | struct sockaddr *addr = args->dstaddr; | 2293 | struct sockaddr *addr = args->dstaddr; |
2294 | struct rpc_xprt *xprt; | 2294 | struct rpc_xprt *xprt; |
2295 | struct sock_xprt *transport; | 2295 | struct sock_xprt *transport; |
2296 | struct rpc_xprt *ret; | ||
2296 | 2297 | ||
2297 | xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries); | 2298 | xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries); |
2298 | if (IS_ERR(xprt)) | 2299 | if (IS_ERR(xprt)) |
@@ -2330,8 +2331,8 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) | |||
2330 | xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6); | 2331 | xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6); |
2331 | break; | 2332 | break; |
2332 | default: | 2333 | default: |
2333 | kfree(xprt); | 2334 | ret = ERR_PTR(-EAFNOSUPPORT); |
2334 | return ERR_PTR(-EAFNOSUPPORT); | 2335 | goto out_err; |
2335 | } | 2336 | } |
2336 | 2337 | ||
2337 | if (xprt_bound(xprt)) | 2338 | if (xprt_bound(xprt)) |
@@ -2346,10 +2347,11 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) | |||
2346 | 2347 | ||
2347 | if (try_module_get(THIS_MODULE)) | 2348 | if (try_module_get(THIS_MODULE)) |
2348 | return xprt; | 2349 | return xprt; |
2349 | 2350 | ret = ERR_PTR(-EINVAL); | |
2351 | out_err: | ||
2350 | kfree(xprt->slot); | 2352 | kfree(xprt->slot); |
2351 | kfree(xprt); | 2353 | kfree(xprt); |
2352 | return ERR_PTR(-EINVAL); | 2354 | return ret; |
2353 | } | 2355 | } |
2354 | 2356 | ||
2355 | static const struct rpc_timeout xs_tcp_default_timeout = { | 2357 | static const struct rpc_timeout xs_tcp_default_timeout = { |
@@ -2368,6 +2370,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
2368 | struct sockaddr *addr = args->dstaddr; | 2370 | struct sockaddr *addr = args->dstaddr; |
2369 | struct rpc_xprt *xprt; | 2371 | struct rpc_xprt *xprt; |
2370 | struct sock_xprt *transport; | 2372 | struct sock_xprt *transport; |
2373 | struct rpc_xprt *ret; | ||
2371 | 2374 | ||
2372 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); | 2375 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); |
2373 | if (IS_ERR(xprt)) | 2376 | if (IS_ERR(xprt)) |
@@ -2403,8 +2406,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
2403 | xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); | 2406 | xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); |
2404 | break; | 2407 | break; |
2405 | default: | 2408 | default: |
2406 | kfree(xprt); | 2409 | ret = ERR_PTR(-EAFNOSUPPORT); |
2407 | return ERR_PTR(-EAFNOSUPPORT); | 2410 | goto out_err; |
2408 | } | 2411 | } |
2409 | 2412 | ||
2410 | if (xprt_bound(xprt)) | 2413 | if (xprt_bound(xprt)) |
@@ -2420,10 +2423,11 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
2420 | 2423 | ||
2421 | if (try_module_get(THIS_MODULE)) | 2424 | if (try_module_get(THIS_MODULE)) |
2422 | return xprt; | 2425 | return xprt; |
2423 | 2426 | ret = ERR_PTR(-EINVAL); | |
2427 | out_err: | ||
2424 | kfree(xprt->slot); | 2428 | kfree(xprt->slot); |
2425 | kfree(xprt); | 2429 | kfree(xprt); |
2426 | return ERR_PTR(-EINVAL); | 2430 | return ret; |
2427 | } | 2431 | } |
2428 | 2432 | ||
2429 | /** | 2433 | /** |
@@ -2437,6 +2441,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
2437 | struct rpc_xprt *xprt; | 2441 | struct rpc_xprt *xprt; |
2438 | struct sock_xprt *transport; | 2442 | struct sock_xprt *transport; |
2439 | struct svc_sock *bc_sock; | 2443 | struct svc_sock *bc_sock; |
2444 | struct rpc_xprt *ret; | ||
2440 | 2445 | ||
2441 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); | 2446 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); |
2442 | if (IS_ERR(xprt)) | 2447 | if (IS_ERR(xprt)) |
@@ -2476,8 +2481,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
2476 | RPCBIND_NETID_TCP6); | 2481 | RPCBIND_NETID_TCP6); |
2477 | break; | 2482 | break; |
2478 | default: | 2483 | default: |
2479 | kfree(xprt); | 2484 | ret = ERR_PTR(-EAFNOSUPPORT); |
2480 | return ERR_PTR(-EAFNOSUPPORT); | 2485 | goto out_err; |
2481 | } | 2486 | } |
2482 | 2487 | ||
2483 | if (xprt_bound(xprt)) | 2488 | if (xprt_bound(xprt)) |
@@ -2499,9 +2504,11 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
2499 | 2504 | ||
2500 | if (try_module_get(THIS_MODULE)) | 2505 | if (try_module_get(THIS_MODULE)) |
2501 | return xprt; | 2506 | return xprt; |
2507 | ret = ERR_PTR(-EINVAL); | ||
2508 | out_err: | ||
2502 | kfree(xprt->slot); | 2509 | kfree(xprt->slot); |
2503 | kfree(xprt); | 2510 | kfree(xprt); |
2504 | return ERR_PTR(-EINVAL); | 2511 | return ret; |
2505 | } | 2512 | } |
2506 | 2513 | ||
2507 | static struct xprt_class xs_udp_transport = { | 2514 | static struct xprt_class xs_udp_transport = { |
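All three transport constructors freed xprt but leaked xprt->slot on the EAFNOSUPPORT path. Routing every failure through a single out_err label that frees both makes that class of leak structurally impossible. A standalone C sketch of the single-exit cleanup pattern (illustrative types):

#include <stdlib.h>

struct xprt_like {
        void *slot;
};

static struct xprt_like *setup_example(int family_ok, int module_ok)
{
        struct xprt_like *x = calloc(1, sizeof(*x));

        if (!x)
                return NULL;
        x->slot = calloc(16, sizeof(void *));
        if (!x->slot)
                goto out_err;
        if (!family_ok)
                goto out_err;           /* old code freed x, leaked x->slot */
        if (module_ok)
                return x;

out_err:
        free(x->slot);                  /* free(NULL) is a no-op */
        free(x);
        return NULL;                    /* kernel code returns ERR_PTR(ret) */
}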
diff --git a/net/wireless/chan.c b/net/wireless/chan.c index d92d088026bf..b01a6f6397d7 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c | |||
@@ -50,7 +50,7 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev, | |||
50 | struct ieee80211_channel *chan; | 50 | struct ieee80211_channel *chan; |
51 | int result; | 51 | int result; |
52 | 52 | ||
53 | if (wdev->iftype == NL80211_IFTYPE_MONITOR) | 53 | if (wdev && wdev->iftype == NL80211_IFTYPE_MONITOR) |
54 | wdev = NULL; | 54 | wdev = NULL; |
55 | 55 | ||
56 | if (wdev) { | 56 | if (wdev) { |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index aaa1aad566cd..db71150b8040 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -4443,9 +4443,10 @@ static int nl80211_remain_on_channel(struct sk_buff *skb, | |||
4443 | if (channel_type != NL80211_CHAN_NO_HT && | 4443 | if (channel_type != NL80211_CHAN_NO_HT && |
4444 | channel_type != NL80211_CHAN_HT20 && | 4444 | channel_type != NL80211_CHAN_HT20 && |
4445 | channel_type != NL80211_CHAN_HT40PLUS && | 4445 | channel_type != NL80211_CHAN_HT40PLUS && |
4446 | channel_type != NL80211_CHAN_HT40MINUS) | 4446 | channel_type != NL80211_CHAN_HT40MINUS) { |
4447 | err = -EINVAL; | 4447 | err = -EINVAL; |
4448 | goto out; | 4448 | goto out; |
4449 | } | ||
4449 | } | 4450 | } |
4450 | 4451 | ||
4451 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); | 4452 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); |
@@ -4717,9 +4718,10 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info) | |||
4717 | if (channel_type != NL80211_CHAN_NO_HT && | 4718 | if (channel_type != NL80211_CHAN_NO_HT && |
4718 | channel_type != NL80211_CHAN_HT20 && | 4719 | channel_type != NL80211_CHAN_HT20 && |
4719 | channel_type != NL80211_CHAN_HT40PLUS && | 4720 | channel_type != NL80211_CHAN_HT40PLUS && |
4720 | channel_type != NL80211_CHAN_HT40MINUS) | 4721 | channel_type != NL80211_CHAN_HT40MINUS) { |
4721 | err = -EINVAL; | 4722 | err = -EINVAL; |
4722 | goto out; | 4723 | goto out; |
4724 | } | ||
4723 | } | 4725 | } |
4724 | 4726 | ||
4725 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); | 4727 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); |
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index a026c6d56bd3..58401d246bda 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -515,7 +515,7 @@ cfg80211_inform_bss(struct wiphy *wiphy, | |||
515 | 515 | ||
516 | privsz = wiphy->bss_priv_size; | 516 | privsz = wiphy->bss_priv_size; |
517 | 517 | ||
518 | if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC && | 518 | if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC && |
519 | (signal < 0 || signal > 100))) | 519 | (signal < 0 || signal > 100))) |
520 | return NULL; | 520 | return NULL; |
521 | 521 | ||
@@ -571,7 +571,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, | |||
571 | u.probe_resp.variable); | 571 | u.probe_resp.variable); |
572 | size_t privsz = wiphy->bss_priv_size; | 572 | size_t privsz = wiphy->bss_priv_size; |
573 | 573 | ||
574 | if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC && | 574 | if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC && |
575 | (signal < 0 || signal > 100))) | 575 | (signal < 0 || signal > 100))) |
576 | return NULL; | 576 | return NULL; |
577 | 577 | ||
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 6a329158bdfa..a3cca0a94346 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -95,13 +95,13 @@ resume: | |||
95 | goto error_nolock; | 95 | goto error_nolock; |
96 | } | 96 | } |
97 | 97 | ||
98 | dst = dst_pop(dst); | 98 | dst = skb_dst_pop(skb); |
99 | if (!dst) { | 99 | if (!dst) { |
100 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); | 100 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); |
101 | err = -EHOSTUNREACH; | 101 | err = -EHOSTUNREACH; |
102 | goto error_nolock; | 102 | goto error_nolock; |
103 | } | 103 | } |
104 | skb_dst_set(skb, dst); | 104 | skb_dst_set_noref(skb, dst); |
105 | x = dst->xfrm; | 105 | x = dst->xfrm; |
106 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); | 106 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); |
107 | 107 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index d965a2bad8d3..4bf27d901333 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -2153,6 +2153,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) | |||
2153 | return 0; | 2153 | return 0; |
2154 | } | 2154 | } |
2155 | 2155 | ||
2156 | skb_dst_force(skb); | ||
2156 | dst = skb_dst(skb); | 2157 | dst = skb_dst(skb); |
2157 | 2158 | ||
2158 | res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0; | 2159 | res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0; |
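Both xfrm hunks follow from the same noref-dst scheme seen in xt_TEE above: the output loop pops the next dst with skb_dst_pop() and re-attaches it without taking an extra reference via skb_dst_set_noref(), while __xfrm_route_forward() must call skb_dst_force() first because xfrm_lookup() may release or replace the entry, which requires a real counted reference. The forward-path rule, as a minimal sketch assuming kernel context:

static int route_forward_example(struct sk_buff *skb)
{
        struct dst_entry *dst;

        skb_dst_force(skb);             /* borrowed ref becomes counted */
        dst = skb_dst(skb);
        /* ... dst may now be handed to code that releases it ... */
        return dst != NULL;
}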