author		Jens Axboe <jaxboe@fusionio.com>	2010-06-01 06:42:12 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2010-06-01 06:42:12 -0400
commit		b4ca761577535b2b4d153689ee97342797dfff05 (patch)
tree		29054d55508f1faa22ec32acf7c245751af03348 /net
parent		28f4197e5d4707311febeec8a0eb97cb5fd93c97 (diff)
parent		67a3e12b05e055c0415c556a315a3d3eb637e29e (diff)
Merge branch 'master' into for-linus
Conflicts:
	fs/pipe.c

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'net')
-rw-r--r--	net/9p/client.c | 70
-rw-r--r--	net/9p/protocol.c | 8
-rw-r--r--	net/caif/Kconfig | 5
-rw-r--r--	net/caif/caif_socket.c | 91
-rw-r--r--	net/caif/cfctrl.c | 92
-rw-r--r--	net/caif/cfmuxl.c | 3
-rw-r--r--	net/caif/cfpkt_skbuff.c | 25
-rw-r--r--	net/caif/cfserl.c | 3
-rw-r--r--	net/caif/cfsrvl.c | 6
-rw-r--r--	net/core/datagram.c | 6
-rw-r--r--	net/core/dev.c | 48
-rw-r--r--	net/core/drop_monitor.c | 12
-rw-r--r--	net/core/neighbour.c | 1
-rw-r--r--	net/core/rtnetlink.c | 30
-rw-r--r--	net/core/skbuff.c | 1
-rw-r--r--	net/core/sock.c | 52
-rw-r--r--	net/dccp/input.c | 6
-rw-r--r--	net/dccp/options.c | 2
-rw-r--r--	net/ieee802154/wpan-class.c | 7
-rw-r--r--	net/ipv4/ipmr.c | 2
-rw-r--r--	net/ipv4/udp.c | 22
-rw-r--r--	net/ipv6/ip6_output.c | 2
-rw-r--r--	net/ipv6/ip6mr.c | 2
-rw-r--r--	net/ipv6/udp.c | 5
-rw-r--r--	net/iucv/af_iucv.c | 2
-rw-r--r--	net/iucv/iucv.c | 9
-rw-r--r--	net/mac80211/key.c | 1
-rw-r--r--	net/mac80211/sta_info.c | 2
-rw-r--r--	net/mac80211/sta_info.h | 2
-rw-r--r--	net/netfilter/nf_conntrack_core.c | 10
-rw-r--r--	net/netfilter/nf_conntrack_sip.c | 12
-rw-r--r--	net/netfilter/xt_TEE.c | 4
-rw-r--r--	net/phonet/pep.c | 2
-rw-r--r--	net/sched/cls_cgroup.c | 50
-rw-r--r--	net/sched/sch_api.c | 14
-rw-r--r--	net/socket.c | 9
-rw-r--r--	net/sunrpc/cache.c | 13
-rw-r--r--	net/sunrpc/rpc_pipe.c | 18
-rw-r--r--	net/sunrpc/rpcb_clnt.c | 2
-rw-r--r--	net/sunrpc/xprt.c | 5
-rw-r--r--	net/sunrpc/xprtsock.c | 29
-rw-r--r--	net/wireless/chan.c | 2
-rw-r--r--	net/wireless/nl80211.c | 6
-rw-r--r--	net/wireless/scan.c | 4
44 files changed, 428 insertions, 269 deletions
diff --git a/net/9p/client.c b/net/9p/client.c
index 0aa79faa9850..37c8da07a80b 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1321,7 +1321,8 @@ static int p9_client_statsize(struct p9_wstat *wst, int proto_version)
 	if (wst->muid)
 		ret += strlen(wst->muid);
 
-	if (proto_version == p9_proto_2000u) {
+	if ((proto_version == p9_proto_2000u) ||
+	    (proto_version == p9_proto_2000L)) {
 		ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */
 		if (wst->extension)
 			ret += strlen(wst->extension);
@@ -1364,3 +1365,70 @@ error:
 	return err;
 }
 EXPORT_SYMBOL(p9_client_wstat);
+
+int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
+{
+	int err;
+	struct p9_req_t *req;
+	struct p9_client *clnt;
+
+	err = 0;
+	clnt = fid->clnt;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid);
+
+	req = p9_client_rpc(clnt, P9_TSTATFS, "d", fid->fid);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	err = p9pdu_readf(req->rc, clnt->proto_version, "ddqqqqqqd", &sb->type,
+		&sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail,
+		&sb->files, &sb->ffree, &sb->fsid, &sb->namelen);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		p9_free_req(clnt, req);
+		goto error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RSTATFS fid %d type 0x%lx bsize %ld "
+		"blocks %llu bfree %llu bavail %llu files %llu ffree %llu "
+		"fsid %llu namelen %ld\n",
+		fid->fid, (long unsigned int)sb->type, (long int)sb->bsize,
+		sb->blocks, sb->bfree, sb->bavail, sb->files, sb->ffree,
+		sb->fsid, (long int)sb->namelen);
+
+	p9_free_req(clnt, req);
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_statfs);
+
+int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name)
+{
+	int err;
+	struct p9_req_t *req;
+	struct p9_client *clnt;
+
+	err = 0;
+	clnt = fid->clnt;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n",
+			fid->fid, newdirfid->fid, name);
+
+	req = p9_client_rpc(clnt, P9_TRENAME, "dds", fid->fid,
+			newdirfid->fid, name);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RRENAME fid %d\n", fid->fid);
+
+	p9_free_req(clnt, req);
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_rename);
+
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index e7541d5b0118..149f82160130 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -341,7 +341,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
 			}
 			break;
 		case '?':
-			if (proto_version != p9_proto_2000u)
+			if ((proto_version != p9_proto_2000u) &&
+				(proto_version != p9_proto_2000L))
 				return 0;
 			break;
 		default:
@@ -393,7 +394,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
 				const char *sptr = va_arg(ap, const char *);
 				int16_t len = 0;
 				if (sptr)
-					len = MIN(strlen(sptr), USHORT_MAX);
+					len = MIN(strlen(sptr), USHRT_MAX);
 
 				errcode = p9pdu_writef(pdu, proto_version,
 								"w", len);
@@ -488,7 +489,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
 			}
 			break;
 		case '?':
-			if (proto_version != p9_proto_2000u)
+			if ((proto_version != p9_proto_2000u) &&
+				(proto_version != p9_proto_2000L))
 				return 0;
 			break;
 		default:
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index cd1daf6008bd..ed651786f16b 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -2,10 +2,8 @@
 # CAIF net configurations
 #
 
-#menu "CAIF Support"
-comment "CAIF Support"
 menuconfig CAIF
-	tristate "Enable CAIF support"
+	tristate "CAIF support"
 	select CRC_CCITT
 	default n
 	---help---
@@ -45,4 +43,3 @@ config CAIF_NETDEV
 	If unsure say Y.
 
 endif
-#endmenu
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index c3a70c5c893a..3d0e09584fae 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -60,7 +60,7 @@ struct debug_fs_counter {
 	atomic_t num_rx_flow_off;
 	atomic_t num_rx_flow_on;
 };
-struct debug_fs_counter cnt;
+static struct debug_fs_counter cnt;
 #define	dbfs_atomic_inc(v) atomic_inc(v)
 #define	dbfs_atomic_dec(v) atomic_dec(v)
 #else
@@ -128,17 +128,17 @@ static void caif_read_unlock(struct sock *sk)
 	mutex_unlock(&cf_sk->readlock);
 }
 
-int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
+static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
 {
 	/* A quarter of full buffer is used a low water mark */
 	return cf_sk->sk.sk_rcvbuf / 4;
 }
 
-void caif_flow_ctrl(struct sock *sk, int mode)
+static void caif_flow_ctrl(struct sock *sk, int mode)
 {
 	struct caifsock *cf_sk;
 	cf_sk = container_of(sk, struct caifsock, sk);
-	if (cf_sk->layer.dn)
+	if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
 		cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
 }
 
@@ -146,7 +146,7 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
  * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
  * not dropped, but CAIF is sending flow off instead.
  */
-int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int err;
 	int skb_len;
@@ -162,9 +162,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 			atomic_read(&cf_sk->sk.sk_rmem_alloc),
 			sk_rcvbuf_lowwater(cf_sk));
 		set_rx_flow_off(cf_sk);
-		if (cf_sk->layer.dn)
-			cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
-						CAIF_MODEMCMD_FLOW_OFF_REQ);
+		dbfs_atomic_inc(&cnt.num_rx_flow_off);
+		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
 	}
 
 	err = sk_filter(sk, skb);
@@ -175,9 +174,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		trace_printk("CAIF: %s():"
 			" sending flow OFF due to rmem_schedule\n",
 			__func__);
-		if (cf_sk->layer.dn)
-			cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
-						CAIF_MODEMCMD_FLOW_OFF_REQ);
+		dbfs_atomic_inc(&cnt.num_rx_flow_off);
+		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
 	}
 	skb->dev = NULL;
 	skb_set_owner_r(skb, sk);
@@ -285,65 +283,51 @@ static void caif_check_flow_release(struct sock *sk)
 {
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
 
-	if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL)
-		return;
 	if (rx_flow_is_on(cf_sk))
 		return;
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
 			dbfs_atomic_inc(&cnt.num_rx_flow_on);
 			set_rx_flow_on(cf_sk);
-			cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
-						CAIF_MODEMCMD_FLOW_ON_REQ);
+			caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
 	}
 }
+
 /*
- * Copied from sock.c:sock_queue_rcv_skb(), and added check that user buffer
- * has sufficient size.
+ * Copied from unix_dgram_recvmsg, but removed credit checks,
+ * changed locking, address handling and added MSG_TRUNC.
  */
-
 static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
-				struct msghdr *m, size_t buf_len, int flags)
+				struct msghdr *m, size_t len, int flags)
 
 {
 	struct sock *sk = sock->sk;
 	struct sk_buff *skb;
-	int ret = 0;
-	int len;
+	int ret;
+	int copylen;
 
-	if (unlikely(!buf_len))
-		return -EINVAL;
+	ret = -EOPNOTSUPP;
+	if (m->msg_flags&MSG_OOB)
+		goto read_error;
 
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
-
-	len = skb->len;
-
-	if (skb && skb->len > buf_len && !(flags & MSG_PEEK)) {
-		len = buf_len;
-		/*
-		 * Push skb back on receive queue if buffer too small.
-		 * This has a built-in race where multi-threaded receive
-		 * may get packet in wrong order, but multiple read does
-		 * not really guarantee ordered delivery anyway.
-		 * Let's optimize for speed without taking locks.
-		 */
-
-		skb_queue_head(&sk->sk_receive_queue, skb);
-		ret = -EMSGSIZE;
-		goto read_error;
+	copylen = skb->len;
+	if (len < copylen) {
+		m->msg_flags |= MSG_TRUNC;
+		copylen = len;
 	}
 
-	ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len);
+	ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
 	if (ret)
-		goto read_error;
+		goto out_free;
 
+	ret = (flags & MSG_TRUNC) ? skb->len : copylen;
+out_free:
 	skb_free_datagram(sk, skb);
-
 	caif_check_flow_release(sk);
-
-	return len;
+	return ret;
 
 read_error:
 	return ret;
@@ -920,17 +904,17 @@ wait_connect:
 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
 
 	release_sock(sk);
-	err = wait_event_interruptible_timeout(*sk_sleep(sk),
+	err = -ERESTARTSYS;
+	timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
 			sk->sk_state != CAIF_CONNECTING,
 			timeo);
 	lock_sock(sk);
-	if (err < 0)
+	if (timeo < 0)
 		goto out; /* -ERESTARTSYS */
-	if (err == 0 && sk->sk_state != CAIF_CONNECTED) {
-		err = -ETIMEDOUT;
-		goto out;
-	}
 
+	err = -ETIMEDOUT;
+	if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
+		goto out;
 	if (sk->sk_state != CAIF_CONNECTED) {
 		sock->state = SS_UNCONNECTED;
 		err = sock_error(sk);
@@ -945,7 +929,6 @@ out:
 	return err;
 }
 
-
 /*
  * caif_release() - Disconnect a CAIF Socket
  * Copied and modified af_irda.c:irda_release().
@@ -1019,10 +1002,6 @@ static unsigned int caif_poll(struct file *file,
 		(sk->sk_shutdown & RCV_SHUTDOWN))
 		mask |= POLLIN | POLLRDNORM;
 
-	/* Connection-based need to check for termination and startup */
-	if (sk->sk_state == CAIF_DISCONNECTED)
-		mask |= POLLHUP;
-
 	/*
 	 * we set writable also when the other side has shut down the
 	 * connection. This prevents stuck sockets.
@@ -1194,7 +1173,7 @@ static struct net_proto_family caif_family_ops = {
 	.owner = THIS_MODULE,
 };
 
-int af_caif_init(void)
+static int af_caif_init(void)
 {
 	int err = sock_register(&caif_family_ops);
 	if (!err)
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 0ffe1e1ce901..fcfda98a5e6d 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -44,13 +44,14 @@ struct cflayer *cfctrl_create(void)
 	dev_info.id = 0xff;
 	memset(this, 0, sizeof(*this));
 	cfsrvl_init(&this->serv, 0, &dev_info);
-	spin_lock_init(&this->info_list_lock);
 	atomic_set(&this->req_seq_no, 1);
 	atomic_set(&this->rsp_seq_no, 1);
 	this->serv.layer.receive = cfctrl_recv;
 	sprintf(this->serv.layer.name, "ctrl");
 	this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
 	spin_lock_init(&this->loop_linkid_lock);
+	spin_lock_init(&this->info_list_lock);
+	INIT_LIST_HEAD(&this->list);
 	this->loop_linkid = 1;
 	return &this->serv.layer;
 }
@@ -112,20 +113,10 @@ bool cfctrl_req_eq(struct cfctrl_request_info *r1,
 void cfctrl_insert_req(struct cfctrl *ctrl,
 			      struct cfctrl_request_info *req)
 {
-	struct cfctrl_request_info *p;
 	spin_lock(&ctrl->info_list_lock);
-	req->next = NULL;
 	atomic_inc(&ctrl->req_seq_no);
 	req->sequence_no = atomic_read(&ctrl->req_seq_no);
-	if (ctrl->first_req == NULL) {
-		ctrl->first_req = req;
-		spin_unlock(&ctrl->info_list_lock);
-		return;
-	}
-	p = ctrl->first_req;
-	while (p->next != NULL)
-		p = p->next;
-	p->next = req;
+	list_add_tail(&req->list, &ctrl->list);
 	spin_unlock(&ctrl->info_list_lock);
 }
 
@@ -133,46 +124,28 @@ void cfctrl_insert_req(struct cfctrl *ctrl,
 struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
 					      struct cfctrl_request_info *req)
 {
-	struct cfctrl_request_info *p;
-	struct cfctrl_request_info *ret;
+	struct cfctrl_request_info *p, *tmp, *first;
 
 	spin_lock(&ctrl->info_list_lock);
-	if (ctrl->first_req == NULL) {
-		spin_unlock(&ctrl->info_list_lock);
-		return NULL;
-	}
-
-	if (cfctrl_req_eq(req, ctrl->first_req)) {
-		ret = ctrl->first_req;
-		caif_assert(ctrl->first_req);
-		atomic_set(&ctrl->rsp_seq_no,
-				 ctrl->first_req->sequence_no);
-		ctrl->first_req = ctrl->first_req->next;
-		spin_unlock(&ctrl->info_list_lock);
-		return ret;
-	}
+	first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list);
 
-	p = ctrl->first_req;
-
-	while (p->next != NULL) {
-		if (cfctrl_req_eq(req, p->next)) {
-			pr_warning("CAIF: %s(): Requests are not "
+	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
+		if (cfctrl_req_eq(req, p)) {
+			if (p != first)
+				pr_warning("CAIF: %s(): Requests are not "
 					"received in order\n",
 					__func__);
-			ret = p->next;
+
 			atomic_set(&ctrl->rsp_seq_no,
-					p->next->sequence_no);
-			p->next = p->next->next;
-			spin_unlock(&ctrl->info_list_lock);
-			return ret;
+					 p->sequence_no);
+			list_del(&p->list);
+			goto out;
 		}
-		p = p->next;
 	}
+	p = NULL;
+out:
 	spin_unlock(&ctrl->info_list_lock);
-
-	pr_warning("CAIF: %s(): Request does not match\n",
-		   __func__);
-	return NULL;
+	return p;
 }
 
 struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
@@ -388,31 +361,18 @@ void cfctrl_getstartreason_req(struct cflayer *layer)
 
 void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
 {
-	struct cfctrl_request_info *p, *req;
+	struct cfctrl_request_info *p, *tmp;
 	struct cfctrl *ctrl = container_obj(layr);
 	spin_lock(&ctrl->info_list_lock);
-
-	if (ctrl->first_req == NULL) {
-		spin_unlock(&ctrl->info_list_lock);
-		return;
-	}
-
-	if (ctrl->first_req->client_layer == adap_layer) {
-
-		req = ctrl->first_req;
-		ctrl->first_req = ctrl->first_req->next;
-		kfree(req);
-	}
-
-	p = ctrl->first_req;
-	while (p != NULL && p->next != NULL) {
-		if (p->next->client_layer == adap_layer) {
-
-			req = p->next;
-			p->next = p->next->next;
-			kfree(p->next);
+	pr_warning("CAIF: %s(): enter\n", __func__);
+
+	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
+		if (p->client_layer == adap_layer) {
+			pr_warning("CAIF: %s(): cancel req :%d\n", __func__,
+					p->sequence_no);
+			list_del(&p->list);
+			kfree(p);
 		}
-		p = p->next;
 	}
 
 	spin_unlock(&ctrl->info_list_lock);
@@ -634,7 +594,7 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 	case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
 	case CAIF_CTRLCMD_FLOW_OFF_IND:
 		spin_lock(&this->info_list_lock);
-		if (this->first_req != NULL) {
+		if (!list_empty(&this->list)) {
 			pr_debug("CAIF: %s(): Received flow off in "
 				 "control layer", __func__);
 		}
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 7372f27f1d32..80c8d332b258 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -174,10 +174,11 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
 	spin_lock(&muxl->receive_lock);
 	up = get_up(muxl, id);
 	if (up == NULL)
-		return NULL;
+		goto out;
 	memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
 	list_del(&up->node);
 	cfsrvl_put(up);
+out:
 	spin_unlock(&muxl->receive_lock);
 	return up;
 }
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 83fff2ff6658..a6fdf899741a 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -238,6 +238,7 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
 	struct sk_buff *lastskb;
 	u8 *to;
 	const u8 *data = data2;
+	int ret;
 	if (unlikely(is_erronous(pkt)))
 		return -EPROTO;
 	if (unlikely(skb_headroom(skb) < len)) {
@@ -246,9 +247,10 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
 	}
 
 	/* Make sure data is writable */
-	if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
+	ret = skb_cow_data(skb, 0, &lastskb);
+	if (unlikely(ret < 0)) {
 		PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n");
-		return -EPROTO;
+		return ret;
 	}
 
 	to = skb_push(skb, len);
@@ -316,6 +318,8 @@ EXPORT_SYMBOL(cfpkt_setlen);
 struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
 {
 	struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
+	if (!pkt)
+		return NULL;
 	if (unlikely(data != NULL))
 		cfpkt_add_body(pkt, data, len);
 	return pkt;
@@ -344,12 +348,13 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
 
 	if (dst->tail + neededtailspace > dst->end) {
 		/* Create a dumplicate of 'dst' with more tail space */
+		struct cfpkt *tmppkt;
 		dstlen = skb_headlen(dst);
 		createlen = dstlen + neededtailspace;
-		tmp = pkt_to_skb(
-			cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX));
-		if (!tmp)
+		tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX);
+		if (tmppkt == NULL)
 			return NULL;
+		tmp = pkt_to_skb(tmppkt);
 		skb_set_tail_pointer(tmp, dstlen);
 		tmp->len = dstlen;
 		memcpy(tmp->data, dst->data, dstlen);
@@ -368,6 +373,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
 {
 	struct sk_buff *skb2;
 	struct sk_buff *skb = pkt_to_skb(pkt);
+	struct cfpkt *tmppkt;
 	u8 *split = skb->data + pos;
 	u16 len2nd = skb_tail_pointer(skb) - split;
 
@@ -381,9 +387,12 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
 	}
 
 	/* Create a new packet for the second part of the data */
-	skb2 = pkt_to_skb(
-		cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
-				 PKT_PREFIX));
+	tmppkt = cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
+				  PKT_PREFIX);
+	if (tmppkt == NULL)
+		return NULL;
+	skb2 = pkt_to_skb(tmppkt);
+
 
 	if (skb2 == NULL)
 		return NULL;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 06029ea2da2f..cb4325a3dc83 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -67,6 +67,8 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
 		layr->incomplete_frm =
 		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
 		pkt = layr->incomplete_frm;
+		if (pkt == NULL)
+			return -ENOMEM;
 	} else {
 		pkt = newpkt;
 	}
@@ -154,7 +156,6 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
 			if (layr->usestx) {
 				if (tail_pkt != NULL)
 					pkt = cfpkt_append(pkt, tail_pkt, 0);
-
 				/* Start search for next STX if frame failed */
 				continue;
 			} else {
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index aff31f34528f..6e5b7079a684 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -123,6 +123,12 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
 			struct caif_payload_info *info;
 			u8 flow_off = SRVL_FLOW_OFF;
 			pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
+			if (!pkt) {
+				pr_warning("CAIF: %s(): Out of memory\n",
+					__func__);
+				return -ENOMEM;
+			}
+
 			if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
 				pr_err("CAIF: %s(): Packet is erroneous!\n",
 					__func__);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e0097531417a..f5b6f43a4c2e 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -229,15 +229,17 @@ EXPORT_SYMBOL(skb_free_datagram);
 
 void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
 {
+	bool slow;
+
 	if (likely(atomic_read(&skb->users) == 1))
 		smp_rmb();
 	else if (likely(!atomic_dec_and_test(&skb->users)))
 		return;
 
-	lock_sock_bh(sk);
+	slow = lock_sock_fast(sk);
 	skb_orphan(skb);
 	sk_mem_reclaim_partial(sk);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 
 	/* skb is now orphaned, can be freed outside of locked section */
 	__kfree_skb(skb);
diff --git a/net/core/dev.c b/net/core/dev.c
index d273e4e3ecdc..1845b08c624e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -954,18 +954,22 @@ int dev_alloc_name(struct net_device *dev, const char *name)
 }
 EXPORT_SYMBOL(dev_alloc_name);
 
-static int dev_get_valid_name(struct net *net, const char *name, char *buf,
-		bool fmt)
+static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
 {
+	struct net *net;
+
+	BUG_ON(!dev_net(dev));
+	net = dev_net(dev);
+
 	if (!dev_valid_name(name))
 		return -EINVAL;
 
 	if (fmt && strchr(name, '%'))
-		return __dev_alloc_name(net, name, buf);
+		return dev_alloc_name(dev, name);
 	else if (__dev_get_by_name(net, name))
 		return -EEXIST;
-	else if (buf != name)
-		strlcpy(buf, name, IFNAMSIZ);
+	else if (dev->name != name)
+		strlcpy(dev->name, name, IFNAMSIZ);
 
 	return 0;
 }
@@ -997,7 +1001,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
 
 	memcpy(oldname, dev->name, IFNAMSIZ);
 
-	err = dev_get_valid_name(net, newname, dev->name, 1);
+	err = dev_get_valid_name(dev, newname, 1);
 	if (err < 0)
 		return err;
 
@@ -2421,10 +2425,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	if (skb_queue_len(&sd->input_pkt_queue)) {
 enqueue:
 		__skb_queue_tail(&sd->input_pkt_queue, skb);
-#ifdef CONFIG_RPS
-		*qtail = sd->input_queue_head +
-				skb_queue_len(&sd->input_pkt_queue);
-#endif
+		input_queue_tail_incr_save(sd, qtail);
 		rps_unlock(sd);
 		local_irq_restore(flags);
 		return NET_RX_SUCCESS;
@@ -2959,7 +2960,7 @@ static void flush_backlog(void *arg)
 		if (skb->dev == dev) {
 			__skb_unlink(skb, &sd->input_pkt_queue);
 			kfree_skb(skb);
-			input_queue_head_add(sd, 1);
+			input_queue_head_incr(sd);
 		}
 	}
 	rps_unlock(sd);
@@ -2968,6 +2969,7 @@ static void flush_backlog(void *arg)
 		if (skb->dev == dev) {
 			__skb_unlink(skb, &sd->process_queue);
 			kfree_skb(skb);
+			input_queue_head_incr(sd);
 		}
 	}
 }
@@ -3323,18 +3325,20 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	while ((skb = __skb_dequeue(&sd->process_queue))) {
 		local_irq_enable();
 		__netif_receive_skb(skb);
-		if (++work >= quota)
-			return work;
 		local_irq_disable();
+		input_queue_head_incr(sd);
+		if (++work >= quota) {
+			local_irq_enable();
+			return work;
+		}
 	}
 
 	rps_lock(sd);
 	qlen = skb_queue_len(&sd->input_pkt_queue);
-	if (qlen) {
-		input_queue_head_add(sd, qlen);
+	if (qlen)
 		skb_queue_splice_tail_init(&sd->input_pkt_queue,
 					   &sd->process_queue);
-	}
+
 	if (qlen < quota - work) {
 		/*
 		 * Inline a custom version of __napi_complete().
@@ -4960,7 +4964,7 @@ int register_netdevice(struct net_device *dev)
 		}
 	}
 
-	ret = dev_get_valid_name(net, dev->name, dev->name, 0);
+	ret = dev_get_valid_name(dev, dev->name, 0);
 	if (ret)
 		goto err_uninit;
 
@@ -5558,7 +5562,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 		/* We get here if we can't use the current device name */
 		if (!pat)
 			goto out;
-		if (dev_get_valid_name(net, pat, dev->name, 1))
+		if (dev_get_valid_name(dev, pat, 1))
 			goto out;
 	}
 
@@ -5661,12 +5665,14 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 	local_irq_enable();
 
 	/* Process offline CPU's input_pkt_queue */
-	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
 		netif_rx(skb);
-		input_queue_head_add(oldsd, 1);
+		input_queue_head_incr(oldsd);
 	}
-	while ((skb = __skb_dequeue(&oldsd->process_queue)))
+	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
 		netif_rx(skb);
+		input_queue_head_incr(oldsd);
+	}
 
 	return NOTIFY_OK;
 }
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index cf208d8042b1..ad41529fb60f 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -172,12 +172,12 @@ out:
 	return;
 }
 
-static void trace_kfree_skb_hit(struct sk_buff *skb, void *location)
+static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
 {
 	trace_drop_common(skb, location);
 }
 
-static void trace_napi_poll_hit(struct napi_struct *napi)
+static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi)
 {
 	struct dm_hw_stat_delta *new_stat;
 
@@ -225,12 +225,12 @@ static int set_all_monitor_traces(int state)
 
 	switch (state) {
 	case TRACE_ON:
-		rc |= register_trace_kfree_skb(trace_kfree_skb_hit);
-		rc |= register_trace_napi_poll(trace_napi_poll_hit);
+		rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
+		rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
 		break;
 	case TRACE_OFF:
-		rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit);
-		rc |= unregister_trace_napi_poll(trace_napi_poll_hit);
+		rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
+		rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);
 
 		tracepoint_synchronize_unregister();
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index bff37908bd55..6ba1c0eece03 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -934,6 +934,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 				kfree_skb(buff);
 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
 			}
+			skb_dst_force(skb);
 			__skb_queue_tail(&neigh->arp_queue, skb);
 		}
 		rc = 1;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e4b9870e4706..1a2af24e9e3d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -650,11 +650,12 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev)
 	if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
 
 		int num_vfs = dev_num_vf(dev->dev.parent);
-		size_t size = nlmsg_total_size(sizeof(struct nlattr));
-		size += nlmsg_total_size(num_vfs * sizeof(struct nlattr));
-		size += num_vfs * (sizeof(struct ifla_vf_mac) +
-				  sizeof(struct ifla_vf_vlan) +
-				  sizeof(struct ifla_vf_tx_rate));
+		size_t size = nla_total_size(sizeof(struct nlattr));
+		size += nla_total_size(num_vfs * sizeof(struct nlattr));
+		size += num_vfs *
+			(nla_total_size(sizeof(struct ifla_vf_mac)) +
+			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
+			 nla_total_size(sizeof(struct ifla_vf_tx_rate)));
 		return size;
 	} else
 		return 0;
@@ -722,14 +723,13 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
 
 	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
 		vf_port = nla_nest_start(skb, IFLA_VF_PORT);
-		if (!vf_port) {
-			nla_nest_cancel(skb, vf_ports);
-			return -EMSGSIZE;
-		}
+		if (!vf_port)
+			goto nla_put_failure;
 		NLA_PUT_U32(skb, IFLA_PORT_VF, vf);
 		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
+		if (err == -EMSGSIZE)
+			goto nla_put_failure;
 		if (err) {
-nla_put_failure:
 			nla_nest_cancel(skb, vf_port);
 			continue;
 		}
@@ -739,6 +739,10 @@ nla_put_failure:
 	nla_nest_end(skb, vf_ports);
 
 	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, vf_ports);
+	return -EMSGSIZE;
 }
 
 static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
@@ -753,7 +757,7 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
 	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
 	if (err) {
 		nla_nest_cancel(skb, port_self);
-		return err;
+		return (err == -EMSGSIZE) ? err : 0;
 	}
 
 	nla_nest_end(skb, port_self);
@@ -1199,8 +1203,10 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
 		struct nlattr *attr;
 		int rem;
 		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
-			if (nla_type(attr) != IFLA_VF_INFO)
+			if (nla_type(attr) != IFLA_VF_INFO) {
+				err = -EINVAL;
 				goto errout;
+			}
 			err = do_setvfinfo(dev, attr);
 			if (err < 0)
 				goto errout;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 66d9c416851e..f8abf68e3988 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2722,6 +2722,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
 		skb_shinfo(nskb)->frag_list = p;
 		skb_shinfo(nskb)->gso_size = pinfo->gso_size;
+		pinfo->gso_size = 0;
 		skb_header_release(p);
 		nskb->prev = p;
 
diff --git a/net/core/sock.c b/net/core/sock.c
index bf88a167c8f2..2cf7f9f7e775 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -123,6 +123,7 @@
 #include <linux/net_tstamp.h>
 #include <net/xfrm.h>
 #include <linux/ipsec.h>
+#include <net/cls_cgroup.h>
 
 #include <linux/filter.h>
 
@@ -217,6 +218,11 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 EXPORT_SYMBOL(sysctl_optmem_max);
 
+#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
+int net_cls_subsys_id = -1;
+EXPORT_SYMBOL_GPL(net_cls_subsys_id);
+#endif
+
 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
 {
 	struct timeval tv;
@@ -1050,6 +1056,17 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
 	module_put(owner);
 }
 
+#ifdef CONFIG_CGROUPS
+void sock_update_classid(struct sock *sk)
+{
+	u32 classid = task_cls_classid(current);
+
+	if (classid && classid != sk->sk_classid)
+		sk->sk_classid = classid;
+}
+EXPORT_SYMBOL(sock_update_classid);
+#endif
+
 /**
  *	sk_alloc - All socket objects are allocated here
  *	@net: the applicable net namespace
@@ -1073,6 +1090,8 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 		sock_lock_init(sk);
 		sock_net_set(sk, get_net(net));
 		atomic_set(&sk->sk_wmem_alloc, 1);
+
+		sock_update_classid(sk);
 	}
 
 	return sk;
@@ -1988,6 +2007,39 @@ void release_sock(struct sock *sk)
 }
 EXPORT_SYMBOL(release_sock);
 
+/**
+ * lock_sock_fast - fast version of lock_sock
+ * @sk: socket
+ *
+ * This version should be used for very small section, where process wont block
+ * return false if fast path is taken
+ *   sk_lock.slock locked, owned = 0, BH disabled
+ * return true if slow path is taken
+ *   sk_lock.slock unlocked, owned = 1, BH enabled
+ */
+bool lock_sock_fast(struct sock *sk)
+{
+	might_sleep();
+	spin_lock_bh(&sk->sk_lock.slock);
+
+	if (!sk->sk_lock.owned)
+		/*
+		 * Note : We must disable BH
+		 */
+		return false;
+
+	__lock_sock(sk);
+	sk->sk_lock.owned = 1;
+	spin_unlock(&sk->sk_lock.slock);
+	/*
+	 * The sk_lock has mutex_lock() semantics here:
+	 */
+	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+	local_bh_enable();
+	return true;
+}
+EXPORT_SYMBOL(lock_sock_fast);
+
 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
 {
 	struct timeval tv;
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 58f7bc156850..6beb6a7d6fba 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -124,9 +124,9 @@ static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
 	return queued;
 }
 
-static u8 dccp_reset_code_convert(const u8 code)
+static u16 dccp_reset_code_convert(const u8 code)
 {
-	const u8 error_code[] = {
+	const u16 error_code[] = {
 	[DCCP_RESET_CODE_CLOSED]	= 0,	/* normal termination */
 	[DCCP_RESET_CODE_UNSPECIFIED]	= 0,	/* nothing known */
 	[DCCP_RESET_CODE_ABORTED]	= ECONNRESET,
@@ -148,7 +148,7 @@ static u8 dccp_reset_code_convert(const u8 code)
 
 static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
 {
-	u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);
+	u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);
 
 	sk->sk_err = err;
 
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 1b08cae9c65b..07395f861d35 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -296,7 +296,7 @@ static inline u8 dccp_ndp_len(const u64 ndp)
 {
 	if (likely(ndp <= 0xFF))
 		return 1;
-	return likely(ndp <= USHORT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6);
+	return likely(ndp <= USHRT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6);
 }
 
 int dccp_insert_option(struct sock *sk, struct sk_buff *skb,
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
index 3d803a1b9fb6..1627ef2e8522 100644
--- a/net/ieee802154/wpan-class.c
+++ b/net/ieee802154/wpan-class.c
@@ -147,13 +147,15 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size)
 	struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size,
 			GFP_KERNEL);
 
+	if (!phy)
+		goto out;
 	mutex_lock(&wpan_phy_mutex);
 	phy->idx = wpan_phy_idx++;
 	if (unlikely(!wpan_phy_idx_valid(phy->idx))) {
 		wpan_phy_idx--;
 		mutex_unlock(&wpan_phy_mutex);
 		kfree(phy);
-		return NULL;
+		goto out;
 	}
 	mutex_unlock(&wpan_phy_mutex);
 
@@ -168,6 +170,9 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size)
 	phy->current_page = 0; /* for compatibility */
 
 	return phy;
+
+out:
+	return NULL;
 }
 EXPORT_SYMBOL(wpan_phy_alloc);
 
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 45889103b3e2..856123fe32f9 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1911,7 +1911,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 	struct rtattr *mp_head;
 
 	/* If cache is unresolved, don't try to parse IIF and OIF */
-	if (c->mfc_parent > MAXVIFS)
+	if (c->mfc_parent >= MAXVIFS)
 		return -ENOENT;
 
 	if (VIF_EXISTS(mrt, c->mfc_parent))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9de6a698f91d..58585748bdac 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk)
 	spin_unlock_bh(&rcvq->lock);
 
 	if (!skb_queue_empty(&list_kill)) {
-		lock_sock_bh(sk);
+		bool slow = lock_sock_fast(sk);
+
 		__skb_queue_purge(&list_kill);
 		sk_mem_reclaim_partial(sk);
-		unlock_sock_bh(sk);
+		unlock_sock_fast(sk, slow);
 	}
 	return res;
 }
@@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	int peeked;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
+	bool slow;
 
 	/*
 	 *	Check any passed addresses
@@ -1197,10 +1199,10 @@ out:
 	return err;
 
 csum_copy_err:
-	lock_sock_bh(sk);
+	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags))
 		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 
 	if (noblock)
 		return -EAGAIN;
@@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
-	lock_sock_bh(sk);
+	bool slow = lock_sock_fast(sk);
 	udp_flush_pending_frames(sk);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 }
 
 /*
@@ -1686,8 +1688,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 			return -ENOPROTOOPT;
 		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
 			val = 8;
-		else if (val > USHORT_MAX)
-			val = USHORT_MAX;
+		else if (val > USHRT_MAX)
+			val = USHRT_MAX;
 		up->pcslen = val;
 		up->pcflag |= UDPLITE_SEND_CC;
 		break;
@@ -1700,8 +1702,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 			return -ENOPROTOOPT;
 		if (val != 0 && val < 8) /* Avoid silly minimal values. */
 			val = 8;
-		else if (val > USHORT_MAX)
-			val = USHORT_MAX;
+		else if (val > USHRT_MAX)
+			val = USHRT_MAX;
 		up->pcrlen = val;
 		up->pcflag |= UDPLITE_RECV_CC;
 		break;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cd963f64e27c..89425af0684c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -507,7 +507,7 @@ int ip6_forward(struct sk_buff *skb)
 	if (mtu < IPV6_MIN_MTU)
 		mtu = IPV6_MIN_MTU;
 
-	if (skb->len > mtu) {
+	if (skb->len > mtu && !skb_is_gso(skb)) {
 		/* Again, force OUTPUT device used as source address */
 		skb->dev = dst->dev;
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index bd9e7d3e9c8e..073071f2b75b 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2017,7 +2017,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 	struct rtattr *mp_head;
 
 	/* If cache is unresolved, don't try to parse IIF and OIF */
-	if (c->mf6c_parent > MAXMIFS)
+	if (c->mf6c_parent >= MAXMIFS)
 		return -ENOENT;
 
 	if (MIF_EXISTS(mrt, c->mf6c_parent))
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3d7a2c0b836a..87be58673b55 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -328,6 +328,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
 	int is_udp4;
+	bool slow;
 
 	if (addr_len)
 		*addr_len=sizeof(struct sockaddr_in6);
@@ -424,7 +425,7 @@ out:
 	return err;
 
 csum_copy_err:
-	lock_sock_bh(sk);
+	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags)) {
 		if (is_udp4)
 			UDP_INC_STATS_USER(sock_net(sk),
@@ -433,7 +434,7 @@ csum_copy_err:
 			UDP6_INC_STATS_USER(sock_net(sk),
 					UDP_MIB_INERRORS, is_udplite);
 	}
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 
 	if (flags & MSG_DONTWAIT)
 		return -EAGAIN;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c8b4599a752e..9637e45744fa 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1619,7 +1619,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 save_message:
 	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
 	if (!save_msg)
-		return;
+		goto out_unlock;
 	save_msg->path = path;
 	save_msg->msg = *msg;
 
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index fd8b28361a64..f28ad2cc8428 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -632,13 +632,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 		iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
 					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
 		if (!iucv_irq_data[cpu])
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
+
 		iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
 				     GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
 		if (!iucv_param[cpu]) {
 			kfree(iucv_irq_data[cpu]);
 			iucv_irq_data[cpu] = NULL;
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
 		}
 		iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
 					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
@@ -647,7 +648,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 			iucv_param[cpu] = NULL;
 			kfree(iucv_irq_data[cpu]);
 			iucv_irq_data[cpu] = NULL;
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
 		}
 		break;
 	case CPU_UP_CANCELED:
@@ -677,7 +678,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 		cpu_clear(cpu, cpumask);
 		if (cpus_empty(cpumask))
 			/* Can't offline last IUCV enabled cpu. */
-			return NOTIFY_BAD;
+			return notifier_from_errno(-EINVAL);
 		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
 		if (cpus_empty(iucv_irq_cpumask))
 			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 8d4b41787dcf..e8f6e3b252d8 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -140,7 +140,6 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 					struct ieee80211_sub_if_data,
 					u.ap);
 
-	key->conf.ap_addr = sdata->dev->dev_addr;
 	ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
 
 	if (!ret) {
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 730197591ab5..ba9360a475b0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -259,7 +259,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 	skb_queue_head_init(&sta->tx_filtered);
 
 	for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
-		sta->last_seq_ctrl[i] = cpu_to_le16(USHORT_MAX);
+		sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 	printk(KERN_DEBUG "%s: Allocated STA %pM\n",
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 48a5e80957f0..df9d45544ca5 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -145,7 +145,7 @@ enum plink_state {
 /**
  * struct sta_ampdu_mlme - STA aggregation information.
  *
- * @tid_state_rx: TID's state in Rx session state machine.
+ * @tid_active_rx: TID's state in Rx session state machine.
 * @tid_rx: aggregation info for Rx per TID
 * @tid_state_tx: TID's state in Tx session state machine.
 * @tid_tx: aggregation info for Tx per TID
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index b83c530c5e0a..eeeb8bc73982 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -424,6 +424,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 
 	spin_lock_bh(&nf_conntrack_lock);
 
+	/* We have to check the DYING flag inside the lock to prevent
+	   a race against nf_ct_get_next_corpse() possibly called from
+	   user context, else we insert an already 'dead' hash, blocking
+	   further use of that particular connection -JM */
+
+	if (unlikely(nf_ct_is_dying(ct))) {
+		spin_unlock_bh(&nf_conntrack_lock);
+		return NF_ACCEPT;
+	}
+
 	/* See if there's one in the list already, including reverse:
 	   NAT could have grabbed it without realizing, since we're
 	   not in the hash. If there is, we lost race. */
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index b20f4275893c..53d892210a04 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1393,10 +1393,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
 
 	nf_ct_refresh(ct, skb, sip_timeout * HZ);
 
-	if (skb_is_nonlinear(skb)) {
-		pr_debug("Copy of skbuff not supported yet.\n");
-		return NF_ACCEPT;
-	}
+	if (unlikely(skb_linearize(skb)))
+		return NF_DROP;
 
 	dptr = skb->data + dataoff;
 	datalen = skb->len - dataoff;
@@ -1455,10 +1453,8 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
 
 	nf_ct_refresh(ct, skb, sip_timeout * HZ);
 
-	if (skb_is_nonlinear(skb)) {
-		pr_debug("Copy of skbuff not supported yet.\n");
-		return NF_ACCEPT;
-	}
+	if (unlikely(skb_linearize(skb)))
+		return NF_DROP;
 
 	dptr = skb->data + dataoff;
 	datalen = skb->len - dataoff;
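Note on the sip_help_tcp()/sip_help_udp() changes: rather than refusing to parse non-linear skbs, the helper now linearizes them so the flat pointer arithmetic that follows stays valid. A minimal sketch of the pattern, with a hypothetical parse_sip() name for illustration only:

	static int parse_sip(struct sk_buff *skb, unsigned int dataoff)
	{
		const char *dptr;

		/* skb_linearize() is a no-op on an already-linear skb and
		 * returns non-zero (-ENOMEM) if pulling the paged data into
		 * the linear area fails; dropping is then the only safe
		 * answer, since dptr would not cover the whole payload. */
		if (unlikely(skb_linearize(skb)))
			return NF_DROP;

		dptr = skb->data + dataoff;
		/* ... parse skb->len - dataoff bytes starting at dptr ... */
		return NF_ACCEPT;
	}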
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index d7920d9f49e9..859d9fd429c8 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -76,7 +76,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 	if (ip_route_output_key(net, &rt, &fl) != 0)
 		return false;
 
-	dst_release(skb_dst(skb));
+	skb_dst_drop(skb);
 	skb_dst_set(skb, &rt->u.dst);
 	skb->dev      = rt->u.dst.dev;
 	skb->protocol = htons(ETH_P_IP);
@@ -157,7 +157,7 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 	if (dst == NULL)
 		return false;
 
-	dst_release(skb_dst(skb));
+	skb_dst_drop(skb);
 	skb_dst_set(skb, dst);
 	skb->dev      = dst->dev;
 	skb->protocol = htons(ETH_P_IPV6);
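Note on the xt_TEE changes: with the "noref" skb dst infrastructure that entered the tree around this time, an skb may carry a dst pointer without owning a reference on it, so the unconditional dst_release(skb_dst(skb)) could underflow the route's refcount. skb_dst_drop() releases the dst only when the skb actually holds a reference and clears the pointer either way. Sketch of the intended usage (assuming new_dst's reference is transferred to the skb):

	skb_dst_drop(skb);		/* safe for both ref'd and noref dsts */
	skb_dst_set(skb, new_dst);	/* skb now owns new_dst's reference */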
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index af4d38bc3b22..7b048a35ca58 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -626,6 +626,7 @@ static void pep_sock_close(struct sock *sk, long timeout)
 	struct pep_sock *pn = pep_sk(sk);
 	int ifindex = 0;
 
+	sock_hold(sk); /* keep a reference after sk_common_release() */
 	sk_common_release(sk);
 
 	lock_sock(sk);
@@ -644,6 +645,7 @@ static void pep_sock_close(struct sock *sk, long timeout)
 
 	if (ifindex)
 		gprs_detach(sk);
+	sock_put(sk);
 }
 
 static int pep_wait_connreq(struct sock *sk, int noblock)
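Note on the pep_sock_close() change: sk_common_release() may drop the caller's last reference, after which the lock_sock()/gprs_detach() calls that follow would touch freed memory. The fix is the standard hold/put bracket, roughly:

	sock_hold(sk);		/* pin sk across the release */
	sk_common_release(sk);	/* may drop the socket's own reference */
	/* ... sk can still be dereferenced safely here ... */
	sock_put(sk);		/* drop our pin; sk may be freed now */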
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 221180384fd7..78ef2c5e130b 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -16,14 +16,11 @@
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 #include <linux/cgroup.h>
+#include <linux/rcupdate.h>
 #include <net/rtnetlink.h>
 #include <net/pkt_cls.h>
-
-struct cgroup_cls_state
-{
-	struct cgroup_subsys_state css;
-	u32 classid;
-};
+#include <net/sock.h>
+#include <net/cls_cgroup.h>
 
 static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
 					       struct cgroup *cgrp);
@@ -112,6 +109,10 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
 	struct cls_cgroup_head *head = tp->root;
 	u32 classid;
 
+	rcu_read_lock();
+	classid = task_cls_state(current)->classid;
+	rcu_read_unlock();
+
 	/*
 	 * Due to the nature of the classifier it is required to ignore all
 	 * packets originating from softirq context as accessing `current'
@@ -122,12 +123,12 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
 	 * calls by looking at the number of nested bh disable calls because
 	 * softirqs always disables bh.
 	 */
-	if (softirq_count() != SOFTIRQ_OFFSET)
-		return -1;
-
-	rcu_read_lock();
-	classid = task_cls_state(current)->classid;
-	rcu_read_unlock();
+	if (softirq_count() != SOFTIRQ_OFFSET) {
+		/* If there is an sk_classid we'll use that. */
+		if (!skb->sk)
+			return -1;
+		classid = skb->sk->sk_classid;
+	}
 
 	if (!classid)
 		return -1;
@@ -289,18 +290,35 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
 
 static int __init init_cgroup_cls(void)
 {
-	int ret = register_tcf_proto_ops(&cls_cgroup_ops);
-	if (ret)
-		return ret;
+	int ret;
+
 	ret = cgroup_load_subsys(&net_cls_subsys);
 	if (ret)
-		unregister_tcf_proto_ops(&cls_cgroup_ops);
+		goto out;
+
+#ifndef CONFIG_NET_CLS_CGROUP
+	/* We can't use rcu_assign_pointer because this is an int. */
+	smp_wmb();
+	net_cls_subsys_id = net_cls_subsys.subsys_id;
+#endif
+
+	ret = register_tcf_proto_ops(&cls_cgroup_ops);
+	if (ret)
+		cgroup_unload_subsys(&net_cls_subsys);
+
+out:
 	return ret;
 }
 
 static void __exit exit_cgroup_cls(void)
{
 	unregister_tcf_proto_ops(&cls_cgroup_ops);
+
+#ifndef CONFIG_NET_CLS_CGROUP
+	net_cls_subsys_id = -1;
+	synchronize_rcu();
+#endif
+
 	cgroup_unload_subsys(&net_cls_subsys);
 }
 
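Note on the cls_cgroup init/exit ordering: the classifier is now registered only after cgroup_load_subsys() succeeds, and the modular build publishes net_cls_subsys_id with smp_wmb() because a plain int cannot go through rcu_assign_pointer(). A reader-side sketch of what that barrier is assumed to pair with (cf. task_cls_classid(); simplified, not part of this hunk):

	rcu_read_lock();
	id = net_cls_subsys_id;		/* -1 until init publishes it */
	if (id >= 0) {
		smp_rmb();		/* pairs with the smp_wmb() in init */
		/* ... task_subsys_state(current, id) lookup ... */
	}
	rcu_read_unlock();

On the exit side, resetting the id to -1 before synchronize_rcu() ensures no reader can still be using the old id once the subsystem is unloaded.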
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index fe35c1f338c2..b9e8c3b7d406 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1195,6 +1195,11 @@ nla_put_failure:
 	return -1;
 }
 
+static bool tc_qdisc_dump_ignore(struct Qdisc *q)
+{
+	return (q->flags & TCQ_F_BUILTIN) ? true : false;
+}
+
 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
 			struct nlmsghdr *n, u32 clid,
 			struct Qdisc *old, struct Qdisc *new)
@@ -1206,11 +1211,11 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
 	if (!skb)
 		return -ENOBUFS;
 
-	if (old && old->handle) {
+	if (old && !tc_qdisc_dump_ignore(old)) {
 		if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
 			goto err_out;
 	}
-	if (new) {
+	if (new && !tc_qdisc_dump_ignore(new)) {
 		if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
 			goto err_out;
 	}
@@ -1223,11 +1228,6 @@ err_out:
 	return -EINVAL;
 }
 
-static bool tc_qdisc_dump_ignore(struct Qdisc *q)
-{
-	return (q->flags & TCQ_F_BUILTIN) ? true : false;
-}
-
 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
 			      struct netlink_callback *cb,
 			      int *q_idx_p, int s_q_idx)
diff --git a/net/socket.c b/net/socket.c
index f9f7d0872cac..367d5477d00f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -94,6 +94,7 @@
 
 #include <net/compat.h>
 #include <net/wext.h>
+#include <net/cls_cgroup.h>
 
 #include <net/sock.h>
 #include <linux/netfilter.h>
@@ -558,6 +559,8 @@ static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
 	int err;
 
+	sock_update_classid(sock->sk);
+
 	si->sock = sock;
 	si->scm = NULL;
 	si->msg = msg;
@@ -684,6 +687,8 @@ static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
 {
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
 
+	sock_update_classid(sock->sk);
+
 	si->sock = sock;
 	si->scm = NULL;
 	si->msg = msg;
@@ -777,6 +782,8 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
 	if (unlikely(!sock->ops->splice_read))
 		return -EINVAL;
 
+	sock_update_classid(sock->sk);
+
 	return sock->ops->splice_read(sock, ppos, pipe, len, flags);
 }
 
@@ -3069,6 +3076,8 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
 int kernel_sendpage(struct socket *sock, struct page *page, int offset,
 		    size_t size, int flags)
 {
+	sock_update_classid(sock->sk);
+
 	if (sock->ops->sendpage)
 		return sock->ops->sendpage(sock, page, offset, size, flags);
 
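Note on the socket.c hooks: sock_update_classid() refreshes sk->sk_classid from the calling task's net_cls cgroup on each send, receive, and splice path, which is what lets cls_cgroup_classify() fall back to skb->sk->sk_classid in softirq context (see the cls_cgroup hunk above). Per the net/core/sock.c change listed in this merge's diffstat, it is roughly:

	void sock_update_classid(struct sock *sk)
	{
		u32 classid = task_cls_classid(current);

		/* write only when the task has a classid and it differs,
		 * keeping the common fast path read-only */
		if (classid && classid != sk->sk_classid)
			sk->sk_classid = classid;
	}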
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index c2173ebdb33c..58de76c8540c 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -34,6 +34,7 @@
 #include <linux/sunrpc/cache.h>
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
+#include <linux/smp_lock.h>
 
 #define RPCDBG_FACILITY RPCDBG_CACHE
 
@@ -1545,12 +1546,18 @@ static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
 	return cache_poll(filp, wait, cd);
 }
 
-static int cache_ioctl_pipefs(struct inode *inode, struct file *filp,
+static long cache_ioctl_pipefs(struct file *filp,
 			      unsigned int cmd, unsigned long arg)
 {
+	struct inode *inode = filp->f_dentry->d_inode;
 	struct cache_detail *cd = RPC_I(inode)->private;
+	long ret;
 
-	return cache_ioctl(inode, filp, cmd, arg, cd);
+	lock_kernel();
+	ret = cache_ioctl(inode, filp, cmd, arg, cd);
+	unlock_kernel();
+
+	return ret;
 }
 
 static int cache_open_pipefs(struct inode *inode, struct file *filp)
@@ -1573,7 +1580,7 @@ const struct file_operations cache_file_operations_pipefs = {
 	.read		= cache_read_pipefs,
 	.write		= cache_write_pipefs,
 	.poll		= cache_poll_pipefs,
-	.ioctl		= cache_ioctl_pipefs, /* for FIONREAD */
+	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
 	.open		= cache_open_pipefs,
 	.release	= cache_release_pipefs,
 };
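Note on the cache.c conversion (the same pattern recurs in rpc_pipe.c below): the old .ioctl hook was invoked with the Big Kernel Lock held, while .unlocked_ioctl is not, so the handler takes the BKL itself to keep the effective locking unchanged until it can be audited away. The generic shape, using a hypothetical foo_ prefix:

	static long foo_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg)
	{
		long ret;

		lock_kernel();		/* preserve the old BKL coverage */
		ret = foo_ioctl_locked(filp, cmd, arg);
		unlock_kernel();
		return ret;
	}

The inode argument is also gone from the new prototype, hence the explicit filp->f_dentry->d_inode lookup in the hunk above.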
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 20e30c6f8355..95ccbcf45d3e 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -27,6 +27,7 @@
 #include <linux/workqueue.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
 #include <linux/sunrpc/cache.h>
+#include <linux/smp_lock.h>
 
 static struct vfsmount *rpc_mount __read_mostly;
 static int rpc_mount_count;
@@ -309,8 +310,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
 }
 
 static int
-rpc_pipe_ioctl(struct inode *ino, struct file *filp,
-		unsigned int cmd, unsigned long arg)
+rpc_pipe_ioctl_unlocked(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
 	int len;
@@ -331,13 +331,25 @@ rpc_pipe_ioctl(struct inode *ino, struct file *filp,
 	}
 }
 
+static long
+rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	lock_kernel();
+	ret = rpc_pipe_ioctl_unlocked(filp, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
 static const struct file_operations rpc_pipe_fops = {
 	.owner		= THIS_MODULE,
 	.llseek		= no_llseek,
 	.read		= rpc_pipe_read,
 	.write		= rpc_pipe_write,
 	.poll		= rpc_pipe_poll,
-	.ioctl		= rpc_pipe_ioctl,
+	.unlocked_ioctl	= rpc_pipe_ioctl,
 	.open		= rpc_pipe_open,
 	.release	= rpc_pipe_release,
 };
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 121105355f60..dac219a56ae1 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -783,7 +783,7 @@ static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
 	port = ntohl(*p);
 	dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
 			task->tk_msg.rpc_proc->p_name, port);
-	if (unlikely(port > USHORT_MAX))
+	if (unlikely(port > USHRT_MAX))
 		return -EIO;
 
 	rpcb->r_port = port;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3fc325399ee4..dcd0132396ba 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -166,7 +166,6 @@ EXPORT_SYMBOL_GPL(xprt_unregister_transport);
 int xprt_load_transport(const char *transport_name)
 {
 	struct xprt_class *t;
-	char module_name[sizeof t->name + 5];
 	int result;
 
 	result = 0;
@@ -178,9 +177,7 @@ int xprt_load_transport(const char *transport_name)
 		}
 	}
 	spin_unlock(&xprt_list_lock);
-	strcpy(module_name, "xprt");
-	strncat(module_name, transport_name, sizeof t->name);
-	result = request_module(module_name);
+	result = request_module("xprt%s", transport_name);
 out:
 	return result;
 }
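Note on the xprt.c change: request_module() accepts a printf-style format, so the module alias can be built without the fixed-size module_name buffer and its strcpy()/strncat() sizing subtleties. For example, assuming a transport named "rdma":

	/* loads the module aliased "xprtrdma"; returns 0 on success */
	result = request_module("xprt%s", transport_name);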
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index b7cd8cccbe72..2a9675136c68 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2293,6 +2293,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 	struct sockaddr *addr = args->dstaddr;
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
+	struct rpc_xprt *ret;
 
 	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2330,8 +2331,8 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 
 	if (xprt_bound(xprt))
@@ -2346,10 +2347,11 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
-
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
 
 static const struct rpc_timeout xs_tcp_default_timeout = {
@@ -2368,6 +2370,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 	struct sockaddr *addr = args->dstaddr;
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
+	struct rpc_xprt *ret;
 
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2403,8 +2406,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 
 	if (xprt_bound(xprt))
@@ -2420,10 +2423,11 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
-
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
 
 /**
@@ -2437,6 +2441,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
 	struct svc_sock *bc_sock;
+	struct rpc_xprt *ret;
 
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2476,8 +2481,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 				   RPCBIND_NETID_TCP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 
 	if (xprt_bound(xprt))
@@ -2499,9 +2504,11 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
 
 static struct xprt_class xs_udp_transport = {
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index d92d088026bf..b01a6f6397d7 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -50,7 +50,7 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
 	struct ieee80211_channel *chan;
 	int result;
 
-	if (wdev->iftype == NL80211_IFTYPE_MONITOR)
+	if (wdev && wdev->iftype == NL80211_IFTYPE_MONITOR)
 		wdev = NULL;
 
 	if (wdev) {
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index aaa1aad566cd..db71150b8040 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4443,9 +4443,10 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
 		if (channel_type != NL80211_CHAN_NO_HT &&
 		    channel_type != NL80211_CHAN_HT20 &&
 		    channel_type != NL80211_CHAN_HT40PLUS &&
-		    channel_type != NL80211_CHAN_HT40MINUS)
+		    channel_type != NL80211_CHAN_HT40MINUS) {
 			err = -EINVAL;
 			goto out;
+		}
 	}
 
 	freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -4717,9 +4718,10 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
 		if (channel_type != NL80211_CHAN_NO_HT &&
 		    channel_type != NL80211_CHAN_HT20 &&
 		    channel_type != NL80211_CHAN_HT40PLUS &&
-		    channel_type != NL80211_CHAN_HT40MINUS)
+		    channel_type != NL80211_CHAN_HT40MINUS) {
 			err = -EINVAL;
 			goto out;
+		}
 	}
 
 	freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index a026c6d56bd3..58401d246bda 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -515,7 +515,7 @@ cfg80211_inform_bss(struct wiphy *wiphy,
 
 	privsz = wiphy->bss_priv_size;
 
-	if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC &&
+	if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
 		    (signal < 0 || signal > 100)))
 		return NULL;
 
@@ -571,7 +571,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
 					u.probe_resp.variable);
 	size_t privsz = wiphy->bss_priv_size;
 
-	if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC &&
+	if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
 		    (signal < 0 || signal > 100)))
 		return NULL;
 