Diffstat (limited to 'net')
-rw-r--r--  net/802/psnap.c | 1
-rw-r--r--  net/8021q/vlan.h | 5
-rw-r--r--  net/9p/Kconfig | 8
-rw-r--r--  net/9p/client.c | 30
-rw-r--r--  net/9p/mod.c | 4
-rw-r--r--  net/9p/trans_fd.c | 7
-rw-r--r--  net/9p/trans_rdma.c | 3
-rw-r--r--  net/9p/util.c | 2
-rw-r--r--  net/atm/proc.c | 4
-rw-r--r--  net/bridge/br_netfilter.c | 6
-rw-r--r--  net/can/bcm.c | 6
-rw-r--r--  net/ceph/messenger.c | 82
-rw-r--r--  net/ceph/osd_client.c | 19
-rw-r--r--  net/ceph/osdmap.c | 13
-rw-r--r--  net/core/dev.c | 12
-rw-r--r--  net/core/dst.c | 2
-rw-r--r--  net/core/fib_rules.c | 1
-rw-r--r--  net/core/filter.c | 4
-rw-r--r--  net/core/net_namespace.c | 65
-rw-r--r--  net/core/rtnetlink.c | 14
-rw-r--r--  net/dns_resolver/dns_key.c | 10
-rw-r--r--  net/ipv4/igmp.c | 10
-rw-r--r--  net/ipv4/ping.c | 3
-rw-r--r--  net/ipv4/raw.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 6
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv6/raw.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 6
-rw-r--r--  net/ipv6/udp.c | 2
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 2
-rw-r--r--  net/key/af_key.c | 2
-rw-r--r--  net/mac80211/iface.c | 4
-rw-r--r--  net/mac80211/main.c | 22
-rw-r--r--  net/mac80211/mesh.h | 7
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 204
-rw-r--r--  net/mac80211/scan.c | 5
-rw-r--r--  net/netlink/af_netlink.c | 2
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/phonet/socket.c | 2
-rw-r--r--  net/rds/ib.c | 2
-rw-r--r--  net/rds/ib_cm.c | 2
-rw-r--r--  net/rds/iw.c | 2
-rw-r--r--  net/rds/iw_cm.c | 2
-rw-r--r--  net/rds/rdma_transport.c | 3
-rw-r--r--  net/rfkill/Kconfig | 9
-rw-r--r--  net/rfkill/Makefile | 1
-rw-r--r--  net/rfkill/rfkill-gpio.c | 227
-rw-r--r--  net/sched/sch_sfq.c | 22
-rw-r--r--  net/sctp/associola.c | 16
-rw-r--r--  net/sctp/proc.c | 4
-rw-r--r--  net/sunrpc/auth.c | 4
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 3
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c | 2
-rw-r--r--  net/unix/af_unix.c | 2
-rw-r--r--  net/wireless/core.h | 5
-rw-r--r--  net/wireless/nl80211.c | 12
-rw-r--r--  net/wireless/sme.c | 19
-rw-r--r--  net/wireless/util.c | 2
58 files changed, 702 insertions(+), 220 deletions(-)
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 21cde8fd5795..db6baf7cf6e9 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -147,7 +147,6 @@ struct datalink_proto *register_snap_client(const unsigned char *desc,
 out:
 	spin_unlock_bh(&snap_lock);
 
-	synchronize_net();
 	return proto;
 }
 
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index c3408def8a19..9da07e30d1a2 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -118,11 +118,6 @@ extern void vlan_netlink_fini(void);
 
 extern struct rtnl_link_ops vlan_link_ops;
 
-static inline int is_vlan_dev(struct net_device *dev)
-{
-	return dev->priv_flags & IFF_802_1Q_VLAN;
-}
-
 extern int vlan_net_id;
 
 struct proc_dir_entry;
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index 7ed75c7bd5d1..d9ea09b11cf8 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -3,8 +3,8 @@
 #
 
 menuconfig NET_9P
-	depends on NET && EXPERIMENTAL
-	tristate "Plan 9 Resource Sharing Support (9P2000) (Experimental)"
+	depends on NET
+	tristate "Plan 9 Resource Sharing Support (9P2000)"
 	help
 	  If you say Y here, you will get experimental support for
 	  Plan 9 resource sharing via the 9P2000 protocol.
@@ -16,8 +16,8 @@ menuconfig NET_9P
 if NET_9P
 
 config NET_9P_VIRTIO
-	depends on EXPERIMENTAL && VIRTIO
-	tristate "9P Virtio Transport (Experimental)"
+	depends on VIRTIO
+	tristate "9P Virtio Transport"
 	help
 	  This builds support for a transports between
 	  guest partitions and a host partition.
diff --git a/net/9p/client.c b/net/9p/client.c
index ceab943dfc49..9e3b0e640da1 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -92,9 +92,6 @@ static int get_protocol_version(const substring_t *name)
 	return version;
 }
 
-static struct p9_req_t *
-p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
-
 /**
  * parse_options - parse mount options into client structure
  * @opts: options string passed from mount
@@ -307,12 +304,13 @@ static int p9_tag_init(struct p9_client *c)
 	c->tagpool = p9_idpool_create();
 	if (IS_ERR(c->tagpool)) {
 		err = PTR_ERR(c->tagpool);
-		c->tagpool = NULL;
 		goto error;
 	}
-
-	p9_idpool_get(c->tagpool); /* reserve tag 0 */
-
+	err = p9_idpool_get(c->tagpool); /* reserve tag 0 */
+	if (err < 0) {
+		p9_idpool_destroy(c->tagpool);
+		goto error;
+	}
 	c->max_tag = 0;
 error:
 	return err;
@@ -518,12 +516,15 @@ out_err:
 	return err;
 }
 
+static struct p9_req_t *
+p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
+
 /**
  * p9_client_flush - flush (cancel) a request
  * @c: client state
  * @oldreq: request to cancel
  *
- * This sents a flush for a particular requests and links
+ * This sents a flush for a particular request and links
  * the flush request to the original request. The current
  * code only supports a single flush request although the protocol
  * allows for multiple flush requests to be sent for a single request.
@@ -789,11 +790,13 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 	spin_lock_init(&clnt->lock);
 	INIT_LIST_HEAD(&clnt->fidlist);
 
-	p9_tag_init(clnt);
+	err = p9_tag_init(clnt);
+	if (err < 0)
+		goto free_client;
 
 	err = parse_opts(options, clnt);
 	if (err < 0)
-		goto free_client;
+		goto destroy_tagpool;
 
 	if (!clnt->trans_mod)
 		clnt->trans_mod = v9fs_get_default_trans();
@@ -802,13 +805,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 		err = -EPROTONOSUPPORT;
 		P9_DPRINTK(P9_DEBUG_ERROR,
 				"No transport defined or default transport\n");
-		goto free_client;
+		goto destroy_tagpool;
 	}
 
 	clnt->fidpool = p9_idpool_create();
 	if (IS_ERR(clnt->fidpool)) {
 		err = PTR_ERR(clnt->fidpool);
-		clnt->fidpool = NULL;
 		goto put_trans;
 	}
 
@@ -834,6 +836,8 @@ destroy_fidpool:
 	p9_idpool_destroy(clnt->fidpool);
 put_trans:
 	v9fs_put_trans(clnt->trans_mod);
+destroy_tagpool:
+	p9_idpool_destroy(clnt->tagpool);
 free_client:
 	kfree(clnt);
 	return ERR_PTR(err);
@@ -1298,7 +1302,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 		if (count < rsize)
 			rsize = count;
 
-		/* Don't bother zerocopy form small IO (< 1024) */
+		/* Don't bother zerocopy for small IO (< 1024) */
 		if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
 				P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
 			req = p9_client_rpc(clnt, P9_TREAD, "dqE", fid->fid, offset,
diff --git a/net/9p/mod.c b/net/9p/mod.c
index cf8a4128cd5c..72c398275051 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -139,7 +139,7 @@ void v9fs_put_trans(struct p9_trans_module *m)
 }
 
 /**
- * v9fs_init - Initialize module
+ * init_p9 - Initialize module
  *
  */
 static int __init init_p9(void)
@@ -154,7 +154,7 @@ static int __init init_p9(void)
 }
 
 /**
- * v9fs_init - shutdown module
+ * exit_p9 - shutdown module
  *
  */
 
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 4a9084395d35..fdfdb5747f63 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -916,8 +916,8 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
 	sin_server.sin_family = AF_INET;
 	sin_server.sin_addr.s_addr = in_aton(addr);
 	sin_server.sin_port = htons(opts.port);
-	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
-
+	err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
+			    SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
 	if (err) {
 		P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
 		return err;
@@ -954,7 +954,8 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
 
 	sun_server.sun_family = PF_UNIX;
 	strcpy(sun_server.sun_path, addr);
-	err = sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
+	err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
+			    SOCK_STREAM, 0, &csocket, 1);
 	if (err < 0) {
 		P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
 		return err;
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 844a7a5607e3..159c50f1c6bf 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -589,7 +589,8 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
 		return -ENOMEM;
 
 	/* Create the RDMA CM ID */
-	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP);
+	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP,
+				     IB_QPT_RC);
 	if (IS_ERR(rdma->cm_id))
 		goto error;
 
diff --git a/net/9p/util.c b/net/9p/util.c
index da6af81e59d9..9c1c9348ac35 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -93,7 +93,7 @@ int p9_idpool_get(struct p9_idpool *p)
 
 retry:
 	if (idr_pre_get(&p->pool, GFP_NOFS) == 0)
-		return 0;
+		return -1;
 
 	spin_lock_irqsave(&p->lock, flags);
 
diff --git a/net/atm/proc.c b/net/atm/proc.c
index f85da0779e5e..be3afdefec58 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -191,7 +191,7 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
 {
 	struct sock *sk = sk_atm(vcc);
 
-	seq_printf(seq, "%p ", vcc);
+	seq_printf(seq, "%pK ", vcc);
 	if (!vcc->dev)
 		seq_printf(seq, "Unassigned ");
 	else
@@ -218,7 +218,7 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
 {
 	if (!vcc->dev)
 		seq_printf(seq, sizeof(void *) == 4 ?
-			   "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
+			   "N/A@%pK%10s" : "N/A@%pK%2s", vcc, "");
 	else
 		seq_printf(seq, "%3d %3d %5d ",
 			   vcc->dev->number, vcc->vpi, vcc->vci);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index e1f5ec75e91c..3fa123185e89 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -117,6 +117,10 @@ static struct dst_ops fake_dst_ops = {
  * ipt_REJECT needs it. Future netfilter modules might
  * require us to fill additional fields.
  */
+static const u32 br_dst_default_metrics[RTAX_MAX] = {
+	[RTAX_MTU - 1] = 1500,
+};
+
 void br_netfilter_rtable_init(struct net_bridge *br)
 {
 	struct rtable *rt = &br->fake_rtable;
@@ -124,7 +128,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 	atomic_set(&rt->dst.__refcnt, 1);
 	rt->dst.dev = br->dev;
 	rt->dst.path = &rt->dst;
-	dst_metric_set(&rt->dst, RTAX_MTU, 1500);
+	dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
 	rt->dst.flags = DST_NOXFRM;
 	rt->dst.ops = &fake_dst_ops;
 }
diff --git a/net/can/bcm.c b/net/can/bcm.c
index cced806098a9..184a6572b67e 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -165,9 +165,9 @@ static int bcm_proc_show(struct seq_file *m, void *v)
 	struct bcm_sock *bo = bcm_sk(sk);
 	struct bcm_op *op;
 
-	seq_printf(m, ">>> socket %p", sk->sk_socket);
-	seq_printf(m, " / sk %p", sk);
-	seq_printf(m, " / bo %p", bo);
+	seq_printf(m, ">>> socket %pK", sk->sk_socket);
+	seq_printf(m, " / sk %pK", sk);
+	seq_printf(m, " / bo %pK", bo);
 	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
 	seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
 	seq_printf(m, " <<<\n");
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index e15a82ccc05f..78b55f49de7c 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -76,7 +76,8 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
 		break;
 
 	default:
-		sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family);
+		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %d)",
+			 (int)ss->ss_family);
 	}
 
 	return s;
@@ -598,7 +599,7 @@ static void prepare_write_keepalive(struct ceph_connection *con)
  * Connection negotiation.
  */
 
-static void prepare_connect_authorizer(struct ceph_connection *con)
+static int prepare_connect_authorizer(struct ceph_connection *con)
 {
 	void *auth_buf;
 	int auth_len = 0;
@@ -612,13 +613,20 @@ static void prepare_connect_authorizer(struct ceph_connection *con)
 		     con->auth_retry);
 	mutex_lock(&con->mutex);
 
+	if (test_bit(CLOSED, &con->state) ||
+	    test_bit(OPENING, &con->state))
+		return -EAGAIN;
+
 	con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
 	con->out_connect.authorizer_len = cpu_to_le32(auth_len);
 
-	con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
-	con->out_kvec[con->out_kvec_left].iov_len = auth_len;
-	con->out_kvec_left++;
-	con->out_kvec_bytes += auth_len;
+	if (auth_len) {
+		con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
+		con->out_kvec[con->out_kvec_left].iov_len = auth_len;
+		con->out_kvec_left++;
+		con->out_kvec_bytes += auth_len;
+	}
+	return 0;
 }
 
 /*
@@ -640,9 +648,9 @@ static void prepare_write_banner(struct ceph_messenger *msgr,
 	set_bit(WRITE_PENDING, &con->state);
 }
 
-static void prepare_write_connect(struct ceph_messenger *msgr,
-				  struct ceph_connection *con,
-				  int after_banner)
+static int prepare_write_connect(struct ceph_messenger *msgr,
+				 struct ceph_connection *con,
+				 int after_banner)
 {
 	unsigned global_seq = get_global_seq(con->msgr, 0);
 	int proto;
@@ -683,7 +691,7 @@ static void prepare_write_connect(struct ceph_messenger *msgr,
 	con->out_more = 0;
 	set_bit(WRITE_PENDING, &con->state);
 
-	prepare_connect_authorizer(con);
+	return prepare_connect_authorizer(con);
 }
 
 
@@ -1065,8 +1073,10 @@ static void addr_set_port(struct sockaddr_storage *ss, int p)
 	switch (ss->ss_family) {
 	case AF_INET:
 		((struct sockaddr_in *)ss)->sin_port = htons(p);
+		break;
 	case AF_INET6:
 		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
+		break;
 	}
 }
 
@@ -1216,6 +1226,7 @@ static int process_connect(struct ceph_connection *con)
 	u64 sup_feat = con->msgr->supported_features;
 	u64 req_feat = con->msgr->required_features;
 	u64 server_feat = le64_to_cpu(con->in_reply.features);
+	int ret;
 
 	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
 
@@ -1250,7 +1261,9 @@ static int process_connect(struct ceph_connection *con)
 			return -1;
 		}
 		con->auth_retry = 1;
-		prepare_write_connect(con->msgr, con, 0);
+		ret = prepare_write_connect(con->msgr, con, 0);
+		if (ret < 0)
+			return ret;
 		prepare_read_connect(con);
 		break;
 
@@ -1277,6 +1290,9 @@ static int process_connect(struct ceph_connection *con)
 		if (con->ops->peer_reset)
 			con->ops->peer_reset(con);
 		mutex_lock(&con->mutex);
+		if (test_bit(CLOSED, &con->state) ||
+		    test_bit(OPENING, &con->state))
+			return -EAGAIN;
 		break;
 
 	case CEPH_MSGR_TAG_RETRY_SESSION:
@@ -1341,7 +1357,9 @@ static int process_connect(struct ceph_connection *con)
 		 * to WAIT. This shouldn't happen if we are the
 		 * client.
 		 */
-		pr_err("process_connect peer connecting WAIT\n");
+		pr_err("process_connect got WAIT as client\n");
+		con->error_msg = "protocol error, got WAIT as client";
+		return -1;
 
 	default:
 		pr_err("connect protocol error, will retry\n");
@@ -1810,6 +1828,17 @@ static int try_read(struct ceph_connection *con)
 more:
 	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
 	     con->in_base_pos);
+
+	/*
+	 * process_connect and process_message drop and re-take
+	 * con->mutex. make sure we handle a racing close or reopen.
+	 */
+	if (test_bit(CLOSED, &con->state) ||
+	    test_bit(OPENING, &con->state)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
 	if (test_bit(CONNECTING, &con->state)) {
 		if (!test_bit(NEGOTIATING, &con->state)) {
 			dout("try_read connecting\n");
@@ -1938,8 +1967,10 @@ static void con_work(struct work_struct *work)
 {
 	struct ceph_connection *con = container_of(work, struct ceph_connection,
 						   work.work);
+	int ret;
 
 	mutex_lock(&con->mutex);
+restart:
 	if (test_and_clear_bit(BACKOFF, &con->state)) {
 		dout("con_work %p backing off\n", con);
 		if (queue_delayed_work(ceph_msgr_wq, &con->work,
@@ -1969,18 +2000,31 @@ static void con_work(struct work_struct *work)
 		con_close_socket(con);
 	}
 
-	if (test_and_clear_bit(SOCK_CLOSED, &con->state) ||
-	    try_read(con) < 0 ||
-	    try_write(con) < 0) {
-		mutex_unlock(&con->mutex);
-		ceph_fault(con); /* error/fault path */
-		goto done_unlocked;
-	}
+	if (test_and_clear_bit(SOCK_CLOSED, &con->state))
+		goto fault;
+
+	ret = try_read(con);
+	if (ret == -EAGAIN)
+		goto restart;
+	if (ret < 0)
+		goto fault;
+
+	ret = try_write(con);
+	if (ret == -EAGAIN)
+		goto restart;
+	if (ret < 0)
+		goto fault;
 
 done:
 	mutex_unlock(&con->mutex);
 done_unlocked:
 	con->ops->put(con);
+	return;
+
+fault:
+	mutex_unlock(&con->mutex);
+	ceph_fault(con); /* error/fault path */
+	goto done_unlocked;
 }
 
 
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6b5dda1cb5df..6ea2b892f44b 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -124,7 +124,7 @@ static void calc_layout(struct ceph_osd_client *osdc,
 	ceph_calc_raw_layout(osdc, layout, vino.snap, off,
 			     plen, &bno, req, op);
 
-	sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno);
+	snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
 	req->r_oid_len = strlen(req->r_oid);
 }
 
@@ -1421,6 +1421,15 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
 done:
 	downgrade_write(&osdc->map_sem);
 	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
+
+	/*
+	 * subscribe to subsequent osdmap updates if full to ensure
+	 * we find out when we are no longer full and stop returning
+	 * ENOSPC.
+	 */
+	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
+		ceph_monc_request_next_osdmap(&osdc->client->monc);
+
 	send_queued(osdc);
 	up_read(&osdc->map_sem);
 	wake_up_all(&osdc->client->auth_wq);
@@ -1677,8 +1686,14 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 	 */
 	if (req->r_sent == 0) {
 		rc = __map_request(osdc, req);
-		if (rc < 0)
+		if (rc < 0) {
+			if (nofail) {
+				dout("osdc_start_request failed map, "
+				     " will retry %lld\n", req->r_tid);
+				rc = 0;
+			}
 			goto out_unlock;
+		}
 		if (req->r_osd == NULL) {
 			dout("send_request %p no up osds in pg\n", req);
 			ceph_monc_request_next_osdmap(&osdc->client->monc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 71603ac3dff5..e97c3588c3ec 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -765,7 +765,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	}
 
 	map->epoch++;
-	map->modified = map->modified;
+	map->modified = modified;
 	if (newcrush) {
 		if (map->crush)
 			crush_destroy(map->crush);
@@ -830,15 +830,20 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 		map->osd_addr[osd] = addr;
 	}
 
-	/* new_down */
+	/* new_state */
 	ceph_decode_32_safe(p, end, len, bad);
 	while (len--) {
 		u32 osd;
+		u8 xorstate;
 		ceph_decode_32_safe(p, end, osd, bad);
+		xorstate = **(u8 **)p;
 		(*p)++; /* clean flag */
-		pr_info("osd%d down\n", osd);
+		if (xorstate == 0)
+			xorstate = CEPH_OSD_UP;
+		if (xorstate & CEPH_OSD_UP)
+			pr_info("osd%d down\n", osd);
 		if (osd < map->max_osd)
-			map->osd_state[osd] &= ~CEPH_OSD_UP;
+			map->osd_state[osd] ^= xorstate;
 	}
 
 	/* new_weight */
diff --git a/net/core/dev.c b/net/core/dev.c
index bcb05cb799c1..c7e305d13b71 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1308,6 +1308,13 @@ void dev_disable_lro(struct net_device *dev)
 {
 	u32 flags;
 
+	/*
+	 * If we're trying to disable lro on a vlan device
+	 * use the underlying physical device instead
+	 */
+	if (is_vlan_dev(dev))
+		dev = vlan_dev_real_dev(dev);
+
 	if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
 		flags = dev->ethtool_ops->get_flags(dev);
 	else
@@ -5954,7 +5961,10 @@ EXPORT_SYMBOL(free_netdev);
 void synchronize_net(void)
 {
 	might_sleep();
-	synchronize_rcu();
+	if (rtnl_is_locked())
+		synchronize_rcu_expedited();
+	else
+		synchronize_rcu();
 }
 EXPORT_SYMBOL(synchronize_net);
 
diff --git a/net/core/dst.c b/net/core/dst.c
index 81a4fa1c95ed..9ccca038444f 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -315,7 +315,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
 	unsigned long prev, new;
 
-	new = (unsigned long) dst_default_metrics;
+	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
 	prev = cmpxchg(&dst->_metrics, old, new);
 	if (prev == old)
 		kfree(__DST_METRICS_PTR(old));
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 3911586e12e4..008dc70b064b 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -602,6 +602,7 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
 skip:
 		idx++;
 	}
+	rcu_read_unlock();
 	cb->args[1] = idx;
 	rules_ops_put(ops);
 
diff --git a/net/core/filter.c b/net/core/filter.c
index 0eb8c4466eaa..0e3622f1dcb1 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -350,7 +350,9 @@ load_b:
 			continue;
 		}
 		default:
-			WARN_ON(1);
+			WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
+				       fentry->code, fentry->jt,
+				       fentry->jf, fentry->k);
 			return 0;
 		}
 	}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 2e2dce6583e1..6c6b86d0da15 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -8,6 +8,8 @@
 #include <linux/idr.h>
 #include <linux/rculist.h>
 #include <linux/nsproxy.h>
+#include <linux/proc_fs.h>
+#include <linux/file.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 
@@ -302,6 +304,28 @@ void __put_net(struct net *net)
 }
 EXPORT_SYMBOL_GPL(__put_net);
 
+struct net *get_net_ns_by_fd(int fd)
+{
+	struct proc_inode *ei;
+	struct file *file;
+	struct net *net;
+
+	net = ERR_PTR(-EINVAL);
+	file = proc_ns_fget(fd);
+	if (!file)
+		goto out;
+
+	ei = PROC_I(file->f_dentry->d_inode);
+	if (ei->ns_ops != &netns_operations)
+		goto out;
+
+	net = get_net(ei->ns);
+out:
+	if (file)
+		fput(file);
+	return net;
+}
+
 #else
 struct net *copy_net_ns(unsigned long flags, struct net *old_net)
 {
@@ -309,6 +333,11 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
 		return ERR_PTR(-EINVAL);
 	return old_net;
 }
+
+struct net *get_net_ns_by_fd(int fd)
+{
+	return ERR_PTR(-EINVAL);
+}
 #endif
 
 struct net *get_net_ns_by_pid(pid_t pid)
@@ -561,3 +590,39 @@ void unregister_pernet_device(struct pernet_operations *ops)
 	mutex_unlock(&net_mutex);
 }
 EXPORT_SYMBOL_GPL(unregister_pernet_device);
+
+#ifdef CONFIG_NET_NS
+static void *netns_get(struct task_struct *task)
+{
+	struct net *net = NULL;
+	struct nsproxy *nsproxy;
+
+	rcu_read_lock();
+	nsproxy = task_nsproxy(task);
+	if (nsproxy)
+		net = get_net(nsproxy->net_ns);
+	rcu_read_unlock();
+
+	return net;
+}
+
+static void netns_put(void *ns)
+{
+	put_net(ns);
+}
+
+static int netns_install(struct nsproxy *nsproxy, void *ns)
+{
+	put_net(nsproxy->net_ns);
+	nsproxy->net_ns = get_net(ns);
+	return 0;
+}
+
+const struct proc_ns_operations netns_operations = {
+	.name = "net",
+	.type = CLONE_NEWNET,
+	.get = netns_get,
+	.put = netns_put,
+	.install = netns_install,
+};
+#endif
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d1644e317e70..abd936d8a716 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -850,6 +850,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 	struct nlattr *attr, *af_spec;
 	struct rtnl_af_ops *af_ops;
 
+	ASSERT_RTNL();
 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
 	if (nlh == NULL)
 		return -EMSGSIZE;
@@ -1045,6 +1046,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_LINKMODE] = { .type = NLA_U8 },
 	[IFLA_LINKINFO] = { .type = NLA_NESTED },
 	[IFLA_NET_NS_PID] = { .type = NLA_U32 },
+	[IFLA_NET_NS_FD] = { .type = NLA_U32 },
 	[IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
 	[IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
 	[IFLA_VF_PORTS] = { .type = NLA_NESTED },
@@ -1093,6 +1095,8 @@ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
 	 */
 	if (tb[IFLA_NET_NS_PID])
 		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
+	else if (tb[IFLA_NET_NS_FD])
+		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
 	else
 		net = get_net(src_net);
 	return net;
@@ -1223,7 +1227,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
 	int send_addr_notify = 0;
 	int err;
 
-	if (tb[IFLA_NET_NS_PID]) {
+	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
 		struct net *net = rtnl_link_get_net(dev_net(dev), tb);
 		if (IS_ERR(net)) {
 			err = PTR_ERR(net);
@@ -1876,6 +1880,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	int min_len;
 	int family;
 	int type;
+	int err;
 
 	type = nlh->nlmsg_type;
 	if (type > RTM_MAX)
@@ -1902,8 +1907,11 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		if (dumpit == NULL)
 			return -EOPNOTSUPP;
 
+		__rtnl_unlock();
 		rtnl = net->rtnl;
-		return netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
+		err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
+		rtnl_lock();
+		return err;
 	}
 
 	memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));
@@ -1975,7 +1983,7 @@ static int __net_init rtnetlink_net_init(struct net *net)
 {
 	struct sock *sk;
 	sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
-				   rtnetlink_rcv, NULL, THIS_MODULE);
+				   rtnetlink_rcv, &rtnl_mutex, THIS_MODULE);
 	if (!sk)
 		return -ENOMEM;
 	net->rtnl = sk;
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index cfa7a5e1c5c9..fa000d26dc60 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -212,10 +212,12 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
 	int err = key->type_data.x[0];
 
 	seq_puts(m, key->description);
-	if (err)
-		seq_printf(m, ": %d", err);
-	else
-		seq_printf(m, ": %u", key->datalen);
+	if (key_is_instantiated(key)) {
+		if (err)
+			seq_printf(m, ": %d", err);
+		else
+			seq_printf(m, ": %u", key->datalen);
+	}
 }
 
 /*
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 672e476c8c8a..f1d27f6c9351 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1155,20 +1155,18 @@ static void igmp_group_dropped(struct ip_mc_list *im)
 
 	if (!in_dev->dead) {
 		if (IGMP_V1_SEEN(in_dev))
-			goto done;
+			return;
 		if (IGMP_V2_SEEN(in_dev)) {
 			if (reporter)
 				igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
-			goto done;
+			return;
 		}
 		/* IGMPv3 */
 		igmpv3_add_delrec(in_dev, im);
 
 		igmp_ifc_event(in_dev);
 	}
-done:
 #endif
-	ip_mc_clear_src(im);
 }
 
 static void igmp_group_added(struct ip_mc_list *im)
@@ -1305,6 +1303,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
 		*ip = i->next_rcu;
 		in_dev->mc_count--;
 		igmp_group_dropped(i);
+		ip_mc_clear_src(i);
 
 		if (!in_dev->dead)
 			ip_rt_multicast_event(in_dev);
@@ -1414,7 +1413,8 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
 		in_dev->mc_list = i->next_rcu;
 		in_dev->mc_count--;
 
-		igmp_group_dropped(i);
+		/* We've dropped the groups in ip_mc_down already */
+		ip_mc_clear_src(i);
 		ip_ma_put(i);
 	}
 }
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 1f3bb11490c9..9aaa67165f42 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -137,9 +137,6 @@ static void ping_v4_unhash(struct sock *sk)
 	struct inet_sock *isk = inet_sk(sk);
 	pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
 	if (sk_hashed(sk)) {
-		struct hlist_nulls_head *hslot;
-
-		hslot = ping_hashslot(&ping_table, sock_net(sk), isk->inet_num);
 		write_lock_bh(&ping_table.lock);
 		hlist_nulls_del(&sk->sk_nulls_node);
 		sock_put(sk);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 11e1780455f2..c9893d43242e 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -979,7 +979,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
 	srcp = inet->inet_num;
 
 	seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
+		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
 		i, src, srcp, dest, destp, sp->sk_state,
 		sk_wmem_alloc_get(sp),
 		sk_rmem_alloc_get(sp),
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3c8d9b6f1ea4..a7d6671e33b8 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2371,7 +2371,7 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
 	int ttd = req->expires - jiffies;
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
+		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
 		i,
 		ireq->loc_addr,
 		ntohs(inet_sk(sk)->inet_sport),
@@ -2426,7 +2426,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 	rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
-			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
+			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
 		i, src, srcp, dest, destp, sk->sk_state,
 		tp->write_seq - tp->snd_una,
 		rx_queue,
@@ -2461,7 +2461,7 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
 	srcp = ntohs(tw->tw_sport);
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
+		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
 		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
 		atomic_read(&tw->tw_refcnt), tw, len);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 599374f65c76..abca870d8ff6 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2090,7 +2090,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
 	__u16 srcp = ntohs(inet->inet_sport);
 
 	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n",
+		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
 		bucket, src, srcp, dest, destp, sp->sk_state,
 		sk_wmem_alloc_get(sp),
 		sk_rmem_alloc_get(sp),
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ae64984f81aa..cc7313b8f7ea 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1240,7 +1240,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
 	srcp  = inet_sk(sp)->inet_num;
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
+		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
 		   i,
 		   src->s6_addr32[0], src->s6_addr32[1],
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 868366470b4a..d1fd28711ba5 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -2036,7 +2036,7 @@ static void get_openreq6(struct seq_file *seq,
 
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
+		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
 		   i,
 		   src->s6_addr32[0], src->s6_addr32[1],
 		   src->s6_addr32[2], src->s6_addr32[3],
@@ -2087,7 +2087,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
+		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
 		   i,
 		   src->s6_addr32[0], src->s6_addr32[1],
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
@@ -2129,7 +2129,7 @@ static void get_timewait6_sock(struct seq_file *seq,
 
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
+		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
 		   i,
 		   src->s6_addr32[0], src->s6_addr32[1],
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index fc0c42a88e54..41f8c9c08dba 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1391,7 +1391,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
 	srcp = ntohs(inet->inet_sport);
 	seq_printf(seq,
 		   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
+		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
 		   bucket,
 		   src->s6_addr32[0], src->s6_addr32[1],
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index a6770a04e3bd..4fe1db12d2a3 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -241,7 +241,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
 	__be32 spi;
 
 	spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
-	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
+	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi);
 }
 
 static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index d62401c25684..8f92cf8116ea 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3656,7 +3656,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
 	if (v == SEQ_START_TOKEN)
 		seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
 	else
-		seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
+		seq_printf(f, "%pK %-6d %-6u %-6u %-6u %-6lu\n",
 			       s,
 			       atomic_read(&s->sk_refcnt),
 			       sk_rmem_alloc_get(s),
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 7dfbe71dc637..49d4f869e0bc 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -384,11 +384,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 	int i;
 	enum nl80211_channel_type orig_ct;
 
+	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
+
 	if (local->scan_sdata == sdata)
 		ieee80211_scan_cancel(local);
 
-	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
-
 	/*
 	 * Stop TX on this interface first.
 	 */
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0d7b08db8e56..866f269183cf 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -752,11 +752,25 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
 	hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
 
-	/* mac80211 doesn't support more than 1 channel */
-	for (i = 0; i < hw->wiphy->n_iface_combinations; i++)
-		if (hw->wiphy->iface_combinations[i].num_different_channels > 1)
+	/*
+	 * mac80211 doesn't support more than 1 channel, and also not more
+	 * than one IBSS interface
+	 */
+	for (i = 0; i < hw->wiphy->n_iface_combinations; i++) {
+		const struct ieee80211_iface_combination *c;
+		int j;
+
+		c = &hw->wiphy->iface_combinations[i];
+
+		if (c->num_different_channels > 1)
 			return -EINVAL;
 
+		for (j = 0; j < c->n_limits; j++)
+			if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) &&
+			    c->limits[j].max > 1)
+				return -EINVAL;
+	}
+
 #ifndef CONFIG_MAC80211_MESH
 	/* mesh depends on Kconfig, but drivers should set it if they want */
 	local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT);
@@ -1076,6 +1090,8 @@ static void __exit ieee80211_exit(void)
 	ieee80211s_stop();
 
 	ieee80211_iface_exit();
+
+	rcu_barrier();
 }
 
 
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index e7c5fddb4804..249e733362e7 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -120,6 +120,7 @@ struct mesh_path {
  *	buckets
  * @mean_chain_len: maximum average length for the hash buckets' list, if it is
  *	reached, the table will grow
+ * rcu_head: RCU head to free the table
  */
 struct mesh_table {
 	/* Number of buckets will be 2^N */
@@ -132,6 +133,8 @@ struct mesh_table {
 	int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
 	int size_order;
 	int mean_chain_len;
+
+	struct rcu_head rcu_head;
 };
 
 /* Recent multicast cache */
@@ -286,10 +289,6 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
 	return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
 }
 
-#define for_each_mesh_entry(x, p, node, i) \
-	for (i = 0; i <= x->hash_mask; i++) \
-		hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list)
-
 void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
 
 void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 83ce48e31913..0d2faacc3e87 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -36,8 +36,8 @@ struct mpath_node {
 	struct mesh_path *mpath;
 };
 
-static struct mesh_table *mesh_paths;
-static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
+static struct mesh_table __rcu *mesh_paths;
+static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
 
 int mesh_paths_generation;
 
@@ -48,17 +48,40 @@ int mesh_paths_generation;
 static DEFINE_RWLOCK(pathtbl_resize_lock);
 
 
+static inline struct mesh_table *resize_dereference_mesh_paths(void)
+{
+	return rcu_dereference_protected(mesh_paths,
+		lockdep_is_held(&pathtbl_resize_lock));
+}
+
+static inline struct mesh_table *resize_dereference_mpp_paths(void)
+{
+	return rcu_dereference_protected(mpp_paths,
+		lockdep_is_held(&pathtbl_resize_lock));
+}
+
+/*
+ * CAREFUL -- "tbl" must not be an expression,
+ * in particular not an rcu_dereference(), since
+ * it's used twice. So it is illegal to do
+ *	for_each_mesh_entry(rcu_dereference(...), ...)
+ */
+#define for_each_mesh_entry(tbl, p, node, i) \
+	for (i = 0; i <= tbl->hash_mask; i++) \
+		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
+
+
 static struct mesh_table *mesh_table_alloc(int size_order)
 {
 	int i;
 	struct mesh_table *newtbl;
 
-	newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
+	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
 	if (!newtbl)
 		return NULL;
 
 	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
-			(1 << size_order), GFP_KERNEL);
+			(1 << size_order), GFP_ATOMIC);
 
 	if (!newtbl->hash_buckets) {
 		kfree(newtbl);
@@ -66,7 +89,7 @@ static struct mesh_table *mesh_table_alloc(int size_order)
 	}
 
 	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
-			(1 << size_order), GFP_KERNEL);
+			(1 << size_order), GFP_ATOMIC);
 	if (!newtbl->hashwlock) {
 		kfree(newtbl->hash_buckets);
 		kfree(newtbl);
@@ -258,12 +281,13 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
  */
 struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
 {
+	struct mesh_table *tbl = rcu_dereference(mesh_paths);
 	struct mpath_node *node;
 	struct hlist_node *p;
 	int i;
 	int j = 0;
 
-	for_each_mesh_entry(mesh_paths, p, node, i) {
+	for_each_mesh_entry(tbl, p, node, i) {
 		if (sdata && node->mpath->sdata != sdata)
 			continue;
 		if (j++ == idx) {
@@ -293,6 +317,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct ieee80211_local *local = sdata->local;
+	struct mesh_table *tbl;
 	struct mesh_path *mpath, *new_mpath;
 	struct mpath_node *node, *new_node;
 	struct hlist_head *bucket;
@@ -332,10 +357,12 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	spin_lock_init(&new_mpath->state_lock);
 	init_timer(&new_mpath->timer);
 
-	hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
-	bucket = &mesh_paths->hash_buckets[hash_idx];
+	tbl = resize_dereference_mesh_paths();
+
+	hash_idx = mesh_table_hash(dst, sdata, tbl);
+	bucket = &tbl->hash_buckets[hash_idx];
 
-	spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
+	spin_lock_bh(&tbl->hashwlock[hash_idx]);
 
 	err = -EEXIST;
 	hlist_for_each_entry(node, n, bucket, list) {
@@ -345,13 +372,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	}
 
 	hlist_add_head_rcu(&new_node->list, bucket);
-	if (atomic_inc_return(&mesh_paths->entries) >=
-		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
+	if (atomic_inc_return(&tbl->entries) >=
+		tbl->mean_chain_len * (tbl->hash_mask + 1))
 		grow = 1;
 
 	mesh_paths_generation++;
 
-	spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
+	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
 	read_unlock_bh(&pathtbl_resize_lock);
 	if (grow) {
 		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
@@ -360,7 +387,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	return 0;
 
 err_exists:
-	spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
+	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
 	read_unlock_bh(&pathtbl_resize_lock);
 	kfree(new_node);
 err_node_alloc:
@@ -370,58 +397,59 @@ err_path_alloc:
 	return err;
 }
 
+static void mesh_table_free_rcu(struct rcu_head *rcu)
+{
+	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);
+
+	mesh_table_free(tbl, false);
+}
+
 void mesh_mpath_table_grow(void)
 {
 	struct mesh_table *oldtbl, *newtbl;
 
-	rcu_read_lock();
-	newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1);
-	if (!newtbl)
-		return;
 	write_lock_bh(&pathtbl_resize_lock);
-	oldtbl = mesh_paths;
-	if (mesh_table_grow(mesh_paths, newtbl) < 0) {
-		rcu_read_unlock();
+	oldtbl = resize_dereference_mesh_paths();
+	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
+	if (!newtbl)
+		goto out;
+	if (mesh_table_grow(oldtbl, newtbl) < 0) {
 		__mesh_table_free(newtbl);
-		write_unlock_bh(&pathtbl_resize_lock);
-		return;
+		goto out;
 	}
-	rcu_read_unlock();
 	rcu_assign_pointer(mesh_paths, newtbl);
-	write_unlock_bh(&pathtbl_resize_lock);
 
-	synchronize_rcu();
-	mesh_table_free(oldtbl, false);
+	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
+
+ out:
+	write_unlock_bh(&pathtbl_resize_lock);
 }
 
 void mesh_mpp_table_grow(void)
 {
 	struct mesh_table *oldtbl, *newtbl;
 
-	rcu_read_lock();
-	newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1);
-	if (!newtbl)
-		return;
 	write_lock_bh(&pathtbl_resize_lock);
-	oldtbl = mpp_paths;
-	if (mesh_table_grow(mpp_paths, newtbl) < 0) {
-		rcu_read_unlock();
+	oldtbl = resize_dereference_mpp_paths();
+	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
+	if (!newtbl)
+		goto out;
+	if (mesh_table_grow(oldtbl, newtbl) < 0) {
 		__mesh_table_free(newtbl);
-		write_unlock_bh(&pathtbl_resize_lock);
-		return;
+		goto out;
 	}
-	rcu_read_unlock();
 	rcu_assign_pointer(mpp_paths, newtbl);
-	write_unlock_bh(&pathtbl_resize_lock);
+	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
 
-	synchronize_rcu();
-	mesh_table_free(oldtbl, false);
+ out:
+	write_unlock_bh(&pathtbl_resize_lock);
 }
 
 int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct ieee80211_local *local = sdata->local;
+	struct mesh_table *tbl;
 	struct mesh_path *mpath, *new_mpath;
 	struct mpath_node *node, *new_node;
 	struct hlist_head *bucket;
@@ -456,10 +484,12 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
456 new_mpath->exp_time = jiffies; 484 new_mpath->exp_time = jiffies;
457 spin_lock_init(&new_mpath->state_lock); 485 spin_lock_init(&new_mpath->state_lock);
458 486
459 hash_idx = mesh_table_hash(dst, sdata, mpp_paths); 487 tbl = resize_dereference_mpp_paths();
460 bucket = &mpp_paths->hash_buckets[hash_idx];
461 488
462 spin_lock_bh(&mpp_paths->hashwlock[hash_idx]); 489 hash_idx = mesh_table_hash(dst, sdata, tbl);
490 bucket = &tbl->hash_buckets[hash_idx];
491
492 spin_lock_bh(&tbl->hashwlock[hash_idx]);
463 493
464 err = -EEXIST; 494 err = -EEXIST;
465 hlist_for_each_entry(node, n, bucket, list) { 495 hlist_for_each_entry(node, n, bucket, list) {
@@ -469,11 +499,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
469 } 499 }
470 500
471 hlist_add_head_rcu(&new_node->list, bucket); 501 hlist_add_head_rcu(&new_node->list, bucket);
472 if (atomic_inc_return(&mpp_paths->entries) >= 502 if (atomic_inc_return(&tbl->entries) >=
473 mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1)) 503 tbl->mean_chain_len * (tbl->hash_mask + 1))
474 grow = 1; 504 grow = 1;
475 505
476 spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]); 506 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
477 read_unlock_bh(&pathtbl_resize_lock); 507 read_unlock_bh(&pathtbl_resize_lock);
478 if (grow) { 508 if (grow) {
479 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); 509 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
@@ -482,7 +512,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
482 return 0; 512 return 0;
483 513
484err_exists: 514err_exists:
485 spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]); 515 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
486 read_unlock_bh(&pathtbl_resize_lock); 516 read_unlock_bh(&pathtbl_resize_lock);
487 kfree(new_node); 517 kfree(new_node);
488err_node_alloc: 518err_node_alloc:
@@ -502,6 +532,7 @@ err_path_alloc:
502 */ 532 */
503void mesh_plink_broken(struct sta_info *sta) 533void mesh_plink_broken(struct sta_info *sta)
504{ 534{
535 struct mesh_table *tbl;
505 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 536 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
506 struct mesh_path *mpath; 537 struct mesh_path *mpath;
507 struct mpath_node *node; 538 struct mpath_node *node;
@@ -510,10 +541,11 @@ void mesh_plink_broken(struct sta_info *sta)
510 int i; 541 int i;
511 542
512 rcu_read_lock(); 543 rcu_read_lock();
513 for_each_mesh_entry(mesh_paths, p, node, i) { 544 tbl = rcu_dereference(mesh_paths);
545 for_each_mesh_entry(tbl, p, node, i) {
514 mpath = node->mpath; 546 mpath = node->mpath;
515 spin_lock_bh(&mpath->state_lock); 547 spin_lock_bh(&mpath->state_lock);
516 if (mpath->next_hop == sta && 548 if (rcu_dereference(mpath->next_hop) == sta &&
517 mpath->flags & MESH_PATH_ACTIVE && 549 mpath->flags & MESH_PATH_ACTIVE &&
518 !(mpath->flags & MESH_PATH_FIXED)) { 550 !(mpath->flags & MESH_PATH_FIXED)) {
519 mpath->flags &= ~MESH_PATH_ACTIVE; 551 mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -542,30 +574,38 @@ void mesh_plink_broken(struct sta_info *sta)
542 */ 574 */
543void mesh_path_flush_by_nexthop(struct sta_info *sta) 575void mesh_path_flush_by_nexthop(struct sta_info *sta)
544{ 576{
577 struct mesh_table *tbl;
545 struct mesh_path *mpath; 578 struct mesh_path *mpath;
546 struct mpath_node *node; 579 struct mpath_node *node;
547 struct hlist_node *p; 580 struct hlist_node *p;
548 int i; 581 int i;
549 582
550 for_each_mesh_entry(mesh_paths, p, node, i) { 583 rcu_read_lock();
584 tbl = rcu_dereference(mesh_paths);
585 for_each_mesh_entry(tbl, p, node, i) {
551 mpath = node->mpath; 586 mpath = node->mpath;
552 if (mpath->next_hop == sta) 587 if (rcu_dereference(mpath->next_hop) == sta)
553 mesh_path_del(mpath->dst, mpath->sdata); 588 mesh_path_del(mpath->dst, mpath->sdata);
554 } 589 }
590 rcu_read_unlock();
555} 591}
556 592
557void mesh_path_flush(struct ieee80211_sub_if_data *sdata) 593void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
558{ 594{
595 struct mesh_table *tbl;
559 struct mesh_path *mpath; 596 struct mesh_path *mpath;
560 struct mpath_node *node; 597 struct mpath_node *node;
561 struct hlist_node *p; 598 struct hlist_node *p;
562 int i; 599 int i;
563 600
564 for_each_mesh_entry(mesh_paths, p, node, i) { 601 rcu_read_lock();
602 tbl = rcu_dereference(mesh_paths);
603 for_each_mesh_entry(tbl, p, node, i) {
565 mpath = node->mpath; 604 mpath = node->mpath;
566 if (mpath->sdata == sdata) 605 if (mpath->sdata == sdata)
567 mesh_path_del(mpath->dst, mpath->sdata); 606 mesh_path_del(mpath->dst, mpath->sdata);
568 } 607 }
608 rcu_read_unlock();
569} 609}
570 610
571static void mesh_path_node_reclaim(struct rcu_head *rp) 611static void mesh_path_node_reclaim(struct rcu_head *rp)
@@ -589,6 +629,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
589 */ 629 */
590int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) 630int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
591{ 631{
632 struct mesh_table *tbl;
592 struct mesh_path *mpath; 633 struct mesh_path *mpath;
593 struct mpath_node *node; 634 struct mpath_node *node;
594 struct hlist_head *bucket; 635 struct hlist_head *bucket;
@@ -597,19 +638,20 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
597 int err = 0; 638 int err = 0;
598 639
599 read_lock_bh(&pathtbl_resize_lock); 640 read_lock_bh(&pathtbl_resize_lock);
600 hash_idx = mesh_table_hash(addr, sdata, mesh_paths); 641 tbl = resize_dereference_mesh_paths();
601 bucket = &mesh_paths->hash_buckets[hash_idx]; 642 hash_idx = mesh_table_hash(addr, sdata, tbl);
643 bucket = &tbl->hash_buckets[hash_idx];
602 644
603 spin_lock_bh(&mesh_paths->hashwlock[hash_idx]); 645 spin_lock_bh(&tbl->hashwlock[hash_idx]);
604 hlist_for_each_entry(node, n, bucket, list) { 646 hlist_for_each_entry(node, n, bucket, list) {
605 mpath = node->mpath; 647 mpath = node->mpath;
606 if (mpath->sdata == sdata && 648 if (mpath->sdata == sdata &&
607 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 649 memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
608 spin_lock_bh(&mpath->state_lock); 650 spin_lock_bh(&mpath->state_lock);
609 mpath->flags |= MESH_PATH_RESOLVING; 651 mpath->flags |= MESH_PATH_RESOLVING;
610 hlist_del_rcu(&node->list); 652 hlist_del_rcu(&node->list);
611 call_rcu(&node->rcu, mesh_path_node_reclaim); 653 call_rcu(&node->rcu, mesh_path_node_reclaim);
612 atomic_dec(&mesh_paths->entries); 654 atomic_dec(&tbl->entries);
613 spin_unlock_bh(&mpath->state_lock); 655 spin_unlock_bh(&mpath->state_lock);
614 goto enddel; 656 goto enddel;
615 } 657 }
@@ -618,7 +660,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
618 err = -ENXIO; 660 err = -ENXIO;
619enddel: 661enddel:
620 mesh_paths_generation++; 662 mesh_paths_generation++;
621 spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); 663 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
622 read_unlock_bh(&pathtbl_resize_lock); 664 read_unlock_bh(&pathtbl_resize_lock);
623 return err; 665 return err;
624} 666}
@@ -719,8 +761,10 @@ static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
719 struct mpath_node *node = hlist_entry(p, struct mpath_node, list); 761 struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
720 mpath = node->mpath; 762 mpath = node->mpath;
721 hlist_del_rcu(p); 763 hlist_del_rcu(p);
722 if (free_leafs) 764 if (free_leafs) {
765 del_timer_sync(&mpath->timer);
723 kfree(mpath); 766 kfree(mpath);
767 }
724 kfree(node); 768 kfree(node);
725} 769}
726 770
@@ -745,52 +789,60 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
745 789
746int mesh_pathtbl_init(void) 790int mesh_pathtbl_init(void)
747{ 791{
748 mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 792 struct mesh_table *tbl_path, *tbl_mpp;
749 if (!mesh_paths) 793
794 tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
795 if (!tbl_path)
750 return -ENOMEM; 796 return -ENOMEM;
751 mesh_paths->free_node = &mesh_path_node_free; 797 tbl_path->free_node = &mesh_path_node_free;
752 mesh_paths->copy_node = &mesh_path_node_copy; 798 tbl_path->copy_node = &mesh_path_node_copy;
753 mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; 799 tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
754 800
755 mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 801 tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
756 if (!mpp_paths) { 802 if (!tbl_mpp) {
757 mesh_table_free(mesh_paths, true); 803 mesh_table_free(tbl_path, true);
758 return -ENOMEM; 804 return -ENOMEM;
759 } 805 }
760 mpp_paths->free_node = &mesh_path_node_free; 806 tbl_mpp->free_node = &mesh_path_node_free;
761 mpp_paths->copy_node = &mesh_path_node_copy; 807 tbl_mpp->copy_node = &mesh_path_node_copy;
762 mpp_paths->mean_chain_len = MEAN_CHAIN_LEN; 808 tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
809
810 /* Need no locking since this is during init */
811 RCU_INIT_POINTER(mesh_paths, tbl_path);
812 RCU_INIT_POINTER(mpp_paths, tbl_mpp);
763 813
764 return 0; 814 return 0;
765} 815}
766 816
767void mesh_path_expire(struct ieee80211_sub_if_data *sdata) 817void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
768{ 818{
819 struct mesh_table *tbl;
769 struct mesh_path *mpath; 820 struct mesh_path *mpath;
770 struct mpath_node *node; 821 struct mpath_node *node;
771 struct hlist_node *p; 822 struct hlist_node *p;
772 int i; 823 int i;
773 824
774 read_lock_bh(&pathtbl_resize_lock); 825 rcu_read_lock();
775 for_each_mesh_entry(mesh_paths, p, node, i) { 826 tbl = rcu_dereference(mesh_paths);
827 for_each_mesh_entry(tbl, p, node, i) {
776 if (node->mpath->sdata != sdata) 828 if (node->mpath->sdata != sdata)
777 continue; 829 continue;
778 mpath = node->mpath; 830 mpath = node->mpath;
779 spin_lock_bh(&mpath->state_lock); 831 spin_lock_bh(&mpath->state_lock);
780 if ((!(mpath->flags & MESH_PATH_RESOLVING)) && 832 if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
781 (!(mpath->flags & MESH_PATH_FIXED)) && 833 (!(mpath->flags & MESH_PATH_FIXED)) &&
782 time_after(jiffies, 834 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
783 mpath->exp_time + MESH_PATH_EXPIRE)) {
784 spin_unlock_bh(&mpath->state_lock); 835 spin_unlock_bh(&mpath->state_lock);
785 mesh_path_del(mpath->dst, mpath->sdata); 836 mesh_path_del(mpath->dst, mpath->sdata);
786 } else 837 } else
787 spin_unlock_bh(&mpath->state_lock); 838 spin_unlock_bh(&mpath->state_lock);
788 } 839 }
789 read_unlock_bh(&pathtbl_resize_lock); 840 rcu_read_unlock();
790} 841}
791 842
792void mesh_pathtbl_unregister(void) 843void mesh_pathtbl_unregister(void)
793{ 844{
794 mesh_table_free(mesh_paths, true); 845 /* no need for locking during exit path */
795 mesh_table_free(mpp_paths, true); 846 mesh_table_free(rcu_dereference_raw(mesh_paths), true);
847 mesh_table_free(rcu_dereference_raw(mpp_paths), true);
796} 848}
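
The mesh_pathtbl.c hunks above drop the rcu_read_lock()/synchronize_rcu() sequence from the table-grow paths: the replacement table is allocated and swapped in under pathtbl_resize_lock, and the old table is freed from an RCU callback. A minimal sketch of that pattern, assuming hypothetical names (demo_table, demo_paths, demo_resize_lock, demo_table_grow) rather than the real mac80211 structures:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_table {
	struct hlist_head *buckets;
	int size_order;
	struct rcu_head rcu_head;
};

static struct demo_table __rcu *demo_paths;
static DEFINE_RWLOCK(demo_resize_lock);

/* RCU callback: runs once all pre-existing readers have finished */
static void demo_table_free_rcu(struct rcu_head *rcu)
{
	struct demo_table *tbl = container_of(rcu, struct demo_table, rcu_head);

	kfree(tbl->buckets);
	kfree(tbl);
}

static void demo_table_grow(void)
{
	struct demo_table *oldtbl, *newtbl;

	write_lock_bh(&demo_resize_lock);
	oldtbl = rcu_dereference_protected(demo_paths,
				lockdep_is_held(&demo_resize_lock));
	newtbl = kzalloc(sizeof(*newtbl), GFP_ATOMIC);
	if (!newtbl)
		goto out;
	/* ... allocate newtbl->buckets and copy entries from oldtbl ... */
	rcu_assign_pointer(demo_paths, newtbl);
	/* defer freeing instead of blocking in synchronize_rcu() */
	call_rcu(&oldtbl->rcu_head, demo_table_free_rcu);
out:
	write_unlock_bh(&demo_resize_lock);
}
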
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index d20046b5d8f4..27af6723cb5e 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -719,6 +719,11 @@ void ieee80211_scan_work(struct work_struct *work)
719 * without scheduling a new work 719 * without scheduling a new work
720 */ 720 */
721 do { 721 do {
722 if (!ieee80211_sdata_running(sdata)) {
723 aborted = true;
724 goto out_complete;
725 }
726
722 switch (local->next_scan_state) { 727 switch (local->next_scan_state) {
723 case SCAN_DECISION: 728 case SCAN_DECISION:
724 /* if no more bands/channels left, complete scan */ 729 /* if no more bands/channels left, complete scan */
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 5fe4f3b04ed3..6ef64adf7362 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1985,7 +1985,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
1985 struct sock *s = v; 1985 struct sock *s = v;
1986 struct netlink_sock *nlk = nlk_sk(s); 1986 struct netlink_sock *nlk = nlk_sk(s);
1987 1987
1988 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n", 1988 seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
1989 s, 1989 s,
1990 s->sk_protocol, 1990 s->sk_protocol,
1991 nlk->pid, 1991 nlk->pid,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 549527bca87a..925f715686a5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2706,7 +2706,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
2706 const struct packet_sock *po = pkt_sk(s); 2706 const struct packet_sock *po = pkt_sk(s);
2707 2707
2708 seq_printf(seq, 2708 seq_printf(seq,
2709 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", 2709 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
2710 s, 2710 s,
2711 atomic_read(&s->sk_refcnt), 2711 atomic_read(&s->sk_refcnt),
2712 s->sk_type, 2712 s->sk_type,
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 8c5bfcef92cb..ab07711cf2f4 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -607,7 +607,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
607 struct pn_sock *pn = pn_sk(sk); 607 struct pn_sock *pn = pn_sk(sk);
608 608
609 seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu " 609 seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
610 "%d %p %d%n", 610 "%d %pK %d%n",
611 sk->sk_protocol, pn->sobject, pn->dobject, 611 sk->sk_protocol, pn->sobject, pn->dobject,
612 pn->resource, sk->sk_state, 612 pn->resource, sk->sk_state,
613 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), 613 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
diff --git a/net/rds/ib.c b/net/rds/ib.c
index cce19f95c624..3b83086bcc30 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -325,7 +325,7 @@ static int rds_ib_laddr_check(__be32 addr)
325 /* Create a CMA ID and try to bind it. This catches both 325 /* Create a CMA ID and try to bind it. This catches both
326 * IB and iWARP capable NICs. 326 * IB and iWARP capable NICs.
327 */ 327 */
328 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); 328 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
329 if (IS_ERR(cm_id)) 329 if (IS_ERR(cm_id))
330 return PTR_ERR(cm_id); 330 return PTR_ERR(cm_id);
331 331
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index ee369d201a65..fd453dd5124b 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -587,7 +587,7 @@ int rds_ib_conn_connect(struct rds_connection *conn)
587 /* XXX I wonder what affect the port space has */ 587 /* XXX I wonder what affect the port space has */
588 /* delegate cm event handler to rdma_transport */ 588 /* delegate cm event handler to rdma_transport */
589 ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, 589 ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
590 RDMA_PS_TCP); 590 RDMA_PS_TCP, IB_QPT_RC);
591 if (IS_ERR(ic->i_cm_id)) { 591 if (IS_ERR(ic->i_cm_id)) {
592 ret = PTR_ERR(ic->i_cm_id); 592 ret = PTR_ERR(ic->i_cm_id);
593 ic->i_cm_id = NULL; 593 ic->i_cm_id = NULL;
diff --git a/net/rds/iw.c b/net/rds/iw.c
index 5a9676fe594f..f7474844f096 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -226,7 +226,7 @@ static int rds_iw_laddr_check(__be32 addr)
226 /* Create a CMA ID and try to bind it. This catches both 226 /* Create a CMA ID and try to bind it. This catches both
227 * IB and iWARP capable NICs. 227 * IB and iWARP capable NICs.
228 */ 228 */
229 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); 229 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
230 if (IS_ERR(cm_id)) 230 if (IS_ERR(cm_id))
231 return PTR_ERR(cm_id); 231 return PTR_ERR(cm_id);
232 232
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 3a60a15d1b4a..c12db66f24c7 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -522,7 +522,7 @@ int rds_iw_conn_connect(struct rds_connection *conn)
522 /* XXX I wonder what affect the port space has */ 522 /* XXX I wonder what affect the port space has */
523 /* delegate cm event handler to rdma_transport */ 523 /* delegate cm event handler to rdma_transport */
524 ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, 524 ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
525 RDMA_PS_TCP); 525 RDMA_PS_TCP, IB_QPT_RC);
526 if (IS_ERR(ic->i_cm_id)) { 526 if (IS_ERR(ic->i_cm_id)) {
527 ret = PTR_ERR(ic->i_cm_id); 527 ret = PTR_ERR(ic->i_cm_id);
528 ic->i_cm_id = NULL; 528 ic->i_cm_id = NULL;
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 4195a0539829..f8760e1b6688 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -158,7 +158,8 @@ static int rds_rdma_listen_init(void)
158 struct rdma_cm_id *cm_id; 158 struct rdma_cm_id *cm_id;
159 int ret; 159 int ret;
160 160
161 cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP); 161 cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP,
162 IB_QPT_RC);
162 if (IS_ERR(cm_id)) { 163 if (IS_ERR(cm_id)) {
163 ret = PTR_ERR(cm_id); 164 ret = PTR_ERR(cm_id);
164 printk(KERN_ERR "RDS/RDMA: failed to setup listener, " 165 printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 48464ca13b24..78efe895b663 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -33,3 +33,12 @@ config RFKILL_REGULATOR
33 33
34 To compile this driver as a module, choose M here: the module will 34 To compile this driver as a module, choose M here: the module will
35 be called rfkill-regulator. 35 be called rfkill-regulator.
36
37config RFKILL_GPIO
38 tristate "GPIO RFKILL driver"
39 depends on RFKILL && GPIOLIB && HAVE_CLK
40 default n
41 help
42 If you say yes here you get support for a generic GPIO RFKILL
43 driver. The platform should fill in the appropriate fields in the
44 rfkill_gpio_platform_data structure and pass that to the driver.
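
The help text above relies on board code to describe the radio through struct rfkill_gpio_platform_data; the driver added below reads its name, reset_gpio, shutdown_gpio, power_clk_name and type fields. A hedged sketch of such a board registration, with made-up GPIO numbers and device names (the exact field layout is whatever <linux/rfkill-gpio.h> declares):

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/rfkill.h>
#include <linux/rfkill-gpio.h>

static struct rfkill_gpio_platform_data demo_bt_rfkill_pdata = {
	.name		= "demo_bt",
	.reset_gpio	= 100,			/* example GPIO numbers */
	.shutdown_gpio	= 101,
	.power_clk_name	= NULL,			/* set if a clock gates the radio */
	.type		= RFKILL_TYPE_BLUETOOTH,
};

static struct platform_device demo_bt_rfkill_device = {
	.name	= "rfkill_gpio",		/* matches the platform_driver below */
	.id	= -1,
	.dev	= {
		.platform_data = &demo_bt_rfkill_pdata,
	},
};

/* called from the board's init code */
static int __init demo_bt_rfkill_init(void)
{
	return platform_device_register(&demo_bt_rfkill_device);
}
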
diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile
index d9a5a58ffd8c..311768783f4a 100644
--- a/net/rfkill/Makefile
+++ b/net/rfkill/Makefile
@@ -6,3 +6,4 @@ rfkill-y += core.o
6rfkill-$(CONFIG_RFKILL_INPUT) += input.o 6rfkill-$(CONFIG_RFKILL_INPUT) += input.o
7obj-$(CONFIG_RFKILL) += rfkill.o 7obj-$(CONFIG_RFKILL) += rfkill.o
8obj-$(CONFIG_RFKILL_REGULATOR) += rfkill-regulator.o 8obj-$(CONFIG_RFKILL_REGULATOR) += rfkill-regulator.o
9obj-$(CONFIG_RFKILL_GPIO) += rfkill-gpio.o
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
new file mode 100644
index 000000000000..256c5ddd2d72
--- /dev/null
+++ b/net/rfkill/rfkill-gpio.c
@@ -0,0 +1,227 @@
1/*
2 * Copyright (c) 2011, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/gpio.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/rfkill.h>
24#include <linux/platform_device.h>
25#include <linux/clk.h>
26#include <linux/slab.h>
27
28#include <linux/rfkill-gpio.h>
29
30enum rfkill_gpio_clk_state {
31 UNSPECIFIED = 0,
32 PWR_ENABLED,
33 PWR_DISABLED
34};
35
36#define PWR_CLK_SET(_RF, _EN) \
37 ((_RF)->pwr_clk_enabled = (!(_EN) ? PWR_ENABLED : PWR_DISABLED))
38#define PWR_CLK_ENABLED(_RF) ((_RF)->pwr_clk_enabled == PWR_ENABLED)
39#define PWR_CLK_DISABLED(_RF) ((_RF)->pwr_clk_enabled != PWR_ENABLED)
40
41struct rfkill_gpio_data {
42 struct rfkill_gpio_platform_data *pdata;
43 struct rfkill *rfkill_dev;
44 char *reset_name;
45 char *shutdown_name;
46 enum rfkill_gpio_clk_state pwr_clk_enabled;
47 struct clk *pwr_clk;
48};
49
50static int rfkill_gpio_set_power(void *data, bool blocked)
51{
52 struct rfkill_gpio_data *rfkill = data;
53
54 if (blocked) {
55 if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
56 gpio_direction_output(rfkill->pdata->shutdown_gpio, 0);
57 if (gpio_is_valid(rfkill->pdata->reset_gpio))
58 gpio_direction_output(rfkill->pdata->reset_gpio, 0);
59 if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill))
60 clk_disable(rfkill->pwr_clk);
61 } else {
62 if (rfkill->pwr_clk && PWR_CLK_DISABLED(rfkill))
63 clk_enable(rfkill->pwr_clk);
64 if (gpio_is_valid(rfkill->pdata->reset_gpio))
65 gpio_direction_output(rfkill->pdata->reset_gpio, 1);
66 if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
67 gpio_direction_output(rfkill->pdata->shutdown_gpio, 1);
68 }
69
70 if (rfkill->pwr_clk)
71 PWR_CLK_SET(rfkill, blocked);
72
73 return 0;
74}
75
76static const struct rfkill_ops rfkill_gpio_ops = {
77 .set_block = rfkill_gpio_set_power,
78};
79
80static int rfkill_gpio_probe(struct platform_device *pdev)
81{
82 struct rfkill_gpio_data *rfkill;
83 struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
84 int ret = 0;
85 int len = 0;
86
87 if (!pdata) {
88 pr_warn("%s: No platform data specified\n", __func__);
89 return -EINVAL;
90 }
91
92 /* make sure at least one of the GPIOs is defined and that
93 * a name is specified for this instance */
94 if (!pdata->name || (!gpio_is_valid(pdata->reset_gpio) &&
95 !gpio_is_valid(pdata->shutdown_gpio))) {
96 pr_warn("%s: invalid platform data\n", __func__);
97 return -EINVAL;
98 }
99
100 rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
101 if (!rfkill)
102 return -ENOMEM;
103
104 rfkill->pdata = pdata;
105
106 len = strlen(pdata->name);
107 rfkill->reset_name = kzalloc(len + 7, GFP_KERNEL);
108 if (!rfkill->reset_name) {
109 ret = -ENOMEM;
110 goto fail_alloc;
111 }
112
113 rfkill->shutdown_name = kzalloc(len + 10, GFP_KERNEL);
114 if (!rfkill->shutdown_name) {
115 ret = -ENOMEM;
116 goto fail_reset_name;
117 }
118
119 snprintf(rfkill->reset_name, len + 7, "%s_reset", pdata->name);
120 snprintf(rfkill->shutdown_name, len + 10, "%s_shutdown", pdata->name);
121
122 if (pdata->power_clk_name) {
123 rfkill->pwr_clk = clk_get(&pdev->dev, pdata->power_clk_name);
124 if (IS_ERR(rfkill->pwr_clk)) {
125 pr_warn("%s: can't find pwr_clk.\n", __func__);
126 goto fail_shutdown_name;
127 }
128 }
129
130 if (gpio_is_valid(pdata->reset_gpio)) {
131 ret = gpio_request(pdata->reset_gpio, rfkill->reset_name);
132 if (ret) {
133 pr_warn("%s: failed to get reset gpio.\n", __func__);
134 goto fail_clock;
135 }
136 }
137
138 if (gpio_is_valid(pdata->shutdown_gpio)) {
139 ret = gpio_request(pdata->shutdown_gpio, rfkill->shutdown_name);
140 if (ret) {
141 pr_warn("%s: failed to get shutdown gpio.\n", __func__);
142 goto fail_reset;
143 }
144 }
145
146 rfkill->rfkill_dev = rfkill_alloc(pdata->name, &pdev->dev, pdata->type,
147 &rfkill_gpio_ops, rfkill);
148 if (!rfkill->rfkill_dev)
149 goto fail_shutdown;
150
151 ret = rfkill_register(rfkill->rfkill_dev);
152 if (ret < 0)
153 goto fail_rfkill;
154
155 platform_set_drvdata(pdev, rfkill);
156
157 dev_info(&pdev->dev, "%s device registered.\n", pdata->name);
158
159 return 0;
160
161fail_rfkill:
162 rfkill_destroy(rfkill->rfkill_dev);
163fail_shutdown:
164 if (gpio_is_valid(pdata->shutdown_gpio))
165 gpio_free(pdata->shutdown_gpio);
166fail_reset:
167 if (gpio_is_valid(pdata->reset_gpio))
168 gpio_free(pdata->reset_gpio);
169fail_clock:
170 if (rfkill->pwr_clk)
171 clk_put(rfkill->pwr_clk);
172fail_shutdown_name:
173 kfree(rfkill->shutdown_name);
174fail_reset_name:
175 kfree(rfkill->reset_name);
176fail_alloc:
177 kfree(rfkill);
178
179 return ret;
180}
181
182static int rfkill_gpio_remove(struct platform_device *pdev)
183{
184 struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
185
186 rfkill_unregister(rfkill->rfkill_dev);
187 rfkill_destroy(rfkill->rfkill_dev);
188 if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
189 gpio_free(rfkill->pdata->shutdown_gpio);
190 if (gpio_is_valid(rfkill->pdata->reset_gpio))
191 gpio_free(rfkill->pdata->reset_gpio);
192 if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill))
193 clk_disable(rfkill->pwr_clk);
194 if (rfkill->pwr_clk)
195 clk_put(rfkill->pwr_clk);
196 kfree(rfkill->shutdown_name);
197 kfree(rfkill->reset_name);
198 kfree(rfkill);
199
200 return 0;
201}
202
203static struct platform_driver rfkill_gpio_driver = {
204 .probe = rfkill_gpio_probe,
205 .remove = __devexit_p(rfkill_gpio_remove),
206 .driver = {
207 .name = "rfkill_gpio",
208 .owner = THIS_MODULE,
209 },
210};
211
212static int __init rfkill_gpio_init(void)
213{
214 return platform_driver_register(&rfkill_gpio_driver);
215}
216
217static void __exit rfkill_gpio_exit(void)
218{
219 platform_driver_unregister(&rfkill_gpio_driver);
220}
221
222module_init(rfkill_gpio_init);
223module_exit(rfkill_gpio_exit);
224
225MODULE_DESCRIPTION("gpio rfkill");
226MODULE_AUTHOR("NVIDIA");
227MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 7ef87f9eb675..b6ea6afa55b0 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -361,7 +361,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
361{ 361{
362 struct sfq_sched_data *q = qdisc_priv(sch); 362 struct sfq_sched_data *q = qdisc_priv(sch);
363 unsigned int hash; 363 unsigned int hash;
364 sfq_index x; 364 sfq_index x, qlen;
365 struct sfq_slot *slot; 365 struct sfq_slot *slot;
366 int uninitialized_var(ret); 366 int uninitialized_var(ret);
367 367
@@ -405,20 +405,12 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
405 if (++sch->q.qlen <= q->limit) 405 if (++sch->q.qlen <= q->limit)
406 return NET_XMIT_SUCCESS; 406 return NET_XMIT_SUCCESS;
407 407
408 qlen = slot->qlen;
408 sfq_drop(sch); 409 sfq_drop(sch);
409 return NET_XMIT_CN; 410 /* Return Congestion Notification only if we dropped a packet
410} 411 * from this flow.
411 412 */
412static struct sk_buff * 413 return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS;
413sfq_peek(struct Qdisc *sch)
414{
415 struct sfq_sched_data *q = qdisc_priv(sch);
416
417 /* No active slots */
418 if (q->tail == NULL)
419 return NULL;
420
421 return q->slots[q->tail->next].skblist_next;
422} 414}
423 415
424static struct sk_buff * 416static struct sk_buff *
@@ -702,7 +694,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
702 .priv_size = sizeof(struct sfq_sched_data), 694 .priv_size = sizeof(struct sfq_sched_data),
703 .enqueue = sfq_enqueue, 695 .enqueue = sfq_enqueue,
704 .dequeue = sfq_dequeue, 696 .dequeue = sfq_dequeue,
705 .peek = sfq_peek, 697 .peek = qdisc_peek_dequeued,
706 .drop = sfq_drop, 698 .drop = sfq_drop,
707 .init = sfq_init, 699 .init = sfq_init,
708 .reset = sfq_reset, 700 .reset = sfq_reset,
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 1a21c571aa03..525f97c467e9 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -64,6 +64,7 @@
64/* Forward declarations for internal functions. */ 64/* Forward declarations for internal functions. */
65static void sctp_assoc_bh_rcv(struct work_struct *work); 65static void sctp_assoc_bh_rcv(struct work_struct *work);
66static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc); 66static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
67static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
67 68
68/* Keep track of the new idr low so that we don't re-use association id 69/* Keep track of the new idr low so that we don't re-use association id
69 * numbers too fast. It is protected by they idr spin lock is in the 70 * numbers too fast. It is protected by they idr spin lock is in the
@@ -446,6 +447,9 @@ void sctp_association_free(struct sctp_association *asoc)
446 /* Free any cached ASCONF_ACK chunk. */ 447 /* Free any cached ASCONF_ACK chunk. */
447 sctp_assoc_free_asconf_acks(asoc); 448 sctp_assoc_free_asconf_acks(asoc);
448 449
450 /* Free the ASCONF queue. */
451 sctp_assoc_free_asconf_queue(asoc);
452
449 /* Free any cached ASCONF chunk. */ 453 /* Free any cached ASCONF chunk. */
450 if (asoc->addip_last_asconf) 454 if (asoc->addip_last_asconf)
451 sctp_chunk_free(asoc->addip_last_asconf); 455 sctp_chunk_free(asoc->addip_last_asconf);
@@ -1578,6 +1582,18 @@ retry:
1578 return error; 1582 return error;
1579} 1583}
1580 1584
1585/* Free the ASCONF queue */
1586static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1587{
1588 struct sctp_chunk *asconf;
1589 struct sctp_chunk *tmp;
1590
1591 list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1592 list_del_init(&asconf->list);
1593 sctp_chunk_free(asconf);
1594 }
1595}
1596
1581/* Free asconf_ack cache */ 1597/* Free asconf_ack cache */
1582static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc) 1598static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1583{ 1599{
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 61aacfbbaa92..05a6ce214714 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -212,7 +212,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
212 sctp_for_each_hentry(epb, node, &head->chain) { 212 sctp_for_each_hentry(epb, node, &head->chain) {
213 ep = sctp_ep(epb); 213 ep = sctp_ep(epb);
214 sk = epb->sk; 214 sk = epb->sk;
215 seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, 215 seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
216 sctp_sk(sk)->type, sk->sk_state, hash, 216 sctp_sk(sk)->type, sk->sk_state, hash,
217 epb->bind_addr.port, 217 epb->bind_addr.port,
218 sock_i_uid(sk), sock_i_ino(sk)); 218 sock_i_uid(sk), sock_i_ino(sk));
@@ -316,7 +316,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
316 assoc = sctp_assoc(epb); 316 assoc = sctp_assoc(epb);
317 sk = epb->sk; 317 sk = epb->sk;
318 seq_printf(seq, 318 seq_printf(seq,
319 "%8p %8p %-3d %-3d %-2d %-4d " 319 "%8pK %8pK %-3d %-3d %-2d %-4d "
320 "%4d %8d %8d %7d %5lu %-5d %5d ", 320 "%4d %8d %8d %7d %5lu %-5d %5d ",
321 assoc, sk, sctp_sk(sk)->type, sk->sk_state, 321 assoc, sk, sctp_sk(sk)->type, sk->sk_state,
322 assoc->state, hash, 322 assoc->state, hash,
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 67e31276682a..cd6e4aa19dbf 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -326,10 +326,12 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
326 * Run memory cache shrinker. 326 * Run memory cache shrinker.
327 */ 327 */
328static int 328static int
329rpcauth_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) 329rpcauth_cache_shrinker(struct shrinker *shrink, struct shrink_control *sc)
330{ 330{
331 LIST_HEAD(free); 331 LIST_HEAD(free);
332 int res; 332 int res;
333 int nr_to_scan = sc->nr_to_scan;
334 gfp_t gfp_mask = sc->gfp_mask;
333 335
334 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) 336 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
335 return (nr_to_scan == 0) ? 0 : -1; 337 return (nr_to_scan == 0) ? 0 : -1;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 6c014dd3a20b..c3c232a88d94 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -695,7 +695,8 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
695 return ERR_PTR(-ENOMEM); 695 return ERR_PTR(-ENOMEM);
696 xprt = &cma_xprt->sc_xprt; 696 xprt = &cma_xprt->sc_xprt;
697 697
698 listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP); 698 listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
699 IB_QPT_RC);
699 if (IS_ERR(listen_id)) { 700 if (IS_ERR(listen_id)) {
700 ret = PTR_ERR(listen_id); 701 ret = PTR_ERR(listen_id);
701 dprintk("svcrdma: rdma_create_id failed = %d\n", ret); 702 dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index d4297dc43dc4..80f8da344df5 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -387,7 +387,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
387 387
388 init_completion(&ia->ri_done); 388 init_completion(&ia->ri_done);
389 389
390 id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP); 390 id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
391 if (IS_ERR(id)) { 391 if (IS_ERR(id)) {
392 rc = PTR_ERR(id); 392 rc = PTR_ERR(id);
393 dprintk("RPC: %s: rdma_create_id() failed %i\n", 393 dprintk("RPC: %s: rdma_create_id() failed %i\n",
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index b1d75beb7e20..0722a25a3a33 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2254,7 +2254,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
2254 struct unix_sock *u = unix_sk(s); 2254 struct unix_sock *u = unix_sk(s);
2255 unix_state_lock(s); 2255 unix_state_lock(s);
2256 2256
2257 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu", 2257 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2258 s, 2258 s,
2259 atomic_read(&s->sk_refcnt), 2259 atomic_read(&s->sk_refcnt),
2260 0, 2260 0,
diff --git a/net/wireless/core.h b/net/wireless/core.h
index bf0fb40e3c8b..3dce1f167eba 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -245,6 +245,7 @@ struct cfg80211_event {
245 u16 status; 245 u16 status;
246 } cr; 246 } cr;
247 struct { 247 struct {
248 struct ieee80211_channel *channel;
248 u8 bssid[ETH_ALEN]; 249 u8 bssid[ETH_ALEN];
249 const u8 *req_ie; 250 const u8 *req_ie;
250 const u8 *resp_ie; 251 const u8 *resp_ie;
@@ -392,7 +393,9 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
392int cfg80211_disconnect(struct cfg80211_registered_device *rdev, 393int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
393 struct net_device *dev, u16 reason, 394 struct net_device *dev, u16 reason,
394 bool wextev); 395 bool wextev);
395void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, 396void __cfg80211_roamed(struct wireless_dev *wdev,
397 struct ieee80211_channel *channel,
398 const u8 *bssid,
396 const u8 *req_ie, size_t req_ie_len, 399 const u8 *req_ie, size_t req_ie_len,
397 const u8 *resp_ie, size_t resp_ie_len); 400 const u8 *resp_ie, size_t resp_ie_len);
398int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, 401int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 2222ce08ee91..ec83f413a7ed 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3294,8 +3294,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
3294 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 3294 struct cfg80211_registered_device *rdev = info->user_ptr[0];
3295 struct net_device *dev = info->user_ptr[1]; 3295 struct net_device *dev = info->user_ptr[1];
3296 struct cfg80211_scan_request *request; 3296 struct cfg80211_scan_request *request;
3297 struct cfg80211_ssid *ssid;
3298 struct ieee80211_channel *channel;
3299 struct nlattr *attr; 3297 struct nlattr *attr;
3300 struct wiphy *wiphy; 3298 struct wiphy *wiphy;
3301 int err, tmp, n_ssids = 0, n_channels, i; 3299 int err, tmp, n_ssids = 0, n_channels, i;
@@ -3342,8 +3340,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
3342 return -EINVAL; 3340 return -EINVAL;
3343 3341
3344 request = kzalloc(sizeof(*request) 3342 request = kzalloc(sizeof(*request)
3345 + sizeof(*ssid) * n_ssids 3343 + sizeof(*request->ssids) * n_ssids
3346 + sizeof(channel) * n_channels 3344 + sizeof(*request->channels) * n_channels
3347 + ie_len, GFP_KERNEL); 3345 + ie_len, GFP_KERNEL);
3348 if (!request) 3346 if (!request)
3349 return -ENOMEM; 3347 return -ENOMEM;
@@ -3449,8 +3447,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
3449 struct cfg80211_sched_scan_request *request; 3447 struct cfg80211_sched_scan_request *request;
3450 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 3448 struct cfg80211_registered_device *rdev = info->user_ptr[0];
3451 struct net_device *dev = info->user_ptr[1]; 3449 struct net_device *dev = info->user_ptr[1];
3452 struct cfg80211_ssid *ssid;
3453 struct ieee80211_channel *channel;
3454 struct nlattr *attr; 3450 struct nlattr *attr;
3455 struct wiphy *wiphy; 3451 struct wiphy *wiphy;
3456 int err, tmp, n_ssids = 0, n_channels, i; 3452 int err, tmp, n_ssids = 0, n_channels, i;
@@ -3507,8 +3503,8 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
3507 return -EINVAL; 3503 return -EINVAL;
3508 3504
3509 request = kzalloc(sizeof(*request) 3505 request = kzalloc(sizeof(*request)
3510 + sizeof(*ssid) * n_ssids 3506 + sizeof(*request->ssids) * n_ssids
3511 + sizeof(channel) * n_channels 3507 + sizeof(*request->channels) * n_channels
3512 + ie_len, GFP_KERNEL); 3508 + ie_len, GFP_KERNEL);
3513 if (!request) 3509 if (!request)
3514 return -ENOMEM; 3510 return -ENOMEM;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index e17b0bee6bdc..b7b6ff8be553 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -250,7 +250,8 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
250 if (wdev->conn->params.privacy) 250 if (wdev->conn->params.privacy)
251 capa |= WLAN_CAPABILITY_PRIVACY; 251 capa |= WLAN_CAPABILITY_PRIVACY;
252 252
253 bss = cfg80211_get_bss(wdev->wiphy, NULL, wdev->conn->params.bssid, 253 bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel,
254 wdev->conn->params.bssid,
254 wdev->conn->params.ssid, 255 wdev->conn->params.ssid,
255 wdev->conn->params.ssid_len, 256 wdev->conn->params.ssid_len,
256 WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY, 257 WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
@@ -470,7 +471,10 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
470 } 471 }
471 472
472 if (!bss) 473 if (!bss)
473 bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, 474 bss = cfg80211_get_bss(wdev->wiphy,
475 wdev->conn ? wdev->conn->params.channel :
476 NULL,
477 bssid,
474 wdev->ssid, wdev->ssid_len, 478 wdev->ssid, wdev->ssid_len,
475 WLAN_CAPABILITY_ESS, 479 WLAN_CAPABILITY_ESS,
476 WLAN_CAPABILITY_ESS); 480 WLAN_CAPABILITY_ESS);
@@ -538,7 +542,9 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
538} 542}
539EXPORT_SYMBOL(cfg80211_connect_result); 543EXPORT_SYMBOL(cfg80211_connect_result);
540 544
541void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, 545void __cfg80211_roamed(struct wireless_dev *wdev,
546 struct ieee80211_channel *channel,
547 const u8 *bssid,
542 const u8 *req_ie, size_t req_ie_len, 548 const u8 *req_ie, size_t req_ie_len,
543 const u8 *resp_ie, size_t resp_ie_len) 549 const u8 *resp_ie, size_t resp_ie_len)
544{ 550{
@@ -565,7 +571,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
565 cfg80211_put_bss(&wdev->current_bss->pub); 571 cfg80211_put_bss(&wdev->current_bss->pub);
566 wdev->current_bss = NULL; 572 wdev->current_bss = NULL;
567 573
568 bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, 574 bss = cfg80211_get_bss(wdev->wiphy, channel, bssid,
569 wdev->ssid, wdev->ssid_len, 575 wdev->ssid, wdev->ssid_len,
570 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); 576 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
571 577
@@ -603,7 +609,9 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
603#endif 609#endif
604} 610}
605 611
606void cfg80211_roamed(struct net_device *dev, const u8 *bssid, 612void cfg80211_roamed(struct net_device *dev,
613 struct ieee80211_channel *channel,
614 const u8 *bssid,
607 const u8 *req_ie, size_t req_ie_len, 615 const u8 *req_ie, size_t req_ie_len,
608 const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) 616 const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp)
609{ 617{
@@ -619,6 +627,7 @@ void cfg80211_roamed(struct net_device *dev, const u8 *bssid,
619 return; 627 return;
620 628
621 ev->type = EVENT_ROAMED; 629 ev->type = EVENT_ROAMED;
630 ev->rm.channel = channel;
622 memcpy(ev->rm.bssid, bssid, ETH_ALEN); 631 memcpy(ev->rm.bssid, bssid, ETH_ALEN);
623 ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev); 632 ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev);
624 ev->rm.req_ie_len = req_ie_len; 633 ev->rm.req_ie_len = req_ie_len;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index f0536d44d43c..4d7b83fbc32f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -746,7 +746,7 @@ static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
746 NULL); 746 NULL);
747 break; 747 break;
748 case EVENT_ROAMED: 748 case EVENT_ROAMED:
749 __cfg80211_roamed(wdev, ev->rm.bssid, 749 __cfg80211_roamed(wdev, ev->rm.channel, ev->rm.bssid,
750 ev->rm.req_ie, ev->rm.req_ie_len, 750 ev->rm.req_ie, ev->rm.req_ie_len,
751 ev->rm.resp_ie, ev->rm.resp_ie_len); 751 ev->rm.resp_ie, ev->rm.resp_ie_len);
752 break; 752 break;
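
The sme.c and util.c hunks above thread the roamed-to channel through __cfg80211_roamed() and cfg80211_roamed(). A driver reporting a roam would now call the exported helper roughly as follows; demo_report_roam and its parameters are hypothetical:

#include <net/cfg80211.h>

/* Hypothetical wrapper in a fullmac driver's roam handler */
static void demo_report_roam(struct net_device *ndev,
			     struct ieee80211_channel *new_chan,
			     const u8 *new_bssid,
			     const u8 *req_ie, size_t req_ie_len,
			     const u8 *resp_ie, size_t resp_ie_len)
{
	/* the channel of the BSS we roamed to is now passed explicitly */
	cfg80211_roamed(ndev, new_chan, new_bssid,
			req_ie, req_ie_len,
			resp_ie, resp_ie_len, GFP_KERNEL);
}
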