author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2012-06-26 19:04:29 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2012-06-26 19:04:29 -0400
commit     fc915c8b930c3114f2a838f7e2cd8789ad6fedc3 (patch)
tree       795197f8ec2bc5ba42ee975ecd4d5a16c64e063c /net
parent     fe89def79c48e2149abdd1e816523e69a9067191 (diff)
parent     6b16351acbd415e66ba16bf7d473ece1574cf0bc (diff)
Merge 3.5-rc4 into tty-next
This is to pick up the serial port and tty changes in Linus's tree to allow everyone to sync up.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'net')
-rw-r--r--  net/9p/trans_virtio.c | 2
-rw-r--r--  net/appletalk/ddp.c | 4
-rw-r--r--  net/bluetooth/af_bluetooth.c | 2
-rw-r--r--  net/ceph/ceph_common.c | 7
-rw-r--r--  net/ceph/messenger.c | 4
-rw-r--r--  net/ceph/mon_client.c | 8
-rw-r--r--  net/ceph/osd_client.c | 12
-rw-r--r--  net/core/drop_monitor.c | 102
-rw-r--r--  net/core/filter.c | 4
-rw-r--r--  net/core/neighbour.c | 14
-rw-r--r--  net/core/netpoll.c | 11
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/ipv4/inetpeer.c | 16
-rw-r--r--  net/ipv4/ip_forward.c | 1
-rw-r--r--  net/ipv4/ipmr.c | 1
-rw-r--r--  net/ipv6/ip6_fib.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 1
-rw-r--r--  net/ipv6/ip6mr.c | 2
-rw-r--r--  net/l2tp/l2tp_eth.c | 2
-rw-r--r--  net/l2tp/l2tp_ip.c | 9
-rw-r--r--  net/mac80211/agg-rx.c | 7
-rw-r--r--  net/mac80211/cfg.c | 6
-rw-r--r--  net/mac80211/iface.c | 12
-rw-r--r--  net/mac80211/mlme.c | 38
-rw-r--r--  net/mac80211/offchannel.c | 16
-rw-r--r--  net/mac80211/sta_info.c | 4
-rw-r--r--  net/mac80211/tx.c | 9
-rw-r--r--  net/mac80211/util.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_h323_main.c | 5
-rw-r--r--  net/netfilter/xt_HMARK.c | 72
-rw-r--r--  net/nfc/llcp/sock.c | 3
-rw-r--r--  net/sunrpc/rpc_pipe.c | 12
-rw-r--r--  net/sunrpc/svc.c | 3
-rw-r--r--  net/wireless/ibss.c | 6
-rw-r--r--  net/wireless/util.c | 19
35 files changed, 246 insertions, 174 deletions
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 5af18d11b518..2a167658bb95 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -192,10 +192,10 @@ static int pack_sg_list(struct scatterlist *sg, int start,
 		s = rest_of_page(data);
 		if (s > count)
 			s = count;
+		BUG_ON(index > limit);
 		sg_set_buf(&sg[index++], data, s);
 		count -= s;
 		data += s;
-		BUG_ON(index > limit);
 	}
 
 	return index-start;
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 0301b328cf0f..86852963b7f7 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1208,9 +1208,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
 	if (addr->sat_addr.s_node == ATADDR_BCAST &&
 	    !sock_flag(sk, SOCK_BROADCAST)) {
 #if 1
-		printk(KERN_WARNING "%s is broken and did not set "
-			"SO_BROADCAST. It will break when 2.2 is "
-			"released.\n",
+		pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n",
 			current->comm);
 #else
 		return -EACCES;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 46e7f86acfc9..3e18af4dadc4 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
 	}
 
 	if (sk->sk_state == BT_CONNECTED || !newsock ||
-	    test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) {
+	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
 		bt_accept_unlink(sk);
 		if (newsock)
 			sock_graft(sk, newsock);
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index a776f751edbf..ba4323bce0e9 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -504,13 +504,6 @@ void ceph_destroy_client(struct ceph_client *client)
 	/* unmount */
 	ceph_osdc_stop(&client->osdc);
 
-	/*
-	 * make sure osd connections close out before destroying the
-	 * auth module, which is needed to free those connections'
-	 * ceph_authorizers.
-	 */
-	ceph_msgr_flush();
-
 	ceph_monc_stop(&client->monc);
 
 	ceph_debugfs_client_cleanup(client);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 524f4e4f598b..b332c3d76059 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -563,6 +563,10 @@ static void prepare_write_message(struct ceph_connection *con)
 		m->hdr.seq = cpu_to_le64(++con->out_seq);
 		m->needs_out_seq = false;
 	}
+#ifdef CONFIG_BLOCK
+	else
+		m->bio_iter = NULL;
+#endif
 
 	dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
 	     m, con->out_seq, le16_to_cpu(m->hdr.type),
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 10d6008d31f2..d0649a9655be 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -847,6 +847,14 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
 
 	mutex_unlock(&monc->mutex);
 
+	/*
+	 * flush msgr queue before we destroy ourselves to ensure that:
+	 *  - any work that references our embedded con is finished.
+	 *  - any osd_client or other work that may reference an authorizer
+	 *    finishes before we shut down the auth subsystem.
+	 */
+	ceph_msgr_flush();
+
 	ceph_auth_destroy(monc->auth);
 
 	ceph_msg_put(monc->m_auth);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 1ffebed5ce0f..ca59e66c9787 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -139,15 +139,15 @@ void ceph_osdc_release_request(struct kref *kref)
 
 	if (req->r_request)
 		ceph_msg_put(req->r_request);
-	if (req->r_reply)
-		ceph_msg_put(req->r_reply);
 	if (req->r_con_filling_msg) {
 		dout("release_request revoking pages %p from con %p\n",
 		     req->r_pages, req->r_con_filling_msg);
 		ceph_con_revoke_message(req->r_con_filling_msg,
 					req->r_reply);
-		ceph_con_put(req->r_con_filling_msg);
+		req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
 	}
+	if (req->r_reply)
+		ceph_msg_put(req->r_reply);
 	if (req->r_own_pages)
 		ceph_release_page_vector(req->r_pages,
 					 req->r_num_pages);
@@ -1216,7 +1216,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
 	if (req->r_con_filling_msg == con && req->r_reply == msg) {
 		dout(" dropping con_filling_msg ref %p\n", con);
 		req->r_con_filling_msg = NULL;
-		ceph_con_put(con);
+		con->ops->put(con);
 	}
 
 	if (!req->r_got_reply) {
@@ -2028,7 +2028,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 		dout("get_reply revoking msg %p from old con %p\n",
 		     req->r_reply, req->r_con_filling_msg);
 		ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
-		ceph_con_put(req->r_con_filling_msg);
+		req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
 		req->r_con_filling_msg = NULL;
 	}
 
@@ -2063,7 +2063,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 #endif
 	}
 	*skip = 0;
-	req->r_con_filling_msg = ceph_con_get(con);
+	req->r_con_filling_msg = con->ops->get(con);
 	dout("get_reply tid %lld %p\n", tid, m);
 
 out:
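The osd_client hunks above drop the ceph_con_get()/ceph_con_put() wrappers and instead take and release connection references through the connection's ops table. A minimal standalone C sketch of that ops-table get/put pattern; all names here are invented for illustration and are not part of the Ceph code:

#include <stdio.h>

/* Hypothetical connection with an ops table, loosely mirroring the
 * con->ops->get()/put() calls used in the hunks above. */
struct conn;

struct conn_ops {
	struct conn *(*get)(struct conn *c);
	void (*put)(struct conn *c);
};

struct conn {
	const struct conn_ops *ops;
	int refcount;
};

static struct conn *conn_get(struct conn *c)
{
	c->refcount++;
	return c;
}

static void conn_put(struct conn *c)
{
	if (--c->refcount == 0)
		printf("last reference dropped, freeing connection\n");
}

static const struct conn_ops default_ops = {
	.get = conn_get,
	.put = conn_put,
};

int main(void)
{
	struct conn c = { .ops = &default_ops, .refcount = 1 };
	struct conn *filling = c.ops->get(&c);	/* take a ref, like r_con_filling_msg */

	filling->ops->put(filling);		/* release it through the same ops table */
	c.ops->put(&c);				/* drop the initial reference */
	return 0;
}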
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index ea5fb9fcc3f5..d23b6682f4e9 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -36,9 +36,6 @@
 #define TRACE_ON 1
 #define TRACE_OFF 0
 
-static void send_dm_alert(struct work_struct *unused);
-
-
 /*
  * Globals, our netlink socket pointer
  * and the work handle that will send up
@@ -48,11 +45,10 @@ static int trace_state = TRACE_OFF;
 static DEFINE_MUTEX(trace_state_mutex);
 
 struct per_cpu_dm_data {
-	struct work_struct dm_alert_work;
-	struct sk_buff __rcu *skb;
-	atomic_t dm_hit_count;
+	spinlock_t lock;
+	struct sk_buff *skb;
+	struct work_struct dm_alert_work;
 	struct timer_list send_timer;
-	int cpu;
 };
 
 struct dm_hw_stat_delta {
@@ -78,13 +74,13 @@ static int dm_delay = 1;
 static unsigned long dm_hw_check_delta = 2*HZ;
 static LIST_HEAD(hw_stats_list);
 
-static void reset_per_cpu_data(struct per_cpu_dm_data *data)
+static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 {
 	size_t al;
 	struct net_dm_alert_msg *msg;
 	struct nlattr *nla;
 	struct sk_buff *skb;
-	struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
+	unsigned long flags;
 
 	al = sizeof(struct net_dm_alert_msg);
 	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
@@ -99,65 +95,40 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
 				  sizeof(struct net_dm_alert_msg));
 		msg = nla_data(nla);
 		memset(msg, 0, al);
-	} else
-		schedule_work_on(data->cpu, &data->dm_alert_work);
-
-	/*
-	 * Don't need to lock this, since we are guaranteed to only
-	 * run this on a single cpu at a time.
-	 * Note also that we only update data->skb if the old and new skb
-	 * pointers don't match. This ensures that we don't continually call
-	 * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
-	 */
-	if (skb != oskb) {
-		rcu_assign_pointer(data->skb, skb);
-
-		synchronize_rcu();
-
-		atomic_set(&data->dm_hit_count, dm_hit_limit);
+	} else {
+		mod_timer(&data->send_timer, jiffies + HZ / 10);
 	}
 
+	spin_lock_irqsave(&data->lock, flags);
+	swap(data->skb, skb);
+	spin_unlock_irqrestore(&data->lock, flags);
+
+	return skb;
 }
 
-static void send_dm_alert(struct work_struct *unused)
+static void send_dm_alert(struct work_struct *work)
 {
 	struct sk_buff *skb;
-	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+	struct per_cpu_dm_data *data;
 
-	WARN_ON_ONCE(data->cpu != smp_processor_id());
+	data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
 
-	/*
-	 * Grab the skb we're about to send
-	 */
-	skb = rcu_dereference_protected(data->skb, 1);
-
-	/*
-	 * Replace it with a new one
-	 */
-	reset_per_cpu_data(data);
+	skb = reset_per_cpu_data(data);
 
-	/*
-	 * Ship it!
-	 */
 	if (skb)
 		genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
-
-	put_cpu_var(dm_cpu_data);
 }
 
 /*
  * This is the timer function to delay the sending of an alert
  * in the event that more drops will arrive during the
- * hysteresis period. Note that it operates under the timer interrupt
- * so we don't need to disable preemption here
+ * hysteresis period.
  */
-static void sched_send_work(unsigned long unused)
+static void sched_send_work(unsigned long _data)
 {
-	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
-
-	schedule_work_on(smp_processor_id(), &data->dm_alert_work);
+	struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
 
-	put_cpu_var(dm_cpu_data);
+	schedule_work(&data->dm_alert_work);
 }
 
 static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -167,33 +138,28 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	struct nlattr *nla;
 	int i;
 	struct sk_buff *dskb;
-	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
-
+	struct per_cpu_dm_data *data;
+	unsigned long flags;
 
-	rcu_read_lock();
-	dskb = rcu_dereference(data->skb);
+	local_irq_save(flags);
+	data = &__get_cpu_var(dm_cpu_data);
+	spin_lock(&data->lock);
+	dskb = data->skb;
 
 	if (!dskb)
 		goto out;
 
-	if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
-		/*
-		 * we're already at zero, discard this hit
-		 */
-		goto out;
-	}
-
 	nlh = (struct nlmsghdr *)dskb->data;
 	nla = genlmsg_data(nlmsg_data(nlh));
 	msg = nla_data(nla);
 	for (i = 0; i < msg->entries; i++) {
 		if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
 			msg->points[i].count++;
-			atomic_inc(&data->dm_hit_count);
 			goto out;
 		}
 	}
-
+	if (msg->entries == dm_hit_limit)
+		goto out;
 	/*
 	 * We need to create a new entry
 	 */
@@ -205,13 +171,11 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 
 	if (!timer_pending(&data->send_timer)) {
 		data->send_timer.expires = jiffies + dm_delay * HZ;
-		add_timer_on(&data->send_timer, smp_processor_id());
+		add_timer(&data->send_timer);
 	}
 
 out:
-	rcu_read_unlock();
-	put_cpu_var(dm_cpu_data);
-	return;
+	spin_unlock_irqrestore(&data->lock, flags);
 }
 
 static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
@@ -418,11 +382,11 @@ static int __init init_net_drop_monitor(void)
 
 	for_each_possible_cpu(cpu) {
 		data = &per_cpu(dm_cpu_data, cpu);
-		data->cpu = cpu;
 		INIT_WORK(&data->dm_alert_work, send_dm_alert);
 		init_timer(&data->send_timer);
-		data->send_timer.data = cpu;
+		data->send_timer.data = (unsigned long)data;
 		data->send_timer.function = sched_send_work;
+		spin_lock_init(&data->lock);
 		reset_per_cpu_data(data);
 	}
 
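The drop_monitor rework above replaces the RCU pointer and atomic hit counting with a per-CPU spinlock, and hands the filled alert skb to the sending side by swapping in a fresh buffer under that lock. A rough userspace analogue of that swap-under-lock handoff, using a pthread mutex and heap buffers in place of the spinlock and skbs (all names invented for illustration):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for the per-CPU state above: a lock protects the
 * buffer pointer, and the consumer takes ownership by swapping in a
 * fresh buffer while holding the lock. */
struct dm_data {
	pthread_mutex_t lock;
	char *buf;		/* stands in for data->skb */
};

static char *reset_data(struct dm_data *d)
{
	char *fresh = calloc(1, 64);	/* may be NULL, just like alloc_skb() */
	char *old;

	pthread_mutex_lock(&d->lock);
	old = d->buf;		/* the swap(data->skb, skb) step */
	d->buf = fresh;
	pthread_mutex_unlock(&d->lock);

	return old;		/* caller now owns the old buffer exclusively */
}

static void record_drop(struct dm_data *d, const char *what)
{
	pthread_mutex_lock(&d->lock);
	if (d->buf)
		snprintf(d->buf, 64, "drop: %s", what);
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct dm_data d = { .lock = PTHREAD_MUTEX_INITIALIZER, .buf = NULL };
	char *old;

	free(reset_data(&d));		/* install the first buffer */
	record_drop(&d, "udp_rcv");	/* producer side */

	old = reset_data(&d);		/* consumer side: take the filled buffer */
	if (old)
		printf("sending alert: %s\n", old);
	free(old);
	free(d.buf);			/* buffer installed by the last swap */
	return 0;
}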
diff --git a/net/core/filter.c b/net/core/filter.c
index a3eddb515d1b..d4ce2dc712e3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -616,9 +616,9 @@ static int __sk_prepare_filter(struct sk_filter *fp)
 /**
  * sk_unattached_filter_create - create an unattached filter
  * @fprog: the filter program
- * @sk: the socket to use
+ * @pfp: the unattached filter that is created
  *
- * Create a filter independent ofr any socket. We first run some
+ * Create a filter independent of any socket. We first run some
  * sanity checks on it to make sure it does not explode on us later.
  * If an error occurs or there is insufficient memory for the filter
  * a negative errno code is returned. On success the return is zero.
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index eb09f8bbbf07..d81d026138f0 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2219,9 +2219,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 	rcu_read_lock_bh();
 	nht = rcu_dereference_bh(tbl->nht);
 
-	for (h = 0; h < (1 << nht->hash_shift); h++) {
-		if (h < s_h)
-			continue;
+	for (h = s_h; h < (1 << nht->hash_shift); h++) {
 		if (h > s_h)
 			s_idx = 0;
 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
@@ -2260,9 +2258,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 
 	read_lock_bh(&tbl->lock);
 
-	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
-		if (h < s_h)
-			continue;
+	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
 		if (h > s_h)
 			s_idx = 0;
 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
@@ -2297,7 +2293,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 	struct neigh_table *tbl;
 	int t, family, s_t;
 	int proxy = 0;
-	int err = 0;
+	int err;
 
 	read_lock(&neigh_tbl_lock);
 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
@@ -2311,7 +2307,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 
 	s_t = cb->args[0];
 
-	for (tbl = neigh_tables, t = 0; tbl && (err >= 0);
+	for (tbl = neigh_tables, t = 0; tbl;
 	     tbl = tbl->next, t++) {
 		if (t < s_t || (family && tbl->family != family))
 			continue;
@@ -2322,6 +2318,8 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 			err = pneigh_dump_table(tbl, skb, cb);
 		else
 			err = neigh_dump_table(tbl, skb, cb);
+		if (err < 0)
+			break;
 	}
 	read_unlock(&neigh_tbl_lock);
 
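The neighbour dump fixes above start the walk directly at the saved bucket (h = s_h) instead of skipping earlier buckets with continue, and abort the table loop as soon as a dump returns an error. A small standalone sketch of that resumable-walk shape, with made-up buckets and a budget standing in for the netlink message filling up:

#include <stdio.h>

#define NBUCKETS 4

/* Resumable dump in the style of the neigh_dump_table() fix above: start
 * at the saved bucket s_h, and reset the per-bucket index once past it.
 * The data and the budget here are invented for the example. */
static int dump(int s_h, int s_idx, int budget, int *out_h, int *out_idx)
{
	static const int bucket_len[NBUCKETS] = { 2, 3, 1, 2 };
	int h, idx, emitted = 0;

	for (h = s_h; h < NBUCKETS; h++) {
		if (h > s_h)
			s_idx = 0;
		for (idx = s_idx; idx < bucket_len[h]; idx++) {
			if (emitted == budget) {
				*out_h = h;	/* save resume point */
				*out_idx = idx;
				return 1;	/* more to do */
			}
			printf("entry %d/%d\n", h, idx);
			emitted++;
		}
	}
	return 0;
}

int main(void)
{
	int h = 0, idx = 0;

	while (dump(h, idx, 3, &h, &idx))
		printf("-- resuming at bucket %d, index %d --\n", h, idx);
	return 0;
}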
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3d84fb9d8873..f9f40b932e4b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -362,22 +362,23 @@ EXPORT_SYMBOL(netpoll_send_skb_on_dev);
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 {
-	int total_len, eth_len, ip_len, udp_len;
+	int total_len, ip_len, udp_len;
 	struct sk_buff *skb;
 	struct udphdr *udph;
 	struct iphdr *iph;
 	struct ethhdr *eth;
 
 	udp_len = len + sizeof(*udph);
-	ip_len = eth_len = udp_len + sizeof(*iph);
-	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;
+	ip_len = udp_len + sizeof(*iph);
+	total_len = ip_len + LL_RESERVED_SPACE(np->dev);
 
-	skb = find_skb(np, total_len, total_len - len);
+	skb = find_skb(np, total_len + np->dev->needed_tailroom,
+		       total_len - len);
 	if (!skb)
 		return;
 
 	skb_copy_to_linear_data(skb, msg, len);
-	skb->len += len;
+	skb_put(skb, len);
 
 	skb_push(skb, sizeof(*udph));
 	skb_reset_transport_header(skb);
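The netpoll change above sizes the buffer from the IP length plus the device's reserved link-layer headroom, and asks for the device's needed tailroom on top when allocating. The arithmetic is easy to mirror in a standalone program; the headroom and tailroom numbers below are invented placeholders for whatever LL_RESERVED_SPACE() and dev->needed_tailroom would supply:

#include <stdio.h>
#include <netinet/ip.h>
#include <netinet/udp.h>

int main(void)
{
	int len = 100;			/* payload bytes */
	int reserved_headroom = 16;	/* stands in for LL_RESERVED_SPACE(dev) */
	int needed_tailroom = 4;	/* stands in for dev->needed_tailroom */

	int udp_len = len + (int)sizeof(struct udphdr);
	int ip_len = udp_len + (int)sizeof(struct iphdr);
	int total_len = ip_len + reserved_headroom;

	printf("udp_len=%d ip_len=%d total_len=%d alloc=%d reserve=%d\n",
	       udp_len, ip_len, total_len,
	       total_len + needed_tailroom, total_len - len);
	return 0;
}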
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 016694d62484..d78671e9d545 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3361,7 +3361,7 @@ EXPORT_SYMBOL(kfree_skb_partial);
  * @to: prior buffer
  * @from: buffer to add
  * @fragstolen: pointer to boolean
- *
+ * @delta_truesize: how much more was allocated than was requested
  */
 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 		      bool *fragstolen, int *delta_truesize)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d4d61b694fab..dfba343b2509 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -560,6 +560,17 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
 }
 EXPORT_SYMBOL(inet_peer_xrlim_allow);
 
+static void inetpeer_inval_rcu(struct rcu_head *head)
+{
+	struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
+
+	spin_lock_bh(&gc_lock);
+	list_add_tail(&p->gc_list, &gc_list);
+	spin_unlock_bh(&gc_lock);
+
+	schedule_delayed_work(&gc_work, gc_delay);
+}
+
 void inetpeer_invalidate_tree(int family)
 {
 	struct inet_peer *old, *new, *prev;
@@ -576,10 +587,7 @@ void inetpeer_invalidate_tree(int family)
 	prev = cmpxchg(&base->root, old, new);
 	if (prev == old) {
 		base->total = 0;
-		spin_lock(&gc_lock);
-		list_add_tail(&prev->gc_list, &gc_list);
-		spin_unlock(&gc_lock);
-		schedule_delayed_work(&gc_work, gc_delay);
+		call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
 	}
 
 out:
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index e5c44fc586ab..ab09b126423c 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -44,6 +44,7 @@ static int ip_forward_finish(struct sk_buff *skb)
 	struct ip_options *opt = &(IPCB(skb)->opt);
 
 	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
 
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index a9e519ad6db5..c94bbc6f2ba3 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1574,6 +1574,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
 	struct ip_options *opt = &(IPCB(skb)->opt);
 
 	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
 
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 0c220a416626..74c21b924a79 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1561,7 +1561,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
 			neigh_flags = neigh->flags;
 			neigh_release(neigh);
 		}
-		if (neigh_flags & NTF_ROUTER) {
+		if (!(neigh_flags & NTF_ROUTER)) {
 			RT6_TRACE("purging route %p via non-router but gateway\n",
 				  rt);
 			return -1;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 17b8c67998bb..decc21d19c53 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -526,6 +526,7 @@ int ip6_forward(struct sk_buff *skb)
 	hdr->hop_limit--;
 
 	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
 		       ip6_forward_finish);
 
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index b15dc08643a4..461e47c8e956 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1886,6 +1886,8 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb)
 {
 	IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
 			 IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
+			 IPSTATS_MIB_OUTOCTETS, skb->len);
 	return dst_output(skb);
 }
 
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 443591d629ca..185f12f4a5fa 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -162,6 +162,7 @@ static void l2tp_eth_delete(struct l2tp_session *session)
 		if (dev) {
 			unregister_netdev(dev);
 			spriv->dev = NULL;
+			module_put(THIS_MODULE);
 		}
 	}
 }
@@ -249,6 +250,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
 	if (rc < 0)
 		goto out_del_dev;
 
+	__module_get(THIS_MODULE);
 	/* Must be done after register_netdev() */
 	strlcpy(session->ifname, dev->name, IFNAMSIZ);
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 70614e7affab..61d8b75d2686 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -464,10 +464,12 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 					   sk->sk_bound_dev_if);
 		if (IS_ERR(rt))
 			goto no_route;
-		if (connected)
+		if (connected) {
 			sk_setup_caps(sk, &rt->dst);
-		else
-			dst_release(&rt->dst); /* safe since we hold rcu_read_lock */
+		} else {
+			skb_dst_set(skb, &rt->dst);
+			goto xmit;
+		}
 	}
 
 	/* We dont need to clone dst here, it is guaranteed to not disappear.
@@ -475,6 +477,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 	 */
 	skb_dst_set_noref(skb, &rt->dst);
 
+xmit:
 	/* Queue the packet to IP for output */
 	rc = ip_queue_xmit(skb, &inet->cork.fl);
 	rcu_read_unlock();
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 26ddb699d693..c649188314cc 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -145,15 +145,20 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
 	struct tid_ampdu_rx *tid_rx;
 	unsigned long timeout;
 
+	rcu_read_lock();
 	tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]);
-	if (!tid_rx)
+	if (!tid_rx) {
+		rcu_read_unlock();
 		return;
+	}
 
 	timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
 	if (time_is_after_jiffies(timeout)) {
 		mod_timer(&tid_rx->session_timer, timeout);
+		rcu_read_unlock();
 		return;
 	}
+	rcu_read_unlock();
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 495831ee48f1..e9cecca5c44d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -533,16 +533,16 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
 		sinfo.filled = 0;
 		sta_set_sinfo(sta, &sinfo);
 
-		if (sinfo.filled | STATION_INFO_TX_BITRATE)
+		if (sinfo.filled & STATION_INFO_TX_BITRATE)
 			data[i] = 100000 *
 				cfg80211_calculate_bitrate(&sinfo.txrate);
 		i++;
-		if (sinfo.filled | STATION_INFO_RX_BITRATE)
+		if (sinfo.filled & STATION_INFO_RX_BITRATE)
 			data[i] = 100000 *
 				cfg80211_calculate_bitrate(&sinfo.rxrate);
 		i++;
 
-		if (sinfo.filled | STATION_INFO_SIGNAL_AVG)
+		if (sinfo.filled & STATION_INFO_SIGNAL_AVG)
 			data[i] = (u8)sinfo.signal_avg;
 		i++;
 	} else {
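The cfg.c fix above is a classic flag-test typo: a bitwise OR with a non-zero constant is always true, so every field looked "filled". A tiny standalone demonstration; the flag values below are placeholders, not the real nl80211 constants:

#include <stdio.h>

#define STATION_INFO_TX_BITRATE  (1 << 0)	/* placeholder value */
#define STATION_INFO_SIGNAL_AVG  (1 << 1)	/* placeholder value */

int main(void)
{
	unsigned int filled = STATION_INFO_SIGNAL_AVG;	/* TX_BITRATE not set */

	/* The buggy form: OR with a non-zero flag is always non-zero, so
	 * the branch is taken even though the flag is absent. */
	if (filled | STATION_INFO_TX_BITRATE)
		printf("OR  test: taken (wrong)\n");

	/* The fixed form: AND actually tests the flag. */
	if (filled & STATION_INFO_TX_BITRATE)
		printf("AND test: taken\n");
	else
		printf("AND test: not taken (correct)\n");

	return 0;
}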
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d4c19a7773db..8664111d0566 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -637,6 +637,18 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 		ieee80211_configure_filter(local);
 		break;
 	default:
+		mutex_lock(&local->mtx);
+		if (local->hw_roc_dev == sdata->dev &&
+		    local->hw_roc_channel) {
+			/* ignore return value since this is racy */
+			drv_cancel_remain_on_channel(local);
+			ieee80211_queue_work(&local->hw, &local->hw_roc_done);
+		}
+		mutex_unlock(&local->mtx);
+
+		flush_work(&local->hw_roc_start);
+		flush_work(&local->hw_roc_done);
+
 		flush_work(&sdata->work);
 		/*
 		 * When we get here, the interface is marked down.
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 04c306308987..91d84cc77bbf 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1220,6 +1220,22 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
 	sdata->vif.bss_conf.qos = true;
 }
 
+static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
+{
+	lockdep_assert_held(&sdata->local->mtx);
+
+	sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
+				IEEE80211_STA_BEACON_POLL);
+	ieee80211_run_deferred_scan(sdata->local);
+}
+
+static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
+{
+	mutex_lock(&sdata->local->mtx);
+	__ieee80211_stop_poll(sdata);
+	mutex_unlock(&sdata->local->mtx);
+}
+
 static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
 					   u16 capab, bool erp_valid, u8 erp)
 {
@@ -1285,8 +1301,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
 	sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
 
 	/* just to be sure */
-	sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-				IEEE80211_STA_BEACON_POLL);
+	ieee80211_stop_poll(sdata);
 
 	ieee80211_led_assoc(local, 1);
 
@@ -1456,8 +1471,7 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
 		return;
 	}
 
-	ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-			  IEEE80211_STA_BEACON_POLL);
+	__ieee80211_stop_poll(sdata);
 
 	mutex_lock(&local->iflist_mtx);
 	ieee80211_recalc_ps(local, -1);
@@ -1477,7 +1491,6 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
 			  round_jiffies_up(jiffies +
 					   IEEE80211_CONNECTION_IDLE_TIME));
 out:
-	ieee80211_run_deferred_scan(local);
 	mutex_unlock(&local->mtx);
 }
 
@@ -2408,7 +2421,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 		net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n",
 				    sdata->name);
 #endif
+		mutex_lock(&local->mtx);
 		ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
+		ieee80211_run_deferred_scan(local);
+		mutex_unlock(&local->mtx);
+
 		mutex_lock(&local->iflist_mtx);
 		ieee80211_recalc_ps(local, -1);
 		mutex_unlock(&local->iflist_mtx);
@@ -2595,8 +2612,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	u8 frame_buf[DEAUTH_DISASSOC_LEN];
 
-	ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-			  IEEE80211_STA_BEACON_POLL);
+	ieee80211_stop_poll(sdata);
 
 	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
 			       false, frame_buf);
@@ -2874,8 +2890,7 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
 	u32 flags;
 
 	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
-		sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL |
-					IEEE80211_STA_CONNECTION_POLL);
+		__ieee80211_stop_poll(sdata);
 
 		/* let's probe the connection once */
 		flags = sdata->local->hw.flags;
@@ -2944,7 +2959,10 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
 	if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
 		add_timer(&ifmgd->chswitch_timer);
 	ieee80211_sta_reset_beacon_monitor(sdata);
+
+	mutex_lock(&sdata->local->mtx);
 	ieee80211_restart_sta_timer(sdata);
+	mutex_unlock(&sdata->local->mtx);
 }
 #endif
 
@@ -3106,7 +3124,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
 	}
 
 	local->oper_channel = cbss->channel;
-	ieee80211_hw_config(local, 0);
+	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
 	if (!have_sta) {
 		u32 rates = 0, basic_rates = 0;
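The mlme changes above introduce a locked/unlocked pair: __ieee80211_stop_poll() for callers that already hold local->mtx, and ieee80211_stop_poll() which takes the mutex itself, so the poll flags and the deferred scan are always handled under the same lock. A small userspace sketch of that double-underscore convention using a pthread mutex (names are illustrative only, not mac80211 API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static unsigned int poll_flags = 0x3;	/* pretend both poll flags are set */

static void __stop_poll(void)
{
	/* In the kernel this would be lockdep_assert_held(&...->mtx);
	 * here we simply document the "lock already held" precondition. */
	poll_flags = 0;
	printf("poll stopped, flags=%#x\n", poll_flags);
}

static void stop_poll(void)
{
	pthread_mutex_lock(&mtx);
	__stop_poll();
	pthread_mutex_unlock(&mtx);
}

int main(void)
{
	/* Callers that already hold the lock use the __ variant... */
	pthread_mutex_lock(&mtx);
	__stop_poll();
	pthread_mutex_unlock(&mtx);

	/* ...everyone else uses the locking wrapper. */
	poll_flags = 0x3;
	stop_poll();
	return 0;
}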
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index f054e94901a2..935aa4b6deee 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -234,6 +234,22 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
 		return;
 	}
 
+	/* was never transmitted */
+	if (local->hw_roc_skb) {
+		u64 cookie;
+
+		cookie = local->hw_roc_cookie ^ 2;
+
+		cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie,
+					local->hw_roc_skb->data,
+					local->hw_roc_skb->len, false,
+					GFP_KERNEL);
+
+		kfree_skb(local->hw_roc_skb);
+		local->hw_roc_skb = NULL;
+		local->hw_roc_skb_for_status = NULL;
+	}
+
 	if (!local->hw_roc_for_tx)
 		cfg80211_remain_on_channel_expired(local->hw_roc_dev,
 						   local->hw_roc_cookie,
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f5b1638fbf80..de455f8bbb91 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -378,7 +378,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 	/* make the station visible */
 	sta_info_hash_add(local, sta);
 
-	list_add(&sta->list, &local->sta_list);
+	list_add_rcu(&sta->list, &local->sta_list);
 
 	set_sta_flag(sta, WLAN_STA_INSERTED);
 
@@ -688,7 +688,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
 	if (ret)
 		return ret;
 
-	list_del(&sta->list);
+	list_del_rcu(&sta->list);
 
 	mutex_lock(&local->key_mtx);
 	for (i = 0; i < NUM_DEFAULT_KEYS; i++)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 847215bb2a6f..e453212fa17f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1737,7 +1737,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 	__le16 fc;
 	struct ieee80211_hdr hdr;
 	struct ieee80211s_hdr mesh_hdr __maybe_unused;
-	struct mesh_path __maybe_unused *mppath = NULL;
+	struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL;
 	const u8 *encaps_data;
 	int encaps_len, skip_header_bytes;
 	int nh_pos, h_pos;
@@ -1803,8 +1803,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 			goto fail;
 		}
 		rcu_read_lock();
-		if (!is_multicast_ether_addr(skb->data))
-			mppath = mpp_path_lookup(skb->data, sdata);
+		if (!is_multicast_ether_addr(skb->data)) {
+			mpath = mesh_path_lookup(skb->data, sdata);
+			if (!mpath)
+				mppath = mpp_path_lookup(skb->data, sdata);
+		}
 
 		/*
 		 * Use address extension if it is a packet from
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index a44c6807df01..8dd4712620ff 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1271,7 +1271,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 			enum ieee80211_sta_state state;
 
 			for (state = IEEE80211_STA_NOTEXIST;
-			     state < sta->sta_state - 1; state++)
+			     state < sta->sta_state; state++)
 				WARN_ON(drv_sta_state(local, sta->sdata, sta,
 						      state, state + 1));
 		}
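The ieee80211_reconfig() fix above corrects a loop bound: replaying a station's state from IEEE80211_STA_NOTEXIST up to its target needs transitions while state < sta_state, not sta_state - 1, otherwise the last hop is never replayed. A standalone illustration with a made-up state enum:

#include <stdio.h>

enum sta_state { STA_NOTEXIST, STA_NONE, STA_AUTH, STA_ASSOC, STA_AUTHORIZED };

/* Stand-in for drv_sta_state(): just report the transition. */
static void advance(int from, int to)
{
	printf("  transition %d -> %d\n", from, to);
}

static void replay(int target, int off_by_one)
{
	int state;
	int limit = off_by_one ? target - 1 : target;

	printf("%s loop bound:\n", off_by_one ? "old" : "fixed");
	for (state = STA_NOTEXIST; state < limit; state++)
		advance(state, state + 1);
}

int main(void)
{
	/* The old bound stops one transition short of STA_AUTHORIZED;
	 * the fixed bound reaches it. */
	replay(STA_AUTHORIZED, 1);
	replay(STA_AUTHORIZED, 0);
	return 0;
}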
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 46d69d7f1bb4..31f50bc3a312 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -270,9 +270,8 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
 		return 0;
 
 	/* RTP port is even */
-	port &= htons(~1);
-	rtp_port = port;
-	rtcp_port = htons(ntohs(port) + 1);
+	rtp_port = port & ~htons(1);
+	rtcp_port = port | htons(1);
 
 	/* Create expect for RTP */
 	if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
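The H.323 helper fix above derives the even RTP port and the adjacent odd RTCP port with bit operations done directly on the network-byte-order value, instead of clearing the wrong bit and converting back and forth. The same arithmetic in a standalone program:

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	uint16_t port = htons(30007);			/* an odd port, network order */
	uint16_t rtp_port = port & ~htons(1);		/* clear the low bit: even RTP port */
	uint16_t rtcp_port = port | htons(1);		/* set the low bit: odd RTCP port */

	printf("given %u -> rtp %u, rtcp %u\n",
	       (unsigned)ntohs(port), (unsigned)ntohs(rtp_port),
	       (unsigned)ntohs(rtcp_port));
	return 0;
}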
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
index 0a96a43108ed..1686ca1b53a1 100644
--- a/net/netfilter/xt_HMARK.c
+++ b/net/netfilter/xt_HMARK.c
@@ -32,13 +32,13 @@ MODULE_ALIAS("ipt_HMARK");
 MODULE_ALIAS("ip6t_HMARK");
 
 struct hmark_tuple {
-	u32			src;
-	u32			dst;
+	__be32			src;
+	__be32			dst;
 	union hmark_ports	uports;
-	uint8_t			proto;
+	u8			proto;
 };
 
-static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
+static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask)
 {
 	return (addr32[0] & mask[0]) ^
 	       (addr32[1] & mask[1]) ^
@@ -46,8 +46,8 @@ static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
 	       (addr32[3] & mask[3]);
 }
 
-static inline u32
-hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
+static inline __be32
+hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask)
 {
 	switch (l3num) {
 	case AF_INET:
@@ -58,6 +58,22 @@ hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
 	return 0;
 }
 
+static inline void hmark_swap_ports(union hmark_ports *uports,
+				    const struct xt_hmark_info *info)
+{
+	union hmark_ports hp;
+	u16 src, dst;
+
+	hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32;
+	src = ntohs(hp.b16.src);
+	dst = ntohs(hp.b16.dst);
+
+	if (dst > src)
+		uports->v32 = (dst << 16) | src;
+	else
+		uports->v32 = (src << 16) | dst;
+}
+
 static int
 hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
 		    const struct xt_hmark_info *info)
@@ -74,22 +90,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
 	otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
 	rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
 
-	t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all,
-				 info->src_mask.all);
-	t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all,
-				 info->dst_mask.all);
+	t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6,
+				 info->src_mask.ip6);
+	t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6,
+				 info->dst_mask.ip6);
 
 	if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
 		return 0;
 
 	t->proto = nf_ct_protonum(ct);
 	if (t->proto != IPPROTO_ICMP) {
-		t->uports.p16.src = otuple->src.u.all;
-		t->uports.p16.dst = rtuple->src.u.all;
-		t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
-				info->port_set.v32;
-		if (t->uports.p16.dst < t->uports.p16.src)
-			swap(t->uports.p16.dst, t->uports.p16.src);
+		t->uports.b16.src = otuple->src.u.all;
+		t->uports.b16.dst = rtuple->src.u.all;
+		hmark_swap_ports(&t->uports, info);
 	}
 
 	return 0;
@@ -98,15 +111,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
 #endif
 }
 
+/* This hash function is endian independent, to ensure consistent hashing if
+ * the cluster is composed of big and little endian systems. */
 static inline u32
 hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
 {
 	u32 hash;
+	u32 src = ntohl(t->src);
+	u32 dst = ntohl(t->dst);
 
-	if (t->dst < t->src)
-		swap(t->src, t->dst);
+	if (dst < src)
+		swap(src, dst);
 
-	hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd);
+	hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd);
 	hash = hash ^ (t->proto & info->proto_mask);
 
 	return (((u64)hash * info->hmodulus) >> 32) + info->hoffset;
@@ -126,11 +143,7 @@ hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff,
 	if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
 		return;
 
-	t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
-			info->port_set.v32;
-
-	if (t->uports.p16.dst < t->uports.p16.src)
-		swap(t->uports.p16.dst, t->uports.p16.src);
+	hmark_swap_ports(&t->uports, info);
 }
 
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
@@ -178,8 +191,8 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
 		return -1;
 	}
 noicmp:
-	t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all);
-	t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all);
+	t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6);
+	t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6);
 
 	if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
 		return 0;
@@ -255,11 +268,8 @@ hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
 		}
 	}
 
-	t->src = (__force u32) ip->saddr;
-	t->dst = (__force u32) ip->daddr;
-
-	t->src &= info->src_mask.ip;
-	t->dst &= info->dst_mask.ip;
+	t->src = ip->saddr & info->src_mask.ip;
+	t->dst = ip->daddr & info->dst_mask.ip;
 
 	if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
 		return 0;
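The HMARK rework above funnels both the conntrack and the packet paths through hmark_swap_ports(), which masks the ports and then folds them into one 32-bit value with the larger port in the upper half, so a flow hashes identically in both directions and on both endiannesses. A standalone sketch of just the folding step; the mask/set handling of the real code is omitted and the names are invented:

#include <stdio.h>
#include <arpa/inet.h>

static uint32_t fold_ports(uint16_t sport_be, uint16_t dport_be)
{
	uint16_t src = ntohs(sport_be);
	uint16_t dst = ntohs(dport_be);

	/* Put the larger port in the high half, as hmark_swap_ports() does. */
	if (dst > src)
		return ((uint32_t)dst << 16) | src;
	return ((uint32_t)src << 16) | dst;
}

int main(void)
{
	/* The same connection seen from both directions folds to the
	 * same value, so both directions hash alike. */
	printf("%#x\n", (unsigned)fold_ports(htons(443), htons(51234)));
	printf("%#x\n", (unsigned)fold_ports(htons(51234), htons(443)));
	return 0;
}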
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 3f339b19d140..17a707db40eb 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -292,6 +292,9 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
 
 	pr_debug("%p\n", sk);
 
+	if (llcp_sock == NULL)
+		return -EBADFD;
+
 	addr->sa_family = AF_NFC;
 	*len = sizeof(struct sockaddr_nfc_llcp);
 
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 04040476082e..21fde99e5c56 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -71,7 +71,9 @@ static void rpc_purge_list(wait_queue_head_t *waitq, struct list_head *head,
 		msg->errno = err;
 		destroy_msg(msg);
 	} while (!list_empty(head));
-	wake_up(waitq);
+
+	if (waitq)
+		wake_up(waitq);
 }
 
 static void
@@ -91,11 +93,9 @@ rpc_timeout_upcall_queue(struct work_struct *work)
 	}
 	dentry = dget(pipe->dentry);
 	spin_unlock(&pipe->lock);
-	if (dentry) {
-		rpc_purge_list(&RPC_I(dentry->d_inode)->waitq,
-			       &free_list, destroy_msg, -ETIMEDOUT);
-		dput(dentry);
-	}
+	rpc_purge_list(dentry ? &RPC_I(dentry->d_inode)->waitq : NULL,
+		       &free_list, destroy_msg, -ETIMEDOUT);
+	dput(dentry);
 }
 
 ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 7e9baaa1e543..3ee7461926d8 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1374,7 +1374,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
 				       sizeof(req->rq_snd_buf));
 		return bc_send(req);
 	} else {
-		/* Nothing to do to drop request */
+		/* drop request */
+		xprt_free_bc_request(req);
 		return 0;
 	}
 }
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index d2a19b0ff71f..89baa3328411 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -42,6 +42,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
 	cfg80211_hold_bss(bss_from_pub(bss));
 	wdev->current_bss = bss_from_pub(bss);
 
+	wdev->sme_state = CFG80211_SME_CONNECTED;
 	cfg80211_upload_connect_keys(wdev);
 
 	nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid,
@@ -60,7 +61,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
 	struct cfg80211_event *ev;
 	unsigned long flags;
 
-	CFG80211_DEV_WARN_ON(!wdev->ssid_len);
+	CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING);
 
 	ev = kzalloc(sizeof(*ev), gfp);
 	if (!ev)
@@ -115,9 +116,11 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
 #ifdef CONFIG_CFG80211_WEXT
 	wdev->wext.ibss.channel = params->channel;
 #endif
+	wdev->sme_state = CFG80211_SME_CONNECTING;
 	err = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
 	if (err) {
 		wdev->connect_keys = NULL;
+		wdev->sme_state = CFG80211_SME_IDLE;
 		return err;
 	}
 
@@ -169,6 +172,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
 	}
 
 	wdev->current_bss = NULL;
+	wdev->sme_state = CFG80211_SME_IDLE;
 	wdev->ssid_len = 0;
 #ifdef CONFIG_CFG80211_WEXT
 	if (!nowext)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 55d99466babb..8f2d68fc3a44 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -935,6 +935,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
 				 enum nl80211_iftype iftype)
 {
 	struct wireless_dev *wdev_iter;
+	u32 used_iftypes = BIT(iftype);
 	int num[NUM_NL80211_IFTYPES];
 	int total = 1;
 	int i, j;
@@ -961,6 +962,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
 
 		num[wdev_iter->iftype]++;
 		total++;
+		used_iftypes |= BIT(wdev_iter->iftype);
 	}
 	mutex_unlock(&rdev->devlist_mtx);
 
@@ -970,6 +972,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
 	for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
 		const struct ieee80211_iface_combination *c;
 		struct ieee80211_iface_limit *limits;
+		u32 all_iftypes = 0;
 
 		c = &rdev->wiphy.iface_combinations[i];
 
@@ -984,6 +987,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
 			if (rdev->wiphy.software_iftypes & BIT(iftype))
 				continue;
 			for (j = 0; j < c->n_limits; j++) {
+				all_iftypes |= limits[j].types;
 				if (!(limits[j].types & BIT(iftype)))
 					continue;
 				if (limits[j].max < num[iftype])
@@ -991,7 +995,20 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
 				limits[j].max -= num[iftype];
 			}
 		}
-		/* yay, it fits */
+
+		/*
+		 * Finally check that all iftypes that we're currently
+		 * using are actually part of this combination. If they
+		 * aren't then we can't use this combination and have
+		 * to continue to the next.
+		 */
+		if ((all_iftypes & used_iftypes) != used_iftypes)
+			goto cont;
+
+		/*
+		 * This combination covered all interface types and
+		 * supported the requested numbers, so we're good.
+		 */
 		kfree(limits);
 		return 0;
  cont: