Diffstat (limited to 'net')
-rw-r--r--  net/Kconfig                               |  6
-rw-r--r--  net/bluetooth/hci_conn.c                  |  6
-rw-r--r--  net/bluetooth/hci_event.c                 |  2
-rw-r--r--  net/bluetooth/hci_sysfs.c                 | 10
-rw-r--r--  net/core/skbuff.c                         |  2
-rw-r--r--  net/ipv4/Kconfig                          |  2
-rw-r--r--  net/ipv6/netfilter/ip6t_ipv6header.c      |  6
-rw-r--r--  net/mac80211/rc80211_minstrel.c           |  4
-rw-r--r--  net/mac80211/rc80211_pid_algo.c           | 73
-rw-r--r--  net/mac80211/tx.c                         |  2
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c           |  9
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c           |  4
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c      | 48
-rw-r--r--  net/netfilter/xt_cluster.c                |  8
-rw-r--r--  net/sched/sch_fifo.c                      |  2
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c   |  2
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c     |  3
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  |  3
-rw-r--r--  net/wimax/op-msg.c                        | 11
-rw-r--r--  net/wimax/stack.c                         | 17
-rw-r--r--  net/wireless/reg.c                        | 17
-rw-r--r--  net/wireless/scan.c                       |  1
22 files changed, 138 insertions(+), 100 deletions(-)
diff --git a/net/Kconfig b/net/Kconfig
index ce77db4fcec8..c19f549c8e74 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -119,12 +119,6 @@ menuconfig NETFILTER
           <file:Documentation/Changes> under "iptables" for the location of
           these packages.

-          Make sure to say N to "Fast switching" below if you intend to say Y
-          here, as Fast switching currently bypasses netfilter.
-
-          Chances are that you should say Y here if you compile a kernel which
-          will run as a router and N for regular hosts. If unsure, say N.
-
 if NETFILTER

 config NETFILTER_DEBUG
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 61309b26f271..fa47d5d84f5c 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -171,10 +171,8 @@ static void hci_conn_timeout(unsigned long arg)
         switch (conn->state) {
         case BT_CONNECT:
         case BT_CONNECT2:
-                if (conn->type == ACL_LINK)
+                if (conn->type == ACL_LINK && conn->out)
                         hci_acl_connect_cancel(conn);
-                else
-                        hci_acl_disconn(conn, 0x13);
                 break;
         case BT_CONFIG:
         case BT_CONNECTED:
@@ -292,6 +290,8 @@ int hci_conn_del(struct hci_conn *conn)

         hci_conn_del_sysfs(conn);

+        hci_dev_put(hdev);
+
         return 0;
 }

diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 4e7cb88e5da9..184ba0a88ec0 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1493,7 +1493,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
         hci_dev_lock(hdev);

         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
-        if (conn) {
+        if (conn && conn->state == BT_CONNECTED) {
                 hci_conn_hold(conn);
                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
                 hci_conn_put(conn);
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 582d8877078c..4cc3624bd22d 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -88,14 +88,19 @@ static struct device_type bt_link = {
 static void add_conn(struct work_struct *work)
 {
         struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
+        struct hci_dev *hdev = conn->hdev;

         /* ensure previous del is complete */
         flush_work(&conn->work_del);

+        dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
+
         if (device_add(&conn->dev) < 0) {
                 BT_ERR("Failed to register connection device");
                 return;
         }
+
+        hci_dev_hold(hdev);
 }

 /*
@@ -131,6 +136,7 @@ static void del_conn(struct work_struct *work)

         device_del(&conn->dev);
         put_device(&conn->dev);
+
         hci_dev_put(hdev);
 }

@@ -154,12 +160,8 @@ void hci_conn_init_sysfs(struct hci_conn *conn)

 void hci_conn_add_sysfs(struct hci_conn *conn)
 {
-        struct hci_dev *hdev = conn->hdev;
-
         BT_DBG("conn %p", conn);

-        dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
-
         queue_work(bt_workq, &conn->work_add);
 }

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f8bcc06ae8a0..c4906bbcd60e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -502,7 +502,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
         shinfo->gso_segs = 0;
         shinfo->gso_type = 0;
         shinfo->ip6_frag_id = 0;
+        shinfo->tx_flags.flags = 0;
         shinfo->frag_list = NULL;
+        memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));

         memset(skb, 0, offsetof(struct sk_buff, tail));
         skb->data = skb->head + NET_SKB_PAD;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index b2cf91e4ccaa..9d26a3da37e5 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -407,7 +407,7 @@ config INET_XFRM_MODE_BEET
           If unsure, say Y.

 config INET_LRO
-        tristate "Large Receive Offload (ipv4/tcp)"
+        bool "Large Receive Offload (ipv4/tcp)"

         ---help---
           Support for Large Receive Offload (ipv4/tcp).
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index 14e6724d5672..91490ad9302c 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -50,14 +50,14 @@ ipv6header_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
         struct ipv6_opt_hdr _hdr;
         int hdrlen;

-                /* Is there enough space for the next ext header? */
-                if (len < (int)sizeof(struct ipv6_opt_hdr))
-                        return false;
                 /* No more exthdr -> evaluate */
                 if (nexthdr == NEXTHDR_NONE) {
                         temp |= MASK_NONE;
                         break;
                 }
+                /* Is there enough space for the next ext header? */
+                if (len < (int)sizeof(struct ipv6_opt_hdr))
+                        return false;
                 /* ESP -> evaluate */
                 if (nexthdr == NEXTHDR_ESP) {
                         temp |= MASK_ESP;
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 3824990d340b..d9233ec50610 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -476,8 +476,8 @@ minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
                 return NULL;

         for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
-                sband = hw->wiphy->bands[hw->conf.channel->band];
-                if (sband->n_bitrates > max_rates)
+                sband = hw->wiphy->bands[i];
+                if (sband && sband->n_bitrates > max_rates)
                         max_rates = sband->n_bitrates;
         }

diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index b16801cde06f..8bef9a1262ff 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -317,13 +317,44 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
                                     struct ieee80211_sta *sta, void *priv_sta)
 {
         struct rc_pid_sta_info *spinfo = priv_sta;
+        struct rc_pid_info *pinfo = priv;
+        struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
         struct sta_info *si;
+        int i, j, tmp;
+        bool s;

         /* TODO: This routine should consider using RSSI from previous packets
          * as we need to have IEEE 802.1X auth succeed immediately after assoc..
          * Until that method is implemented, we will use the lowest supported
          * rate as a workaround. */

+        /* Sort the rates. This is optimized for the most common case (i.e.
+         * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
+         * mapping too. */
+        for (i = 0; i < sband->n_bitrates; i++) {
+                rinfo[i].index = i;
+                rinfo[i].rev_index = i;
+                if (RC_PID_FAST_START)
+                        rinfo[i].diff = 0;
+                else
+                        rinfo[i].diff = i * pinfo->norm_offset;
+        }
+        for (i = 1; i < sband->n_bitrates; i++) {
+                s = 0;
+                for (j = 0; j < sband->n_bitrates - i; j++)
+                        if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
+                                     sband->bitrates[rinfo[j + 1].index].bitrate)) {
+                                tmp = rinfo[j].index;
+                                rinfo[j].index = rinfo[j + 1].index;
+                                rinfo[j + 1].index = tmp;
+                                rinfo[rinfo[j].index].rev_index = j;
+                                rinfo[rinfo[j + 1].index].rev_index = j + 1;
+                                s = 1;
+                        }
+                if (!s)
+                        break;
+        }
+
         spinfo->txrate_idx = rate_lowest_index(sband, sta);
         /* HACK */
         si = container_of(sta, struct sta_info, sta);
@@ -336,21 +367,22 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
         struct rc_pid_info *pinfo;
         struct rc_pid_rateinfo *rinfo;
         struct ieee80211_supported_band *sband;
-        int i, j, tmp;
-        bool s;
+        int i, max_rates = 0;
 #ifdef CONFIG_MAC80211_DEBUGFS
         struct rc_pid_debugfs_entries *de;
 #endif

-        sband = hw->wiphy->bands[hw->conf.channel->band];
-
         pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
         if (!pinfo)
                 return NULL;

-        /* We can safely assume that sband won't change unless we get
-         * reinitialized. */
-        rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC);
+        for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+                sband = hw->wiphy->bands[i];
+                if (sband && sband->n_bitrates > max_rates)
+                        max_rates = sband->n_bitrates;
+        }
+
+        rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC);
         if (!rinfo) {
                 kfree(pinfo);
                 return NULL;
@@ -368,33 +400,6 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
         pinfo->rinfo = rinfo;
         pinfo->oldrate = 0;

-        /* Sort the rates. This is optimized for the most common case (i.e.
-         * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
-         * mapping too. */
-        for (i = 0; i < sband->n_bitrates; i++) {
-                rinfo[i].index = i;
-                rinfo[i].rev_index = i;
-                if (RC_PID_FAST_START)
-                        rinfo[i].diff = 0;
-                else
-                        rinfo[i].diff = i * pinfo->norm_offset;
-        }
-        for (i = 1; i < sband->n_bitrates; i++) {
-                s = 0;
-                for (j = 0; j < sband->n_bitrates - i; j++)
-                        if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
-                                     sband->bitrates[rinfo[j + 1].index].bitrate)) {
-                                tmp = rinfo[j].index;
-                                rinfo[j].index = rinfo[j + 1].index;
-                                rinfo[j + 1].index = tmp;
-                                rinfo[rinfo[j].index].rev_index = j;
-                                rinfo[rinfo[j + 1].index].rev_index = j + 1;
-                                s = 1;
-                        }
-                if (!s)
-                        break;
-        }
-
 #ifdef CONFIG_MAC80211_DEBUGFS
         de = &pinfo->dentries;
         de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3fb04a86444d..63656266d567 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -772,7 +772,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
         hdrlen = ieee80211_hdrlen(hdr->frame_control);

         /* internal error, why is TX_FRAGMENTED set? */
-        if (WARN_ON(skb->len <= frag_threshold))
+        if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
                 return TX_DROP;

         /*
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 60aba45023ff..77bfdfeb966e 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -260,7 +260,10 @@ struct ip_vs_conn *ip_vs_ct_in_get
         list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
                 if (cp->af == af &&
                     ip_vs_addr_equal(af, s_addr, &cp->caddr) &&
-                    ip_vs_addr_equal(af, d_addr, &cp->vaddr) &&
+                    /* protocol should only be IPPROTO_IP if
+                     * d_addr is a fwmark */
+                    ip_vs_addr_equal(protocol == IPPROTO_IP ? AF_UNSPEC : af,
+                                     d_addr, &cp->vaddr) &&
                     s_port == cp->cport && d_port == cp->vport &&
                     cp->flags & IP_VS_CONN_F_TEMPLATE &&
                     protocol == cp->protocol) {
@@ -698,7 +701,9 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
         cp->cport = cport;
         ip_vs_addr_copy(af, &cp->vaddr, vaddr);
         cp->vport = vport;
-        ip_vs_addr_copy(af, &cp->daddr, daddr);
+        /* proto should only be IPPROTO_IP if d_addr is a fwmark */
+        ip_vs_addr_copy(proto == IPPROTO_IP ? AF_UNSPEC : af,
+                        &cp->daddr, daddr);
         cp->dport = dport;
         cp->flags = flags;
         spin_lock_init(&cp->lock);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index cb3e031335eb..8dddb17a947a 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -278,7 +278,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
          */
         if (svc->fwmark) {
                 union nf_inet_addr fwmark = {
-                        .all = { 0, 0, 0, htonl(svc->fwmark) }
+                        .ip = htonl(svc->fwmark)
                 };

                 ct = ip_vs_ct_in_get(svc->af, IPPROTO_IP, &snet, 0,
@@ -306,7 +306,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
          */
         if (svc->fwmark) {
                 union nf_inet_addr fwmark = {
-                        .all = { 0, 0, 0, htonl(svc->fwmark) }
+                        .ip = htonl(svc->fwmark)
                 };

                 ct = ip_vs_conn_new(svc->af, IPPROTO_IP,
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index f13fc57e1ecb..c523f0b8cee5 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1186,28 +1186,6 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[])
         return 0;
 }

-static inline void
-ctnetlink_event_report(struct nf_conn *ct, u32 pid, int report)
-{
-        unsigned int events = 0;
-
-        if (test_bit(IPS_EXPECTED_BIT, &ct->status))
-                events |= IPCT_RELATED;
-        else
-                events |= IPCT_NEW;
-
-        nf_conntrack_event_report(IPCT_STATUS |
-                                  IPCT_HELPER |
-                                  IPCT_REFRESH |
-                                  IPCT_PROTOINFO |
-                                  IPCT_NATSEQADJ |
-                                  IPCT_MARK |
-                                  events,
-                                  ct,
-                                  pid,
-                                  report);
-}
-
 static struct nf_conn *
 ctnetlink_create_conntrack(struct nlattr *cda[],
                            struct nf_conntrack_tuple *otuple,
@@ -1373,6 +1351,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
                 err = -ENOENT;
                 if (nlh->nlmsg_flags & NLM_F_CREATE) {
                         struct nf_conn *ct;
+                        enum ip_conntrack_events events;

                         ct = ctnetlink_create_conntrack(cda, &otuple,
                                                         &rtuple, u3);
@@ -1383,9 +1362,18 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
                         err = 0;
                         nf_conntrack_get(&ct->ct_general);
                         spin_unlock_bh(&nf_conntrack_lock);
-                        ctnetlink_event_report(ct,
-                                               NETLINK_CB(skb).pid,
-                                               nlmsg_report(nlh));
+                        if (test_bit(IPS_EXPECTED_BIT, &ct->status))
+                                events = IPCT_RELATED;
+                        else
+                                events = IPCT_NEW;
+
+                        nf_conntrack_event_report(IPCT_STATUS |
+                                                  IPCT_HELPER |
+                                                  IPCT_PROTOINFO |
+                                                  IPCT_NATSEQADJ |
+                                                  IPCT_MARK | events,
+                                                  ct, NETLINK_CB(skb).pid,
+                                                  nlmsg_report(nlh));
                         nf_ct_put(ct);
                 } else
                         spin_unlock_bh(&nf_conntrack_lock);
@@ -1404,9 +1392,13 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
         if (err == 0) {
                 nf_conntrack_get(&ct->ct_general);
                 spin_unlock_bh(&nf_conntrack_lock);
-                ctnetlink_event_report(ct,
-                                       NETLINK_CB(skb).pid,
-                                       nlmsg_report(nlh));
+                nf_conntrack_event_report(IPCT_STATUS |
+                                          IPCT_HELPER |
+                                          IPCT_PROTOINFO |
+                                          IPCT_NATSEQADJ |
+                                          IPCT_MARK,
+                                          ct, NETLINK_CB(skb).pid,
+                                          nlmsg_report(nlh));
                 nf_ct_put(ct);
         } else
                 spin_unlock_bh(&nf_conntrack_lock);
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index 6c4847662b85..69a639f35403 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -135,7 +135,13 @@ static bool xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
 {
         struct xt_cluster_match_info *info = par->matchinfo;

-        if (info->node_mask >= (1 << info->total_nodes)) {
+        if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
+                printk(KERN_ERR "xt_cluster: you have exceeded the maximum "
+                                "number of cluster nodes (%u > %u)\n",
+                                info->total_nodes, XT_CLUSTER_NODES_MAX);
+                return false;
+        }
+        if (info->node_mask >= (1ULL << info->total_nodes)) {
                 printk(KERN_ERR "xt_cluster: this node mask cannot be "
                                 "higher than the total number of nodes\n");
                 return false;
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 92cfc9d7e3b9..69188e8358b4 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -51,7 +51,7 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
                 u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;

                 if (sch->ops == &bfifo_qdisc_ops)
-                        limit *= qdisc_dev(sch)->mtu;
+                        limit *= psched_mtu(qdisc_dev(sch));

                 q->limit = limit;
         } else {
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 629a28764da9..42a6f9f20285 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -265,7 +265,7 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
                 frmr->page_list->page_list[page_no] =
                         ib_dma_map_single(xprt->sc_cm_id->device,
                                           page_address(rqstp->rq_arg.pages[page_no]),
-                                          PAGE_SIZE, DMA_TO_DEVICE);
+                                          PAGE_SIZE, DMA_FROM_DEVICE);
                 if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                          frmr->page_list->page_list[page_no]))
                         goto fatal_err;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 6c26a675435a..8b510c5e8777 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -183,6 +183,7 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,

  fatal_err:
         printk("svcrdma: Error fast registering memory for xprt %p\n", xprt);
+        vec->frmr = NULL;
         svc_rdma_put_frmr(xprt, frmr);
         return -EIO;
 }
@@ -516,6 +517,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
                         "svcrdma: could not post a receive buffer, err=%d."
                         "Closing transport %p.\n", ret, rdma);
                 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
+                svc_rdma_put_frmr(rdma, vec->frmr);
                 svc_rdma_put_context(ctxt, 0);
                 return -ENOTCONN;
         }
@@ -606,6 +608,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
         return 0;

  err:
+        svc_rdma_unmap_dma(ctxt);
         svc_rdma_put_frmr(rdma, vec->frmr);
         svc_rdma_put_context(ctxt, 1);
         return -EIO;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 3d810e7df3fb..4b0c2fa15e0b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -520,8 +520,9 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
         svc_xprt_get(&xprt->sc_xprt);
         ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
         if (ret) {
-                svc_xprt_put(&xprt->sc_xprt);
+                svc_rdma_unmap_dma(ctxt);
                 svc_rdma_put_context(ctxt, 1);
+                svc_xprt_put(&xprt->sc_xprt);
         }
         return ret;

diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index 5d149c1b5f0d..9ad4d893a566 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -149,7 +149,8 @@ struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
         }
         result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg);
         if (result < 0) {
-                dev_err(dev, "no memory to add payload in attribute\n");
+                dev_err(dev, "no memory to add payload (msg %p size %zu) in "
+                        "attribute: %d\n", msg, size, result);
                 goto error_nla_put;
         }
         genlmsg_end(skb, genl_msg);
@@ -299,10 +300,10 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
         struct sk_buff *skb;

         skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
-        if (skb == NULL)
-                goto error_msg_new;
-        result = wimax_msg_send(wimax_dev, skb);
-error_msg_new:
+        if (IS_ERR(skb))
+                result = PTR_ERR(skb);
+        else
+                result = wimax_msg_send(wimax_dev, skb);
         return result;
 }
 EXPORT_SYMBOL_GPL(wimax_msg);
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index a0ee76b52510..933e1422b09f 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -338,8 +338,21 @@ out:
  */
 void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
 {
+        /*
+         * A driver cannot take the wimax_dev out of the
+         * __WIMAX_ST_NULL state unless by calling wimax_dev_add(). If
+         * the wimax_dev's state is still NULL, we ignore any request
+         * to change its state because it means it hasn't been yet
+         * registered.
+         *
+         * There is no need to complain about it, as routines that
+         * call this might be shared from different code paths that
+         * are called before or after wimax_dev_add() has done its
+         * job.
+         */
         mutex_lock(&wimax_dev->mutex);
-        __wimax_state_change(wimax_dev, new_state);
+        if (wimax_dev->state > __WIMAX_ST_NULL)
+                __wimax_state_change(wimax_dev, new_state);
         mutex_unlock(&wimax_dev->mutex);
         return;
 }
@@ -376,7 +389,7 @@ EXPORT_SYMBOL_GPL(wimax_state_get);
 void wimax_dev_init(struct wimax_dev *wimax_dev)
 {
         INIT_LIST_HEAD(&wimax_dev->id_table_node);
-        __wimax_state_set(wimax_dev, WIMAX_ST_UNINITIALIZED);
+        __wimax_state_set(wimax_dev, __WIMAX_ST_NULL);
         mutex_init(&wimax_dev->mutex);
         mutex_init(&wimax_dev->mutex_reset);
 }
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 6c1993d99902..08265ca15785 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -907,6 +907,7 @@ EXPORT_SYMBOL(freq_reg_info);
 int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth,
                   const struct ieee80211_reg_rule **reg_rule)
 {
+        assert_cfg80211_lock();
         return freq_reg_info_regd(wiphy, center_freq,
                 bandwidth, reg_rule, NULL);
 }
@@ -1133,7 +1134,8 @@ static bool reg_is_world_roaming(struct wiphy *wiphy)
         if (is_world_regdom(cfg80211_regdomain->alpha2) ||
             (wiphy->regd && is_world_regdom(wiphy->regd->alpha2)))
                 return true;
-        if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+        if (last_request &&
+            last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
             wiphy->custom_regulatory)
                 return true;
         return false;
@@ -1142,6 +1144,12 @@ static bool reg_is_world_roaming(struct wiphy *wiphy)
 /* Reap the advantages of previously found beacons */
 static void reg_process_beacons(struct wiphy *wiphy)
 {
+        /*
+         * Means we are just firing up cfg80211, so no beacons would
+         * have been processed yet.
+         */
+        if (!last_request)
+                return;
         if (!reg_is_world_roaming(wiphy))
                 return;
         wiphy_update_beacon_reg(wiphy);
@@ -1176,6 +1184,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
         struct ieee80211_supported_band *sband;
         struct ieee80211_channel *chan;

+        assert_cfg80211_lock();
+
         sband = wiphy->bands[band];
         BUG_ON(chan_idx >= sband->n_channels);
         chan = &sband->channels[chan_idx];
@@ -1214,10 +1224,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
                                    const struct ieee80211_regdomain *regd)
 {
         enum ieee80211_band band;
+
+        mutex_lock(&cfg80211_mutex);
         for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
                 if (wiphy->bands[band])
                         handle_band_custom(wiphy, band, regd);
         }
+        mutex_unlock(&cfg80211_mutex);
 }
 EXPORT_SYMBOL(wiphy_apply_custom_regulatory);

@@ -1423,7 +1436,7 @@ new_request:
         return call_crda(last_request->alpha2);
 }

-/* This currently only processes user and driver regulatory hints */
+/* This processes *all* regulatory hints */
 static void reg_process_hint(struct regulatory_request *reg_request)
 {
         int r = 0;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 2ae65b39b529..1f260c40b6ca 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -395,6 +395,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                                 memcpy(ies, res->pub.information_elements, ielen);
                                 found->ies_allocated = true;
                                 found->pub.information_elements = ies;
+                                found->pub.len_information_elements = ielen;
                         }
                 }
         }