author    Ingo Molnar <mingo@elte.hu>  2009-05-07 05:17:13 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-05-07 05:17:34 -0400
commit    44347d947f628060b92449702071bfe1d31dfb75 (patch)
tree      c6ed74610d5b3295df4296659f80f5feb94b28cc /net
parent    d94fc523f3c35bd8013f04827e94756cbc0212f4 (diff)
parent    413f81eba35d6ede9289b0c8a920c013a84fac71 (diff)
Merge branch 'linus' into tracing/core
Merge reason: tracing/core was on a .30-rc1 base and was missing out on a
handful of tracing fixes present in .30-rc5-almost.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'net')
-rw-r--r--  net/802/tr.c                                |   3
-rw-r--r--  net/8021q/vlan.c                            |   2
-rw-r--r--  net/8021q/vlan_core.c                       |   4
-rw-r--r--  net/8021q/vlan_dev.c                        |   8
-rw-r--r--  net/9p/client.c                             | 114
-rw-r--r--  net/9p/trans_fd.c                           |  14
-rw-r--r--  net/9p/trans_rdma.c                         |   1
-rw-r--r--  net/9p/trans_virtio.c                       |   1
-rw-r--r--  net/atm/br2684.c                            |   1
-rw-r--r--  net/ax25/ax25_uid.c                         |  12
-rw-r--r--  net/bluetooth/hci_conn.c                    |  12
-rw-r--r--  net/bluetooth/hci_event.c                   |  74
-rw-r--r--  net/bluetooth/hci_sysfs.c                   |  87
-rw-r--r--  net/bluetooth/rfcomm/core.c                 |   2
-rw-r--r--  net/bridge/br_netfilter.c                   |  10
-rw-r--r--  net/can/af_can.c                            |   4
-rw-r--r--  net/core/datagram.c                         |  14
-rw-r--r--  net/core/dev.c                              |  31
-rw-r--r--  net/core/skbuff.c                           |  27
-rw-r--r--  net/ipv4/netfilter/arp_tables.c             | 125
-rw-r--r--  net/ipv4/netfilter/ip_tables.c              | 126
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c            |   3
-rw-r--r--  net/ipv4/route.c                            |   2
-rw-r--r--  net/ipv4/tcp.c                              |   6
-rw-r--r--  net/ipv4/tcp_input.c                        |  13
-rw-r--r--  net/ipv4/tcp_output.c                       |   2
-rw-r--r--  net/ipv4/udp.c                              |   3
-rw-r--r--  net/ipv6/ipv6_sockglue.c                    |   4
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c             | 123
-rw-r--r--  net/ipv6/udp.c                              |   6
-rw-r--r--  net/iucv/af_iucv.c                          |  24
-rw-r--r--  net/mac80211/Kconfig                        |   7
-rw-r--r--  net/mac80211/main.c                         |  22
-rw-r--r--  net/mac80211/mlme.c                         |  38
-rw-r--r--  net/mac80211/pm.c                           |  15
-rw-r--r--  net/mac80211/rx.c                           |  15
-rw-r--r--  net/mac80211/wext.c                         |  43
-rw-r--r--  net/netfilter/Kconfig                       |   5
-rw-r--r--  net/netfilter/nf_conntrack_helper.c         |   2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c        |  14
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c     |  16
-rw-r--r--  net/netfilter/nf_conntrack_proto_udplite.c  |   1
-rw-r--r--  net/netfilter/nf_log.c                      |   4
-rw-r--r--  net/netfilter/nfnetlink.c                   |   2
-rw-r--r--  net/netfilter/x_tables.c                    |  53
-rw-r--r--  net/netfilter/xt_recent.c                   |   9
-rw-r--r--  net/netlabel/netlabel_addrlist.c            |  26
-rw-r--r--  net/netrom/af_netrom.c                      |   6
-rw-r--r--  net/packet/af_packet.c                      |   5
-rw-r--r--  net/rds/rds.h                               |   2
-rw-r--r--  net/rose/af_rose.c                          |  10
-rw-r--r--  net/sched/cls_api.c                         |   2
-rw-r--r--  net/sched/em_meta.c                         |   6
-rw-r--r--  net/sched/sch_netem.c                       |   8
-rw-r--r--  net/sunrpc/xprt.c                           |   6
-rw-r--r--  net/sunrpc/xprtsock.c                       |  26
-rw-r--r--  net/wireless/core.h                         |   2
-rw-r--r--  net/wireless/nl80211.c                      |   4
-rw-r--r--  net/wireless/reg.c                          |   5
-rw-r--r--  net/wireless/scan.c                         |  40
-rw-r--r--  net/xfrm/xfrm_state.c                       |   6
61 files changed, 662 insertions(+), 596 deletions(-)
diff --git a/net/802/tr.c b/net/802/tr.c
index e7eb13084d71..e874447ad144 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -561,6 +561,9 @@ static int rif_seq_show(struct seq_file *seq, void *v)
            }
            seq_putc(seq, '\n');
        }
+
+       if (dev)
+           dev_put(dev);
    }
    return 0;
 }
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 2b7390e377b3..d1e10546eb85 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -492,6 +492,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                continue;
 
            dev_change_flags(vlandev, flgs & ~IFF_UP);
+           vlan_transfer_operstate(dev, vlandev);
        }
        break;
 
@@ -507,6 +508,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                continue;
 
            dev_change_flags(vlandev, flgs | IFF_UP);
+           vlan_transfer_operstate(dev, vlandev);
        }
        break;
 
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 654e45f5719d..c67fe6f75653 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -121,8 +121,10 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
    if (!skb)
        return NET_RX_DROP;
 
-   if (netpoll_rx_on(skb))
+   if (netpoll_rx_on(skb)) {
+       skb->protocol = eth_type_trans(skb, skb->dev);
        return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);
+   }
 
    return napi_frags_finish(napi, skb,
                 vlan_gro_common(napi, grp, vlan_tci, skb));
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 1b34135cf990..b4b9068e55a7 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -462,6 +462,7 @@ static int vlan_dev_open(struct net_device *dev)
    if (vlan->flags & VLAN_FLAG_GVRP)
        vlan_gvrp_request_join(dev);
 
+   netif_carrier_on(dev);
    return 0;
 
 clear_allmulti:
@@ -471,6 +472,7 @@ del_unicast:
    if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
        dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN);
 out:
+   netif_carrier_off(dev);
    return err;
 }
 
@@ -492,6 +494,7 @@ static int vlan_dev_stop(struct net_device *dev)
    if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
        dev_unicast_delete(real_dev, dev->dev_addr, dev->addr_len);
 
+   netif_carrier_off(dev);
    return 0;
 }
 
@@ -612,6 +615,8 @@ static int vlan_dev_init(struct net_device *dev)
    struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
    int subclass = 0;
 
+   netif_carrier_off(dev);
+
    /* IFF_BROADCAST|IFF_MULTICAST; ??? */
    dev->flags  = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
    dev->iflink = real_dev->ifindex;
@@ -668,7 +673,8 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
    const struct vlan_dev_info *vlan = vlan_dev_info(dev);
    struct net_device *real_dev = vlan->real_dev;
 
-   if (!real_dev->ethtool_ops->get_settings)
+   if (!real_dev->ethtool_ops ||
+       !real_dev->ethtool_ops->get_settings)
        return -EOPNOTSUPP;
 
    return real_dev->ethtool_ops->get_settings(real_dev, cmd);
diff --git a/net/9p/client.c b/net/9p/client.c
index 1eb580c38fbb..dd43a8289b0d 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -203,7 +203,6 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
    p9pdu_reset(req->tc);
    p9pdu_reset(req->rc);
 
-   req->flush_tag = 0;
    req->tc->tag = tag-1;
    req->status = REQ_STATUS_ALLOC;
 
@@ -324,35 +323,9 @@ static void p9_free_req(struct p9_client *c, struct p9_req_t *r)
  */
 void p9_client_cb(struct p9_client *c, struct p9_req_t *req)
 {
-   struct p9_req_t *other_req;
-   unsigned long flags;
-
    P9_DPRINTK(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);
-
-   if (req->status == REQ_STATUS_ERROR)
-       wake_up(req->wq);
-
-   if (req->flush_tag) {          /* flush receive path */
-       P9_DPRINTK(P9_DEBUG_9P, "<<< RFLUSH %d\n", req->tc->tag);
-       spin_lock_irqsave(&c->lock, flags);
-       other_req = p9_tag_lookup(c, req->flush_tag);
-       if (other_req->status != REQ_STATUS_FLSH) /* stale flush */
-           spin_unlock_irqrestore(&c->lock, flags);
-       else {
-           other_req->status = REQ_STATUS_FLSHD;
-           spin_unlock_irqrestore(&c->lock, flags);
-           wake_up(other_req->wq);
-       }
-       p9_free_req(c, req);
-   } else {                       /* normal receive path */
-       P9_DPRINTK(P9_DEBUG_MUX, "normal: tag %d\n", req->tc->tag);
-       spin_lock_irqsave(&c->lock, flags);
-       if (req->status != REQ_STATUS_FLSHD)
-           req->status = REQ_STATUS_RCVD;
-       spin_unlock_irqrestore(&c->lock, flags);
-       wake_up(req->wq);
-       P9_DPRINTK(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
-   }
+   wake_up(req->wq);
+   P9_DPRINTK(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
 }
 EXPORT_SYMBOL(p9_client_cb);
 
@@ -486,9 +459,15 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
    if (IS_ERR(req))
        return PTR_ERR(req);
 
-   req->flush_tag = oldtag;
 
-   /* we don't free anything here because RPC isn't complete */
+   /* if we haven't received a response for oldreq,
+      remove it from the list. */
+   spin_lock(&c->lock);
+   if (oldreq->status == REQ_STATUS_FLSH)
+       list_del(&oldreq->req_list);
+   spin_unlock(&c->lock);
+
+   p9_free_req(c, req);
    return 0;
 }
 
@@ -509,7 +488,6 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
    struct p9_req_t *req;
    unsigned long flags;
    int sigpending;
-   int flushed = 0;
 
    P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);
 
@@ -546,42 +524,28 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
        goto reterr;
    }
 
-   /* if it was a flush we just transmitted, return our tag */
-   if (type == P9_TFLUSH)
-       return req;
-again:
    P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d\n", req->wq, tag);
    err = wait_event_interruptible(*req->wq,
                        req->status >= REQ_STATUS_RCVD);
-   P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d (flushed=%d)\n",
-           req->wq, tag, err, flushed);
+   P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d\n",
+           req->wq, tag, err);
 
    if (req->status == REQ_STATUS_ERROR) {
        P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
        err = req->t_err;
-   } else if (err == -ERESTARTSYS && flushed) {
-       P9_DPRINTK(P9_DEBUG_MUX, "flushed - going again\n");
-       goto again;
-   } else if (req->status == REQ_STATUS_FLSHD) {
-       P9_DPRINTK(P9_DEBUG_MUX, "flushed - erestartsys\n");
-       err = -ERESTARTSYS;
    }
 
-   if ((err == -ERESTARTSYS) && (c->status == Connected) && (!flushed)) {
+   if ((err == -ERESTARTSYS) && (c->status == Connected)) {
        P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
-       spin_lock_irqsave(&c->lock, flags);
-       if (req->status == REQ_STATUS_SENT)
-           req->status = REQ_STATUS_FLSH;
-       spin_unlock_irqrestore(&c->lock, flags);
        sigpending = 1;
-       flushed = 1;
        clear_thread_flag(TIF_SIGPENDING);
 
-       if (c->trans_mod->cancel(c, req)) {
-           err = p9_client_flush(c, req);
-           if (err == 0)
-               goto again;
-       }
+       if (c->trans_mod->cancel(c, req))
+           p9_client_flush(c, req);
+
+       /* if we received the response anyway, don't signal error */
+       if (req->status == REQ_STATUS_RCVD)
+           err = 0;
    }
 
    if (sigpending) {
@@ -1244,19 +1208,53 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
        ret->name, ret->uid, ret->gid, ret->muid, ret->extension,
        ret->n_uid, ret->n_gid, ret->n_muid);
 
+   p9_free_req(clnt, req);
+   return ret;
+
 free_and_error:
    p9_free_req(clnt, req);
 error:
-   return ret;
+   kfree(ret);
+   return ERR_PTR(err);
 }
 EXPORT_SYMBOL(p9_client_stat);
 
+static int p9_client_statsize(struct p9_wstat *wst, int optional)
+{
+   int ret;
+
+   /* size[2] type[2] dev[4] qid[13] */
+   /* mode[4] atime[4] mtime[4] length[8]*/
+   /* name[s] uid[s] gid[s] muid[s] */
+   ret = 2+2+4+13+4+4+4+8+2+2+2+2;
+
+   if (wst->name)
+       ret += strlen(wst->name);
+   if (wst->uid)
+       ret += strlen(wst->uid);
+   if (wst->gid)
+       ret += strlen(wst->gid);
+   if (wst->muid)
+       ret += strlen(wst->muid);
+
+   if (optional) {
+       ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */
+       if (wst->extension)
+           ret += strlen(wst->extension);
+   }
+
+   return ret;
+}
+
 int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
 {
    int err;
    struct p9_req_t *req;
    struct p9_client *clnt;
 
+   err = 0;
+   clnt = fid->clnt;
+   wst->size = p9_client_statsize(wst, clnt->dotu);
    P9_DPRINTK(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid);
    P9_DPRINTK(P9_DEBUG_9P,
        "     sz=%x type=%x dev=%x qid=%x.%llx.%x\n"
@@ -1268,10 +1266,8 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
        wst->atime, wst->mtime, (unsigned long long)wst->length,
        wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
        wst->n_uid, wst->n_gid, wst->n_muid);
-   err = 0;
-   clnt = fid->clnt;
 
-   req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, 0, wst);
+   req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst);
    if (IS_ERR(req)) {
        err = PTR_ERR(req);
        goto error;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index c613ed08a5ee..a2a1814c7a8d 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -213,8 +213,8 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
    spin_unlock_irqrestore(&m->client->lock, flags);
 
    list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
-       list_del(&req->req_list);
        P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req);
+       list_del(&req->req_list);
        p9_client_cb(m->client, req);
    }
 }
@@ -336,7 +336,8 @@ static void p9_read_work(struct work_struct *work)
336 "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag); 336 "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
337 337
338 m->req = p9_tag_lookup(m->client, tag); 338 m->req = p9_tag_lookup(m->client, tag);
339 if (!m->req) { 339 if (!m->req || (m->req->status != REQ_STATUS_SENT &&
340 m->req->status != REQ_STATUS_FLSH)) {
340 P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n", 341 P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
341 tag); 342 tag);
342 err = -EIO; 343 err = -EIO;
@@ -361,10 +362,11 @@ static void p9_read_work(struct work_struct *work)
    if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
        P9_DPRINTK(P9_DEBUG_TRANS, "got new packet\n");
        spin_lock(&m->client->lock);
+       if (m->req->status != REQ_STATUS_ERROR)
+           m->req->status = REQ_STATUS_RCVD;
        list_del(&m->req->req_list);
        spin_unlock(&m->client->lock);
        p9_client_cb(m->client, m->req);
-
        m->rbuf = NULL;
        m->rpos = 0;
        m->rsize = 0;
@@ -454,6 +456,7 @@ static void p9_write_work(struct work_struct *work)
        req = list_entry(m->unsent_req_list.next, struct p9_req_t,
                   req_list);
        req->status = REQ_STATUS_SENT;
+       P9_DPRINTK(P9_DEBUG_TRANS, "move req %p\n", req);
        list_move_tail(&req->req_list, &m->req_list);
 
        m->wbuf = req->tc->sdata;
@@ -683,12 +686,13 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
    P9_DPRINTK(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
 
    spin_lock(&client->lock);
-   list_del(&req->req_list);
 
    if (req->status == REQ_STATUS_UNSENT) {
+       list_del(&req->req_list);
        req->status = REQ_STATUS_FLSHD;
        ret = 0;
-   }
+   } else if (req->status == REQ_STATUS_SENT)
+       req->status = REQ_STATUS_FLSH;
 
    spin_unlock(&client->lock);
 
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 7fa0eb20b2f6..ac4990041ebb 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -295,6 +295,7 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
        goto err_out;
 
    req->rc = c->rc;
+   req->status = REQ_STATUS_RCVD;
    p9_client_cb(client, req);
 
    return;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 2d7781ec663b..bb8579a141a8 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -134,6 +134,7 @@ static void req_done(struct virtqueue *vq)
        P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
        P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
        req = p9_tag_lookup(chan->client, rc->tag);
+       req->status = REQ_STATUS_RCVD;
        p9_client_cb(chan->client, req);
    }
 }
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 334fcd4a4ea4..3100a8940afc 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -549,6 +549,7 @@ static void br2684_setup(struct net_device *netdev)
    struct br2684_dev *brdev = BRPRIV(netdev);
 
    ether_setup(netdev);
+   brdev->net_dev = netdev;
 
    netdev->netdev_ops = &br2684_netdev_ops;
 
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 57aeba729bae..832bcf092a01 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -148,9 +148,13 @@ static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
 {
    struct ax25_uid_assoc *pt;
    struct hlist_node *node;
-   int i = 0;
+   int i = 1;
 
    read_lock(&ax25_uid_lock);
+
+   if (*pos == 0)
+       return SEQ_START_TOKEN;
+
    ax25_uid_for_each(pt, node, &ax25_uid_list) {
        if (i == *pos)
            return pt;
@@ -162,8 +166,10 @@ static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
 static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
    ++*pos;
-
-   return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
+   if (v == SEQ_START_TOKEN)
+       return ax25_uid_list.first;
+   else
+       return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
               ax25_uid_assoc, uid_node);
 }
 
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 1181db08d9de..61309b26f271 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -215,6 +215,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
    conn->state = BT_OPEN;
 
    conn->power_save = 1;
+   conn->disc_timeout = HCI_DISCONN_TIMEOUT;
 
    switch (type) {
    case ACL_LINK:
@@ -247,6 +248,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
    if (hdev->notify)
        hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
 
+   hci_conn_init_sysfs(conn);
+
    tasklet_enable(&hdev->tx_task);
 
    return conn;
@@ -424,12 +427,9 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
    if (sec_level == BT_SECURITY_SDP)
        return 1;
 
-   if (sec_level == BT_SECURITY_LOW) {
-       if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0)
-           return hci_conn_auth(conn, sec_level, auth_type);
-       else
-           return 1;
-   }
+   if (sec_level == BT_SECURITY_LOW &&
+               (!conn->ssp_mode || !conn->hdev->ssp_mode))
+       return 1;
 
    if (conn->link_mode & HCI_LM_ENCRYPT)
        return hci_conn_auth(conn, sec_level, auth_type);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 55534244c3a0..4e7cb88e5da9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -866,8 +866,16 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
    hci_dev_lock(hdev);
 
    conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
-   if (!conn)
-       goto unlock;
+   if (!conn) {
+       if (ev->link_type != SCO_LINK)
+           goto unlock;
+
+       conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
+       if (!conn)
+           goto unlock;
+
+       conn->type = SCO_LINK;
+   }
 
    if (!ev->status) {
        conn->handle = __le16_to_cpu(ev->handle);
@@ -875,6 +883,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
        if (conn->type == ACL_LINK) {
            conn->state = BT_CONFIG;
            hci_conn_hold(conn);
+           conn->disc_timeout = HCI_DISCONN_TIMEOUT;
        } else
            conn->state = BT_CONNECTED;
 
@@ -1055,9 +1064,14 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
            hci_proto_connect_cfm(conn, ev->status);
            hci_conn_put(conn);
        }
-   } else
+   } else {
        hci_auth_cfm(conn, ev->status);
 
+       hci_conn_hold(conn);
+       conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+       hci_conn_put(conn);
+   }
+
    if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
        if (!ev->status) {
            struct hci_cp_set_conn_encrypt cp;
@@ -1471,7 +1485,21 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
 
 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
+   struct hci_ev_pin_code_req *ev = (void *) skb->data;
+   struct hci_conn *conn;
+
    BT_DBG("%s", hdev->name);
+
+   hci_dev_lock(hdev);
+
+   conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+   if (conn) {
+       hci_conn_hold(conn);
+       conn->disc_timeout = HCI_PAIRING_TIMEOUT;
+       hci_conn_put(conn);
+   }
+
+   hci_dev_unlock(hdev);
 }
 
 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1481,7 +1509,21 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
 
 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
+   struct hci_ev_link_key_notify *ev = (void *) skb->data;
+   struct hci_conn *conn;
+
    BT_DBG("%s", hdev->name);
+
+   hci_dev_lock(hdev);
+
+   conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+   if (conn) {
+       hci_conn_hold(conn);
+       conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+       hci_conn_put(conn);
+   }
+
+   hci_dev_unlock(hdev);
 }
 
 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1646,20 +1688,28 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
        conn->type = SCO_LINK;
    }
 
-   if (conn->out && ev->status == 0x1c && conn->attempt < 2) {
-       conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
-                   (hdev->esco_type & EDR_ESCO_MASK);
-       hci_setup_sync(conn, conn->link->handle);
-       goto unlock;
-   }
-
-   if (!ev->status) {
+   switch (ev->status) {
+   case 0x00:
        conn->handle = __le16_to_cpu(ev->handle);
        conn->state = BT_CONNECTED;
 
        hci_conn_add_sysfs(conn);
-   } else
+       break;
+
+   case 0x1c:  /* SCO interval rejected */
+   case 0x1f:  /* Unspecified error */
+       if (conn->out && conn->attempt < 2) {
+           conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
+                   (hdev->esco_type & EDR_ESCO_MASK);
+           hci_setup_sync(conn, conn->link->handle);
+           goto unlock;
+       }
+       /* fall through */
+
+   default:
        conn->state = BT_CLOSED;
+       break;
+   }
 
    hci_proto_connect_cfm(conn, ev->status);
    if (ev->status)
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index ed82796d4a0f..582d8877078c 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -9,8 +9,7 @@
 struct class *bt_class = NULL;
 EXPORT_SYMBOL_GPL(bt_class);
 
-static struct workqueue_struct *btaddconn;
-static struct workqueue_struct *btdelconn;
+static struct workqueue_struct *bt_workq;
 
 static inline char *link_typetostr(int type)
 {
@@ -88,9 +87,10 @@ static struct device_type bt_link = {
 
 static void add_conn(struct work_struct *work)
 {
-   struct hci_conn *conn = container_of(work, struct hci_conn, work);
+   struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
 
-   flush_workqueue(btdelconn);
+   /* ensure previous del is complete */
+   flush_work(&conn->work_del);
 
    if (device_add(&conn->dev) < 0) {
        BT_ERR("Failed to register connection device");
@@ -98,27 +98,6 @@ static void add_conn(struct work_struct *work)
98 } 98 }
99} 99}
100 100
101void hci_conn_add_sysfs(struct hci_conn *conn)
102{
103 struct hci_dev *hdev = conn->hdev;
104
105 BT_DBG("conn %p", conn);
106
107 conn->dev.type = &bt_link;
108 conn->dev.class = bt_class;
109 conn->dev.parent = &hdev->dev;
110
111 dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
112
113 dev_set_drvdata(&conn->dev, conn);
114
115 device_initialize(&conn->dev);
116
117 INIT_WORK(&conn->work, add_conn);
118
119 queue_work(btaddconn, &conn->work);
120}
121
122/* 101/*
123 * The rfcomm tty device will possibly retain even when conn 102 * The rfcomm tty device will possibly retain even when conn
124 * is down, and sysfs doesn't support move zombie device, 103 * is down, and sysfs doesn't support move zombie device,
@@ -131,9 +110,15 @@ static int __match_tty(struct device *dev, void *data)
 
 static void del_conn(struct work_struct *work)
 {
-   struct hci_conn *conn = container_of(work, struct hci_conn, work);
+   struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
    struct hci_dev *hdev = conn->hdev;
 
+   /* ensure previous add is complete */
+   flush_work(&conn->work_add);
+
+   if (!device_is_registered(&conn->dev))
+       return;
+
    while (1) {
        struct device *dev;
 
@@ -149,16 +134,40 @@ static void del_conn(struct work_struct *work)
    hci_dev_put(hdev);
 }
 
-void hci_conn_del_sysfs(struct hci_conn *conn)
+void hci_conn_init_sysfs(struct hci_conn *conn)
 {
+   struct hci_dev *hdev = conn->hdev;
+
    BT_DBG("conn %p", conn);
 
-   if (!device_is_registered(&conn->dev))
-       return;
+   conn->dev.type = &bt_link;
+   conn->dev.class = bt_class;
+   conn->dev.parent = &hdev->dev;
+
+   dev_set_drvdata(&conn->dev, conn);
+
+   device_initialize(&conn->dev);
+
+   INIT_WORK(&conn->work_add, add_conn);
+   INIT_WORK(&conn->work_del, del_conn);
+}
+
+void hci_conn_add_sysfs(struct hci_conn *conn)
+{
+   struct hci_dev *hdev = conn->hdev;
+
+   BT_DBG("conn %p", conn);
 
-   INIT_WORK(&conn->work, del_conn);
+   dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
 
-   queue_work(btdelconn, &conn->work);
+   queue_work(bt_workq, &conn->work_add);
+}
+
+void hci_conn_del_sysfs(struct hci_conn *conn)
+{
+   BT_DBG("conn %p", conn);
+
+   queue_work(bt_workq, &conn->work_del);
 }
 
 static inline char *host_typetostr(int type)
@@ -435,20 +444,13 @@ void hci_unregister_sysfs(struct hci_dev *hdev)
 
 int __init bt_sysfs_init(void)
 {
-   btaddconn = create_singlethread_workqueue("btaddconn");
-   if (!btaddconn)
+   bt_workq = create_singlethread_workqueue("bluetooth");
+   if (!bt_workq)
        return -ENOMEM;
 
-   btdelconn = create_singlethread_workqueue("btdelconn");
-   if (!btdelconn) {
-       destroy_workqueue(btaddconn);
-       return -ENOMEM;
-   }
-
    bt_class = class_create(THIS_MODULE, "bluetooth");
    if (IS_ERR(bt_class)) {
-       destroy_workqueue(btdelconn);
-       destroy_workqueue(btaddconn);
+       destroy_workqueue(bt_workq);
        return PTR_ERR(bt_class);
    }
 
@@ -457,8 +459,7 @@ int __init bt_sysfs_init(void)
 
 void bt_sysfs_cleanup(void)
 {
-   destroy_workqueue(btaddconn);
-   destroy_workqueue(btdelconn);
+   destroy_workqueue(bt_workq);
 
    class_destroy(bt_class);
 }
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 1d0fb0f23c63..374536e050aa 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1194,6 +1194,8 @@ void rfcomm_dlc_accept(struct rfcomm_dlc *d)
 
    rfcomm_send_ua(d->session, d->dlci);
 
+   rfcomm_dlc_clear_timer(d);
+
    rfcomm_dlc_lock(d);
    d->state = BT_CONNECTED;
    d->state_change(d, 0);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 3953ac4214c8..e4a418fcb35b 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -788,15 +788,23 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff *skb,
    return NF_STOLEN;
 }
 
+#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
-   if (skb->protocol == htons(ETH_P_IP) &&
+   if (skb->nfct != NULL &&
+       (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb)) &&
        skb->len > skb->dev->mtu &&
        !skb_is_gso(skb))
        return ip_fragment(skb, br_dev_queue_push_xmit);
    else
        return br_dev_queue_push_xmit(skb);
 }
+#else
+static int br_nf_dev_queue_xmit(struct sk_buff *skb)
+{
+   return br_dev_queue_push_xmit(skb);
+}
+#endif
 
 /* PF_BRIDGE/POST_ROUTING ********************************************/
 static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 547bafc79e28..10f0528c3bf5 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -674,8 +674,8 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
 
    rcu_read_unlock();
 
-   /* free the skbuff allocated by the netdevice driver */
-   kfree_skb(skb);
+   /* consume the skbuff allocated by the netdevice driver */
+   consume_skb(skb);
 
    if (matches > 0) {
        can_stats.matches++;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index d0de644b378d..b01a76abe1d2 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -64,13 +64,25 @@ static inline int connection_based(struct sock *sk)
    return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
 }
 
+static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+                 void *key)
+{
+   unsigned long bits = (unsigned long)key;
+
+   /*
+    * Avoid a wakeup if event not interesting for us
+    */
+   if (bits && !(bits & (POLLIN | POLLERR)))
+       return 0;
+   return autoremove_wake_function(wait, mode, sync, key);
+}
 /*
  * Wait for a packet..
  */
 static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
 {
    int error;
-   DEFINE_WAIT(wait);
+   DEFINE_WAIT_FUNC(wait, receiver_wake_function);
 
    prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 91d792d17e09..e2e9e4af3ace 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1336,7 +1336,12 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 {
    struct packet_type *ptype;
 
+#ifdef CONFIG_NET_CLS_ACT
+   if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
+       net_timestamp(skb);
+#else
    net_timestamp(skb);
+#endif
 
    rcu_read_lock();
    list_for_each_entry_rcu(ptype, &ptype_all, list) {
@@ -1430,7 +1435,7 @@ void netif_device_detach(struct net_device *dev)
 {
    if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
        netif_running(dev)) {
-       netif_stop_queue(dev);
+       netif_tx_stop_all_queues(dev);
    }
 }
 EXPORT_SYMBOL(netif_device_detach);
@@ -1445,7 +1450,7 @@ void netif_device_attach(struct net_device *dev)
 {
    if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
        netif_running(dev)) {
-       netif_wake_queue(dev);
+       netif_tx_wake_all_queues(dev);
        __netdev_watchdog_up(dev);
    }
 }
@@ -1730,11 +1735,12 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 {
    u32 hash;
 
-   if (skb_rx_queue_recorded(skb)) {
-       hash = skb_get_rx_queue(skb);
-   } else if (skb->sk && skb->sk->sk_hash) {
+   if (skb_rx_queue_recorded(skb))
+       return skb_get_rx_queue(skb) % dev->real_num_tx_queues;
+
+   if (skb->sk && skb->sk->sk_hash)
        hash = skb->sk->sk_hash;
-   } else
+   else
        hash = skb->protocol;
 
    hash = jhash_1word(hash, skb_tx_hashrnd);
@@ -2328,8 +2334,10 @@ static int napi_gro_complete(struct sk_buff *skb)
    struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
    int err = -ENOENT;
 
-   if (NAPI_GRO_CB(skb)->count == 1)
+   if (NAPI_GRO_CB(skb)->count == 1) {
+       skb_shinfo(skb)->gso_size = 0;
        goto out;
+   }
 
    rcu_read_lock();
    list_for_each_entry_rcu(ptype, head, list) {
@@ -2348,7 +2356,6 @@ static int napi_gro_complete(struct sk_buff *skb)
    }
 
 out:
-   skb_shinfo(skb)->gso_size = 0;
    return netif_receive_skb(skb);
 }
 
@@ -2539,9 +2546,9 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
    }
 
    BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
-   frag = &info->frags[info->nr_frags - 1];
+   frag = info->frags;
 
-   for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) {
+   for (i = 0; i < info->nr_frags; i++) {
        skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
                   frag->size);
        frag++;
@@ -4399,7 +4406,7 @@ int register_netdevice(struct net_device *dev)
    dev->iflink = -1;
 
 #ifdef CONFIG_COMPAT_NET_DEV_OPS
-   /* Netdevice_ops API compatiability support.
+   /* Netdevice_ops API compatibility support.
     * This is temporary until all network devices are converted.
     */
    if (dev->netdev_ops) {
@@ -4410,7 +4417,7 @@ int register_netdevice(struct net_device *dev)
            dev->name, netdev_drivername(dev, drivername, 64));
 
        /* This works only because net_device_ops and the
-          compatiablity structure are the same. */
+          compatibility structure are the same. */
        dev->netdev_ops = (void *) &(dev->init);
    }
 #endif
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 12806b844456..f8bcc06ae8a0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1365,9 +1365,8 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 
 static inline struct page *linear_to_page(struct page *page, unsigned int *len,
                      unsigned int *offset,
-                     struct sk_buff *skb)
+                     struct sk_buff *skb, struct sock *sk)
 {
-   struct sock *sk = skb->sk;
    struct page *p = sk->sk_sndmsg_page;
    unsigned int off;
 
@@ -1405,13 +1404,14 @@ new_page:
  */
 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
                unsigned int *len, unsigned int offset,
-               struct sk_buff *skb, int linear)
+               struct sk_buff *skb, int linear,
+               struct sock *sk)
 {
    if (unlikely(spd->nr_pages == PIPE_BUFFERS))
        return 1;
 
    if (linear) {
-       page = linear_to_page(page, len, &offset, skb);
+       page = linear_to_page(page, len, &offset, skb, sk);
        if (!page)
            return 1;
    } else
@@ -1442,7 +1442,8 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
 static inline int __splice_segment(struct page *page, unsigned int poff,
                   unsigned int plen, unsigned int *off,
                   unsigned int *len, struct sk_buff *skb,
-                  struct splice_pipe_desc *spd, int linear)
+                  struct splice_pipe_desc *spd, int linear,
+                  struct sock *sk)
 {
    if (!*len)
        return 1;
@@ -1465,7 +1466,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
    /* the linear region may spread across several pages  */
    flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
-   if (spd_fill_page(spd, page, &flen, poff, skb, linear))
+   if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
        return 1;
 
    __segment_seek(&page, &poff, &plen, flen);
@@ -1481,8 +1482,8 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
  * pipe is full or if we already spliced the requested length.
  */
 static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
-                unsigned int *len,
-                struct splice_pipe_desc *spd)
+                unsigned int *len, struct splice_pipe_desc *spd,
+                struct sock *sk)
 {
    int seg;
 
@@ -1492,7 +1493,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
    if (__splice_segment(virt_to_page(skb->data),
                 (unsigned long) skb->data & (PAGE_SIZE - 1),
                 skb_headlen(skb),
-                offset, len, skb, spd, 1))
+                offset, len, skb, spd, 1, sk))
        return 1;
 
    /*
@@ -1502,7 +1503,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
        const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
        if (__splice_segment(f->page, f->page_offset, f->size,
-                    offset, len, skb, spd, 0))
+                    offset, len, skb, spd, 0, sk))
            return 1;
    }
 
@@ -1528,12 +1529,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
        .ops = &sock_pipe_buf_ops,
        .spd_release = sock_spd_release,
    };
+   struct sock *sk = skb->sk;
 
    /*
     * __skb_splice_bits() only fails if the output has no room left,
     * so no point in going over the frag_list for the error case.
     */
-   if (__skb_splice_bits(skb, &offset, &tlen, &spd))
+   if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
        goto done;
    else if (!tlen)
        goto done;
@@ -1545,14 +1547,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
        struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
        for (; list && tlen; list = list->next) {
-           if (__skb_splice_bits(list, &offset, &tlen, &spd))
+           if (__skb_splice_bits(list, &offset, &tlen, &spd, sk))
                break;
        }
    }
 
 done:
    if (spd.nr_pages) {
-       struct sock *sk = skb->sk;
        int ret;
 
        /*
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 5ba533d234db..831fe1879dc0 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -253,9 +253,9 @@ unsigned int arpt_do_table(struct sk_buff *skb,
    indev = in ? in->name : nulldevname;
    outdev = out ? out->name : nulldevname;
 
-   rcu_read_lock_bh();
-   private = rcu_dereference(table->private);
-   table_base = rcu_dereference(private->entries[smp_processor_id()]);
+   xt_info_rdlock_bh();
+   private = table->private;
+   table_base = private->entries[smp_processor_id()];
 
    e = get_entry(table_base, private->hook_entry[hook]);
    back = get_entry(table_base, private->underflow[hook]);
@@ -273,6 +273,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 
            hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) +
                (2 * skb->dev->addr_len);
+
            ADD_COUNTER(e->counters, hdr_len, 1);
 
            t = arpt_get_target(e);
@@ -328,8 +329,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
            e = (void *)e + e->next_offset;
        }
    } while (!hotdrop);
-
-   rcu_read_unlock_bh();
+   xt_info_rdunlock_bh();
 
    if (hotdrop)
        return NF_DROP;
@@ -711,9 +711,12 @@ static void get_counters(const struct xt_table_info *t,
    /* Instead of clearing (by a previous call to memset())
     * the counters and using adds, we set the counters
     * with data used by 'current' CPU
-    * We dont care about preemption here.
+    *
+    * Bottom half has to be disabled to prevent deadlock
+    * if new softirq were to run and call ipt_do_table
     */
-   curcpu = raw_smp_processor_id();
+   local_bh_disable();
+   curcpu = smp_processor_id();
 
    i = 0;
    ARPT_ENTRY_ITERATE(t->entries[curcpu],
@@ -726,73 +729,22 @@ static void get_counters(const struct xt_table_info *t,
        if (cpu == curcpu)
            continue;
        i = 0;
+       xt_info_wrlock(cpu);
        ARPT_ENTRY_ITERATE(t->entries[cpu],
                   t->size,
                   add_entry_to_counter,
                   counters,
                   &i);
+       xt_info_wrunlock(cpu);
    }
-}
-
-
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
-static int
-add_counter_to_entry(struct arpt_entry *e,
-            const struct xt_counters addme[],
-            unsigned int *i)
-{
-   ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
-   (*i)++;
-   return 0;
-}
-
-/* Take values from counters and add them back onto the current cpu */
-static void put_counters(struct xt_table_info *t,
-            const struct xt_counters counters[])
-{
-   unsigned int i, cpu;
-
-   local_bh_disable();
-   cpu = smp_processor_id();
-   i = 0;
-   ARPT_ENTRY_ITERATE(t->entries[cpu],
-              t->size,
-              add_counter_to_entry,
-              counters,
-              &i);
    local_bh_enable();
 }
 
-static inline int
-zero_entry_counter(struct arpt_entry *e, void *arg)
-{
-   e->counters.bcnt = 0;
-   e->counters.pcnt = 0;
-   return 0;
-}
-
-static void
-clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
-{
-   unsigned int cpu;
-   const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
-
-   memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
-   for_each_possible_cpu(cpu) {
-       memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
-       ARPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
-                  zero_entry_counter, NULL);
-   }
-}
-
 static struct xt_counters *alloc_counters(struct xt_table *table)
 {
    unsigned int countersize;
    struct xt_counters *counters;
    struct xt_table_info *private = table->private;
-   struct xt_table_info *info;
 
    /* We need atomic snapshot of counters: rest doesn't change
     * (other than comefrom, which userspace doesn't care
@@ -802,30 +754,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
    counters = vmalloc_node(countersize, numa_node_id());
 
    if (counters == NULL)
-       goto nomem;
-
-   info = xt_alloc_table_info(private->size);
-   if (!info)
-       goto free_counters;
-
-   clone_counters(info, private);
-
-   mutex_lock(&table->lock);
-   xt_table_entry_swap_rcu(private, info);
-   synchronize_net();  /* Wait until smoke has cleared */
+       return ERR_PTR(-ENOMEM);
 
-   get_counters(info, counters);
-   put_counters(private, counters);
-   mutex_unlock(&table->lock);
-
-   xt_free_table_info(info);
+   get_counters(private, counters);
 
    return counters;
-
- free_counters:
-   vfree(counters);
- nomem:
-   return ERR_PTR(-ENOMEM);
 }
 
 static int copy_entries_to_user(unsigned int total_size,
@@ -1094,8 +1027,9 @@ static int __do_replace(struct net *net, const char *name,
        (newinfo->number <= oldinfo->initial_entries))
        module_put(t->me);
 
-   /* Get the old counters. */
+   /* Get the old counters, and synchronize with replace */
    get_counters(oldinfo, counters);
+
    /* Decrease module usage counts and free resource */
    loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
    ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
@@ -1165,10 +1099,23 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
    return ret;
 }
 
+/* We're lazy, and add to the first CPU; overflow works its fey magic
+ * and everything is OK. */
+static int
+add_counter_to_entry(struct arpt_entry *e,
+            const struct xt_counters addme[],
+            unsigned int *i)
+{
+   ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
+
+   (*i)++;
+   return 0;
+}
+
 static int do_add_counters(struct net *net, void __user *user, unsigned int len,
               int compat)
 {
-   unsigned int i;
+   unsigned int i, curcpu;
    struct xt_counters_info tmp;
    struct xt_counters *paddc;
    unsigned int num_counters;
@@ -1224,26 +1171,26 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
        goto free;
    }
 
-   mutex_lock(&t->lock);
+   local_bh_disable();
    private = t->private;
    if (private->number != num_counters) {
        ret = -EINVAL;
        goto unlock_up_free;
    }
 
-   preempt_disable();
    i = 0;
    /* Choose the copy that is on our node */
-   loc_cpu_entry = private->entries[smp_processor_id()];
+   curcpu = smp_processor_id();
+   loc_cpu_entry = private->entries[curcpu];
+   xt_info_wrlock(curcpu);
    ARPT_ENTRY_ITERATE(loc_cpu_entry,
               private->size,
               add_counter_to_entry,
               paddc,
               &i);
-   preempt_enable();
+   xt_info_wrunlock(curcpu);
  unlock_up_free:
-   mutex_unlock(&t->lock);
-
+   local_bh_enable();
    xt_table_unlock(t);
    module_put(t->me);
  free:
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 810c0b62c7d4..2ec8d7290c40 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -338,10 +338,9 @@ ipt_do_table(struct sk_buff *skb,
    tgpar.hooknum = hook;
 
    IP_NF_ASSERT(table->valid_hooks & (1 << hook));
-
-   rcu_read_lock_bh();
-   private = rcu_dereference(table->private);
-   table_base = rcu_dereference(private->entries[smp_processor_id()]);
+   xt_info_rdlock_bh();
+   private = table->private;
+   table_base = private->entries[smp_processor_id()];
 
    e = get_entry(table_base, private->hook_entry[hook]);
 
@@ -436,8 +435,7 @@ ipt_do_table(struct sk_buff *skb,
            e = (void *)e + e->next_offset;
        }
    } while (!hotdrop);
-
-   rcu_read_unlock_bh();
+   xt_info_rdunlock_bh();
 
 #ifdef DEBUG_ALLOW_ALL
    return NF_ACCEPT;
@@ -896,10 +894,13 @@ get_counters(const struct xt_table_info *t,
896 894
897 /* Instead of clearing (by a previous call to memset()) 895 /* Instead of clearing (by a previous call to memset())
898 * the counters and using adds, we set the counters 896 * the counters and using adds, we set the counters
899 * with data used by 'current' CPU 897 * with data used by 'current' CPU.
900 * We dont care about preemption here. 898 *
899 * Bottom half has to be disabled to prevent deadlock
900 * if new softirq were to run and call ipt_do_table
901 */ 901 */
902 curcpu = raw_smp_processor_id(); 902 local_bh_disable();
903 curcpu = smp_processor_id();
903 904
904 i = 0; 905 i = 0;
905 IPT_ENTRY_ITERATE(t->entries[curcpu], 906 IPT_ENTRY_ITERATE(t->entries[curcpu],
@@ -912,74 +913,22 @@ get_counters(const struct xt_table_info *t,
912 if (cpu == curcpu) 913 if (cpu == curcpu)
913 continue; 914 continue;
914 i = 0; 915 i = 0;
916 xt_info_wrlock(cpu);
915 IPT_ENTRY_ITERATE(t->entries[cpu], 917 IPT_ENTRY_ITERATE(t->entries[cpu],
916 t->size, 918 t->size,
917 add_entry_to_counter, 919 add_entry_to_counter,
918 counters, 920 counters,
919 &i); 921 &i);
922 xt_info_wrunlock(cpu);
920 } 923 }
921
922}
923
924/* We're lazy, and add to the first CPU; overflow works its fey magic
925 * and everything is OK. */
926static int
927add_counter_to_entry(struct ipt_entry *e,
928 const struct xt_counters addme[],
929 unsigned int *i)
930{
931 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
932
933 (*i)++;
934 return 0;
935}
936
937/* Take values from counters and add them back onto the current cpu */
938static void put_counters(struct xt_table_info *t,
939 const struct xt_counters counters[])
940{
941 unsigned int i, cpu;
942
943 local_bh_disable();
944 cpu = smp_processor_id();
945 i = 0;
946 IPT_ENTRY_ITERATE(t->entries[cpu],
947 t->size,
948 add_counter_to_entry,
949 counters,
950 &i);
951 local_bh_enable(); 924 local_bh_enable();
952} 925}
953 926
954
955static inline int
956zero_entry_counter(struct ipt_entry *e, void *arg)
957{
958 e->counters.bcnt = 0;
959 e->counters.pcnt = 0;
960 return 0;
961}
962
963static void
964clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
965{
966 unsigned int cpu;
967 const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
968
969 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
970 for_each_possible_cpu(cpu) {
971 memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
972 IPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
973 zero_entry_counter, NULL);
974 }
975}
976
977static struct xt_counters * alloc_counters(struct xt_table *table) 927static struct xt_counters * alloc_counters(struct xt_table *table)
978{ 928{
979 unsigned int countersize; 929 unsigned int countersize;
980 struct xt_counters *counters; 930 struct xt_counters *counters;
981 struct xt_table_info *private = table->private; 931 struct xt_table_info *private = table->private;
982 struct xt_table_info *info;
983 932
984 /* We need atomic snapshot of counters: rest doesn't change 933 /* We need atomic snapshot of counters: rest doesn't change
985 (other than comefrom, which userspace doesn't care 934 (other than comefrom, which userspace doesn't care
@@ -988,30 +937,11 @@ static struct xt_counters * alloc_counters(struct xt_table *table)
988 counters = vmalloc_node(countersize, numa_node_id()); 937 counters = vmalloc_node(countersize, numa_node_id());
989 938
990 if (counters == NULL) 939 if (counters == NULL)
991 goto nomem; 940 return ERR_PTR(-ENOMEM);
992 941
993 info = xt_alloc_table_info(private->size); 942 get_counters(private, counters);
994 if (!info)
995 goto free_counters;
996
997 clone_counters(info, private);
998
999 mutex_lock(&table->lock);
1000 xt_table_entry_swap_rcu(private, info);
1001 synchronize_net(); /* Wait until smoke has cleared */
1002
1003 get_counters(info, counters);
1004 put_counters(private, counters);
1005 mutex_unlock(&table->lock);
1006
1007 xt_free_table_info(info);
1008 943
1009 return counters; 944 return counters;
1010
1011 free_counters:
1012 vfree(counters);
1013 nomem:
1014 return ERR_PTR(-ENOMEM);
1015} 945}
1016 946
1017static int 947static int
@@ -1306,8 +1236,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1306 (newinfo->number <= oldinfo->initial_entries)) 1236 (newinfo->number <= oldinfo->initial_entries))
1307 module_put(t->me); 1237 module_put(t->me);
1308 1238
1309 /* Get the old counters. */ 1239 /* Get the old counters, and synchronize with replace */
1310 get_counters(oldinfo, counters); 1240 get_counters(oldinfo, counters);
1241
1311 /* Decrease module usage counts and free resource */ 1242 /* Decrease module usage counts and free resource */
1312 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1243 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1313 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1244 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
@@ -1377,11 +1308,23 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1377 return ret; 1308 return ret;
1378} 1309}
1379 1310
1311/* We're lazy, and add to the first CPU; overflow works its fey magic
1312 * and everything is OK. */
1313static int
1314add_counter_to_entry(struct ipt_entry *e,
1315 const struct xt_counters addme[],
1316 unsigned int *i)
1317{
1318 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1319
1320 (*i)++;
1321 return 0;
1322}
1380 1323
1381static int 1324static int
1382do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) 1325do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
1383{ 1326{
1384 unsigned int i; 1327 unsigned int i, curcpu;
1385 struct xt_counters_info tmp; 1328 struct xt_counters_info tmp;
1386 struct xt_counters *paddc; 1329 struct xt_counters *paddc;
1387 unsigned int num_counters; 1330 unsigned int num_counters;
@@ -1437,25 +1380,26 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
1437 goto free; 1380 goto free;
1438 } 1381 }
1439 1382
1440 mutex_lock(&t->lock); 1383 local_bh_disable();
1441 private = t->private; 1384 private = t->private;
1442 if (private->number != num_counters) { 1385 if (private->number != num_counters) {
1443 ret = -EINVAL; 1386 ret = -EINVAL;
1444 goto unlock_up_free; 1387 goto unlock_up_free;
1445 } 1388 }
1446 1389
1447 preempt_disable();
1448 i = 0; 1390 i = 0;
1449 /* Choose the copy that is on our node */ 1391 /* Choose the copy that is on our node */
1450 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1392 curcpu = smp_processor_id();
1393 loc_cpu_entry = private->entries[curcpu];
1394 xt_info_wrlock(curcpu);
1451 IPT_ENTRY_ITERATE(loc_cpu_entry, 1395 IPT_ENTRY_ITERATE(loc_cpu_entry,
1452 private->size, 1396 private->size,
1453 add_counter_to_entry, 1397 add_counter_to_entry,
1454 paddc, 1398 paddc,
1455 &i); 1399 &i);
1456 preempt_enable(); 1400 xt_info_wrunlock(curcpu);
1457 unlock_up_free: 1401 unlock_up_free:
1458 mutex_unlock(&t->lock); 1402 local_bh_enable();
1459 xt_table_unlock(t); 1403 xt_table_unlock(t);
1460 module_put(t->me); 1404 module_put(t->me);
1461 free: 1405 free:
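
Assembled from the hunks above: once the shadow-table/RCU dance is gone, the whole counter snapshot collapses to an allocation plus one locked walk. A condensed rendering of the new alloc_counters(), not a verbatim copy:

    static struct xt_counters *alloc_counters(struct xt_table *table)
    {
            struct xt_table_info *private = table->private;
            struct xt_counters *counters;

            counters = vmalloc_node(private->number * sizeof(*counters),
                                    numa_node_id());
            if (counters == NULL)
                    return ERR_PTR(-ENOMEM);

            get_counters(private, counters); /* write-locks each CPU in turn */
            return counters;
    }

The same per-CPU write locks double as the synchronization point after a table replacement: by the time get_counters(oldinfo, ...) has taken and released every CPU's lock, no CPU can still be executing the old entries. That is what the reworded "Get the old counters, and synchronize with replace" comment in __do_replace() refers to.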
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index fe65187810f0..3229e0a81ba6 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -211,7 +211,8 @@ find_best_ips_proto(struct nf_conntrack_tuple *tuple,
211 minip = ntohl(range->min_ip); 211 minip = ntohl(range->min_ip);
212 maxip = ntohl(range->max_ip); 212 maxip = ntohl(range->max_ip);
213 j = jhash_2words((__force u32)tuple->src.u3.ip, 213 j = jhash_2words((__force u32)tuple->src.u3.ip,
214 (__force u32)tuple->dst.u3.ip, 0); 214 range->flags & IP_NAT_RANGE_PERSISTENT ?
215 (__force u32)tuple->dst.u3.ip : 0, 0);
215 j = ((u64)j * (maxip - minip + 1)) >> 32; 216 j = ((u64)j * (maxip - minip + 1)) >> 32;
216 *var_ipp = htonl(minip + j); 217 *var_ipp = htonl(minip + j);
217} 218}
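
Two separate things happen in this hunk. Whether the destination address feeds the flow hash is now gated on IP_NAT_RANGE_PERSISTENT, i.e. the flag decides whether one source can be mapped to different addresses in the range for different peers. The pick itself is multiply-shift range reduction: treating a 32-bit hash j as the fraction j/2^32, the product ((u64)j * n) >> 32 is an index in [0, n) computed without a division. A standalone sketch (the function name and parameters are mine):

    #include <linux/jhash.h>

    static u32 pick_nat_addr(u32 src, u32 dst, bool hash_dst,
                             u32 minip, u32 maxip)
    {
            /* hash_dst controls whether the peer perturbs the choice */
            u32 j = jhash_2words(src, hash_dst ? dst : 0, 0);
            u32 n = maxip - minip + 1;              /* size of the range */

            /* multiply-shift: ((u64)j * n) >> 32 is always < n */
            return minip + (u32)(((u64)j * n) >> 32);
    }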
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c40debe51b38..c4c60e9f068a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3397,7 +3397,7 @@ int __init ip_rt_init(void)
3397 0, 3397 0,
3398 &rt_hash_log, 3398 &rt_hash_log,
3399 &rt_hash_mask, 3399 &rt_hash_mask,
3400 0); 3400 rhash_entries ? 0 : 512 * 1024);
3401 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket)); 3401 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3402 rt_hash_lock_init(); 3402 rt_hash_lock_init();
3403 3403
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index fafbec8b073e..1d7f49c6f0ca 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2511,6 +2511,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2511 struct sk_buff *p; 2511 struct sk_buff *p;
2512 struct tcphdr *th; 2512 struct tcphdr *th;
2513 struct tcphdr *th2; 2513 struct tcphdr *th2;
2514 unsigned int len;
2514 unsigned int thlen; 2515 unsigned int thlen;
2515 unsigned int flags; 2516 unsigned int flags;
2516 unsigned int mss = 1; 2517 unsigned int mss = 1;
@@ -2531,6 +2532,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2531 2532
2532 skb_gro_pull(skb, thlen); 2533 skb_gro_pull(skb, thlen);
2533 2534
2535 len = skb_gro_len(skb);
2534 flags = tcp_flag_word(th); 2536 flags = tcp_flag_word(th);
2535 2537
2536 for (; (p = *head); head = &p->next) { 2538 for (; (p = *head); head = &p->next) {
@@ -2561,7 +2563,7 @@ found:
2561 2563
2562 mss = skb_shinfo(p)->gso_size; 2564 mss = skb_shinfo(p)->gso_size;
2563 2565
2564 flush |= (skb_gro_len(skb) > mss) | !skb_gro_len(skb); 2566 flush |= (len > mss) | !len;
2565 flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); 2567 flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
2566 2568
2567 if (flush || skb_gro_receive(head, skb)) { 2569 if (flush || skb_gro_receive(head, skb)) {
@@ -2574,7 +2576,7 @@ found:
2574 tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); 2576 tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
2575 2577
2576out_check_final: 2578out_check_final:
2577 flush = skb_gro_len(skb) < mss; 2579 flush = len < mss;
2578 flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST | 2580 flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
2579 TCP_FLAG_SYN | TCP_FLAG_FIN); 2581 TCP_FLAG_SYN | TCP_FLAG_FIN);
2580 2582
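
The tcp_gro_receive() hunks hoist skb_gro_len() into a local right after the header pull and use that one value for every flush test, rather than re-deriving it at each site. For reference, the helper is little more than an offset subtraction; roughly, per its definition in linux/netdevice.h:

    static inline unsigned int skb_gro_len(const struct sk_buff *skb)
    {
            /* bytes not yet consumed by GRO header parsing */
            return skb->len - NAPI_GRO_CB(skb)->data_offset;
    }

Read together, the two tests that use len say: a segment may only extend a batch if it carries between 1 and mss bytes of payload, and a segment shorter than a full MSS also closes the batch it joins.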
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2bc8e27a163d..eec3e6f9956c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -597,16 +597,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
597 tcp_grow_window(sk, skb); 597 tcp_grow_window(sk, skb);
598} 598}
599 599
600static u32 tcp_rto_min(struct sock *sk)
601{
602 struct dst_entry *dst = __sk_dst_get(sk);
603 u32 rto_min = TCP_RTO_MIN;
604
605 if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
606 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
607 return rto_min;
608}
609
610/* Called to compute a smoothed rtt estimate. The data fed to this 600/* Called to compute a smoothed rtt estimate. The data fed to this
611 * routine either comes from timestamps, or from segments that were 601 * routine either comes from timestamps, or from segments that were
612 * known _not_ to have been retransmitted [see Karn/Partridge 602 * known _not_ to have been retransmitted [see Karn/Partridge
@@ -928,6 +918,8 @@ static void tcp_init_metrics(struct sock *sk)
928 tcp_set_rto(sk); 918 tcp_set_rto(sk);
929 if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) 919 if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
930 goto reset; 920 goto reset;
921
922cwnd:
931 tp->snd_cwnd = tcp_init_cwnd(tp, dst); 923 tp->snd_cwnd = tcp_init_cwnd(tp, dst);
932 tp->snd_cwnd_stamp = tcp_time_stamp; 924 tp->snd_cwnd_stamp = tcp_time_stamp;
933 return; 925 return;
@@ -942,6 +934,7 @@ reset:
942 tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT; 934 tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
943 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 935 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
944 } 936 }
937 goto cwnd;
945} 938}
946 939
947static void tcp_update_reordering(struct sock *sk, const int metric, 940static void tcp_update_reordering(struct sock *sk, const int metric,
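
Two changes here. The local tcp_rto_min() definition is dropped in favour of a shared copy elsewhere in the tree, and the new cwnd: label re-routes the no-usable-RTT fallback so that the congestion-window initialisation runs on both exits; previously the reset path fell off the end of the function with snd_cwnd never seeded. A minimal standalone model of the repaired flow (all names illustrative):

    static void init_metrics_model(int have_rtt, unsigned int *snd_cwnd)
    {
            if (!have_rtt)
                    goto reset;
            /* normal path: srtt/rttvar/RTO seeded from the dst metrics */
    cwnd:
            *snd_cwnd = 3;          /* stand-in for tcp_init_cwnd() */
            return;
    reset:
            /* conservative TCP_TIMEOUT_INIT defaults instead */
            goto cwnd;              /* used to fall out with cwnd unset */
    }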
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 53300fa2359f..59aec609cec6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -778,7 +778,7 @@ static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
778 778
779 if (tp->lost_skb_hint && 779 if (tp->lost_skb_hint &&
780 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && 780 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
781 (tcp_is_fack(tp) || TCP_SKB_CB(skb)->sacked)) 781 (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
782 tp->lost_cnt_hint -= decr; 782 tp->lost_cnt_hint -= decr;
783 783
784 tcp_verify_left_out(tp); 784 tcp_verify_left_out(tp);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index bda08a09357d..7a1d1ce22e66 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -222,7 +222,7 @@ fail:
222 return error; 222 return error;
223} 223}
224 224
225int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) 225static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
226{ 226{
227 struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); 227 struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
228 228
@@ -1823,7 +1823,6 @@ EXPORT_SYMBOL(udp_lib_getsockopt);
1823EXPORT_SYMBOL(udp_lib_setsockopt); 1823EXPORT_SYMBOL(udp_lib_setsockopt);
1824EXPORT_SYMBOL(udp_poll); 1824EXPORT_SYMBOL(udp_poll);
1825EXPORT_SYMBOL(udp_lib_get_port); 1825EXPORT_SYMBOL(udp_lib_get_port);
1826EXPORT_SYMBOL(ipv4_rcv_saddr_equal);
1827 1826
1828#ifdef CONFIG_PROC_FS 1827#ifdef CONFIG_PROC_FS
1829EXPORT_SYMBOL(udp_proc_register); 1828EXPORT_SYMBOL(udp_proc_register);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index d31df0f4bc9a..a7fdf9a27f15 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -380,10 +380,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
380 default: 380 default:
381 goto sticky_done; 381 goto sticky_done;
382 } 382 }
383
384 if ((rthdr->hdrlen & 1) ||
385 (rthdr->hdrlen >> 1) != rthdr->segments_left)
386 goto sticky_done;
387 } 383 }
388 384
389 retv = 0; 385 retv = 0;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 800ae8542471..219e165aea10 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -365,9 +365,9 @@ ip6t_do_table(struct sk_buff *skb,
365 365
366 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 366 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
367 367
368 rcu_read_lock_bh(); 368 xt_info_rdlock_bh();
369 private = rcu_dereference(table->private); 369 private = table->private;
370 table_base = rcu_dereference(private->entries[smp_processor_id()]); 370 table_base = private->entries[smp_processor_id()];
371 371
372 e = get_entry(table_base, private->hook_entry[hook]); 372 e = get_entry(table_base, private->hook_entry[hook]);
373 373
@@ -466,7 +466,7 @@ ip6t_do_table(struct sk_buff *skb,
466#ifdef CONFIG_NETFILTER_DEBUG 466#ifdef CONFIG_NETFILTER_DEBUG
467 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON; 467 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
468#endif 468#endif
469 rcu_read_unlock_bh(); 469 xt_info_rdunlock_bh();
470 470
471#ifdef DEBUG_ALLOW_ALL 471#ifdef DEBUG_ALLOW_ALL
472 return NF_ACCEPT; 472 return NF_ACCEPT;
@@ -926,9 +926,12 @@ get_counters(const struct xt_table_info *t,
926 /* Instead of clearing (by a previous call to memset()) 926 /* Instead of clearing (by a previous call to memset())
927 * the counters and using adds, we set the counters 927 * the counters and using adds, we set the counters
928 * with data used by 'current' CPU 928 * with data used by 'current' CPU
929 * We dont care about preemption here. 929 *
930 * Bottom half has to be disabled to prevent deadlock
931 * if new softirq were to run and call ipt_do_table
930 */ 932 */
931 curcpu = raw_smp_processor_id(); 933 local_bh_disable();
934 curcpu = smp_processor_id();
932 935
933 i = 0; 936 i = 0;
934 IP6T_ENTRY_ITERATE(t->entries[curcpu], 937 IP6T_ENTRY_ITERATE(t->entries[curcpu],
@@ -941,72 +944,22 @@ get_counters(const struct xt_table_info *t,
941 if (cpu == curcpu) 944 if (cpu == curcpu)
942 continue; 945 continue;
943 i = 0; 946 i = 0;
947 xt_info_wrlock(cpu);
944 IP6T_ENTRY_ITERATE(t->entries[cpu], 948 IP6T_ENTRY_ITERATE(t->entries[cpu],
945 t->size, 949 t->size,
946 add_entry_to_counter, 950 add_entry_to_counter,
947 counters, 951 counters,
948 &i); 952 &i);
953 xt_info_wrunlock(cpu);
949 } 954 }
950}
951
952/* We're lazy, and add to the first CPU; overflow works its fey magic
953 * and everything is OK. */
954static int
955add_counter_to_entry(struct ip6t_entry *e,
956 const struct xt_counters addme[],
957 unsigned int *i)
958{
959 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
960
961 (*i)++;
962 return 0;
963}
964
965/* Take values from counters and add them back onto the current cpu */
966static void put_counters(struct xt_table_info *t,
967 const struct xt_counters counters[])
968{
969 unsigned int i, cpu;
970
971 local_bh_disable();
972 cpu = smp_processor_id();
973 i = 0;
974 IP6T_ENTRY_ITERATE(t->entries[cpu],
975 t->size,
976 add_counter_to_entry,
977 counters,
978 &i);
979 local_bh_enable(); 955 local_bh_enable();
980} 956}
981 957
982static inline int
983zero_entry_counter(struct ip6t_entry *e, void *arg)
984{
985 e->counters.bcnt = 0;
986 e->counters.pcnt = 0;
987 return 0;
988}
989
990static void
991clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
992{
993 unsigned int cpu;
994 const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
995
996 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
997 for_each_possible_cpu(cpu) {
998 memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
999 IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
1000 zero_entry_counter, NULL);
1001 }
1002}
1003
1004static struct xt_counters *alloc_counters(struct xt_table *table) 958static struct xt_counters *alloc_counters(struct xt_table *table)
1005{ 959{
1006 unsigned int countersize; 960 unsigned int countersize;
1007 struct xt_counters *counters; 961 struct xt_counters *counters;
1008 struct xt_table_info *private = table->private; 962 struct xt_table_info *private = table->private;
1009 struct xt_table_info *info;
1010 963
1011 /* We need atomic snapshot of counters: rest doesn't change 964 /* We need atomic snapshot of counters: rest doesn't change
1012 (other than comefrom, which userspace doesn't care 965 (other than comefrom, which userspace doesn't care
@@ -1015,30 +968,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
1015 counters = vmalloc_node(countersize, numa_node_id()); 968 counters = vmalloc_node(countersize, numa_node_id());
1016 969
1017 if (counters == NULL) 970 if (counters == NULL)
1018 goto nomem; 971 return ERR_PTR(-ENOMEM);
1019 972
1020 info = xt_alloc_table_info(private->size); 973 get_counters(private, counters);
1021 if (!info)
1022 goto free_counters;
1023
1024 clone_counters(info, private);
1025
1026 mutex_lock(&table->lock);
1027 xt_table_entry_swap_rcu(private, info);
1028 synchronize_net(); /* Wait until smoke has cleared */
1029
1030 get_counters(info, counters);
1031 put_counters(private, counters);
1032 mutex_unlock(&table->lock);
1033
1034 xt_free_table_info(info);
1035 974
1036 return counters; 975 return counters;
1037
1038 free_counters:
1039 vfree(counters);
1040 nomem:
1041 return ERR_PTR(-ENOMEM);
1042} 976}
1043 977
1044static int 978static int
@@ -1334,8 +1268,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1334 (newinfo->number <= oldinfo->initial_entries)) 1268 (newinfo->number <= oldinfo->initial_entries))
1335 module_put(t->me); 1269 module_put(t->me);
1336 1270
1337 /* Get the old counters. */ 1271 /* Get the old counters, and synchronize with replace */
1338 get_counters(oldinfo, counters); 1272 get_counters(oldinfo, counters);
1273
1339 /* Decrease module usage counts and free resource */ 1274 /* Decrease module usage counts and free resource */
1340 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1275 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1341 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1276 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
@@ -1405,11 +1340,24 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1405 return ret; 1340 return ret;
1406} 1341}
1407 1342
1343/* We're lazy, and add to the first CPU; overflow works its fey magic
1344 * and everything is OK. */
1345static int
1346add_counter_to_entry(struct ip6t_entry *e,
1347 const struct xt_counters addme[],
1348 unsigned int *i)
1349{
1350 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1351
1352 (*i)++;
1353 return 0;
1354}
1355
1408static int 1356static int
1409do_add_counters(struct net *net, void __user *user, unsigned int len, 1357do_add_counters(struct net *net, void __user *user, unsigned int len,
1410 int compat) 1358 int compat)
1411{ 1359{
1412 unsigned int i; 1360 unsigned int i, curcpu;
1413 struct xt_counters_info tmp; 1361 struct xt_counters_info tmp;
1414 struct xt_counters *paddc; 1362 struct xt_counters *paddc;
1415 unsigned int num_counters; 1363 unsigned int num_counters;
@@ -1465,25 +1413,28 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
1465 goto free; 1413 goto free;
1466 } 1414 }
1467 1415
1468 mutex_lock(&t->lock); 1416
1417 local_bh_disable();
1469 private = t->private; 1418 private = t->private;
1470 if (private->number != num_counters) { 1419 if (private->number != num_counters) {
1471 ret = -EINVAL; 1420 ret = -EINVAL;
1472 goto unlock_up_free; 1421 goto unlock_up_free;
1473 } 1422 }
1474 1423
1475 preempt_disable();
1476 i = 0; 1424 i = 0;
1477 /* Choose the copy that is on our node */ 1425 /* Choose the copy that is on our node */
1478 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1426 curcpu = smp_processor_id();
1427 xt_info_wrlock(curcpu);
1428 loc_cpu_entry = private->entries[curcpu];
1479 IP6T_ENTRY_ITERATE(loc_cpu_entry, 1429 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1480 private->size, 1430 private->size,
1481 add_counter_to_entry, 1431 add_counter_to_entry,
1482 paddc, 1432 paddc,
1483 &i); 1433 &i);
1484 preempt_enable(); 1434 xt_info_wrunlock(curcpu);
1435
1485 unlock_up_free: 1436 unlock_up_free:
1486 mutex_unlock(&t->lock); 1437 local_bh_enable();
1487 xt_table_unlock(t); 1438 xt_table_unlock(t);
1488 module_put(t->me); 1439 module_put(t->me);
1489 free: 1440 free:
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6842dd2edd5b..8905712cfbb8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -53,6 +53,8 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
53{ 53{
54 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; 54 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
55 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); 55 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
56 __be32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr;
57 __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
56 int sk_ipv6only = ipv6_only_sock(sk); 58 int sk_ipv6only = ipv6_only_sock(sk);
57 int sk2_ipv6only = inet_v6_ipv6only(sk2); 59 int sk2_ipv6only = inet_v6_ipv6only(sk2);
58 int addr_type = ipv6_addr_type(sk_rcv_saddr6); 60 int addr_type = ipv6_addr_type(sk_rcv_saddr6);
@@ -60,7 +62,9 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
60 62
61 /* if both are mapped, treat as IPv4 */ 63 /* if both are mapped, treat as IPv4 */
62 if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) 64 if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
63 return ipv4_rcv_saddr_equal(sk, sk2); 65 return (!sk2_ipv6only &&
66 (!sk_rcv_saddr || !sk2_rcv_saddr ||
67 sk_rcv_saddr == sk2_rcv_saddr));
64 68
65 if (addr_type2 == IPV6_ADDR_ANY && 69 if (addr_type2 == IPV6_ADDR_ANY &&
66 !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) 70 !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
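
With ipv4_rcv_saddr_equal() now static to net/ipv4/udp.c and its export gone (udp.c hunk above), the v4-mapped branch open-codes the comparison against the freshly captured __be32 addresses. Condensed into a single predicate (illustrative form):

    /* Bind conflict between two v4-mapped sockets: none if the other
     * socket is v6-only; otherwise a wildcard (0.0.0.0) on either side,
     * or an exact address match, collides. */
    static int v4_mapped_saddr_conflict(__be32 a, __be32 b, int b_ipv6only)
    {
            return !b_ipv6only && (!a || !b || a == b);
    }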
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 49e786535dc8..b51c9187c347 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -172,6 +172,7 @@ static void iucv_sock_close(struct sock *sk)
172 err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo); 172 err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
173 } 173 }
174 174
175 case IUCV_CLOSING: /* fall through */
175 sk->sk_state = IUCV_CLOSED; 176 sk->sk_state = IUCV_CLOSED;
176 sk->sk_state_change(sk); 177 sk->sk_state_change(sk);
177 178
@@ -224,6 +225,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
224 spin_lock_init(&iucv_sk(sk)->message_q.lock); 225 spin_lock_init(&iucv_sk(sk)->message_q.lock);
225 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q); 226 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
226 iucv_sk(sk)->send_tag = 0; 227 iucv_sk(sk)->send_tag = 0;
228 iucv_sk(sk)->path = NULL;
229 memset(&iucv_sk(sk)->src_user_id , 0, 32);
227 230
228 sk->sk_destruct = iucv_sock_destruct; 231 sk->sk_destruct = iucv_sock_destruct;
229 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; 232 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
@@ -811,6 +814,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
811 814
812 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 815 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
813 816
817 /* receive/dequeue next skb:
818 * the function understands MSG_PEEK and, thus, does not dequeue skb */
814 skb = skb_recv_datagram(sk, flags, noblock, &err); 819 skb = skb_recv_datagram(sk, flags, noblock, &err);
815 if (!skb) { 820 if (!skb) {
816 if (sk->sk_shutdown & RCV_SHUTDOWN) 821 if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -858,9 +863,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
858 iucv_process_message_q(sk); 863 iucv_process_message_q(sk);
859 spin_unlock_bh(&iucv->message_q.lock); 864 spin_unlock_bh(&iucv->message_q.lock);
860 } 865 }
861 866 }
862 } else
863 skb_queue_head(&sk->sk_receive_queue, skb);
864 867
865done: 868done:
866 return err ? : copied; 869 return err ? : copied;
@@ -934,6 +937,9 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
934 937
935 lock_sock(sk); 938 lock_sock(sk);
936 switch (sk->sk_state) { 939 switch (sk->sk_state) {
940 case IUCV_DISCONN:
941 case IUCV_CLOSING:
942 case IUCV_SEVERED:
937 case IUCV_CLOSED: 943 case IUCV_CLOSED:
938 err = -ENOTCONN; 944 err = -ENOTCONN;
939 goto fail; 945 goto fail;
@@ -1113,8 +1119,12 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1113 struct sock_msg_q *save_msg; 1119 struct sock_msg_q *save_msg;
1114 int len; 1120 int len;
1115 1121
1116 if (sk->sk_shutdown & RCV_SHUTDOWN) 1122 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1123 iucv_message_reject(path, msg);
1117 return; 1124 return;
1125 }
1126
1127 spin_lock(&iucv->message_q.lock);
1118 1128
1119 if (!list_empty(&iucv->message_q.list) || 1129 if (!list_empty(&iucv->message_q.list) ||
1120 !skb_queue_empty(&iucv->backlog_skb_q)) 1130 !skb_queue_empty(&iucv->backlog_skb_q))
@@ -1129,9 +1139,8 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1129 if (!skb) 1139 if (!skb)
1130 goto save_message; 1140 goto save_message;
1131 1141
1132 spin_lock(&iucv->message_q.lock);
1133 iucv_process_message(sk, skb, path, msg); 1142 iucv_process_message(sk, skb, path, msg);
1134 spin_unlock(&iucv->message_q.lock); 1143 goto out_unlock;
1135 1144
1136 return; 1145 return;
1137 1146
@@ -1142,8 +1151,9 @@ save_message:
1142 save_msg->path = path; 1151 save_msg->path = path;
1143 save_msg->msg = *msg; 1152 save_msg->msg = *msg;
1144 1153
1145 spin_lock(&iucv->message_q.lock);
1146 list_add_tail(&save_msg->list, &iucv->message_q.list); 1154 list_add_tail(&save_msg->list, &iucv->message_q.list);
1155
1156out_unlock:
1147 spin_unlock(&iucv->message_q.lock); 1157 spin_unlock(&iucv->message_q.lock);
1148} 1158}
1149 1159
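
The af_iucv changes are about shutdown robustness and ordering: path and src_user_id are initialised at allocation, half-open states now report -ENOTCONN on shutdown, inbound messages are rejected outright once RCV_SHUTDOWN is set, and message_q.lock is widened so the "is anything already queued?" test and the resulting process-or-save decision are atomic. Schematically, the receive callback now has this shape (condensed from the hunks above):

    spin_lock(&iucv->message_q.lock);
    if (!list_empty(&iucv->message_q.list) ||
        !skb_queue_empty(&iucv->backlog_skb_q))
            goto save_message;      /* keep ordering behind the backlog */
    /* ... allocate an skb and process the message in line ... */
    goto out_unlock;
    save_message:
    /* ... append to message_q.list for later processing ... */
    out_unlock:
    spin_unlock(&iucv->message_q.lock);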
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index f3d9ae350fb6..ecc3faf9f11a 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -202,10 +202,3 @@ config MAC80211_DEBUG_COUNTERS
202 and show them in debugfs. 202 and show them in debugfs.
203 203
204 If unsure, say N. 204 If unsure, say N.
205
206config MAC80211_VERBOSE_SPECT_MGMT_DEBUG
207 bool "Verbose Spectrum Management (IEEE 802.11h)debugging"
208 depends on MAC80211_DEBUG_MENU
209 ---help---
210 Say Y here to print out verbose Spectrum Management (IEEE 802.11h)
211 debug messages.
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index a6f1d8a869bc..14134193cd17 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -258,7 +258,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
258 (chan->max_power - local->power_constr_level) : 258 (chan->max_power - local->power_constr_level) :
259 chan->max_power; 259 chan->max_power;
260 260
261 if (local->user_power_level) 261 if (local->user_power_level >= 0)
262 power = min(power, local->user_power_level); 262 power = min(power, local->user_power_level);
263 263
264 if (local->hw.conf.power_level != power) { 264 if (local->hw.conf.power_level != power) {
@@ -757,6 +757,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
757 local->hw.conf.long_frame_max_tx_count = 4; 757 local->hw.conf.long_frame_max_tx_count = 4;
758 local->hw.conf.short_frame_max_tx_count = 7; 758 local->hw.conf.short_frame_max_tx_count = 7;
759 local->hw.conf.radio_enabled = true; 759 local->hw.conf.radio_enabled = true;
760 local->user_power_level = -1;
760 761
761 INIT_LIST_HEAD(&local->interfaces); 762 INIT_LIST_HEAD(&local->interfaces);
762 mutex_init(&local->iflist_mtx); 763 mutex_init(&local->iflist_mtx);
@@ -909,6 +910,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
909 if (result < 0) 910 if (result < 0)
910 goto fail_sta_info; 911 goto fail_sta_info;
911 912
913 result = ieee80211_wep_init(local);
914 if (result < 0) {
915 printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n",
916 wiphy_name(local->hw.wiphy), result);
917 goto fail_wep;
918 }
919
912 rtnl_lock(); 920 rtnl_lock();
913 result = dev_alloc_name(local->mdev, local->mdev->name); 921 result = dev_alloc_name(local->mdev, local->mdev->name);
914 if (result < 0) 922 if (result < 0)
@@ -930,14 +938,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
930 goto fail_rate; 938 goto fail_rate;
931 } 939 }
932 940
933 result = ieee80211_wep_init(local);
934
935 if (result < 0) {
936 printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n",
937 wiphy_name(local->hw.wiphy), result);
938 goto fail_wep;
939 }
940
941 /* add one default STA interface if supported */ 941 /* add one default STA interface if supported */
942 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) { 942 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) {
943 result = ieee80211_if_add(local, "wlan%d", NULL, 943 result = ieee80211_if_add(local, "wlan%d", NULL,
@@ -967,13 +967,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
967 967
968 return 0; 968 return 0;
969 969
970fail_wep:
971 rate_control_deinitialize(local);
972fail_rate: 970fail_rate:
973 unregister_netdevice(local->mdev); 971 unregister_netdevice(local->mdev);
974 local->mdev = NULL; 972 local->mdev = NULL;
975fail_dev: 973fail_dev:
976 rtnl_unlock(); 974 rtnl_unlock();
975 ieee80211_wep_free(local);
976fail_wep:
977 sta_info_stop(local); 977 sta_info_stop(local);
978fail_sta_info: 978fail_sta_info:
979 debugfs_hw_del(local); 979 debugfs_hw_del(local);
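
ieee80211_wep_init() moves ahead of the rtnl section so that its failure no longer tears down state it never set up, and its cleanup accordingly migrates to the far end of the unwind ladder. The invariant the relabelled gotos preserve is the standard one: each failure label undoes exactly what was acquired before the failing step, in reverse order. A generic standalone shape (names illustrative):

    static int init_a(void) { return 0; }
    static int init_b(void) { return 0; }
    static void undo_a(void) { }

    static int setup(void)
    {
            int err;

            err = init_a();
            if (err)
                    goto fail_a;
            err = init_b();
            if (err)
                    goto fail_b;
            return 0;
    fail_b:
            undo_a();       /* b failed: roll back a, nothing else */
    fail_a:
            return err;
    }

Reordering an init without reordering its label is how ladders like this quietly start leaking; the hunk moves both together.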
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 7ecda9d59d8a..132938b073dc 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -441,6 +441,9 @@ static bool ieee80211_check_tim(struct ieee802_11_elems *elems, u16 aid)
441 u8 index, indexn1, indexn2; 441 u8 index, indexn1, indexn2;
442 struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *) elems->tim; 442 struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *) elems->tim;
443 443
444 if (unlikely(!tim || elems->tim_len < 4))
445 return false;
446
444 aid &= 0x3fff; 447 aid &= 0x3fff;
445 index = aid / 8; 448 index = aid / 8;
446 mask = 1 << (aid & 7); 449 mask = 1 << (aid & 7);
@@ -945,9 +948,13 @@ void ieee80211_beacon_loss_work(struct work_struct *work)
945 u.mgd.beacon_loss_work); 948 u.mgd.beacon_loss_work);
946 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 949 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
947 950
948 printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM " 951#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
949 "- sending probe request\n", sdata->dev->name, 952 if (net_ratelimit()) {
950 sdata->u.mgd.bssid); 953 printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM "
954 "- sending probe request\n", sdata->dev->name,
955 sdata->u.mgd.bssid);
956 }
957#endif
951 958
952 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; 959 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
953 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, 960 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
@@ -1007,9 +1014,13 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
1007 (local->hw.conf.flags & IEEE80211_CONF_PS)) && 1014 (local->hw.conf.flags & IEEE80211_CONF_PS)) &&
1008 time_after(jiffies, 1015 time_after(jiffies,
1009 ifmgd->last_beacon + IEEE80211_MONITORING_INTERVAL)) { 1016 ifmgd->last_beacon + IEEE80211_MONITORING_INTERVAL)) {
1010 printk(KERN_DEBUG "%s: beacon loss from AP %pM " 1017#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1011 "- sending probe request\n", 1018 if (net_ratelimit()) {
1012 sdata->dev->name, ifmgd->bssid); 1019 printk(KERN_DEBUG "%s: beacon loss from AP %pM "
1020 "- sending probe request\n",
1021 sdata->dev->name, ifmgd->bssid);
1022 }
1023#endif
1013 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL; 1024 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
1014 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid, 1025 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
1015 ifmgd->ssid_len, NULL, 0); 1026 ifmgd->ssid_len, NULL, 0);
@@ -1355,7 +1366,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1355 1366
1356 for (i = 0; i < elems.ext_supp_rates_len; i++) { 1367 for (i = 0; i < elems.ext_supp_rates_len; i++) {
1357 int rate = (elems.ext_supp_rates[i] & 0x7f) * 5; 1368 int rate = (elems.ext_supp_rates[i] & 0x7f) * 5;
1358 bool is_basic = !!(elems.supp_rates[i] & 0x80); 1369 bool is_basic = !!(elems.ext_supp_rates[i] & 0x80);
1359 1370
1360 if (rate > 110) 1371 if (rate > 110)
1361 have_higher_than_11mbit = true; 1372 have_higher_than_11mbit = true;
@@ -1902,9 +1913,17 @@ static void ieee80211_sta_work(struct work_struct *work)
1902 1913
1903static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) 1914static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
1904{ 1915{
1905 if (sdata->vif.type == NL80211_IFTYPE_STATION) 1916 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
1917 /*
1918 * Need to update last_beacon to avoid beacon loss
1919 * test to trigger.
1920 */
1921 sdata->u.mgd.last_beacon = jiffies;
1922
1923
1906 queue_work(sdata->local->hw.workqueue, 1924 queue_work(sdata->local->hw.workqueue,
1907 &sdata->u.mgd.work); 1925 &sdata->u.mgd.work);
1926 }
1908} 1927}
1909 1928
1910/* interface setup */ 1929/* interface setup */
@@ -2105,12 +2124,13 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
2105 struct ieee80211_local *local = 2124 struct ieee80211_local *local =
2106 container_of(work, struct ieee80211_local, 2125 container_of(work, struct ieee80211_local,
2107 dynamic_ps_enable_work); 2126 dynamic_ps_enable_work);
2127 /* XXX: using scan_sdata is completely broken! */
2108 struct ieee80211_sub_if_data *sdata = local->scan_sdata; 2128 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
2109 2129
2110 if (local->hw.conf.flags & IEEE80211_CONF_PS) 2130 if (local->hw.conf.flags & IEEE80211_CONF_PS)
2111 return; 2131 return;
2112 2132
2113 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) 2133 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK && sdata)
2114 ieee80211_send_nullfunc(local, sdata, 1); 2134 ieee80211_send_nullfunc(local, sdata, 1);
2115 2135
2116 local->hw.conf.flags |= IEEE80211_CONF_PS; 2136 local->hw.conf.flags |= IEEE80211_CONF_PS;
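
The guard added to ieee80211_check_tim() matters because the function immediately indexes into the element: a TIM IE carries a 3-octet fixed part plus at least one octet of partial virtual bitmap, so a missing element or tim_len < 4 has nothing safe to dereference. The layout, roughly as declared in linux/ieee80211.h:

    struct ieee80211_tim_ie {
            u8 dtim_count;
            u8 dtim_period;
            u8 bitmap_ctrl;         /* bitmap offset + multicast bit */
            u8 virtual_map[0];      /* 1..251 octets in a valid beacon */
    } __attribute__ ((packed));

Of the remaining hunks, note the ext_supp_rates one: the basic-rate bit was being read from supp_rates while iterating ext_supp_rates, a copy-paste slip.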
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 027302326498..81985d27cbda 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -156,8 +156,19 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
156 case NL80211_IFTYPE_ADHOC: 156 case NL80211_IFTYPE_ADHOC:
157 case NL80211_IFTYPE_AP: 157 case NL80211_IFTYPE_AP:
158 case NL80211_IFTYPE_MESH_POINT: 158 case NL80211_IFTYPE_MESH_POINT:
159 WARN_ON(ieee80211_if_config(sdata, changed)); 159 /*
160 ieee80211_bss_info_change_notify(sdata, ~0); 160 * Driver's config_interface can fail if rfkill is
161 * enabled. Accommodate this return code.
162 * FIXME: When mac80211 has knowledge of rfkill
163 * state the code below can change back to:
164 * WARN(ieee80211_if_config(sdata, changed));
165 * ieee80211_bss_info_change_notify(sdata, ~0);
166 */
167 if (ieee80211_if_config(sdata, changed))
168 printk(KERN_DEBUG "%s: failed to configure interface during resume\n",
169 sdata->dev->name);
170 else
171 ieee80211_bss_info_change_notify(sdata, ~0);
161 break; 172 break;
162 case NL80211_IFTYPE_WDS: 173 case NL80211_IFTYPE_WDS:
163 break; 174 break;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 64ebe664effc..9776f73c51ad 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -29,6 +29,7 @@
29static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, 29static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
30 struct tid_ampdu_rx *tid_agg_rx, 30 struct tid_ampdu_rx *tid_agg_rx,
31 struct sk_buff *skb, 31 struct sk_buff *skb,
32 struct ieee80211_rx_status *status,
32 u16 mpdu_seq_num, 33 u16 mpdu_seq_num,
33 int bar_req); 34 int bar_req);
34/* 35/*
@@ -1396,7 +1397,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1396 * mac80211. That also explains the __skb_push() 1397 * mac80211. That also explains the __skb_push()
1397 * below. 1398 * below.
1398 */ 1399 */
1399 align = (unsigned long)skb->data & 4; 1400 align = (unsigned long)skb->data & 3;
1400 if (align) { 1401 if (align) {
1401 if (WARN_ON(skb_headroom(skb) < 3)) { 1402 if (WARN_ON(skb_headroom(skb) < 3)) {
1402 dev_kfree_skb(skb); 1403 dev_kfree_skb(skb);
@@ -1688,7 +1689,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1688 /* manage reordering buffer according to requested */ 1689 /* manage reordering buffer according to requested */
1689 /* sequence number */ 1690 /* sequence number */
1690 rcu_read_lock(); 1691 rcu_read_lock();
1691 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, 1692 ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, NULL,
1692 start_seq_num, 1); 1693 start_seq_num, 1);
1693 rcu_read_unlock(); 1694 rcu_read_unlock();
1694 return RX_DROP_UNUSABLE; 1695 return RX_DROP_UNUSABLE;
@@ -2293,6 +2294,7 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
2293static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, 2294static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2294 struct tid_ampdu_rx *tid_agg_rx, 2295 struct tid_ampdu_rx *tid_agg_rx,
2295 struct sk_buff *skb, 2296 struct sk_buff *skb,
2297 struct ieee80211_rx_status *rxstatus,
2296 u16 mpdu_seq_num, 2298 u16 mpdu_seq_num,
2297 int bar_req) 2299 int bar_req)
2298{ 2300{
@@ -2374,6 +2376,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2374 2376
2375 /* put the frame in the reordering buffer */ 2377 /* put the frame in the reordering buffer */
2376 tid_agg_rx->reorder_buf[index] = skb; 2378 tid_agg_rx->reorder_buf[index] = skb;
2379 memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus,
2380 sizeof(*rxstatus));
2377 tid_agg_rx->stored_mpdu_num++; 2381 tid_agg_rx->stored_mpdu_num++;
2378 /* release the buffer until next missing frame */ 2382 /* release the buffer until next missing frame */
2379 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) 2383 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
@@ -2399,7 +2403,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
2399} 2403}
2400 2404
2401static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, 2405static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2402 struct sk_buff *skb) 2406 struct sk_buff *skb,
2407 struct ieee80211_rx_status *status)
2403{ 2408{
2404 struct ieee80211_hw *hw = &local->hw; 2409 struct ieee80211_hw *hw = &local->hw;
2405 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 2410 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -2448,7 +2453,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2448 2453
2449 /* according to mpdu sequence number deal with reordering buffer */ 2454 /* according to mpdu sequence number deal with reordering buffer */
2450 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; 2455 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
2451 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, 2456 ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, status,
2452 mpdu_seq_num, 0); 2457 mpdu_seq_num, 0);
2453 end_reorder: 2458 end_reorder:
2454 return ret; 2459 return ret;
@@ -2512,7 +2517,7 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2512 return; 2517 return;
2513 } 2518 }
2514 2519
2515 if (!ieee80211_rx_reorder_ampdu(local, skb)) 2520 if (!ieee80211_rx_reorder_ampdu(local, skb, status))
2516 __ieee80211_rx_handle_packet(hw, skb, status, rate); 2521 __ieee80211_rx_handle_packet(hw, skb, status, rate);
2517 2522
2518 rcu_read_unlock(); 2523 rcu_read_unlock();
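
Threading the rx status into ieee80211_sta_manage_reorder_buf() fixes frames parked in the A-MPDU reorder buffer: without the memcpy into skb->cb, a frame released from the buffer later would be processed with stale control-block contents instead of its own signal/rate/flags. The pattern, stripped down (the park/unpark names are mine):

    /* skb->cb is 48 bytes of per-skb scratch; the status must fit. */
    static void park_rx_status(struct sk_buff *skb,
                               const struct ieee80211_rx_status *rxstatus)
    {
            BUILD_BUG_ON(sizeof(*rxstatus) > sizeof(skb->cb));
            memcpy(skb->cb, rxstatus, sizeof(*rxstatus));
    }

    static struct ieee80211_rx_status *parked_rx_status(struct sk_buff *skb)
    {
            return (struct ieee80211_rx_status *)skb->cb;
    }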
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index deb4ecec122a..959aa8379ccf 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -417,6 +417,7 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
417{ 417{
418 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 418 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
419 struct ieee80211_channel* chan = local->hw.conf.channel; 419 struct ieee80211_channel* chan = local->hw.conf.channel;
420 bool reconf = false;
420 u32 reconf_flags = 0; 421 u32 reconf_flags = 0;
421 int new_power_level; 422 int new_power_level;
422 423
@@ -427,14 +428,38 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
427 if (!chan) 428 if (!chan)
428 return -EINVAL; 429 return -EINVAL;
429 430
430 if (data->txpower.fixed) 431 /* only change when not disabling */
431 new_power_level = min(data->txpower.value, chan->max_power); 432 if (!data->txpower.disabled) {
432 else /* Automatic power level setting */ 433 if (data->txpower.fixed) {
433 new_power_level = chan->max_power; 434 if (data->txpower.value < 0)
435 return -EINVAL;
436 new_power_level = data->txpower.value;
437 /*
438 * Debatable, but we cannot do a fixed power
439 * level above the regulatory constraint.
440 * Use "iwconfig wlan0 txpower 15dBm" instead.
441 */
442 if (new_power_level > chan->max_power)
443 return -EINVAL;
444 } else {
445 /*
446 * Automatic power level setting, max being the value
447 * passed in from userland.
448 */
449 if (data->txpower.value < 0)
450 new_power_level = -1;
451 else
452 new_power_level = data->txpower.value;
453 }
454
455 reconf = true;
434 456
435 local->user_power_level = new_power_level; 457 /*
436 if (local->hw.conf.power_level != new_power_level) 458 * ieee80211_hw_config() will limit to the channel's
437 reconf_flags |= IEEE80211_CONF_CHANGE_POWER; 459 * max power and possibly power constraint from AP.
460 */
461 local->user_power_level = new_power_level;
462 }
438 463
439 if (local->hw.conf.radio_enabled != !(data->txpower.disabled)) { 464 if (local->hw.conf.radio_enabled != !(data->txpower.disabled)) {
440 local->hw.conf.radio_enabled = !(data->txpower.disabled); 465 local->hw.conf.radio_enabled = !(data->txpower.disabled);
@@ -442,7 +467,7 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
442 ieee80211_led_radio(local, local->hw.conf.radio_enabled); 467 ieee80211_led_radio(local, local->hw.conf.radio_enabled);
443 } 468 }
444 469
445 if (reconf_flags) 470 if (reconf || reconf_flags)
446 ieee80211_hw_config(local, reconf_flags); 471 ieee80211_hw_config(local, reconf_flags);
447 472
448 return 0; 473 return 0;
@@ -530,7 +555,7 @@ static int ieee80211_ioctl_giwfrag(struct net_device *dev,
530 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 555 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
531 556
532 frag->value = local->fragmentation_threshold; 557 frag->value = local->fragmentation_threshold;
533 frag->disabled = (frag->value >= IEEE80211_MAX_RTS_THRESHOLD); 558 frag->disabled = (frag->value >= IEEE80211_MAX_FRAG_THRESHOLD);
534 frag->fixed = 1; 559 frag->fixed = 1;
535 560
536 return 0; 561 return 0;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 2329c5f50551..cb3ad741ebf8 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -275,6 +275,8 @@ config NF_CT_NETLINK
275 help 275 help
276 This option enables support for a netlink-based userspace interface 276 This option enables support for a netlink-based userspace interface
277 277
278endif # NF_CONNTRACK
279
278# transparent proxy support 280# transparent proxy support
279config NETFILTER_TPROXY 281config NETFILTER_TPROXY
280 tristate "Transparent proxying support (EXPERIMENTAL)" 282 tristate "Transparent proxying support (EXPERIMENTAL)"
@@ -290,8 +292,6 @@ config NETFILTER_TPROXY
290 292
291 To compile it as a module, choose M here. If unsure, say N. 293 To compile it as a module, choose M here. If unsure, say N.
292 294
293endif # NF_CONNTRACK
294
295config NETFILTER_XTABLES 295config NETFILTER_XTABLES
296 tristate "Netfilter Xtables support (required for ip_tables)" 296 tristate "Netfilter Xtables support (required for ip_tables)"
297 default m if NETFILTER_ADVANCED=n 297 default m if NETFILTER_ADVANCED=n
@@ -837,6 +837,7 @@ config NETFILTER_XT_MATCH_SOCKET
837 depends on NETFILTER_TPROXY 837 depends on NETFILTER_TPROXY
838 depends on NETFILTER_XTABLES 838 depends on NETFILTER_XTABLES
839 depends on NETFILTER_ADVANCED 839 depends on NETFILTER_ADVANCED
840 depends on !NF_CONNTRACK || NF_CONNTRACK
840 select NF_DEFRAG_IPV4 841 select NF_DEFRAG_IPV4
841 help 842 help
842 This option adds a `socket' match, which can be used to match 843 This option adds a `socket' match, which can be used to match
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 30b8e9009f99..0fa5a422959f 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -176,7 +176,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
176 } 176 }
177 177
178 /* Get rid of expecteds, set helpers to NULL. */ 178 /* Get rid of expecteds, set helpers to NULL. */
179 hlist_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode) 179 hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
180 unhelp(h, me); 180 unhelp(h, me);
181 for (i = 0; i < nf_conntrack_htable_size; i++) { 181 for (i = 0; i < nf_conntrack_htable_size; i++) {
182 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) 182 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index c6439c77953c..f13fc57e1ecb 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -512,7 +512,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
512 512
513 skb = ctnetlink_alloc_skb(tuple(ct, IP_CT_DIR_ORIGINAL), GFP_ATOMIC); 513 skb = ctnetlink_alloc_skb(tuple(ct, IP_CT_DIR_ORIGINAL), GFP_ATOMIC);
514 if (!skb) 514 if (!skb)
515 return NOTIFY_DONE; 515 goto errout;
516 516
517 b = skb->tail; 517 b = skb->tail;
518 518
@@ -591,8 +591,9 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
591nla_put_failure: 591nla_put_failure:
592 rcu_read_unlock(); 592 rcu_read_unlock();
593nlmsg_failure: 593nlmsg_failure:
594 nfnetlink_set_err(0, group, -ENOBUFS);
595 kfree_skb(skb); 594 kfree_skb(skb);
595errout:
596 nfnetlink_set_err(0, group, -ENOBUFS);
596 return NOTIFY_DONE; 597 return NOTIFY_DONE;
597} 598}
598#endif /* CONFIG_NF_CONNTRACK_EVENTS */ 599#endif /* CONFIG_NF_CONNTRACK_EVENTS */
@@ -987,7 +988,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
987{ 988{
988 struct nf_conntrack_helper *helper; 989 struct nf_conntrack_helper *helper;
989 struct nf_conn_help *help = nfct_help(ct); 990 struct nf_conn_help *help = nfct_help(ct);
990 char *helpname; 991 char *helpname = NULL;
991 int err; 992 int err;
992 993
993 /* don't change helper of sibling connections */ 994 /* don't change helper of sibling connections */
@@ -1230,7 +1231,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
1230 1231
1231 rcu_read_lock(); 1232 rcu_read_lock();
1232 if (cda[CTA_HELP]) { 1233 if (cda[CTA_HELP]) {
1233 char *helpname; 1234 char *helpname = NULL;
1234 1235
1235 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname); 1236 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
1236 if (err < 0) 1237 if (err < 0)
@@ -1564,7 +1565,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
1564 1565
1565 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); 1566 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
1566 if (!skb) 1567 if (!skb)
1567 return NOTIFY_DONE; 1568 goto errout;
1568 1569
1569 b = skb->tail; 1570 b = skb->tail;
1570 1571
@@ -1589,8 +1590,9 @@ static int ctnetlink_expect_event(struct notifier_block *this,
1589nla_put_failure: 1590nla_put_failure:
1590 rcu_read_unlock(); 1591 rcu_read_unlock();
1591nlmsg_failure: 1592nlmsg_failure:
1592 nfnetlink_set_err(0, 0, -ENOBUFS);
1593 kfree_skb(skb); 1593 kfree_skb(skb);
1594errout:
1595 nfnetlink_set_err(0, 0, -ENOBUFS);
1594 return NOTIFY_DONE; 1596 return NOTIFY_DONE;
1595} 1597}
1596#endif 1598#endif
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 50dac8dbe7d8..8e757dd53396 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -633,6 +633,8 @@ static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
633 if (!nest_parms) 633 if (!nest_parms)
634 goto nla_put_failure; 634 goto nla_put_failure;
635 NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state); 635 NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state);
636 NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE,
637 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]);
636 nla_nest_end(skb, nest_parms); 638 nla_nest_end(skb, nest_parms);
637 read_unlock_bh(&dccp_lock); 639 read_unlock_bh(&dccp_lock);
638 return 0; 640 return 0;
@@ -644,6 +646,7 @@ nla_put_failure:
644 646
645static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = { 647static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = {
646 [CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 }, 648 [CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 },
649 [CTA_PROTOINFO_DCCP_ROLE] = { .type = NLA_U8 },
647}; 650};
648 651
649static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) 652static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
@@ -661,11 +664,21 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
661 return err; 664 return err;
662 665
663 if (!tb[CTA_PROTOINFO_DCCP_STATE] || 666 if (!tb[CTA_PROTOINFO_DCCP_STATE] ||
664 nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE) 667 !tb[CTA_PROTOINFO_DCCP_ROLE] ||
668 nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) > CT_DCCP_ROLE_MAX ||
669 nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE) {
665 return -EINVAL; 670 return -EINVAL;
671 }
666 672
667 write_lock_bh(&dccp_lock); 673 write_lock_bh(&dccp_lock);
668 ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]); 674 ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]);
675 if (nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) == CT_DCCP_ROLE_CLIENT) {
676 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
677 ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
678 } else {
679 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_SERVER;
680 ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_CLIENT;
681 }
669 write_unlock_bh(&dccp_lock); 682 write_unlock_bh(&dccp_lock);
670 return 0; 683 return 0;
671} 684}
@@ -777,6 +790,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
777 .print_conntrack = dccp_print_conntrack, 790 .print_conntrack = dccp_print_conntrack,
778#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 791#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
779 .to_nlattr = dccp_to_nlattr, 792 .to_nlattr = dccp_to_nlattr,
793 .nlattr_size = dccp_nlattr_size,
780 .from_nlattr = nlattr_to_dccp, 794 .from_nlattr = nlattr_to_dccp,
781 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 795 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
782 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 796 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 4614696c1b88..0badedc542d3 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -204,6 +204,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
204 .error = udplite_error, 204 .error = udplite_error,
205#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 205#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
206 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, 206 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
207 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
207 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 208 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
208 .nla_policy = nf_ct_port_nla_policy, 209 .nla_policy = nf_ct_port_nla_policy,
209#endif 210#endif
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 8bb998fe098b..beb37311e1a5 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -36,10 +36,14 @@ static struct nf_logger *__find_logger(int pf, const char *str_logger)
36int nf_log_register(u_int8_t pf, struct nf_logger *logger) 36int nf_log_register(u_int8_t pf, struct nf_logger *logger)
37{ 37{
38 const struct nf_logger *llog; 38 const struct nf_logger *llog;
39 int i;
39 40
40 if (pf >= ARRAY_SIZE(nf_loggers)) 41 if (pf >= ARRAY_SIZE(nf_loggers))
41 return -EINVAL; 42 return -EINVAL;
42 43
44 for (i = 0; i < ARRAY_SIZE(logger->list); i++)
45 INIT_LIST_HEAD(&logger->list[i]);
46
43 mutex_lock(&nf_log_mutex); 47 mutex_lock(&nf_log_mutex);
44 48
45 if (pf == NFPROTO_UNSPEC) { 49 if (pf == NFPROTO_UNSPEC) {
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 2785d66a7e38..b8ab37ad7ed5 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -203,7 +203,7 @@ static int __init nfnetlink_init(void)
203 nfnetlink_rcv, NULL, THIS_MODULE); 203 nfnetlink_rcv, NULL, THIS_MODULE);
204 if (!nfnl) { 204 if (!nfnl) {
205 printk(KERN_ERR "cannot initialize nfnetlink!\n"); 205 printk(KERN_ERR "cannot initialize nfnetlink!\n");
206 return -1; 206 return -ENOMEM;
207 } 207 }
208 208
209 return 0; 209 return 0;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 509a95621f9f..150e5cf62f85 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -625,20 +625,6 @@ void xt_free_table_info(struct xt_table_info *info)
 }
 EXPORT_SYMBOL(xt_free_table_info);
 
-void xt_table_entry_swap_rcu(struct xt_table_info *oldinfo,
-			     struct xt_table_info *newinfo)
-{
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu) {
-		void *p = oldinfo->entries[cpu];
-		rcu_assign_pointer(oldinfo->entries[cpu], newinfo->entries[cpu]);
-		newinfo->entries[cpu] = p;
-	}
-
-}
-EXPORT_SYMBOL_GPL(xt_table_entry_swap_rcu);
-
 /* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
 struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 				    const char *name)
@@ -676,32 +662,43 @@ void xt_compat_unlock(u_int8_t af)
 EXPORT_SYMBOL_GPL(xt_compat_unlock);
 #endif
 
+DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
+EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);
+
+
 struct xt_table_info *
 xt_replace_table(struct xt_table *table,
 		 unsigned int num_counters,
 		 struct xt_table_info *newinfo,
 		 int *error)
 {
-	struct xt_table_info *oldinfo, *private;
+	struct xt_table_info *private;
 
 	/* Do the substitution. */
-	mutex_lock(&table->lock);
+	local_bh_disable();
 	private = table->private;
+
 	/* Check inside lock: is the old number correct? */
 	if (num_counters != private->number) {
 		duprintf("num_counters != table->private->number (%u/%u)\n",
 			 num_counters, private->number);
-		mutex_unlock(&table->lock);
+		local_bh_enable();
 		*error = -EAGAIN;
 		return NULL;
 	}
-	oldinfo = private;
-	rcu_assign_pointer(table->private, newinfo);
-	newinfo->initial_entries = oldinfo->initial_entries;
-	mutex_unlock(&table->lock);
 
-	synchronize_net();
-	return oldinfo;
+	table->private = newinfo;
+	newinfo->initial_entries = private->initial_entries;
+
+	/*
+	 * Even though table entries have now been swapped, other CPU's
+	 * may still be using the old entries. This is okay, because
+	 * resynchronization happens because of the locking done
+	 * during the get_counters() routine.
+	 */
+	local_bh_enable();
+
+	return private;
 }
 EXPORT_SYMBOL_GPL(xt_replace_table);
 
@@ -734,7 +731,6 @@ struct xt_table *xt_register_table(struct net *net, struct xt_table *table,
 
 	/* Simplifies replace_table code. */
 	table->private = bootstrap;
-	mutex_init(&table->lock);
 
 	if (!xt_replace_table(table, 0, newinfo, &ret))
 		goto unlock;
@@ -1147,7 +1143,14 @@ static struct pernet_operations xt_net_ops = {
 
 static int __init xt_init(void)
 {
-	int i, rv;
+	unsigned int i;
+	int rv;
+
+	for_each_possible_cpu(i) {
+		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
+		spin_lock_init(&lock->lock);
+		lock->readers = 0;
+	}
 
 	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
 	if (!xt)
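Taken together, the three x_tables hunks replace the mutex-plus-RCU table swap (xt_table_entry_swap_rcu, table->lock) with the per-CPU xt_info_locks array initialized in xt_init(): packet-processing paths take only their local CPU's lock, and xt_replace_table() can publish newinfo under local_bh_disable() because, per the comment it now carries, stragglers on other CPUs are flushed out when the counters are collected under those same locks. A rough sketch of the reader side this enables, built only from the fields visible in the hunks (the helper names here are illustrative, not the kernel's):

	/* Sketch: recursion-safe per-CPU read lock. lock->readers is the
	 * field zeroed in xt_init() above. */
	static void example_info_rdlock_bh(void)
	{
		struct xt_info_lock *lock;

		local_bh_disable();
		lock = &__get_cpu_var(xt_info_locks);
		if (likely(!lock->readers++))
			spin_lock(&lock->lock);	/* first entry on this CPU */
	}

	static void example_info_rdunlock_bh(void)
	{
		struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);

		if (likely(!--lock->readers))
			spin_unlock(&lock->lock);
		local_bh_enable();
	}

A writer (table replacement or counter collection) would then take every CPU's lock in turn, which is what guarantees all readers of the old table have drained.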
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 791e030ea903..eb0ceb846527 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -474,7 +474,7 @@ static ssize_t recent_old_proc_write(struct file *file,
 	struct recent_table *t = pde->data;
 	struct recent_entry *e;
 	char buf[sizeof("+255.255.255.255")], *c = buf;
-	__be32 addr;
+	union nf_inet_addr addr = {};
 	int add;
 
 	if (size > sizeof(buf))
@@ -506,14 +506,13 @@ static ssize_t recent_old_proc_write(struct file *file,
 		add = 1;
 		break;
 	}
-	addr = in_aton(c);
+	addr.ip = in_aton(c);
 
 	spin_lock_bh(&recent_lock);
-	e = recent_entry_lookup(t, (const void *)&addr, NFPROTO_IPV4, 0);
+	e = recent_entry_lookup(t, &addr, NFPROTO_IPV4, 0);
 	if (e == NULL) {
 		if (add)
-			recent_entry_init(t, (const void *)&addr,
-					  NFPROTO_IPV4, 0);
+			recent_entry_init(t, &addr, NFPROTO_IPV4, 0);
 	} else {
 		if (add)
 			recent_entry_update(t, e);
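The xt_recent hunks swap the old /proc handler's bare __be32 for union nf_inet_addr, the type recent_entry_lookup() and recent_entry_init() actually take; the `= {}` initializer matters because the union is wide enough for IPv6 and comparisons may look at the full width. Roughly, the union overlays the two address forms like this (a sketch of the shape, not the verbatim kernel definition):

	/* Sketch: one blob of storage, viewable as v4 or v6. */
	union example_inet_addr {
		__be32	ip;	/* IPv4; filled here via in_aton(c) */
		__be32	ip6[4];	/* IPv6; stays zeroed for v4 entries */
	};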
diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c
index 834c6eb7f484..c0519139679e 100644
--- a/net/netlabel/netlabel_addrlist.c
+++ b/net/netlabel/netlabel_addrlist.c
@@ -256,13 +256,11 @@ struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask,
 {
 	struct netlbl_af4list *entry;
 
-	entry = netlbl_af4list_search(addr, head);
-	if (entry != NULL && entry->addr == addr && entry->mask == mask) {
-		netlbl_af4list_remove_entry(entry);
-		return entry;
-	}
-
-	return NULL;
+	entry = netlbl_af4list_search_exact(addr, mask, head);
+	if (entry == NULL)
+		return NULL;
+	netlbl_af4list_remove_entry(entry);
+	return entry;
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -299,15 +297,11 @@ struct netlbl_af6list *netlbl_af6list_remove(const struct in6_addr *addr,
299{ 297{
300 struct netlbl_af6list *entry; 298 struct netlbl_af6list *entry;
301 299
302 entry = netlbl_af6list_search(addr, head); 300 entry = netlbl_af6list_search_exact(addr, mask, head);
303 if (entry != NULL && 301 if (entry == NULL)
304 ipv6_addr_equal(&entry->addr, addr) && 302 return NULL;
305 ipv6_addr_equal(&entry->mask, mask)) { 303 netlbl_af6list_remove_entry(entry);
306 netlbl_af6list_remove_entry(entry); 304 return entry;
307 return entry;
308 }
309
310 return NULL;
311} 305}
312#endif /* IPv6 */ 306#endif /* IPv6 */
313 307
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 4e705f87969f..3be0e016ab7d 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1084,8 +1084,10 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	/* Build a packet - the conventional user limit is 236 bytes. We can
 	   do ludicrously large NetROM frames but must not overflow */
-	if (len > 65536)
-		return -EMSGSIZE;
+	if (len > 65536) {
+		err = -EMSGSIZE;
+		goto out;
+	}
 
 	SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
 	size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 74776de523ec..f546e81acc45 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1758,8 +1758,9 @@ static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
 
 static inline char *alloc_one_pg_vec_page(unsigned long order)
 {
-	return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
-					 order);
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;
+
+	return (char *) __get_free_pages(gfp_flags, order);
 }
 
 static char **alloc_pg_vec(struct tpacket_req *req, int order)
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 619f0a30a4e5..71794449ca4e 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -638,7 +638,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *,
 void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
 
 /* stats.c */
-DECLARE_PER_CPU(struct rds_statistics, rds_stats);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
 #define rds_stats_inc_which(which, member) do {		\
 	per_cpu(which, get_cpu()).member++;		\
 	put_cpu();					\
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 0f36e8d59b29..877a7f65f707 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1072,10 +1072,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
 	unsigned char *asmptr;
 	int n, size, qbit = 0;
 
-	/* ROSE empty frame has no meaning : don't send */
-	if (len == 0)
-		return 0;
-
 	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
 		return -EINVAL;
 
@@ -1273,12 +1269,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
 	skb_reset_transport_header(skb);
 	copied = skb->len;
 
-	/* ROSE empty frame has no meaning : ignore it */
-	if (copied == 0) {
-		skb_free_datagram(sk, skb);
-		return copied;
-	}
-
 	if (copied > size) {
 		copied = size;
 		msg->msg_flags |= MSG_TRUNC;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 173fcc4b050d..0759f32e9dca 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -254,7 +254,7 @@ replay:
 	}
 	tp->ops = tp_ops;
 	tp->protocol = protocol;
-	tp->prio = nprio ? : tcf_auto_prio(*back);
+	tp->prio = nprio ? : TC_H_MAJ(tcf_auto_prio(*back));
 	tp->q = q;
 	tp->classify = tp_ops->classify;
 	tp->classid = parent;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 72cf86e3c090..fad596bf32d7 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -176,8 +176,10 @@ META_COLLECTOR(var_dev)
 
 META_COLLECTOR(int_vlan_tag)
 {
-	unsigned short uninitialized_var(tag);
-	if (vlan_get_tag(skb, &tag) < 0)
+	unsigned short tag;
+
+	tag = vlan_tx_tag_get(skb);
+	if (!tag && __vlan_get_tag(skb, &tag))
 		*err = -1;
 	else
 		dst->value = tag;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index d876b8734848..2b88295cb7b7 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -280,6 +280,14 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 	if (unlikely(!skb))
 		return NULL;
 
+#ifdef CONFIG_NET_CLS_ACT
+	/*
+	 * If it's at ingress let's pretend the delay is
+	 * from the network (tstamp will be updated).
+	 */
+	if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
+		skb->tstamp.tv64 = 0;
+#endif
 	pr_debug("netem_dequeue: return skb=%p\n", skb);
 	sch->q.qlen--;
 	return skb;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index a0bfe53f1621..06ca058572f2 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -672,10 +672,8 @@ xprt_init_autodisconnect(unsigned long data)
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
 		goto out_abort;
 	spin_unlock(&xprt->transport_lock);
-	if (xprt_connecting(xprt))
-		xprt_release_write(xprt, NULL);
-	else
-		queue_work(rpciod_workqueue, &xprt->task_cleanup);
+	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+	queue_work(rpciod_workqueue, &xprt->task_cleanup);
 	return;
 out_abort:
 	spin_unlock(&xprt->transport_lock);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index d40ff50887aa..e18596146013 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -807,6 +807,9 @@ static void xs_reset_transport(struct sock_xprt *transport)
  *
  * This is used when all requests are complete; ie, no DRC state remains
  * on the server we want to save.
+ *
+ * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
+ * xs_reset_transport() zeroing the socket from underneath a writer.
  */
 static void xs_close(struct rpc_xprt *xprt)
 {
@@ -824,6 +827,14 @@ static void xs_close(struct rpc_xprt *xprt)
 	xprt_disconnect_done(xprt);
 }
 
+static void xs_tcp_close(struct rpc_xprt *xprt)
+{
+	if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state))
+		xs_close(xprt);
+	else
+		xs_tcp_shutdown(xprt);
+}
+
 /**
  * xs_destroy - prepare to shutdown a transport
  * @xprt: doomed transport
@@ -1772,6 +1783,15 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
 		xprt, -status, xprt_connected(xprt),
 		sock->sk->sk_state);
 	switch (status) {
+	default:
+		printk("%s: connect returned unhandled error %d\n",
+			__func__, status);
+	case -EADDRNOTAVAIL:
+		/* We're probably in TIME_WAIT. Get rid of existing socket,
+		 * and retry
+		 */
+		set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+		xprt_force_disconnect(xprt);
 	case -ECONNREFUSED:
 	case -ECONNRESET:
 	case -ENETUNREACH:
@@ -1782,10 +1802,6 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
 		xprt_clear_connecting(xprt);
 		return;
 	}
-	/* get rid of existing socket, and retry */
-	xs_tcp_shutdown(xprt);
-	printk("%s: connect returned unhandled error %d\n",
-		__func__, status);
 out_eagain:
 	status = -EAGAIN;
 out:
@@ -1994,7 +2010,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
 	.buf_free		= rpc_free,
 	.send_request		= xs_tcp_send_request,
 	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
-	.close			= xs_tcp_shutdown,
+	.close			= xs_tcp_close,
 	.destroy		= xs_destroy,
 	.print_stats		= xs_tcp_print_stats,
 };
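Across the sunrpc hunks, XPRT_CONNECTION_CLOSE acts as a one-shot request for a hard close: xprt_init_autodisconnect() and the -EADDRNOTAVAIL path set it before queueing cleanup or forcing a disconnect, and the new xs_tcp_close() (now wired into xs_tcp_ops.close above) atomically consumes it to pick between a full xs_close() and a graceful xs_tcp_shutdown(). The handoff idiom in isolation, reduced to its two halves:

	/* Sketch of the one-shot flag handoff used by these hunks. */
	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);	/* requester side */

	/* consumer side: claims the request exactly once */
	if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state))
		xs_close(xprt);		/* hard close; caller holds XPRT_LOCKED */
	else
		xs_tcp_shutdown(xprt);	/* ordinary graceful shutdown */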
diff --git a/net/wireless/core.h b/net/wireless/core.h
index d43daa236ef9..0a592e4295f0 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -90,7 +90,7 @@ struct cfg80211_internal_bss {
 	struct rb_node rbn;
 	unsigned long ts;
 	struct kref ref;
-	bool hold;
+	bool hold, ies_allocated;
 
 	/* must be last because of priv member */
 	struct cfg80211_bss pub;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 353e1a4ece83..2456e4ee445e 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3334,7 +3334,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
 	struct sk_buff *msg;
 	void *hdr;
 
-	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
 	if (!msg)
 		return;
 
@@ -3353,7 +3353,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
 		return;
 	}
 
-	genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL);
+	genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC);
 	return;
 
  nla_put_failure:
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 6327e1617acb..6c1993d99902 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2095,11 +2095,12 @@ int set_regdom(const struct ieee80211_regdomain *rd)
 /* Caller must hold cfg80211_mutex */
 void reg_device_remove(struct wiphy *wiphy)
 {
-	struct wiphy *request_wiphy;
+	struct wiphy *request_wiphy = NULL;
 
 	assert_cfg80211_lock();
 
-	request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
+	if (last_request)
+		request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
 
 	kfree(wiphy->regd);
 	if (!last_request || !request_wiphy)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 2a00e362f5fe..2ae65b39b529 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -58,6 +58,10 @@ static void bss_release(struct kref *ref)
 	bss = container_of(ref, struct cfg80211_internal_bss, ref);
 	if (bss->pub.free_priv)
 		bss->pub.free_priv(&bss->pub);
+
+	if (bss->ies_allocated)
+		kfree(bss->pub.information_elements);
+
 	kfree(bss);
 }
 
@@ -360,19 +364,41 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 
 	found = rb_find_bss(dev, res);
 
-	if (found && overwrite) {
-		list_replace(&found->list, &res->list);
-		rb_replace_node(&found->rbn, &res->rbn,
-				&dev->bss_tree);
-		kref_put(&found->ref, bss_release);
-		found = res;
-	} else if (found) {
+	if (found) {
 		kref_get(&found->ref);
 		found->pub.beacon_interval = res->pub.beacon_interval;
 		found->pub.tsf = res->pub.tsf;
 		found->pub.signal = res->pub.signal;
 		found->pub.capability = res->pub.capability;
 		found->ts = res->ts;
+
+		/* overwrite IEs */
+		if (overwrite) {
+			size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
+			size_t ielen = res->pub.len_information_elements;
+
+			if (ksize(found) >= used + ielen) {
+				memcpy(found->pub.information_elements,
+				       res->pub.information_elements, ielen);
+				found->pub.len_information_elements = ielen;
+			} else {
+				u8 *ies = found->pub.information_elements;
+
+				if (found->ies_allocated) {
+					if (ksize(ies) < ielen)
+						ies = krealloc(ies, ielen,
+							       GFP_ATOMIC);
+				} else
+					ies = kmalloc(ielen, GFP_ATOMIC);
+
+				if (ies) {
+					memcpy(ies, res->pub.information_elements, ielen);
+					found->ies_allocated = true;
+					found->pub.information_elements = ies;
+				}
+			}
+		}
+
 		kref_put(&res->ref, bss_release);
 	} else {
 		/* this "consumes" the reference */
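The cfg80211_bss_update rewrite above updates a known BSS in place instead of splicing in the new node, and when overwriting IEs it reuses storage aggressively: first the slack the slab allocator already rounded the original allocation up to (probed with ksize()), then an existing separately allocated IE buffer grown via krealloc(), and only as a last resort a fresh kmalloc(), with ies_allocated recording whether bss_release() must kfree() the buffer. The ksize() reuse pattern in isolation (a sketch; the sizes are arbitrary):

	/* Sketch: check how much the allocator really handed out before
	 * paying for a bigger buffer. */
	size_t need = 48;
	u8 *buf = kmalloc(40, GFP_ATOMIC);	/* slab may round up past 40 */

	if (buf && ksize(buf) < need) {
		u8 *bigger = krealloc(buf, need, GFP_ATOMIC);

		if (bigger)
			buf = bigger;	/* old contents preserved */
	}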
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 82271720d970..5f1f86565f16 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -794,7 +794,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
 {
 	static xfrm_address_t saddr_wildcard = { };
 	struct net *net = xp_net(pol);
-	unsigned int h;
+	unsigned int h, h_wildcard;
 	struct hlist_node *entry;
 	struct xfrm_state *x, *x0, *to_put;
 	int acquire_in_progress = 0;
@@ -819,8 +819,8 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
 	if (best)
 		goto found;
 
-	h = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
-	hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
+	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
+	hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
 		if (x->props.family == family &&
 		    x->props.reqid == tmpl->reqid &&
 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&