author		Takashi Iwai <tiwai@suse.de>	2009-05-12 05:57:09 -0400
committer	Takashi Iwai <tiwai@suse.de>	2009-05-12 05:57:09 -0400
commit		ddc4097b77bbb227851a44287acb2fb8a9896cc1 (patch)
tree		319d098879bed7c45543d983de8becd509701b22 /net
parent		bec4c99e8637b5b8bd4b0513eacb51da25885e3b (diff)
parent		ae31c1fbdbb18d917b0a1139497c2dbd35886989 (diff)
Merge branch 'topic/drvdata-fix' into topic/asoc
Diffstat (limited to 'net')
29 files changed, 397 insertions, 466 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 2b7390e377b3..d1e10546eb85 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -492,6 +492,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 				continue;
 
 			dev_change_flags(vlandev, flgs & ~IFF_UP);
+			vlan_transfer_operstate(dev, vlandev);
 		}
 		break;
 
@@ -507,6 +508,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 				continue;
 
 			dev_change_flags(vlandev, flgs | IFF_UP);
+			vlan_transfer_operstate(dev, vlandev);
 		}
 		break;
 
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 6b0921364014..b4b9068e55a7 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -462,6 +462,7 @@ static int vlan_dev_open(struct net_device *dev)
 	if (vlan->flags & VLAN_FLAG_GVRP)
 		vlan_gvrp_request_join(dev);
 
+	netif_carrier_on(dev);
 	return 0;
 
 clear_allmulti:
@@ -471,6 +472,7 @@ del_unicast:
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
 		dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN);
 out:
+	netif_carrier_off(dev);
 	return err;
 }
 
@@ -492,6 +494,7 @@ static int vlan_dev_stop(struct net_device *dev)
 	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
 		dev_unicast_delete(real_dev, dev->dev_addr, dev->addr_len);
 
+	netif_carrier_off(dev);
 	return 0;
 }
 
@@ -612,6 +615,8 @@ static int vlan_dev_init(struct net_device *dev)
 	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
 	int subclass = 0;
 
+	netif_carrier_off(dev);
+
 	/* IFF_BROADCAST|IFF_MULTICAST; ??? */
 	dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
 	dev->iflink = real_dev->ifindex;
diff --git a/net/9p/client.c b/net/9p/client.c
index 1eb580c38fbb..dd43a8289b0d 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -203,7 +203,6 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
 	p9pdu_reset(req->tc);
 	p9pdu_reset(req->rc);
 
-	req->flush_tag = 0;
 	req->tc->tag = tag-1;
 	req->status = REQ_STATUS_ALLOC;
 
@@ -324,35 +323,9 @@ static void p9_free_req(struct p9_client *c, struct p9_req_t *r)
  */
 void p9_client_cb(struct p9_client *c, struct p9_req_t *req)
 {
-	struct p9_req_t *other_req;
-	unsigned long flags;
-
 	P9_DPRINTK(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);
-
-	if (req->status == REQ_STATUS_ERROR)
-		wake_up(req->wq);
-
-	if (req->flush_tag) { /* flush receive path */
-		P9_DPRINTK(P9_DEBUG_9P, "<<< RFLUSH %d\n", req->tc->tag);
-		spin_lock_irqsave(&c->lock, flags);
-		other_req = p9_tag_lookup(c, req->flush_tag);
-		if (other_req->status != REQ_STATUS_FLSH) /* stale flush */
-			spin_unlock_irqrestore(&c->lock, flags);
-		else {
-			other_req->status = REQ_STATUS_FLSHD;
-			spin_unlock_irqrestore(&c->lock, flags);
-			wake_up(other_req->wq);
-		}
-		p9_free_req(c, req);
-	} else { /* normal receive path */
-		P9_DPRINTK(P9_DEBUG_MUX, "normal: tag %d\n", req->tc->tag);
-		spin_lock_irqsave(&c->lock, flags);
-		if (req->status != REQ_STATUS_FLSHD)
-			req->status = REQ_STATUS_RCVD;
-		spin_unlock_irqrestore(&c->lock, flags);
-		wake_up(req->wq);
-		P9_DPRINTK(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
-	}
+	wake_up(req->wq);
+	P9_DPRINTK(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
 }
 EXPORT_SYMBOL(p9_client_cb);
 
@@ -486,9 +459,15 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	req->flush_tag = oldtag;
 
-	/* we don't free anything here because RPC isn't complete */
+	/* if we haven't received a response for oldreq,
+	   remove it from the list. */
+	spin_lock(&c->lock);
+	if (oldreq->status == REQ_STATUS_FLSH)
+		list_del(&oldreq->req_list);
+	spin_unlock(&c->lock);
+
+	p9_free_req(c, req);
 	return 0;
 }
 
@@ -509,7 +488,6 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 	struct p9_req_t *req;
 	unsigned long flags;
 	int sigpending;
-	int flushed = 0;
 
 	P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);
 
@@ -546,42 +524,28 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 		goto reterr;
 	}
 
-	/* if it was a flush we just transmitted, return our tag */
-	if (type == P9_TFLUSH)
-		return req;
-again:
 	P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d\n", req->wq, tag);
 	err = wait_event_interruptible(*req->wq,
 				       req->status >= REQ_STATUS_RCVD);
-	P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d (flushed=%d)\n",
-		   req->wq, tag, err, flushed);
+	P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d\n",
+		   req->wq, tag, err);
 
 	if (req->status == REQ_STATUS_ERROR) {
 		P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
 		err = req->t_err;
-	} else if (err == -ERESTARTSYS && flushed) {
-		P9_DPRINTK(P9_DEBUG_MUX, "flushed - going again\n");
-		goto again;
-	} else if (req->status == REQ_STATUS_FLSHD) {
-		P9_DPRINTK(P9_DEBUG_MUX, "flushed - erestartsys\n");
-		err = -ERESTARTSYS;
 	}
 
-	if ((err == -ERESTARTSYS) && (c->status == Connected) && (!flushed)) {
+	if ((err == -ERESTARTSYS) && (c->status == Connected)) {
 		P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
-		spin_lock_irqsave(&c->lock, flags);
-		if (req->status == REQ_STATUS_SENT)
-			req->status = REQ_STATUS_FLSH;
-		spin_unlock_irqrestore(&c->lock, flags);
 		sigpending = 1;
-		flushed = 1;
 		clear_thread_flag(TIF_SIGPENDING);
 
-		if (c->trans_mod->cancel(c, req)) {
-			err = p9_client_flush(c, req);
-			if (err == 0)
-				goto again;
-		}
+		if (c->trans_mod->cancel(c, req))
+			p9_client_flush(c, req);
+
+		/* if we received the response anyway, don't signal error */
+		if (req->status == REQ_STATUS_RCVD)
+			err = 0;
 	}
 
 	if (sigpending) {
@@ -1244,19 +1208,53 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
 		ret->name, ret->uid, ret->gid, ret->muid, ret->extension,
 		ret->n_uid, ret->n_gid, ret->n_muid);
 
+	p9_free_req(clnt, req);
+	return ret;
+
 free_and_error:
 	p9_free_req(clnt, req);
 error:
-	return ret;
+	kfree(ret);
+	return ERR_PTR(err);
 }
 EXPORT_SYMBOL(p9_client_stat);
 
+static int p9_client_statsize(struct p9_wstat *wst, int optional)
+{
+	int ret;
+
+	/* size[2] type[2] dev[4] qid[13] */
+	/* mode[4] atime[4] mtime[4] length[8]*/
+	/* name[s] uid[s] gid[s] muid[s] */
+	ret = 2+2+4+13+4+4+4+8+2+2+2+2;
+
+	if (wst->name)
+		ret += strlen(wst->name);
+	if (wst->uid)
+		ret += strlen(wst->uid);
+	if (wst->gid)
+		ret += strlen(wst->gid);
+	if (wst->muid)
+		ret += strlen(wst->muid);
+
+	if (optional) {
+		ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */
+		if (wst->extension)
+			ret += strlen(wst->extension);
+	}
+
+	return ret;
+}
+
 int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
 {
 	int err;
 	struct p9_req_t *req;
 	struct p9_client *clnt;
 
+	err = 0;
+	clnt = fid->clnt;
+	wst->size = p9_client_statsize(wst, clnt->dotu);
 	P9_DPRINTK(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid);
 	P9_DPRINTK(P9_DEBUG_9P,
 		" sz=%x type=%x dev=%x qid=%x.%llx.%x\n"
@@ -1268,10 +1266,8 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
 		wst->atime, wst->mtime, (unsigned long long)wst->length,
 		wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
 		wst->n_uid, wst->n_gid, wst->n_muid);
-	err = 0;
-	clnt = fid->clnt;
 
-	req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, 0, wst);
+	req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst);
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
 		goto error;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index c613ed08a5ee..a2a1814c7a8d 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -213,8 +213,8 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
 	spin_unlock_irqrestore(&m->client->lock, flags);
 
 	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
-		list_del(&req->req_list);
 		P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req);
+		list_del(&req->req_list);
 		p9_client_cb(m->client, req);
 	}
 }
@@ -336,7 +336,8 @@ static void p9_read_work(struct work_struct *work)
 			"mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
 
 		m->req = p9_tag_lookup(m->client, tag);
-		if (!m->req) {
+		if (!m->req || (m->req->status != REQ_STATUS_SENT &&
+					m->req->status != REQ_STATUS_FLSH)) {
 			P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
 				 tag);
 			err = -EIO;
@@ -361,10 +362,11 @@
 		if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
 			P9_DPRINTK(P9_DEBUG_TRANS, "got new packet\n");
 			spin_lock(&m->client->lock);
+			if (m->req->status != REQ_STATUS_ERROR)
+				m->req->status = REQ_STATUS_RCVD;
 			list_del(&m->req->req_list);
 			spin_unlock(&m->client->lock);
 			p9_client_cb(m->client, m->req);
-
 			m->rbuf = NULL;
 			m->rpos = 0;
 			m->rsize = 0;
@@ -454,6 +456,7 @@ static void p9_write_work(struct work_struct *work)
 		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
 			       req_list);
 		req->status = REQ_STATUS_SENT;
+		P9_DPRINTK(P9_DEBUG_TRANS, "move req %p\n", req);
 		list_move_tail(&req->req_list, &m->req_list);
 
 		m->wbuf = req->tc->sdata;
@@ -683,12 +686,13 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
 	P9_DPRINTK(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
 
 	spin_lock(&client->lock);
-	list_del(&req->req_list);
 
 	if (req->status == REQ_STATUS_UNSENT) {
+		list_del(&req->req_list);
 		req->status = REQ_STATUS_FLSHD;
 		ret = 0;
-	}
+	} else if (req->status == REQ_STATUS_SENT)
+		req->status = REQ_STATUS_FLSH;
 
 	spin_unlock(&client->lock);
 
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 7fa0eb20b2f6..ac4990041ebb 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -295,6 +295,7 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
 		goto err_out;
 
 	req->rc = c->rc;
+	req->status = REQ_STATUS_RCVD;
 	p9_client_cb(client, req);
 
 	return;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 2d7781ec663b..bb8579a141a8 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -134,6 +134,7 @@ static void req_done(struct virtqueue *vq)
 		P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
 		P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
 		req = p9_tag_lookup(chan->client, rc->tag);
+		req->status = REQ_STATUS_RCVD;
 		p9_client_cb(chan->client, req);
 	}
 }
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 334fcd4a4ea4..3100a8940afc 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -549,6 +549,7 @@ static void br2684_setup(struct net_device *netdev)
 	struct br2684_dev *brdev = BRPRIV(netdev);
 
 	ether_setup(netdev);
+	brdev->net_dev = netdev;
 
 	netdev->netdev_ops = &br2684_netdev_ops;
 
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 1181db08d9de..61309b26f271 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -215,6 +215,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 	conn->state = BT_OPEN;
 
 	conn->power_save = 1;
+	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
 
 	switch (type) {
 	case ACL_LINK:
@@ -247,6 +248,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 	if (hdev->notify)
 		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
 
+	hci_conn_init_sysfs(conn);
+
 	tasklet_enable(&hdev->tx_task);
 
 	return conn;
@@ -424,12 +427,9 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 	if (sec_level == BT_SECURITY_SDP)
 		return 1;
 
-	if (sec_level == BT_SECURITY_LOW) {
-		if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0)
-			return hci_conn_auth(conn, sec_level, auth_type);
-		else
-			return 1;
-	}
+	if (sec_level == BT_SECURITY_LOW &&
+			(!conn->ssp_mode || !conn->hdev->ssp_mode))
+		return 1;
 
 	if (conn->link_mode & HCI_LM_ENCRYPT)
 		return hci_conn_auth(conn, sec_level, auth_type);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 15f40ea8d544..4e7cb88e5da9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -883,6 +883,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 	if (conn->type == ACL_LINK) {
 		conn->state = BT_CONFIG;
 		hci_conn_hold(conn);
+		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
 	} else
 		conn->state = BT_CONNECTED;
 
@@ -1063,9 +1064,14 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 			hci_proto_connect_cfm(conn, ev->status);
 			hci_conn_put(conn);
 		}
-	} else
+	} else {
 		hci_auth_cfm(conn, ev->status);
 
+		hci_conn_hold(conn);
+		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+		hci_conn_put(conn);
+	}
+
 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
 		if (!ev->status) {
 			struct hci_cp_set_conn_encrypt cp;
@@ -1479,7 +1485,21 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
 
 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
+	struct hci_ev_pin_code_req *ev = (void *) skb->data;
+	struct hci_conn *conn;
+
 	BT_DBG("%s", hdev->name);
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+	if (conn) {
+		hci_conn_hold(conn);
+		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
+		hci_conn_put(conn);
+	}
+
+	hci_dev_unlock(hdev);
 }
 
 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1489,7 +1509,21 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
 
 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
+	struct hci_ev_link_key_notify *ev = (void *) skb->data;
+	struct hci_conn *conn;
+
 	BT_DBG("%s", hdev->name);
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+	if (conn) {
+		hci_conn_hold(conn);
+		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+		hci_conn_put(conn);
+	}
+
+	hci_dev_unlock(hdev);
 }
 
 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index ed82796d4a0f..582d8877078c 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -9,8 +9,7 @@
 struct class *bt_class = NULL;
 EXPORT_SYMBOL_GPL(bt_class);
 
-static struct workqueue_struct *btaddconn;
-static struct workqueue_struct *btdelconn;
+static struct workqueue_struct *bt_workq;
 
 static inline char *link_typetostr(int type)
 {
@@ -88,9 +87,10 @@ static struct device_type bt_link = {
 
 static void add_conn(struct work_struct *work)
 {
-	struct hci_conn *conn = container_of(work, struct hci_conn, work);
+	struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
 
-	flush_workqueue(btdelconn);
+	/* ensure previous del is complete */
+	flush_work(&conn->work_del);
 
 	if (device_add(&conn->dev) < 0) {
 		BT_ERR("Failed to register connection device");
@@ -98,27 +98,6 @@ static void add_conn(struct work_struct *work)
 	}
 }
 
-void hci_conn_add_sysfs(struct hci_conn *conn)
-{
-	struct hci_dev *hdev = conn->hdev;
-
-	BT_DBG("conn %p", conn);
-
-	conn->dev.type = &bt_link;
-	conn->dev.class = bt_class;
-	conn->dev.parent = &hdev->dev;
-
-	dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
-
-	dev_set_drvdata(&conn->dev, conn);
-
-	device_initialize(&conn->dev);
-
-	INIT_WORK(&conn->work, add_conn);
-
-	queue_work(btaddconn, &conn->work);
-}
-
 /*
  * The rfcomm tty device will possibly retain even when conn
  * is down, and sysfs doesn't support move zombie device,
@@ -131,9 +110,15 @@ static int __match_tty(struct device *dev, void *data)
 
 static void del_conn(struct work_struct *work)
 {
-	struct hci_conn *conn = container_of(work, struct hci_conn, work);
+	struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
 	struct hci_dev *hdev = conn->hdev;
 
+	/* ensure previous add is complete */
+	flush_work(&conn->work_add);
+
+	if (!device_is_registered(&conn->dev))
+		return;
+
 	while (1) {
 		struct device *dev;
 
@@ -149,16 +134,40 @@ static void del_conn(struct work_struct *work)
 	hci_dev_put(hdev);
 }
 
-void hci_conn_del_sysfs(struct hci_conn *conn)
+void hci_conn_init_sysfs(struct hci_conn *conn)
 {
+	struct hci_dev *hdev = conn->hdev;
+
 	BT_DBG("conn %p", conn);
 
-	if (!device_is_registered(&conn->dev))
-		return;
+	conn->dev.type = &bt_link;
+	conn->dev.class = bt_class;
+	conn->dev.parent = &hdev->dev;
+
+	dev_set_drvdata(&conn->dev, conn);
+
+	device_initialize(&conn->dev);
+
+	INIT_WORK(&conn->work_add, add_conn);
+	INIT_WORK(&conn->work_del, del_conn);
+}
+
+void hci_conn_add_sysfs(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	BT_DBG("conn %p", conn);
 
-	INIT_WORK(&conn->work, del_conn);
+	dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
 
-	queue_work(btdelconn, &conn->work);
+	queue_work(bt_workq, &conn->work_add);
+}
+
+void hci_conn_del_sysfs(struct hci_conn *conn)
+{
+	BT_DBG("conn %p", conn);
+
+	queue_work(bt_workq, &conn->work_del);
 }
 
 static inline char *host_typetostr(int type)
@@ -435,20 +444,13 @@ void hci_unregister_sysfs(struct hci_dev *hdev)
 
 int __init bt_sysfs_init(void)
 {
-	btaddconn = create_singlethread_workqueue("btaddconn");
-	if (!btaddconn)
+	bt_workq = create_singlethread_workqueue("bluetooth");
+	if (!bt_workq)
 		return -ENOMEM;
 
-	btdelconn = create_singlethread_workqueue("btdelconn");
-	if (!btdelconn) {
-		destroy_workqueue(btaddconn);
-		return -ENOMEM;
-	}
-
 	bt_class = class_create(THIS_MODULE, "bluetooth");
 	if (IS_ERR(bt_class)) {
-		destroy_workqueue(btdelconn);
-		destroy_workqueue(btaddconn);
+		destroy_workqueue(bt_workq);
 		return PTR_ERR(bt_class);
 	}
 
@@ -457,8 +459,7 @@ int __init bt_sysfs_init(void)
 
 void bt_sysfs_cleanup(void)
 {
-	destroy_workqueue(btaddconn);
-	destroy_workqueue(btdelconn);
+	destroy_workqueue(bt_workq);
 
 	class_destroy(bt_class);
 }
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 3953ac4214c8..e4a418fcb35b 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -788,15 +788,23 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff *skb,
 	return NF_STOLEN;
 }
 
+#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
-	if (skb->protocol == htons(ETH_P_IP) &&
+	if (skb->nfct != NULL &&
+	    (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb)) &&
 	    skb->len > skb->dev->mtu &&
 	    !skb_is_gso(skb))
 		return ip_fragment(skb, br_dev_queue_push_xmit);
 	else
 		return br_dev_queue_push_xmit(skb);
 }
+#else
+static int br_nf_dev_queue_xmit(struct sk_buff *skb)
+{
+	return br_dev_queue_push_xmit(skb);
+}
+#endif
 
 /* PF_BRIDGE/POST_ROUTING ********************************************/
 static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
diff --git a/net/core/datagram.c b/net/core/datagram.c
index d0de644b378d..b01a76abe1d2 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -64,13 +64,25 @@ static inline int connection_based(struct sock *sk)
 	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
 }
 
+static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+				  void *key)
+{
+	unsigned long bits = (unsigned long)key;
+
+	/*
+	 * Avoid a wakeup if event not interesting for us
+	 */
+	if (bits && !(bits & (POLLIN | POLLERR)))
+		return 0;
+	return autoremove_wake_function(wait, mode, sync, key);
+}
 /*
  * Wait for a packet..
  */
 static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
 {
 	int error;
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT_FUNC(wait, receiver_wake_function);
 
 	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 308a7d0c277f..e2e9e4af3ace 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1735,11 +1735,12 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 {
 	u32 hash;
 
-	if (skb_rx_queue_recorded(skb)) {
-		hash = skb_get_rx_queue(skb);
-	} else if (skb->sk && skb->sk->sk_hash) {
+	if (skb_rx_queue_recorded(skb))
+		return skb_get_rx_queue(skb) % dev->real_num_tx_queues;
+
+	if (skb->sk && skb->sk->sk_hash)
 		hash = skb->sk->sk_hash;
-	} else
+	else
 		hash = skb->protocol;
 
 	hash = jhash_1word(hash, skb_tx_hashrnd);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ce6356cd9f71..f091a5a845c1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1365,9 +1365,8 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 
 static inline struct page *linear_to_page(struct page *page, unsigned int *len,
 					  unsigned int *offset,
-					  struct sk_buff *skb)
+					  struct sk_buff *skb, struct sock *sk)
 {
-	struct sock *sk = skb->sk;
 	struct page *p = sk->sk_sndmsg_page;
 	unsigned int off;
 
@@ -1405,13 +1404,14 @@ new_page:
  */
 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
 				unsigned int *len, unsigned int offset,
-				struct sk_buff *skb, int linear)
+				struct sk_buff *skb, int linear,
+				struct sock *sk)
 {
 	if (unlikely(spd->nr_pages == PIPE_BUFFERS))
 		return 1;
 
 	if (linear) {
-		page = linear_to_page(page, len, &offset, skb);
+		page = linear_to_page(page, len, &offset, skb, sk);
 		if (!page)
 			return 1;
 	} else
@@ -1442,7 +1442,8 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
 static inline int __splice_segment(struct page *page, unsigned int poff,
 				   unsigned int plen, unsigned int *off,
 				   unsigned int *len, struct sk_buff *skb,
-				   struct splice_pipe_desc *spd, int linear)
+				   struct splice_pipe_desc *spd, int linear,
+				   struct sock *sk)
 {
 	if (!*len)
 		return 1;
@@ -1465,7 +1466,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
 		/* the linear region may spread across several pages  */
 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
-		if (spd_fill_page(spd, page, &flen, poff, skb, linear))
+		if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
 			return 1;
 
 		__segment_seek(&page, &poff, &plen, flen);
@@ -1481,8 +1482,8 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
  * pipe is full or if we already spliced the requested length.
  */
 static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
-			     unsigned int *len,
-			     struct splice_pipe_desc *spd)
+			     unsigned int *len, struct splice_pipe_desc *spd,
+			     struct sock *sk)
 {
 	int seg;
 
@@ -1492,7 +1493,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 	if (__splice_segment(virt_to_page(skb->data),
 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
 			     skb_headlen(skb),
-			     offset, len, skb, spd, 1))
+			     offset, len, skb, spd, 1, sk))
 		return 1;
 
 	/*
@@ -1502,7 +1503,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
 		if (__splice_segment(f->page, f->page_offset, f->size,
-				     offset, len, skb, spd, 0))
+				     offset, len, skb, spd, 0, sk))
 			return 1;
 	}
 
@@ -1528,12 +1529,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		.ops = &sock_pipe_buf_ops,
 		.spd_release = sock_spd_release,
 	};
+	struct sock *sk = skb->sk;
 
 	/*
 	 * __skb_splice_bits() only fails if the output has no room left,
 	 * so no point in going over the frag_list for the error case.
 	 */
-	if (__skb_splice_bits(skb, &offset, &tlen, &spd))
+	if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
 		goto done;
 	else if (!tlen)
 		goto done;
@@ -1545,14 +1547,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list && tlen; list = list->next) {
-			if (__skb_splice_bits(list, &offset, &tlen, &spd))
+			if (__skb_splice_bits(list, &offset, &tlen, &spd, sk))
 				break;
 		}
 	}
 
 done:
 	if (spd.nr_pages) {
-		struct sock *sk = skb->sk;
 		int ret;
 
 		/*
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 5ba533d234db..831fe1879dc0 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -253,9 +253,9 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	indev = in ? in->name : nulldevname;
 	outdev = out ? out->name : nulldevname;
 
-	rcu_read_lock_bh();
-	private = rcu_dereference(table->private);
-	table_base = rcu_dereference(private->entries[smp_processor_id()]);
+	xt_info_rdlock_bh();
+	private = table->private;
+	table_base = private->entries[smp_processor_id()];
 
 	e = get_entry(table_base, private->hook_entry[hook]);
 	back = get_entry(table_base, private->underflow[hook]);
@@ -273,6 +273,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 
 			hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) +
 				(2 * skb->dev->addr_len);
+
 			ADD_COUNTER(e->counters, hdr_len, 1);
 
 			t = arpt_get_target(e);
@@ -328,8 +329,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 			e = (void *)e + e->next_offset;
 		}
 	} while (!hotdrop);
-
-	rcu_read_unlock_bh();
+	xt_info_rdunlock_bh();
 
 	if (hotdrop)
 		return NF_DROP;
@@ -711,9 +711,12 @@ static void get_counters(const struct xt_table_info *t,
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
 	 * with data used by 'current' CPU
-	 * We dont care about preemption here.
+	 *
+	 * Bottom half has to be disabled to prevent deadlock
+	 * if new softirq were to run and call ipt_do_table
 	 */
-	curcpu = raw_smp_processor_id();
+	local_bh_disable();
+	curcpu = smp_processor_id();
 
 	i = 0;
 	ARPT_ENTRY_ITERATE(t->entries[curcpu],
@@ -726,73 +729,22 @@ static void get_counters(const struct xt_table_info *t,
 		if (cpu == curcpu)
 			continue;
 		i = 0;
+		xt_info_wrlock(cpu);
 		ARPT_ENTRY_ITERATE(t->entries[cpu],
 				   t->size,
 				   add_entry_to_counter,
 				   counters,
 				   &i);
+		xt_info_wrunlock(cpu);
 	}
-}
-
-
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
-static int
-add_counter_to_entry(struct arpt_entry *e,
-		     const struct xt_counters addme[],
-		     unsigned int *i)
-{
-	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
-	(*i)++;
-	return 0;
-}
-
-/* Take values from counters and add them back onto the current cpu */
-static void put_counters(struct xt_table_info *t,
-			 const struct xt_counters counters[])
-{
-	unsigned int i, cpu;
-
-	local_bh_disable();
-	cpu = smp_processor_id();
-	i = 0;
-	ARPT_ENTRY_ITERATE(t->entries[cpu],
-			   t->size,
-			   add_counter_to_entry,
-			   counters,
-			   &i);
 	local_bh_enable();
 }
 
-static inline int
-zero_entry_counter(struct arpt_entry *e, void *arg)
-{
-	e->counters.bcnt = 0;
-	e->counters.pcnt = 0;
-	return 0;
-}
-
-static void
-clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
-{
-	unsigned int cpu;
-	const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
-
-	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
-	for_each_possible_cpu(cpu) {
-		memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
-		ARPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
-				   zero_entry_counter, NULL);
-	}
-}
-
 static struct xt_counters *alloc_counters(struct xt_table *table)
 {
 	unsigned int countersize;
 	struct xt_counters *counters;
 	struct xt_table_info *private = table->private;
-	struct xt_table_info *info;
 
 	/* We need atomic snapshot of counters: rest doesn't change
 	 * (other than comefrom, which userspace doesn't care
@@ -802,30 +754,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
 	counters = vmalloc_node(countersize, numa_node_id());
 
 	if (counters == NULL)
-		goto nomem;
-
-	info = xt_alloc_table_info(private->size);
-	if (!info)
-		goto free_counters;
-
-	clone_counters(info, private);
-
-	mutex_lock(&table->lock);
-	xt_table_entry_swap_rcu(private, info);
-	synchronize_net();	/* Wait until smoke has cleared */
+		return ERR_PTR(-ENOMEM);
 
-	get_counters(info, counters);
-	put_counters(private, counters);
-	mutex_unlock(&table->lock);
-
-	xt_free_table_info(info);
+	get_counters(private, counters);
 
 	return counters;
-
- free_counters:
-	vfree(counters);
- nomem:
-	return ERR_PTR(-ENOMEM);
 }
 
 static int copy_entries_to_user(unsigned int total_size,
@@ -1094,8 +1027,9 @@ static int __do_replace(struct net *net, const char *name,
 	    (newinfo->number <= oldinfo->initial_entries))
 		module_put(t->me);
 
-	/* Get the old counters. */
+	/* Get the old counters, and synchronize with replace */
 	get_counters(oldinfo, counters);
+
 	/* Decrease module usage counts and free resource */
 	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
 	ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
@@ -1165,10 +1099,23 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
 	return ret;
 }
 
+/* We're lazy, and add to the first CPU; overflow works its fey magic
+ * and everything is OK. */
+static int
+add_counter_to_entry(struct arpt_entry *e,
+		     const struct xt_counters addme[],
+		     unsigned int *i)
+{
+	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
+
+	(*i)++;
+	return 0;
+}
+
 static int do_add_counters(struct net *net, void __user *user, unsigned int len,
 			   int compat)
 {
-	unsigned int i;
+	unsigned int i, curcpu;
 	struct xt_counters_info tmp;
 	struct xt_counters *paddc;
 	unsigned int num_counters;
@@ -1224,26 +1171,26 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
 		goto free;
 	}
 
-	mutex_lock(&t->lock);
+	local_bh_disable();
 	private = t->private;
 	if (private->number != num_counters) {
 		ret = -EINVAL;
 		goto unlock_up_free;
 	}
 
-	preempt_disable();
 	i = 0;
 	/* Choose the copy that is on our node */
-	loc_cpu_entry = private->entries[smp_processor_id()];
+	curcpu = smp_processor_id();
+	loc_cpu_entry = private->entries[curcpu];
+	xt_info_wrlock(curcpu);
 	ARPT_ENTRY_ITERATE(loc_cpu_entry,
 			   private->size,
 			   add_counter_to_entry,
 			   paddc,
 			   &i);
-	preempt_enable();
+	xt_info_wrunlock(curcpu);
 unlock_up_free:
-	mutex_unlock(&t->lock);
-
+	local_bh_enable();
 	xt_table_unlock(t);
 	module_put(t->me);
 free:
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 810c0b62c7d4..2ec8d7290c40 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -338,10 +338,9 @@ ipt_do_table(struct sk_buff *skb, | |||
338 | tgpar.hooknum = hook; | 338 | tgpar.hooknum = hook; |
339 | 339 | ||
340 | IP_NF_ASSERT(table->valid_hooks & (1 << hook)); | 340 | IP_NF_ASSERT(table->valid_hooks & (1 << hook)); |
341 | 341 | xt_info_rdlock_bh(); | |
342 | rcu_read_lock_bh(); | 342 | private = table->private; |
343 | private = rcu_dereference(table->private); | 343 | table_base = private->entries[smp_processor_id()]; |
344 | table_base = rcu_dereference(private->entries[smp_processor_id()]); | ||
345 | 344 | ||
346 | e = get_entry(table_base, private->hook_entry[hook]); | 345 | e = get_entry(table_base, private->hook_entry[hook]); |
347 | 346 | ||
@@ -436,8 +435,7 @@ ipt_do_table(struct sk_buff *skb, | |||
436 | e = (void *)e + e->next_offset; | 435 | e = (void *)e + e->next_offset; |
437 | } | 436 | } |
438 | } while (!hotdrop); | 437 | } while (!hotdrop); |
439 | 438 | xt_info_rdunlock_bh(); | |
440 | rcu_read_unlock_bh(); | ||
441 | 439 | ||
442 | #ifdef DEBUG_ALLOW_ALL | 440 | #ifdef DEBUG_ALLOW_ALL |
443 | return NF_ACCEPT; | 441 | return NF_ACCEPT; |
@@ -896,10 +894,13 @@ get_counters(const struct xt_table_info *t, | |||
896 | 894 | ||
897 | /* Instead of clearing (by a previous call to memset()) | 895 | /* Instead of clearing (by a previous call to memset()) |
898 | * the counters and using adds, we set the counters | 896 | * the counters and using adds, we set the counters |
899 | * with data used by 'current' CPU | 897 | * with data used by 'current' CPU. |
900 | * We dont care about preemption here. | 898 | * |
899 | * Bottom half has to be disabled to prevent deadlock | ||
900 | * if new softirq were to run and call ipt_do_table | ||
901 | */ | 901 | */ |
902 | curcpu = raw_smp_processor_id(); | 902 | local_bh_disable(); |
903 | curcpu = smp_processor_id(); | ||
903 | 904 | ||
904 | i = 0; | 905 | i = 0; |
905 | IPT_ENTRY_ITERATE(t->entries[curcpu], | 906 | IPT_ENTRY_ITERATE(t->entries[curcpu], |
@@ -912,74 +913,22 @@ get_counters(const struct xt_table_info *t, | |||
912 | if (cpu == curcpu) | 913 | if (cpu == curcpu) |
913 | continue; | 914 | continue; |
914 | i = 0; | 915 | i = 0; |
916 | xt_info_wrlock(cpu); | ||
915 | IPT_ENTRY_ITERATE(t->entries[cpu], | 917 | IPT_ENTRY_ITERATE(t->entries[cpu], |
916 | t->size, | 918 | t->size, |
917 | add_entry_to_counter, | 919 | add_entry_to_counter, |
918 | counters, | 920 | counters, |
919 | &i); | 921 | &i); |
922 | xt_info_wrunlock(cpu); | ||
920 | } | 923 | } |
921 | |||
922 | } | ||
923 | |||
924 | /* We're lazy, and add to the first CPU; overflow works its fey magic | ||
925 | * and everything is OK. */ | ||
926 | static int | ||
927 | add_counter_to_entry(struct ipt_entry *e, | ||
928 | const struct xt_counters addme[], | ||
929 | unsigned int *i) | ||
930 | { | ||
931 | ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); | ||
932 | |||
933 | (*i)++; | ||
934 | return 0; | ||
935 | } | ||
936 | |||
937 | /* Take values from counters and add them back onto the current cpu */ | ||
938 | static void put_counters(struct xt_table_info *t, | ||
939 | const struct xt_counters counters[]) | ||
940 | { | ||
941 | unsigned int i, cpu; | ||
942 | |||
943 | local_bh_disable(); | ||
944 | cpu = smp_processor_id(); | ||
945 | i = 0; | ||
946 | IPT_ENTRY_ITERATE(t->entries[cpu], | ||
947 | t->size, | ||
948 | add_counter_to_entry, | ||
949 | counters, | ||
950 | &i); | ||
951 | local_bh_enable(); | 924 | local_bh_enable(); |
952 | } | 925 | } |
953 | 926 | ||
954 | |||
955 | static inline int | ||
956 | zero_entry_counter(struct ipt_entry *e, void *arg) | ||
957 | { | ||
958 | e->counters.bcnt = 0; | ||
959 | e->counters.pcnt = 0; | ||
960 | return 0; | ||
961 | } | ||
962 | |||
963 | static void | ||
964 | clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info) | ||
965 | { | ||
966 | unsigned int cpu; | ||
967 | const void *loc_cpu_entry = info->entries[raw_smp_processor_id()]; | ||
968 | |||
969 | memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); | ||
970 | for_each_possible_cpu(cpu) { | ||
971 | memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size); | ||
972 | IPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size, | ||
973 | zero_entry_counter, NULL); | ||
974 | } | ||
975 | } | ||
976 | |||
977 | static struct xt_counters * alloc_counters(struct xt_table *table) | 927 | static struct xt_counters * alloc_counters(struct xt_table *table) |
978 | { | 928 | { |
979 | unsigned int countersize; | 929 | unsigned int countersize; |
980 | struct xt_counters *counters; | 930 | struct xt_counters *counters; |
981 | struct xt_table_info *private = table->private; | 931 | struct xt_table_info *private = table->private; |
982 | struct xt_table_info *info; | ||
983 | 932 | ||
984 | /* We need atomic snapshot of counters: rest doesn't change | 933 | /* We need atomic snapshot of counters: rest doesn't change |
985 | (other than comefrom, which userspace doesn't care | 934 | (other than comefrom, which userspace doesn't care |
@@ -988,30 +937,11 @@ static struct xt_counters * alloc_counters(struct xt_table *table) | |||
988 | counters = vmalloc_node(countersize, numa_node_id()); | 937 | counters = vmalloc_node(countersize, numa_node_id()); |
989 | 938 | ||
990 | if (counters == NULL) | 939 | if (counters == NULL) |
991 | goto nomem; | 940 | return ERR_PTR(-ENOMEM); |
992 | 941 | ||
993 | info = xt_alloc_table_info(private->size); | 942 | get_counters(private, counters); |
994 | if (!info) | ||
995 | goto free_counters; | ||
996 | |||
997 | clone_counters(info, private); | ||
998 | |||
999 | mutex_lock(&table->lock); | ||
1000 | xt_table_entry_swap_rcu(private, info); | ||
1001 | synchronize_net(); /* Wait until smoke has cleared */ | ||
1002 | |||
1003 | get_counters(info, counters); | ||
1004 | put_counters(private, counters); | ||
1005 | mutex_unlock(&table->lock); | ||
1006 | |||
1007 | xt_free_table_info(info); | ||
1008 | 943 | ||
1009 | return counters; | 944 | return counters; |
1010 | |||
1011 | free_counters: | ||
1012 | vfree(counters); | ||
1013 | nomem: | ||
1014 | return ERR_PTR(-ENOMEM); | ||
1015 | } | 945 | } |
1016 | 946 | ||
1017 | static int | 947 | static int |
@@ -1306,8 +1236,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, | |||
1306 | (newinfo->number <= oldinfo->initial_entries)) | 1236 | (newinfo->number <= oldinfo->initial_entries)) |
1307 | module_put(t->me); | 1237 | module_put(t->me); |
1308 | 1238 | ||
1309 | /* Get the old counters. */ | 1239 | /* Get the old counters, and synchronize with replace */ |
1310 | get_counters(oldinfo, counters); | 1240 | get_counters(oldinfo, counters); |
1241 | |||
1311 | /* Decrease module usage counts and free resource */ | 1242 | /* Decrease module usage counts and free resource */ |
1312 | loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; | 1243 | loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; |
1313 | IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, | 1244 | IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, |
@@ -1377,11 +1308,23 @@ do_replace(struct net *net, void __user *user, unsigned int len) | |||
1377 | return ret; | 1308 | return ret; |
1378 | } | 1309 | } |
1379 | 1310 | ||
1311 | /* We're lazy, and add to the first CPU; overflow works its fey magic | ||
1312 | * and everything is OK. */ | ||
1313 | static int | ||
1314 | add_counter_to_entry(struct ipt_entry *e, | ||
1315 | const struct xt_counters addme[], | ||
1316 | unsigned int *i) | ||
1317 | { | ||
1318 | ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); | ||
1319 | |||
1320 | (*i)++; | ||
1321 | return 0; | ||
1322 | } | ||
1380 | 1323 | ||
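The add_counter_to_entry() helper moved above does all of its work through ADD_COUNTER(). For readers tracing the arithmetic, the macro in this era of the tree amounts to the two adds sketched below (its exact home, ip_tables.c versus a shared netfilter header, is our assumption, not something this diff shows):

/* Fold one userspace-supplied byte/packet pair into a rule's
 * counters.  Plain adds suffice: the caller holds this CPU's
 * xt_info write lock with bottom halves disabled, so nothing
 * else can touch these fields concurrently.
 */
#define ADD_COUNTER(c, b, p) \
        do { (c).bcnt += (b); (c).pcnt += (p); } while (0)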
1381 | static int | 1324 | static int |
1382 | do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) | 1325 | do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) |
1383 | { | 1326 | { |
1384 | unsigned int i; | 1327 | unsigned int i, curcpu; |
1385 | struct xt_counters_info tmp; | 1328 | struct xt_counters_info tmp; |
1386 | struct xt_counters *paddc; | 1329 | struct xt_counters *paddc; |
1387 | unsigned int num_counters; | 1330 | unsigned int num_counters; |
@@ -1437,25 +1380,26 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat | |||
1437 | goto free; | 1380 | goto free; |
1438 | } | 1381 | } |
1439 | 1382 | ||
1440 | mutex_lock(&t->lock); | 1383 | local_bh_disable(); |
1441 | private = t->private; | 1384 | private = t->private; |
1442 | if (private->number != num_counters) { | 1385 | if (private->number != num_counters) { |
1443 | ret = -EINVAL; | 1386 | ret = -EINVAL; |
1444 | goto unlock_up_free; | 1387 | goto unlock_up_free; |
1445 | } | 1388 | } |
1446 | 1389 | ||
1447 | preempt_disable(); | ||
1448 | i = 0; | 1390 | i = 0; |
1449 | /* Choose the copy that is on our node */ | 1391 | /* Choose the copy that is on our node */ |
1450 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 1392 | curcpu = smp_processor_id(); |
1393 | loc_cpu_entry = private->entries[curcpu]; | ||
1394 | xt_info_wrlock(curcpu); | ||
1451 | IPT_ENTRY_ITERATE(loc_cpu_entry, | 1395 | IPT_ENTRY_ITERATE(loc_cpu_entry, |
1452 | private->size, | 1396 | private->size, |
1453 | add_counter_to_entry, | 1397 | add_counter_to_entry, |
1454 | paddc, | 1398 | paddc, |
1455 | &i); | 1399 | &i); |
1456 | preempt_enable(); | 1400 | xt_info_wrunlock(curcpu); |
1457 | unlock_up_free: | 1401 | unlock_up_free: |
1458 | mutex_unlock(&t->lock); | 1402 | local_bh_enable(); |
1459 | xt_table_unlock(t); | 1403 | xt_table_unlock(t); |
1460 | module_put(t->me); | 1404 | module_put(t->me); |
1461 | free: | 1405 | free: |
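The hunk above is the canonical writer-side pattern this patch introduces, and it reappears in the ip6_tables path below: disable bottom halves so a softirq cannot re-enter the table on this CPU, then take only the local CPU's xt_info lock in write mode while modifying that CPU's private copy of the entries. As a freestanding sketch (the helper name is hypothetical; IPT_ENTRY_ITERATE and the xt_info primitives are used exactly as above):

/* Sketch: apply userspace counter deltas to the local CPU's copy. */
static void add_counters_on_this_cpu(struct xt_table_info *private,
                                     const struct xt_counters *paddc)
{
        unsigned int curcpu, i = 0;

        local_bh_disable();             /* no softirq re-entry here */
        curcpu = smp_processor_id();    /* stable while BHs are off */
        xt_info_wrlock(curcpu);         /* exclude this CPU's readers */
        IPT_ENTRY_ITERATE(private->entries[curcpu], private->size,
                          add_counter_to_entry, paddc, &i);
        xt_info_wrunlock(curcpu);
        local_bh_enable();
}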
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c40debe51b38..c4c60e9f068a 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -3397,7 +3397,7 @@ int __init ip_rt_init(void) | |||
3397 | 0, | 3397 | 0, |
3398 | &rt_hash_log, | 3398 | &rt_hash_log, |
3399 | &rt_hash_mask, | 3399 | &rt_hash_mask, |
3400 | 0); | 3400 | rhash_entries ? 0 : 512 * 1024); |
3401 | memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket)); | 3401 | memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket)); |
3402 | rt_hash_lock_init(); | 3402 | rt_hash_lock_init(); |
3403 | 3403 | ||
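For context on this one-liner: the last argument of alloc_large_system_hash() is a ceiling on the number of entries, with 0 meaning the size is taken purely from the memory-based heuristics. Read that way (our gloss from the era's mm/page_alloc.c, not wording from the patch itself), the change caps the route-cache hash at 512K buckets on large-memory machines, while preserving the old unbounded sizing whenever the administrator set rhash_entries= explicitly on the boot command line.

/* Semantics of the final 'limit' argument as passed here:
 *   0          -> no cap; scale the hash with available memory
 *   512 * 1024 -> allocate at most 512K hash buckets
 */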
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c96a6bb25430..eec3e6f9956c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -597,16 +597,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) | |||
597 | tcp_grow_window(sk, skb); | 597 | tcp_grow_window(sk, skb); |
598 | } | 598 | } |
599 | 599 | ||
600 | static u32 tcp_rto_min(struct sock *sk) | ||
601 | { | ||
602 | struct dst_entry *dst = __sk_dst_get(sk); | ||
603 | u32 rto_min = TCP_RTO_MIN; | ||
604 | |||
605 | if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) | ||
606 | rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN); | ||
607 | return rto_min; | ||
608 | } | ||
609 | |||
610 | /* Called to compute a smoothed rtt estimate. The data fed to this | 600 | /* Called to compute a smoothed rtt estimate. The data fed to this |
611 | * routine either comes from timestamps, or from segments that were | 601 | * routine either comes from timestamps, or from segments that were |
612 | * known _not_ to have been retransmitted [see Karn/Partridge | 602 | * known _not_ to have been retransmitted [see Karn/Partridge |
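tcp_rto_min() is deleted from tcp_input.c with no replacement visible, because this merge's diffstat covers only net/. The usual pattern for such a removal is relocation to a header as an inline so other call sites can share it; assuming that is what happened (the destination, e.g. include/net/tcp.h, is not confirmed by this diff), the relocated helper would keep the deleted body verbatim:

static inline u32 tcp_rto_min(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        u32 rto_min = TCP_RTO_MIN;

        /* A locked route metric (ip route ... rto_min ...) can
         * override the default floor for this destination.
         */
        if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
                rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
        return rto_min;
}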
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 800ae8542471..219e165aea10 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -365,9 +365,9 @@ ip6t_do_table(struct sk_buff *skb, | |||
365 | 365 | ||
366 | IP_NF_ASSERT(table->valid_hooks & (1 << hook)); | 366 | IP_NF_ASSERT(table->valid_hooks & (1 << hook)); |
367 | 367 | ||
368 | rcu_read_lock_bh(); | 368 | xt_info_rdlock_bh(); |
369 | private = rcu_dereference(table->private); | 369 | private = table->private; |
370 | table_base = rcu_dereference(private->entries[smp_processor_id()]); | 370 | table_base = private->entries[smp_processor_id()]; |
371 | 371 | ||
372 | e = get_entry(table_base, private->hook_entry[hook]); | 372 | e = get_entry(table_base, private->hook_entry[hook]); |
373 | 373 | ||
@@ -466,7 +466,7 @@ ip6t_do_table(struct sk_buff *skb, | |||
466 | #ifdef CONFIG_NETFILTER_DEBUG | 466 | #ifdef CONFIG_NETFILTER_DEBUG |
467 | ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON; | 467 | ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON; |
468 | #endif | 468 | #endif |
469 | rcu_read_unlock_bh(); | 469 | xt_info_rdunlock_bh(); |
470 | 470 | ||
471 | #ifdef DEBUG_ALLOW_ALL | 471 | #ifdef DEBUG_ALLOW_ALL |
472 | return NF_ACCEPT; | 472 | return NF_ACCEPT; |
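The xt_info_rdlock_bh()/xt_info_wrlock() primitives live outside net/, so this diff never shows their definitions. Reconstructed from how they are used here and initialized in xt_init() further down (a per-CPU spinlock plus a readers count that permits recursive acquisition on the local CPU), the header side would look roughly like the sketch below; treat it as an inference from usage, not a quote of the real header:

struct xt_info_lock {
        spinlock_t lock;
        unsigned short readers;
};
DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);

/* Reader: always the local CPU's lock.  The readers count makes
 * nested acquisition safe (a target such as REJECT can re-enter
 * the table on the same CPU), and bottom halves stay disabled for
 * the whole traversal.
 */
static inline void xt_info_rdlock_bh(void)
{
        struct xt_info_lock *lock;

        local_bh_disable();
        lock = &__get_cpu_var(xt_info_locks);
        if (likely(!lock->readers++))
                spin_lock(&lock->lock);
}

static inline void xt_info_rdunlock_bh(void)
{
        struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);

        if (likely(!--lock->readers))
                spin_unlock(&lock->lock);
        local_bh_enable();
}

/* Writer: takes one specific CPU's lock, excluding that CPU's
 * readers while its counters are summed or updated.
 */
static inline void xt_info_wrlock(unsigned int cpu)
{
        spin_lock(&per_cpu(xt_info_locks, cpu).lock);
}

static inline void xt_info_wrunlock(unsigned int cpu)
{
        spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
}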
@@ -926,9 +926,12 @@ get_counters(const struct xt_table_info *t, | |||
926 | /* Instead of clearing (by a previous call to memset()) | 926 | /* Instead of clearing (by a previous call to memset()) |
927 | * the counters and using adds, we set the counters | 927 | * the counters and using adds, we set the counters |
928 | * with data used by 'current' CPU | 928 | * with data used by 'current' CPU |
929 | * We dont care about preemption here. | 929 | * |
930 | * Bottom half has to be disabled to prevent deadlock | ||
931 | * if new softirq were to run and call ipt_do_table | ||
930 | */ | 932 | */ |
931 | curcpu = raw_smp_processor_id(); | 933 | local_bh_disable(); |
934 | curcpu = smp_processor_id(); | ||
932 | 935 | ||
933 | i = 0; | 936 | i = 0; |
934 | IP6T_ENTRY_ITERATE(t->entries[curcpu], | 937 | IP6T_ENTRY_ITERATE(t->entries[curcpu], |
@@ -941,72 +944,22 @@ get_counters(const struct xt_table_info *t, | |||
941 | if (cpu == curcpu) | 944 | if (cpu == curcpu) |
942 | continue; | 945 | continue; |
943 | i = 0; | 946 | i = 0; |
947 | xt_info_wrlock(cpu); | ||
944 | IP6T_ENTRY_ITERATE(t->entries[cpu], | 948 | IP6T_ENTRY_ITERATE(t->entries[cpu], |
945 | t->size, | 949 | t->size, |
946 | add_entry_to_counter, | 950 | add_entry_to_counter, |
947 | counters, | 951 | counters, |
948 | &i); | 952 | &i); |
953 | xt_info_wrunlock(cpu); | ||
949 | } | 954 | } |
950 | } | ||
951 | |||
952 | /* We're lazy, and add to the first CPU; overflow works its fey magic | ||
953 | * and everything is OK. */ | ||
954 | static int | ||
955 | add_counter_to_entry(struct ip6t_entry *e, | ||
956 | const struct xt_counters addme[], | ||
957 | unsigned int *i) | ||
958 | { | ||
959 | ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); | ||
960 | |||
961 | (*i)++; | ||
962 | return 0; | ||
963 | } | ||
964 | |||
965 | /* Take values from counters and add them back onto the current cpu */ | ||
966 | static void put_counters(struct xt_table_info *t, | ||
967 | const struct xt_counters counters[]) | ||
968 | { | ||
969 | unsigned int i, cpu; | ||
970 | |||
971 | local_bh_disable(); | ||
972 | cpu = smp_processor_id(); | ||
973 | i = 0; | ||
974 | IP6T_ENTRY_ITERATE(t->entries[cpu], | ||
975 | t->size, | ||
976 | add_counter_to_entry, | ||
977 | counters, | ||
978 | &i); | ||
979 | local_bh_enable(); | 955 | local_bh_enable(); |
980 | } | 956 | } |
981 | 957 | ||
982 | static inline int | ||
983 | zero_entry_counter(struct ip6t_entry *e, void *arg) | ||
984 | { | ||
985 | e->counters.bcnt = 0; | ||
986 | e->counters.pcnt = 0; | ||
987 | return 0; | ||
988 | } | ||
989 | |||
990 | static void | ||
991 | clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info) | ||
992 | { | ||
993 | unsigned int cpu; | ||
994 | const void *loc_cpu_entry = info->entries[raw_smp_processor_id()]; | ||
995 | |||
996 | memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); | ||
997 | for_each_possible_cpu(cpu) { | ||
998 | memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size); | ||
999 | IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size, | ||
1000 | zero_entry_counter, NULL); | ||
1001 | } | ||
1002 | } | ||
1003 | |||
1004 | static struct xt_counters *alloc_counters(struct xt_table *table) | 958 | static struct xt_counters *alloc_counters(struct xt_table *table) |
1005 | { | 959 | { |
1006 | unsigned int countersize; | 960 | unsigned int countersize; |
1007 | struct xt_counters *counters; | 961 | struct xt_counters *counters; |
1008 | struct xt_table_info *private = table->private; | 962 | struct xt_table_info *private = table->private; |
1009 | struct xt_table_info *info; | ||
1010 | 963 | ||
1011 | /* We need atomic snapshot of counters: rest doesn't change | 964 | /* We need atomic snapshot of counters: rest doesn't change |
1012 | (other than comefrom, which userspace doesn't care | 965 | (other than comefrom, which userspace doesn't care |
@@ -1015,30 +968,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table) | |||
1015 | counters = vmalloc_node(countersize, numa_node_id()); | 968 | counters = vmalloc_node(countersize, numa_node_id()); |
1016 | 969 | ||
1017 | if (counters == NULL) | 970 | if (counters == NULL) |
1018 | goto nomem; | 971 | return ERR_PTR(-ENOMEM); |
1019 | 972 | ||
1020 | info = xt_alloc_table_info(private->size); | 973 | get_counters(private, counters); |
1021 | if (!info) | ||
1022 | goto free_counters; | ||
1023 | |||
1024 | clone_counters(info, private); | ||
1025 | |||
1026 | mutex_lock(&table->lock); | ||
1027 | xt_table_entry_swap_rcu(private, info); | ||
1028 | synchronize_net(); /* Wait until smoke has cleared */ | ||
1029 | |||
1030 | get_counters(info, counters); | ||
1031 | put_counters(private, counters); | ||
1032 | mutex_unlock(&table->lock); | ||
1033 | |||
1034 | xt_free_table_info(info); | ||
1035 | 974 | ||
1036 | return counters; | 975 | return counters; |
1037 | |||
1038 | free_counters: | ||
1039 | vfree(counters); | ||
1040 | nomem: | ||
1041 | return ERR_PTR(-ENOMEM); | ||
1042 | } | 976 | } |
1043 | 977 | ||
1044 | static int | 978 | static int |
@@ -1334,8 +1268,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, | |||
1334 | (newinfo->number <= oldinfo->initial_entries)) | 1268 | (newinfo->number <= oldinfo->initial_entries)) |
1335 | module_put(t->me); | 1269 | module_put(t->me); |
1336 | 1270 | ||
1337 | /* Get the old counters. */ | 1271 | /* Get the old counters, and synchronize with replace */ |
1338 | get_counters(oldinfo, counters); | 1272 | get_counters(oldinfo, counters); |
1273 | |||
1339 | /* Decrease module usage counts and free resource */ | 1274 | /* Decrease module usage counts and free resource */ |
1340 | loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; | 1275 | loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; |
1341 | IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, | 1276 | IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, |
@@ -1405,11 +1340,24 @@ do_replace(struct net *net, void __user *user, unsigned int len) | |||
1405 | return ret; | 1340 | return ret; |
1406 | } | 1341 | } |
1407 | 1342 | ||
1343 | /* We're lazy, and add to the first CPU; overflow works its fey magic | ||
1344 | * and everything is OK. */ | ||
1345 | static int | ||
1346 | add_counter_to_entry(struct ip6t_entry *e, | ||
1347 | const struct xt_counters addme[], | ||
1348 | unsigned int *i) | ||
1349 | { | ||
1350 | ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); | ||
1351 | |||
1352 | (*i)++; | ||
1353 | return 0; | ||
1354 | } | ||
1355 | |||
1408 | static int | 1356 | static int |
1409 | do_add_counters(struct net *net, void __user *user, unsigned int len, | 1357 | do_add_counters(struct net *net, void __user *user, unsigned int len, |
1410 | int compat) | 1358 | int compat) |
1411 | { | 1359 | { |
1412 | unsigned int i; | 1360 | unsigned int i, curcpu; |
1413 | struct xt_counters_info tmp; | 1361 | struct xt_counters_info tmp; |
1414 | struct xt_counters *paddc; | 1362 | struct xt_counters *paddc; |
1415 | unsigned int num_counters; | 1363 | unsigned int num_counters; |
@@ -1465,25 +1413,28 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, | |||
1465 | goto free; | 1413 | goto free; |
1466 | } | 1414 | } |
1467 | 1415 | ||
1468 | mutex_lock(&t->lock); | 1416 | |
1417 | local_bh_disable(); | ||
1469 | private = t->private; | 1418 | private = t->private; |
1470 | if (private->number != num_counters) { | 1419 | if (private->number != num_counters) { |
1471 | ret = -EINVAL; | 1420 | ret = -EINVAL; |
1472 | goto unlock_up_free; | 1421 | goto unlock_up_free; |
1473 | } | 1422 | } |
1474 | 1423 | ||
1475 | preempt_disable(); | ||
1476 | i = 0; | 1424 | i = 0; |
1477 | /* Choose the copy that is on our node */ | 1425 | /* Choose the copy that is on our node */ |
1478 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 1426 | curcpu = smp_processor_id(); |
1427 | xt_info_wrlock(curcpu); | ||
1428 | loc_cpu_entry = private->entries[curcpu]; | ||
1479 | IP6T_ENTRY_ITERATE(loc_cpu_entry, | 1429 | IP6T_ENTRY_ITERATE(loc_cpu_entry, |
1480 | private->size, | 1430 | private->size, |
1481 | add_counter_to_entry, | 1431 | add_counter_to_entry, |
1482 | paddc, | 1432 | paddc, |
1483 | &i); | 1433 | &i); |
1484 | preempt_enable(); | 1434 | xt_info_wrunlock(curcpu); |
1435 | |||
1485 | unlock_up_free: | 1436 | unlock_up_free: |
1486 | mutex_unlock(&t->lock); | 1437 | local_bh_enable(); |
1487 | xt_table_unlock(t); | 1438 | xt_table_unlock(t); |
1488 | module_put(t->me); | 1439 | module_put(t->me); |
1489 | free: | 1440 | free: |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index fbcbed6cad01..14134193cd17 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -757,6 +757,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
757 | local->hw.conf.long_frame_max_tx_count = 4; | 757 | local->hw.conf.long_frame_max_tx_count = 4; |
758 | local->hw.conf.short_frame_max_tx_count = 7; | 758 | local->hw.conf.short_frame_max_tx_count = 7; |
759 | local->hw.conf.radio_enabled = true; | 759 | local->hw.conf.radio_enabled = true; |
760 | local->user_power_level = -1; | ||
760 | 761 | ||
761 | INIT_LIST_HEAD(&local->interfaces); | 762 | INIT_LIST_HEAD(&local->interfaces); |
762 | mutex_init(&local->iflist_mtx); | 763 | mutex_init(&local->iflist_mtx); |
@@ -909,6 +910,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
909 | if (result < 0) | 910 | if (result < 0) |
910 | goto fail_sta_info; | 911 | goto fail_sta_info; |
911 | 912 | ||
913 | result = ieee80211_wep_init(local); | ||
914 | if (result < 0) { | ||
915 | printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n", | ||
916 | wiphy_name(local->hw.wiphy), result); | ||
917 | goto fail_wep; | ||
918 | } | ||
919 | |||
912 | rtnl_lock(); | 920 | rtnl_lock(); |
913 | result = dev_alloc_name(local->mdev, local->mdev->name); | 921 | result = dev_alloc_name(local->mdev, local->mdev->name); |
914 | if (result < 0) | 922 | if (result < 0) |
@@ -930,14 +938,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
930 | goto fail_rate; | 938 | goto fail_rate; |
931 | } | 939 | } |
932 | 940 | ||
933 | result = ieee80211_wep_init(local); | ||
934 | |||
935 | if (result < 0) { | ||
936 | printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n", | ||
937 | wiphy_name(local->hw.wiphy), result); | ||
938 | goto fail_wep; | ||
939 | } | ||
940 | |||
941 | /* add one default STA interface if supported */ | 941 | /* add one default STA interface if supported */ |
942 | if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) { | 942 | if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) { |
943 | result = ieee80211_if_add(local, "wlan%d", NULL, | 943 | result = ieee80211_if_add(local, "wlan%d", NULL, |
@@ -967,13 +967,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
967 | 967 | ||
968 | return 0; | 968 | return 0; |
969 | 969 | ||
970 | fail_wep: | ||
971 | rate_control_deinitialize(local); | ||
972 | fail_rate: | 970 | fail_rate: |
973 | unregister_netdevice(local->mdev); | 971 | unregister_netdevice(local->mdev); |
974 | local->mdev = NULL; | 972 | local->mdev = NULL; |
975 | fail_dev: | 973 | fail_dev: |
976 | rtnl_unlock(); | 974 | rtnl_unlock(); |
975 | ieee80211_wep_free(local); | ||
976 | fail_wep: | ||
977 | sta_info_stop(local); | 977 | sta_info_stop(local); |
978 | fail_sta_info: | 978 | fail_sta_info: |
979 | debugfs_hw_del(local); | 979 | debugfs_hw_del(local); |
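The mac80211 hunks above are purely an ordering fix: ieee80211_wep_init() moves earlier in ieee80211_register_hw(), so its cleanup moves correspondingly later in the failure chain (note that fail_wep now sits below fail_dev, and ieee80211_wep_free() runs after rtnl_unlock()). The invariant being preserved is that goto-based unwind runs in strict reverse order of initialization; schematically (all names hypothetical):

extern int init_a(void), init_b(void), init_c(void);
extern void undo_a(void), undo_b(void);

int register_thing(void)
{
        int err;

        err = init_a();
        if (err)
                goto fail_a;
        err = init_b();         /* e.g. WEP, now initialized earlier */
        if (err)
                goto fail_b;
        err = init_c();
        if (err)
                goto fail_c;
        return 0;

fail_c:
        undo_b();               /* unwind strictly mirrors init order */
fail_b:
        undo_a();
fail_a:
        return err;
}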
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 2329c5f50551..cb3ad741ebf8 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -275,6 +275,8 @@ config NF_CT_NETLINK | |||
275 | help | 275 | help |
276 | This option enables support for a netlink-based userspace interface | 276 | This option enables support for a netlink-based userspace interface |
277 | 277 | ||
278 | endif # NF_CONNTRACK | ||
279 | |||
278 | # transparent proxy support | 280 | # transparent proxy support |
279 | config NETFILTER_TPROXY | 281 | config NETFILTER_TPROXY |
280 | tristate "Transparent proxying support (EXPERIMENTAL)" | 282 | tristate "Transparent proxying support (EXPERIMENTAL)" |
@@ -290,8 +292,6 @@ config NETFILTER_TPROXY | |||
290 | 292 | ||
291 | To compile it as a module, choose M here. If unsure, say N. | 293 | To compile it as a module, choose M here. If unsure, say N. |
292 | 294 | ||
293 | endif # NF_CONNTRACK | ||
294 | |||
295 | config NETFILTER_XTABLES | 295 | config NETFILTER_XTABLES |
296 | tristate "Netfilter Xtables support (required for ip_tables)" | 296 | tristate "Netfilter Xtables support (required for ip_tables)" |
297 | default m if NETFILTER_ADVANCED=n | 297 | default m if NETFILTER_ADVANCED=n |
@@ -837,6 +837,7 @@ config NETFILTER_XT_MATCH_SOCKET | |||
837 | depends on NETFILTER_TPROXY | 837 | depends on NETFILTER_TPROXY |
838 | depends on NETFILTER_XTABLES | 838 | depends on NETFILTER_XTABLES |
839 | depends on NETFILTER_ADVANCED | 839 | depends on NETFILTER_ADVANCED |
840 | depends on !NF_CONNTRACK || NF_CONNTRACK | ||
840 | select NF_DEFRAG_IPV4 | 841 | select NF_DEFRAG_IPV4 |
841 | help | 842 | help |
842 | This option adds a `socket' match, which can be used to match | 843 | This option adds a `socket' match, which can be used to match |
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 50dac8dbe7d8..8e757dd53396 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c | |||
@@ -633,6 +633,8 @@ static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, | |||
633 | if (!nest_parms) | 633 | if (!nest_parms) |
634 | goto nla_put_failure; | 634 | goto nla_put_failure; |
635 | NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state); | 635 | NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state); |
636 | NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE, | ||
637 | ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]); | ||
636 | nla_nest_end(skb, nest_parms); | 638 | nla_nest_end(skb, nest_parms); |
637 | read_unlock_bh(&dccp_lock); | 639 | read_unlock_bh(&dccp_lock); |
638 | return 0; | 640 | return 0; |
@@ -644,6 +646,7 @@ nla_put_failure: | |||
644 | 646 | ||
645 | static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = { | 647 | static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = { |
646 | [CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 }, | 648 | [CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 }, |
649 | [CTA_PROTOINFO_DCCP_ROLE] = { .type = NLA_U8 }, | ||
647 | }; | 650 | }; |
648 | 651 | ||
649 | static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) | 652 | static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) |
@@ -661,11 +664,21 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) | |||
661 | return err; | 664 | return err; |
662 | 665 | ||
663 | if (!tb[CTA_PROTOINFO_DCCP_STATE] || | 666 | if (!tb[CTA_PROTOINFO_DCCP_STATE] || |
664 | nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE) | 667 | !tb[CTA_PROTOINFO_DCCP_ROLE] || |
668 | nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) > CT_DCCP_ROLE_MAX || | ||
669 | nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE) { | ||
665 | return -EINVAL; | 670 | return -EINVAL; |
671 | } | ||
666 | 672 | ||
667 | write_lock_bh(&dccp_lock); | 673 | write_lock_bh(&dccp_lock); |
668 | ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]); | 674 | ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]); |
675 | if (nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) == CT_DCCP_ROLE_CLIENT) { | ||
676 | ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT; | ||
677 | ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER; | ||
678 | } else { | ||
679 | ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_SERVER; | ||
680 | ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_CLIENT; | ||
681 | } | ||
669 | write_unlock_bh(&dccp_lock); | 682 | write_unlock_bh(&dccp_lock); |
670 | return 0; | 683 | return 0; |
671 | } | 684 | } |
@@ -777,6 +790,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = { | |||
777 | .print_conntrack = dccp_print_conntrack, | 790 | .print_conntrack = dccp_print_conntrack, |
778 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 791 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
779 | .to_nlattr = dccp_to_nlattr, | 792 | .to_nlattr = dccp_to_nlattr, |
793 | .nlattr_size = dccp_nlattr_size, | ||
780 | .from_nlattr = nlattr_to_dccp, | 794 | .from_nlattr = nlattr_to_dccp, |
781 | .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, | 795 | .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, |
782 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, | 796 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, |
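The .nlattr_size callback wired up above is not itself visible in this net/-only view. Judging from the other size callbacks this series adds, it presumably reports the worst-case payload of the DCCP protoinfo nest from the policy table; a sketch under that assumption:

/* Sketch: one nest header plus the maximum size of every
 * attribute dccp_nla_policy admits.
 */
static int dccp_nlattr_size(void)
{
        return nla_total_size(0)        /* CTA_PROTOINFO_DCCP */
                + nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1);
}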
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c index 4614696c1b88..0badedc542d3 100644 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ b/net/netfilter/nf_conntrack_proto_udplite.c | |||
@@ -204,6 +204,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly = | |||
204 | .error = udplite_error, | 204 | .error = udplite_error, |
205 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 205 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
206 | .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, | 206 | .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, |
207 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, | ||
207 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, | 208 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, |
208 | .nla_policy = nf_ct_port_nla_policy, | 209 | .nla_policy = nf_ct_port_nla_policy, |
209 | #endif | 210 | #endif |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 509a95621f9f..150e5cf62f85 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -625,20 +625,6 @@ void xt_free_table_info(struct xt_table_info *info) | |||
625 | } | 625 | } |
626 | EXPORT_SYMBOL(xt_free_table_info); | 626 | EXPORT_SYMBOL(xt_free_table_info); |
627 | 627 | ||
628 | void xt_table_entry_swap_rcu(struct xt_table_info *oldinfo, | ||
629 | struct xt_table_info *newinfo) | ||
630 | { | ||
631 | unsigned int cpu; | ||
632 | |||
633 | for_each_possible_cpu(cpu) { | ||
634 | void *p = oldinfo->entries[cpu]; | ||
635 | rcu_assign_pointer(oldinfo->entries[cpu], newinfo->entries[cpu]); | ||
636 | newinfo->entries[cpu] = p; | ||
637 | } | ||
638 | |||
639 | } | ||
640 | EXPORT_SYMBOL_GPL(xt_table_entry_swap_rcu); | ||
641 | |||
642 | /* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */ | 628 | /* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */ |
643 | struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, | 629 | struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, |
644 | const char *name) | 630 | const char *name) |
@@ -676,32 +662,43 @@ void xt_compat_unlock(u_int8_t af) | |||
676 | EXPORT_SYMBOL_GPL(xt_compat_unlock); | 662 | EXPORT_SYMBOL_GPL(xt_compat_unlock); |
677 | #endif | 663 | #endif |
678 | 664 | ||
665 | DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks); | ||
666 | EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks); | ||
667 | |||
668 | |||
679 | struct xt_table_info * | 669 | struct xt_table_info * |
680 | xt_replace_table(struct xt_table *table, | 670 | xt_replace_table(struct xt_table *table, |
681 | unsigned int num_counters, | 671 | unsigned int num_counters, |
682 | struct xt_table_info *newinfo, | 672 | struct xt_table_info *newinfo, |
683 | int *error) | 673 | int *error) |
684 | { | 674 | { |
685 | struct xt_table_info *oldinfo, *private; | 675 | struct xt_table_info *private; |
686 | 676 | ||
687 | /* Do the substitution. */ | 677 | /* Do the substitution. */ |
688 | mutex_lock(&table->lock); | 678 | local_bh_disable(); |
689 | private = table->private; | 679 | private = table->private; |
680 | |||
690 | /* Check inside lock: is the old number correct? */ | 681 | /* Check inside lock: is the old number correct? */ |
691 | if (num_counters != private->number) { | 682 | if (num_counters != private->number) { |
692 | duprintf("num_counters != table->private->number (%u/%u)\n", | 683 | duprintf("num_counters != table->private->number (%u/%u)\n", |
693 | num_counters, private->number); | 684 | num_counters, private->number); |
694 | mutex_unlock(&table->lock); | 685 | local_bh_enable(); |
695 | *error = -EAGAIN; | 686 | *error = -EAGAIN; |
696 | return NULL; | 687 | return NULL; |
697 | } | 688 | } |
698 | oldinfo = private; | ||
699 | rcu_assign_pointer(table->private, newinfo); | ||
700 | newinfo->initial_entries = oldinfo->initial_entries; | ||
701 | mutex_unlock(&table->lock); | ||
702 | 689 | ||
703 | synchronize_net(); | 690 | table->private = newinfo; |
704 | return oldinfo; | 691 | newinfo->initial_entries = private->initial_entries; |
692 | |||
693 | /* | ||
694 | * Even though table entries have now been swapped, other CPUs | ||
695 | * may still be using the old entries. This is okay, because | ||
696 | * resynchronization is provided by the locking done | ||
697 | * during the get_counters() routine. | ||
698 | */ | ||
699 | local_bh_enable(); | ||
700 | |||
701 | return private; | ||
705 | } | 702 | } |
706 | EXPORT_SYMBOL_GPL(xt_replace_table); | 703 | EXPORT_SYMBOL_GPL(xt_replace_table); |
707 | 704 | ||
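Worth spelling out: with the mutex and the RCU swap gone, xt_replace_table() itself only guarantees that the pointer assignment happens with bottom halves disabled. The promise that no CPU still walks the old entries comes from the caller, whose get_counters() pass takes every CPU's xt_info lock once, as the __do_replace() hunks earlier make explicit. A condensed sketch of that caller-side contract (the helper shape is hypothetical):

static int swap_and_harvest(struct xt_table *t, unsigned int num_counters,
                            struct xt_table_info *newinfo,
                            struct xt_counters *counters)
{
        struct xt_table_info *oldinfo;
        int ret = 0;

        oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
        if (!oldinfo)
                return ret;

        /* Taking each CPU's lock inside get_counters() is the
         * synchronization point: once it returns, no reader can
         * still hold a reference to oldinfo.
         */
        get_counters(oldinfo, counters);
        xt_free_table_info(oldinfo);
        return 0;
}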
@@ -734,7 +731,6 @@ struct xt_table *xt_register_table(struct net *net, struct xt_table *table, | |||
734 | 731 | ||
735 | /* Simplifies replace_table code. */ | 732 | /* Simplifies replace_table code. */ |
736 | table->private = bootstrap; | 733 | table->private = bootstrap; |
737 | mutex_init(&table->lock); | ||
738 | 734 | ||
739 | if (!xt_replace_table(table, 0, newinfo, &ret)) | 735 | if (!xt_replace_table(table, 0, newinfo, &ret)) |
740 | goto unlock; | 736 | goto unlock; |
@@ -1147,7 +1143,14 @@ static struct pernet_operations xt_net_ops = { | |||
1147 | 1143 | ||
1148 | static int __init xt_init(void) | 1144 | static int __init xt_init(void) |
1149 | { | 1145 | { |
1150 | int i, rv; | 1146 | unsigned int i; |
1147 | int rv; | ||
1148 | |||
1149 | for_each_possible_cpu(i) { | ||
1150 | struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); | ||
1151 | spin_lock_init(&lock->lock); | ||
1152 | lock->readers = 0; | ||
1153 | } | ||
1151 | 1154 | ||
1152 | xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); | 1155 | xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); |
1153 | if (!xt) | 1156 | if (!xt) |
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 791e030ea903..eb0ceb846527 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c | |||
@@ -474,7 +474,7 @@ static ssize_t recent_old_proc_write(struct file *file, | |||
474 | struct recent_table *t = pde->data; | 474 | struct recent_table *t = pde->data; |
475 | struct recent_entry *e; | 475 | struct recent_entry *e; |
476 | char buf[sizeof("+255.255.255.255")], *c = buf; | 476 | char buf[sizeof("+255.255.255.255")], *c = buf; |
477 | __be32 addr; | 477 | union nf_inet_addr addr = {}; |
478 | int add; | 478 | int add; |
479 | 479 | ||
480 | if (size > sizeof(buf)) | 480 | if (size > sizeof(buf)) |
@@ -506,14 +506,13 @@ static ssize_t recent_old_proc_write(struct file *file, | |||
506 | add = 1; | 506 | add = 1; |
507 | break; | 507 | break; |
508 | } | 508 | } |
509 | addr = in_aton(c); | 509 | addr.ip = in_aton(c); |
510 | 510 | ||
511 | spin_lock_bh(&recent_lock); | 511 | spin_lock_bh(&recent_lock); |
512 | e = recent_entry_lookup(t, (const void *)&addr, NFPROTO_IPV4, 0); | 512 | e = recent_entry_lookup(t, &addr, NFPROTO_IPV4, 0); |
513 | if (e == NULL) { | 513 | if (e == NULL) { |
514 | if (add) | 514 | if (add) |
515 | recent_entry_init(t, (const void *)&addr, | 515 | recent_entry_init(t, &addr, NFPROTO_IPV4, 0); |
516 | NFPROTO_IPV4, 0); | ||
517 | } else { | 516 | } else { |
518 | if (add) | 517 | if (add) |
519 | recent_entry_update(t, e); | 518 | recent_entry_update(t, e); |
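The xt_recent change is subtler than it looks: recent_entry_lookup() takes a union nf_inet_addr and may compare more bytes than the 4 an IPv4 address occupies, so casting a bare __be32 handed it 12 bytes of stack garbage. The empty-brace initializer is what makes the fix correct, not merely tidier:

union nf_inet_addr addr = {};   /* zero all 16 bytes, not just .ip */

addr.ip = in_aton(c);           /* only the IPv4 member carries data */
e = recent_entry_lookup(t, &addr, NFPROTO_IPV4, 0);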
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 173fcc4b050d..0759f32e9dca 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -254,7 +254,7 @@ replay: | |||
254 | } | 254 | } |
255 | tp->ops = tp_ops; | 255 | tp->ops = tp_ops; |
256 | tp->protocol = protocol; | 256 | tp->protocol = protocol; |
257 | tp->prio = nprio ? : tcf_auto_prio(*back); | 257 | tp->prio = nprio ? : TC_H_MAJ(tcf_auto_prio(*back)); |
258 | tp->q = q; | 258 | tp->q = q; |
259 | tp->classify = tp_ops->classify; | 259 | tp->classify = tp_ops->classify; |
260 | tp->classid = parent; | 260 | tp->classid = parent; |
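The cls_api fix relies on the handle layout: a filter priority occupies the upper 16 bits of a 32-bit handle, and tcf_auto_prio() derives a fresh priority by subtracting one from an existing major-aligned value, which leaves junk in the minor half (our reading of tcf_auto_prio(), which this diff does not show). Masking the result turns that into a clean one-major-step decrement. The relevant definitions, from include/linux/pkt_sched.h:

#define TC_H_MAJ_MASK (0xFFFF0000U)
#define TC_H_MIN_MASK (0x0000FFFFU)
#define TC_H_MAJ(h)   ((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h)   ((h) & TC_H_MIN_MASK)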
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index a0bfe53f1621..06ca058572f2 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -672,10 +672,8 @@ xprt_init_autodisconnect(unsigned long data) | |||
672 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) | 672 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) |
673 | goto out_abort; | 673 | goto out_abort; |
674 | spin_unlock(&xprt->transport_lock); | 674 | spin_unlock(&xprt->transport_lock); |
675 | if (xprt_connecting(xprt)) | 675 | set_bit(XPRT_CONNECTION_CLOSE, &xprt->state); |
676 | xprt_release_write(xprt, NULL); | 676 | queue_work(rpciod_workqueue, &xprt->task_cleanup); |
677 | else | ||
678 | queue_work(rpciod_workqueue, &xprt->task_cleanup); | ||
679 | return; | 677 | return; |
680 | out_abort: | 678 | out_abort: |
681 | spin_unlock(&xprt->transport_lock); | 679 | spin_unlock(&xprt->transport_lock); |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index d40ff50887aa..e18596146013 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -807,6 +807,9 @@ static void xs_reset_transport(struct sock_xprt *transport) | |||
807 | * | 807 | * |
808 | * This is used when all requests are complete; ie, no DRC state remains | 808 | * This is used when all requests are complete; ie, no DRC state remains |
809 | * on the server we want to save. | 809 | * on the server we want to save. |
810 | * | ||
811 | * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with | ||
812 | * xs_reset_transport() zeroing the socket from underneath a writer. | ||
810 | */ | 813 | */ |
811 | static void xs_close(struct rpc_xprt *xprt) | 814 | static void xs_close(struct rpc_xprt *xprt) |
812 | { | 815 | { |
@@ -824,6 +827,14 @@ static void xs_close(struct rpc_xprt *xprt) | |||
824 | xprt_disconnect_done(xprt); | 827 | xprt_disconnect_done(xprt); |
825 | } | 828 | } |
826 | 829 | ||
830 | static void xs_tcp_close(struct rpc_xprt *xprt) | ||
831 | { | ||
832 | if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state)) | ||
833 | xs_close(xprt); | ||
834 | else | ||
835 | xs_tcp_shutdown(xprt); | ||
836 | } | ||
837 | |||
827 | /** | 838 | /** |
828 | * xs_destroy - prepare to shutdown a transport | 839 | * xs_destroy - prepare to shutdown a transport |
829 | * @xprt: doomed transport | 840 | * @xprt: doomed transport |
@@ -1772,6 +1783,15 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt, | |||
1772 | xprt, -status, xprt_connected(xprt), | 1783 | xprt, -status, xprt_connected(xprt), |
1773 | sock->sk->sk_state); | 1784 | sock->sk->sk_state); |
1774 | switch (status) { | 1785 | switch (status) { |
1786 | default: | ||
1787 | printk("%s: connect returned unhandled error %d\n", | ||
1788 | __func__, status); | ||
1789 | case -EADDRNOTAVAIL: | ||
1790 | /* We're probably in TIME_WAIT. Get rid of existing socket, | ||
1791 | * and retry | ||
1792 | */ | ||
1793 | set_bit(XPRT_CONNECTION_CLOSE, &xprt->state); | ||
1794 | xprt_force_disconnect(xprt); | ||
1775 | case -ECONNREFUSED: | 1795 | case -ECONNREFUSED: |
1776 | case -ECONNRESET: | 1796 | case -ECONNRESET: |
1777 | case -ENETUNREACH: | 1797 | case -ENETUNREACH: |
@@ -1782,10 +1802,6 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt, | |||
1782 | xprt_clear_connecting(xprt); | 1802 | xprt_clear_connecting(xprt); |
1783 | return; | 1803 | return; |
1784 | } | 1804 | } |
1785 | /* get rid of existing socket, and retry */ | ||
1786 | xs_tcp_shutdown(xprt); | ||
1787 | printk("%s: connect returned unhandled error %d\n", | ||
1788 | __func__, status); | ||
1789 | out_eagain: | 1805 | out_eagain: |
1790 | status = -EAGAIN; | 1806 | status = -EAGAIN; |
1791 | out: | 1807 | out: |
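Note that the reshuffled switch above depends on deliberate fallthrough: an unrecognized connect() error is logged and then treated exactly like -EADDRNOTAVAIL (mark the transport for a full close and force a disconnect, so a socket stuck in TIME_WAIT is torn down before the retry), after which control continues into the soft-error cases. Condensed from the hunks above:

switch (status) {
default:
        printk("%s: connect returned unhandled error %d\n",
               __func__, status);
        /* fall through: handle like an address stuck in TIME_WAIT */
case -EADDRNOTAVAIL:
        set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
        xprt_force_disconnect(xprt);
        /* fall through to the soft errors */
case -ECONNREFUSED:
case -ECONNRESET:
case -ENETUNREACH:
        /* soft error: clear the connecting flag and let the normal
         * retry machinery schedule another attempt.
         */
        xprt_clear_connecting(xprt);
        return;
}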
@@ -1994,7 +2010,7 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
1994 | .buf_free = rpc_free, | 2010 | .buf_free = rpc_free, |
1995 | .send_request = xs_tcp_send_request, | 2011 | .send_request = xs_tcp_send_request, |
1996 | .set_retrans_timeout = xprt_set_retrans_timeout_def, | 2012 | .set_retrans_timeout = xprt_set_retrans_timeout_def, |
1997 | .close = xs_tcp_shutdown, | 2013 | .close = xs_tcp_close, |
1998 | .destroy = xs_destroy, | 2014 | .destroy = xs_destroy, |
1999 | .print_stats = xs_tcp_print_stats, | 2015 | .print_stats = xs_tcp_print_stats, |
2000 | }; | 2016 | }; |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 82271720d970..5f1f86565f16 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -794,7 +794,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, | |||
794 | { | 794 | { |
795 | static xfrm_address_t saddr_wildcard = { }; | 795 | static xfrm_address_t saddr_wildcard = { }; |
796 | struct net *net = xp_net(pol); | 796 | struct net *net = xp_net(pol); |
797 | unsigned int h; | 797 | unsigned int h, h_wildcard; |
798 | struct hlist_node *entry; | 798 | struct hlist_node *entry; |
799 | struct xfrm_state *x, *x0, *to_put; | 799 | struct xfrm_state *x, *x0, *to_put; |
800 | int acquire_in_progress = 0; | 800 | int acquire_in_progress = 0; |
@@ -819,8 +819,8 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, | |||
819 | if (best) | 819 | if (best) |
820 | goto found; | 820 | goto found; |
821 | 821 | ||
822 | h = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family); | 822 | h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family); |
823 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { | 823 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { |
824 | if (x->props.family == family && | 824 | if (x->props.family == family && |
825 | x->props.reqid == tmpl->reqid && | 825 | x->props.reqid == tmpl->reqid && |
826 | !(x->props.flags & XFRM_STATE_WILDRECV) && | 826 | !(x->props.flags & XFRM_STATE_WILDRECV) && |
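The xfrm change fixes a variable-reuse bug: xfrm_state_find() still needs the original hash h after the wildcard-source scan, apparently because a state created later in the function is linked into net->xfrm.state_bydst + h, so overwriting h with the wildcard hash filed new entries under the wrong bucket. Giving the wildcard probe its own variable keeps both values live:

/* Both hashes now stay live for the rest of the function. */
h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family);
h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard,
                           tmpl->reqid, family);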