Diffstat (limited to 'net')
 47 files changed, 529 insertions(+), 560 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 2b7390e377b3..d1e10546eb85 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -492,6 +492,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                                 continue;

                         dev_change_flags(vlandev, flgs & ~IFF_UP);
+                        vlan_transfer_operstate(dev, vlandev);
                 }
                 break;

@@ -507,6 +508,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                                 continue;

                         dev_change_flags(vlandev, flgs | IFF_UP);
+                        vlan_transfer_operstate(dev, vlandev);
                 }
                 break;

diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 6b0921364014..b4b9068e55a7 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -462,6 +462,7 @@ static int vlan_dev_open(struct net_device *dev)
         if (vlan->flags & VLAN_FLAG_GVRP)
                 vlan_gvrp_request_join(dev);

+        netif_carrier_on(dev);
         return 0;

 clear_allmulti:
@@ -471,6 +472,7 @@ del_unicast:
         if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
                 dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN);
 out:
+        netif_carrier_off(dev);
         return err;
 }

@@ -492,6 +494,7 @@ static int vlan_dev_stop(struct net_device *dev)
         if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
                 dev_unicast_delete(real_dev, dev->dev_addr, dev->addr_len);

+        netif_carrier_off(dev);
         return 0;
 }

@@ -612,6 +615,8 @@ static int vlan_dev_init(struct net_device *dev)
         struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
         int subclass = 0;

+        netif_carrier_off(dev);
+
         /* IFF_BROADCAST|IFF_MULTICAST; ??? */
         dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
         dev->iflink = real_dev->ifindex;
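Note: the net effect of the two 8021q changes is that a VLAN device now starts life with carrier off and only asserts carrier once vlan_dev_open() succeeds, dropping it again on the error path and in vlan_dev_stop(). A toy user-space sketch of that invariant (all names hypothetical, netif_carrier_on/off() stubbed as a flag):

    #include <stdio.h>
    #include <stdbool.h>

    /* toy vlan device: carrier starts off, as netif_carrier_off()
     * in vlan_dev_init() now guarantees */
    struct vdev { bool carrier; };

    static int vlan_open(struct vdev *d, bool setup_ok)
    {
            if (!setup_ok) {
                    d->carrier = false;     /* error path: carrier stays off */
                    return -1;
            }
            d->carrier = true;              /* netif_carrier_on(dev) */
            return 0;
    }

    static void vlan_stop(struct vdev *d)
    {
            d->carrier = false;             /* netif_carrier_off(dev) */
    }

    int main(void)
    {
            struct vdev d = { false };      /* init: carrier off */
            vlan_open(&d, true);
            printf("open: carrier=%d\n", d.carrier);  /* 1 */
            vlan_stop(&d);
            printf("stop: carrier=%d\n", d.carrier);  /* 0 */
            return 0;
    }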
diff --git a/net/9p/client.c b/net/9p/client.c
index 1eb580c38fbb..dd43a8289b0d 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -203,7 +203,6 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
         p9pdu_reset(req->tc);
         p9pdu_reset(req->rc);

-        req->flush_tag = 0;
         req->tc->tag = tag-1;
         req->status = REQ_STATUS_ALLOC;

@@ -324,35 +323,9 @@ static void p9_free_req(struct p9_client *c, struct p9_req_t *r)
  */
 void p9_client_cb(struct p9_client *c, struct p9_req_t *req)
 {
-        struct p9_req_t *other_req;
-        unsigned long flags;
-
         P9_DPRINTK(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);
-
-        if (req->status == REQ_STATUS_ERROR)
-                wake_up(req->wq);
-
-        if (req->flush_tag) { /* flush receive path */
-                P9_DPRINTK(P9_DEBUG_9P, "<<< RFLUSH %d\n", req->tc->tag);
-                spin_lock_irqsave(&c->lock, flags);
-                other_req = p9_tag_lookup(c, req->flush_tag);
-                if (other_req->status != REQ_STATUS_FLSH) /* stale flush */
-                        spin_unlock_irqrestore(&c->lock, flags);
-                else {
-                        other_req->status = REQ_STATUS_FLSHD;
-                        spin_unlock_irqrestore(&c->lock, flags);
-                        wake_up(other_req->wq);
-                }
-                p9_free_req(c, req);
-        } else { /* normal receive path */
-                P9_DPRINTK(P9_DEBUG_MUX, "normal: tag %d\n", req->tc->tag);
-                spin_lock_irqsave(&c->lock, flags);
-                if (req->status != REQ_STATUS_FLSHD)
-                        req->status = REQ_STATUS_RCVD;
-                spin_unlock_irqrestore(&c->lock, flags);
-                wake_up(req->wq);
-                P9_DPRINTK(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
-        }
+        wake_up(req->wq);
+        P9_DPRINTK(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
 }
 EXPORT_SYMBOL(p9_client_cb);

@@ -486,9 +459,15 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
         if (IS_ERR(req))
                 return PTR_ERR(req);

-        req->flush_tag = oldtag;

-        /* we don't free anything here because RPC isn't complete */
+        /* if we haven't received a response for oldreq,
+           remove it from the list. */
+        spin_lock(&c->lock);
+        if (oldreq->status == REQ_STATUS_FLSH)
+                list_del(&oldreq->req_list);
+        spin_unlock(&c->lock);
+
+        p9_free_req(c, req);
         return 0;
 }

@@ -509,7 +488,6 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
         struct p9_req_t *req;
         unsigned long flags;
         int sigpending;
-        int flushed = 0;

         P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);

@@ -546,42 +524,28 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
                 goto reterr;
         }

-        /* if it was a flush we just transmitted, return our tag */
-        if (type == P9_TFLUSH)
-                return req;
-again:
         P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d\n", req->wq, tag);
         err = wait_event_interruptible(*req->wq,
                                        req->status >= REQ_STATUS_RCVD);
-        P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d (flushed=%d)\n",
-                                        req->wq, tag, err, flushed);
+        P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d\n",
+                                        req->wq, tag, err);

         if (req->status == REQ_STATUS_ERROR) {
                 P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
                 err = req->t_err;
-        } else if (err == -ERESTARTSYS && flushed) {
-                P9_DPRINTK(P9_DEBUG_MUX, "flushed - going again\n");
-                goto again;
-        } else if (req->status == REQ_STATUS_FLSHD) {
-                P9_DPRINTK(P9_DEBUG_MUX, "flushed - erestartsys\n");
-                err = -ERESTARTSYS;
         }

-        if ((err == -ERESTARTSYS) && (c->status == Connected) && (!flushed)) {
+        if ((err == -ERESTARTSYS) && (c->status == Connected)) {
                 P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
-                spin_lock_irqsave(&c->lock, flags);
-                if (req->status == REQ_STATUS_SENT)
-                        req->status = REQ_STATUS_FLSH;
-                spin_unlock_irqrestore(&c->lock, flags);
                 sigpending = 1;
-                flushed = 1;
                 clear_thread_flag(TIF_SIGPENDING);

-                if (c->trans_mod->cancel(c, req)) {
-                        err = p9_client_flush(c, req);
-                        if (err == 0)
-                                goto again;
-                }
+                if (c->trans_mod->cancel(c, req))
+                        p9_client_flush(c, req);
+
+                /* if we received the response anyway, don't signal error */
+                if (req->status == REQ_STATUS_RCVD)
+                        err = 0;
         }

         if (sigpending) {
@@ -1244,19 +1208,53 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
                 ret->name, ret->uid, ret->gid, ret->muid, ret->extension,
                 ret->n_uid, ret->n_gid, ret->n_muid);

+        p9_free_req(clnt, req);
+        return ret;
+
 free_and_error:
         p9_free_req(clnt, req);
 error:
-        return ret;
+        kfree(ret);
+        return ERR_PTR(err);
 }
 EXPORT_SYMBOL(p9_client_stat);

+static int p9_client_statsize(struct p9_wstat *wst, int optional)
+{
+        int ret;
+
+        /* size[2] type[2] dev[4] qid[13] */
+        /* mode[4] atime[4] mtime[4] length[8]*/
+        /* name[s] uid[s] gid[s] muid[s] */
+        ret = 2+2+4+13+4+4+4+8+2+2+2+2;
+
+        if (wst->name)
+                ret += strlen(wst->name);
+        if (wst->uid)
+                ret += strlen(wst->uid);
+        if (wst->gid)
+                ret += strlen(wst->gid);
+        if (wst->muid)
+                ret += strlen(wst->muid);
+
+        if (optional) {
+                ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */
+                if (wst->extension)
+                        ret += strlen(wst->extension);
+        }
+
+        return ret;
+}
+
 int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
 {
         int err;
         struct p9_req_t *req;
         struct p9_client *clnt;

+        err = 0;
+        clnt = fid->clnt;
+        wst->size = p9_client_statsize(wst, clnt->dotu);
         P9_DPRINTK(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid);
         P9_DPRINTK(P9_DEBUG_9P,
                 " sz=%x type=%x dev=%x qid=%x.%llx.%x\n"
@@ -1268,10 +1266,8 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
                 wst->atime, wst->mtime, (unsigned long long)wst->length,
                 wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
                 wst->n_uid, wst->n_gid, wst->n_muid);
-        err = 0;
-        clnt = fid->clnt;

-        req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, 0, wst);
+        req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst);
         if (IS_ERR(req)) {
                 err = PTR_ERR(req);
                 goto error;
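Note: the new p9_client_statsize() simply totals the fixed-width fields of the 9P stat record (2+2+4+13+4+4+4+8 bytes plus four 2-byte string-length prefixes, 49 bytes in all), adds the lengths of whatever strings are present, and adds the 2+4+4+4-byte 9P2000.u extras when `optional` is set. A minimal user-space sketch of the same computation (hypothetical standalone harness, not the kernel code):

    #include <stdio.h>
    #include <string.h>

    /* hypothetical stand-in for struct p9_wstat's string fields */
    struct wstat {
            const char *name, *uid, *gid, *muid, *extension;
    };

    static int statsize(const struct wstat *w, int dotu)
    {
            /* size[2] type[2] dev[4] qid[13] mode[4] atime[4] mtime[4]
             * length[8], plus a 2-byte length prefix for each of the
             * four mandatory strings */
            int ret = 2+2+4+13+4+4+4+8+2+2+2+2;

            if (w->name)
                    ret += strlen(w->name);
            if (w->uid)
                    ret += strlen(w->uid);
            if (w->gid)
                    ret += strlen(w->gid);
            if (w->muid)
                    ret += strlen(w->muid);

            if (dotu) {     /* extension[s] n_uid[4] n_gid[4] n_muid[4] */
                    ret += 2+4+4+4;
                    if (w->extension)
                            ret += strlen(w->extension);
            }
            return ret;
    }

    int main(void)
    {
            struct wstat w = { "file", "user", "group", "user", NULL };
            printf("wstat size: %d bytes\n", statsize(&w, 0)); /* 49+17 = 66 */
            return 0;
    }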
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index c613ed08a5ee..a2a1814c7a8d 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -213,8 +213,8 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
         spin_unlock_irqrestore(&m->client->lock, flags);

         list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
-                list_del(&req->req_list);
                 P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req);
+                list_del(&req->req_list);
                 p9_client_cb(m->client, req);
         }
 }
@@ -336,7 +336,8 @@ static void p9_read_work(struct work_struct *work)
                         "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);

                 m->req = p9_tag_lookup(m->client, tag);
-                if (!m->req) {
+                if (!m->req || (m->req->status != REQ_STATUS_SENT &&
+                                        m->req->status != REQ_STATUS_FLSH)) {
                         P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
                                                                  tag);
                         err = -EIO;
@@ -361,10 +362,11 @@
         if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
                 P9_DPRINTK(P9_DEBUG_TRANS, "got new packet\n");
                 spin_lock(&m->client->lock);
+                if (m->req->status != REQ_STATUS_ERROR)
+                        m->req->status = REQ_STATUS_RCVD;
                 list_del(&m->req->req_list);
                 spin_unlock(&m->client->lock);
                 p9_client_cb(m->client, m->req);
-
                 m->rbuf = NULL;
                 m->rpos = 0;
                 m->rsize = 0;
@@ -454,6 +456,7 @@ static void p9_write_work(struct work_struct *work)
                 req = list_entry(m->unsent_req_list.next, struct p9_req_t,
                                req_list);
                 req->status = REQ_STATUS_SENT;
+                P9_DPRINTK(P9_DEBUG_TRANS, "move req %p\n", req);
                 list_move_tail(&req->req_list, &m->req_list);

                 m->wbuf = req->tc->sdata;
@@ -683,12 +686,13 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
         P9_DPRINTK(P9_DEBUG_TRANS, "client %p req %p\n", client, req);

         spin_lock(&client->lock);
-        list_del(&req->req_list);

         if (req->status == REQ_STATUS_UNSENT) {
+                list_del(&req->req_list);
                 req->status = REQ_STATUS_FLSHD;
                 ret = 0;
-        }
+        } else if (req->status == REQ_STATUS_SENT)
+                req->status = REQ_STATUS_FLSH;

         spin_unlock(&client->lock);

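Note: the reworked p9_fd_cancel() encodes the race explicitly: a request still on the unsent list is deleted outright and marked FLSHD (cancel succeeded, return 0), while one already written to the wire can only be marked FLSH, telling p9_client_rpc() that a TFLUSH must follow. A rough user-space sketch of that decision (status names borrowed from the patch, everything else hypothetical):

    #include <stdio.h>

    enum req_status { REQ_STATUS_UNSENT, REQ_STATUS_SENT,
                      REQ_STATUS_FLSH, REQ_STATUS_FLSHD };

    /* returns 0 if the request was never sent (fully cancelled),
     * nonzero if it is on the wire and needs an explicit flush */
    static int cancel(enum req_status *st)
    {
            if (*st == REQ_STATUS_UNSENT) {
                    *st = REQ_STATUS_FLSHD;  /* drop from list, done */
                    return 0;
            }
            if (*st == REQ_STATUS_SENT)
                    *st = REQ_STATUS_FLSH;   /* caller must send TFLUSH */
            return 1;
    }

    int main(void)
    {
            enum req_status a = REQ_STATUS_UNSENT, b = REQ_STATUS_SENT;
            printf("unsent -> need flush? %d\n", cancel(&a)); /* 0 */
            printf("sent   -> need flush? %d\n", cancel(&b)); /* 1 */
            return 0;
    }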
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 7fa0eb20b2f6..ac4990041ebb 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -295,6 +295,7 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
                 goto err_out;

         req->rc = c->rc;
+        req->status = REQ_STATUS_RCVD;
         p9_client_cb(client, req);

         return;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 2d7781ec663b..bb8579a141a8 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -134,6 +134,7 @@ static void req_done(struct virtqueue *vq)
                 P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
                 P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
                 req = p9_tag_lookup(chan->client, rc->tag);
+                req->status = REQ_STATUS_RCVD;
                 p9_client_cb(chan->client, req);
         }
 }
diff --git a/net/Kconfig b/net/Kconfig
index ce77db4fcec8..c19f549c8e74 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -119,12 +119,6 @@ menuconfig NETFILTER
           <file:Documentation/Changes> under "iptables" for the location of
           these packages.

-          Make sure to say N to "Fast switching" below if you intend to say Y
-          here, as Fast switching currently bypasses netfilter.
-
-          Chances are that you should say Y here if you compile a kernel which
-          will run as a router and N for regular hosts. If unsure, say N.
-
 if NETFILTER

 config NETFILTER_DEBUG
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 334fcd4a4ea4..3100a8940afc 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -549,6 +549,7 @@ static void br2684_setup(struct net_device *netdev)
         struct br2684_dev *brdev = BRPRIV(netdev);

         ether_setup(netdev);
+        brdev->net_dev = netdev;

         netdev->netdev_ops = &br2684_netdev_ops;

diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 1181db08d9de..fa47d5d84f5c 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -171,10 +171,8 @@ static void hci_conn_timeout(unsigned long arg)
         switch (conn->state) {
         case BT_CONNECT:
         case BT_CONNECT2:
-                if (conn->type == ACL_LINK)
+                if (conn->type == ACL_LINK && conn->out)
                         hci_acl_connect_cancel(conn);
-                else
-                        hci_acl_disconn(conn, 0x13);
                 break;
         case BT_CONFIG:
         case BT_CONNECTED:
@@ -215,6 +213,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
         conn->state = BT_OPEN;

         conn->power_save = 1;
+        conn->disc_timeout = HCI_DISCONN_TIMEOUT;

         switch (type) {
         case ACL_LINK:
@@ -247,6 +246,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
         if (hdev->notify)
                 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

+        hci_conn_init_sysfs(conn);
+
         tasklet_enable(&hdev->tx_task);

         return conn;
@@ -289,6 +290,8 @@ int hci_conn_del(struct hci_conn *conn)

         hci_conn_del_sysfs(conn);

+        hci_dev_put(hdev);
+
         return 0;
 }

@@ -424,12 +427,9 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
         if (sec_level == BT_SECURITY_SDP)
                 return 1;

-        if (sec_level == BT_SECURITY_LOW) {
-                if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0)
-                        return hci_conn_auth(conn, sec_level, auth_type);
-                else
-                        return 1;
-        }
+        if (sec_level == BT_SECURITY_LOW &&
+                        (!conn->ssp_mode || !conn->hdev->ssp_mode))
+                return 1;

         if (conn->link_mode & HCI_LM_ENCRYPT)
                 return hci_conn_auth(conn, sec_level, auth_type);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 15f40ea8d544..184ba0a88ec0 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -883,6 +883,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                 if (conn->type == ACL_LINK) {
                         conn->state = BT_CONFIG;
                         hci_conn_hold(conn);
+                        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
                 } else
                         conn->state = BT_CONNECTED;

@@ -1063,9 +1064,14 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                         hci_proto_connect_cfm(conn, ev->status);
                         hci_conn_put(conn);
                 }
-        } else
+        } else {
                 hci_auth_cfm(conn, ev->status);

+                hci_conn_hold(conn);
+                conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+                hci_conn_put(conn);
+        }
+
         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
                 if (!ev->status) {
                         struct hci_cp_set_conn_encrypt cp;
@@ -1479,7 +1485,21 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb

 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
+        struct hci_ev_pin_code_req *ev = (void *) skb->data;
+        struct hci_conn *conn;
+
         BT_DBG("%s", hdev->name);
+
+        hci_dev_lock(hdev);
+
+        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+        if (conn && conn->state == BT_CONNECTED) {
+                hci_conn_hold(conn);
+                conn->disc_timeout = HCI_PAIRING_TIMEOUT;
+                hci_conn_put(conn);
+        }
+
+        hci_dev_unlock(hdev);
 }

 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1489,7 +1509,21 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff

 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
+        struct hci_ev_link_key_notify *ev = (void *) skb->data;
+        struct hci_conn *conn;
+
         BT_DBG("%s", hdev->name);
+
+        hci_dev_lock(hdev);
+
+        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+        if (conn) {
+                hci_conn_hold(conn);
+                conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+                hci_conn_put(conn);
+        }
+
+        hci_dev_unlock(hdev);
 }

 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index ed82796d4a0f..4cc3624bd22d 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -9,8 +9,7 @@
 struct class *bt_class = NULL;
 EXPORT_SYMBOL_GPL(bt_class);

-static struct workqueue_struct *btaddconn;
-static struct workqueue_struct *btdelconn;
+static struct workqueue_struct *bt_workq;

 static inline char *link_typetostr(int type)
 {
@@ -88,35 +87,20 @@

 static void add_conn(struct work_struct *work)
 {
-        struct hci_conn *conn = container_of(work, struct hci_conn, work);
+        struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
+        struct hci_dev *hdev = conn->hdev;
+
+        /* ensure previous del is complete */
+        flush_work(&conn->work_del);

-        flush_workqueue(btdelconn);
+        dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);

         if (device_add(&conn->dev) < 0) {
                 BT_ERR("Failed to register connection device");
                 return;
         }
-}
-
-void hci_conn_add_sysfs(struct hci_conn *conn)
-{
-        struct hci_dev *hdev = conn->hdev;
-
-        BT_DBG("conn %p", conn);
-
-        conn->dev.type = &bt_link;
-        conn->dev.class = bt_class;
-        conn->dev.parent = &hdev->dev;
-
-        dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
-
-        dev_set_drvdata(&conn->dev, conn);
-
-        device_initialize(&conn->dev);

-        INIT_WORK(&conn->work, add_conn);
-
-        queue_work(btaddconn, &conn->work);
+        hci_dev_hold(hdev);
 }

 /*
@@ -131,9 +115,15 @@ static int __match_tty(struct device *dev, void *data)

 static void del_conn(struct work_struct *work)
 {
-        struct hci_conn *conn = container_of(work, struct hci_conn, work);
+        struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
         struct hci_dev *hdev = conn->hdev;

+        /* ensure previous add is complete */
+        flush_work(&conn->work_add);
+
+        if (!device_is_registered(&conn->dev))
+                return;
+
         while (1) {
                 struct device *dev;

@@ -146,19 +136,40 @@

         device_del(&conn->dev);
         put_device(&conn->dev);
+
         hci_dev_put(hdev);
 }

-void hci_conn_del_sysfs(struct hci_conn *conn)
+void hci_conn_init_sysfs(struct hci_conn *conn)
 {
+        struct hci_dev *hdev = conn->hdev;
+
         BT_DBG("conn %p", conn);

-        if (!device_is_registered(&conn->dev))
-                return;
+        conn->dev.type = &bt_link;
+        conn->dev.class = bt_class;
+        conn->dev.parent = &hdev->dev;
+
+        dev_set_drvdata(&conn->dev, conn);
+
+        device_initialize(&conn->dev);
+
+        INIT_WORK(&conn->work_add, add_conn);
+        INIT_WORK(&conn->work_del, del_conn);
+}

-        INIT_WORK(&conn->work, del_conn);
+void hci_conn_add_sysfs(struct hci_conn *conn)
+{
+        BT_DBG("conn %p", conn);

-        queue_work(btdelconn, &conn->work);
+        queue_work(bt_workq, &conn->work_add);
+}
+
+void hci_conn_del_sysfs(struct hci_conn *conn)
+{
+        BT_DBG("conn %p", conn);
+
+        queue_work(bt_workq, &conn->work_del);
 }

 static inline char *host_typetostr(int type)
@@ -435,20 +446,13 @@ void hci_unregister_sysfs(struct hci_dev *hdev)

 int __init bt_sysfs_init(void)
 {
-        btaddconn = create_singlethread_workqueue("btaddconn");
-        if (!btaddconn)
+        bt_workq = create_singlethread_workqueue("bluetooth");
+        if (!bt_workq)
                 return -ENOMEM;

-        btdelconn = create_singlethread_workqueue("btdelconn");
-        if (!btdelconn) {
-                destroy_workqueue(btaddconn);
-                return -ENOMEM;
-        }
-
         bt_class = class_create(THIS_MODULE, "bluetooth");
         if (IS_ERR(bt_class)) {
-                destroy_workqueue(btdelconn);
-                destroy_workqueue(btaddconn);
+                destroy_workqueue(bt_workq);
                 return PTR_ERR(bt_class);
         }

@@ -457,8 +461,7 @@ int __init bt_sysfs_init(void)

 void bt_sysfs_cleanup(void)
 {
-        destroy_workqueue(btaddconn);
-        destroy_workqueue(btdelconn);
+        destroy_workqueue(bt_workq);

         class_destroy(bt_class);
 }
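Note: the sysfs rework replaces the two single-threaded queues with one and serializes the add/del pair per connection: both items run on the same single-threaded workqueue, each flush_work()s its counterpart first, and del_conn() bails out if the device was never registered. A stripped-down sketch of just that guard (illustrative only; the kernel's flush_work() semantics are not modeled here):

    #include <stdio.h>
    #include <stdbool.h>

    /* toy connection mirroring the registered-device guard */
    struct conn { bool registered; };

    static void add_conn(struct conn *c)
    {
            c->registered = true;   /* device_add() stand-in; may fail */
            printf("add: registered\n");
    }

    static void del_conn(struct conn *c)
    {
            if (!c->registered) {   /* !device_is_registered(&conn->dev) */
                    printf("del: nothing to do\n");
                    return;
            }
            c->registered = false;
            printf("del: unregistered\n");
    }

    int main(void)
    {
            /* a single-threaded queue executes items strictly in
             * submission order, so add and del can never overlap */
            struct conn c = { false };
            del_conn(&c);   /* del ran before add ever did: safe no-op */
            add_conn(&c);
            del_conn(&c);
            return 0;
    }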
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 3953ac4214c8..e4a418fcb35b 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -788,15 +788,23 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff *skb,
         return NF_STOLEN;
 }

+#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
-        if (skb->protocol == htons(ETH_P_IP) &&
+        if (skb->nfct != NULL &&
+            (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb)) &&
             skb->len > skb->dev->mtu &&
             !skb_is_gso(skb))
                 return ip_fragment(skb, br_dev_queue_push_xmit);
         else
                 return br_dev_queue_push_xmit(skb);
 }
+#else
+static int br_nf_dev_queue_xmit(struct sk_buff *skb)
+{
+        return br_dev_queue_push_xmit(skb);
+}
+#endif

 /* PF_BRIDGE/POST_ROUTING ********************************************/
 static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
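Note: the bridge-netfilter change only refragments packets that conntrack itself reassembled (skb->nfct set, for plain or VLAN-tagged IPv4), and compiles the whole check away when NF_CONNTRACK_IPV4 is not configured. A minimal sketch of the same compile-time stub pattern (flag and function names hypothetical; build with and without -DHAVE_CONNTRACK to compare):

    #include <stdio.h>

    #ifdef HAVE_CONNTRACK
    static const char *queue_xmit(int nfct, int len, int mtu)
    {
            /* fragment only traffic conntrack actually reassembled */
            if (nfct && len > mtu)
                    return "ip_fragment";
            return "push_xmit";
    }
    #else
    static const char *queue_xmit(int nfct, int len, int mtu)
    {
            (void)nfct; (void)len; (void)mtu;
            return "push_xmit";     /* no conntrack, never refragment */
    }
    #endif

    int main(void)
    {
            printf("%s\n", queue_xmit(1, 1600, 1500));
            return 0;
    }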
diff --git a/net/core/datagram.c b/net/core/datagram.c
index d0de644b378d..b01a76abe1d2 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -64,13 +64,25 @@ static inline int connection_based(struct sock *sk)
         return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
 }

+static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+                                  void *key)
+{
+        unsigned long bits = (unsigned long)key;
+
+        /*
+         * Avoid a wakeup if event not interesting for us
+         */
+        if (bits && !(bits & (POLLIN | POLLERR)))
+                return 0;
+        return autoremove_wake_function(wait, mode, sync, key);
+}
 /*
  * Wait for a packet..
  */
 static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
 {
         int error;
-        DEFINE_WAIT(wait);
+        DEFINE_WAIT_FUNC(wait, receiver_wake_function);

         prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

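Note: receiver_wake_function() avoids waking a datagram receiver when the wake-up key carries only events it does not care about (anything without POLLIN or POLLERR); an empty key still wakes unconditionally. A user-space sketch of just that filter (the harness is hypothetical, the mask test is the one from the patch):

    #include <stdio.h>
    #include <poll.h>

    /* stand-in for the wait-queue callback: wake the sleeper only if
     * the key is empty or carries bits a receiver cares about */
    static int receiver_interested(unsigned long bits)
    {
            if (bits && !(bits & (POLLIN | POLLERR)))
                    return 0;       /* e.g. POLLOUT-only: skip the wakeup */
            return 1;
    }

    int main(void)
    {
            printf("POLLOUT only : %d\n", receiver_interested(POLLOUT)); /* 0 */
            printf("POLLIN       : %d\n", receiver_interested(POLLIN));  /* 1 */
            printf("no key bits  : %d\n", receiver_interested(0));       /* 1 */
            return 0;
    }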
diff --git a/net/core/dev.c b/net/core/dev.c
index 308a7d0c277f..e2e9e4af3ace 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1735,11 +1735,12 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 {
         u32 hash;

-        if (skb_rx_queue_recorded(skb)) {
-                hash = skb_get_rx_queue(skb);
-        } else if (skb->sk && skb->sk->sk_hash) {
+        if (skb_rx_queue_recorded(skb))
+                return skb_get_rx_queue(skb) % dev->real_num_tx_queues;
+
+        if (skb->sk && skb->sk->sk_hash)
                 hash = skb->sk->sk_hash;
-        } else
+        else
                 hash = skb->protocol;

         hash = jhash_1word(hash, skb_tx_hashrnd);
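Note: after this change a recorded rx queue maps straight onto a tx queue by modulo instead of being folded into the hash, so receive and transmit queues stay correlated. A sketch of the selection policy; the mixing function below is a hypothetical stand-in for jhash_1word(), and the real skb_tx_hash() scales the hash onto the queue range rather than taking a modulo:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t mix(uint32_t h) { return h * 2654435761u; } /* stand-in */

    static uint16_t pick_tx_queue(int rx_recorded, uint32_t rx_queue,
                                  uint32_t sk_hash, uint32_t protocol,
                                  uint16_t num_tx_queues)
    {
            /* recorded rx queue passes straight through, clamped by
             * modulo, with no re-hashing */
            if (rx_recorded)
                    return rx_queue % num_tx_queues;

            return mix(sk_hash ? sk_hash : protocol) % num_tx_queues;
    }

    int main(void)
    {
            printf("rx 5 on 4 queues -> %u\n", pick_tx_queue(1, 5, 0, 0, 4));
            printf("hashed flow      -> %u\n", pick_tx_queue(0, 0, 0xdead, 0, 4));
            return 0;
    }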
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ce6356cd9f71..d152394b2611 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -502,7 +502,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
         shinfo->gso_segs = 0;
         shinfo->gso_type = 0;
         shinfo->ip6_frag_id = 0;
+        shinfo->tx_flags.flags = 0;
         shinfo->frag_list = NULL;
+        memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));

         memset(skb, 0, offsetof(struct sk_buff, tail));
         skb->data = skb->head + NET_SKB_PAD;
@@ -1365,9 +1367,8 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)

 static inline struct page *linear_to_page(struct page *page, unsigned int *len,
                                           unsigned int *offset,
-                                          struct sk_buff *skb)
+                                          struct sk_buff *skb, struct sock *sk)
 {
-        struct sock *sk = skb->sk;
         struct page *p = sk->sk_sndmsg_page;
         unsigned int off;

@@ -1405,13 +1406,14 @@ new_page:
  */
 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
                                 unsigned int *len, unsigned int offset,
-                                struct sk_buff *skb, int linear)
+                                struct sk_buff *skb, int linear,
+                                struct sock *sk)
 {
         if (unlikely(spd->nr_pages == PIPE_BUFFERS))
                 return 1;

         if (linear) {
-                page = linear_to_page(page, len, &offset, skb);
+                page = linear_to_page(page, len, &offset, skb, sk);
                 if (!page)
                         return 1;
         } else
@@ -1442,7 +1444,8 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
 static inline int __splice_segment(struct page *page, unsigned int poff,
                                    unsigned int plen, unsigned int *off,
                                    unsigned int *len, struct sk_buff *skb,
-                                   struct splice_pipe_desc *spd, int linear)
+                                   struct splice_pipe_desc *spd, int linear,
+                                   struct sock *sk)
 {
         if (!*len)
                 return 1;
@@ -1465,7 +1468,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
                 /* the linear region may spread across several pages */
                 flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

-                if (spd_fill_page(spd, page, &flen, poff, skb, linear))
+                if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
                         return 1;

                 __segment_seek(&page, &poff, &plen, flen);
@@ -1481,8 +1484,8 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
  * pipe is full or if we already spliced the requested length.
  */
 static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
-                             unsigned int *len,
-                             struct splice_pipe_desc *spd)
+                             unsigned int *len, struct splice_pipe_desc *spd,
+                             struct sock *sk)
 {
         int seg;

@@ -1492,7 +1495,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
         if (__splice_segment(virt_to_page(skb->data),
                              (unsigned long) skb->data & (PAGE_SIZE - 1),
                              skb_headlen(skb),
-                             offset, len, skb, spd, 1))
+                             offset, len, skb, spd, 1, sk))
                 return 1;

         /*
@@ -1502,7 +1505,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
                 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

                 if (__splice_segment(f->page, f->page_offset, f->size,
-                                     offset, len, skb, spd, 0))
+                                     offset, len, skb, spd, 0, sk))
                         return 1;
         }

@@ -1528,12 +1531,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
                 .ops = &sock_pipe_buf_ops,
                 .spd_release = sock_spd_release,
         };
+        struct sock *sk = skb->sk;

         /*
          * __skb_splice_bits() only fails if the output has no room left,
          * so no point in going over the frag_list for the error case.
          */
-        if (__skb_splice_bits(skb, &offset, &tlen, &spd))
+        if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
                 goto done;
         else if (!tlen)
                 goto done;
@@ -1545,14 +1549,13 @@
                 struct sk_buff *list = skb_shinfo(skb)->frag_list;

                 for (; list && tlen; list = list->next) {
-                        if (__skb_splice_bits(list, &offset, &tlen, &spd))
+                        if (__skb_splice_bits(list, &offset, &tlen, &spd, sk))
                                 break;
                 }
         }

 done:
         if (spd.nr_pages) {
-                struct sock *sk = skb->sk;
                 int ret;

                 /*
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index b2cf91e4ccaa..9d26a3da37e5 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -407,7 +407,7 @@ config INET_XFRM_MODE_BEET
           If unsure, say Y.

 config INET_LRO
-        tristate "Large Receive Offload (ipv4/tcp)"
+        bool "Large Receive Offload (ipv4/tcp)"

         ---help---
           Support for Large Receive Offload (ipv4/tcp).
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 5ba533d234db..831fe1879dc0 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -253,9 +253,9 @@ unsigned int arpt_do_table(struct sk_buff *skb,
         indev = in ? in->name : nulldevname;
         outdev = out ? out->name : nulldevname;

-        rcu_read_lock_bh();
-        private = rcu_dereference(table->private);
-        table_base = rcu_dereference(private->entries[smp_processor_id()]);
+        xt_info_rdlock_bh();
+        private = table->private;
+        table_base = private->entries[smp_processor_id()];

         e = get_entry(table_base, private->hook_entry[hook]);
         back = get_entry(table_base, private->underflow[hook]);
@@ -273,6 +273,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,

                         hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) +
                                 (2 * skb->dev->addr_len);
+
                         ADD_COUNTER(e->counters, hdr_len, 1);

                         t = arpt_get_target(e);
@@ -328,8 +329,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
                         e = (void *)e + e->next_offset;
                 }
         } while (!hotdrop);
-
-        rcu_read_unlock_bh();
+        xt_info_rdunlock_bh();

         if (hotdrop)
                 return NF_DROP;
@@ -711,9 +711,12 @@ static void get_counters(const struct xt_table_info *t,
         /* Instead of clearing (by a previous call to memset())
          * the counters and using adds, we set the counters
          * with data used by 'current' CPU
-         * We dont care about preemption here.
+         *
+         * Bottom half has to be disabled to prevent deadlock
+         * if new softirq were to run and call ipt_do_table
          */
-        curcpu = raw_smp_processor_id();
+        local_bh_disable();
+        curcpu = smp_processor_id();

         i = 0;
         ARPT_ENTRY_ITERATE(t->entries[curcpu],
@@ -726,73 +729,22 @@
                 if (cpu == curcpu)
                         continue;
                 i = 0;
+                xt_info_wrlock(cpu);
                 ARPT_ENTRY_ITERATE(t->entries[cpu],
                                    t->size,
                                    add_entry_to_counter,
                                    counters,
                                    &i);
+                xt_info_wrunlock(cpu);
         }
-}
-
-
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
-static int
-add_counter_to_entry(struct arpt_entry *e,
-                     const struct xt_counters addme[],
-                     unsigned int *i)
-{
-        ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
-        (*i)++;
-        return 0;
-}
-
-/* Take values from counters and add them back onto the current cpu */
-static void put_counters(struct xt_table_info *t,
-                         const struct xt_counters counters[])
-{
-        unsigned int i, cpu;
-
-        local_bh_disable();
-        cpu = smp_processor_id();
-        i = 0;
-        ARPT_ENTRY_ITERATE(t->entries[cpu],
-                           t->size,
-                           add_counter_to_entry,
-                           counters,
-                           &i);
         local_bh_enable();
 }

-static inline int
-zero_entry_counter(struct arpt_entry *e, void *arg)
-{
-        e->counters.bcnt = 0;
-        e->counters.pcnt = 0;
-        return 0;
-}
-
-static void
-clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
-{
-        unsigned int cpu;
-        const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
-
-        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
-        for_each_possible_cpu(cpu) {
-                memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
-                ARPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
-                                   zero_entry_counter, NULL);
-        }
-}
-
 static struct xt_counters *alloc_counters(struct xt_table *table)
 {
         unsigned int countersize;
         struct xt_counters *counters;
         struct xt_table_info *private = table->private;
-        struct xt_table_info *info;

         /* We need atomic snapshot of counters: rest doesn't change
          * (other than comefrom, which userspace doesn't care
@@ -802,30 +754,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
         counters = vmalloc_node(countersize, numa_node_id());

         if (counters == NULL)
-                goto nomem;
-
-        info = xt_alloc_table_info(private->size);
-        if (!info)
-                goto free_counters;
-
-        clone_counters(info, private);
-
-        mutex_lock(&table->lock);
-        xt_table_entry_swap_rcu(private, info);
-        synchronize_net(); /* Wait until smoke has cleared */
+                return ERR_PTR(-ENOMEM);

-        get_counters(info, counters);
-        put_counters(private, counters);
-        mutex_unlock(&table->lock);
-
-        xt_free_table_info(info);
+        get_counters(private, counters);

         return counters;
-
-free_counters:
-        vfree(counters);
-nomem:
-        return ERR_PTR(-ENOMEM);
 }

 static int copy_entries_to_user(unsigned int total_size,
@@ -1094,8 +1027,9 @@ static int __do_replace(struct net *net, const char *name,
             (newinfo->number <= oldinfo->initial_entries))
                 module_put(t->me);

-        /* Get the old counters. */
+        /* Get the old counters, and synchronize with replace */
         get_counters(oldinfo, counters);
+
         /* Decrease module usage counts and free resource */
         loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
         ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
@@ -1165,10 +1099,23 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
         return ret;
 }

+/* We're lazy, and add to the first CPU; overflow works its fey magic
+ * and everything is OK. */
+static int
+add_counter_to_entry(struct arpt_entry *e,
+                     const struct xt_counters addme[],
+                     unsigned int *i)
+{
+        ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
+
+        (*i)++;
+        return 0;
+}
+
 static int do_add_counters(struct net *net, void __user *user, unsigned int len,
                            int compat)
 {
-        unsigned int i;
+        unsigned int i, curcpu;
         struct xt_counters_info tmp;
         struct xt_counters *paddc;
         unsigned int num_counters;
@@ -1224,26 +1171,26 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
                 goto free;
         }

-        mutex_lock(&t->lock);
+        local_bh_disable();
         private = t->private;
         if (private->number != num_counters) {
                 ret = -EINVAL;
                 goto unlock_up_free;
         }

-        preempt_disable();
         i = 0;
         /* Choose the copy that is on our node */
-        loc_cpu_entry = private->entries[smp_processor_id()];
+        curcpu = smp_processor_id();
+        loc_cpu_entry = private->entries[curcpu];
+        xt_info_wrlock(curcpu);
         ARPT_ENTRY_ITERATE(loc_cpu_entry,
                            private->size,
                            add_counter_to_entry,
                            paddc,
                            &i);
-        preempt_enable();
+        xt_info_wrunlock(curcpu);
 unlock_up_free:
-        mutex_unlock(&t->lock);
-
+        local_bh_enable();
         xt_table_unlock(t);
         module_put(t->me);
 free:
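Note: both arp_tables and ip_tables (below) move from the RCU-swap counter scheme to per-CPU entry copies guarded by a per-CPU lock. Packet processing updates only its own CPU's counters with bottom halves disabled, and a counter snapshot walks every CPU, taking that CPU's write lock just long enough to read its copy. A user-space pthread sketch of the same idea (all names hypothetical; a plain mutex stands in for the kernel's per-CPU recursive lock; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    #define NCPU 4

    /* one counter copy plus its lock per "CPU" */
    static long counters[NCPU];
    static pthread_mutex_t locks[NCPU] = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER };

    static void *packet_path(void *arg)
    {
            int cpu = (int)(long)arg;

            for (int i = 0; i < 100000; i++) {
                    pthread_mutex_lock(&locks[cpu]);   /* xt_info_rdlock_bh()   */
                    counters[cpu]++;                   /* ADD_COUNTER()         */
                    pthread_mutex_unlock(&locks[cpu]); /* xt_info_rdunlock_bh() */
            }
            return NULL;
    }

    /* get_counters(): snapshot every CPU's copy under its own lock */
    static long snapshot(void)
    {
            long sum = 0;

            for (int cpu = 0; cpu < NCPU; cpu++) {
                    pthread_mutex_lock(&locks[cpu]);   /* xt_info_wrlock(cpu)   */
                    sum += counters[cpu];
                    pthread_mutex_unlock(&locks[cpu]); /* xt_info_wrunlock(cpu) */
            }
            return sum;
    }

    int main(void)
    {
            pthread_t t[NCPU];

            for (long i = 0; i < NCPU; i++)
                    pthread_create(&t[i], NULL, packet_path, (void *)i);
            for (int i = 0; i < NCPU; i++)
                    pthread_join(t[i], NULL);
            printf("total: %ld\n", snapshot()); /* 4 * 100000 */
            return 0;
    }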
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 810c0b62c7d4..2ec8d7290c40 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -338,10 +338,9 @@ ipt_do_table(struct sk_buff *skb,
         tgpar.hooknum = hook;

         IP_NF_ASSERT(table->valid_hooks & (1 << hook));
-
-        rcu_read_lock_bh();
-        private = rcu_dereference(table->private);
-        table_base = rcu_dereference(private->entries[smp_processor_id()]);
+        xt_info_rdlock_bh();
+        private = table->private;
+        table_base = private->entries[smp_processor_id()];

         e = get_entry(table_base, private->hook_entry[hook]);

@@ -436,8 +435,7 @@ ipt_do_table(struct sk_buff *skb,
                         e = (void *)e + e->next_offset;
                 }
         } while (!hotdrop);
-
-        rcu_read_unlock_bh();
+        xt_info_rdunlock_bh();

 #ifdef DEBUG_ALLOW_ALL
         return NF_ACCEPT;
@@ -896,10 +894,13 @@ get_counters(const struct xt_table_info *t,

         /* Instead of clearing (by a previous call to memset())
          * the counters and using adds, we set the counters
-         * with data used by 'current' CPU
-         * We dont care about preemption here.
+         * with data used by 'current' CPU.
+         *
+         * Bottom half has to be disabled to prevent deadlock
+         * if new softirq were to run and call ipt_do_table
          */
-        curcpu = raw_smp_processor_id();
+        local_bh_disable();
+        curcpu = smp_processor_id();

         i = 0;
         IPT_ENTRY_ITERATE(t->entries[curcpu],
@@ -912,74 +913,22 @@ get_counters(const struct xt_table_info *t,
                 if (cpu == curcpu)
                         continue;
                 i = 0;
+                xt_info_wrlock(cpu);
                 IPT_ENTRY_ITERATE(t->entries[cpu],
                                   t->size,
                                   add_entry_to_counter,
                                   counters,
                                   &i);
+                xt_info_wrunlock(cpu);
         }
-
-}
-
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
-static int
-add_counter_to_entry(struct ipt_entry *e,
-                     const struct xt_counters addme[],
-                     unsigned int *i)
-{
-        ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
-        (*i)++;
-        return 0;
-}
-
-/* Take values from counters and add them back onto the current cpu */
-static void put_counters(struct xt_table_info *t,
-                         const struct xt_counters counters[])
-{
-        unsigned int i, cpu;
-
-        local_bh_disable();
-        cpu = smp_processor_id();
-        i = 0;
-        IPT_ENTRY_ITERATE(t->entries[cpu],
-                          t->size,
-                          add_counter_to_entry,
-                          counters,
-                          &i);
         local_bh_enable();
 }

-
-static inline int
-zero_entry_counter(struct ipt_entry *e, void *arg)
-{
-        e->counters.bcnt = 0;
-        e->counters.pcnt = 0;
-        return 0;
-}
-
-static void
-clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
-{
-        unsigned int cpu;
-        const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
-
-        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
-        for_each_possible_cpu(cpu) {
-                memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
-                IPT_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
-                                  zero_entry_counter, NULL);
-        }
-}
-
 static struct xt_counters * alloc_counters(struct xt_table *table)
 {
         unsigned int countersize;
         struct xt_counters *counters;
         struct xt_table_info *private = table->private;
-        struct xt_table_info *info;

         /* We need atomic snapshot of counters: rest doesn't change
            (other than comefrom, which userspace doesn't care
@@ -988,30 +937,11 @@ static struct xt_counters * alloc_counters(struct xt_table *table)
         counters = vmalloc_node(countersize, numa_node_id());

         if (counters == NULL)
-                goto nomem;
+                return ERR_PTR(-ENOMEM);

-        info = xt_alloc_table_info(private->size);
-        if (!info)
-                goto free_counters;
-
-        clone_counters(info, private);
-
-        mutex_lock(&table->lock);
-        xt_table_entry_swap_rcu(private, info);
-        synchronize_net(); /* Wait until smoke has cleared */
-
-        get_counters(info, counters);
-        put_counters(private, counters);
-        mutex_unlock(&table->lock);
-
-        xt_free_table_info(info);
+        get_counters(private, counters);

         return counters;
-
-free_counters:
-        vfree(counters);
-nomem:
-        return ERR_PTR(-ENOMEM);
 }

 static int
@@ -1306,8 +1236,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
             (newinfo->number <= oldinfo->initial_entries))
                 module_put(t->me);

-        /* Get the old counters. */
+        /* Get the old counters, and synchronize with replace */
         get_counters(oldinfo, counters);
+
         /* Decrease module usage counts and free resource */
         loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
         IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
@@ -1377,11 +1308,23 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1377 | return ret; | 1308 | return ret; |
1378 | } | 1309 | } |
1379 | 1310 | ||
1311 | /* We're lazy, and add to the first CPU; overflow works its fey magic | ||
1312 | * and everything is OK. */ | ||
1313 | static int | ||
1314 | add_counter_to_entry(struct ipt_entry *e, | ||
1315 | const struct xt_counters addme[], | ||
1316 | unsigned int *i) | ||
1317 | { | ||
1318 | ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); | ||
1319 | |||
1320 | (*i)++; | ||
1321 | return 0; | ||
1322 | } | ||
1380 | 1323 | ||
1381 | static int | 1324 | static int |
1382 | do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) | 1325 | do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) |
1383 | { | 1326 | { |
1384 | unsigned int i; | 1327 | unsigned int i, curcpu; |
1385 | struct xt_counters_info tmp; | 1328 | struct xt_counters_info tmp; |
1386 | struct xt_counters *paddc; | 1329 | struct xt_counters *paddc; |
1387 | unsigned int num_counters; | 1330 | unsigned int num_counters; |
@@ -1437,25 +1380,26 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat | |||
1437 | goto free; | 1380 | goto free; |
1438 | } | 1381 | } |
1439 | 1382 | ||
1440 | mutex_lock(&t->lock); | 1383 | local_bh_disable(); |
1441 | private = t->private; | 1384 | private = t->private; |
1442 | if (private->number != num_counters) { | 1385 | if (private->number != num_counters) { |
1443 | ret = -EINVAL; | 1386 | ret = -EINVAL; |
1444 | goto unlock_up_free; | 1387 | goto unlock_up_free; |
1445 | } | 1388 | } |
1446 | 1389 | ||
1447 | preempt_disable(); | ||
1448 | i = 0; | 1390 | i = 0; |
1449 | /* Choose the copy that is on our node */ | 1391 | /* Choose the copy that is on our node */ |
1450 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 1392 | curcpu = smp_processor_id(); |
1393 | loc_cpu_entry = private->entries[curcpu]; | ||
1394 | xt_info_wrlock(curcpu); | ||
1451 | IPT_ENTRY_ITERATE(loc_cpu_entry, | 1395 | IPT_ENTRY_ITERATE(loc_cpu_entry, |
1452 | private->size, | 1396 | private->size, |
1453 | add_counter_to_entry, | 1397 | add_counter_to_entry, |
1454 | paddc, | 1398 | paddc, |
1455 | &i); | 1399 | &i); |
1456 | preempt_enable(); | 1400 | xt_info_wrunlock(curcpu); |
1457 | unlock_up_free: | 1401 | unlock_up_free: |
1458 | mutex_unlock(&t->lock); | 1402 | local_bh_enable(); |
1459 | xt_table_unlock(t); | 1403 | xt_table_unlock(t); |
1460 | module_put(t->me); | 1404 | module_put(t->me); |
1461 | free: | 1405 | free: |
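
[Editor's sketch] The ip_tables.c hunks above convert the hot path from RCU to a per-CPU lock scheme: each packet-path reader takes only its own CPU's lock (recursively, via a nesting count), while counter collection and updates take one remote CPU's lock at a time as writers. The helpers themselves live in include/linux/netfilter/x_tables.h, outside this diff; the sketch below shows how they plausibly pair up with the calls above and is not the verbatim header.

/* Sketch, not the verbatim header: per-CPU "recursive reader" lock.
 * Needs <linux/spinlock.h>, <linux/percpu.h>, <linux/smp.h>. */
struct xt_info_lock {
	spinlock_t lock;
	unsigned char readers;		/* read-side nesting depth on this CPU */
};
DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);

static inline void xt_info_rdlock_bh(void)
{
	struct xt_info_lock *lock;

	local_bh_disable();			/* pins us to this CPU's lock */
	lock = &__get_cpu_var(xt_info_locks);
	if (likely(!lock->readers++))		/* only the outermost reader spins */
		spin_lock(&lock->lock);
}

static inline void xt_info_rdunlock_bh(void)
{
	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);

	if (likely(!--lock->readers))
		spin_unlock(&lock->lock);
	local_bh_enable();
}

/* Writers (counter collection/update) take one CPU's lock at a time,
 * so they stall packet processing only on that CPU. */
static inline void xt_info_wrlock(unsigned int cpu)
{
	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
}

static inline void xt_info_wrunlock(unsigned int cpu)
{
	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
}

The readers count is what makes re-entry safe: if a target re-enters ipt_do_table() on the same CPU (REJECT, for instance, pushing a reply back through the stack), the nested call only bumps readers instead of deadlocking on the spinlock it already holds.
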
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c40debe51b38..c4c60e9f068a 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -3397,7 +3397,7 @@ int __init ip_rt_init(void) | |||
3397 | 0, | 3397 | 0, |
3398 | &rt_hash_log, | 3398 | &rt_hash_log, |
3399 | &rt_hash_mask, | 3399 | &rt_hash_mask, |
3400 | 0); | 3400 | rhash_entries ? 0 : 512 * 1024); |
3401 | memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket)); | 3401 | memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket)); |
3402 | rt_hash_lock_init(); | 3402 | rt_hash_lock_init(); |
3403 | 3403 | ||
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c96a6bb25430..eec3e6f9956c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -597,16 +597,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) | |||
597 | tcp_grow_window(sk, skb); | 597 | tcp_grow_window(sk, skb); |
598 | } | 598 | } |
599 | 599 | ||
600 | static u32 tcp_rto_min(struct sock *sk) | ||
601 | { | ||
602 | struct dst_entry *dst = __sk_dst_get(sk); | ||
603 | u32 rto_min = TCP_RTO_MIN; | ||
604 | |||
605 | if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) | ||
606 | rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN); | ||
607 | return rto_min; | ||
608 | } | ||
609 | |||
610 | /* Called to compute a smoothed rtt estimate. The data fed to this | 600 | /* Called to compute a smoothed rtt estimate. The data fed to this |
611 | * routine either comes from timestamps, or from segments that were | 601 | * routine either comes from timestamps, or from segments that were |
612 | * known _not_ to have been retransmitted [see Karn/Partridge | 602 | * known _not_ to have been retransmitted [see Karn/Partridge |
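
[Editor's sketch] tcp_rto_min() is only deleted here; the dst-locked RTO minimum is still needed by callers outside tcp_input.c, so the function presumably moves to a shared header rather than going away. A sketch of the inline as it would land in include/net/tcp.h -- the destination is an assumption, the body is taken verbatim from the lines removed above:

static inline u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}
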
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 800ae8542471..219e165aea10 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -365,9 +365,9 @@ ip6t_do_table(struct sk_buff *skb, | |||
365 | 365 | ||
366 | IP_NF_ASSERT(table->valid_hooks & (1 << hook)); | 366 | IP_NF_ASSERT(table->valid_hooks & (1 << hook)); |
367 | 367 | ||
368 | rcu_read_lock_bh(); | 368 | xt_info_rdlock_bh(); |
369 | private = rcu_dereference(table->private); | 369 | private = table->private; |
370 | table_base = rcu_dereference(private->entries[smp_processor_id()]); | 370 | table_base = private->entries[smp_processor_id()]; |
371 | 371 | ||
372 | e = get_entry(table_base, private->hook_entry[hook]); | 372 | e = get_entry(table_base, private->hook_entry[hook]); |
373 | 373 | ||
@@ -466,7 +466,7 @@ ip6t_do_table(struct sk_buff *skb, | |||
466 | #ifdef CONFIG_NETFILTER_DEBUG | 466 | #ifdef CONFIG_NETFILTER_DEBUG |
467 | ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON; | 467 | ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON; |
468 | #endif | 468 | #endif |
469 | rcu_read_unlock_bh(); | 469 | xt_info_rdunlock_bh(); |
470 | 470 | ||
471 | #ifdef DEBUG_ALLOW_ALL | 471 | #ifdef DEBUG_ALLOW_ALL |
472 | return NF_ACCEPT; | 472 | return NF_ACCEPT; |
@@ -926,9 +926,12 @@ get_counters(const struct xt_table_info *t, | |||
926 | /* Instead of clearing (by a previous call to memset()) | 926 | /* Instead of clearing (by a previous call to memset()) |
927 | * the counters and using adds, we set the counters | 927 | * the counters and using adds, we set the counters |
928 | * with data used by 'current' CPU | 928 | * with data used by 'current' CPU |
929 | * We dont care about preemption here. | 929 | * |
930 | * Bottom half has to be disabled to prevent deadlock | ||
931 | * if new softirq were to run and call ipt_do_table | ||
930 | */ | 932 | */ |
931 | curcpu = raw_smp_processor_id(); | 933 | local_bh_disable(); |
934 | curcpu = smp_processor_id(); | ||
932 | 935 | ||
933 | i = 0; | 936 | i = 0; |
934 | IP6T_ENTRY_ITERATE(t->entries[curcpu], | 937 | IP6T_ENTRY_ITERATE(t->entries[curcpu], |
@@ -941,72 +944,22 @@ get_counters(const struct xt_table_info *t, | |||
941 | if (cpu == curcpu) | 944 | if (cpu == curcpu) |
942 | continue; | 945 | continue; |
943 | i = 0; | 946 | i = 0; |
947 | xt_info_wrlock(cpu); | ||
944 | IP6T_ENTRY_ITERATE(t->entries[cpu], | 948 | IP6T_ENTRY_ITERATE(t->entries[cpu], |
945 | t->size, | 949 | t->size, |
946 | add_entry_to_counter, | 950 | add_entry_to_counter, |
947 | counters, | 951 | counters, |
948 | &i); | 952 | &i); |
953 | xt_info_wrunlock(cpu); | ||
949 | } | 954 | } |
950 | } | ||
951 | |||
952 | /* We're lazy, and add to the first CPU; overflow works its fey magic | ||
953 | * and everything is OK. */ | ||
954 | static int | ||
955 | add_counter_to_entry(struct ip6t_entry *e, | ||
956 | const struct xt_counters addme[], | ||
957 | unsigned int *i) | ||
958 | { | ||
959 | ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); | ||
960 | |||
961 | (*i)++; | ||
962 | return 0; | ||
963 | } | ||
964 | |||
965 | /* Take values from counters and add them back onto the current cpu */ | ||
966 | static void put_counters(struct xt_table_info *t, | ||
967 | const struct xt_counters counters[]) | ||
968 | { | ||
969 | unsigned int i, cpu; | ||
970 | |||
971 | local_bh_disable(); | ||
972 | cpu = smp_processor_id(); | ||
973 | i = 0; | ||
974 | IP6T_ENTRY_ITERATE(t->entries[cpu], | ||
975 | t->size, | ||
976 | add_counter_to_entry, | ||
977 | counters, | ||
978 | &i); | ||
979 | local_bh_enable(); | 955 | local_bh_enable(); |
980 | } | 956 | } |
981 | 957 | ||
982 | static inline int | ||
983 | zero_entry_counter(struct ip6t_entry *e, void *arg) | ||
984 | { | ||
985 | e->counters.bcnt = 0; | ||
986 | e->counters.pcnt = 0; | ||
987 | return 0; | ||
988 | } | ||
989 | |||
990 | static void | ||
991 | clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info) | ||
992 | { | ||
993 | unsigned int cpu; | ||
994 | const void *loc_cpu_entry = info->entries[raw_smp_processor_id()]; | ||
995 | |||
996 | memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); | ||
997 | for_each_possible_cpu(cpu) { | ||
998 | memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size); | ||
999 | IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size, | ||
1000 | zero_entry_counter, NULL); | ||
1001 | } | ||
1002 | } | ||
1003 | |||
1004 | static struct xt_counters *alloc_counters(struct xt_table *table) | 958 | static struct xt_counters *alloc_counters(struct xt_table *table) |
1005 | { | 959 | { |
1006 | unsigned int countersize; | 960 | unsigned int countersize; |
1007 | struct xt_counters *counters; | 961 | struct xt_counters *counters; |
1008 | struct xt_table_info *private = table->private; | 962 | struct xt_table_info *private = table->private; |
1009 | struct xt_table_info *info; | ||
1010 | 963 | ||
1011 | /* We need atomic snapshot of counters: rest doesn't change | 964 | /* We need atomic snapshot of counters: rest doesn't change |
1012 | (other than comefrom, which userspace doesn't care | 965 | (other than comefrom, which userspace doesn't care |
@@ -1015,30 +968,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table) | |||
1015 | counters = vmalloc_node(countersize, numa_node_id()); | 968 | counters = vmalloc_node(countersize, numa_node_id()); |
1016 | 969 | ||
1017 | if (counters == NULL) | 970 | if (counters == NULL) |
1018 | goto nomem; | 971 | return ERR_PTR(-ENOMEM); |
1019 | 972 | ||
1020 | info = xt_alloc_table_info(private->size); | 973 | get_counters(private, counters); |
1021 | if (!info) | ||
1022 | goto free_counters; | ||
1023 | |||
1024 | clone_counters(info, private); | ||
1025 | |||
1026 | mutex_lock(&table->lock); | ||
1027 | xt_table_entry_swap_rcu(private, info); | ||
1028 | synchronize_net(); /* Wait until smoke has cleared */ | ||
1029 | |||
1030 | get_counters(info, counters); | ||
1031 | put_counters(private, counters); | ||
1032 | mutex_unlock(&table->lock); | ||
1033 | |||
1034 | xt_free_table_info(info); | ||
1035 | 974 | ||
1036 | return counters; | 975 | return counters; |
1037 | |||
1038 | free_counters: | ||
1039 | vfree(counters); | ||
1040 | nomem: | ||
1041 | return ERR_PTR(-ENOMEM); | ||
1042 | } | 976 | } |
1043 | 977 | ||
1044 | static int | 978 | static int |
@@ -1334,8 +1268,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, | |||
1334 | (newinfo->number <= oldinfo->initial_entries)) | 1268 | (newinfo->number <= oldinfo->initial_entries)) |
1335 | module_put(t->me); | 1269 | module_put(t->me); |
1336 | 1270 | ||
1337 | /* Get the old counters. */ | 1271 | /* Get the old counters, and synchronize with replace */ |
1338 | get_counters(oldinfo, counters); | 1272 | get_counters(oldinfo, counters); |
1273 | |||
1339 | /* Decrease module usage counts and free resource */ | 1274 | /* Decrease module usage counts and free resource */ |
1340 | loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; | 1275 | loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; |
1341 | IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, | 1276 | IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, |
@@ -1405,11 +1340,24 @@ do_replace(struct net *net, void __user *user, unsigned int len) | |||
1405 | return ret; | 1340 | return ret; |
1406 | } | 1341 | } |
1407 | 1342 | ||
1343 | /* We're lazy, and add to the first CPU; overflow works its fey magic | ||
1344 | * and everything is OK. */ | ||
1345 | static int | ||
1346 | add_counter_to_entry(struct ip6t_entry *e, | ||
1347 | const struct xt_counters addme[], | ||
1348 | unsigned int *i) | ||
1349 | { | ||
1350 | ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); | ||
1351 | |||
1352 | (*i)++; | ||
1353 | return 0; | ||
1354 | } | ||
1355 | |||
1408 | static int | 1356 | static int |
1409 | do_add_counters(struct net *net, void __user *user, unsigned int len, | 1357 | do_add_counters(struct net *net, void __user *user, unsigned int len, |
1410 | int compat) | 1358 | int compat) |
1411 | { | 1359 | { |
1412 | unsigned int i; | 1360 | unsigned int i, curcpu; |
1413 | struct xt_counters_info tmp; | 1361 | struct xt_counters_info tmp; |
1414 | struct xt_counters *paddc; | 1362 | struct xt_counters *paddc; |
1415 | unsigned int num_counters; | 1363 | unsigned int num_counters; |
@@ -1465,25 +1413,28 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, | |||
1465 | goto free; | 1413 | goto free; |
1466 | } | 1414 | } |
1467 | 1415 | ||
1468 | mutex_lock(&t->lock); | 1416 | |
1417 | local_bh_disable(); | ||
1469 | private = t->private; | 1418 | private = t->private; |
1470 | if (private->number != num_counters) { | 1419 | if (private->number != num_counters) { |
1471 | ret = -EINVAL; | 1420 | ret = -EINVAL; |
1472 | goto unlock_up_free; | 1421 | goto unlock_up_free; |
1473 | } | 1422 | } |
1474 | 1423 | ||
1475 | preempt_disable(); | ||
1476 | i = 0; | 1424 | i = 0; |
1477 | /* Choose the copy that is on our node */ | 1425 | /* Choose the copy that is on our node */ |
1478 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 1426 | curcpu = smp_processor_id(); |
1427 | xt_info_wrlock(curcpu); | ||
1428 | loc_cpu_entry = private->entries[curcpu]; | ||
1479 | IP6T_ENTRY_ITERATE(loc_cpu_entry, | 1429 | IP6T_ENTRY_ITERATE(loc_cpu_entry, |
1480 | private->size, | 1430 | private->size, |
1481 | add_counter_to_entry, | 1431 | add_counter_to_entry, |
1482 | paddc, | 1432 | paddc, |
1483 | &i); | 1433 | &i); |
1484 | preempt_enable(); | 1434 | xt_info_wrunlock(curcpu); |
1435 | |||
1485 | unlock_up_free: | 1436 | unlock_up_free: |
1486 | mutex_unlock(&t->lock); | 1437 | local_bh_enable(); |
1487 | xt_table_unlock(t); | 1438 | xt_table_unlock(t); |
1488 | module_put(t->me); | 1439 | module_put(t->me); |
1489 | free: | 1440 | free: |
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c index 14e6724d5672..91490ad9302c 100644 --- a/net/ipv6/netfilter/ip6t_ipv6header.c +++ b/net/ipv6/netfilter/ip6t_ipv6header.c | |||
@@ -50,14 +50,14 @@ ipv6header_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
50 | struct ipv6_opt_hdr _hdr; | 50 | struct ipv6_opt_hdr _hdr; |
51 | int hdrlen; | 51 | int hdrlen; |
52 | 52 | ||
53 | /* Is there enough space for the next ext header? */ | ||
54 | if (len < (int)sizeof(struct ipv6_opt_hdr)) | ||
55 | return false; | ||
56 | /* No more exthdr -> evaluate */ | 53 | /* No more exthdr -> evaluate */ |
57 | if (nexthdr == NEXTHDR_NONE) { | 54 | if (nexthdr == NEXTHDR_NONE) { |
58 | temp |= MASK_NONE; | 55 | temp |= MASK_NONE; |
59 | break; | 56 | break; |
60 | } | 57 | } |
58 | /* Is there enough space for the next ext header? */ | ||
59 | if (len < (int)sizeof(struct ipv6_opt_hdr)) | ||
60 | return false; | ||
61 | /* ESP -> evaluate */ | 61 | /* ESP -> evaluate */ |
62 | if (nexthdr == NEXTHDR_ESP) { | 62 | if (nexthdr == NEXTHDR_ESP) { |
63 | temp |= MASK_ESP; | 63 | temp |= MASK_ESP; |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index fbcbed6cad01..14134193cd17 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -757,6 +757,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
757 | local->hw.conf.long_frame_max_tx_count = 4; | 757 | local->hw.conf.long_frame_max_tx_count = 4; |
758 | local->hw.conf.short_frame_max_tx_count = 7; | 758 | local->hw.conf.short_frame_max_tx_count = 7; |
759 | local->hw.conf.radio_enabled = true; | 759 | local->hw.conf.radio_enabled = true; |
760 | local->user_power_level = -1; | ||
760 | 761 | ||
761 | INIT_LIST_HEAD(&local->interfaces); | 762 | INIT_LIST_HEAD(&local->interfaces); |
762 | mutex_init(&local->iflist_mtx); | 763 | mutex_init(&local->iflist_mtx); |
@@ -909,6 +910,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
909 | if (result < 0) | 910 | if (result < 0) |
910 | goto fail_sta_info; | 911 | goto fail_sta_info; |
911 | 912 | ||
913 | result = ieee80211_wep_init(local); | ||
914 | if (result < 0) { | ||
915 | printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n", | ||
916 | wiphy_name(local->hw.wiphy), result); | ||
917 | goto fail_wep; | ||
918 | } | ||
919 | |||
912 | rtnl_lock(); | 920 | rtnl_lock(); |
913 | result = dev_alloc_name(local->mdev, local->mdev->name); | 921 | result = dev_alloc_name(local->mdev, local->mdev->name); |
914 | if (result < 0) | 922 | if (result < 0) |
@@ -930,14 +938,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
930 | goto fail_rate; | 938 | goto fail_rate; |
931 | } | 939 | } |
932 | 940 | ||
933 | result = ieee80211_wep_init(local); | ||
934 | |||
935 | if (result < 0) { | ||
936 | printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n", | ||
937 | wiphy_name(local->hw.wiphy), result); | ||
938 | goto fail_wep; | ||
939 | } | ||
940 | |||
941 | /* add one default STA interface if supported */ | 941 | /* add one default STA interface if supported */ |
942 | if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) { | 942 | if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) { |
943 | result = ieee80211_if_add(local, "wlan%d", NULL, | 943 | result = ieee80211_if_add(local, "wlan%d", NULL, |
@@ -967,13 +967,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
967 | 967 | ||
968 | return 0; | 968 | return 0; |
969 | 969 | ||
970 | fail_wep: | ||
971 | rate_control_deinitialize(local); | ||
972 | fail_rate: | 970 | fail_rate: |
973 | unregister_netdevice(local->mdev); | 971 | unregister_netdevice(local->mdev); |
974 | local->mdev = NULL; | 972 | local->mdev = NULL; |
975 | fail_dev: | 973 | fail_dev: |
976 | rtnl_unlock(); | 974 | rtnl_unlock(); |
975 | ieee80211_wep_free(local); | ||
976 | fail_wep: | ||
977 | sta_info_stop(local); | 977 | sta_info_stop(local); |
978 | fail_sta_info: | 978 | fail_sta_info: |
979 | debugfs_hw_del(local); | 979 | debugfs_hw_del(local); |
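
[Editor's sketch] The ieee80211_register_hw() hunks move ieee80211_wep_init() earlier in the init sequence, which forces its cleanup later in the error-unwind chain: with goto-based unwinding, labels must release resources in the reverse of acquisition order. A self-contained sketch of the idiom -- the labels mirror mac80211's fail_* naming, but the three resources are stand-ins, not mac80211 calls:

#include <stdio.h>

static int  init_sta(void)  { puts("init sta");  return 0; }
static void stop_sta(void)  { puts("stop sta");  }
static int  init_wep(void)  { puts("init wep");  return 0; }
static void free_wep(void)  { puts("free wep");  }
static int  init_rate(void) { puts("init rate"); return -1; /* simulate failure */ }

static int register_hw(void)
{
	int err;

	if ((err = init_sta()))
		goto fail_sta;
	if ((err = init_wep()))		/* now acquired before rate control... */
		goto fail_wep;
	if ((err = init_rate()))
		goto fail_rate;
	return 0;

fail_rate:				/* ...so it is released after it */
	free_wep();
fail_wep:
	stop_sta();
fail_sta:
	return err;
}

int main(void)
{
	return register_hw() ? 1 : 0;
}
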
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 3824990d340b..d9233ec50610 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c | |||
@@ -476,8 +476,8 @@ minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) | |||
476 | return NULL; | 476 | return NULL; |
477 | 477 | ||
478 | for (i = 0; i < IEEE80211_NUM_BANDS; i++) { | 478 | for (i = 0; i < IEEE80211_NUM_BANDS; i++) { |
479 | sband = hw->wiphy->bands[hw->conf.channel->band]; | 479 | sband = hw->wiphy->bands[i]; |
480 | if (sband->n_bitrates > max_rates) | 480 | if (sband && sband->n_bitrates > max_rates) |
481 | max_rates = sband->n_bitrates; | 481 | max_rates = sband->n_bitrates; |
482 | } | 482 | } |
483 | 483 | ||
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c index b16801cde06f..8bef9a1262ff 100644 --- a/net/mac80211/rc80211_pid_algo.c +++ b/net/mac80211/rc80211_pid_algo.c | |||
@@ -317,13 +317,44 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband, | |||
317 | struct ieee80211_sta *sta, void *priv_sta) | 317 | struct ieee80211_sta *sta, void *priv_sta) |
318 | { | 318 | { |
319 | struct rc_pid_sta_info *spinfo = priv_sta; | 319 | struct rc_pid_sta_info *spinfo = priv_sta; |
320 | struct rc_pid_info *pinfo = priv; | ||
321 | struct rc_pid_rateinfo *rinfo = pinfo->rinfo; | ||
320 | struct sta_info *si; | 322 | struct sta_info *si; |
323 | int i, j, tmp; | ||
324 | bool s; | ||
321 | 325 | ||
322 | /* TODO: This routine should consider using RSSI from previous packets | 326 | /* TODO: This routine should consider using RSSI from previous packets |
323 | * as we need to have IEEE 802.1X auth succeed immediately after assoc.. | 327 | * as we need to have IEEE 802.1X auth succeed immediately after assoc.. |
324 | * Until that method is implemented, we will use the lowest supported | 328 | * Until that method is implemented, we will use the lowest supported |
325 | * rate as a workaround. */ | 329 | * rate as a workaround. */ |
326 | 330 | ||
331 | /* Sort the rates. This is optimized for the most common case (i.e. | ||
332 | * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed | ||
333 | * mapping too. */ | ||
334 | for (i = 0; i < sband->n_bitrates; i++) { | ||
335 | rinfo[i].index = i; | ||
336 | rinfo[i].rev_index = i; | ||
337 | if (RC_PID_FAST_START) | ||
338 | rinfo[i].diff = 0; | ||
339 | else | ||
340 | rinfo[i].diff = i * pinfo->norm_offset; | ||
341 | } | ||
342 | for (i = 1; i < sband->n_bitrates; i++) { | ||
343 | s = 0; | ||
344 | for (j = 0; j < sband->n_bitrates - i; j++) | ||
345 | if (unlikely(sband->bitrates[rinfo[j].index].bitrate > | ||
346 | sband->bitrates[rinfo[j + 1].index].bitrate)) { | ||
347 | tmp = rinfo[j].index; | ||
348 | rinfo[j].index = rinfo[j + 1].index; | ||
349 | rinfo[j + 1].index = tmp; | ||
350 | rinfo[rinfo[j].index].rev_index = j; | ||
351 | rinfo[rinfo[j + 1].index].rev_index = j + 1; | ||
352 | s = 1; | ||
353 | } | ||
354 | if (!s) | ||
355 | break; | ||
356 | } | ||
357 | |||
327 | spinfo->txrate_idx = rate_lowest_index(sband, sta); | 358 | spinfo->txrate_idx = rate_lowest_index(sband, sta); |
328 | /* HACK */ | 359 | /* HACK */ |
329 | si = container_of(sta, struct sta_info, sta); | 360 | si = container_of(sta, struct sta_info, sta); |
@@ -336,21 +367,22 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw, | |||
336 | struct rc_pid_info *pinfo; | 367 | struct rc_pid_info *pinfo; |
337 | struct rc_pid_rateinfo *rinfo; | 368 | struct rc_pid_rateinfo *rinfo; |
338 | struct ieee80211_supported_band *sband; | 369 | struct ieee80211_supported_band *sband; |
339 | int i, j, tmp; | 370 | int i, max_rates = 0; |
340 | bool s; | ||
341 | #ifdef CONFIG_MAC80211_DEBUGFS | 371 | #ifdef CONFIG_MAC80211_DEBUGFS |
342 | struct rc_pid_debugfs_entries *de; | 372 | struct rc_pid_debugfs_entries *de; |
343 | #endif | 373 | #endif |
344 | 374 | ||
345 | sband = hw->wiphy->bands[hw->conf.channel->band]; | ||
346 | |||
347 | pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); | 375 | pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); |
348 | if (!pinfo) | 376 | if (!pinfo) |
349 | return NULL; | 377 | return NULL; |
350 | 378 | ||
351 | /* We can safely assume that sband won't change unless we get | 379 | for (i = 0; i < IEEE80211_NUM_BANDS; i++) { |
352 | * reinitialized. */ | 380 | sband = hw->wiphy->bands[i]; |
353 | rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC); | 381 | if (sband && sband->n_bitrates > max_rates) |
382 | max_rates = sband->n_bitrates; | ||
383 | } | ||
384 | |||
385 | rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC); | ||
354 | if (!rinfo) { | 386 | if (!rinfo) { |
355 | kfree(pinfo); | 387 | kfree(pinfo); |
356 | return NULL; | 388 | return NULL; |
@@ -368,33 +400,6 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw, | |||
368 | pinfo->rinfo = rinfo; | 400 | pinfo->rinfo = rinfo; |
369 | pinfo->oldrate = 0; | 401 | pinfo->oldrate = 0; |
370 | 402 | ||
371 | /* Sort the rates. This is optimized for the most common case (i.e. | ||
372 | * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed | ||
373 | * mapping too. */ | ||
374 | for (i = 0; i < sband->n_bitrates; i++) { | ||
375 | rinfo[i].index = i; | ||
376 | rinfo[i].rev_index = i; | ||
377 | if (RC_PID_FAST_START) | ||
378 | rinfo[i].diff = 0; | ||
379 | else | ||
380 | rinfo[i].diff = i * pinfo->norm_offset; | ||
381 | } | ||
382 | for (i = 1; i < sband->n_bitrates; i++) { | ||
383 | s = 0; | ||
384 | for (j = 0; j < sband->n_bitrates - i; j++) | ||
385 | if (unlikely(sband->bitrates[rinfo[j].index].bitrate > | ||
386 | sband->bitrates[rinfo[j + 1].index].bitrate)) { | ||
387 | tmp = rinfo[j].index; | ||
388 | rinfo[j].index = rinfo[j + 1].index; | ||
389 | rinfo[j + 1].index = tmp; | ||
390 | rinfo[rinfo[j].index].rev_index = j; | ||
391 | rinfo[rinfo[j + 1].index].rev_index = j + 1; | ||
392 | s = 1; | ||
393 | } | ||
394 | if (!s) | ||
395 | break; | ||
396 | } | ||
397 | |||
398 | #ifdef CONFIG_MAC80211_DEBUGFS | 403 | #ifdef CONFIG_MAC80211_DEBUGFS |
399 | de = &pinfo->dentries; | 404 | de = &pinfo->dentries; |
400 | de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR, | 405 | de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR, |
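
[Editor's sketch] The pid_algo hunks move the rate sort out of rate_control_pid_alloc() -- where hw->conf.channel may not even be set yet, and only a single band would ever be sorted -- into rate_control_pid_rate_init(), which knows the station's actual band; alloc now only sizes rinfo for the largest band. The sort keeps a forward map (index: sorted position -> hardware rate) and a reverse map (rev_index: hardware rate -> sorted position) in step. A self-contained model that verifies the invariant rinfo[rinfo[p].index].rev_index == p:

#include <stdio.h>

struct rateinfo { int index, rev_index; };

int main(void)
{
	int bitrate[] = { 10, 20, 55, 110, 60, 90, 120, 180 };	/* almost sorted */
	int n = sizeof(bitrate) / sizeof(bitrate[0]);
	struct rateinfo rinfo[8];
	int i, j, tmp;

	for (i = 0; i < n; i++)
		rinfo[i].index = rinfo[i].rev_index = i;

	for (i = 1; i < n; i++) {		/* bubble sort with early exit */
		int swapped = 0;
		for (j = 0; j < n - i; j++)
			if (bitrate[rinfo[j].index] > bitrate[rinfo[j + 1].index]) {
				tmp = rinfo[j].index;
				rinfo[j].index = rinfo[j + 1].index;
				rinfo[j + 1].index = tmp;
				rinfo[rinfo[j].index].rev_index = j;
				rinfo[rinfo[j + 1].index].rev_index = j + 1;
				swapped = 1;
			}
		if (!swapped)
			break;
	}

	for (i = 0; i < n; i++)			/* both maps stay consistent */
		if (rinfo[rinfo[i].index].rev_index != i)
			return 1;
	puts("forward and reverse maps agree");
	return 0;
}
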
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 3fb04a86444d..63656266d567 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -772,7 +772,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) | |||
772 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 772 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
773 | 773 | ||
774 | /* internal error, why is TX_FRAGMENTED set? */ | 774 | /* internal error, why is TX_FRAGMENTED set? */ |
775 | if (WARN_ON(skb->len <= frag_threshold)) | 775 | if (WARN_ON(skb->len + FCS_LEN <= frag_threshold)) |
776 | return TX_DROP; | 776 | return TX_DROP; |
777 | 777 | ||
778 | /* | 778 | /* |
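
[Editor's sketch] The tx.c one-liner accounts for the 4-byte frame check sequence the hardware appends: the fragmentation threshold applies to the on-air frame length, so the internal-error check must compare skb->len + FCS_LEN, not skb->len alone, or frames within FCS_LEN bytes of the threshold trip the WARN_ON spuriously. The predicate, in a tiny runnable form:

#include <stdio.h>

#define FCS_LEN 4	/* bytes of FCS appended on air */

/* a frame needs fragmenting when its on-air size exceeds the threshold */
static int over_threshold(unsigned int skb_len, unsigned int frag_threshold)
{
	return skb_len + FCS_LEN > frag_threshold;
}

int main(void)
{
	/* 254-byte skb, 256-byte threshold: over on air (258 > 256), but the
	 * old check (254 <= 256) flagged fragmenting it as an internal error */
	printf("%d\n", over_threshold(254, 256));
	return 0;
}
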
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 2329c5f50551..cb3ad741ebf8 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -275,6 +275,8 @@ config NF_CT_NETLINK | |||
275 | help | 275 | help |
276 | This option enables support for a netlink-based userspace interface | 276 | This option enables support for a netlink-based userspace interface |
277 | 277 | ||
278 | endif # NF_CONNTRACK | ||
279 | |||
278 | # transparent proxy support | 280 | # transparent proxy support |
279 | config NETFILTER_TPROXY | 281 | config NETFILTER_TPROXY |
280 | tristate "Transparent proxying support (EXPERIMENTAL)" | 282 | tristate "Transparent proxying support (EXPERIMENTAL)" |
@@ -290,8 +292,6 @@ config NETFILTER_TPROXY | |||
290 | 292 | ||
291 | To compile it as a module, choose M here. If unsure, say N. | 293 | To compile it as a module, choose M here. If unsure, say N. |
292 | 294 | ||
293 | endif # NF_CONNTRACK | ||
294 | |||
295 | config NETFILTER_XTABLES | 295 | config NETFILTER_XTABLES |
296 | tristate "Netfilter Xtables support (required for ip_tables)" | 296 | tristate "Netfilter Xtables support (required for ip_tables)" |
297 | default m if NETFILTER_ADVANCED=n | 297 | default m if NETFILTER_ADVANCED=n |
@@ -837,6 +837,7 @@ config NETFILTER_XT_MATCH_SOCKET | |||
837 | depends on NETFILTER_TPROXY | 837 | depends on NETFILTER_TPROXY |
838 | depends on NETFILTER_XTABLES | 838 | depends on NETFILTER_XTABLES |
839 | depends on NETFILTER_ADVANCED | 839 | depends on NETFILTER_ADVANCED |
840 | depends on !NF_CONNTRACK || NF_CONNTRACK | ||
840 | select NF_DEFRAG_IPV4 | 841 | select NF_DEFRAG_IPV4 |
841 | help | 842 | help |
842 | This option adds a `socket' match, which can be used to match | 843 | This option adds a `socket' match, which can be used to match |
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 60aba45023ff..77bfdfeb966e 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c | |||
@@ -260,7 +260,10 @@ struct ip_vs_conn *ip_vs_ct_in_get | |||
260 | list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { | 260 | list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { |
261 | if (cp->af == af && | 261 | if (cp->af == af && |
262 | ip_vs_addr_equal(af, s_addr, &cp->caddr) && | 262 | ip_vs_addr_equal(af, s_addr, &cp->caddr) && |
263 | ip_vs_addr_equal(af, d_addr, &cp->vaddr) && | 263 | /* protocol should only be IPPROTO_IP if |
264 | * d_addr is a fwmark */ | ||
265 | ip_vs_addr_equal(protocol == IPPROTO_IP ? AF_UNSPEC : af, | ||
266 | d_addr, &cp->vaddr) && | ||
264 | s_port == cp->cport && d_port == cp->vport && | 267 | s_port == cp->cport && d_port == cp->vport && |
265 | cp->flags & IP_VS_CONN_F_TEMPLATE && | 268 | cp->flags & IP_VS_CONN_F_TEMPLATE && |
266 | protocol == cp->protocol) { | 269 | protocol == cp->protocol) { |
@@ -698,7 +701,9 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport, | |||
698 | cp->cport = cport; | 701 | cp->cport = cport; |
699 | ip_vs_addr_copy(af, &cp->vaddr, vaddr); | 702 | ip_vs_addr_copy(af, &cp->vaddr, vaddr); |
700 | cp->vport = vport; | 703 | cp->vport = vport; |
701 | ip_vs_addr_copy(af, &cp->daddr, daddr); | 704 | /* proto should only be IPPROTO_IP if d_addr is a fwmark */ |
705 | ip_vs_addr_copy(proto == IPPROTO_IP ? AF_UNSPEC : af, | ||
706 | &cp->daddr, daddr); | ||
702 | cp->dport = dport; | 707 | cp->dport = dport; |
703 | cp->flags = flags; | 708 | cp->flags = flags; |
704 | spin_lock_init(&cp->lock); | 709 | spin_lock_init(&cp->lock); |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index cb3e031335eb..8dddb17a947a 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -278,7 +278,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
278 | */ | 278 | */ |
279 | if (svc->fwmark) { | 279 | if (svc->fwmark) { |
280 | union nf_inet_addr fwmark = { | 280 | union nf_inet_addr fwmark = { |
281 | .all = { 0, 0, 0, htonl(svc->fwmark) } | 281 | .ip = htonl(svc->fwmark) |
282 | }; | 282 | }; |
283 | 283 | ||
284 | ct = ip_vs_ct_in_get(svc->af, IPPROTO_IP, &snet, 0, | 284 | ct = ip_vs_ct_in_get(svc->af, IPPROTO_IP, &snet, 0, |
@@ -306,7 +306,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
306 | */ | 306 | */ |
307 | if (svc->fwmark) { | 307 | if (svc->fwmark) { |
308 | union nf_inet_addr fwmark = { | 308 | union nf_inet_addr fwmark = { |
309 | .all = { 0, 0, 0, htonl(svc->fwmark) } | 309 | .ip = htonl(svc->fwmark) |
310 | }; | 310 | }; |
311 | 311 | ||
312 | ct = ip_vs_conn_new(svc->af, IPPROTO_IP, | 312 | ct = ip_vs_conn_new(svc->af, IPPROTO_IP, |
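
[Editor's sketch] The ip_vs_conn.c and ip_vs_core.c hunks together fix which word of the union the fwmark lands in: union nf_inet_addr overlays a 4-word array (all), a single IPv4 word (ip) and an IPv6 address, and IPv4 consumers read .ip, which aliases all[0]; the old designated initializer parked the mark in all[3], where the IPv4 comparison never saw it. A self-contained demonstration of the aliasing -- the union is a minimal stand-in and byte order is ignored for brevity:

#include <stdio.h>
#include <stdint.h>

/* minimal stand-in for the kernel's union nf_inet_addr */
union nf_inet_addr {
	uint32_t all[4];
	uint32_t ip;		/* aliases all[0] */
	uint32_t ip6[4];
};

int main(void)
{
	union nf_inet_addr old = { .all = { 0, 0, 0, 42 } };	/* pre-fix  */
	union nf_inet_addr fix = { .ip  = 42 };			/* post-fix */

	printf("old.ip = %u (mark stranded in all[3] = %u)\n", old.ip, old.all[3]);
	printf("fix.ip = %u\n", fix.ip);
	return 0;
}
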
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index f13fc57e1ecb..c523f0b8cee5 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -1186,28 +1186,6 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[]) | |||
1186 | return 0; | 1186 | return 0; |
1187 | } | 1187 | } |
1188 | 1188 | ||
1189 | static inline void | ||
1190 | ctnetlink_event_report(struct nf_conn *ct, u32 pid, int report) | ||
1191 | { | ||
1192 | unsigned int events = 0; | ||
1193 | |||
1194 | if (test_bit(IPS_EXPECTED_BIT, &ct->status)) | ||
1195 | events |= IPCT_RELATED; | ||
1196 | else | ||
1197 | events |= IPCT_NEW; | ||
1198 | |||
1199 | nf_conntrack_event_report(IPCT_STATUS | | ||
1200 | IPCT_HELPER | | ||
1201 | IPCT_REFRESH | | ||
1202 | IPCT_PROTOINFO | | ||
1203 | IPCT_NATSEQADJ | | ||
1204 | IPCT_MARK | | ||
1205 | events, | ||
1206 | ct, | ||
1207 | pid, | ||
1208 | report); | ||
1209 | } | ||
1210 | |||
1211 | static struct nf_conn * | 1189 | static struct nf_conn * |
1212 | ctnetlink_create_conntrack(struct nlattr *cda[], | 1190 | ctnetlink_create_conntrack(struct nlattr *cda[], |
1213 | struct nf_conntrack_tuple *otuple, | 1191 | struct nf_conntrack_tuple *otuple, |
@@ -1373,6 +1351,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
1373 | err = -ENOENT; | 1351 | err = -ENOENT; |
1374 | if (nlh->nlmsg_flags & NLM_F_CREATE) { | 1352 | if (nlh->nlmsg_flags & NLM_F_CREATE) { |
1375 | struct nf_conn *ct; | 1353 | struct nf_conn *ct; |
1354 | enum ip_conntrack_events events; | ||
1376 | 1355 | ||
1377 | ct = ctnetlink_create_conntrack(cda, &otuple, | 1356 | ct = ctnetlink_create_conntrack(cda, &otuple, |
1378 | &rtuple, u3); | 1357 | &rtuple, u3); |
@@ -1383,9 +1362,18 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
1383 | err = 0; | 1362 | err = 0; |
1384 | nf_conntrack_get(&ct->ct_general); | 1363 | nf_conntrack_get(&ct->ct_general); |
1385 | spin_unlock_bh(&nf_conntrack_lock); | 1364 | spin_unlock_bh(&nf_conntrack_lock); |
1386 | ctnetlink_event_report(ct, | 1365 | if (test_bit(IPS_EXPECTED_BIT, &ct->status)) |
1387 | NETLINK_CB(skb).pid, | 1366 | events = IPCT_RELATED; |
1388 | nlmsg_report(nlh)); | 1367 | else |
1368 | events = IPCT_NEW; | ||
1369 | |||
1370 | nf_conntrack_event_report(IPCT_STATUS | | ||
1371 | IPCT_HELPER | | ||
1372 | IPCT_PROTOINFO | | ||
1373 | IPCT_NATSEQADJ | | ||
1374 | IPCT_MARK | events, | ||
1375 | ct, NETLINK_CB(skb).pid, | ||
1376 | nlmsg_report(nlh)); | ||
1389 | nf_ct_put(ct); | 1377 | nf_ct_put(ct); |
1390 | } else | 1378 | } else |
1391 | spin_unlock_bh(&nf_conntrack_lock); | 1379 | spin_unlock_bh(&nf_conntrack_lock); |
@@ -1404,9 +1392,13 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
1404 | if (err == 0) { | 1392 | if (err == 0) { |
1405 | nf_conntrack_get(&ct->ct_general); | 1393 | nf_conntrack_get(&ct->ct_general); |
1406 | spin_unlock_bh(&nf_conntrack_lock); | 1394 | spin_unlock_bh(&nf_conntrack_lock); |
1407 | ctnetlink_event_report(ct, | 1395 | nf_conntrack_event_report(IPCT_STATUS | |
1408 | NETLINK_CB(skb).pid, | 1396 | IPCT_HELPER | |
1409 | nlmsg_report(nlh)); | 1397 | IPCT_PROTOINFO | |
1398 | IPCT_NATSEQADJ | | ||
1399 | IPCT_MARK, | ||
1400 | ct, NETLINK_CB(skb).pid, | ||
1401 | nlmsg_report(nlh)); | ||
1410 | nf_ct_put(ct); | 1402 | nf_ct_put(ct); |
1411 | } else | 1403 | } else |
1412 | spin_unlock_bh(&nf_conntrack_lock); | 1404 | spin_unlock_bh(&nf_conntrack_lock); |
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 50dac8dbe7d8..8e757dd53396 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c | |||
@@ -633,6 +633,8 @@ static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, | |||
633 | if (!nest_parms) | 633 | if (!nest_parms) |
634 | goto nla_put_failure; | 634 | goto nla_put_failure; |
635 | NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state); | 635 | NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state); |
636 | NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE, | ||
637 | ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]); | ||
636 | nla_nest_end(skb, nest_parms); | 638 | nla_nest_end(skb, nest_parms); |
637 | read_unlock_bh(&dccp_lock); | 639 | read_unlock_bh(&dccp_lock); |
638 | return 0; | 640 | return 0; |
@@ -644,6 +646,7 @@ nla_put_failure: | |||
644 | 646 | ||
645 | static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = { | 647 | static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = { |
646 | [CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 }, | 648 | [CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 }, |
649 | [CTA_PROTOINFO_DCCP_ROLE] = { .type = NLA_U8 }, | ||
647 | }; | 650 | }; |
648 | 651 | ||
649 | static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) | 652 | static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) |
@@ -661,11 +664,21 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) | |||
661 | return err; | 664 | return err; |
662 | 665 | ||
663 | if (!tb[CTA_PROTOINFO_DCCP_STATE] || | 666 | if (!tb[CTA_PROTOINFO_DCCP_STATE] || |
664 | nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE) | 667 | !tb[CTA_PROTOINFO_DCCP_ROLE] || |
668 | nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) > CT_DCCP_ROLE_MAX || | ||
669 | nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE) { | ||
665 | return -EINVAL; | 670 | return -EINVAL; |
671 | } | ||
666 | 672 | ||
667 | write_lock_bh(&dccp_lock); | 673 | write_lock_bh(&dccp_lock); |
668 | ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]); | 674 | ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]); |
675 | if (nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) == CT_DCCP_ROLE_CLIENT) { | ||
676 | ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT; | ||
677 | ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER; | ||
678 | } else { | ||
679 | ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_SERVER; | ||
680 | ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_CLIENT; | ||
681 | } | ||
669 | write_unlock_bh(&dccp_lock); | 682 | write_unlock_bh(&dccp_lock); |
670 | return 0; | 683 | return 0; |
671 | } | 684 | } |
@@ -777,6 +790,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = { | |||
777 | .print_conntrack = dccp_print_conntrack, | 790 | .print_conntrack = dccp_print_conntrack, |
778 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 791 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
779 | .to_nlattr = dccp_to_nlattr, | 792 | .to_nlattr = dccp_to_nlattr, |
793 | .nlattr_size = dccp_nlattr_size, | ||
780 | .from_nlattr = nlattr_to_dccp, | 794 | .from_nlattr = nlattr_to_dccp, |
781 | .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, | 795 | .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, |
782 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, | 796 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, |
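
[Editor's sketch] The proto_dccp hunk wires up a .nlattr_size callback whose body is not part of the quoted hunks. Given the policy table added just above it, a plausible shape -- an assumption, not the verbatim function -- is the usual nest-plus-policy sizing:

/* assumed shape of the helper referenced by .nlattr_size above */
static int dccp_nlattr_size(void)
{
	return nla_total_size(0)	/* CTA_PROTOINFO_DCCP nest */
		+ nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1);
}
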
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c index 4614696c1b88..0badedc542d3 100644 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ b/net/netfilter/nf_conntrack_proto_udplite.c | |||
@@ -204,6 +204,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly = | |||
204 | .error = udplite_error, | 204 | .error = udplite_error, |
205 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) | 205 | #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) |
206 | .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, | 206 | .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, |
207 | .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, | ||
207 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, | 208 | .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, |
208 | .nla_policy = nf_ct_port_nla_policy, | 209 | .nla_policy = nf_ct_port_nla_policy, |
209 | #endif | 210 | #endif |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 509a95621f9f..150e5cf62f85 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -625,20 +625,6 @@ void xt_free_table_info(struct xt_table_info *info) | |||
625 | } | 625 | } |
626 | EXPORT_SYMBOL(xt_free_table_info); | 626 | EXPORT_SYMBOL(xt_free_table_info); |
627 | 627 | ||
628 | void xt_table_entry_swap_rcu(struct xt_table_info *oldinfo, | ||
629 | struct xt_table_info *newinfo) | ||
630 | { | ||
631 | unsigned int cpu; | ||
632 | |||
633 | for_each_possible_cpu(cpu) { | ||
634 | void *p = oldinfo->entries[cpu]; | ||
635 | rcu_assign_pointer(oldinfo->entries[cpu], newinfo->entries[cpu]); | ||
636 | newinfo->entries[cpu] = p; | ||
637 | } | ||
638 | |||
639 | } | ||
640 | EXPORT_SYMBOL_GPL(xt_table_entry_swap_rcu); | ||
641 | |||
642 | /* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */ | 628 | /* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */ |
643 | struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, | 629 | struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, |
644 | const char *name) | 630 | const char *name) |
@@ -676,32 +662,43 @@ void xt_compat_unlock(u_int8_t af) | |||
676 | EXPORT_SYMBOL_GPL(xt_compat_unlock); | 662 | EXPORT_SYMBOL_GPL(xt_compat_unlock); |
677 | #endif | 663 | #endif |
678 | 664 | ||
665 | DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks); | ||
666 | EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks); | ||
667 | |||
668 | |||
679 | struct xt_table_info * | 669 | struct xt_table_info * |
680 | xt_replace_table(struct xt_table *table, | 670 | xt_replace_table(struct xt_table *table, |
681 | unsigned int num_counters, | 671 | unsigned int num_counters, |
682 | struct xt_table_info *newinfo, | 672 | struct xt_table_info *newinfo, |
683 | int *error) | 673 | int *error) |
684 | { | 674 | { |
685 | struct xt_table_info *oldinfo, *private; | 675 | struct xt_table_info *private; |
686 | 676 | ||
687 | /* Do the substitution. */ | 677 | /* Do the substitution. */ |
688 | mutex_lock(&table->lock); | 678 | local_bh_disable(); |
689 | private = table->private; | 679 | private = table->private; |
680 | |||
690 | /* Check inside lock: is the old number correct? */ | 681 | /* Check inside lock: is the old number correct? */ |
691 | if (num_counters != private->number) { | 682 | if (num_counters != private->number) { |
692 | duprintf("num_counters != table->private->number (%u/%u)\n", | 683 | duprintf("num_counters != table->private->number (%u/%u)\n", |
693 | num_counters, private->number); | 684 | num_counters, private->number); |
694 | mutex_unlock(&table->lock); | 685 | local_bh_enable(); |
695 | *error = -EAGAIN; | 686 | *error = -EAGAIN; |
696 | return NULL; | 687 | return NULL; |
697 | } | 688 | } |
698 | oldinfo = private; | ||
699 | rcu_assign_pointer(table->private, newinfo); | ||
700 | newinfo->initial_entries = oldinfo->initial_entries; | ||
701 | mutex_unlock(&table->lock); | ||
702 | 689 | ||
703 | synchronize_net(); | 690 | table->private = newinfo; |
704 | return oldinfo; | 691 | newinfo->initial_entries = private->initial_entries; |
692 | |||
693 | /* | ||
694 | * Even though table entries have now been swapped, other CPU's | ||
695 | * may still be using the old entries. This is okay, because | ||
696 | * resynchronization happens because of the locking done | ||
697 | * during the get_counters() routine. | ||
698 | */ | ||
699 | local_bh_enable(); | ||
700 | |||
701 | return private; | ||
705 | } | 702 | } |
706 | EXPORT_SYMBOL_GPL(xt_replace_table); | 703 | EXPORT_SYMBOL_GPL(xt_replace_table); |
707 | 704 | ||
@@ -734,7 +731,6 @@ struct xt_table *xt_register_table(struct net *net, struct xt_table *table, | |||
734 | 731 | ||
735 | /* Simplifies replace_table code. */ | 732 | /* Simplifies replace_table code. */ |
736 | table->private = bootstrap; | 733 | table->private = bootstrap; |
737 | mutex_init(&table->lock); | ||
738 | 734 | ||
739 | if (!xt_replace_table(table, 0, newinfo, &ret)) | 735 | if (!xt_replace_table(table, 0, newinfo, &ret)) |
740 | goto unlock; | 736 | goto unlock; |
@@ -1147,7 +1143,14 @@ static struct pernet_operations xt_net_ops = { | |||
1147 | 1143 | ||
1148 | static int __init xt_init(void) | 1144 | static int __init xt_init(void) |
1149 | { | 1145 | { |
1150 | int i, rv; | 1146 | unsigned int i; |
1147 | int rv; | ||
1148 | |||
1149 | for_each_possible_cpu(i) { | ||
1150 | struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); | ||
1151 | spin_lock_init(&lock->lock); | ||
1152 | lock->readers = 0; | ||
1153 | } | ||
1151 | 1154 | ||
1152 | xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); | 1155 | xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); |
1153 | if (!xt) | 1156 | if (!xt) |
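
[Editor's sketch] With RCU gone, xt_replace_table() can swap table->private with a plain assignment under local_bh_disable(): every packet-path reader samples the pointer once inside its own per-CPU lock and sees either the old or the new table, never a mix. What the function no longer does is wait for stragglers; that duty falls to the caller's subsequent get_counters(oldinfo, ...), which -- per the ip_tables/ip6_tables hunks above -- takes every CPU's lock in turn and so cannot complete while any reader still references the old entries. The caller-side lifecycle, condensed from __do_replace() (cleanup_entries() is a stand-in for the cleanup_entry iteration; error handling trimmed):

struct xt_table_info *oldinfo;

oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
if (!oldinfo)
	goto put_module;

/* Get the old counters, and synchronize with replace: walking every
 * per-CPU write lock in get_counters() doubles as the grace period. */
get_counters(oldinfo, counters);

/* No CPU can still be traversing the old entries now. */
cleanup_entries(oldinfo);		/* stand-in for IPT_ENTRY_ITERATE */
xt_free_table_info(oldinfo);
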
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c index 6c4847662b85..69a639f35403 100644 --- a/net/netfilter/xt_cluster.c +++ b/net/netfilter/xt_cluster.c | |||
@@ -135,7 +135,13 @@ static bool xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) | |||
135 | { | 135 | { |
136 | struct xt_cluster_match_info *info = par->matchinfo; | 136 | struct xt_cluster_match_info *info = par->matchinfo; |
137 | 137 | ||
138 | if (info->node_mask >= (1 << info->total_nodes)) { | 138 | if (info->total_nodes > XT_CLUSTER_NODES_MAX) { |
139 | printk(KERN_ERR "xt_cluster: you have exceeded the maximum " | ||
140 | "number of cluster nodes (%u > %u)\n", | ||
141 | info->total_nodes, XT_CLUSTER_NODES_MAX); | ||
142 | return false; | ||
143 | } | ||
144 | if (info->node_mask >= (1ULL << info->total_nodes)) { | ||
139 | printk(KERN_ERR "xt_cluster: this node mask cannot be " | 145 | printk(KERN_ERR "xt_cluster: this node mask cannot be " |
140 | "higher than the total number of nodes\n"); | 146 | "higher than the total number of nodes\n"); |
141 | return false; | 147 | return false; |
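
[Editor's sketch] The xt_cluster check needed both halves: without the explicit XT_CLUSTER_NODES_MAX bound, a total_nodes of 32 or more makes `1 << info->total_nodes` shift a 32-bit int by at least its full width, which is undefined behaviour in C, so the node_mask comparison could pass or fail arbitrarily. Widening to 1ULL makes the shift well defined for the whole valid range (the 1ULL suggests XT_CLUSTER_NODES_MAX is 32, though its value is not shown here). A self-contained illustration:

#include <stdio.h>

int main(void)
{
	unsigned int total_nodes = 32;

	/* (1U << 32) is undefined behaviour: the shift count equals the
	 * width of the type. On x86 it typically wraps to 1, so a full
	 * 32-node mask would be rejected. */
	unsigned long long bound = 1ULL << total_nodes;	/* well defined */

	printf("1ULL << %u = %llu\n", total_nodes, bound);	/* 4294967296 */
	return 0;
}
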
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 791e030ea903..eb0ceb846527 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c | |||
@@ -474,7 +474,7 @@ static ssize_t recent_old_proc_write(struct file *file, | |||
474 | struct recent_table *t = pde->data; | 474 | struct recent_table *t = pde->data; |
475 | struct recent_entry *e; | 475 | struct recent_entry *e; |
476 | char buf[sizeof("+255.255.255.255")], *c = buf; | 476 | char buf[sizeof("+255.255.255.255")], *c = buf; |
477 | __be32 addr; | 477 | union nf_inet_addr addr = {}; |
478 | int add; | 478 | int add; |
479 | 479 | ||
480 | if (size > sizeof(buf)) | 480 | if (size > sizeof(buf)) |
@@ -506,14 +506,13 @@ static ssize_t recent_old_proc_write(struct file *file, | |||
506 | add = 1; | 506 | add = 1; |
507 | break; | 507 | break; |
508 | } | 508 | } |
509 | addr = in_aton(c); | 509 | addr.ip = in_aton(c); |
510 | 510 | ||
511 | spin_lock_bh(&recent_lock); | 511 | spin_lock_bh(&recent_lock); |
512 | e = recent_entry_lookup(t, (const void *)&addr, NFPROTO_IPV4, 0); | 512 | e = recent_entry_lookup(t, &addr, NFPROTO_IPV4, 0); |
513 | if (e == NULL) { | 513 | if (e == NULL) { |
514 | if (add) | 514 | if (add) |
515 | recent_entry_init(t, (const void *)&addr, | 515 | recent_entry_init(t, &addr, NFPROTO_IPV4, 0); |
516 | NFPROTO_IPV4, 0); | ||
517 | } else { | 516 | } else { |
518 | if (add) | 517 | if (add) |
519 | recent_entry_update(t, e); | 518 | recent_entry_update(t, e); |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 173fcc4b050d..0759f32e9dca 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -254,7 +254,7 @@ replay: | |||
254 | } | 254 | } |
255 | tp->ops = tp_ops; | 255 | tp->ops = tp_ops; |
256 | tp->protocol = protocol; | 256 | tp->protocol = protocol; |
257 | tp->prio = nprio ? : tcf_auto_prio(*back); | 257 | tp->prio = nprio ? : TC_H_MAJ(tcf_auto_prio(*back)); |
258 | tp->q = q; | 258 | tp->q = q; |
259 | tp->classify = tp_ops->classify; | 259 | tp->classify = tp_ops->classify; |
260 | tp->classid = parent; | 260 | tp->classid = parent; |
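
[Editor's sketch] Filter priorities live in the upper 16 bits of the 32-bit tc handle. When the user gives no priority (nprio == 0), tcf_auto_prio() derives one from the head of the chain, and the fix masks that result so only the major half survives into tp->prio. The uapi macros involved, from <linux/pkt_sched.h>, with a small demonstration:

#include <stdio.h>
#include <stdint.h>

#define TC_H_MAJ_MASK (0xFFFF0000U)
#define TC_H_MIN_MASK (0x0000FFFFU)
#define TC_H_MAJ(h) ((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h) ((h) & TC_H_MIN_MASK)

int main(void)
{
	uint32_t h = 0x00010002;	/* major 1, minor 2 */

	printf("maj = 0x%x, min = 0x%x\n", TC_H_MAJ(h) >> 16, TC_H_MIN(h));
	return 0;
}
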
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index 92cfc9d7e3b9..69188e8358b4 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c | |||
@@ -51,7 +51,7 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt) | |||
51 | u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1; | 51 | u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1; |
52 | 52 | ||
53 | if (sch->ops == &bfifo_qdisc_ops) | 53 | if (sch->ops == &bfifo_qdisc_ops) |
54 | limit *= qdisc_dev(sch)->mtu; | 54 | limit *= psched_mtu(qdisc_dev(sch)); |
55 | 55 | ||
56 | q->limit = limit; | 56 | q->limit = limit; |
57 | } else { | 57 | } else { |
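
[Editor's sketch] bfifo's limit is in bytes, so the default of tx_queue_len packets is converted by multiplying with a per-packet size; dev->mtu undercounts because the scheduler also sees link-layer headers. psched_mtu() folds those in. It is defined in include/net/pkt_sched.h, outside this diff; its likely shape in this era -- an assumption, not a verbatim quote -- is:

static inline unsigned int psched_mtu(const struct net_device *dev)
{
	return dev->mtu + dev->hard_header_len;
}
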
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index a0bfe53f1621..06ca058572f2 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -672,10 +672,8 @@ xprt_init_autodisconnect(unsigned long data) | |||
672 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) | 672 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) |
673 | goto out_abort; | 673 | goto out_abort; |
674 | spin_unlock(&xprt->transport_lock); | 674 | spin_unlock(&xprt->transport_lock); |
675 | if (xprt_connecting(xprt)) | 675 | set_bit(XPRT_CONNECTION_CLOSE, &xprt->state); |
676 | xprt_release_write(xprt, NULL); | 676 | queue_work(rpciod_workqueue, &xprt->task_cleanup); |
677 | else | ||
678 | queue_work(rpciod_workqueue, &xprt->task_cleanup); | ||
679 | return; | 677 | return; |
680 | out_abort: | 678 | out_abort: |
681 | spin_unlock(&xprt->transport_lock); | 679 | spin_unlock(&xprt->transport_lock); |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 629a28764da9..42a6f9f20285 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | |||
@@ -265,7 +265,7 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt, | |||
265 | frmr->page_list->page_list[page_no] = | 265 | frmr->page_list->page_list[page_no] = |
266 | ib_dma_map_single(xprt->sc_cm_id->device, | 266 | ib_dma_map_single(xprt->sc_cm_id->device, |
267 | page_address(rqstp->rq_arg.pages[page_no]), | 267 | page_address(rqstp->rq_arg.pages[page_no]), |
268 | PAGE_SIZE, DMA_TO_DEVICE); | 268 | PAGE_SIZE, DMA_FROM_DEVICE); |
269 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, | 269 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, |
270 | frmr->page_list->page_list[page_no])) | 270 | frmr->page_list->page_list[page_no])) |
271 | goto fatal_err; | 271 | goto fatal_err; |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 6c26a675435a..8b510c5e8777 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c | |||
@@ -183,6 +183,7 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt, | |||
183 | 183 | ||
184 | fatal_err: | 184 | fatal_err: |
185 | printk("svcrdma: Error fast registering memory for xprt %p\n", xprt); | 185 | printk("svcrdma: Error fast registering memory for xprt %p\n", xprt); |
186 | vec->frmr = NULL; | ||
186 | svc_rdma_put_frmr(xprt, frmr); | 187 | svc_rdma_put_frmr(xprt, frmr); |
187 | return -EIO; | 188 | return -EIO; |
188 | } | 189 | } |
@@ -516,6 +517,7 @@ static int send_reply(struct svcxprt_rdma *rdma, | |||
516 | "svcrdma: could not post a receive buffer, err=%d." | 517 | "svcrdma: could not post a receive buffer, err=%d." |
517 | "Closing transport %p.\n", ret, rdma); | 518 | "Closing transport %p.\n", ret, rdma); |
518 | set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); | 519 | set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); |
520 | svc_rdma_put_frmr(rdma, vec->frmr); | ||
519 | svc_rdma_put_context(ctxt, 0); | 521 | svc_rdma_put_context(ctxt, 0); |
520 | return -ENOTCONN; | 522 | return -ENOTCONN; |
521 | } | 523 | } |
@@ -606,6 +608,7 @@ static int send_reply(struct svcxprt_rdma *rdma, | |||
606 | return 0; | 608 | return 0; |
607 | 609 | ||
608 | err: | 610 | err: |
611 | svc_rdma_unmap_dma(ctxt); | ||
609 | svc_rdma_put_frmr(rdma, vec->frmr); | 612 | svc_rdma_put_frmr(rdma, vec->frmr); |
610 | svc_rdma_put_context(ctxt, 1); | 613 | svc_rdma_put_context(ctxt, 1); |
611 | return -EIO; | 614 | return -EIO; |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 3d810e7df3fb..4b0c2fa15e0b 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -520,8 +520,9 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt) | |||
520 | svc_xprt_get(&xprt->sc_xprt); | 520 | svc_xprt_get(&xprt->sc_xprt); |
521 | ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr); | 521 | ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr); |
522 | if (ret) { | 522 | if (ret) { |
523 | svc_xprt_put(&xprt->sc_xprt); | 523 | svc_rdma_unmap_dma(ctxt); |
524 | svc_rdma_put_context(ctxt, 1); | 524 | svc_rdma_put_context(ctxt, 1); |
525 | svc_xprt_put(&xprt->sc_xprt); | ||
525 | } | 526 | } |
526 | return ret; | 527 | return ret; |
527 | 528 | ||
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index d40ff50887aa..e18596146013 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -807,6 +807,9 @@ static void xs_reset_transport(struct sock_xprt *transport) | |||
807 | * | 807 | * |
808 | * This is used when all requests are complete; ie, no DRC state remains | 808 | * This is used when all requests are complete; ie, no DRC state remains |
809 | * on the server we want to save. | 809 | * on the server we want to save. |
810 | * | ||
811 | * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with | ||
812 | * xs_reset_transport() zeroing the socket from underneath a writer. | ||
810 | */ | 813 | */ |
811 | static void xs_close(struct rpc_xprt *xprt) | 814 | static void xs_close(struct rpc_xprt *xprt) |
812 | { | 815 | { |
@@ -824,6 +827,14 @@ static void xs_close(struct rpc_xprt *xprt) | |||
824 | xprt_disconnect_done(xprt); | 827 | xprt_disconnect_done(xprt); |
825 | } | 828 | } |
826 | 829 | ||
830 | static void xs_tcp_close(struct rpc_xprt *xprt) | ||
831 | { | ||
832 | if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state)) | ||
833 | xs_close(xprt); | ||
834 | else | ||
835 | xs_tcp_shutdown(xprt); | ||
836 | } | ||
837 | |||
827 | /** | 838 | /** |
828 | * xs_destroy - prepare to shutdown a transport | 839 | * xs_destroy - prepare to shutdown a transport |
829 | * @xprt: doomed transport | 840 | * @xprt: doomed transport |
@@ -1772,6 +1783,15 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt, | |||
1772 | xprt, -status, xprt_connected(xprt), | 1783 | xprt, -status, xprt_connected(xprt), |
1773 | sock->sk->sk_state); | 1784 | sock->sk->sk_state); |
1774 | switch (status) { | 1785 | switch (status) { |
1786 | default: | ||
1787 | printk("%s: connect returned unhandled error %d\n", | ||
1788 | __func__, status); | ||
1789 | case -EADDRNOTAVAIL: | ||
1790 | /* We're probably in TIME_WAIT. Get rid of existing socket, | ||
1791 | * and retry | ||
1792 | */ | ||
1793 | set_bit(XPRT_CONNECTION_CLOSE, &xprt->state); | ||
1794 | xprt_force_disconnect(xprt); | ||
1775 | case -ECONNREFUSED: | 1795 | case -ECONNREFUSED: |
1776 | case -ECONNRESET: | 1796 | case -ECONNRESET: |
1777 | case -ENETUNREACH: | 1797 | case -ENETUNREACH: |
@@ -1782,10 +1802,6 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt, | |||
1782 | xprt_clear_connecting(xprt); | 1802 | xprt_clear_connecting(xprt); |
1783 | return; | 1803 | return; |
1784 | } | 1804 | } |
1785 | /* get rid of existing socket, and retry */ | ||
1786 | xs_tcp_shutdown(xprt); | ||
1787 | printk("%s: connect returned unhandled error %d\n", | ||
1788 | __func__, status); | ||
1789 | out_eagain: | 1805 | out_eagain: |
1790 | status = -EAGAIN; | 1806 | status = -EAGAIN; |
1791 | out: | 1807 | out: |
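Reassembled from the two hunks above, the connect-error switch now reads roughly as follows (unchanged lines elided); unknown errors deliberately fall through into the -EADDRNOTAVAIL handling, so they too flag a full close and force a disconnect, giving the next attempt a fresh socket instead of one stuck in TIME_WAIT:

	switch (status) {
	default:
		printk("%s: connect returned unhandled error %d\n",
		       __func__, status);
		/* fall through */
	case -EADDRNOTAVAIL:
		set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
		xprt_force_disconnect(xprt);
		/* fall through */
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENETUNREACH:
		/* ... retry paths unchanged ... */
	}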
@@ -1994,7 +2010,7 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
1994 | .buf_free = rpc_free, | 2010 | .buf_free = rpc_free, |
1995 | .send_request = xs_tcp_send_request, | 2011 | .send_request = xs_tcp_send_request, |
1996 | .set_retrans_timeout = xprt_set_retrans_timeout_def, | 2012 | .set_retrans_timeout = xprt_set_retrans_timeout_def, |
1997 | .close = xs_tcp_shutdown, | 2013 | .close = xs_tcp_close, |
1998 | .destroy = xs_destroy, | 2014 | .destroy = xs_destroy, |
1999 | .print_stats = xs_tcp_print_stats, | 2015 | .print_stats = xs_tcp_print_stats, |
2000 | }; | 2016 | }; |
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c index 5d149c1b5f0d..9ad4d893a566 100644 --- a/net/wimax/op-msg.c +++ b/net/wimax/op-msg.c | |||
@@ -149,7 +149,8 @@ struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev, | |||
149 | } | 149 | } |
150 | result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg); | 150 | result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg); |
151 | if (result < 0) { | 151 | if (result < 0) { |
152 | dev_err(dev, "no memory to add payload in attribute\n"); | 152 | dev_err(dev, "no memory to add payload (msg %p size %zu) in " |
153 | "attribute: %d\n", msg, size, result); | ||
153 | goto error_nla_put; | 154 | goto error_nla_put; |
154 | } | 155 | } |
155 | genlmsg_end(skb, genl_msg); | 156 | genlmsg_end(skb, genl_msg); |
@@ -299,10 +300,10 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name, | |||
299 | struct sk_buff *skb; | 300 | struct sk_buff *skb; |
300 | 301 | ||
301 | skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags); | 302 | skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags); |
302 | if (skb == NULL) | 303 | if (IS_ERR(skb)) |
303 | goto error_msg_new; | 304 | result = PTR_ERR(skb); |
304 | result = wimax_msg_send(wimax_dev, skb); | 305 | else |
305 | error_msg_new: | 306 | result = wimax_msg_send(wimax_dev, skb); |
306 | return result; | 307 | return result; |
307 | } | 308 | } |
308 | EXPORT_SYMBOL_GPL(wimax_msg); | 309 | EXPORT_SYMBOL_GPL(wimax_msg); |
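The fix relies on the standard ERR_PTR() convention: wimax_msg_alloc() encodes an errno in the returned pointer rather than returning NULL, so callers must test with IS_ERR() and decode with PTR_ERR().  A minimal sketch of the corrected caller (names taken from the hunk above):

	skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
	if (IS_ERR(skb))
		result = PTR_ERR(skb);	/* allocation or encoding failed */
	else
		result = wimax_msg_send(wimax_dev, skb);
	return result;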
diff --git a/net/wimax/stack.c b/net/wimax/stack.c index a0ee76b52510..933e1422b09f 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c | |||
@@ -338,8 +338,21 @@ out: | |||
338 | */ | 338 | */ |
339 | void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) | 339 | void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) |
340 | { | 340 | { |
341 | /* | ||
342 | * A driver cannot take the wimax_dev out of the | ||
343 | * __WIMAX_ST_NULL state except by calling wimax_dev_add(). If | ||
344 | * the wimax_dev's state is still NULL, we ignore any request | ||
345 | * to change its state because it means it hasn't yet been | ||
346 | * registered. | ||
347 | * | ||
348 | * There is no need to complain about it, as the routines | ||
349 | * that call this may be shared by code paths that run | ||
350 | * before or after wimax_dev_add() has done its | ||
351 | * job. | ||
352 | */ | ||
341 | mutex_lock(&wimax_dev->mutex); | 353 | mutex_lock(&wimax_dev->mutex); |
342 | __wimax_state_change(wimax_dev, new_state); | 354 | if (wimax_dev->state > __WIMAX_ST_NULL) |
355 | __wimax_state_change(wimax_dev, new_state); | ||
343 | mutex_unlock(&wimax_dev->mutex); | 356 | mutex_unlock(&wimax_dev->mutex); |
344 | return; | 357 | return; |
345 | } | 358 | } |
@@ -376,7 +389,7 @@ EXPORT_SYMBOL_GPL(wimax_state_get); | |||
376 | void wimax_dev_init(struct wimax_dev *wimax_dev) | 389 | void wimax_dev_init(struct wimax_dev *wimax_dev) |
377 | { | 390 | { |
378 | INIT_LIST_HEAD(&wimax_dev->id_table_node); | 391 | INIT_LIST_HEAD(&wimax_dev->id_table_node); |
379 | __wimax_state_set(wimax_dev, WIMAX_ST_UNINITIALIZED); | 392 | __wimax_state_set(wimax_dev, __WIMAX_ST_NULL); |
380 | mutex_init(&wimax_dev->mutex); | 393 | mutex_init(&wimax_dev->mutex); |
381 | mutex_init(&wimax_dev->mutex_reset); | 394 | mutex_init(&wimax_dev->mutex_reset); |
382 | } | 395 | } |
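Taken together, the two stack.c hunks give drivers the following lifecycle (an illustrative sequence with arguments abbreviated; the only transition out of __WIMAX_ST_NULL is the one performed by wimax_dev_add() itself):

	wimax_dev_init(wimax_dev);		/* state = __WIMAX_ST_NULL */
	wimax_state_change(wimax_dev, s);	/* ignored: not registered */
	wimax_dev_add(wimax_dev, net_dev);	/* leaves __WIMAX_ST_NULL  */
	wimax_state_change(wimax_dev, s);	/* now honoured            */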
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 6c1993d99902..08265ca15785 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -907,6 +907,7 @@ EXPORT_SYMBOL(freq_reg_info); | |||
907 | int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth, | 907 | int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth, |
908 | const struct ieee80211_reg_rule **reg_rule) | 908 | const struct ieee80211_reg_rule **reg_rule) |
909 | { | 909 | { |
910 | assert_cfg80211_lock(); | ||
910 | return freq_reg_info_regd(wiphy, center_freq, | 911 | return freq_reg_info_regd(wiphy, center_freq, |
911 | bandwidth, reg_rule, NULL); | 912 | bandwidth, reg_rule, NULL); |
912 | } | 913 | } |
@@ -1133,7 +1134,8 @@ static bool reg_is_world_roaming(struct wiphy *wiphy) | |||
1133 | if (is_world_regdom(cfg80211_regdomain->alpha2) || | 1134 | if (is_world_regdom(cfg80211_regdomain->alpha2) || |
1134 | (wiphy->regd && is_world_regdom(wiphy->regd->alpha2))) | 1135 | (wiphy->regd && is_world_regdom(wiphy->regd->alpha2))) |
1135 | return true; | 1136 | return true; |
1136 | if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && | 1137 | if (last_request && |
1138 | last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && | ||
1137 | wiphy->custom_regulatory) | 1139 | wiphy->custom_regulatory) |
1138 | return true; | 1140 | return true; |
1139 | return false; | 1141 | return false; |
@@ -1142,6 +1144,12 @@ static bool reg_is_world_roaming(struct wiphy *wiphy) | |||
1142 | /* Reap the advantages of previously found beacons */ | 1144 | /* Reap the advantages of previously found beacons */ |
1143 | static void reg_process_beacons(struct wiphy *wiphy) | 1145 | static void reg_process_beacons(struct wiphy *wiphy) |
1144 | { | 1146 | { |
1147 | /* | ||
1148 | * A NULL last_request means we are just firing up cfg80211, | ||
1149 | * so no beacons can have been processed yet. | ||
1150 | */ | ||
1151 | if (!last_request) | ||
1152 | return; | ||
1145 | if (!reg_is_world_roaming(wiphy)) | 1153 | if (!reg_is_world_roaming(wiphy)) |
1146 | return; | 1154 | return; |
1147 | wiphy_update_beacon_reg(wiphy); | 1155 | wiphy_update_beacon_reg(wiphy); |
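Both guards address the same start-up window, in which no regulatory hint has been processed yet and last_request is still NULL; in sketch form (condensed from the two hunks above):

	/* reg_is_world_roaming(): only consult last_request if it exists */
	if (last_request &&
	    last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
	    wiphy->custom_regulatory)
		return true;

	/* reg_process_beacons(): nothing to reap before the first hint */
	if (!last_request)
		return;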
@@ -1176,6 +1184,8 @@ static void handle_channel_custom(struct wiphy *wiphy, | |||
1176 | struct ieee80211_supported_band *sband; | 1184 | struct ieee80211_supported_band *sband; |
1177 | struct ieee80211_channel *chan; | 1185 | struct ieee80211_channel *chan; |
1178 | 1186 | ||
1187 | assert_cfg80211_lock(); | ||
1188 | |||
1179 | sband = wiphy->bands[band]; | 1189 | sband = wiphy->bands[band]; |
1180 | BUG_ON(chan_idx >= sband->n_channels); | 1190 | BUG_ON(chan_idx >= sband->n_channels); |
1181 | chan = &sband->channels[chan_idx]; | 1191 | chan = &sband->channels[chan_idx]; |
@@ -1214,10 +1224,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy, | |||
1214 | const struct ieee80211_regdomain *regd) | 1224 | const struct ieee80211_regdomain *regd) |
1215 | { | 1225 | { |
1216 | enum ieee80211_band band; | 1226 | enum ieee80211_band band; |
1227 | |||
1228 | mutex_lock(&cfg80211_mutex); | ||
1217 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | 1229 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { |
1218 | if (wiphy->bands[band]) | 1230 | if (wiphy->bands[band]) |
1219 | handle_band_custom(wiphy, band, regd); | 1231 | handle_band_custom(wiphy, band, regd); |
1220 | } | 1232 | } |
1233 | mutex_unlock(&cfg80211_mutex); | ||
1221 | } | 1234 | } |
1222 | EXPORT_SYMBOL(wiphy_apply_custom_regulatory); | 1235 | EXPORT_SYMBOL(wiphy_apply_custom_regulatory); |
1223 | 1236 | ||
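After this hunk, wiphy_apply_custom_regulatory() looks roughly like the sketch below; holding cfg80211_mutex across the band loop is exactly what the new assert_cfg80211_lock() calls in freq_reg_info() and handle_channel_custom() verify:

	void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
					   const struct ieee80211_regdomain *regd)
	{
		enum ieee80211_band band;

		mutex_lock(&cfg80211_mutex);
		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
			if (wiphy->bands[band])
				handle_band_custom(wiphy, band, regd);
		}
		mutex_unlock(&cfg80211_mutex);
	}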
@@ -1423,7 +1436,7 @@ new_request: | |||
1423 | return call_crda(last_request->alpha2); | 1436 | return call_crda(last_request->alpha2); |
1424 | } | 1437 | } |
1425 | 1438 | ||
1426 | /* This currently only processes user and driver regulatory hints */ | 1439 | /* This processes *all* regulatory hints */ |
1427 | static void reg_process_hint(struct regulatory_request *reg_request) | 1440 | static void reg_process_hint(struct regulatory_request *reg_request) |
1428 | { | 1441 | { |
1429 | int r = 0; | 1442 | int r = 0; |
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 2ae65b39b529..1f260c40b6ca 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -395,6 +395,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
395 | memcpy(ies, res->pub.information_elements, ielen); | 395 | memcpy(ies, res->pub.information_elements, ielen); |
396 | found->ies_allocated = true; | 396 | found->ies_allocated = true; |
397 | found->pub.information_elements = ies; | 397 | found->pub.information_elements = ies; |
398 | found->pub.len_information_elements = ielen; | ||
398 | } | 399 | } |
399 | } | 400 | } |
400 | } | 401 | } |
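The scan.c change is a one-line consistency fix: when the IE buffer is reallocated, the advertised length must be updated together with the pointer, or readers would apply the stale length to the new buffer.  In sketch form (from the hunk above):

	memcpy(ies, res->pub.information_elements, ielen);
	found->ies_allocated = true;
	found->pub.information_elements = ies;
	found->pub.len_information_elements = ielen;	/* keep in sync */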
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 82271720d970..5f1f86565f16 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -794,7 +794,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, | |||
794 | { | 794 | { |
795 | static xfrm_address_t saddr_wildcard = { }; | 795 | static xfrm_address_t saddr_wildcard = { }; |
796 | struct net *net = xp_net(pol); | 796 | struct net *net = xp_net(pol); |
797 | unsigned int h; | 797 | unsigned int h, h_wildcard; |
798 | struct hlist_node *entry; | 798 | struct hlist_node *entry; |
799 | struct xfrm_state *x, *x0, *to_put; | 799 | struct xfrm_state *x, *x0, *to_put; |
800 | int acquire_in_progress = 0; | 800 | int acquire_in_progress = 0; |
@@ -819,8 +819,8 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, | |||
819 | if (best) | 819 | if (best) |
820 | goto found; | 820 | goto found; |
821 | 821 | ||
822 | h = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family); | 822 | h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family); |
823 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { | 823 | hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { |
824 | if (x->props.family == family && | 824 | if (x->props.family == family && |
825 | x->props.reqid == tmpl->reqid && | 825 | x->props.reqid == tmpl->reqid && |
826 | !(x->props.flags & XFRM_STATE_WILDRECV) && | 826 | !(x->props.flags & XFRM_STATE_WILDRECV) && |
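The xfrm change gives the wildcard-source lookup its own bucket index so that h, computed earlier from the real source address, is not clobbered; presumably h is still needed further down xfrm_state_find().  A condensed sketch of the resulting flow (unrelated lines elided, first hash shown under that assumption):

	h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family);
	/* ... search net->xfrm.state_bydst + h; if nothing matched ... */

	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard,
				   tmpl->reqid, family);
	hlist_for_each_entry(x, entry,
			     net->xfrm.state_bydst + h_wildcard, bydst) {
		/* ... wildcard-source matching, with h left intact ... */
	}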