Diffstat (limited to 'net')
67 files changed, 342 insertions, 275 deletions
diff --git a/net/9p/client.c b/net/9p/client.c
index 9186550d77a6..0004cbaac4a4 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
| @@ -415,9 +415,17 @@ static void p9_free_req(struct p9_client *c, struct p9_req_t *r) | |||
| 415 | * req: request received | 415 | * req: request received |
| 416 | * | 416 | * |
| 417 | */ | 417 | */ |
| 418 | void p9_client_cb(struct p9_client *c, struct p9_req_t *req) | 418 | void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status) |
| 419 | { | 419 | { |
| 420 | p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc->tag); | 420 | p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc->tag); |
| 421 | |||
| 422 | /* | ||
| 423 | * This barrier is needed to make sure any change made to req before | ||
| 424 | * the other thread wakes up will indeed be seen by the waiting side. | ||
| 425 | */ | ||
| 426 | smp_wmb(); | ||
| 427 | req->status = status; | ||
| 428 | |||
| 421 | wake_up(req->wq); | 429 | wake_up(req->wq); |
| 422 | p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag); | 430 | p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag); |
| 423 | } | 431 | } |
| @@ -655,16 +663,13 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq) | |||
| 655 | if (IS_ERR(req)) | 663 | if (IS_ERR(req)) |
| 656 | return PTR_ERR(req); | 664 | return PTR_ERR(req); |
| 657 | 665 | ||
| 658 | |||
| 659 | /* | 666 | /* |
| 660 | * if we haven't received a response for oldreq, | 667 | * if we haven't received a response for oldreq, |
| 661 | * remove it from the list | 668 | * remove it from the list |
| 662 | */ | 669 | */ |
| 663 | if (oldreq->status == REQ_STATUS_FLSH) { | 670 | if (oldreq->status == REQ_STATUS_SENT) |
| 664 | spin_lock(&c->lock); | 671 | if (c->trans_mod->cancelled) |
| 665 | list_del(&oldreq->req_list); | 672 | c->trans_mod->cancelled(c, oldreq); |
| 666 | spin_unlock(&c->lock); | ||
| 667 | } | ||
| 668 | 673 | ||
| 669 | p9_free_req(c, req); | 674 | p9_free_req(c, req); |
| 670 | return 0; | 675 | return 0; |
| @@ -751,6 +756,12 @@ again: | |||
| 751 | err = wait_event_interruptible(*req->wq, | 756 | err = wait_event_interruptible(*req->wq, |
| 752 | req->status >= REQ_STATUS_RCVD); | 757 | req->status >= REQ_STATUS_RCVD); |
| 753 | 758 | ||
| 759 | /* | ||
| 760 | * Make sure our req is coherent with regard to updates in other | ||
| 761 | * threads - echoes to wmb() in the callback | ||
| 762 | */ | ||
| 763 | smp_rmb(); | ||
| 764 | |||
| 754 | if ((err == -ERESTARTSYS) && (c->status == Connected) | 765 | if ((err == -ERESTARTSYS) && (c->status == Connected) |
| 755 | && (type == P9_TFLUSH)) { | 766 | && (type == P9_TFLUSH)) { |
| 756 | sigpending = 1; | 767 | sigpending = 1; |
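The smp_wmb() added to p9_client_cb() pairs with the smp_rmb() added after the wait in the "again:" hunk above: the callback publishes every update to the request (reply buffer, t_err, and so on) before writing req->status, and the waiting side re-reads the request only after it has observed the new status. Below is a minimal userspace analogue of that publish/consume pattern; it uses GCC atomic builtins in place of the kernel barriers, and fake_req/transport_thread are invented names, not 9p symbols.

    #include <pthread.h>
    #include <stdio.h>

    struct fake_req {
        int reply;                 /* payload written by the "transport" side */
        int status;                /* 0 = pending, 1 = received               */
        pthread_mutex_t lock;
        pthread_cond_t  wq;
    };

    static void *transport_thread(void *arg)
    {
        struct fake_req *r = arg;

        r->reply = 42;                                /* fill in the reply first */
        __atomic_thread_fence(__ATOMIC_RELEASE);      /* plays the role of smp_wmb() */
        __atomic_store_n(&r->status, 1, __ATOMIC_RELAXED);

        pthread_mutex_lock(&r->lock);                 /* plays the role of wake_up() */
        pthread_cond_signal(&r->wq);
        pthread_mutex_unlock(&r->lock);
        return NULL;
    }

    int main(void)
    {
        struct fake_req r = { .lock = PTHREAD_MUTEX_INITIALIZER,
                              .wq   = PTHREAD_COND_INITIALIZER };
        pthread_t t;

        pthread_create(&t, NULL, transport_thread, &r);

        pthread_mutex_lock(&r.lock);                  /* the wait_event() side */
        while (!__atomic_load_n(&r.status, __ATOMIC_RELAXED))
            pthread_cond_wait(&r.wq, &r.lock);
        pthread_mutex_unlock(&r.lock);

        __atomic_thread_fence(__ATOMIC_ACQUIRE);      /* plays the role of smp_rmb() */
        printf("reply %d\n", r.reply);                /* guaranteed to see 42 */

        pthread_join(t, NULL);
        return 0;
    }

With C11/GCC atomics, the release fence before the store and the acquire fence after the load give the same ordering guarantee that the wmb()/rmb() pair gives in the kernel code above.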
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index b7bd7f2961bf..80d08f6664cb 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
| @@ -66,20 +66,6 @@ struct p9_fd_opts { | |||
| 66 | int privport; | 66 | int privport; |
| 67 | }; | 67 | }; |
| 68 | 68 | ||
| 69 | /** | ||
| 70 | * struct p9_trans_fd - transport state | ||
| 71 | * @rd: reference to file to read from | ||
| 72 | * @wr: reference of file to write to | ||
| 73 | * @conn: connection state reference | ||
| 74 | * | ||
| 75 | */ | ||
| 76 | |||
| 77 | struct p9_trans_fd { | ||
| 78 | struct file *rd; | ||
| 79 | struct file *wr; | ||
| 80 | struct p9_conn *conn; | ||
| 81 | }; | ||
| 82 | |||
| 83 | /* | 69 | /* |
| 84 | * Option Parsing (code inspired by NFS code) | 70 | * Option Parsing (code inspired by NFS code) |
| 85 | * - a little lazy - parse all fd-transport options | 71 | * - a little lazy - parse all fd-transport options |
| @@ -159,6 +145,20 @@ struct p9_conn { | |||
| 159 | unsigned long wsched; | 145 | unsigned long wsched; |
| 160 | }; | 146 | }; |
| 161 | 147 | ||
| 148 | /** | ||
| 149 | * struct p9_trans_fd - transport state | ||
| 150 | * @rd: reference to file to read from | ||
| 151 | * @wr: reference of file to write to | ||
| 152 | * @conn: connection state reference | ||
| 153 | * | ||
| 154 | */ | ||
| 155 | |||
| 156 | struct p9_trans_fd { | ||
| 157 | struct file *rd; | ||
| 158 | struct file *wr; | ||
| 159 | struct p9_conn conn; | ||
| 160 | }; | ||
| 161 | |||
| 162 | static void p9_poll_workfn(struct work_struct *work); | 162 | static void p9_poll_workfn(struct work_struct *work); |
| 163 | 163 | ||
| 164 | static DEFINE_SPINLOCK(p9_poll_lock); | 164 | static DEFINE_SPINLOCK(p9_poll_lock); |
| @@ -212,15 +212,9 @@ static void p9_conn_cancel(struct p9_conn *m, int err) | |||
| 212 | m->err = err; | 212 | m->err = err; |
| 213 | 213 | ||
| 214 | list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { | 214 | list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { |
| 215 | req->status = REQ_STATUS_ERROR; | ||
| 216 | if (!req->t_err) | ||
| 217 | req->t_err = err; | ||
| 218 | list_move(&req->req_list, &cancel_list); | 215 | list_move(&req->req_list, &cancel_list); |
| 219 | } | 216 | } |
| 220 | list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { | 217 | list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { |
| 221 | req->status = REQ_STATUS_ERROR; | ||
| 222 | if (!req->t_err) | ||
| 223 | req->t_err = err; | ||
| 224 | list_move(&req->req_list, &cancel_list); | 218 | list_move(&req->req_list, &cancel_list); |
| 225 | } | 219 | } |
| 226 | spin_unlock_irqrestore(&m->client->lock, flags); | 220 | spin_unlock_irqrestore(&m->client->lock, flags); |
| @@ -228,7 +222,9 @@ static void p9_conn_cancel(struct p9_conn *m, int err) | |||
| 228 | list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { | 222 | list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { |
| 229 | p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req); | 223 | p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req); |
| 230 | list_del(&req->req_list); | 224 | list_del(&req->req_list); |
| 231 | p9_client_cb(m->client, req); | 225 | if (!req->t_err) |
| 226 | req->t_err = err; | ||
| 227 | p9_client_cb(m->client, req, REQ_STATUS_ERROR); | ||
| 232 | } | 228 | } |
| 233 | } | 229 | } |
| 234 | 230 | ||
| @@ -302,6 +298,7 @@ static void p9_read_work(struct work_struct *work) | |||
| 302 | { | 298 | { |
| 303 | int n, err; | 299 | int n, err; |
| 304 | struct p9_conn *m; | 300 | struct p9_conn *m; |
| 301 | int status = REQ_STATUS_ERROR; | ||
| 305 | 302 | ||
| 306 | m = container_of(work, struct p9_conn, rq); | 303 | m = container_of(work, struct p9_conn, rq); |
| 307 | 304 | ||
| @@ -348,8 +345,7 @@ static void p9_read_work(struct work_struct *work) | |||
| 348 | "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag); | 345 | "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag); |
| 349 | 346 | ||
| 350 | m->req = p9_tag_lookup(m->client, tag); | 347 | m->req = p9_tag_lookup(m->client, tag); |
| 351 | if (!m->req || (m->req->status != REQ_STATUS_SENT && | 348 | if (!m->req || (m->req->status != REQ_STATUS_SENT)) { |
| 352 | m->req->status != REQ_STATUS_FLSH)) { | ||
| 353 | p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n", | 349 | p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n", |
| 354 | tag); | 350 | tag); |
| 355 | err = -EIO; | 351 | err = -EIO; |
| @@ -375,10 +371,10 @@ static void p9_read_work(struct work_struct *work) | |||
| 375 | p9_debug(P9_DEBUG_TRANS, "got new packet\n"); | 371 | p9_debug(P9_DEBUG_TRANS, "got new packet\n"); |
| 376 | spin_lock(&m->client->lock); | 372 | spin_lock(&m->client->lock); |
| 377 | if (m->req->status != REQ_STATUS_ERROR) | 373 | if (m->req->status != REQ_STATUS_ERROR) |
| 378 | m->req->status = REQ_STATUS_RCVD; | 374 | status = REQ_STATUS_RCVD; |
| 379 | list_del(&m->req->req_list); | 375 | list_del(&m->req->req_list); |
| 380 | spin_unlock(&m->client->lock); | 376 | spin_unlock(&m->client->lock); |
| 381 | p9_client_cb(m->client, m->req); | 377 | p9_client_cb(m->client, m->req, status); |
| 382 | m->rbuf = NULL; | 378 | m->rbuf = NULL; |
| 383 | m->rpos = 0; | 379 | m->rpos = 0; |
| 384 | m->rsize = 0; | 380 | m->rsize = 0; |
| @@ -573,21 +569,19 @@ p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p) | |||
| 573 | } | 569 | } |
| 574 | 570 | ||
| 575 | /** | 571 | /** |
| 576 | * p9_conn_create - allocate and initialize the per-session mux data | 572 | * p9_conn_create - initialize the per-session mux data |
| 577 | * @client: client instance | 573 | * @client: client instance |
| 578 | * | 574 | * |
| 579 | * Note: Creates the polling task if this is the first session. | 575 | * Note: Creates the polling task if this is the first session. |
| 580 | */ | 576 | */ |
| 581 | 577 | ||
| 582 | static struct p9_conn *p9_conn_create(struct p9_client *client) | 578 | static void p9_conn_create(struct p9_client *client) |
| 583 | { | 579 | { |
| 584 | int n; | 580 | int n; |
| 585 | struct p9_conn *m; | 581 | struct p9_trans_fd *ts = client->trans; |
| 582 | struct p9_conn *m = &ts->conn; | ||
| 586 | 583 | ||
| 587 | p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize); | 584 | p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize); |
| 588 | m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL); | ||
| 589 | if (!m) | ||
| 590 | return ERR_PTR(-ENOMEM); | ||
| 591 | 585 | ||
| 592 | INIT_LIST_HEAD(&m->mux_list); | 586 | INIT_LIST_HEAD(&m->mux_list); |
| 593 | m->client = client; | 587 | m->client = client; |
| @@ -609,8 +603,6 @@ static struct p9_conn *p9_conn_create(struct p9_client *client) | |||
| 609 | p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m); | 603 | p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m); |
| 610 | set_bit(Wpending, &m->wsched); | 604 | set_bit(Wpending, &m->wsched); |
| 611 | } | 605 | } |
| 612 | |||
| 613 | return m; | ||
| 614 | } | 606 | } |
| 615 | 607 | ||
| 616 | /** | 608 | /** |
| @@ -669,7 +661,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req) | |||
| 669 | { | 661 | { |
| 670 | int n; | 662 | int n; |
| 671 | struct p9_trans_fd *ts = client->trans; | 663 | struct p9_trans_fd *ts = client->trans; |
| 672 | struct p9_conn *m = ts->conn; | 664 | struct p9_conn *m = &ts->conn; |
| 673 | 665 | ||
| 674 | p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", | 666 | p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", |
| 675 | m, current, req->tc, req->tc->id); | 667 | m, current, req->tc, req->tc->id); |
| @@ -704,14 +696,26 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req) | |||
| 704 | list_del(&req->req_list); | 696 | list_del(&req->req_list); |
| 705 | req->status = REQ_STATUS_FLSHD; | 697 | req->status = REQ_STATUS_FLSHD; |
| 706 | ret = 0; | 698 | ret = 0; |
| 707 | } else if (req->status == REQ_STATUS_SENT) | 699 | } |
| 708 | req->status = REQ_STATUS_FLSH; | ||
| 709 | |||
| 710 | spin_unlock(&client->lock); | 700 | spin_unlock(&client->lock); |
| 711 | 701 | ||
| 712 | return ret; | 702 | return ret; |
| 713 | } | 703 | } |
| 714 | 704 | ||
| 705 | static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req) | ||
| 706 | { | ||
| 707 | p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); | ||
| 708 | |||
| 709 | /* we haven't received a response for oldreq, | ||
| 710 | * remove it from the list. | ||
| 711 | */ | ||
| 712 | spin_lock(&client->lock); | ||
| 713 | list_del(&req->req_list); | ||
| 714 | spin_unlock(&client->lock); | ||
| 715 | |||
| 716 | return 0; | ||
| 717 | } | ||
| 718 | |||
| 715 | /** | 719 | /** |
| 716 | * parse_opts - parse mount options into p9_fd_opts structure | 720 | * parse_opts - parse mount options into p9_fd_opts structure |
| 717 | * @params: options string passed from mount | 721 | * @params: options string passed from mount |
| @@ -780,7 +784,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts) | |||
| 780 | 784 | ||
| 781 | static int p9_fd_open(struct p9_client *client, int rfd, int wfd) | 785 | static int p9_fd_open(struct p9_client *client, int rfd, int wfd) |
| 782 | { | 786 | { |
| 783 | struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd), | 787 | struct p9_trans_fd *ts = kzalloc(sizeof(struct p9_trans_fd), |
| 784 | GFP_KERNEL); | 788 | GFP_KERNEL); |
| 785 | if (!ts) | 789 | if (!ts) |
| 786 | return -ENOMEM; | 790 | return -ENOMEM; |
| @@ -806,9 +810,8 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket) | |||
| 806 | { | 810 | { |
| 807 | struct p9_trans_fd *p; | 811 | struct p9_trans_fd *p; |
| 808 | struct file *file; | 812 | struct file *file; |
| 809 | int ret; | ||
| 810 | 813 | ||
| 811 | p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL); | 814 | p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL); |
| 812 | if (!p) | 815 | if (!p) |
| 813 | return -ENOMEM; | 816 | return -ENOMEM; |
| 814 | 817 | ||
| @@ -829,20 +832,12 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket) | |||
| 829 | 832 | ||
| 830 | p->rd->f_flags |= O_NONBLOCK; | 833 | p->rd->f_flags |= O_NONBLOCK; |
| 831 | 834 | ||
| 832 | p->conn = p9_conn_create(client); | 835 | p9_conn_create(client); |
| 833 | if (IS_ERR(p->conn)) { | ||
| 834 | ret = PTR_ERR(p->conn); | ||
| 835 | p->conn = NULL; | ||
| 836 | kfree(p); | ||
| 837 | sockfd_put(csocket); | ||
| 838 | sockfd_put(csocket); | ||
| 839 | return ret; | ||
| 840 | } | ||
| 841 | return 0; | 836 | return 0; |
| 842 | } | 837 | } |
| 843 | 838 | ||
| 844 | /** | 839 | /** |
| 845 | * p9_mux_destroy - cancels all pending requests and frees mux resources | 840 | * p9_mux_destroy - cancels all pending requests of mux |
| 846 | * @m: mux to destroy | 841 | * @m: mux to destroy |
| 847 | * | 842 | * |
| 848 | */ | 843 | */ |
| @@ -859,7 +854,6 @@ static void p9_conn_destroy(struct p9_conn *m) | |||
| 859 | p9_conn_cancel(m, -ECONNRESET); | 854 | p9_conn_cancel(m, -ECONNRESET); |
| 860 | 855 | ||
| 861 | m->client = NULL; | 856 | m->client = NULL; |
| 862 | kfree(m); | ||
| 863 | } | 857 | } |
| 864 | 858 | ||
| 865 | /** | 859 | /** |
| @@ -881,7 +875,7 @@ static void p9_fd_close(struct p9_client *client) | |||
| 881 | 875 | ||
| 882 | client->status = Disconnected; | 876 | client->status = Disconnected; |
| 883 | 877 | ||
| 884 | p9_conn_destroy(ts->conn); | 878 | p9_conn_destroy(&ts->conn); |
| 885 | 879 | ||
| 886 | if (ts->rd) | 880 | if (ts->rd) |
| 887 | fput(ts->rd); | 881 | fput(ts->rd); |
| @@ -1033,14 +1027,7 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args) | |||
| 1033 | return err; | 1027 | return err; |
| 1034 | 1028 | ||
| 1035 | p = (struct p9_trans_fd *) client->trans; | 1029 | p = (struct p9_trans_fd *) client->trans; |
| 1036 | p->conn = p9_conn_create(client); | 1030 | p9_conn_create(client); |
| 1037 | if (IS_ERR(p->conn)) { | ||
| 1038 | err = PTR_ERR(p->conn); | ||
| 1039 | p->conn = NULL; | ||
| 1040 | fput(p->rd); | ||
| 1041 | fput(p->wr); | ||
| 1042 | return err; | ||
| 1043 | } | ||
| 1044 | 1031 | ||
| 1045 | return 0; | 1032 | return 0; |
| 1046 | } | 1033 | } |
| @@ -1053,6 +1040,7 @@ static struct p9_trans_module p9_tcp_trans = { | |||
| 1053 | .close = p9_fd_close, | 1040 | .close = p9_fd_close, |
| 1054 | .request = p9_fd_request, | 1041 | .request = p9_fd_request, |
| 1055 | .cancel = p9_fd_cancel, | 1042 | .cancel = p9_fd_cancel, |
| 1043 | .cancelled = p9_fd_cancelled, | ||
| 1056 | .owner = THIS_MODULE, | 1044 | .owner = THIS_MODULE, |
| 1057 | }; | 1045 | }; |
| 1058 | 1046 | ||
| @@ -1064,6 +1052,7 @@ static struct p9_trans_module p9_unix_trans = { | |||
| 1064 | .close = p9_fd_close, | 1052 | .close = p9_fd_close, |
| 1065 | .request = p9_fd_request, | 1053 | .request = p9_fd_request, |
| 1066 | .cancel = p9_fd_cancel, | 1054 | .cancel = p9_fd_cancel, |
| 1055 | .cancelled = p9_fd_cancelled, | ||
| 1067 | .owner = THIS_MODULE, | 1056 | .owner = THIS_MODULE, |
| 1068 | }; | 1057 | }; |
| 1069 | 1058 | ||
| @@ -1075,6 +1064,7 @@ static struct p9_trans_module p9_fd_trans = { | |||
| 1075 | .close = p9_fd_close, | 1064 | .close = p9_fd_close, |
| 1076 | .request = p9_fd_request, | 1065 | .request = p9_fd_request, |
| 1077 | .cancel = p9_fd_cancel, | 1066 | .cancel = p9_fd_cancel, |
| 1067 | .cancelled = p9_fd_cancelled, | ||
| 1078 | .owner = THIS_MODULE, | 1068 | .owner = THIS_MODULE, |
| 1079 | }; | 1069 | }; |
| 1080 | 1070 | ||
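Two things change in the fd transport above: struct p9_conn is now embedded in struct p9_trans_fd (so p9_conn_create() no longer allocates and can no longer fail), and the transport gains a .cancelled hook next to .cancel. Per the client.c hunk earlier, .cancel runs before the flush is issued and may still unqueue an unsent request, while .cancelled runs from p9_client_flush() once the flush has completed for a request that was sent but never answered; for the fd transport that just means unlinking it from the request list. The sketch below shows how a flush path can dispatch to such a pair of hooks; xreq, xtrans_ops and flush_request are invented names, not the real 9p structures, and the TFLUSH exchange itself is elided.

    /* Sketch of the cancel/cancelled split, with invented types. */
    enum { XREQ_UNSENT = 1, XREQ_SENT = 2 };

    struct xreq { int status; };

    struct xtrans_ops {
        int (*cancel)(struct xreq *req);      /* before TFLUSH is sent          */
        int (*cancelled)(struct xreq *req);   /* TFLUSH done, no reply arrived  */
    };

    void flush_request(const struct xtrans_ops *ops, struct xreq *old)
    {
        if (ops->cancel && ops->cancel(old) == 0)
            return;                 /* transport already unqueued the request   */

        /* ... send TFLUSH and wait for its reply here (elided) ... */

        if (old->status == XREQ_SENT && ops->cancelled)
            ops->cancelled(old);    /* e.g. drop it from the transport's list   */
    }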
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 8f68df5d2973..14ad43b5cf89 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
| @@ -193,6 +193,8 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts) | |||
| 193 | if (!*p) | 193 | if (!*p) |
| 194 | continue; | 194 | continue; |
| 195 | token = match_token(p, tokens, args); | 195 | token = match_token(p, tokens, args); |
| 196 | if (token == Opt_err) | ||
| 197 | continue; | ||
| 196 | r = match_int(&args[0], &option); | 198 | r = match_int(&args[0], &option); |
| 197 | if (r < 0) { | 199 | if (r < 0) { |
| 198 | p9_debug(P9_DEBUG_ERROR, | 200 | p9_debug(P9_DEBUG_ERROR, |
| @@ -305,8 +307,7 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma, | |||
| 305 | } | 307 | } |
| 306 | 308 | ||
| 307 | req->rc = c->rc; | 309 | req->rc = c->rc; |
| 308 | req->status = REQ_STATUS_RCVD; | 310 | p9_client_cb(client, req, REQ_STATUS_RCVD); |
| 309 | p9_client_cb(client, req); | ||
| 310 | 311 | ||
| 311 | return; | 312 | return; |
| 312 | 313 | ||
| @@ -511,6 +512,11 @@ dont_need_post_recv: | |||
| 511 | goto send_error; | 512 | goto send_error; |
| 512 | } | 513 | } |
| 513 | 514 | ||
| 515 | /* Mark request as `sent' *before* we actually send it, | ||
| 516 | * because doing if after could erase the REQ_STATUS_RCVD | ||
| 517 | * status in case of a very fast reply. | ||
| 518 | */ | ||
| 519 | req->status = REQ_STATUS_SENT; | ||
| 514 | err = ib_post_send(rdma->qp, &wr, &bad_wr); | 520 | err = ib_post_send(rdma->qp, &wr, &bad_wr); |
| 515 | if (err) | 521 | if (err) |
| 516 | goto send_error; | 522 | goto send_error; |
| @@ -520,6 +526,7 @@ dont_need_post_recv: | |||
| 520 | 526 | ||
| 521 | /* Handle errors that happened during or while preparing the send: */ | 527 | /* Handle errors that happened during or while preparing the send: */ |
| 522 | send_error: | 528 | send_error: |
| 529 | req->status = REQ_STATUS_ERROR; | ||
| 523 | kfree(c); | 530 | kfree(c); |
| 524 | p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err); | 531 | p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err); |
| 525 | 532 | ||
| @@ -582,12 +589,24 @@ static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts) | |||
| 582 | return rdma; | 589 | return rdma; |
| 583 | } | 590 | } |
| 584 | 591 | ||
| 585 | /* its not clear to me we can do anything after send has been posted */ | ||
| 586 | static int rdma_cancel(struct p9_client *client, struct p9_req_t *req) | 592 | static int rdma_cancel(struct p9_client *client, struct p9_req_t *req) |
| 587 | { | 593 | { |
| 594 | /* Nothing to do here. | ||
| 595 | * We will take care of it (if we have to) in rdma_cancelled() | ||
| 596 | */ | ||
| 588 | return 1; | 597 | return 1; |
| 589 | } | 598 | } |
| 590 | 599 | ||
| 600 | /* A request has been fully flushed without a reply. | ||
| 601 | * That means we have posted one buffer in excess. | ||
| 602 | */ | ||
| 603 | static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req) | ||
| 604 | { | ||
| 605 | struct p9_trans_rdma *rdma = client->trans; | ||
| 606 | atomic_inc(&rdma->excess_rc); | ||
| 607 | return 0; | ||
| 608 | } | ||
| 609 | |||
| 591 | /** | 610 | /** |
| 592 | * trans_create_rdma - Transport method for creating atransport instance | 611 | * trans_create_rdma - Transport method for creating atransport instance |
| 593 | * @client: client instance | 612 | * @client: client instance |
| @@ -721,6 +740,7 @@ static struct p9_trans_module p9_rdma_trans = { | |||
| 721 | .close = rdma_close, | 740 | .close = rdma_close, |
| 722 | .request = rdma_request, | 741 | .request = rdma_request, |
| 723 | .cancel = rdma_cancel, | 742 | .cancel = rdma_cancel, |
| 743 | .cancelled = rdma_cancelled, | ||
| 724 | }; | 744 | }; |
| 725 | 745 | ||
| 726 | /** | 746 | /** |
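For RDMA a receive buffer is posted per expected reply, so a request that is flushed without ever being answered leaves one posted buffer behind; rdma_cancelled() records that in rdma->excess_rc so that a later send can account for it (the dont_need_post_recv label in the request path above points the same way). The fragment below only illustrates that style of credit accounting with a plain C11 atomic counter; the function names are made up and this is not the actual rdma_request() bookkeeping.

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int excess_rc;    /* receive buffers posted but never consumed */

    /* A request was flushed without a reply: its posted buffer is now spare. */
    void note_orphaned_recv_buffer(void)
    {
        atomic_fetch_add(&excess_rc, 1);
    }

    /* Before sending a new request: reuse a spare buffer if one exists. */
    bool need_to_post_recv(void)
    {
        int old = atomic_load(&excess_rc);

        while (old > 0) {
            if (atomic_compare_exchange_weak(&excess_rc, &old, old - 1))
                return false;       /* consumed one spare buffer                */
        }
        return true;                /* nothing spare, post a fresh buffer       */
    }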
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index ac2666c1d011..6940d8fe8971 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
| @@ -164,8 +164,7 @@ static void req_done(struct virtqueue *vq) | |||
| 164 | p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc); | 164 | p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc); |
| 165 | p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag); | 165 | p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag); |
| 166 | req = p9_tag_lookup(chan->client, rc->tag); | 166 | req = p9_tag_lookup(chan->client, rc->tag); |
| 167 | req->status = REQ_STATUS_RCVD; | 167 | p9_client_cb(chan->client, req, REQ_STATUS_RCVD); |
| 168 | p9_client_cb(chan->client, req); | ||
| 169 | } | 168 | } |
| 170 | } | 169 | } |
| 171 | 170 | ||
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 8215f7cb170b..ba291ce4bdff 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
| @@ -68,7 +68,7 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip) | |||
| 68 | 68 | ||
| 69 | sk = sk_atm(atmarpd); | 69 | sk = sk_atm(atmarpd); |
| 70 | skb_queue_tail(&sk->sk_receive_queue, skb); | 70 | skb_queue_tail(&sk->sk_receive_queue, skb); |
| 71 | sk->sk_data_ready(sk, skb->len); | 71 | sk->sk_data_ready(sk); |
| 72 | return 0; | 72 | return 0; |
| 73 | } | 73 | } |
| 74 | 74 | ||
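The clip.c hunk above is the first of many identical conversions in this diff: sk_data_ready() loses its second argument. None of the converted callbacks rely on the passed-in byte count, since a ready callback has to inspect the receive queue itself anyway, so the prototype becomes void (*sk_data_ready)(struct sock *sk) and every call site drops the length. A stand-alone sketch of the new shape of the callback follows; fake_sock, my_data_ready and deliver are illustrative stand-ins, not kernel types.

    /* Stand-in types: the point is only the callback signature. */
    struct fake_sock {
        int queue_len;                              /* ~ sk_receive_queue depth */
        void (*data_ready)(struct fake_sock *sk);   /* no length argument       */
    };

    void my_data_ready(struct fake_sock *sk)
    {
        /* A callback that cares how much data is queued reads it from the
         * socket itself; the amount of new data is no longer passed in. */
        (void)sk;
    }

    void deliver(struct fake_sock *sk)
    {
        sk->queue_len++;             /* ~ skb_queue_tail()                      */
        sk->data_ready(sk);          /* was sk->data_ready(sk, skb->len)        */
    }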
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5a2f602d07e1..4c5b8ba0f84f 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
| @@ -152,7 +152,7 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev) | |||
| 152 | atm_force_charge(priv->lecd, skb2->truesize); | 152 | atm_force_charge(priv->lecd, skb2->truesize); |
| 153 | sk = sk_atm(priv->lecd); | 153 | sk = sk_atm(priv->lecd); |
| 154 | skb_queue_tail(&sk->sk_receive_queue, skb2); | 154 | skb_queue_tail(&sk->sk_receive_queue, skb2); |
| 155 | sk->sk_data_ready(sk, skb2->len); | 155 | sk->sk_data_ready(sk); |
| 156 | } | 156 | } |
| 157 | } | 157 | } |
| 158 | #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ | 158 | #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ |
| @@ -447,7 +447,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) | |||
| 447 | atm_force_charge(priv->lecd, skb2->truesize); | 447 | atm_force_charge(priv->lecd, skb2->truesize); |
| 448 | sk = sk_atm(priv->lecd); | 448 | sk = sk_atm(priv->lecd); |
| 449 | skb_queue_tail(&sk->sk_receive_queue, skb2); | 449 | skb_queue_tail(&sk->sk_receive_queue, skb2); |
| 450 | sk->sk_data_ready(sk, skb2->len); | 450 | sk->sk_data_ready(sk); |
| 451 | } | 451 | } |
| 452 | } | 452 | } |
| 453 | #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ | 453 | #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ |
| @@ -530,13 +530,13 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type, | |||
| 530 | atm_force_charge(priv->lecd, skb->truesize); | 530 | atm_force_charge(priv->lecd, skb->truesize); |
| 531 | sk = sk_atm(priv->lecd); | 531 | sk = sk_atm(priv->lecd); |
| 532 | skb_queue_tail(&sk->sk_receive_queue, skb); | 532 | skb_queue_tail(&sk->sk_receive_queue, skb); |
| 533 | sk->sk_data_ready(sk, skb->len); | 533 | sk->sk_data_ready(sk); |
| 534 | 534 | ||
| 535 | if (data != NULL) { | 535 | if (data != NULL) { |
| 536 | pr_debug("about to send %d bytes of data\n", data->len); | 536 | pr_debug("about to send %d bytes of data\n", data->len); |
| 537 | atm_force_charge(priv->lecd, data->truesize); | 537 | atm_force_charge(priv->lecd, data->truesize); |
| 538 | skb_queue_tail(&sk->sk_receive_queue, data); | 538 | skb_queue_tail(&sk->sk_receive_queue, data); |
| 539 | sk->sk_data_ready(sk, skb->len); | 539 | sk->sk_data_ready(sk); |
| 540 | } | 540 | } |
| 541 | 541 | ||
| 542 | return 0; | 542 | return 0; |
| @@ -616,7 +616,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) | |||
| 616 | 616 | ||
| 617 | pr_debug("%s: To daemon\n", dev->name); | 617 | pr_debug("%s: To daemon\n", dev->name); |
| 618 | skb_queue_tail(&sk->sk_receive_queue, skb); | 618 | skb_queue_tail(&sk->sk_receive_queue, skb); |
| 619 | sk->sk_data_ready(sk, skb->len); | 619 | sk->sk_data_ready(sk); |
| 620 | } else { /* Data frame, queue to protocol handlers */ | 620 | } else { /* Data frame, queue to protocol handlers */ |
| 621 | struct lec_arp_table *entry; | 621 | struct lec_arp_table *entry; |
| 622 | unsigned char *src, *dst; | 622 | unsigned char *src, *dst; |
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 91dc58f1124d..e8e0e7a8a23d 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
| @@ -706,7 +706,7 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) | |||
| 706 | dprintk("(%s) control packet arrived\n", dev->name); | 706 | dprintk("(%s) control packet arrived\n", dev->name); |
| 707 | /* Pass control packets to daemon */ | 707 | /* Pass control packets to daemon */ |
| 708 | skb_queue_tail(&sk->sk_receive_queue, skb); | 708 | skb_queue_tail(&sk->sk_receive_queue, skb); |
| 709 | sk->sk_data_ready(sk, skb->len); | 709 | sk->sk_data_ready(sk); |
| 710 | return; | 710 | return; |
| 711 | } | 711 | } |
| 712 | 712 | ||
| @@ -992,7 +992,7 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc) | |||
| 992 | 992 | ||
| 993 | sk = sk_atm(mpc->mpoad_vcc); | 993 | sk = sk_atm(mpc->mpoad_vcc); |
| 994 | skb_queue_tail(&sk->sk_receive_queue, skb); | 994 | skb_queue_tail(&sk->sk_receive_queue, skb); |
| 995 | sk->sk_data_ready(sk, skb->len); | 995 | sk->sk_data_ready(sk); |
| 996 | 996 | ||
| 997 | return 0; | 997 | return 0; |
| 998 | } | 998 | } |
| @@ -1273,7 +1273,7 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry) | |||
| 1273 | 1273 | ||
| 1274 | sk = sk_atm(vcc); | 1274 | sk = sk_atm(vcc); |
| 1275 | skb_queue_tail(&sk->sk_receive_queue, skb); | 1275 | skb_queue_tail(&sk->sk_receive_queue, skb); |
| 1276 | sk->sk_data_ready(sk, skb->len); | 1276 | sk->sk_data_ready(sk); |
| 1277 | dprintk("exiting\n"); | 1277 | dprintk("exiting\n"); |
| 1278 | } | 1278 | } |
| 1279 | 1279 | ||
diff --git a/net/atm/raw.c b/net/atm/raw.c
index b4f7b9ff3c74..2e17e97a7a8b 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
| @@ -25,7 +25,7 @@ static void atm_push_raw(struct atm_vcc *vcc, struct sk_buff *skb) | |||
| 25 | struct sock *sk = sk_atm(vcc); | 25 | struct sock *sk = sk_atm(vcc); |
| 26 | 26 | ||
| 27 | skb_queue_tail(&sk->sk_receive_queue, skb); | 27 | skb_queue_tail(&sk->sk_receive_queue, skb); |
| 28 | sk->sk_data_ready(sk, skb->len); | 28 | sk->sk_data_ready(sk); |
| 29 | } | 29 | } |
| 30 | } | 30 | } |
| 31 | 31 | ||
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 4176887e72eb..523bce72f698 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
| @@ -51,7 +51,7 @@ static void sigd_put_skb(struct sk_buff *skb) | |||
| 51 | #endif | 51 | #endif |
| 52 | atm_force_charge(sigd, skb->truesize); | 52 | atm_force_charge(sigd, skb->truesize); |
| 53 | skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb); | 53 | skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb); |
| 54 | sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len); | 54 | sk_atm(sigd)->sk_data_ready(sk_atm(sigd)); |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg) | 57 | static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg) |
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index 96f4cab3a2f9..7ed8ab724819 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
| @@ -422,7 +422,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 422 | 422 | ||
| 423 | if (sk) { | 423 | if (sk) { |
| 424 | if (!sock_flag(sk, SOCK_DEAD)) | 424 | if (!sock_flag(sk, SOCK_DEAD)) |
| 425 | sk->sk_data_ready(sk, skb->len); | 425 | sk->sk_data_ready(sk); |
| 426 | sock_put(sk); | 426 | sock_put(sk); |
| 427 | } else { | 427 | } else { |
| 428 | free: | 428 | free: |
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index d958e2dca52f..521fd4f3985e 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
| @@ -819,14 +819,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) | |||
| 819 | if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { | 819 | if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { |
| 820 | struct hci_cp_auth_requested cp; | 820 | struct hci_cp_auth_requested cp; |
| 821 | 821 | ||
| 822 | /* encrypt must be pending if auth is also pending */ | ||
| 823 | set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); | ||
| 824 | |||
| 825 | cp.handle = cpu_to_le16(conn->handle); | 822 | cp.handle = cpu_to_le16(conn->handle); |
| 826 | hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, | 823 | hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, |
| 827 | sizeof(cp), &cp); | 824 | sizeof(cp), &cp); |
| 825 | |||
| 826 | /* If we're already encrypted set the REAUTH_PEND flag, | ||
| 827 | * otherwise set the ENCRYPT_PEND. | ||
| 828 | */ | ||
| 828 | if (conn->key_type != 0xff) | 829 | if (conn->key_type != 0xff) |
| 829 | set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); | 830 | set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); |
| 831 | else | ||
| 832 | set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); | ||
| 830 | } | 833 | } |
| 831 | 834 | ||
| 832 | return 0; | 835 | return 0; |
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 49774912cb01..15010a230b6d 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
| @@ -3330,6 +3330,12 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev, | |||
| 3330 | if (!conn) | 3330 | if (!conn) |
| 3331 | goto unlock; | 3331 | goto unlock; |
| 3332 | 3332 | ||
| 3333 | /* For BR/EDR the necessary steps are taken through the | ||
| 3334 | * auth_complete event. | ||
| 3335 | */ | ||
| 3336 | if (conn->type != LE_LINK) | ||
| 3337 | goto unlock; | ||
| 3338 | |||
| 3333 | if (!ev->status) | 3339 | if (!ev->status) |
| 3334 | conn->sec_level = conn->pending_sec_level; | 3340 | conn->sec_level = conn->pending_sec_level; |
| 3335 | 3341 | ||
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index f59e00c2daa9..ef5e5b04f34f 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
| @@ -1271,7 +1271,7 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err) | |||
| 1271 | 1271 | ||
| 1272 | if (parent) { | 1272 | if (parent) { |
| 1273 | bt_accept_unlink(sk); | 1273 | bt_accept_unlink(sk); |
| 1274 | parent->sk_data_ready(parent, 0); | 1274 | parent->sk_data_ready(parent); |
| 1275 | } else { | 1275 | } else { |
| 1276 | sk->sk_state_change(sk); | 1276 | sk->sk_state_change(sk); |
| 1277 | } | 1277 | } |
| @@ -1327,7 +1327,7 @@ static void l2cap_sock_ready_cb(struct l2cap_chan *chan) | |||
| 1327 | sk->sk_state_change(sk); | 1327 | sk->sk_state_change(sk); |
| 1328 | 1328 | ||
| 1329 | if (parent) | 1329 | if (parent) |
| 1330 | parent->sk_data_ready(parent, 0); | 1330 | parent->sk_data_ready(parent); |
| 1331 | 1331 | ||
| 1332 | release_sock(sk); | 1332 | release_sock(sk); |
| 1333 | } | 1333 | } |
| @@ -1340,7 +1340,7 @@ static void l2cap_sock_defer_cb(struct l2cap_chan *chan) | |||
| 1340 | 1340 | ||
| 1341 | parent = bt_sk(sk)->parent; | 1341 | parent = bt_sk(sk)->parent; |
| 1342 | if (parent) | 1342 | if (parent) |
| 1343 | parent->sk_data_ready(parent, 0); | 1343 | parent->sk_data_ready(parent); |
| 1344 | 1344 | ||
| 1345 | release_sock(sk); | 1345 | release_sock(sk); |
| 1346 | } | 1346 | } |
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 633cceeb943e..cf620260affa 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
| @@ -186,9 +186,9 @@ static void rfcomm_l2state_change(struct sock *sk) | |||
| 186 | rfcomm_schedule(); | 186 | rfcomm_schedule(); |
| 187 | } | 187 | } |
| 188 | 188 | ||
| 189 | static void rfcomm_l2data_ready(struct sock *sk, int bytes) | 189 | static void rfcomm_l2data_ready(struct sock *sk) |
| 190 | { | 190 | { |
| 191 | BT_DBG("%p bytes %d", sk, bytes); | 191 | BT_DBG("%p", sk); |
| 192 | rfcomm_schedule(); | 192 | rfcomm_schedule(); |
| 193 | } | 193 | } |
| 194 | 194 | ||
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index eabd25ab5ad9..c603a5eb4720 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
| @@ -54,7 +54,7 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb) | |||
| 54 | 54 | ||
| 55 | atomic_add(skb->len, &sk->sk_rmem_alloc); | 55 | atomic_add(skb->len, &sk->sk_rmem_alloc); |
| 56 | skb_queue_tail(&sk->sk_receive_queue, skb); | 56 | skb_queue_tail(&sk->sk_receive_queue, skb); |
| 57 | sk->sk_data_ready(sk, skb->len); | 57 | sk->sk_data_ready(sk); |
| 58 | 58 | ||
| 59 | if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) | 59 | if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) |
| 60 | rfcomm_dlc_throttle(d); | 60 | rfcomm_dlc_throttle(d); |
| @@ -84,7 +84,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) | |||
| 84 | sock_set_flag(sk, SOCK_ZAPPED); | 84 | sock_set_flag(sk, SOCK_ZAPPED); |
| 85 | bt_accept_unlink(sk); | 85 | bt_accept_unlink(sk); |
| 86 | } | 86 | } |
| 87 | parent->sk_data_ready(parent, 0); | 87 | parent->sk_data_ready(parent); |
| 88 | } else { | 88 | } else { |
| 89 | if (d->state == BT_CONNECTED) | 89 | if (d->state == BT_CONNECTED) |
| 90 | rfcomm_session_getaddr(d->session, | 90 | rfcomm_session_getaddr(d->session, |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index ab1e6fcca4c5..c06dbd3938e8 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
| @@ -1024,7 +1024,7 @@ static void sco_conn_ready(struct sco_conn *conn) | |||
| 1024 | sk->sk_state = BT_CONNECTED; | 1024 | sk->sk_state = BT_CONNECTED; |
| 1025 | 1025 | ||
| 1026 | /* Wake up parent */ | 1026 | /* Wake up parent */ |
| 1027 | parent->sk_data_ready(parent, 1); | 1027 | parent->sk_data_ready(parent); |
| 1028 | 1028 | ||
| 1029 | bh_unlock_sock(parent); | 1029 | bh_unlock_sock(parent); |
| 1030 | 1030 | ||
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index d0cca3c65f01..7985deaff52f 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
| @@ -73,7 +73,7 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
| 73 | goto drop; | 73 | goto drop; |
| 74 | 74 | ||
| 75 | if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid)) | 75 | if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid)) |
| 76 | goto drop; | 76 | goto out; |
| 77 | 77 | ||
| 78 | /* insert into forwarding database after filtering to avoid spoofing */ | 78 | /* insert into forwarding database after filtering to avoid spoofing */ |
| 79 | br = p->br; | 79 | br = p->br; |
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 91510712c7a7..4a3716102789 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
| @@ -170,7 +170,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, | |||
| 170 | * rejected. | 170 | * rejected. |
| 171 | */ | 171 | */ |
| 172 | if (!v) | 172 | if (!v) |
| 173 | return false; | 173 | goto drop; |
| 174 | 174 | ||
| 175 | /* If vlan tx offload is disabled on bridge device and frame was | 175 | /* If vlan tx offload is disabled on bridge device and frame was |
| 176 | * sent from vlan device on the bridge device, it does not have | 176 | * sent from vlan device on the bridge device, it does not have |
| @@ -193,7 +193,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, | |||
| 193 | * vlan untagged or priority-tagged traffic belongs to. | 193 | * vlan untagged or priority-tagged traffic belongs to. |
| 194 | */ | 194 | */ |
| 195 | if (pvid == VLAN_N_VID) | 195 | if (pvid == VLAN_N_VID) |
| 196 | return false; | 196 | goto drop; |
| 197 | 197 | ||
| 198 | /* PVID is set on this port. Any untagged or priority-tagged | 198 | /* PVID is set on this port. Any untagged or priority-tagged |
| 199 | * ingress frame is considered to belong to this vlan. | 199 | * ingress frame is considered to belong to this vlan. |
| @@ -216,7 +216,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, | |||
| 216 | /* Frame had a valid vlan tag. See if vlan is allowed */ | 216 | /* Frame had a valid vlan tag. See if vlan is allowed */ |
| 217 | if (test_bit(*vid, v->vlan_bitmap)) | 217 | if (test_bit(*vid, v->vlan_bitmap)) |
| 218 | return true; | 218 | return true; |
| 219 | 219 | drop: | |
| 220 | kfree_skb(skb); | ||
| 220 | return false; | 221 | return false; |
| 221 | } | 222 | } |
| 222 | 223 | ||
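The two bridge hunks belong together: br_allowed_ingress() now frees the skb itself on every rejection path (the new drop label with kfree_skb()), so br_handle_frame_finish() jumps to out rather than drop when the call fails; otherwise the buffer would be freed twice. The sketch below shows that ownership convention in isolation; buf, allowed_ingress and handle_frame are invented names, and the real vlan checks are reduced to a single flag.

    #include <stdbool.h>
    #include <stdlib.h>

    struct buf { char *data; };

    static void free_buf(struct buf *b)
    {
        free(b->data);
        free(b);
    }

    /* Returns true if the caller may keep using b; on false, b has been freed. */
    bool allowed_ingress(struct buf *b, bool vlan_ok)
    {
        if (!vlan_ok) {
            free_buf(b);             /* cf. the new kfree_skb() at "drop:"      */
            return false;
        }
        return true;
    }

    void handle_frame(struct buf *b, bool vlan_ok)
    {
        if (!allowed_ingress(b, vlan_ok))
            return;                  /* "goto out": do NOT free b again         */

        /* ... learn the source address, forward b ... */
        free_buf(b);
    }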
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index d6be3edb7a43..e8437094d15f 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
| @@ -124,7 +124,6 @@ static void caif_flow_ctrl(struct sock *sk, int mode) | |||
| 124 | static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 124 | static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
| 125 | { | 125 | { |
| 126 | int err; | 126 | int err; |
| 127 | int skb_len; | ||
| 128 | unsigned long flags; | 127 | unsigned long flags; |
| 129 | struct sk_buff_head *list = &sk->sk_receive_queue; | 128 | struct sk_buff_head *list = &sk->sk_receive_queue; |
| 130 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | 129 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); |
| @@ -153,14 +152,13 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
| 153 | * may be freed by other threads of control pulling packets | 152 | * may be freed by other threads of control pulling packets |
| 154 | * from the queue. | 153 | * from the queue. |
| 155 | */ | 154 | */ |
| 156 | skb_len = skb->len; | ||
| 157 | spin_lock_irqsave(&list->lock, flags); | 155 | spin_lock_irqsave(&list->lock, flags); |
| 158 | if (!sock_flag(sk, SOCK_DEAD)) | 156 | if (!sock_flag(sk, SOCK_DEAD)) |
| 159 | __skb_queue_tail(list, skb); | 157 | __skb_queue_tail(list, skb); |
| 160 | spin_unlock_irqrestore(&list->lock, flags); | 158 | spin_unlock_irqrestore(&list->lock, flags); |
| 161 | 159 | ||
| 162 | if (!sock_flag(sk, SOCK_DEAD)) | 160 | if (!sock_flag(sk, SOCK_DEAD)) |
| 163 | sk->sk_data_ready(sk, skb_len); | 161 | sk->sk_data_ready(sk); |
| 164 | else | 162 | else |
| 165 | kfree_skb(skb); | 163 | kfree_skb(skb); |
| 166 | return 0; | 164 | return 0; |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 4f55f9ce63fa..dac7f9b98687 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
| @@ -383,7 +383,7 @@ static void con_sock_state_closed(struct ceph_connection *con) | |||
| 383 | */ | 383 | */ |
| 384 | 384 | ||
| 385 | /* data available on socket, or listen socket received a connect */ | 385 | /* data available on socket, or listen socket received a connect */ |
| 386 | static void ceph_sock_data_ready(struct sock *sk, int count_unused) | 386 | static void ceph_sock_data_ready(struct sock *sk) |
| 387 | { | 387 | { |
| 388 | struct ceph_connection *con = sk->sk_user_data; | 388 | struct ceph_connection *con = sk->sk_user_data; |
| 389 | if (atomic_read(&con->msgr->stopping)) { | 389 | if (atomic_read(&con->msgr->stopping)) { |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index d068ec25db1e..0304f981f7ff 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
| @@ -3338,7 +3338,9 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
| 3338 | queue_map = skb_get_queue_mapping(pkt_dev->skb); | 3338 | queue_map = skb_get_queue_mapping(pkt_dev->skb); |
| 3339 | txq = netdev_get_tx_queue(odev, queue_map); | 3339 | txq = netdev_get_tx_queue(odev, queue_map); |
| 3340 | 3340 | ||
| 3341 | __netif_tx_lock_bh(txq); | 3341 | local_bh_disable(); |
| 3342 | |||
| 3343 | HARD_TX_LOCK(odev, txq, smp_processor_id()); | ||
| 3342 | 3344 | ||
| 3343 | if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) { | 3345 | if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) { |
| 3344 | ret = NETDEV_TX_BUSY; | 3346 | ret = NETDEV_TX_BUSY; |
| @@ -3374,7 +3376,9 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) | |||
| 3374 | pkt_dev->last_ok = 0; | 3376 | pkt_dev->last_ok = 0; |
| 3375 | } | 3377 | } |
| 3376 | unlock: | 3378 | unlock: |
| 3377 | __netif_tx_unlock_bh(txq); | 3379 | HARD_TX_UNLOCK(odev, txq); |
| 3380 | |||
| 3381 | local_bh_enable(); | ||
| 3378 | 3382 | ||
| 3379 | /* If pkt_dev->count is zero, then run forever */ | 3383 | /* If pkt_dev->count is zero, then run forever */ |
| 3380 | if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { | 3384 | if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { |
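__netif_tx_lock_bh() always takes the per-queue xmit lock; the HARD_TX_LOCK()/HARD_TX_UNLOCK() pair used by the regular transmit path only takes it when the driver has not declared lockless transmit (NETIF_F_LLTX), which is presumably why pktgen is switched over here, with the explicit local_bh_disable()/local_bh_enable() keeping the bottom-half protection that the _bh variant used to provide. The fragment below only mimics that discipline with stand-in types; FAKE_F_LLTX, fake_dev and fake_txq are invented, and bottom-half disabling has no userspace equivalent so it is noted only in comments.

    #include <pthread.h>

    #define FAKE_F_LLTX 0x1u          /* driver does its own TX serialisation   */

    struct fake_txq { pthread_mutex_t lock; };
    struct fake_dev { unsigned int features; };

    static void hard_tx_lock(struct fake_dev *d, struct fake_txq *q)
    {
        if (!(d->features & FAKE_F_LLTX))   /* cf. HARD_TX_LOCK()               */
            pthread_mutex_lock(&q->lock);
    }

    static void hard_tx_unlock(struct fake_dev *d, struct fake_txq *q)
    {
        if (!(d->features & FAKE_F_LLTX))
            pthread_mutex_unlock(&q->lock);
    }

    void xmit_one(struct fake_dev *d, struct fake_txq *q)
    {
        /* local_bh_disable() would go here in the kernel. */
        hard_tx_lock(d, q);
        /* ... hand the packet to the driver ... */
        hard_tx_unlock(d, q);
        /* local_bh_enable() here. */
    }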
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 30c7d35dd862..1b62343f5837 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
| @@ -3458,8 +3458,6 @@ static void sock_rmem_free(struct sk_buff *skb) | |||
| 3458 | */ | 3458 | */ |
| 3459 | int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) | 3459 | int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) |
| 3460 | { | 3460 | { |
| 3461 | int len = skb->len; | ||
| 3462 | |||
| 3463 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= | 3461 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= |
| 3464 | (unsigned int)sk->sk_rcvbuf) | 3462 | (unsigned int)sk->sk_rcvbuf) |
| 3465 | return -ENOMEM; | 3463 | return -ENOMEM; |
| @@ -3474,7 +3472,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) | |||
| 3474 | 3472 | ||
| 3475 | skb_queue_tail(&sk->sk_error_queue, skb); | 3473 | skb_queue_tail(&sk->sk_error_queue, skb); |
| 3476 | if (!sock_flag(sk, SOCK_DEAD)) | 3474 | if (!sock_flag(sk, SOCK_DEAD)) |
| 3477 | sk->sk_data_ready(sk, len); | 3475 | sk->sk_data_ready(sk); |
| 3478 | return 0; | 3476 | return 0; |
| 3479 | } | 3477 | } |
| 3480 | EXPORT_SYMBOL(sock_queue_err_skb); | 3478 | EXPORT_SYMBOL(sock_queue_err_skb); |
| @@ -3937,12 +3935,14 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet); | |||
| 3937 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) | 3935 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) |
| 3938 | { | 3936 | { |
| 3939 | const struct skb_shared_info *shinfo = skb_shinfo(skb); | 3937 | const struct skb_shared_info *shinfo = skb_shinfo(skb); |
| 3940 | unsigned int hdr_len; | ||
| 3941 | 3938 | ||
| 3942 | if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) | 3939 | if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) |
| 3943 | hdr_len = tcp_hdrlen(skb); | 3940 | return tcp_hdrlen(skb) + shinfo->gso_size; |
| 3944 | else | 3941 | |
| 3945 | hdr_len = sizeof(struct udphdr); | 3942 | /* UFO sets gso_size to the size of the fragmentation |
| 3946 | return hdr_len + shinfo->gso_size; | 3943 | * payload, i.e. the size of the L4 (UDP) header is already |
| 3944 | * accounted for. | ||
| 3945 | */ | ||
| 3946 | return shinfo->gso_size; | ||
| 3947 | } | 3947 | } |
| 3948 | EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); | 3948 | EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); |
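A quick worked example of what skb_gso_transport_seglen() now returns (the numbers are illustrative only): for a TCP skb with a 32-byte header (20-byte base plus timestamps) and a gso_size, i.e. MSS, of 1448, it returns 32 + 1448 = 1480. For a UFO skb it simply returns gso_size unchanged, because UFO already sizes gso_size as the per-fragment payload with the UDP header accounted for, which is why the removed "sizeof(struct udphdr) + gso_size" arithmetic was 8 bytes too large in that case.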
diff --git a/net/core/sock.c b/net/core/sock.c
index c0fc6bdad1e3..b4fff008136f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
| @@ -428,7 +428,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
| 428 | spin_unlock_irqrestore(&list->lock, flags); | 428 | spin_unlock_irqrestore(&list->lock, flags); |
| 429 | 429 | ||
| 430 | if (!sock_flag(sk, SOCK_DEAD)) | 430 | if (!sock_flag(sk, SOCK_DEAD)) |
| 431 | sk->sk_data_ready(sk, skb_len); | 431 | sk->sk_data_ready(sk); |
| 432 | return 0; | 432 | return 0; |
| 433 | } | 433 | } |
| 434 | EXPORT_SYMBOL(sock_queue_rcv_skb); | 434 | EXPORT_SYMBOL(sock_queue_rcv_skb); |
| @@ -2196,7 +2196,7 @@ static void sock_def_error_report(struct sock *sk) | |||
| 2196 | rcu_read_unlock(); | 2196 | rcu_read_unlock(); |
| 2197 | } | 2197 | } |
| 2198 | 2198 | ||
| 2199 | static void sock_def_readable(struct sock *sk, int len) | 2199 | static void sock_def_readable(struct sock *sk) |
| 2200 | { | 2200 | { |
| 2201 | struct socket_wq *wq; | 2201 | struct socket_wq *wq; |
| 2202 | 2202 | ||
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 14cdafad7a90..3c8ec7d4a34e 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
| @@ -28,7 +28,7 @@ static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb) | |||
| 28 | __skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4); | 28 | __skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4); |
| 29 | __skb_queue_tail(&sk->sk_receive_queue, skb); | 29 | __skb_queue_tail(&sk->sk_receive_queue, skb); |
| 30 | skb_set_owner_r(skb, sk); | 30 | skb_set_owner_r(skb, sk); |
| 31 | sk->sk_data_ready(sk, 0); | 31 | sk->sk_data_ready(sk); |
| 32 | } | 32 | } |
| 33 | 33 | ||
| 34 | static void dccp_fin(struct sock *sk, struct sk_buff *skb) | 34 | static void dccp_fin(struct sock *sk, struct sk_buff *skb) |
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 9e2f78bc1553..c69eb9c4fbb8 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
| @@ -237,7 +237,7 @@ int dccp_child_process(struct sock *parent, struct sock *child, | |||
| 237 | 237 | ||
| 238 | /* Wakeup parent, send SIGIO */ | 238 | /* Wakeup parent, send SIGIO */ |
| 239 | if (state == DCCP_RESPOND && child->sk_state != state) | 239 | if (state == DCCP_RESPOND && child->sk_state != state) |
| 240 | parent->sk_data_ready(parent, 0); | 240 | parent->sk_data_ready(parent); |
| 241 | } else { | 241 | } else { |
| 242 | /* Alas, it is possible again, because we do lookup | 242 | /* Alas, it is possible again, because we do lookup |
| 243 | * in main socket hash table and lock on listening | 243 | * in main socket hash table and lock on listening |
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index c344163e6ac0..fe5f01485d33 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
| @@ -585,7 +585,6 @@ out: | |||
| 585 | static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue) | 585 | static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue) |
| 586 | { | 586 | { |
| 587 | int err; | 587 | int err; |
| 588 | int skb_len; | ||
| 589 | 588 | ||
| 590 | /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces | 589 | /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces |
| 591 | number of warnings when compiling with -W --ANK | 590 | number of warnings when compiling with -W --ANK |
| @@ -600,12 +599,11 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig | |||
| 600 | if (err) | 599 | if (err) |
| 601 | goto out; | 600 | goto out; |
| 602 | 601 | ||
| 603 | skb_len = skb->len; | ||
| 604 | skb_set_owner_r(skb, sk); | 602 | skb_set_owner_r(skb, sk); |
| 605 | skb_queue_tail(queue, skb); | 603 | skb_queue_tail(queue, skb); |
| 606 | 604 | ||
| 607 | if (!sock_flag(sk, SOCK_DEAD)) | 605 | if (!sock_flag(sk, SOCK_DEAD)) |
| 608 | sk->sk_data_ready(sk, skb_len); | 606 | sk->sk_data_ready(sk); |
| 609 | out: | 607 | out: |
| 610 | return err; | 608 | return err; |
| 611 | } | 609 | } |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index ec4f762efda5..94213c891565 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
| @@ -463,6 +463,7 @@ static const struct net_device_ops ipgre_netdev_ops = { | |||
| 463 | static void ipgre_tunnel_setup(struct net_device *dev) | 463 | static void ipgre_tunnel_setup(struct net_device *dev) |
| 464 | { | 464 | { |
| 465 | dev->netdev_ops = &ipgre_netdev_ops; | 465 | dev->netdev_ops = &ipgre_netdev_ops; |
| 466 | dev->type = ARPHRD_IPGRE; | ||
| 466 | ip_tunnel_setup(dev, ipgre_net_id); | 467 | ip_tunnel_setup(dev, ipgre_net_id); |
| 467 | } | 468 | } |
| 468 | 469 | ||
| @@ -501,7 +502,6 @@ static int ipgre_tunnel_init(struct net_device *dev) | |||
| 501 | memcpy(dev->dev_addr, &iph->saddr, 4); | 502 | memcpy(dev->dev_addr, &iph->saddr, 4); |
| 502 | memcpy(dev->broadcast, &iph->daddr, 4); | 503 | memcpy(dev->broadcast, &iph->daddr, 4); |
| 503 | 504 | ||
| 504 | dev->type = ARPHRD_IPGRE; | ||
| 505 | dev->flags = IFF_NOARP; | 505 | dev->flags = IFF_NOARP; |
| 506 | dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; | 506 | dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; |
| 507 | dev->addr_len = 4; | 507 | dev->addr_len = 4; |
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 687ddef4e574..afcee51b90ed 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
| @@ -337,6 +337,7 @@ static const struct net_device_ops vti_netdev_ops = { | |||
| 337 | static void vti_tunnel_setup(struct net_device *dev) | 337 | static void vti_tunnel_setup(struct net_device *dev) |
| 338 | { | 338 | { |
| 339 | dev->netdev_ops = &vti_netdev_ops; | 339 | dev->netdev_ops = &vti_netdev_ops; |
| 340 | dev->type = ARPHRD_TUNNEL; | ||
| 340 | ip_tunnel_setup(dev, vti_net_id); | 341 | ip_tunnel_setup(dev, vti_net_id); |
| 341 | } | 342 | } |
| 342 | 343 | ||
| @@ -348,7 +349,6 @@ static int vti_tunnel_init(struct net_device *dev) | |||
| 348 | memcpy(dev->dev_addr, &iph->saddr, 4); | 349 | memcpy(dev->dev_addr, &iph->saddr, 4); |
| 349 | memcpy(dev->broadcast, &iph->daddr, 4); | 350 | memcpy(dev->broadcast, &iph->daddr, 4); |
| 350 | 351 | ||
| 351 | dev->type = ARPHRD_TUNNEL; | ||
| 352 | dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); | 352 | dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); |
| 353 | dev->mtu = ETH_DATA_LEN; | 353 | dev->mtu = ETH_DATA_LEN; |
| 354 | dev->flags = IFF_NOARP; | 354 | dev->flags = IFF_NOARP; |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e1661f46fd19..d6b46eb2f94c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
| @@ -4413,7 +4413,7 @@ queue_and_out: | |||
| 4413 | if (eaten > 0) | 4413 | if (eaten > 0) |
| 4414 | kfree_skb_partial(skb, fragstolen); | 4414 | kfree_skb_partial(skb, fragstolen); |
| 4415 | if (!sock_flag(sk, SOCK_DEAD)) | 4415 | if (!sock_flag(sk, SOCK_DEAD)) |
| 4416 | sk->sk_data_ready(sk, 0); | 4416 | sk->sk_data_ready(sk); |
| 4417 | return; | 4417 | return; |
| 4418 | } | 4418 | } |
| 4419 | 4419 | ||
| @@ -4914,7 +4914,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t | |||
| 4914 | BUG(); | 4914 | BUG(); |
| 4915 | tp->urg_data = TCP_URG_VALID | tmp; | 4915 | tp->urg_data = TCP_URG_VALID | tmp; |
| 4916 | if (!sock_flag(sk, SOCK_DEAD)) | 4916 | if (!sock_flag(sk, SOCK_DEAD)) |
| 4917 | sk->sk_data_ready(sk, 0); | 4917 | sk->sk_data_ready(sk); |
| 4918 | } | 4918 | } |
| 4919 | } | 4919 | } |
| 4920 | } | 4920 | } |
| @@ -5000,11 +5000,11 @@ static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, | |||
| 5000 | (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || | 5000 | (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || |
| 5001 | (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { | 5001 | (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { |
| 5002 | tp->ucopy.wakeup = 1; | 5002 | tp->ucopy.wakeup = 1; |
| 5003 | sk->sk_data_ready(sk, 0); | 5003 | sk->sk_data_ready(sk); |
| 5004 | } | 5004 | } |
| 5005 | } else if (chunk > 0) { | 5005 | } else if (chunk > 0) { |
| 5006 | tp->ucopy.wakeup = 1; | 5006 | tp->ucopy.wakeup = 1; |
| 5007 | sk->sk_data_ready(sk, 0); | 5007 | sk->sk_data_ready(sk); |
| 5008 | } | 5008 | } |
| 5009 | out: | 5009 | out: |
| 5010 | return copied_early; | 5010 | return copied_early; |
| @@ -5275,7 +5275,7 @@ no_ack: | |||
| 5275 | #endif | 5275 | #endif |
| 5276 | if (eaten) | 5276 | if (eaten) |
| 5277 | kfree_skb_partial(skb, fragstolen); | 5277 | kfree_skb_partial(skb, fragstolen); |
| 5278 | sk->sk_data_ready(sk, 0); | 5278 | sk->sk_data_ready(sk); |
| 5279 | return; | 5279 | return; |
| 5280 | } | 5280 | } |
| 5281 | } | 5281 | } |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6379894ec210..438f3b95143d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
| @@ -1434,7 +1434,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk, | |||
| 1434 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; | 1434 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; |
| 1435 | tp->syn_data_acked = 1; | 1435 | tp->syn_data_acked = 1; |
| 1436 | } | 1436 | } |
| 1437 | sk->sk_data_ready(sk, 0); | 1437 | sk->sk_data_ready(sk); |
| 1438 | bh_unlock_sock(child); | 1438 | bh_unlock_sock(child); |
| 1439 | sock_put(child); | 1439 | sock_put(child); |
| 1440 | WARN_ON(req->sk == NULL); | 1440 | WARN_ON(req->sk == NULL); |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index ca788ada5bd3..05c1b155251d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
| @@ -745,7 +745,7 @@ int tcp_child_process(struct sock *parent, struct sock *child, | |||
| 745 | skb->len); | 745 | skb->len); |
| 746 | /* Wakeup parent, send SIGIO */ | 746 | /* Wakeup parent, send SIGIO */ |
| 747 | if (state == TCP_SYN_RECV && child->sk_state != state) | 747 | if (state == TCP_SYN_RECV && child->sk_state != state) |
| 748 | parent->sk_data_ready(parent, 0); | 748 | parent->sk_data_ready(parent); |
| 749 | } else { | 749 | } else { |
| 750 | /* Alas, it is possible again, because we do lookup | 750 | /* Alas, it is possible again, because we do lookup |
| 751 | * in main socket hash table and lock on listening | 751 | * in main socket hash table and lock on listening |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5ca56cee2dae..e289830ed6e3 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
| @@ -798,7 +798,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | |||
| 798 | __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); | 798 | __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); |
| 799 | 799 | ||
| 800 | fl6.flowi6_proto = IPPROTO_TCP; | 800 | fl6.flowi6_proto = IPPROTO_TCP; |
| 801 | if (rt6_need_strict(&fl6.daddr) || !oif) | 801 | if (rt6_need_strict(&fl6.daddr) && !oif) |
| 802 | fl6.flowi6_oif = inet6_iif(skb); | 802 | fl6.flowi6_oif = inet6_iif(skb); |
| 803 | else | 803 | else |
| 804 | fl6.flowi6_oif = oif; | 804 | fl6.flowi6_oif = oif; |
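The one-character change from || to && in tcp_v6_send_response() alters when the incoming interface is used as a fallback. Reading the condition both ways (derived directly from the hunk above) gives:

    strict dest (rt6_need_strict)   oif passed in   old code used    new code uses
    yes                             yes             inet6_iif(skb)   oif
    yes                             no              inet6_iif(skb)   inet6_iif(skb)
    no                              yes             oif              oif
    no                              no              inet6_iif(skb)   oif (0, unconstrained)

The first row is the functional fix: a caller-supplied outgoing interface is no longer ignored just because the destination needs a strict interface match.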
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a5e03119107a..01e77b0ae075 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
| @@ -1757,7 +1757,7 @@ static int iucv_callback_connreq(struct iucv_path *path, | |||
| 1757 | 1757 | ||
| 1758 | /* Wake up accept */ | 1758 | /* Wake up accept */ |
| 1759 | nsk->sk_state = IUCV_CONNECTED; | 1759 | nsk->sk_state = IUCV_CONNECTED; |
| 1760 | sk->sk_data_ready(sk, 1); | 1760 | sk->sk_data_ready(sk); |
| 1761 | err = 0; | 1761 | err = 0; |
| 1762 | fail: | 1762 | fail: |
| 1763 | bh_unlock_sock(sk); | 1763 | bh_unlock_sock(sk); |
| @@ -1968,7 +1968,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) | |||
| 1968 | if (!err) { | 1968 | if (!err) { |
| 1969 | iucv_accept_enqueue(sk, nsk); | 1969 | iucv_accept_enqueue(sk, nsk); |
| 1970 | nsk->sk_state = IUCV_CONNECTED; | 1970 | nsk->sk_state = IUCV_CONNECTED; |
| 1971 | sk->sk_data_ready(sk, 1); | 1971 | sk->sk_data_ready(sk); |
| 1972 | } else | 1972 | } else |
| 1973 | iucv_sock_kill(nsk); | 1973 | iucv_sock_kill(nsk); |
| 1974 | bh_unlock_sock(sk); | 1974 | bh_unlock_sock(sk); |
diff --git a/net/key/af_key.c b/net/key/af_key.c
index e72589a8400d..f3c83073afc4 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
| @@ -205,7 +205,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, | |||
| 205 | if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { | 205 | if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { |
| 206 | skb_set_owner_r(*skb2, sk); | 206 | skb_set_owner_r(*skb2, sk); |
| 207 | skb_queue_tail(&sk->sk_receive_queue, *skb2); | 207 | skb_queue_tail(&sk->sk_receive_queue, *skb2); |
| 208 | sk->sk_data_ready(sk, (*skb2)->len); | 208 | sk->sk_data_ready(sk); |
| 209 | *skb2 = NULL; | 209 | *skb2 = NULL; |
| 210 | err = 0; | 210 | err = 0; |
| 211 | } | 211 | } |
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index d276e2d4a589..950909f04ee6 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
| @@ -753,9 +753,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
| 753 | session->deref = pppol2tp_session_sock_put; | 753 | session->deref = pppol2tp_session_sock_put; |
| 754 | 754 | ||
| 755 | /* If PMTU discovery was enabled, use the MTU that was discovered */ | 755 | /* If PMTU discovery was enabled, use the MTU that was discovered */ |
| 756 | dst = sk_dst_get(sk); | 756 | dst = sk_dst_get(tunnel->sock); |
| 757 | if (dst != NULL) { | 757 | if (dst != NULL) { |
| 758 | u32 pmtu = dst_mtu(__sk_dst_get(sk)); | 758 | u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock)); |
| 759 | if (pmtu != 0) | 759 | if (pmtu != 0) |
| 760 | session->mtu = session->mru = pmtu - | 760 | session->mtu = session->mru = pmtu - |
| 761 | PPPOL2TP_HEADER_OVERHEAD; | 761 | PPPOL2TP_HEADER_OVERHEAD; |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index c2d585c4f7c5..894cda0206bb 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -1653,7 +1653,7 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) | |||
| 1653 | else | 1653 | else |
| 1654 | #endif /* CONFIG_NETLINK_MMAP */ | 1654 | #endif /* CONFIG_NETLINK_MMAP */ |
| 1655 | skb_queue_tail(&sk->sk_receive_queue, skb); | 1655 | skb_queue_tail(&sk->sk_receive_queue, skb); |
| 1656 | sk->sk_data_ready(sk, len); | 1656 | sk->sk_data_ready(sk); |
| 1657 | return len; | 1657 | return len; |
| 1658 | } | 1658 | } |
| 1659 | 1659 | ||
| @@ -2394,7 +2394,7 @@ out: | |||
| 2394 | return err ? : copied; | 2394 | return err ? : copied; |
| 2395 | } | 2395 | } |
| 2396 | 2396 | ||
| 2397 | static void netlink_data_ready(struct sock *sk, int len) | 2397 | static void netlink_data_ready(struct sock *sk) |
| 2398 | { | 2398 | { |
| 2399 | BUG(); | 2399 | BUG(); |
| 2400 | } | 2400 | } |
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index b74aa0755521..ede50d197e10 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
| @@ -1011,7 +1011,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev) | |||
| 1011 | skb_queue_head(&sk->sk_receive_queue, skb); | 1011 | skb_queue_head(&sk->sk_receive_queue, skb); |
| 1012 | 1012 | ||
| 1013 | if (!sock_flag(sk, SOCK_DEAD)) | 1013 | if (!sock_flag(sk, SOCK_DEAD)) |
| 1014 | sk->sk_data_ready(sk, skb->len); | 1014 | sk->sk_data_ready(sk); |
| 1015 | 1015 | ||
| 1016 | bh_unlock_sock(sk); | 1016 | bh_unlock_sock(sk); |
| 1017 | 1017 | ||
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c index b486f12ae243..b4671958fcf9 100644 --- a/net/nfc/llcp_core.c +++ b/net/nfc/llcp_core.c | |||
| @@ -976,7 +976,7 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local, | |||
| 976 | new_sk->sk_state = LLCP_CONNECTED; | 976 | new_sk->sk_state = LLCP_CONNECTED; |
| 977 | 977 | ||
| 978 | /* Wake the listening processes */ | 978 | /* Wake the listening processes */ |
| 979 | parent->sk_data_ready(parent, 0); | 979 | parent->sk_data_ready(parent); |
| 980 | 980 | ||
| 981 | /* Send CC */ | 981 | /* Send CC */ |
| 982 | nfc_llcp_send_cc(new_sock); | 982 | nfc_llcp_send_cc(new_sock); |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 72e0c71fb01d..b85c67ccb797 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -1848,7 +1848,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 1848 | skb->dropcount = atomic_read(&sk->sk_drops); | 1848 | skb->dropcount = atomic_read(&sk->sk_drops); |
| 1849 | __skb_queue_tail(&sk->sk_receive_queue, skb); | 1849 | __skb_queue_tail(&sk->sk_receive_queue, skb); |
| 1850 | spin_unlock(&sk->sk_receive_queue.lock); | 1850 | spin_unlock(&sk->sk_receive_queue.lock); |
| 1851 | sk->sk_data_ready(sk, skb->len); | 1851 | sk->sk_data_ready(sk); |
| 1852 | return 0; | 1852 | return 0; |
| 1853 | 1853 | ||
| 1854 | drop_n_acct: | 1854 | drop_n_acct: |
| @@ -2054,7 +2054,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 2054 | else | 2054 | else |
| 2055 | prb_clear_blk_fill_status(&po->rx_ring); | 2055 | prb_clear_blk_fill_status(&po->rx_ring); |
| 2056 | 2056 | ||
| 2057 | sk->sk_data_ready(sk, 0); | 2057 | sk->sk_data_ready(sk); |
| 2058 | 2058 | ||
| 2059 | drop_n_restore: | 2059 | drop_n_restore: |
| 2060 | if (skb_head != skb->data && skb_shared(skb)) { | 2060 | if (skb_head != skb->data && skb_shared(skb)) { |
| @@ -2069,7 +2069,7 @@ ring_is_full: | |||
| 2069 | po->stats.stats1.tp_drops++; | 2069 | po->stats.stats1.tp_drops++; |
| 2070 | spin_unlock(&sk->sk_receive_queue.lock); | 2070 | spin_unlock(&sk->sk_receive_queue.lock); |
| 2071 | 2071 | ||
| 2072 | sk->sk_data_ready(sk, 0); | 2072 | sk->sk_data_ready(sk); |
| 2073 | kfree_skb(copy_skb); | 2073 | kfree_skb(copy_skb); |
| 2074 | goto drop_n_restore; | 2074 | goto drop_n_restore; |
| 2075 | } | 2075 | } |
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c index a2fba7edfd1f..66dc65e7c6a1 100644 --- a/net/phonet/pep-gprs.c +++ b/net/phonet/pep-gprs.c | |||
| @@ -37,7 +37,7 @@ | |||
| 37 | struct gprs_dev { | 37 | struct gprs_dev { |
| 38 | struct sock *sk; | 38 | struct sock *sk; |
| 39 | void (*old_state_change)(struct sock *); | 39 | void (*old_state_change)(struct sock *); |
| 40 | void (*old_data_ready)(struct sock *, int); | 40 | void (*old_data_ready)(struct sock *); |
| 41 | void (*old_write_space)(struct sock *); | 41 | void (*old_write_space)(struct sock *); |
| 42 | 42 | ||
| 43 | struct net_device *dev; | 43 | struct net_device *dev; |
| @@ -146,7 +146,7 @@ drop: | |||
| 146 | return err; | 146 | return err; |
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | static void gprs_data_ready(struct sock *sk, int len) | 149 | static void gprs_data_ready(struct sock *sk) |
| 150 | { | 150 | { |
| 151 | struct gprs_dev *gp = sk->sk_user_data; | 151 | struct gprs_dev *gp = sk->sk_user_data; |
| 152 | struct sk_buff *skb; | 152 | struct sk_buff *skb; |
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index e77411735de8..70a547ea5177 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
| @@ -462,10 +462,9 @@ out: | |||
| 462 | queue: | 462 | queue: |
| 463 | skb->dev = NULL; | 463 | skb->dev = NULL; |
| 464 | skb_set_owner_r(skb, sk); | 464 | skb_set_owner_r(skb, sk); |
| 465 | err = skb->len; | ||
| 466 | skb_queue_tail(queue, skb); | 465 | skb_queue_tail(queue, skb); |
| 467 | if (!sock_flag(sk, SOCK_DEAD)) | 466 | if (!sock_flag(sk, SOCK_DEAD)) |
| 468 | sk->sk_data_ready(sk, err); | 467 | sk->sk_data_ready(sk); |
| 469 | return NET_RX_SUCCESS; | 468 | return NET_RX_SUCCESS; |
| 470 | } | 469 | } |
| 471 | 470 | ||
| @@ -587,10 +586,9 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
| 587 | pn->rx_credits--; | 586 | pn->rx_credits--; |
| 588 | skb->dev = NULL; | 587 | skb->dev = NULL; |
| 589 | skb_set_owner_r(skb, sk); | 588 | skb_set_owner_r(skb, sk); |
| 590 | err = skb->len; | ||
| 591 | skb_queue_tail(&sk->sk_receive_queue, skb); | 589 | skb_queue_tail(&sk->sk_receive_queue, skb); |
| 592 | if (!sock_flag(sk, SOCK_DEAD)) | 590 | if (!sock_flag(sk, SOCK_DEAD)) |
| 593 | sk->sk_data_ready(sk, err); | 591 | sk->sk_data_ready(sk); |
| 594 | return NET_RX_SUCCESS; | 592 | return NET_RX_SUCCESS; |
| 595 | 593 | ||
| 596 | case PNS_PEP_CONNECT_RESP: | 594 | case PNS_PEP_CONNECT_RESP: |
| @@ -698,7 +696,7 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
| 698 | skb_queue_head(&sk->sk_receive_queue, skb); | 696 | skb_queue_head(&sk->sk_receive_queue, skb); |
| 699 | sk_acceptq_added(sk); | 697 | sk_acceptq_added(sk); |
| 700 | if (!sock_flag(sk, SOCK_DEAD)) | 698 | if (!sock_flag(sk, SOCK_DEAD)) |
| 701 | sk->sk_data_ready(sk, 0); | 699 | sk->sk_data_ready(sk); |
| 702 | return NET_RX_SUCCESS; | 700 | return NET_RX_SUCCESS; |
| 703 | 701 | ||
| 704 | case PNS_PEP_DISCONNECT_REQ: | 702 | case PNS_PEP_DISCONNECT_REQ: |
diff --git a/net/rds/tcp.h b/net/rds/tcp.h index 9cf2927d0021..65637491f728 100644 --- a/net/rds/tcp.h +++ b/net/rds/tcp.h | |||
| @@ -61,12 +61,12 @@ void rds_tcp_state_change(struct sock *sk); | |||
| 61 | /* tcp_listen.c */ | 61 | /* tcp_listen.c */ |
| 62 | int rds_tcp_listen_init(void); | 62 | int rds_tcp_listen_init(void); |
| 63 | void rds_tcp_listen_stop(void); | 63 | void rds_tcp_listen_stop(void); |
| 64 | void rds_tcp_listen_data_ready(struct sock *sk, int bytes); | 64 | void rds_tcp_listen_data_ready(struct sock *sk); |
| 65 | 65 | ||
| 66 | /* tcp_recv.c */ | 66 | /* tcp_recv.c */ |
| 67 | int rds_tcp_recv_init(void); | 67 | int rds_tcp_recv_init(void); |
| 68 | void rds_tcp_recv_exit(void); | 68 | void rds_tcp_recv_exit(void); |
| 69 | void rds_tcp_data_ready(struct sock *sk, int bytes); | 69 | void rds_tcp_data_ready(struct sock *sk); |
| 70 | int rds_tcp_recv(struct rds_connection *conn); | 70 | int rds_tcp_recv(struct rds_connection *conn); |
| 71 | void rds_tcp_inc_free(struct rds_incoming *inc); | 71 | void rds_tcp_inc_free(struct rds_incoming *inc); |
| 72 | int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, | 72 | int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, |
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 7787537e9c2e..4e638f851185 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c | |||
| @@ -108,9 +108,9 @@ static void rds_tcp_accept_worker(struct work_struct *work) | |||
| 108 | cond_resched(); | 108 | cond_resched(); |
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | void rds_tcp_listen_data_ready(struct sock *sk, int bytes) | 111 | void rds_tcp_listen_data_ready(struct sock *sk) |
| 112 | { | 112 | { |
| 113 | void (*ready)(struct sock *sk, int bytes); | 113 | void (*ready)(struct sock *sk); |
| 114 | 114 | ||
| 115 | rdsdebug("listen data ready sk %p\n", sk); | 115 | rdsdebug("listen data ready sk %p\n", sk); |
| 116 | 116 | ||
| @@ -132,7 +132,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes) | |||
| 132 | 132 | ||
| 133 | out: | 133 | out: |
| 134 | read_unlock(&sk->sk_callback_lock); | 134 | read_unlock(&sk->sk_callback_lock); |
| 135 | ready(sk, bytes); | 135 | ready(sk); |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | int rds_tcp_listen_init(void) | 138 | int rds_tcp_listen_init(void) |
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c index 4fac4f2bb9dc..9ae6e0a264ec 100644 --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c | |||
| @@ -314,13 +314,13 @@ int rds_tcp_recv(struct rds_connection *conn) | |||
| 314 | return ret; | 314 | return ret; |
| 315 | } | 315 | } |
| 316 | 316 | ||
| 317 | void rds_tcp_data_ready(struct sock *sk, int bytes) | 317 | void rds_tcp_data_ready(struct sock *sk) |
| 318 | { | 318 | { |
| 319 | void (*ready)(struct sock *sk, int bytes); | 319 | void (*ready)(struct sock *sk); |
| 320 | struct rds_connection *conn; | 320 | struct rds_connection *conn; |
| 321 | struct rds_tcp_connection *tc; | 321 | struct rds_tcp_connection *tc; |
| 322 | 322 | ||
| 323 | rdsdebug("data ready sk %p bytes %d\n", sk, bytes); | 323 | rdsdebug("data ready sk %p\n", sk); |
| 324 | 324 | ||
| 325 | read_lock(&sk->sk_callback_lock); | 325 | read_lock(&sk->sk_callback_lock); |
| 326 | conn = sk->sk_user_data; | 326 | conn = sk->sk_user_data; |
| @@ -337,7 +337,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes) | |||
| 337 | queue_delayed_work(rds_wq, &conn->c_recv_w, 0); | 337 | queue_delayed_work(rds_wq, &conn->c_recv_w, 0); |
| 338 | out: | 338 | out: |
| 339 | read_unlock(&sk->sk_callback_lock); | 339 | read_unlock(&sk->sk_callback_lock); |
| 340 | ready(sk, bytes); | 340 | ready(sk); |
| 341 | } | 341 | } |
| 342 | 342 | ||
| 343 | int rds_tcp_recv_init(void) | 343 | int rds_tcp_recv_init(void) |
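
The hunks above and below all follow from the same interface change: sk_data_ready() loses its byte-count argument, so saved callbacks are now chained with the socket alone. Below is a minimal sketch of the save/override/chain pattern these files share; my_ctx, my_data_ready and my_install are illustrative names, not part of the patch, and the sk_callback_lock discipline mirrors the rds hunks above.

    /* Sketch only: overriding and chaining sk_data_ready after the
     * prototype change.  All "my_*" identifiers are illustrative. */
    struct my_ctx {
            void (*saved_data_ready)(struct sock *sk);
    };

    static void my_data_ready(struct sock *sk)
    {
            void (*ready)(struct sock *sk) = NULL;
            struct my_ctx *ctx;

            read_lock(&sk->sk_callback_lock);
            ctx = sk->sk_user_data;
            if (ctx) {
                    /* note that data is pending, kick a worker, etc. */
                    ready = ctx->saved_data_ready;
            }
            read_unlock(&sk->sk_callback_lock);

            if (ready)
                    ready(sk);      /* chained without a byte count */
    }

    static void my_install(struct sock *sk, struct my_ctx *ctx)
    {
            write_lock_bh(&sk->sk_callback_lock);
            ctx->saved_data_ready = sk->sk_data_ready;
            sk->sk_user_data = ctx;
            sk->sk_data_ready = my_data_ready;
            write_unlock_bh(&sk->sk_callback_lock);
    }

Consumers that still need a length (the old second argument) now read it off the queued skbs instead, which is why several hunks above simply drop the value they used to pass.
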
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index c2cca2ee6aef..8451c8cdc9de 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
| @@ -1041,7 +1041,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros | |||
| 1041 | rose_start_heartbeat(make); | 1041 | rose_start_heartbeat(make); |
| 1042 | 1042 | ||
| 1043 | if (!sock_flag(sk, SOCK_DEAD)) | 1043 | if (!sock_flag(sk, SOCK_DEAD)) |
| 1044 | sk->sk_data_ready(sk, skb->len); | 1044 | sk->sk_data_ready(sk); |
| 1045 | 1045 | ||
| 1046 | return 1; | 1046 | return 1; |
| 1047 | } | 1047 | } |
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index 73742647c135..63b21e580de9 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c | |||
| @@ -113,7 +113,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, | |||
| 113 | spin_unlock_bh(&sk->sk_receive_queue.lock); | 113 | spin_unlock_bh(&sk->sk_receive_queue.lock); |
| 114 | 114 | ||
| 115 | if (!sock_flag(sk, SOCK_DEAD)) | 115 | if (!sock_flag(sk, SOCK_DEAD)) |
| 116 | sk->sk_data_ready(sk, skb_len); | 116 | sk->sk_data_ready(sk); |
| 117 | } | 117 | } |
| 118 | skb = NULL; | 118 | skb = NULL; |
| 119 | } else { | 119 | } else { |
| @@ -632,14 +632,14 @@ cant_find_conn: | |||
| 632 | * handle data received on the local endpoint | 632 | * handle data received on the local endpoint |
| 633 | * - may be called in interrupt context | 633 | * - may be called in interrupt context |
| 634 | */ | 634 | */ |
| 635 | void rxrpc_data_ready(struct sock *sk, int count) | 635 | void rxrpc_data_ready(struct sock *sk) |
| 636 | { | 636 | { |
| 637 | struct rxrpc_skb_priv *sp; | 637 | struct rxrpc_skb_priv *sp; |
| 638 | struct rxrpc_local *local; | 638 | struct rxrpc_local *local; |
| 639 | struct sk_buff *skb; | 639 | struct sk_buff *skb; |
| 640 | int ret; | 640 | int ret; |
| 641 | 641 | ||
| 642 | _enter("%p, %d", sk, count); | 642 | _enter("%p", sk); |
| 643 | 643 | ||
| 644 | ASSERT(!irqs_disabled()); | 644 | ASSERT(!irqs_disabled()); |
| 645 | 645 | ||
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index c831d44b0841..ba9fd36d3f15 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h | |||
| @@ -518,7 +518,7 @@ void rxrpc_UDP_error_handler(struct work_struct *); | |||
| 518 | */ | 518 | */ |
| 519 | extern const char *rxrpc_pkts[]; | 519 | extern const char *rxrpc_pkts[]; |
| 520 | 520 | ||
| 521 | void rxrpc_data_ready(struct sock *, int); | 521 | void rxrpc_data_ready(struct sock *); |
| 522 | int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool); | 522 | int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool); |
| 523 | void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *); | 523 | void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *); |
| 524 | 524 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 5f83a6a2fa67..e13519e9df80 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -6604,6 +6604,12 @@ static void sctp_wake_up_waiters(struct sock *sk, | |||
| 6604 | if (asoc->ep->sndbuf_policy) | 6604 | if (asoc->ep->sndbuf_policy) |
| 6605 | return __sctp_write_space(asoc); | 6605 | return __sctp_write_space(asoc); |
| 6606 | 6606 | ||
| 6607 | /* If association goes down and is just flushing its | ||
| 6608 | * outq, then just normally notify others. | ||
| 6609 | */ | ||
| 6610 | if (asoc->base.dead) | ||
| 6611 | return sctp_write_space(sk); | ||
| 6612 | |||
| 6607 | /* Accounting for the sndbuf space is per socket, so we | 6613 | /* Accounting for the sndbuf space is per socket, so we |
| 6608 | * need to wake up others, try to be fair and in case of | 6614 | * need to wake up others, try to be fair and in case of |
| 6609 | * other associations, let them have a go first instead | 6615 | * other associations, let them have a go first instead |
| @@ -6739,7 +6745,7 @@ do_nonblock: | |||
| 6739 | goto out; | 6745 | goto out; |
| 6740 | } | 6746 | } |
| 6741 | 6747 | ||
| 6742 | void sctp_data_ready(struct sock *sk, int len) | 6748 | void sctp_data_ready(struct sock *sk) |
| 6743 | { | 6749 | { |
| 6744 | struct socket_wq *wq; | 6750 | struct socket_wq *wq; |
| 6745 | 6751 | ||
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index 5dc94117e9d4..7144eb6a1b95 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
| @@ -259,7 +259,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) | |||
| 259 | sctp_ulpq_clear_pd(ulpq); | 259 | sctp_ulpq_clear_pd(ulpq); |
| 260 | 260 | ||
| 261 | if (queue == &sk->sk_receive_queue) | 261 | if (queue == &sk->sk_receive_queue) |
| 262 | sk->sk_data_ready(sk, 0); | 262 | sk->sk_data_ready(sk); |
| 263 | return 1; | 263 | return 1; |
| 264 | 264 | ||
| 265 | out_free: | 265 | out_free: |
| @@ -1135,5 +1135,5 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp) | |||
| 1135 | 1135 | ||
| 1136 | /* If there is data waiting, send it up the socket now. */ | 1136 | /* If there is data waiting, send it up the socket now. */ |
| 1137 | if (sctp_ulpq_clear_pd(ulpq) || ev) | 1137 | if (sctp_ulpq_clear_pd(ulpq) || ev) |
| 1138 | sk->sk_data_ready(sk, 0); | 1138 | sk->sk_data_ready(sk); |
| 1139 | } | 1139 | } |
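
The two lines added to sctp_wake_up_waiters() short-circuit the wake-up path once an association is being torn down and is only flushing its outqueue. A minimal sketch of the resulting dispatch order, reconstructed from this hunk alone (the per-socket walk over the other associations is elided):

    /* Sketch of the branch order after this hunk; not the full function. */
    static void wake_up_waiters_sketch(struct sock *sk,
                                       struct sctp_association *asoc)
    {
            if (asoc->ep->sndbuf_policy)    /* per-association accounting */
                    return __sctp_write_space(asoc);

            if (asoc->base.dead)            /* dying association: plain wakeup */
                    return sctp_write_space(sk);

            /* per-socket accounting: give the socket's other
             * associations a chance first (elided) */
    }
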
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig index 241b54f30204..0754d0f466d2 100644 --- a/net/sunrpc/Kconfig +++ b/net/sunrpc/Kconfig | |||
| @@ -9,19 +9,6 @@ config SUNRPC_BACKCHANNEL | |||
| 9 | bool | 9 | bool |
| 10 | depends on SUNRPC | 10 | depends on SUNRPC |
| 11 | 11 | ||
| 12 | config SUNRPC_XPRT_RDMA | ||
| 13 | tristate | ||
| 14 | depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS | ||
| 15 | default SUNRPC && INFINIBAND | ||
| 16 | help | ||
| 17 | This option allows the NFS client and server to support | ||
| 18 | an RDMA-enabled transport. | ||
| 19 | |||
| 20 | To compile RPC client RDMA transport support as a module, | ||
| 21 | choose M here: the module will be called xprtrdma. | ||
| 22 | |||
| 23 | If unsure, say N. | ||
| 24 | |||
| 25 | config SUNRPC_SWAP | 12 | config SUNRPC_SWAP |
| 26 | bool | 13 | bool |
| 27 | depends on SUNRPC | 14 | depends on SUNRPC |
| @@ -57,3 +44,29 @@ config SUNRPC_DEBUG | |||
| 57 | but makes troubleshooting NFS issues significantly harder. | 44 | but makes troubleshooting NFS issues significantly harder. |
| 58 | 45 | ||
| 59 | If unsure, say Y. | 46 | If unsure, say Y. |
| 47 | |||
| 48 | config SUNRPC_XPRT_RDMA_CLIENT | ||
| 49 | tristate "RPC over RDMA Client Support" | ||
| 50 | depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS | ||
| 51 | default SUNRPC && INFINIBAND | ||
| 52 | help | ||
| 53 | This option allows the NFS client to support an RDMA-enabled | ||
| 54 | transport. | ||
| 55 | |||
| 56 | To compile RPC client RDMA transport support as a module, | ||
| 57 | choose M here: the module will be called xprtrdma. | ||
| 58 | |||
| 59 | If unsure, say N. | ||
| 60 | |||
| 61 | config SUNRPC_XPRT_RDMA_SERVER | ||
| 62 | tristate "RPC over RDMA Server Support" | ||
| 63 | depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS | ||
| 64 | default SUNRPC && INFINIBAND | ||
| 65 | help | ||
| 66 | This option allows the NFS server to support an RDMA-enabled | ||
| 67 | transport. | ||
| 68 | |||
| 69 | To compile RPC server RDMA transport support as a module, | ||
| 70 | choose M here: the module will be called svcrdma. | ||
| 71 | |||
| 72 | If unsure, say N. | ||
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile index 8209a0411bca..e5a7a1cac8f3 100644 --- a/net/sunrpc/Makefile +++ b/net/sunrpc/Makefile | |||
| @@ -5,7 +5,8 @@ | |||
| 5 | 5 | ||
| 6 | obj-$(CONFIG_SUNRPC) += sunrpc.o | 6 | obj-$(CONFIG_SUNRPC) += sunrpc.o |
| 7 | obj-$(CONFIG_SUNRPC_GSS) += auth_gss/ | 7 | obj-$(CONFIG_SUNRPC_GSS) += auth_gss/ |
| 8 | obj-$(CONFIG_SUNRPC_XPRT_RDMA) += xprtrdma/ | 8 | |
| 9 | obj-y += xprtrdma/ | ||
| 9 | 10 | ||
| 10 | sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \ | 11 | sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \ |
| 11 | auth.o auth_null.o auth_unix.o auth_generic.o \ | 12 | auth.o auth_null.o auth_unix.o auth_generic.o \ |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index f400445d1a44..2e6ab10734f6 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -438,6 +438,38 @@ out_no_rpciod: | |||
| 438 | return ERR_PTR(err); | 438 | return ERR_PTR(err); |
| 439 | } | 439 | } |
| 440 | 440 | ||
| 441 | struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, | ||
| 442 | struct rpc_xprt *xprt) | ||
| 443 | { | ||
| 444 | struct rpc_clnt *clnt = NULL; | ||
| 445 | |||
| 446 | clnt = rpc_new_client(args, xprt, NULL); | ||
| 447 | if (IS_ERR(clnt)) | ||
| 448 | return clnt; | ||
| 449 | |||
| 450 | if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { | ||
| 451 | int err = rpc_ping(clnt); | ||
| 452 | if (err != 0) { | ||
| 453 | rpc_shutdown_client(clnt); | ||
| 454 | return ERR_PTR(err); | ||
| 455 | } | ||
| 456 | } | ||
| 457 | |||
| 458 | clnt->cl_softrtry = 1; | ||
| 459 | if (args->flags & RPC_CLNT_CREATE_HARDRTRY) | ||
| 460 | clnt->cl_softrtry = 0; | ||
| 461 | |||
| 462 | if (args->flags & RPC_CLNT_CREATE_AUTOBIND) | ||
| 463 | clnt->cl_autobind = 1; | ||
| 464 | if (args->flags & RPC_CLNT_CREATE_DISCRTRY) | ||
| 465 | clnt->cl_discrtry = 1; | ||
| 466 | if (!(args->flags & RPC_CLNT_CREATE_QUIET)) | ||
| 467 | clnt->cl_chatty = 1; | ||
| 468 | |||
| 469 | return clnt; | ||
| 470 | } | ||
| 471 | EXPORT_SYMBOL_GPL(rpc_create_xprt); | ||
| 472 | |||
| 441 | /** | 473 | /** |
| 442 | * rpc_create - create an RPC client and transport with one call | 474 | * rpc_create - create an RPC client and transport with one call |
| 443 | * @args: rpc_clnt create argument structure | 475 | * @args: rpc_clnt create argument structure |
| @@ -451,7 +483,6 @@ out_no_rpciod: | |||
| 451 | struct rpc_clnt *rpc_create(struct rpc_create_args *args) | 483 | struct rpc_clnt *rpc_create(struct rpc_create_args *args) |
| 452 | { | 484 | { |
| 453 | struct rpc_xprt *xprt; | 485 | struct rpc_xprt *xprt; |
| 454 | struct rpc_clnt *clnt; | ||
| 455 | struct xprt_create xprtargs = { | 486 | struct xprt_create xprtargs = { |
| 456 | .net = args->net, | 487 | .net = args->net, |
| 457 | .ident = args->protocol, | 488 | .ident = args->protocol, |
| @@ -515,30 +546,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) | |||
| 515 | if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT) | 546 | if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT) |
| 516 | xprt->resvport = 0; | 547 | xprt->resvport = 0; |
| 517 | 548 | ||
| 518 | clnt = rpc_new_client(args, xprt, NULL); | 549 | return rpc_create_xprt(args, xprt); |
| 519 | if (IS_ERR(clnt)) | ||
| 520 | return clnt; | ||
| 521 | |||
| 522 | if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { | ||
| 523 | int err = rpc_ping(clnt); | ||
| 524 | if (err != 0) { | ||
| 525 | rpc_shutdown_client(clnt); | ||
| 526 | return ERR_PTR(err); | ||
| 527 | } | ||
| 528 | } | ||
| 529 | |||
| 530 | clnt->cl_softrtry = 1; | ||
| 531 | if (args->flags & RPC_CLNT_CREATE_HARDRTRY) | ||
| 532 | clnt->cl_softrtry = 0; | ||
| 533 | |||
| 534 | if (args->flags & RPC_CLNT_CREATE_AUTOBIND) | ||
| 535 | clnt->cl_autobind = 1; | ||
| 536 | if (args->flags & RPC_CLNT_CREATE_DISCRTRY) | ||
| 537 | clnt->cl_discrtry = 1; | ||
| 538 | if (!(args->flags & RPC_CLNT_CREATE_QUIET)) | ||
| 539 | clnt->cl_chatty = 1; | ||
| 540 | |||
| 541 | return clnt; | ||
| 542 | } | 550 | } |
| 543 | EXPORT_SYMBOL_GPL(rpc_create); | 551 | EXPORT_SYMBOL_GPL(rpc_create); |
| 544 | 552 | ||
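
rpc_create_xprt() is split out of rpc_create() and exported so a caller that already holds a constructed transport can finish client setup (the optional ping plus the retry/autobind/quiet flags) without duplicating that tail. A minimal sketch of such a caller; example_create_client() is hypothetical, while xprt_create_transport() is the existing generic transport constructor used by rpc_create() itself:

    /* Sketch: build the transport separately, then let the new helper
     * finish the rpc_clnt.  example_create_client() is illustrative. */
    static struct rpc_clnt *example_create_client(struct rpc_create_args *args,
                                                  struct xprt_create *xprtargs)
    {
            struct rpc_xprt *xprt;

            xprt = xprt_create_transport(xprtargs);
            if (IS_ERR(xprt))
                    return ERR_CAST(xprt);

            return rpc_create_xprt(args, xprt);
    }
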
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index b6e59f0a9475..43bcb4699d69 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
| @@ -60,7 +60,7 @@ | |||
| 60 | 60 | ||
| 61 | static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, | 61 | static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, |
| 62 | int flags); | 62 | int flags); |
| 63 | static void svc_udp_data_ready(struct sock *, int); | 63 | static void svc_udp_data_ready(struct sock *); |
| 64 | static int svc_udp_recvfrom(struct svc_rqst *); | 64 | static int svc_udp_recvfrom(struct svc_rqst *); |
| 65 | static int svc_udp_sendto(struct svc_rqst *); | 65 | static int svc_udp_sendto(struct svc_rqst *); |
| 66 | static void svc_sock_detach(struct svc_xprt *); | 66 | static void svc_sock_detach(struct svc_xprt *); |
| @@ -403,14 +403,14 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd, | |||
| 403 | /* | 403 | /* |
| 404 | * INET callback when data has been received on the socket. | 404 | * INET callback when data has been received on the socket. |
| 405 | */ | 405 | */ |
| 406 | static void svc_udp_data_ready(struct sock *sk, int count) | 406 | static void svc_udp_data_ready(struct sock *sk) |
| 407 | { | 407 | { |
| 408 | struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; | 408 | struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; |
| 409 | wait_queue_head_t *wq = sk_sleep(sk); | 409 | wait_queue_head_t *wq = sk_sleep(sk); |
| 410 | 410 | ||
| 411 | if (svsk) { | 411 | if (svsk) { |
| 412 | dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n", | 412 | dprintk("svc: socket %p(inet %p), busy=%d\n", |
| 413 | svsk, sk, count, | 413 | svsk, sk, |
| 414 | test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); | 414 | test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); |
| 415 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); | 415 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); |
| 416 | svc_xprt_enqueue(&svsk->sk_xprt); | 416 | svc_xprt_enqueue(&svsk->sk_xprt); |
| @@ -731,7 +731,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv) | |||
| 731 | * A data_ready event on a listening socket means there's a connection | 731 | * A data_ready event on a listening socket means there's a connection |
| 732 | * pending. Do not use state_change as a substitute for it. | 732 | * pending. Do not use state_change as a substitute for it. |
| 733 | */ | 733 | */ |
| 734 | static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused) | 734 | static void svc_tcp_listen_data_ready(struct sock *sk) |
| 735 | { | 735 | { |
| 736 | struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; | 736 | struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; |
| 737 | wait_queue_head_t *wq; | 737 | wait_queue_head_t *wq; |
| @@ -783,7 +783,7 @@ static void svc_tcp_state_change(struct sock *sk) | |||
| 783 | wake_up_interruptible_all(wq); | 783 | wake_up_interruptible_all(wq); |
| 784 | } | 784 | } |
| 785 | 785 | ||
| 786 | static void svc_tcp_data_ready(struct sock *sk, int count) | 786 | static void svc_tcp_data_ready(struct sock *sk) |
| 787 | { | 787 | { |
| 788 | struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; | 788 | struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; |
| 789 | wait_queue_head_t *wq = sk_sleep(sk); | 789 | wait_queue_head_t *wq = sk_sleep(sk); |
| @@ -1397,6 +1397,22 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, | |||
| 1397 | return svsk; | 1397 | return svsk; |
| 1398 | } | 1398 | } |
| 1399 | 1399 | ||
| 1400 | bool svc_alien_sock(struct net *net, int fd) | ||
| 1401 | { | ||
| 1402 | int err; | ||
| 1403 | struct socket *sock = sockfd_lookup(fd, &err); | ||
| 1404 | bool ret = false; | ||
| 1405 | |||
| 1406 | if (!sock) | ||
| 1407 | goto out; | ||
| 1408 | if (sock_net(sock->sk) != net) | ||
| 1409 | ret = true; | ||
| 1410 | sockfd_put(sock); | ||
| 1411 | out: | ||
| 1412 | return ret; | ||
| 1413 | } | ||
| 1414 | EXPORT_SYMBOL_GPL(svc_alien_sock); | ||
| 1415 | |||
| 1400 | /** | 1416 | /** |
| 1401 | * svc_addsock - add a listener socket to an RPC service | 1417 | * svc_addsock - add a listener socket to an RPC service |
| 1402 | * @serv: pointer to RPC service to which to add a new listener | 1418 | * @serv: pointer to RPC service to which to add a new listener |
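
svc_alien_sock(), added above, reports whether a user-supplied file descriptor refers to a socket created in a different network namespace, so a service can refuse to adopt it. A minimal sketch of the intended check just before svc_addsock(); add_listener_fd() is hypothetical, and the svc_addsock() argument list is assumed from its declaration in this file:

    /* Sketch: refuse listener fds from a foreign netns before adding them.
     * add_listener_fd() is illustrative, not an in-tree function. */
    static int add_listener_fd(struct svc_serv *serv, struct net *net,
                               int fd, char *name, size_t len)
    {
            if (svc_alien_sock(net, fd))
                    return -EINVAL;         /* socket belongs to another netns */

            return svc_addsock(serv, fd, name, len);
    }
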
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 1504bb11e4f3..dd97ba3c4456 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
| @@ -833,8 +833,20 @@ xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf) | |||
| 833 | } | 833 | } |
| 834 | EXPORT_SYMBOL_GPL(xdr_buf_from_iov); | 834 | EXPORT_SYMBOL_GPL(xdr_buf_from_iov); |
| 835 | 835 | ||
| 836 | /* Sets subbuf to the portion of buf of length len beginning base bytes | 836 | /** |
| 837 | * from the start of buf. Returns -1 if base of length are out of bounds. */ | 837 | * xdr_buf_subsegment - set subbuf to a portion of buf |
| 838 | * @buf: an xdr buffer | ||
| 839 | * @subbuf: the result buffer | ||
| 840 | * @base: beginning of range in bytes | ||
| 841 | * @len: length of range in bytes | ||
| 842 | * | ||
| 843 | * sets @subbuf to an xdr buffer representing the portion of @buf of | ||
| 844 | * length @len starting at offset @base. | ||
| 845 | * | ||
| 846 | * @buf and @subbuf may be pointers to the same struct xdr_buf. | ||
| 847 | * | ||
| 848 | * Returns -1 if base or length are out of bounds. | ||
| 849 | */ | ||
| 838 | int | 850 | int |
| 839 | xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, | 851 | xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, |
| 840 | unsigned int base, unsigned int len) | 852 | unsigned int base, unsigned int len) |
| @@ -847,9 +859,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, | |||
| 847 | len -= subbuf->head[0].iov_len; | 859 | len -= subbuf->head[0].iov_len; |
| 848 | base = 0; | 860 | base = 0; |
| 849 | } else { | 861 | } else { |
| 850 | subbuf->head[0].iov_base = NULL; | ||
| 851 | subbuf->head[0].iov_len = 0; | ||
| 852 | base -= buf->head[0].iov_len; | 862 | base -= buf->head[0].iov_len; |
| 863 | subbuf->head[0].iov_len = 0; | ||
| 853 | } | 864 | } |
| 854 | 865 | ||
| 855 | if (base < buf->page_len) { | 866 | if (base < buf->page_len) { |
| @@ -871,9 +882,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, | |||
| 871 | len -= subbuf->tail[0].iov_len; | 882 | len -= subbuf->tail[0].iov_len; |
| 872 | base = 0; | 883 | base = 0; |
| 873 | } else { | 884 | } else { |
| 874 | subbuf->tail[0].iov_base = NULL; | ||
| 875 | subbuf->tail[0].iov_len = 0; | ||
| 876 | base -= buf->tail[0].iov_len; | 885 | base -= buf->tail[0].iov_len; |
| 886 | subbuf->tail[0].iov_len = 0; | ||
| 877 | } | 887 | } |
| 878 | 888 | ||
| 879 | if (base || len) | 889 | if (base || len) |
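
With the kernel-doc above, the xdr_buf_subsegment() contract is spelled out: it aliases a byte range of an existing buffer and may be applied in place. A minimal usage sketch; peek_range() and its arguments are illustrative:

    /* Sketch: carve [base, base+len) out of an existing xdr_buf. */
    static int peek_range(struct xdr_buf *buf, unsigned int base,
                          unsigned int len)
    {
            struct xdr_buf sub;

            if (xdr_buf_subsegment(buf, &sub, base, len) < 0)
                    return -EINVAL;         /* base or len out of bounds */

            /* sub now points into buf's head/pages/tail for that range;
             * per the doc, buf may also be passed as its own subbuf. */
            return 0;
    }
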
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 7d4df99f761f..d173f79947c6 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
| @@ -1383,15 +1383,3 @@ void xprt_put(struct rpc_xprt *xprt) | |||
| 1383 | if (atomic_dec_and_test(&xprt->count)) | 1383 | if (atomic_dec_and_test(&xprt->count)) |
| 1384 | xprt_destroy(xprt); | 1384 | xprt_destroy(xprt); |
| 1385 | } | 1385 | } |
| 1386 | |||
| 1387 | /** | ||
| 1388 | * xprt_get - return a reference to an RPC transport. | ||
| 1389 | * @xprt: pointer to the transport | ||
| 1390 | * | ||
| 1391 | */ | ||
| 1392 | struct rpc_xprt *xprt_get(struct rpc_xprt *xprt) | ||
| 1393 | { | ||
| 1394 | if (atomic_inc_not_zero(&xprt->count)) | ||
| 1395 | return xprt; | ||
| 1396 | return NULL; | ||
| 1397 | } | ||
diff --git a/net/sunrpc/xprtrdma/Makefile b/net/sunrpc/xprtrdma/Makefile index 5a8f268bdd30..da5136fd5694 100644 --- a/net/sunrpc/xprtrdma/Makefile +++ b/net/sunrpc/xprtrdma/Makefile | |||
| @@ -1,8 +1,8 @@ | |||
| 1 | obj-$(CONFIG_SUNRPC_XPRT_RDMA) += xprtrdma.o | 1 | obj-$(CONFIG_SUNRPC_XPRT_RDMA_CLIENT) += xprtrdma.o |
| 2 | 2 | ||
| 3 | xprtrdma-y := transport.o rpc_rdma.o verbs.o | 3 | xprtrdma-y := transport.o rpc_rdma.o verbs.o |
| 4 | 4 | ||
| 5 | obj-$(CONFIG_SUNRPC_XPRT_RDMA) += svcrdma.o | 5 | obj-$(CONFIG_SUNRPC_XPRT_RDMA_SERVER) += svcrdma.o |
| 6 | 6 | ||
| 7 | svcrdma-y := svc_rdma.o svc_rdma_transport.o \ | 7 | svcrdma-y := svc_rdma.o svc_rdma_transport.o \ |
| 8 | svc_rdma_marshal.o svc_rdma_sendto.o svc_rdma_recvfrom.o | 8 | svc_rdma_marshal.o svc_rdma_sendto.o svc_rdma_recvfrom.o |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 0ce75524ed21..8d904e4eef15 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | |||
| @@ -90,6 +90,7 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp, | |||
| 90 | sge_no++; | 90 | sge_no++; |
| 91 | } | 91 | } |
| 92 | rqstp->rq_respages = &rqstp->rq_pages[sge_no]; | 92 | rqstp->rq_respages = &rqstp->rq_pages[sge_no]; |
| 93 | rqstp->rq_next_page = rqstp->rq_respages + 1; | ||
| 93 | 94 | ||
| 94 | /* We should never run out of SGE because the limit is defined to | 95 | /* We should never run out of SGE because the limit is defined to |
| 95 | * support the max allowed RPC data length | 96 | * support the max allowed RPC data length |
| @@ -169,6 +170,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt, | |||
| 169 | */ | 170 | */ |
| 170 | head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no]; | 171 | head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no]; |
| 171 | rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1]; | 172 | rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1]; |
| 173 | rqstp->rq_next_page = rqstp->rq_respages + 1; | ||
| 172 | 174 | ||
| 173 | byte_count -= sge_bytes; | 175 | byte_count -= sge_bytes; |
| 174 | ch_bytes -= sge_bytes; | 176 | ch_bytes -= sge_bytes; |
| @@ -276,6 +278,7 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt, | |||
| 276 | 278 | ||
| 277 | /* rq_respages points one past arg pages */ | 279 | /* rq_respages points one past arg pages */ |
| 278 | rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; | 280 | rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; |
| 281 | rqstp->rq_next_page = rqstp->rq_respages + 1; | ||
| 279 | 282 | ||
| 280 | /* Create the reply and chunk maps */ | 283 | /* Create the reply and chunk maps */ |
| 281 | offset = 0; | 284 | offset = 0; |
| @@ -520,13 +523,6 @@ next_sge: | |||
| 520 | for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++) | 523 | for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++) |
| 521 | rqstp->rq_pages[ch_no] = NULL; | 524 | rqstp->rq_pages[ch_no] = NULL; |
| 522 | 525 | ||
| 523 | /* | ||
| 524 | * Detach res pages. If svc_release sees any it will attempt to | ||
| 525 | * put them. | ||
| 526 | */ | ||
| 527 | while (rqstp->rq_next_page != rqstp->rq_respages) | ||
| 528 | *(--rqstp->rq_next_page) = NULL; | ||
| 529 | |||
| 530 | return err; | 526 | return err; |
| 531 | } | 527 | } |
| 532 | 528 | ||
| @@ -550,7 +546,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp, | |||
| 550 | 546 | ||
| 551 | /* rq_respages starts after the last arg page */ | 547 | /* rq_respages starts after the last arg page */ |
| 552 | rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; | 548 | rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; |
| 553 | rqstp->rq_next_page = &rqstp->rq_arg.pages[page_no]; | 549 | rqstp->rq_next_page = rqstp->rq_respages + 1; |
| 554 | 550 | ||
| 555 | /* Rebuild rq_arg head and tail. */ | 551 | /* Rebuild rq_arg head and tail. */ |
| 556 | rqstp->rq_arg.head[0] = head->arg.head[0]; | 552 | rqstp->rq_arg.head[0] = head->arg.head[0]; |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index c1d124dc772b..7e024a51617e 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c | |||
| @@ -265,6 +265,7 @@ static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt, | |||
| 265 | xdr_off -= xdr->head[0].iov_len; | 265 | xdr_off -= xdr->head[0].iov_len; |
| 266 | if (xdr_off < xdr->page_len) { | 266 | if (xdr_off < xdr->page_len) { |
| 267 | /* This offset is in the page list */ | 267 | /* This offset is in the page list */ |
| 268 | xdr_off += xdr->page_base; | ||
| 268 | page = xdr->pages[xdr_off >> PAGE_SHIFT]; | 269 | page = xdr->pages[xdr_off >> PAGE_SHIFT]; |
| 269 | xdr_off &= ~PAGE_MASK; | 270 | xdr_off &= ~PAGE_MASK; |
| 270 | } else { | 271 | } else { |
| @@ -625,6 +626,7 @@ static int send_reply(struct svcxprt_rdma *rdma, | |||
| 625 | if (page_no+1 >= sge_no) | 626 | if (page_no+1 >= sge_no) |
| 626 | ctxt->sge[page_no+1].length = 0; | 627 | ctxt->sge[page_no+1].length = 0; |
| 627 | } | 628 | } |
| 629 | rqstp->rq_next_page = rqstp->rq_respages + 1; | ||
| 628 | BUG_ON(sge_no > rdma->sc_max_sge); | 630 | BUG_ON(sge_no > rdma->sc_max_sge); |
| 629 | memset(&send_wr, 0, sizeof send_wr); | 631 | memset(&send_wr, 0, sizeof send_wr); |
| 630 | ctxt->wr_op = IB_WR_SEND; | 632 | ctxt->wr_op = IB_WR_SEND; |
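
The one-line change in dma_map_xdr() folds page_base into offsets that land in the page list, so the correct page is selected when the buffer's data does not start at a page boundary. A minimal sketch of the resulting offset-to-page mapping; xdr_off_to_page() is an illustrative helper, not part of the patch:

    /* Sketch: resolve an xdr offset inside the page list, including the
     * page_base correction made by this hunk. */
    static struct page *xdr_off_to_page(struct xdr_buf *xdr, u32 xdr_off,
                                        u32 *page_off)
    {
            xdr_off += xdr->page_base;
            *page_off = xdr_off & ~PAGE_MASK;       /* offset within the page */
            return xdr->pages[xdr_off >> PAGE_SHIFT];
    }
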
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 62e4f9bcc387..25688fa2207f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
| @@ -477,8 +477,7 @@ struct page *svc_rdma_get_page(void) | |||
| 477 | 477 | ||
| 478 | while ((page = alloc_page(GFP_KERNEL)) == NULL) { | 478 | while ((page = alloc_page(GFP_KERNEL)) == NULL) { |
| 479 | /* If we can't get memory, wait a bit and try again */ | 479 | /* If we can't get memory, wait a bit and try again */ |
| 480 | printk(KERN_INFO "svcrdma: out of memory...retrying in 1000 " | 480 | printk(KERN_INFO "svcrdma: out of memory...retrying in 1s\n"); |
| 481 | "jiffies.\n"); | ||
| 482 | schedule_timeout_uninterruptible(msecs_to_jiffies(1000)); | 481 | schedule_timeout_uninterruptible(msecs_to_jiffies(1000)); |
| 483 | } | 482 | } |
| 484 | return page; | 483 | return page; |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 966763d735e9..25a3dcf15cae 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -254,7 +254,7 @@ struct sock_xprt { | |||
| 254 | /* | 254 | /* |
| 255 | * Saved socket callback addresses | 255 | * Saved socket callback addresses |
| 256 | */ | 256 | */ |
| 257 | void (*old_data_ready)(struct sock *, int); | 257 | void (*old_data_ready)(struct sock *); |
| 258 | void (*old_state_change)(struct sock *); | 258 | void (*old_state_change)(struct sock *); |
| 259 | void (*old_write_space)(struct sock *); | 259 | void (*old_write_space)(struct sock *); |
| 260 | void (*old_error_report)(struct sock *); | 260 | void (*old_error_report)(struct sock *); |
| @@ -909,6 +909,12 @@ static void xs_tcp_close(struct rpc_xprt *xprt) | |||
| 909 | xs_tcp_shutdown(xprt); | 909 | xs_tcp_shutdown(xprt); |
| 910 | } | 910 | } |
| 911 | 911 | ||
| 912 | static void xs_xprt_free(struct rpc_xprt *xprt) | ||
| 913 | { | ||
| 914 | xs_free_peer_addresses(xprt); | ||
| 915 | xprt_free(xprt); | ||
| 916 | } | ||
| 917 | |||
| 912 | /** | 918 | /** |
| 913 | * xs_destroy - prepare to shutdown a transport | 919 | * xs_destroy - prepare to shutdown a transport |
| 914 | * @xprt: doomed transport | 920 | * @xprt: doomed transport |
| @@ -919,8 +925,7 @@ static void xs_destroy(struct rpc_xprt *xprt) | |||
| 919 | dprintk("RPC: xs_destroy xprt %p\n", xprt); | 925 | dprintk("RPC: xs_destroy xprt %p\n", xprt); |
| 920 | 926 | ||
| 921 | xs_close(xprt); | 927 | xs_close(xprt); |
| 922 | xs_free_peer_addresses(xprt); | 928 | xs_xprt_free(xprt); |
| 923 | xprt_free(xprt); | ||
| 924 | module_put(THIS_MODULE); | 929 | module_put(THIS_MODULE); |
| 925 | } | 930 | } |
| 926 | 931 | ||
| @@ -946,7 +951,7 @@ static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) | |||
| 946 | * | 951 | * |
| 947 | * Currently this assumes we can read the whole reply in a single gulp. | 952 | * Currently this assumes we can read the whole reply in a single gulp. |
| 948 | */ | 953 | */ |
| 949 | static void xs_local_data_ready(struct sock *sk, int len) | 954 | static void xs_local_data_ready(struct sock *sk) |
| 950 | { | 955 | { |
| 951 | struct rpc_task *task; | 956 | struct rpc_task *task; |
| 952 | struct rpc_xprt *xprt; | 957 | struct rpc_xprt *xprt; |
| @@ -1009,7 +1014,7 @@ static void xs_local_data_ready(struct sock *sk, int len) | |||
| 1009 | * @len: how much data to read | 1014 | * @len: how much data to read |
| 1010 | * | 1015 | * |
| 1011 | */ | 1016 | */ |
| 1012 | static void xs_udp_data_ready(struct sock *sk, int len) | 1017 | static void xs_udp_data_ready(struct sock *sk) |
| 1013 | { | 1018 | { |
| 1014 | struct rpc_task *task; | 1019 | struct rpc_task *task; |
| 1015 | struct rpc_xprt *xprt; | 1020 | struct rpc_xprt *xprt; |
| @@ -1432,7 +1437,7 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns | |||
| 1432 | * @bytes: how much data to read | 1437 | * @bytes: how much data to read |
| 1433 | * | 1438 | * |
| 1434 | */ | 1439 | */ |
| 1435 | static void xs_tcp_data_ready(struct sock *sk, int bytes) | 1440 | static void xs_tcp_data_ready(struct sock *sk) |
| 1436 | { | 1441 | { |
| 1437 | struct rpc_xprt *xprt; | 1442 | struct rpc_xprt *xprt; |
| 1438 | read_descriptor_t rd_desc; | 1443 | read_descriptor_t rd_desc; |
| @@ -2532,6 +2537,10 @@ static void bc_close(struct rpc_xprt *xprt) | |||
| 2532 | 2537 | ||
| 2533 | static void bc_destroy(struct rpc_xprt *xprt) | 2538 | static void bc_destroy(struct rpc_xprt *xprt) |
| 2534 | { | 2539 | { |
| 2540 | dprintk("RPC: bc_destroy xprt %p\n", xprt); | ||
| 2541 | |||
| 2542 | xs_xprt_free(xprt); | ||
| 2543 | module_put(THIS_MODULE); | ||
| 2535 | } | 2544 | } |
| 2536 | 2545 | ||
| 2537 | static struct rpc_xprt_ops xs_local_ops = { | 2546 | static struct rpc_xprt_ops xs_local_ops = { |
| @@ -2732,7 +2741,7 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args) | |||
| 2732 | return xprt; | 2741 | return xprt; |
| 2733 | ret = ERR_PTR(-EINVAL); | 2742 | ret = ERR_PTR(-EINVAL); |
| 2734 | out_err: | 2743 | out_err: |
| 2735 | xprt_free(xprt); | 2744 | xs_xprt_free(xprt); |
| 2736 | return ret; | 2745 | return ret; |
| 2737 | } | 2746 | } |
| 2738 | 2747 | ||
| @@ -2810,7 +2819,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) | |||
| 2810 | return xprt; | 2819 | return xprt; |
| 2811 | ret = ERR_PTR(-EINVAL); | 2820 | ret = ERR_PTR(-EINVAL); |
| 2812 | out_err: | 2821 | out_err: |
| 2813 | xprt_free(xprt); | 2822 | xs_xprt_free(xprt); |
| 2814 | return ret; | 2823 | return ret; |
| 2815 | } | 2824 | } |
| 2816 | 2825 | ||
| @@ -2885,12 +2894,11 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) | |||
| 2885 | xprt->address_strings[RPC_DISPLAY_ADDR], | 2894 | xprt->address_strings[RPC_DISPLAY_ADDR], |
| 2886 | xprt->address_strings[RPC_DISPLAY_PROTO]); | 2895 | xprt->address_strings[RPC_DISPLAY_PROTO]); |
| 2887 | 2896 | ||
| 2888 | |||
| 2889 | if (try_module_get(THIS_MODULE)) | 2897 | if (try_module_get(THIS_MODULE)) |
| 2890 | return xprt; | 2898 | return xprt; |
| 2891 | ret = ERR_PTR(-EINVAL); | 2899 | ret = ERR_PTR(-EINVAL); |
| 2892 | out_err: | 2900 | out_err: |
| 2893 | xprt_free(xprt); | 2901 | xs_xprt_free(xprt); |
| 2894 | return ret; | 2902 | return ret; |
| 2895 | } | 2903 | } |
| 2896 | 2904 | ||
| @@ -2907,15 +2915,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
| 2907 | struct svc_sock *bc_sock; | 2915 | struct svc_sock *bc_sock; |
| 2908 | struct rpc_xprt *ret; | 2916 | struct rpc_xprt *ret; |
| 2909 | 2917 | ||
| 2910 | if (args->bc_xprt->xpt_bc_xprt) { | ||
| 2911 | /* | ||
| 2912 | * This server connection already has a backchannel | ||
| 2913 | * transport; we can't create a new one, as we wouldn't | ||
| 2914 | * be able to match replies based on xid any more. So, | ||
| 2915 | * reuse the already-existing one: | ||
| 2916 | */ | ||
| 2917 | return args->bc_xprt->xpt_bc_xprt; | ||
| 2918 | } | ||
| 2919 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, | 2918 | xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, |
| 2920 | xprt_tcp_slot_table_entries); | 2919 | xprt_tcp_slot_table_entries); |
| 2921 | if (IS_ERR(xprt)) | 2920 | if (IS_ERR(xprt)) |
| @@ -2973,13 +2972,14 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
| 2973 | */ | 2972 | */ |
| 2974 | xprt_set_connected(xprt); | 2973 | xprt_set_connected(xprt); |
| 2975 | 2974 | ||
| 2976 | |||
| 2977 | if (try_module_get(THIS_MODULE)) | 2975 | if (try_module_get(THIS_MODULE)) |
| 2978 | return xprt; | 2976 | return xprt; |
| 2977 | |||
| 2978 | args->bc_xprt->xpt_bc_xprt = NULL; | ||
| 2979 | xprt_put(xprt); | 2979 | xprt_put(xprt); |
| 2980 | ret = ERR_PTR(-EINVAL); | 2980 | ret = ERR_PTR(-EINVAL); |
| 2981 | out_err: | 2981 | out_err: |
| 2982 | xprt_free(xprt); | 2982 | xs_xprt_free(xprt); |
| 2983 | return ret; | 2983 | return ret; |
| 2984 | } | 2984 | } |
| 2985 | 2985 | ||
diff --git a/net/tipc/server.c b/net/tipc/server.c index 646a930eefbf..a538a02f869b 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c | |||
| @@ -119,7 +119,7 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid) | |||
| 119 | return con; | 119 | return con; |
| 120 | } | 120 | } |
| 121 | 121 | ||
| 122 | static void sock_data_ready(struct sock *sk, int unused) | 122 | static void sock_data_ready(struct sock *sk) |
| 123 | { | 123 | { |
| 124 | struct tipc_conn *con; | 124 | struct tipc_conn *con; |
| 125 | 125 | ||
| @@ -297,7 +297,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con) | |||
| 297 | newcon->usr_data = s->tipc_conn_new(newcon->conid); | 297 | newcon->usr_data = s->tipc_conn_new(newcon->conid); |
| 298 | 298 | ||
| 299 | /* Wake up receive process in case of 'SYN+' message */ | 299 | /* Wake up receive process in case of 'SYN+' message */ |
| 300 | newsock->sk->sk_data_ready(newsock->sk, 0); | 300 | newsock->sk->sk_data_ready(newsock->sk); |
| 301 | return ret; | 301 | return ret; |
| 302 | } | 302 | } |
| 303 | 303 | ||
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index adc12e227303..3c0256962f7d 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
| @@ -45,7 +45,7 @@ | |||
| 45 | #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ | 45 | #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ |
| 46 | 46 | ||
| 47 | static int backlog_rcv(struct sock *sk, struct sk_buff *skb); | 47 | static int backlog_rcv(struct sock *sk, struct sk_buff *skb); |
| 48 | static void tipc_data_ready(struct sock *sk, int len); | 48 | static void tipc_data_ready(struct sock *sk); |
| 49 | static void tipc_write_space(struct sock *sk); | 49 | static void tipc_write_space(struct sock *sk); |
| 50 | static int tipc_release(struct socket *sock); | 50 | static int tipc_release(struct socket *sock); |
| 51 | static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); | 51 | static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); |
| @@ -1248,7 +1248,7 @@ static void tipc_write_space(struct sock *sk) | |||
| 1248 | * @sk: socket | 1248 | * @sk: socket |
| 1249 | * @len: the length of messages | 1249 | * @len: the length of messages |
| 1250 | */ | 1250 | */ |
| 1251 | static void tipc_data_ready(struct sock *sk, int len) | 1251 | static void tipc_data_ready(struct sock *sk) |
| 1252 | { | 1252 | { |
| 1253 | struct socket_wq *wq; | 1253 | struct socket_wq *wq; |
| 1254 | 1254 | ||
| @@ -1410,7 +1410,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) | |||
| 1410 | __skb_queue_tail(&sk->sk_receive_queue, buf); | 1410 | __skb_queue_tail(&sk->sk_receive_queue, buf); |
| 1411 | skb_set_owner_r(buf, sk); | 1411 | skb_set_owner_r(buf, sk); |
| 1412 | 1412 | ||
| 1413 | sk->sk_data_ready(sk, 0); | 1413 | sk->sk_data_ready(sk); |
| 1414 | return TIPC_OK; | 1414 | return TIPC_OK; |
| 1415 | } | 1415 | } |
| 1416 | 1416 | ||
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 94404f19f9de..bb7e8ba821f4 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -1217,7 +1217,7 @@ restart: | |||
| 1217 | __skb_queue_tail(&other->sk_receive_queue, skb); | 1217 | __skb_queue_tail(&other->sk_receive_queue, skb); |
| 1218 | spin_unlock(&other->sk_receive_queue.lock); | 1218 | spin_unlock(&other->sk_receive_queue.lock); |
| 1219 | unix_state_unlock(other); | 1219 | unix_state_unlock(other); |
| 1220 | other->sk_data_ready(other, 0); | 1220 | other->sk_data_ready(other); |
| 1221 | sock_put(other); | 1221 | sock_put(other); |
| 1222 | return 0; | 1222 | return 0; |
| 1223 | 1223 | ||
| @@ -1600,7 +1600,7 @@ restart: | |||
| 1600 | if (max_level > unix_sk(other)->recursion_level) | 1600 | if (max_level > unix_sk(other)->recursion_level) |
| 1601 | unix_sk(other)->recursion_level = max_level; | 1601 | unix_sk(other)->recursion_level = max_level; |
| 1602 | unix_state_unlock(other); | 1602 | unix_state_unlock(other); |
| 1603 | other->sk_data_ready(other, len); | 1603 | other->sk_data_ready(other); |
| 1604 | sock_put(other); | 1604 | sock_put(other); |
| 1605 | scm_destroy(siocb->scm); | 1605 | scm_destroy(siocb->scm); |
| 1606 | return len; | 1606 | return len; |
| @@ -1706,7 +1706,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, | |||
| 1706 | if (max_level > unix_sk(other)->recursion_level) | 1706 | if (max_level > unix_sk(other)->recursion_level) |
| 1707 | unix_sk(other)->recursion_level = max_level; | 1707 | unix_sk(other)->recursion_level = max_level; |
| 1708 | unix_state_unlock(other); | 1708 | unix_state_unlock(other); |
| 1709 | other->sk_data_ready(other, size); | 1709 | other->sk_data_ready(other); |
| 1710 | sent += size; | 1710 | sent += size; |
| 1711 | } | 1711 | } |
| 1712 | 1712 | ||
diff --git a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c index 9a730744e7bc..9b7f207f2bee 100644 --- a/net/vmw_vsock/vmci_transport_notify.c +++ b/net/vmw_vsock/vmci_transport_notify.c | |||
| @@ -315,7 +315,7 @@ vmci_transport_handle_wrote(struct sock *sk, | |||
| 315 | struct vsock_sock *vsk = vsock_sk(sk); | 315 | struct vsock_sock *vsk = vsock_sk(sk); |
| 316 | PKT_FIELD(vsk, sent_waiting_read) = false; | 316 | PKT_FIELD(vsk, sent_waiting_read) = false; |
| 317 | #endif | 317 | #endif |
| 318 | sk->sk_data_ready(sk, 0); | 318 | sk->sk_data_ready(sk); |
| 319 | } | 319 | } |
| 320 | 320 | ||
| 321 | static void vmci_transport_notify_pkt_socket_init(struct sock *sk) | 321 | static void vmci_transport_notify_pkt_socket_init(struct sock *sk) |
diff --git a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c index 622bd7aa1016..dc9c7929a2f9 100644 --- a/net/vmw_vsock/vmci_transport_notify_qstate.c +++ b/net/vmw_vsock/vmci_transport_notify_qstate.c | |||
| @@ -92,7 +92,7 @@ vmci_transport_handle_wrote(struct sock *sk, | |||
| 92 | bool bottom_half, | 92 | bool bottom_half, |
| 93 | struct sockaddr_vm *dst, struct sockaddr_vm *src) | 93 | struct sockaddr_vm *dst, struct sockaddr_vm *src) |
| 94 | { | 94 | { |
| 95 | sk->sk_data_ready(sk, 0); | 95 | sk->sk_data_ready(sk); |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | static void vsock_block_update_write_window(struct sock *sk) | 98 | static void vsock_block_update_write_window(struct sock *sk) |
| @@ -290,7 +290,7 @@ vmci_transport_notify_pkt_recv_post_dequeue( | |||
| 290 | /* See the comment in | 290 | /* See the comment in |
| 291 | * vmci_transport_notify_pkt_send_post_enqueue(). | 291 | * vmci_transport_notify_pkt_send_post_enqueue(). |
| 292 | */ | 292 | */ |
| 293 | sk->sk_data_ready(sk, 0); | 293 | sk->sk_data_ready(sk); |
| 294 | } | 294 | } |
| 295 | 295 | ||
| 296 | return err; | 296 | return err; |
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 6177479c7de9..5ad4418ef093 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
| @@ -1064,7 +1064,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, | |||
| 1064 | x25_start_heartbeat(make); | 1064 | x25_start_heartbeat(make); |
| 1065 | 1065 | ||
| 1066 | if (!sock_flag(sk, SOCK_DEAD)) | 1066 | if (!sock_flag(sk, SOCK_DEAD)) |
| 1067 | sk->sk_data_ready(sk, skb->len); | 1067 | sk->sk_data_ready(sk); |
| 1068 | rc = 1; | 1068 | rc = 1; |
| 1069 | sock_put(sk); | 1069 | sock_put(sk); |
| 1070 | out: | 1070 | out: |
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index d1b0dc79bb6f..7ac50098a375 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c | |||
| @@ -79,7 +79,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) | |||
| 79 | skb_set_owner_r(skbn, sk); | 79 | skb_set_owner_r(skbn, sk); |
| 80 | skb_queue_tail(&sk->sk_receive_queue, skbn); | 80 | skb_queue_tail(&sk->sk_receive_queue, skbn); |
| 81 | if (!sock_flag(sk, SOCK_DEAD)) | 81 | if (!sock_flag(sk, SOCK_DEAD)) |
| 82 | sk->sk_data_ready(sk, skbn->len); | 82 | sk->sk_data_ready(sk); |
| 83 | 83 | ||
| 84 | return 0; | 84 | return 0; |
| 85 | } | 85 | } |
