diff options
author | Jeff Layton <jlayton@redhat.com> | 2012-09-18 19:20:35 -0400 |
---|---|---|
committer | Steve French <smfrench@gmail.com> | 2012-09-24 22:46:31 -0400 |
commit | fec344e3f31aa911297cd3a4639432d983b1f324 (patch) | |
tree | c46deb8e54d4a1f00be47f7d34e572730b0df988 /fs/cifs/transport.c | |
parent | fb308a6f22f7f4f3574dab6b36c4a3598e50cf05 (diff) |
cifs: change cifs_call_async to use smb_rqst structs
For now, none of the callers populate rq_pages. That will be done for
writes in a later patch. While we're at it, change the prototype of
setup_async_request not to need a return pointer argument. Just
return the pointer to the mid_q_entry or an ERR_PTR.
Reviewed-by: Pavel Shilovsky <pshilovsky@samba.org>
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <smfrench@gmail.com>
Diffstat (limited to 'fs/cifs/transport.c')
-rw-r--r-- | fs/cifs/transport.c | 56 |
1 file changed, 28 insertions, 28 deletions
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index b6097344cd5b..2126ab185045 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -454,12 +454,11 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ) | |||
454 | return 0; | 454 | return 0; |
455 | } | 455 | } |
456 | 456 | ||
457 | int | 457 | struct mid_q_entry * |
458 | cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov, | 458 | cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) |
459 | unsigned int nvec, struct mid_q_entry **ret_mid) | ||
460 | { | 459 | { |
461 | int rc; | 460 | int rc; |
462 | struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base; | 461 | struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base; |
463 | struct mid_q_entry *mid; | 462 | struct mid_q_entry *mid; |
464 | 463 | ||
465 | /* enable signing if server requires it */ | 464 | /* enable signing if server requires it */ |
@@ -468,16 +467,15 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov, | |||
468 | 467 | ||
469 | mid = AllocMidQEntry(hdr, server); | 468 | mid = AllocMidQEntry(hdr, server); |
470 | if (mid == NULL) | 469 | if (mid == NULL) |
471 | return -ENOMEM; | 470 | return ERR_PTR(-ENOMEM); |
472 | 471 | ||
473 | rc = cifs_sign_smbv(iov, nvec, server, &mid->sequence_number); | 472 | rc = cifs_sign_rqst(rqst, server, &mid->sequence_number); |
474 | if (rc) { | 473 | if (rc) { |
475 | DeleteMidQEntry(mid); | 474 | DeleteMidQEntry(mid); |
476 | return rc; | 475 | return ERR_PTR(rc); |
477 | } | 476 | } |
478 | 477 | ||
479 | *ret_mid = mid; | 478 | return mid; |
480 | return 0; | ||
481 | } | 479 | } |
482 | 480 | ||
483 | /* | 481 | /* |
@@ -485,9 +483,9 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov, | |||
485 | * the result. Caller is responsible for dealing with timeouts. | 483 | * the result. Caller is responsible for dealing with timeouts. |
486 | */ | 484 | */ |
487 | int | 485 | int |
488 | cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, | 486 | cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst, |
489 | unsigned int nvec, mid_receive_t *receive, | 487 | mid_receive_t *receive, mid_callback_t *callback, |
490 | mid_callback_t *callback, void *cbdata, const int flags) | 488 | void *cbdata, const int flags) |
491 | { | 489 | { |
492 | int rc, timeout, optype; | 490 | int rc, timeout, optype; |
493 | struct mid_q_entry *mid; | 491 | struct mid_q_entry *mid; |
@@ -500,12 +498,12 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, | |||
500 | return rc; | 498 | return rc; |
501 | 499 | ||
502 | mutex_lock(&server->srv_mutex); | 500 | mutex_lock(&server->srv_mutex); |
503 | rc = server->ops->setup_async_request(server, iov, nvec, &mid); | 501 | mid = server->ops->setup_async_request(server, rqst); |
504 | if (rc) { | 502 | if (IS_ERR(mid)) { |
505 | mutex_unlock(&server->srv_mutex); | 503 | mutex_unlock(&server->srv_mutex); |
506 | add_credits(server, 1, optype); | 504 | add_credits(server, 1, optype); |
507 | wake_up(&server->request_q); | 505 | wake_up(&server->request_q); |
508 | return rc; | 506 | return PTR_ERR(mid); |
509 | } | 507 | } |
510 | 508 | ||
511 | mid->receive = receive; | 509 | mid->receive = receive; |
@@ -520,7 +518,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, | |||
520 | 518 | ||
521 | 519 | ||
522 | cifs_in_send_inc(server); | 520 | cifs_in_send_inc(server); |
523 | rc = smb_sendv(server, iov, nvec); | 521 | rc = smb_send_rqst(server, rqst); |
524 | cifs_in_send_dec(server); | 522 | cifs_in_send_dec(server); |
525 | cifs_save_when_sent(mid); | 523 | cifs_save_when_sent(mid); |
526 | mutex_unlock(&server->srv_mutex); | 524 | mutex_unlock(&server->srv_mutex); |
@@ -630,22 +628,22 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, | |||
630 | return map_smb_to_linux_error(mid->resp_buf, log_error); | 628 | return map_smb_to_linux_error(mid->resp_buf, log_error); |
631 | } | 629 | } |
632 | 630 | ||
633 | int | 631 | struct mid_q_entry * |
634 | cifs_setup_request(struct cifs_ses *ses, struct kvec *iov, | 632 | cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst) |
635 | unsigned int nvec, struct mid_q_entry **ret_mid) | ||
636 | { | 633 | { |
637 | int rc; | 634 | int rc; |
638 | struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base; | 635 | struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base; |
639 | struct mid_q_entry *mid; | 636 | struct mid_q_entry *mid; |
640 | 637 | ||
641 | rc = allocate_mid(ses, hdr, &mid); | 638 | rc = allocate_mid(ses, hdr, &mid); |
642 | if (rc) | 639 | if (rc) |
643 | return rc; | 640 | return ERR_PTR(rc); |
644 | rc = cifs_sign_smbv(iov, nvec, ses->server, &mid->sequence_number); | 641 | rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number); |
645 | if (rc) | 642 | if (rc) { |
646 | cifs_delete_mid(mid); | 643 | cifs_delete_mid(mid); |
647 | *ret_mid = mid; | 644 | return ERR_PTR(rc); |
648 | return rc; | 645 | } |
646 | return mid; | ||
649 | } | 647 | } |
650 | 648 | ||
651 | int | 649 | int |
@@ -658,6 +656,8 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, | |||
658 | struct mid_q_entry *midQ; | 656 | struct mid_q_entry *midQ; |
659 | char *buf = iov[0].iov_base; | 657 | char *buf = iov[0].iov_base; |
660 | unsigned int credits = 1; | 658 | unsigned int credits = 1; |
659 | struct smb_rqst rqst = { .rq_iov = iov, | ||
660 | .rq_nvec = n_vec }; | ||
661 | 661 | ||
662 | timeout = flags & CIFS_TIMEOUT_MASK; | 662 | timeout = flags & CIFS_TIMEOUT_MASK; |
663 | optype = flags & CIFS_OP_MASK; | 663 | optype = flags & CIFS_OP_MASK; |
@@ -695,13 +695,13 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, | |||
695 | 695 | ||
696 | mutex_lock(&ses->server->srv_mutex); | 696 | mutex_lock(&ses->server->srv_mutex); |
697 | 697 | ||
698 | rc = ses->server->ops->setup_request(ses, iov, n_vec, &midQ); | 698 | midQ = ses->server->ops->setup_request(ses, &rqst); |
699 | if (rc) { | 699 | if (IS_ERR(midQ)) { |
700 | mutex_unlock(&ses->server->srv_mutex); | 700 | mutex_unlock(&ses->server->srv_mutex); |
701 | cifs_small_buf_release(buf); | 701 | cifs_small_buf_release(buf); |
702 | /* Update # of requests on wire to server */ | 702 | /* Update # of requests on wire to server */ |
703 | add_credits(ses->server, 1, optype); | 703 | add_credits(ses->server, 1, optype); |
704 | return rc; | 704 | return PTR_ERR(midQ); |
705 | } | 705 | } |
706 | 706 | ||
707 | midQ->mid_state = MID_REQUEST_SUBMITTED; | 707 | midQ->mid_state = MID_REQUEST_SUBMITTED; |