about summary refs log tree commit diff stats
path: root/fs/cifs/transport.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/cifs/transport.c')
-rw-r--r--  fs/cifs/transport.c  290
1 files changed, 202 insertions, 88 deletions
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 83867ef348df..2126ab185045 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -27,6 +27,8 @@
27#include <linux/net.h> 27#include <linux/net.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/freezer.h> 29#include <linux/freezer.h>
30#include <linux/tcp.h>
31#include <linux/highmem.h>
30#include <asm/uaccess.h> 32#include <asm/uaccess.h>
31#include <asm/processor.h> 33#include <asm/processor.h>
32#include <linux/mempool.h> 34#include <linux/mempool.h>
@@ -109,8 +111,8 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
109 mempool_free(midEntry, cifs_mid_poolp); 111 mempool_free(midEntry, cifs_mid_poolp);
110} 112}
111 113
112static void 114void
113delete_mid(struct mid_q_entry *mid) 115cifs_delete_mid(struct mid_q_entry *mid)
114{ 116{
115 spin_lock(&GlobalMid_Lock); 117 spin_lock(&GlobalMid_Lock);
116 list_del(&mid->qhead); 118 list_del(&mid->qhead);
@@ -119,18 +121,29 @@ delete_mid(struct mid_q_entry *mid)
119 DeleteMidQEntry(mid); 121 DeleteMidQEntry(mid);
120} 122}
121 123
124/*
125 * smb_send_kvec - send an array of kvecs to the server
126 * @server: Server to send the data to
127 * @iov: Pointer to array of kvecs
128 * @n_vec: length of kvec array
129 * @sent: amount of data sent on socket is stored here
130 *
131 * Our basic "send data to server" function. Should be called with srv_mutex
132 * held. The caller is responsible for handling the results.
133 */
122static int 134static int
123smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec) 135smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
136 size_t *sent)
124{ 137{
125 int rc = 0; 138 int rc = 0;
126 int i = 0; 139 int i = 0;
127 struct msghdr smb_msg; 140 struct msghdr smb_msg;
128 unsigned int len = iov[0].iov_len; 141 unsigned int remaining;
129 unsigned int total_len; 142 size_t first_vec = 0;
130 int first_vec = 0;
131 unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
132 struct socket *ssocket = server->ssocket; 143 struct socket *ssocket = server->ssocket;
133 144
145 *sent = 0;
146
134 if (ssocket == NULL) 147 if (ssocket == NULL)
135 return -ENOTSOCK; /* BB eventually add reconnect code here */ 148 return -ENOTSOCK; /* BB eventually add reconnect code here */
136 149
@@ -143,56 +156,60 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
143 else 156 else
144 smb_msg.msg_flags = MSG_NOSIGNAL; 157 smb_msg.msg_flags = MSG_NOSIGNAL;
145 158
146 total_len = 0; 159 remaining = 0;
147 for (i = 0; i < n_vec; i++) 160 for (i = 0; i < n_vec; i++)
148 total_len += iov[i].iov_len; 161 remaining += iov[i].iov_len;
149
150 cFYI(1, "Sending smb: total_len %d", total_len);
151 dump_smb(iov[0].iov_base, len);
152 162
153 i = 0; 163 i = 0;
154 while (total_len) { 164 while (remaining) {
165 /*
166 * If blocking send, we try 3 times, since each can block
167 * for 5 seconds. For nonblocking we have to try more
168 * but wait increasing amounts of time allowing time for
169 * socket to clear. The overall time we wait in either
170 * case to send on the socket is about 15 seconds.
171 * Similarly we wait for 15 seconds for a response from
172 * the server in SendReceive[2] for the server to send
173 * a response back for most types of requests (except
174 * SMB Write past end of file which can be slow, and
175 * blocking lock operations). NFS waits slightly longer
176 * than CIFS, but this can make it take longer for
177 * nonresponsive servers to be detected and 15 seconds
178 * is more than enough time for modern networks to
179 * send a packet. In most cases if we fail to send
180 * after the retries we will kill the socket and
181 * reconnect which may clear the network problem.
182 */
155 rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec], 183 rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
156 n_vec - first_vec, total_len); 184 n_vec - first_vec, remaining);
157 if ((rc == -ENOSPC) || (rc == -EAGAIN)) { 185 if (rc == -ENOSPC || rc == -EAGAIN) {
158 i++; 186 i++;
159 /* 187 if (i >= 14 || (!server->noblocksnd && (i > 2))) {
160 * If blocking send we try 3 times, since each can block 188 cERROR(1, "sends on sock %p stuck for 15 "
161 * for 5 seconds. For nonblocking we have to try more 189 "seconds", ssocket);
162 * but wait increasing amounts of time allowing time for
163 * socket to clear. The overall time we wait in either
164 * case to send on the socket is about 15 seconds.
165 * Similarly we wait for 15 seconds for a response from
166 * the server in SendReceive[2] for the server to send
167 * a response back for most types of requests (except
168 * SMB Write past end of file which can be slow, and
169 * blocking lock operations). NFS waits slightly longer
170 * than CIFS, but this can make it take longer for
171 * nonresponsive servers to be detected and 15 seconds
172 * is more than enough time for modern networks to
173 * send a packet. In most cases if we fail to send
174 * after the retries we will kill the socket and
175 * reconnect which may clear the network problem.
176 */
177 if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
178 cERROR(1, "sends on sock %p stuck for 15 seconds",
179 ssocket);
180 rc = -EAGAIN; 190 rc = -EAGAIN;
181 break; 191 break;
182 } 192 }
183 msleep(1 << i); 193 msleep(1 << i);
184 continue; 194 continue;
185 } 195 }
196
186 if (rc < 0) 197 if (rc < 0)
187 break; 198 break;
188 199
189 if (rc == total_len) { 200 /* send was at least partially successful */
190 total_len = 0; 201 *sent += rc;
202
203 if (rc == remaining) {
204 remaining = 0;
191 break; 205 break;
192 } else if (rc > total_len) { 206 }
193 cERROR(1, "sent %d requested %d", rc, total_len); 207
208 if (rc > remaining) {
209 cERROR(1, "sent %d requested %d", rc, remaining);
194 break; 210 break;
195 } 211 }
212
196 if (rc == 0) { 213 if (rc == 0) {
197 /* should never happen, letting socket clear before 214 /* should never happen, letting socket clear before
198 retrying is our only obvious option here */ 215 retrying is our only obvious option here */
@@ -200,7 +217,9 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
200 msleep(500); 217 msleep(500);
201 continue; 218 continue;
202 } 219 }
203 total_len -= rc; 220
221 remaining -= rc;
222
204 /* the line below resets i */ 223 /* the line below resets i */
205 for (i = first_vec; i < n_vec; i++) { 224 for (i = first_vec; i < n_vec; i++) {
206 if (iov[i].iov_len) { 225 if (iov[i].iov_len) {
@@ -215,16 +234,97 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
215 } 234 }
216 } 235 }
217 } 236 }
237
218 i = 0; /* in case we get ENOSPC on the next send */ 238 i = 0; /* in case we get ENOSPC on the next send */
239 rc = 0;
219 } 240 }
241 return rc;
242}
243
244/**
245 * rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
246 * @rqst: pointer to smb_rqst
247 * @idx: index into the array of the page
248 * @iov: pointer to struct kvec that will hold the result
249 *
250 * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
251 * The page will be kmapped and the address placed into iov_base. The length
252 * will then be adjusted according to the ptailoff.
253 */
254void
255cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
256 struct kvec *iov)
257{
258 /*
259 * FIXME: We could avoid this kmap altogether if we used
260 * kernel_sendpage instead of kernel_sendmsg. That will only
261 * work if signing is disabled though as sendpage inlines the
262 * page directly into the fraglist. If userspace modifies the
263 * page after we calculate the signature, then the server will
264 * reject it and may break the connection. kernel_sendmsg does
265 * an extra copy of the data and avoids that issue.
266 */
267 iov->iov_base = kmap(rqst->rq_pages[idx]);
268
269 /* if last page, don't send beyond this offset into page */
270 if (idx == (rqst->rq_npages - 1))
271 iov->iov_len = rqst->rq_tailsz;
272 else
273 iov->iov_len = rqst->rq_pagesz;
274}
275
276static int
277smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
278{
279 int rc;
280 struct kvec *iov = rqst->rq_iov;
281 int n_vec = rqst->rq_nvec;
282 unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
283 unsigned int i;
284 size_t total_len = 0, sent;
285 struct socket *ssocket = server->ssocket;
286 int val = 1;
287
288 cFYI(1, "Sending smb: smb_len=%u", smb_buf_length);
289 dump_smb(iov[0].iov_base, iov[0].iov_len);
290
291 /* cork the socket */
292 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
293 (char *)&val, sizeof(val));
294
295 rc = smb_send_kvec(server, iov, n_vec, &sent);
296 if (rc < 0)
297 goto uncork;
298
299 total_len += sent;
300
301 /* now walk the page array and send each page in it */
302 for (i = 0; i < rqst->rq_npages; i++) {
303 struct kvec p_iov;
304
305 cifs_rqst_page_to_kvec(rqst, i, &p_iov);
306 rc = smb_send_kvec(server, &p_iov, 1, &sent);
307 kunmap(rqst->rq_pages[i]);
308 if (rc < 0)
309 break;
310
311 total_len += sent;
312 }
313
314uncork:
315 /* uncork it */
316 val = 0;
317 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
318 (char *)&val, sizeof(val));
220 319
221 if ((total_len > 0) && (total_len != smb_buf_length + 4)) { 320 if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
222 cFYI(1, "partial send (%d remaining), terminating session", 321 cFYI(1, "partial send (wanted=%u sent=%zu): terminating "
223 total_len); 322 "session", smb_buf_length + 4, total_len);
224 /* If we have only sent part of an SMB then the next SMB 323 /*
225 could be taken as the remainder of this one. We need 324 * If we have only sent part of an SMB then the next SMB could
226 to kill the socket so the server throws away the partial 325 * be taken as the remainder of this one. We need to kill the
227 SMB */ 326 * socket so the server throws away the partial SMB
327 */
228 server->tcpStatus = CifsNeedReconnect; 328 server->tcpStatus = CifsNeedReconnect;
229 } 329 }
230 330
@@ -236,6 +336,15 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
236 return rc; 336 return rc;
237} 337}
238 338
339static int
340smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
341{
342 struct smb_rqst rqst = { .rq_iov = iov,
343 .rq_nvec = n_vec };
344
345 return smb_send_rqst(server, &rqst);
346}
347
239int 348int
240smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer, 349smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
241 unsigned int smb_buf_length) 350 unsigned int smb_buf_length)
@@ -345,12 +454,11 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
345 return 0; 454 return 0;
346} 455}
347 456
348int 457struct mid_q_entry *
349cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov, 458cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
350 unsigned int nvec, struct mid_q_entry **ret_mid)
351{ 459{
352 int rc; 460 int rc;
353 struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base; 461 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
354 struct mid_q_entry *mid; 462 struct mid_q_entry *mid;
355 463
356 /* enable signing if server requires it */ 464 /* enable signing if server requires it */
@@ -359,16 +467,15 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
359 467
360 mid = AllocMidQEntry(hdr, server); 468 mid = AllocMidQEntry(hdr, server);
361 if (mid == NULL) 469 if (mid == NULL)
362 return -ENOMEM; 470 return ERR_PTR(-ENOMEM);
363 471
364 rc = cifs_sign_smbv(iov, nvec, server, &mid->sequence_number); 472 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
365 if (rc) { 473 if (rc) {
366 DeleteMidQEntry(mid); 474 DeleteMidQEntry(mid);
367 return rc; 475 return ERR_PTR(rc);
368 } 476 }
369 477
370 *ret_mid = mid; 478 return mid;
371 return 0;
372} 479}
373 480
374/* 481/*
@@ -376,9 +483,9 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
376 * the result. Caller is responsible for dealing with timeouts. 483 * the result. Caller is responsible for dealing with timeouts.
377 */ 484 */
378int 485int
379cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, 486cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
380 unsigned int nvec, mid_receive_t *receive, 487 mid_receive_t *receive, mid_callback_t *callback,
381 mid_callback_t *callback, void *cbdata, const int flags) 488 void *cbdata, const int flags)
382{ 489{
383 int rc, timeout, optype; 490 int rc, timeout, optype;
384 struct mid_q_entry *mid; 491 struct mid_q_entry *mid;
@@ -391,12 +498,12 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
391 return rc; 498 return rc;
392 499
393 mutex_lock(&server->srv_mutex); 500 mutex_lock(&server->srv_mutex);
394 rc = server->ops->setup_async_request(server, iov, nvec, &mid); 501 mid = server->ops->setup_async_request(server, rqst);
395 if (rc) { 502 if (IS_ERR(mid)) {
396 mutex_unlock(&server->srv_mutex); 503 mutex_unlock(&server->srv_mutex);
397 add_credits(server, 1, optype); 504 add_credits(server, 1, optype);
398 wake_up(&server->request_q); 505 wake_up(&server->request_q);
399 return rc; 506 return PTR_ERR(mid);
400 } 507 }
401 508
402 mid->receive = receive; 509 mid->receive = receive;
@@ -411,7 +518,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
411 518
412 519
413 cifs_in_send_inc(server); 520 cifs_in_send_inc(server);
414 rc = smb_sendv(server, iov, nvec); 521 rc = smb_send_rqst(server, rqst);
415 cifs_in_send_dec(server); 522 cifs_in_send_dec(server);
416 cifs_save_when_sent(mid); 523 cifs_save_when_sent(mid);
417 mutex_unlock(&server->srv_mutex); 524 mutex_unlock(&server->srv_mutex);
@@ -419,7 +526,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
419 if (rc == 0) 526 if (rc == 0)
420 return 0; 527 return 0;
421 528
422 delete_mid(mid); 529 cifs_delete_mid(mid);
423 add_credits(server, 1, optype); 530 add_credits(server, 1, optype);
424 wake_up(&server->request_q); 531 wake_up(&server->request_q);
425 return rc; 532 return rc;
@@ -503,35 +610,40 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
503 /* convert the length into a more usable form */ 610 /* convert the length into a more usable form */
504 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { 611 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
505 struct kvec iov; 612 struct kvec iov;
613 int rc = 0;
614 struct smb_rqst rqst = { .rq_iov = &iov,
615 .rq_nvec = 1 };
506 616
507 iov.iov_base = mid->resp_buf; 617 iov.iov_base = mid->resp_buf;
508 iov.iov_len = len; 618 iov.iov_len = len;
509 /* FIXME: add code to kill session */ 619 /* FIXME: add code to kill session */
510 if (cifs_verify_signature(&iov, 1, server, 620 rc = cifs_verify_signature(&rqst, server,
511 mid->sequence_number + 1) != 0) 621 mid->sequence_number + 1);
512 cERROR(1, "Unexpected SMB signature"); 622 if (rc)
623 cERROR(1, "SMB signature verification returned error = "
624 "%d", rc);
513 } 625 }
514 626
515 /* BB special case reconnect tid and uid here? */ 627 /* BB special case reconnect tid and uid here? */
516 return map_smb_to_linux_error(mid->resp_buf, log_error); 628 return map_smb_to_linux_error(mid->resp_buf, log_error);
517} 629}
518 630
519int 631struct mid_q_entry *
520cifs_setup_request(struct cifs_ses *ses, struct kvec *iov, 632cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
521 unsigned int nvec, struct mid_q_entry **ret_mid)
522{ 633{
523 int rc; 634 int rc;
524 struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base; 635 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
525 struct mid_q_entry *mid; 636 struct mid_q_entry *mid;
526 637
527 rc = allocate_mid(ses, hdr, &mid); 638 rc = allocate_mid(ses, hdr, &mid);
528 if (rc) 639 if (rc)
529 return rc; 640 return ERR_PTR(rc);
530 rc = cifs_sign_smbv(iov, nvec, ses->server, &mid->sequence_number); 641 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
531 if (rc) 642 if (rc) {
532 delete_mid(mid); 643 cifs_delete_mid(mid);
533 *ret_mid = mid; 644 return ERR_PTR(rc);
534 return rc; 645 }
646 return mid;
535} 647}
536 648
537int 649int
@@ -544,6 +656,8 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
544 struct mid_q_entry *midQ; 656 struct mid_q_entry *midQ;
545 char *buf = iov[0].iov_base; 657 char *buf = iov[0].iov_base;
546 unsigned int credits = 1; 658 unsigned int credits = 1;
659 struct smb_rqst rqst = { .rq_iov = iov,
660 .rq_nvec = n_vec };
547 661
548 timeout = flags & CIFS_TIMEOUT_MASK; 662 timeout = flags & CIFS_TIMEOUT_MASK;
549 optype = flags & CIFS_OP_MASK; 663 optype = flags & CIFS_OP_MASK;
@@ -581,13 +695,13 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
581 695
582 mutex_lock(&ses->server->srv_mutex); 696 mutex_lock(&ses->server->srv_mutex);
583 697
584 rc = ses->server->ops->setup_request(ses, iov, n_vec, &midQ); 698 midQ = ses->server->ops->setup_request(ses, &rqst);
585 if (rc) { 699 if (IS_ERR(midQ)) {
586 mutex_unlock(&ses->server->srv_mutex); 700 mutex_unlock(&ses->server->srv_mutex);
587 cifs_small_buf_release(buf); 701 cifs_small_buf_release(buf);
588 /* Update # of requests on wire to server */ 702 /* Update # of requests on wire to server */
589 add_credits(ses->server, 1, optype); 703 add_credits(ses->server, 1, optype);
590 return rc; 704 return PTR_ERR(midQ);
591 } 705 }
592 706
593 midQ->mid_state = MID_REQUEST_SUBMITTED; 707 midQ->mid_state = MID_REQUEST_SUBMITTED;
@@ -649,11 +763,11 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
649 rc = ses->server->ops->check_receive(midQ, ses->server, 763 rc = ses->server->ops->check_receive(midQ, ses->server,
650 flags & CIFS_LOG_ERROR); 764 flags & CIFS_LOG_ERROR);
651 765
652 /* mark it so buf will not be freed by delete_mid */ 766 /* mark it so buf will not be freed by cifs_delete_mid */
653 if ((flags & CIFS_NO_RESP) == 0) 767 if ((flags & CIFS_NO_RESP) == 0)
654 midQ->resp_buf = NULL; 768 midQ->resp_buf = NULL;
655out: 769out:
656 delete_mid(midQ); 770 cifs_delete_mid(midQ);
657 add_credits(ses->server, credits, optype); 771 add_credits(ses->server, credits, optype);
658 772
659 return rc; 773 return rc;
@@ -759,7 +873,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
759 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); 873 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
760 rc = cifs_check_receive(midQ, ses->server, 0); 874 rc = cifs_check_receive(midQ, ses->server, 0);
761out: 875out:
762 delete_mid(midQ); 876 cifs_delete_mid(midQ);
763 add_credits(ses->server, 1, 0); 877 add_credits(ses->server, 1, 0);
764 878
765 return rc; 879 return rc;
@@ -843,7 +957,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
843 957
844 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number); 958 rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
845 if (rc) { 959 if (rc) {
846 delete_mid(midQ); 960 cifs_delete_mid(midQ);
847 mutex_unlock(&ses->server->srv_mutex); 961 mutex_unlock(&ses->server->srv_mutex);
848 return rc; 962 return rc;
849 } 963 }
@@ -856,7 +970,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
856 mutex_unlock(&ses->server->srv_mutex); 970 mutex_unlock(&ses->server->srv_mutex);
857 971
858 if (rc < 0) { 972 if (rc < 0) {
859 delete_mid(midQ); 973 cifs_delete_mid(midQ);
860 return rc; 974 return rc;
861 } 975 }
862 976
@@ -877,7 +991,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
877 blocking lock to return. */ 991 blocking lock to return. */
878 rc = send_cancel(ses->server, in_buf, midQ); 992 rc = send_cancel(ses->server, in_buf, midQ);
879 if (rc) { 993 if (rc) {
880 delete_mid(midQ); 994 cifs_delete_mid(midQ);
881 return rc; 995 return rc;
882 } 996 }
883 } else { 997 } else {
@@ -889,7 +1003,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
889 /* If we get -ENOLCK back the lock may have 1003 /* If we get -ENOLCK back the lock may have
890 already been removed. Don't exit in this case. */ 1004 already been removed. Don't exit in this case. */
891 if (rc && rc != -ENOLCK) { 1005 if (rc && rc != -ENOLCK) {
892 delete_mid(midQ); 1006 cifs_delete_mid(midQ);
893 return rc; 1007 return rc;
894 } 1008 }
895 } 1009 }
@@ -926,7 +1040,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
926 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); 1040 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
927 rc = cifs_check_receive(midQ, ses->server, 0); 1041 rc = cifs_check_receive(midQ, ses->server, 0);
928out: 1042out:
929 delete_mid(midQ); 1043 cifs_delete_mid(midQ);
930 if (rstart && rc == -EACCES) 1044 if (rstart && rc == -EACCES)
931 return -ERESTARTSYS; 1045 return -ERESTARTSYS;
932 return rc; 1046 return rc;