Diffstat (limited to 'fs/cifs/transport.c')
-rw-r--r-- | fs/cifs/transport.c | 375
1 files changed, 246 insertions, 129 deletions
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index f25d4ea14be4..2126ab185045 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -27,6 +27,8 @@
 #include <linux/net.h>
 #include <linux/delay.h>
 #include <linux/freezer.h>
+#include <linux/tcp.h>
+#include <linux/highmem.h>
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <linux/mempool.h>
@@ -35,10 +37,8 @@
 #include "cifsproto.h"
 #include "cifs_debug.h"
 
-extern mempool_t *cifs_mid_poolp;
-
-static void
-wake_up_task(struct mid_q_entry *mid)
+void
+cifs_wake_up_task(struct mid_q_entry *mid)
 {
 	wake_up_process(mid->callback_data);
 }
@@ -65,12 +65,13 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 /*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
 	/* when mid allocated can be before when sent */
 	temp->when_alloc = jiffies;
+	temp->server = server;
 
 	/*
 	 * The default is for the mid to be synchronous, so the
 	 * default callback just wakes up the current task.
 	 */
-	temp->callback = wake_up_task;
+	temp->callback = cifs_wake_up_task;
 	temp->callback_data = current;
 }
 
@@ -83,6 +84,7 @@ void
 DeleteMidQEntry(struct mid_q_entry *midEntry)
 {
 #ifdef CONFIG_CIFS_STATS2
+	__le16 command = midEntry->server->vals->lock_cmd;
 	unsigned long now;
 #endif
 	midEntry->mid_state = MID_FREE;
@@ -96,8 +98,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
 	/* commands taking longer than one second are indications that
 	   something is wrong, unless it is quite a slow link or server */
 	if ((now - midEntry->when_alloc) > HZ) {
-		if ((cifsFYI & CIFS_TIMER) &&
-		    (midEntry->command != cpu_to_le16(SMB_COM_LOCKING_ANDX))) {
+		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
 			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
 			       midEntry->command, midEntry->mid);
 			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
@@ -110,8 +111,8 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
 	mempool_free(midEntry, cifs_mid_poolp);
 }
 
-static void
-delete_mid(struct mid_q_entry *mid)
+void
+cifs_delete_mid(struct mid_q_entry *mid)
 {
 	spin_lock(&GlobalMid_Lock);
 	list_del(&mid->qhead);
@@ -120,19 +121,29 @@ delete_mid(struct mid_q_entry *mid)
 	DeleteMidQEntry(mid);
 }
 
+/*
+ * smb_send_kvec - send an array of kvecs to the server
+ * @server:	Server to send the data to
+ * @iov:	Pointer to array of kvecs
+ * @n_vec:	length of kvec array
+ * @sent:	amount of data sent on socket is stored here
+ *
+ * Our basic "send data to server" function. Should be called with srv_mutex
+ * held. The caller is responsible for handling the results.
+ */
 static int
-smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
+smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
+	      size_t *sent)
 {
 	int rc = 0;
 	int i = 0;
 	struct msghdr smb_msg;
-	__be32 *buf_len = (__be32 *)(iov[0].iov_base);
-	unsigned int len = iov[0].iov_len;
-	unsigned int total_len;
-	int first_vec = 0;
-	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
+	unsigned int remaining;
+	size_t first_vec = 0;
 	struct socket *ssocket = server->ssocket;
 
+	*sent = 0;
+
 	if (ssocket == NULL)
 		return -ENOTSOCK; /* BB eventually add reconnect code here */
 
@@ -145,56 +156,60 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 	else
 		smb_msg.msg_flags = MSG_NOSIGNAL;
 
-	total_len = 0;
+	remaining = 0;
 	for (i = 0; i < n_vec; i++)
-		total_len += iov[i].iov_len;
-
-	cFYI(1, "Sending smb: total_len %d", total_len);
-	dump_smb(iov[0].iov_base, len);
+		remaining += iov[i].iov_len;
 
 	i = 0;
-	while (total_len) {
+	while (remaining) {
+		/*
+		 * If blocking send, we try 3 times, since each can block
+		 * for 5 seconds. For nonblocking we have to try more
+		 * but wait increasing amounts of time allowing time for
+		 * socket to clear. The overall time we wait in either
+		 * case to send on the socket is about 15 seconds.
+		 * Similarly we wait for 15 seconds for a response from
+		 * the server in SendReceive[2] for the server to send
+		 * a response back for most types of requests (except
+		 * SMB Write past end of file which can be slow, and
+		 * blocking lock operations). NFS waits slightly longer
+		 * than CIFS, but this can make it take longer for
+		 * nonresponsive servers to be detected and 15 seconds
+		 * is more than enough time for modern networks to
+		 * send a packet. In most cases if we fail to send
+		 * after the retries we will kill the socket and
+		 * reconnect which may clear the network problem.
+		 */
 		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
-				    n_vec - first_vec, total_len);
-		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
+				    n_vec - first_vec, remaining);
+		if (rc == -ENOSPC || rc == -EAGAIN) {
 			i++;
-			/*
-			 * If blocking send we try 3 times, since each can block
-			 * for 5 seconds. For nonblocking we have to try more
-			 * but wait increasing amounts of time allowing time for
-			 * socket to clear. The overall time we wait in either
-			 * case to send on the socket is about 15 seconds.
-			 * Similarly we wait for 15 seconds for a response from
-			 * the server in SendReceive[2] for the server to send
-			 * a response back for most types of requests (except
-			 * SMB Write past end of file which can be slow, and
-			 * blocking lock operations). NFS waits slightly longer
-			 * than CIFS, but this can make it take longer for
-			 * nonresponsive servers to be detected and 15 seconds
-			 * is more than enough time for modern networks to
-			 * send a packet. In most cases if we fail to send
-			 * after the retries we will kill the socket and
-			 * reconnect which may clear the network problem.
-			 */
-			if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
-				cERROR(1, "sends on sock %p stuck for 15 seconds",
-				    ssocket);
+			if (i >= 14 || (!server->noblocksnd && (i > 2))) {
+				cERROR(1, "sends on sock %p stuck for 15 "
+					  "seconds", ssocket);
 				rc = -EAGAIN;
 				break;
 			}
 			msleep(1 << i);
 			continue;
 		}
+
 		if (rc < 0)
 			break;
 
-		if (rc == total_len) {
-			total_len = 0;
+		/* send was at least partially successful */
+		*sent += rc;
+
+		if (rc == remaining) {
+			remaining = 0;
 			break;
-		} else if (rc > total_len) {
-			cERROR(1, "sent %d requested %d", rc, total_len);
+		}
+
+		if (rc > remaining) {
+			cERROR(1, "sent %d requested %d", rc, remaining);
 			break;
 		}
+
 		if (rc == 0) {
 			/* should never happen, letting socket clear before
 			   retrying is our only obvious option here */
@@ -202,7 +217,9 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 			msleep(500);
 			continue;
 		}
-		total_len -= rc;
+
+		remaining -= rc;
+
 		/* the line below resets i */
 		for (i = first_vec; i < n_vec; i++) {
 			if (iov[i].iov_len) {
@@ -217,16 +234,97 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 				}
 			}
 		}
+
 		i = 0; /* in case we get ENOSPC on the next send */
+		rc = 0;
 	}
+	return rc;
+}
+
+/**
+ * rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
+ * @rqst: pointer to smb_rqst
+ * @idx: index into the array of the page
+ * @iov: pointer to struct kvec that will hold the result
+ *
+ * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
+ * The page will be kmapped and the address placed into iov_base. The length
+ * will then be adjusted according to the ptailoff.
+ */
+void
+cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
+			struct kvec *iov)
+{
+	/*
+	 * FIXME: We could avoid this kmap altogether if we used
+	 * kernel_sendpage instead of kernel_sendmsg. That will only
+	 * work if signing is disabled though as sendpage inlines the
+	 * page directly into the fraglist. If userspace modifies the
+	 * page after we calculate the signature, then the server will
+	 * reject it and may break the connection. kernel_sendmsg does
+	 * an extra copy of the data and avoids that issue.
+	 */
+	iov->iov_base = kmap(rqst->rq_pages[idx]);
+
+	/* if last page, don't send beyond this offset into page */
+	if (idx == (rqst->rq_npages - 1))
+		iov->iov_len = rqst->rq_tailsz;
+	else
+		iov->iov_len = rqst->rq_pagesz;
+}
+
+static int
+smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+{
+	int rc;
+	struct kvec *iov = rqst->rq_iov;
+	int n_vec = rqst->rq_nvec;
+	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
+	unsigned int i;
+	size_t total_len = 0, sent;
+	struct socket *ssocket = server->ssocket;
+	int val = 1;
+
+	cFYI(1, "Sending smb: smb_len=%u", smb_buf_length);
+	dump_smb(iov[0].iov_base, iov[0].iov_len);
+
+	/* cork the socket */
+	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
+				(char *)&val, sizeof(val));
+
+	rc = smb_send_kvec(server, iov, n_vec, &sent);
+	if (rc < 0)
+		goto uncork;
+
+	total_len += sent;
+
+	/* now walk the page array and send each page in it */
+	for (i = 0; i < rqst->rq_npages; i++) {
+		struct kvec p_iov;
+
+		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
+		rc = smb_send_kvec(server, &p_iov, 1, &sent);
+		kunmap(rqst->rq_pages[i]);
+		if (rc < 0)
+			break;
+
+		total_len += sent;
+	}
+
+uncork:
+	/* uncork it */
+	val = 0;
+	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
+				(char *)&val, sizeof(val));
 
 	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
-		cFYI(1, "partial send (%d remaining), terminating session",
-			total_len);
-		/* If we have only sent part of an SMB then the next SMB
-		   could be taken as the remainder of this one. We need
-		   to kill the socket so the server throws away the partial
-		   SMB */
+		cFYI(1, "partial send (wanted=%u sent=%zu): terminating "
+		     "session", smb_buf_length + 4, total_len);
+		/*
+		 * If we have only sent part of an SMB then the next SMB could
+		 * be taken as the remainder of this one. We need to kill the
+		 * socket so the server throws away the partial SMB
+		 */
 		server->tcpStatus = CifsNeedReconnect;
 	}
 
@@ -235,12 +333,18 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 	else
 		rc = 0;
 
-	/* Don't want to modify the buffer as a side effect of this call. */
-	*buf_len = cpu_to_be32(smb_buf_length);
-
 	return rc;
 }
 
+static int
+smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
+{
+	struct smb_rqst rqst = { .rq_iov = iov,
+				 .rq_nvec = n_vec };
+
+	return smb_send_rqst(server, &rqst);
+}
+
 int
 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
 	 unsigned int smb_buf_length)
@@ -254,13 +358,13 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
 }
 
 static int
-wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
+wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
 		      int *credits)
 {
 	int rc;
 
 	spin_lock(&server->req_lock);
-	if (optype == CIFS_ASYNC_OP) {
+	if (timeout == CIFS_ASYNC_OP) {
 		/* oplock breaks must not be held up */
 		server->in_flight++;
 		*credits -= 1;
@@ -290,7 +394,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
 		 */
 
 		/* update # of requests on the wire to server */
-		if (optype != CIFS_BLOCKING_OP) {
+		if (timeout != CIFS_BLOCKING_OP) {
 			*credits -= 1;
 			server->in_flight++;
 		}
@@ -302,10 +406,11 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
 }
 
 static int
-wait_for_free_request(struct TCP_Server_Info *server, const int optype)
+wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
+		      const int optype)
 {
-	return wait_for_free_credits(server, optype,
-				server->ops->get_credits_field(server));
+	return wait_for_free_credits(server, timeout,
+				server->ops->get_credits_field(server, optype));
 }
 
 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
@@ -349,12 +454,11 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 	return 0;
 }
 
-static int
-cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
-			 unsigned int nvec, struct mid_q_entry **ret_mid)
+struct mid_q_entry *
+cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 {
 	int rc;
-	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
+	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
 	struct mid_q_entry *mid;
 
 	/* enable signing if server requires it */
@@ -363,16 +467,15 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
 
 	mid = AllocMidQEntry(hdr, server);
 	if (mid == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
-	rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
+	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
 	if (rc) {
 		DeleteMidQEntry(mid);
-		return rc;
+		return ERR_PTR(rc);
 	}
 
-	*ret_mid = mid;
-	return 0;
+	return mid;
 }
 
 /*
@@ -380,24 +483,27 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
  * the result. Caller is responsible for dealing with timeouts.
  */
 int
-cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
-		unsigned int nvec, mid_receive_t *receive,
-		mid_callback_t *callback, void *cbdata, bool ignore_pend)
+cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+		mid_receive_t *receive, mid_callback_t *callback,
+		void *cbdata, const int flags)
 {
-	int rc;
+	int rc, timeout, optype;
 	struct mid_q_entry *mid;
 
-	rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
+	timeout = flags & CIFS_TIMEOUT_MASK;
+	optype = flags & CIFS_OP_MASK;
+
+	rc = wait_for_free_request(server, timeout, optype);
 	if (rc)
 		return rc;
 
 	mutex_lock(&server->srv_mutex);
-	rc = cifs_setup_async_request(server, iov, nvec, &mid);
-	if (rc) {
+	mid = server->ops->setup_async_request(server, rqst);
+	if (IS_ERR(mid)) {
 		mutex_unlock(&server->srv_mutex);
-		add_credits(server, 1);
+		add_credits(server, 1, optype);
 		wake_up(&server->request_q);
-		return rc;
+		return PTR_ERR(mid);
 	}
 
 	mid->receive = receive;
@@ -412,7 +518,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
 
 
 	cifs_in_send_inc(server);
-	rc = smb_sendv(server, iov, nvec);
+	rc = smb_send_rqst(server, rqst);
 	cifs_in_send_dec(server);
 	cifs_save_when_sent(mid);
 	mutex_unlock(&server->srv_mutex);
@@ -420,8 +526,8 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
 	if (rc == 0)
 		return 0;
 
-	delete_mid(mid);
-	add_credits(server, 1);
+	cifs_delete_mid(mid);
+	add_credits(server, 1, optype);
 	wake_up(&server->request_q);
 	return rc;
 }
@@ -504,50 +610,59 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
 	/* convert the length into a more usable form */
 	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
 		struct kvec iov;
+		int rc = 0;
+		struct smb_rqst rqst = { .rq_iov = &iov,
+					 .rq_nvec = 1 };
 
 		iov.iov_base = mid->resp_buf;
 		iov.iov_len = len;
 		/* FIXME: add code to kill session */
-		if (cifs_verify_signature(&iov, 1, server,
-					  mid->sequence_number + 1) != 0)
-			cERROR(1, "Unexpected SMB signature");
+		rc = cifs_verify_signature(&rqst, server,
+					   mid->sequence_number + 1);
+		if (rc)
+			cERROR(1, "SMB signature verification returned error = "
+			       "%d", rc);
 	}
 
 	/* BB special case reconnect tid and uid here? */
 	return map_smb_to_linux_error(mid->resp_buf, log_error);
 }
 
-int
-cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
-		   unsigned int nvec, struct mid_q_entry **ret_mid)
+struct mid_q_entry *
+cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
 {
 	int rc;
-	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
+	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
 	struct mid_q_entry *mid;
 
 	rc = allocate_mid(ses, hdr, &mid);
 	if (rc)
-		return rc;
-	rc = cifs_sign_smb2(iov, nvec, ses->server, &mid->sequence_number);
-	if (rc)
-		delete_mid(mid);
-	*ret_mid = mid;
-	return rc;
+		return ERR_PTR(rc);
+	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
+	if (rc) {
+		cifs_delete_mid(mid);
+		return ERR_PTR(rc);
+	}
+	return mid;
 }
 
 int
 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
-	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
+	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
 	     const int flags)
 {
 	int rc = 0;
-	int long_op;
+	int timeout, optype;
 	struct mid_q_entry *midQ;
 	char *buf = iov[0].iov_base;
+	unsigned int credits = 1;
+	struct smb_rqst rqst = { .rq_iov = iov,
+				 .rq_nvec = n_vec };
 
-	long_op = flags & CIFS_TIMEOUT_MASK;
+	timeout = flags & CIFS_TIMEOUT_MASK;
+	optype = flags & CIFS_OP_MASK;
 
-	*pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */
+	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */
 
 	if ((ses == NULL) || (ses->server == NULL)) {
 		cifs_small_buf_release(buf);
@@ -566,7 +681,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
 	 * use ses->maxReq.
 	 */
 
-	rc = wait_for_free_request(ses->server, long_op);
+	rc = wait_for_free_request(ses->server, timeout, optype);
 	if (rc) {
 		cifs_small_buf_release(buf);
 		return rc;
@@ -580,13 +695,13 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
 
 	mutex_lock(&ses->server->srv_mutex);
 
-	rc = ses->server->ops->setup_request(ses, iov, n_vec, &midQ);
-	if (rc) {
+	midQ = ses->server->ops->setup_request(ses, &rqst);
+	if (IS_ERR(midQ)) {
 		mutex_unlock(&ses->server->srv_mutex);
 		cifs_small_buf_release(buf);
 		/* Update # of requests on wire to server */
-		add_credits(ses->server, 1);
-		return rc;
+		add_credits(ses->server, 1, optype);
+		return PTR_ERR(midQ);
 	}
 
 	midQ->mid_state = MID_REQUEST_SUBMITTED;
@@ -602,7 +717,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
 		goto out;
 	}
 
-	if (long_op == CIFS_ASYNC_OP) {
+	if (timeout == CIFS_ASYNC_OP) {
 		cifs_small_buf_release(buf);
 		goto out;
 	}
@@ -615,7 +730,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
 		midQ->callback = DeleteMidQEntry;
 		spin_unlock(&GlobalMid_Lock);
 		cifs_small_buf_release(buf);
-		add_credits(ses->server, 1);
+		add_credits(ses->server, 1, optype);
 		return rc;
 	}
 	spin_unlock(&GlobalMid_Lock);
@@ -625,7 +740,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
 
 	rc = cifs_sync_mid_result(midQ, ses->server);
 	if (rc != 0) {
-		add_credits(ses->server, 1);
+		add_credits(ses->server, 1, optype);
 		return rc;
 	}
 
@@ -639,19 +754,21 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
 	iov[0].iov_base = buf;
 	iov[0].iov_len = get_rfc1002_length(buf) + 4;
 	if (midQ->large_buf)
-		*pRespBufType = CIFS_LARGE_BUFFER;
+		*resp_buf_type = CIFS_LARGE_BUFFER;
 	else
-		*pRespBufType = CIFS_SMALL_BUFFER;
+		*resp_buf_type = CIFS_SMALL_BUFFER;
+
+	credits = ses->server->ops->get_credits(midQ);
 
 	rc = ses->server->ops->check_receive(midQ, ses->server,
 					     flags & CIFS_LOG_ERROR);
 
-	/* mark it so buf will not be freed by delete_mid */
+	/* mark it so buf will not be freed by cifs_delete_mid */
 	if ((flags & CIFS_NO_RESP) == 0)
 		midQ->resp_buf = NULL;
 out:
-	delete_mid(midQ);
-	add_credits(ses->server, 1);
+	cifs_delete_mid(midQ);
+	add_credits(ses->server, credits, optype);
 
 	return rc;
 }
@@ -659,7 +776,7 @@ out:
 int
 SendReceive(const unsigned int xid, struct cifs_ses *ses,
 	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
-	    int *pbytes_returned, const int long_op)
+	    int *pbytes_returned, const int timeout)
 {
 	int rc = 0;
 	struct mid_q_entry *midQ;
@@ -687,7 +804,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
 		return -EIO;
 	}
 
-	rc = wait_for_free_request(ses->server, long_op);
+	rc = wait_for_free_request(ses->server, timeout, 0);
 	if (rc)
 		return rc;
 
@@ -701,7 +818,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
 	if (rc) {
 		mutex_unlock(&ses->server->srv_mutex);
 		/* Update # of requests on wire to server */
-		add_credits(ses->server, 1);
+		add_credits(ses->server, 1, 0);
 		return rc;
 	}
 
@@ -722,7 +839,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
 	if (rc < 0)
 		goto out;
 
-	if (long_op == CIFS_ASYNC_OP)
+	if (timeout == CIFS_ASYNC_OP)
 		goto out;
 
 	rc = wait_for_response(ses->server, midQ);
@@ -733,7 +850,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
 		/* no longer considered to be "in-flight" */
 		midQ->callback = DeleteMidQEntry;
 		spin_unlock(&GlobalMid_Lock);
-		add_credits(ses->server, 1);
+		add_credits(ses->server, 1, 0);
 		return rc;
 	}
 	spin_unlock(&GlobalMid_Lock);
@@ -741,7 +858,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
 
 	rc = cifs_sync_mid_result(midQ, ses->server);
 	if (rc != 0) {
-		add_credits(ses->server, 1);
+		add_credits(ses->server, 1, 0);
 		return rc;
 	}
 
@@ -756,8 +873,8 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
 	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
 	rc = cifs_check_receive(midQ, ses->server, 0);
 out:
-	delete_mid(midQ);
-	add_credits(ses->server, 1);
+	cifs_delete_mid(midQ);
+	add_credits(ses->server, 1, 0);
 
 	return rc;
 }
@@ -822,7 +939,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
 		return -EIO;
 	}
 
-	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
+	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
 	if (rc)
 		return rc;
 
@@ -840,7 +957,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
 
 	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
 	if (rc) {
-		delete_mid(midQ);
+		cifs_delete_mid(midQ);
 		mutex_unlock(&ses->server->srv_mutex);
 		return rc;
 	}
@@ -853,7 +970,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
 	mutex_unlock(&ses->server->srv_mutex);
 
 	if (rc < 0) {
-		delete_mid(midQ);
+		cifs_delete_mid(midQ);
 		return rc;
 	}
 
@@ -874,7 +991,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
 		   blocking lock to return. */
 		rc = send_cancel(ses->server, in_buf, midQ);
 		if (rc) {
-			delete_mid(midQ);
+			cifs_delete_mid(midQ);
 			return rc;
 		}
 	} else {
@@ -886,7 +1003,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
 		/* If we get -ENOLCK back the lock may have
 		   already been removed. Don't exit in this case. */
 		if (rc && rc != -ENOLCK) {
-			delete_mid(midQ);
+			cifs_delete_mid(midQ);
 			return rc;
 		}
 	}
@@ -923,7 +1040,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
 	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
 	rc = cifs_check_receive(midQ, ses->server, 0);
 out:
-	delete_mid(midQ);
+	cifs_delete_mid(midQ);
 	if (rstart && rc == -EACCES)
 		return -ERESTARTSYS;
 	return rc;
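For readers unfamiliar with the TCP_CORK trick that the new smb_send_rqst() introduces above, the same cork/send-parts/uncork pattern can be sketched in user space roughly as follows. This is a minimal illustration only: send_corked() and its arguments are hypothetical, and the kernel_sendmsg()/kmap() handling, partial-write retries, and error paths from the patch are omitted.

	/*
	 * Sketch of the cork/uncork pattern used by smb_send_rqst():
	 * hold back partial frames while the header iovecs and the bulk
	 * payload are queued separately, then flush everything at once.
	 */
	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>
	#include <sys/uio.h>
	#include <unistd.h>

	static ssize_t send_corked(int fd, struct iovec *iov, int n_vec,
				   const void *payload, size_t payload_len)
	{
		int val = 1;
		ssize_t sent;
		size_t total = 0;

		/* cork the socket so header and payload leave as one frame */
		setsockopt(fd, IPPROTO_TCP, TCP_CORK, &val, sizeof(val));

		/* send the header iovec array (mirrors smb_send_kvec) */
		sent = writev(fd, iov, n_vec);
		if (sent < 0)
			goto uncork;
		total += sent;

		/* then the bulk payload (mirrors the rq_pages walk) */
		sent = write(fd, payload, payload_len);
		if (sent > 0)
			total += sent;

	uncork:
		/* uncork: any buffered partial frame is flushed now */
		val = 0;
		setsockopt(fd, IPPROTO_TCP, TCP_CORK, &val, sizeof(val));
		return sent < 0 ? sent : (ssize_t)total;
	}

The design point is the same as in the patch: without corking, sending the RFC1002 header, the SMB header kvecs, and each data page as separate socket writes can emit several small TCP segments per request; corking lets the stack coalesce them and the final uncork pushes out whatever remains.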