author:    Jeff Layton <jlayton@redhat.com>       2012-09-18 19:20:34 -0400
committer: Steve French <smfrench@gmail.com>      2012-09-24 22:46:31 -0400
commit:    6f49f46b187df34539f1e5df2469b8a541897700
tree:      60c2b50ced0a71357fd7747228e7b70b0169d13b /fs/cifs
parent:    0b688cfc8b3472f5bad104abe0675a060e32ad7b
cifs: convert send code to use smb_rqst structs
Again, just a change in the arguments and some function renaming here.
In later patches, we'll change this code to deal with page arrays.
In this patch, we add a new smb_send_rqst wrapper and have smb_sendv
call that. Then we move most of the existing smb_sendv code into a new
function -- smb_send_kvec. This seems a little redundant, but later
we'll flesh this out to deal with arrays of pages.
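[Editor's note: for context, a sketch of the smb_rqst container this patch converts to. Only rq_iov and rq_nvec are visible in this patch's initializer; the page-array fields below are assumptions based on the "page arrays" direction described above, not part of this commit.]

/*
 * Sketch only -- rq_iov/rq_nvec match the fields this patch actually
 * uses; the page fields are assumed from the stated plan for later
 * patches in the series.
 */
struct smb_rqst {
	struct kvec	*rq_iov;	/* array of kvecs holding the request */
	unsigned int	rq_nvec;	/* number of kvecs in the array */
	struct page	**rq_pages;	/* assumed: page array for later patches */
	unsigned int	rq_npages;	/* assumed: number of pages in rq_pages */
};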
Reviewed-by: Pavel Shilovsky <pshilovsky@samba.org>
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <smfrench@gmail.com>
Diffstat (limited to 'fs/cifs')
-rw-r--r--  fs/cifs/transport.c  135
1 file changed, 90 insertions, 45 deletions
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index bc9ccddad937..766307b725bd 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -119,18 +119,29 @@ cifs_delete_mid(struct mid_q_entry *mid)
 	DeleteMidQEntry(mid);
 }
 
+/*
+ * smb_send_kvec - send an array of kvecs to the server
+ * @server: Server to send the data to
+ * @iov: Pointer to array of kvecs
+ * @n_vec: length of kvec array
+ * @sent: amount of data sent on socket is stored here
+ *
+ * Our basic "send data to server" function. Should be called with srv_mutex
+ * held. The caller is responsible for handling the results.
+ */
 static int
-smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
+smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
+	      size_t *sent)
 {
 	int rc = 0;
 	int i = 0;
 	struct msghdr smb_msg;
-	unsigned int len = iov[0].iov_len;
-	unsigned int total_len;
-	int first_vec = 0;
-	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
+	unsigned int remaining;
+	size_t first_vec = 0;
 	struct socket *ssocket = server->ssocket;
 
+	*sent = 0;
+
 	if (ssocket == NULL)
 		return -ENOTSOCK; /* BB eventually add reconnect code here */
 
@@ -143,56 +154,60 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 	else
 		smb_msg.msg_flags = MSG_NOSIGNAL;
 
-	total_len = 0;
+	remaining = 0;
 	for (i = 0; i < n_vec; i++)
-		total_len += iov[i].iov_len;
-
-	cFYI(1, "Sending smb: total_len %d", total_len);
-	dump_smb(iov[0].iov_base, len);
+		remaining += iov[i].iov_len;
 
 	i = 0;
-	while (total_len) {
+	while (remaining) {
+		/*
+		 * If blocking send, we try 3 times, since each can block
+		 * for 5 seconds. For nonblocking we have to try more
+		 * but wait increasing amounts of time allowing time for
+		 * socket to clear. The overall time we wait in either
+		 * case to send on the socket is about 15 seconds.
+		 * Similarly we wait for 15 seconds for a response from
+		 * the server in SendReceive[2] for the server to send
+		 * a response back for most types of requests (except
+		 * SMB Write past end of file which can be slow, and
+		 * blocking lock operations). NFS waits slightly longer
+		 * than CIFS, but this can make it take longer for
+		 * nonresponsive servers to be detected and 15 seconds
+		 * is more than enough time for modern networks to
+		 * send a packet. In most cases if we fail to send
+		 * after the retries we will kill the socket and
+		 * reconnect which may clear the network problem.
+		 */
 		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
-				    n_vec - first_vec, total_len);
-		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
+				    n_vec - first_vec, remaining);
+		if (rc == -ENOSPC || rc == -EAGAIN) {
 			i++;
-			/*
-			 * If blocking send we try 3 times, since each can block
-			 * for 5 seconds. For nonblocking we have to try more
-			 * but wait increasing amounts of time allowing time for
-			 * socket to clear. The overall time we wait in either
-			 * case to send on the socket is about 15 seconds.
-			 * Similarly we wait for 15 seconds for a response from
-			 * the server in SendReceive[2] for the server to send
-			 * a response back for most types of requests (except
-			 * SMB Write past end of file which can be slow, and
-			 * blocking lock operations). NFS waits slightly longer
-			 * than CIFS, but this can make it take longer for
-			 * nonresponsive servers to be detected and 15 seconds
-			 * is more than enough time for modern networks to
-			 * send a packet. In most cases if we fail to send
-			 * after the retries we will kill the socket and
-			 * reconnect which may clear the network problem.
-			 */
-			if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
-				cERROR(1, "sends on sock %p stuck for 15 seconds",
-					  ssocket);
+			if (i >= 14 || (!server->noblocksnd && (i > 2))) {
+				cERROR(1, "sends on sock %p stuck for 15 "
+					  "seconds", ssocket);
 				rc = -EAGAIN;
 				break;
 			}
 			msleep(1 << i);
 			continue;
 		}
+
 		if (rc < 0)
 			break;
 
-		if (rc == total_len) {
-			total_len = 0;
+		/* send was at least partially successful */
+		*sent += rc;
+
+		if (rc == remaining) {
+			remaining = 0;
 			break;
-		} else if (rc > total_len) {
-			cERROR(1, "sent %d requested %d", rc, total_len);
+		}
+
+		if (rc > remaining) {
+			cERROR(1, "sent %d requested %d", rc, remaining);
 			break;
 		}
+
 		if (rc == 0) {
 			/* should never happen, letting socket clear before
 			   retrying is our only obvious option here */
@@ -200,7 +215,9 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 			msleep(500);
 			continue;
 		}
-		total_len -= rc;
+
+		remaining -= rc;
+
 		/* the line below resets i */
 		for (i = first_vec; i < n_vec; i++) {
 			if (iov[i].iov_len) {
@@ -215,16 +232,35 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 				}
 			}
 		}
+
 		i = 0; /* in case we get ENOSPC on the next send */
+		rc = 0;
 	}
+	return rc;
+}
+
+static int
+smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+{
+	int rc;
+	struct kvec *iov = rqst->rq_iov;
+	int n_vec = rqst->rq_nvec;
+	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
+	size_t total_len;
+
+	cFYI(1, "Sending smb: smb_len=%u", smb_buf_length);
+	dump_smb(iov[0].iov_base, iov[0].iov_len);
+
+	rc = smb_send_kvec(server, iov, n_vec, &total_len);
 
 	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
-		cFYI(1, "partial send (%d remaining), terminating session",
-			total_len);
-		/* If we have only sent part of an SMB then the next SMB
-		   could be taken as the remainder of this one. We need
-		   to kill the socket so the server throws away the partial
-		   SMB */
+		cFYI(1, "partial send (wanted=%u sent=%zu): terminating "
+		    "session", smb_buf_length + 4, total_len);
+		/*
+		 * If we have only sent part of an SMB then the next SMB could
+		 * be taken as the remainder of this one. We need to kill the
+		 * socket so the server throws away the partial SMB
+		 */
 		server->tcpStatus = CifsNeedReconnect;
 	}
 
@@ -236,6 +272,15 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 	return rc;
 }
 
+static int
+smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
+{
+	struct smb_rqst rqst = { .rq_iov = iov,
+				 .rq_nvec = n_vec };
+
+	return smb_send_rqst(server, &rqst);
+}
+
 int
 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
 	 unsigned int smb_buf_length)
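[Editor's note: as a sanity check on the "about 15 seconds" figure in the retry comment above, here is a minimal userspace sketch (not kernel code) of the same arithmetic, assuming the msleep(1 << i) backoff and the i >= 14 (nonblocking) / i > 2 (blocking, up to ~5 s per send) cutoffs shown in the hunks.]

#include <stdio.h>

int main(void)
{
	unsigned long nonblock_ms = 0;
	int i;

	/* nonblocking path: after each -EAGAIN/-ENOSPC the code sleeps
	 * 1 << i ms for i = 1..13 before giving up at i >= 14 */
	for (i = 1; i < 14; i++)
		nonblock_ms += 1UL << i;

	printf("nonblocking worst case: %lu ms\n", nonblock_ms); /* 16382 ms */

	/* blocking path: up to 3 sends, each able to block ~5 seconds */
	printf("blocking worst case: %d ms\n", 3 * 5000);        /* 15000 ms */
	return 0;
}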