author		Pavel Shilovsky <pshilovsky@samba.org>	2012-05-23 06:01:59 -0400
committer	Steve French <sfrench@us.ibm.com>	2012-06-01 13:35:19 -0400
commit		88257360605f9362dc4d79326c268dd334f61c90
tree		81770ae1d528f0d19e9e3a7a78ed90cdf147d452 /fs/cifs/misc.c
parent		7f0adb53bcf5bdb92236cda8ec92ea5e40993028
CIFS: Move get_next_mid to ops struct
Reviewed-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Pavel Shilovsky <pshilovsky@samba.org>
Signed-off-by: Steve French <sfrench@us.ibm.com>
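Note: this diff only covers misc.c, so it shows the old CIFS-specific GetNextMid() being deleted and its one caller switched to get_next_mid(); the per-dialect hook itself is declared elsewhere in the series. As a rough sketch of the ops-struct dispatch the title refers to (the real declarations live outside this file; the exact layout here is assumed, not taken from this diff):

/*
 * Sketch only: illustrates routing mid allocation through a per-dialect
 * operations table instead of calling one hard-coded function.
 */
struct smb_version_operations {
	/* ... other per-dialect hooks ... */
	__u64 (*get_next_mid)(struct TCP_Server_Info *server);
};

static inline __u64
get_next_mid(struct TCP_Server_Info *server)
{
	/* pick the mid-allocation routine for the negotiated dialect */
	return server->ops->get_next_mid(server);
}

With this shape, SMB2 can plug in a simple incrementing 64-bit mid while the legacy CIFS code keeps its 16-bit collision-avoiding allocator.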
Diffstat (limited to 'fs/cifs/misc.c')
-rw-r--r--	fs/cifs/misc.c	89
1 file changed, 1 insertion(+), 88 deletions(-)
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index e2552d2b2e42..557506ae1e2a 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -212,93 +212,6 @@ cifs_small_buf_release(void *buf_to_free)
 	return;
 }
 
-/*
- * Find a free multiplex id (SMB mid). Otherwise there could be
- * mid collisions which might cause problems, demultiplexing the
- * wrong response to this request. Multiplex ids could collide if
- * one of a series requests takes much longer than the others, or
- * if a very large number of long lived requests (byte range
- * locks or FindNotify requests) are pending. No more than
- * 64K-1 requests can be outstanding at one time. If no
- * mids are available, return zero. A future optimization
- * could make the combination of mids and uid the key we use
- * to demultiplex on (rather than mid alone).
- * In addition to the above check, the cifs demultiplex
- * code already used the command code as a secondary
- * check of the frame and if signing is negotiated the
- * response would be discarded if the mid were the same
- * but the signature was wrong. Since the mid is not put in the
- * pending queue until later (when it is about to be dispatched)
- * we do have to limit the number of outstanding requests
- * to somewhat less than 64K-1 although it is hard to imagine
- * so many threads being in the vfs at one time.
- */
-__u64 GetNextMid(struct TCP_Server_Info *server)
-{
-	__u64 mid = 0;
-	__u16 last_mid, cur_mid;
-	bool collision;
-
-	spin_lock(&GlobalMid_Lock);
-
-	/* mid is 16 bit only for CIFS/SMB */
-	cur_mid = (__u16)((server->CurrentMid) & 0xffff);
-	/* we do not want to loop forever */
-	last_mid = cur_mid;
-	cur_mid++;
-
-	/*
-	 * This nested loop looks more expensive than it is.
-	 * In practice the list of pending requests is short,
-	 * fewer than 50, and the mids are likely to be unique
-	 * on the first pass through the loop unless some request
-	 * takes longer than the 64 thousand requests before it
-	 * (and it would also have to have been a request that
-	 * did not time out).
-	 */
-	while (cur_mid != last_mid) {
-		struct mid_q_entry *mid_entry;
-		unsigned int num_mids;
-
-		collision = false;
-		if (cur_mid == 0)
-			cur_mid++;
-
-		num_mids = 0;
-		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
-			++num_mids;
-			if (mid_entry->mid == cur_mid &&
-			    mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
-				/* This mid is in use, try a different one */
-				collision = true;
-				break;
-			}
-		}
-
-		/*
-		 * if we have more than 32k mids in the list, then something
-		 * is very wrong. Possibly a local user is trying to DoS the
-		 * box by issuing long-running calls and SIGKILL'ing them. If
-		 * we get to 2^16 mids then we're in big trouble as this
-		 * function could loop forever.
-		 *
-		 * Go ahead and assign out the mid in this situation, but force
-		 * an eventual reconnect to clean out the pending_mid_q.
-		 */
-		if (num_mids > 32768)
-			server->tcpStatus = CifsNeedReconnect;
-
-		if (!collision) {
-			mid = (__u64)cur_mid;
-			server->CurrentMid = mid;
-			break;
-		}
-		cur_mid++;
-	}
-	spin_unlock(&GlobalMid_Lock);
-	return mid;
-}
-
 /* NB: MID can not be set if treeCon not passed in, in that
    case it is responsbility of caller to set the mid */
 void
@@ -334,7 +247,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
 
 		/* Uid is not converted */
 		buffer->Uid = treeCon->ses->Suid;
-		buffer->Mid = GetNextMid(treeCon->ses->server);
+		buffer->Mid = get_next_mid(treeCon->ses->server);
 	}
 	if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
 		buffer->Flags2 |= SMBFLG2_DFS;
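For context on why the long comment removed above worries about mid collisions: the demultiplex side matches each incoming response to a pending request by its mid. A hypothetical, simplified sketch of that lookup (field names follow the pending_mid_q/mid_q_entry usage visible in the removed code; the real matching lives in the CIFS transport/demultiplex code, not in misc.c):

/* Hypothetical sketch: find the in-flight request a response belongs to. */
static struct mid_q_entry *
find_pending_mid(struct TCP_Server_Info *server, __u64 mid)
{
	struct mid_q_entry *mid_entry;

	spin_lock(&GlobalMid_Lock);
	list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
		if (mid_entry->mid == mid &&
		    mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
			/*
			 * Two in-flight requests with the same mid would make
			 * this match ambiguous - which is what get_next_mid()
			 * (and the old GetNextMid()) exists to prevent.
			 */
			spin_unlock(&GlobalMid_Lock);
			return mid_entry;
		}
	}
	spin_unlock(&GlobalMid_Lock);
	return NULL;
}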