author     Linus Torvalds <torvalds@linux-foundation.org>   2012-06-04 18:00:58 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-06-04 18:00:58 -0400
commit     bf2785a818372603ad3ca3abcab65010f08a1d68
tree       8217f81fbfb686c006ade225d479c9000c311e7a
parent     a3fe778c7895cd847d23c25ad566d83346282a77
parent     88257360605f9362dc4d79326c268dd334f61c90
Merge branch 'for-next' of git://git.samba.org/sfrench/cifs-2.6
Pull cifs fixes from Steve French.
* 'for-next' of git://git.samba.org/sfrench/cifs-2.6:
CIFS: Move get_next_mid to ops struct
CIFS: Make accessing is_valid_oplock/dump_detail ops struct field safe
CIFS: Improve indentation in cifs_unlock_range
CIFS: Fix possible wrong memory allocation
-rw-r--r--  fs/cifs/cifsglob.h   |   7
-rw-r--r--  fs/cifs/cifsproto.h  |   1
-rw-r--r--  fs/cifs/cifssmb.c    |   8
-rw-r--r--  fs/cifs/connect.c    |   8
-rw-r--r--  fs/cifs/file.c       | 106
-rw-r--r--  fs/cifs/misc.c       |  89
-rw-r--r--  fs/cifs/smb1ops.c    |  89
-rw-r--r--  fs/cifs/transport.c  |   2
8 files changed, 167 insertions, 143 deletions
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 20350a93ed99..6df0cbe1cbc9 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -174,6 +174,7 @@ struct smb_version_operations {
 	void (*add_credits)(struct TCP_Server_Info *, const unsigned int);
 	void (*set_credits)(struct TCP_Server_Info *, const int);
 	int * (*get_credits_field)(struct TCP_Server_Info *);
+	__u64 (*get_next_mid)(struct TCP_Server_Info *);
 	/* data offset from read response message */
 	unsigned int (*read_data_offset)(char *);
 	/* data length from read response message */
@@ -399,6 +400,12 @@ set_credits(struct TCP_Server_Info *server, const int val)
 	server->ops->set_credits(server, val);
 }
 
+static inline __u64
+get_next_mid(struct TCP_Server_Info *server)
+{
+	return server->ops->get_next_mid(server);
+}
+
 /*
  * Macros to allow the TCP_Server_Info->net field and related code to drop out
  * when CONFIG_NET_NS isn't set.
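The cifsglob.h hunk above is the core of "CIFS: Move get_next_mid to ops struct": mid generation becomes a function pointer in smb_version_operations, reached through a small inline wrapper. Below is a standalone sketch of that dispatch pattern, not part of the patch; the names (demo_server, demo_server_ops, demo_counter_mid) are hypothetical and the counter body is only a stand-in for the real cifs_get_next_mid.

/* Illustrative only, not kernel code: ops-table dispatch with a wrapper. */
#include <stdint.h>
#include <stdio.h>

struct demo_server;

struct demo_server_ops {
	/* protocol-specific way to pick the next multiplex id */
	uint64_t (*get_next_mid)(struct demo_server *server);
};

struct demo_server {
	const struct demo_server_ops *ops;
	uint64_t current_mid;
};

/* mirrors the inline get_next_mid() wrapper added to cifsglob.h */
static inline uint64_t demo_get_next_mid(struct demo_server *server)
{
	return server->ops->get_next_mid(server);
}

/* one possible implementation: a plain counter (stand-in only) */
static uint64_t demo_counter_mid(struct demo_server *server)
{
	return ++server->current_mid;
}

static const struct demo_server_ops demo_ops = {
	.get_next_mid = demo_counter_mid,
};

int main(void)
{
	struct demo_server srv = { .ops = &demo_ops, .current_mid = 0 };

	printf("mid = %llu\n", (unsigned long long)demo_get_next_mid(&srv));
	return 0;
}

Callers never name the protocol implementation directly, which is what lets the SMB1-only mid logic move out of common code in this series.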
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 5ec21ecf7980..0a6cbfe2761e 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -114,7 +114,6 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
 				void **request_buf);
 extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
 			  const struct nls_table *nls_cp);
-extern __u64 GetNextMid(struct TCP_Server_Info *server);
 extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index b5ad716b2642..5b400730c213 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -268,7 +268,7 @@ small_smb_init_no_tc(const int smb_command, const int wct,
 		return rc;
 
 	buffer = (struct smb_hdr *)*request_buf;
-	buffer->Mid = GetNextMid(ses->server);
+	buffer->Mid = get_next_mid(ses->server);
 	if (ses->capabilities & CAP_UNICODE)
 		buffer->Flags2 |= SMBFLG2_UNICODE;
 	if (ses->capabilities & CAP_STATUS32)
@@ -402,7 +402,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
 
 	cFYI(1, "secFlags 0x%x", secFlags);
 
-	pSMB->hdr.Mid = GetNextMid(server);
+	pSMB->hdr.Mid = get_next_mid(server);
 	pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
 
 	if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
@@ -782,7 +782,7 @@ CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
 		return rc;
 	}
 
-	pSMB->hdr.Mid = GetNextMid(ses->server);
+	pSMB->hdr.Mid = get_next_mid(ses->server);
 
 	if (ses->server->sec_mode &
 		   (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
@@ -4762,7 +4762,7 @@ getDFSRetry:
 
 	/* server pointer checked in called function,
 	but should never be null here anyway */
-	pSMB->hdr.Mid = GetNextMid(ses->server);
+	pSMB->hdr.Mid = get_next_mid(ses->server);
 	pSMB->hdr.Tid = ses->ipc_tid;
 	pSMB->hdr.Uid = ses->Suid;
 	if (ses->capabilities & CAP_STATUS32)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index ccafdedd0dbc..78db68a5cf44 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1058,13 +1058,15 @@ cifs_demultiplex_thread(void *p)
 		if (mid_entry != NULL) {
 			if (!mid_entry->multiRsp || mid_entry->multiEnd)
 				mid_entry->callback(mid_entry);
-		} else if (!server->ops->is_oplock_break(buf, server)) {
+		} else if (!server->ops->is_oplock_break ||
+			   !server->ops->is_oplock_break(buf, server)) {
 			cERROR(1, "No task to wake, unknown frame received! "
 				  "NumMids %d", atomic_read(&midCount));
 			cifs_dump_mem("Received Data is: ", buf,
 				      HEADER_SIZE(server));
 #ifdef CONFIG_CIFS_DEBUG2
-			server->ops->dump_detail(buf);
+			if (server->ops->dump_detail)
+				server->ops->dump_detail(buf);
 			cifs_dump_mids(server);
 #endif /* CIFS_DEBUG2 */
 
@@ -3938,7 +3940,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
 	header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
 			NULL /*no tid */ , 4 /*wct */ );
 
-	smb_buffer->Mid = GetNextMid(ses->server);
+	smb_buffer->Mid = get_next_mid(ses->server);
 	smb_buffer->Uid = ses->Suid;
 	pSMB = (TCONX_REQ *) smb_buffer;
 	pSMBr = (TCONX_RSP *) smb_buffer_response;
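The connect.c hunk above implements "CIFS: Make accessing is_valid_oplock/dump_detail ops struct field safe": both callbacks are optional, so the pointer is tested before the call. A standalone sketch of that guard follows, not the kernel code; the demo_* names are hypothetical.

/* Illustrative only: calling optional ops-table callbacks safely. */
#include <stdbool.h>
#include <stdio.h>

struct demo_ops {
	/* optional: a protocol may leave either callback NULL */
	bool (*is_oplock_break)(const char *buf);
	void (*dump_detail)(const char *buf);
};

static void demo_handle_unmatched(const struct demo_ops *ops, const char *buf)
{
	/* a missing callback is treated the same as "not an oplock break" */
	if (!ops->is_oplock_break || !ops->is_oplock_break(buf)) {
		fprintf(stderr, "unknown frame received\n");
		if (ops->dump_detail)	/* only dump when the op exists */
			ops->dump_detail(buf);
	}
}

int main(void)
{
	struct demo_ops ops = { 0 };	/* neither callback implemented */

	demo_handle_unmatched(&ops, "frame");
	return 0;
}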
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 253170dfa716..513adbc211d7 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -876,7 +876,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
 	struct cifsLockInfo *li, *tmp;
 	struct cifs_tcon *tcon;
 	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
-	unsigned int num, max_num;
+	unsigned int num, max_num, max_buf;
 	LOCKING_ANDX_RANGE *buf, *cur;
 	int types[] = {LOCKING_ANDX_LARGE_FILES,
 		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
@@ -892,8 +892,19 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
 		return rc;
 	}
 
-	max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
-		  sizeof(LOCKING_ANDX_RANGE);
+	/*
+	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
+	 * and check it for zero before using.
+	 */
+	max_buf = tcon->ses->server->maxBuf;
+	if (!max_buf) {
+		mutex_unlock(&cinode->lock_mutex);
+		FreeXid(xid);
+		return -EINVAL;
+	}
+
+	max_num = (max_buf - sizeof(struct smb_hdr)) /
+		  sizeof(LOCKING_ANDX_RANGE);
 	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
 	if (!buf) {
 		mutex_unlock(&cinode->lock_mutex);
@@ -1218,7 +1229,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
 	int types[] = {LOCKING_ANDX_LARGE_FILES,
 		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
 	unsigned int i;
-	unsigned int max_num, num;
+	unsigned int max_num, num, max_buf;
 	LOCKING_ANDX_RANGE *buf, *cur;
 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
 	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
@@ -1228,8 +1239,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
 
 	INIT_LIST_HEAD(&tmp_llist);
 
-	max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
-		  sizeof(LOCKING_ANDX_RANGE);
+	/*
+	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
+	 * and check it for zero before using.
+	 */
+	max_buf = tcon->ses->server->maxBuf;
+	if (!max_buf)
+		return -EINVAL;
+
+	max_num = (max_buf - sizeof(struct smb_hdr)) /
+		  sizeof(LOCKING_ANDX_RANGE);
 	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
@@ -1247,46 +1266,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
 				continue;
 			if (types[i] != li->type)
 				continue;
-			if (!cinode->can_cache_brlcks) {
-				cur->Pid = cpu_to_le16(li->pid);
-				cur->LengthLow = cpu_to_le32((u32)li->length);
-				cur->LengthHigh =
-					cpu_to_le32((u32)(li->length>>32));
-				cur->OffsetLow = cpu_to_le32((u32)li->offset);
-				cur->OffsetHigh =
-					cpu_to_le32((u32)(li->offset>>32));
-				/*
-				 * We need to save a lock here to let us add
-				 * it again to the file's list if the unlock
-				 * range request fails on the server.
-				 */
-				list_move(&li->llist, &tmp_llist);
-				if (++num == max_num) {
-					stored_rc = cifs_lockv(xid, tcon,
-							       cfile->netfid,
-							       li->type, num,
-							       0, buf);
-					if (stored_rc) {
-						/*
-						 * We failed on the unlock range
-						 * request - add all locks from
-						 * the tmp list to the head of
-						 * the file's list.
-						 */
-						cifs_move_llist(&tmp_llist,
-								&cfile->llist);
-						rc = stored_rc;
-					} else
-						/*
-						 * The unlock range request
-						 * succeed - free the tmp list.
-						 */
-						cifs_free_llist(&tmp_llist);
-					cur = buf;
-					num = 0;
-				} else
-					cur++;
-			} else {
+			if (cinode->can_cache_brlcks) {
 				/*
 				 * We can cache brlock requests - simply remove
 				 * a lock from the file's list.
@@ -1294,7 +1274,41 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
 				list_del(&li->llist);
 				cifs_del_lock_waiters(li);
 				kfree(li);
+				continue;
 			}
+			cur->Pid = cpu_to_le16(li->pid);
+			cur->LengthLow = cpu_to_le32((u32)li->length);
+			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
+			cur->OffsetLow = cpu_to_le32((u32)li->offset);
+			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
+			/*
+			 * We need to save a lock here to let us add it again to
+			 * the file's list if the unlock range request fails on
+			 * the server.
+			 */
+			list_move(&li->llist, &tmp_llist);
+			if (++num == max_num) {
+				stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
+						       li->type, num, 0, buf);
+				if (stored_rc) {
+					/*
+					 * We failed on the unlock range
+					 * request - add all locks from the tmp
+					 * list to the head of the file's list.
+					 */
+					cifs_move_llist(&tmp_llist,
+							&cfile->llist);
+					rc = stored_rc;
+				} else
+					/*
+					 * The unlock range request succeed -
+					 * free the tmp list.
+					 */
+					cifs_free_llist(&tmp_llist);
+				cur = buf;
+				num = 0;
+			} else
+				cur++;
 		}
 		if (num) {
 			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
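The first two file.c hunks above are "CIFS: Fix possible wrong memory allocation": server->maxBuf can be reset to zero by cifs_reconnect on another thread, so it is snapshotted into max_buf, rejected if zero, and only the snapshot is used to size the allocation. Below is a standalone sketch of that snapshot-and-validate pattern, not the kernel code; the demo_* names are hypothetical and a volatile field stands in for the shared server state.

/* Illustrative only: size an allocation from a snapshot of a racy limit. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_range { unsigned int pid, len, off; };

struct demo_server { volatile unsigned int max_buf; };

static int demo_push_locks(struct demo_server *server, size_t hdr_size)
{
	unsigned int max_buf, max_num;
	struct demo_range *buf;

	/* read the racy field once; a concurrent "reconnect" may zero it */
	max_buf = server->max_buf;
	if (!max_buf)
		return -EINVAL;

	/* size the per-request array from the snapshot, not the live field */
	max_num = (max_buf - hdr_size) / sizeof(struct demo_range);
	buf = calloc(max_num, sizeof(struct demo_range));
	if (!buf)
		return -ENOMEM;

	/* ... fill and send up to max_num ranges per request ... */
	free(buf);
	return 0;
}

int main(void)
{
	struct demo_server srv = { .max_buf = 4096 };

	printf("rc = %d\n", demo_push_locks(&srv, 32));
	srv.max_buf = 0;	/* simulate a reconnect resetting the limit */
	printf("rc = %d\n", demo_push_locks(&srv, 32));
	return 0;
}

Without the zero check, max_buf - hdr_size underflows as an unsigned value and the allocation size goes wildly wrong, which is the bug the patch closes.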
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index e2552d2b2e42..557506ae1e2a 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -212,93 +212,6 @@ cifs_small_buf_release(void *buf_to_free)
 	return;
 }
 
-/*
- * Find a free multiplex id (SMB mid). Otherwise there could be
- * mid collisions which might cause problems, demultiplexing the
- * wrong response to this request. Multiplex ids could collide if
- * one of a series requests takes much longer than the others, or
- * if a very large number of long lived requests (byte range
- * locks or FindNotify requests) are pending. No more than
- * 64K-1 requests can be outstanding at one time. If no
- * mids are available, return zero. A future optimization
- * could make the combination of mids and uid the key we use
- * to demultiplex on (rather than mid alone).
- * In addition to the above check, the cifs demultiplex
- * code already used the command code as a secondary
- * check of the frame and if signing is negotiated the
- * response would be discarded if the mid were the same
- * but the signature was wrong. Since the mid is not put in the
- * pending queue until later (when it is about to be dispatched)
- * we do have to limit the number of outstanding requests
- * to somewhat less than 64K-1 although it is hard to imagine
- * so many threads being in the vfs at one time.
- */
-__u64 GetNextMid(struct TCP_Server_Info *server)
-{
-	__u64 mid = 0;
-	__u16 last_mid, cur_mid;
-	bool collision;
-
-	spin_lock(&GlobalMid_Lock);
-
-	/* mid is 16 bit only for CIFS/SMB */
-	cur_mid = (__u16)((server->CurrentMid) & 0xffff);
-	/* we do not want to loop forever */
-	last_mid = cur_mid;
-	cur_mid++;
-
-	/*
-	 * This nested loop looks more expensive than it is.
-	 * In practice the list of pending requests is short,
-	 * fewer than 50, and the mids are likely to be unique
-	 * on the first pass through the loop unless some request
-	 * takes longer than the 64 thousand requests before it
-	 * (and it would also have to have been a request that
-	 * did not time out).
-	 */
-	while (cur_mid != last_mid) {
-		struct mid_q_entry *mid_entry;
-		unsigned int num_mids;
-
-		collision = false;
-		if (cur_mid == 0)
-			cur_mid++;
-
-		num_mids = 0;
-		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
-			++num_mids;
-			if (mid_entry->mid == cur_mid &&
-			    mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
-				/* This mid is in use, try a different one */
-				collision = true;
-				break;
-			}
-		}
-
-		/*
-		 * if we have more than 32k mids in the list, then something
-		 * is very wrong. Possibly a local user is trying to DoS the
-		 * box by issuing long-running calls and SIGKILL'ing them. If
-		 * we get to 2^16 mids then we're in big trouble as this
-		 * function could loop forever.
-		 *
-		 * Go ahead and assign out the mid in this situation, but force
-		 * an eventual reconnect to clean out the pending_mid_q.
-		 */
-		if (num_mids > 32768)
-			server->tcpStatus = CifsNeedReconnect;
-
-		if (!collision) {
-			mid = (__u64)cur_mid;
-			server->CurrentMid = mid;
-			break;
-		}
-		cur_mid++;
-	}
-	spin_unlock(&GlobalMid_Lock);
-	return mid;
-}
-
 /* NB: MID can not be set if treeCon not passed in, in that
 	case it is responsbility of caller to set the mid */
 void
@@ -334,7 +247,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
 
 		/* Uid is not converted */
 		buffer->Uid = treeCon->ses->Suid;
-		buffer->Mid = GetNextMid(treeCon->ses->server);
+		buffer->Mid = get_next_mid(treeCon->ses->server);
 	}
 	if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
 		buffer->Flags2 |= SMBFLG2_DFS;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index d9d615fbed3f..6dec38f5522d 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -125,6 +125,94 @@ cifs_get_credits_field(struct TCP_Server_Info *server)
 	return &server->credits;
 }
 
+/*
+ * Find a free multiplex id (SMB mid). Otherwise there could be
+ * mid collisions which might cause problems, demultiplexing the
+ * wrong response to this request. Multiplex ids could collide if
+ * one of a series requests takes much longer than the others, or
+ * if a very large number of long lived requests (byte range
+ * locks or FindNotify requests) are pending. No more than
+ * 64K-1 requests can be outstanding at one time. If no
+ * mids are available, return zero. A future optimization
+ * could make the combination of mids and uid the key we use
+ * to demultiplex on (rather than mid alone).
+ * In addition to the above check, the cifs demultiplex
+ * code already used the command code as a secondary
+ * check of the frame and if signing is negotiated the
+ * response would be discarded if the mid were the same
+ * but the signature was wrong. Since the mid is not put in the
+ * pending queue until later (when it is about to be dispatched)
+ * we do have to limit the number of outstanding requests
+ * to somewhat less than 64K-1 although it is hard to imagine
+ * so many threads being in the vfs at one time.
+ */
+static __u64
+cifs_get_next_mid(struct TCP_Server_Info *server)
+{
+	__u64 mid = 0;
+	__u16 last_mid, cur_mid;
+	bool collision;
+
+	spin_lock(&GlobalMid_Lock);
+
+	/* mid is 16 bit only for CIFS/SMB */
+	cur_mid = (__u16)((server->CurrentMid) & 0xffff);
+	/* we do not want to loop forever */
+	last_mid = cur_mid;
+	cur_mid++;
+
+	/*
+	 * This nested loop looks more expensive than it is.
+	 * In practice the list of pending requests is short,
+	 * fewer than 50, and the mids are likely to be unique
+	 * on the first pass through the loop unless some request
+	 * takes longer than the 64 thousand requests before it
+	 * (and it would also have to have been a request that
+	 * did not time out).
+	 */
+	while (cur_mid != last_mid) {
+		struct mid_q_entry *mid_entry;
+		unsigned int num_mids;
+
+		collision = false;
+		if (cur_mid == 0)
+			cur_mid++;
+
+		num_mids = 0;
+		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+			++num_mids;
+			if (mid_entry->mid == cur_mid &&
+			    mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
+				/* This mid is in use, try a different one */
+				collision = true;
+				break;
+			}
+		}
+
+		/*
+		 * if we have more than 32k mids in the list, then something
+		 * is very wrong. Possibly a local user is trying to DoS the
+		 * box by issuing long-running calls and SIGKILL'ing them. If
+		 * we get to 2^16 mids then we're in big trouble as this
+		 * function could loop forever.
+		 *
+		 * Go ahead and assign out the mid in this situation, but force
+		 * an eventual reconnect to clean out the pending_mid_q.
+		 */
+		if (num_mids > 32768)
+			server->tcpStatus = CifsNeedReconnect;
+
+		if (!collision) {
+			mid = (__u64)cur_mid;
+			server->CurrentMid = mid;
+			break;
+		}
+		cur_mid++;
+	}
+	spin_unlock(&GlobalMid_Lock);
+	return mid;
+}
+
 struct smb_version_operations smb1_operations = {
 	.send_cancel = send_nt_cancel,
 	.compare_fids = cifs_compare_fids,
@@ -133,6 +221,7 @@ struct smb_version_operations smb1_operations = {
 	.add_credits = cifs_add_credits,
 	.set_credits = cifs_set_credits,
 	.get_credits_field = cifs_get_credits_field,
+	.get_next_mid = cifs_get_next_mid,
 	.read_data_offset = cifs_read_data_offset,
 	.read_data_length = cifs_read_data_length,
 	.map_error = map_smb_to_linux_error,
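For reference, the cifs_get_next_mid loop added above is a bounded wrap-around search of the 16-bit mid space: it skips mid 0 and any mid still outstanding, stops after one full wrap, and returns 0 when nothing is free. Below is a simplified user-space sketch of that search, not the kernel code; an in-use bitmap stands in for pending_mid_q, the names are hypothetical, and the 32k DoS check is omitted.

/* Illustrative only: wrap-around allocation of a 16-bit id, avoiding
 * ids that are still in use and never handing out id 0. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool mid_in_use[65536];		/* stand-in for pending_mid_q */
static uint16_t current_mid;

/* returns 0 when every usable 16-bit mid is busy */
static uint64_t demo_get_next_mid(void)
{
	uint16_t last_mid = current_mid;	/* stop after one full wrap */
	uint16_t cur_mid = (uint16_t)(current_mid + 1);

	while (cur_mid != last_mid) {
		if (cur_mid == 0) {		/* mid 0 is never handed out */
			cur_mid++;
			continue;
		}
		if (!mid_in_use[cur_mid]) {
			current_mid = cur_mid;
			return cur_mid;
		}
		cur_mid++;
	}
	return 0;
}

int main(void)
{
	mid_in_use[1] = true;			/* pretend mid 1 is outstanding */
	printf("next mid = %llu\n", (unsigned long long)demo_get_next_mid());
	return 0;
}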
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 1b36ffe6a47b..3097ee58fd7d 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -779,7 +779,7 @@ send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
 
 	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
 	pSMB->Timeout = 0;
-	pSMB->hdr.Mid = GetNextMid(ses->server);
+	pSMB->hdr.Mid = get_next_mid(ses->server);
 
 	return SendReceive(xid, ses, in_buf, out_buf,
 			&bytes_returned, 0);
