path: root/fs/cifs
Diffstat (limited to 'fs/cifs')
-rw-r--r--  fs/cifs/README         |   14
-rw-r--r--  fs/cifs/cifs_debug.c   |    9
-rw-r--r--  fs/cifs/cifs_fs_sb.h   |    4
-rw-r--r--  fs/cifs/cifsacl.c      |  347
-rw-r--r--  fs/cifs/cifsencrypt.c  |  105
-rw-r--r--  fs/cifs/cifsfs.c       |   24
-rw-r--r--  fs/cifs/cifsfs.h       |    4
-rw-r--r--  fs/cifs/cifsglob.h     |   55
-rw-r--r--  fs/cifs/cifspdu.h      |   48
-rw-r--r--  fs/cifs/cifsproto.h    |   48
-rw-r--r--  fs/cifs/cifssmb.c      |  454
-rw-r--r--  fs/cifs/connect.c      |  699
-rw-r--r--  fs/cifs/dir.c          |   22
-rw-r--r--  fs/cifs/export.c       |    4
-rw-r--r--  fs/cifs/file.c         | 1126
-rw-r--r--  fs/cifs/inode.c        |   54
-rw-r--r--  fs/cifs/link.c         |   17
-rw-r--r--  fs/cifs/misc.c         |   66
-rw-r--r--  fs/cifs/sess.c         |    4
-rw-r--r--  fs/cifs/smbencrypt.c   |  121
-rw-r--r--  fs/cifs/transport.c    |   19
-rw-r--r--  fs/cifs/xattr.c        |   42
22 files changed, 2248 insertions(+), 1038 deletions(-)
diff --git a/fs/cifs/README b/fs/cifs/README
index c5c2c5e5f0f..895da1dc155 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -745,4 +745,18 @@ installed and something like the following lines should be added to the
745create cifs.spnego * * /usr/local/sbin/cifs.upcall %k 745create cifs.spnego * * /usr/local/sbin/cifs.upcall %k
746create dns_resolver * * /usr/local/sbin/cifs.upcall %k 746create dns_resolver * * /usr/local/sbin/cifs.upcall %k
747 747
748CIFS kernel module parameters
749=============================
750These module parameters can be specified or modified either at module load
751time or at runtime via the interface
752	/sys/module/cifs/parameters/<param>
753
754e.g. echo "value" > /sys/module/cifs/parameters/<param>
755
7561. echo_retries - The number of echo attempts before giving up and
757 reconnecting to the server. The default is 5. The value 0
758 means never reconnect.
759
7602. enable_oplocks - Enable or disable oplocks. Oplocks are enabled by default.
761 [Y/y/1]. To disable use any of [N/n/0].
748 762
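
The enable_oplocks parameter documented above is wired up elsewhere in this patch (see the cifsglob.h, cifsfs.c and cifs_debug.c hunks below). A condensed, illustrative view of that pattern, not a literal copy of any single hunk; cifs_default_oplock() is a hypothetical helper name used only for this sketch:

	#include <linux/module.h>
	#include "cifsglob.h"	/* REQ_OPLOCK and the enable_oplocks extern */

	/* Global toggle, defaults to on; mode 0644 makes it writable at runtime
	 * through /sys/module/cifs/parameters/enable_oplocks. */
	bool enable_oplocks = true;
	module_param(enable_oplocks, bool, 0644);
	MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks (bool). Default: y/Y/1");

	/* Open paths request an oplock only while the toggle is set, e.g.: */
	static inline int cifs_default_oplock(void)
	{
		return enable_oplocks ? REQ_OPLOCK : 0;
	}
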
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 6d40656e1e2..84e8c072470 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -511,7 +511,7 @@ static const struct file_operations cifsFYI_proc_fops = {
511 511
512static int cifs_oplock_proc_show(struct seq_file *m, void *v) 512static int cifs_oplock_proc_show(struct seq_file *m, void *v)
513{ 513{
514 seq_printf(m, "%d\n", oplockEnabled); 514 seq_printf(m, "%d\n", enable_oplocks);
515 return 0; 515 return 0;
516} 516}
517 517
@@ -526,13 +526,16 @@ static ssize_t cifs_oplock_proc_write(struct file *file,
526 char c; 526 char c;
527 int rc; 527 int rc;
528 528
529 printk(KERN_WARNING "CIFS: The /proc/fs/cifs/OplockEnabled interface "
530 "will be removed in kernel version 3.4. Please migrate to "
531 "using the 'enable_oplocks' module parameter in cifs.ko.\n");
529 rc = get_user(c, buffer); 532 rc = get_user(c, buffer);
530 if (rc) 533 if (rc)
531 return rc; 534 return rc;
532 if (c == '0' || c == 'n' || c == 'N') 535 if (c == '0' || c == 'n' || c == 'N')
533 oplockEnabled = 0; 536 enable_oplocks = false;
534 else if (c == '1' || c == 'y' || c == 'Y') 537 else if (c == '1' || c == 'y' || c == 'Y')
535 oplockEnabled = 1; 538 enable_oplocks = true;
536 539
537 return count; 540 return count;
538} 541}
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 7260e11e21f..500d6585927 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -43,6 +43,8 @@
43#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */ 43#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */
44#define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */ 44#define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */
45#define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */ 45#define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
46#define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
47#define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
46 48
47struct cifs_sb_info { 49struct cifs_sb_info {
48 struct rb_root tlink_tree; 50 struct rb_root tlink_tree;
@@ -55,6 +57,8 @@ struct cifs_sb_info {
55 atomic_t active; 57 atomic_t active;
56 uid_t mnt_uid; 58 uid_t mnt_uid;
57 gid_t mnt_gid; 59 gid_t mnt_gid;
60 uid_t mnt_backupuid;
61 gid_t mnt_backupgid;
58 mode_t mnt_file_mode; 62 mode_t mnt_file_mode;
59 mode_t mnt_dir_mode; 63 mode_t mnt_dir_mode;
60 unsigned int mnt_cifs_flags; 64 unsigned int mnt_cifs_flags;
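
The two new mount flags and the mnt_backupuid/mnt_backupgid fields carry the new backupuid=/backupgid= mount options. The cifsproto.h hunk further down declares a backup_cred() helper that the open paths consult before adding CREATE_OPEN_BACKUP_INTENT; its body is not part of this excerpt. A minimal sketch of what such a check plausibly looks like, assuming it simply compares the caller's fsuid/groups against the stored values when the corresponding flag is set:

	#include <linux/cred.h>
	#include "cifsglob.h"
	#include "cifs_fs_sb.h"

	/* Sketch only; the in-tree helper lives in misc.c, outside this excerpt. */
	bool backup_cred(struct cifs_sb_info *cifs_sb)
	{
		if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) &&
		    cifs_sb->mnt_backupuid == current_fsuid())
			return true;

		if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) &&
		    in_group_p(cifs_sb->mnt_backupgid))
			return true;

		return false;
	}
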
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index d0f59faefb7..72ddf23ef6f 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -91,9 +91,76 @@ cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
91 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del); 91 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
92 spin_unlock(&sidgidlock); 92 spin_unlock(&sidgidlock);
93 93
94 root = &siduidtree;
95 spin_lock(&uidsidlock);
96 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
97 spin_unlock(&uidsidlock);
98
99 root = &sidgidtree;
100 spin_lock(&gidsidlock);
101 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
102 spin_unlock(&gidsidlock);
103
94 return nr_rem; 104 return nr_rem;
95} 105}
96 106
107static void
108sid_rb_insert(struct rb_root *root, unsigned long cid,
109 struct cifs_sid_id **psidid, char *typestr)
110{
111 char *strptr;
112 struct rb_node *node = root->rb_node;
113 struct rb_node *parent = NULL;
114 struct rb_node **linkto = &(root->rb_node);
115 struct cifs_sid_id *lsidid;
116
117 while (node) {
118 lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
119 parent = node;
120 if (cid > lsidid->id) {
121 linkto = &(node->rb_left);
122 node = node->rb_left;
123 }
124 if (cid < lsidid->id) {
125 linkto = &(node->rb_right);
126 node = node->rb_right;
127 }
128 }
129
130 (*psidid)->id = cid;
131 (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
132 (*psidid)->refcount = 0;
133
134 sprintf((*psidid)->sidstr, "%s", typestr);
135 strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
136 sprintf(strptr, "%ld", cid);
137
138 clear_bit(SID_ID_PENDING, &(*psidid)->state);
139 clear_bit(SID_ID_MAPPED, &(*psidid)->state);
140
141 rb_link_node(&(*psidid)->rbnode, parent, linkto);
142 rb_insert_color(&(*psidid)->rbnode, root);
143}
144
145static struct cifs_sid_id *
146sid_rb_search(struct rb_root *root, unsigned long cid)
147{
148 struct rb_node *node = root->rb_node;
149 struct cifs_sid_id *lsidid;
150
151 while (node) {
152 lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
153 if (cid > lsidid->id)
154 node = node->rb_left;
155 else if (cid < lsidid->id)
156 node = node->rb_right;
157 else /* node found */
158 return lsidid;
159 }
160
161 return NULL;
162}
163
97static struct shrinker cifs_shrinker = { 164static struct shrinker cifs_shrinker = {
98 .shrink = cifs_idmap_shrinker, 165 .shrink = cifs_idmap_shrinker,
99 .seeks = DEFAULT_SEEKS, 166 .seeks = DEFAULT_SEEKS,
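
One detail worth noting in the rb-tree helpers added above: sid_rb_insert() and sid_rb_search() both send larger ids down the left subtree and smaller ids down the right, the mirror image of the usual kernel rbtree convention. Because insert and lookup agree on the ordering, the tree stays self-consistent. Condensed for illustration:

	/* Ordering shared by sid_rb_insert()/sid_rb_search() (paraphrased): */
	if (cid > lsidid->id)
		node = node->rb_left;	/* larger keys descend LEFT  */
	else if (cid < lsidid->id)
		node = node->rb_right;	/* smaller keys descend RIGHT */
	else
		return lsidid;		/* exact match found */
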
@@ -110,6 +177,7 @@ cifs_idmap_key_instantiate(struct key *key, const void *data, size_t datalen)
110 177
111 memcpy(payload, data, datalen); 178 memcpy(payload, data, datalen);
112 key->payload.data = payload; 179 key->payload.data = payload;
180 key->datalen = datalen;
113 return 0; 181 return 0;
114} 182}
115 183
@@ -224,6 +292,120 @@ sidid_pending_wait(void *unused)
224} 292}
225 293
226static int 294static int
295id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
296{
297 int rc = 0;
298 struct key *sidkey;
299 const struct cred *saved_cred;
300 struct cifs_sid *lsid;
301 struct cifs_sid_id *psidid, *npsidid;
302 struct rb_root *cidtree;
303 spinlock_t *cidlock;
304
305 if (sidtype == SIDOWNER) {
306 cidlock = &siduidlock;
307 cidtree = &uidtree;
308 } else if (sidtype == SIDGROUP) {
309 cidlock = &sidgidlock;
310 cidtree = &gidtree;
311 } else
312 return -EINVAL;
313
314 spin_lock(cidlock);
315 psidid = sid_rb_search(cidtree, cid);
316
317 if (!psidid) { /* node does not exist, allocate one & attempt adding */
318 spin_unlock(cidlock);
319 npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
320 if (!npsidid)
321 return -ENOMEM;
322
323 npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
324 if (!npsidid->sidstr) {
325 kfree(npsidid);
326 return -ENOMEM;
327 }
328
329 spin_lock(cidlock);
330 psidid = sid_rb_search(cidtree, cid);
331 if (psidid) { /* node happened to get inserted meanwhile */
332 ++psidid->refcount;
333 spin_unlock(cidlock);
334 kfree(npsidid->sidstr);
335 kfree(npsidid);
336 } else {
337 psidid = npsidid;
338 sid_rb_insert(cidtree, cid, &psidid,
339 sidtype == SIDOWNER ? "oi:" : "gi:");
340 ++psidid->refcount;
341 spin_unlock(cidlock);
342 }
343 } else {
344 ++psidid->refcount;
345 spin_unlock(cidlock);
346 }
347
348 /*
349 * If we are here, it is safe to access psidid and its fields
350 * since a reference was taken earlier while holding the spinlock.
351 * A reference on the node is put without holding the spinlock
352 * and it is OK to do so in this case, shrinker will not erase
353 * this node until all references are put and we do not access
354 * any fields of the node after a reference is put .
355 */
356 if (test_bit(SID_ID_MAPPED, &psidid->state)) {
357 memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
358 psidid->time = jiffies; /* update ts for accessing */
359 goto id_sid_out;
360 }
361
362 if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
363 rc = -EINVAL;
364 goto id_sid_out;
365 }
366
367 if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
368 saved_cred = override_creds(root_cred);
369 sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
370 if (IS_ERR(sidkey)) {
371 rc = -EINVAL;
372 cFYI(1, "%s: Can't map and id to a SID", __func__);
373 } else {
374 lsid = (struct cifs_sid *)sidkey->payload.data;
375 memcpy(&psidid->sid, lsid,
376 sidkey->datalen < sizeof(struct cifs_sid) ?
377 sidkey->datalen : sizeof(struct cifs_sid));
378 memcpy(ssid, &psidid->sid,
379 sidkey->datalen < sizeof(struct cifs_sid) ?
380 sidkey->datalen : sizeof(struct cifs_sid));
381 set_bit(SID_ID_MAPPED, &psidid->state);
382 key_put(sidkey);
383 kfree(psidid->sidstr);
384 }
385 psidid->time = jiffies; /* update ts for accessing */
386 revert_creds(saved_cred);
387 clear_bit(SID_ID_PENDING, &psidid->state);
388 wake_up_bit(&psidid->state, SID_ID_PENDING);
389 } else {
390 rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
391 sidid_pending_wait, TASK_INTERRUPTIBLE);
392 if (rc) {
393 cFYI(1, "%s: sidid_pending_wait interrupted %d",
394 __func__, rc);
395 --psidid->refcount;
396 return rc;
397 }
398 if (test_bit(SID_ID_MAPPED, &psidid->state))
399 memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
400 else
401 rc = -EINVAL;
402 }
403id_sid_out:
404 --psidid->refcount;
405 return rc;
406}
407
408static int
227sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid, 409sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
228 struct cifs_fattr *fattr, uint sidtype) 410 struct cifs_fattr *fattr, uint sidtype)
229{ 411{
@@ -383,6 +565,10 @@ init_cifs_idmap(void)
383 spin_lock_init(&sidgidlock); 565 spin_lock_init(&sidgidlock);
384 gidtree = RB_ROOT; 566 gidtree = RB_ROOT;
385 567
568 spin_lock_init(&uidsidlock);
569 siduidtree = RB_ROOT;
570 spin_lock_init(&gidsidlock);
571 sidgidtree = RB_ROOT;
386 register_shrinker(&cifs_shrinker); 572 register_shrinker(&cifs_shrinker);
387 573
388 cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring)); 574 cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
@@ -422,6 +608,18 @@ cifs_destroy_idmaptrees(void)
422 while ((node = rb_first(root))) 608 while ((node = rb_first(root)))
423 rb_erase(node, root); 609 rb_erase(node, root);
424 spin_unlock(&sidgidlock); 610 spin_unlock(&sidgidlock);
611
612 root = &siduidtree;
613 spin_lock(&uidsidlock);
614 while ((node = rb_first(root)))
615 rb_erase(node, root);
616 spin_unlock(&uidsidlock);
617
618 root = &sidgidtree;
619 spin_lock(&gidsidlock);
620 while ((node = rb_first(root)))
621 rb_erase(node, root);
622 spin_unlock(&gidsidlock);
425} 623}
426 624
427/* if the two SIDs (roughly equivalent to a UUID for a user or group) are 625/* if the two SIDs (roughly equivalent to a UUID for a user or group) are
@@ -706,7 +904,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
706 acl_size = sizeof(struct cifs_acl); 904 acl_size = sizeof(struct cifs_acl);
707 905
708 num_aces = le32_to_cpu(pdacl->num_aces); 906 num_aces = le32_to_cpu(pdacl->num_aces);
709 if (num_aces > 0) { 907 if (num_aces > 0) {
710 umode_t user_mask = S_IRWXU; 908 umode_t user_mask = S_IRWXU;
711 umode_t group_mask = S_IRWXG; 909 umode_t group_mask = S_IRWXG;
712 umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO; 910 umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
@@ -868,52 +1066,82 @@ static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
868 else 1066 else
869 cFYI(1, "no ACL"); /* BB grant all or default perms? */ 1067 cFYI(1, "no ACL"); /* BB grant all or default perms? */
870 1068
871/* cifscred->uid = owner_sid_ptr->rid;
872 cifscred->gid = group_sid_ptr->rid;
873 memcpy((void *)(&(cifscred->osid)), (void *)owner_sid_ptr,
874 sizeof(struct cifs_sid));
875 memcpy((void *)(&(cifscred->gsid)), (void *)group_sid_ptr,
876 sizeof(struct cifs_sid)); */
877
878 return rc; 1069 return rc;
879} 1070}
880 1071
881
882/* Convert permission bits from mode to equivalent CIFS ACL */ 1072/* Convert permission bits from mode to equivalent CIFS ACL */
883static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd, 1073static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
884 struct inode *inode, __u64 nmode) 1074 __u32 secdesclen, __u64 nmode, uid_t uid, gid_t gid, int *aclflag)
885{ 1075{
886 int rc = 0; 1076 int rc = 0;
887 __u32 dacloffset; 1077 __u32 dacloffset;
888 __u32 ndacloffset; 1078 __u32 ndacloffset;
889 __u32 sidsoffset; 1079 __u32 sidsoffset;
890 struct cifs_sid *owner_sid_ptr, *group_sid_ptr; 1080 struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
1081 struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
891 struct cifs_acl *dacl_ptr = NULL; /* no need for SACL ptr */ 1082 struct cifs_acl *dacl_ptr = NULL; /* no need for SACL ptr */
892 struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */ 1083 struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
893 1084
894 if ((inode == NULL) || (pntsd == NULL) || (pnntsd == NULL)) 1085 if (nmode != NO_CHANGE_64) { /* chmod */
895 return -EIO; 1086 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
896
897 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
898 le32_to_cpu(pntsd->osidoffset)); 1087 le32_to_cpu(pntsd->osidoffset));
899 group_sid_ptr = (struct cifs_sid *)((char *)pntsd + 1088 group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
900 le32_to_cpu(pntsd->gsidoffset)); 1089 le32_to_cpu(pntsd->gsidoffset));
901 1090 dacloffset = le32_to_cpu(pntsd->dacloffset);
902 dacloffset = le32_to_cpu(pntsd->dacloffset); 1091 dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
903 dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset); 1092 ndacloffset = sizeof(struct cifs_ntsd);
904 1093 ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
905 ndacloffset = sizeof(struct cifs_ntsd); 1094 ndacl_ptr->revision = dacl_ptr->revision;
906 ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset); 1095 ndacl_ptr->size = 0;
907 ndacl_ptr->revision = dacl_ptr->revision; 1096 ndacl_ptr->num_aces = 0;
908 ndacl_ptr->size = 0; 1097
909 ndacl_ptr->num_aces = 0; 1098 rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,
910 1099 nmode);
911 rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr, nmode); 1100 sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
912 1101 /* copy sec desc control portion & owner and group sids */
913 sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size); 1102 copy_sec_desc(pntsd, pnntsd, sidsoffset);
914 1103 *aclflag = CIFS_ACL_DACL;
915 /* copy security descriptor control portion and owner and group sid */ 1104 } else {
916 copy_sec_desc(pntsd, pnntsd, sidsoffset); 1105 memcpy(pnntsd, pntsd, secdesclen);
1106 if (uid != NO_CHANGE_32) { /* chown */
1107 owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
1108 le32_to_cpu(pnntsd->osidoffset));
1109 nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
1110 GFP_KERNEL);
1111 if (!nowner_sid_ptr)
1112 return -ENOMEM;
1113 rc = id_to_sid(uid, SIDOWNER, nowner_sid_ptr);
1114 if (rc) {
1115 cFYI(1, "%s: Mapping error %d for owner id %d",
1116 __func__, rc, uid);
1117 kfree(nowner_sid_ptr);
1118 return rc;
1119 }
1120 memcpy(owner_sid_ptr, nowner_sid_ptr,
1121 sizeof(struct cifs_sid));
1122 kfree(nowner_sid_ptr);
1123 *aclflag = CIFS_ACL_OWNER;
1124 }
1125 if (gid != NO_CHANGE_32) { /* chgrp */
1126 group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
1127 le32_to_cpu(pnntsd->gsidoffset));
1128 ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
1129 GFP_KERNEL);
1130 if (!ngroup_sid_ptr)
1131 return -ENOMEM;
1132 rc = id_to_sid(gid, SIDGROUP, ngroup_sid_ptr);
1133 if (rc) {
1134 cFYI(1, "%s: Mapping error %d for group id %d",
1135 __func__, rc, gid);
1136 kfree(ngroup_sid_ptr);
1137 return rc;
1138 }
1139 memcpy(group_sid_ptr, ngroup_sid_ptr,
1140 sizeof(struct cifs_sid));
1141 kfree(ngroup_sid_ptr);
1142 *aclflag = CIFS_ACL_GROUP;
1143 }
1144 }
917 1145
918 return rc; 1146 return rc;
919} 1147}
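
build_sec_desc() now distinguishes three cases: a chmod (nmode != NO_CHANGE_64) rebuilds the DACL, while a chown or chgrp copies the existing descriptor and rewrites only the owner or group SID via id_to_sid(), reporting which portion changed through *aclflag. Its caller is id_mode_to_cifs_acl(), renamed from mode_to_cifs_acl() below. The matching inode.c call sites are not shown in this excerpt; presumably they follow the cifsproto.h declaration along these lines:

	/* Assumed call shapes, based on the cifsproto.h prototype further down. */

	/* chmod: only the mode is meaningful */
	rc = id_mode_to_cifs_acl(inode, full_path, mode,
				 NO_CHANGE_32, NO_CHANGE_32);

	/* chown/chgrp: mode untouched, uid/gid carry the new owner/group */
	rc = id_mode_to_cifs_acl(inode, full_path, NO_CHANGE_64,
				 attrs->ia_uid, attrs->ia_gid);
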
@@ -945,7 +1173,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
945{ 1173{
946 struct cifs_ntsd *pntsd = NULL; 1174 struct cifs_ntsd *pntsd = NULL;
947 int oplock = 0; 1175 int oplock = 0;
948 int xid, rc; 1176 int xid, rc, create_options = 0;
949 __u16 fid; 1177 __u16 fid;
950 struct cifs_tcon *tcon; 1178 struct cifs_tcon *tcon;
951 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 1179 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
@@ -956,9 +1184,12 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
956 tcon = tlink_tcon(tlink); 1184 tcon = tlink_tcon(tlink);
957 xid = GetXid(); 1185 xid = GetXid();
958 1186
959 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL, 0, 1187 if (backup_cred(cifs_sb))
960 &fid, &oplock, NULL, cifs_sb->local_nls, 1188 create_options |= CREATE_OPEN_BACKUP_INTENT;
961 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 1189
1190 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL,
1191 create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
1192 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
962 if (!rc) { 1193 if (!rc) {
963 rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen); 1194 rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
964 CIFSSMBClose(xid, tcon, fid); 1195 CIFSSMBClose(xid, tcon, fid);
@@ -991,13 +1222,15 @@ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
991 return pntsd; 1222 return pntsd;
992} 1223}
993 1224
994static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path, 1225 /* Set an ACL on the server */
995 struct cifs_ntsd *pnntsd, u32 acllen) 1226int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
1227 struct inode *inode, const char *path, int aclflag)
996{ 1228{
997 int oplock = 0; 1229 int oplock = 0;
998 int xid, rc; 1230 int xid, rc, access_flags, create_options = 0;
999 __u16 fid; 1231 __u16 fid;
1000 struct cifs_tcon *tcon; 1232 struct cifs_tcon *tcon;
1233 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1001 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 1234 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1002 1235
1003 if (IS_ERR(tlink)) 1236 if (IS_ERR(tlink))
@@ -1006,15 +1239,23 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
1006 tcon = tlink_tcon(tlink); 1239 tcon = tlink_tcon(tlink);
1007 xid = GetXid(); 1240 xid = GetXid();
1008 1241
1009 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, WRITE_DAC, 0, 1242 if (backup_cred(cifs_sb))
1010 &fid, &oplock, NULL, cifs_sb->local_nls, 1243 create_options |= CREATE_OPEN_BACKUP_INTENT;
1011 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 1244
1245 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
1246 access_flags = WRITE_OWNER;
1247 else
1248 access_flags = WRITE_DAC;
1249
1250 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, access_flags,
1251 create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
1252 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
1012 if (rc) { 1253 if (rc) {
1013 cERROR(1, "Unable to open file to set ACL"); 1254 cERROR(1, "Unable to open file to set ACL");
1014 goto out; 1255 goto out;
1015 } 1256 }
1016 1257
1017 rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen); 1258 rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen, aclflag);
1018 cFYI(DBG2, "SetCIFSACL rc = %d", rc); 1259 cFYI(DBG2, "SetCIFSACL rc = %d", rc);
1019 1260
1020 CIFSSMBClose(xid, tcon, fid); 1261 CIFSSMBClose(xid, tcon, fid);
@@ -1024,17 +1265,6 @@ out:
1024 return rc; 1265 return rc;
1025} 1266}
1026 1267
1027/* Set an ACL on the server */
1028int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
1029 struct inode *inode, const char *path)
1030{
1031 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1032
1033 cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode);
1034
1035 return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen);
1036}
1037
1038/* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */ 1268/* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */
1039int 1269int
1040cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, 1270cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
@@ -1066,9 +1296,12 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
1066} 1296}
1067 1297
1068/* Convert mode bits to an ACL so we can update the ACL on the server */ 1298/* Convert mode bits to an ACL so we can update the ACL on the server */
1069int mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode) 1299int
1300id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
1301 uid_t uid, gid_t gid)
1070{ 1302{
1071 int rc = 0; 1303 int rc = 0;
1304 int aclflag = CIFS_ACL_DACL; /* default flag to set */
1072 __u32 secdesclen = 0; 1305 __u32 secdesclen = 0;
1073 struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */ 1306 struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
1074 struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */ 1307 struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
@@ -1098,13 +1331,15 @@ int mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode)
1098 return -ENOMEM; 1331 return -ENOMEM;
1099 } 1332 }
1100 1333
1101 rc = build_sec_desc(pntsd, pnntsd, inode, nmode); 1334 rc = build_sec_desc(pntsd, pnntsd, secdesclen, nmode, uid, gid,
1335 &aclflag);
1102 1336
1103 cFYI(DBG2, "build_sec_desc rc: %d", rc); 1337 cFYI(DBG2, "build_sec_desc rc: %d", rc);
1104 1338
1105 if (!rc) { 1339 if (!rc) {
1106 /* Set the security descriptor */ 1340 /* Set the security descriptor */
1107 rc = set_cifs_acl(pnntsd, secdesclen, inode, path); 1341 rc = set_cifs_acl(pnntsd, secdesclen, inode,
1342 path, aclflag);
1108 cFYI(DBG2, "set_cifs_acl rc: %d", rc); 1343 cFYI(DBG2, "set_cifs_acl rc: %d", rc);
1109 } 1344 }
1110 1345
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 30acd22147e..2cfb695d1f8 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -37,83 +37,8 @@
37 * the sequence number before this function is called. Also, this function 37 * the sequence number before this function is called. Also, this function
38 * should be called with the server->srv_mutex held. 38 * should be called with the server->srv_mutex held.
39 */ 39 */
40static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, 40static int cifs_calc_signature(const struct kvec *iov, int n_vec,
41 struct TCP_Server_Info *server, char *signature) 41 struct TCP_Server_Info *server, char *signature)
42{
43 int rc;
44
45 if (cifs_pdu == NULL || signature == NULL || server == NULL)
46 return -EINVAL;
47
48 if (!server->secmech.sdescmd5) {
49 cERROR(1, "%s: Can't generate signature\n", __func__);
50 return -1;
51 }
52
53 rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
54 if (rc) {
55 cERROR(1, "%s: Could not init md5\n", __func__);
56 return rc;
57 }
58
59 rc = crypto_shash_update(&server->secmech.sdescmd5->shash,
60 server->session_key.response, server->session_key.len);
61 if (rc) {
62 cERROR(1, "%s: Could not update with response\n", __func__);
63 return rc;
64 }
65
66 rc = crypto_shash_update(&server->secmech.sdescmd5->shash,
67 cifs_pdu->Protocol, be32_to_cpu(cifs_pdu->smb_buf_length));
68 if (rc) {
69 cERROR(1, "%s: Could not update with payload\n", __func__);
70 return rc;
71 }
72
73 rc = crypto_shash_final(&server->secmech.sdescmd5->shash, signature);
74 if (rc)
75 cERROR(1, "%s: Could not generate md5 hash\n", __func__);
76
77 return rc;
78}
79
80/* must be called with server->srv_mutex held */
81int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
82 __u32 *pexpected_response_sequence_number)
83{
84 int rc = 0;
85 char smb_signature[20];
86
87 if ((cifs_pdu == NULL) || (server == NULL))
88 return -EINVAL;
89
90 if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
91 server->tcpStatus == CifsNeedNegotiate)
92 return rc;
93
94 if (!server->session_estab) {
95 strncpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
96 return rc;
97 }
98
99 cifs_pdu->Signature.Sequence.SequenceNumber =
100 cpu_to_le32(server->sequence_number);
101 cifs_pdu->Signature.Sequence.Reserved = 0;
102
103 *pexpected_response_sequence_number = server->sequence_number++;
104 server->sequence_number++;
105
106 rc = cifs_calculate_signature(cifs_pdu, server, smb_signature);
107 if (rc)
108 memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
109 else
110 memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
111
112 return rc;
113}
114
115static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
116 struct TCP_Server_Info *server, char *signature)
117{ 42{
118 int i; 43 int i;
119 int rc; 44 int rc;
@@ -179,7 +104,7 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
179{ 104{
180 int rc = 0; 105 int rc = 0;
181 char smb_signature[20]; 106 char smb_signature[20];
182 struct smb_hdr *cifs_pdu = iov[0].iov_base; 107 struct smb_hdr *cifs_pdu = (struct smb_hdr *)iov[0].iov_base;
183 108
184 if ((cifs_pdu == NULL) || (server == NULL)) 109 if ((cifs_pdu == NULL) || (server == NULL))
185 return -EINVAL; 110 return -EINVAL;
@@ -189,7 +114,7 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
189 return rc; 114 return rc;
190 115
191 if (!server->session_estab) { 116 if (!server->session_estab) {
192 strncpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8); 117 memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
193 return rc; 118 return rc;
194 } 119 }
195 120
@@ -200,7 +125,7 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
200 *pexpected_response_sequence_number = server->sequence_number++; 125 *pexpected_response_sequence_number = server->sequence_number++;
201 server->sequence_number++; 126 server->sequence_number++;
202 127
203 rc = cifs_calc_signature2(iov, n_vec, server, smb_signature); 128 rc = cifs_calc_signature(iov, n_vec, server, smb_signature);
204 if (rc) 129 if (rc)
205 memset(cifs_pdu->Signature.SecuritySignature, 0, 8); 130 memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
206 else 131 else
@@ -209,13 +134,27 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
209 return rc; 134 return rc;
210} 135}
211 136
212int cifs_verify_signature(struct smb_hdr *cifs_pdu, 137/* must be called with server->srv_mutex held */
138int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
139 __u32 *pexpected_response_sequence_number)
140{
141 struct kvec iov;
142
143 iov.iov_base = cifs_pdu;
144 iov.iov_len = be32_to_cpu(cifs_pdu->smb_buf_length) + 4;
145
146 return cifs_sign_smb2(&iov, 1, server,
147 pexpected_response_sequence_number);
148}
149
150int cifs_verify_signature(struct kvec *iov, unsigned int nr_iov,
213 struct TCP_Server_Info *server, 151 struct TCP_Server_Info *server,
214 __u32 expected_sequence_number) 152 __u32 expected_sequence_number)
215{ 153{
216 unsigned int rc; 154 unsigned int rc;
217 char server_response_sig[8]; 155 char server_response_sig[8];
218 char what_we_think_sig_should_be[20]; 156 char what_we_think_sig_should_be[20];
157 struct smb_hdr *cifs_pdu = (struct smb_hdr *)iov[0].iov_base;
219 158
220 if (cifs_pdu == NULL || server == NULL) 159 if (cifs_pdu == NULL || server == NULL)
221 return -EINVAL; 160 return -EINVAL;
@@ -247,8 +186,8 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
247 cifs_pdu->Signature.Sequence.Reserved = 0; 186 cifs_pdu->Signature.Sequence.Reserved = 0;
248 187
249 mutex_lock(&server->srv_mutex); 188 mutex_lock(&server->srv_mutex);
250 rc = cifs_calculate_signature(cifs_pdu, server, 189 rc = cifs_calc_signature(iov, nr_iov, server,
251 what_we_think_sig_should_be); 190 what_we_think_sig_should_be);
252 mutex_unlock(&server->srv_mutex); 191 mutex_unlock(&server->srv_mutex);
253 192
254 if (rc) 193 if (rc)
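
The net effect of this cifsencrypt.c rework: the old single-buffer cifs_calculate_signature() is gone, the kvec-based helper (formerly cifs_calc_signature2()) is renamed to cifs_calc_signature(), and cifs_sign_smb() plus cifs_verify_signature() become thin kvec wrappers. The body of the kvec walker is unchanged by the patch and therefore not reproduced in the diff; conceptually it hashes the session key and then each fragment, as in this sketch (the in-tree version additionally skips the 4-byte RFC1002 length prefix at the start of iov[0]):

	/* Sketch of the kvec-based signature walk; not a literal copy. */
	static int calc_signature_sketch(const struct kvec *iov, int n_vec,
					 struct TCP_Server_Info *server,
					 char *signature)
	{
		struct shash_desc *shash = &server->secmech.sdescmd5->shash;
		int i, rc;

		rc = crypto_shash_init(shash);
		if (rc)
			return rc;

		/* session key first, as the removed single-buffer version did */
		rc = crypto_shash_update(shash, server->session_key.response,
					 server->session_key.len);
		if (rc)
			return rc;

		/* then every fragment of the SMB being signed or verified */
		for (i = 0; i < n_vec; i++) {
			if (iov[i].iov_len == 0)
				continue;
			rc = crypto_shash_update(shash, iov[i].iov_base,
						 iov[i].iov_len);
			if (rc)
				return rc;
		}

		return crypto_shash_final(shash, signature);
	}
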
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index db7ce87d37a..8f1fe324162 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -53,7 +53,7 @@
53int cifsFYI = 0; 53int cifsFYI = 0;
54int cifsERROR = 1; 54int cifsERROR = 1;
55int traceSMB = 0; 55int traceSMB = 0;
56unsigned int oplockEnabled = 1; 56bool enable_oplocks = true;
57unsigned int linuxExtEnabled = 1; 57unsigned int linuxExtEnabled = 1;
58unsigned int lookupCacheEnabled = 1; 58unsigned int lookupCacheEnabled = 1;
59unsigned int multiuser_mount = 0; 59unsigned int multiuser_mount = 0;
@@ -74,7 +74,7 @@ module_param(cifs_min_small, int, 0);
74MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 " 74MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
75 "Range: 2 to 256"); 75 "Range: 2 to 256");
76unsigned int cifs_max_pending = CIFS_MAX_REQ; 76unsigned int cifs_max_pending = CIFS_MAX_REQ;
77module_param(cifs_max_pending, int, 0); 77module_param(cifs_max_pending, int, 0444);
78MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. " 78MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
79 "Default: 50 Range: 2 to 256"); 79 "Default: 50 Range: 2 to 256");
80unsigned short echo_retries = 5; 80unsigned short echo_retries = 5;
@@ -82,6 +82,10 @@ module_param(echo_retries, ushort, 0644);
82MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and " 82MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and "
83 "reconnecting server. Default: 5. 0 means " 83 "reconnecting server. Default: 5. 0 means "
84 "never reconnect."); 84 "never reconnect.");
85module_param(enable_oplocks, bool, 0644);
86MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks (bool). Default:"
87 "y/Y/1");
88
85extern mempool_t *cifs_sm_req_poolp; 89extern mempool_t *cifs_sm_req_poolp;
86extern mempool_t *cifs_req_poolp; 90extern mempool_t *cifs_req_poolp;
87extern mempool_t *cifs_mid_poolp; 91extern mempool_t *cifs_mid_poolp;
@@ -132,12 +136,12 @@ cifs_read_super(struct super_block *sb)
132 else 136 else
133 sb->s_d_op = &cifs_dentry_ops; 137 sb->s_d_op = &cifs_dentry_ops;
134 138
135#ifdef CIFS_NFSD_EXPORT 139#ifdef CONFIG_CIFS_NFSD_EXPORT
136 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { 140 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
137 cFYI(1, "export ops supported"); 141 cFYI(1, "export ops supported");
138 sb->s_export_op = &cifs_export_ops; 142 sb->s_export_op = &cifs_export_ops;
139 } 143 }
140#endif /* CIFS_NFSD_EXPORT */ 144#endif /* CONFIG_CIFS_NFSD_EXPORT */
141 145
142 return 0; 146 return 0;
143 147
@@ -432,6 +436,12 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
432 seq_printf(s, ",mfsymlinks"); 436 seq_printf(s, ",mfsymlinks");
433 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) 437 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
434 seq_printf(s, ",fsc"); 438 seq_printf(s, ",fsc");
439 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
440 seq_printf(s, ",nostrictsync");
441 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
442 seq_printf(s, ",noperm");
443 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
444 seq_printf(s, ",strictcache");
435 445
436 seq_printf(s, ",rsize=%d", cifs_sb->rsize); 446 seq_printf(s, ",rsize=%d", cifs_sb->rsize);
437 seq_printf(s, ",wsize=%d", cifs_sb->wsize); 447 seq_printf(s, ",wsize=%d", cifs_sb->wsize);
@@ -530,7 +540,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
530 char *full_path = NULL; 540 char *full_path = NULL;
531 char *s, *p; 541 char *s, *p;
532 char sep; 542 char sep;
533 int xid;
534 543
535 full_path = cifs_build_path_to_root(vol, cifs_sb, 544 full_path = cifs_build_path_to_root(vol, cifs_sb,
536 cifs_sb_master_tcon(cifs_sb)); 545 cifs_sb_master_tcon(cifs_sb));
@@ -539,7 +548,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
539 548
540 cFYI(1, "Get root dentry for %s", full_path); 549 cFYI(1, "Get root dentry for %s", full_path);
541 550
542 xid = GetXid();
543 sep = CIFS_DIR_SEP(cifs_sb); 551 sep = CIFS_DIR_SEP(cifs_sb);
544 dentry = dget(sb->s_root); 552 dentry = dget(sb->s_root);
545 p = s = full_path; 553 p = s = full_path;
@@ -570,7 +578,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
570 dput(dentry); 578 dput(dentry);
571 dentry = child; 579 dentry = child;
572 } while (!IS_ERR(dentry)); 580 } while (!IS_ERR(dentry));
573 _FreeXid(xid);
574 kfree(full_path); 581 kfree(full_path);
575 return dentry; 582 return dentry;
576} 583}
@@ -942,7 +949,8 @@ cifs_init_once(void *inode)
942 struct cifsInodeInfo *cifsi = inode; 949 struct cifsInodeInfo *cifsi = inode;
943 950
944 inode_init_once(&cifsi->vfs_inode); 951 inode_init_once(&cifsi->vfs_inode);
945 INIT_LIST_HEAD(&cifsi->lockList); 952 INIT_LIST_HEAD(&cifsi->llist);
953 mutex_init(&cifsi->lock_mutex);
946} 954}
947 955
948static int 956static int
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 95da8027983..d9dbaf869cd 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -121,9 +121,9 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
121extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); 121extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
122extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); 122extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
123 123
124#ifdef CIFS_NFSD_EXPORT 124#ifdef CONFIG_CIFS_NFSD_EXPORT
125extern const struct export_operations cifs_export_ops; 125extern const struct export_operations cifs_export_ops;
126#endif /* CIFS_NFSD_EXPORT */ 126#endif /* CONFIG_CIFS_NFSD_EXPORT */
127 127
128#define CIFS_VERSION "1.75" 128#define CIFS_VERSION "1.75"
129#endif /* _CIFSFS_H */ 129#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 95dad9d14cf..8238aa13e01 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -167,6 +167,8 @@ struct smb_vol {
167 uid_t cred_uid; 167 uid_t cred_uid;
168 uid_t linux_uid; 168 uid_t linux_uid;
169 gid_t linux_gid; 169 gid_t linux_gid;
170 uid_t backupuid;
171 gid_t backupgid;
170 mode_t file_mode; 172 mode_t file_mode;
171 mode_t dir_mode; 173 mode_t dir_mode;
172 unsigned secFlg; 174 unsigned secFlg;
@@ -179,6 +181,8 @@ struct smb_vol {
179 bool noperm:1; 181 bool noperm:1;
180 bool no_psx_acl:1; /* set if posix acl support should be disabled */ 182 bool no_psx_acl:1; /* set if posix acl support should be disabled */
181 bool cifs_acl:1; 183 bool cifs_acl:1;
184 bool backupuid_specified; /* mount option backupuid is specified */
185 bool backupgid_specified; /* mount option backupgid is specified */
182 bool no_xattr:1; /* set if xattr (EA) support should be disabled*/ 186 bool no_xattr:1; /* set if xattr (EA) support should be disabled*/
183 bool server_ino:1; /* use inode numbers from server ie UniqueId */ 187 bool server_ino:1; /* use inode numbers from server ie UniqueId */
184 bool direct_io:1; 188 bool direct_io:1;
@@ -219,7 +223,8 @@ struct smb_vol {
219 CIFS_MOUNT_OVERR_GID | CIFS_MOUNT_DYNPERM | \ 223 CIFS_MOUNT_OVERR_GID | CIFS_MOUNT_DYNPERM | \
220 CIFS_MOUNT_NOPOSIXBRL | CIFS_MOUNT_NOSSYNC | \ 224 CIFS_MOUNT_NOPOSIXBRL | CIFS_MOUNT_NOSSYNC | \
221 CIFS_MOUNT_FSCACHE | CIFS_MOUNT_MF_SYMLINKS | \ 225 CIFS_MOUNT_FSCACHE | CIFS_MOUNT_MF_SYMLINKS | \
222 CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO) 226 CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO | \
227 CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID)
223 228
224#define CIFS_MS_MASK (MS_RDONLY | MS_MANDLOCK | MS_NOEXEC | MS_NOSUID | \ 229#define CIFS_MS_MASK (MS_RDONLY | MS_MANDLOCK | MS_NOEXEC | MS_NOSUID | \
225 MS_NODEV | MS_SYNCHRONOUS) 230 MS_NODEV | MS_SYNCHRONOUS)
@@ -286,7 +291,13 @@ struct TCP_Server_Info {
286 bool sec_kerberosu2u; /* supports U2U Kerberos */ 291 bool sec_kerberosu2u; /* supports U2U Kerberos */
287 bool sec_kerberos; /* supports plain Kerberos */ 292 bool sec_kerberos; /* supports plain Kerberos */
288 bool sec_mskerberos; /* supports legacy MS Kerberos */ 293 bool sec_mskerberos; /* supports legacy MS Kerberos */
294 bool large_buf; /* is current buffer large? */
289 struct delayed_work echo; /* echo ping workqueue job */ 295 struct delayed_work echo; /* echo ping workqueue job */
296 struct kvec *iov; /* reusable kvec array for receives */
297 unsigned int nr_iov; /* number of kvecs in array */
298 char *smallbuf; /* pointer to current "small" buffer */
299 char *bigbuf; /* pointer to current "big" buffer */
300 unsigned int total_read; /* total amount of data read in this pass */
290#ifdef CONFIG_CIFS_FSCACHE 301#ifdef CONFIG_CIFS_FSCACHE
291 struct fscache_cookie *fscache; /* client index cache cookie */ 302 struct fscache_cookie *fscache; /* client index cache cookie */
292#endif 303#endif
@@ -485,9 +496,13 @@ extern struct cifs_tcon *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
485 */ 496 */
486struct cifsLockInfo { 497struct cifsLockInfo {
487 struct list_head llist; /* pointer to next cifsLockInfo */ 498 struct list_head llist; /* pointer to next cifsLockInfo */
499 struct list_head blist; /* pointer to locks blocked on this */
500 wait_queue_head_t block_q;
488 __u64 offset; 501 __u64 offset;
489 __u64 length; 502 __u64 length;
503 __u32 pid;
490 __u8 type; 504 __u8 type;
505 __u16 netfid;
491}; 506};
492 507
493/* 508/*
@@ -520,8 +535,6 @@ struct cifsFileInfo {
520 struct dentry *dentry; 535 struct dentry *dentry;
521 unsigned int f_flags; 536 unsigned int f_flags;
522 struct tcon_link *tlink; 537 struct tcon_link *tlink;
523 struct mutex lock_mutex;
524 struct list_head llist; /* list of byte range locks we have. */
525 bool invalidHandle:1; /* file closed via session abend */ 538 bool invalidHandle:1; /* file closed via session abend */
526 bool oplock_break_cancelled:1; 539 bool oplock_break_cancelled:1;
527 int count; /* refcount protected by cifs_file_list_lock */ 540 int count; /* refcount protected by cifs_file_list_lock */
@@ -554,7 +567,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
554 */ 567 */
555 568
556struct cifsInodeInfo { 569struct cifsInodeInfo {
557 struct list_head lockList; 570 struct list_head llist; /* brlocks for this inode */
571 bool can_cache_brlcks;
572 struct mutex lock_mutex; /* protect two fields above */
558 /* BB add in lists for dirty pages i.e. write caching info for oplock */ 573 /* BB add in lists for dirty pages i.e. write caching info for oplock */
559 struct list_head openFileList; 574 struct list_head openFileList;
560 __u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */ 575 __u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
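
Byte-range lock state moves here from cifsFileInfo: the per-file lock_mutex and llist are removed above, and cifsInodeInfo instead keeps one list of cifsLockInfo entries (each now tagged with the pid and netfid it was taken under, plus a blocked-waiters list and wait queue) guarded by a per-inode mutex, with can_cache_brlcks tracking whether locks may stay client-side. The file.c side of this change appears later in the patch; a minimal sketch of creating and queueing one entry under the new layout, as an assumption of how that code uses these fields:

	/* Sketch: allocate a lock record and hang it off the inode (illustrative). */
	static int cifs_lock_add_sketch(struct cifsInodeInfo *cinode, __u64 offset,
					__u64 length, __u8 type, __u16 netfid)
	{
		struct cifsLockInfo *li;

		li = kmalloc(sizeof(*li), GFP_KERNEL);
		if (!li)
			return -ENOMEM;

		li->offset = offset;
		li->length = length;
		li->type = type;
		li->netfid = netfid;
		li->pid = current->tgid;
		INIT_LIST_HEAD(&li->blist);
		init_waitqueue_head(&li->block_q);

		mutex_lock(&cinode->lock_mutex);   /* protects llist/can_cache_brlcks */
		list_add_tail(&li->llist, &cinode->llist);
		mutex_unlock(&cinode->lock_mutex);
		return 0;
	}
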
@@ -643,8 +658,24 @@ static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
643struct mid_q_entry; 658struct mid_q_entry;
644 659
645/* 660/*
646 * This is the prototype for the mid callback function. When creating one, 661 * This is the prototype for the mid receive function. This function is for
647 * take special care to avoid deadlocks. Things to bear in mind: 662 * receiving the rest of the SMB frame, starting with the WordCount (which is
663 * just after the MID in struct smb_hdr). Note:
664 *
665 * - This will be called by cifsd, with no locks held.
666 * - The mid will still be on the pending_mid_q.
667 * - mid->resp_buf will point to the current buffer.
668 *
669 * Returns zero on a successful receive, or an error. The receive state in
670 * the TCP_Server_Info will also be updated.
671 */
672typedef int (mid_receive_t)(struct TCP_Server_Info *server,
673 struct mid_q_entry *mid);
674
675/*
676 * This is the prototype for the mid callback function. This is called once the
677 * mid has been received off of the socket. When creating one, take special
678 * care to avoid deadlocks. Things to bear in mind:
648 * 679 *
649 * - it will be called by cifsd, with no locks held 680 * - it will be called by cifsd, with no locks held
650 * - the mid will be removed from any lists 681 * - the mid will be removed from any lists
@@ -662,9 +693,10 @@ struct mid_q_entry {
662 unsigned long when_sent; /* time when smb send finished */ 693 unsigned long when_sent; /* time when smb send finished */
663 unsigned long when_received; /* when demux complete (taken off wire) */ 694 unsigned long when_received; /* when demux complete (taken off wire) */
664#endif 695#endif
696 mid_receive_t *receive; /* call receive callback */
665 mid_callback_t *callback; /* call completion callback */ 697 mid_callback_t *callback; /* call completion callback */
666 void *callback_data; /* general purpose pointer for callback */ 698 void *callback_data; /* general purpose pointer for callback */
667 struct smb_hdr *resp_buf; /* response buffer */ 699 struct smb_hdr *resp_buf; /* pointer to received SMB header */
668 int midState; /* wish this were enum but can not pass to wait_event */ 700 int midState; /* wish this were enum but can not pass to wait_event */
669 __u8 command; /* smb command code */ 701 __u8 command; /* smb command code */
670 bool largeBuf:1; /* if valid response, is pointer to large buf */ 702 bool largeBuf:1; /* if valid response, is pointer to large buf */
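
Together, the mid_receive_t typedef above and the new receive member of mid_q_entry let a command hand cifsd a routine that pulls the rest of its own response off the socket; the asynchronous read path added later in this patch is the first user, and CIFSSMBEcho simply passes NULL. An outline of what such a routine might look like, using the cifs_read_from_socket()/dequeue_mid() helpers this patch exports from connect.c; an illustration, not the in-tree implementation:

	/* Outline of a mid_receive_t implementation (illustrative only). */
	static int demo_receive(struct TCP_Server_Info *server,
				struct mid_q_entry *mid)
	{
		char wct;	/* WordCount: first byte after the MID on the wire */
		int length;

		length = cifs_read_from_socket(server, &wct, 1);
		if (length < 0)
			return length;
		server->total_read += length;

		/*
		 * ... read the remaining parameter words and data bytes of this
		 * response, typically into a kvec array via
		 * cifs_readv_from_socket(), bumping server->total_read ...
		 */

		dequeue_mid(mid, false);	/* hand the completed mid back */
		return 0;
	}
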
@@ -964,7 +996,8 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
964 to be established on existing mount if we 996 to be established on existing mount if we
965 have the uid/password or Kerberos credential 997 have the uid/password or Kerberos credential
966 or equivalent for current user */ 998 or equivalent for current user */
967GLOBAL_EXTERN unsigned int oplockEnabled; 999/* enable or disable oplocks */
1000GLOBAL_EXTERN bool enable_oplocks;
968GLOBAL_EXTERN unsigned int lookupCacheEnabled; 1001GLOBAL_EXTERN unsigned int lookupCacheEnabled;
969GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent 1002GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
970 with more secure ntlmssp2 challenge/resp */ 1003 with more secure ntlmssp2 challenge/resp */
@@ -978,10 +1011,16 @@ GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
978/* reconnect after this many failed echo attempts */ 1011/* reconnect after this many failed echo attempts */
979GLOBAL_EXTERN unsigned short echo_retries; 1012GLOBAL_EXTERN unsigned short echo_retries;
980 1013
1014#ifdef CONFIG_CIFS_ACL
981GLOBAL_EXTERN struct rb_root uidtree; 1015GLOBAL_EXTERN struct rb_root uidtree;
982GLOBAL_EXTERN struct rb_root gidtree; 1016GLOBAL_EXTERN struct rb_root gidtree;
983GLOBAL_EXTERN spinlock_t siduidlock; 1017GLOBAL_EXTERN spinlock_t siduidlock;
984GLOBAL_EXTERN spinlock_t sidgidlock; 1018GLOBAL_EXTERN spinlock_t sidgidlock;
1019GLOBAL_EXTERN struct rb_root siduidtree;
1020GLOBAL_EXTERN struct rb_root sidgidtree;
1021GLOBAL_EXTERN spinlock_t uidsidlock;
1022GLOBAL_EXTERN spinlock_t gidsidlock;
1023#endif /* CONFIG_CIFS_ACL */
985 1024
986void cifs_oplock_break(struct work_struct *work); 1025void cifs_oplock_break(struct work_struct *work);
987 1026
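
The new TCP_Server_Info members (large_buf, iov, nr_iov, smallbuf, bigbuf, total_read) give the demultiplex thread somewhere to keep per-response receive state instead of purely local variables, which is what makes the pluggable ->receive() callbacks workable. A rough, assumed sketch of how the read loop ties them together (the reworked connect.c itself is not part of this excerpt; pdu_is_large() is a hypothetical helper):

	/* Rough sketch of per-response receive bookkeeping (illustrative). */
	server->large_buf = false;
	server->smallbuf = smallbuf;		/* header always lands here first */
	server->total_read = 0;

	length = cifs_read_from_socket(server, smallbuf,
				       4 + sizeof(struct smb_hdr));
	if (length < 0)
		goto next_pdu;
	server->total_read += length;

	if (pdu_is_large(smallbuf)) {		/* hypothetical size check */
		memcpy(bigbuf, smallbuf, server->total_read);
		server->large_buf = true;
		server->bigbuf = bigbuf;	/* rest of the frame goes here */
	}
	/* a mid's ->receive() callback, when present, then consumes the rest,
	 * advancing server->total_read as it reads */
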
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index de3aa285de0..3fb03e2c8e8 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -1089,9 +1089,7 @@ typedef struct smb_com_read_rsp {
1089 __le16 DataLengthHigh; 1089 __le16 DataLengthHigh;
1090 __u64 Reserved2; 1090 __u64 Reserved2;
1091 __u16 ByteCount; 1091 __u16 ByteCount;
1092 __u8 Pad; /* BB check for whether padded to DWORD 1092 /* read response data immediately follows */
1093 boundary and optimum performance here */
1094 char Data[1];
1095} __attribute__((packed)) READ_RSP; 1093} __attribute__((packed)) READ_RSP;
1096 1094
1097typedef struct locking_andx_range { 1095typedef struct locking_andx_range {
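
With the trailing Pad/Data[1] members gone, READ_RSP no longer pretends the payload is a struct field; the receive code has to locate it from the offset the server reports. The usual calculation looks roughly like the sketch below, assuming the DataOffset/DataLength/DataLengthHigh fields declared earlier in this struct (only partially visible in this hunk):

	/* Sketch: locate the read payload inside a READ_RSP (field names assumed). */
	READ_RSP *rsp = (READ_RSP *)buf;
	unsigned int data_len;
	char *data;

	data_len = le16_to_cpu(rsp->DataLengthHigh);
	data_len = (data_len << 16) + le16_to_cpu(rsp->DataLength);

	/* DataOffset is measured from the start of the SMB header */
	data = (char *)&rsp->hdr.Protocol + le16_to_cpu(rsp->DataOffset);
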
@@ -1913,6 +1911,10 @@ typedef struct whoami_rsp_data { /* Query level 0x202 */
1913 1911
1914/* SETFSInfo Levels */ 1912/* SETFSInfo Levels */
1915#define SMB_SET_CIFS_UNIX_INFO 0x200 1913#define SMB_SET_CIFS_UNIX_INFO 0x200
1914/* level 0x203 is defined above in list of QFS info levels */
1915/* #define SMB_REQUEST_TRANSPORT_ENCRYPTION 0x203 */
1916
1917/* Level 0x200 request structure follows */
1916typedef struct smb_com_transaction2_setfsi_req { 1918typedef struct smb_com_transaction2_setfsi_req {
1917 struct smb_hdr hdr; /* wct = 15 */ 1919 struct smb_hdr hdr; /* wct = 15 */
1918 __le16 TotalParameterCount; 1920 __le16 TotalParameterCount;
@@ -1940,13 +1942,39 @@ typedef struct smb_com_transaction2_setfsi_req {
1940 __le64 ClientUnixCap; /* Data end */ 1942 __le64 ClientUnixCap; /* Data end */
1941} __attribute__((packed)) TRANSACTION2_SETFSI_REQ; 1943} __attribute__((packed)) TRANSACTION2_SETFSI_REQ;
1942 1944
1945/* level 0x203 request structure follows */
1946typedef struct smb_com_transaction2_setfs_enc_req {
1947 struct smb_hdr hdr; /* wct = 15 */
1948 __le16 TotalParameterCount;
1949 __le16 TotalDataCount;
1950 __le16 MaxParameterCount;
1951 __le16 MaxDataCount;
1952 __u8 MaxSetupCount;
1953 __u8 Reserved;
1954 __le16 Flags;
1955 __le32 Timeout;
1956 __u16 Reserved2;
1957 __le16 ParameterCount; /* 4 */
1958 __le16 ParameterOffset;
1959 __le16 DataCount; /* 12 */
1960 __le16 DataOffset;
1961 __u8 SetupCount; /* one */
1962 __u8 Reserved3;
1963 __le16 SubCommand; /* TRANS2_SET_FS_INFORMATION */
1964 __le16 ByteCount;
1965 __u8 Pad;
1966 __u16 Reserved4; /* Parameters start. */
1967 __le16 InformationLevel;/* Parameters end. */
1968 /* NTLMSSP Blob, Data start. */
1969} __attribute__((packed)) TRANSACTION2_SETFSI_ENC_REQ;
1970
1971/* response for setfsinfo levels 0x200 and 0x203 */
1943typedef struct smb_com_transaction2_setfsi_rsp { 1972typedef struct smb_com_transaction2_setfsi_rsp {
1944 struct smb_hdr hdr; /* wct = 10 */ 1973 struct smb_hdr hdr; /* wct = 10 */
1945 struct trans2_resp t2; 1974 struct trans2_resp t2;
1946 __u16 ByteCount; 1975 __u16 ByteCount;
1947} __attribute__((packed)) TRANSACTION2_SETFSI_RSP; 1976} __attribute__((packed)) TRANSACTION2_SETFSI_RSP;
1948 1977
1949
1950typedef struct smb_com_transaction2_get_dfs_refer_req { 1978typedef struct smb_com_transaction2_get_dfs_refer_req {
1951 struct smb_hdr hdr; /* wct = 15 */ 1979 struct smb_hdr hdr; /* wct = 15 */
1952 __le16 TotalParameterCount; 1980 __le16 TotalParameterCount;
@@ -2098,13 +2126,13 @@ typedef struct {
2098#define CIFS_UNIX_PROXY_CAP 0x00000400 /* Proxy cap: 0xACE ioctl and 2126#define CIFS_UNIX_PROXY_CAP 0x00000400 /* Proxy cap: 0xACE ioctl and
2099 QFS PROXY call */ 2127 QFS PROXY call */
2100#ifdef CONFIG_CIFS_POSIX 2128#ifdef CONFIG_CIFS_POSIX
2101/* Can not set pathnames cap yet until we send new posix create SMB since 2129/* presumably don't need the 0x20 POSIX_PATH_OPS_CAP since we never send
2102 otherwise server can treat such handles opened with older ntcreatex 2130 LockingX instead of posix locking call on unix sess (and we do not expect
2103 (by a new client which knows how to send posix path ops) 2131 LockingX to use different (ie Windows) semantics than posix locking on
2104 as non-posix handles (can affect write behavior with byte range locks. 2132 the same session (if WINE needs to do this later, we can add this cap
2105 We can add back in POSIX_PATH_OPS cap when Posix Create/Mkdir finished */ 2133 back in later */
2106/* #define CIFS_UNIX_CAP_MASK 0x000000fb */ 2134/* #define CIFS_UNIX_CAP_MASK 0x000000fb */
2107#define CIFS_UNIX_CAP_MASK 0x000000db 2135#define CIFS_UNIX_CAP_MASK 0x000003db
2108#else 2136#else
2109#define CIFS_UNIX_CAP_MASK 0x00000013 2137#define CIFS_UNIX_CAP_MASK 0x00000013
2110#endif /* CONFIG_CIFS_POSIX */ 2138#endif /* CONFIG_CIFS_POSIX */
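
The widened mask is plain bit arithmetic: 0x000000db ORed with 0x100 and 0x200 gives 0x000003db, so the two transport-encryption capability bits join the mask while 0x20 (POSIX_PATH_OPS_CAP) stays out, as the rewritten comment explains. The bit names below assume the CIFS_UNIX_*_CAP definitions earlier in this header, which are not reproduced in this excerpt:

	/* 0x000000db | 0x00000100 | 0x00000200 == 0x000003db
	 *
	 * assumed additions:
	 *   CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP            0x00000100
	 *   CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP  0x00000200
	 * still excluded: 0x20 (POSIX_PATH_OPS_CAP), 0x400 (CIFS_UNIX_PROXY_CAP)
	 */
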
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 8df28e925e5..ef4f631e4c0 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -69,8 +69,9 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
69 struct TCP_Server_Info *server); 69 struct TCP_Server_Info *server);
70extern void DeleteMidQEntry(struct mid_q_entry *midEntry); 70extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
71extern int cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, 71extern int cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
72 unsigned int nvec, mid_callback_t *callback, 72 unsigned int nvec, mid_receive_t *receive,
73 void *cbdata, bool ignore_pend); 73 mid_callback_t *callback, void *cbdata,
74 bool ignore_pend);
74extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *, 75extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
75 struct smb_hdr * /* input */ , 76 struct smb_hdr * /* input */ ,
76 struct smb_hdr * /* out */ , 77 struct smb_hdr * /* out */ ,
@@ -90,6 +91,7 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
90extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length); 91extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length);
91extern bool is_valid_oplock_break(struct smb_hdr *smb, 92extern bool is_valid_oplock_break(struct smb_hdr *smb,
92 struct TCP_Server_Info *); 93 struct TCP_Server_Info *);
94extern bool backup_cred(struct cifs_sb_info *);
93extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof); 95extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
94extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, 96extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
95 unsigned int bytes_written); 97 unsigned int bytes_written);
@@ -145,12 +147,19 @@ extern int cifs_get_inode_info_unix(struct inode **pinode,
145extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, 147extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
146 struct cifs_fattr *fattr, struct inode *inode, 148 struct cifs_fattr *fattr, struct inode *inode,
147 const char *path, const __u16 *pfid); 149 const char *path, const __u16 *pfid);
148extern int mode_to_cifs_acl(struct inode *inode, const char *path, __u64); 150extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64,
151 uid_t, gid_t);
149extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *, 152extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
150 const char *, u32 *); 153 const char *, u32 *);
151extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *, 154extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
152 const char *); 155 const char *, int);
153 156
157extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
158extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
159 unsigned int to_read);
160extern int cifs_readv_from_socket(struct TCP_Server_Info *server,
161 struct kvec *iov_orig, unsigned int nr_segs,
162 unsigned int to_read);
154extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, 163extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
155 struct cifs_sb_info *cifs_sb); 164 struct cifs_sb_info *cifs_sb);
156extern int cifs_match_super(struct super_block *, void *); 165extern int cifs_match_super(struct super_block *, void *);
@@ -359,14 +368,17 @@ extern int CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
359 const struct nls_table *nls_codepage, 368 const struct nls_table *nls_codepage,
360 int remap_special_chars); 369 int remap_special_chars);
361 370
371extern int cifs_lockv(const int xid, struct cifs_tcon *tcon, const __u16 netfid,
372 const __u8 lock_type, const __u32 num_unlock,
373 const __u32 num_lock, LOCKING_ANDX_RANGE *buf);
362extern int CIFSSMBLock(const int xid, struct cifs_tcon *tcon, 374extern int CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
363 const __u16 netfid, const __u64 len, 375 const __u16 netfid, const __u32 netpid, const __u64 len,
364 const __u64 offset, const __u32 numUnlock, 376 const __u64 offset, const __u32 numUnlock,
365 const __u32 numLock, const __u8 lockType, 377 const __u32 numLock, const __u8 lockType,
366 const bool waitFlag, const __u8 oplock_level); 378 const bool waitFlag, const __u8 oplock_level);
367extern int CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon, 379extern int CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
368 const __u16 smb_file_id, const int get_flag, 380 const __u16 smb_file_id, const __u32 netpid,
369 const __u64 len, struct file_lock *, 381 const int get_flag, const __u64 len, struct file_lock *,
370 const __u16 lock_type, const bool waitFlag); 382 const __u16 lock_type, const bool waitFlag);
371extern int CIFSSMBTDis(const int xid, struct cifs_tcon *tcon); 383extern int CIFSSMBTDis(const int xid, struct cifs_tcon *tcon);
372extern int CIFSSMBEcho(struct TCP_Server_Info *server); 384extern int CIFSSMBEcho(struct TCP_Server_Info *server);
@@ -380,7 +392,7 @@ extern void tconInfoFree(struct cifs_tcon *);
380extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *); 392extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
381extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *, 393extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
382 __u32 *); 394 __u32 *);
383extern int cifs_verify_signature(struct smb_hdr *, 395extern int cifs_verify_signature(struct kvec *iov, unsigned int nr_iov,
384 struct TCP_Server_Info *server, 396 struct TCP_Server_Info *server,
385 __u32 expected_sequence_number); 397 __u32 expected_sequence_number);
386extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *); 398extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *);
@@ -419,7 +431,7 @@ extern int CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon,
419extern int CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, 431extern int CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon,
420 __u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen); 432 __u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen);
421extern int CIFSSMBSetCIFSACL(const int, struct cifs_tcon *, __u16, 433extern int CIFSSMBSetCIFSACL(const int, struct cifs_tcon *, __u16,
422 struct cifs_ntsd *, __u32); 434 struct cifs_ntsd *, __u32, int);
423extern int CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon, 435extern int CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
424 const unsigned char *searchName, 436 const unsigned char *searchName,
425 char *acl_inf, const int buflen, const int acl_type, 437 char *acl_inf, const int buflen, const int acl_type,
@@ -440,6 +452,24 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16);
440extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8, 452extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
441 unsigned char *p24); 453 unsigned char *p24);
442 454
455/* asynchronous read support */
456struct cifs_readdata {
457 struct cifsFileInfo *cfile;
458 struct address_space *mapping;
459 __u64 offset;
460 unsigned int bytes;
461 pid_t pid;
462 int result;
463 struct list_head pages;
464 struct work_struct work;
465 unsigned int nr_iov;
466 struct kvec iov[1];
467};
468
469struct cifs_readdata *cifs_readdata_alloc(unsigned int nr_pages);
470void cifs_readdata_free(struct cifs_readdata *rdata);
471int cifs_async_readv(struct cifs_readdata *rdata);
472
443/* asynchronous write support */ 473/* asynchronous write support */
444struct cifs_writedata { 474struct cifs_writedata {
445 struct kref refcount; 475 struct kref refcount;
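
The cifs_readdata structure and its alloc/free/submit entry points mirror the existing cifs_writedata async write support directly below. Based only on these declarations (the file.c readpages conversion that actually drives them appears later in the patch, outside this excerpt), a caller would presumably fill one in and submit it roughly like this:

	/* Sketch: queue an asynchronous read (illustrative; variable names assumed). */
	struct cifs_readdata *rdata;
	int rc;

	rdata = cifs_readdata_alloc(nr_pages);
	if (rdata == NULL)
		return -ENOMEM;

	rdata->cfile = open_file;	/* the real code pins the open file */
	rdata->mapping = mapping;
	rdata->offset = offset;
	rdata->bytes = bytes;
	rdata->pid = pid;
	/* target pages are chained onto rdata->pages before submission */

	rc = cifs_async_readv(rdata);	/* completion runs rdata->work */
	if (rc)
		cifs_readdata_free(rdata);
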
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index a80f7bd97b9..6600aa2d2ef 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -33,6 +33,8 @@
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/posix_acl_xattr.h> 34#include <linux/posix_acl_xattr.h>
35#include <linux/pagemap.h> 35#include <linux/pagemap.h>
36#include <linux/swap.h>
37#include <linux/task_io_accounting_ops.h>
36#include <asm/uaccess.h> 38#include <asm/uaccess.h>
37#include "cifspdu.h" 39#include "cifspdu.h"
38#include "cifsglob.h" 40#include "cifsglob.h"
@@ -40,6 +42,7 @@
40#include "cifsproto.h" 42#include "cifsproto.h"
41#include "cifs_unicode.h" 43#include "cifs_unicode.h"
42#include "cifs_debug.h" 44#include "cifs_debug.h"
45#include "fscache.h"
43 46
44#ifdef CONFIG_CIFS_POSIX 47#ifdef CONFIG_CIFS_POSIX
45static struct { 48static struct {
@@ -83,6 +86,9 @@ static struct {
83#endif /* CONFIG_CIFS_WEAK_PW_HASH */ 86#endif /* CONFIG_CIFS_WEAK_PW_HASH */
84#endif /* CIFS_POSIX */ 87#endif /* CIFS_POSIX */
85 88
89/* Forward declarations */
90static void cifs_readv_complete(struct work_struct *work);
91
86/* Mark all open files on tree connections as invalid, since they 92/* Mark all open files on tree connections as invalid, since they
87 were closed when the session to the server was lost */ 93 were closed when the session to the server was lost */
88static void mark_open_files_invalid(struct cifs_tcon *pTcon) 94static void mark_open_files_invalid(struct cifs_tcon *pTcon)
@@ -453,8 +459,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
453 } 459 }
454 server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode); 460 server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode);
455 server->maxReq = le16_to_cpu(rsp->MaxMpxCount); 461 server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
456 server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize), 462 server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
457 (__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
458 server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs); 463 server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
459 /* even though we do not use raw we might as well set this 464 /* even though we do not use raw we might as well set this
460 accurately, in case we ever find a need for it */ 465 accurately, in case we ever find a need for it */
@@ -561,8 +566,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
561 little endian */ 566 little endian */
562 server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount); 567 server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount);
563 /* probably no need to store and check maxvcs */ 568 /* probably no need to store and check maxvcs */
564 server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize), 569 server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
565 (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
566 server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); 570 server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
567 cFYI(DBG2, "Max buf = %d", ses->server->maxBuf); 571 cFYI(DBG2, "Max buf = %d", ses->server->maxBuf);
568 server->capabilities = le32_to_cpu(pSMBr->Capabilities); 572 server->capabilities = le32_to_cpu(pSMBr->Capabilities);
@@ -739,7 +743,8 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
739 iov.iov_base = smb; 743 iov.iov_base = smb;
740 iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4; 744 iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
741 745
742 rc = cifs_call_async(server, &iov, 1, cifs_echo_callback, server, true); 746 rc = cifs_call_async(server, &iov, 1, NULL, cifs_echo_callback,
747 server, true);
743 if (rc) 748 if (rc)
744 cFYI(1, "Echo request failed: %d", rc); 749 cFYI(1, "Echo request failed: %d", rc);
745 750
@@ -1376,6 +1381,359 @@ openRetry:
1376 return rc; 1381 return rc;
1377} 1382}
1378 1383
1384struct cifs_readdata *
1385cifs_readdata_alloc(unsigned int nr_pages)
1386{
1387 struct cifs_readdata *rdata;
1388
1389 /* readdata + 1 kvec for each page */
1390 rdata = kzalloc(sizeof(*rdata) +
1391 sizeof(struct kvec) * nr_pages, GFP_KERNEL);
1392 if (rdata != NULL) {
1393 INIT_WORK(&rdata->work, cifs_readv_complete);
1394 INIT_LIST_HEAD(&rdata->pages);
1395 }
1396 return rdata;
1397}
1398
1399void
1400cifs_readdata_free(struct cifs_readdata *rdata)
1401{
1402 cifsFileInfo_put(rdata->cfile);
1403 kfree(rdata);
1404}
1405
1406/*
1407 * Discard any remaining data in the current SMB. To do this, we borrow the
1408 * current bigbuf.
1409 */
1410static int
1411cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1412{
1413 READ_RSP *rsp = (READ_RSP *)server->smallbuf;
1414 unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length);
1415 int remaining = rfclen + 4 - server->total_read;
1416 struct cifs_readdata *rdata = mid->callback_data;
1417
1418 while (remaining > 0) {
1419 int length;
1420
1421 length = cifs_read_from_socket(server, server->bigbuf,
1422 min_t(unsigned int, remaining,
1423 CIFSMaxBufSize + MAX_CIFS_HDR_SIZE));
1424 if (length < 0)
1425 return length;
1426 server->total_read += length;
1427 remaining -= length;
1428 }
1429
1430 dequeue_mid(mid, rdata->result);
1431 return 0;
1432}
1433
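cifs_readv_discard() above throws away whatever is left of a frame by reading it into the scratch buffer in bounded chunks. The same pattern in ordinary socket code, sketched with POSIX read() and invented names, looks like:

	#include <unistd.h>

	/* read and discard "remaining" bytes from fd via a bounded scratch buffer */
	static int drain_socket(int fd, char *scratch, size_t scratch_len,
				size_t remaining)
	{
		while (remaining > 0) {
			size_t want = remaining < scratch_len ? remaining : scratch_len;
			ssize_t n = read(fd, scratch, want);

			if (n <= 0)
				return -1;	/* error or EOF */
			remaining -= (size_t)n;
		}
		return 0;
	}
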
1434static int
1435cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1436{
1437 int length, len;
1438 unsigned int data_offset, remaining, data_len;
1439 struct cifs_readdata *rdata = mid->callback_data;
1440 READ_RSP *rsp = (READ_RSP *)server->smallbuf;
1441 unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length) + 4;
1442 u64 eof;
1443 pgoff_t eof_index;
1444 struct page *page, *tpage;
1445
1446 cFYI(1, "%s: mid=%u offset=%llu bytes=%u", __func__,
1447 mid->mid, rdata->offset, rdata->bytes);
1448
1449 /*
1450 * read the rest of READ_RSP header (sans Data array), or whatever we
1451 * can if there's not enough data. At this point, we've read down to
1452 * the Mid.
1453 */
1454 len = min_t(unsigned int, rfclen, sizeof(*rsp)) -
1455 sizeof(struct smb_hdr) + 1;
1456
1457 rdata->iov[0].iov_base = server->smallbuf + sizeof(struct smb_hdr) - 1;
1458 rdata->iov[0].iov_len = len;
1459
1460 length = cifs_readv_from_socket(server, rdata->iov, 1, len);
1461 if (length < 0)
1462 return length;
1463 server->total_read += length;
1464
1465 /* Was the SMB read successful? */
1466 rdata->result = map_smb_to_linux_error(&rsp->hdr, false);
1467 if (rdata->result != 0) {
1468 cFYI(1, "%s: server returned error %d", __func__,
1469 rdata->result);
1470 return cifs_readv_discard(server, mid);
1471 }
1472
1473 /* Is there enough to get to the rest of the READ_RSP header? */
1474 if (server->total_read < sizeof(READ_RSP)) {
1475 cFYI(1, "%s: server returned short header. got=%u expected=%zu",
1476 __func__, server->total_read, sizeof(READ_RSP));
1477 rdata->result = -EIO;
1478 return cifs_readv_discard(server, mid);
1479 }
1480
1481 data_offset = le16_to_cpu(rsp->DataOffset) + 4;
1482 if (data_offset < server->total_read) {
1483 /*
1484 * win2k8 sometimes sends an offset of 0 when the read
1485 * is beyond the EOF. Treat it as if the data starts just after
1486 * the header.
1487 */
1488 cFYI(1, "%s: data offset (%u) inside read response header",
1489 __func__, data_offset);
1490 data_offset = server->total_read;
1491 } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
1492 /* data_offset is beyond the end of smallbuf */
1493 cFYI(1, "%s: data offset (%u) beyond end of smallbuf",
1494 __func__, data_offset);
1495 rdata->result = -EIO;
1496 return cifs_readv_discard(server, mid);
1497 }
1498
1499 cFYI(1, "%s: total_read=%u data_offset=%u", __func__,
1500 server->total_read, data_offset);
1501
1502 len = data_offset - server->total_read;
1503 if (len > 0) {
1504 /* read any junk before data into the rest of smallbuf */
1505 rdata->iov[0].iov_base = server->smallbuf + server->total_read;
1506 rdata->iov[0].iov_len = len;
1507 length = cifs_readv_from_socket(server, rdata->iov, 1, len);
1508 if (length < 0)
1509 return length;
1510 server->total_read += length;
1511 }
1512
1513 /* set up first iov for signature check */
1514 rdata->iov[0].iov_base = server->smallbuf;
1515 rdata->iov[0].iov_len = server->total_read;
1516 cFYI(1, "0: iov_base=%p iov_len=%zu",
1517 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
1518
1519 /* how much data is in the response? */
1520 data_len = le16_to_cpu(rsp->DataLengthHigh) << 16;
1521 data_len += le16_to_cpu(rsp->DataLength);
1522 if (data_offset + data_len > rfclen) {
1523 /* data_len is corrupt -- discard frame */
1524 rdata->result = -EIO;
1525 return cifs_readv_discard(server, mid);
1526 }
1527
1528 /* marshal up the page array */
1529 len = 0;
1530 remaining = data_len;
1531 rdata->nr_iov = 1;
1532
1533 /* determine the eof that the server (probably) has */
1534 eof = CIFS_I(rdata->mapping->host)->server_eof;
1535 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
1536 cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
1537
1538 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
1539 if (remaining >= PAGE_CACHE_SIZE) {
1540 /* enough data to fill the page */
1541 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
1542 rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
1543 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
1544 rdata->nr_iov, page->index,
1545 rdata->iov[rdata->nr_iov].iov_base,
1546 rdata->iov[rdata->nr_iov].iov_len);
1547 ++rdata->nr_iov;
1548 len += PAGE_CACHE_SIZE;
1549 remaining -= PAGE_CACHE_SIZE;
1550 } else if (remaining > 0) {
1551 /* enough for partial page, fill and zero the rest */
1552 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
1553 rdata->iov[rdata->nr_iov].iov_len = remaining;
1554 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
1555 rdata->nr_iov, page->index,
1556 rdata->iov[rdata->nr_iov].iov_base,
1557 rdata->iov[rdata->nr_iov].iov_len);
1558 memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
1559 '\0', PAGE_CACHE_SIZE - remaining);
1560 ++rdata->nr_iov;
1561 len += remaining;
1562 remaining = 0;
1563 } else if (page->index > eof_index) {
1564 /*
1565 * The VFS will not try to do readahead past the
1566 * i_size, but it's possible that we have outstanding
1567 * writes with gaps in the middle and the i_size hasn't
1568 * caught up yet. Populate those with zeroed out pages
1569 * to prevent the VFS from repeatedly attempting to
1570 * fill them until the writes are flushed.
1571 */
1572 zero_user(page, 0, PAGE_CACHE_SIZE);
1573 list_del(&page->lru);
1574 lru_cache_add_file(page);
1575 flush_dcache_page(page);
1576 SetPageUptodate(page);
1577 unlock_page(page);
1578 page_cache_release(page);
1579 } else {
1580 /* no need to hold page hostage */
1581 list_del(&page->lru);
1582 lru_cache_add_file(page);
1583 unlock_page(page);
1584 page_cache_release(page);
1585 }
1586 }
1587
1588 /* issue the read if we have any iovecs left to fill */
1589 if (rdata->nr_iov > 1) {
1590 length = cifs_readv_from_socket(server, &rdata->iov[1],
1591 rdata->nr_iov - 1, len);
1592 if (length < 0)
1593 return length;
1594 server->total_read += length;
1595 } else {
1596 length = 0;
1597 }
1598
1599 rdata->bytes = length;
1600
1601 cFYI(1, "total_read=%u rfclen=%u remaining=%u", server->total_read,
1602 rfclen, remaining);
1603
1604 /* discard anything left over */
1605 if (server->total_read < rfclen)
1606 return cifs_readv_discard(server, mid);
1607
1608 dequeue_mid(mid, false);
1609 return length;
1610}
1611
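The marshalling loop in cifs_readv_receive() maps one kvec per page until the advertised data length runs out and zero-fills the tail of a final, partially filled page. Stripped of the page-cache and socket handling, the bookkeeping can be modelled in plain C (fixed 4096-byte buffers stand in for pages; every name here is illustrative):

	#include <string.h>
	#include <sys/uio.h>

	#define PAGE_SZ 4096

	/* build one iovec per buffer for data_len bytes; zero the partial tail */
	static unsigned int marshal_pages(struct iovec *iov, char pages[][PAGE_SZ],
					  unsigned int nr_pages, size_t data_len)
	{
		unsigned int n = 0;

		for (unsigned int i = 0; i < nr_pages && data_len > 0; i++) {
			size_t fill = data_len >= PAGE_SZ ? PAGE_SZ : data_len;

			iov[n].iov_base = pages[i];
			iov[n].iov_len = fill;
			if (fill < PAGE_SZ)	/* partial page: zero the rest */
				memset(pages[i] + fill, 0, PAGE_SZ - fill);
			n++;
			data_len -= fill;
		}
		return n;	/* iovecs ready for a scatter/gather receive */
	}

Pages that receive no data at all are handled separately above: those past the server's end of file are zeroed and marked up to date, the rest are simply released.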
1612static void
1613cifs_readv_complete(struct work_struct *work)
1614{
1615 struct cifs_readdata *rdata = container_of(work,
1616 struct cifs_readdata, work);
1617 struct page *page, *tpage;
1618
1619 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
1620 list_del(&page->lru);
1621 lru_cache_add_file(page);
1622
1623 if (rdata->result == 0) {
1624 kunmap(page);
1625 flush_dcache_page(page);
1626 SetPageUptodate(page);
1627 }
1628
1629 unlock_page(page);
1630
1631 if (rdata->result == 0)
1632 cifs_readpage_to_fscache(rdata->mapping->host, page);
1633
1634 page_cache_release(page);
1635 }
1636 cifs_readdata_free(rdata);
1637}
1638
1639static void
1640cifs_readv_callback(struct mid_q_entry *mid)
1641{
1642 struct cifs_readdata *rdata = mid->callback_data;
1643 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
1644 struct TCP_Server_Info *server = tcon->ses->server;
1645
1646 cFYI(1, "%s: mid=%u state=%d result=%d bytes=%u", __func__,
1647 mid->mid, mid->midState, rdata->result, rdata->bytes);
1648
1649 switch (mid->midState) {
1650 case MID_RESPONSE_RECEIVED:
1651 /* result already set, check signature */
1652 if (server->sec_mode &
1653 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
1654 if (cifs_verify_signature(rdata->iov, rdata->nr_iov,
1655 server, mid->sequence_number + 1))
1656 cERROR(1, "Unexpected SMB signature");
1657 }
1658 /* FIXME: should this be counted toward the initiating task? */
1659 task_io_account_read(rdata->bytes);
1660 cifs_stats_bytes_read(tcon, rdata->bytes);
1661 break;
1662 case MID_REQUEST_SUBMITTED:
1663 case MID_RETRY_NEEDED:
1664 rdata->result = -EAGAIN;
1665 break;
1666 default:
1667 rdata->result = -EIO;
1668 }
1669
1670 queue_work(system_nrt_wq, &rdata->work);
1671 DeleteMidQEntry(mid);
1672 atomic_dec(&server->inFlight);
1673 wake_up(&server->request_q);
1674}
1675
1676/* cifs_async_readv - send an async read, and set up mid to handle result */
1677int
1678cifs_async_readv(struct cifs_readdata *rdata)
1679{
1680 int rc;
1681 READ_REQ *smb = NULL;
1682 int wct;
1683 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
1684
1685 cFYI(1, "%s: offset=%llu bytes=%u", __func__,
1686 rdata->offset, rdata->bytes);
1687
1688 if (tcon->ses->capabilities & CAP_LARGE_FILES)
1689 wct = 12;
1690 else {
1691 wct = 10; /* old style read */
1692 if ((rdata->offset >> 32) > 0) {
1693			/* old style read cannot handle this large an offset */
1694 return -EIO;
1695 }
1696 }
1697
1698 rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **)&smb);
1699 if (rc)
1700 return rc;
1701
1702 smb->hdr.Pid = cpu_to_le16((__u16)rdata->pid);
1703 smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->pid >> 16));
1704
1705 smb->AndXCommand = 0xFF; /* none */
1706 smb->Fid = rdata->cfile->netfid;
1707 smb->OffsetLow = cpu_to_le32(rdata->offset & 0xFFFFFFFF);
1708 if (wct == 12)
1709 smb->OffsetHigh = cpu_to_le32(rdata->offset >> 32);
1710 smb->Remaining = 0;
1711 smb->MaxCount = cpu_to_le16(rdata->bytes & 0xFFFF);
1712 smb->MaxCountHigh = cpu_to_le32(rdata->bytes >> 16);
1713 if (wct == 12)
1714 smb->ByteCount = 0;
1715 else {
1716 /* old style read */
1717 struct smb_com_readx_req *smbr =
1718 (struct smb_com_readx_req *)smb;
1719 smbr->ByteCount = 0;
1720 }
1721
1722	/* 4 for RFC1001 length */
1723 rdata->iov[0].iov_base = smb;
1724 rdata->iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
1725
1726 rc = cifs_call_async(tcon->ses->server, rdata->iov, 1,
1727 cifs_readv_receive, cifs_readv_callback,
1728 rdata, false);
1729
1730 if (rc == 0)
1731 cifs_stats_inc(&tcon->num_reads);
1732
1733 cifs_small_buf_release(smb);
1734 return rc;
1735}
1736
1379int 1737int
1380CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, 1738CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes,
1381 char **buf, int *pbuf_type) 1739 char **buf, int *pbuf_type)
@@ -1836,7 +2194,7 @@ cifs_async_writev(struct cifs_writedata *wdata)
1836 2194
1837 kref_get(&wdata->refcount); 2195 kref_get(&wdata->refcount);
1838 rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1, 2196 rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1,
1839 cifs_writev_callback, wdata, false); 2197 NULL, cifs_writev_callback, wdata, false);
1840 2198
1841 if (rc == 0) 2199 if (rc == 0)
1842 cifs_stats_inc(&tcon->num_writes); 2200 cifs_stats_inc(&tcon->num_writes);
@@ -1962,10 +2320,50 @@ CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms,
1962 return rc; 2320 return rc;
1963} 2321}
1964 2322
2323int cifs_lockv(const int xid, struct cifs_tcon *tcon, const __u16 netfid,
2324 const __u8 lock_type, const __u32 num_unlock,
2325 const __u32 num_lock, LOCKING_ANDX_RANGE *buf)
2326{
2327 int rc = 0;
2328 LOCK_REQ *pSMB = NULL;
2329 struct kvec iov[2];
2330 int resp_buf_type;
2331 __u16 count;
2332
2333 cFYI(1, "cifs_lockv num lock %d num unlock %d", num_lock, num_unlock);
2334
2335 rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);
2336 if (rc)
2337 return rc;
2338
2339 pSMB->Timeout = 0;
2340 pSMB->NumberOfLocks = cpu_to_le16(num_lock);
2341 pSMB->NumberOfUnlocks = cpu_to_le16(num_unlock);
2342 pSMB->LockType = lock_type;
2343 pSMB->AndXCommand = 0xFF; /* none */
2344 pSMB->Fid = netfid; /* netfid stays le */
2345
2346 count = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
2347 inc_rfc1001_len(pSMB, count);
2348 pSMB->ByteCount = cpu_to_le16(count);
2349
2350 iov[0].iov_base = (char *)pSMB;
2351 iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4 -
2352 (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
2353 iov[1].iov_base = (char *)buf;
2354 iov[1].iov_len = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
2355
2356 cifs_stats_inc(&tcon->num_locks);
2357 rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
2358 if (rc)
2359 cFYI(1, "Send error in cifs_lockv = %d", rc);
2360
2361 return rc;
2362}
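cifs_lockv() sends the fixed LOCKING_ANDX header and the caller-supplied array of lock ranges as two iovecs instead of copying the ranges into the SMB buffer. The underlying scatter/gather idea, shown with writev() and made-up structures, is simply:

	#include <sys/uio.h>

	struct lock_hdr   { unsigned short num_locks;	/* ...fixed fields... */ };
	struct lock_range { unsigned int pid, offset, length; };

	/* send a fixed header plus a variable-length array without copying */
	static ssize_t send_lock_request(int fd, struct lock_hdr *hdr,
					 struct lock_range *ranges, unsigned int nr)
	{
		struct iovec iov[2] = {
			{ .iov_base = hdr,    .iov_len = sizeof(*hdr) },
			{ .iov_base = ranges, .iov_len = nr * sizeof(*ranges) },
		};

		return writev(fd, iov, 2);
	}

Note how iov[0].iov_len in cifs_lockv() is computed by subtracting the range array's size from the full SMB length, so the wire format is unchanged even though the ranges never touch the request buffer.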
1965 2363
1966int 2364int
1967CIFSSMBLock(const int xid, struct cifs_tcon *tcon, 2365CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
1968 const __u16 smb_file_id, const __u64 len, 2366 const __u16 smb_file_id, const __u32 netpid, const __u64 len,
1969 const __u64 offset, const __u32 numUnlock, 2367 const __u64 offset, const __u32 numUnlock,
1970 const __u32 numLock, const __u8 lockType, 2368 const __u32 numLock, const __u8 lockType,
1971 const bool waitFlag, const __u8 oplock_level) 2369 const bool waitFlag, const __u8 oplock_level)
@@ -2001,7 +2399,7 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
2001 pSMB->Fid = smb_file_id; /* netfid stays le */ 2399 pSMB->Fid = smb_file_id; /* netfid stays le */
2002 2400
2003 if ((numLock != 0) || (numUnlock != 0)) { 2401 if ((numLock != 0) || (numUnlock != 0)) {
2004 pSMB->Locks[0].Pid = cpu_to_le16(current->tgid); 2402 pSMB->Locks[0].Pid = cpu_to_le16(netpid);
2005 /* BB where to store pid high? */ 2403 /* BB where to store pid high? */
2006 pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len); 2404 pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len);
2007 pSMB->Locks[0].LengthHigh = cpu_to_le32((u32)(len>>32)); 2405 pSMB->Locks[0].LengthHigh = cpu_to_le32((u32)(len>>32));
@@ -2035,9 +2433,9 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
2035 2433
2036int 2434int
2037CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon, 2435CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
2038 const __u16 smb_file_id, const int get_flag, const __u64 len, 2436 const __u16 smb_file_id, const __u32 netpid, const int get_flag,
2039 struct file_lock *pLockData, const __u16 lock_type, 2437 const __u64 len, struct file_lock *pLockData,
2040 const bool waitFlag) 2438 const __u16 lock_type, const bool waitFlag)
2041{ 2439{
2042 struct smb_com_transaction2_sfi_req *pSMB = NULL; 2440 struct smb_com_transaction2_sfi_req *pSMB = NULL;
2043 struct smb_com_transaction2_sfi_rsp *pSMBr = NULL; 2441 struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
@@ -2095,7 +2493,7 @@ CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
2095 } else 2493 } else
2096 pSMB->Timeout = 0; 2494 pSMB->Timeout = 0;
2097 2495
2098 parm_data->pid = cpu_to_le32(current->tgid); 2496 parm_data->pid = cpu_to_le32(netpid);
2099 parm_data->start = cpu_to_le64(pLockData->fl_start); 2497 parm_data->start = cpu_to_le64(pLockData->fl_start);
2100 parm_data->length = cpu_to_le64(len); /* normalize negative numbers */ 2498 parm_data->length = cpu_to_le64(len); /* normalize negative numbers */
2101 2499
@@ -2812,8 +3210,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifs_tcon *tcon,
2812 pSMB->TotalDataCount = 0; 3210 pSMB->TotalDataCount = 0;
2813 pSMB->MaxParameterCount = cpu_to_le32(2); 3211 pSMB->MaxParameterCount = cpu_to_le32(2);
2814 /* BB find exact data count max from sess structure BB */ 3212 /* BB find exact data count max from sess structure BB */
2815 pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf - 3213 pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
2816 MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
2817 pSMB->MaxSetupCount = 4; 3214 pSMB->MaxSetupCount = 4;
2818 pSMB->Reserved = 0; 3215 pSMB->Reserved = 0;
2819 pSMB->ParameterOffset = 0; 3216 pSMB->ParameterOffset = 0;
@@ -3306,8 +3703,7 @@ smb_init_nttransact(const __u16 sub_command, const int setup_count,
3306 pSMB->Reserved = 0; 3703 pSMB->Reserved = 0;
3307 pSMB->TotalParameterCount = cpu_to_le32(parm_len); 3704 pSMB->TotalParameterCount = cpu_to_le32(parm_len);
3308 pSMB->TotalDataCount = 0; 3705 pSMB->TotalDataCount = 0;
3309 pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf - 3706 pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
3310 MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
3311 pSMB->ParameterCount = pSMB->TotalParameterCount; 3707 pSMB->ParameterCount = pSMB->TotalParameterCount;
3312 pSMB->DataCount = pSMB->TotalDataCount; 3708 pSMB->DataCount = pSMB->TotalDataCount;
3313 temp_offset = offsetof(struct smb_com_ntransact_req, Parms) + 3709 temp_offset = offsetof(struct smb_com_ntransact_req, Parms) +
@@ -3467,7 +3863,7 @@ qsec_out:
3467 3863
3468int 3864int
3469CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid, 3865CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
3470 struct cifs_ntsd *pntsd, __u32 acllen) 3866 struct cifs_ntsd *pntsd, __u32 acllen, int aclflag)
3471{ 3867{
3472 __u16 byte_count, param_count, data_count, param_offset, data_offset; 3868 __u16 byte_count, param_count, data_count, param_offset, data_offset;
3473 int rc = 0; 3869 int rc = 0;
@@ -3504,7 +3900,7 @@ setCifsAclRetry:
3504 3900
3505 pSMB->Fid = fid; /* file handle always le */ 3901 pSMB->Fid = fid; /* file handle always le */
3506 pSMB->Reserved2 = 0; 3902 pSMB->Reserved2 = 0;
3507 pSMB->AclFlags = cpu_to_le32(CIFS_ACL_DACL); 3903 pSMB->AclFlags = cpu_to_le32(aclflag);
3508 3904
3509 if (pntsd && acllen) { 3905 if (pntsd && acllen) {
3510 memcpy((char *) &pSMBr->hdr.Protocol + data_offset, 3906 memcpy((char *) &pSMBr->hdr.Protocol + data_offset,
@@ -3977,8 +4373,7 @@ findFirstRetry:
3977 params = 12 + name_len /* includes null */ ; 4373 params = 12 + name_len /* includes null */ ;
3978 pSMB->TotalDataCount = 0; /* no EAs */ 4374 pSMB->TotalDataCount = 0; /* no EAs */
3979 pSMB->MaxParameterCount = cpu_to_le16(10); 4375 pSMB->MaxParameterCount = cpu_to_le16(10);
3980 pSMB->MaxDataCount = cpu_to_le16((tcon->ses->server->maxBuf - 4376 pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize & 0xFFFFFF00);
3981 MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
3982 pSMB->MaxSetupCount = 0; 4377 pSMB->MaxSetupCount = 0;
3983 pSMB->Reserved = 0; 4378 pSMB->Reserved = 0;
3984 pSMB->Flags = 0; 4379 pSMB->Flags = 0;
@@ -4052,8 +4447,7 @@ findFirstRetry:
4052 psrch_inf->index_of_last_entry = 2 /* skip . and .. */ + 4447 psrch_inf->index_of_last_entry = 2 /* skip . and .. */ +
4053 psrch_inf->entries_in_buffer; 4448 psrch_inf->entries_in_buffer;
4054 lnoff = le16_to_cpu(parms->LastNameOffset); 4449 lnoff = le16_to_cpu(parms->LastNameOffset);
4055 if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE < 4450 if (CIFSMaxBufSize < lnoff) {
4056 lnoff) {
4057 cERROR(1, "ignoring corrupt resume name"); 4451 cERROR(1, "ignoring corrupt resume name");
4058 psrch_inf->last_entry = NULL; 4452 psrch_inf->last_entry = NULL;
4059 return rc; 4453 return rc;
@@ -4097,9 +4491,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
4097 byte_count = 0; 4491 byte_count = 0;
4098 pSMB->TotalDataCount = 0; /* no EAs */ 4492 pSMB->TotalDataCount = 0; /* no EAs */
4099 pSMB->MaxParameterCount = cpu_to_le16(8); 4493 pSMB->MaxParameterCount = cpu_to_le16(8);
4100 pSMB->MaxDataCount = 4494 pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize & 0xFFFFFF00);
4101 cpu_to_le16((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) &
4102 0xFFFFFF00);
4103 pSMB->MaxSetupCount = 0; 4495 pSMB->MaxSetupCount = 0;
4104 pSMB->Reserved = 0; 4496 pSMB->Reserved = 0;
4105 pSMB->Flags = 0; 4497 pSMB->Flags = 0;
@@ -4181,8 +4573,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
4181 psrch_inf->index_of_last_entry += 4573 psrch_inf->index_of_last_entry +=
4182 psrch_inf->entries_in_buffer; 4574 psrch_inf->entries_in_buffer;
4183 lnoff = le16_to_cpu(parms->LastNameOffset); 4575 lnoff = le16_to_cpu(parms->LastNameOffset);
4184 if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE < 4576 if (CIFSMaxBufSize < lnoff) {
4185 lnoff) {
4186 cERROR(1, "ignoring corrupt resume name"); 4577 cERROR(1, "ignoring corrupt resume name");
4187 psrch_inf->last_entry = NULL; 4578 psrch_inf->last_entry = NULL;
4188 return rc; 4579 return rc;
@@ -5840,7 +6231,7 @@ QAllEAsRetry:
5840 6231
5841 if (ea_name) { 6232 if (ea_name) {
5842 if (ea_name_len == name_len && 6233 if (ea_name_len == name_len &&
5843 strncmp(ea_name, temp_ptr, name_len) == 0) { 6234 memcmp(ea_name, temp_ptr, name_len) == 0) {
5844 temp_ptr += name_len + 1; 6235 temp_ptr += name_len + 1;
5845 rc = value_len; 6236 rc = value_len;
5846 if (buf_size == 0) 6237 if (buf_size == 0)
@@ -6035,12 +6426,7 @@ int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon,
6035 pSMB->TotalParameterCount = 0 ; 6426 pSMB->TotalParameterCount = 0 ;
6036 pSMB->TotalDataCount = 0; 6427 pSMB->TotalDataCount = 0;
6037 pSMB->MaxParameterCount = cpu_to_le32(2); 6428 pSMB->MaxParameterCount = cpu_to_le32(2);
6038 /* BB find exact data count max from sess structure BB */ 6429 pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
6039 pSMB->MaxDataCount = 0; /* same in little endian or be */
6040/* BB VERIFY verify which is correct for above BB */
6041 pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
6042 MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
6043
6044 pSMB->MaxSetupCount = 4; 6430 pSMB->MaxSetupCount = 4;
6045 pSMB->Reserved = 0; 6431 pSMB->Reserved = 0;
6046 pSMB->ParameterOffset = 0; 6432 pSMB->ParameterOffset = 0;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 71beb020197..d545a95c30e 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -181,7 +181,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
181 -EINVAL = invalid transact2 181 -EINVAL = invalid transact2
182 182
183 */ 183 */
184static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize) 184static int check2ndT2(struct smb_hdr *pSMB)
185{ 185{
186 struct smb_t2_rsp *pSMBt; 186 struct smb_t2_rsp *pSMBt;
187 int remaining; 187 int remaining;
@@ -214,9 +214,9 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
214 214
215 cFYI(1, "missing %d bytes from transact2, check next response", 215 cFYI(1, "missing %d bytes from transact2, check next response",
216 remaining); 216 remaining);
217 if (total_data_size > maxBufSize) { 217 if (total_data_size > CIFSMaxBufSize) {
218 cERROR(1, "TotalDataSize %d is over maximum buffer %d", 218 cERROR(1, "TotalDataSize %d is over maximum buffer %d",
219 total_data_size, maxBufSize); 219 total_data_size, CIFSMaxBufSize);
220 return -EINVAL; 220 return -EINVAL;
221 } 221 }
222 return remaining; 222 return remaining;
@@ -320,27 +320,24 @@ requeue_echo:
320} 320}
321 321
322static bool 322static bool
323allocate_buffers(char **bigbuf, char **smallbuf, unsigned int size, 323allocate_buffers(struct TCP_Server_Info *server)
324 bool is_large_buf)
325{ 324{
326 char *bbuf = *bigbuf, *sbuf = *smallbuf; 325 if (!server->bigbuf) {
327 326 server->bigbuf = (char *)cifs_buf_get();
328 if (bbuf == NULL) { 327 if (!server->bigbuf) {
329 bbuf = (char *)cifs_buf_get();
330 if (!bbuf) {
331 cERROR(1, "No memory for large SMB response"); 328 cERROR(1, "No memory for large SMB response");
332 msleep(3000); 329 msleep(3000);
333 /* retry will check if exiting */ 330 /* retry will check if exiting */
334 return false; 331 return false;
335 } 332 }
336 } else if (is_large_buf) { 333 } else if (server->large_buf) {
337 /* we are reusing a dirty large buf, clear its start */ 334 /* we are reusing a dirty large buf, clear its start */
338 memset(bbuf, 0, size); 335 memset(server->bigbuf, 0, sizeof(struct smb_hdr));
339 } 336 }
340 337
341 if (sbuf == NULL) { 338 if (!server->smallbuf) {
342 sbuf = (char *)cifs_small_buf_get(); 339 server->smallbuf = (char *)cifs_small_buf_get();
343 if (!sbuf) { 340 if (!server->smallbuf) {
344 cERROR(1, "No memory for SMB response"); 341 cERROR(1, "No memory for SMB response");
345 msleep(1000); 342 msleep(1000);
346 /* retry will check if exiting */ 343 /* retry will check if exiting */
@@ -349,36 +346,116 @@ allocate_buffers(char **bigbuf, char **smallbuf, unsigned int size,
349 /* beginning of smb buffer is cleared in our buf_get */ 346 /* beginning of smb buffer is cleared in our buf_get */
350 } else { 347 } else {
351 /* if existing small buf clear beginning */ 348 /* if existing small buf clear beginning */
352 memset(sbuf, 0, size); 349 memset(server->smallbuf, 0, sizeof(struct smb_hdr));
353 } 350 }
354 351
355 *bigbuf = bbuf;
356 *smallbuf = sbuf;
357
358 return true; 352 return true;
359} 353}
360 354
361static int 355static bool
362read_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg, 356server_unresponsive(struct TCP_Server_Info *server)
363 struct kvec *iov, unsigned int to_read, 357{
364 unsigned int *ptotal_read, bool is_header_read) 358 if (echo_retries > 0 && server->tcpStatus == CifsGood &&
359 time_after(jiffies, server->lstrp +
360 (echo_retries * SMB_ECHO_INTERVAL))) {
361 cERROR(1, "Server %s has not responded in %d seconds. "
362 "Reconnecting...", server->hostname,
363 (echo_retries * SMB_ECHO_INTERVAL / HZ));
364 cifs_reconnect(server);
365 wake_up(&server->response_q);
366 return true;
367 }
368
369 return false;
370}
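As a rough worked example of the check above: with echo_retries left at its default of 5, and assuming SMB_ECHO_INTERVAL works out to 60 seconds (it is defined in terms of HZ), server_unresponsive() declares the server dead and forces a reconnect after roughly 5 * 60 = 300 seconds without a response, while setting echo_retries to 0 disables the check entirely.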
371
372/*
373 * kvec_array_init - clone a kvec array, and advance into it
374 * @new: pointer to memory for cloned array
375 * @iov: pointer to original array
376 * @nr_segs: number of members in original array
377 * @bytes: number of bytes to advance into the cloned array
378 *
379 * This function will copy the array provided in iov to a section of memory
380 * and advance the specified number of bytes into the new array. It returns
381 * the number of segments in the new array. "new" must be at least as big as
382 * the original iov array.
383 */
384static unsigned int
385kvec_array_init(struct kvec *new, struct kvec *iov, unsigned int nr_segs,
386 size_t bytes)
387{
388 size_t base = 0;
389
390 while (bytes || !iov->iov_len) {
391 int copy = min(bytes, iov->iov_len);
392
393 bytes -= copy;
394 base += copy;
395 if (iov->iov_len == base) {
396 iov++;
397 nr_segs--;
398 base = 0;
399 }
400 }
401 memcpy(new, iov, sizeof(*iov) * nr_segs);
402 new->iov_base += base;
403 new->iov_len -= base;
404 return nr_segs;
405}
406
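kvec_array_init() above lets a partially completed receive resume mid-array by re-pointing a scratch copy of the caller's kvec array. The same arithmetic in user-space terms, with struct iovec and invented names, and with the same precondition that bytes is strictly less than the total length described by the array:

	#include <string.h>
	#include <sys/uio.h>

	/* clone iov[] into new[] advanced by "bytes"; returns segments remaining */
	static unsigned int iov_array_advance(struct iovec *new, struct iovec *iov,
					      unsigned int nr_segs, size_t bytes)
	{
		size_t base = 0;

		while (bytes || !iov->iov_len) {
			size_t copy = bytes < iov->iov_len ? bytes : iov->iov_len;

			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {	/* segment fully consumed */
				iov++;
				nr_segs--;
				base = 0;
			}
		}
		memcpy(new, iov, sizeof(*iov) * nr_segs);
		new->iov_base = (char *)new->iov_base + base;
		new->iov_len -= base;
		return nr_segs;
	}

For example, after receiving 6 bytes of an array describing 4 + 8 bytes, the clone starts 2 bytes into the second segment and reports a single remaining segment of length 6.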
407static struct kvec *
408get_server_iovec(struct TCP_Server_Info *server, unsigned int nr_segs)
365{ 409{
366 int length, rc = 0; 410 struct kvec *new_iov;
367 unsigned int total_read; 411
368 char *buf = iov->iov_base; 412 if (server->iov && nr_segs <= server->nr_iov)
413 return server->iov;
414
415 /* not big enough -- allocate a new one and release the old */
416 new_iov = kmalloc(sizeof(*new_iov) * nr_segs, GFP_NOFS);
417 if (new_iov) {
418 kfree(server->iov);
419 server->iov = new_iov;
420 server->nr_iov = nr_segs;
421 }
422 return new_iov;
423}
424
425int
426cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
427 unsigned int nr_segs, unsigned int to_read)
428{
429 int length = 0;
430 int total_read;
431 unsigned int segs;
432 struct msghdr smb_msg;
433 struct kvec *iov;
434
435 iov = get_server_iovec(server, nr_segs);
436 if (!iov)
437 return -ENOMEM;
438
439 smb_msg.msg_control = NULL;
440 smb_msg.msg_controllen = 0;
441
442 for (total_read = 0; to_read; total_read += length, to_read -= length) {
443 if (server_unresponsive(server)) {
444 total_read = -EAGAIN;
445 break;
446 }
447
448 segs = kvec_array_init(iov, iov_orig, nr_segs, total_read);
449
450 length = kernel_recvmsg(server->ssocket, &smb_msg,
451 iov, segs, to_read, 0);
369 452
370 for (total_read = 0; total_read < to_read; total_read += length) {
371 length = kernel_recvmsg(server->ssocket, smb_msg, iov, 1,
372 to_read - total_read, 0);
373 if (server->tcpStatus == CifsExiting) { 453 if (server->tcpStatus == CifsExiting) {
374 /* then will exit */ 454 total_read = -ESHUTDOWN;
375 rc = 2;
376 break; 455 break;
377 } else if (server->tcpStatus == CifsNeedReconnect) { 456 } else if (server->tcpStatus == CifsNeedReconnect) {
378 cifs_reconnect(server); 457 cifs_reconnect(server);
379 /* Reconnect wakes up rspns q */ 458 total_read = -EAGAIN;
380 /* Now we will reread sock */
381 rc = 1;
382 break; 459 break;
383 } else if (length == -ERESTARTSYS || 460 } else if (length == -ERESTARTSYS ||
384 length == -EAGAIN || 461 length == -EAGAIN ||
@@ -390,56 +467,54 @@ read_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg,
390 */ 467 */
391 usleep_range(1000, 2000); 468 usleep_range(1000, 2000);
392 length = 0; 469 length = 0;
393 if (!is_header_read) 470 continue;
394 continue;
395 /* Special handling for header read */
396 if (total_read) {
397 iov->iov_base = (to_read - total_read) +
398 buf;
399 iov->iov_len = to_read - total_read;
400 smb_msg->msg_control = NULL;
401 smb_msg->msg_controllen = 0;
402 rc = 3;
403 } else
404 rc = 1;
405 break;
406 } else if (length <= 0) { 471 } else if (length <= 0) {
407 cERROR(1, "Received no data, expecting %d", 472 cFYI(1, "Received no data or error: expecting %d "
408 to_read - total_read); 473 "got %d", to_read, length);
409 cifs_reconnect(server); 474 cifs_reconnect(server);
410 rc = 1; 475 total_read = -EAGAIN;
411 break; 476 break;
412 } 477 }
413 } 478 }
479 return total_read;
480}
414 481
415 *ptotal_read = total_read; 482int
416 return rc; 483cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
484 unsigned int to_read)
485{
486 struct kvec iov;
487
488 iov.iov_base = buf;
489 iov.iov_len = to_read;
490
491 return cifs_readv_from_socket(server, &iov, 1, to_read);
417} 492}
418 493
419static bool 494static bool
420check_rfc1002_header(struct TCP_Server_Info *server, char *buf) 495is_smb_response(struct TCP_Server_Info *server, unsigned char type)
421{ 496{
422 char temp = *buf;
423 unsigned int pdu_length = be32_to_cpu(
424 ((struct smb_hdr *)buf)->smb_buf_length);
425
426 /* 497 /*
427	 * The first (big endian) byte of the length field is actually 498	 * The first (big endian) byte of the length field is actually
428	 * not part of the length but the frame type, with the most 499	 * not part of the length but the frame type, with the most
429	 * common type, zero, denoting regular data. 500	 * common type, zero, denoting regular data.
430 */ 501 */
431 if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) { 502 switch (type) {
432 return false; 503 case RFC1002_SESSION_MESSAGE:
433 } else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) { 504 /* Regular SMB response */
434 cFYI(1, "Good RFC 1002 session rsp"); 505 return true;
435 return false; 506 case RFC1002_SESSION_KEEP_ALIVE:
436 } else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) { 507 cFYI(1, "RFC 1002 session keep alive");
508 break;
509 case RFC1002_POSITIVE_SESSION_RESPONSE:
510 cFYI(1, "RFC 1002 positive session response");
511 break;
512 case RFC1002_NEGATIVE_SESSION_RESPONSE:
437 /* 513 /*
438 * We get this from Windows 98 instead of an error on 514 * We get this from Windows 98 instead of an error on
439 * SMB negprot response. 515 * SMB negprot response.
440 */ 516 */
441 cFYI(1, "Negative RFC1002 Session Response Error 0x%x)", 517 cFYI(1, "RFC 1002 negative session response");
442 pdu_length);
443 /* give server a second to clean up */ 518 /* give server a second to clean up */
444 msleep(1000); 519 msleep(1000);
445 /* 520 /*
@@ -448,87 +523,89 @@ check_rfc1002_header(struct TCP_Server_Info *server, char *buf)
448 * is since we do not begin with RFC1001 session 523 * is since we do not begin with RFC1001 session
449 * initialize frame). 524 * initialize frame).
450 */ 525 */
451 cifs_set_port((struct sockaddr *) 526 cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
452 &server->dstaddr, CIFS_PORT);
453 cifs_reconnect(server); 527 cifs_reconnect(server);
454 wake_up(&server->response_q); 528 wake_up(&server->response_q);
455 return false; 529 break;
456 } else if (temp != (char) 0) { 530 default:
457 cERROR(1, "Unknown RFC 1002 frame"); 531 cERROR(1, "RFC 1002 unknown response type 0x%x", type);
458 cifs_dump_mem(" Received Data: ", buf, 4);
459 cifs_reconnect(server);
460 return false;
461 }
462
463 /* else we have an SMB response */
464 if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
465 (pdu_length < sizeof(struct smb_hdr) - 1 - 4)) {
466 cERROR(1, "Invalid size SMB length %d pdu_length %d",
467 4, pdu_length+4);
468 cifs_reconnect(server); 532 cifs_reconnect(server);
469 wake_up(&server->response_q);
470 return false;
471 } 533 }
472 534
473 return true; 535 return false;
474} 536}
475 537
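The 4-byte preamble inspected above is a one-byte RFC 1001/1002 frame type followed by a big-endian length; because the common session-message type is zero, the full 32-bit word read off the wire doubles as the SMB payload length. A self-contained decoder for just that layout (a hypothetical helper, not part of the patch) would be:

	#include <stdint.h>

	#define RFC1002_SESSION_MESSAGE 0x00

	/* byte 0 is the frame type; for type 0 the whole word is the length */
	static int rfc1002_decode(const unsigned char hdr[4], uint32_t *len)
	{
		uint32_t word = ((uint32_t)hdr[0] << 24) | ((uint32_t)hdr[1] << 16) |
				((uint32_t)hdr[2] << 8)  |  (uint32_t)hdr[3];

		if (hdr[0] != RFC1002_SESSION_MESSAGE)
			return -1;	/* keep-alive, session response, etc. */

		*len = word;		/* type byte is zero, so word == length */
		return 0;
	}
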
476static struct mid_q_entry * 538static struct mid_q_entry *
477find_cifs_mid(struct TCP_Server_Info *server, struct smb_hdr *buf, 539find_mid(struct TCP_Server_Info *server, struct smb_hdr *buf)
478 int *length, bool is_large_buf, bool *is_multi_rsp, char **bigbuf)
479{ 540{
480 struct mid_q_entry *mid = NULL, *tmp_mid, *ret = NULL; 541 struct mid_q_entry *mid;
481 542
482 spin_lock(&GlobalMid_Lock); 543 spin_lock(&GlobalMid_Lock);
483 list_for_each_entry_safe(mid, tmp_mid, &server->pending_mid_q, qhead) { 544 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
484 if (mid->mid != buf->Mid || 545 if (mid->mid == buf->Mid &&
485 mid->midState != MID_REQUEST_SUBMITTED || 546 mid->midState == MID_REQUEST_SUBMITTED &&
486 mid->command != buf->Command) 547 mid->command == buf->Command) {
487 continue; 548 spin_unlock(&GlobalMid_Lock);
488 549 return mid;
489 if (*length == 0 && check2ndT2(buf, server->maxBuf) > 0) {
490 /* We have a multipart transact2 resp */
491 *is_multi_rsp = true;
492 if (mid->resp_buf) {
493 /* merge response - fix up 1st*/
494 *length = coalesce_t2(buf, mid->resp_buf);
495 if (*length > 0) {
496 *length = 0;
497 mid->multiRsp = true;
498 break;
499 }
500 /* All parts received or packet is malformed. */
501 mid->multiEnd = true;
502 goto multi_t2_fnd;
503 }
504 if (!is_large_buf) {
505 /*FIXME: switch to already allocated largebuf?*/
506 cERROR(1, "1st trans2 resp needs bigbuf");
507 } else {
508 /* Have first buffer */
509 mid->resp_buf = buf;
510 mid->largeBuf = true;
511 *bigbuf = NULL;
512 }
513 break;
514 } 550 }
515 mid->resp_buf = buf; 551 }
516 mid->largeBuf = is_large_buf; 552 spin_unlock(&GlobalMid_Lock);
517multi_t2_fnd: 553 return NULL;
518 if (*length == 0) 554}
519 mid->midState = MID_RESPONSE_RECEIVED; 555
520 else 556void
521 mid->midState = MID_RESPONSE_MALFORMED; 557dequeue_mid(struct mid_q_entry *mid, bool malformed)
558{
522#ifdef CONFIG_CIFS_STATS2 559#ifdef CONFIG_CIFS_STATS2
523 mid->when_received = jiffies; 560 mid->when_received = jiffies;
524#endif 561#endif
525 list_del_init(&mid->qhead); 562 spin_lock(&GlobalMid_Lock);
526 ret = mid; 563 if (!malformed)
527 break; 564 mid->midState = MID_RESPONSE_RECEIVED;
528 } 565 else
566 mid->midState = MID_RESPONSE_MALFORMED;
567 list_del_init(&mid->qhead);
529 spin_unlock(&GlobalMid_Lock); 568 spin_unlock(&GlobalMid_Lock);
569}
530 570
531 return ret; 571static void
572handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
573 struct smb_hdr *buf, int malformed)
574{
575 if (malformed == 0 && check2ndT2(buf) > 0) {
576 mid->multiRsp = true;
577 if (mid->resp_buf) {
578 /* merge response - fix up 1st*/
579 malformed = coalesce_t2(buf, mid->resp_buf);
580 if (malformed > 0)
581 return;
582
583 /* All parts received or packet is malformed. */
584 mid->multiEnd = true;
585 return dequeue_mid(mid, malformed);
586 }
587 if (!server->large_buf) {
588 /*FIXME: switch to already allocated largebuf?*/
589 cERROR(1, "1st trans2 resp needs bigbuf");
590 } else {
591 /* Have first buffer */
592 mid->resp_buf = buf;
593 mid->largeBuf = true;
594 server->bigbuf = NULL;
595 }
596 return;
597 }
598 mid->resp_buf = buf;
599 mid->largeBuf = server->large_buf;
600 /* Was previous buf put in mpx struct for multi-rsp? */
601 if (!mid->multiRsp) {
602 /* smb buffer will be freed by user thread */
603 if (server->large_buf)
604 server->bigbuf = NULL;
605 else
606 server->smallbuf = NULL;
607 }
608 dequeue_mid(mid, malformed);
532} 609}
533 610
534static void clean_demultiplex_info(struct TCP_Server_Info *server) 611static void clean_demultiplex_info(struct TCP_Server_Info *server)
@@ -618,6 +695,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
618 } 695 }
619 696
620 kfree(server->hostname); 697 kfree(server->hostname);
698 kfree(server->iov);
621 kfree(server); 699 kfree(server);
622 700
623 length = atomic_dec_return(&tcpSesAllocCount); 701 length = atomic_dec_return(&tcpSesAllocCount);
@@ -627,20 +705,70 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
627} 705}
628 706
629static int 707static int
708standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
709{
710 int length;
711 char *buf = server->smallbuf;
712 struct smb_hdr *smb_buffer = (struct smb_hdr *)buf;
713 unsigned int pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
714
715 /* make sure this will fit in a large buffer */
716 if (pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
717 cERROR(1, "SMB response too long (%u bytes)",
718 pdu_length);
719 cifs_reconnect(server);
720 wake_up(&server->response_q);
721 return -EAGAIN;
722 }
723
724 /* switch to large buffer if too big for a small one */
725 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
726 server->large_buf = true;
727 memcpy(server->bigbuf, server->smallbuf, server->total_read);
728 buf = server->bigbuf;
729 smb_buffer = (struct smb_hdr *)buf;
730 }
731
732 /* now read the rest */
733 length = cifs_read_from_socket(server,
734 buf + sizeof(struct smb_hdr) - 1,
735 pdu_length - sizeof(struct smb_hdr) + 1 + 4);
736 if (length < 0)
737 return length;
738 server->total_read += length;
739
740 dump_smb(smb_buffer, server->total_read);
741
742 /*
743 * We know that we received enough to get to the MID as we
744 * checked the pdu_length earlier. Now check to see
745 * if the rest of the header is OK. We borrow the length
746 * var for the rest of the loop to avoid a new stack var.
747 *
748 * 48 bytes is enough to display the header and a little bit
749 * into the payload for debugging purposes.
750 */
751 length = checkSMB(smb_buffer, smb_buffer->Mid, server->total_read);
752 if (length != 0)
753 cifs_dump_mem("Bad SMB: ", buf,
754 min_t(unsigned int, server->total_read, 48));
755
756 if (mid)
757 handle_mid(mid, server, smb_buffer, length);
758
759 return length;
760}
761
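standard_receive3() above uses the length from that preamble to decide whether the response still fits the preallocated small buffer or must be moved into the large one before the remainder is read. A condensed user-space model of the receive step (plain read(), invented buffer sizes and names):

	#include <string.h>
	#include <unistd.h>

	#define SMALL_BUF 512		/* stand-ins for the real buffer sizes */
	#define LARGE_BUF (16 * 1024)

	/* hdr_read bytes of the frame are already in small[]; finish the frame */
	static char *receive_rest(int fd, char *small, char *large,
				  size_t hdr_read, size_t frame_len)
	{
		char *buf = small;
		size_t got = hdr_read;

		if (frame_len > LARGE_BUF)
			return NULL;			/* oversized frame: give up */

		if (frame_len > SMALL_BUF) {		/* switch to the large buffer */
			memcpy(large, small, hdr_read);
			buf = large;
		}

		while (got < frame_len) {
			ssize_t n = read(fd, buf + got, frame_len - got);

			if (n <= 0)
				return NULL;
			got += (size_t)n;
		}
		return buf;				/* caller parses from here */
	}
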
762static int
630cifs_demultiplex_thread(void *p) 763cifs_demultiplex_thread(void *p)
631{ 764{
632 int length; 765 int length;
633 struct TCP_Server_Info *server = p; 766 struct TCP_Server_Info *server = p;
634 unsigned int pdu_length, total_read; 767 unsigned int pdu_length;
635 char *buf = NULL, *bigbuf = NULL, *smallbuf = NULL; 768 char *buf = NULL;
636 struct smb_hdr *smb_buffer = NULL; 769 struct smb_hdr *smb_buffer = NULL;
637 struct msghdr smb_msg;
638 struct kvec iov;
639 struct task_struct *task_to_wake = NULL; 770 struct task_struct *task_to_wake = NULL;
640 struct mid_q_entry *mid_entry; 771 struct mid_q_entry *mid_entry;
641 bool isLargeBuf = false;
642 bool isMultiRsp = false;
643 int rc;
644 772
645 current->flags |= PF_MEMALLOC; 773 current->flags |= PF_MEMALLOC;
646 cFYI(1, "Demultiplex PID: %d", task_pid_nr(current)); 774 cFYI(1, "Demultiplex PID: %d", task_pid_nr(current));
@@ -655,111 +783,65 @@ cifs_demultiplex_thread(void *p)
655 if (try_to_freeze()) 783 if (try_to_freeze())
656 continue; 784 continue;
657 785
658 if (!allocate_buffers(&bigbuf, &smallbuf, 786 if (!allocate_buffers(server))
659 sizeof(struct smb_hdr), isLargeBuf))
660 continue; 787 continue;
661 788
662 isLargeBuf = false; 789 server->large_buf = false;
663 isMultiRsp = false; 790 smb_buffer = (struct smb_hdr *)server->smallbuf;
664 smb_buffer = (struct smb_hdr *)smallbuf; 791 buf = server->smallbuf;
665 buf = smallbuf;
666 iov.iov_base = buf;
667 iov.iov_len = 4;
668 smb_msg.msg_control = NULL;
669 smb_msg.msg_controllen = 0;
670 pdu_length = 4; /* enough to get RFC1001 header */ 792 pdu_length = 4; /* enough to get RFC1001 header */
671 793
672incomplete_rcv: 794 length = cifs_read_from_socket(server, buf, pdu_length);
673 if (echo_retries > 0 && server->tcpStatus == CifsGood && 795 if (length < 0)
674 time_after(jiffies, server->lstrp +
675 (echo_retries * SMB_ECHO_INTERVAL))) {
676 cERROR(1, "Server %s has not responded in %d seconds. "
677 "Reconnecting...", server->hostname,
678 (echo_retries * SMB_ECHO_INTERVAL / HZ));
679 cifs_reconnect(server);
680 wake_up(&server->response_q);
681 continue;
682 }
683
684 rc = read_from_socket(server, &smb_msg, &iov, pdu_length,
685 &total_read, true /* header read */);
686 if (rc == 3)
687 goto incomplete_rcv;
688 else if (rc == 2)
689 break;
690 else if (rc == 1)
691 continue; 796 continue;
797 server->total_read = length;
692 798
693 /* 799 /*
694 * The right amount was read from socket - 4 bytes, 800 * The right amount was read from socket - 4 bytes,
695 * so we can now interpret the length field. 801 * so we can now interpret the length field.
696 */ 802 */
697
698 /*
699 * Note that RFC 1001 length is big endian on the wire,
700 * but we convert it here so it is always manipulated
701 * as host byte order.
702 */
703 pdu_length = be32_to_cpu(smb_buffer->smb_buf_length); 803 pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
704 804
705 cFYI(1, "rfc1002 length 0x%x", pdu_length+4); 805 cFYI(1, "RFC1002 header 0x%x", pdu_length);
706 if (!check_rfc1002_header(server, buf)) 806 if (!is_smb_response(server, buf[0]))
707 continue; 807 continue;
708 808
709 /* else length ok */ 809 /* make sure we have enough to get to the MID */
710 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) { 810 if (pdu_length < sizeof(struct smb_hdr) - 1 - 4) {
711 isLargeBuf = true; 811 cERROR(1, "SMB response too short (%u bytes)",
712 memcpy(bigbuf, smallbuf, 4); 812 pdu_length);
713 smb_buffer = (struct smb_hdr *)bigbuf; 813 cifs_reconnect(server);
714 buf = bigbuf; 814 wake_up(&server->response_q);
815 continue;
715 } 816 }
716 817
717 iov.iov_base = 4 + buf; 818 /* read down to the MID */
718 iov.iov_len = pdu_length; 819 length = cifs_read_from_socket(server, buf + 4,
719 rc = read_from_socket(server, &smb_msg, &iov, pdu_length, 820 sizeof(struct smb_hdr) - 1 - 4);
720 &total_read, false); 821 if (length < 0)
721 if (rc == 2)
722 break;
723 else if (rc == 1)
724 continue; 822 continue;
823 server->total_read += length;
725 824
726 total_read += 4; /* account for rfc1002 hdr */ 825 mid_entry = find_mid(server, smb_buffer);
727 826
728 dump_smb(smb_buffer, total_read); 827 if (!mid_entry || !mid_entry->receive)
828 length = standard_receive3(server, mid_entry);
829 else
830 length = mid_entry->receive(server, mid_entry);
729 831
730 /* 832 if (length < 0)
731 * We know that we received enough to get to the MID as we 833 continue;
732 * checked the pdu_length earlier. Now check to see
733 * if the rest of the header is OK. We borrow the length
734 * var for the rest of the loop to avoid a new stack var.
735 *
736 * 48 bytes is enough to display the header and a little bit
737 * into the payload for debugging purposes.
738 */
739 length = checkSMB(smb_buffer, smb_buffer->Mid, total_read);
740 if (length != 0)
741 cifs_dump_mem("Bad SMB: ", buf,
742 min_t(unsigned int, total_read, 48));
743 834
744 server->lstrp = jiffies; 835 if (server->large_buf) {
836 buf = server->bigbuf;
837 smb_buffer = (struct smb_hdr *)buf;
838 }
745 839
746 mid_entry = find_cifs_mid(server, smb_buffer, &length, 840 server->lstrp = jiffies;
747 isLargeBuf, &isMultiRsp, &bigbuf);
748 if (mid_entry != NULL) { 841 if (mid_entry != NULL) {
749 mid_entry->callback(mid_entry); 842 if (!mid_entry->multiRsp || mid_entry->multiEnd)
750 /* Was previous buf put in mpx struct for multi-rsp? */ 843 mid_entry->callback(mid_entry);
751 if (!isMultiRsp) { 844 } else if (!is_valid_oplock_break(smb_buffer, server)) {
752 /* smb buffer will be freed by user thread */
753 if (isLargeBuf)
754 bigbuf = NULL;
755 else
756 smallbuf = NULL;
757 }
758 } else if (length != 0) {
759 /* response sanity checks failed */
760 continue;
761 } else if (!is_valid_oplock_break(smb_buffer, server) &&
762 !isMultiRsp) {
763 cERROR(1, "No task to wake, unknown frame received! " 845 cERROR(1, "No task to wake, unknown frame received! "
764 "NumMids %d", atomic_read(&midCount)); 846 "NumMids %d", atomic_read(&midCount));
765 cifs_dump_mem("Received Data is: ", buf, 847 cifs_dump_mem("Received Data is: ", buf,
@@ -773,9 +855,9 @@ incomplete_rcv:
773 } /* end while !EXITING */ 855 } /* end while !EXITING */
774 856
775 /* buffer usually freed in free_mid - need to free it here on exit */ 857 /* buffer usually freed in free_mid - need to free it here on exit */
776 cifs_buf_release(bigbuf); 858 cifs_buf_release(server->bigbuf);
777 if (smallbuf) /* no sense logging a debug message if NULL */ 859 if (server->smallbuf) /* no sense logging a debug message if NULL */
778 cifs_small_buf_release(smallbuf); 860 cifs_small_buf_release(server->smallbuf);
779 861
780 task_to_wake = xchg(&server->tsk, NULL); 862 task_to_wake = xchg(&server->tsk, NULL);
781 clean_demultiplex_info(server); 863 clean_demultiplex_info(server);
@@ -827,6 +909,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
827{ 909{
828 char *value, *data, *end; 910 char *value, *data, *end;
829 char *mountdata_copy = NULL, *options; 911 char *mountdata_copy = NULL, *options;
912 int err;
830 unsigned int temp_len, i, j; 913 unsigned int temp_len, i, j;
831 char separator[2]; 914 char separator[2];
832 short int override_uid = -1; 915 short int override_uid = -1;
@@ -883,6 +966,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
883 cFYI(1, "Null separator not allowed"); 966 cFYI(1, "Null separator not allowed");
884 } 967 }
885 } 968 }
969 vol->backupuid_specified = false; /* no backup intent for a user */
970 vol->backupgid_specified = false; /* no backup intent for a group */
886 971
887 while ((data = strsep(&options, separator)) != NULL) { 972 while ((data = strsep(&options, separator)) != NULL) {
888 if (!*data) 973 if (!*data)
@@ -1442,6 +1527,22 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1442 vol->mfsymlinks = true; 1527 vol->mfsymlinks = true;
1443 } else if (strnicmp(data, "multiuser", 8) == 0) { 1528 } else if (strnicmp(data, "multiuser", 8) == 0) {
1444 vol->multiuser = true; 1529 vol->multiuser = true;
1530 } else if (!strnicmp(data, "backupuid", 9) && value && *value) {
1531 err = kstrtouint(value, 0, &vol->backupuid);
1532 if (err < 0) {
1533 cERROR(1, "%s: Invalid backupuid value",
1534 __func__);
1535 goto cifs_parse_mount_err;
1536 }
1537 vol->backupuid_specified = true;
1538 } else if (!strnicmp(data, "backupgid", 9) && value && *value) {
1539 err = kstrtouint(value, 0, &vol->backupgid);
1540 if (err < 0) {
1541 cERROR(1, "%s: Invalid backupgid value",
1542 __func__);
1543 goto cifs_parse_mount_err;
1544 }
1545 vol->backupgid_specified = true;
1445 } else 1546 } else
1446 printk(KERN_WARNING "CIFS: Unknown mount option %s\n", 1547 printk(KERN_WARNING "CIFS: Unknown mount option %s\n",
1447 data); 1548 data);
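The two options added above take numeric IDs, so a mount requesting backup-intent access for a given user would look something like (values illustrative):

	mount -t cifs //server/share /mnt -o username=admin,backupuid=1000

and likewise backupgid=<gid> for a group; values that kstrtouint() cannot parse make the mount fail with the errors shown above.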
@@ -2209,16 +2310,16 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
2209 (new->mnt_cifs_flags & CIFS_MOUNT_MASK)) 2310 (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
2210 return 0; 2311 return 0;
2211 2312
2212 if (old->rsize != new->rsize)
2213 return 0;
2214
2215 /* 2313 /*
2216 * We want to share sb only if we don't specify wsize or specified wsize 2314 * We want to share sb only if we don't specify an r/wsize or
2217 * is greater or equal than existing one. 2315 * specified r/wsize is greater than or equal to existing one.
2218 */ 2316 */
2219 if (new->wsize && new->wsize < old->wsize) 2317 if (new->wsize && new->wsize < old->wsize)
2220 return 0; 2318 return 0;
2221 2319
2320 if (new->rsize && new->rsize < old->rsize)
2321 return 0;
2322
2222 if (old->mnt_uid != new->mnt_uid || old->mnt_gid != new->mnt_gid) 2323 if (old->mnt_uid != new->mnt_uid || old->mnt_gid != new->mnt_gid)
2223 return 0; 2324 return 0;
2224 2325
@@ -2656,14 +2757,6 @@ void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
2656 CIFS_MOUNT_POSIX_PATHS; 2757 CIFS_MOUNT_POSIX_PATHS;
2657 } 2758 }
2658 2759
2659 if (cifs_sb && (cifs_sb->rsize > 127 * 1024)) {
2660 if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
2661 cifs_sb->rsize = 127 * 1024;
2662 cFYI(DBG2, "larger reads not supported by srv");
2663 }
2664 }
2665
2666
2667 cFYI(1, "Negotiate caps 0x%x", (int)cap); 2760 cFYI(1, "Negotiate caps 0x%x", (int)cap);
2668#ifdef CONFIG_CIFS_DEBUG2 2761#ifdef CONFIG_CIFS_DEBUG2
2669 if (cap & CIFS_UNIX_FCNTL_CAP) 2762 if (cap & CIFS_UNIX_FCNTL_CAP)
@@ -2708,31 +2801,19 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
2708 spin_lock_init(&cifs_sb->tlink_tree_lock); 2801 spin_lock_init(&cifs_sb->tlink_tree_lock);
2709 cifs_sb->tlink_tree = RB_ROOT; 2802 cifs_sb->tlink_tree = RB_ROOT;
2710 2803
2711 if (pvolume_info->rsize > CIFSMaxBufSize) {
2712 cERROR(1, "rsize %d too large, using MaxBufSize",
2713 pvolume_info->rsize);
2714 cifs_sb->rsize = CIFSMaxBufSize;
2715 } else if ((pvolume_info->rsize) &&
2716 (pvolume_info->rsize <= CIFSMaxBufSize))
2717 cifs_sb->rsize = pvolume_info->rsize;
2718 else /* default */
2719 cifs_sb->rsize = CIFSMaxBufSize;
2720
2721 if (cifs_sb->rsize < 2048) {
2722 cifs_sb->rsize = 2048;
2723 /* Windows ME may prefer this */
2724 cFYI(1, "readsize set to minimum: 2048");
2725 }
2726
2727 /* 2804 /*
2728 * Temporarily set wsize for matching superblock. If we end up using 2805 * Temporarily set r/wsize for matching superblock. If we end up using
2729 * new sb then cifs_negotiate_wsize will later negotiate it downward 2806 * new sb then client will later negotiate it downward if needed.
2730 * if needed.
2731 */ 2807 */
2808 cifs_sb->rsize = pvolume_info->rsize;
2732 cifs_sb->wsize = pvolume_info->wsize; 2809 cifs_sb->wsize = pvolume_info->wsize;
2733 2810
2734 cifs_sb->mnt_uid = pvolume_info->linux_uid; 2811 cifs_sb->mnt_uid = pvolume_info->linux_uid;
2735 cifs_sb->mnt_gid = pvolume_info->linux_gid; 2812 cifs_sb->mnt_gid = pvolume_info->linux_gid;
2813 if (pvolume_info->backupuid_specified)
2814 cifs_sb->mnt_backupuid = pvolume_info->backupuid;
2815 if (pvolume_info->backupgid_specified)
2816 cifs_sb->mnt_backupgid = pvolume_info->backupgid;
2736 cifs_sb->mnt_file_mode = pvolume_info->file_mode; 2817 cifs_sb->mnt_file_mode = pvolume_info->file_mode;
2737 cifs_sb->mnt_dir_mode = pvolume_info->dir_mode; 2818 cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
2738 cFYI(1, "file mode: 0x%x dir mode: 0x%x", 2819 cFYI(1, "file mode: 0x%x dir mode: 0x%x",
@@ -2763,6 +2844,10 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
2763 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD; 2844 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD;
2764 if (pvolume_info->cifs_acl) 2845 if (pvolume_info->cifs_acl)
2765 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; 2846 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
2847 if (pvolume_info->backupuid_specified)
2848 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID;
2849 if (pvolume_info->backupgid_specified)
2850 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID;
2766 if (pvolume_info->override_uid) 2851 if (pvolume_info->override_uid)
2767 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID; 2852 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID;
2768 if (pvolume_info->override_gid) 2853 if (pvolume_info->override_gid)
@@ -2795,29 +2880,41 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
2795} 2880}
2796 2881
2797/* 2882/*
2798 * When the server supports very large writes via POSIX extensions, we can 2883 * When the server supports very large reads and writes via POSIX extensions,
2799 * allow up to 2^24-1, minus the size of a WRITE_AND_X header, not including 2884 * we can allow up to 2^24-1, minus the size of a READ/WRITE_AND_X header, not
2800 * the RFC1001 length. 2885 * including the RFC1001 length.
2801 * 2886 *
2802 * Note that this might make for "interesting" allocation problems during 2887 * Note that this might make for "interesting" allocation problems during
2803 * writeback, however, as we have to allocate an array of pointers for the 2888 * writeback, however, as we have to allocate an array of pointers for the
2804 * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096. 2889 * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
2890 *
2891 * For reads, there is a similar problem as we need to allocate an array
2892 * of kvecs to handle the receive, though that should only need to be done
2893 * once.
2805 */ 2894 */
2806#define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4) 2895#define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4)
2896#define CIFS_MAX_RSIZE ((1<<24) - sizeof(READ_RSP) + 4)
2807 2897
2808/* 2898/*
2809 * When the server doesn't allow large posix writes, only allow a wsize of 2899 * When the server doesn't allow large posix writes, only allow a rsize/wsize
2810 * 128k minus the size of the WRITE_AND_X header. That allows for a write up 2900 * of 2^17-1 minus the size of the call header. That allows for a read or
2811 * to the maximum size described by RFC1002. 2901 * write up to the maximum size described by RFC1002.
2812 */ 2902 */
2813#define CIFS_MAX_RFC1002_WSIZE (128 * 1024 - sizeof(WRITE_REQ) + 4) 2903#define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
2904#define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4)
2814 2905
2815/* 2906/*
2816 * The default wsize is 1M. find_get_pages seems to return a maximum of 256 2907 * The default wsize is 1M. find_get_pages seems to return a maximum of 256
2817 * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill 2908 * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
2818 * a single wsize request with a single call. 2909 * a single wsize request with a single call.
2819 */ 2910 */
2820#define CIFS_DEFAULT_WSIZE (1024 * 1024) 2911#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
2912
2913/*
2914 * Windows only supports a max of 60k reads. Default to that when posix
2915 * extensions aren't in force.
2916 */
2917#define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
2821 2918
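Side by side, the ceilings defined above work out roughly as follows. This is a standalone sketch, not part of the patch, and the header size it plugs in is an assumption; the real caps subtract sizeof(WRITE_REQ) and sizeof(READ_RSP) from cifspdu.h.

/*
 * Illustration only; ASSUMED_HDR_SIZE is a made-up stand-in for the
 * sizeof(WRITE_REQ)/sizeof(READ_RSP) terms used by the kernel macros.
 */
#include <stdio.h>

#define ASSUMED_HDR_SIZE 64u

int main(void)
{
        unsigned int posix_cap   = (1u << 24) - 1 - ASSUMED_HDR_SIZE + 4;
        unsigned int rfc1002_cap = (1u << 17) - 1 - ASSUMED_HDR_SIZE + 4;

        printf("POSIX large-IO cap    : ~%u bytes\n", posix_cap);
        printf("RFC1002 cap           : ~%u bytes\n", rfc1002_cap);
        printf("default iosize        : %u bytes\n", 1024u * 1024);
        printf("non-POSIX read default: %u bytes\n", 60u * 1024);
        return 0;
}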
2822static unsigned int 2919static unsigned int
2823cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info) 2920cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
@@ -2825,7 +2922,7 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
2825 __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); 2922 __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
2826 struct TCP_Server_Info *server = tcon->ses->server; 2923 struct TCP_Server_Info *server = tcon->ses->server;
2827 unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize : 2924 unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
2828 CIFS_DEFAULT_WSIZE; 2925 CIFS_DEFAULT_IOSIZE;
2829 2926
2830 /* can server support 24-bit write sizes? (via UNIX extensions) */ 2927 /* can server support 24-bit write sizes? (via UNIX extensions) */
2831 if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) 2928 if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
@@ -2848,6 +2945,50 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
2848 return wsize; 2945 return wsize;
2849} 2946}
2850 2947
2948static unsigned int
2949cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
2950{
2951 __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
2952 struct TCP_Server_Info *server = tcon->ses->server;
2953 unsigned int rsize, defsize;
2954
2955 /*
2956 * Set default value...
2957 *
2958 * HACK alert! Ancient servers have very small buffers. Even though
2959 * MS-CIFS indicates that servers are only limited by the client's
2960 * bufsize for reads, testing against win98se shows that it throws
2961 * INVALID_PARAMETER errors if you try to request too large a read.
2962 *
2963 * If the server advertises a MaxBufferSize of less than one page,
2964 * assume that it also can't satisfy reads larger than that either.
2965 *
2966 * FIXME: Is there a better heuristic for this?
2967 */
2968 if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
2969 defsize = CIFS_DEFAULT_IOSIZE;
2970 else if (server->capabilities & CAP_LARGE_READ_X)
2971 defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
2972 else if (server->maxBuf >= PAGE_CACHE_SIZE)
2973 defsize = CIFSMaxBufSize;
2974 else
2975 defsize = server->maxBuf - sizeof(READ_RSP);
2976
2977 rsize = pvolume_info->rsize ? pvolume_info->rsize : defsize;
2978
2979 /*
2980 * no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to
2981 * the client's MaxBufferSize.
2982 */
2983 if (!(server->capabilities & CAP_LARGE_READ_X))
2984 rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
2985
2986 /* hard limit of CIFS_MAX_RSIZE */
2987 rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
2988
2989 return rsize;
2990}
2991
2851static int 2992static int
2852is_path_accessible(int xid, struct cifs_tcon *tcon, 2993is_path_accessible(int xid, struct cifs_tcon *tcon,
2853 struct cifs_sb_info *cifs_sb, const char *full_path) 2994 struct cifs_sb_info *cifs_sb, const char *full_path)
@@ -2877,9 +3018,9 @@ cleanup_volume_info_contents(struct smb_vol *volume_info)
2877{ 3018{
2878 kfree(volume_info->username); 3019 kfree(volume_info->username);
2879 kzfree(volume_info->password); 3020 kzfree(volume_info->password);
2880 kfree(volume_info->UNC);
2881 if (volume_info->UNCip != volume_info->UNC + 2) 3021 if (volume_info->UNCip != volume_info->UNC + 2)
2882 kfree(volume_info->UNCip); 3022 kfree(volume_info->UNCip);
3023 kfree(volume_info->UNC);
2883 kfree(volume_info->domainname); 3024 kfree(volume_info->domainname);
2884 kfree(volume_info->iocharset); 3025 kfree(volume_info->iocharset);
2885 kfree(volume_info->prepath); 3026 kfree(volume_info->prepath);
@@ -3041,6 +3182,22 @@ cifs_get_volume_info(char *mount_data, const char *devname)
3041 return volume_info; 3182 return volume_info;
3042} 3183}
3043 3184
3185/* make sure ra_pages is a multiple of rsize */
3186static inline unsigned int
3187cifs_ra_pages(struct cifs_sb_info *cifs_sb)
3188{
3189 unsigned int reads;
3190 unsigned int rsize_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
3191
3192 if (rsize_pages >= default_backing_dev_info.ra_pages)
3193 return default_backing_dev_info.ra_pages;
3194 else if (rsize_pages == 0)
3195 return rsize_pages;
3196
3197 reads = default_backing_dev_info.ra_pages / rsize_pages;
3198 return reads * rsize_pages;
3199}
3200
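A quick worked example of the rounding in cifs_ra_pages(), as a userspace sketch assuming 4k pages and a default readahead window of 32 pages (the kernel reads both from PAGE_CACHE_SIZE and default_backing_dev_info):

#include <stdio.h>

/* assumed constants; the kernel uses PAGE_CACHE_SIZE and ra_pages */
#define PAGE_SIZE_ASSUMED 4096u
#define DEFAULT_RA_PAGES  32u

static unsigned int ra_pages_for_rsize(unsigned int rsize)
{
        unsigned int rsize_pages = rsize / PAGE_SIZE_ASSUMED;

        if (rsize_pages >= DEFAULT_RA_PAGES)
                return DEFAULT_RA_PAGES;
        if (rsize_pages == 0)
                return 0;
        /* largest multiple of rsize_pages that fits in the default window */
        return (DEFAULT_RA_PAGES / rsize_pages) * rsize_pages;
}

int main(void)
{
        /* 60k rsize -> 15 pages per read -> window trimmed to 30 pages */
        printf("rsize 60k -> ra_pages %u\n", ra_pages_for_rsize(60 * 1024));
        /* 1M rsize -> 256 pages per read -> capped at the 32-page default */
        printf("rsize 1M  -> ra_pages %u\n", ra_pages_for_rsize(1024 * 1024));
        return 0;
}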
3044int 3201int
3045cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) 3202cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
3046{ 3203{
@@ -3059,8 +3216,6 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
3059 if (rc) 3216 if (rc)
3060 return rc; 3217 return rc;
3061 3218
3062 cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
3063
3064#ifdef CONFIG_CIFS_DFS_UPCALL 3219#ifdef CONFIG_CIFS_DFS_UPCALL
3065try_mount_again: 3220try_mount_again:
3066 /* cleanup activities if we're chasing a referral */ 3221 /* cleanup activities if we're chasing a referral */
@@ -3125,15 +3280,11 @@ try_mount_again:
3125 CIFSSMBQFSAttributeInfo(xid, tcon); 3280 CIFSSMBQFSAttributeInfo(xid, tcon);
3126 } 3281 }
3127 3282
3128 if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
3129 cifs_sb->rsize = 1024 * 127;
3130 cFYI(DBG2, "no very large read support, rsize now 127K");
3131 }
3132 if (!(tcon->ses->capabilities & CAP_LARGE_READ_X))
3133 cifs_sb->rsize = min(cifs_sb->rsize,
3134 (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE));
3135
3136 cifs_sb->wsize = cifs_negotiate_wsize(tcon, volume_info); 3283 cifs_sb->wsize = cifs_negotiate_wsize(tcon, volume_info);
3284 cifs_sb->rsize = cifs_negotiate_rsize(tcon, volume_info);
3285
3286 /* tune readahead according to rsize */
3287 cifs_sb->bdi.ra_pages = cifs_ra_pages(cifs_sb);
3137 3288
3138remote_path_check: 3289remote_path_check:
3139#ifdef CONFIG_CIFS_DFS_UPCALL 3290#ifdef CONFIG_CIFS_DFS_UPCALL
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 72d448bf96c..d7eeb9d3ed6 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -171,7 +171,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
171 } 171 }
172 tcon = tlink_tcon(tlink); 172 tcon = tlink_tcon(tlink);
173 173
174 if (oplockEnabled) 174 if (enable_oplocks)
175 oplock = REQ_OPLOCK; 175 oplock = REQ_OPLOCK;
176 176
177 if (nd) 177 if (nd)
@@ -244,6 +244,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
244 if (!tcon->unix_ext && (mode & S_IWUGO) == 0) 244 if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
245 create_options |= CREATE_OPTION_READONLY; 245 create_options |= CREATE_OPTION_READONLY;
246 246
247 if (backup_cred(cifs_sb))
248 create_options |= CREATE_OPEN_BACKUP_INTENT;
249
247 if (tcon->ses->capabilities & CAP_NT_SMBS) 250 if (tcon->ses->capabilities & CAP_NT_SMBS)
248 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, 251 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
249 desiredAccess, create_options, 252 desiredAccess, create_options,
@@ -357,6 +360,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
357{ 360{
358 int rc = -EPERM; 361 int rc = -EPERM;
359 int xid; 362 int xid;
363 int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
360 struct cifs_sb_info *cifs_sb; 364 struct cifs_sb_info *cifs_sb;
361 struct tcon_link *tlink; 365 struct tcon_link *tlink;
362 struct cifs_tcon *pTcon; 366 struct cifs_tcon *pTcon;
@@ -431,9 +435,11 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
431 return rc; 435 return rc;
432 } 436 }
433 437
434 /* FIXME: would WRITE_OWNER | WRITE_DAC be better? */ 438 if (backup_cred(cifs_sb))
439 create_options |= CREATE_OPEN_BACKUP_INTENT;
440
435 rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE, 441 rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE,
436 GENERIC_WRITE, CREATE_NOT_DIR | CREATE_OPTION_SPECIAL, 442 GENERIC_WRITE, create_options,
437 &fileHandle, &oplock, buf, cifs_sb->local_nls, 443 &fileHandle, &oplock, buf, cifs_sb->local_nls,
438 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 444 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
439 if (rc) 445 if (rc)
@@ -642,8 +648,16 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
642 if (direntry->d_inode) { 648 if (direntry->d_inode) {
643 if (cifs_revalidate_dentry(direntry)) 649 if (cifs_revalidate_dentry(direntry))
644 return 0; 650 return 0;
645 else 651 else {
652 /*
653 * Forcibly invalidate automounting directory inodes
654 * (remote DFS directories) so as to have them
655 * instantiated again for automount
656 */
657 if (IS_AUTOMOUNT(direntry->d_inode))
658 return 0;
646 return 1; 659 return 1;
660 }
647 } 661 }
648 662
649 /* 663 /*
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index 55d87ac5200..9c7ecdccf2f 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -45,7 +45,7 @@
45#include "cifs_debug.h" 45#include "cifs_debug.h"
46#include "cifsfs.h" 46#include "cifsfs.h"
47 47
48#ifdef CIFS_NFSD_EXPORT 48#ifdef CONFIG_CIFS_NFSD_EXPORT
49static struct dentry *cifs_get_parent(struct dentry *dentry) 49static struct dentry *cifs_get_parent(struct dentry *dentry)
50{ 50{
51 /* BB need to add code here eventually to enable export via NFSD */ 51 /* BB need to add code here eventually to enable export via NFSD */
@@ -63,5 +63,5 @@ const struct export_operations cifs_export_ops = {
63 .encode_fs = */ 63 .encode_fs = */
64}; 64};
65 65
66#endif /* CIFS_NFSD_EXPORT */ 66#endif /* CONFIG_CIFS_NFSD_EXPORT */
67 67
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 9f41a10523a..ea096ce5d4f 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -32,6 +32,7 @@
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/mount.h> 33#include <linux/mount.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/swap.h>
35#include <asm/div64.h> 36#include <asm/div64.h>
36#include "cifsfs.h" 37#include "cifsfs.h"
37#include "cifspdu.h" 38#include "cifspdu.h"
@@ -174,6 +175,7 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
174 int rc; 175 int rc;
175 int desiredAccess; 176 int desiredAccess;
176 int disposition; 177 int disposition;
178 int create_options = CREATE_NOT_DIR;
177 FILE_ALL_INFO *buf; 179 FILE_ALL_INFO *buf;
178 180
179 desiredAccess = cifs_convert_flags(f_flags); 181 desiredAccess = cifs_convert_flags(f_flags);
@@ -210,9 +212,12 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
210 if (!buf) 212 if (!buf)
211 return -ENOMEM; 213 return -ENOMEM;
212 214
215 if (backup_cred(cifs_sb))
216 create_options |= CREATE_OPEN_BACKUP_INTENT;
217
213 if (tcon->ses->capabilities & CAP_NT_SMBS) 218 if (tcon->ses->capabilities & CAP_NT_SMBS)
214 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, 219 rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
215 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf, 220 desiredAccess, create_options, pnetfid, poplock, buf,
216 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags 221 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
217 & CIFS_MOUNT_MAP_SPECIAL_CHR); 222 & CIFS_MOUNT_MAP_SPECIAL_CHR);
218 else 223 else
@@ -258,8 +263,6 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
258 pCifsFile->invalidHandle = false; 263 pCifsFile->invalidHandle = false;
259 pCifsFile->tlink = cifs_get_tlink(tlink); 264 pCifsFile->tlink = cifs_get_tlink(tlink);
260 mutex_init(&pCifsFile->fh_mutex); 265 mutex_init(&pCifsFile->fh_mutex);
261 mutex_init(&pCifsFile->lock_mutex);
262 INIT_LIST_HEAD(&pCifsFile->llist);
263 INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break); 266 INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
264 267
265 spin_lock(&cifs_file_list_lock); 268 spin_lock(&cifs_file_list_lock);
@@ -272,11 +275,14 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
272 spin_unlock(&cifs_file_list_lock); 275 spin_unlock(&cifs_file_list_lock);
273 276
274 cifs_set_oplock_level(pCifsInode, oplock); 277 cifs_set_oplock_level(pCifsInode, oplock);
278 pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;
275 279
276 file->private_data = pCifsFile; 280 file->private_data = pCifsFile;
277 return pCifsFile; 281 return pCifsFile;
278} 282}
279 283
284static void cifs_del_lock_waiters(struct cifsLockInfo *lock);
285
280/* 286/*
281 * Release a reference on the file private data. This may involve closing 287 * Release a reference on the file private data. This may involve closing
282 * the filehandle out on the server. Must be called without holding 288 * the filehandle out on the server. Must be called without holding
@@ -327,12 +333,15 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
327 /* Delete any outstanding lock records. We'll lose them when the file 333 /* Delete any outstanding lock records. We'll lose them when the file
328 * is closed anyway. 334 * is closed anyway.
329 */ 335 */
330 mutex_lock(&cifs_file->lock_mutex); 336 mutex_lock(&cifsi->lock_mutex);
331 list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) { 337 list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
338 if (li->netfid != cifs_file->netfid)
339 continue;
332 list_del(&li->llist); 340 list_del(&li->llist);
341 cifs_del_lock_waiters(li);
333 kfree(li); 342 kfree(li);
334 } 343 }
335 mutex_unlock(&cifs_file->lock_mutex); 344 mutex_unlock(&cifsi->lock_mutex);
336 345
337 cifs_put_tlink(cifs_file->tlink); 346 cifs_put_tlink(cifs_file->tlink);
338 dput(cifs_file->dentry); 347 dput(cifs_file->dentry);
@@ -371,7 +380,7 @@ int cifs_open(struct inode *inode, struct file *file)
371 cFYI(1, "inode = 0x%p file flags are 0x%x for %s", 380 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
372 inode, file->f_flags, full_path); 381 inode, file->f_flags, full_path);
373 382
374 if (oplockEnabled) 383 if (enable_oplocks)
375 oplock = REQ_OPLOCK; 384 oplock = REQ_OPLOCK;
376 else 385 else
377 oplock = 0; 386 oplock = 0;
@@ -465,6 +474,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
465 char *full_path = NULL; 474 char *full_path = NULL;
466 int desiredAccess; 475 int desiredAccess;
467 int disposition = FILE_OPEN; 476 int disposition = FILE_OPEN;
477 int create_options = CREATE_NOT_DIR;
468 __u16 netfid; 478 __u16 netfid;
469 479
470 xid = GetXid(); 480 xid = GetXid();
@@ -495,7 +505,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
495 cFYI(1, "inode = 0x%p file flags 0x%x for %s", 505 cFYI(1, "inode = 0x%p file flags 0x%x for %s",
496 inode, pCifsFile->f_flags, full_path); 506 inode, pCifsFile->f_flags, full_path);
497 507
498 if (oplockEnabled) 508 if (enable_oplocks)
499 oplock = REQ_OPLOCK; 509 oplock = REQ_OPLOCK;
500 else 510 else
501 oplock = 0; 511 oplock = 0;
@@ -524,6 +534,9 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
524 534
525 desiredAccess = cifs_convert_flags(pCifsFile->f_flags); 535 desiredAccess = cifs_convert_flags(pCifsFile->f_flags);
526 536
537 if (backup_cred(cifs_sb))
538 create_options |= CREATE_OPEN_BACKUP_INTENT;
539
527 /* Can not refresh inode by passing in file_info buf to be returned 540 /* Can not refresh inode by passing in file_info buf to be returned
528 by SMBOpen and then calling get_inode_info with returned buf 541 by SMBOpen and then calling get_inode_info with returned buf
529 since file might have write behind data that needs to be flushed 542 since file might have write behind data that needs to be flushed
@@ -531,7 +544,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
531 that inode was not dirty locally we could do this */ 544 that inode was not dirty locally we could do this */
532 545
533 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess, 546 rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
534 CREATE_NOT_DIR, &netfid, &oplock, NULL, 547 create_options, &netfid, &oplock, NULL,
535 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & 548 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
536 CIFS_MOUNT_MAP_SPECIAL_CHR); 549 CIFS_MOUNT_MAP_SPECIAL_CHR);
537 if (rc) { 550 if (rc) {
@@ -631,219 +644,687 @@ int cifs_closedir(struct inode *inode, struct file *file)
631 return rc; 644 return rc;
632} 645}
633 646
634static int store_file_lock(struct cifsFileInfo *fid, __u64 len, 647static struct cifsLockInfo *
635 __u64 offset, __u8 lockType) 648cifs_lock_init(__u64 len, __u64 offset, __u8 type, __u16 netfid)
636{ 649{
637 struct cifsLockInfo *li = 650 struct cifsLockInfo *li =
638 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL); 651 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
639 if (li == NULL) 652 if (!li)
640 return -ENOMEM; 653 return li;
654 li->netfid = netfid;
641 li->offset = offset; 655 li->offset = offset;
642 li->length = len; 656 li->length = len;
643 li->type = lockType; 657 li->type = type;
644 mutex_lock(&fid->lock_mutex); 658 li->pid = current->tgid;
645 list_add(&li->llist, &fid->llist); 659 INIT_LIST_HEAD(&li->blist);
646 mutex_unlock(&fid->lock_mutex); 660 init_waitqueue_head(&li->block_q);
661 return li;
662}
663
664static void
665cifs_del_lock_waiters(struct cifsLockInfo *lock)
666{
667 struct cifsLockInfo *li, *tmp;
668 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
669 list_del_init(&li->blist);
670 wake_up(&li->block_q);
671 }
672}
673
674static bool
675cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
676 __u64 length, __u8 type, __u16 netfid,
677 struct cifsLockInfo **conf_lock)
678{
679 struct cifsLockInfo *li, *tmp;
680
681 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
682 if (offset + length <= li->offset ||
683 offset >= li->offset + li->length)
684 continue;
685 else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
686 ((netfid == li->netfid && current->tgid == li->pid) ||
687 type == li->type))
688 continue;
689 else {
690 *conf_lock = li;
691 return true;
692 }
693 }
694 return false;
695}
696
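The conflict test above reduces to two rules: non-overlapping byte ranges never conflict, and an overlapping lock is tolerated when the request is shared and either comes from the same fid and pid or the held lock is shared as well. A userspace sketch of that predicate, with simplified stand-ins for the cifsLockInfo fields:

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-ins for the cifsLockInfo fields used in the check */
struct brlock {
        unsigned long long offset;
        unsigned long long length;
        bool shared;                    /* LOCKING_ANDX_SHARED_LOCK */
        unsigned short fid;             /* netfid */
        int pid;
};

/* same shape as the test in cifs_find_lock_conflict() */
static bool locks_conflict(const struct brlock *want, const struct brlock *held)
{
        /* disjoint byte ranges never conflict */
        if (want->offset + want->length <= held->offset ||
            want->offset >= held->offset + held->length)
                return false;
        /* a shared request tolerates our own fid/pid, or another shared lock */
        if (want->shared &&
            ((want->fid == held->fid && want->pid == held->pid) || held->shared))
                return false;
        return true;
}

int main(void)
{
        struct brlock held = { 0, 100, false, 1, 42 };  /* exclusive, bytes 0..99 */
        struct brlock rd   = { 50, 10, true,  2, 43 };  /* shared, bytes 50..59 */

        printf("shared vs exclusive overlap: %d\n", locks_conflict(&rd, &held));
        held.shared = true;
        printf("shared vs shared overlap:    %d\n", locks_conflict(&rd, &held));
        return 0;
}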
697static int
698cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
699 __u8 type, __u16 netfid, struct file_lock *flock)
700{
701 int rc = 0;
702 struct cifsLockInfo *conf_lock;
703 bool exist;
704
705 mutex_lock(&cinode->lock_mutex);
706
707 exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
708 &conf_lock);
709 if (exist) {
710 flock->fl_start = conf_lock->offset;
711 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
712 flock->fl_pid = conf_lock->pid;
713 if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
714 flock->fl_type = F_RDLCK;
715 else
716 flock->fl_type = F_WRLCK;
717 } else if (!cinode->can_cache_brlcks)
718 rc = 1;
719 else
720 flock->fl_type = F_UNLCK;
721
722 mutex_unlock(&cinode->lock_mutex);
723 return rc;
724}
725
726static int
727cifs_lock_add(struct cifsInodeInfo *cinode, __u64 len, __u64 offset,
728 __u8 type, __u16 netfid)
729{
730 struct cifsLockInfo *li;
731
732 li = cifs_lock_init(len, offset, type, netfid);
733 if (!li)
734 return -ENOMEM;
735
736 mutex_lock(&cinode->lock_mutex);
737 list_add_tail(&li->llist, &cinode->llist);
738 mutex_unlock(&cinode->lock_mutex);
647 return 0; 739 return 0;
648} 740}
649 741
650int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) 742static int
743cifs_lock_add_if(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
744 __u8 type, __u16 netfid, bool wait)
651{ 745{
652 int rc, xid; 746 struct cifsLockInfo *lock, *conf_lock;
653 __u32 numLock = 0; 747 bool exist;
654 __u32 numUnlock = 0; 748 int rc = 0;
655 __u64 length; 749
656 bool wait_flag = false; 750 lock = cifs_lock_init(length, offset, type, netfid);
657 struct cifs_sb_info *cifs_sb; 751 if (!lock)
752 return -ENOMEM;
753
754try_again:
755 exist = false;
756 mutex_lock(&cinode->lock_mutex);
757
758 exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
759 &conf_lock);
760 if (!exist && cinode->can_cache_brlcks) {
761 list_add_tail(&lock->llist, &cinode->llist);
762 mutex_unlock(&cinode->lock_mutex);
763 return rc;
764 }
765
766 if (!exist)
767 rc = 1;
768 else if (!wait)
769 rc = -EACCES;
770 else {
771 list_add_tail(&lock->blist, &conf_lock->blist);
772 mutex_unlock(&cinode->lock_mutex);
773 rc = wait_event_interruptible(lock->block_q,
774 (lock->blist.prev == &lock->blist) &&
775 (lock->blist.next == &lock->blist));
776 if (!rc)
777 goto try_again;
778 else {
779 mutex_lock(&cinode->lock_mutex);
780 list_del_init(&lock->blist);
781 mutex_unlock(&cinode->lock_mutex);
782 }
783 }
784
785 kfree(lock);
786 mutex_unlock(&cinode->lock_mutex);
787 return rc;
788}
789
790static int
791cifs_posix_lock_test(struct file *file, struct file_lock *flock)
792{
793 int rc = 0;
794 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
795 unsigned char saved_type = flock->fl_type;
796
797 mutex_lock(&cinode->lock_mutex);
798 posix_test_lock(file, flock);
799
800 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
801 flock->fl_type = saved_type;
802 rc = 1;
803 }
804
805 mutex_unlock(&cinode->lock_mutex);
806 return rc;
807}
808
809static int
810cifs_posix_lock_set(struct file *file, struct file_lock *flock)
811{
812 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
813 int rc;
814
815 mutex_lock(&cinode->lock_mutex);
816 if (!cinode->can_cache_brlcks) {
817 mutex_unlock(&cinode->lock_mutex);
818 return 1;
819 }
820 rc = posix_lock_file_wait(file, flock);
821 mutex_unlock(&cinode->lock_mutex);
822 return rc;
823}
824
825static int
826cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
827{
828 int xid, rc = 0, stored_rc;
829 struct cifsLockInfo *li, *tmp;
658 struct cifs_tcon *tcon; 830 struct cifs_tcon *tcon;
659 __u16 netfid; 831 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
660 __u8 lockType = LOCKING_ANDX_LARGE_FILES; 832 unsigned int num, max_num;
661 bool posix_locking = 0; 833 LOCKING_ANDX_RANGE *buf, *cur;
834 int types[] = {LOCKING_ANDX_LARGE_FILES,
835 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
836 int i;
837
838 xid = GetXid();
839 tcon = tlink_tcon(cfile->tlink);
840
841 mutex_lock(&cinode->lock_mutex);
842 if (!cinode->can_cache_brlcks) {
843 mutex_unlock(&cinode->lock_mutex);
844 FreeXid(xid);
845 return rc;
846 }
847
848 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
849 sizeof(LOCKING_ANDX_RANGE);
850 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
851 if (!buf) {
852 mutex_unlock(&cinode->lock_mutex);
853 FreeXid(xid);
854 return rc;
855 }
856
857 for (i = 0; i < 2; i++) {
858 cur = buf;
859 num = 0;
860 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
861 if (li->type != types[i])
862 continue;
863 cur->Pid = cpu_to_le16(li->pid);
864 cur->LengthLow = cpu_to_le32((u32)li->length);
865 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
866 cur->OffsetLow = cpu_to_le32((u32)li->offset);
867 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
868 if (++num == max_num) {
869 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
870 li->type, 0, num, buf);
871 if (stored_rc)
872 rc = stored_rc;
873 cur = buf;
874 num = 0;
875 } else
876 cur++;
877 }
878
879 if (num) {
880 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
881 types[i], 0, num, buf);
882 if (stored_rc)
883 rc = stored_rc;
884 }
885 }
886
887 cinode->can_cache_brlcks = false;
888 mutex_unlock(&cinode->lock_mutex);
889
890 kfree(buf);
891 FreeXid(xid);
892 return rc;
893}
894
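The loop above is a plain fill-and-flush batcher: accumulate up to max_num ranges, send a request when the buffer fills, and flush whatever is left at the end. A generic userspace sketch of the same shape (send_batch() stands in for cifs_lockv(), and the sizes are made up for illustration):

#include <stdio.h>

struct range { unsigned long long offset, length; };

/* stand-in for cifs_lockv(): just report what would go on the wire */
static int send_batch(const struct range *buf, unsigned int num)
{
        printf("sending batch of %u range(s)\n", num);
        return 0;
}

/*
 * Push 'count' ranges at most 'max_num' per request, the same shape as the
 * loop in cifs_push_mandatory_locks().  In the kernel max_num comes from
 * maxBuf and the array is kzalloc()ed; a fixed demo buffer is enough here.
 */
static int push_ranges(const struct range *locks, unsigned int count,
                       unsigned int max_num)
{
        struct range buf[16];
        unsigned int num = 0, i;
        int rc = 0, stored_rc;

        if (max_num > 16)
                max_num = 16;           /* keep the demo buffer safe */

        for (i = 0; i < count; i++) {
                buf[num] = locks[i];
                if (++num == max_num) {
                        stored_rc = send_batch(buf, num);
                        if (stored_rc)
                                rc = stored_rc;
                        num = 0;
                }
        }
        if (num) {                      /* flush the partial final batch */
                stored_rc = send_batch(buf, num);
                if (stored_rc)
                        rc = stored_rc;
        }
        return rc;
}

int main(void)
{
        struct range locks[7] = { {0,1},{1,1},{2,1},{3,1},{4,1},{5,1},{6,1} };

        return push_ranges(locks, 7, 3);        /* expect batches of 3, 3, 1 */
}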
895/* copied from fs/locks.c with a name change */
896#define cifs_for_each_lock(inode, lockp) \
897 for (lockp = &inode->i_flock; *lockp != NULL; \
898 lockp = &(*lockp)->fl_next)
899
900static int
901cifs_push_posix_locks(struct cifsFileInfo *cfile)
902{
903 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
904 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
905 struct file_lock *flock, **before;
906 struct cifsLockInfo *lck, *tmp;
907 int rc = 0, xid, type;
908 __u64 length;
909 struct list_head locks_to_send;
662 910
663 length = 1 + pfLock->fl_end - pfLock->fl_start;
664 rc = -EACCES;
665 xid = GetXid(); 911 xid = GetXid();
666 912
667 cFYI(1, "Lock parm: 0x%x flockflags: " 913 mutex_lock(&cinode->lock_mutex);
668 "0x%x flocktype: 0x%x start: %lld end: %lld", 914 if (!cinode->can_cache_brlcks) {
669 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start, 915 mutex_unlock(&cinode->lock_mutex);
670 pfLock->fl_end); 916 FreeXid(xid);
917 return rc;
918 }
919
920 INIT_LIST_HEAD(&locks_to_send);
671 921
672 if (pfLock->fl_flags & FL_POSIX) 922 lock_flocks();
923 cifs_for_each_lock(cfile->dentry->d_inode, before) {
924 flock = *before;
925 length = 1 + flock->fl_end - flock->fl_start;
926 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
927 type = CIFS_RDLCK;
928 else
929 type = CIFS_WRLCK;
930
931 lck = cifs_lock_init(length, flock->fl_start, type,
932 cfile->netfid);
933 if (!lck) {
934 rc = -ENOMEM;
935 goto send_locks;
936 }
937 lck->pid = flock->fl_pid;
938
939 list_add_tail(&lck->llist, &locks_to_send);
940 }
941
942send_locks:
943 unlock_flocks();
944
945 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
946 struct file_lock tmp_lock;
947 int stored_rc;
948
949 tmp_lock.fl_start = lck->offset;
950 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
951 0, lck->length, &tmp_lock,
952 lck->type, 0);
953 if (stored_rc)
954 rc = stored_rc;
955 list_del(&lck->llist);
956 kfree(lck);
957 }
958
959 cinode->can_cache_brlcks = false;
960 mutex_unlock(&cinode->lock_mutex);
961
962 FreeXid(xid);
963 return rc;
964}
965
966static int
967cifs_push_locks(struct cifsFileInfo *cfile)
968{
969 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
970 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
971
972 if ((tcon->ses->capabilities & CAP_UNIX) &&
973 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
974 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
975 return cifs_push_posix_locks(cfile);
976
977 return cifs_push_mandatory_locks(cfile);
978}
979
980static void
981cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
982 bool *wait_flag)
983{
984 if (flock->fl_flags & FL_POSIX)
673 cFYI(1, "Posix"); 985 cFYI(1, "Posix");
674 if (pfLock->fl_flags & FL_FLOCK) 986 if (flock->fl_flags & FL_FLOCK)
675 cFYI(1, "Flock"); 987 cFYI(1, "Flock");
676 if (pfLock->fl_flags & FL_SLEEP) { 988 if (flock->fl_flags & FL_SLEEP) {
677 cFYI(1, "Blocking lock"); 989 cFYI(1, "Blocking lock");
678 wait_flag = true; 990 *wait_flag = true;
679 } 991 }
680 if (pfLock->fl_flags & FL_ACCESS) 992 if (flock->fl_flags & FL_ACCESS)
681 cFYI(1, "Process suspended by mandatory locking - " 993 cFYI(1, "Process suspended by mandatory locking - "
682 "not implemented yet"); 994 "not implemented yet");
683 if (pfLock->fl_flags & FL_LEASE) 995 if (flock->fl_flags & FL_LEASE)
684 cFYI(1, "Lease on file - not implemented yet"); 996 cFYI(1, "Lease on file - not implemented yet");
685 if (pfLock->fl_flags & 997 if (flock->fl_flags &
686 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE))) 998 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
687 cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags); 999 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
688 1000
689 if (pfLock->fl_type == F_WRLCK) { 1001 *type = LOCKING_ANDX_LARGE_FILES;
1002 if (flock->fl_type == F_WRLCK) {
690 cFYI(1, "F_WRLCK "); 1003 cFYI(1, "F_WRLCK ");
691 numLock = 1; 1004 *lock = 1;
692 } else if (pfLock->fl_type == F_UNLCK) { 1005 } else if (flock->fl_type == F_UNLCK) {
693 cFYI(1, "F_UNLCK"); 1006 cFYI(1, "F_UNLCK");
694 numUnlock = 1; 1007 *unlock = 1;
695 /* Check if unlock includes more than 1008 /* Check if unlock includes more than one lock range */
696 one lock range */ 1009 } else if (flock->fl_type == F_RDLCK) {
697 } else if (pfLock->fl_type == F_RDLCK) {
698 cFYI(1, "F_RDLCK"); 1010 cFYI(1, "F_RDLCK");
699 lockType |= LOCKING_ANDX_SHARED_LOCK; 1011 *type |= LOCKING_ANDX_SHARED_LOCK;
700 numLock = 1; 1012 *lock = 1;
701 } else if (pfLock->fl_type == F_EXLCK) { 1013 } else if (flock->fl_type == F_EXLCK) {
702 cFYI(1, "F_EXLCK"); 1014 cFYI(1, "F_EXLCK");
703 numLock = 1; 1015 *lock = 1;
704 } else if (pfLock->fl_type == F_SHLCK) { 1016 } else if (flock->fl_type == F_SHLCK) {
705 cFYI(1, "F_SHLCK"); 1017 cFYI(1, "F_SHLCK");
706 lockType |= LOCKING_ANDX_SHARED_LOCK; 1018 *type |= LOCKING_ANDX_SHARED_LOCK;
707 numLock = 1; 1019 *lock = 1;
708 } else 1020 } else
709 cFYI(1, "Unknown type of lock"); 1021 cFYI(1, "Unknown type of lock");
1022}
710 1023
711 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 1024static int
712 tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink); 1025cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
713 netfid = ((struct cifsFileInfo *)file->private_data)->netfid; 1026 bool wait_flag, bool posix_lck, int xid)
1027{
1028 int rc = 0;
1029 __u64 length = 1 + flock->fl_end - flock->fl_start;
1030 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1031 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1032 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1033 __u16 netfid = cfile->netfid;
714 1034
715 if ((tcon->ses->capabilities & CAP_UNIX) && 1035 if (posix_lck) {
716 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 1036 int posix_lock_type;
717 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 1037
718 posix_locking = 1; 1038 rc = cifs_posix_lock_test(file, flock);
719 /* BB add code here to normalize offset and length to 1039 if (!rc)
720 account for negative length which we can not accept over the
721 wire */
722 if (IS_GETLK(cmd)) {
723 if (posix_locking) {
724 int posix_lock_type;
725 if (lockType & LOCKING_ANDX_SHARED_LOCK)
726 posix_lock_type = CIFS_RDLCK;
727 else
728 posix_lock_type = CIFS_WRLCK;
729 rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
730 length, pfLock, posix_lock_type,
731 wait_flag);
732 FreeXid(xid);
733 return rc; 1040 return rc;
734 }
735 1041
736 /* BB we could chain these into one lock request BB */ 1042 if (type & LOCKING_ANDX_SHARED_LOCK)
737 rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 1043 posix_lock_type = CIFS_RDLCK;
738 0, 1, lockType, 0 /* wait flag */, 0); 1044 else
739 if (rc == 0) { 1045 posix_lock_type = CIFS_WRLCK;
740 rc = CIFSSMBLock(xid, tcon, netfid, length, 1046 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
741 pfLock->fl_start, 1 /* numUnlock */ , 1047 1 /* get */, length, flock,
742 0 /* numLock */ , lockType, 1048 posix_lock_type, wait_flag);
743 0 /* wait flag */, 0); 1049 return rc;
744 pfLock->fl_type = F_UNLCK; 1050 }
745 if (rc != 0)
746 cERROR(1, "Error unlocking previously locked "
747 "range %d during test of lock", rc);
748 rc = 0;
749 1051
750 } else { 1052 rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
751 /* if rc == ERR_SHARING_VIOLATION ? */ 1053 flock);
752 rc = 0; 1054 if (!rc)
1055 return rc;
753 1056
754 if (lockType & LOCKING_ANDX_SHARED_LOCK) { 1057 /* BB we could chain these into one lock request BB */
755 pfLock->fl_type = F_WRLCK; 1058 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
756 } else { 1059 flock->fl_start, 0, 1, type, 0, 0);
757 rc = CIFSSMBLock(xid, tcon, netfid, length, 1060 if (rc == 0) {
758 pfLock->fl_start, 0, 1, 1061 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
759 lockType | LOCKING_ANDX_SHARED_LOCK, 1062 length, flock->fl_start, 1, 0,
760 0 /* wait flag */, 0); 1063 type, 0, 0);
761 if (rc == 0) { 1064 flock->fl_type = F_UNLCK;
762 rc = CIFSSMBLock(xid, tcon, netfid, 1065 if (rc != 0)
763 length, pfLock->fl_start, 1, 0, 1066 cERROR(1, "Error unlocking previously locked "
764 lockType | 1067 "range %d during test of lock", rc);
765 LOCKING_ANDX_SHARED_LOCK, 1068 rc = 0;
766 0 /* wait flag */, 0); 1069 return rc;
767 pfLock->fl_type = F_RDLCK; 1070 }
768 if (rc != 0)
769 cERROR(1, "Error unlocking "
770 "previously locked range %d "
771 "during test of lock", rc);
772 rc = 0;
773 } else {
774 pfLock->fl_type = F_WRLCK;
775 rc = 0;
776 }
777 }
778 }
779 1071
780 FreeXid(xid); 1072 if (type & LOCKING_ANDX_SHARED_LOCK) {
1073 flock->fl_type = F_WRLCK;
1074 rc = 0;
781 return rc; 1075 return rc;
782 } 1076 }
783 1077
784 if (!numLock && !numUnlock) { 1078 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
785 /* if no lock or unlock then nothing 1079 flock->fl_start, 0, 1,
786 to do since we do not know what it is */ 1080 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
787 FreeXid(xid); 1081 if (rc == 0) {
788 return -EOPNOTSUPP; 1082 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1083 length, flock->fl_start, 1, 0,
1084 type | LOCKING_ANDX_SHARED_LOCK,
1085 0, 0);
1086 flock->fl_type = F_RDLCK;
1087 if (rc != 0)
1088 cERROR(1, "Error unlocking previously locked "
1089 "range %d during test of lock", rc);
1090 } else
1091 flock->fl_type = F_WRLCK;
1092
1093 rc = 0;
1094 return rc;
1095}
1096
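With no dedicated test-lock call on the wire, cifs_getlk() probes by taking the lock briefly and releasing it again. The same probe pattern can be shown from userspace with POSIX fcntl locks; this sketch is only the concept, not CIFS-specific, and within a single process it will always report the range as free since a process never conflicts with its own locks:

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Probe whether [start, start+len) could be write-locked by taking the lock
 * non-blockingly and dropping it again.  Note the inherent race: another
 * process may grab the range between the probe and any later lock attempt.
 */
static int range_is_free(int fd, off_t start, off_t len)
{
        struct flock fl = {
                .l_type   = F_WRLCK,
                .l_whence = SEEK_SET,
                .l_start  = start,
                .l_len    = len,
        };

        if (fcntl(fd, F_SETLK, &fl) == -1)
                return 0;               /* conflicting lock held elsewhere */

        fl.l_type = F_UNLCK;            /* got it, release immediately */
        fcntl(fd, F_SETLK, &fl);
        return 1;
}

int main(void)
{
        int fd = open("/tmp/lockprobe.demo", O_CREAT | O_RDWR, 0600);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* single process: never conflicts with its own locks, prints 1 */
        printf("range free: %d\n", range_is_free(fd, 0, 4096));
        close(fd);
        return 0;
}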
1097static void
1098cifs_move_llist(struct list_head *source, struct list_head *dest)
1099{
1100 struct list_head *li, *tmp;
1101 list_for_each_safe(li, tmp, source)
1102 list_move(li, dest);
1103}
1104
1105static void
1106cifs_free_llist(struct list_head *llist)
1107{
1108 struct cifsLockInfo *li, *tmp;
1109 list_for_each_entry_safe(li, tmp, llist, llist) {
1110 cifs_del_lock_waiters(li);
1111 list_del(&li->llist);
1112 kfree(li);
789 } 1113 }
1114}
1115
1116static int
1117cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1118{
1119 int rc = 0, stored_rc;
1120 int types[] = {LOCKING_ANDX_LARGE_FILES,
1121 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1122 unsigned int i;
1123 unsigned int max_num, num;
1124 LOCKING_ANDX_RANGE *buf, *cur;
1125 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1126 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1127 struct cifsLockInfo *li, *tmp;
1128 __u64 length = 1 + flock->fl_end - flock->fl_start;
1129 struct list_head tmp_llist;
1130
1131 INIT_LIST_HEAD(&tmp_llist);
1132
1133 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
1134 sizeof(LOCKING_ANDX_RANGE);
1135 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1136 if (!buf)
1137 return -ENOMEM;
1138
1139 mutex_lock(&cinode->lock_mutex);
1140 for (i = 0; i < 2; i++) {
1141 cur = buf;
1142 num = 0;
1143 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
1144 if (flock->fl_start > li->offset ||
1145 (flock->fl_start + length) <
1146 (li->offset + li->length))
1147 continue;
1148 if (current->tgid != li->pid)
1149 continue;
1150 if (cfile->netfid != li->netfid)
1151 continue;
1152 if (types[i] != li->type)
1153 continue;
1154 if (!cinode->can_cache_brlcks) {
1155 cur->Pid = cpu_to_le16(li->pid);
1156 cur->LengthLow = cpu_to_le32((u32)li->length);
1157 cur->LengthHigh =
1158 cpu_to_le32((u32)(li->length>>32));
1159 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1160 cur->OffsetHigh =
1161 cpu_to_le32((u32)(li->offset>>32));
1162 /*
1163 * We need to save a lock here to let us add
1164 * it again to the inode list if the unlock
1165 * range request fails on the server.
1166 */
1167 list_move(&li->llist, &tmp_llist);
1168 if (++num == max_num) {
1169 stored_rc = cifs_lockv(xid, tcon,
1170 cfile->netfid,
1171 li->type, num,
1172 0, buf);
1173 if (stored_rc) {
1174 /*
1175 * We failed on the unlock range
1176 * request - add all locks from
1177 * the tmp list to the head of
1178 * the inode list.
1179 */
1180 cifs_move_llist(&tmp_llist,
1181 &cinode->llist);
1182 rc = stored_rc;
1183 } else
1184 /*
1185 * The unlock range request
1186 * succeed - free the tmp list.
1187 */
1188 cifs_free_llist(&tmp_llist);
1189 cur = buf;
1190 num = 0;
1191 } else
1192 cur++;
1193 } else {
1194 /*
1195 * We can cache brlock requests - simply remove
1196 * a lock from the inode list.
1197 */
1198 list_del(&li->llist);
1199 cifs_del_lock_waiters(li);
1200 kfree(li);
1201 }
1202 }
1203 if (num) {
1204 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
1205 types[i], num, 0, buf);
1206 if (stored_rc) {
1207 cifs_move_llist(&tmp_llist, &cinode->llist);
1208 rc = stored_rc;
1209 } else
1210 cifs_free_llist(&tmp_llist);
1211 }
1212 }
1213
1214 mutex_unlock(&cinode->lock_mutex);
1215 kfree(buf);
1216 return rc;
1217}
790 1218
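The tmp_llist handling above is a move-then-commit-or-restore pattern: candidate locks are staged on a scratch list, dropped only once the server confirms the unlock, and spliced back on failure. A generic sketch of that idea with a plain singly linked list (illustrative only, not the kernel list API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int fid; struct node *next; };

static void push(struct node **list, struct node *n)
{
        n->next = *list;
        *list = n;
}

/* move every node matching 'fid' from *src onto *staged */
static void stage_matching(struct node **src, struct node **staged, int fid)
{
        struct node **pp = src;

        while (*pp) {
                if ((*pp)->fid == fid) {
                        struct node *n = *pp;

                        *pp = n->next;
                        push(staged, n);
                } else {
                        pp = &(*pp)->next;
                }
        }
}

int main(void)
{
        struct node *locks = NULL, *staged = NULL, *n;
        bool server_ok = false;         /* pretend the unlock request failed */
        int i;

        for (i = 0; i < 4; i++) {
                n = malloc(sizeof(*n));
                if (!n)
                        return 1;
                n->fid = i % 2;         /* two "fids": 0 and 1 */
                push(&locks, n);
        }

        stage_matching(&locks, &staged, 1);     /* stage fid-1 locks */

        while (staged) {
                n = staged;
                staged = staged->next;
                if (server_ok)
                        free(n);        /* commit: really drop the lock */
                else
                        push(&locks, n);/* rollback: splice back onto the list */
        }

        for (n = locks; n; n = n->next)
                printf("still tracked: fid %d\n", n->fid);
        return 0;
}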
791 if (posix_locking) { 1219static int
1220cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
1221 bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
1222{
1223 int rc = 0;
1224 __u64 length = 1 + flock->fl_end - flock->fl_start;
1225 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1226 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1227 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
1228 __u16 netfid = cfile->netfid;
1229
1230 if (posix_lck) {
792 int posix_lock_type; 1231 int posix_lock_type;
793 if (lockType & LOCKING_ANDX_SHARED_LOCK) 1232
1233 rc = cifs_posix_lock_set(file, flock);
1234 if (!rc || rc < 0)
1235 return rc;
1236
1237 if (type & LOCKING_ANDX_SHARED_LOCK)
794 posix_lock_type = CIFS_RDLCK; 1238 posix_lock_type = CIFS_RDLCK;
795 else 1239 else
796 posix_lock_type = CIFS_WRLCK; 1240 posix_lock_type = CIFS_WRLCK;
797 1241
798 if (numUnlock == 1) 1242 if (unlock == 1)
799 posix_lock_type = CIFS_UNLCK; 1243 posix_lock_type = CIFS_UNLCK;
800 1244
801 rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */, 1245 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
802 length, pfLock, posix_lock_type, 1246 0 /* set */, length, flock,
803 wait_flag); 1247 posix_lock_type, wait_flag);
804 } else { 1248 goto out;
805 struct cifsFileInfo *fid = file->private_data; 1249 }
806
807 if (numLock) {
808 rc = CIFSSMBLock(xid, tcon, netfid, length,
809 pfLock->fl_start, 0, numLock, lockType,
810 wait_flag, 0);
811 1250
812 if (rc == 0) { 1251 if (lock) {
813 /* For Windows locks we must store them. */ 1252 rc = cifs_lock_add_if(cinode, flock->fl_start, length,
814 rc = store_file_lock(fid, length, 1253 type, netfid, wait_flag);
815 pfLock->fl_start, lockType); 1254 if (rc < 0)
816 } 1255 return rc;
817 } else if (numUnlock) { 1256 else if (!rc)
818 /* For each stored lock that this unlock overlaps 1257 goto out;
819 completely, unlock it. */
820 int stored_rc = 0;
821 struct cifsLockInfo *li, *tmp;
822 1258
823 rc = 0; 1259 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
824 mutex_lock(&fid->lock_mutex); 1260 flock->fl_start, 0, 1, type, wait_flag, 0);
825 list_for_each_entry_safe(li, tmp, &fid->llist, llist) { 1261 if (rc == 0) {
826 if (pfLock->fl_start <= li->offset && 1262 /* For Windows locks we must store them. */
827 (pfLock->fl_start + length) >= 1263 rc = cifs_lock_add(cinode, length, flock->fl_start,
828 (li->offset + li->length)) { 1264 type, netfid);
829 stored_rc = CIFSSMBLock(xid, tcon,
830 netfid, li->length,
831 li->offset, 1, 0,
832 li->type, false, 0);
833 if (stored_rc)
834 rc = stored_rc;
835 else {
836 list_del(&li->llist);
837 kfree(li);
838 }
839 }
840 }
841 mutex_unlock(&fid->lock_mutex);
842 } 1265 }
1266 } else if (unlock)
1267 rc = cifs_unlock_range(cfile, flock, xid);
1268
1269out:
1270 if (flock->fl_flags & FL_POSIX)
1271 posix_lock_file_wait(file, flock);
1272 return rc;
1273}
1274
1275int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1276{
1277 int rc, xid;
1278 int lock = 0, unlock = 0;
1279 bool wait_flag = false;
1280 bool posix_lck = false;
1281 struct cifs_sb_info *cifs_sb;
1282 struct cifs_tcon *tcon;
1283 struct cifsInodeInfo *cinode;
1284 struct cifsFileInfo *cfile;
1285 __u16 netfid;
1286 __u8 type;
1287
1288 rc = -EACCES;
1289 xid = GetXid();
1290
1291 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1292 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1293 flock->fl_start, flock->fl_end);
1294
1295 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
1296
1297 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1298 cfile = (struct cifsFileInfo *)file->private_data;
1299 tcon = tlink_tcon(cfile->tlink);
1300 netfid = cfile->netfid;
1301 cinode = CIFS_I(file->f_path.dentry->d_inode);
1302
1303 if ((tcon->ses->capabilities & CAP_UNIX) &&
1304 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1305 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1306 posix_lck = true;
1307 /*
1308 * BB add code here to normalize offset and length to account for
1309 * negative length which we can not accept over the wire.
1310 */
1311 if (IS_GETLK(cmd)) {
1312 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
1313 FreeXid(xid);
1314 return rc;
843 } 1315 }
844 1316
845 if (pfLock->fl_flags & FL_POSIX) 1317 if (!lock && !unlock) {
846 posix_lock_file_wait(file, pfLock); 1318 /*
1319 * if no lock or unlock then nothing to do since we do not
1320 * know what it is
1321 */
1322 FreeXid(xid);
1323 return -EOPNOTSUPP;
1324 }
1325
1326 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1327 xid);
847 FreeXid(xid); 1328 FreeXid(xid);
848 return rc; 1329 return rc;
849} 1330}
@@ -1714,6 +2195,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
1714 struct smb_com_read_rsp *pSMBr; 2195 struct smb_com_read_rsp *pSMBr;
1715 struct cifs_io_parms io_parms; 2196 struct cifs_io_parms io_parms;
1716 char *read_data; 2197 char *read_data;
2198 unsigned int rsize;
1717 __u32 pid; 2199 __u32 pid;
1718 2200
1719 if (!nr_segs) 2201 if (!nr_segs)
@@ -1726,6 +2208,9 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
1726 xid = GetXid(); 2208 xid = GetXid();
1727 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 2209 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1728 2210
2211 /* FIXME: set up handlers for larger reads and/or convert to async */
2212 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2213
1729 open_file = file->private_data; 2214 open_file = file->private_data;
1730 pTcon = tlink_tcon(open_file->tlink); 2215 pTcon = tlink_tcon(open_file->tlink);
1731 2216
@@ -1738,7 +2223,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
1738 cFYI(1, "attempting read on write only file instance"); 2223 cFYI(1, "attempting read on write only file instance");
1739 2224
1740 for (total_read = 0; total_read < len; total_read += bytes_read) { 2225 for (total_read = 0; total_read < len; total_read += bytes_read) {
1741 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize); 2226 cur_len = min_t(const size_t, len - total_read, rsize);
1742 rc = -EAGAIN; 2227 rc = -EAGAIN;
1743 read_data = NULL; 2228 read_data = NULL;
1744 2229
@@ -1830,6 +2315,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1830 unsigned int bytes_read = 0; 2315 unsigned int bytes_read = 0;
1831 unsigned int total_read; 2316 unsigned int total_read;
1832 unsigned int current_read_size; 2317 unsigned int current_read_size;
2318 unsigned int rsize;
1833 struct cifs_sb_info *cifs_sb; 2319 struct cifs_sb_info *cifs_sb;
1834 struct cifs_tcon *pTcon; 2320 struct cifs_tcon *pTcon;
1835 int xid; 2321 int xid;
@@ -1842,6 +2328,9 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1842 xid = GetXid(); 2328 xid = GetXid();
1843 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 2329 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1844 2330
2331 /* FIXME: set up handlers for larger reads and/or convert to async */
2332 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2333
1845 if (file->private_data == NULL) { 2334 if (file->private_data == NULL) {
1846 rc = -EBADF; 2335 rc = -EBADF;
1847 FreeXid(xid); 2336 FreeXid(xid);
@@ -1861,14 +2350,14 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1861 for (total_read = 0, current_offset = read_data; 2350 for (total_read = 0, current_offset = read_data;
1862 read_size > total_read; 2351 read_size > total_read;
1863 total_read += bytes_read, current_offset += bytes_read) { 2352 total_read += bytes_read, current_offset += bytes_read) {
1864 current_read_size = min_t(const int, read_size - total_read, 2353 current_read_size = min_t(uint, read_size - total_read, rsize);
1865 cifs_sb->rsize); 2354
1866 /* For windows me and 9x we do not want to request more 2355 /* For windows me and 9x we do not want to request more
1867 than it negotiated since it will refuse the read then */ 2356 than it negotiated since it will refuse the read then */
1868 if ((pTcon->ses) && 2357 if ((pTcon->ses) &&
1869 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) { 2358 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1870 current_read_size = min_t(const int, current_read_size, 2359 current_read_size = min_t(uint, current_read_size,
1871 pTcon->ses->server->maxBuf - 128); 2360 CIFSMaxBufSize);
1872 } 2361 }
1873 rc = -EAGAIN; 2362 rc = -EAGAIN;
1874 while (rc == -EAGAIN) { 2363 while (rc == -EAGAIN) {
@@ -1957,82 +2446,24 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1957 return rc; 2446 return rc;
1958} 2447}
1959 2448
1960
1961static void cifs_copy_cache_pages(struct address_space *mapping,
1962 struct list_head *pages, int bytes_read, char *data)
1963{
1964 struct page *page;
1965 char *target;
1966
1967 while (bytes_read > 0) {
1968 if (list_empty(pages))
1969 break;
1970
1971 page = list_entry(pages->prev, struct page, lru);
1972 list_del(&page->lru);
1973
1974 if (add_to_page_cache_lru(page, mapping, page->index,
1975 GFP_KERNEL)) {
1976 page_cache_release(page);
1977 cFYI(1, "Add page cache failed");
1978 data += PAGE_CACHE_SIZE;
1979 bytes_read -= PAGE_CACHE_SIZE;
1980 continue;
1981 }
1982 page_cache_release(page);
1983
1984 target = kmap_atomic(page, KM_USER0);
1985
1986 if (PAGE_CACHE_SIZE > bytes_read) {
1987 memcpy(target, data, bytes_read);
1988 /* zero the tail end of this partial page */
1989 memset(target + bytes_read, 0,
1990 PAGE_CACHE_SIZE - bytes_read);
1991 bytes_read = 0;
1992 } else {
1993 memcpy(target, data, PAGE_CACHE_SIZE);
1994 bytes_read -= PAGE_CACHE_SIZE;
1995 }
1996 kunmap_atomic(target, KM_USER0);
1997
1998 flush_dcache_page(page);
1999 SetPageUptodate(page);
2000 unlock_page(page);
2001 data += PAGE_CACHE_SIZE;
2002
2003 /* add page to FS-Cache */
2004 cifs_readpage_to_fscache(mapping->host, page);
2005 }
2006 return;
2007}
2008
2009static int cifs_readpages(struct file *file, struct address_space *mapping, 2449static int cifs_readpages(struct file *file, struct address_space *mapping,
2010 struct list_head *page_list, unsigned num_pages) 2450 struct list_head *page_list, unsigned num_pages)
2011{ 2451{
2012 int rc = -EACCES; 2452 int rc;
2013 int xid; 2453 struct list_head tmplist;
2014 loff_t offset; 2454 struct cifsFileInfo *open_file = file->private_data;
2015 struct page *page; 2455 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2016 struct cifs_sb_info *cifs_sb; 2456 unsigned int rsize = cifs_sb->rsize;
2017 struct cifs_tcon *pTcon; 2457 pid_t pid;
2018 unsigned int bytes_read = 0;
2019 unsigned int read_size, i;
2020 char *smb_read_data = NULL;
2021 struct smb_com_read_rsp *pSMBr;
2022 struct cifsFileInfo *open_file;
2023 struct cifs_io_parms io_parms;
2024 int buf_type = CIFS_NO_BUFFER;
2025 __u32 pid;
2026 2458
2027 xid = GetXid(); 2459 /*
2028 if (file->private_data == NULL) { 2460 * Give up immediately if rsize is too small to read an entire page.
2029 rc = -EBADF; 2461 * The VFS will fall back to readpage. We should never reach this
2030 FreeXid(xid); 2462 * point however since we set ra_pages to 0 when the rsize is smaller
2031 return rc; 2463 * than a cache page.
2032 } 2464 */
2033 open_file = file->private_data; 2465 if (unlikely(rsize < PAGE_CACHE_SIZE))
2034 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 2466 return 0;
2035 pTcon = tlink_tcon(open_file->tlink);
2036 2467
2037 /* 2468 /*
2038 * Reads as many pages as possible from fscache. Returns -ENOBUFS 2469 * Reads as many pages as possible from fscache. Returns -ENOBUFS
@@ -2041,125 +2472,127 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
2041 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list, 2472 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
2042 &num_pages); 2473 &num_pages);
2043 if (rc == 0) 2474 if (rc == 0)
2044 goto read_complete; 2475 return rc;
2045 2476
2046 cFYI(DBG2, "rpages: num pages %d", num_pages);
2047 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) 2477 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2048 pid = open_file->pid; 2478 pid = open_file->pid;
2049 else 2479 else
2050 pid = current->tgid; 2480 pid = current->tgid;
2051 2481
2052 for (i = 0; i < num_pages; ) { 2482 rc = 0;
2053 unsigned contig_pages; 2483 INIT_LIST_HEAD(&tmplist);
2054 struct page *tmp_page;
2055 unsigned long expected_index;
2056 2484
2057 if (list_empty(page_list)) 2485 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
2058 break; 2486 mapping, num_pages);
2487
2488 /*
2489 * Start with the page at end of list and move it to private
2490 * list. Do the same with any following pages until we hit
2491 * the rsize limit, hit an index discontinuity, or run out of
2492 * pages. Issue the async read and then start the loop again
2493 * until the list is empty.
2494 *
2495 * Note that list order is important. The page_list is in
2496 * the order of declining indexes. When we put the pages in
2497 * rdata->pages, we want them in increasing order.
2498 */
2499 while (!list_empty(page_list)) {
2500 unsigned int bytes = PAGE_CACHE_SIZE;
2501 unsigned int expected_index;
2502 unsigned int nr_pages = 1;
2503 loff_t offset;
2504 struct page *page, *tpage;
2505 struct cifs_readdata *rdata;
2059 2506
2060 page = list_entry(page_list->prev, struct page, lru); 2507 page = list_entry(page_list->prev, struct page, lru);
2508
2509 /*
2510 * Lock the page and put it in the cache. Since no one else
2511 * should have access to this page, we're safe to simply set
2512 * PG_locked without checking it first.
2513 */
2514 __set_page_locked(page);
2515 rc = add_to_page_cache_locked(page, mapping,
2516 page->index, GFP_KERNEL);
2517
2518 /* give up if we can't stick it in the cache */
2519 if (rc) {
2520 __clear_page_locked(page);
2521 break;
2522 }
2523
2524 /* move first page to the tmplist */
2061 offset = (loff_t)page->index << PAGE_CACHE_SHIFT; 2525 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2526 list_move_tail(&page->lru, &tmplist);
2062 2527
2063 /* count adjacent pages that we will read into */ 2528 /* now try and add more pages onto the request */
2064 contig_pages = 0; 2529 expected_index = page->index + 1;
2065 expected_index = 2530 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
2066 list_entry(page_list->prev, struct page, lru)->index; 2531 /* discontinuity ? */
2067 list_for_each_entry_reverse(tmp_page, page_list, lru) { 2532 if (page->index != expected_index)
2068 if (tmp_page->index == expected_index) {
2069 contig_pages++;
2070 expected_index++;
2071 } else
2072 break; 2533 break;
2534
2535 /* would this page push the read over the rsize? */
2536 if (bytes + PAGE_CACHE_SIZE > rsize)
2537 break;
2538
2539 __set_page_locked(page);
2540 if (add_to_page_cache_locked(page, mapping,
2541 page->index, GFP_KERNEL)) {
2542 __clear_page_locked(page);
2543 break;
2544 }
2545 list_move_tail(&page->lru, &tmplist);
2546 bytes += PAGE_CACHE_SIZE;
2547 expected_index++;
2548 nr_pages++;
2073 } 2549 }
2074 if (contig_pages + i > num_pages) 2550
2075 contig_pages = num_pages - i; 2551 rdata = cifs_readdata_alloc(nr_pages);
2076 2552 if (!rdata) {
2077 /* for reads over a certain size could initiate async 2553 /* best to give up if we're out of mem */
2078 read ahead */ 2554 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
2079 2555 list_del(&page->lru);
2080 read_size = contig_pages * PAGE_CACHE_SIZE; 2556 lru_cache_add_file(page);
2081 /* Read size needs to be in multiples of one page */ 2557 unlock_page(page);
2082 read_size = min_t(const unsigned int, read_size, 2558 page_cache_release(page);
2083 cifs_sb->rsize & PAGE_CACHE_MASK); 2559 }
2084 cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d", 2560 rc = -ENOMEM;
2085 read_size, contig_pages); 2561 break;
2086 rc = -EAGAIN; 2562 }
2087 while (rc == -EAGAIN) { 2563
2564 spin_lock(&cifs_file_list_lock);
2565 cifsFileInfo_get(open_file);
2566 spin_unlock(&cifs_file_list_lock);
2567 rdata->cfile = open_file;
2568 rdata->mapping = mapping;
2569 rdata->offset = offset;
2570 rdata->bytes = bytes;
2571 rdata->pid = pid;
2572 list_splice_init(&tmplist, &rdata->pages);
2573
2574 do {
2088 if (open_file->invalidHandle) { 2575 if (open_file->invalidHandle) {
2089 rc = cifs_reopen_file(open_file, true); 2576 rc = cifs_reopen_file(open_file, true);
2090 if (rc != 0) 2577 if (rc != 0)
2091 break; 2578 continue;
2092 } 2579 }
2093 io_parms.netfid = open_file->netfid; 2580 rc = cifs_async_readv(rdata);
2094 io_parms.pid = pid; 2581 } while (rc == -EAGAIN);
2095 io_parms.tcon = pTcon;
2096 io_parms.offset = offset;
2097 io_parms.length = read_size;
2098 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2099 &smb_read_data, &buf_type);
2100 /* BB more RC checks ? */
2101 if (rc == -EAGAIN) {
2102 if (smb_read_data) {
2103 if (buf_type == CIFS_SMALL_BUFFER)
2104 cifs_small_buf_release(smb_read_data);
2105 else if (buf_type == CIFS_LARGE_BUFFER)
2106 cifs_buf_release(smb_read_data);
2107 smb_read_data = NULL;
2108 }
2109 }
2110 }
2111 if ((rc < 0) || (smb_read_data == NULL)) {
2112 cFYI(1, "Read error in readpages: %d", rc);
2113 break;
2114 } else if (bytes_read > 0) {
2115 task_io_account_read(bytes_read);
2116 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
2117 cifs_copy_cache_pages(mapping, page_list, bytes_read,
2118 smb_read_data + 4 /* RFC1001 hdr */ +
2119 le16_to_cpu(pSMBr->DataOffset));
2120
2121 i += bytes_read >> PAGE_CACHE_SHIFT;
2122 cifs_stats_bytes_read(pTcon, bytes_read);
2123 if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
2124 i++; /* account for partial page */
2125
2126 /* server copy of file can have smaller size
2127 than client */
2128 /* BB do we need to verify this common case ?
2129 this case is ok - if we are at server EOF
2130 we will hit it on next read */
2131 2582
2132 /* break; */ 2583 if (rc != 0) {
2584 list_for_each_entry_safe(page, tpage, &rdata->pages,
2585 lru) {
2586 list_del(&page->lru);
2587 lru_cache_add_file(page);
2588 unlock_page(page);
2589 page_cache_release(page);
2133 } 2590 }
2134 } else { 2591 cifs_readdata_free(rdata);
2135 cFYI(1, "No bytes read (%d) at offset %lld . "
2136 "Cleaning remaining pages from readahead list",
2137 bytes_read, offset);
2138 /* BB turn off caching and do new lookup on
2139 file size at server? */
2140 break; 2592 break;
2141 } 2593 }
2142 if (smb_read_data) {
2143 if (buf_type == CIFS_SMALL_BUFFER)
2144 cifs_small_buf_release(smb_read_data);
2145 else if (buf_type == CIFS_LARGE_BUFFER)
2146 cifs_buf_release(smb_read_data);
2147 smb_read_data = NULL;
2148 }
2149 bytes_read = 0;
2150 } 2594 }
2151 2595
2152/* need to free smb_read_data buf before exit */
2153 if (smb_read_data) {
2154 if (buf_type == CIFS_SMALL_BUFFER)
2155 cifs_small_buf_release(smb_read_data);
2156 else if (buf_type == CIFS_LARGE_BUFFER)
2157 cifs_buf_release(smb_read_data);
2158 smb_read_data = NULL;
2159 }
2160
2161read_complete:
2162 FreeXid(xid);
2163 return rc; 2596 return rc;
2164} 2597}
2165 2598
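The readpages hunk above replaces the synchronous CIFSSMBRead loop with an asynchronous submission: the pages are spliced onto a cifs_readdata, a reference is taken on the open file, and cifs_async_readv() is retried while it returns -EAGAIN, reopening the handle first when it has gone stale; if submission ultimately fails, the pages are handed back to the LRU and released. A minimal standalone sketch of just that resubmit discipline, with all names (fake_file, reopen_handle, submit_async_read) invented for illustration:

/* Standalone model of the resubmit loop used by the new readpages code:
 * reopen a stale handle, retry while the submission path reports EAGAIN,
 * and bail out with the final error otherwise. Illustrative only. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_file {
	bool invalid_handle;
	int transient_eagains;	/* pretend the transport is briefly busy */
};

static int reopen_handle(struct fake_file *f)
{
	f->invalid_handle = false;
	return 0;
}

static int submit_async_read(struct fake_file *f)
{
	if (f->invalid_handle)
		return -EBADF;
	if (f->transient_eagains-- > 0)
		return -EAGAIN;
	return 0;
}

static int issue_read(struct fake_file *f)
{
	int rc;

	do {
		if (f->invalid_handle) {
			rc = reopen_handle(f);
			if (rc != 0)
				continue;	/* loop exits unless rc == -EAGAIN */
		}
		rc = submit_async_read(f);
	} while (rc == -EAGAIN);

	return rc;
}

int main(void)
{
	struct fake_file f = { .invalid_handle = true, .transient_eagains = 2 };

	printf("issue_read() = %d\n", issue_read(&f));
	return 0;
}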
@@ -2408,6 +2841,10 @@ void cifs_oplock_break(struct work_struct *work)
2408 cFYI(1, "Oplock flush inode %p rc %d", inode, rc); 2841 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
2409 } 2842 }
2410 2843
2844 rc = cifs_push_locks(cfile);
2845 if (rc)
2846 cERROR(1, "Push locks rc = %d", rc);
2847
2411 /* 2848 /*
2412 * releasing stale oplock after recent reconnect of smb session using 2849 * releasing stale oplock after recent reconnect of smb session using
2413 * a now incorrect file handle is not a data integrity issue but do 2850 * a now incorrect file handle is not a data integrity issue but do
@@ -2415,8 +2852,9 @@ void cifs_oplock_break(struct work_struct *work)
2415 * disconnected since oplock already released by the server 2852 * disconnected since oplock already released by the server
2416 */ 2853 */
2417 if (!cfile->oplock_break_cancelled) { 2854 if (!cfile->oplock_break_cancelled) {
2418 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0, 2855 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
2419 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false, 2856 current->tgid, 0, 0, 0, 0,
2857 LOCKING_ANDX_OPLOCK_RELEASE, false,
2420 cinode->clientCanCacheRead ? 1 : 0); 2858 cinode->clientCanCacheRead ? 1 : 0);
2421 cFYI(1, "Oplock release rc = %d", rc); 2859 cFYI(1, "Oplock release rc = %d", rc);
2422 } 2860 }
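The oplock-break worker now pushes locally cached byte-range locks to the server (cifs_push_locks) before acknowledging the break with LOCKING_ANDX_OPLOCK_RELEASE; a push failure is only logged, and the release is still sent unless the break was cancelled. A compressed ordering sketch, with every helper name made up for illustration:

/* Ordering sketch: flush cached lock state before giving up the oplock.
 * push_cached_locks() and send_oplock_release() stand in for the real
 * kernel helpers and only report what they would have done. */
#include <stdio.h>

static int push_cached_locks(void)
{
	printf("pushing locally cached byte-range locks to the server\n");
	return 0;	/* a failure here is logged by the caller, not fatal */
}

static void send_oplock_release(int still_caching_reads)
{
	printf("sending LOCKING_ANDX oplock release (read caching: %d)\n",
	       still_caching_reads);
}

static void oplock_break(int break_cancelled, int can_cache_read)
{
	int rc = push_cached_locks();

	if (rc)
		fprintf(stderr, "push locks failed: %d (continuing)\n", rc);

	if (!break_cancelled)
		send_oplock_release(can_cache_read ? 1 : 0);
}

int main(void)
{
	oplock_break(0, 1);
	return 0;
}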
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index a7b2dcd4a53..2c50bd2f65d 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -562,7 +562,16 @@ int cifs_get_file_info(struct file *filp)
562 562
563 xid = GetXid(); 563 xid = GetXid();
564 rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data); 564 rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
565 if (rc == -EOPNOTSUPP || rc == -EINVAL) { 565 switch (rc) {
566 case 0:
567 cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false);
568 break;
569 case -EREMOTE:
570 cifs_create_dfs_fattr(&fattr, inode->i_sb);
571 rc = 0;
572 break;
573 case -EOPNOTSUPP:
574 case -EINVAL:
566 /* 575 /*
567 * FIXME: legacy server -- fall back to path-based call? 576 * FIXME: legacy server -- fall back to path-based call?
568 * for now, just skip revalidating and mark inode for 577 * for now, just skip revalidating and mark inode for
@@ -570,18 +579,14 @@ int cifs_get_file_info(struct file *filp)
570 */ 579 */
571 rc = 0; 580 rc = 0;
572 CIFS_I(inode)->time = 0; 581 CIFS_I(inode)->time = 0;
582 default:
573 goto cgfi_exit; 583 goto cgfi_exit;
574 } else if (rc == -EREMOTE) { 584 }
575 cifs_create_dfs_fattr(&fattr, inode->i_sb);
576 rc = 0;
577 } else if (rc)
578 goto cgfi_exit;
579 585
580 /* 586 /*
581 * don't bother with SFU junk here -- just mark inode as needing 587 * don't bother with SFU junk here -- just mark inode as needing
582 * revalidation. 588 * revalidation.
583 */ 589 */
584 cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false);
585 fattr.cf_uniqueid = CIFS_I(inode)->uniqueid; 590 fattr.cf_uniqueid = CIFS_I(inode)->uniqueid;
586 fattr.cf_flags |= CIFS_FATTR_NEED_REVAL; 591 fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
587 cifs_fattr_to_inode(inode, &fattr); 592 cifs_fattr_to_inode(inode, &fattr);
@@ -2096,6 +2101,8 @@ static int
2096cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) 2101cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
2097{ 2102{
2098 int xid; 2103 int xid;
2104 uid_t uid = NO_CHANGE_32;
2105 gid_t gid = NO_CHANGE_32;
2099 struct inode *inode = direntry->d_inode; 2106 struct inode *inode = direntry->d_inode;
2100 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2107 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2101 struct cifsInodeInfo *cifsInode = CIFS_I(inode); 2108 struct cifsInodeInfo *cifsInode = CIFS_I(inode);
@@ -2146,13 +2153,25 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
2146 goto cifs_setattr_exit; 2153 goto cifs_setattr_exit;
2147 } 2154 }
2148 2155
2149 /* 2156 if (attrs->ia_valid & ATTR_UID)
2150 * Without unix extensions we can't send ownership changes to the 2157 uid = attrs->ia_uid;
2151 * server, so silently ignore them. This is consistent with how 2158
2152 * local DOS/Windows filesystems behave (VFAT, NTFS, etc). With 2159 if (attrs->ia_valid & ATTR_GID)
2153 * CIFSACL support + proper Windows to Unix idmapping, we may be 2160 gid = attrs->ia_gid;
2154 * able to support this in the future. 2161
2155 */ 2162#ifdef CONFIG_CIFS_ACL
2163 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
2164 if (uid != NO_CHANGE_32 || gid != NO_CHANGE_32) {
2165 rc = id_mode_to_cifs_acl(inode, full_path, NO_CHANGE_64,
2166 uid, gid);
2167 if (rc) {
2168 cFYI(1, "%s: Setting id failed with error: %d",
2169 __func__, rc);
2170 goto cifs_setattr_exit;
2171 }
2172 }
2173 } else
2174#endif /* CONFIG_CIFS_ACL */
2156 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)) 2175 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID))
2157 attrs->ia_valid &= ~(ATTR_UID | ATTR_GID); 2176 attrs->ia_valid &= ~(ATTR_UID | ATTR_GID);
2158 2177
@@ -2161,15 +2180,12 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
2161 attrs->ia_valid &= ~ATTR_MODE; 2180 attrs->ia_valid &= ~ATTR_MODE;
2162 2181
2163 if (attrs->ia_valid & ATTR_MODE) { 2182 if (attrs->ia_valid & ATTR_MODE) {
2164 cFYI(1, "Mode changed to 0%o", attrs->ia_mode);
2165 mode = attrs->ia_mode; 2183 mode = attrs->ia_mode;
2166 }
2167
2168 if (attrs->ia_valid & ATTR_MODE) {
2169 rc = 0; 2184 rc = 0;
2170#ifdef CONFIG_CIFS_ACL 2185#ifdef CONFIG_CIFS_ACL
2171 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { 2186 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
2172 rc = mode_to_cifs_acl(inode, full_path, mode); 2187 rc = id_mode_to_cifs_acl(inode, full_path, mode,
2188 NO_CHANGE_32, NO_CHANGE_32);
2173 if (rc) { 2189 if (rc) {
2174 cFYI(1, "%s: Setting ACL failed with error: %d", 2190 cFYI(1, "%s: Setting ACL failed with error: %d",
2175 __func__, rc); 2191 __func__, rc);
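cifs_get_file_info() above now dispatches on the CIFSSMBQFileInfo return code with a switch: 0 fills the fattr, -EREMOTE synthesizes DFS attributes, and -EOPNOTSUPP/-EINVAL clear the cached timestamp and then deliberately fall through into default, which exits for every remaining error. The fall-through is the subtle part; a self-contained sketch of the same shape (the helper name and prints are placeholders):

/* Sketch of the switch-with-fallthrough error handling used above:
 * success and -EREMOTE keep going, "legacy server" errors reset cached
 * state and then share the default exit path with every other error. */
#include <errno.h>
#include <stdio.h>

static int handle_query_rc(int rc, int *cached_time)
{
	switch (rc) {
	case 0:
		printf("filling attributes from the query response\n");
		break;
	case -EREMOTE:
		printf("DFS referral: synthesizing fake attributes\n");
		rc = 0;
		break;
	case -EOPNOTSUPP:
	case -EINVAL:
		/* legacy server: skip revalidation, mark cache stale ... */
		rc = 0;
		*cached_time = 0;
		/* ... and fall through to the common exit */
	default:
		return rc;	/* nothing more to do for these cases */
	}

	printf("post-processing attributes\n");
	return rc;
}

int main(void)
{
	int cached_time = 42;

	printf("rc=0       -> %d\n", handle_query_rc(0, &cached_time));
	printf("rc=-EINVAL -> %d\n", handle_query_rc(-EINVAL, &cached_time));
	printf("rc=-EIO    -> %d\n", handle_query_rc(-EIO, &cached_time));
	return 0;
}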
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index db3f18cdf02..8693b5d0e18 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -183,14 +183,20 @@ CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
183static int 183static int
184CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon, 184CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon,
185 const char *fromName, const char *toName, 185 const char *fromName, const char *toName,
186 const struct nls_table *nls_codepage, int remap) 186 struct cifs_sb_info *cifs_sb)
187{ 187{
188 int rc; 188 int rc;
189 int oplock = 0; 189 int oplock = 0;
190 int remap;
191 int create_options = CREATE_NOT_DIR;
190 __u16 netfid = 0; 192 __u16 netfid = 0;
191 u8 *buf; 193 u8 *buf;
192 unsigned int bytes_written = 0; 194 unsigned int bytes_written = 0;
193 struct cifs_io_parms io_parms; 195 struct cifs_io_parms io_parms;
196 struct nls_table *nls_codepage;
197
198 nls_codepage = cifs_sb->local_nls;
199 remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR;
194 200
195 buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); 201 buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
196 if (!buf) 202 if (!buf)
@@ -202,8 +208,11 @@ CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon,
202 return rc; 208 return rc;
203 } 209 }
204 210
211 if (backup_cred(cifs_sb))
212 create_options |= CREATE_OPEN_BACKUP_INTENT;
213
205 rc = CIFSSMBOpen(xid, tcon, fromName, FILE_CREATE, GENERIC_WRITE, 214 rc = CIFSSMBOpen(xid, tcon, fromName, FILE_CREATE, GENERIC_WRITE,
206 CREATE_NOT_DIR, &netfid, &oplock, NULL, 215 create_options, &netfid, &oplock, NULL,
207 nls_codepage, remap); 216 nls_codepage, remap);
208 if (rc != 0) { 217 if (rc != 0) {
209 kfree(buf); 218 kfree(buf);
@@ -559,9 +568,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
559 /* BB what if DFS and this volume is on different share? BB */ 568 /* BB what if DFS and this volume is on different share? BB */
560 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) 569 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
561 rc = CIFSCreateMFSymLink(xid, pTcon, full_path, symname, 570 rc = CIFSCreateMFSymLink(xid, pTcon, full_path, symname,
562 cifs_sb->local_nls, 571 cifs_sb);
563 cifs_sb->mnt_cifs_flags &
564 CIFS_MOUNT_MAP_SPECIAL_CHR);
565 else if (pTcon->unix_ext) 572 else if (pTcon->unix_ext)
566 rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname, 573 rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
567 cifs_sb->local_nls); 574 cifs_sb->local_nls);
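CIFSCreateMFSymLink() now takes the cifs_sb_info itself and derives the nls table and the MAP_SPECIAL_CHR remap flag internally instead of having every caller unpack them, and it ORs CREATE_OPEN_BACKUP_INTENT into create_options when backup_cred() says the caller is a designated backup user. A hedged sketch of that "pass the context, derive the details" shape; the struct, field names, and flag values below are invented placeholders:

/* Sketch: callers hand over the mount context; the helper derives the
 * per-mount details (codepage, remap flag, backup intent) itself. */
#include <stdbool.h>
#include <stdio.h>

#define MNT_MAP_SPECIAL_CHR	0x1
#define CREATE_NOT_DIR_F	0x1
#define CREATE_BACKUP_INTENT_F	0x2

struct mount_ctx {
	const char *local_nls;	/* stands in for the nls_table pointer */
	unsigned int flags;
	bool caller_is_backup;	/* stands in for backup_cred() */
};

static int create_mf_symlink(const struct mount_ctx *ctx, const char *from)
{
	const char *nls = ctx->local_nls;
	int remap = ctx->flags & MNT_MAP_SPECIAL_CHR;
	int create_options = CREATE_NOT_DIR_F;

	if (ctx->caller_is_backup)
		create_options |= CREATE_BACKUP_INTENT_F;

	printf("open %s: nls=%s remap=%d options=0x%x\n",
	       from, nls, remap, create_options);
	return 0;
}

int main(void)
{
	struct mount_ctx ctx = {
		.local_nls = "utf8",
		.flags = MNT_MAP_SPECIAL_CHR,
		.caller_is_backup = true,
	};

	return create_mf_symlink(&ctx, "linkfile");
}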
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 7c169339259..703ef5c6fdb 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -420,19 +420,22 @@ check_smb_hdr(struct smb_hdr *smb, __u16 mid)
420} 420}
421 421
422int 422int
423checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length) 423checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int total_read)
424{ 424{
425 __u32 len = be32_to_cpu(smb->smb_buf_length); 425 __u32 rfclen = be32_to_cpu(smb->smb_buf_length);
426 __u32 clc_len; /* calculated length */ 426 __u32 clc_len; /* calculated length */
427 cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len); 427 cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x",
428 total_read, rfclen);
428 429
429 if (length < 2 + sizeof(struct smb_hdr)) { 430 /* is this frame too small to even get to a BCC? */
430 if ((length >= sizeof(struct smb_hdr) - 1) 431 if (total_read < 2 + sizeof(struct smb_hdr)) {
432 if ((total_read >= sizeof(struct smb_hdr) - 1)
431 && (smb->Status.CifsError != 0)) { 433 && (smb->Status.CifsError != 0)) {
434 /* it's an error return */
432 smb->WordCount = 0; 435 smb->WordCount = 0;
433 /* some error cases do not return wct and bcc */ 436 /* some error cases do not return wct and bcc */
434 return 0; 437 return 0;
435 } else if ((length == sizeof(struct smb_hdr) + 1) && 438 } else if ((total_read == sizeof(struct smb_hdr) + 1) &&
436 (smb->WordCount == 0)) { 439 (smb->WordCount == 0)) {
437 char *tmp = (char *)smb; 440 char *tmp = (char *)smb;
438 /* Need to work around a bug in two servers here */ 441 /* Need to work around a bug in two servers here */
@@ -452,39 +455,35 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
452 } else { 455 } else {
453 cERROR(1, "Length less than smb header size"); 456 cERROR(1, "Length less than smb header size");
454 } 457 }
455 return 1; 458 return -EIO;
456 }
457 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
458 cERROR(1, "smb length greater than MaxBufSize, mid=%d",
459 smb->Mid);
460 return 1;
461 } 459 }
462 460
461 /* otherwise, there is enough to get to the BCC */
463 if (check_smb_hdr(smb, mid)) 462 if (check_smb_hdr(smb, mid))
464 return 1; 463 return -EIO;
465 clc_len = smbCalcSize(smb); 464 clc_len = smbCalcSize(smb);
466 465
467 if (4 + len != length) { 466 if (4 + rfclen != total_read) {
468 cERROR(1, "Length read does not match RFC1001 length %d", 467 cERROR(1, "Length read does not match RFC1001 length %d",
469 len); 468 rfclen);
470 return 1; 469 return -EIO;
471 } 470 }
472 471
473 if (4 + len != clc_len) { 472 if (4 + rfclen != clc_len) {
474 /* check if bcc wrapped around for large read responses */ 473 /* check if bcc wrapped around for large read responses */
475 if ((len > 64 * 1024) && (len > clc_len)) { 474 if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
476 /* check if lengths match mod 64K */ 475 /* check if lengths match mod 64K */
477 if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF)) 476 if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
478 return 0; /* bcc wrapped */ 477 return 0; /* bcc wrapped */
479 } 478 }
480 cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u", 479 cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
481 clc_len, 4 + len, smb->Mid); 480 clc_len, 4 + rfclen, smb->Mid);
482 481
483 if (4 + len < clc_len) { 482 if (4 + rfclen < clc_len) {
484 cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u", 483 cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
485 len, smb->Mid); 484 rfclen, smb->Mid);
486 return 1; 485 return -EIO;
487 } else if (len > clc_len + 512) { 486 } else if (rfclen > clc_len + 512) {
488 /* 487 /*
489 * Some servers (Windows XP in particular) send more 488 * Some servers (Windows XP in particular) send more
490 * data than the lengths in the SMB packet would 489 * data than the lengths in the SMB packet would
@@ -495,8 +494,8 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
495 * data to 512 bytes. 494 * data to 512 bytes.
496 */ 495 */
497 cERROR(1, "RFC1001 size %u more than 512 bytes larger " 496 cERROR(1, "RFC1001 size %u more than 512 bytes larger "
498 "than SMB for mid=%u", len, smb->Mid); 497 "than SMB for mid=%u", rfclen, smb->Mid);
499 return 1; 498 return -EIO;
500 } 499 }
501 } 500 }
502 return 0; 501 return 0;
@@ -676,3 +675,18 @@ void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
676 cinode->clientCanCacheRead = false; 675 cinode->clientCanCacheRead = false;
677 } 676 }
678} 677}
678
679bool
680backup_cred(struct cifs_sb_info *cifs_sb)
681{
682 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
683 if (cifs_sb->mnt_backupuid == current_fsuid())
684 return true;
685 }
686 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
687 if (in_group_p(cifs_sb->mnt_backupgid))
688 return true;
689 }
690
691 return false;
692}
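checkSMB() now returns -EIO rather than 1 on malformed frames and validates three lengths against one another: the bytes actually read from the socket (total_read), the RFC1001 length from the header (rfclen, which excludes its own 4-byte prefix), and the size recalculated from the SMB WordCount/ByteCount fields (clc_len). The interesting cases are the mod-64K wrap of the byte count on large read responses and the up-to-512 bytes of padding some servers append. A standalone model of just that comparison logic, mirroring the rules in the hunk above:

/* total_read is what came off the socket, rfclen is the RFC1001 length
 * field (which does not count its own 4-byte header), clc_len is the
 * length recomputed from the SMB header fields. Returns 0 or -EIO as
 * the reworked checkSMB() does. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int check_lengths(uint32_t total_read, uint32_t rfclen,
			 uint32_t clc_len)
{
	if (4 + rfclen != total_read)
		return -EIO;	/* short or oversized read */

	if (4 + rfclen != clc_len) {
		/* large read responses can wrap the 16-bit byte count */
		if (rfclen > 64 * 1024 && rfclen > clc_len &&
		    ((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
			return 0;

		if (4 + rfclen < clc_len)
			return -EIO;	/* frame too small for its own SMB */

		if (rfclen > clc_len + 512)
			return -EIO;	/* more than the tolerated padding */
	}
	return 0;	/* equal, wrapped, or within 512 bytes of slack */
}

int main(void)
{
	printf("exact:   %d\n", check_lengths(104, 100, 104));
	printf("slack:   %d\n", check_lengths(304, 300, 104));
	printf("too big: %d\n", check_lengths(1304, 1300, 104));
	return 0;
}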
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index d3e619692ee..c7d80e24f24 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -124,7 +124,9 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
124 /* that we use in next few lines */ 124 /* that we use in next few lines */
125 /* Note that header is initialized to zero in header_assemble */ 125 /* Note that header is initialized to zero in header_assemble */
126 pSMB->req.AndXCommand = 0xFF; 126 pSMB->req.AndXCommand = 0xFF;
127 pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf); 127 pSMB->req.MaxBufferSize = cpu_to_le16(min_t(u32,
128 CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4,
129 USHRT_MAX));
128 pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq); 130 pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
129 pSMB->req.VcNumber = get_next_vcnum(ses); 131 pSMB->req.VcNumber = get_next_vcnum(ses);
130 132
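MaxBufferSize in the session setup request is a 16-bit wire field, so the value advertised to the server is now the client's own receive capacity (CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) clamped to USHRT_MAX, rather than an echo of server->maxBuf. A tiny sketch of that clamp; the buffer sizes used below are placeholders, not the kernel defaults:

/* The wire field is a __le16, so whatever the client can actually
 * receive has to be capped at USHRT_MAX before it is advertised. */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t advertised_max_buffer(uint32_t max_buf_size,
				      uint32_t max_hdr_size)
{
	uint32_t capacity = max_buf_size + max_hdr_size - 4;

	return (uint16_t)(capacity < USHRT_MAX ? capacity : USHRT_MAX);
}

int main(void)
{
	printf("small: %u\n", (unsigned int)advertised_max_buffer(16 * 1024, 500));
	printf("large: %u\n", (unsigned int)advertised_max_buffer(128 * 1024, 500));
	return 0;
}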
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 42b9fff4875..ac1221d969d 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -265,91 +265,6 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16)
265 return rc; 265 return rc;
266} 266}
267 267
268#if 0 /* currently unused */
269/* Does both the NT and LM owfs of a user's password */
270static void
271nt_lm_owf_gen(char *pwd, unsigned char nt_p16[16], unsigned char p16[16])
272{
273 char passwd[514];
274
275 memset(passwd, '\0', 514);
276 if (strlen(pwd) < 513)
277 strcpy(passwd, pwd);
278 else
279 memcpy(passwd, pwd, 512);
280 /* Calculate the MD4 hash (NT compatible) of the password */
281 memset(nt_p16, '\0', 16);
282 E_md4hash(passwd, nt_p16);
283
284 /* Mangle the passwords into Lanman format */
285 passwd[14] = '\0';
286/* strupper(passwd); */
287
288 /* Calculate the SMB (lanman) hash functions of the password */
289
290 memset(p16, '\0', 16);
291 E_P16((unsigned char *) passwd, (unsigned char *) p16);
292
293 /* clear out local copy of user's password (just being paranoid). */
294 memset(passwd, '\0', sizeof(passwd));
295}
296#endif
297
298/* Does the NTLMv2 owfs of a user's password */
299#if 0 /* function not needed yet - but will be soon */
300static void
301ntv2_owf_gen(const unsigned char owf[16], const char *user_n,
302 const char *domain_n, unsigned char kr_buf[16],
303 const struct nls_table *nls_codepage)
304{
305 wchar_t *user_u;
306 wchar_t *dom_u;
307 int user_l, domain_l;
308 struct HMACMD5Context ctx;
309
310 /* might as well do one alloc to hold both (user_u and dom_u) */
311 user_u = kmalloc(2048 * sizeof(wchar_t), GFP_KERNEL);
312 if (user_u == NULL)
313 return;
314 dom_u = user_u + 1024;
315
316 /* push_ucs2(NULL, user_u, user_n, (user_l+1)*2,
317 STR_UNICODE|STR_NOALIGN|STR_TERMINATE|STR_UPPER);
318 push_ucs2(NULL, dom_u, domain_n, (domain_l+1)*2,
319 STR_UNICODE|STR_NOALIGN|STR_TERMINATE|STR_UPPER); */
320
321 /* BB user and domain may need to be uppercased */
322 user_l = cifs_strtoUCS(user_u, user_n, 511, nls_codepage);
323 domain_l = cifs_strtoUCS(dom_u, domain_n, 511, nls_codepage);
324
325 user_l++; /* trailing null */
326 domain_l++;
327
328 hmac_md5_init_limK_to_64(owf, 16, &ctx);
329 hmac_md5_update((const unsigned char *) user_u, user_l * 2, &ctx);
330 hmac_md5_update((const unsigned char *) dom_u, domain_l * 2, &ctx);
331 hmac_md5_final(kr_buf, &ctx);
332
333 kfree(user_u);
334}
335#endif
336
337/* Does the des encryption from the FIRST 8 BYTES of the NT or LM MD4 hash. */
338#if 0 /* currently unused */
339static void
340NTLMSSPOWFencrypt(unsigned char passwd[8],
341 unsigned char *ntlmchalresp, unsigned char p24[24])
342{
343 unsigned char p21[21];
344
345 memset(p21, '\0', 21);
346 memcpy(p21, passwd, 8);
347 memset(p21 + 8, 0xbd, 8);
348
349 E_P24(p21, ntlmchalresp, p24);
350}
351#endif
352
353/* Does the NT MD4 hash then des encryption. */ 268/* Does the NT MD4 hash then des encryption. */
354int 269int
355SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24) 270SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24)
@@ -369,39 +284,3 @@ SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24)
369 rc = E_P24(p21, c8, p24); 284 rc = E_P24(p21, c8, p24);
370 return rc; 285 return rc;
371} 286}
372
373
374/* Does the md5 encryption from the NT hash for NTLMv2. */
375/* These routines will be needed later */
376#if 0
377static void
378SMBOWFencrypt_ntv2(const unsigned char kr[16],
379 const struct data_blob *srv_chal,
380 const struct data_blob *cli_chal, unsigned char resp_buf[16])
381{
382 struct HMACMD5Context ctx;
383
384 hmac_md5_init_limK_to_64(kr, 16, &ctx);
385 hmac_md5_update(srv_chal->data, srv_chal->length, &ctx);
386 hmac_md5_update(cli_chal->data, cli_chal->length, &ctx);
387 hmac_md5_final(resp_buf, &ctx);
388}
389
390static void
391SMBsesskeygen_ntv2(const unsigned char kr[16],
392 const unsigned char *nt_resp, __u8 sess_key[16])
393{
394 struct HMACMD5Context ctx;
395
396 hmac_md5_init_limK_to_64(kr, 16, &ctx);
397 hmac_md5_update(nt_resp, 16, &ctx);
398 hmac_md5_final((unsigned char *) sess_key, &ctx);
399}
400
401static void
402SMBsesskeygen_ntv1(const unsigned char kr[16],
403 const unsigned char *nt_resp, __u8 sess_key[16])
404{
405 mdfour((unsigned char *) sess_key, (unsigned char *) kr, 16);
406}
407#endif
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 10ca6b2c26b..0cc9584f588 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -26,6 +26,7 @@
26#include <linux/wait.h> 26#include <linux/wait.h>
27#include <linux/net.h> 27#include <linux/net.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/freezer.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
30#include <asm/processor.h> 31#include <asm/processor.h>
31#include <linux/mempool.h> 32#include <linux/mempool.h>
@@ -324,7 +325,7 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
324{ 325{
325 int error; 326 int error;
326 327
327 error = wait_event_killable(server->response_q, 328 error = wait_event_freezekillable(server->response_q,
328 midQ->midState != MID_REQUEST_SUBMITTED); 329 midQ->midState != MID_REQUEST_SUBMITTED);
329 if (error < 0) 330 if (error < 0)
330 return -ERESTARTSYS; 331 return -ERESTARTSYS;
@@ -339,8 +340,8 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
339 */ 340 */
340int 341int
341cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, 342cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
342 unsigned int nvec, mid_callback_t *callback, void *cbdata, 343 unsigned int nvec, mid_receive_t *receive,
343 bool ignore_pend) 344 mid_callback_t *callback, void *cbdata, bool ignore_pend)
344{ 345{
345 int rc; 346 int rc;
346 struct mid_q_entry *mid; 347 struct mid_q_entry *mid;
@@ -374,6 +375,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
374 goto out_err; 375 goto out_err;
375 } 376 }
376 377
378 mid->receive = receive;
377 mid->callback = callback; 379 mid->callback = callback;
378 mid->callback_data = cbdata; 380 mid->callback_data = cbdata;
379 mid->midState = MID_REQUEST_SUBMITTED; 381 mid->midState = MID_REQUEST_SUBMITTED;
@@ -496,13 +498,18 @@ int
496cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, 498cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
497 bool log_error) 499 bool log_error)
498{ 500{
499 dump_smb(mid->resp_buf, 501 unsigned int len = be32_to_cpu(mid->resp_buf->smb_buf_length) + 4;
500 min_t(u32, 92, be32_to_cpu(mid->resp_buf->smb_buf_length))); 502
503 dump_smb(mid->resp_buf, min_t(u32, 92, len));
501 504
502 /* convert the length into a more usable form */ 505 /* convert the length into a more usable form */
503 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { 506 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
507 struct kvec iov;
508
509 iov.iov_base = mid->resp_buf;
510 iov.iov_len = len;
504 /* FIXME: add code to kill session */ 511 /* FIXME: add code to kill session */
505 if (cifs_verify_signature(mid->resp_buf, server, 512 if (cifs_verify_signature(&iov, 1, server,
506 mid->sequence_number + 1) != 0) 513 mid->sequence_number + 1) != 0)
507 cERROR(1, "Unexpected SMB signature"); 514 cERROR(1, "Unexpected SMB signature");
508 } 515 }
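cifs_verify_signature() is now handed a kvec describing the response instead of a flat buffer, so cifs_check_receive() wraps the response in a one-element iovec covering the full RFC1001 length. The general pattern, shown below with a trivial stand-in checksum rather than the real SMB signing code, is simply to walk the segments in order and feed each into the running digest; the struct and function names are illustrative:

/* Illustration of digesting a message described by an iovec: walk the
 * segments in order and fold each one into a running checksum. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct seg {
	const void *base;
	size_t len;
};

static uint32_t digest_segments(const struct seg *segs, unsigned int nsegs)
{
	uint32_t sum = 0;
	unsigned int i;
	size_t j;

	for (i = 0; i < nsegs; i++) {
		const uint8_t *p = segs[i].base;

		for (j = 0; j < segs[i].len; j++)
			sum = sum * 31 + p[j];
	}
	return sum;
}

int main(void)
{
	const char hdr[] = "header";
	const char body[] = "body";
	struct seg flat[1] = { { "headerbody", 10 } };
	struct seg split[2] = { { hdr, 6 }, { body, 4 } };

	/* identical content gives the same digest however it is split */
	printf("flat:  %u\n", digest_segments(flat, 1));
	printf("split: %u\n", digest_segments(split, 2));
	return 0;
}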
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 2a22fb2989e..45f07c46f3e 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -22,6 +22,7 @@
22#include <linux/fs.h> 22#include <linux/fs.h>
23#include <linux/posix_acl_xattr.h> 23#include <linux/posix_acl_xattr.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/xattr.h>
25#include "cifsfs.h" 26#include "cifsfs.h"
26#include "cifspdu.h" 27#include "cifspdu.h"
27#include "cifsglob.h" 28#include "cifsglob.h"
@@ -31,16 +32,8 @@
31#define MAX_EA_VALUE_SIZE 65535 32#define MAX_EA_VALUE_SIZE 65535
32#define CIFS_XATTR_DOS_ATTRIB "user.DosAttrib" 33#define CIFS_XATTR_DOS_ATTRIB "user.DosAttrib"
33#define CIFS_XATTR_CIFS_ACL "system.cifs_acl" 34#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
34#define CIFS_XATTR_USER_PREFIX "user."
35#define CIFS_XATTR_SYSTEM_PREFIX "system."
36#define CIFS_XATTR_OS2_PREFIX "os2."
37#define CIFS_XATTR_SECURITY_PREFIX "security."
38#define CIFS_XATTR_TRUSTED_PREFIX "trusted."
39#define XATTR_TRUSTED_PREFIX_LEN 8
40#define XATTR_SECURITY_PREFIX_LEN 9
41/* BB need to add server (Samba e.g) support for security and trusted prefix */
42
43 35
36/* BB need to add server (Samba e.g) support for security and trusted prefix */
44 37
45int cifs_removexattr(struct dentry *direntry, const char *ea_name) 38int cifs_removexattr(struct dentry *direntry, const char *ea_name)
46{ 39{
@@ -76,8 +69,8 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
76 } 69 }
77 if (ea_name == NULL) { 70 if (ea_name == NULL) {
78 cFYI(1, "Null xattr names not supported"); 71 cFYI(1, "Null xattr names not supported");
79 } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) 72 } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
80 && (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4))) { 73 && (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN))) {
81 cFYI(1, 74 cFYI(1,
82 "illegal xattr request %s (only user namespace supported)", 75 "illegal xattr request %s (only user namespace supported)",
83 ea_name); 76 ea_name);
@@ -88,7 +81,7 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
88 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 81 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
89 goto remove_ea_exit; 82 goto remove_ea_exit;
90 83
91 ea_name += 5; /* skip past user. prefix */ 84 ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
92 rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, NULL, 85 rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, NULL,
93 (__u16)0, cifs_sb->local_nls, 86 (__u16)0, cifs_sb->local_nls,
94 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 87 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -149,21 +142,23 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
149 142
150 if (ea_name == NULL) { 143 if (ea_name == NULL) {
151 cFYI(1, "Null xattr names not supported"); 144 cFYI(1, "Null xattr names not supported");
152 } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) { 145 } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
146 == 0) {
153 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 147 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
154 goto set_ea_exit; 148 goto set_ea_exit;
155 if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) 149 if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0)
156 cFYI(1, "attempt to set cifs inode metadata"); 150 cFYI(1, "attempt to set cifs inode metadata");
157 151
158 ea_name += 5; /* skip past user. prefix */ 152 ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
159 rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value, 153 rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
160 (__u16)value_size, cifs_sb->local_nls, 154 (__u16)value_size, cifs_sb->local_nls,
161 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 155 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
162 } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) { 156 } else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)
157 == 0) {
163 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 158 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
164 goto set_ea_exit; 159 goto set_ea_exit;
165 160
166 ea_name += 4; /* skip past os2. prefix */ 161 ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */
167 rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value, 162 rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
168 (__u16)value_size, cifs_sb->local_nls, 163 (__u16)value_size, cifs_sb->local_nls,
169 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 164 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -178,7 +173,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
178#ifdef CONFIG_CIFS_ACL 173#ifdef CONFIG_CIFS_ACL
179 memcpy(pacl, ea_value, value_size); 174 memcpy(pacl, ea_value, value_size);
180 rc = set_cifs_acl(pacl, value_size, 175 rc = set_cifs_acl(pacl, value_size,
181 direntry->d_inode, full_path); 176 direntry->d_inode, full_path, CIFS_ACL_DACL);
182 if (rc == 0) /* force revalidate of the inode */ 177 if (rc == 0) /* force revalidate of the inode */
183 CIFS_I(direntry->d_inode)->time = 0; 178 CIFS_I(direntry->d_inode)->time = 0;
184 kfree(pacl); 179 kfree(pacl);
@@ -269,7 +264,8 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
269 /* return alt name if available as pseudo attr */ 264 /* return alt name if available as pseudo attr */
270 if (ea_name == NULL) { 265 if (ea_name == NULL) {
271 cFYI(1, "Null xattr names not supported"); 266 cFYI(1, "Null xattr names not supported");
272 } else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) { 267 } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
268 == 0) {
273 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 269 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
274 goto get_ea_exit; 270 goto get_ea_exit;
275 271
@@ -277,15 +273,15 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
277 cFYI(1, "attempt to query cifs inode metadata"); 273 cFYI(1, "attempt to query cifs inode metadata");
278 /* revalidate/getattr then populate from inode */ 274 /* revalidate/getattr then populate from inode */
279 } /* BB add else when above is implemented */ 275 } /* BB add else when above is implemented */
280 ea_name += 5; /* skip past user. prefix */ 276 ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
281 rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value, 277 rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
282 buf_size, cifs_sb->local_nls, 278 buf_size, cifs_sb->local_nls,
283 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 279 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
284 } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) { 280 } else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
285 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 281 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
286 goto get_ea_exit; 282 goto get_ea_exit;
287 283
288 ea_name += 4; /* skip past os2. prefix */ 284 ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */
289 rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value, 285 rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
290 buf_size, cifs_sb->local_nls, 286 buf_size, cifs_sb->local_nls,
291 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 287 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -339,10 +335,10 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
339 cFYI(1, "Query CIFS ACL not supported yet"); 335 cFYI(1, "Query CIFS ACL not supported yet");
340#endif /* CONFIG_CIFS_ACL */ 336#endif /* CONFIG_CIFS_ACL */
341 } else if (strncmp(ea_name, 337 } else if (strncmp(ea_name,
342 CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) { 338 XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
343 cFYI(1, "Trusted xattr namespace not supported yet"); 339 cFYI(1, "Trusted xattr namespace not supported yet");
344 } else if (strncmp(ea_name, 340 } else if (strncmp(ea_name,
345 CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) { 341 XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
346 cFYI(1, "Security xattr namespace not supported yet"); 342 cFYI(1, "Security xattr namespace not supported yet");
347 } else 343 } else
348 cFYI(1, 344 cFYI(1,