Diffstat (limited to 'fs')
-rw-r--r--  fs/ceph/addr.c  |  12
-rw-r--r--  fs/ceph/auth_x.c  |  15
-rw-r--r--  fs/ceph/caps.c  |  32
-rw-r--r--  fs/ceph/debugfs.c  |  4
-rw-r--r--  fs/ceph/dir.c  |  2
-rw-r--r--  fs/ceph/inode.c  |  5
-rw-r--r--  fs/ceph/locks.c  |  14
-rw-r--r--  fs/ceph/mds_client.c  |  101
-rw-r--r--  fs/ceph/mds_client.h  |  3
-rw-r--r--  fs/ceph/osd_client.c  |  2
-rw-r--r--  fs/ceph/snap.c  |  89
-rw-r--r--  fs/ceph/super.h  |  11
-rw-r--r--  fs/ceph/xattr.c  |  1
-rw-r--r--  fs/cifs/Kconfig  |  2
-rw-r--r--  fs/cifs/asn1.c  |  6
-rw-r--r--  fs/cifs/cifs_unicode.h  |  18
-rw-r--r--  fs/cifs/cifs_uniupr.h  |  16
-rw-r--r--  fs/cifs/cifsencrypt.c  |  475
-rw-r--r--  fs/cifs/cifsglob.h  |  25
-rw-r--r--  fs/cifs/cifspdu.h  |  7
-rw-r--r--  fs/cifs/cifsproto.h  |  12
-rw-r--r--  fs/cifs/cifssmb.c  |  13
-rw-r--r--  fs/cifs/connect.c  |  17
-rw-r--r--  fs/cifs/dir.c  |  157
-rw-r--r--  fs/cifs/file.c  |  3
-rw-r--r--  fs/cifs/inode.c  |  2
-rw-r--r--  fs/cifs/ntlmssp.h  |  13
-rw-r--r--  fs/cifs/sess.c  |  132
-rw-r--r--  fs/cifs/transport.c  |  6
-rw-r--r--  fs/ecryptfs/crypto.c  |  3
-rw-r--r--  fs/ecryptfs/inode.c  |  31
-rw-r--r--  fs/ecryptfs/keystore.c  |  2
-rw-r--r--  fs/ecryptfs/kthread.c  |  2
-rw-r--r--  fs/ecryptfs/messaging.c  |  2
-rw-r--r--  fs/ecryptfs/miscdev.c  |  2
-rw-r--r--  fs/nfsd/nfs4state.c  |  26
-rw-r--r--  fs/nfsd/state.h  |  14
-rw-r--r--  fs/nfsd/vfs.c  |  14
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c  |  13
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c  |  9
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c  |  42
-rw-r--r--  fs/xfs/xfs_fsops.c  |  31
-rw-r--r--  fs/xfs/xfs_fsops.h  |  2
-rw-r--r--  fs/xfs/xfs_ialloc.c  |  16
-rw-r--r--  fs/xfs/xfs_inode.c  |  49
-rw-r--r--  fs/xfs/xfs_log.c  |  7
-rw-r--r--  fs/xfs/xfs_log_cil.c  |  263
-rw-r--r--  fs/xfs/xfs_log_priv.h  |  13
-rw-r--r--  fs/xfs/xfs_trans.c  |  5
-rw-r--r--  fs/xfs/xfs_trans_priv.h  |  3
50 files changed, 1119 insertions, 625 deletions
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 5598a0d02295..4cfce1ee31fa 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -87,7 +87,7 @@ static int ceph_set_page_dirty(struct page *page)
 
     /* dirty the head */
     spin_lock(&inode->i_lock);
-    if (ci->i_wrbuffer_ref_head == 0)
+    if (ci->i_head_snapc == NULL)
         ci->i_head_snapc = ceph_get_snap_context(snapc);
     ++ci->i_wrbuffer_ref_head;
     if (ci->i_wrbuffer_ref == 0)
@@ -105,13 +105,7 @@ static int ceph_set_page_dirty(struct page *page)
     spin_lock_irq(&mapping->tree_lock);
     if (page->mapping) { /* Race with truncate? */
         WARN_ON_ONCE(!PageUptodate(page));
-
-        if (mapping_cap_account_dirty(mapping)) {
-            __inc_zone_page_state(page, NR_FILE_DIRTY);
-            __inc_bdi_stat(mapping->backing_dev_info,
-                    BDI_RECLAIMABLE);
-            task_io_account_write(PAGE_CACHE_SIZE);
-        }
+        account_page_dirtied(page, page->mapping);
         radix_tree_tag_set(&mapping->page_tree,
                 page_index(page), PAGECACHE_TAG_DIRTY);
 
@@ -352,7 +346,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
             break;
         }
     }
-    if (!snapc && ci->i_head_snapc) {
+    if (!snapc && ci->i_wrbuffer_ref_head) {
         snapc = ceph_get_snap_context(ci->i_head_snapc);
         dout(" head snapc %p has %d dirty pages\n",
              snapc, ci->i_wrbuffer_ref_head);
diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c
index 582e0b2caf8a..a2d002cbdec2 100644
--- a/fs/ceph/auth_x.c
+++ b/fs/ceph/auth_x.c
@@ -376,7 +376,7 @@ static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed)
 
         th = get_ticket_handler(ac, service);
 
-        if (!th) {
+        if (IS_ERR(th)) {
             *pneed |= service;
             continue;
         }
@@ -399,6 +399,9 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
     struct ceph_x_ticket_handler *th =
         get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
 
+    if (IS_ERR(th))
+        return PTR_ERR(th);
+
     ceph_x_validate_tickets(ac, &need);
 
     dout("build_request want %x have %x need %x\n",
@@ -450,7 +453,6 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
             return -ERANGE;
         head->op = cpu_to_le16(CEPHX_GET_PRINCIPAL_SESSION_KEY);
 
-        BUG_ON(!th);
         ret = ceph_x_build_authorizer(ac, th, &xi->auth_authorizer);
         if (ret)
             return ret;
@@ -505,7 +507,8 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
 
     case CEPHX_GET_PRINCIPAL_SESSION_KEY:
         th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
-        BUG_ON(!th);
+        if (IS_ERR(th))
+            return PTR_ERR(th);
         ret = ceph_x_proc_ticket_reply(ac, &th->session_key,
                            buf + sizeof(*head), end);
         break;
@@ -563,8 +566,8 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
     void *end = p + sizeof(au->reply_buf);
 
     th = get_ticket_handler(ac, au->service);
-    if (!th)
-        return -EIO;  /* hrm! */
+    if (IS_ERR(th))
+        return PTR_ERR(th);
     ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
     if (ret < 0)
         return ret;
@@ -626,7 +629,7 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
     struct ceph_x_ticket_handler *th;
 
     th = get_ticket_handler(ac, peer_type);
-    if (th && !IS_ERR(th))
+    if (!IS_ERR(th))
         remove_ticket_handler(ac, th);
 }
 
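The auth_x.c hunks above move get_ticket_handler() callers from NULL checks and BUG_ON() to the kernel's ERR_PTR convention, so lookup failures propagate as error codes instead of crashing. A minimal, self-contained sketch of that convention is below; struct ticket, ticket_lookup() and ticket_use() are hypothetical stand-ins, not ceph functions.

#include <linux/err.h>
#include <linux/slab.h>

struct ticket {
    int service;
};

/* Hypothetical lookup that reports failure via ERR_PTR() instead of NULL. */
static struct ticket *ticket_lookup(int service)
{
    struct ticket *t = kzalloc(sizeof(*t), GFP_KERNEL);

    if (!t)
        return ERR_PTR(-ENOMEM);    /* error code encoded in the pointer */
    t->service = service;
    return t;
}

static int ticket_use(int service)
{
    struct ticket *t = ticket_lookup(service);

    if (IS_ERR(t))          /* detect an encoded error value */
        return PTR_ERR(t);  /* propagate it instead of BUG_ON(!t) */
    kfree(t);
    return 0;
}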
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 7bf182b03973..a2069b6680ae 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1082,6 +1082,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
     gid_t gid;
     struct ceph_mds_session *session;
     u64 xattr_version = 0;
+    struct ceph_buffer *xattr_blob = NULL;
     int delayed = 0;
     u64 flush_tid = 0;
     int i;
@@ -1142,6 +1143,10 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
         for (i = 0; i < CEPH_CAP_BITS; i++)
             if (flushing & (1 << i))
                 ci->i_cap_flush_tid[i] = flush_tid;
+
+        follows = ci->i_head_snapc->seq;
+    } else {
+        follows = 0;
     }
 
     keep = cap->implemented;
@@ -1155,14 +1160,14 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
     mtime = inode->i_mtime;
     atime = inode->i_atime;
     time_warp_seq = ci->i_time_warp_seq;
-    follows = ci->i_snap_realm->cached_context->seq;
     uid = inode->i_uid;
     gid = inode->i_gid;
     mode = inode->i_mode;
 
-    if (dropping & CEPH_CAP_XATTR_EXCL) {
+    if (flushing & CEPH_CAP_XATTR_EXCL) {
         __ceph_build_xattrs_blob(ci);
-        xattr_version = ci->i_xattrs.version + 1;
+        xattr_blob = ci->i_xattrs.blob;
+        xattr_version = ci->i_xattrs.version;
     }
 
     spin_unlock(&inode->i_lock);
@@ -1170,9 +1175,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
     ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
         op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
         size, max_size, &mtime, &atime, time_warp_seq,
-        uid, gid, mode,
-        xattr_version,
-        (flushing & CEPH_CAP_XATTR_EXCL) ? ci->i_xattrs.blob : NULL,
+        uid, gid, mode, xattr_version, xattr_blob,
         follows);
     if (ret < 0) {
         dout("error sending cap msg, must requeue %p\n", inode);
@@ -1282,7 +1285,7 @@ retry:
                  &capsnap->mtime, &capsnap->atime,
                  capsnap->time_warp_seq,
                  capsnap->uid, capsnap->gid, capsnap->mode,
-                 0, NULL,
+                 capsnap->xattr_version, capsnap->xattr_blob,
                  capsnap->follows);
 
         next_follows = capsnap->follows + 1;
@@ -1332,7 +1335,11 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
          ceph_cap_string(was | mask));
     ci->i_dirty_caps |= mask;
     if (was == 0) {
-        dout(" inode %p now dirty\n", &ci->vfs_inode);
+        if (!ci->i_head_snapc)
+            ci->i_head_snapc = ceph_get_snap_context(
+                ci->i_snap_realm->cached_context);
+        dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
+             ci->i_head_snapc);
         BUG_ON(!list_empty(&ci->i_dirty_item));
         spin_lock(&mdsc->cap_dirty_lock);
         list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
@@ -2190,7 +2197,9 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
 
     if (ci->i_head_snapc == snapc) {
         ci->i_wrbuffer_ref_head -= nr;
-        if (!ci->i_wrbuffer_ref_head) {
+        if (ci->i_wrbuffer_ref_head == 0 &&
+            ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) {
+            BUG_ON(!ci->i_head_snapc);
             ceph_put_snap_context(ci->i_head_snapc);
             ci->i_head_snapc = NULL;
         }
@@ -2483,6 +2492,11 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
         dout(" inode %p now clean\n", inode);
         BUG_ON(!list_empty(&ci->i_dirty_item));
         drop = 1;
+        if (ci->i_wrbuffer_ref_head == 0) {
+            BUG_ON(!ci->i_head_snapc);
+            ceph_put_snap_context(ci->i_head_snapc);
+            ci->i_head_snapc = NULL;
+        }
     } else {
         BUG_ON(list_empty(&ci->i_dirty_item));
     }
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 360c4f22718d..6fd8b20a8611 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -171,6 +171,8 @@ static int mdsc_show(struct seq_file *s, void *p)
     } else if (req->r_dentry) {
         path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
                         &pathbase, 0);
+        if (IS_ERR(path))
+            path = NULL;
         spin_lock(&req->r_dentry->d_lock);
         seq_printf(s, " #%llx/%.*s (%s)",
                ceph_ino(req->r_dentry->d_parent->d_inode),
@@ -187,6 +189,8 @@ static int mdsc_show(struct seq_file *s, void *p)
     if (req->r_old_dentry) {
         path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
                         &pathbase, 0);
+        if (IS_ERR(path))
+            path = NULL;
         spin_lock(&req->r_old_dentry->d_lock);
         seq_printf(s, " #%llx/%.*s (%s)",
                ceph_ino(req->r_old_dentry->d_parent->d_inode),
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 67bbb41d5526..6e4f43ff23ec 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -46,7 +46,7 @@ int ceph_init_dentry(struct dentry *dentry)
     else
         dentry->d_op = &ceph_snap_dentry_ops;
 
-    di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS);
+    di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
     if (!di)
        return -ENOMEM;          /* oh well */
 
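The one-liner above adds __GFP_ZERO so the slab object comes back zero-initialized. The usual shorthand for the same thing is kmem_cache_zalloc(); a tiny sketch with a hypothetical cache and struct (not the ceph ones):

#include <linux/slab.h>

struct demo_dentry_info {
    void *private;
    long generation;
};

static struct kmem_cache *demo_cachep;  /* assumed created with kmem_cache_create() */

static struct demo_dentry_info *demo_alloc(void)
{
    /* equivalent to kmem_cache_alloc(demo_cachep, GFP_NOFS | __GFP_ZERO) */
    return kmem_cache_zalloc(demo_cachep, GFP_NOFS);
}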
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 5d893d31e399..e7cca414da03 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -677,6 +677,7 @@ static int fill_inode(struct inode *inode,
     if (ci->i_files == 0 && ci->i_subdirs == 0 &&
         ceph_snap(inode) == CEPH_NOSNAP &&
         (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
+        (issued & CEPH_CAP_FILE_EXCL) == 0 &&
         (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
         dout(" marking %p complete (empty)\n", inode);
         ci->i_ceph_flags |= CEPH_I_COMPLETE;
@@ -1229,11 +1230,11 @@ retry_lookup:
             in = dn->d_inode;
         } else {
             in = ceph_get_inode(parent->d_sb, vino);
-            if (in == NULL) {
+            if (IS_ERR(in)) {
                 dout("new_inode badness\n");
                 d_delete(dn);
                 dput(dn);
-                err = -ENOMEM;
+                err = PTR_ERR(in);
                 goto out;
             }
             dn = splice_dentry(dn, in, NULL);
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index ae85af06454f..ff4e753aae92 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -82,7 +82,8 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
     length = fl->fl_end - fl->fl_start + 1;
 
     err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
-                (u64)fl->fl_pid, (u64)fl->fl_nspid,
+                (u64)fl->fl_pid,
+                (u64)(unsigned long)fl->fl_nspid,
                 lock_cmd, fl->fl_start,
                 length, wait);
     if (!err) {
@@ -92,7 +93,8 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
         /* undo! This should only happen if the kernel detects
          * local deadlock. */
         ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
-                  (u64)fl->fl_pid, (u64)fl->fl_nspid,
+                  (u64)fl->fl_pid,
+                  (u64)(unsigned long)fl->fl_nspid,
                   CEPH_LOCK_UNLOCK, fl->fl_start,
                   length, 0);
         dout("got %d on posix_lock_file, undid lock", err);
@@ -132,7 +134,8 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
     length = fl->fl_end - fl->fl_start + 1;
 
     err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
-                file, (u64)fl->fl_pid, (u64)fl->fl_nspid,
+                file, (u64)fl->fl_pid,
+                (u64)(unsigned long)fl->fl_nspid,
                 lock_cmd, fl->fl_start,
                 length, wait);
     if (!err) {
@@ -141,7 +144,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
         ceph_lock_message(CEPH_LOCK_FLOCK,
                   CEPH_MDS_OP_SETFILELOCK,
                   file, (u64)fl->fl_pid,
-                  (u64)fl->fl_nspid,
+                  (u64)(unsigned long)fl->fl_nspid,
                   CEPH_LOCK_UNLOCK, fl->fl_start,
                   length, 0);
         dout("got %d on flock_lock_file_wait, undid lock", err);
@@ -235,7 +238,8 @@ int lock_to_ceph_filelock(struct file_lock *lock,
     cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
     cephlock->client = cpu_to_le64(0);
     cephlock->pid = cpu_to_le64(lock->fl_pid);
-    cephlock->pid_namespace = cpu_to_le64((u64)lock->fl_nspid);
+    cephlock->pid_namespace =
+        cpu_to_le64((u64)(unsigned long)lock->fl_nspid);
 
     switch (lock->fl_type) {
     case F_RDLCK:
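The locks.c hunks all make the same change: fl_nspid is a pointer, and casting a pointer straight to u64 warns on 32-bit builds ("cast from pointer to integer of different size"), so the value is widened through unsigned long first. A short illustrative helper (not part of the patch):

#include <linux/types.h>

/* Widen a pointer for an on-the-wire u64 field. */
static u64 ptr_to_wire(const void *p)
{
    /*
     * (u64)p triggers a warning on 32-bit, where a pointer and u64
     * differ in size; going through unsigned long is lossless because
     * unsigned long is always pointer-sized in the kernel.
     */
    return (u64)(unsigned long)p;
}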
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a75ddbf9fe37..f091b1351786 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -560,6 +560,13 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
  *
  * Called under mdsc->mutex.
  */
+struct dentry *get_nonsnap_parent(struct dentry *dentry)
+{
+    while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
+        dentry = dentry->d_parent;
+    return dentry;
+}
+
 static int __choose_mds(struct ceph_mds_client *mdsc,
             struct ceph_mds_request *req)
 {
@@ -590,14 +597,29 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
     if (req->r_inode) {
         inode = req->r_inode;
     } else if (req->r_dentry) {
-        if (req->r_dentry->d_inode) {
+        struct inode *dir = req->r_dentry->d_parent->d_inode;
+
+        if (dir->i_sb != mdsc->client->sb) {
+            /* not this fs! */
+            inode = req->r_dentry->d_inode;
+        } else if (ceph_snap(dir) != CEPH_NOSNAP) {
+            /* direct snapped/virtual snapdir requests
+             * based on parent dir inode */
+            struct dentry *dn =
+                get_nonsnap_parent(req->r_dentry->d_parent);
+            inode = dn->d_inode;
+            dout("__choose_mds using nonsnap parent %p\n", inode);
+        } else if (req->r_dentry->d_inode) {
+            /* dentry target */
             inode = req->r_dentry->d_inode;
         } else {
-            inode = req->r_dentry->d_parent->d_inode;
+            /* dir + name */
+            inode = dir;
             hash = req->r_dentry->d_name.hash;
             is_hash = true;
         }
     }
+
     dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
          (int)hash, mode);
     if (!inode)
@@ -2208,7 +2230,7 @@ static void handle_session(struct ceph_mds_session *session,
         pr_info("mds%d reconnect denied\n", session->s_mds);
         remove_session_caps(session);
         wake = 1; /* for good measure */
-        complete_all(&mdsc->session_close_waiters);
+        wake_up_all(&mdsc->session_close_wq);
         kick_requests(mdsc, mds);
         break;
 
@@ -2302,7 +2324,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
         path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
         if (IS_ERR(path)) {
             err = PTR_ERR(path);
-            BUG_ON(err);
+            goto out_dput;
         }
     } else {
         path = NULL;
@@ -2310,7 +2332,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
     }
     err = ceph_pagelist_encode_string(pagelist, path, pathlen);
     if (err)
-        goto out;
+        goto out_free;
 
     spin_lock(&inode->i_lock);
     cap->seq = 0;        /* reset cap seq */
@@ -2354,8 +2376,9 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
         unlock_kernel();
     }
 
-out:
+out_free:
     kfree(path);
+out_dput:
     dput(dentry);
     return err;
 }
@@ -2876,7 +2899,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
         return -ENOMEM;
 
     init_completion(&mdsc->safe_umount_waiters);
-    init_completion(&mdsc->session_close_waiters);
+    init_waitqueue_head(&mdsc->session_close_wq);
     INIT_LIST_HEAD(&mdsc->waiting_for_map);
     mdsc->sessions = NULL;
     mdsc->max_sessions = 0;
@@ -3021,6 +3044,23 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
     wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
 }
 
+/*
+ * true if all sessions are closed, or we force unmount
+ */
+bool done_closing_sessions(struct ceph_mds_client *mdsc)
+{
+    int i, n = 0;
+
+    if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN)
+        return true;
+
+    mutex_lock(&mdsc->mutex);
+    for (i = 0; i < mdsc->max_sessions; i++)
+        if (mdsc->sessions[i])
+            n++;
+    mutex_unlock(&mdsc->mutex);
+    return n == 0;
+}
 
 /*
  * called after sb is ro.
@@ -3029,45 +3069,32 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
 {
     struct ceph_mds_session *session;
     int i;
-    int n;
     struct ceph_client *client = mdsc->client;
-    unsigned long started, timeout = client->mount_args->mount_timeout * HZ;
+    unsigned long timeout = client->mount_args->mount_timeout * HZ;
 
     dout("close_sessions\n");
 
-    mutex_lock(&mdsc->mutex);
-
     /* close sessions */
-    started = jiffies;
-    while (time_before(jiffies, started + timeout)) {
-        dout("closing sessions\n");
-        n = 0;
-        for (i = 0; i < mdsc->max_sessions; i++) {
-            session = __ceph_lookup_mds_session(mdsc, i);
-            if (!session)
-                continue;
-            mutex_unlock(&mdsc->mutex);
-            mutex_lock(&session->s_mutex);
-            __close_session(mdsc, session);
-            mutex_unlock(&session->s_mutex);
-            ceph_put_mds_session(session);
-            mutex_lock(&mdsc->mutex);
-            n++;
-        }
-        if (n == 0)
-            break;
-
-        if (client->mount_state == CEPH_MOUNT_SHUTDOWN)
-            break;
-
-        dout("waiting for sessions to close\n");
+    mutex_lock(&mdsc->mutex);
+    for (i = 0; i < mdsc->max_sessions; i++) {
+        session = __ceph_lookup_mds_session(mdsc, i);
+        if (!session)
+            continue;
         mutex_unlock(&mdsc->mutex);
-        wait_for_completion_timeout(&mdsc->session_close_waiters,
-                        timeout);
+        mutex_lock(&session->s_mutex);
+        __close_session(mdsc, session);
+        mutex_unlock(&session->s_mutex);
+        ceph_put_mds_session(session);
         mutex_lock(&mdsc->mutex);
     }
+    mutex_unlock(&mdsc->mutex);
+
+    dout("waiting for sessions to close\n");
+    wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
+               timeout);
 
     /* tear down remaining sessions */
+    mutex_lock(&mdsc->mutex);
     for (i = 0; i < mdsc->max_sessions; i++) {
         if (mdsc->sessions[i]) {
             session = get_session(mdsc->sessions[i]);
@@ -3080,9 +3107,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
             mutex_lock(&mdsc->mutex);
         }
     }
-
     WARN_ON(!list_empty(&mdsc->cap_delay_list));
-
     mutex_unlock(&mdsc->mutex);
 
     ceph_cleanup_empty_realms(mdsc);
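The close-sessions rework above replaces a completion plus polling loop with a wait queue and a predicate: the unmount path now sleeps in wait_event_timeout() until done_closing_sessions() holds (or the timeout fires), and the session-close handler wakes it with wake_up_all(). A generic, hedged sketch of that waitqueue pattern with hypothetical names:

#include <linux/wait.h>
#include <linux/jiffies.h>

/* Hypothetical state guarded by a waitqueue, mirroring the pattern above. */
struct closer {
    wait_queue_head_t wq;   /* initialized once with init_waitqueue_head() */
    int open_count;
};

static bool all_closed(struct closer *c)
{
    return c->open_count == 0;  /* predicate re-evaluated on every wakeup */
}

static void closer_wait(struct closer *c, unsigned long timeout_jiffies)
{
    /* sleeps until all_closed() is true or the timeout elapses */
    wait_event_timeout(c->wq, all_closed(c), timeout_jiffies);
}

static void closer_drop_one(struct closer *c)
{
    if (--c->open_count == 0)
        wake_up_all(&c->wq);    /* counterpart of wake_up_all(&mdsc->session_close_wq) */
}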
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index ab7e89f5e344..c98267ce6d2a 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -234,7 +234,8 @@ struct ceph_mds_client {
     struct mutex            mutex;         /* all nested structures */
 
     struct ceph_mdsmap      *mdsmap;
-    struct completion       safe_umount_waiters, session_close_waiters;
+    struct completion       safe_umount_waiters;
+    wait_queue_head_t       session_close_wq;
     struct list_head        waiting_for_map;
 
     struct ceph_mds_session **sessions;    /* NULL for mds if no session */
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
index bed6391e52c7..dfced1dacbcd 100644
--- a/fs/ceph/osd_client.c
+++ b/fs/ceph/osd_client.c
@@ -661,7 +661,7 @@ static int __send_request(struct ceph_osd_client *osdc,
     reqhead->reassert_version = req->r_reassert_version;
 
     req->r_stamp = jiffies;
-    list_move_tail(&osdc->req_lru, &req->r_req_lru_item);
+    list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
 
     ceph_msg_get(req->r_request); /* send consumes a ref */
     ceph_con_send(&req->r_osd->o_con, req->r_request);
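This one-liner fixes swapped arguments: list_move_tail(entry, head) removes the entry from whatever list it is on and appends it to head, so passing them reversed corrupts the LRU. A tiny sketch with hypothetical names showing the correct order:

#include <linux/list.h>

struct demo_req {
    struct list_head lru_item;
};

static LIST_HEAD(demo_lru);

static void demo_touch(struct demo_req *req)
{
    /* first argument: the entry being moved; second: the list it joins */
    list_move_tail(&req->lru_item, &demo_lru);
}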
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index c0b26b6badba..4868b9dcac5a 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -435,7 +435,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 {
     struct inode *inode = &ci->vfs_inode;
     struct ceph_cap_snap *capsnap;
-    int used;
+    int used, dirty;
 
     capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
     if (!capsnap) {
@@ -445,6 +445,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 
     spin_lock(&inode->i_lock);
     used = __ceph_caps_used(ci);
+    dirty = __ceph_caps_dirty(ci);
     if (__ceph_have_pending_cap_snap(ci)) {
         /* there is no point in queuing multiple "pending" cap_snaps,
            as no new writes are allowed to start when pending, so any
@@ -452,11 +453,15 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
            cap_snap.  lucky us. */
         dout("queue_cap_snap %p already pending\n", inode);
         kfree(capsnap);
-    } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) {
+    } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR) ||
+           (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
+                 CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR))) {
         struct ceph_snap_context *snapc = ci->i_head_snapc;
 
+        dout("queue_cap_snap %p cap_snap %p queuing under %p\n", inode,
+             capsnap, snapc);
         igrab(inode);
 
         atomic_set(&capsnap->nref, 1);
         capsnap->ci = ci;
         INIT_LIST_HEAD(&capsnap->ci_item);
@@ -464,15 +469,21 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 
         capsnap->follows = snapc->seq - 1;
         capsnap->issued = __ceph_caps_issued(ci, NULL);
-        capsnap->dirty = __ceph_caps_dirty(ci);
+        capsnap->dirty = dirty;
 
         capsnap->mode = inode->i_mode;
         capsnap->uid = inode->i_uid;
         capsnap->gid = inode->i_gid;
 
-        /* fixme? */
-        capsnap->xattr_blob = NULL;
-        capsnap->xattr_len = 0;
+        if (dirty & CEPH_CAP_XATTR_EXCL) {
+            __ceph_build_xattrs_blob(ci);
+            capsnap->xattr_blob =
+                ceph_buffer_get(ci->i_xattrs.blob);
+            capsnap->xattr_version = ci->i_xattrs.version;
+        } else {
+            capsnap->xattr_blob = NULL;
+            capsnap->xattr_version = 0;
+        }
 
         /* dirty page count moved from _head to this cap_snap;
            all subsequent writes page dirties occur _after_ this
@@ -480,7 +491,9 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
         capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
         ci->i_wrbuffer_ref_head = 0;
         capsnap->context = snapc;
-        ci->i_head_snapc = NULL;
+        ci->i_head_snapc =
+            ceph_get_snap_context(ci->i_snap_realm->cached_context);
+        dout(" new snapc is %p\n", ci->i_head_snapc);
         list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
 
         if (used & CEPH_CAP_FILE_WR) {
@@ -539,6 +552,41 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
     return 1;  /* caller may want to ceph_flush_snaps */
 }
 
+/*
+ * Queue cap_snaps for snap writeback for this realm and its children.
+ * Called under snap_rwsem, so realm topology won't change.
+ */
+static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
+{
+    struct ceph_inode_info *ci;
+    struct inode *lastinode = NULL;
+    struct ceph_snap_realm *child;
+
+    dout("queue_realm_cap_snaps %p %llx inodes\n", realm, realm->ino);
+
+    spin_lock(&realm->inodes_with_caps_lock);
+    list_for_each_entry(ci, &realm->inodes_with_caps,
+                i_snap_realm_item) {
+        struct inode *inode = igrab(&ci->vfs_inode);
+        if (!inode)
+            continue;
+        spin_unlock(&realm->inodes_with_caps_lock);
+        if (lastinode)
+            iput(lastinode);
+        lastinode = inode;
+        ceph_queue_cap_snap(ci);
+        spin_lock(&realm->inodes_with_caps_lock);
+    }
+    spin_unlock(&realm->inodes_with_caps_lock);
+    if (lastinode)
+        iput(lastinode);
+
+    dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino);
+    list_for_each_entry(child, &realm->children, child_item)
+        queue_realm_cap_snaps(child);
+
+    dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
+}
 
 /*
  * Parse and apply a snapblob "snap trace" from the MDS.  This specifies
@@ -589,29 +637,8 @@ more:
          *
          * ...unless it's a snap deletion!
          */
-        if (!deletion) {
-            struct ceph_inode_info *ci;
-            struct inode *lastinode = NULL;
-
-            spin_lock(&realm->inodes_with_caps_lock);
-            list_for_each_entry(ci, &realm->inodes_with_caps,
-                        i_snap_realm_item) {
-                struct inode *inode = igrab(&ci->vfs_inode);
-                if (!inode)
-                    continue;
-                spin_unlock(&realm->inodes_with_caps_lock);
-                if (lastinode)
-                    iput(lastinode);
-                lastinode = inode;
-                ceph_queue_cap_snap(ci);
-                spin_lock(&realm->inodes_with_caps_lock);
-            }
-            spin_unlock(&realm->inodes_with_caps_lock);
-            if (lastinode)
-                iput(lastinode);
-            dout("update_snap_trace cap_snaps queued\n");
-        }
-
+        if (!deletion)
+            queue_realm_cap_snaps(realm);
     } else {
         dout("update_snap_trace %llx %p seq %lld unchanged\n",
              realm->ino, realm, realm->seq);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 2482d696f0de..c33897ae5725 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -216,8 +216,7 @@ struct ceph_cap_snap {
     uid_t uid;
     gid_t gid;
 
-    void *xattr_blob;
-    int xattr_len;
+    struct ceph_buffer *xattr_blob;
     u64 xattr_version;
 
     u64 size;
@@ -229,8 +228,11 @@ struct ceph_cap_snap {
 
 static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
 {
-    if (atomic_dec_and_test(&capsnap->nref))
+    if (atomic_dec_and_test(&capsnap->nref)) {
+        if (capsnap->xattr_blob)
+            ceph_buffer_put(capsnap->xattr_blob);
         kfree(capsnap);
+    }
 }
 
@@ -342,7 +344,8 @@ struct ceph_inode_info {
     unsigned i_cap_exporting_issued;
     struct ceph_cap_reservation i_cap_migration_resv;
     struct list_head i_cap_snaps;   /* snapped state pending flush to mds */
-    struct ceph_snap_context *i_head_snapc;  /* set if wr_buffer_head > 0 */
+    struct ceph_snap_context *i_head_snapc;  /* set if wr_buffer_head > 0 or
+                            dirty|flushing caps */
     unsigned i_snap_caps;           /* cap bits for snapped files */
 
     int i_nr_by_mode[CEPH_FILE_MODE_NUM];  /* open file counts */
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 097a2654c00f..9578af610b73 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -485,6 +485,7 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
         ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
         ci->i_xattrs.prealloc_blob = NULL;
         ci->i_xattrs.dirty = false;
+        ci->i_xattrs.version++;
     }
 }
 
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 917b7d449bb2..0da1debd499d 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -2,6 +2,8 @@ config CIFS
     tristate "CIFS support (advanced network filesystem, SMBFS successor)"
     depends on INET
     select NLS
+    select CRYPTO_MD5
+    select CRYPTO_ARC4
     help
       This is the client VFS module for the Common Internet File System
       (CIFS) protocol which is the successor to the Server Message Block
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index cfd1ce34e0bc..21f0fbd86989 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -597,13 +597,13 @@ decode_negTokenInit(unsigned char *security_blob, int length,
                 if (compare_oid(oid, oidlen, MSKRB5_OID,
                         MSKRB5_OID_LEN))
                     server->sec_mskerberos = true;
-                else if (compare_oid(oid, oidlen, KRB5U2U_OID,
+                if (compare_oid(oid, oidlen, KRB5U2U_OID,
                         KRB5U2U_OID_LEN))
                     server->sec_kerberosu2u = true;
-                else if (compare_oid(oid, oidlen, KRB5_OID,
+                if (compare_oid(oid, oidlen, KRB5_OID,
                         KRB5_OID_LEN))
                     server->sec_kerberos = true;
-                else if (compare_oid(oid, oidlen, NTLMSSP_OID,
+                if (compare_oid(oid, oidlen, NTLMSSP_OID,
                         NTLMSSP_OID_LEN))
                     server->sec_ntlmssp = true;
 
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 650638275a6f..7fe6b52df507 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -30,6 +30,8 @@
  * This is a compressed table of upper and lower case conversion.
  *
  */
+#ifndef _CIFS_UNICODE_H
+#define _CIFS_UNICODE_H
 
 #include <asm/byteorder.h>
 #include <linux/types.h>
@@ -67,8 +69,8 @@ extern const struct UniCaseRange CifsUniUpperRange[];
 #endif                          /* UNIUPR_NOUPPER */
 
 #ifndef UNIUPR_NOLOWER
-extern signed char UniLowerTable[512];
-extern struct UniCaseRange UniLowerRange[];
+extern signed char CifsUniLowerTable[512];
+extern const struct UniCaseRange CifsUniLowerRange[];
 #endif                          /* UNIUPR_NOLOWER */
 
 #ifdef __KERNEL__
@@ -337,15 +339,15 @@ UniStrupr(register wchar_t *upin)
  * UniTolower:  Convert a unicode character to lower case
  */
 static inline wchar_t
-UniTolower(wchar_t uc)
+UniTolower(register wchar_t uc)
 {
-    register struct UniCaseRange *rp;
+    register const struct UniCaseRange *rp;
 
-    if (uc < sizeof(UniLowerTable)) {
+    if (uc < sizeof(CifsUniLowerTable)) {
         /* Latin characters */
-        return uc + UniLowerTable[uc];  /* Use base tables */
+        return uc + CifsUniLowerTable[uc];      /* Use base tables */
     } else {
-        rp = UniLowerRange;     /* Use range tables */
+        rp = CifsUniLowerRange; /* Use range tables */
         while (rp->start) {
             if (uc < rp->start)     /* Before start of range */
                 return uc;      /* Uppercase = input */
@@ -374,3 +376,5 @@ UniStrlwr(register wchar_t *upin)
 }
 
 #endif
+
+#endif /* _CIFS_UNICODE_H */
diff --git a/fs/cifs/cifs_uniupr.h b/fs/cifs/cifs_uniupr.h
index 18a9d978e519..0ac7c5a8633a 100644
--- a/fs/cifs/cifs_uniupr.h
+++ b/fs/cifs/cifs_uniupr.h
@@ -140,7 +140,7 @@ const struct UniCaseRange CifsUniUpperRange[] = {
 /*
  * Latin lower case
  */
-static signed char CifsUniLowerTable[512] = {
+signed char CifsUniLowerTable[512] = {
     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,    /* 000-00f */
     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,    /* 010-01f */
     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,    /* 020-02f */
@@ -242,12 +242,12 @@ static signed char UniCaseRangeLff20[27] = {
 /*
  * Lower Case Range
  */
-static const struct UniCaseRange CifsUniLowerRange[] = {
-    0x0380, 0x03ab, UniCaseRangeL0380,
-    0x0400, 0x042f, UniCaseRangeL0400,
-    0x0490, 0x04cb, UniCaseRangeL0490,
-    0x1e00, 0x1ff7, UniCaseRangeL1e00,
-    0xff20, 0xff3a, UniCaseRangeLff20,
-    0, 0, 0
+const struct UniCaseRange CifsUniLowerRange[] = {
+    {0x0380, 0x03ab, UniCaseRangeL0380},
+    {0x0400, 0x042f, UniCaseRangeL0400},
+    {0x0490, 0x04cb, UniCaseRangeL0490},
+    {0x1e00, 0x1ff7, UniCaseRangeL1e00},
+    {0xff20, 0xff3a, UniCaseRangeLff20},
+    {0}
 };
 #endif
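The cifsencrypt.c diff that follows converts the hand-rolled MD5/HMAC-MD5 helpers to the kernel crypto shash API (crypto_shash_init/update/final on a shash_desc, with crypto_shash_setkey for the HMAC key), which is also why the Kconfig hunk selects CRYPTO_MD5 and CRYPTO_ARC4. A minimal, hedged sketch of that flow with a one-shot helper; buffer names and the GFP flags are illustrative, not the cifs structures, and the era's shash_desc also carried a flags field that modern kernels have dropped:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int demo_hmac_md5(const u8 *key, unsigned int keylen,
             const u8 *data, unsigned int len, u8 *out)
{
    struct crypto_shash *tfm;
    struct shash_desc *desc;
    int rc;

    tfm = crypto_alloc_shash("hmac(md5)", 0, 0);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    /* the descriptor carries per-request state after the transform */
    desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
    if (!desc) {
        rc = -ENOMEM;
        goto out_free_tfm;
    }
    desc->tfm = tfm;

    rc = crypto_shash_setkey(tfm, key, keylen);
    if (rc)
        goto out_free_desc;
    rc = crypto_shash_init(desc);
    if (!rc)
        rc = crypto_shash_update(desc, data, len);
    if (!rc)
        rc = crypto_shash_final(desc, out); /* out must hold the MD5 digest */

out_free_desc:
    kfree(desc);
out_free_tfm:
    crypto_free_shash(tfm);
    return rc;
}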
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 847628dfdc44..709f2296bdb4 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -27,6 +27,7 @@
 #include "md5.h"
 #include "cifs_unicode.h"
 #include "cifsproto.h"
+#include "ntlmssp.h"
 #include <linux/ctype.h>
 #include <linux/random.h>
 
@@ -42,21 +43,43 @@ extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
             unsigned char *p24);
 
 static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
-        const struct mac_key *key, char *signature)
+                struct TCP_Server_Info *server, char *signature)
 {
-    struct MD5Context context;
+    int rc;
 
-    if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL))
+    if (cifs_pdu == NULL || server == NULL || signature == NULL)
         return -EINVAL;
 
-    cifs_MD5_init(&context);
-    cifs_MD5_update(&context, (char *)&key->data, key->len);
-    cifs_MD5_update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
+    if (!server->ntlmssp.sdescmd5) {
+        cERROR(1,
+            "cifs_calculate_signature: can't generate signature\n");
+        return -1;
+    }
 
-    cifs_MD5_final(signature, &context);
-    return 0;
+    rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
+    if (rc) {
+        cERROR(1, "cifs_calculate_signature: oould not init md5\n");
+        return rc;
+    }
+
+    if (server->secType == RawNTLMSSP)
+        crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+            server->session_key.data.ntlmv2.key,
+            CIFS_NTLMV2_SESSKEY_SIZE);
+    else
+        crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+            (char *)&server->session_key.data,
+            server->session_key.len);
+
+    crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+        cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
+
+    rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature);
+
+    return rc;
 }
 
+
 int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
           __u32 *pexpected_response_sequence_number)
 {
@@ -78,8 +101,7 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
     server->sequence_number++;
     spin_unlock(&GlobalMid_Lock);
 
-    rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key,
-                      smb_signature);
+    rc = cifs_calculate_signature(cifs_pdu, server, smb_signature);
     if (rc)
         memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
     else
@@ -89,21 +111,39 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
 }
 
 static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
-            const struct mac_key *key, char *signature)
+            struct TCP_Server_Info *server, char *signature)
 {
-    struct MD5Context context;
     int i;
+    int rc;
 
-    if ((iov == NULL) || (signature == NULL) || (key == NULL))
+    if (iov == NULL || server == NULL || signature == NULL)
         return -EINVAL;
 
-    cifs_MD5_init(&context);
-    cifs_MD5_update(&context, (char *)&key->data, key->len);
+    if (!server->ntlmssp.sdescmd5) {
+        cERROR(1, "cifs_calc_signature2: can't generate signature\n");
+        return -1;
+    }
+
+    rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
+    if (rc) {
+        cERROR(1, "cifs_calc_signature2: oould not init md5\n");
+        return rc;
+    }
+
+    if (server->secType == RawNTLMSSP)
+        crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+            server->session_key.data.ntlmv2.key,
+            CIFS_NTLMV2_SESSKEY_SIZE);
+    else
+        crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+            (char *)&server->session_key.data,
+            server->session_key.len);
+
     for (i = 0; i < n_vec; i++) {
         if (iov[i].iov_len == 0)
             continue;
         if (iov[i].iov_base == NULL) {
-            cERROR(1, "null iovec entry");
+            cERROR(1, "cifs_calc_signature2: null iovec entry");
             return -EIO;
         }
         /* The first entry includes a length field (which does not get
@@ -111,18 +151,18 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
         if (i == 0) {
             if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
                 break; /* nothing to sign or corrupt header */
-            cifs_MD5_update(&context, iov[0].iov_base+4,
-                iov[0].iov_len-4);
+            crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                iov[i].iov_base + 4, iov[i].iov_len - 4);
         } else
-            cifs_MD5_update(&context, iov[i].iov_base, iov[i].iov_len);
+            crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                iov[i].iov_base, iov[i].iov_len);
     }
 
-    cifs_MD5_final(signature, &context);
+    rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature);
 
-    return 0;
+    return rc;
 }
 
-
 int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
            __u32 *pexpected_response_sequence_number)
 {
@@ -145,8 +185,7 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
     server->sequence_number++;
     spin_unlock(&GlobalMid_Lock);
 
-    rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key,
-                  smb_signature);
+    rc = cifs_calc_signature2(iov, n_vec, server, smb_signature);
     if (rc)
         memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
     else
@@ -156,14 +195,14 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
 }
 
 int cifs_verify_signature(struct smb_hdr *cifs_pdu,
-              const struct mac_key *mac_key,
+              struct TCP_Server_Info *server,
               __u32 expected_sequence_number)
 {
-    unsigned int rc;
+    int rc;
     char server_response_sig[8];
     char what_we_think_sig_should_be[20];
 
-    if ((cifs_pdu == NULL) || (mac_key == NULL))
+    if (cifs_pdu == NULL || server == NULL)
         return -EINVAL;
 
     if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
@@ -192,7 +231,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
                     cpu_to_le32(expected_sequence_number);
     cifs_pdu->Signature.Sequence.Reserved = 0;
 
-    rc = cifs_calculate_signature(cifs_pdu, mac_key,
+    rc = cifs_calculate_signature(cifs_pdu, server,
                     what_we_think_sig_should_be);
 
     if (rc)
@@ -209,7 +248,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
 }
 
 /* We fill in key by putting in 40 byte array which was allocated by caller */
-int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
+int cifs_calculate_session_key(struct session_key *key, const char *rn,
                const char *password)
 {
     char temp_key[16];
@@ -223,63 +262,6 @@ int cifs_calculate_session_key(struct session_key *key, const char *rn,
     return 0;
 }
 
-int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *ses,
-                   const struct nls_table *nls_info)
-{
-    char temp_hash[16];
-    struct HMACMD5Context ctx;
-    char *ucase_buf;
-    __le16 *unicode_buf;
-    unsigned int i, user_name_len, dom_name_len;
-
-    if (ses == NULL)
-        return -EINVAL;
-
-    E_md4hash(ses->password, temp_hash);
-
-    hmac_md5_init_limK_to_64(temp_hash, 16, &ctx);
-    user_name_len = strlen(ses->userName);
-    if (user_name_len > MAX_USERNAME_SIZE)
-        return -EINVAL;
-    if (ses->domainName == NULL)
-        return -EINVAL; /* BB should we use CIFS_LINUX_DOM */
-    dom_name_len = strlen(ses->domainName);
-    if (dom_name_len > MAX_USERNAME_SIZE)
-        return -EINVAL;
-
-    ucase_buf = kmalloc((MAX_USERNAME_SIZE+1), GFP_KERNEL);
-    if (ucase_buf == NULL)
-        return -ENOMEM;
-    unicode_buf = kmalloc((MAX_USERNAME_SIZE+1)*4, GFP_KERNEL);
-    if (unicode_buf == NULL) {
-        kfree(ucase_buf);
-        return -ENOMEM;
-    }
-
-    for (i = 0; i < user_name_len; i++)
-        ucase_buf[i] = nls_info->charset2upper[(int)ses->userName[i]];
-    ucase_buf[i] = 0;
-    user_name_len = cifs_strtoUCS(unicode_buf, ucase_buf,
-                      MAX_USERNAME_SIZE*2, nls_info);
-    unicode_buf[user_name_len] = 0;
-    user_name_len++;
-
-    for (i = 0; i < dom_name_len; i++)
-        ucase_buf[i] = nls_info->charset2upper[(int)ses->domainName[i]];
-    ucase_buf[i] = 0;
-    dom_name_len = cifs_strtoUCS(unicode_buf+user_name_len, ucase_buf,
-                     MAX_USERNAME_SIZE*2, nls_info);
-
-    unicode_buf[user_name_len + dom_name_len] = 0;
-    hmac_md5_update((const unsigned char *) unicode_buf,
-            (user_name_len+dom_name_len)*2, &ctx);
-
-    hmac_md5_final(ses->server->ntlmv2_hash, &ctx);
-    kfree(ucase_buf);
-    kfree(unicode_buf);
-    return 0;
-}
-
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
 void calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
             char *lnm_session_key)
@@ -324,38 +306,52 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
 {
     int rc = 0;
     int len;
-    char nt_hash[16];
-    struct HMACMD5Context *pctxt;
+    char nt_hash[CIFS_NTHASH_SIZE];
     wchar_t *user;
     wchar_t *domain;
+    wchar_t *server;
 
-    pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL);
-
-    if (pctxt == NULL)
-        return -ENOMEM;
+    if (!ses->server->ntlmssp.sdeschmacmd5) {
+        cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
+        return -1;
+    }
 
     /* calculate md4 hash of password */
     E_md4hash(ses->password, nt_hash);
 
-    /* convert Domainname to unicode and uppercase */
-    hmac_md5_init_limK_to_64(nt_hash, 16, pctxt);
+    crypto_shash_setkey(ses->server->ntlmssp.hmacmd5, nt_hash,
+                CIFS_NTHASH_SIZE);
+
+    rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash);
+    if (rc) {
+        cERROR(1, "calc_ntlmv2_hash: could not init hmacmd5\n");
+        return rc;
+    }
 
     /* convert ses->userName to unicode and uppercase */
     len = strlen(ses->userName);
     user = kmalloc(2 + (len * 2), GFP_KERNEL);
-    if (user == NULL)
+    if (user == NULL) {
+        cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
+        rc = -ENOMEM;
        goto calc_exit_2;
+    }
     len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
     UniStrupr(user);
-    hmac_md5_update((char *)user, 2*len, pctxt);
+
+    crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
+                (char *)user, 2 * len);
 
     /* convert ses->domainName to unicode and uppercase */
     if (ses->domainName) {
         len = strlen(ses->domainName);
 
         domain = kmalloc(2 + (len * 2), GFP_KERNEL);
-        if (domain == NULL)
+        if (domain == NULL) {
+            cERROR(1, "calc_ntlmv2_hash: domain mem alloc failure");
+            rc = -ENOMEM;
             goto calc_exit_1;
+        }
         len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len,
                     nls_cp);
         /* the following line was removed since it didn't work well
@@ -363,65 +359,292 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
            Maybe converting the domain name earlier makes sense */
         /* UniStrupr(domain); */
 
-        hmac_md5_update((char *)domain, 2*len, pctxt);
+        crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
+                    (char *)domain, 2 * len);
 
         kfree(domain);
+    } else if (ses->serverName) {
+        len = strlen(ses->serverName);
+
+        server = kmalloc(2 + (len * 2), GFP_KERNEL);
+        if (server == NULL) {
+            cERROR(1, "calc_ntlmv2_hash: server mem alloc failure");
+            rc = -ENOMEM;
+            goto calc_exit_1;
+        }
+        len = cifs_strtoUCS((__le16 *)server, ses->serverName, len,
+                    nls_cp);
+        /* the following line was removed since it didn't work well
+           with lower cased domain name that passed as an option.
+           Maybe converting the domain name earlier makes sense */
+        /* UniStrupr(domain); */
+
+        crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
+                    (char *)server, 2 * len);
+
+        kfree(server);
     }
+
+    rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
+                ses->server->ntlmv2_hash);
+
 calc_exit_1:
     kfree(user);
 calc_exit_2:
     /* BB FIXME what about bytes 24 through 40 of the signing key?
        compare with the NTLM example */
-    hmac_md5_final(ses->server->ntlmv2_hash, pctxt);
 
-    kfree(pctxt);
     return rc;
 }
 
-void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
-              const struct nls_table *nls_cp)
+static int
+find_domain_name(struct cifsSesInfo *ses)
+{
+    int rc = 0;
+    unsigned int attrsize;
+    unsigned int type;
+    unsigned char *blobptr;
+    struct ntlmssp2_name *attrptr;
+
+    if (ses->server->tiblob) {
+        blobptr = ses->server->tiblob;
+        attrptr = (struct ntlmssp2_name *) blobptr;
+
+        while ((type = attrptr->type) != 0) {
+            blobptr += 2; /* advance attr type */
+            attrsize = attrptr->length;
+            blobptr += 2; /* advance attr size */
+            if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
+                if (!ses->domainName) {
+                    ses->domainName =
+                        kmalloc(attrptr->length + 1,
+                            GFP_KERNEL);
+                    if (!ses->domainName)
+                        return -ENOMEM;
+                    cifs_from_ucs2(ses->domainName,
+                        (__le16 *)blobptr,
+                        attrptr->length,
+                        attrptr->length,
+                        load_nls_default(), false);
+                }
+            }
+            blobptr += attrsize; /* advance attr value */
+            attrptr = (struct ntlmssp2_name *) blobptr;
+        }
+    } else {
+        ses->server->tilen = 2 * sizeof(struct ntlmssp2_name);
+        ses->server->tiblob = kmalloc(ses->server->tilen, GFP_KERNEL);
+        if (!ses->server->tiblob) {
+            ses->server->tilen = 0;
+            cERROR(1, "Challenge target info allocation failure");
+            return -ENOMEM;
+        }
+        memset(ses->server->tiblob, 0x0, ses->server->tilen);
+        attrptr = (struct ntlmssp2_name *) ses->server->tiblob;
+        attrptr->type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
+    }
+
+    return rc;
+}
+
+static int
+CalcNTLMv2_response(const struct TCP_Server_Info *server,
+            char *v2_session_response)
 {
     int rc;
+
+    if (!server->ntlmssp.sdeschmacmd5) {
+        cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
+        return -1;
+    }
+
+    crypto_shash_setkey(server->ntlmssp.hmacmd5, server->ntlmv2_hash,
+        CIFS_HMAC_MD5_HASH_SIZE);
+
+    rc = crypto_shash_init(&server->ntlmssp.sdeschmacmd5->shash);
+    if (rc) {
+        cERROR(1, "CalcNTLMv2_response: could not init hmacmd5");
+        return rc;
+    }
+
+    memcpy(v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
+        server->cryptKey, CIFS_SERVER_CHALLENGE_SIZE);
+    crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
+        v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
+        sizeof(struct ntlmv2_resp) - CIFS_SERVER_CHALLENGE_SIZE);
+
+    if (server->tilen)
+        crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
+            server->tiblob, server->tilen);
+
+    rc = crypto_shash_final(&server->ntlmssp.sdeschmacmd5->shash,
+        v2_session_response);
+
+    return rc;
+}
+
+int
+setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
+            const struct nls_table *nls_cp)
+{
+    int rc = 0;
     struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf;
-    struct HMACMD5Context context;
 
     buf->blob_signature = cpu_to_le32(0x00000101);
     buf->reserved = 0;
     buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
     get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
     buf->reserved2 = 0;
-    buf->names[0].type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
-    buf->names[0].length = 0;
-    buf->names[1].type = 0;
-    buf->names[1].length = 0;
+
+    if (!ses->domainName) {
+        rc = find_domain_name(ses);
+        if (rc) {
+            cERROR(1, "could not get domain/server name rc %d", rc);
+            return rc;
+        }
+    }
 
     /* calculate buf->ntlmv2_hash */
     rc = calc_ntlmv2_hash(ses, nls_cp);
-    if (rc)
+    if (rc) {
         cERROR(1, "could not get v2 hash rc %d", rc);
-    CalcNTLMv2_response(ses, resp_buf);
+        return rc;
+    }
+    rc = CalcNTLMv2_response(ses->server, resp_buf);
+    if (rc) {
+        cERROR(1, "could not get v2 hash rc %d", rc);
+        return rc;
+    }
 
-    /* now calculate the MAC key for NTLMv2 */
-    hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
-    hmac_md5_update(resp_buf, 16, &context);
-    hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context);
+    if (!ses->server->ntlmssp.sdeschmacmd5) {
+        cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
+        return -1;
+    }
408 523
409 memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf, 524 crypto_shash_setkey(ses->server->ntlmssp.hmacmd5,
410 sizeof(struct ntlmv2_resp)); 525 ses->server->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
411 ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp); 526
527 rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash);
528 if (rc) {
529 cERROR(1, "setup_ntlmv2_rsp: could not init hmacmd5\n");
530 return rc;
531 }
532
533 crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
534 resp_buf, CIFS_HMAC_MD5_HASH_SIZE);
535
536 rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
537 ses->server->session_key.data.ntlmv2.key);
538
539 memcpy(&ses->server->session_key.data.ntlmv2.resp, resp_buf,
540 sizeof(struct ntlmv2_resp));
541 ses->server->session_key.len = 16 + sizeof(struct ntlmv2_resp);
542
543 return rc;
412} 544}
413 545
414void CalcNTLMv2_response(const struct cifsSesInfo *ses, 546int
415 char *v2_session_response) 547calc_seckey(struct TCP_Server_Info *server)
416{ 548{
417 struct HMACMD5Context context; 549 int rc;
418 /* rest of v2 struct already generated */ 550 unsigned char sec_key[CIFS_NTLMV2_SESSKEY_SIZE];
419 memcpy(v2_session_response + 8, ses->server->cryptKey, 8); 551 struct crypto_blkcipher *tfm_arc4;
420 hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context); 552 struct scatterlist sgin, sgout;
553 struct blkcipher_desc desc;
554
555 get_random_bytes(sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
556
557 tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)",
558 0, CRYPTO_ALG_ASYNC);
559 if (!tfm_arc4 || IS_ERR(tfm_arc4)) {
560 cERROR(1, "could not allocate " "master crypto API arc4\n");
561 return 1;
562 }
563
564 desc.tfm = tfm_arc4;
565
566 crypto_blkcipher_setkey(tfm_arc4,
567 server->session_key.data.ntlmv2.key, CIFS_CPHTXT_SIZE);
568 sg_init_one(&sgin, sec_key, CIFS_CPHTXT_SIZE);
569 sg_init_one(&sgout, server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
570 rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE);
421 571
422 hmac_md5_update(v2_session_response+8, 572 if (!rc)
423 sizeof(struct ntlmv2_resp) - 8, &context); 573 memcpy(server->session_key.data.ntlmv2.key,
574 sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
575
576 crypto_free_blkcipher(tfm_arc4);
577
578 return 0;
579}
424 580
425 hmac_md5_final(v2_session_response, &context); 581void
426/* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */ 582cifs_crypto_shash_release(struct TCP_Server_Info *server)
583{
584 if (server->ntlmssp.md5)
585 crypto_free_shash(server->ntlmssp.md5);
586
587 if (server->ntlmssp.hmacmd5)
588 crypto_free_shash(server->ntlmssp.hmacmd5);
589
590 kfree(server->ntlmssp.sdeschmacmd5);
591
592 kfree(server->ntlmssp.sdescmd5);
593}
594
595int
596cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
597{
598 int rc;
599 unsigned int size;
600
601 server->ntlmssp.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
602 if (!server->ntlmssp.hmacmd5 ||
603 IS_ERR(server->ntlmssp.hmacmd5)) {
604 cERROR(1, "could not allocate crypto hmacmd5\n");
605 return 1;
606 }
607
608 server->ntlmssp.md5 = crypto_alloc_shash("md5", 0, 0);
609 if (!server->ntlmssp.md5 || IS_ERR(server->ntlmssp.md5)) {
610 cERROR(1, "could not allocate crypto md5\n");
611 rc = 1;
612 goto cifs_crypto_shash_allocate_ret1;
613 }
614
615 size = sizeof(struct shash_desc) +
616 crypto_shash_descsize(server->ntlmssp.hmacmd5);
617 server->ntlmssp.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
618 if (!server->ntlmssp.sdeschmacmd5) {
619 cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5\n");
620 rc = -ENOMEM;
621 goto cifs_crypto_shash_allocate_ret2;
622 }
623 server->ntlmssp.sdeschmacmd5->shash.tfm = server->ntlmssp.hmacmd5;
624 server->ntlmssp.sdeschmacmd5->shash.flags = 0x0;
625
626
627 size = sizeof(struct shash_desc) +
628 crypto_shash_descsize(server->ntlmssp.md5);
629 server->ntlmssp.sdescmd5 = kmalloc(size, GFP_KERNEL);
630 if (!server->ntlmssp.sdescmd5) {
631 cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5\n");
632 rc = -ENOMEM;
633 goto cifs_crypto_shash_allocate_ret3;
634 }
635 server->ntlmssp.sdescmd5->shash.tfm = server->ntlmssp.md5;
636 server->ntlmssp.sdescmd5->shash.flags = 0x0;
637
638 return 0;
639
640cifs_crypto_shash_allocate_ret3:
641 kfree(server->ntlmssp.sdeschmacmd5);
642
643cifs_crypto_shash_allocate_ret2:
644 crypto_free_shash(server->ntlmssp.md5);
645
646cifs_crypto_shash_allocate_ret1:
647 crypto_free_shash(server->ntlmssp.hmacmd5);
648
649 return rc;
427} 650}
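
The cifsencrypt.c hunks above move the NTLMv2 hashing from the old hmac_md5_* helpers to the kernel crypto_shash interface, with the transforms and descriptors cached in ses->server->ntlmssp so they can be reused for signing. A minimal, self-contained sketch of that setkey/init/update/final pattern (the helper name and buffer handling here are illustrative, not part of the patch):

/* Rough sketch of the crypto_shash HMAC-MD5 sequence used above; the
 * descriptor is sized for the transform just as cifs_crypto_shash_allocate()
 * does with struct sdesc. */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int example_hmac_md5(const u8 *key, unsigned int klen,
                            const u8 *data, unsigned int dlen,
                            u8 *out)            /* 16-byte digest */
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int rc;

        tfm = crypto_alloc_shash("hmac(md5)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        rc = crypto_shash_setkey(tfm, key, klen);
        if (rc)
                goto free_tfm;

        desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
        if (!desc) {
                rc = -ENOMEM;
                goto free_tfm;
        }
        desc->tfm = tfm;

        rc = crypto_shash_init(desc);
        if (!rc)
                rc = crypto_shash_update(desc, data, dlen);
        if (!rc)
                rc = crypto_shash_final(desc, out);

        kfree(desc);
free_tfm:
        crypto_free_shash(tfm);
        return rc;
}
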
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 0cdfb8c32ac6..c9d0cfc086eb 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -25,6 +25,9 @@
25#include <linux/workqueue.h> 25#include <linux/workqueue.h>
26#include "cifs_fs_sb.h" 26#include "cifs_fs_sb.h"
27#include "cifsacl.h" 27#include "cifsacl.h"
28#include <crypto/internal/hash.h>
29#include <linux/scatterlist.h>
30
28/* 31/*
29 * The sizes of various internal tables and strings 32 * The sizes of various internal tables and strings
30 */ 33 */
@@ -97,7 +100,7 @@ enum protocolEnum {
97 /* Netbios frames protocol not supported at this time */ 100 /* Netbios frames protocol not supported at this time */
98}; 101};
99 102
100struct mac_key { 103struct session_key {
101 unsigned int len; 104 unsigned int len;
102 union { 105 union {
103 char ntlm[CIFS_SESS_KEY_SIZE + 16]; 106 char ntlm[CIFS_SESS_KEY_SIZE + 16];
@@ -120,6 +123,21 @@ struct cifs_cred {
120 struct cifs_ace *aces; 123 struct cifs_ace *aces;
121}; 124};
122 125
126struct sdesc {
127 struct shash_desc shash;
128 char ctx[];
129};
130
131struct ntlmssp_auth {
132 __u32 client_flags;
133 __u32 server_flags;
134 unsigned char ciphertext[CIFS_CPHTXT_SIZE];
135 struct crypto_shash *hmacmd5;
136 struct crypto_shash *md5;
137 struct sdesc *sdeschmacmd5;
138 struct sdesc *sdescmd5;
139};
140
123/* 141/*
124 ***************************************************************** 142 *****************************************************************
125 * Except the CIFS PDUs themselves all the 143 * Except the CIFS PDUs themselves all the
@@ -182,11 +200,14 @@ struct TCP_Server_Info {
182 /* 16th byte of RFC1001 workstation name is always null */ 200 /* 16th byte of RFC1001 workstation name is always null */
183 char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; 201 char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
184 __u32 sequence_number; /* needed for CIFS PDU signature */ 202 __u32 sequence_number; /* needed for CIFS PDU signature */
185 struct mac_key mac_signing_key; 203 struct session_key session_key;
186 char ntlmv2_hash[16]; 204 char ntlmv2_hash[16];
187 unsigned long lstrp; /* when we got last response from this server */ 205 unsigned long lstrp; /* when we got last response from this server */
188 u16 dialect; /* dialect index that server chose */ 206 u16 dialect; /* dialect index that server chose */
189 /* extended security flavors that server supports */ 207 /* extended security flavors that server supports */
208 unsigned int tilen; /* length of the target info blob */
209 unsigned char *tiblob; /* target info blob in challenge response */
210 struct ntlmssp_auth ntlmssp; /* various keys, ciphers, flags */
190 bool sec_kerberos; /* supports plain Kerberos */ 211 bool sec_kerberos; /* supports plain Kerberos */
191 bool sec_mskerberos; /* supports legacy MS Kerberos */ 212 bool sec_mskerberos; /* supports legacy MS Kerberos */
192 bool sec_kerberosu2u; /* supports U2U Kerberos */ 213 bool sec_kerberosu2u; /* supports U2U Kerberos */
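
The new struct ntlmssp_auth above carries, besides the shash handles, the arc4-encrypted secondary session key in ciphertext[]; calc_seckey() in the cifsencrypt.c hunk fills it. A condensed sketch of that ecb(arc4) step (the fixed 16-byte sizes and the helper name below are illustrative):

/* Sketch of the blkcipher calls calc_seckey() uses: RC4-encrypt a random
 * secondary key with the NTLMv2 session key so it can be carried in the
 * NTLMSSP AUTHENTICATE message's SessionKey field. */
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int example_arc4_wrap_key(u8 *session_key, u8 *sec_key, u8 *out)
{
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        struct scatterlist sgin, sgout;
        int rc;

        tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        desc.tfm = tfm;
        desc.flags = 0;

        rc = crypto_blkcipher_setkey(tfm, session_key, 16);
        if (!rc) {
                sg_init_one(&sgin, sec_key, 16);
                sg_init_one(&sgout, out, 16);
                rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 16);
        }

        crypto_free_blkcipher(tfm);
        return rc;
}
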
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 14d036d8db11..320e0fd0ba7b 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -134,6 +134,12 @@
134 * Size of the session key (crypto key encrypted with the password 134 * Size of the session key (crypto key encrypted with the password
135 */ 135 */
136#define CIFS_SESS_KEY_SIZE (24) 136#define CIFS_SESS_KEY_SIZE (24)
137#define CIFS_CLIENT_CHALLENGE_SIZE (8)
138#define CIFS_SERVER_CHALLENGE_SIZE (8)
139#define CIFS_HMAC_MD5_HASH_SIZE (16)
140#define CIFS_CPHTXT_SIZE (16)
141#define CIFS_NTLMV2_SESSKEY_SIZE (16)
142#define CIFS_NTHASH_SIZE (16)
137 143
138/* 144/*
139 * Maximum user name length 145 * Maximum user name length
@@ -663,7 +669,6 @@ struct ntlmv2_resp {
663 __le64 time; 669 __le64 time;
664 __u64 client_chal; /* random */ 670 __u64 client_chal; /* random */
665 __u32 reserved2; 671 __u32 reserved2;
666 struct ntlmssp2_name names[2];
667 /* array of name entries could follow ending in minimum 4 byte struct */ 672 /* array of name entries could follow ending in minimum 4 byte struct */
668} __attribute__((packed)); 673} __attribute__((packed));
669 674
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 1f5450814087..1378d9133844 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -361,15 +361,15 @@ extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
361extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *, 361extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
362 __u32 *); 362 __u32 *);
363extern int cifs_verify_signature(struct smb_hdr *, 363extern int cifs_verify_signature(struct smb_hdr *,
364 const struct mac_key *mac_key, 364 struct TCP_Server_Info *server,
365 __u32 expected_sequence_number); 365 __u32 expected_sequence_number);
366extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn, 366extern int cifs_calculate_session_key(struct session_key *key, const char *rn,
367 const char *pass); 367 const char *pass);
368extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *, 368extern int setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
369 const struct nls_table *);
370extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *);
371extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
372 const struct nls_table *); 369 const struct nls_table *);
370extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
371extern void cifs_crypto_shash_release(struct TCP_Server_Info *);
372extern int calc_seckey(struct TCP_Server_Info *);
373#ifdef CONFIG_CIFS_WEAK_PW_HASH 373#ifdef CONFIG_CIFS_WEAK_PW_HASH
374extern void calc_lanman_hash(const char *password, const char *cryptkey, 374extern void calc_lanman_hash(const char *password, const char *cryptkey,
375 bool encrypt, char *lnm_session_key); 375 bool encrypt, char *lnm_session_key);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index c65c3419dd37..4bda920d1f75 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -604,11 +604,14 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
604 else 604 else
605 rc = -EINVAL; 605 rc = -EINVAL;
606 606
607 if (server->sec_kerberos || server->sec_mskerberos) 607 if (server->secType == Kerberos) {
608 server->secType = Kerberos; 608 if (!server->sec_kerberos &&
609 else if (server->sec_ntlmssp) 609 !server->sec_mskerberos)
610 server->secType = RawNTLMSSP; 610 rc = -EOPNOTSUPP;
611 else 611 } else if (server->secType == RawNTLMSSP) {
612 if (!server->sec_ntlmssp)
613 rc = -EOPNOTSUPP;
614 } else
612 rc = -EOPNOTSUPP; 615 rc = -EOPNOTSUPP;
613 } 616 }
614 } else 617 } else
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 95c2ea67edfb..ec0ea4a43bdb 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1673,7 +1673,9 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
1673 MAX_USERNAME_SIZE)) 1673 MAX_USERNAME_SIZE))
1674 continue; 1674 continue;
1675 if (strlen(vol->username) != 0 && 1675 if (strlen(vol->username) != 0 &&
1676 strncmp(ses->password, vol->password, 1676 ses->password != NULL &&
1677 strncmp(ses->password,
1678 vol->password ? vol->password : "",
1677 MAX_PASSWORD_SIZE)) 1679 MAX_PASSWORD_SIZE))
1678 continue; 1680 continue;
1679 } 1681 }
@@ -1706,6 +1708,7 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
1706 CIFSSMBLogoff(xid, ses); 1708 CIFSSMBLogoff(xid, ses);
1707 _FreeXid(xid); 1709 _FreeXid(xid);
1708 } 1710 }
1711 cifs_crypto_shash_release(server);
1709 sesInfoFree(ses); 1712 sesInfoFree(ses);
1710 cifs_put_tcp_session(server); 1713 cifs_put_tcp_session(server);
1711} 1714}
@@ -1785,13 +1788,23 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
1785 ses->linux_uid = volume_info->linux_uid; 1788 ses->linux_uid = volume_info->linux_uid;
1786 ses->overrideSecFlg = volume_info->secFlg; 1789 ses->overrideSecFlg = volume_info->secFlg;
1787 1790
1791 rc = cifs_crypto_shash_allocate(server);
1792 if (rc) {
1793 cERROR(1, "could not setup hash structures rc %d", rc);
1794 goto get_ses_fail;
1795 }
1796 server->tilen = 0;
1797 server->tiblob = NULL;
1798
1788 mutex_lock(&ses->session_mutex); 1799 mutex_lock(&ses->session_mutex);
1789 rc = cifs_negotiate_protocol(xid, ses); 1800 rc = cifs_negotiate_protocol(xid, ses);
1790 if (!rc) 1801 if (!rc)
1791 rc = cifs_setup_session(xid, ses, volume_info->local_nls); 1802 rc = cifs_setup_session(xid, ses, volume_info->local_nls);
1792 mutex_unlock(&ses->session_mutex); 1803 mutex_unlock(&ses->session_mutex);
1793 if (rc) 1804 if (rc) {
1805 cifs_crypto_shash_release(ses->server);
1794 goto get_ses_fail; 1806 goto get_ses_fail;
1807 }
1795 1808
1796 /* success, put it on the list */ 1809 /* success, put it on the list */
1797 write_lock(&cifs_tcp_ses_lock); 1810 write_lock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 578d88c5b46e..f9ed0751cc12 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -305,8 +305,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
305 full_path = build_path_from_dentry(direntry); 305 full_path = build_path_from_dentry(direntry);
306 if (full_path == NULL) { 306 if (full_path == NULL) {
307 rc = -ENOMEM; 307 rc = -ENOMEM;
308 FreeXid(xid); 308 goto cifs_create_out;
309 return rc;
310 } 309 }
311 310
312 if (oplockEnabled) 311 if (oplockEnabled)
@@ -365,9 +364,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
365 364
366 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); 365 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
367 if (buf == NULL) { 366 if (buf == NULL) {
368 kfree(full_path); 367 rc = -ENOMEM;
369 FreeXid(xid); 368 goto cifs_create_out;
370 return -ENOMEM;
371 } 369 }
372 370
373 /* 371 /*
@@ -496,6 +494,11 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
496 struct cifsTconInfo *pTcon; 494 struct cifsTconInfo *pTcon;
497 char *full_path = NULL; 495 char *full_path = NULL;
498 struct inode *newinode = NULL; 496 struct inode *newinode = NULL;
497 int oplock = 0;
498 u16 fileHandle;
499 FILE_ALL_INFO *buf = NULL;
500 unsigned int bytes_written;
501 struct win_dev *pdev;
499 502
500 if (!old_valid_dev(device_number)) 503 if (!old_valid_dev(device_number))
501 return -EINVAL; 504 return -EINVAL;
@@ -506,9 +509,12 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
506 pTcon = cifs_sb->tcon; 509 pTcon = cifs_sb->tcon;
507 510
508 full_path = build_path_from_dentry(direntry); 511 full_path = build_path_from_dentry(direntry);
509 if (full_path == NULL) 512 if (full_path == NULL) {
510 rc = -ENOMEM; 513 rc = -ENOMEM;
511 else if (pTcon->unix_ext) { 514 goto mknod_out;
515 }
516
517 if (pTcon->unix_ext) {
512 struct cifs_unix_set_info_args args = { 518 struct cifs_unix_set_info_args args = {
513 .mode = mode & ~current_umask(), 519 .mode = mode & ~current_umask(),
514 .ctime = NO_CHANGE_64, 520 .ctime = NO_CHANGE_64,
@@ -527,87 +533,78 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
527 cifs_sb->local_nls, 533 cifs_sb->local_nls,
528 cifs_sb->mnt_cifs_flags & 534 cifs_sb->mnt_cifs_flags &
529 CIFS_MOUNT_MAP_SPECIAL_CHR); 535 CIFS_MOUNT_MAP_SPECIAL_CHR);
536 if (rc)
537 goto mknod_out;
530 538
531 if (!rc) { 539 rc = cifs_get_inode_info_unix(&newinode, full_path,
532 rc = cifs_get_inode_info_unix(&newinode, full_path,
533 inode->i_sb, xid); 540 inode->i_sb, xid);
534 if (pTcon->nocase) 541 if (pTcon->nocase)
535 direntry->d_op = &cifs_ci_dentry_ops; 542 direntry->d_op = &cifs_ci_dentry_ops;
536 else 543 else
537 direntry->d_op = &cifs_dentry_ops; 544 direntry->d_op = &cifs_dentry_ops;
538 if (rc == 0)
539 d_instantiate(direntry, newinode);
540 }
541 } else {
542 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
543 int oplock = 0;
544 u16 fileHandle;
545 FILE_ALL_INFO *buf;
546 545
547 cFYI(1, "sfu compat create special file"); 546 if (rc == 0)
547 d_instantiate(direntry, newinode);
548 goto mknod_out;
549 }
548 550
549 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); 551 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
550 if (buf == NULL) { 552 goto mknod_out;
551 kfree(full_path);
552 rc = -ENOMEM;
553 FreeXid(xid);
554 return rc;
555 }
556 553
557 rc = CIFSSMBOpen(xid, pTcon, full_path, 554
558 FILE_CREATE, /* fail if exists */ 555 cFYI(1, "sfu compat create special file");
559 GENERIC_WRITE /* BB would 556
560 WRITE_OWNER | WRITE_DAC be better? */, 557 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
561 /* Create a file and set the 558 if (buf == NULL) {
562 file attribute to SYSTEM */ 559 kfree(full_path);
563 CREATE_NOT_DIR | CREATE_OPTION_SPECIAL, 560 rc = -ENOMEM;
564 &fileHandle, &oplock, buf, 561 FreeXid(xid);
565 cifs_sb->local_nls, 562 return rc;
566 cifs_sb->mnt_cifs_flags &
567 CIFS_MOUNT_MAP_SPECIAL_CHR);
568
569 /* BB FIXME - add handling for backlevel servers
570 which need legacy open and check for all
571 calls to SMBOpen for fallback to SMBLeagcyOpen */
572 if (!rc) {
573 /* BB Do not bother to decode buf since no
574 local inode yet to put timestamps in,
575 but we can reuse it safely */
576 unsigned int bytes_written;
577 struct win_dev *pdev;
578 pdev = (struct win_dev *)buf;
579 if (S_ISCHR(mode)) {
580 memcpy(pdev->type, "IntxCHR", 8);
581 pdev->major =
582 cpu_to_le64(MAJOR(device_number));
583 pdev->minor =
584 cpu_to_le64(MINOR(device_number));
585 rc = CIFSSMBWrite(xid, pTcon,
586 fileHandle,
587 sizeof(struct win_dev),
588 0, &bytes_written, (char *)pdev,
589 NULL, 0);
590 } else if (S_ISBLK(mode)) {
591 memcpy(pdev->type, "IntxBLK", 8);
592 pdev->major =
593 cpu_to_le64(MAJOR(device_number));
594 pdev->minor =
595 cpu_to_le64(MINOR(device_number));
596 rc = CIFSSMBWrite(xid, pTcon,
597 fileHandle,
598 sizeof(struct win_dev),
599 0, &bytes_written, (char *)pdev,
600 NULL, 0);
601 } /* else if(S_ISFIFO */
602 CIFSSMBClose(xid, pTcon, fileHandle);
603 d_drop(direntry);
604 }
605 kfree(buf);
606 /* add code here to set EAs */
607 }
608 } 563 }
609 564
565 /* FIXME: would WRITE_OWNER | WRITE_DAC be better? */
566 rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE,
567 GENERIC_WRITE, CREATE_NOT_DIR | CREATE_OPTION_SPECIAL,
568 &fileHandle, &oplock, buf, cifs_sb->local_nls,
569 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
570 if (rc)
571 goto mknod_out;
572
573 /* BB Do not bother to decode buf since no local inode yet to put
574 * timestamps in, but we can reuse it safely */
575
576 pdev = (struct win_dev *)buf;
577 if (S_ISCHR(mode)) {
578 memcpy(pdev->type, "IntxCHR", 8);
579 pdev->major =
580 cpu_to_le64(MAJOR(device_number));
581 pdev->minor =
582 cpu_to_le64(MINOR(device_number));
583 rc = CIFSSMBWrite(xid, pTcon,
584 fileHandle,
585 sizeof(struct win_dev),
586 0, &bytes_written, (char *)pdev,
587 NULL, 0);
588 } else if (S_ISBLK(mode)) {
589 memcpy(pdev->type, "IntxBLK", 8);
590 pdev->major =
591 cpu_to_le64(MAJOR(device_number));
592 pdev->minor =
593 cpu_to_le64(MINOR(device_number));
594 rc = CIFSSMBWrite(xid, pTcon,
595 fileHandle,
596 sizeof(struct win_dev),
597 0, &bytes_written, (char *)pdev,
598 NULL, 0);
599 } /* else if (S_ISFIFO) */
600 CIFSSMBClose(xid, pTcon, fileHandle);
601 d_drop(direntry);
602
603 /* FIXME: add code here to set EAs */
604
605mknod_out:
610 kfree(full_path); 606 kfree(full_path);
607 kfree(buf);
611 FreeXid(xid); 608 FreeXid(xid);
612 return rc; 609 return rc;
613} 610}
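
The dir.c rework above replaces the duplicated kfree()/FreeXid()/return sequences in cifs_create() and cifs_mknod() with single cifs_create_out/mknod_out exit labels. Reduced to a skeleton (the sizes and helper below are placeholders, not part of the patch):

/* Skeleton of the single-exit cleanup style the cifs_mknod() rewrite adopts:
 * every failure path sets rc and jumps to one label that frees whatever has
 * been set up so far. */
#include <linux/slab.h>

static int example_single_exit(void)
{
        char *full_path = NULL;
        void *buf = NULL;
        int rc = 0;

        full_path = kmalloc(256, GFP_KERNEL);
        if (full_path == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        buf = kmalloc(256, GFP_KERNEL);
        if (buf == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        /* ... real work goes here, jumping to out on any failure ... */

out:
        kfree(buf);        /* kfree(NULL) is a no-op, so this is unconditional */
        kfree(full_path);
        return rc;
}
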
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index db11fdef0e92..de748c652d11 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -242,8 +242,7 @@ int cifs_open(struct inode *inode, struct file *file)
242 full_path = build_path_from_dentry(file->f_path.dentry); 242 full_path = build_path_from_dentry(file->f_path.dentry);
243 if (full_path == NULL) { 243 if (full_path == NULL) {
244 rc = -ENOMEM; 244 rc = -ENOMEM;
245 FreeXid(xid); 245 goto out;
246 return rc;
247 } 246 }
248 247
249 cFYI(1, "inode = 0x%p file flags are 0x%x for %s", 248 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 4bc47e5b5f29..86a164f08a74 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -834,7 +834,7 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
834 xid, NULL); 834 xid, NULL);
835 835
836 if (!inode) 836 if (!inode)
837 return ERR_PTR(-ENOMEM); 837 return ERR_PTR(rc);
838 838
839#ifdef CONFIG_CIFS_FSCACHE 839#ifdef CONFIG_CIFS_FSCACHE
840 /* populate tcon->resource_id */ 840 /* populate tcon->resource_id */
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 49c9a4e75319..1db0f0746a5b 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -61,6 +61,19 @@
61#define NTLMSSP_NEGOTIATE_KEY_XCH 0x40000000 61#define NTLMSSP_NEGOTIATE_KEY_XCH 0x40000000
62#define NTLMSSP_NEGOTIATE_56 0x80000000 62#define NTLMSSP_NEGOTIATE_56 0x80000000
63 63
64/* Define AV Pair Field IDs */
65#define NTLMSSP_AV_EOL 0
66#define NTLMSSP_AV_NB_COMPUTER_NAME 1
67#define NTLMSSP_AV_NB_DOMAIN_NAME 2
68#define NTLMSSP_AV_DNS_COMPUTER_NAME 3
69#define NTLMSSP_AV_DNS_DOMAIN_NAME 4
70#define NTLMSSP_AV_DNS_TREE_NAME 5
71#define NTLMSSP_AV_FLAGS 6
72#define NTLMSSP_AV_TIMESTAMP 7
73#define NTLMSSP_AV_RESTRICTION 8
74#define NTLMSSP_AV_TARGET_NAME 9
75#define NTLMSSP_AV_CHANNEL_BINDINGS 10
76
64/* Although typedefs are not commonly used for structure definitions */ 77/* Although typedefs are not commonly used for structure definitions */
65/* in the Linux kernel, in this particular case they are useful */ 78/* in the Linux kernel, in this particular case they are useful */
66/* to more closely match the standards document for NTLMSSP from */ 79/* to more closely match the standards document for NTLMSSP from */
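
The AV pair IDs added above name the type/length/value entries that make up the challenge's target info blob; find_domain_name() in the cifsencrypt.c hunk walks them to recover the server's domain. A stripped-down sketch of that walk (the 2+2 byte header mirrors struct ntlmssp2_name; the struct name and bounds checks here are illustrative):

/* Sketch: iterate NTLMSSP AV pairs (type, length, value) until NTLMSSP_AV_EOL,
 * as find_domain_name() does over ses->server->tiblob. */
#include <linux/kernel.h>
#include <linux/types.h>

struct example_av_pair {
        __le16 type;
        __le16 length;
} __attribute__((packed));

static void example_walk_av_pairs(unsigned char *blob, unsigned int blob_len)
{
        unsigned char *p = blob;
        unsigned char *end = blob + blob_len;

        while (p + sizeof(struct example_av_pair) <= end) {
                struct example_av_pair *av = (struct example_av_pair *)p;
                unsigned int type = le16_to_cpu(av->type);
                unsigned int len = le16_to_cpu(av->length);

                if (type == NTLMSSP_AV_EOL)
                        break;
                p += sizeof(struct example_av_pair);    /* skip the header */
                if (p + len > end)
                        break;                          /* malformed blob */
                if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
                        /* value: len bytes of UCS-2 domain name at p */
                }
                p += len;                               /* next pair */
        }
}
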
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 0a57cb7db5dd..795095f4eac6 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -383,6 +383,9 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
383static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, 383static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
384 struct cifsSesInfo *ses) 384 struct cifsSesInfo *ses)
385{ 385{
 386 unsigned int tioffset; /* challenge message target info area */
 387 unsigned int tilen; /* challenge message target info area length */
388
386 CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr; 389 CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
387 390
388 if (blob_len < sizeof(CHALLENGE_MESSAGE)) { 391 if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
@@ -405,6 +408,20 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
405 /* BB spec says that if AvId field of MsvAvTimestamp is populated then 408 /* BB spec says that if AvId field of MsvAvTimestamp is populated then
406 we must set the MIC field of the AUTHENTICATE_MESSAGE */ 409 we must set the MIC field of the AUTHENTICATE_MESSAGE */
407 410
411 ses->server->ntlmssp.server_flags = le32_to_cpu(pblob->NegotiateFlags);
412
413 tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
414 tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
415 ses->server->tilen = tilen;
416 if (tilen) {
417 ses->server->tiblob = kmalloc(tilen, GFP_KERNEL);
418 if (!ses->server->tiblob) {
419 cERROR(1, "Challenge target info allocation failure");
420 return -ENOMEM;
421 }
422 memcpy(ses->server->tiblob, bcc_ptr + tioffset, tilen);
423 }
424
408 return 0; 425 return 0;
409} 426}
410 427
@@ -425,12 +442,13 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
425 /* BB is NTLMV2 session security format easier to use here? */ 442 /* BB is NTLMV2 session security format easier to use here? */
426 flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | 443 flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET |
427 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | 444 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
428 NTLMSSP_NEGOTIATE_NT_ONLY | NTLMSSP_NEGOTIATE_NTLM; 445 NTLMSSP_NEGOTIATE_NTLM;
429 if (ses->server->secMode & 446 if (ses->server->secMode &
430 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 447 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
431 flags |= NTLMSSP_NEGOTIATE_SIGN; 448 flags |= NTLMSSP_NEGOTIATE_SIGN |
432 if (ses->server->secMode & SECMODE_SIGN_REQUIRED) 449 NTLMSSP_NEGOTIATE_KEY_XCH |
433 flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN; 450 NTLMSSP_NEGOTIATE_EXTENDED_SEC;
451 }
434 452
435 sec_blob->NegotiateFlags |= cpu_to_le32(flags); 453 sec_blob->NegotiateFlags |= cpu_to_le32(flags);
436 454
@@ -451,10 +469,12 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
451 struct cifsSesInfo *ses, 469 struct cifsSesInfo *ses,
452 const struct nls_table *nls_cp, bool first) 470 const struct nls_table *nls_cp, bool first)
453{ 471{
472 int rc;
473 unsigned int size;
454 AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer; 474 AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
455 __u32 flags; 475 __u32 flags;
456 unsigned char *tmp; 476 unsigned char *tmp;
457 char ntlm_session_key[CIFS_SESS_KEY_SIZE]; 477 struct ntlmv2_resp ntlmv2_response = {};
458 478
459 memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); 479 memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
460 sec_blob->MessageType = NtLmAuthenticate; 480 sec_blob->MessageType = NtLmAuthenticate;
@@ -477,19 +497,25 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
477 sec_blob->LmChallengeResponse.Length = 0; 497 sec_blob->LmChallengeResponse.Length = 0;
478 sec_blob->LmChallengeResponse.MaximumLength = 0; 498 sec_blob->LmChallengeResponse.MaximumLength = 0;
479 499
480 /* calculate session key, BB what about adding similar ntlmv2 path? */
481 SMBNTencrypt(ses->password, ses->server->cryptKey, ntlm_session_key);
482 if (first)
483 cifs_calculate_mac_key(&ses->server->mac_signing_key,
484 ntlm_session_key, ses->password);
485
486 memcpy(tmp, ntlm_session_key, CIFS_SESS_KEY_SIZE);
487 sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer); 500 sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
488 sec_blob->NtChallengeResponse.Length = cpu_to_le16(CIFS_SESS_KEY_SIZE); 501 rc = setup_ntlmv2_rsp(ses, (char *)&ntlmv2_response, nls_cp);
489 sec_blob->NtChallengeResponse.MaximumLength = 502 if (rc) {
490 cpu_to_le16(CIFS_SESS_KEY_SIZE); 503 cERROR(1, "error rc: %d during ntlmssp ntlmv2 setup", rc);
504 goto setup_ntlmv2_ret;
505 }
506 size = sizeof(struct ntlmv2_resp);
507 memcpy(tmp, (char *)&ntlmv2_response, size);
508 tmp += size;
509 if (ses->server->tilen > 0) {
510 memcpy(tmp, ses->server->tiblob, ses->server->tilen);
511 tmp += ses->server->tilen;
512 } else
513 ses->server->tilen = 0;
491 514
492 tmp += CIFS_SESS_KEY_SIZE; 515 sec_blob->NtChallengeResponse.Length = cpu_to_le16(size +
516 ses->server->tilen);
517 sec_blob->NtChallengeResponse.MaximumLength =
518 cpu_to_le16(size + ses->server->tilen);
493 519
494 if (ses->domainName == NULL) { 520 if (ses->domainName == NULL) {
495 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); 521 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
@@ -501,7 +527,6 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
501 len = cifs_strtoUCS((__le16 *)tmp, ses->domainName, 527 len = cifs_strtoUCS((__le16 *)tmp, ses->domainName,
502 MAX_USERNAME_SIZE, nls_cp); 528 MAX_USERNAME_SIZE, nls_cp);
503 len *= 2; /* unicode is 2 bytes each */ 529 len *= 2; /* unicode is 2 bytes each */
504 len += 2; /* trailing null */
505 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); 530 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
506 sec_blob->DomainName.Length = cpu_to_le16(len); 531 sec_blob->DomainName.Length = cpu_to_le16(len);
507 sec_blob->DomainName.MaximumLength = cpu_to_le16(len); 532 sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
@@ -518,7 +543,6 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
518 len = cifs_strtoUCS((__le16 *)tmp, ses->userName, 543 len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
519 MAX_USERNAME_SIZE, nls_cp); 544 MAX_USERNAME_SIZE, nls_cp);
520 len *= 2; /* unicode is 2 bytes each */ 545 len *= 2; /* unicode is 2 bytes each */
521 len += 2; /* trailing null */
522 sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); 546 sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
523 sec_blob->UserName.Length = cpu_to_le16(len); 547 sec_blob->UserName.Length = cpu_to_le16(len);
524 sec_blob->UserName.MaximumLength = cpu_to_le16(len); 548 sec_blob->UserName.MaximumLength = cpu_to_le16(len);
@@ -530,9 +554,26 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
530 sec_blob->WorkstationName.MaximumLength = 0; 554 sec_blob->WorkstationName.MaximumLength = 0;
531 tmp += 2; 555 tmp += 2;
532 556
533 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); 557 if ((ses->server->ntlmssp.server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
534 sec_blob->SessionKey.Length = 0; 558 !calc_seckey(ses->server)) {
535 sec_blob->SessionKey.MaximumLength = 0; 559 memcpy(tmp, ses->server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
560 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
561 sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
562 sec_blob->SessionKey.MaximumLength =
563 cpu_to_le16(CIFS_CPHTXT_SIZE);
564 tmp += CIFS_CPHTXT_SIZE;
565 } else {
566 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
567 sec_blob->SessionKey.Length = 0;
568 sec_blob->SessionKey.MaximumLength = 0;
569 }
570
571 ses->server->sequence_number = 0;
572
573setup_ntlmv2_ret:
574 if (ses->server->tilen > 0)
575 kfree(ses->server->tiblob);
576
536 return tmp - pbuffer; 577 return tmp - pbuffer;
537} 578}
538 579
@@ -546,15 +587,14 @@ static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB,
546 return; 587 return;
547} 588}
548 589
549static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB, 590static int setup_ntlmssp_auth_req(char *ntlmsspblob,
550 struct cifsSesInfo *ses, 591 struct cifsSesInfo *ses,
551 const struct nls_table *nls, bool first_time) 592 const struct nls_table *nls, bool first_time)
552{ 593{
553 int bloblen; 594 int bloblen;
554 595
555 bloblen = build_ntlmssp_auth_blob(&pSMB->req.SecurityBlob[0], ses, nls, 596 bloblen = build_ntlmssp_auth_blob(ntlmsspblob, ses, nls,
556 first_time); 597 first_time);
557 pSMB->req.SecurityBlobLength = cpu_to_le16(bloblen);
558 598
559 return bloblen; 599 return bloblen;
560} 600}
@@ -690,7 +730,7 @@ ssetup_ntlmssp_authenticate:
690 730
691 if (first_time) /* should this be moved into common code 731 if (first_time) /* should this be moved into common code
692 with similar ntlmv2 path? */ 732 with similar ntlmv2 path? */
693 cifs_calculate_mac_key(&ses->server->mac_signing_key, 733 cifs_calculate_session_key(&ses->server->session_key,
694 ntlm_session_key, ses->password); 734 ntlm_session_key, ses->password);
695 /* copy session key */ 735 /* copy session key */
696 736
@@ -729,12 +769,21 @@ ssetup_ntlmssp_authenticate:
729 cpu_to_le16(sizeof(struct ntlmv2_resp)); 769 cpu_to_le16(sizeof(struct ntlmv2_resp));
730 770
731 /* calculate session key */ 771 /* calculate session key */
732 setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp); 772 rc = setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
773 if (rc) {
774 kfree(v2_sess_key);
775 goto ssetup_exit;
776 }
733 /* FIXME: calculate MAC key */ 777 /* FIXME: calculate MAC key */
734 memcpy(bcc_ptr, (char *)v2_sess_key, 778 memcpy(bcc_ptr, (char *)v2_sess_key,
735 sizeof(struct ntlmv2_resp)); 779 sizeof(struct ntlmv2_resp));
736 bcc_ptr += sizeof(struct ntlmv2_resp); 780 bcc_ptr += sizeof(struct ntlmv2_resp);
737 kfree(v2_sess_key); 781 kfree(v2_sess_key);
782 if (ses->server->tilen > 0) {
783 memcpy(bcc_ptr, ses->server->tiblob,
784 ses->server->tilen);
785 bcc_ptr += ses->server->tilen;
786 }
738 if (ses->capabilities & CAP_UNICODE) { 787 if (ses->capabilities & CAP_UNICODE) {
739 if (iov[0].iov_len % 2) { 788 if (iov[0].iov_len % 2) {
740 *bcc_ptr = 0; 789 *bcc_ptr = 0;
@@ -765,15 +814,15 @@ ssetup_ntlmssp_authenticate:
765 } 814 }
766 /* bail out if key is too long */ 815 /* bail out if key is too long */
767 if (msg->sesskey_len > 816 if (msg->sesskey_len >
768 sizeof(ses->server->mac_signing_key.data.krb5)) { 817 sizeof(ses->server->session_key.data.krb5)) {
769 cERROR(1, "Kerberos signing key too long (%u bytes)", 818 cERROR(1, "Kerberos signing key too long (%u bytes)",
770 msg->sesskey_len); 819 msg->sesskey_len);
771 rc = -EOVERFLOW; 820 rc = -EOVERFLOW;
772 goto ssetup_exit; 821 goto ssetup_exit;
773 } 822 }
774 if (first_time) { 823 if (first_time) {
775 ses->server->mac_signing_key.len = msg->sesskey_len; 824 ses->server->session_key.len = msg->sesskey_len;
776 memcpy(ses->server->mac_signing_key.data.krb5, 825 memcpy(ses->server->session_key.data.krb5,
777 msg->data, msg->sesskey_len); 826 msg->data, msg->sesskey_len);
778 } 827 }
779 pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; 828 pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
@@ -815,12 +864,28 @@ ssetup_ntlmssp_authenticate:
815 if (phase == NtLmNegotiate) { 864 if (phase == NtLmNegotiate) {
816 setup_ntlmssp_neg_req(pSMB, ses); 865 setup_ntlmssp_neg_req(pSMB, ses);
817 iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE); 866 iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
867 iov[1].iov_base = &pSMB->req.SecurityBlob[0];
818 } else if (phase == NtLmAuthenticate) { 868 } else if (phase == NtLmAuthenticate) {
819 int blob_len; 869 int blob_len;
820 blob_len = setup_ntlmssp_auth_req(pSMB, ses, 870 char *ntlmsspblob;
821 nls_cp, 871
822 first_time); 872 ntlmsspblob = kmalloc(5 *
873 sizeof(struct _AUTHENTICATE_MESSAGE),
874 GFP_KERNEL);
875 if (!ntlmsspblob) {
876 cERROR(1, "Can't allocate NTLMSSP");
877 rc = -ENOMEM;
878 goto ssetup_exit;
879 }
880
881 blob_len = setup_ntlmssp_auth_req(ntlmsspblob,
882 ses,
883 nls_cp,
884 first_time);
823 iov[1].iov_len = blob_len; 885 iov[1].iov_len = blob_len;
886 iov[1].iov_base = ntlmsspblob;
887 pSMB->req.SecurityBlobLength =
888 cpu_to_le16(blob_len);
824 /* Make sure that we tell the server that we 889 /* Make sure that we tell the server that we
825 are using the uid that it just gave us back 890 are using the uid that it just gave us back
826 on the response (challenge) */ 891 on the response (challenge) */
@@ -830,7 +895,6 @@ ssetup_ntlmssp_authenticate:
830 rc = -ENOSYS; 895 rc = -ENOSYS;
831 goto ssetup_exit; 896 goto ssetup_exit;
832 } 897 }
833 iov[1].iov_base = &pSMB->req.SecurityBlob[0];
834 /* unicode strings must be word aligned */ 898 /* unicode strings must be word aligned */
835 if ((iov[0].iov_len + iov[1].iov_len) % 2) { 899 if ((iov[0].iov_len + iov[1].iov_len) % 2) {
836 *bcc_ptr = 0; 900 *bcc_ptr = 0;
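
The sess.c changes above allocate the NTLMSSP AUTHENTICATE blob separately, append the full NTLMv2 response followed by the server's target info after the fixed header, and describe each variable-length field through its Length/MaximumLength/BufferOffset triple. The bookkeeping for one such field, as a hedged sketch (the struct and helper names are illustrative, not part of the patch):

/* Sketch: append one variable-length value to an NTLMSSP blob and fill in the
 * security-buffer triple that points at it, as build_ntlmssp_auth_blob()
 * above does for NtChallengeResponse, DomainName and UserName. */
#include <linux/string.h>
#include <linux/types.h>

struct example_sec_buf {
        __le16 Length;
        __le16 MaximumLength;
        __le32 BufferOffset;    /* offset of the value from the blob start */
} __attribute__((packed));

static unsigned char *example_append_field(unsigned char *blob_start,
                                           unsigned char *cursor,
                                           struct example_sec_buf *field,
                                           const void *value, u16 len)
{
        field->BufferOffset = cpu_to_le32(cursor - blob_start);
        field->Length = cpu_to_le16(len);
        field->MaximumLength = cpu_to_le16(len);
        memcpy(cursor, value, len);
        return cursor + len;    /* the next field is laid down here */
}
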
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 82f78c4d6978..e0588cdf4cc5 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -543,7 +543,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
543 (ses->server->secMode & (SECMODE_SIGN_REQUIRED | 543 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
544 SECMODE_SIGN_ENABLED))) { 544 SECMODE_SIGN_ENABLED))) {
545 rc = cifs_verify_signature(midQ->resp_buf, 545 rc = cifs_verify_signature(midQ->resp_buf,
546 &ses->server->mac_signing_key, 546 ses->server,
547 midQ->sequence_number+1); 547 midQ->sequence_number+1);
548 if (rc) { 548 if (rc) {
549 cERROR(1, "Unexpected SMB signature"); 549 cERROR(1, "Unexpected SMB signature");
@@ -731,7 +731,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
731 (ses->server->secMode & (SECMODE_SIGN_REQUIRED | 731 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
732 SECMODE_SIGN_ENABLED))) { 732 SECMODE_SIGN_ENABLED))) {
733 rc = cifs_verify_signature(out_buf, 733 rc = cifs_verify_signature(out_buf,
734 &ses->server->mac_signing_key, 734 ses->server,
735 midQ->sequence_number+1); 735 midQ->sequence_number+1);
736 if (rc) { 736 if (rc) {
737 cERROR(1, "Unexpected SMB signature"); 737 cERROR(1, "Unexpected SMB signature");
@@ -981,7 +981,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
981 (ses->server->secMode & (SECMODE_SIGN_REQUIRED | 981 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
982 SECMODE_SIGN_ENABLED))) { 982 SECMODE_SIGN_ENABLED))) {
983 rc = cifs_verify_signature(out_buf, 983 rc = cifs_verify_signature(out_buf,
984 &ses->server->mac_signing_key, 984 ses->server,
985 midQ->sequence_number+1); 985 midQ->sequence_number+1);
986 if (rc) { 986 if (rc) {
987 cERROR(1, "Unexpected SMB signature"); 987 cERROR(1, "Unexpected SMB signature");
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index a2e3b562e65d..cbadc1bee6e7 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1793,7 +1793,7 @@ struct kmem_cache *ecryptfs_key_tfm_cache;
1793static struct list_head key_tfm_list; 1793static struct list_head key_tfm_list;
1794struct mutex key_tfm_list_mutex; 1794struct mutex key_tfm_list_mutex;
1795 1795
1796int ecryptfs_init_crypto(void) 1796int __init ecryptfs_init_crypto(void)
1797{ 1797{
1798 mutex_init(&key_tfm_list_mutex); 1798 mutex_init(&key_tfm_list_mutex);
1799 INIT_LIST_HEAD(&key_tfm_list); 1799 INIT_LIST_HEAD(&key_tfm_list);
@@ -2169,7 +2169,6 @@ int ecryptfs_encrypt_and_encode_filename(
2169 (ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE 2169 (ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE
2170 + encoded_name_no_prefix_size); 2170 + encoded_name_no_prefix_size);
2171 (*encoded_name)[(*encoded_name_size)] = '\0'; 2171 (*encoded_name)[(*encoded_name_size)] = '\0';
2172 (*encoded_name_size)++;
2173 } else { 2172 } else {
2174 rc = -EOPNOTSUPP; 2173 rc = -EOPNOTSUPP;
2175 } 2174 }
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 6c55113e7222..3fbc94203380 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -349,7 +349,7 @@ out:
349 349
350/** 350/**
351 * ecryptfs_new_lower_dentry 351 * ecryptfs_new_lower_dentry
352 * @ename: The name of the new dentry. 352 * @name: The name of the new dentry.
353 * @lower_dir_dentry: Parent directory of the new dentry. 353 * @lower_dir_dentry: Parent directory of the new dentry.
354 * @nd: nameidata from last lookup. 354 * @nd: nameidata from last lookup.
355 * 355 *
@@ -386,20 +386,19 @@ ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry,
386 * ecryptfs_lookup_one_lower 386 * ecryptfs_lookup_one_lower
387 * @ecryptfs_dentry: The eCryptfs dentry that we are looking up 387 * @ecryptfs_dentry: The eCryptfs dentry that we are looking up
388 * @lower_dir_dentry: lower parent directory 388 * @lower_dir_dentry: lower parent directory
389 * @name: lower file name
389 * 390 *
390 * Get the lower dentry from vfs. If lower dentry does not exist yet, 391 * Get the lower dentry from vfs. If lower dentry does not exist yet,
391 * create it. 392 * create it.
392 */ 393 */
393static struct dentry * 394static struct dentry *
394ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry, 395ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry,
395 struct dentry *lower_dir_dentry) 396 struct dentry *lower_dir_dentry, struct qstr *name)
396{ 397{
397 struct nameidata nd; 398 struct nameidata nd;
398 struct vfsmount *lower_mnt; 399 struct vfsmount *lower_mnt;
399 struct qstr *name;
400 int err; 400 int err;
401 401
402 name = &ecryptfs_dentry->d_name;
403 lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt( 402 lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
404 ecryptfs_dentry->d_parent)); 403 ecryptfs_dentry->d_parent));
405 err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd); 404 err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd);
@@ -434,6 +433,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
434 size_t encrypted_and_encoded_name_size; 433 size_t encrypted_and_encoded_name_size;
435 struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; 434 struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
436 struct dentry *lower_dir_dentry, *lower_dentry; 435 struct dentry *lower_dir_dentry, *lower_dentry;
436 struct qstr lower_name;
437 int rc = 0; 437 int rc = 0;
438 438
439 ecryptfs_dentry->d_op = &ecryptfs_dops; 439 ecryptfs_dentry->d_op = &ecryptfs_dops;
@@ -444,9 +444,17 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
444 goto out_d_drop; 444 goto out_d_drop;
445 } 445 }
446 lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); 446 lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
447 447 lower_name.name = ecryptfs_dentry->d_name.name;
448 lower_name.len = ecryptfs_dentry->d_name.len;
449 lower_name.hash = ecryptfs_dentry->d_name.hash;
450 if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
451 rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
452 &lower_name);
453 if (rc < 0)
454 goto out_d_drop;
455 }
448 lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry, 456 lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
449 lower_dir_dentry); 457 lower_dir_dentry, &lower_name);
450 if (IS_ERR(lower_dentry)) { 458 if (IS_ERR(lower_dentry)) {
451 rc = PTR_ERR(lower_dentry); 459 rc = PTR_ERR(lower_dentry);
452 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " 460 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
@@ -471,8 +479,17 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
471 "filename; rc = [%d]\n", __func__, rc); 479 "filename; rc = [%d]\n", __func__, rc);
472 goto out_d_drop; 480 goto out_d_drop;
473 } 481 }
482 lower_name.name = encrypted_and_encoded_name;
483 lower_name.len = encrypted_and_encoded_name_size;
484 lower_name.hash = full_name_hash(lower_name.name, lower_name.len);
485 if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
486 rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
487 &lower_name);
488 if (rc < 0)
489 goto out_d_drop;
490 }
474 lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry, 491 lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
475 lower_dir_dentry); 492 lower_dir_dentry, &lower_name);
476 if (IS_ERR(lower_dentry)) { 493 if (IS_ERR(lower_dentry)) {
477 rc = PTR_ERR(lower_dentry); 494 rc = PTR_ERR(lower_dentry);
478 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " 495 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
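
The ecryptfs_lookup() changes above build a struct qstr for the lower name and, when the lower filesystem defines d_hash, let it rehash that name before the lower lookup is issued. Condensed into a sketch against the 2.6-era dentry operations used in the hunk (the helper name is illustrative):

/* Sketch: prepare a qstr for a lower-filesystem lookup, honouring the lower
 * directory's d_hash() as the ecryptfs_lookup() changes above do. */
#include <linux/dcache.h>

static int example_prepare_lower_name(struct dentry *lower_dir_dentry,
                                      const unsigned char *name,
                                      unsigned int len,
                                      struct qstr *lower_name)
{
        lower_name->name = name;
        lower_name->len = len;
        lower_name->hash = full_name_hash(name, len);

        if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash)
                return lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
                                                      lower_name);
        return 0;
}

A negative return from d_hash() is treated as fatal by the caller, matching the rc < 0 checks in the hunk above.
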
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 89c5476506ef..73811cfa2ea4 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -515,6 +515,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
515 if (!s) { 515 if (!s) {
516 printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc " 516 printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
517 "[%zd] bytes of kernel memory\n", __func__, sizeof(*s)); 517 "[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
518 rc = -ENOMEM;
518 goto out; 519 goto out;
519 } 520 }
520 s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 521 s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -806,6 +807,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
806 if (!s) { 807 if (!s) {
807 printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc " 808 printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
808 "[%zd] bytes of kernel memory\n", __func__, sizeof(*s)); 809 "[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
810 rc = -ENOMEM;
809 goto out; 811 goto out;
810 } 812 }
811 s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 813 s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index d8c3a373aafa..0851ab6980f5 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -86,7 +86,7 @@ out:
86 return 0; 86 return 0;
87} 87}
88 88
89int ecryptfs_init_kthread(void) 89int __init ecryptfs_init_kthread(void)
90{ 90{
91 int rc = 0; 91 int rc = 0;
92 92
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index bcb68c0cb1f0..ab2248090515 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -473,7 +473,7 @@ sleep:
473 return rc; 473 return rc;
474} 474}
475 475
476int ecryptfs_init_messaging(void) 476int __init ecryptfs_init_messaging(void)
477{ 477{
478 int i; 478 int i;
479 int rc = 0; 479 int rc = 0;
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index 3745f612bcd4..00208c3d7e92 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -500,7 +500,7 @@ static struct miscdevice ecryptfs_miscdev = {
500 * 500 *
501 * Returns zero on success; non-zero otherwise 501 * Returns zero on success; non-zero otherwise
502 */ 502 */
503int ecryptfs_init_ecryptfs_miscdev(void) 503int __init ecryptfs_init_ecryptfs_miscdev(void)
504{ 504{
505 int rc; 505 int rc;
506 506
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 2e7357104cfd..3dfef0623968 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -2450,14 +2450,13 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
2450static __be32 2450static __be32
2451nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open) 2451nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
2452{ 2452{
2453 u32 op_share_access, new_access; 2453 u32 op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
2454 bool new_access;
2454 __be32 status; 2455 __be32 status;
2455 2456
2456 set_access(&new_access, stp->st_access_bmap); 2457 new_access = !test_bit(op_share_access, &stp->st_access_bmap);
2457 new_access = (~new_access) & open->op_share_access & ~NFS4_SHARE_WANT_MASK;
2458
2459 if (new_access) { 2458 if (new_access) {
2460 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, new_access); 2459 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, op_share_access);
2461 if (status) 2460 if (status)
2462 return status; 2461 return status;
2463 } 2462 }
@@ -2470,7 +2469,6 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
2470 return status; 2469 return status;
2471 } 2470 }
2472 /* remember the open */ 2471 /* remember the open */
2473 op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
2474 __set_bit(op_share_access, &stp->st_access_bmap); 2472 __set_bit(op_share_access, &stp->st_access_bmap);
2475 __set_bit(open->op_share_deny, &stp->st_deny_bmap); 2473 __set_bit(open->op_share_deny, &stp->st_deny_bmap);
2476 2474
@@ -2983,7 +2981,6 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
2983 *filpp = find_readable_file(stp->st_file); 2981 *filpp = find_readable_file(stp->st_file);
2984 else 2982 else
2985 *filpp = find_writeable_file(stp->st_file); 2983 *filpp = find_writeable_file(stp->st_file);
2986 BUG_ON(!*filpp); /* assured by check_openmode */
2987 } 2984 }
2988 } 2985 }
2989 status = nfs_ok; 2986 status = nfs_ok;
@@ -3561,7 +3558,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3561 struct nfs4_stateowner *open_sop = NULL; 3558 struct nfs4_stateowner *open_sop = NULL;
3562 struct nfs4_stateowner *lock_sop = NULL; 3559 struct nfs4_stateowner *lock_sop = NULL;
3563 struct nfs4_stateid *lock_stp; 3560 struct nfs4_stateid *lock_stp;
3564 struct file *filp; 3561 struct nfs4_file *fp;
3562 struct file *filp = NULL;
3565 struct file_lock file_lock; 3563 struct file_lock file_lock;
3566 struct file_lock conflock; 3564 struct file_lock conflock;
3567 __be32 status = 0; 3565 __be32 status = 0;
@@ -3591,7 +3589,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3591 * lock stateid. 3589 * lock stateid.
3592 */ 3590 */
3593 struct nfs4_stateid *open_stp = NULL; 3591 struct nfs4_stateid *open_stp = NULL;
3594 struct nfs4_file *fp;
3595 3592
3596 status = nfserr_stale_clientid; 3593 status = nfserr_stale_clientid;
3597 if (!nfsd4_has_session(cstate) && 3594 if (!nfsd4_has_session(cstate) &&
@@ -3634,6 +3631,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3634 if (status) 3631 if (status)
3635 goto out; 3632 goto out;
3636 lock_sop = lock->lk_replay_owner; 3633 lock_sop = lock->lk_replay_owner;
3634 fp = lock_stp->st_file;
3637 } 3635 }
3638 /* lock->lk_replay_owner and lock_stp have been created or found */ 3636 /* lock->lk_replay_owner and lock_stp have been created or found */
3639 3637
@@ -3648,13 +3646,19 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3648 switch (lock->lk_type) { 3646 switch (lock->lk_type) {
3649 case NFS4_READ_LT: 3647 case NFS4_READ_LT:
3650 case NFS4_READW_LT: 3648 case NFS4_READW_LT:
3651 filp = find_readable_file(lock_stp->st_file); 3649 if (find_readable_file(lock_stp->st_file)) {
3650 nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_READ);
3651 filp = find_readable_file(lock_stp->st_file);
3652 }
3652 file_lock.fl_type = F_RDLCK; 3653 file_lock.fl_type = F_RDLCK;
3653 cmd = F_SETLK; 3654 cmd = F_SETLK;
3654 break; 3655 break;
3655 case NFS4_WRITE_LT: 3656 case NFS4_WRITE_LT:
3656 case NFS4_WRITEW_LT: 3657 case NFS4_WRITEW_LT:
3657 filp = find_writeable_file(lock_stp->st_file); 3658 if (find_writeable_file(lock_stp->st_file)) {
3659 nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_WRITE);
3660 filp = find_writeable_file(lock_stp->st_file);
3661 }
3658 file_lock.fl_type = F_WRLCK; 3662 file_lock.fl_type = F_WRLCK;
3659 cmd = F_SETLK; 3663 cmd = F_SETLK;
3660 break; 3664 break;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 7731a75971dd..322518c88e4b 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -363,23 +363,23 @@ struct nfs4_file {
363 * at all? */ 363 * at all? */
364static inline struct file *find_writeable_file(struct nfs4_file *f) 364static inline struct file *find_writeable_file(struct nfs4_file *f)
365{ 365{
366 if (f->fi_fds[O_RDWR]) 366 if (f->fi_fds[O_WRONLY])
367 return f->fi_fds[O_RDWR]; 367 return f->fi_fds[O_WRONLY];
368 return f->fi_fds[O_WRONLY]; 368 return f->fi_fds[O_RDWR];
369} 369}
370 370
371static inline struct file *find_readable_file(struct nfs4_file *f) 371static inline struct file *find_readable_file(struct nfs4_file *f)
372{ 372{
373 if (f->fi_fds[O_RDWR]) 373 if (f->fi_fds[O_RDONLY])
374 return f->fi_fds[O_RDWR]; 374 return f->fi_fds[O_RDONLY];
375 return f->fi_fds[O_RDONLY]; 375 return f->fi_fds[O_RDWR];
376} 376}
377 377
378static inline struct file *find_any_file(struct nfs4_file *f) 378static inline struct file *find_any_file(struct nfs4_file *f)
379{ 379{
380 if (f->fi_fds[O_RDWR]) 380 if (f->fi_fds[O_RDWR])
381 return f->fi_fds[O_RDWR]; 381 return f->fi_fds[O_RDWR];
382 else if (f->fi_fds[O_RDWR]) 382 else if (f->fi_fds[O_WRONLY])
383 return f->fi_fds[O_WRONLY]; 383 return f->fi_fds[O_WRONLY];
384 else 384 else
385 return f->fi_fds[O_RDONLY]; 385 return f->fi_fds[O_RDONLY];
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 96360a83cb91..661a6cf8e826 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -2033,15 +2033,17 @@ out:
2033__be32 2033__be32
2034nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access) 2034nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access)
2035{ 2035{
2036 struct path path = {
2037 .mnt = fhp->fh_export->ex_path.mnt,
2038 .dentry = fhp->fh_dentry,
2039 };
2040 __be32 err; 2036 __be32 err;
2041 2037
2042 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access); 2038 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
2043 if (!err && vfs_statfs(&path, stat)) 2039 if (!err) {
2044 err = nfserr_io; 2040 struct path path = {
2041 .mnt = fhp->fh_export->ex_path.mnt,
2042 .dentry = fhp->fh_dentry,
2043 };
2044 if (vfs_statfs(&path, stat))
2045 err = nfserr_io;
2046 }
2045 return err; 2047 return err;
2046} 2048}
2047 2049
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 15412fe15c3a..b552f816de15 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -852,8 +852,8 @@ xfs_convert_page(
852 SetPageUptodate(page); 852 SetPageUptodate(page);
853 853
854 if (count) { 854 if (count) {
855 wbc->nr_to_write--; 855 if (--wbc->nr_to_write <= 0 &&
856 if (wbc->nr_to_write <= 0) 856 wbc->sync_mode == WB_SYNC_NONE)
857 done = 1; 857 done = 1;
858 } 858 }
859 xfs_start_page_writeback(page, !page_dirty, count); 859 xfs_start_page_writeback(page, !page_dirty, count);
@@ -1068,7 +1068,7 @@ xfs_vm_writepage(
1068 * by themselves. 1068 * by themselves.
1069 */ 1069 */
1070 if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC) 1070 if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)
1071 goto out_fail; 1071 goto redirty;
1072 1072
1073 /* 1073 /*
1074 * We need a transaction if there are delalloc or unwritten buffers 1074 * We need a transaction if there are delalloc or unwritten buffers
@@ -1080,7 +1080,7 @@ xfs_vm_writepage(
1080 */ 1080 */
1081 xfs_count_page_state(page, &delalloc, &unwritten); 1081 xfs_count_page_state(page, &delalloc, &unwritten);
1082 if ((current->flags & PF_FSTRANS) && (delalloc || unwritten)) 1082 if ((current->flags & PF_FSTRANS) && (delalloc || unwritten))
1083 goto out_fail; 1083 goto redirty;
1084 1084
1085 /* Is this page beyond the end of the file? */ 1085 /* Is this page beyond the end of the file? */
1086 offset = i_size_read(inode); 1086 offset = i_size_read(inode);
@@ -1245,12 +1245,15 @@ error:
1245 if (iohead) 1245 if (iohead)
1246 xfs_cancel_ioend(iohead); 1246 xfs_cancel_ioend(iohead);
1247 1247
1248 if (err == -EAGAIN)
1249 goto redirty;
1250
1248 xfs_aops_discard_page(page); 1251 xfs_aops_discard_page(page);
1249 ClearPageUptodate(page); 1252 ClearPageUptodate(page);
1250 unlock_page(page); 1253 unlock_page(page);
1251 return err; 1254 return err;
1252 1255
1253out_fail: 1256redirty:
1254 redirty_page_for_writepage(wbc, page); 1257 redirty_page_for_writepage(wbc, page);
1255 unlock_page(page); 1258 unlock_page(page);
1256 return 0; 1259 return 0;
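
The xfs_aops.c changes fold the nr_to_write decrement and the exhaustion test together so that only background (WB_SYNC_NONE) writeback stops early, and the error path now redirties the page instead of discarding it when writepage fails with -EAGAIN. A sketch of the budget test under those assumptions, with names that stand in for the kernel's writeback_control:

#include <stdbool.h>

enum wb_sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

/*
 * Consume one page of the writeback budget and report whether this pass
 * should stop early. Only background (WB_SYNC_NONE) writeback may stop
 * when the budget runs out; integrity writeback must keep going.
 */
static bool wb_budget_exhausted(long *nr_to_write, enum wb_sync_mode mode)
{
	return --(*nr_to_write) <= 0 && mode == WB_SYNC_NONE;
}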
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 15c35b62ff14..a4e07974955b 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -1226,6 +1226,7 @@ xfs_fs_statfs(
1226 struct xfs_inode *ip = XFS_I(dentry->d_inode); 1226 struct xfs_inode *ip = XFS_I(dentry->d_inode);
1227 __uint64_t fakeinos, id; 1227 __uint64_t fakeinos, id;
1228 xfs_extlen_t lsize; 1228 xfs_extlen_t lsize;
1229 __int64_t ffree;
1229 1230
1230 statp->f_type = XFS_SB_MAGIC; 1231 statp->f_type = XFS_SB_MAGIC;
1231 statp->f_namelen = MAXNAMELEN - 1; 1232 statp->f_namelen = MAXNAMELEN - 1;
@@ -1249,7 +1250,11 @@ xfs_fs_statfs(
1249 statp->f_files = min_t(typeof(statp->f_files), 1250 statp->f_files = min_t(typeof(statp->f_files),
1250 statp->f_files, 1251 statp->f_files,
1251 mp->m_maxicount); 1252 mp->m_maxicount);
1252 statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); 1253
1254 /* make sure statp->f_ffree does not underflow */
1255 ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
1256 statp->f_ffree = max_t(__int64_t, ffree, 0);
1257
1253 spin_unlock(&mp->m_sb_lock); 1258 spin_unlock(&mp->m_sb_lock);
1254 1259
1255 if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) || 1260 if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
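
The statfs hunk above computes the free-inode count in a signed temporary and clamps it at zero, so a transient skew between sb_icount and sb_ifree cannot wrap f_ffree into a huge unsigned value. The same arithmetic in a standalone form, using plain stdint types instead of the XFS typedefs:

#include <stdint.h>

/*
 * Report free inodes without letting the unsigned statfs field wrap:
 * compute in a signed temporary, then clamp at zero, mirroring
 * max_t(__int64_t, ffree, 0) in the hunk above.
 */
static uint64_t clamped_ffree(uint64_t f_files, uint64_t icount, uint64_t ifree)
{
	int64_t ffree = (int64_t)f_files - (int64_t)(icount - ifree);

	return ffree > 0 ? (uint64_t)ffree : 0;
}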
@@ -1402,7 +1407,7 @@ xfs_fs_freeze(
1402 1407
1403 xfs_save_resvblks(mp); 1408 xfs_save_resvblks(mp);
1404 xfs_quiesce_attr(mp); 1409 xfs_quiesce_attr(mp);
1405 return -xfs_fs_log_dummy(mp); 1410 return -xfs_fs_log_dummy(mp, SYNC_WAIT);
1406} 1411}
1407 1412
1408STATIC int 1413STATIC int
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index dfcbd98d1599..d59c4a65d492 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -34,6 +34,7 @@
34#include "xfs_inode_item.h" 34#include "xfs_inode_item.h"
35#include "xfs_quota.h" 35#include "xfs_quota.h"
36#include "xfs_trace.h" 36#include "xfs_trace.h"
37#include "xfs_fsops.h"
37 38
38#include <linux/kthread.h> 39#include <linux/kthread.h>
39#include <linux/freezer.h> 40#include <linux/freezer.h>
@@ -341,38 +342,6 @@ xfs_sync_attr(
341} 342}
342 343
343STATIC int 344STATIC int
344xfs_commit_dummy_trans(
345 struct xfs_mount *mp,
346 uint flags)
347{
348 struct xfs_inode *ip = mp->m_rootip;
349 struct xfs_trans *tp;
350 int error;
351
352 /*
353 * Put a dummy transaction in the log to tell recovery
354 * that all others are OK.
355 */
356 tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
357 error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
358 if (error) {
359 xfs_trans_cancel(tp, 0);
360 return error;
361 }
362
363 xfs_ilock(ip, XFS_ILOCK_EXCL);
364
365 xfs_trans_ijoin(tp, ip);
366 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
367 error = xfs_trans_commit(tp, 0);
368 xfs_iunlock(ip, XFS_ILOCK_EXCL);
369
370 /* the log force ensures this transaction is pushed to disk */
371 xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
372 return error;
373}
374
375STATIC int
376xfs_sync_fsdata( 345xfs_sync_fsdata(
377 struct xfs_mount *mp) 346 struct xfs_mount *mp)
378{ 347{
@@ -432,7 +401,7 @@ xfs_quiesce_data(
432 401
433 /* mark the log as covered if needed */ 402 /* mark the log as covered if needed */
434 if (xfs_log_need_covered(mp)) 403 if (xfs_log_need_covered(mp))
435 error2 = xfs_commit_dummy_trans(mp, SYNC_WAIT); 404 error2 = xfs_fs_log_dummy(mp, SYNC_WAIT);
436 405
437 /* flush data-only devices */ 406 /* flush data-only devices */
438 if (mp->m_rtdev_targp) 407 if (mp->m_rtdev_targp)
@@ -563,7 +532,7 @@ xfs_flush_inodes(
563/* 532/*
564 * Every sync period we need to unpin all items, reclaim inodes and sync 533 * Every sync period we need to unpin all items, reclaim inodes and sync
565 * disk quotas. We might need to cover the log to indicate that the 534 * disk quotas. We might need to cover the log to indicate that the
566 * filesystem is idle. 535 * filesystem is idle and not frozen.
567 */ 536 */
568STATIC void 537STATIC void
569xfs_sync_worker( 538xfs_sync_worker(
@@ -577,8 +546,9 @@ xfs_sync_worker(
577 xfs_reclaim_inodes(mp, 0); 546 xfs_reclaim_inodes(mp, 0);
578 /* dgc: errors ignored here */ 547 /* dgc: errors ignored here */
579 error = xfs_qm_sync(mp, SYNC_TRYLOCK); 548 error = xfs_qm_sync(mp, SYNC_TRYLOCK);
580 if (xfs_log_need_covered(mp)) 549 if (mp->m_super->s_frozen == SB_UNFROZEN &&
581 error = xfs_commit_dummy_trans(mp, 0); 550 xfs_log_need_covered(mp))
551 error = xfs_fs_log_dummy(mp, 0);
582 } 552 }
583 mp->m_sync_seq++; 553 mp->m_sync_seq++;
584 wake_up(&mp->m_wait_single_sync_task); 554 wake_up(&mp->m_wait_single_sync_task);
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index dbca5f5c37ba..43b1d5699335 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -604,31 +604,36 @@ out:
604 return 0; 604 return 0;
605} 605}
606 606
607/*
608 * Dump a transaction into the log that contains no real change. This is needed
609 * to be able to make the log dirty or stamp the current tail LSN into the log
610 * during the covering operation.
611 *
612 * We cannot use an inode here for this - that will push dirty state back up
613 * into the VFS and then periodic inode flushing will prevent log covering from
614 * making progress. Hence we log a field in the superblock instead.
615 */
607int 616int
608xfs_fs_log_dummy( 617xfs_fs_log_dummy(
609 xfs_mount_t *mp) 618 xfs_mount_t *mp,
619 int flags)
610{ 620{
611 xfs_trans_t *tp; 621 xfs_trans_t *tp;
612 xfs_inode_t *ip;
613 int error; 622 int error;
614 623
615 tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP); 624 tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
616 error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); 625 error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
626 XFS_DEFAULT_LOG_COUNT);
617 if (error) { 627 if (error) {
618 xfs_trans_cancel(tp, 0); 628 xfs_trans_cancel(tp, 0);
619 return error; 629 return error;
620 } 630 }
621 631
622 ip = mp->m_rootip; 632 /* log the UUID because it is an unchanging field */
623 xfs_ilock(ip, XFS_ILOCK_EXCL); 633 xfs_mod_sb(tp, XFS_SB_UUID);
624 634 if (flags & SYNC_WAIT)
625 xfs_trans_ijoin(tp, ip); 635 xfs_trans_set_sync(tp);
626 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 636 return xfs_trans_commit(tp, 0);
627 xfs_trans_set_sync(tp);
628 error = xfs_trans_commit(tp, 0);
629
630 xfs_iunlock(ip, XFS_ILOCK_EXCL);
631 return error;
632} 637}
633 638
634int 639int
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
index 88435e0a77c9..a786c5212c1e 100644
--- a/fs/xfs/xfs_fsops.h
+++ b/fs/xfs/xfs_fsops.h
@@ -25,6 +25,6 @@ extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt);
25extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval, 25extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval,
26 xfs_fsop_resblks_t *outval); 26 xfs_fsop_resblks_t *outval);
27extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags); 27extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
28extern int xfs_fs_log_dummy(xfs_mount_t *mp); 28extern int xfs_fs_log_dummy(xfs_mount_t *mp, int flags);
29 29
30#endif /* __XFS_FSOPS_H__ */ 30#endif /* __XFS_FSOPS_H__ */
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index abf80ae1e95b..5371d2dc360e 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -1213,7 +1213,6 @@ xfs_imap_lookup(
1213 struct xfs_inobt_rec_incore rec; 1213 struct xfs_inobt_rec_incore rec;
1214 struct xfs_btree_cur *cur; 1214 struct xfs_btree_cur *cur;
1215 struct xfs_buf *agbp; 1215 struct xfs_buf *agbp;
1216 xfs_agino_t startino;
1217 int error; 1216 int error;
1218 int i; 1217 int i;
1219 1218
@@ -1227,13 +1226,13 @@ xfs_imap_lookup(
1227 } 1226 }
1228 1227
1229 /* 1228 /*
1230 * derive and lookup the exact inode record for the given agino. If the 1229 * Lookup the inode record for the given agino. If the record cannot be
1231 * record cannot be found, then it's an invalid inode number and we 1230 * found, then it's an invalid inode number and we should abort. Once
1232 * should abort. 1231 * we have a record, we need to ensure it contains the inode number
1232 * we are looking up.
1233 */ 1233 */
1234 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); 1234 cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
1235 startino = agino & ~(XFS_IALLOC_INODES(mp) - 1); 1235 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
1236 error = xfs_inobt_lookup(cur, startino, XFS_LOOKUP_EQ, &i);
1237 if (!error) { 1236 if (!error) {
1238 if (i) 1237 if (i)
1239 error = xfs_inobt_get_rec(cur, &rec, &i); 1238 error = xfs_inobt_get_rec(cur, &rec, &i);
@@ -1246,6 +1245,11 @@ xfs_imap_lookup(
1246 if (error) 1245 if (error)
1247 return error; 1246 return error;
1248 1247
1248 /* check that the returned record contains the required inode */
1249 if (rec.ir_startino > agino ||
1250 rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino)
1251 return EINVAL;
1252
1249 /* for untrusted inodes check it is allocated first */ 1253 /* for untrusted inodes check it is allocated first */
1250 if ((flags & XFS_IGET_UNTRUSTED) && 1254 if ((flags & XFS_IGET_UNTRUSTED) &&
1251 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) 1255 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
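
Because xfs_imap_lookup() now performs a less-than-or-equal lookup on the raw agino rather than an exact lookup on a chunk-aligned startino, it must verify afterwards that the returned record actually covers the inode being mapped. A self-contained sketch of that containment check, with invented type and field names standing in for the inode btree record:

#include <errno.h>
#include <stdint.h>

typedef uint32_t agino_t;

/* Illustrative chunk record: a run of inodes_per_chunk inodes starting
 * at startino (mirrors xfs_inobt_rec_incore.ir_startino). */
struct chunk_rec {
	agino_t startino;
};

/*
 * After a less-than-or-equal lookup we only know the record starts at or
 * before the target; we still have to confirm the target lies inside the
 * chunk, otherwise the inode number is invalid.
 */
static int chunk_contains(const struct chunk_rec *rec, agino_t agino,
			  unsigned int inodes_per_chunk)
{
	if (rec->startino > agino ||
	    rec->startino + inodes_per_chunk <= agino)
		return -EINVAL;
	return 0;
}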
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 68415cb4f23c..34798f391c49 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1914,6 +1914,11 @@ xfs_iunlink_remove(
1914 return 0; 1914 return 0;
1915} 1915}
1916 1916
1917/*
 1918 * A big issue when freeing the inode cluster is that we _cannot_ skip any
1919 * inodes that are in memory - they all must be marked stale and attached to
1920 * the cluster buffer.
1921 */
1917STATIC void 1922STATIC void
1918xfs_ifree_cluster( 1923xfs_ifree_cluster(
1919 xfs_inode_t *free_ip, 1924 xfs_inode_t *free_ip,
@@ -1945,8 +1950,6 @@ xfs_ifree_cluster(
1945 } 1950 }
1946 1951
1947 for (j = 0; j < nbufs; j++, inum += ninodes) { 1952 for (j = 0; j < nbufs; j++, inum += ninodes) {
1948 int found = 0;
1949
1950 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), 1953 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
1951 XFS_INO_TO_AGBNO(mp, inum)); 1954 XFS_INO_TO_AGBNO(mp, inum));
1952 1955
@@ -1965,7 +1968,9 @@ xfs_ifree_cluster(
1965 /* 1968 /*
1966 * Walk the inodes already attached to the buffer and mark them 1969 * Walk the inodes already attached to the buffer and mark them
1967 * stale. These will all have the flush locks held, so an 1970 * stale. These will all have the flush locks held, so an
1968 * in-memory inode walk can't lock them. 1971 * in-memory inode walk can't lock them. By marking them all
1972 * stale first, we will not attempt to lock them in the loop
1973 * below as the XFS_ISTALE flag will be set.
1969 */ 1974 */
1970 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); 1975 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
1971 while (lip) { 1976 while (lip) {
@@ -1977,11 +1982,11 @@ xfs_ifree_cluster(
1977 &iip->ili_flush_lsn, 1982 &iip->ili_flush_lsn,
1978 &iip->ili_item.li_lsn); 1983 &iip->ili_item.li_lsn);
1979 xfs_iflags_set(iip->ili_inode, XFS_ISTALE); 1984 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
1980 found++;
1981 } 1985 }
1982 lip = lip->li_bio_list; 1986 lip = lip->li_bio_list;
1983 } 1987 }
1984 1988
1989
1985 /* 1990 /*
1986 * For each inode in memory attempt to add it to the inode 1991 * For each inode in memory attempt to add it to the inode
1987 * buffer and set it up for being staled on buffer IO 1992 * buffer and set it up for being staled on buffer IO
@@ -1993,6 +1998,7 @@ xfs_ifree_cluster(
1993 * even trying to lock them. 1998 * even trying to lock them.
1994 */ 1999 */
1995 for (i = 0; i < ninodes; i++) { 2000 for (i = 0; i < ninodes; i++) {
2001retry:
1996 read_lock(&pag->pag_ici_lock); 2002 read_lock(&pag->pag_ici_lock);
1997 ip = radix_tree_lookup(&pag->pag_ici_root, 2003 ip = radix_tree_lookup(&pag->pag_ici_root,
1998 XFS_INO_TO_AGINO(mp, (inum + i))); 2004 XFS_INO_TO_AGINO(mp, (inum + i)));
@@ -2003,38 +2009,36 @@ xfs_ifree_cluster(
2003 continue; 2009 continue;
2004 } 2010 }
2005 2011
2006 /* don't try to lock/unlock the current inode */ 2012 /*
2013 * Don't try to lock/unlock the current inode, but we
2014 * _cannot_ skip the other inodes that we did not find
2015 * in the list attached to the buffer and are not
2016 * already marked stale. If we can't lock it, back off
2017 * and retry.
2018 */
2007 if (ip != free_ip && 2019 if (ip != free_ip &&
2008 !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 2020 !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2009 read_unlock(&pag->pag_ici_lock); 2021 read_unlock(&pag->pag_ici_lock);
2010 continue; 2022 delay(1);
2023 goto retry;
2011 } 2024 }
2012 read_unlock(&pag->pag_ici_lock); 2025 read_unlock(&pag->pag_ici_lock);
2013 2026
2014 if (!xfs_iflock_nowait(ip)) { 2027 xfs_iflock(ip);
2015 if (ip != free_ip)
2016 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2017 continue;
2018 }
2019
2020 xfs_iflags_set(ip, XFS_ISTALE); 2028 xfs_iflags_set(ip, XFS_ISTALE);
2021 if (xfs_inode_clean(ip)) {
2022 ASSERT(ip != free_ip);
2023 xfs_ifunlock(ip);
2024 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2025 continue;
2026 }
2027 2029
2030 /*
2031 * we don't need to attach clean inodes or those only
2032 * with unlogged changes (which we throw away, anyway).
2033 */
2028 iip = ip->i_itemp; 2034 iip = ip->i_itemp;
2029 if (!iip) { 2035 if (!iip || xfs_inode_clean(ip)) {
2030 /* inode with unlogged changes only */
2031 ASSERT(ip != free_ip); 2036 ASSERT(ip != free_ip);
2032 ip->i_update_core = 0; 2037 ip->i_update_core = 0;
2033 xfs_ifunlock(ip); 2038 xfs_ifunlock(ip);
2034 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2039 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2035 continue; 2040 continue;
2036 } 2041 }
2037 found++;
2038 2042
2039 iip->ili_last_fields = iip->ili_format.ilf_fields; 2043 iip->ili_last_fields = iip->ili_format.ilf_fields;
2040 iip->ili_format.ilf_fields = 0; 2044 iip->ili_format.ilf_fields = 0;
@@ -2049,8 +2053,7 @@ xfs_ifree_cluster(
2049 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2053 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2050 } 2054 }
2051 2055
2052 if (found) 2056 xfs_trans_stale_inode_buf(tp, bp);
2053 xfs_trans_stale_inode_buf(tp, bp);
2054 xfs_trans_binval(tp, bp); 2057 xfs_trans_binval(tp, bp);
2055 } 2058 }
2056 2059
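
The xfs_ifree_cluster() rework turns a failed trylock from a reason to skip an inode into a back-off-and-retry, because every in-memory inode in the cluster must be marked stale before the buffer is invalidated. A rough pthreads illustration of that rule (not the kernel locking API; the 1 ms back-off stands in for delay(1)):

#include <pthread.h>
#include <time.h>

/*
 * Every object in the set must end up marked stale, so a failed trylock
 * is not a reason to skip it: back off briefly and retry, analogous to
 * the delay(1)/goto retry added in xfs_ifree_cluster().
 */
static void mark_all_stale(pthread_mutex_t *locks, int *stale, int count)
{
	const struct timespec backoff = { .tv_sec = 0, .tv_nsec = 1000000 };

	for (int i = 0; i < count; i++) {
retry:
		if (pthread_mutex_trylock(&locks[i]) != 0) {
			nanosleep(&backoff, NULL);
			goto retry;
		}
		stale[i] = 1;		/* no entry may be left unmarked */
		pthread_mutex_unlock(&locks[i]);
	}
}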
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 925d572bf0f4..33f718f92a48 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -3015,7 +3015,8 @@ _xfs_log_force(
3015 3015
3016 XFS_STATS_INC(xs_log_force); 3016 XFS_STATS_INC(xs_log_force);
3017 3017
3018 xlog_cil_push(log, 1); 3018 if (log->l_cilp)
3019 xlog_cil_force(log);
3019 3020
3020 spin_lock(&log->l_icloglock); 3021 spin_lock(&log->l_icloglock);
3021 3022
@@ -3167,7 +3168,7 @@ _xfs_log_force_lsn(
3167 XFS_STATS_INC(xs_log_force); 3168 XFS_STATS_INC(xs_log_force);
3168 3169
3169 if (log->l_cilp) { 3170 if (log->l_cilp) {
3170 lsn = xlog_cil_push_lsn(log, lsn); 3171 lsn = xlog_cil_force_lsn(log, lsn);
3171 if (lsn == NULLCOMMITLSN) 3172 if (lsn == NULLCOMMITLSN)
3172 return 0; 3173 return 0;
3173 } 3174 }
@@ -3724,7 +3725,7 @@ xfs_log_force_umount(
3724 * call below. 3725 * call below.
3725 */ 3726 */
3726 if (!logerror && (mp->m_flags & XFS_MOUNT_DELAYLOG)) 3727 if (!logerror && (mp->m_flags & XFS_MOUNT_DELAYLOG))
3727 xlog_cil_push(log, 1); 3728 xlog_cil_force(log);
3728 3729
3729 /* 3730 /*
3730 * We must hold both the GRANT lock and the LOG lock, 3731 * We must hold both the GRANT lock and the LOG lock,
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 31e4ea2d19ac..ed575fb4b495 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -68,6 +68,7 @@ xlog_cil_init(
68 ctx->sequence = 1; 68 ctx->sequence = 1;
69 ctx->cil = cil; 69 ctx->cil = cil;
70 cil->xc_ctx = ctx; 70 cil->xc_ctx = ctx;
71 cil->xc_current_sequence = ctx->sequence;
71 72
72 cil->xc_log = log; 73 cil->xc_log = log;
73 log->l_cilp = cil; 74 log->l_cilp = cil;
@@ -269,15 +270,10 @@ xlog_cil_insert(
269static void 270static void
270xlog_cil_format_items( 271xlog_cil_format_items(
271 struct log *log, 272 struct log *log,
272 struct xfs_log_vec *log_vector, 273 struct xfs_log_vec *log_vector)
273 struct xlog_ticket *ticket,
274 xfs_lsn_t *start_lsn)
275{ 274{
276 struct xfs_log_vec *lv; 275 struct xfs_log_vec *lv;
277 276
278 if (start_lsn)
279 *start_lsn = log->l_cilp->xc_ctx->sequence;
280
281 ASSERT(log_vector); 277 ASSERT(log_vector);
282 for (lv = log_vector; lv; lv = lv->lv_next) { 278 for (lv = log_vector; lv; lv = lv->lv_next) {
283 void *ptr; 279 void *ptr;
@@ -301,9 +297,24 @@ xlog_cil_format_items(
301 ptr += vec->i_len; 297 ptr += vec->i_len;
302 } 298 }
303 ASSERT(ptr == lv->lv_buf + lv->lv_buf_len); 299 ASSERT(ptr == lv->lv_buf + lv->lv_buf_len);
300 }
301}
302
303static void
304xlog_cil_insert_items(
305 struct log *log,
306 struct xfs_log_vec *log_vector,
307 struct xlog_ticket *ticket,
308 xfs_lsn_t *start_lsn)
309{
310 struct xfs_log_vec *lv;
311
312 if (start_lsn)
313 *start_lsn = log->l_cilp->xc_ctx->sequence;
304 314
315 ASSERT(log_vector);
316 for (lv = log_vector; lv; lv = lv->lv_next)
305 xlog_cil_insert(log, ticket, lv->lv_item, lv); 317 xlog_cil_insert(log, ticket, lv->lv_item, lv);
306 }
307} 318}
308 319
309static void 320static void
@@ -321,80 +332,6 @@ xlog_cil_free_logvec(
321} 332}
322 333
323/* 334/*
324 * Commit a transaction with the given vector to the Committed Item List.
325 *
326 * To do this, we need to format the item, pin it in memory if required and
327 * account for the space used by the transaction. Once we have done that we
328 * need to release the unused reservation for the transaction, attach the
329 * transaction to the checkpoint context so we carry the busy extents through
330 * to checkpoint completion, and then unlock all the items in the transaction.
331 *
332 * For more specific information about the order of operations in
333 * xfs_log_commit_cil() please refer to the comments in
334 * xfs_trans_commit_iclog().
335 *
336 * Called with the context lock already held in read mode to lock out
337 * background commit, returns without it held once background commits are
338 * allowed again.
339 */
340int
341xfs_log_commit_cil(
342 struct xfs_mount *mp,
343 struct xfs_trans *tp,
344 struct xfs_log_vec *log_vector,
345 xfs_lsn_t *commit_lsn,
346 int flags)
347{
348 struct log *log = mp->m_log;
349 int log_flags = 0;
350 int push = 0;
351
352 if (flags & XFS_TRANS_RELEASE_LOG_RES)
353 log_flags = XFS_LOG_REL_PERM_RESERV;
354
355 if (XLOG_FORCED_SHUTDOWN(log)) {
356 xlog_cil_free_logvec(log_vector);
357 return XFS_ERROR(EIO);
358 }
359
360 /* lock out background commit */
361 down_read(&log->l_cilp->xc_ctx_lock);
362 xlog_cil_format_items(log, log_vector, tp->t_ticket, commit_lsn);
363
364 /* check we didn't blow the reservation */
365 if (tp->t_ticket->t_curr_res < 0)
366 xlog_print_tic_res(log->l_mp, tp->t_ticket);
367
368 /* attach the transaction to the CIL if it has any busy extents */
369 if (!list_empty(&tp->t_busy)) {
370 spin_lock(&log->l_cilp->xc_cil_lock);
371 list_splice_init(&tp->t_busy,
372 &log->l_cilp->xc_ctx->busy_extents);
373 spin_unlock(&log->l_cilp->xc_cil_lock);
374 }
375
376 tp->t_commit_lsn = *commit_lsn;
377 xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
378 xfs_trans_unreserve_and_mod_sb(tp);
379
380 /* check for background commit before unlock */
381 if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log))
382 push = 1;
383 up_read(&log->l_cilp->xc_ctx_lock);
384
385 /*
386 * We need to push CIL every so often so we don't cache more than we
387 * can fit in the log. The limit really is that a checkpoint can't be
388 * more than half the log (the current checkpoint is not allowed to
389 * overwrite the previous checkpoint), but commit latency and memory
390 * usage limit this to a smaller size in most cases.
391 */
392 if (push)
393 xlog_cil_push(log, 0);
394 return 0;
395}
396
397/*
398 * Mark all items committed and clear busy extents. We free the log vector 335 * Mark all items committed and clear busy extents. We free the log vector
399 * chains in a separate pass so that we unpin the log items as quickly as 336 * chains in a separate pass so that we unpin the log items as quickly as
400 * possible. 337 * possible.
@@ -427,13 +364,23 @@ xlog_cil_committed(
427} 364}
428 365
429/* 366/*
 430 * Push the Committed Item List to the log. If the push_now flag is not set, 367 * Push the Committed Item List to the log. If @push_seq is zero, then it
 431 * then it is a background flush and so we can choose to ignore it. 368 * is a background flush and so we can choose to ignore it. Otherwise, if the
369 * current sequence is the same as @push_seq we need to do a flush. If
370 * @push_seq is less than the current sequence, then it has already been
371 * flushed and we don't need to do anything - the caller will wait for it to
372 * complete if necessary.
373 *
374 * @push_seq is a value rather than a flag because that allows us to do an
 375 * unlocked check of the sequence number for a match. Hence we can allow log
376 * forces to run racily and not issue pushes for the same sequence twice. If we
377 * get a race between multiple pushes for the same sequence they will block on
378 * the first one and then abort, hence avoiding needless pushes.
432 */ 379 */
433int 380STATIC int
434xlog_cil_push( 381xlog_cil_push(
435 struct log *log, 382 struct log *log,
436 int push_now) 383 xfs_lsn_t push_seq)
437{ 384{
438 struct xfs_cil *cil = log->l_cilp; 385 struct xfs_cil *cil = log->l_cilp;
439 struct xfs_log_vec *lv; 386 struct xfs_log_vec *lv;
@@ -453,12 +400,14 @@ xlog_cil_push(
453 if (!cil) 400 if (!cil)
454 return 0; 401 return 0;
455 402
403 ASSERT(!push_seq || push_seq <= cil->xc_ctx->sequence);
404
456 new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); 405 new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
457 new_ctx->ticket = xlog_cil_ticket_alloc(log); 406 new_ctx->ticket = xlog_cil_ticket_alloc(log);
458 407
459 /* lock out transaction commit, but don't block on background push */ 408 /* lock out transaction commit, but don't block on background push */
460 if (!down_write_trylock(&cil->xc_ctx_lock)) { 409 if (!down_write_trylock(&cil->xc_ctx_lock)) {
461 if (!push_now) 410 if (!push_seq)
462 goto out_free_ticket; 411 goto out_free_ticket;
463 down_write(&cil->xc_ctx_lock); 412 down_write(&cil->xc_ctx_lock);
464 } 413 }
@@ -469,7 +418,11 @@ xlog_cil_push(
469 goto out_skip; 418 goto out_skip;
470 419
471 /* check for spurious background flush */ 420 /* check for spurious background flush */
472 if (!push_now && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) 421 if (!push_seq && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
422 goto out_skip;
423
 424 /* check for a previously pushed sequence */
425 if (push_seq < cil->xc_ctx->sequence)
473 goto out_skip; 426 goto out_skip;
474 427
475 /* 428 /*
@@ -515,6 +468,13 @@ xlog_cil_push(
515 cil->xc_ctx = new_ctx; 468 cil->xc_ctx = new_ctx;
516 469
517 /* 470 /*
471 * mirror the new sequence into the cil structure so that we can do
472 * unlocked checks against the current sequence in log forces without
 473 * risking dereferencing a freed context pointer.
474 */
475 cil->xc_current_sequence = new_ctx->sequence;
476
477 /*
518 * The switch is now done, so we can drop the context lock and move out 478 * The switch is now done, so we can drop the context lock and move out
519 * of a shared context. We can't just go straight to the commit record, 479 * of a shared context. We can't just go straight to the commit record,
520 * though - we need to synchronise with previous and future commits so 480 * though - we need to synchronise with previous and future commits so
@@ -626,6 +586,102 @@ out_abort:
626} 586}
627 587
628/* 588/*
589 * Commit a transaction with the given vector to the Committed Item List.
590 *
591 * To do this, we need to format the item, pin it in memory if required and
592 * account for the space used by the transaction. Once we have done that we
593 * need to release the unused reservation for the transaction, attach the
594 * transaction to the checkpoint context so we carry the busy extents through
595 * to checkpoint completion, and then unlock all the items in the transaction.
596 *
597 * For more specific information about the order of operations in
598 * xfs_log_commit_cil() please refer to the comments in
599 * xfs_trans_commit_iclog().
600 *
601 * Called with the context lock already held in read mode to lock out
602 * background commit, returns without it held once background commits are
603 * allowed again.
604 */
605int
606xfs_log_commit_cil(
607 struct xfs_mount *mp,
608 struct xfs_trans *tp,
609 struct xfs_log_vec *log_vector,
610 xfs_lsn_t *commit_lsn,
611 int flags)
612{
613 struct log *log = mp->m_log;
614 int log_flags = 0;
615 int push = 0;
616
617 if (flags & XFS_TRANS_RELEASE_LOG_RES)
618 log_flags = XFS_LOG_REL_PERM_RESERV;
619
620 if (XLOG_FORCED_SHUTDOWN(log)) {
621 xlog_cil_free_logvec(log_vector);
622 return XFS_ERROR(EIO);
623 }
624
625 /*
626 * do all the hard work of formatting items (including memory
627 * allocation) outside the CIL context lock. This prevents stalling CIL
628 * pushes when we are low on memory and a transaction commit spends a
629 * lot of time in memory reclaim.
630 */
631 xlog_cil_format_items(log, log_vector);
632
633 /* lock out background commit */
634 down_read(&log->l_cilp->xc_ctx_lock);
635 xlog_cil_insert_items(log, log_vector, tp->t_ticket, commit_lsn);
636
637 /* check we didn't blow the reservation */
638 if (tp->t_ticket->t_curr_res < 0)
639 xlog_print_tic_res(log->l_mp, tp->t_ticket);
640
641 /* attach the transaction to the CIL if it has any busy extents */
642 if (!list_empty(&tp->t_busy)) {
643 spin_lock(&log->l_cilp->xc_cil_lock);
644 list_splice_init(&tp->t_busy,
645 &log->l_cilp->xc_ctx->busy_extents);
646 spin_unlock(&log->l_cilp->xc_cil_lock);
647 }
648
649 tp->t_commit_lsn = *commit_lsn;
650 xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
651 xfs_trans_unreserve_and_mod_sb(tp);
652
653 /*
654 * Once all the items of the transaction have been copied to the CIL,
655 * the items can be unlocked and freed.
656 *
657 * This needs to be done before we drop the CIL context lock because we
658 * have to update state in the log items and unlock them before they go
659 * to disk. If we don't, then the CIL checkpoint can race with us and
660 * we can run checkpoint completion before we've updated and unlocked
661 * the log items. This affects (at least) processing of stale buffers,
662 * inodes and EFIs.
663 */
664 xfs_trans_free_items(tp, *commit_lsn, 0);
665
666 /* check for background commit before unlock */
667 if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log))
668 push = 1;
669
670 up_read(&log->l_cilp->xc_ctx_lock);
671
672 /*
673 * We need to push CIL every so often so we don't cache more than we
674 * can fit in the log. The limit really is that a checkpoint can't be
675 * more than half the log (the current checkpoint is not allowed to
676 * overwrite the previous checkpoint), but commit latency and memory
677 * usage limit this to a smaller size in most cases.
678 */
679 if (push)
680 xlog_cil_push(log, 0);
681 return 0;
682}
683
684/*
629 * Conditionally push the CIL based on the sequence passed in. 685 * Conditionally push the CIL based on the sequence passed in.
630 * 686 *
631 * We only need to push if we haven't already pushed the sequence 687 * We only need to push if we haven't already pushed the sequence
@@ -639,39 +695,34 @@ out_abort:
639 * commit lsn is there. It'll be empty, so this is broken for now. 695 * commit lsn is there. It'll be empty, so this is broken for now.
640 */ 696 */
641xfs_lsn_t 697xfs_lsn_t
642xlog_cil_push_lsn( 698xlog_cil_force_lsn(
643 struct log *log, 699 struct log *log,
644 xfs_lsn_t push_seq) 700 xfs_lsn_t sequence)
645{ 701{
646 struct xfs_cil *cil = log->l_cilp; 702 struct xfs_cil *cil = log->l_cilp;
647 struct xfs_cil_ctx *ctx; 703 struct xfs_cil_ctx *ctx;
648 xfs_lsn_t commit_lsn = NULLCOMMITLSN; 704 xfs_lsn_t commit_lsn = NULLCOMMITLSN;
649 705
650restart: 706 ASSERT(sequence <= cil->xc_current_sequence);
651 down_write(&cil->xc_ctx_lock); 707
652 ASSERT(push_seq <= cil->xc_ctx->sequence); 708 /*
653 709 * check to see if we need to force out the current context.
654 /* check to see if we need to force out the current context */ 710 * xlog_cil_push() handles racing pushes for the same sequence,
655 if (push_seq == cil->xc_ctx->sequence) { 711 * so no need to deal with it here.
656 up_write(&cil->xc_ctx_lock); 712 */
657 xlog_cil_push(log, 1); 713 if (sequence == cil->xc_current_sequence)
658 goto restart; 714 xlog_cil_push(log, sequence);
659 }
660 715
661 /* 716 /*
662 * See if we can find a previous sequence still committing. 717 * See if we can find a previous sequence still committing.
663 * We can drop the flush lock as soon as we have the cil lock
664 * because we are now only comparing contexts protected by
665 * the cil lock.
666 *
667 * We need to wait for all previous sequence commits to complete 718 * We need to wait for all previous sequence commits to complete
668 * before allowing the force of push_seq to go ahead. Hence block 719 * before allowing the force of push_seq to go ahead. Hence block
669 * on commits for those as well. 720 * on commits for those as well.
670 */ 721 */
722restart:
671 spin_lock(&cil->xc_cil_lock); 723 spin_lock(&cil->xc_cil_lock);
672 up_write(&cil->xc_ctx_lock);
673 list_for_each_entry(ctx, &cil->xc_committing, committing) { 724 list_for_each_entry(ctx, &cil->xc_committing, committing) {
674 if (ctx->sequence > push_seq) 725 if (ctx->sequence > sequence)
675 continue; 726 continue;
676 if (!ctx->commit_lsn) { 727 if (!ctx->commit_lsn) {
677 /* 728 /*
@@ -681,7 +732,7 @@ restart:
681 sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0); 732 sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
682 goto restart; 733 goto restart;
683 } 734 }
684 if (ctx->sequence != push_seq) 735 if (ctx->sequence != sequence)
685 continue; 736 continue;
686 /* found it! */ 737 /* found it! */
687 commit_lsn = ctx->commit_lsn; 738 commit_lsn = ctx->commit_lsn;
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 8c072618965c..ced52b98b322 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -422,6 +422,7 @@ struct xfs_cil {
422 struct rw_semaphore xc_ctx_lock; 422 struct rw_semaphore xc_ctx_lock;
423 struct list_head xc_committing; 423 struct list_head xc_committing;
424 sv_t xc_commit_wait; 424 sv_t xc_commit_wait;
425 xfs_lsn_t xc_current_sequence;
425}; 426};
426 427
427/* 428/*
@@ -562,8 +563,16 @@ int xlog_cil_init(struct log *log);
562void xlog_cil_init_post_recovery(struct log *log); 563void xlog_cil_init_post_recovery(struct log *log);
563void xlog_cil_destroy(struct log *log); 564void xlog_cil_destroy(struct log *log);
564 565
565int xlog_cil_push(struct log *log, int push_now); 566/*
566xfs_lsn_t xlog_cil_push_lsn(struct log *log, xfs_lsn_t push_sequence); 567 * CIL force routines
568 */
569xfs_lsn_t xlog_cil_force_lsn(struct log *log, xfs_lsn_t sequence);
570
571static inline void
572xlog_cil_force(struct log *log)
573{
574 xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
575}
567 576
568/* 577/*
569 * Unmount record type is used as a pseudo transaction type for the ticket. 578 * Unmount record type is used as a pseudo transaction type for the ticket.
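
The new xc_current_sequence field mirrors the active context's sequence so that log forces can compare against it without taking xc_ctx_lock and without dereferencing a context that may already be freed; a racy match at worst requests a redundant push, which xlog_cil_push() detects (push_seq < cil->xc_ctx->sequence) and skips. A rough C11 sketch of that unlocked-check idea, with an atomic standing in for the mirrored field:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * The current sequence is mirrored into an atomic so that forcing code
 * can test it without the lock that protects the context itself. A stale
 * read only requests a redundant push, which the push path then skips.
 */
struct cil_mirror {
	_Atomic uint64_t current_sequence;
};

/* Unlocked check: does forcing @sequence require triggering a push? */
static bool force_needs_push(struct cil_mirror *cil, uint64_t sequence)
{
	return sequence == atomic_load(&cil->current_sequence);
}

/* Called by the push path after it has switched to a new context. */
static void publish_sequence(struct cil_mirror *cil, uint64_t new_sequence)
{
	atomic_store(&cil->current_sequence, new_sequence);
}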
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index fdca7416c754..1c47edaea0d2 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1167,7 +1167,7 @@ xfs_trans_del_item(
1167 * Unlock all of the items of a transaction and free all the descriptors 1167 * Unlock all of the items of a transaction and free all the descriptors
1168 * of that transaction. 1168 * of that transaction.
1169 */ 1169 */
1170STATIC void 1170void
1171xfs_trans_free_items( 1171xfs_trans_free_items(
1172 struct xfs_trans *tp, 1172 struct xfs_trans *tp,
1173 xfs_lsn_t commit_lsn, 1173 xfs_lsn_t commit_lsn,
@@ -1653,9 +1653,6 @@ xfs_trans_commit_cil(
1653 return error; 1653 return error;
1654 1654
1655 current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); 1655 current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
1656
1657 /* xfs_trans_free_items() unlocks them first */
1658 xfs_trans_free_items(tp, *commit_lsn, 0);
1659 xfs_trans_free(tp); 1656 xfs_trans_free(tp);
1660 return 0; 1657 return 0;
1661} 1658}
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index e2d93d8ead7b..62da86c90de5 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -25,7 +25,8 @@ struct xfs_trans;
25 25
26void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *); 26void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
27void xfs_trans_del_item(struct xfs_log_item *); 27void xfs_trans_del_item(struct xfs_log_item *);
28 28void xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
29 int flags);
29void xfs_trans_item_committed(struct xfs_log_item *lip, 30void xfs_trans_item_committed(struct xfs_log_item *lip,
30 xfs_lsn_t commit_lsn, int aborted); 31 xfs_lsn_t commit_lsn, int aborted);
31void xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp); 32void xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp);