author	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit	8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree	a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /fs/ceph/caps.c
parent	406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'fs/ceph/caps.c')
-rw-r--r--	fs/ceph/caps.c	227
1 file changed, 111 insertions, 116 deletions
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index a1d9bb30c1b..8d74ad7ba55 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -236,10 +236,8 @@ static struct ceph_cap *get_cap(struct ceph_mds_client *mdsc,
236 if (!ctx) { 236 if (!ctx) {
237 cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS); 237 cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
238 if (cap) { 238 if (cap) {
239 spin_lock(&mdsc->caps_list_lock);
240 mdsc->caps_use_count++; 239 mdsc->caps_use_count++;
241 mdsc->caps_total_count++; 240 mdsc->caps_total_count++;
242 spin_unlock(&mdsc->caps_list_lock);
243 } 241 }
244 return cap; 242 return cap;
245 } 243 }
@@ -311,7 +309,7 @@ void ceph_reservation_status(struct ceph_fs_client *fsc,
311/* 309/*
312 * Find ceph_cap for given mds, if any. 310 * Find ceph_cap for given mds, if any.
313 * 311 *
314 * Called with i_ceph_lock held. 312 * Called with i_lock held.
315 */ 313 */
316static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds) 314static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
317{ 315{
@@ -334,9 +332,9 @@ struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
334{ 332{
335 struct ceph_cap *cap; 333 struct ceph_cap *cap;
336 334
337 spin_lock(&ci->i_ceph_lock); 335 spin_lock(&ci->vfs_inode.i_lock);
338 cap = __get_cap_for_mds(ci, mds); 336 cap = __get_cap_for_mds(ci, mds);
339 spin_unlock(&ci->i_ceph_lock); 337 spin_unlock(&ci->vfs_inode.i_lock);
340 return cap; 338 return cap;
341} 339}
342 340
@@ -363,16 +361,15 @@ static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
363 361
364int ceph_get_cap_mds(struct inode *inode) 362int ceph_get_cap_mds(struct inode *inode)
365{ 363{
366 struct ceph_inode_info *ci = ceph_inode(inode);
367 int mds; 364 int mds;
368 spin_lock(&ci->i_ceph_lock); 365 spin_lock(&inode->i_lock);
369 mds = __ceph_get_cap_mds(ceph_inode(inode)); 366 mds = __ceph_get_cap_mds(ceph_inode(inode));
370 spin_unlock(&ci->i_ceph_lock); 367 spin_unlock(&inode->i_lock);
371 return mds; 368 return mds;
372} 369}
373 370
374/* 371/*
375 * Called under i_ceph_lock. 372 * Called under i_lock.
376 */ 373 */
377static void __insert_cap_node(struct ceph_inode_info *ci, 374static void __insert_cap_node(struct ceph_inode_info *ci,
378 struct ceph_cap *new) 375 struct ceph_cap *new)
@@ -418,7 +415,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
418 * 415 *
419 * If I_FLUSH is set, leave the inode at the front of the list. 416 * If I_FLUSH is set, leave the inode at the front of the list.
420 * 417 *
421 * Caller holds i_ceph_lock 418 * Caller holds i_lock
422 * -> we take mdsc->cap_delay_lock 419 * -> we take mdsc->cap_delay_lock
423 */ 420 */
424static void __cap_delay_requeue(struct ceph_mds_client *mdsc, 421static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
@@ -460,7 +457,7 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
460/* 457/*
461 * Cancel delayed work on cap. 458 * Cancel delayed work on cap.
462 * 459 *
463 * Caller must hold i_ceph_lock. 460 * Caller must hold i_lock.
464 */ 461 */
465static void __cap_delay_cancel(struct ceph_mds_client *mdsc, 462static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
466 struct ceph_inode_info *ci) 463 struct ceph_inode_info *ci)
@@ -490,15 +487,17 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
490 ci->i_rdcache_gen++; 487 ci->i_rdcache_gen++;
491 488
492 /* 489 /*
493 * if we are newly issued FILE_SHARED, clear D_COMPLETE; we 490 * if we are newly issued FILE_SHARED, clear I_COMPLETE; we
494 * don't know what happened to this directory while we didn't 491 * don't know what happened to this directory while we didn't
495 * have the cap. 492 * have the cap.
496 */ 493 */
497 if ((issued & CEPH_CAP_FILE_SHARED) && 494 if ((issued & CEPH_CAP_FILE_SHARED) &&
498 (had & CEPH_CAP_FILE_SHARED) == 0) { 495 (had & CEPH_CAP_FILE_SHARED) == 0) {
499 ci->i_shared_gen++; 496 ci->i_shared_gen++;
500 if (S_ISDIR(ci->vfs_inode.i_mode)) 497 if (S_ISDIR(ci->vfs_inode.i_mode)) {
501 ceph_dir_clear_complete(&ci->vfs_inode); 498 dout(" marking %p NOT complete\n", &ci->vfs_inode);
499 ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
500 }
502 } 501 }
503} 502}
504 503
@@ -535,14 +534,14 @@ int ceph_add_cap(struct inode *inode,
535 wanted |= ceph_caps_for_mode(fmode); 534 wanted |= ceph_caps_for_mode(fmode);
536 535
537retry: 536retry:
538 spin_lock(&ci->i_ceph_lock); 537 spin_lock(&inode->i_lock);
539 cap = __get_cap_for_mds(ci, mds); 538 cap = __get_cap_for_mds(ci, mds);
540 if (!cap) { 539 if (!cap) {
541 if (new_cap) { 540 if (new_cap) {
542 cap = new_cap; 541 cap = new_cap;
543 new_cap = NULL; 542 new_cap = NULL;
544 } else { 543 } else {
545 spin_unlock(&ci->i_ceph_lock); 544 spin_unlock(&inode->i_lock);
546 new_cap = get_cap(mdsc, caps_reservation); 545 new_cap = get_cap(mdsc, caps_reservation);
547 if (new_cap == NULL) 546 if (new_cap == NULL)
548 return -ENOMEM; 547 return -ENOMEM;
@@ -628,7 +627,7 @@ retry:
628 627
629 if (fmode >= 0) 628 if (fmode >= 0)
630 __ceph_get_fmode(ci, fmode); 629 __ceph_get_fmode(ci, fmode);
631 spin_unlock(&ci->i_ceph_lock); 630 spin_unlock(&inode->i_lock);
632 wake_up_all(&ci->i_cap_wq); 631 wake_up_all(&ci->i_cap_wq);
633 return 0; 632 return 0;
634} 633}
@@ -643,10 +642,10 @@ static int __cap_is_valid(struct ceph_cap *cap)
643 unsigned long ttl; 642 unsigned long ttl;
644 u32 gen; 643 u32 gen;
645 644
646 spin_lock(&cap->session->s_gen_ttl_lock); 645 spin_lock(&cap->session->s_cap_lock);
647 gen = cap->session->s_cap_gen; 646 gen = cap->session->s_cap_gen;
648 ttl = cap->session->s_cap_ttl; 647 ttl = cap->session->s_cap_ttl;
649 spin_unlock(&cap->session->s_gen_ttl_lock); 648 spin_unlock(&cap->session->s_cap_lock);
650 649
651 if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) { 650 if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
652 dout("__cap_is_valid %p cap %p issued %s " 651 dout("__cap_is_valid %p cap %p issued %s "
@@ -795,7 +794,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
795 struct rb_node *p; 794 struct rb_node *p;
796 int ret = 0; 795 int ret = 0;
797 796
798 spin_lock(&ci->i_ceph_lock); 797 spin_lock(&inode->i_lock);
799 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { 798 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
800 cap = rb_entry(p, struct ceph_cap, ci_node); 799 cap = rb_entry(p, struct ceph_cap, ci_node);
801 if (__cap_is_valid(cap) && 800 if (__cap_is_valid(cap) &&
@@ -804,7 +803,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
804 break; 803 break;
805 } 804 }
806 } 805 }
807 spin_unlock(&ci->i_ceph_lock); 806 spin_unlock(&inode->i_lock);
808 dout("ceph_caps_revoking %p %s = %d\n", inode, 807 dout("ceph_caps_revoking %p %s = %d\n", inode,
809 ceph_cap_string(mask), ret); 808 ceph_cap_string(mask), ret);
810 return ret; 809 return ret;
@@ -858,7 +857,7 @@ int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
858} 857}
859 858
860/* 859/*
861 * called under i_ceph_lock 860 * called under i_lock
862 */ 861 */
863static int __ceph_is_any_caps(struct ceph_inode_info *ci) 862static int __ceph_is_any_caps(struct ceph_inode_info *ci)
864{ 863{
@@ -868,7 +867,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
868/* 867/*
869 * Remove a cap. Take steps to deal with a racing iterate_session_caps. 868 * Remove a cap. Take steps to deal with a racing iterate_session_caps.
870 * 869 *
871 * caller should hold i_ceph_lock. 870 * caller should hold i_lock.
872 * caller will not hold session s_mutex if called from destroy_inode. 871 * caller will not hold session s_mutex if called from destroy_inode.
873 */ 872 */
874void __ceph_remove_cap(struct ceph_cap *cap) 873void __ceph_remove_cap(struct ceph_cap *cap)
@@ -930,7 +929,7 @@ static int send_cap_msg(struct ceph_mds_session *session,
930 u64 size, u64 max_size, 929 u64 size, u64 max_size,
931 struct timespec *mtime, struct timespec *atime, 930 struct timespec *mtime, struct timespec *atime,
932 u64 time_warp_seq, 931 u64 time_warp_seq,
933 uid_t uid, gid_t gid, umode_t mode, 932 uid_t uid, gid_t gid, mode_t mode,
934 u64 xattr_version, 933 u64 xattr_version,
935 struct ceph_buffer *xattrs_buf, 934 struct ceph_buffer *xattrs_buf,
936 u64 follows) 935 u64 follows)
@@ -946,7 +945,7 @@ static int send_cap_msg(struct ceph_mds_session *session,
946 seq, issue_seq, mseq, follows, size, max_size, 945 seq, issue_seq, mseq, follows, size, max_size,
947 xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0); 946 xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
948 947
949 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), GFP_NOFS, false); 948 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), GFP_NOFS);
950 if (!msg) 949 if (!msg)
951 return -ENOMEM; 950 return -ENOMEM;
952 951
@@ -1007,7 +1006,7 @@ static void __queue_cap_release(struct ceph_mds_session *session,
1007 1006
1008 BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE); 1007 BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
1009 head = msg->front.iov_base; 1008 head = msg->front.iov_base;
1010 le32_add_cpu(&head->num, 1); 1009 head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
1011 item = msg->front.iov_base + msg->front.iov_len; 1010 item = msg->front.iov_base + msg->front.iov_len;
1012 item->ino = cpu_to_le64(ino); 1011 item->ino = cpu_to_le64(ino);
1013 item->cap_id = cpu_to_le64(cap_id); 1012 item->cap_id = cpu_to_le64(cap_id);
@@ -1031,7 +1030,7 @@ static void __queue_cap_release(struct ceph_mds_session *session,
1031 1030
1032/* 1031/*
1033 * Queue cap releases when an inode is dropped from our cache. Since 1032 * Queue cap releases when an inode is dropped from our cache. Since
1034 * inode is about to be destroyed, there is no need for i_ceph_lock. 1033 * inode is about to be destroyed, there is no need for i_lock.
1035 */ 1034 */
1036void ceph_queue_caps_release(struct inode *inode) 1035void ceph_queue_caps_release(struct inode *inode)
1037{ 1036{
@@ -1052,7 +1051,7 @@ void ceph_queue_caps_release(struct inode *inode)
1052 1051
1053/* 1052/*
1054 * Send a cap msg on the given inode. Update our caps state, then 1053 * Send a cap msg on the given inode. Update our caps state, then
1055 * drop i_ceph_lock and send the message. 1054 * drop i_lock and send the message.
1056 * 1055 *
1057 * Make note of max_size reported/requested from mds, revoked caps 1056 * Make note of max_size reported/requested from mds, revoked caps
1058 * that have now been implemented. 1057 * that have now been implemented.
@@ -1064,13 +1063,13 @@ void ceph_queue_caps_release(struct inode *inode)
1064 * Return non-zero if delayed release, or we experienced an error 1063 * Return non-zero if delayed release, or we experienced an error
1065 * such that the caller should requeue + retry later. 1064 * such that the caller should requeue + retry later.
1066 * 1065 *
1067 * called with i_ceph_lock, then drops it. 1066 * called with i_lock, then drops it.
1068 * caller should hold snap_rwsem (read), s_mutex. 1067 * caller should hold snap_rwsem (read), s_mutex.
1069 */ 1068 */
1070static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, 1069static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1071 int op, int used, int want, int retain, int flushing, 1070 int op, int used, int want, int retain, int flushing,
1072 unsigned *pflush_tid) 1071 unsigned *pflush_tid)
1073 __releases(cap->ci->i_ceph_lock) 1072 __releases(cap->ci->vfs_inode->i_lock)
1074{ 1073{
1075 struct ceph_inode_info *ci = cap->ci; 1074 struct ceph_inode_info *ci = cap->ci;
1076 struct inode *inode = &ci->vfs_inode; 1075 struct inode *inode = &ci->vfs_inode;
@@ -1080,7 +1079,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1080 u64 size, max_size; 1079 u64 size, max_size;
1081 struct timespec mtime, atime; 1080 struct timespec mtime, atime;
1082 int wake = 0; 1081 int wake = 0;
1083 umode_t mode; 1082 mode_t mode;
1084 uid_t uid; 1083 uid_t uid;
1085 gid_t gid; 1084 gid_t gid;
1086 struct ceph_mds_session *session; 1085 struct ceph_mds_session *session;
@@ -1173,7 +1172,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1173 xattr_version = ci->i_xattrs.version; 1172 xattr_version = ci->i_xattrs.version;
1174 } 1173 }
1175 1174
1176 spin_unlock(&ci->i_ceph_lock); 1175 spin_unlock(&inode->i_lock);
1177 1176
1178 ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id, 1177 ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
1179 op, keep, want, flushing, seq, flush_tid, issue_seq, mseq, 1178 op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
@@ -1201,13 +1200,13 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1201 * Unless @again is true, skip cap_snaps that were already sent to 1200 * Unless @again is true, skip cap_snaps that were already sent to
1202 * the MDS (i.e., during this session). 1201 * the MDS (i.e., during this session).
1203 * 1202 *
1204 * Called under i_ceph_lock. Takes s_mutex as needed. 1203 * Called under i_lock. Takes s_mutex as needed.
1205 */ 1204 */
1206void __ceph_flush_snaps(struct ceph_inode_info *ci, 1205void __ceph_flush_snaps(struct ceph_inode_info *ci,
1207 struct ceph_mds_session **psession, 1206 struct ceph_mds_session **psession,
1208 int again) 1207 int again)
1209 __releases(ci->i_ceph_lock) 1208 __releases(ci->vfs_inode->i_lock)
1210 __acquires(ci->i_ceph_lock) 1209 __acquires(ci->vfs_inode->i_lock)
1211{ 1210{
1212 struct inode *inode = &ci->vfs_inode; 1211 struct inode *inode = &ci->vfs_inode;
1213 int mds; 1212 int mds;
@@ -1264,7 +1263,7 @@ retry:
1264 session = NULL; 1263 session = NULL;
1265 } 1264 }
1266 if (!session) { 1265 if (!session) {
1267 spin_unlock(&ci->i_ceph_lock); 1266 spin_unlock(&inode->i_lock);
1268 mutex_lock(&mdsc->mutex); 1267 mutex_lock(&mdsc->mutex);
1269 session = __ceph_lookup_mds_session(mdsc, mds); 1268 session = __ceph_lookup_mds_session(mdsc, mds);
1270 mutex_unlock(&mdsc->mutex); 1269 mutex_unlock(&mdsc->mutex);
@@ -1278,7 +1277,7 @@ retry:
1278 * deletion or migration. retry, and we'll 1277 * deletion or migration. retry, and we'll
1279 * get a better @mds value next time. 1278 * get a better @mds value next time.
1280 */ 1279 */
1281 spin_lock(&ci->i_ceph_lock); 1280 spin_lock(&inode->i_lock);
1282 goto retry; 1281 goto retry;
1283 } 1282 }
1284 1283
@@ -1288,7 +1287,7 @@ retry:
1288 list_del_init(&capsnap->flushing_item); 1287 list_del_init(&capsnap->flushing_item);
1289 list_add_tail(&capsnap->flushing_item, 1288 list_add_tail(&capsnap->flushing_item,
1290 &session->s_cap_snaps_flushing); 1289 &session->s_cap_snaps_flushing);
1291 spin_unlock(&ci->i_ceph_lock); 1290 spin_unlock(&inode->i_lock);
1292 1291
1293 dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n", 1292 dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
1294 inode, capsnap, capsnap->follows, capsnap->flush_tid); 1293 inode, capsnap, capsnap->follows, capsnap->flush_tid);
@@ -1305,7 +1304,7 @@ retry:
1305 next_follows = capsnap->follows + 1; 1304 next_follows = capsnap->follows + 1;
1306 ceph_put_cap_snap(capsnap); 1305 ceph_put_cap_snap(capsnap);
1307 1306
1308 spin_lock(&ci->i_ceph_lock); 1307 spin_lock(&inode->i_lock);
1309 goto retry; 1308 goto retry;
1310 } 1309 }
1311 1310
@@ -1325,9 +1324,11 @@ out:
1325 1324
1326static void ceph_flush_snaps(struct ceph_inode_info *ci) 1325static void ceph_flush_snaps(struct ceph_inode_info *ci)
1327{ 1326{
1328 spin_lock(&ci->i_ceph_lock); 1327 struct inode *inode = &ci->vfs_inode;
1328
1329 spin_lock(&inode->i_lock);
1329 __ceph_flush_snaps(ci, NULL, 0); 1330 __ceph_flush_snaps(ci, NULL, 0);
1330 spin_unlock(&ci->i_ceph_lock); 1331 spin_unlock(&inode->i_lock);
1331} 1332}
1332 1333
1333/* 1334/*
@@ -1351,15 +1352,11 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
1351 if (!ci->i_head_snapc) 1352 if (!ci->i_head_snapc)
1352 ci->i_head_snapc = ceph_get_snap_context( 1353 ci->i_head_snapc = ceph_get_snap_context(
1353 ci->i_snap_realm->cached_context); 1354 ci->i_snap_realm->cached_context);
1354 dout(" inode %p now dirty snapc %p auth cap %p\n", 1355 dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
1355 &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap); 1356 ci->i_head_snapc);
1356 BUG_ON(!list_empty(&ci->i_dirty_item)); 1357 BUG_ON(!list_empty(&ci->i_dirty_item));
1357 spin_lock(&mdsc->cap_dirty_lock); 1358 spin_lock(&mdsc->cap_dirty_lock);
1358 if (ci->i_auth_cap) 1359 list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
1359 list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
1360 else
1361 list_add(&ci->i_dirty_item,
1362 &mdsc->cap_dirty_migrating);
1363 spin_unlock(&mdsc->cap_dirty_lock); 1360 spin_unlock(&mdsc->cap_dirty_lock);
1364 if (ci->i_flushing_caps == 0) { 1361 if (ci->i_flushing_caps == 0) {
1365 ihold(inode); 1362 ihold(inode);
@@ -1378,7 +1375,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
1378 * Add dirty inode to the flushing list. Assigned a seq number so we 1375 * Add dirty inode to the flushing list. Assigned a seq number so we
1379 * can wait for caps to flush without starving. 1376 * can wait for caps to flush without starving.
1380 * 1377 *
1381 * Called under i_ceph_lock. 1378 * Called under i_lock.
1382 */ 1379 */
1383static int __mark_caps_flushing(struct inode *inode, 1380static int __mark_caps_flushing(struct inode *inode,
1384 struct ceph_mds_session *session) 1381 struct ceph_mds_session *session)
@@ -1426,9 +1423,9 @@ static int try_nonblocking_invalidate(struct inode *inode)
1426 struct ceph_inode_info *ci = ceph_inode(inode); 1423 struct ceph_inode_info *ci = ceph_inode(inode);
1427 u32 invalidating_gen = ci->i_rdcache_gen; 1424 u32 invalidating_gen = ci->i_rdcache_gen;
1428 1425
1429 spin_unlock(&ci->i_ceph_lock); 1426 spin_unlock(&inode->i_lock);
1430 invalidate_mapping_pages(&inode->i_data, 0, -1); 1427 invalidate_mapping_pages(&inode->i_data, 0, -1);
1431 spin_lock(&ci->i_ceph_lock); 1428 spin_lock(&inode->i_lock);
1432 1429
1433 if (inode->i_data.nrpages == 0 && 1430 if (inode->i_data.nrpages == 0 &&
1434 invalidating_gen == ci->i_rdcache_gen) { 1431 invalidating_gen == ci->i_rdcache_gen) {
@@ -1475,7 +1472,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1475 if (mdsc->stopping) 1472 if (mdsc->stopping)
1476 is_delayed = 1; 1473 is_delayed = 1;
1477 1474
1478 spin_lock(&ci->i_ceph_lock); 1475 spin_lock(&inode->i_lock);
1479 1476
1480 if (ci->i_ceph_flags & CEPH_I_FLUSH) 1477 if (ci->i_ceph_flags & CEPH_I_FLUSH)
1481 flags |= CHECK_CAPS_FLUSH; 1478 flags |= CHECK_CAPS_FLUSH;
@@ -1485,7 +1482,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1485 __ceph_flush_snaps(ci, &session, 0); 1482 __ceph_flush_snaps(ci, &session, 0);
1486 goto retry_locked; 1483 goto retry_locked;
1487retry: 1484retry:
1488 spin_lock(&ci->i_ceph_lock); 1485 spin_lock(&inode->i_lock);
1489retry_locked: 1486retry_locked:
1490 file_wanted = __ceph_caps_file_wanted(ci); 1487 file_wanted = __ceph_caps_file_wanted(ci);
1491 used = __ceph_caps_used(ci); 1488 used = __ceph_caps_used(ci);
@@ -1639,7 +1636,7 @@ ack:
1639 if (mutex_trylock(&session->s_mutex) == 0) { 1636 if (mutex_trylock(&session->s_mutex) == 0) {
1640 dout("inverting session/ino locks on %p\n", 1637 dout("inverting session/ino locks on %p\n",
1641 session); 1638 session);
1642 spin_unlock(&ci->i_ceph_lock); 1639 spin_unlock(&inode->i_lock);
1643 if (took_snap_rwsem) { 1640 if (took_snap_rwsem) {
1644 up_read(&mdsc->snap_rwsem); 1641 up_read(&mdsc->snap_rwsem);
1645 took_snap_rwsem = 0; 1642 took_snap_rwsem = 0;
@@ -1653,7 +1650,7 @@ ack:
1653 if (down_read_trylock(&mdsc->snap_rwsem) == 0) { 1650 if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
1654 dout("inverting snap/in locks on %p\n", 1651 dout("inverting snap/in locks on %p\n",
1655 inode); 1652 inode);
1656 spin_unlock(&ci->i_ceph_lock); 1653 spin_unlock(&inode->i_lock);
1657 down_read(&mdsc->snap_rwsem); 1654 down_read(&mdsc->snap_rwsem);
1658 took_snap_rwsem = 1; 1655 took_snap_rwsem = 1;
1659 goto retry; 1656 goto retry;
@@ -1669,10 +1666,10 @@ ack:
1669 mds = cap->mds; /* remember mds, so we don't repeat */ 1666 mds = cap->mds; /* remember mds, so we don't repeat */
1670 sent++; 1667 sent++;
1671 1668
1672 /* __send_cap drops i_ceph_lock */ 1669 /* __send_cap drops i_lock */
1673 delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want, 1670 delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
1674 retain, flushing, NULL); 1671 retain, flushing, NULL);
1675 goto retry; /* retake i_ceph_lock and restart our cap scan. */ 1672 goto retry; /* retake i_lock and restart our cap scan. */
1676 } 1673 }
1677 1674
1678 /* 1675 /*
@@ -1686,7 +1683,7 @@ ack:
1686 else if (!is_delayed || force_requeue) 1683 else if (!is_delayed || force_requeue)
1687 __cap_delay_requeue(mdsc, ci); 1684 __cap_delay_requeue(mdsc, ci);
1688 1685
1689 spin_unlock(&ci->i_ceph_lock); 1686 spin_unlock(&inode->i_lock);
1690 1687
1691 if (queue_invalidate) 1688 if (queue_invalidate)
1692 ceph_queue_invalidate(inode); 1689 ceph_queue_invalidate(inode);
@@ -1709,7 +1706,7 @@ static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
1709 int flushing = 0; 1706 int flushing = 0;
1710 1707
1711retry: 1708retry:
1712 spin_lock(&ci->i_ceph_lock); 1709 spin_lock(&inode->i_lock);
1713 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) { 1710 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1714 dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode); 1711 dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1715 goto out; 1712 goto out;
@@ -1721,7 +1718,7 @@ retry:
1721 int delayed; 1718 int delayed;
1722 1719
1723 if (!session) { 1720 if (!session) {
1724 spin_unlock(&ci->i_ceph_lock); 1721 spin_unlock(&inode->i_lock);
1725 session = cap->session; 1722 session = cap->session;
1726 mutex_lock(&session->s_mutex); 1723 mutex_lock(&session->s_mutex);
1727 goto retry; 1724 goto retry;
@@ -1732,18 +1729,18 @@ retry:
1732 1729
1733 flushing = __mark_caps_flushing(inode, session); 1730 flushing = __mark_caps_flushing(inode, session);
1734 1731
1735 /* __send_cap drops i_ceph_lock */ 1732 /* __send_cap drops i_lock */
1736 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want, 1733 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
1737 cap->issued | cap->implemented, flushing, 1734 cap->issued | cap->implemented, flushing,
1738 flush_tid); 1735 flush_tid);
1739 if (!delayed) 1736 if (!delayed)
1740 goto out_unlocked; 1737 goto out_unlocked;
1741 1738
1742 spin_lock(&ci->i_ceph_lock); 1739 spin_lock(&inode->i_lock);
1743 __cap_delay_requeue(mdsc, ci); 1740 __cap_delay_requeue(mdsc, ci);
1744 } 1741 }
1745out: 1742out:
1746 spin_unlock(&ci->i_ceph_lock); 1743 spin_unlock(&inode->i_lock);
1747out_unlocked: 1744out_unlocked:
1748 if (session && unlock_session) 1745 if (session && unlock_session)
1749 mutex_unlock(&session->s_mutex); 1746 mutex_unlock(&session->s_mutex);
@@ -1758,7 +1755,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
1758 struct ceph_inode_info *ci = ceph_inode(inode); 1755 struct ceph_inode_info *ci = ceph_inode(inode);
1759 int i, ret = 1; 1756 int i, ret = 1;
1760 1757
1761 spin_lock(&ci->i_ceph_lock); 1758 spin_lock(&inode->i_lock);
1762 for (i = 0; i < CEPH_CAP_BITS; i++) 1759 for (i = 0; i < CEPH_CAP_BITS; i++)
1763 if ((ci->i_flushing_caps & (1 << i)) && 1760 if ((ci->i_flushing_caps & (1 << i)) &&
1764 ci->i_cap_flush_tid[i] <= tid) { 1761 ci->i_cap_flush_tid[i] <= tid) {
@@ -1766,7 +1763,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
1766 ret = 0; 1763 ret = 0;
1767 break; 1764 break;
1768 } 1765 }
1769 spin_unlock(&ci->i_ceph_lock); 1766 spin_unlock(&inode->i_lock);
1770 return ret; 1767 return ret;
1771} 1768}
1772 1769
@@ -1873,10 +1870,10 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
1873 struct ceph_mds_client *mdsc = 1870 struct ceph_mds_client *mdsc =
1874 ceph_sb_to_client(inode->i_sb)->mdsc; 1871 ceph_sb_to_client(inode->i_sb)->mdsc;
1875 1872
1876 spin_lock(&ci->i_ceph_lock); 1873 spin_lock(&inode->i_lock);
1877 if (__ceph_caps_dirty(ci)) 1874 if (__ceph_caps_dirty(ci))
1878 __cap_delay_requeue_front(mdsc, ci); 1875 __cap_delay_requeue_front(mdsc, ci);
1879 spin_unlock(&ci->i_ceph_lock); 1876 spin_unlock(&inode->i_lock);
1880 } 1877 }
1881 return err; 1878 return err;
1882} 1879}
@@ -1899,7 +1896,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1899 struct inode *inode = &ci->vfs_inode; 1896 struct inode *inode = &ci->vfs_inode;
1900 struct ceph_cap *cap; 1897 struct ceph_cap *cap;
1901 1898
1902 spin_lock(&ci->i_ceph_lock); 1899 spin_lock(&inode->i_lock);
1903 cap = ci->i_auth_cap; 1900 cap = ci->i_auth_cap;
1904 if (cap && cap->session == session) { 1901 if (cap && cap->session == session) {
1905 dout("kick_flushing_caps %p cap %p capsnap %p\n", inode, 1902 dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
@@ -1909,7 +1906,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1909 pr_err("%p auth cap %p not mds%d ???\n", inode, 1906 pr_err("%p auth cap %p not mds%d ???\n", inode,
1910 cap, session->s_mds); 1907 cap, session->s_mds);
1911 } 1908 }
1912 spin_unlock(&ci->i_ceph_lock); 1909 spin_unlock(&inode->i_lock);
1913 } 1910 }
1914} 1911}
1915 1912
@@ -1926,7 +1923,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
1926 struct ceph_cap *cap; 1923 struct ceph_cap *cap;
1927 int delayed = 0; 1924 int delayed = 0;
1928 1925
1929 spin_lock(&ci->i_ceph_lock); 1926 spin_lock(&inode->i_lock);
1930 cap = ci->i_auth_cap; 1927 cap = ci->i_auth_cap;
1931 if (cap && cap->session == session) { 1928 if (cap && cap->session == session) {
1932 dout("kick_flushing_caps %p cap %p %s\n", inode, 1929 dout("kick_flushing_caps %p cap %p %s\n", inode,
@@ -1937,14 +1934,14 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
1937 cap->issued | cap->implemented, 1934 cap->issued | cap->implemented,
1938 ci->i_flushing_caps, NULL); 1935 ci->i_flushing_caps, NULL);
1939 if (delayed) { 1936 if (delayed) {
1940 spin_lock(&ci->i_ceph_lock); 1937 spin_lock(&inode->i_lock);
1941 __cap_delay_requeue(mdsc, ci); 1938 __cap_delay_requeue(mdsc, ci);
1942 spin_unlock(&ci->i_ceph_lock); 1939 spin_unlock(&inode->i_lock);
1943 } 1940 }
1944 } else { 1941 } else {
1945 pr_err("%p auth cap %p not mds%d ???\n", inode, 1942 pr_err("%p auth cap %p not mds%d ???\n", inode,
1946 cap, session->s_mds); 1943 cap, session->s_mds);
1947 spin_unlock(&ci->i_ceph_lock); 1944 spin_unlock(&inode->i_lock);
1948 } 1945 }
1949 } 1946 }
1950} 1947}
@@ -1957,7 +1954,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
1957 struct ceph_cap *cap; 1954 struct ceph_cap *cap;
1958 int delayed = 0; 1955 int delayed = 0;
1959 1956
1960 spin_lock(&ci->i_ceph_lock); 1957 spin_lock(&inode->i_lock);
1961 cap = ci->i_auth_cap; 1958 cap = ci->i_auth_cap;
1962 dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode, 1959 dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
1963 ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq); 1960 ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
@@ -1969,12 +1966,12 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
1969 cap->issued | cap->implemented, 1966 cap->issued | cap->implemented,
1970 ci->i_flushing_caps, NULL); 1967 ci->i_flushing_caps, NULL);
1971 if (delayed) { 1968 if (delayed) {
1972 spin_lock(&ci->i_ceph_lock); 1969 spin_lock(&inode->i_lock);
1973 __cap_delay_requeue(mdsc, ci); 1970 __cap_delay_requeue(mdsc, ci);
1974 spin_unlock(&ci->i_ceph_lock); 1971 spin_unlock(&inode->i_lock);
1975 } 1972 }
1976 } else { 1973 } else {
1977 spin_unlock(&ci->i_ceph_lock); 1974 spin_unlock(&inode->i_lock);
1978 } 1975 }
1979} 1976}
1980 1977
@@ -1983,7 +1980,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
1983 * Take references to capabilities we hold, so that we don't release 1980 * Take references to capabilities we hold, so that we don't release
1984 * them to the MDS prematurely. 1981 * them to the MDS prematurely.
1985 * 1982 *
1986 * Protected by i_ceph_lock. 1983 * Protected by i_lock.
1987 */ 1984 */
1988static void __take_cap_refs(struct ceph_inode_info *ci, int got) 1985static void __take_cap_refs(struct ceph_inode_info *ci, int got)
1989{ 1986{
@@ -2021,7 +2018,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
2021 2018
2022 dout("get_cap_refs %p need %s want %s\n", inode, 2019 dout("get_cap_refs %p need %s want %s\n", inode,
2023 ceph_cap_string(need), ceph_cap_string(want)); 2020 ceph_cap_string(need), ceph_cap_string(want));
2024 spin_lock(&ci->i_ceph_lock); 2021 spin_lock(&inode->i_lock);
2025 2022
2026 /* make sure file is actually open */ 2023 /* make sure file is actually open */
2027 file_wanted = __ceph_caps_file_wanted(ci); 2024 file_wanted = __ceph_caps_file_wanted(ci);
@@ -2082,7 +2079,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
2082 ceph_cap_string(have), ceph_cap_string(need)); 2079 ceph_cap_string(have), ceph_cap_string(need));
2083 } 2080 }
2084out: 2081out:
2085 spin_unlock(&ci->i_ceph_lock); 2082 spin_unlock(&inode->i_lock);
2086 dout("get_cap_refs %p ret %d got %s\n", inode, 2083 dout("get_cap_refs %p ret %d got %s\n", inode,
2087 ret, ceph_cap_string(*got)); 2084 ret, ceph_cap_string(*got));
2088 return ret; 2085 return ret;
@@ -2099,7 +2096,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
2099 int check = 0; 2096 int check = 0;
2100 2097
2101 /* do we need to explicitly request a larger max_size? */ 2098 /* do we need to explicitly request a larger max_size? */
2102 spin_lock(&ci->i_ceph_lock); 2099 spin_lock(&inode->i_lock);
2103 if ((endoff >= ci->i_max_size || 2100 if ((endoff >= ci->i_max_size ||
2104 endoff > (inode->i_size << 1)) && 2101 endoff > (inode->i_size << 1)) &&
2105 endoff > ci->i_wanted_max_size) { 2102 endoff > ci->i_wanted_max_size) {
@@ -2108,7 +2105,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
2108 ci->i_wanted_max_size = endoff; 2105 ci->i_wanted_max_size = endoff;
2109 check = 1; 2106 check = 1;
2110 } 2107 }
2111 spin_unlock(&ci->i_ceph_lock); 2108 spin_unlock(&inode->i_lock);
2112 if (check) 2109 if (check)
2113 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 2110 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2114} 2111}
@@ -2145,9 +2142,9 @@ retry:
2145 */ 2142 */
2146void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps) 2143void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2147{ 2144{
2148 spin_lock(&ci->i_ceph_lock); 2145 spin_lock(&ci->vfs_inode.i_lock);
2149 __take_cap_refs(ci, caps); 2146 __take_cap_refs(ci, caps);
2150 spin_unlock(&ci->i_ceph_lock); 2147 spin_unlock(&ci->vfs_inode.i_lock);
2151} 2148}
2152 2149
2153/* 2150/*
@@ -2165,7 +2162,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2165 int last = 0, put = 0, flushsnaps = 0, wake = 0; 2162 int last = 0, put = 0, flushsnaps = 0, wake = 0;
2166 struct ceph_cap_snap *capsnap; 2163 struct ceph_cap_snap *capsnap;
2167 2164
2168 spin_lock(&ci->i_ceph_lock); 2165 spin_lock(&inode->i_lock);
2169 if (had & CEPH_CAP_PIN) 2166 if (had & CEPH_CAP_PIN)
2170 --ci->i_pin_ref; 2167 --ci->i_pin_ref;
2171 if (had & CEPH_CAP_FILE_RD) 2168 if (had & CEPH_CAP_FILE_RD)
@@ -2198,7 +2195,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2198 } 2195 }
2199 } 2196 }
2200 } 2197 }
2201 spin_unlock(&ci->i_ceph_lock); 2198 spin_unlock(&inode->i_lock);
2202 2199
2203 dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had), 2200 dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2204 last ? " last" : "", put ? " put" : ""); 2201 last ? " last" : "", put ? " put" : "");
@@ -2230,7 +2227,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2230 int found = 0; 2227 int found = 0;
2231 struct ceph_cap_snap *capsnap = NULL; 2228 struct ceph_cap_snap *capsnap = NULL;
2232 2229
2233 spin_lock(&ci->i_ceph_lock); 2230 spin_lock(&inode->i_lock);
2234 ci->i_wrbuffer_ref -= nr; 2231 ci->i_wrbuffer_ref -= nr;
2235 last = !ci->i_wrbuffer_ref; 2232 last = !ci->i_wrbuffer_ref;
2236 2233
@@ -2279,7 +2276,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2279 } 2276 }
2280 } 2277 }
2281 2278
2282 spin_unlock(&ci->i_ceph_lock); 2279 spin_unlock(&inode->i_lock);
2283 2280
2284 if (last) { 2281 if (last) {
2285 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 2282 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -2296,7 +2293,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2296 * Handle a cap GRANT message from the MDS. (Note that a GRANT may 2293 * Handle a cap GRANT message from the MDS. (Note that a GRANT may
2297 * actually be a revocation if it specifies a smaller cap set.) 2294 * actually be a revocation if it specifies a smaller cap set.)
2298 * 2295 *
2299 * caller holds s_mutex and i_ceph_lock, we drop both. 2296 * caller holds s_mutex and i_lock, we drop both.
2300 * 2297 *
2301 * return value: 2298 * return value:
2302 * 0 - ok 2299 * 0 - ok
@@ -2307,7 +2304,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2307 struct ceph_mds_session *session, 2304 struct ceph_mds_session *session,
2308 struct ceph_cap *cap, 2305 struct ceph_cap *cap,
2309 struct ceph_buffer *xattr_buf) 2306 struct ceph_buffer *xattr_buf)
2310 __releases(ci->i_ceph_lock) 2307 __releases(inode->i_lock)
2311{ 2308{
2312 struct ceph_inode_info *ci = ceph_inode(inode); 2309 struct ceph_inode_info *ci = ceph_inode(inode);
2313 int mds = session->s_mds; 2310 int mds = session->s_mds;
@@ -2366,7 +2363,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2366 } 2363 }
2367 2364
2368 if ((issued & CEPH_CAP_LINK_EXCL) == 0) 2365 if ((issued & CEPH_CAP_LINK_EXCL) == 0)
2369 set_nlink(inode, le32_to_cpu(grant->nlink)); 2366 inode->i_nlink = le32_to_cpu(grant->nlink);
2370 2367
2371 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) { 2368 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
2372 int len = le32_to_cpu(grant->xattr_len); 2369 int len = le32_to_cpu(grant->xattr_len);
@@ -2394,7 +2391,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2394 &atime); 2391 &atime);
2395 2392
2396 /* max size increase? */ 2393 /* max size increase? */
2397 if (ci->i_auth_cap == cap && max_size != ci->i_max_size) { 2394 if (max_size != ci->i_max_size) {
2398 dout("max_size %lld -> %llu\n", ci->i_max_size, max_size); 2395 dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
2399 ci->i_max_size = max_size; 2396 ci->i_max_size = max_size;
2400 if (max_size >= ci->i_wanted_max_size) { 2397 if (max_size >= ci->i_wanted_max_size) {
@@ -2458,7 +2455,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2458 } 2455 }
2459 BUG_ON(cap->issued & ~cap->implemented); 2456 BUG_ON(cap->issued & ~cap->implemented);
2460 2457
2461 spin_unlock(&ci->i_ceph_lock); 2458 spin_unlock(&inode->i_lock);
2462 if (writeback) 2459 if (writeback)
2463 /* 2460 /*
2464 * queue inode for writeback: we can't actually call 2461 * queue inode for writeback: we can't actually call
@@ -2488,7 +2485,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
2488 struct ceph_mds_caps *m, 2485 struct ceph_mds_caps *m,
2489 struct ceph_mds_session *session, 2486 struct ceph_mds_session *session,
2490 struct ceph_cap *cap) 2487 struct ceph_cap *cap)
2491 __releases(ci->i_ceph_lock) 2488 __releases(inode->i_lock)
2492{ 2489{
2493 struct ceph_inode_info *ci = ceph_inode(inode); 2490 struct ceph_inode_info *ci = ceph_inode(inode);
2494 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 2491 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -2544,7 +2541,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
2544 wake_up_all(&ci->i_cap_wq); 2541 wake_up_all(&ci->i_cap_wq);
2545 2542
2546out: 2543out:
2547 spin_unlock(&ci->i_ceph_lock); 2544 spin_unlock(&inode->i_lock);
2548 if (drop) 2545 if (drop)
2549 iput(inode); 2546 iput(inode);
2550} 2547}
@@ -2567,7 +2564,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
2567 dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n", 2564 dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
2568 inode, ci, session->s_mds, follows); 2565 inode, ci, session->s_mds, follows);
2569 2566
2570 spin_lock(&ci->i_ceph_lock); 2567 spin_lock(&inode->i_lock);
2571 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 2568 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2572 if (capsnap->follows == follows) { 2569 if (capsnap->follows == follows) {
2573 if (capsnap->flush_tid != flush_tid) { 2570 if (capsnap->flush_tid != flush_tid) {
@@ -2590,7 +2587,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
2590 capsnap, capsnap->follows); 2587 capsnap, capsnap->follows);
2591 } 2588 }
2592 } 2589 }
2593 spin_unlock(&ci->i_ceph_lock); 2590 spin_unlock(&inode->i_lock);
2594 if (drop) 2591 if (drop)
2595 iput(inode); 2592 iput(inode);
2596} 2593}
@@ -2603,7 +2600,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
2603static void handle_cap_trunc(struct inode *inode, 2600static void handle_cap_trunc(struct inode *inode,
2604 struct ceph_mds_caps *trunc, 2601 struct ceph_mds_caps *trunc,
2605 struct ceph_mds_session *session) 2602 struct ceph_mds_session *session)
2606 __releases(ci->i_ceph_lock) 2603 __releases(inode->i_lock)
2607{ 2604{
2608 struct ceph_inode_info *ci = ceph_inode(inode); 2605 struct ceph_inode_info *ci = ceph_inode(inode);
2609 int mds = session->s_mds; 2606 int mds = session->s_mds;
@@ -2622,7 +2619,7 @@ static void handle_cap_trunc(struct inode *inode,
2622 inode, mds, seq, truncate_size, truncate_seq); 2619 inode, mds, seq, truncate_size, truncate_seq);
2623 queue_trunc = ceph_fill_file_size(inode, issued, 2620 queue_trunc = ceph_fill_file_size(inode, issued,
2624 truncate_seq, truncate_size, size); 2621 truncate_seq, truncate_size, size);
2625 spin_unlock(&ci->i_ceph_lock); 2622 spin_unlock(&inode->i_lock);
2626 2623
2627 if (queue_trunc) 2624 if (queue_trunc)
2628 ceph_queue_vmtruncate(inode); 2625 ceph_queue_vmtruncate(inode);
@@ -2651,7 +2648,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2651 dout("handle_cap_export inode %p ci %p mds%d mseq %d\n", 2648 dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
2652 inode, ci, mds, mseq); 2649 inode, ci, mds, mseq);
2653 2650
2654 spin_lock(&ci->i_ceph_lock); 2651 spin_lock(&inode->i_lock);
2655 2652
2656 /* make sure we haven't seen a higher mseq */ 2653 /* make sure we haven't seen a higher mseq */
2657 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { 2654 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
@@ -2695,7 +2692,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2695 } 2692 }
2696 /* else, we already released it */ 2693 /* else, we already released it */
2697 2694
2698 spin_unlock(&ci->i_ceph_lock); 2695 spin_unlock(&inode->i_lock);
2699} 2696}
2700 2697
2701/* 2698/*
@@ -2750,10 +2747,9 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
2750 up_read(&mdsc->snap_rwsem); 2747 up_read(&mdsc->snap_rwsem);
2751 2748
2752 /* make sure we re-request max_size, if necessary */ 2749 /* make sure we re-request max_size, if necessary */
2753 spin_lock(&ci->i_ceph_lock); 2750 spin_lock(&inode->i_lock);
2754 ci->i_wanted_max_size = 0; /* reset */
2755 ci->i_requested_max_size = 0; 2751 ci->i_requested_max_size = 0;
2756 spin_unlock(&ci->i_ceph_lock); 2752 spin_unlock(&inode->i_lock);
2757} 2753}
2758 2754
2759/* 2755/*
@@ -2768,7 +2764,6 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2768 struct ceph_mds_client *mdsc = session->s_mdsc; 2764 struct ceph_mds_client *mdsc = session->s_mdsc;
2769 struct super_block *sb = mdsc->fsc->sb; 2765 struct super_block *sb = mdsc->fsc->sb;
2770 struct inode *inode; 2766 struct inode *inode;
2771 struct ceph_inode_info *ci;
2772 struct ceph_cap *cap; 2767 struct ceph_cap *cap;
2773 struct ceph_mds_caps *h; 2768 struct ceph_mds_caps *h;
2774 int mds = session->s_mds; 2769 int mds = session->s_mds;
@@ -2822,7 +2817,6 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2822 2817
2823 /* lookup ino */ 2818 /* lookup ino */
2824 inode = ceph_find_inode(sb, vino); 2819 inode = ceph_find_inode(sb, vino);
2825 ci = ceph_inode(inode);
2826 dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino, 2820 dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
2827 vino.snap, inode); 2821 vino.snap, inode);
2828 if (!inode) { 2822 if (!inode) {
@@ -2847,23 +2841,24 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2847 case CEPH_CAP_OP_IMPORT: 2841 case CEPH_CAP_OP_IMPORT:
2848 handle_cap_import(mdsc, inode, h, session, 2842 handle_cap_import(mdsc, inode, h, session,
2849 snaptrace, snaptrace_len); 2843 snaptrace, snaptrace_len);
2844 ceph_check_caps(ceph_inode(inode), 0, session);
2845 goto done_unlocked;
2850 } 2846 }
2851 2847
2852 /* the rest require a cap */ 2848 /* the rest require a cap */
2853 spin_lock(&ci->i_ceph_lock); 2849 spin_lock(&inode->i_lock);
2854 cap = __get_cap_for_mds(ceph_inode(inode), mds); 2850 cap = __get_cap_for_mds(ceph_inode(inode), mds);
2855 if (!cap) { 2851 if (!cap) {
2856 dout(" no cap on %p ino %llx.%llx from mds%d\n", 2852 dout(" no cap on %p ino %llx.%llx from mds%d\n",
2857 inode, ceph_ino(inode), ceph_snap(inode), mds); 2853 inode, ceph_ino(inode), ceph_snap(inode), mds);
2858 spin_unlock(&ci->i_ceph_lock); 2854 spin_unlock(&inode->i_lock);
2859 goto flush_cap_releases; 2855 goto flush_cap_releases;
2860 } 2856 }
2861 2857
2862 /* note that each of these drops i_ceph_lock for us */ 2858 /* note that each of these drops i_lock for us */
2863 switch (op) { 2859 switch (op) {
2864 case CEPH_CAP_OP_REVOKE: 2860 case CEPH_CAP_OP_REVOKE:
2865 case CEPH_CAP_OP_GRANT: 2861 case CEPH_CAP_OP_GRANT:
2866 case CEPH_CAP_OP_IMPORT:
2867 handle_cap_grant(inode, h, session, cap, msg->middle); 2862 handle_cap_grant(inode, h, session, cap, msg->middle);
2868 goto done_unlocked; 2863 goto done_unlocked;
2869 2864
@@ -2876,7 +2871,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2876 break; 2871 break;
2877 2872
2878 default: 2873 default:
2879 spin_unlock(&ci->i_ceph_lock); 2874 spin_unlock(&inode->i_lock);
2880 pr_err("ceph_handle_caps: unknown cap op %d %s\n", op, 2875 pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
2881 ceph_cap_op_name(op)); 2876 ceph_cap_op_name(op));
2882 } 2877 }
@@ -2969,13 +2964,13 @@ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
2969 struct inode *inode = &ci->vfs_inode; 2964 struct inode *inode = &ci->vfs_inode;
2970 int last = 0; 2965 int last = 0;
2971 2966
2972 spin_lock(&ci->i_ceph_lock); 2967 spin_lock(&inode->i_lock);
2973 dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode, 2968 dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
2974 ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1); 2969 ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
2975 BUG_ON(ci->i_nr_by_mode[fmode] == 0); 2970 BUG_ON(ci->i_nr_by_mode[fmode] == 0);
2976 if (--ci->i_nr_by_mode[fmode] == 0) 2971 if (--ci->i_nr_by_mode[fmode] == 0)
2977 last++; 2972 last++;
2978 spin_unlock(&ci->i_ceph_lock); 2973 spin_unlock(&inode->i_lock);
2979 2974
2980 if (last && ci->i_vino.snap == CEPH_NOSNAP) 2975 if (last && ci->i_vino.snap == CEPH_NOSNAP)
2981 ceph_check_caps(ci, 0, NULL); 2976 ceph_check_caps(ci, 0, NULL);
@@ -2998,7 +2993,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
2998 int used, dirty; 2993 int used, dirty;
2999 int ret = 0; 2994 int ret = 0;
3000 2995
3001 spin_lock(&ci->i_ceph_lock); 2996 spin_lock(&inode->i_lock);
3002 used = __ceph_caps_used(ci); 2997 used = __ceph_caps_used(ci);
3003 dirty = __ceph_caps_dirty(ci); 2998 dirty = __ceph_caps_dirty(ci);
3004 2999
@@ -3053,7 +3048,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
3053 inode, cap, ceph_cap_string(cap->issued)); 3048 inode, cap, ceph_cap_string(cap->issued));
3054 } 3049 }
3055 } 3050 }
3056 spin_unlock(&ci->i_ceph_lock); 3051 spin_unlock(&inode->i_lock);
3057 return ret; 3052 return ret;
3058} 3053}
3059 3054
@@ -3068,7 +3063,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
3068 3063
3069 /* 3064 /*
3070 * force an record for the directory caps if we have a dentry lease. 3065 * force an record for the directory caps if we have a dentry lease.
3071 * this is racy (can't take i_ceph_lock and d_lock together), but it 3066 * this is racy (can't take i_lock and d_lock together), but it
3072 * doesn't have to be perfect; the mds will revoke anything we don't 3067 * doesn't have to be perfect; the mds will revoke anything we don't
3073 * release. 3068 * release.
3074 */ 3069 */