author    Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
commit    8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree      a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /fs/ceph/inode.c
parent    406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'fs/ceph/inode.c')
-rw-r--r--  fs/ceph/inode.c | 165
1 file changed, 97 insertions(+), 68 deletions(-)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 2971eaa65cd..095799ba9dd 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -9,6 +9,7 @@
 #include <linux/namei.h>
 #include <linux/writeback.h>
 #include <linux/vmalloc.h>
+#include <linux/pagevec.h>
 
 #include "super.h"
 #include "mds_client.h"
@@ -297,8 +298,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 
         dout("alloc_inode %p\n", &ci->vfs_inode);
 
-        spin_lock_init(&ci->i_ceph_lock);
-
         ci->i_version = 0;
         ci->i_time_warp_seq = 0;
         ci->i_ceph_flags = 0;
@@ -384,6 +383,7 @@ static void ceph_i_callback(struct rcu_head *head)
         struct inode *inode = container_of(head, struct inode, i_rcu);
         struct ceph_inode_info *ci = ceph_inode(inode);
 
+        INIT_LIST_HEAD(&inode->i_dentry);
         kmem_cache_free(ceph_inode_cachep, ci);
 }
 
@@ -584,7 +584,7 @@ static int fill_inode(struct inode *inode,
                                iinfo->xattr_len);
         }
 
-        spin_lock(&ci->i_ceph_lock);
+        spin_lock(&inode->i_lock);
 
         /*
          * provided version will be odd if inode value is projected,
@@ -619,7 +619,7 @@ static int fill_inode(struct inode *inode,
         }
 
         if ((issued & CEPH_CAP_LINK_EXCL) == 0)
-                set_nlink(inode, le32_to_cpu(info->nlink));
+                inode->i_nlink = le32_to_cpu(info->nlink);
 
         /* be careful with mtime, atime, size */
         ceph_decode_timespec(&atime, &info->atime);
@@ -677,21 +677,20 @@ static int fill_inode(struct inode *inode,
         case S_IFLNK:
                 inode->i_op = &ceph_symlink_iops;
                 if (!ci->i_symlink) {
-                        u32 symlen = iinfo->symlink_len;
+                        int symlen = iinfo->symlink_len;
                         char *sym;
 
-                        spin_unlock(&ci->i_ceph_lock);
-
-                        err = -EINVAL;
-                        if (WARN_ON(symlen != inode->i_size))
-                                goto out;
+                        BUG_ON(symlen != inode->i_size);
+                        spin_unlock(&inode->i_lock);
 
                         err = -ENOMEM;
-                        sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
+                        sym = kmalloc(symlen+1, GFP_NOFS);
                         if (!sym)
                                 goto out;
+                        memcpy(sym, iinfo->symlink, symlen);
+                        sym[symlen] = 0;
 
-                        spin_lock(&ci->i_ceph_lock);
+                        spin_lock(&inode->i_lock);
                         if (!ci->i_symlink)
                                 ci->i_symlink = sym;
                         else
@@ -717,7 +716,7 @@ static int fill_inode(struct inode *inode,
         }
 
 no_change:
-        spin_unlock(&ci->i_ceph_lock);
+        spin_unlock(&inode->i_lock);
 
         /* queue truncate if we saw i_size decrease */
         if (queue_trunc)
@@ -752,13 +751,13 @@ no_change:
                                      info->cap.flags,
                                      caps_reservation);
                 } else {
-                        spin_lock(&ci->i_ceph_lock);
+                        spin_lock(&inode->i_lock);
                         dout(" %p got snap_caps %s\n", inode,
                              ceph_cap_string(le32_to_cpu(info->cap.caps)));
                         ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
                         if (cap_fmode >= 0)
                                 __ceph_get_fmode(ci, cap_fmode);
-                        spin_unlock(&ci->i_ceph_lock);
+                        spin_unlock(&inode->i_lock);
                 }
         } else if (cap_fmode >= 0) {
                 pr_warning("mds issued no caps on %llx.%llx\n",
@@ -773,9 +772,9 @@ no_change:
             ceph_snap(inode) == CEPH_NOSNAP &&
             (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
             (issued & CEPH_CAP_FILE_EXCL) == 0 &&
-            !ceph_dir_test_complete(inode)) {
+            (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
                 dout(" marking %p complete (empty)\n", inode);
-                ceph_dir_set_complete(inode);
+                /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
                 ci->i_max_offset = 2;
         }
 
@@ -851,21 +850,19 @@ static void ceph_set_dentry_offset(struct dentry *dn)
 {
         struct dentry *dir = dn->d_parent;
         struct inode *inode = dir->d_inode;
-        struct ceph_inode_info *ci;
         struct ceph_dentry_info *di;
 
         BUG_ON(!inode);
 
-        ci = ceph_inode(inode);
         di = ceph_dentry(dn);
 
-        spin_lock(&ci->i_ceph_lock);
-        if (!ceph_dir_test_complete(inode)) {
-                spin_unlock(&ci->i_ceph_lock);
+        spin_lock(&inode->i_lock);
+        if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
+                spin_unlock(&inode->i_lock);
                 return;
         }
         di->offset = ceph_inode(inode)->i_max_offset++;
-        spin_unlock(&ci->i_ceph_lock);
+        spin_unlock(&inode->i_lock);
 
         spin_lock(&dir->d_lock);
         spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
@@ -992,15 +989,11 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
         if (rinfo->head->is_dentry) {
                 struct inode *dir = req->r_locked_dir;
 
-                if (dir) {
-                        err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
-                                         session, req->r_request_started, -1,
-                                         &req->r_caps_reservation);
-                        if (err < 0)
-                                return err;
-                } else {
-                        WARN_ON_ONCE(1);
-                }
+                err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
+                                 session, req->r_request_started, -1,
+                                 &req->r_caps_reservation);
+                if (err < 0)
+                        return err;
         }
 
         /*
@@ -1008,7 +1001,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
          * will have trouble splicing in the virtual snapdir later
          */
         if (rinfo->head->is_dentry && !req->r_aborted &&
-            req->r_locked_dir &&
             (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
                                                fsc->mount_options->snapdir_name,
                                                req->r_dentry->d_name.len))) {
@@ -1065,7 +1057,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                  * d_move() puts the renamed dentry at the end of
                  * d_subdirs. We need to assign it an appropriate
                  * directory offset so we can behave when holding
-                 * D_COMPLETE.
+                 * I_COMPLETE.
                  */
                 ceph_set_dentry_offset(req->r_old_dentry);
                 dout("dn %p gets new offset %lld\n", req->r_old_dentry,
@@ -1104,7 +1096,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                         pr_err("fill_trace bad get_inode "
                                "%llx.%llx\n", vino.ino, vino.snap);
                         err = PTR_ERR(in);
-                        d_drop(dn);
+                        d_delete(dn);
                         goto done;
                 }
                 dn = splice_dentry(dn, in, &have_lease, true);
@@ -1277,7 +1269,7 @@ retry_lookup:
                         in = ceph_get_inode(parent->d_sb, vino);
                         if (IS_ERR(in)) {
                                 dout("new_inode badness\n");
-                                d_drop(dn);
+                                d_delete(dn);
                                 dput(dn);
                                 err = PTR_ERR(in);
                                 goto out;
@@ -1317,7 +1309,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
         struct ceph_inode_info *ci = ceph_inode(inode);
         int ret = 0;
 
-        spin_lock(&ci->i_ceph_lock);
+        spin_lock(&inode->i_lock);
         dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
         inode->i_size = size;
         inode->i_blocks = (size + (1 << 9) - 1) >> 9;
@@ -1327,7 +1319,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
             (ci->i_reported_size << 1) < ci->i_max_size)
                 ret = 1;
 
-        spin_unlock(&ci->i_ceph_lock);
+        spin_unlock(&inode->i_lock);
         return ret;
 }
 
@@ -1337,13 +1329,12 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
  */
 void ceph_queue_writeback(struct inode *inode)
 {
-        ihold(inode);
         if (queue_work(ceph_inode_to_client(inode)->wb_wq,
                        &ceph_inode(inode)->i_wb_work)) {
                 dout("ceph_queue_writeback %p\n", inode);
+                ihold(inode);
         } else {
                 dout("ceph_queue_writeback %p failed\n", inode);
-                iput(inode);
         }
 }
 
@@ -1363,13 +1354,55 @@ static void ceph_writeback_work(struct work_struct *work)
  */
 void ceph_queue_invalidate(struct inode *inode)
 {
-        ihold(inode);
         if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
                        &ceph_inode(inode)->i_pg_inv_work)) {
                 dout("ceph_queue_invalidate %p\n", inode);
+                ihold(inode);
         } else {
                 dout("ceph_queue_invalidate %p failed\n", inode);
-                iput(inode);
+        }
+}
+
+/*
+ * invalidate any pages that are not dirty or under writeback. this
+ * includes pages that are clean and mapped.
+ */
+static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
+{
+        struct pagevec pvec;
+        pgoff_t next = 0;
+        int i;
+
+        pagevec_init(&pvec, 0);
+        while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+                for (i = 0; i < pagevec_count(&pvec); i++) {
+                        struct page *page = pvec.pages[i];
+                        pgoff_t index;
+                        int skip_page =
+                                (PageDirty(page) || PageWriteback(page));
+
+                        if (!skip_page)
+                                skip_page = !trylock_page(page);
+
+                        /*
+                         * We really shouldn't be looking at the ->index of an
+                         * unlocked page. But we're not allowed to lock these
+                         * pages. So we rely upon nobody altering the ->index
+                         * of this (pinned-by-us) page.
+                         */
+                        index = page->index;
+                        if (index > next)
+                                next = index;
+                        next++;
+
+                        if (skip_page)
+                                continue;
+
+                        generic_error_remove_page(mapping, page);
+                        unlock_page(page);
+                }
+                pagevec_release(&pvec);
+                cond_resched();
         }
 }
 
@@ -1385,20 +1418,20 @@ static void ceph_invalidate_work(struct work_struct *work)
         u32 orig_gen;
         int check = 0;
 
-        spin_lock(&ci->i_ceph_lock);
+        spin_lock(&inode->i_lock);
         dout("invalidate_pages %p gen %d revoking %d\n", inode,
              ci->i_rdcache_gen, ci->i_rdcache_revoking);
         if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
                 /* nevermind! */
-                spin_unlock(&ci->i_ceph_lock);
+                spin_unlock(&inode->i_lock);
                 goto out;
         }
         orig_gen = ci->i_rdcache_gen;
-        spin_unlock(&ci->i_ceph_lock);
+        spin_unlock(&inode->i_lock);
 
-        truncate_inode_pages(&inode->i_data, 0);
+        ceph_invalidate_nondirty_pages(inode->i_mapping);
 
-        spin_lock(&ci->i_ceph_lock);
+        spin_lock(&inode->i_lock);
         if (orig_gen == ci->i_rdcache_gen &&
             orig_gen == ci->i_rdcache_revoking) {
                 dout("invalidate_pages %p gen %d successful\n", inode,
@@ -1410,7 +1443,7 @@ static void ceph_invalidate_work(struct work_struct *work)
                      inode, orig_gen, ci->i_rdcache_gen,
                      ci->i_rdcache_revoking);
         }
-        spin_unlock(&ci->i_ceph_lock);
+        spin_unlock(&inode->i_lock);
 
         if (check)
                 ceph_check_caps(ci, 0, NULL);
@@ -1445,14 +1478,13 @@ void ceph_queue_vmtruncate(struct inode *inode)
 {
         struct ceph_inode_info *ci = ceph_inode(inode);
 
-        ihold(inode);
         if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
                        &ci->i_vmtruncate_work)) {
                 dout("ceph_queue_vmtruncate %p\n", inode);
+                ihold(inode);
         } else {
                 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
                      inode, ci->i_truncate_pending);
-                iput(inode);
         }
 }
 
@@ -1466,13 +1498,13 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
 {
         struct ceph_inode_info *ci = ceph_inode(inode);
         u64 to;
-        int wrbuffer_refs, finish = 0;
+        int wrbuffer_refs, wake = 0;
 
 retry:
-        spin_lock(&ci->i_ceph_lock);
+        spin_lock(&inode->i_lock);
         if (ci->i_truncate_pending == 0) {
                 dout("__do_pending_vmtruncate %p none pending\n", inode);
-                spin_unlock(&ci->i_ceph_lock);
+                spin_unlock(&inode->i_lock);
                 return;
         }
 
@@ -1483,7 +1515,7 @@ retry:
         if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
                 dout("__do_pending_vmtruncate %p flushing snaps first\n",
                      inode);
-                spin_unlock(&ci->i_ceph_lock);
+                spin_unlock(&inode->i_lock);
                 filemap_write_and_wait_range(&inode->i_data, 0,
                                              inode->i_sb->s_maxbytes);
                 goto retry;
@@ -1493,23 +1525,20 @@ retry:
         wrbuffer_refs = ci->i_wrbuffer_ref;
         dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
              ci->i_truncate_pending, to);
-        spin_unlock(&ci->i_ceph_lock);
+        spin_unlock(&inode->i_lock);
 
         truncate_inode_pages(inode->i_mapping, to);
 
-        spin_lock(&ci->i_ceph_lock);
-        if (to == ci->i_truncate_size) {
-                ci->i_truncate_pending = 0;
-                finish = 1;
-        }
-        spin_unlock(&ci->i_ceph_lock);
-        if (!finish)
-                goto retry;
+        spin_lock(&inode->i_lock);
+        ci->i_truncate_pending--;
+        if (ci->i_truncate_pending == 0)
+                wake = 1;
+        spin_unlock(&inode->i_lock);
 
         if (wrbuffer_refs == 0)
                 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
-
+        if (wake)
                 wake_up_all(&ci->i_cap_wq);
 }
 
 
@@ -1559,7 +1588,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
         if (IS_ERR(req))
                 return PTR_ERR(req);
 
-        spin_lock(&ci->i_ceph_lock);
+        spin_lock(&inode->i_lock);
         issued = __ceph_caps_issued(ci, NULL);
         dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
 
@@ -1707,7 +1736,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
         }
 
         release &= issued;
-        spin_unlock(&ci->i_ceph_lock);
+        spin_unlock(&inode->i_lock);
 
         if (inode_dirty_flags)
                 __mark_inode_dirty(inode, inode_dirty_flags);
@@ -1729,7 +1758,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
         __ceph_do_pending_vmtruncate(inode);
         return err;
 out:
-        spin_unlock(&ci->i_ceph_lock);
+        spin_unlock(&inode->i_lock);
         ceph_mdsc_put_request(req);
         return err;
 }