author     Sage Weil <sage@newdream.net>  2011-07-26 14:26:31 -0400
committer  Sage Weil <sage@newdream.net>  2011-07-26 14:26:31 -0400
commit     af0ed569d7019f1b49e9e51e77b47092e656b00e (patch)
tree       a8b0ad426b22548eec9e2d350c131426e13af989 /fs/ceph/snap.c
parent     9cfa1098dcfb34f71c5f3b7bcdbbb435a0cecab2 (diff)
ceph: fix snap writeback when racing with writes
There are two problems that come up when we try to queue a capsnap while a
write is in progress:

- The FILE_WR cap is held, but not yet dirty, so we may queue a capsnap
  with dirty == 0.  That will crash later in __ceph_flush_snaps().  Treat
  a held FILE_WR cap as dirty Fw, since the write in progress will have
  dirtied it by the time the capsnap is flushed.

- We may not have i_head_snapc set, which causes problems pretty quickly.
  Look to the snaprealm in this case.

Reviewed-by: Yehuda Sadeh <yehuda@hq.newdream.net>
Signed-off-by: Sage Weil <sage@newdream.net>
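The first fix is easy to model outside the kernel: folding a held-but-not-yet-dirty Fw into the dirty mask guarantees that any capsnap queued during a racing write carries a non-zero dirty set. The sketch below is a user-space illustration only; the cap bit values and the queue_cap_snap_guard() helper are invented for the example, and only the masking logic mirrors the patch.

#include <assert.h>
#include <stdio.h>

/* Illustrative cap bits; the real values live in the ceph headers. */
#define CAP_FILE_WR   0x1
#define CAP_FILE_EXCL 0x2

/* Hypothetical stand-in for the guard ceph_queue_cap_snap() performs. */
static int queue_cap_snap_guard(unsigned used, unsigned dirty)
{
	/*
	 * First hunk of the patch: a write in progress holds Fw but may
	 * not have dirtied it yet, so treat a used Fw as dirty up front.
	 */
	if (used & CAP_FILE_WR)
		dirty |= CAP_FILE_WR;

	if (!(dirty & (CAP_FILE_WR | CAP_FILE_EXCL)))
		return 0;            /* nothing to flush, no capsnap */

	assert(dirty != 0);          /* mirrors the new BUG_ON(dirty == 0) */
	printf("queue capsnap, dirty=0x%x\n", dirty);
	return 1;
}

int main(void)
{
	/* Racing write: Fw is used but not yet dirty -- still queues safely. */
	queue_cap_snap_guard(CAP_FILE_WR, 0);
	/* No write, nothing dirty: no capsnap is queued. */
	queue_cap_snap_guard(0, 0);
	return 0;
}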
Diffstat (limited to 'fs/ceph/snap.c')
-rw-r--r--  fs/ceph/snap.c  23
1 file changed, 20 insertions, 3 deletions
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 54b14de2e729..ac030c9d9959 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -449,6 +449,15 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	spin_lock(&inode->i_lock);
 	used = __ceph_caps_used(ci);
 	dirty = __ceph_caps_dirty(ci);
+
+	/*
+	 * If there is a write in progress, treat that as a dirty Fw,
+	 * even though it hasn't completed yet; by the time we finish
+	 * up this capsnap it will be.
+	 */
+	if (used & CEPH_CAP_FILE_WR)
+		dirty |= CEPH_CAP_FILE_WR;
+
 	if (__ceph_have_pending_cap_snap(ci)) {
 		/* there is no point in queuing multiple "pending" cap_snaps,
 		   as no new writes are allowed to start when pending, so any
@@ -456,14 +465,22 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 		   cap_snap. lucky us. */
 		dout("queue_cap_snap %p already pending\n", inode);
 		kfree(capsnap);
-	} else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR) ||
+	} else if (ci->i_wrbuffer_ref_head ||
 		   (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
 			     CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR))) {
 		struct ceph_snap_context *snapc = ci->i_head_snapc;
 
-		dout("queue_cap_snap %p cap_snap %p queuing under %p\n", inode,
-		     capsnap, snapc);
+		/*
+		 * if we are a sync write, we may need to go to the snaprealm
+		 * to get the current snapc.
+		 */
+		if (!snapc)
+			snapc = ci->i_snap_realm->cached_context;
+
+		dout("queue_cap_snap %p cap_snap %p queuing under %p %s\n",
+		     inode, capsnap, snapc, ceph_cap_string(dirty));
 		ihold(inode);
+		BUG_ON(dirty == 0);
 
 		atomic_set(&capsnap->nref, 1);
 		capsnap->ci = ci;
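The second hunk's snapc fallback can likewise be read in isolation: a sync write may reach ceph_queue_cap_snap() without ever setting i_head_snapc, so the patch falls back to the realm-wide cached context. The following is a rough user-space sketch under that reading; the trimmed-down structs and the pick_snapc() helper are hypothetical, and only the NULL fallback mirrors the patch.

#include <stdio.h>
#include <stddef.h>

/* Trimmed-down stand-ins for the kernel structures involved. */
struct snap_context { unsigned long long seq; };

struct snap_realm {
	struct snap_context *cached_context; /* realm-wide current snapc */
};

struct inode_info {
	struct snap_context *i_head_snapc;   /* may be NULL for sync writes */
	struct snap_realm   *i_snap_realm;
};

/* Hypothetical helper mirroring the fallback the patch adds. */
static struct snap_context *pick_snapc(struct inode_info *ci)
{
	struct snap_context *snapc = ci->i_head_snapc;

	/* Sync writes may never set i_head_snapc; use the realm's snapc. */
	if (!snapc)
		snapc = ci->i_snap_realm->cached_context;
	return snapc;
}

int main(void)
{
	struct snap_context realm_ctx = { .seq = 42 };
	struct snap_realm realm = { .cached_context = &realm_ctx };
	struct inode_info ci = { .i_head_snapc = NULL, .i_snap_realm = &realm };

	printf("queuing under snapc seq %llu\n", pick_snapc(&ci)->seq);
	return 0;
}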