author		Josef Bacik <jbacik@fusionio.com>	2012-09-14 13:43:01 -0400
committer	Chris Mason <chris.mason@fusionio.com>	2012-10-04 09:39:59 -0400
commit		b5bae2612af92fd8e7bcdcf7ce3e0259e8d341c9 (patch)
tree		1a702f18f5f903f86a66f7a73264d153adac8dcf /fs/btrfs/extent_io.c
parent		ff44c6e36dc9dcc02652a1105b120bdf08cea9f7 (diff)
Btrfs: fix race when getting the eb out of page->private
We can race when checking whether PagePrivate is set on a page while we actually have an eb saved in the page's private pointer. We could easily have written out this page and released it in the time between the pagevec lookup and actually getting around to looking at this page. So use mapping->private_lock to ensure we get a consistent view of the page->private pointer. This is in line with the alloc and releasepage paths, which use private_lock when manipulating page->private. Thanks,

Reported-by: David Sterba <dave@jikos.cz>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
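Distilled from the hunk below, the locking pattern looks roughly like this. A minimal sketch only: get_eb_from_page is a hypothetical helper name, the real change is open-coded inside the write-out loop so it can "continue" the pagevec walk, and it assumes the btrfs extent_io definitions for struct extent_buffer.

	/*
	 * Sketch of the race-free lookup this patch introduces.
	 * Returns a referenced eb, or NULL if the page no longer has one.
	 */
	static struct extent_buffer *get_eb_from_page(struct address_space *mapping,
						      struct page *page)
	{
		struct extent_buffer *eb = NULL;

		spin_lock(&mapping->private_lock);
		if (PagePrivate(page)) {
			eb = (struct extent_buffer *)page->private;
			/*
			 * Pin the eb before dropping the lock; a refcount that
			 * has already hit zero means the eb is mid-teardown, so
			 * treat the page as having no eb attached.
			 */
			if (eb && !atomic_inc_not_zero(&eb->refs))
				eb = NULL;
		}
		spin_unlock(&mapping->private_lock);
		return eb;
	}

The key point is that both the PagePrivate() test and the page->private dereference happen under mapping->private_lock, the same lock the alloc and releasepage paths hold while attaching or detaching the eb, so releasepage can no longer free the eb between the check and the use.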
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	23
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 90bd9f768c0a..a2c21570adf5 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3256,19 +3256,34 @@ retry:
 				break;
 			}
 
+			spin_lock(&mapping->private_lock);
+			if (!PagePrivate(page)) {
+				spin_unlock(&mapping->private_lock);
+				continue;
+			}
+
 			eb = (struct extent_buffer *)page->private;
+
+			/*
+			 * Shouldn't happen and normally this would be a BUG_ON
+			 * but no sense in crashing the users box for something
+			 * we can survive anyway.
+			 */
 			if (!eb) {
+				spin_unlock(&mapping->private_lock);
 				WARN_ON(1);
 				continue;
 			}
 
-			if (eb == prev_eb)
+			if (eb == prev_eb) {
+				spin_unlock(&mapping->private_lock);
 				continue;
+			}
 
-			if (!atomic_inc_not_zero(&eb->refs)) {
-				WARN_ON(1);
+			ret = atomic_inc_not_zero(&eb->refs);
+			spin_unlock(&mapping->private_lock);
+			if (!ret)
 				continue;
-			}
 
 			prev_eb = eb;
 			ret = lock_extent_buffer_for_io(eb, fs_info, &epd);