about summary refs log tree commit diff stats
path: root/fs/btrfs/extent_io.c
diff options
context:
space:
mode:
authorJeff Mahoney <jeffm@suse.com>2012-03-01 08:57:19 -0500
committerDavid Sterba <dsterba@suse.cz>2012-03-21 20:45:35 -0400
commitd0082371cf086e0ba2bbd0367b2c9920532df24f (patch)
tree4ae3fb6565f36fd606ab6e4b83d489e66b3b86d7 /fs/btrfs/extent_io.c
parent143bede527b054a271053f41bfaca2b57baa9408 (diff)
btrfs: drop gfp_t from lock_extent
lock_extent and unlock_extent are always called with GFP_NOFS, drop the argument and use GFP_NOFS consistently.

Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r-- fs/btrfs/extent_io.c 41
1 file changed, 19 insertions(+), 22 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 65216fabacbb..0112c02742f4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1171,42 +1171,40 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
1171 * us if waiting is desired. 1171 * us if waiting is desired.
1172 */ 1172 */
1173int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1173int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1174 int bits, struct extent_state **cached_state, gfp_t mask) 1174 int bits, struct extent_state **cached_state)
1175{ 1175{
1176 int err; 1176 int err;
1177 u64 failed_start; 1177 u64 failed_start;
1178 while (1) { 1178 while (1) {
1179 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, 1179 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1180 EXTENT_LOCKED, &failed_start, 1180 EXTENT_LOCKED, &failed_start,
1181 cached_state, mask); 1181 cached_state, GFP_NOFS);
1182 if (err == -EEXIST && (mask & __GFP_WAIT)) { 1182 if (err == -EEXIST) {
1183 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); 1183 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1184 start = failed_start; 1184 start = failed_start;
1185 } else { 1185 } else
1186 break; 1186 break;
1187 }
1188 WARN_ON(start > end); 1187 WARN_ON(start > end);
1189 } 1188 }
1190 return err; 1189 return err;
1191} 1190}
1192 1191
1193int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) 1192int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1194{ 1193{
1195 return lock_extent_bits(tree, start, end, 0, NULL, mask); 1194 return lock_extent_bits(tree, start, end, 0, NULL);
1196} 1195}
1197 1196
1198int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, 1197int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1199 gfp_t mask)
1200{ 1198{
1201 int err; 1199 int err;
1202 u64 failed_start; 1200 u64 failed_start;
1203 1201
1204 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, 1202 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1205 &failed_start, NULL, mask); 1203 &failed_start, NULL, GFP_NOFS);
1206 if (err == -EEXIST) { 1204 if (err == -EEXIST) {
1207 if (failed_start > start) 1205 if (failed_start > start)
1208 clear_extent_bit(tree, start, failed_start - 1, 1206 clear_extent_bit(tree, start, failed_start - 1,
1209 EXTENT_LOCKED, 1, 0, NULL, mask); 1207 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1210 return 0; 1208 return 0;
1211 } 1209 }
1212 return 1; 1210 return 1;
@@ -1219,10 +1217,10 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1219 mask); 1217 mask);
1220} 1218}
1221 1219
1222int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) 1220int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1223{ 1221{
1224 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, 1222 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1225 mask); 1223 GFP_NOFS);
1226} 1224}
1227 1225
1228/* 1226/*
@@ -1518,8 +1516,7 @@ again:
1518 BUG_ON(ret); 1516 BUG_ON(ret);
1519 1517
1520 /* step three, lock the state bits for the whole range */ 1518 /* step three, lock the state bits for the whole range */
1521 lock_extent_bits(tree, delalloc_start, delalloc_end, 1519 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
1522 0, &cached_state, GFP_NOFS);
1523 1520
1524 /* then test to make sure it is all still delalloc */ 1521 /* then test to make sure it is all still delalloc */
1525 ret = test_range_bit(tree, delalloc_start, delalloc_end, 1522 ret = test_range_bit(tree, delalloc_start, delalloc_end,
@@ -2557,11 +2554,11 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2557 2554
2558 end = page_end; 2555 end = page_end;
2559 while (1) { 2556 while (1) {
2560 lock_extent(tree, start, end, GFP_NOFS); 2557 lock_extent(tree, start, end);
2561 ordered = btrfs_lookup_ordered_extent(inode, start); 2558 ordered = btrfs_lookup_ordered_extent(inode, start);
2562 if (!ordered) 2559 if (!ordered)
2563 break; 2560 break;
2564 unlock_extent(tree, start, end, GFP_NOFS); 2561 unlock_extent(tree, start, end);
2565 btrfs_start_ordered_extent(inode, ordered, 1); 2562 btrfs_start_ordered_extent(inode, ordered, 1);
2566 btrfs_put_ordered_extent(ordered); 2563 btrfs_put_ordered_extent(ordered);
2567 } 2564 }
@@ -2598,7 +2595,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2598 end - cur + 1, 0); 2595 end - cur + 1, 0);
2599 if (IS_ERR_OR_NULL(em)) { 2596 if (IS_ERR_OR_NULL(em)) {
2600 SetPageError(page); 2597 SetPageError(page);
2601 unlock_extent(tree, cur, end, GFP_NOFS); 2598 unlock_extent(tree, cur, end);
2602 break; 2599 break;
2603 } 2600 }
2604 extent_offset = cur - em->start; 2601 extent_offset = cur - em->start;
@@ -2650,7 +2647,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2650 if (test_range_bit(tree, cur, cur_end, 2647 if (test_range_bit(tree, cur, cur_end,
2651 EXTENT_UPTODATE, 1, NULL)) { 2648 EXTENT_UPTODATE, 1, NULL)) {
2652 check_page_uptodate(tree, page); 2649 check_page_uptodate(tree, page);
2653 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 2650 unlock_extent(tree, cur, cur + iosize - 1);
2654 cur = cur + iosize; 2651 cur = cur + iosize;
2655 pg_offset += iosize; 2652 pg_offset += iosize;
2656 continue; 2653 continue;
@@ -2660,7 +2657,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2660 */ 2657 */
2661 if (block_start == EXTENT_MAP_INLINE) { 2658 if (block_start == EXTENT_MAP_INLINE) {
2662 SetPageError(page); 2659 SetPageError(page);
2663 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 2660 unlock_extent(tree, cur, cur + iosize - 1);
2664 cur = cur + iosize; 2661 cur = cur + iosize;
2665 pg_offset += iosize; 2662 pg_offset += iosize;
2666 continue; 2663 continue;
@@ -3274,7 +3271,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
3274 if (start > end) 3271 if (start > end)
3275 return 0; 3272 return 0;
3276 3273
3277 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS); 3274 lock_extent_bits(tree, start, end, 0, &cached_state);
3278 wait_on_page_writeback(page); 3275 wait_on_page_writeback(page);
3279 clear_extent_bit(tree, start, end, 3276 clear_extent_bit(tree, start, end,
3280 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | 3277 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -3488,7 +3485,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3488 } 3485 }
3489 3486
3490 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, 3487 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3491 &cached_state, GFP_NOFS); 3488 &cached_state);
3492 3489
3493 em = get_extent_skip_holes(inode, start, last_for_get_extent, 3490 em = get_extent_skip_holes(inode, start, last_for_get_extent,
3494 get_extent); 3491 get_extent);