aboutsummaryrefslogtreecommitdiffstats
path: root/fs
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2009-04-27 07:29:04 -0400
committerChris Mason <chris.mason@oracle.com>2009-04-27 07:40:52 -0400
commitb7967db75a38df4891b22efe1b0969b9357eb946 (patch)
tree63ce1e5394446d65111fbc768e44845c295c1049 /fs
parentd6397baee468809ef311e763dfc6e9f73418f8a6 (diff)
Btrfs: remove #if 0 code
Btrfs had some old code sitting around under #if 0; this drops it. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs')
-rw-r--r--fs/btrfs/disk-io.c48
-rw-r--r--fs/btrfs/extent_io.c63
-rw-r--r--fs/btrfs/file.c78
3 files changed, 1 insertion(+), 188 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 44c94d808e2b..77f9a3b824be 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -584,18 +584,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
584 btrfs_set_work_high_prio(&async->work); 584 btrfs_set_work_high_prio(&async->work);
585 585
586 btrfs_queue_worker(&fs_info->workers, &async->work); 586 btrfs_queue_worker(&fs_info->workers, &async->work);
587#if 0
588 int limit = btrfs_async_submit_limit(fs_info);
589 if (atomic_read(&fs_info->nr_async_submits) > limit) {
590 wait_event_timeout(fs_info->async_submit_wait,
591 (atomic_read(&fs_info->nr_async_submits) < limit),
592 HZ/10);
593 587
594 wait_event_timeout(fs_info->async_submit_wait,
595 (atomic_read(&fs_info->nr_async_bios) < limit),
596 HZ/10);
597 }
598#endif
599 while (atomic_read(&fs_info->async_submit_draining) && 588 while (atomic_read(&fs_info->async_submit_draining) &&
600 atomic_read(&fs_info->nr_async_submits)) { 589 atomic_read(&fs_info->nr_async_submits)) {
601 wait_event(fs_info->async_submit_wait, 590 wait_event(fs_info->async_submit_wait,
@@ -770,27 +759,6 @@ static void btree_invalidatepage(struct page *page, unsigned long offset)
770 } 759 }
771} 760}
772 761
773#if 0
774static int btree_writepage(struct page *page, struct writeback_control *wbc)
775{
776 struct buffer_head *bh;
777 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
778 struct buffer_head *head;
779 if (!page_has_buffers(page)) {
780 create_empty_buffers(page, root->fs_info->sb->s_blocksize,
781 (1 << BH_Dirty)|(1 << BH_Uptodate));
782 }
783 head = page_buffers(page);
784 bh = head;
785 do {
786 if (buffer_dirty(bh))
787 csum_tree_block(root, bh, 0);
788 bh = bh->b_this_page;
789 } while (bh != head);
790 return block_write_full_page(page, btree_get_block, wbc);
791}
792#endif
793
794static struct address_space_operations btree_aops = { 762static struct address_space_operations btree_aops = {
795 .readpage = btree_readpage, 763 .readpage = btree_readpage,
796 .writepage = btree_writepage, 764 .writepage = btree_writepage,
@@ -1278,11 +1246,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1278 int ret = 0; 1246 int ret = 0;
1279 struct btrfs_device *device; 1247 struct btrfs_device *device;
1280 struct backing_dev_info *bdi; 1248 struct backing_dev_info *bdi;
1281#if 0 1249
1282 if ((bdi_bits & (1 << BDI_write_congested)) &&
1283 btrfs_congested_async(info, 0))
1284 return 1;
1285#endif
1286 list_for_each_entry(device, &info->fs_devices->devices, dev_list) { 1250 list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
1287 if (!device->bdev) 1251 if (!device->bdev)
1288 continue; 1252 continue;
@@ -2334,16 +2298,6 @@ int close_ctree(struct btrfs_root *root)
2334 btrfs_stop_workers(&fs_info->endio_write_workers); 2298 btrfs_stop_workers(&fs_info->endio_write_workers);
2335 btrfs_stop_workers(&fs_info->submit_workers); 2299 btrfs_stop_workers(&fs_info->submit_workers);
2336 2300
2337#if 0
2338 while (!list_empty(&fs_info->hashers)) {
2339 struct btrfs_hasher *hasher;
2340 hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
2341 hashers);
2342 list_del(&hasher->hashers);
2343 crypto_free_hash(&fs_info->hash_tfm);
2344 kfree(hasher);
2345 }
2346#endif
2347 btrfs_close_devices(fs_info->fs_devices); 2301 btrfs_close_devices(fs_info->fs_devices);
2348 btrfs_mapping_tree_free(&fs_info->mapping_tree); 2302 btrfs_mapping_tree_free(&fs_info->mapping_tree);
2349 2303
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c33b54029d78..fe9eb990e443 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1401,69 +1401,6 @@ out:
1401 return total_bytes; 1401 return total_bytes;
1402} 1402}
1403 1403
1404#if 0
1405/*
1406 * helper function to lock both pages and extents in the tree.
1407 * pages must be locked first.
1408 */
1409static int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1410{
1411 unsigned long index = start >> PAGE_CACHE_SHIFT;
1412 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1413 struct page *page;
1414 int err;
1415
1416 while (index <= end_index) {
1417 page = grab_cache_page(tree->mapping, index);
1418 if (!page) {
1419 err = -ENOMEM;
1420 goto failed;
1421 }
1422 if (IS_ERR(page)) {
1423 err = PTR_ERR(page);
1424 goto failed;
1425 }
1426 index++;
1427 }
1428 lock_extent(tree, start, end, GFP_NOFS);
1429 return 0;
1430
1431failed:
1432 /*
1433 * we failed above in getting the page at 'index', so we undo here
1434 * up to but not including the page at 'index'
1435 */
1436 end_index = index;
1437 index = start >> PAGE_CACHE_SHIFT;
1438 while (index < end_index) {
1439 page = find_get_page(tree->mapping, index);
1440 unlock_page(page);
1441 page_cache_release(page);
1442 index++;
1443 }
1444 return err;
1445}
1446
1447/*
1448 * helper function to unlock both pages and extents in the tree.
1449 */
1450static int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1451{
1452 unsigned long index = start >> PAGE_CACHE_SHIFT;
1453 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1454 struct page *page;
1455
1456 while (index <= end_index) {
1457 page = find_get_page(tree->mapping, index);
1458 unlock_page(page);
1459 page_cache_release(page);
1460 index++;
1461 }
1462 unlock_extent(tree, start, end, GFP_NOFS);
1463 return 0;
1464}
1465#endif
1466
1467/* 1404/*
1468 * set the private field for a given byte offset in the tree. If there isn't 1405 * set the private field for a given byte offset in the tree. If there isn't
1469 * an extent_state there already, this does nothing. 1406 * an extent_state there already, this does nothing.
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index da3ed965c956..1d51dc38bb49 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -272,83 +272,6 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
272 return 0; 272 return 0;
273} 273}
274 274
275int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
276{
277 return 0;
278#if 0
279 struct btrfs_path *path;
280 struct btrfs_key found_key;
281 struct extent_buffer *leaf;
282 struct btrfs_file_extent_item *extent;
283 u64 last_offset = 0;
284 int nritems;
285 int slot;
286 int found_type;
287 int ret;
288 int err = 0;
289 u64 extent_end = 0;
290
291 path = btrfs_alloc_path();
292 ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
293 last_offset, 0);
294 while (1) {
295 nritems = btrfs_header_nritems(path->nodes[0]);
296 if (path->slots[0] >= nritems) {
297 ret = btrfs_next_leaf(root, path);
298 if (ret)
299 goto out;
300 nritems = btrfs_header_nritems(path->nodes[0]);
301 }
302 slot = path->slots[0];
303 leaf = path->nodes[0];
304 btrfs_item_key_to_cpu(leaf, &found_key, slot);
305 if (found_key.objectid != inode->i_ino)
306 break;
307 if (found_key.type != BTRFS_EXTENT_DATA_KEY)
308 goto out;
309
310 if (found_key.offset < last_offset) {
311 WARN_ON(1);
312 btrfs_print_leaf(root, leaf);
313 printk(KERN_ERR "inode %lu found offset %llu "
314 "expected %llu\n", inode->i_ino,
315 (unsigned long long)found_key.offset,
316 (unsigned long long)last_offset);
317 err = 1;
318 goto out;
319 }
320 extent = btrfs_item_ptr(leaf, slot,
321 struct btrfs_file_extent_item);
322 found_type = btrfs_file_extent_type(leaf, extent);
323 if (found_type == BTRFS_FILE_EXTENT_REG) {
324 extent_end = found_key.offset +
325 btrfs_file_extent_num_bytes(leaf, extent);
326 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
327 struct btrfs_item *item;
328 item = btrfs_item_nr(leaf, slot);
329 extent_end = found_key.offset +
330 btrfs_file_extent_inline_len(leaf, extent);
331 extent_end = (extent_end + root->sectorsize - 1) &
332 ~((u64)root->sectorsize - 1);
333 }
334 last_offset = extent_end;
335 path->slots[0]++;
336 }
337 if (0 && last_offset < inode->i_size) {
338 WARN_ON(1);
339 btrfs_print_leaf(root, leaf);
340 printk(KERN_ERR "inode %lu found offset %llu size %llu\n",
341 inode->i_ino, (unsigned long long)last_offset,
342 (unsigned long long)inode->i_size);
343 err = 1;
344
345 }
346out:
347 btrfs_free_path(path);
348 return err;
349#endif
350}
351
352/* 275/*
353 * this is very complex, but the basic idea is to drop all extents 276 * this is very complex, but the basic idea is to drop all extents
354 * in the range start - end. hint_block is filled in with a block number 277 * in the range start - end. hint_block is filled in with a block number
@@ -689,7 +612,6 @@ out:
689 unlock_extent(&BTRFS_I(inode)->io_tree, orig_locked_end, 612 unlock_extent(&BTRFS_I(inode)->io_tree, orig_locked_end,
690 locked_end - 1, GFP_NOFS); 613 locked_end - 1, GFP_NOFS);
691 } 614 }
692 btrfs_check_file(root, inode);
693 return ret; 615 return ret;
694} 616}
695 617