Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/async-thread.c  |   6
-rw-r--r--  fs/btrfs/compat.h        |  40
-rw-r--r--  fs/btrfs/crc32c.h        |   6
-rw-r--r--  fs/btrfs/ctree.h         |   7
-rw-r--r--  fs/btrfs/disk-io.c       |  28
-rw-r--r--  fs/btrfs/export.c        |   6
-rw-r--r--  fs/btrfs/extent_io.c     |  66
-rw-r--r--  fs/btrfs/file.c          |  24
-rw-r--r--  fs/btrfs/inode.c         | 135
-rw-r--r--  fs/btrfs/super.c         |   7
-rw-r--r--  fs/btrfs/sysfs.c         |  33
-rw-r--r--  fs/btrfs/volumes.c       |  20
12 files changed, 10 insertions, 368 deletions
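
Every hunk below removes the same kind of scaffolding: LINUX_VERSION_CODE guards that selected an old-kernel fallback at compile time. A minimal sketch of the guard pattern being dropped, reusing the freezer.h case from async-thread.c as the example (illustrative only, not part of the patch):

#include <linux/version.h>

/* The original compat guard: kernels >= 2.6.20 provide the freezer helpers
 * in linux/freezer.h, older kernels fell back to linux/sched.h. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif

With the code targeting mainline only, both the #else branches and the guards themselves disappear; that, along with the NFS readdir workaround in inode.c and the sysfs stubs, is where the bulk of the 368 deleted lines comes from.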
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 2ee301740195..4e780b279de6 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -20,13 +20,7 @@
 #include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
 # include <linux/freezer.h>
-#else
-# include <linux/sched.h>
-#endif
-
 #include "async-thread.h"
 
 /*
diff --git a/fs/btrfs/compat.h b/fs/btrfs/compat.h
index b0ed1887d9b1..cd6598b169df 100644
--- a/fs/btrfs/compat.h
+++ b/fs/btrfs/compat.h
@@ -1,9 +1,8 @@
 #ifndef _COMPAT_H_
 #define _COMPAT_H_
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26)
-#define trylock_page(page) (!TestSetPageLocked(page))
-#endif
+#define btrfs_drop_nlink(inode) drop_nlink(inode)
+#define btrfs_inc_nlink(inode) inc_nlink(inode)
 
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)
 static inline struct dentry *d_obtain_alias(struct inode *inode)
@@ -22,39 +21,4 @@ static inline struct dentry *d_obtain_alias(struct inode *inode)
 }
 #endif
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-static inline void btrfs_drop_nlink(struct inode *inode)
-{
-	inode->i_nlink--;
-}
-
-static inline void btrfs_inc_nlink(struct inode *inode)
-{
-	inode->i_nlink++;
-}
-#else
-# define btrfs_drop_nlink(inode) drop_nlink(inode)
-# define btrfs_inc_nlink(inode) inc_nlink(inode)
-#endif
-
-/*
- * Even if AppArmor isn't enabled, it still has different prototypes.
- * Add more distro/version pairs here to declare which has AppArmor applied.
- */
-#if defined(CONFIG_SUSE_KERNEL)
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-#  define REMOVE_SUID_PATH 1
-# endif
-#endif
-
-/*
- * catch any other distros that have patched in apparmor.  This isn't
- * 100% reliable because it won't catch people that hand compile their
- * own distro kernels without apparmor compiled in.  But, it is better
- * than nothing.
- */
-#ifdef CONFIG_SECURITY_APPARMOR
-#  define REMOVE_SUID_PATH 1
-#endif
-
 #endif /* _COMPAT_H_ */
diff --git a/fs/btrfs/crc32c.h b/fs/btrfs/crc32c.h
index bf6c12e85730..4f0fefed132a 100644
--- a/fs/btrfs/crc32c.h
+++ b/fs/btrfs/crc32c.h
@@ -96,13 +96,7 @@ static inline u32 __btrfs_crc32c(u32 crc, unsigned char const *address,
  * We must workaround older implementations of crc32c_le()
  * found on older kernel versions.
  */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-#define btrfs_crc32c(seed, data, length) \
-	__cpu_to_le32( __btrfs_crc32c( __le32_to_cpu(seed), \
-			(unsigned char const *)data, length) )
-#else
 #define btrfs_crc32c(seed, data, length) \
 	__btrfs_crc32c(seed, (unsigned char const *)data, length)
 #endif
-#endif
 
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 138c157bbc45..3b3c1ca50c5d 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1472,12 +1472,9 @@ static inline u32 btrfs_level_size(struct btrfs_root *root, int level) {
 		((unsigned long)(btrfs_leaf_data(leaf) + \
 	btrfs_item_offset_nr(leaf, slot)))
 
-static inline struct dentry *fdentry(struct file *file) {
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-	return file->f_dentry;
-#else
+static inline struct dentry *fdentry(struct file *file)
+{
 	return file->f_path.dentry;
-#endif
 }
 
 /* extent-tree.c */
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d35ca6a3f513..dffb8dabd533 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -26,11 +26,7 @@
 #include <linux/buffer_head.h> // for block_sync_page
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
 # include <linux/freezer.h>
-#else
-# include <linux/sched.h>
-#endif
 #include "crc32c.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -373,21 +369,11 @@ out:
 	return ret;
 }
 
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 static void end_workqueue_bio(struct bio *bio, int err)
-#else
-static int end_workqueue_bio(struct bio *bio,
-			   unsigned int bytes_done, int err)
-#endif
 {
 	struct end_io_wq *end_io_wq = bio->bi_private;
 	struct btrfs_fs_info *fs_info;
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	if (bio->bi_size)
-		return 1;
-#endif
-
 	fs_info = end_io_wq->info;
 	end_io_wq->error = err;
 	end_io_wq->work.func = end_workqueue_fn;
@@ -397,10 +383,6 @@ static int end_workqueue_bio(struct bio *bio,
 				   &end_io_wq->work);
 	else
 		btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);
-
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	return 0;
-#endif
 }
 
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
@@ -1161,9 +1143,7 @@ void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 
 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
 {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 	bdi_init(bdi);
-#endif
 	bdi->ra_pages = default_backing_dev_info.ra_pages;
 	bdi->state = 0;
 	bdi->capabilities = default_backing_dev_info.capabilities;
@@ -1242,11 +1222,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
 	bio->bi_private = end_io_wq->private;
 	bio->bi_end_io = end_io_wq->end_io;
 	kfree(end_io_wq);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	bio_endio(bio, bio->bi_size, error);
-#else
 	bio_endio(bio, error);
-#endif
 }
 
 static int cleaner_kthread(void *arg)
@@ -1673,9 +1649,7 @@ fail:
 
 	kfree(extent_root);
 	kfree(tree_root);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 	bdi_destroy(&fs_info->bdi);
-#endif
 	kfree(fs_info);
 	return ERR_PTR(err);
 }
@@ -1936,9 +1910,7 @@ int close_ctree(struct btrfs_root *root)
 	btrfs_close_devices(fs_info->fs_devices);
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
 
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 	bdi_destroy(&fs_info->bdi);
-#endif
 
 	kfree(fs_info->extent_root);
 	kfree(fs_info->tree_root);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 2b357a6d2407..48b82cd7583c 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -7,12 +7,6 @@
 #include "export.h"
 #include "compat.h"
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-#define FILEID_BTRFS_WITHOUT_PARENT 0x4d
-#define FILEID_BTRFS_WITH_PARENT 0x4e
-#define FILEID_BTRFS_WITH_PARENT_ROOT 0x4f
-#endif
-
 #define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, parent_objectid)/4)
 #define BTRFS_FID_SIZE_CONNECTABLE (offsetof(struct btrfs_fid, parent_root_objectid)/4)
 #define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid)/4)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 58ad25838a41..e3a25be5c663 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1397,12 +1397,7 @@ static int check_page_writeback(struct extent_io_tree *tree,
  * Scheduling is not allowed, so the extent state tree is expected
  * to have one and only one object corresponding to this IO.
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 static void end_bio_extent_writepage(struct bio *bio, int err)
-#else
-static int end_bio_extent_writepage(struct bio *bio,
-				   unsigned int bytes_done, int err)
-#endif
 {
 	int uptodate = err == 0;
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -1412,10 +1407,6 @@ static int end_bio_extent_writepage(struct bio *bio,
 	int whole_page;
 	int ret;
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	if (bio->bi_size)
-		return 1;
-#endif
 	do {
 		struct page *page = bvec->bv_page;
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -1461,10 +1452,8 @@ static int end_bio_extent_writepage(struct bio *bio,
 	else
 		check_page_writeback(tree, page);
 	} while (bvec >= bio->bi_io_vec);
+
 	bio_put(bio);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	return 0;
-#endif
 }
 
 /*
@@ -1478,12 +1467,7 @@ static int end_bio_extent_writepage(struct bio *bio,
  * Scheduling is not allowed, so the extent state tree is expected
  * to have one and only one object corresponding to this IO.
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 static void end_bio_extent_readpage(struct bio *bio, int err)
-#else
-static int end_bio_extent_readpage(struct bio *bio,
-			       unsigned int bytes_done, int err)
-#endif
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -1493,11 +1477,6 @@ static int end_bio_extent_readpage(struct bio *bio,
 	int whole_page;
 	int ret;
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	if (bio->bi_size)
-		return 1;
-#endif
-
 	do {
 		struct page *page = bvec->bv_page;
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -1556,9 +1535,6 @@ static int end_bio_extent_readpage(struct bio *bio,
 	} while (bvec >= bio->bi_io_vec);
 
 	bio_put(bio);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	return 0;
-#endif
 }
 
 /*
@@ -1566,12 +1542,7 @@ static int end_bio_extent_readpage(struct bio *bio,
  * the structs in the extent tree when done, and set the uptodate bits
  * as appropriate.
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 static void end_bio_extent_preparewrite(struct bio *bio, int err)
-#else
-static int end_bio_extent_preparewrite(struct bio *bio,
-				       unsigned int bytes_done, int err)
-#endif
 {
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -1579,11 +1550,6 @@ static int end_bio_extent_preparewrite(struct bio *bio,
 	u64 start;
 	u64 end;
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	if (bio->bi_size)
-		return 1;
-#endif
-
 	do {
 		struct page *page = bvec->bv_page;
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -1607,9 +1573,6 @@ static int end_bio_extent_preparewrite(struct bio *bio,
 	} while (bvec >= bio->bi_io_vec);
 
 	bio_put(bio);
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	return 0;
-#endif
 }
 
 static struct bio *
@@ -2079,12 +2042,6 @@ done:
 	return 0;
 }
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
-/* Taken directly from 2.6.23 with a mod for a lockpage hook */
-typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
-				void *data);
-#endif
-
 /**
  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
  * @mapping: address space structure to write
@@ -2201,10 +2158,9 @@ retry:
 	}
 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
 		mapping->writeback_index = index;
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
+
 	if (wbc->range_cont)
 		wbc->range_start = index << PAGE_CACHE_SHIFT;
-#endif
 	return ret;
 }
 EXPORT_SYMBOL(extent_write_cache_pages);
@@ -2560,18 +2516,10 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb,
  * by increasing the reference count. So we know the page must
  * be in the radix tree.
  */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
 	rcu_read_lock();
-#else
-	read_lock_irq(&mapping->tree_lock);
-#endif
 	p = radix_tree_lookup(&mapping->page_tree, i);
-
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
 	rcu_read_unlock();
-#else
-	read_unlock_irq(&mapping->tree_lock);
-#endif
+
 	return p;
 }
 
@@ -2773,21 +2721,13 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 		}
 	}
 	clear_page_dirty_for_io(page);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
 	spin_lock_irq(&page->mapping->tree_lock);
-#else
-	read_lock_irq(&page->mapping->tree_lock);
-#endif
 	if (!PageDirty(page)) {
 		radix_tree_tag_clear(&page->mapping->page_tree,
 					page_index(page),
 					PAGECACHE_TAG_DIRTY);
 	}
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
 	spin_unlock_irq(&page->mapping->tree_lock);
-#else
-	read_unlock_irq(&page->mapping->tree_lock);
-#endif
 	unlock_page(page);
 	}
 	return 0;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 48a702d41c8c..8856570a0ebd 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -871,15 +871,8 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
 		goto out_nolock;
 	if (count == 0)
 		goto out_nolock;
-#ifdef REMOVE_SUID_PATH
-	err = remove_suid(&file->f_path);
-#else
-# if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
+
 	err = file_remove_suid(file);
-# else
-	err = remove_suid(fdentry(file));
-# endif
-#endif
 	if (err)
 		goto out_nolock;
 	file_update_time(file);
@@ -1003,17 +996,10 @@ out_nolock:
 			btrfs_commit_transaction(trans, root);
 		}
 	} else if (num_written > 0 && (file->f_flags & O_DIRECT)) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
-		do_sync_file_range(file, start_pos,
-				   start_pos + num_written - 1,
-				   SYNC_FILE_RANGE_WRITE |
-				   SYNC_FILE_RANGE_WAIT_AFTER);
-#else
 		do_sync_mapping_range(inode->i_mapping, start_pos,
 				      start_pos + num_written - 1,
 				      SYNC_FILE_RANGE_WRITE |
 				      SYNC_FILE_RANGE_WAIT_AFTER);
-#endif
 		invalidate_mapping_pages(inode->i_mapping,
 			start_pos >> PAGE_CACHE_SHIFT,
 			(start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
@@ -1097,12 +1083,7 @@ out:
 }
 
 static struct vm_operations_struct btrfs_file_vm_ops = {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-	.nopage		= filemap_nopage,
-	.populate	= filemap_populate,
-#else
 	.fault		= filemap_fault,
-#endif
 	.page_mkwrite	= btrfs_page_mkwrite,
 };
 
@@ -1118,9 +1099,6 @@ struct file_operations btrfs_file_operations = {
 	.read		= do_sync_read,
 	.aio_read	= generic_file_aio_read,
 	.splice_read	= generic_file_splice_read,
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-	.sendfile	= generic_file_sendfile,
-#endif
 	.write		= btrfs_file_write,
 	.mmap		= btrfs_file_mmap,
 	.open		= generic_file_open,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index adb169d739ce..48a3dc030807 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2073,104 +2073,6 @@ err:
 	return ret;
 }
 
-/* Kernels earlier than 2.6.28 still have the NFS deadlock where nfsd
-   will call the file system's ->lookup() method from within its
-   filldir callback, which in turn was called from the file system's
-   ->readdir() method. And will deadlock for many file systems. */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-
-struct nfshack_dirent {
-	u64 ino;
-	loff_t offset;
-	int namlen;
-	unsigned int d_type;
-	char name[];
-};
-
-struct nfshack_readdir {
-	char *dirent;
-	size_t used;
-	int full;
-};
-
-
-
-static int btrfs_nfshack_filldir(void *__buf, const char *name, int namlen,
-				 loff_t offset, u64 ino, unsigned int d_type)
-{
-	struct nfshack_readdir *buf = __buf;
-	struct nfshack_dirent *de = (void *)(buf->dirent + buf->used);
-	unsigned int reclen;
-
-	reclen = ALIGN(sizeof(struct nfshack_dirent) + namlen, sizeof(u64));
-	if (buf->used + reclen > PAGE_SIZE) {
-		buf->full = 1;
-		return -EINVAL;
-	}
-
-	de->namlen = namlen;
-	de->offset = offset;
-	de->ino = ino;
-	de->d_type = d_type;
-	memcpy(de->name, name, namlen);
-	buf->used += reclen;
-
-	return 0;
-}
-
-static int btrfs_nfshack_readdir(struct file *file, void *dirent,
-				 filldir_t filldir)
-{
-	struct nfshack_readdir buf;
-	struct nfshack_dirent *de;
-	int err;
-	int size;
-	loff_t offset;
-
-	buf.dirent = (void *)__get_free_page(GFP_KERNEL);
-	if (!buf.dirent)
-		return -ENOMEM;
-
-	offset = file->f_pos;
-
-	do {
-		unsigned int reclen;
-
-		buf.used = 0;
-		buf.full = 0;
-		err = btrfs_real_readdir(file, &buf, btrfs_nfshack_filldir);
-		if (err)
-			break;
-
-		size = buf.used;
-
-		if (!size)
-			break;
-
-		de = (struct nfshack_dirent *)buf.dirent;
-		while (size > 0) {
-			offset = de->offset;
-
-			if (filldir(dirent, de->name, de->namlen, de->offset,
-				    de->ino, de->d_type))
-				goto done;
-			offset = file->f_pos;
-
-			reclen = ALIGN(sizeof(*de) + de->namlen,
-				       sizeof(u64));
-			size -= reclen;
-			de = (struct nfshack_dirent *)((char *)de + reclen);
-		}
-	} while (buf.full);
-
- done:
-	free_page((unsigned long)buf.dirent);
-	file->f_pos = offset;
-
-	return err;
-}
-#endif
-
 int btrfs_write_inode(struct inode *inode, int wait)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -3311,13 +3213,8 @@ unsigned long btrfs_force_ra(struct address_space *mapping,
 {
 	pgoff_t req_size = last_index - offset + 1;
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-	offset = page_cache_readahead(mapping, ra, file, offset, req_size);
-	return offset;
-#else
 	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
 	return offset + req_size;
-#endif
 }
 
 struct inode *btrfs_alloc_inode(struct super_block *sb)
@@ -3373,14 +3270,7 @@ void btrfs_destroy_inode(struct inode *inode)
 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
 }
 
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
 static void init_once(void *foo)
-#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
-static void init_once(struct kmem_cache * cachep, void *foo)
-#else
-static void init_once(void * foo, struct kmem_cache * cachep,
-		      unsigned long flags)
-#endif
 {
 	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
 
@@ -3403,22 +3293,10 @@ void btrfs_destroy_cachep(void)
 
 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
 				       unsigned long extra_flags,
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
-				       void (*ctor)(void *)
-#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
-				       void (*ctor)(struct kmem_cache *, void *)
-#else
-				       void (*ctor)(void *, struct kmem_cache *,
-						    unsigned long)
-#endif
-				       )
+				       void (*ctor)(void *))
 {
 	return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
-				 SLAB_MEM_SPREAD | extra_flags), ctor
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-				 ,NULL
-#endif
-				 );
+				 SLAB_MEM_SPREAD | extra_flags), ctor);
 }
 
 int btrfs_init_cachep(void)
@@ -3666,12 +3544,7 @@ static int btrfs_set_page_dirty(struct page *page)
 	return __set_page_dirty_nobuffers(page);
 }
 
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
 static int btrfs_permission(struct inode *inode, int mask)
-#else
-static int btrfs_permission(struct inode *inode, int mask,
-			    struct nameidata *nd)
-#endif
 {
 	if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
 		return -EACCES;
@@ -3702,11 +3575,7 @@ static struct inode_operations btrfs_dir_ro_inode_operations = {
 static struct file_operations btrfs_dir_file_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-	.readdir	= btrfs_nfshack_readdir,
-#else /* NFSd readdir/lookup deadlock is fixed */
 	.readdir	= btrfs_real_readdir,
-#endif
 	.unlocked_ioctl	= btrfs_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= btrfs_ioctl,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index f7b3eac7ac6d..8399d6d05d63 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -349,10 +349,7 @@ static int btrfs_fill_super(struct super_block * sb,
 
 	sb->s_root = root_dentry;
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
 	save_mount_options(sb, data);
-#endif
-
 	return 0;
 
 fail_close:
@@ -566,11 +563,7 @@ static struct super_operations btrfs_super_ops = {
 	.put_super	= btrfs_put_super,
 	.write_super	= btrfs_write_super,
 	.sync_fs	= btrfs_sync_fs,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
-	.read_inode	= btrfs_read_locked_inode,
-#else
 	.show_options	= generic_show_options,
-#endif
 	.write_inode	= btrfs_write_inode,
 	.dirty_inode	= btrfs_dirty_inode,
 	.alloc_inode	= btrfs_alloc_inode,
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 61af5d8446e3..300076e66765 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -28,7 +28,6 @@
 #include "disk-io.h"
 #include "transaction.h"
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
 static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%llu\n",
@@ -267,35 +266,3 @@ void btrfs_exit_sysfs(void)
 	kset_unregister(btrfs_kset);
 }
 
-#else
-
-int btrfs_sysfs_add_super(struct btrfs_fs_info *fs)
-{
-	return 0;
-}
-
-int btrfs_sysfs_add_root(struct btrfs_root *root)
-{
-	return 0;
-}
-
-void btrfs_sysfs_del_root(struct btrfs_root *root)
-{
-	return;
-}
-
-void btrfs_sysfs_del_super(struct btrfs_fs_info *fs)
-{
-	return;
-}
-
-int btrfs_init_sysfs(void)
-{
-	return 0;
-}
-
-void btrfs_exit_sysfs(void)
-{
-	return;
-}
-#endif
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b9e5c2d82dde..ddf89626498a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2080,20 +2080,11 @@ int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
 }
 
 
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 static void end_bio_multi_stripe(struct bio *bio, int err)
-#else
-static int end_bio_multi_stripe(struct bio *bio,
-			       unsigned int bytes_done, int err)
-#endif
 {
 	struct btrfs_multi_bio *multi = bio->bi_private;
 	int is_orig_bio = 0;
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	if (bio->bi_size)
-		return 1;
-#endif
 	if (err)
 		atomic_inc(&multi->error);
 
@@ -2122,17 +2113,10 @@ static int end_bio_multi_stripe(struct bio *bio,
 		}
 		kfree(multi);
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-		bio_endio(bio, bio->bi_size, err);
-#else
 		bio_endio(bio, err);
-#endif
 	} else if (!is_orig_bio) {
 		bio_put(bio);
 	}
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-	return 0;
-#endif
 }
 
 struct async_sched {
@@ -2248,11 +2232,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 	} else {
 		bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
 		bio->bi_sector = logical >> 9;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-		bio_endio(bio, bio->bi_size, -EIO);
-#else
 		bio_endio(bio, -EIO);
-#endif
 	}
 	dev_nr++;
 	}
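
The bio completion hunks in disk-io.c, extent_io.c and volumes.c all collapse the same two-signature dance. A rough sketch of the two callback shapes the compat code had to support, with hypothetical handler names (old_style_end_io and new_style_end_io are illustrations based only on the #if branches visible above, not surviving btrfs functions):

#include <linux/bio.h>

/* Kernels <= 2.6.23: the handler could be invoked before the whole bio had
 * finished; the hunks above bail out with "return 1" while bio->bi_size is
 * still nonzero and do the real work only on the final call. */
static int old_style_end_io(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;	/* more of the bio is still in flight */
	/* ... finish the I/O; chained completions use bio_endio(bio, bio->bi_size, err) ... */
	return 0;
}

/* Kernels > 2.6.23: called exactly once when the whole bio completes. */
static void new_style_end_io(struct bio *bio, int err)
{
	/* ... finish the I/O; chained completions use bio_endio(bio, err) ... */
}

Dropping the old form is what lets every handler above lose its bi_size check, its return statements, and the three-argument bio_endio() calls.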