aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/extent_io.h
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2010-08-06 13:21:20 -0400
committerChris Mason <chris.mason@oracle.com>2012-03-26 16:50:37 -0400
commit727011e07cbdf87772fcc1999cccd15cc915eb62 (patch)
tree05405dc1e9c86d67dbb02ddf063bd0c137ce6707 /fs/btrfs/extent_io.h
parent81c9ad237c604adec79fd4d4034264c6669e0ab3 (diff)
Btrfs: allow metadata blocks larger than the page size
A few years ago the btrfs code to support blocks larger than the page size was disabled to fix a few corner cases in the page cache handling. This fixes the code to properly support large metadata blocks again. Since current kernels will crash early and often with larger metadata blocks, this adds an incompat bit so that older kernels can't mount it. This also does away with different blocksizes for nodes and leaves. You get a single block size for all tree blocks. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_io.h')
-rw-r--r--fs/btrfs/extent_io.h12
1 file changed, 8 insertions, 4 deletions
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index cecc3518c121..4e38a3d9631a 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -119,16 +119,18 @@ struct extent_state {
119 struct list_head leak_list; 119 struct list_head leak_list;
120}; 120};
121 121
122#define INLINE_EXTENT_BUFFER_PAGES 16
123#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE)
122struct extent_buffer { 124struct extent_buffer {
123 u64 start; 125 u64 start;
124 unsigned long len; 126 unsigned long len;
125 unsigned long map_start; 127 unsigned long map_start;
126 unsigned long map_len; 128 unsigned long map_len;
127 struct page *first_page;
128 unsigned long bflags; 129 unsigned long bflags;
130 atomic_t refs;
131 atomic_t pages_reading;
129 struct list_head leak_list; 132 struct list_head leak_list;
130 struct rcu_head rcu_head; 133 struct rcu_head rcu_head;
131 atomic_t refs;
132 pid_t lock_owner; 134 pid_t lock_owner;
133 135
134 /* count of read lock holders on the extent buffer */ 136 /* count of read lock holders on the extent buffer */
@@ -152,6 +154,9 @@ struct extent_buffer {
152 * to unlock 154 * to unlock
153 */ 155 */
154 wait_queue_head_t read_lock_wq; 156 wait_queue_head_t read_lock_wq;
157 wait_queue_head_t lock_wq;
158 struct page *inline_pages[INLINE_EXTENT_BUFFER_PAGES];
159 struct page **pages;
155}; 160};
156 161
157static inline void extent_set_compress_type(unsigned long *bio_flags, 162static inline void extent_set_compress_type(unsigned long *bio_flags,
@@ -251,8 +256,7 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
251void set_page_extent_mapped(struct page *page); 256void set_page_extent_mapped(struct page *page);
252 257
253struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, 258struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
254 u64 start, unsigned long len, 259 u64 start, unsigned long len);
255 struct page *page0);
256struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, 260struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
257 u64 start, unsigned long len); 261 u64 start, unsigned long len);
258void free_extent_buffer(struct extent_buffer *eb); 262void free_extent_buffer(struct extent_buffer *eb);