 fs/btrfs/Makefile      |    3
 fs/btrfs/btrfs_inode.h |    3
 fs/btrfs/ctree.h       |    7
 fs/btrfs/disk-io.h     |    1
 fs/btrfs/extent_map.c  | 1656
 fs/btrfs/extent_map.h  |   89
 fs/btrfs/file.c        |  295
 fs/btrfs/inode.c       |  816
 fs/btrfs/super.c       |    2
 9 files changed, 2203 insertions(+), 669 deletions(-)
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 9321438c5eef..ea7a22870350 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -4,7 +4,8 @@ ifneq ($(KERNELRELEASE),)
 obj-m  := btrfs.o
 btrfs-y := super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   hash.o file-item.o inode-item.o inode-map.o disk-io.o \
-	   transaction.o bit-radix.o inode.o file.o tree-defrag.o
+	   transaction.o bit-radix.o inode.o file.o tree-defrag.o \
+	   extent_map.o
 
 #btrfs-y := ctree.o disk-io.o radix-tree.o extent-tree.o print-tree.o \
 #	root-tree.o dir-item.o hash.o file-item.o inode-item.o \
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index f446f161f736..d1d5af471c3c 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -19,11 +19,14 @@
 #ifndef __BTRFS_I__
 #define __BTRFS_I__
 
+#include "extent_map.h"
+
 /* in memory btrfs inode */
 struct btrfs_inode {
 	struct btrfs_root *root;
 	struct btrfs_block_group_cache *block_group;
 	struct btrfs_key location;
+	struct extent_map_tree extent_tree;
 	struct inode vfs_inode;
 
 	/*
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index c7f5161271d3..947f061ed118 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1176,7 +1176,14 @@ int btrfs_commit_write(struct file *file, struct page *page,
 		       unsigned from, unsigned to);
 int btrfs_get_block(struct inode *inode, sector_t iblock,
 		    struct buffer_head *result, int create);
+struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
+				    size_t page_offset, u64 start, u64 end,
+				    int create);
+int btrfs_update_inode(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root,
+		       struct inode *inode);
 /* file.c */
+int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end);
 extern struct file_operations btrfs_file_operations;
 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct inode *inode,
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 81fd18cbd824..5261733b8735 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -77,7 +77,6 @@ int btrfs_insert_dev_radix(struct btrfs_root *root,
 			   u64 num_blocks);
 int btrfs_map_bh_to_logical(struct btrfs_root *root, struct buffer_head *bh,
 			    u64 logical);
-int btrfs_releasepage(struct page *page, gfp_t flags);
 void btrfs_btree_balance_dirty(struct btrfs_root *root);
 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
 void btrfs_mark_buffer_dirty(struct buffer_head *bh);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
new file mode 100644
index 000000000000..d378edf0964e
--- /dev/null
+++ b/fs/btrfs/extent_map.c
@@ -0,0 +1,1656 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include "extent_map.h"

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

/* bits for the extent state */
#define EXTENT_DIRTY 1
#define EXTENT_WRITEBACK (1 << 1)
#define EXTENT_UPTODATE (1 << 2)
#define EXTENT_LOCKED (1 << 3)
#define EXTENT_NEW (1 << 4)
#define EXTENT_DELALLOC (1 << 5)

#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)

static LIST_HEAD(all_states);
spinlock_t state_lock = SPIN_LOCK_UNLOCKED;

void __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("extent_map",
					     sizeof(struct extent_map), 0,
					     SLAB_RECLAIM_ACCOUNT |
					     SLAB_DESTROY_BY_RCU,
					     NULL);
	extent_state_cache = kmem_cache_create("extent_state",
					       sizeof(struct extent_state), 0,
					       SLAB_RECLAIM_ACCOUNT |
					       SLAB_DESTROY_BY_RCU,
					       NULL);
}

void __exit extent_map_exit(void)
{
	while (!list_empty(&all_states)) {
		struct extent_state *state;
		struct list_head *cur = all_states.next;
		state = list_entry(cur, struct extent_state, list);
		printk("found leaked state %Lu %Lu state %d in_tree %d\n",
		       state->start, state->end, state->state, state->in_tree);
		list_del(&state->list);
		kfree(state);
	}
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
}
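
/*
 * [Editorial sketch, not part of this patch] How a module might wire up
 * the two slab caches above; init_btrfs_fs/exit_btrfs_fs and
 * btrfs_fs_type stand in for the real module entry points in super.c.
 */
#if 0
static int __init init_btrfs_fs(void)
{
	extent_map_init();		/* create the two slab caches */
	return register_filesystem(&btrfs_fs_type);
}

static void __exit exit_btrfs_fs(void)
{
	unregister_filesystem(&btrfs_fs_type);
	extent_map_exit();		/* report and reap leaked states */
}
#endif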

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	rwlock_init(&tree->lock);
	tree->mapping = mapping;
}
EXPORT_SYMBOL(extent_map_tree_init);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	spin_lock_irq(&state_lock);
	list_add(&state->list, &all_states);
	spin_unlock_irq(&state_lock);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irq(&state_lock);
		list_del_init(&state->list);
		spin_unlock_irq(&state_lock);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}
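
/*
 * [Editorial sketch, not part of this patch] The three helpers above key
 * an rb-tree on an inclusive byte range and work on any struct whose
 * start/end/in_tree/rb_node fields line up with struct tree_entry, as
 * extent_map and extent_state do here.  A minimal use, assuming a
 * caller-owned rb_root:
 */
#if 0
static void tree_entry_example(struct rb_root *root,
			       struct tree_entry *entry)
{
	struct rb_node *node;

	entry->start = 0;
	entry->end = 4095;		/* inclusive last byte */
	if (tree_insert(root, entry->end, &entry->rb_node))
		return;			/* range already present */

	/* finds the entry containing offset 100, or the next one after */
	node = tree_search(root, 100);
	if (node)
		tree_delete(root, rb_entry(node, struct tree_entry,
					   rb_node)->end);
}
#endif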

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == 0 && prev->block_start == 0) ||
		     (em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);
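
/*
 * [Editorial sketch, not part of this patch] Typical use of
 * add_extent_mapping: fill in an extent_map, insert it, then drop the
 * local reference; the tree holds its own.  Offsets are made up:
 */
#if 0
static int cache_one_mapping(struct extent_map_tree *tree)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->start = 8192;
	em->end = 12287;		/* inclusive */
	em->block_start = 65536;
	em->block_end = 69631;
	ret = add_extent_mapping(tree, em);	/* may merge backward */
	if (ret == -EEXIST)
		;		/* someone else cached this range first */
	free_extent_map(em);	/* drop our ref; the tree keeps its own */
	return ret;
}
#endif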

/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);
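
/*
 * [Editorial sketch, not part of this patch] lookup_extent_mapping
 * returns a referenced object, so each successful lookup must be paired
 * with free_extent_map:
 */
#if 0
static u64 block_for_offset(struct extent_map_tree *tree, u64 offset)
{
	struct extent_map *em;
	u64 block = 0;

	em = lookup_extent_mapping(tree, offset, offset);
	if (em && !IS_ERR(em)) {
		block = em->block_start + (offset - em->start);
		free_extent_map(em);	/* drop the ref taken by lookup */
	}
	return block;
}
#endif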

/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set in their state
 * field are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	if ((end & 4095) == 0) {
		printk("insert state %Lu %Lu strange end\n", start, end);
		WARN_ON(1);
	}
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;
	if ((prealloc->end & 4095) == 0) {
		printk("insert state %Lu %Lu strange end\n", prealloc->start,
		       prealloc->end);
		WARN_ON(1);
	}
	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}
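
/*
 * [Editorial aside, not part of this patch] Worked example of
 * split_state: with orig covering [0, 8191] and split == 4096, the tree
 * afterwards holds prealloc as [0, 4095] and orig as [4096, 8191]; both
 * carry the state bits orig had before the call.
 */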

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irq(&tree->lock);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irq(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start >= end)
		goto out;
	write_unlock_irq(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		state->state |= bits;
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		prealloc->state |= bits;
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	goto search_again;

out:
	write_unlock_irq(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irq(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_DIRTY, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start,end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
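
/*
 * [Editorial sketch, not part of this patch] lock_extent/unlock_extent
 * form a sleeping byte-range lock.  The usual pattern:
 */
#if 0
static void locked_range_example(struct extent_map_tree *tree)
{
	u64 start = 0;
	u64 end = 16383;		/* inclusive */

	lock_extent(tree, start, end, GFP_NOFS);
	/* ... read or update pages and mappings in [start, end] ... */
	unlock_extent(tree, start, end, GFP_NOFS);
}
#endif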

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
static int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
			  int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > end)
			break;

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}
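
/*
 * [Editorial aside, not part of this patch] Example of the 'filled'
 * flag: if [0, 4095] is DIRTY and [4096, 8191] has no state, then
 * test_range_bit(tree, 0, 8191, EXTENT_DIRTY, 0) returns 1 (some byte
 * is dirty) while test_range_bit(tree, 0, 8191, EXTENT_DIRTY, 1)
 * returns 0 (the range is not fully covered).
 */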

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
				struct page *page)
{
	u64 start = page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = (page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}

static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      bio_end_io_t end_io_func)
{
	struct bio *bio;
	int ret = 0;

	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_io_vec[0].bv_page = page;
	bio->bi_io_vec[0].bv_len = size;
	bio->bi_io_vec[0].bv_offset = offset;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = size;

	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}

/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct inode *inode = page->mapping->host;
	u64 start = page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		set_page_private(page, 1);
		page_cache_get(page);
	}

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			iosize = PAGE_CACHE_SIZE - page_offset;
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == 0) {
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = submit_extent_page(READ, tree, page,
					 sector, iosize, page_offset, bdev,
					 end_bio_extent_readpage);
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(extent_read_full_page);
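
/*
 * [Editorial sketch, not part of this patch] A filesystem plugs this in
 * from its address_space readpage method.  BTRFS_I and btrfs_get_extent
 * are assumed from btrfs_inode.h/ctree.h in this series:
 */
#if 0
static int example_readpage(struct file *file, struct page *page)
{
	struct extent_map_tree *tree =
		&BTRFS_I(page->mapping->host)->extent_tree;
	return extent_read_full_page(tree, page, btrfs_get_extent);
}
#endif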

/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	u64 start = page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;

	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
		zero_user_page(page, offset,
			       PAGE_CACHE_SIZE - offset, KM_USER0);
	}

	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		set_page_private(page, 1);
		page_cache_get(page);
	}

	end = page_end;
	lock_extent(tree, start, page_end, GFP_NOFS);

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 1);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == 0 || block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
					 EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		set_range_writeback(tree, cur, cur + iosize - 1);
		ret = submit_extent_page(WRITE, tree, page,
					 sector, iosize, page_offset, bdev,
					 end_bio_extent_writepage);
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	WARN_ON(test_range_bit(tree, start, page_end, EXTENT_DIRTY, 0));
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}
EXPORT_SYMBOL(extent_write_full_page);
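
/*
 * [Editorial sketch, not part of this patch] The matching writepage
 * hook, same assumptions as the readpage sketch above:
 */
#if 0
static int example_writepage(struct page *page,
			     struct writeback_control *wbc)
{
	struct extent_map_tree *tree =
		&BTRFS_I(page->mapping->host)->extent_tree;
	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
}
#endif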
1472 | |||
1473 | /* | ||
1474 | * basic invalidatepage code, this waits on any locked or writeback | ||
1475 | * ranges corresponding to the page, and then deletes any extent state | ||
1476 | * records from the tree | ||
1477 | */ | ||
1478 | int extent_invalidatepage(struct extent_map_tree *tree, | ||
1479 | struct page *page, unsigned long offset) | ||
1480 | { | ||
1481 | u64 start = (page->index << PAGE_CACHE_SHIFT); | ||
1482 | u64 end = start + PAGE_CACHE_SIZE - 1; | ||
1483 | size_t blocksize = page->mapping->host->i_sb->s_blocksize; | ||
1484 | |||
1485 | start += (offset + blocksize -1) & ~(blocksize - 1); | ||
1486 | if (start > end) | ||
1487 | return 0; | ||
1488 | |||
1489 | lock_extent(tree, start, end, GFP_NOFS); | ||
1490 | wait_on_extent_writeback(tree, start, end); | ||
1491 | clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DIRTY, | ||
1492 | 1, 1, GFP_NOFS); | ||
1493 | return 0; | ||
1494 | } | ||
1495 | EXPORT_SYMBOL(extent_invalidatepage); | ||
1496 | |||
1497 | /* | ||
1498 | * simple commit_write call, set_range_dirty is used to mark both | ||
1499 | * the pages and the extent records as dirty | ||
1500 | */ | ||
1501 | int extent_commit_write(struct extent_map_tree *tree, | ||
1502 | struct inode *inode, struct page *page, | ||
1503 | unsigned from, unsigned to) | ||
1504 | { | ||
1505 | loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; | ||
1506 | |||
1507 | if (!PagePrivate(page)) { | ||
1508 | SetPagePrivate(page); | ||
1509 | set_page_private(page, 1); | ||
1510 | page_cache_get(page); | ||
1511 | } | ||
1512 | |||
1513 | set_page_dirty(page); | ||
1514 | |||
1515 | if (pos > inode->i_size) { | ||
1516 | i_size_write(inode, pos); | ||
1517 | mark_inode_dirty(inode); | ||
1518 | } | ||
1519 | return 0; | ||
1520 | } | ||
1521 | EXPORT_SYMBOL(extent_commit_write); | ||
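A filesystem's commit_write a_op can likewise forward directly to this helper. A minimal sketch under that assumption; the name example_commit_write is illustrative, not part of the patch:

static int example_commit_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return extent_commit_write(&BTRFS_I(inode)->extent_tree,
				   inode, page, from, to);
}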
1522 | |||
1523 | int extent_prepare_write(struct extent_map_tree *tree, | ||
1524 | struct inode *inode, struct page *page, | ||
1525 | unsigned from, unsigned to, get_extent_t *get_extent) | ||
1526 | { | ||
1527 | u64 page_start = page->index << PAGE_CACHE_SHIFT; | ||
1528 | u64 page_end = page_start + PAGE_CACHE_SIZE - 1; | ||
1529 | u64 block_start; | ||
1530 | u64 orig_block_start; | ||
1531 | u64 block_end; | ||
1532 | u64 cur_end; | ||
1533 | struct extent_map *em; | ||
1534 | unsigned blocksize = 1 << inode->i_blkbits; | ||
1535 | size_t page_offset = 0; | ||
1536 | size_t block_off_start; | ||
1537 | size_t block_off_end; | ||
1538 | int err = 0; | ||
1539 | int iocount = 0; | ||
1540 | int ret = 0; | ||
1541 | int isnew; | ||
1542 | |||
1543 | if (!PagePrivate(page)) { | ||
1544 | SetPagePrivate(page); | ||
1545 | set_page_private(page, 1); | ||
1546 | page_cache_get(page); | ||
1547 | } | ||
1548 | block_start = (page_start + from) & ~((u64)blocksize - 1); | ||
1549 | block_end = (page_start + to - 1) | (blocksize - 1); | ||
1550 | orig_block_start = block_start; | ||
1551 | |||
1552 | lock_extent(tree, page_start, page_end, GFP_NOFS); | ||
1553 | while (block_start <= block_end) { | ||
1554 | em = get_extent(inode, page, page_offset, block_start, | ||
1555 | block_end, 1); | ||
1556 | if (IS_ERR(em) || !em) { | ||
1557 | goto err; | ||
1558 | } | ||
1559 | cur_end = min(block_end, em->end); | ||
1560 | block_off_start = block_start & (PAGE_CACHE_SIZE - 1); | ||
1561 | block_off_end = block_off_start + blocksize; | ||
1562 | isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS); | ||
1563 | |||
1564 | if (!PageUptodate(page) && isnew && | ||
1565 | (block_off_end > to || block_off_start < from)) { | ||
1566 | void *kaddr; | ||
1567 | |||
1568 | kaddr = kmap_atomic(page, KM_USER0); | ||
1569 | if (block_off_end > to) | ||
1570 | memset(kaddr + to, 0, block_off_end - to); | ||
1571 | if (block_off_start < from) | ||
1572 | memset(kaddr + block_off_start, 0, | ||
1573 | from - block_off_start); | ||
1574 | flush_dcache_page(page); | ||
1575 | kunmap_atomic(kaddr, KM_USER0); | ||
1576 | } | ||
1577 | if (!isnew && !PageUptodate(page) && | ||
1578 | (block_off_end > to || block_off_start < from) && | ||
1579 | !test_range_bit(tree, block_start, cur_end, | ||
1580 | EXTENT_UPTODATE, 1)) { | ||
1581 | u64 sector; | ||
1582 | u64 extent_offset = block_start - em->start; | ||
1583 | size_t iosize; | ||
1584 | sector = (em->block_start + extent_offset) >> 9; | ||
1585 | iosize = (cur_end - block_start + blocksize - 1) & | ||
1586 | ~((u64)blocksize - 1); | ||
1587 | /* | ||
1588 | * we've already got the extent locked, but we | ||
1589 | * need to split the state such that our end_bio | ||
1590 | * handler can clear the lock. | ||
1591 | */ | ||
1592 | set_extent_bit(tree, block_start, | ||
1593 | block_start + iosize - 1, | ||
1594 | EXTENT_LOCKED, 0, NULL, GFP_NOFS); | ||
1595 | ret = submit_extent_page(READ, tree, page, | ||
1596 | sector, iosize, page_offset, em->bdev, | ||
1597 | end_bio_extent_preparewrite); | ||
1598 | iocount++; | ||
1599 | block_start = block_start + iosize; | ||
1600 | } else { | ||
1601 | set_extent_uptodate(tree, block_start, cur_end, | ||
1602 | GFP_NOFS); | ||
1603 | unlock_extent(tree, block_start, cur_end, GFP_NOFS); | ||
1604 | block_start = cur_end + 1; | ||
1605 | } | ||
1606 | page_offset = block_start & (PAGE_CACHE_SIZE - 1); | ||
1607 | free_extent_map(em); | ||
1608 | } | ||
1609 | if (iocount) { | ||
1610 | wait_extent_bit(tree, orig_block_start, | ||
1611 | block_end, EXTENT_LOCKED); | ||
1612 | } | ||
1613 | check_page_uptodate(tree, page); | ||
1614 | err: | ||
1615 | /* FIXME, zero out newly allocated blocks on error */ | ||
1616 | return err; | ||
1617 | } | ||
1618 | EXPORT_SYMBOL(extent_prepare_write); | ||
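The head/tail zeroing in extent_prepare_write() only touches the parts of a newly allocated block that fall outside [from, to). A worked userspace example of that interval math, assuming a 4K page containing a single 4K block:

#include <stdio.h>

int main(void)
{
	unsigned from = 100, to = 500;      /* bytes the caller will write */
	unsigned blocksize = 4096;
	unsigned block_off_start = 0;       /* block's offset within the page */
	unsigned block_off_end = block_off_start + blocksize;

	if (block_off_end > to)             /* zero the tail of the block */
		printf("memset page + %u, len %u\n", to, block_off_end - to);
	if (block_off_start < from)         /* zero the head of the block */
		printf("memset page + %u, len %u\n", block_off_start,
		       from - block_off_start);
	return 0;
}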
1619 | |||
1620 | /* | ||
1621 | * a helper for releasepage. As long as there are no locked extents | ||
1622 | * in the range corresponding to the page, both state records and extent | ||
1623 | * map records are removed | ||
1624 | */ | ||
1625 | int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page) | ||
1626 | { | ||
1627 | struct extent_map *em; | ||
1628 | u64 start = page->index << PAGE_CACHE_SHIFT; | ||
1629 | u64 end = start + PAGE_CACHE_SIZE - 1; | ||
1630 | u64 orig_start = start; | ||
1631 | |||
1632 | while (start <= end) { | ||
1633 | em = lookup_extent_mapping(tree, start, end); | ||
1634 | if (!em || IS_ERR(em)) | ||
1635 | break; | ||
1636 | if (test_range_bit(tree, em->start, em->end, | ||
1637 | EXTENT_LOCKED, 0)) { | ||
1638 | start = em->end + 1; |||
1639 | printk("range still locked %Lu %Lu\n", em->start, em->end); |||
1640 | free_extent_map(em); |||
1641 | break; | ||
1642 | } | ||
1643 | remove_extent_mapping(tree, em); | ||
1644 | start = em->end + 1; | ||
1645 | /* once for the rb tree */ | ||
1646 | free_extent_map(em); | ||
1647 | /* once for us */ | ||
1648 | free_extent_map(em); | ||
1649 | } | ||
1650 | WARN_ON(test_range_bit(tree, orig_start, end, EXTENT_WRITEBACK, 0)); | ||
1651 | clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE, | ||
1652 | 1, 1, GFP_NOFS); | ||
1653 | return 1; | ||
1654 | } | ||
1655 | EXPORT_SYMBOL(try_release_extent_mapping); | ||
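The paired free_extent_map() calls above reflect the reference discipline this code appears to use: allocation hands the caller one reference, and adding to the tree effectively takes a second. A schematic lifetime under that assumption:

/* schematic only; assumes alloc_extent_map() returns with refs == 1 */
struct extent_map *em = alloc_extent_map(GFP_NOFS);  /* refs = 1, caller's  */
add_extent_mapping(tree, em);                        /* refs = 2, +rb-tree  */
/* ... the mapping is now cached and visible to lookups ... */
remove_extent_mapping(tree, em);                     /* unlink from rb-tree */
free_extent_map(em);                                 /* drop the tree's ref */
free_extent_map(em);                                 /* drop ours: freed    */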
1656 | |||
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h new file mode 100644 index 000000000000..108944aab4bd --- /dev/null +++ b/fs/btrfs/extent_map.h | |||
@@ -0,0 +1,89 @@ | |||
1 | #ifndef __EXTENTMAP__ | ||
2 | #define __EXTENTMAP__ | ||
3 | |||
4 | #include <linux/rbtree.h> | ||
5 | |||
6 | #define EXTENT_MAP_INLINE (u64)-2 | ||
7 | #define EXTENT_MAP_DELALLOC (u64)-1 | ||
8 | |||
9 | struct extent_map_tree { | ||
10 | struct rb_root map; | ||
11 | struct rb_root state; | ||
12 | struct address_space *mapping; | ||
13 | rwlock_t lock; | ||
14 | }; | ||
15 | |||
16 | /* note, this must start with the same fields as fs/extent_map.c:tree_entry */ | ||
17 | struct extent_map { | ||
18 | u64 start; | ||
19 | u64 end; /* inclusive */ | ||
20 | int in_tree; | ||
21 | struct rb_node rb_node; | ||
22 | /* block_start and block_end are in bytes */ | ||
23 | u64 block_start; | ||
24 | u64 block_end; /* inclusive */ | ||
25 | struct block_device *bdev; | ||
26 | atomic_t refs; | ||
27 | }; | ||
28 | |||
29 | /* note, this must start with the same fields as fs/extent_map.c:tree_entry */ | ||
30 | struct extent_state { | ||
31 | u64 start; | ||
32 | u64 end; /* inclusive */ | ||
33 | int in_tree; | ||
34 | struct rb_node rb_node; | ||
35 | wait_queue_head_t wq; | ||
36 | atomic_t refs; | ||
37 | unsigned long state; | ||
38 | struct list_head list; | ||
39 | }; | ||
40 | |||
41 | struct extent_buffer { | ||
42 | u64 start; | ||
43 | u64 end; /* inclusive */ | ||
44 | char *addr; | ||
45 | struct page *pages[]; | ||
46 | }; | ||
47 | |||
48 | typedef struct extent_map *(get_extent_t)(struct inode *inode, | ||
49 | struct page *page, | ||
50 | size_t page_offset, | ||
51 | u64 start, u64 end, | ||
52 | int create); | ||
53 | |||
54 | void extent_map_tree_init(struct extent_map_tree *tree, | ||
55 | struct address_space *mapping, gfp_t mask); | ||
56 | struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, | ||
57 | u64 start, u64 end); | ||
58 | int add_extent_mapping(struct extent_map_tree *tree, | ||
59 | struct extent_map *em); | ||
60 | int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em); | ||
61 | int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page); | ||
62 | int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask); | ||
63 | int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask); | ||
64 | struct extent_map *alloc_extent_map(gfp_t mask); | ||
65 | void free_extent_map(struct extent_map *em); | ||
66 | int extent_read_full_page(struct extent_map_tree *tree, struct page *page, | ||
67 | get_extent_t *get_extent); | ||
68 | void __init extent_map_init(void); | ||
69 | void __exit extent_map_exit(void); | ||
70 | int extent_clean_all_trees(struct extent_map_tree *tree); | ||
71 | int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end, | ||
72 | gfp_t mask); | ||
73 | int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end, | ||
74 | gfp_t mask); | ||
75 | int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end, | ||
76 | gfp_t mask); | ||
77 | int extent_invalidatepage(struct extent_map_tree *tree, | ||
78 | struct page *page, unsigned long offset); | ||
79 | int extent_write_full_page(struct extent_map_tree *tree, struct page *page, | ||
80 | get_extent_t *get_extent, | ||
81 | struct writeback_control *wbc); | ||
82 | int extent_prepare_write(struct extent_map_tree *tree, | ||
83 | struct inode *inode, struct page *page, | ||
84 | unsigned from, unsigned to, get_extent_t *get_extent); | ||
85 | int extent_commit_write(struct extent_map_tree *tree, | ||
86 | struct inode *inode, struct page *page, | ||
87 | unsigned from, unsigned to); | ||
88 | int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end); | ||
89 | #endif | ||
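The two "must start with the same fields" notes above imply a common-prefix struct in extent_map.c, presumably along these lines, so one set of rb-tree helpers can cast either an extent_map or an extent_state to it (the real definition lives in extent_map.c and is not shown in this diff):

struct tree_entry {
	u64 start;
	u64 end;          /* inclusive */
	int in_tree;
	struct rb_node rb_node;
};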
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 6933ab11a5cd..71a481894ab6 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -81,14 +81,14 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages) | |||
81 | } | 81 | } |
82 | } | 82 | } |
83 | 83 | ||
84 | static int insert_inline_extent(struct btrfs_root *root, struct inode *inode, | 84 | static int insert_inline_extent(struct btrfs_trans_handle *trans, |
85 | struct btrfs_root *root, struct inode *inode, | ||
85 | u64 offset, ssize_t size, | 86 | u64 offset, ssize_t size, |
86 | struct buffer_head *bh) | 87 | struct page *page, size_t page_offset) |
87 | { | 88 | { |
88 | struct btrfs_key key; | 89 | struct btrfs_key key; |
89 | struct btrfs_path *path; | 90 | struct btrfs_path *path; |
90 | char *ptr, *kaddr; | 91 | char *ptr, *kaddr; |
91 | struct btrfs_trans_handle *trans; | ||
92 | struct btrfs_file_extent_item *ei; | 92 | struct btrfs_file_extent_item *ei; |
93 | u32 datasize; | 93 | u32 datasize; |
94 | int err = 0; | 94 | int err = 0; |
@@ -98,8 +98,6 @@ static int insert_inline_extent(struct btrfs_root *root, struct inode *inode, | |||
98 | if (!path) | 98 | if (!path) |
99 | return -ENOMEM; | 99 | return -ENOMEM; |
100 | 100 | ||
101 | mutex_lock(&root->fs_info->fs_mutex); | ||
102 | trans = btrfs_start_transaction(root, 1); | ||
103 | btrfs_set_trans_block_group(trans, inode); | 101 | btrfs_set_trans_block_group(trans, inode); |
104 | 102 | ||
105 | key.objectid = inode->i_ino; | 103 | key.objectid = inode->i_ino; |
@@ -122,18 +120,13 @@ static int insert_inline_extent(struct btrfs_root *root, struct inode *inode, | |||
122 | BTRFS_FILE_EXTENT_INLINE); | 120 | BTRFS_FILE_EXTENT_INLINE); |
123 | ptr = btrfs_file_extent_inline_start(ei); | 121 | ptr = btrfs_file_extent_inline_start(ei); |
124 | 122 | ||
125 | kaddr = kmap_atomic(bh->b_page, KM_USER0); | 123 | kaddr = kmap_atomic(page, KM_USER0); |
126 | btrfs_memcpy(root, path->nodes[0]->b_data, | 124 | btrfs_memcpy(root, path->nodes[0]->b_data, |
127 | ptr, kaddr + bh_offset(bh), | 125 | ptr, kaddr + page_offset, size); |
128 | size); | ||
129 | kunmap_atomic(kaddr, KM_USER0); | 126 | kunmap_atomic(kaddr, KM_USER0); |
130 | btrfs_mark_buffer_dirty(path->nodes[0]); | 127 | btrfs_mark_buffer_dirty(path->nodes[0]); |
131 | fail: | 128 | fail: |
132 | btrfs_free_path(path); | 129 | btrfs_free_path(path); |
133 | ret = btrfs_end_transaction(trans, root); | ||
134 | if (ret && !err) | ||
135 | err = ret; | ||
136 | mutex_unlock(&root->fs_info->fs_mutex); | ||
137 | return err; | 130 | return err; |
138 | } | 131 | } |
139 | 132 | ||
@@ -145,45 +138,143 @@ static int dirty_and_release_pages(struct btrfs_trans_handle *trans, | |||
145 | loff_t pos, | 138 | loff_t pos, |
146 | size_t write_bytes) | 139 | size_t write_bytes) |
147 | { | 140 | { |
148 | int i; | ||
149 | int offset; | ||
150 | int err = 0; | 141 | int err = 0; |
151 | int ret; | 142 | int i; |
152 | int this_write; | ||
153 | struct inode *inode = file->f_path.dentry->d_inode; | 143 | struct inode *inode = file->f_path.dentry->d_inode; |
154 | struct buffer_head *bh; | 144 | struct extent_map *em; |
145 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | ||
146 | struct btrfs_key ins; | ||
147 | u64 hint_block; | ||
148 | u64 num_blocks; | ||
149 | u64 start_pos; | ||
150 | u64 end_of_last_block; | ||
151 | u64 end_pos = pos + write_bytes; | ||
152 | loff_t isize = i_size_read(inode); | ||
155 | 153 | ||
156 | for (i = 0; i < num_pages; i++) { | 154 | em = alloc_extent_map(GFP_NOFS); |
157 | offset = pos & (PAGE_CACHE_SIZE -1); | 155 | if (!em) |
158 | this_write = min((size_t)PAGE_CACHE_SIZE - offset, write_bytes); | 156 | return -ENOMEM; |
159 | 157 | ||
160 | /* FIXME, one block at a time */ | 158 | em->bdev = inode->i_sb->s_bdev; |
161 | bh = page_buffers(pages[i]); | ||
162 | 159 | ||
163 | if (buffer_mapped(bh) && bh->b_blocknr == 0) { | 160 | start_pos = pos & ~((u64)root->blocksize - 1); |
164 | ret = insert_inline_extent(root, inode, | 161 | num_blocks = (write_bytes + pos - start_pos + root->blocksize - 1) >> |
165 | pages[i]->index << PAGE_CACHE_SHIFT, | 162 | inode->i_blkbits; |
166 | offset + this_write, bh); | ||
167 | if (ret) { | ||
168 | err = ret; | ||
169 | goto failed; | ||
170 | } | ||
171 | } | ||
172 | 163 | ||
173 | ret = btrfs_commit_write(file, pages[i], offset, | 164 | end_of_last_block = start_pos + (num_blocks << inode->i_blkbits) - 1; |
174 | offset + this_write); | 165 | mutex_lock(&root->fs_info->fs_mutex); |
175 | pos += this_write; | 166 | trans = btrfs_start_transaction(root, 1); |
176 | if (ret) { | 167 | if (!trans) { |
177 | err = ret; | 168 | err = -ENOMEM; |
169 | goto out_unlock; | ||
170 | } | ||
171 | btrfs_set_trans_block_group(trans, inode); | ||
172 | inode->i_blocks += num_blocks << 3; | ||
173 | hint_block = 0; | ||
174 | |||
175 | if ((end_of_last_block & 4095) == 0) { | ||
176 | printk("strange end of last %Lu %lu %Lu\n", start_pos, write_bytes, end_of_last_block); | ||
177 | } | ||
178 | set_extent_uptodate(em_tree, start_pos, end_of_last_block, GFP_NOFS); | ||
179 | |||
180 | /* FIXME...EIEIO, ENOSPC and more */ | ||
181 | |||
182 | /* step one, delete the existing extents in this range */ | ||
183 | /* FIXME blocksize != pagesize */ | ||
184 | if (start_pos < inode->i_size) { | ||
185 | err = btrfs_drop_extents(trans, root, inode, | ||
186 | start_pos, (pos + write_bytes + root->blocksize -1) & | ||
187 | ~((u64)root->blocksize - 1), &hint_block); | ||
188 | if (err) | ||
189 | goto failed; | ||
190 | } | ||
191 | |||
192 | /* insert any holes we need to create */ | ||
193 | if (inode->i_size < start_pos) { | ||
194 | u64 last_pos_in_file; | ||
195 | u64 hole_size; | ||
196 | u64 mask = root->blocksize - 1; | ||
197 | last_pos_in_file = (isize + mask) & ~mask; | ||
198 | hole_size = (start_pos - last_pos_in_file + mask) & ~mask; | ||
199 | hole_size >>= inode->i_blkbits; | ||
200 | if (last_pos_in_file < start_pos) { | ||
201 | err = btrfs_insert_file_extent(trans, root, | ||
202 | inode->i_ino, | ||
203 | last_pos_in_file, | ||
204 | 0, 0, hole_size); | ||
205 | } | ||
206 | if (err) | ||
178 | goto failed; | 207 | goto failed; |
208 | } | ||
209 | |||
210 | /* | ||
211 | * either allocate an extent for the new bytes or setup the key | ||
212 | * to show we are doing inline data in the extent | ||
213 | */ | ||
214 | if (isize >= PAGE_CACHE_SIZE || pos + write_bytes < inode->i_size || | ||
215 | pos + write_bytes - start_pos > BTRFS_MAX_INLINE_DATA_SIZE(root)) { | ||
216 | err = btrfs_alloc_extent(trans, root, inode->i_ino, | ||
217 | num_blocks, 0, hint_block, (u64)-1, | ||
218 | &ins, 1); | ||
219 | BUG_ON(err); | ||
220 | err = btrfs_insert_file_extent(trans, root, inode->i_ino, | ||
221 | start_pos, ins.objectid, ins.offset, | ||
222 | ins.offset); | ||
223 | BUG_ON(err); | ||
224 | em->start = start_pos; | ||
225 | em->end = end_of_last_block; | ||
226 | em->block_start = ins.objectid << inode->i_blkbits; | ||
227 | em->block_end = em->block_start + | ||
228 | (ins.offset << inode->i_blkbits) - 1; | ||
229 | set_extent_dirty(em_tree, start_pos, end_of_last_block, | ||
230 | GFP_NOFS); | ||
231 | err = add_extent_mapping(em_tree, em); | ||
232 | for (i = 0; i < num_pages; i++) { | ||
233 | struct page *p = pages[i]; | ||
234 | SetPageUptodate(p); | ||
235 | __set_page_dirty_nobuffers(p); | ||
179 | } | 236 | } |
180 | WARN_ON(this_write > write_bytes); | 237 | } else { |
181 | write_bytes -= this_write; | 238 | struct page *p = pages[0]; |
239 | err = insert_inline_extent(trans, root, inode, start_pos, | ||
240 | end_pos - start_pos, p, 0); | ||
241 | BUG_ON(err); | ||
242 | em->start = start_pos; | ||
243 | em->end = end_pos; | ||
244 | em->block_start = EXTENT_MAP_INLINE; | ||
245 | em->block_end = EXTENT_MAP_INLINE; | ||
246 | add_extent_mapping(em_tree, em); | ||
247 | } | ||
248 | if (end_pos > isize) { | ||
249 | i_size_write(inode, end_pos); | ||
250 | btrfs_update_inode(trans, root, inode); | ||
182 | } | 251 | } |
183 | failed: | 252 | failed: |
253 | err = btrfs_end_transaction(trans, root); | ||
254 | out_unlock: | ||
255 | mutex_unlock(&root->fs_info->fs_mutex); | ||
256 | free_extent_map(em); | ||
184 | return err; | 257 | return err; |
185 | } | 258 | } |
186 | 259 | ||
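The branch in dirty_and_release_pages() above that chooses between an allocated extent and inline data hinges on a three-part predicate. A userspace rendering with an assumed 4K page size and a hypothetical 2048-byte inline limit:

#include <stdio.h>

int main(void)
{
	/* assumed values; BTRFS_MAX_INLINE_DATA_SIZE is stubbed as 2048 */
	unsigned long long isize = 0, pos = 0, write_bytes = 300, start_pos = 0;
	unsigned long long page_cache_size = 4096, max_inline = 2048;
	int use_real_extent;

	use_real_extent = isize >= page_cache_size ||
			  pos + write_bytes < isize ||
			  pos + write_bytes - start_pos > max_inline;
	/* a small write at the start of an empty file stays inline */
	printf("%s\n", use_real_extent ? "allocate extent" : "inline data");
	return 0;
}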
260 | int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) | ||
261 | { | ||
262 | struct extent_map *em; | ||
263 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | ||
264 | |||
265 | while (1) { | ||
266 | em = lookup_extent_mapping(em_tree, start, end); | ||
267 | if (!em) | ||
268 | break; | ||
269 | remove_extent_mapping(em_tree, em); | ||
270 | /* once for us */ | ||
271 | free_extent_map(em); | ||
272 | /* once for the tree */ | ||
273 | free_extent_map(em); | ||
274 | } | ||
275 | return 0; | ||
276 | } | ||
277 | |||
187 | /* | 278 | /* |
188 | * this is very complex, but the basic idea is to drop all extents | 279 | * this is very complex, but the basic idea is to drop all extents |
189 | * in the range start - end. hint_block is filled in with a block number | 280 | * in the range start - end. hint_block is filled in with a block number |
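A sketch of the call shape that description implies; the arguments mirror the prototype in ctree.h, and aligned_end stands in for a block-aligned end offset computed by the caller:

u64 hint_block = 0;
int err;

err = btrfs_drop_extents(trans, root, inode,
			 start_pos, aligned_end, &hint_block);
/* on success, hint_block is a good hint to pass to
 * btrfs_alloc_extent() for the replacement extent */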
@@ -213,6 +304,8 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, | |||
213 | int found_inline; | 304 | int found_inline; |
214 | int recow; | 305 | int recow; |
215 | 306 | ||
307 | btrfs_drop_extent_cache(inode, start, end - 1); | ||
308 | |||
216 | path = btrfs_alloc_path(); | 309 | path = btrfs_alloc_path(); |
217 | if (!path) | 310 | if (!path) |
218 | return -ENOMEM; | 311 | return -ENOMEM; |
@@ -434,18 +527,9 @@ static int prepare_pages(struct btrfs_root *root, | |||
434 | int i; | 527 | int i; |
435 | unsigned long index = pos >> PAGE_CACHE_SHIFT; | 528 | unsigned long index = pos >> PAGE_CACHE_SHIFT; |
436 | struct inode *inode = file->f_path.dentry->d_inode; | 529 | struct inode *inode = file->f_path.dentry->d_inode; |
437 | int offset; | ||
438 | int err = 0; | 530 | int err = 0; |
439 | int this_write; | ||
440 | struct buffer_head *bh; | ||
441 | struct buffer_head *head; | ||
442 | loff_t isize = i_size_read(inode); | ||
443 | struct btrfs_trans_handle *trans; | ||
444 | u64 hint_block; | ||
445 | u64 num_blocks; | 531 | u64 num_blocks; |
446 | u64 alloc_extent_start; | ||
447 | u64 start_pos; | 532 | u64 start_pos; |
448 | struct btrfs_key ins; | ||
449 | 533 | ||
450 | start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1); | 534 | start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1); |
451 | num_blocks = (write_bytes + pos - start_pos + root->blocksize - 1) >> | 535 | num_blocks = (write_bytes + pos - start_pos + root->blocksize - 1) >> |
@@ -457,119 +541,17 @@ static int prepare_pages(struct btrfs_root *root, | |||
457 | pages[i] = grab_cache_page(inode->i_mapping, index + i); | 541 | pages[i] = grab_cache_page(inode->i_mapping, index + i); |
458 | if (!pages[i]) { | 542 | if (!pages[i]) { |
459 | err = -ENOMEM; | 543 | err = -ENOMEM; |
460 | goto failed_release; | 544 | BUG_ON(1); |
461 | } | 545 | } |
462 | cancel_dirty_page(pages[i], PAGE_CACHE_SIZE); | 546 | cancel_dirty_page(pages[i], PAGE_CACHE_SIZE); |
463 | wait_on_page_writeback(pages[i]); | 547 | wait_on_page_writeback(pages[i]); |
464 | } | 548 | if (!PagePrivate(pages[i])) { |
465 | 549 | SetPagePrivate(pages[i]); | |
466 | mutex_lock(&root->fs_info->fs_mutex); | 550 | set_page_private(pages[i], 1); |
467 | trans = btrfs_start_transaction(root, 1); | 551 | page_cache_get(pages[i]); |
468 | if (!trans) { | ||
469 | err = -ENOMEM; | ||
470 | mutex_unlock(&root->fs_info->fs_mutex); | ||
471 | goto out_unlock; | ||
472 | } | ||
473 | btrfs_set_trans_block_group(trans, inode); | ||
474 | /* FIXME blocksize != 4096 */ | ||
475 | inode->i_blocks += num_blocks << 3; | ||
476 | hint_block = 0; | ||
477 | |||
478 | /* FIXME...EIEIO, ENOSPC and more */ | ||
479 | |||
480 | /* step one, delete the existing extents in this range */ | ||
481 | /* FIXME blocksize != pagesize */ | ||
482 | if (start_pos < inode->i_size) { | ||
483 | err = btrfs_drop_extents(trans, root, inode, | ||
484 | start_pos, (pos + write_bytes + root->blocksize -1) & | ||
485 | ~((u64)root->blocksize - 1), &hint_block); | ||
486 | if (err) | ||
487 | goto failed_release; | ||
488 | } | ||
489 | |||
490 | /* insert any holes we need to create */ | ||
491 | if (inode->i_size < start_pos) { | ||
492 | u64 last_pos_in_file; | ||
493 | u64 hole_size; | ||
494 | u64 mask = root->blocksize - 1; | ||
495 | last_pos_in_file = (isize + mask) & ~mask; | ||
496 | hole_size = (start_pos - last_pos_in_file + mask) & ~mask; | ||
497 | hole_size >>= inode->i_blkbits; | ||
498 | if (last_pos_in_file < start_pos) { | ||
499 | err = btrfs_insert_file_extent(trans, root, | ||
500 | inode->i_ino, | ||
501 | last_pos_in_file, | ||
502 | 0, 0, hole_size); | ||
503 | } | ||
504 | if (err) | ||
505 | goto failed_release; | ||
506 | } | ||
507 | |||
508 | /* | ||
509 | * either allocate an extent for the new bytes or setup the key | ||
510 | * to show we are doing inline data in the extent | ||
511 | */ | ||
512 | if (isize >= PAGE_CACHE_SIZE || pos + write_bytes < inode->i_size || | ||
513 | pos + write_bytes - start_pos > BTRFS_MAX_INLINE_DATA_SIZE(root)) { | ||
514 | err = btrfs_alloc_extent(trans, root, inode->i_ino, | ||
515 | num_blocks, 0, hint_block, (u64)-1, | ||
516 | &ins, 1); | ||
517 | if (err) | ||
518 | goto failed_truncate; | ||
519 | err = btrfs_insert_file_extent(trans, root, inode->i_ino, | ||
520 | start_pos, ins.objectid, ins.offset, | ||
521 | ins.offset); | ||
522 | if (err) | ||
523 | goto failed_truncate; | ||
524 | } else { | ||
525 | ins.offset = 0; | ||
526 | ins.objectid = 0; | ||
527 | } | ||
528 | BUG_ON(err); | ||
529 | alloc_extent_start = ins.objectid; | ||
530 | err = btrfs_end_transaction(trans, root); | ||
531 | mutex_unlock(&root->fs_info->fs_mutex); | ||
532 | |||
533 | for (i = 0; i < num_pages; i++) { | ||
534 | offset = pos & (PAGE_CACHE_SIZE -1); | ||
535 | this_write = min((size_t)PAGE_CACHE_SIZE - offset, write_bytes); | ||
536 | if (!page_has_buffers(pages[i])) { | ||
537 | create_empty_buffers(pages[i], | ||
538 | root->fs_info->sb->s_blocksize, | ||
539 | (1 << BH_Uptodate)); | ||
540 | } | 552 | } |
541 | head = page_buffers(pages[i]); | ||
542 | bh = head; | ||
543 | do { | ||
544 | err = btrfs_map_bh_to_logical(root, bh, | ||
545 | alloc_extent_start); | ||
546 | BUG_ON(err); | ||
547 | if (err) | ||
548 | goto failed_truncate; | ||
549 | bh = bh->b_this_page; | ||
550 | if (alloc_extent_start) | ||
551 | alloc_extent_start++; | ||
552 | } while (bh != head); | ||
553 | pos += this_write; | ||
554 | WARN_ON(this_write > write_bytes); | ||
555 | write_bytes -= this_write; | ||
556 | } | 553 | } |
557 | return 0; | 554 | return 0; |
558 | |||
559 | failed_release: | ||
560 | btrfs_drop_pages(pages, num_pages); | ||
561 | return err; | ||
562 | |||
563 | failed_truncate: | ||
564 | btrfs_drop_pages(pages, num_pages); | ||
565 | if (pos > isize) | ||
566 | vmtruncate(inode, isize); | ||
567 | return err; | ||
568 | |||
569 | out_unlock: | ||
570 | mutex_unlock(&root->fs_info->fs_mutex); | ||
571 | goto failed_release; | ||
572 | |||
573 | } | 555 | } |
574 | 556 | ||
575 | static ssize_t btrfs_file_write(struct file *file, const char __user *buf, | 557 | static ssize_t btrfs_file_write(struct file *file, const char __user *buf, |
@@ -685,7 +667,6 @@ out: | |||
685 | page_cache_release(pinned[1]); | 667 | page_cache_release(pinned[1]); |
686 | *ppos = pos; | 668 | *ppos = pos; |
687 | current->backing_dev_info = NULL; | 669 | current->backing_dev_info = NULL; |
688 | mark_inode_dirty(inode); | ||
689 | return num_written ? num_written : err; | 670 | return num_written ? num_written : err; |
690 | } | 671 | } |
691 | 672 | ||
@@ -714,8 +695,8 @@ static int btrfs_sync_file(struct file *file, | |||
714 | mutex_unlock(&root->fs_info->trans_mutex); | 695 | mutex_unlock(&root->fs_info->trans_mutex); |
715 | 696 | ||
716 | /* | 697 | /* |
717 | * ok we haven't committed the transaction yet, lets do a commit | 698 | * ok we haven't committed the transaction yet, let's do a commit |
718 | */ | 699 | */ |
719 | trans = btrfs_start_transaction(root, 1); | 700 | trans = btrfs_start_transaction(root, 1); |
720 | if (!trans) { | 701 | if (!trans) { |
721 | ret = -ENOMEM; | 702 | ret = -ENOMEM; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 504d37dfa6c9..fd4cb65fcf1f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -121,6 +121,8 @@ void btrfs_read_locked_inode(struct inode *inode) | |||
121 | switch (inode->i_mode & S_IFMT) { | 121 | switch (inode->i_mode & S_IFMT) { |
122 | case S_IFREG: | 122 | case S_IFREG: |
123 | inode->i_mapping->a_ops = &btrfs_aops; | 123 | inode->i_mapping->a_ops = &btrfs_aops; |
124 | extent_map_tree_init(&BTRFS_I(inode)->extent_tree, | ||
125 | inode->i_mapping, GFP_NOFS); | ||
124 | inode->i_fop = &btrfs_file_operations; | 126 | inode->i_fop = &btrfs_file_operations; |
125 | inode->i_op = &btrfs_file_inode_operations; | 127 | inode->i_op = &btrfs_file_inode_operations; |
126 | break; | 128 | break; |
@@ -169,7 +171,7 @@ static void fill_inode_item(struct btrfs_inode_item *item, | |||
169 | BTRFS_I(inode)->block_group->key.objectid); | 171 | BTRFS_I(inode)->block_group->key.objectid); |
170 | } | 172 | } |
171 | 173 | ||
172 | static int btrfs_update_inode(struct btrfs_trans_handle *trans, | 174 | int btrfs_update_inode(struct btrfs_trans_handle *trans, |
173 | struct btrfs_root *root, | 175 | struct btrfs_root *root, |
174 | struct inode *inode) | 176 | struct inode *inode) |
175 | { | 177 | { |
@@ -400,6 +402,7 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans, | |||
400 | int found_extent; | 402 | int found_extent; |
401 | int del_item; | 403 | int del_item; |
402 | 404 | ||
405 | btrfs_drop_extent_cache(inode, inode->i_size, (u64)-1); | ||
403 | path = btrfs_alloc_path(); | 406 | path = btrfs_alloc_path(); |
404 | path->reada = -1; | 407 | path->reada = -1; |
405 | BUG_ON(!path); | 408 | BUG_ON(!path); |
@@ -511,6 +514,50 @@ error: | |||
511 | return ret; | 514 | return ret; |
512 | } | 515 | } |
513 | 516 | ||
517 | static int btrfs_cow_one_page(struct btrfs_trans_handle *trans, | ||
518 | struct inode *inode, struct page *page, | ||
519 | size_t zero_start) | ||
520 | { | ||
521 | char *kaddr; | ||
522 | int ret = 0; | ||
523 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
524 | u64 alloc_hint = 0; | ||
525 | u64 page_start = page->index << PAGE_CACHE_SHIFT; | ||
526 | struct btrfs_key ins; | ||
527 | |||
528 | if (!PagePrivate(page)) { | ||
529 | SetPagePrivate(page); | ||
530 | set_page_private(page, 1); | ||
531 | page_cache_get(page); | ||
532 | } | ||
533 | |||
534 | btrfs_set_trans_block_group(trans, inode); | ||
535 | |||
536 | ret = btrfs_drop_extents(trans, root, inode, | ||
537 | page_start, page_start + PAGE_CACHE_SIZE, | ||
538 | &alloc_hint); | ||
539 | if (ret) | ||
540 | goto out; | ||
541 | ret = btrfs_alloc_extent(trans, root, inode->i_ino, 1, 0, | ||
542 | alloc_hint, (u64)-1, &ins, 1); | ||
543 | if (ret) | ||
544 | goto out; | ||
545 | ret = btrfs_insert_file_extent(trans, root, inode->i_ino, | ||
546 | page_start, ins.objectid, 1, 1); | ||
547 | if (ret) | ||
548 | goto out; | ||
549 | SetPageChecked(page); | ||
550 | kaddr = kmap(page); | ||
551 | if (zero_start != PAGE_CACHE_SIZE) { | ||
552 | memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); | ||
553 | flush_dcache_page(page); | ||
554 | } | ||
555 | kunmap(page); | ||
556 | |||
557 | out: | ||
558 | return ret; | ||
559 | } | ||
560 | |||
514 | /* | 561 | /* |
515 | * taken from block_truncate_page, but does cow as it zeros out | 562 | * taken from block_truncate_page, but does cow as it zeros out |
516 | * any bytes left in the last page in the file. | 563 | * any bytes left in the last page in the file. |
@@ -518,16 +565,14 @@ error: | |||
518 | static int btrfs_truncate_page(struct address_space *mapping, loff_t from) | 565 | static int btrfs_truncate_page(struct address_space *mapping, loff_t from) |
519 | { | 566 | { |
520 | struct inode *inode = mapping->host; | 567 | struct inode *inode = mapping->host; |
568 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
569 | struct btrfs_trans_handle *trans; | ||
521 | unsigned blocksize = 1 << inode->i_blkbits; | 570 | unsigned blocksize = 1 << inode->i_blkbits; |
522 | pgoff_t index = from >> PAGE_CACHE_SHIFT; | 571 | pgoff_t index = from >> PAGE_CACHE_SHIFT; |
523 | unsigned offset = from & (PAGE_CACHE_SIZE-1); | 572 | unsigned offset = from & (PAGE_CACHE_SIZE-1); |
524 | struct page *page; | 573 | struct page *page; |
525 | char *kaddr; | ||
526 | int ret = 0; | 574 | int ret = 0; |
527 | struct btrfs_root *root = BTRFS_I(inode)->root; | 575 | u64 page_start; |
528 | u64 alloc_hint = 0; | ||
529 | struct btrfs_key ins; | ||
530 | struct btrfs_trans_handle *trans; | ||
531 | 576 | ||
532 | if ((offset & (blocksize - 1)) == 0) | 577 | if ((offset & (blocksize - 1)) == 0) |
533 | goto out; | 578 | goto out; |
@@ -536,7 +581,6 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from) | |||
536 | page = grab_cache_page(mapping, index); | 581 | page = grab_cache_page(mapping, index); |
537 | if (!page) | 582 | if (!page) |
538 | goto out; | 583 | goto out; |
539 | |||
540 | if (!PageUptodate(page)) { | 584 | if (!PageUptodate(page)) { |
541 | ret = btrfs_readpage(NULL, page); | 585 | ret = btrfs_readpage(NULL, page); |
542 | lock_page(page); | 586 | lock_page(page); |
@@ -545,37 +589,24 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from) | |||
545 | goto out; | 589 | goto out; |
546 | } | 590 | } |
547 | } | 591 | } |
592 | page_start = page->index << PAGE_CACHE_SHIFT; | ||
593 | |||
548 | mutex_lock(&root->fs_info->fs_mutex); | 594 | mutex_lock(&root->fs_info->fs_mutex); |
549 | trans = btrfs_start_transaction(root, 1); | 595 | trans = btrfs_start_transaction(root, 1); |
550 | btrfs_set_trans_block_group(trans, inode); | 596 | ret = btrfs_cow_one_page(trans, inode, page, offset); |
551 | 597 | if (!ret) { | |
552 | ret = btrfs_drop_extents(trans, root, inode, | 598 | char *kaddr = kmap(page); |
553 | page->index << PAGE_CACHE_SHIFT, | 599 | ret = btrfs_csum_file_block(trans, root, inode->i_ino, |
554 | (page->index + 1) << PAGE_CACHE_SHIFT, | 600 | page_start, kaddr, PAGE_CACHE_SIZE); |
555 | &alloc_hint); | 601 | kunmap(page); |
556 | if (ret) | 602 | } |
557 | goto out; | 603 | set_extent_dirty(&BTRFS_I(inode)->extent_tree, |
558 | ret = btrfs_alloc_extent(trans, root, inode->i_ino, 1, 0, | 604 | page_start, page_start + PAGE_CACHE_SIZE - 1, |
559 | alloc_hint, (u64)-1, &ins, 1); | 605 | GFP_NOFS); |
560 | if (ret) | 606 | set_page_dirty(page); |
561 | goto out; | ||
562 | ret = btrfs_insert_file_extent(trans, root, inode->i_ino, | ||
563 | page->index << PAGE_CACHE_SHIFT, | ||
564 | ins.objectid, 1, 1); | ||
565 | if (ret) | ||
566 | goto out; | ||
567 | SetPageChecked(page); | ||
568 | kaddr = kmap(page); | ||
569 | memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); | ||
570 | flush_dcache_page(page); | ||
571 | ret = btrfs_csum_file_block(trans, root, inode->i_ino, | ||
572 | page->index << PAGE_CACHE_SHIFT, | ||
573 | kaddr, PAGE_CACHE_SIZE); | ||
574 | kunmap(page); | ||
575 | btrfs_end_transaction(trans, root); | 607 | btrfs_end_transaction(trans, root); |
576 | mutex_unlock(&root->fs_info->fs_mutex); | 608 | mutex_unlock(&root->fs_info->fs_mutex); |
577 | 609 | ||
578 | set_page_dirty(page); | ||
579 | unlock_page(page); | 610 | unlock_page(page); |
580 | page_cache_release(page); | 611 | page_cache_release(page); |
581 | out: | 612 | out: |
@@ -1095,6 +1126,8 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, | |||
1095 | inode->i_mapping->a_ops = &btrfs_aops; | 1126 | inode->i_mapping->a_ops = &btrfs_aops; |
1096 | inode->i_fop = &btrfs_file_operations; | 1127 | inode->i_fop = &btrfs_file_operations; |
1097 | inode->i_op = &btrfs_file_inode_operations; | 1128 | inode->i_op = &btrfs_file_inode_operations; |
1129 | extent_map_tree_init(&BTRFS_I(inode)->extent_tree, | ||
1130 | inode->i_mapping, GFP_NOFS); | ||
1098 | } | 1131 | } |
1099 | dir->i_sb->s_dirt = 1; | 1132 | dir->i_sb->s_dirt = 1; |
1100 | btrfs_update_inode_block_group(trans, inode); | 1133 | btrfs_update_inode_block_group(trans, inode); |
@@ -1238,6 +1271,182 @@ out_unlock: | |||
1238 | return err; | 1271 | return err; |
1239 | } | 1272 | } |
1240 | 1273 | ||
1274 | struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, | ||
1275 | size_t page_offset, u64 start, u64 end, | ||
1276 | int create) | ||
1277 | { | ||
1278 | int ret; | ||
1279 | int err = 0; | ||
1280 | u64 blocknr; | ||
1281 | u64 extent_start = 0; | ||
1282 | u64 extent_end = 0; | ||
1283 | u64 objectid = inode->i_ino; | ||
1284 | u32 found_type; | ||
1285 | int failed_insert = 0; | ||
1286 | struct btrfs_path *path; | ||
1287 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
1288 | struct btrfs_file_extent_item *item; | ||
1289 | struct btrfs_leaf *leaf; | ||
1290 | struct btrfs_disk_key *found_key; | ||
1291 | struct extent_map *em = NULL; | ||
1292 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | ||
1293 | struct btrfs_trans_handle *trans = NULL; | ||
1294 | |||
1295 | path = btrfs_alloc_path(); | ||
1296 | BUG_ON(!path); | ||
1297 | mutex_lock(&root->fs_info->fs_mutex); | ||
1298 | |||
1299 | again: | ||
1300 | em = lookup_extent_mapping(em_tree, start, end); | ||
1301 | if (em) { | ||
1302 | goto out; | ||
1303 | } | ||
1304 | if (!em) { | ||
1305 | em = alloc_extent_map(GFP_NOFS); | ||
1306 | if (!em) { | ||
1307 | err = -ENOMEM; | ||
1308 | goto out; | ||
1309 | } | ||
1310 | em->start = 0; | ||
1311 | em->end = 0; | ||
1312 | } | ||
1313 | em->bdev = inode->i_sb->s_bdev; | ||
1314 | ret = btrfs_lookup_file_extent(NULL, root, path, | ||
1315 | objectid, start, 0); | ||
1316 | if (ret < 0) { | ||
1317 | err = ret; | ||
1318 | goto out; | ||
1319 | } | ||
1320 | |||
1321 | if (ret != 0) { | ||
1322 | if (path->slots[0] == 0) | ||
1323 | goto not_found; | ||
1324 | path->slots[0]--; | ||
1325 | } | ||
1326 | |||
1327 | item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0], | ||
1328 | struct btrfs_file_extent_item); | ||
1329 | leaf = btrfs_buffer_leaf(path->nodes[0]); | ||
1330 | blocknr = btrfs_file_extent_disk_blocknr(item); | ||
1331 | blocknr += btrfs_file_extent_offset(item); | ||
1332 | |||
1333 | /* are we inside the extent that was found? */ | ||
1334 | found_key = &leaf->items[path->slots[0]].key; | ||
1335 | found_type = btrfs_disk_key_type(found_key); | ||
1336 | if (btrfs_disk_key_objectid(found_key) != objectid || | ||
1337 | found_type != BTRFS_EXTENT_DATA_KEY) { | ||
1338 | goto not_found; | ||
1339 | } | ||
1340 | |||
1341 | found_type = btrfs_file_extent_type(item); | ||
1342 | extent_start = btrfs_disk_key_offset(&leaf->items[path->slots[0]].key); | ||
1343 | if (found_type == BTRFS_FILE_EXTENT_REG) { | ||
1344 | extent_end = extent_start + | ||
1345 | (btrfs_file_extent_num_blocks(item) << inode->i_blkbits); | ||
1346 | err = 0; | ||
1347 | if (start < extent_start || start > extent_end) { | ||
1348 | em->start = start; | ||
1349 | if (start < extent_start) { | ||
1350 | em->end = extent_end - 1; | ||
1351 | } else { | ||
1352 | em->end = end; | ||
1353 | } | ||
1354 | goto not_found_em; | ||
1355 | } | ||
1356 | if (btrfs_file_extent_disk_blocknr(item) == 0) { | ||
1357 | em->start = extent_start; | ||
1358 | em->end = extent_end - 1; | ||
1359 | em->block_start = 0; | ||
1360 | em->block_end = 0; | ||
1361 | goto insert; | ||
1362 | } | ||
1363 | em->block_start = blocknr << inode->i_blkbits; | ||
1364 | em->block_end = em->block_start + | ||
1365 | (btrfs_file_extent_num_blocks(item) << | ||
1366 | inode->i_blkbits) - 1; | ||
1367 | em->start = extent_start; | ||
1368 | em->end = extent_end - 1; | ||
1369 | goto insert; | ||
1370 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { | ||
1371 | char *ptr; | ||
1372 | char *map; | ||
1373 | u32 size; | ||
1374 | |||
1375 | size = btrfs_file_extent_inline_len(leaf->items + | ||
1376 | path->slots[0]); | ||
1377 | extent_end = extent_start + size; | ||
1378 | if (start < extent_start || start > extent_end) { | ||
1379 | em->start = start; | ||
1380 | if (start < extent_start) { | ||
1381 | em->end = extent_end - 1; | ||
1382 | } else { | ||
1383 | em->end = end; | ||
1384 | } | ||
1385 | goto not_found_em; | ||
1386 | } | ||
1387 | em->block_start = EXTENT_MAP_INLINE; | ||
1388 | em->block_end = EXTENT_MAP_INLINE; | ||
1389 | em->start = extent_start; | ||
1390 | em->end = extent_end - 1; | ||
1391 | if (!page) { | ||
1392 | goto insert; | ||
1393 | } | ||
1394 | ptr = btrfs_file_extent_inline_start(item); | ||
1395 | map = kmap(page); | ||
1396 | memcpy(map + page_offset, ptr, size); | ||
1397 | flush_dcache_page(page); | ||
1398 | kunmap(page); | ||
1399 | set_extent_uptodate(em_tree, extent_start, | ||
1400 | extent_end, GFP_NOFS); | ||
1401 | goto insert; | ||
1402 | } else { | ||
1403 | printk("unkknown found_type %d\n", found_type); | ||
1404 | WARN_ON(1); | ||
1405 | } | ||
1406 | not_found: | ||
1407 | em->start = start; | ||
1408 | em->end = end; | ||
1409 | not_found_em: | ||
1410 | em->block_start = 0; | ||
1411 | em->block_end = 0; | ||
1412 | insert: | ||
1413 | btrfs_release_path(root, path); | ||
1414 | if (em->start > start || em->end < start) { | ||
1415 | printk("bad extent! %Lu %Lu start %Lu end %Lu\n", em->start, em->end, start, end); | ||
1416 | WARN_ON(1); | ||
1417 | err = -EIO; | ||
1418 | goto out; | ||
1419 | } | ||
1420 | ret = add_extent_mapping(em_tree, em); | ||
1421 | if (ret == -EEXIST) { | ||
1422 | free_extent_map(em); | ||
1423 | failed_insert++; | ||
1424 | if (failed_insert > 5) { | ||
1425 | printk("failing to insert %Lu %Lu\n", start, end); | ||
1426 | err = -EIO; | ||
1427 | goto out; | ||
1428 | } | ||
1429 | em = NULL; | ||
1430 | goto again; | ||
1431 | } | ||
1432 | err = 0; | ||
1433 | out: | ||
1434 | btrfs_free_path(path); | ||
1435 | if (trans) { | ||
1436 | ret = btrfs_end_transaction(trans, root); | ||
1437 | if (!err) | ||
1438 | err = ret; | ||
1439 | } | ||
1440 | mutex_unlock(&root->fs_info->fs_mutex); | ||
1441 | if (err) { | ||
1442 | free_extent_map(em); | ||
1443 | WARN_ON(1); | ||
1444 | return ERR_PTR(err); | ||
1445 | } | ||
1446 | return em; | ||
1447 | } | ||
1448 | |||
1449 | |||
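The again/-EEXIST dance in btrfs_get_extent() above is the usual lookup-or-insert race pattern. Distilled as a schematic (read_from_btree() is a hypothetical stand-in for the b-tree search the function actually performs):

again:
	em = lookup_extent_mapping(em_tree, start, end);
	if (em)
		goto out;                     /* another task cached it first */
	em = read_from_btree(inode, start);   /* hypothetical stand-in */
	ret = add_extent_mapping(em_tree, em);
	if (ret == -EEXIST) {                 /* lost the insert race */
		free_extent_map(em);
		em = NULL;
		goto again;                   /* retry; the lookup will hit */
	}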
1241 | /* | 1450 | /* |
1242 | * FIBMAP and others want to pass in a fake buffer head. They need to | 1451 | * FIBMAP and others want to pass in a fake buffer head. They need to |
1243 | * use BTRFS_GET_BLOCK_NO_DIRECT to make sure we don't try to memcpy | 1452 | * use BTRFS_GET_BLOCK_NO_DIRECT to make sure we don't try to memcpy |
@@ -1398,46 +1607,22 @@ int btrfs_get_block(struct inode *inode, sector_t iblock, | |||
1398 | return err; | 1607 | return err; |
1399 | } | 1608 | } |
1400 | 1609 | ||
1401 | static int btrfs_get_block_csum(struct inode *inode, sector_t iblock, | ||
1402 | struct buffer_head *result, int create) | ||
1403 | { | ||
1404 | int ret; | ||
1405 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
1406 | struct page *page = result->b_page; | ||
1407 | u64 offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(result); | ||
1408 | struct btrfs_csum_item *item; | ||
1409 | struct btrfs_path *path = NULL; | ||
1410 | |||
1411 | mutex_lock(&root->fs_info->fs_mutex); | ||
1412 | ret = btrfs_get_block_lock(inode, iblock, result, create); | ||
1413 | if (ret) | ||
1414 | goto out; | ||
1415 | |||
1416 | path = btrfs_alloc_path(); | ||
1417 | item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, offset, 0); | ||
1418 | if (IS_ERR(item)) { | ||
1419 | ret = PTR_ERR(item); | ||
1420 | /* a csum that isn't present is a preallocated region. */ | ||
1421 | if (ret == -ENOENT || ret == -EFBIG) | ||
1422 | ret = 0; | ||
1423 | result->b_private = NULL; | ||
1424 | goto out; | ||
1425 | } | ||
1426 | memcpy((char *)&result->b_private, &item->csum, BTRFS_CRC32_SIZE); | ||
1427 | out: | ||
1428 | if (path) | ||
1429 | btrfs_free_path(path); | ||
1430 | mutex_unlock(&root->fs_info->fs_mutex); | ||
1431 | return ret; | ||
1432 | } | ||
1433 | |||
1434 | static int btrfs_get_block_bmap(struct inode *inode, sector_t iblock, | 1610 | static int btrfs_get_block_bmap(struct inode *inode, sector_t iblock, |
1435 | struct buffer_head *result, int create) | 1611 | struct buffer_head *result, int create) |
1436 | { | 1612 | { |
1437 | struct btrfs_root *root = BTRFS_I(inode)->root; | 1613 | struct btrfs_root *root = BTRFS_I(inode)->root; |
1438 | mutex_lock(&root->fs_info->fs_mutex); | 1614 | u64 start = iblock << inode->i_blkbits; |
1439 | btrfs_get_block_lock(inode, iblock, result, BTRFS_GET_BLOCK_NO_DIRECT); | 1615 | u64 end = start + root->blocksize - 1; |
1440 | mutex_unlock(&root->fs_info->fs_mutex); | 1616 | struct extent_map *em; |
1617 | |||
1618 | em = btrfs_get_extent(inode, NULL, 0, start, end, 0); | ||
1619 | if (em && !IS_ERR(em) && em->block_start != EXTENT_MAP_INLINE && | ||
1620 | em->block_start != 0) { | ||
1621 | u64 offset; | ||
1622 | offset = start - em->start; | ||
1623 | start = (em->block_start + offset) >> inode->i_blkbits; | ||
1624 | btrfs_map_bh_to_logical(root, result, start); | ||
1625 | } | ||
1441 | return 0; | 1626 | return 0; |
1442 | } | 1627 | } |
1443 | 1628 | ||
@@ -1449,442 +1634,50 @@ static sector_t btrfs_bmap(struct address_space *as, sector_t block) | |||
1449 | static int btrfs_prepare_write(struct file *file, struct page *page, | 1634 | static int btrfs_prepare_write(struct file *file, struct page *page, |
1450 | unsigned from, unsigned to) | 1635 | unsigned from, unsigned to) |
1451 | { | 1636 | { |
1452 | return block_prepare_write(page, from, to, btrfs_get_block); | 1637 | return extent_prepare_write(&BTRFS_I(page->mapping->host)->extent_tree, |
1638 | page->mapping->host, page, from, to, | ||
1639 | btrfs_get_extent); | ||
1453 | } | 1640 | } |
1454 | 1641 | ||
1455 | static void buffer_io_error(struct buffer_head *bh) | 1642 | int btrfs_readpage(struct file *file, struct page *page) |
1456 | { | 1643 | { |
1457 | char b[BDEVNAME_SIZE]; | 1644 | struct extent_map_tree *tree; |
1458 | 1645 | tree = &BTRFS_I(page->mapping->host)->extent_tree; | |
1459 | printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n", | 1646 | return extent_read_full_page(tree, page, btrfs_get_extent); |
1460 | bdevname(bh->b_bdev, b), | ||
1461 | (unsigned long long)bh->b_blocknr); | ||
1462 | } | 1647 | } |
1463 | 1648 | static int btrfs_writepage(struct page *page, struct writeback_control *wbc) | |
1464 | /* | ||
1465 | * I/O completion handler for block_read_full_page() - pages | ||
1466 | * which come unlocked at the end of I/O. | ||
1467 | */ | ||
1468 | static void btrfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) | ||
1469 | { | 1649 | { |
1470 | unsigned long flags; | 1650 | struct extent_map_tree *tree; |
1471 | struct buffer_head *first; | 1651 | tree = &BTRFS_I(page->mapping->host)->extent_tree; |
1472 | struct buffer_head *tmp; | 1652 | return extent_write_full_page(tree, page, btrfs_get_extent, wbc); |
1473 | struct page *page; | ||
1474 | int page_uptodate = 1; | ||
1475 | struct inode *inode; | ||
1476 | int ret; | ||
1477 | |||
1478 | BUG_ON(!buffer_async_read(bh)); | ||
1479 | |||
1480 | page = bh->b_page; | ||
1481 | inode = page->mapping->host; | ||
1482 | if (uptodate) { | ||
1483 | void *kaddr; | ||
1484 | struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; | ||
1485 | if (bh->b_private) { | ||
1486 | char csum[BTRFS_CRC32_SIZE]; | ||
1487 | kaddr = kmap_atomic(page, KM_IRQ0); | ||
1488 | ret = btrfs_csum_data(root, kaddr + bh_offset(bh), | ||
1489 | bh->b_size, csum); | ||
1490 | BUG_ON(ret); | ||
1491 | if (memcmp(csum, &bh->b_private, BTRFS_CRC32_SIZE)) { | ||
1492 | u64 offset; | ||
1493 | offset = (page->index << PAGE_CACHE_SHIFT) + | ||
1494 | bh_offset(bh); | ||
1495 | printk("btrfs csum failed ino %lu off %llu\n", | ||
1496 | page->mapping->host->i_ino, | ||
1497 | (unsigned long long)offset); | ||
1498 | memset(kaddr + bh_offset(bh), 1, bh->b_size); | ||
1499 | flush_dcache_page(page); | ||
1500 | } | ||
1501 | kunmap_atomic(kaddr, KM_IRQ0); | ||
1502 | } | ||
1503 | set_buffer_uptodate(bh); | ||
1504 | } else { | ||
1505 | clear_buffer_uptodate(bh); | ||
1506 | if (printk_ratelimit()) | ||
1507 | buffer_io_error(bh); | ||
1508 | SetPageError(page); | ||
1509 | } | ||
1510 | |||
1511 | /* | ||
1512 | * Be _very_ careful from here on. Bad things can happen if | ||
1513 | * two buffer heads end IO at almost the same time and both | ||
1514 | * decide that the page is now completely done. | ||
1515 | */ | ||
1516 | first = page_buffers(page); | ||
1517 | local_irq_save(flags); | ||
1518 | bit_spin_lock(BH_Uptodate_Lock, &first->b_state); | ||
1519 | clear_buffer_async_read(bh); | ||
1520 | unlock_buffer(bh); | ||
1521 | tmp = bh; | ||
1522 | do { | ||
1523 | if (!buffer_uptodate(tmp)) | ||
1524 | page_uptodate = 0; | ||
1525 | if (buffer_async_read(tmp)) { | ||
1526 | BUG_ON(!buffer_locked(tmp)); | ||
1527 | goto still_busy; | ||
1528 | } | ||
1529 | tmp = tmp->b_this_page; | ||
1530 | } while (tmp != bh); | ||
1531 | bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); | ||
1532 | local_irq_restore(flags); | ||
1533 | |||
1534 | /* | ||
1535 | * If none of the buffers had errors and they are all | ||
1536 | * uptodate then we can set the page uptodate. | ||
1537 | */ | ||
1538 | if (page_uptodate && !PageError(page)) | ||
1539 | SetPageUptodate(page); | ||
1540 | unlock_page(page); | ||
1541 | return; | ||
1542 | |||
1543 | still_busy: | ||
1544 | bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); | ||
1545 | local_irq_restore(flags); | ||
1546 | return; | ||
1547 | } | 1653 | } |
1548 | 1654 | ||
1549 | /* | 1655 | static int btrfs_releasepage(struct page *page, gfp_t unused_gfp_flags) |
1550 | * Generic "read page" function for block devices that have the normal | ||
1551 | * get_block functionality. This is most of the block device filesystems. | ||
1552 | * Reads the page asynchronously --- the unlock_buffer() and | ||
1553 | * set/clear_buffer_uptodate() functions propagate buffer state into the | ||
1554 | * page struct once IO has completed. | ||
1555 | */ | ||
1556 | int btrfs_readpage(struct file *file, struct page *page) | ||
1557 | { | 1656 | { |
1558 | struct inode *inode = page->mapping->host; | 1657 | struct extent_map_tree *tree; |
1559 | sector_t iblock, lblock; | 1658 | int ret; |
1560 | struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; | ||
1561 | unsigned int blocksize; | ||
1562 | int nr, i; | ||
1563 | int fully_mapped = 1; | ||
1564 | |||
1565 | BUG_ON(!PageLocked(page)); | ||
1566 | blocksize = 1 << inode->i_blkbits; | ||
1567 | if (!page_has_buffers(page)) | ||
1568 | create_empty_buffers(page, blocksize, 0); | ||
1569 | head = page_buffers(page); | ||
1570 | |||
1571 | iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); | ||
1572 | lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits; | ||
1573 | bh = head; | ||
1574 | nr = 0; | ||
1575 | i = 0; | ||
1576 | |||
1577 | do { | ||
1578 | if (buffer_uptodate(bh)) | ||
1579 | continue; | ||
1580 | |||
1581 | if (!buffer_mapped(bh)) { | ||
1582 | int err = 0; | ||
1583 | |||
1584 | fully_mapped = 0; | ||
1585 | if (iblock < lblock) { | ||
1586 | WARN_ON(bh->b_size != blocksize); | ||
1587 | err = btrfs_get_block_csum(inode, iblock, | ||
1588 | bh, 0); | ||
1589 | if (err) | ||
1590 | SetPageError(page); | ||
1591 | } | ||
1592 | if (!buffer_mapped(bh)) { | ||
1593 | void *kaddr = kmap_atomic(page, KM_USER0); | ||
1594 | memset(kaddr + i * blocksize, 0, blocksize); | ||
1595 | flush_dcache_page(page); | ||
1596 | kunmap_atomic(kaddr, KM_USER0); | ||
1597 | if (!err) | ||
1598 | set_buffer_uptodate(bh); | ||
1599 | continue; | ||
1600 | } | ||
1601 | /* | ||
1602 | * get_block() might have updated the buffer | ||
1603 | * synchronously | ||
1604 | */ | ||
1605 | if (buffer_uptodate(bh)) | ||
1606 | continue; | ||
1607 | } | ||
1608 | arr[nr++] = bh; | ||
1609 | } while (i++, iblock++, (bh = bh->b_this_page) != head); | ||
1610 | |||
1611 | if (fully_mapped) | ||
1612 | SetPageMappedToDisk(page); | ||
1613 | |||
1614 | if (!nr) { | ||
1615 | /* | ||
1616 | * All buffers are uptodate - we can set the page uptodate | ||
1617 | * as well. But not if get_block() returned an error. | ||
1618 | */ | ||
1619 | if (!PageError(page)) | ||
1620 | SetPageUptodate(page); | ||
1621 | unlock_page(page); | ||
1622 | return 0; | ||
1623 | } | ||
1624 | |||
1625 | /* Stage two: lock the buffers */ | ||
1626 | for (i = 0; i < nr; i++) { | ||
1627 | bh = arr[i]; | ||
1628 | lock_buffer(bh); | ||
1629 | bh->b_end_io = btrfs_end_buffer_async_read; | ||
1630 | set_buffer_async_read(bh); | ||
1631 | } | ||
1632 | |||
1633 | /* | ||
1634 | * Stage 3: start the IO. Check for uptodateness | ||
1635 | * inside the buffer lock in case another process reading | ||
1636 | * the underlying blockdev brought it uptodate (the sct fix). | ||
1637 | */ | ||
1638 | for (i = 0; i < nr; i++) { | ||
1639 | bh = arr[i]; | ||
1640 | if (buffer_uptodate(bh)) | ||
1641 | btrfs_end_buffer_async_read(bh, 1); | ||
1642 | else | ||
1643 | submit_bh(READ, bh); | ||
1644 | } | ||
1645 | return 0; | ||
1646 | } | ||
1647 | |||
1648 | /* | ||
1649 | * Aside from a tiny bit of packed file data handling, this is the | ||
1650 | * same as the generic code. | ||
1651 | * | ||
1652 | * While block_write_full_page is writing back the dirty buffers under | ||
1653 | * the page lock, whoever dirtied the buffers may decide to clean them | ||
1654 | * again at any time. We handle that by only looking at the buffer | ||
1655 | * state inside lock_buffer(). | ||
1656 | * | ||
1657 | * If block_write_full_page() is called for regular writeback | ||
1658 | * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a | ||
1659 | * locked buffer. This only can happen if someone has written the buffer | ||
1660 | * directly, with submit_bh(). At the address_space level PageWriteback | ||
1661 | * prevents this contention from occurring. | ||
1662 | */ | ||
1663 | static int __btrfs_write_full_page(struct inode *inode, struct page *page, | ||
1664 | struct writeback_control *wbc) | ||
1665 | { | ||
1666 | int err; | ||
1667 | sector_t block; | ||
1668 | sector_t last_block; | ||
1669 | struct buffer_head *bh, *head; | ||
1670 | const unsigned blocksize = 1 << inode->i_blkbits; | ||
1671 | int nr_underway = 0; | ||
1672 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
1673 | |||
1674 | BUG_ON(!PageLocked(page)); | ||
1675 | |||
1676 | last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; | ||
1677 | |||
1678 | /* no csumming allowed when from PF_MEMALLOC */ | ||
1679 | if (current->flags & PF_MEMALLOC) { | ||
1680 | redirty_page_for_writepage(wbc, page); | ||
1681 | unlock_page(page); | ||
1682 | return 0; | ||
1683 | } | ||
1684 | 1659 | ||
1685 | if (!page_has_buffers(page)) { | 1660 | if (page->private != 1) { |
1686 | create_empty_buffers(page, blocksize, | 1661 | WARN_ON(1); |
1687 | (1 << BH_Dirty)|(1 << BH_Uptodate)); | 1662 | return try_to_free_buffers(page); |
1688 | } | 1663 | } |
1689 | 1664 | tree = &BTRFS_I(page->mapping->host)->extent_tree; | |
1690 | /* | 1665 | ret = try_release_extent_mapping(tree, page); |
1691 | * Be very careful. We have no exclusion from __set_page_dirty_buffers | 1666 | if (ret == 1) { |
1692 | * here, and the (potentially unmapped) buffers may become dirty at | 1667 | ClearPagePrivate(page); |
1693 | * any time. If a buffer becomes dirty here after we've inspected it | 1668 | set_page_private(page, 0); |
1694 | * then we just miss that fact, and the page stays dirty. | 1669 | page_cache_release(page); |
1695 | * | ||
1696 | * Buffers outside i_size may be dirtied by __set_page_dirty_buffers; | ||
1697 | * handle that here by just cleaning them. | ||
1698 | */ | ||
1699 | |||
1700 | block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); | ||
1701 | head = page_buffers(page); | ||
1702 | bh = head; | ||
1703 | |||
1704 | /* | ||
1705 | * Get all the dirty buffers mapped to disk addresses and | ||
1706 | * handle any aliases from the underlying blockdev's mapping. | ||
1707 | */ | ||
1708 | do { | ||
1709 | if (block > last_block) { | ||
1710 | /* | ||
1711 | * mapped buffers outside i_size will occur, because | ||
1712 | * this page can be outside i_size when there is a | ||
1713 | * truncate in progress. | ||
1714 | */ | ||
1715 | /* | ||
1716 | * The buffer was zeroed by block_write_full_page() | ||
1717 | */ | ||
1718 | clear_buffer_dirty(bh); | ||
1719 | set_buffer_uptodate(bh); | ||
1720 | } else if (!buffer_mapped(bh) && buffer_dirty(bh)) { | ||
1721 | WARN_ON(bh->b_size != blocksize); | ||
1722 | err = btrfs_get_block(inode, block, bh, 0); | ||
1723 | if (err) { | ||
1724 | goto recover; | ||
1725 | } | ||
1726 | if (buffer_new(bh)) { | ||
1727 | /* blockdev mappings never come here */ | ||
1728 | clear_buffer_new(bh); | ||
1729 | } | ||
1730 | } | ||
1731 | bh = bh->b_this_page; | ||
1732 | block++; | ||
1733 | } while (bh != head); | ||
1734 | |||
1735 | do { | ||
1736 | if (!buffer_mapped(bh)) | ||
1737 | continue; | ||
1738 | /* | ||
1739 | * If it's a fully non-blocking write attempt and we cannot | ||
1740 | * lock the buffer then redirty the page. Note that this can | ||
1741 | * potentially cause a busy-wait loop from pdflush and kswapd | ||
1742 | * activity, but those code paths have their own higher-level | ||
1743 | * throttling. | ||
1744 | */ | ||
1745 | if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { | ||
1746 | lock_buffer(bh); | ||
1747 | } else if (test_set_buffer_locked(bh)) { | ||
1748 | redirty_page_for_writepage(wbc, page); | ||
1749 | continue; | ||
1750 | } | ||
1751 | if (test_clear_buffer_dirty(bh) && bh->b_blocknr != 0) { | ||
1752 | struct btrfs_trans_handle *trans; | ||
1753 | int ret; | ||
1754 | u64 off = page->index << PAGE_CACHE_SHIFT; | ||
1755 | char *kaddr; | ||
1756 | |||
1757 | off += bh_offset(bh); | ||
1758 | mutex_lock(&root->fs_info->fs_mutex); | ||
1759 | trans = btrfs_start_transaction(root, 1); | ||
1760 | btrfs_set_trans_block_group(trans, inode); | ||
1761 | kaddr = kmap(page); | ||
1762 | btrfs_csum_file_block(trans, root, inode->i_ino, | ||
1763 | off, kaddr + bh_offset(bh), | ||
1764 | bh->b_size); | ||
1765 | kunmap(page); | ||
1766 | ret = btrfs_end_transaction(trans, root); | ||
1767 | BUG_ON(ret); | ||
1768 | mutex_unlock(&root->fs_info->fs_mutex); | ||
1769 | mark_buffer_async_write(bh); | ||
1770 | } else { | ||
1771 | unlock_buffer(bh); | ||
1772 | } | ||
1773 | } while ((bh = bh->b_this_page) != head); | ||
1774 | |||
1775 | /* | ||
1776 | * The page and its buffers are protected by PageWriteback(), so we can | ||
1777 | * drop the bh refcounts early. | ||
1778 | */ | ||
1779 | BUG_ON(PageWriteback(page)); | ||
1780 | set_page_writeback(page); | ||
1781 | |||
1782 | do { | ||
1783 | struct buffer_head *next = bh->b_this_page; | ||
1784 | if (buffer_async_write(bh)) { | ||
1785 | submit_bh(WRITE, bh); | ||
1786 | nr_underway++; | ||
1787 | } | ||
1788 | bh = next; | ||
1789 | } while (bh != head); | ||
1790 | unlock_page(page); | ||
1791 | |||
1792 | err = 0; | ||
1793 | done: | ||
1794 | if (nr_underway == 0) { | ||
1795 | /* | ||
1796 | * The page was marked dirty, but the buffers were | ||
1797 | * clean. Someone wrote them back by hand with | ||
1798 | * ll_rw_block/submit_bh. A rare case. | ||
1799 | */ | ||
1800 | int uptodate = 1; | ||
1801 | do { | ||
1802 | if (!buffer_uptodate(bh)) { | ||
1803 | uptodate = 0; | ||
1804 | break; | ||
1805 | } | ||
1806 | bh = bh->b_this_page; | ||
1807 | } while (bh != head); | ||
1808 | if (uptodate) | ||
1809 | SetPageUptodate(page); | ||
1810 | end_page_writeback(page); | ||
1811 | } | 1670 | } |
1812 | return err; | 1671 | return ret; |
1813 | |||
1814 | recover: | ||
1815 | /* | ||
1816 | * ENOSPC, or some other error. We may already have added some | ||
1817 | * blocks to the file, so we need to write these out to avoid | ||
1818 | * exposing stale data. | ||
1819 | * The page is currently locked and not marked for writeback | ||
1820 | */ | ||
1821 | bh = head; | ||
1822 | /* Recovery: lock and submit the mapped buffers */ | ||
1823 | do { | ||
1824 | if (buffer_mapped(bh) && buffer_dirty(bh)) { | ||
1825 | lock_buffer(bh); | ||
1826 | mark_buffer_async_write(bh); | ||
1827 | } else { | ||
1828 | /* | ||
1829 | * The buffer may have been set dirty during | ||
1830 | * attachment to a dirty page. | ||
1831 | */ | ||
1832 | clear_buffer_dirty(bh); | ||
1833 | } | ||
1834 | } while ((bh = bh->b_this_page) != head); | ||
1835 | SetPageError(page); | ||
1836 | BUG_ON(PageWriteback(page)); | ||
1837 | set_page_writeback(page); | ||
1838 | do { | ||
1839 | struct buffer_head *next = bh->b_this_page; | ||
1840 | if (buffer_async_write(bh)) { | ||
1841 | clear_buffer_dirty(bh); | ||
1842 | submit_bh(WRITE, bh); | ||
1843 | nr_underway++; | ||
1844 | } | ||
1845 | bh = next; | ||
1846 | } while (bh != head); | ||
1847 | unlock_page(page); | ||
1848 | goto done; | ||
1849 | } | 1672 | } |
1850 | 1673 | ||
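
On the right-hand side, the whole buffer_head state machine above collapses into a releasepage hook that defers to the new extent map layer. Reassembled from this hunk's fragments (the function's opening lines fall just before this hunk, so the signature here is inferred from the aops table and the GFP_NOFS call site below):

    static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
    {
            struct extent_map_tree *tree;
            int ret;

            /* each btrfs inode now carries its own extent_map_tree */
            tree = &BTRFS_I(page->mapping->host)->extent_tree;

            /* let the extent map layer decide whether the range is idle */
            ret = try_release_extent_mapping(tree, page);
            if (ret == 1) {
                    /* detach the page and drop the reference the tree held */
                    ClearPagePrivate(page);
                    set_page_private(page, 0);
                    page_cache_release(page);
            }
            return ret;
    }
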
1851 | static int btrfs_writepage(struct page *page, struct writeback_control *wbc) | 1674 | static void btrfs_invalidatepage(struct page *page, unsigned long offset) |
1852 | { | 1675 | { |
1853 | struct inode * const inode = page->mapping->host; | 1676 | struct extent_map_tree *tree; |
1854 | loff_t i_size = i_size_read(inode); | ||
1855 | const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; | ||
1856 | unsigned offset; | ||
1857 | void *kaddr; | ||
1858 | |||
1859 | /* Is the page fully inside i_size? */ | ||
1860 | if (page->index < end_index) | ||
1861 | return __btrfs_write_full_page(inode, page, wbc); | ||
1862 | |||
1863 | /* Is the page fully outside i_size? (truncate in progress) */ | ||
1864 | offset = i_size & (PAGE_CACHE_SIZE-1); | ||
1865 | if (page->index >= end_index+1 || !offset) { | ||
1866 | /* | ||
1867 | * The page may have dirty, unmapped buffers. For example, | ||
1868 | * they may have been added in ext3_writepage(). Make them | ||
1869 | * freeable here, so the page does not leak. | ||
1870 | */ | ||
1871 | block_invalidatepage(page, 0); | ||
1872 | unlock_page(page); | ||
1873 | return 0; /* don't care */ | ||
1874 | } | ||
1875 | 1677 | ||
1876 | /* | 1678 | tree = &BTRFS_I(page->mapping->host)->extent_tree; |
1877 | * The page straddles i_size. It must be zeroed out on each and every | 1679 | extent_invalidatepage(tree, page, offset); |
1878 | * writepage invocation because it may be mmapped. "A file is mapped | 1680 | btrfs_releasepage(page, GFP_NOFS);
1879 | * in multiples of the page size. For a file that is not a multiple of | ||
1880 | * the page size, the remaining memory is zeroed when mapped, and | ||
1881 | * writes to that region are not written out to the file." | ||
1882 | */ | ||
1883 | kaddr = kmap_atomic(page, KM_USER0); | ||
1884 | memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); | ||
1885 | flush_dcache_page(page); | ||
1886 | kunmap_atomic(kaddr, KM_USER0); | ||
1887 | return __btrfs_write_full_page(inode, page, wbc); | ||
1888 | } | 1681 | } |
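
One detail of the deleted btrfs_writepage worth keeping in mind: a page straddling i_size had its tail re-zeroed on every invocation, because an mmap writer can redirty bytes past EOF at any time. Condensed, the tail-zeroing step was:

    offset = i_size & (PAGE_CACHE_SIZE - 1);   /* valid bytes in this page */
    kaddr = kmap_atomic(page, KM_USER0);
    memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);  /* zero past EOF */
    flush_dcache_page(page);
    kunmap_atomic(kaddr, KM_USER0);

Presumably the extent_map writepage path takes over this duty; its body lives in extent_map.c, outside this hunk.
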
1889 | 1682 | ||
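
The new btrfs_invalidatepage is small enough to reassemble in full from the right-hand column: it clears extent state for the dropped range and then tries to release the page outright:

    static void btrfs_invalidatepage(struct page *page, unsigned long offset)
    {
            struct extent_map_tree *tree;

            tree = &BTRFS_I(page->mapping->host)->extent_tree;
            /* discard extent state recorded for the invalidated range */
            extent_invalidatepage(tree, page, offset);
            /* then try to detach the page from the tree entirely */
            btrfs_releasepage(page, GFP_NOFS);
    }
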
1890 | /* | 1683 | /* |
@@ -1905,28 +1698,39 @@ static int btrfs_writepage(struct page *page, struct writeback_control *wbc) | |||
1905 | int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page) | 1698 | int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page) |
1906 | { | 1699 | { |
1907 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; | 1700 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; |
1701 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
1702 | struct btrfs_trans_handle *trans; | ||
1908 | unsigned long end; | 1703 | unsigned long end; |
1909 | loff_t size; | 1704 | loff_t size; |
1910 | int ret = -EINVAL; | 1705 | int ret = -EINVAL; |
1706 | u64 page_start; | ||
1911 | 1707 | ||
1912 | lock_page(page); | 1708 | lock_page(page); |
1913 | wait_on_page_writeback(page); | 1709 | wait_on_page_writeback(page); |
1914 | size = i_size_read(inode); | 1710 | size = i_size_read(inode); |
1711 | page_start = (u64)page->index << PAGE_CACHE_SHIFT; | ||
1712 | |||
1915 | if ((page->mapping != inode->i_mapping) || | 1713 | if ((page->mapping != inode->i_mapping) || |
1916 | ((page->index << PAGE_CACHE_SHIFT) > size)) { | 1714 | (page_start > size)) { |
1917 | /* page got truncated out from underneath us */ | 1715 | /* page got truncated out from underneath us */ |
1918 | goto out_unlock; | 1716 | goto out_unlock; |
1919 | } | 1717 | } |
1920 | 1718 | ||
1921 | /* page is wholly or partially inside EOF */ | 1719 | /* page is wholly or partially inside EOF */ |
1922 | if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) | 1720 | if (page_start + PAGE_CACHE_SIZE > size) |
1923 | end = size & ~PAGE_CACHE_MASK; | 1721 | end = size & ~PAGE_CACHE_MASK; |
1924 | else | 1722 | else |
1925 | end = PAGE_CACHE_SIZE; | 1723 | end = PAGE_CACHE_SIZE; |
1926 | 1724 | ||
1927 | ret = btrfs_prepare_write(NULL, page, 0, end); | 1725 | mutex_lock(&root->fs_info->fs_mutex); |
1928 | if (!ret) | 1726 | trans = btrfs_start_transaction(root, 1); |
1929 | ret = btrfs_commit_write(NULL, page, 0, end); | 1727 | ret = btrfs_cow_one_page(trans, inode, page, end); |
1728 | btrfs_end_transaction(trans, root); | ||
1729 | mutex_unlock(&root->fs_info->fs_mutex); | ||
1730 | set_extent_dirty(&BTRFS_I(inode)->extent_tree, | ||
1731 | page_start, page_start + PAGE_CACHE_SIZE - 1, | ||
1732 | GFP_NOFS); | ||
1733 | set_page_dirty(page); | ||
1930 | 1734 | ||
1931 | out_unlock: | 1735 | out_unlock: |
1932 | unlock_page(page); | 1736 | unlock_page(page); |
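
Net effect on the fault path: instead of a prepare_write/commit_write round trip, a write fault now COWs the page inside a transaction and records the dirtied byte range in the inode's extent tree before dirtying the page itself. The new sequence, pulled together from the right-hand column (error handling elided as in the hunk):

    mutex_lock(&root->fs_info->fs_mutex);
    trans = btrfs_start_transaction(root, 1);
    ret = btrfs_cow_one_page(trans, inode, page, end);   /* allocate/COW */
    btrfs_end_transaction(trans, root);
    mutex_unlock(&root->fs_info->fs_mutex);

    /* record the range in the extent tree, then dirty the page */
    set_extent_dirty(&BTRFS_I(inode)->extent_tree,
                     page_start, page_start + PAGE_CACHE_SIZE - 1,
                     GFP_NOFS);
    set_page_dirty(page);

Note that ret from btrfs_cow_one_page is carried to the caller, but the extent range is marked dirty unconditionally; whether that is intended is not visible from this hunk alone.
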
@@ -1962,21 +1766,8 @@ static void btrfs_truncate(struct inode *inode) | |||
1962 | int btrfs_commit_write(struct file *file, struct page *page, | 1766 | int btrfs_commit_write(struct file *file, struct page *page, |
1963 | unsigned from, unsigned to) | 1767 | unsigned from, unsigned to) |
1964 | { | 1768 | { |
1965 | struct inode *inode = page->mapping->host; | 1769 | return extent_commit_write(&BTRFS_I(page->mapping->host)->extent_tree, |
1966 | struct buffer_head *bh; | 1770 | page->mapping->host, page, from, to); |
1967 | loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; | ||
1968 | |||
1969 | SetPageUptodate(page); | ||
1970 | bh = page_buffers(page); | ||
1971 | set_buffer_uptodate(bh); | ||
1972 | if (buffer_mapped(bh) && bh->b_blocknr != 0) { | ||
1973 | set_page_dirty(page); | ||
1974 | } | ||
1975 | if (pos > inode->i_size) { | ||
1976 | i_size_write(inode, pos); | ||
1977 | mark_inode_dirty(inode); | ||
1978 | } | ||
1979 | return 0; | ||
1980 | } | 1771 | } |
1981 | 1772 | ||
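
btrfs_commit_write is now a pure delegation. The SetPageUptodate/set_page_dirty bookkeeping and the i_size extension that the deleted body did by hand presumably move into the shared extent_commit_write helper (defined in extent_map.c, outside this hunk):

    int btrfs_commit_write(struct file *file, struct page *page,
                           unsigned from, unsigned to)
    {
            /* all per-page bookkeeping now lives in the extent map layer */
            return extent_commit_write(&BTRFS_I(page->mapping->host)->extent_tree,
                                       page->mapping->host, page, from, to);
    }
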
1982 | static int create_subvol(struct btrfs_root *root, char *name, int namelen) | 1773 | static int create_subvol(struct btrfs_root *root, char *name, int namelen) |
@@ -2471,6 +2262,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | |||
2471 | inode->i_mapping->a_ops = &btrfs_aops; | 2262 | inode->i_mapping->a_ops = &btrfs_aops; |
2472 | inode->i_fop = &btrfs_file_operations; | 2263 | inode->i_fop = &btrfs_file_operations; |
2473 | inode->i_op = &btrfs_file_inode_operations; | 2264 | inode->i_op = &btrfs_file_inode_operations; |
2265 | extent_map_tree_init(&BTRFS_I(inode)->extent_tree, | ||
2266 | inode->i_mapping, GFP_NOFS); | ||
2474 | } | 2267 | } |
2475 | dir->i_sb->s_dirt = 1; | 2268 | dir->i_sb->s_dirt = 1; |
2476 | btrfs_update_inode_block_group(trans, inode); | 2269 | btrfs_update_inode_block_group(trans, inode); |
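
The symlink path's file branch shows the initialization pattern this patch adds at inode setup; the other inode-creating hunks in the patch presumably do the same. With the new call, the branch reads:

    inode->i_mapping->a_ops = &btrfs_aops;
    inode->i_fop = &btrfs_file_operations;
    inode->i_op = &btrfs_file_inode_operations;
    /* new: per-inode extent state, bound to this inode's page cache */
    extent_map_tree_init(&BTRFS_I(inode)->extent_tree,
                         inode->i_mapping, GFP_NOFS);
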
@@ -2553,6 +2346,9 @@ static struct address_space_operations btrfs_aops = { | |||
2553 | .prepare_write = btrfs_prepare_write, | 2346 | .prepare_write = btrfs_prepare_write, |
2554 | .commit_write = btrfs_commit_write, | 2347 | .commit_write = btrfs_commit_write, |
2555 | .bmap = btrfs_bmap, | 2348 | .bmap = btrfs_bmap, |
2349 | .invalidatepage = btrfs_invalidatepage, | ||
2350 | .releasepage = btrfs_releasepage, | ||
2351 | .set_page_dirty = __set_page_dirty_nobuffers, | ||
2556 | }; | 2352 | }; |
2557 | 2353 | ||
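
With the three new entries, the visible tail of btrfs_aops becomes the block below. The .set_page_dirty switch to __set_page_dirty_nobuffers is the telling one: consistent with the rest of the patch, data pages no longer carry buffer_heads, so dirtying must tag only the page, not walk buffers:

    static struct address_space_operations btrfs_aops = {
            /* ... entries above this hunk unchanged ... */
            .prepare_write  = btrfs_prepare_write,
            .commit_write   = btrfs_commit_write,
            .bmap           = btrfs_bmap,
            .invalidatepage = btrfs_invalidatepage,
            .releasepage    = btrfs_releasepage,
            /* no buffer_heads on data pages; mark only the page dirty */
            .set_page_dirty = __set_page_dirty_nobuffers,
    };
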
2558 | static struct address_space_operations btrfs_symlink_aops = { | 2354 | static struct address_space_operations btrfs_symlink_aops = { |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 74f3de47423c..7b7c32c73a2c 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -185,6 +185,7 @@ static int __init init_btrfs_fs(void) | |||
185 | err = btrfs_init_cachep(); | 185 | err = btrfs_init_cachep(); |
186 | if (err) | 186 | if (err) |
187 | return err; | 187 | return err; |
188 | extent_map_init(); | ||
188 | return register_filesystem(&btrfs_fs_type); | 189 | return register_filesystem(&btrfs_fs_type); |
189 | } | 190 | } |
190 | 191 | ||
@@ -192,6 +193,7 @@ static void __exit exit_btrfs_fs(void) | |||
192 | { | 193 | { |
193 | btrfs_exit_transaction_sys(); | 194 | btrfs_exit_transaction_sys(); |
194 | btrfs_destroy_cachep(); | 195 | btrfs_destroy_cachep(); |
196 | extent_map_exit(); | ||
195 | unregister_filesystem(&btrfs_fs_type); | 197 | unregister_filesystem(&btrfs_fs_type); |
196 | } | 198 | } |
197 | 199 | ||
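
Finally, the extent map code gets global setup and teardown bracketing the filesystem's lifetime; extent_map_init presumably creates the slab caches that extent_map_exit destroys. Reassembled from the super.c hunks, the module hooks pair up as:

    static int __init init_btrfs_fs(void)
    {
            int err = btrfs_init_cachep();
            if (err)
                    return err;
            extent_map_init();          /* global extent map state */
            return register_filesystem(&btrfs_fs_type);
    }

    static void __exit exit_btrfs_fs(void)
    {
            btrfs_exit_transaction_sys();
            btrfs_destroy_cachep();
            extent_map_exit();          /* mirror of extent_map_init */
            unregister_filesystem(&btrfs_fs_type);
    }
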