author    Chris Mason <chris.mason@oracle.com>  2007-10-15 16:15:53 -0400
committer Chris Mason <chris.mason@oracle.com>  2008-09-25 11:03:56 -0400
commit    db94535db75e67fab12ccbb7f5ee548e33fed891 (patch)
tree      1ad7dfc82b003294a94ee87d7329b24b902b369f /fs/btrfs/extent_map.c
parent    1a5bc167f6707542b79a55452075525620ed43f5 (diff)
Btrfs: Allow tree blocks larger than the page size
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_map.c')
-rw-r--r--  fs/btrfs/extent_map.c  |  91
1 file changed, 66 insertions(+), 25 deletions(-)
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index e081558d52ff..f658703c42e6 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -1963,18 +1963,27 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb, int i)
 	struct page *p;
 	if (i == 0)
 		return eb->first_page;
+
 	i += eb->start >> PAGE_CACHE_SHIFT;
+	if (eb->last_page && eb->last_page->index == i)
+		return eb->last_page;
+
 	p = find_get_page(eb->first_page->mapping, i);
 	page_cache_release(p);
+	eb->last_page = p;
 	return p;
 }
 
+static inline unsigned long num_extent_pages(u64 start, u64 len)
+{
+	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+		(start >> PAGE_CACHE_SHIFT);
+}
 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
 					  u64 start, unsigned long len,
 					  gfp_t mask)
 {
-	unsigned long num_pages = ((start + len - 1) >> PAGE_CACHE_SHIFT) -
-		(start >> PAGE_CACHE_SHIFT) + 1;
+	unsigned long num_pages = num_extent_pages(start, len);
 	unsigned long i;
 	unsigned long index = start >> PAGE_CACHE_SHIFT;
 	struct extent_buffer *eb;
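The new num_extent_pages() helper centralizes the page-count arithmetic that the rest of the patch switches to. A minimal userspace sketch of the same calculation follows; the 4096-byte page size (PAGE_CACHE_SHIFT of 12) and the example values in main() are assumptions for illustration only, not taken from the patch.

/*
 * Sketch of the num_extent_pages() arithmetic: count how many page-cache
 * pages the byte range [start, start + len) touches by rounding the end
 * up to a page boundary and subtracting the page index of the start.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SHIFT 12                        /* assumed: 4 KiB pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

static unsigned long num_extent_pages(uint64_t start, uint64_t len)
{
	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}

int main(void)
{
	/* A 16 KiB tree block starting 1 KiB into a page touches 5 pages. */
	printf("%lu\n", num_extent_pages(3 * 4096 + 1024, 16 * 1024));  /* 5 */
	/* A page-aligned 16 KiB block spans exactly 4 pages. */
	printf("%lu\n", num_extent_pages(8 * 4096, 16 * 1024));         /* 4 */
	return 0;
}

Counting partially covered first and last pages this way is what lets a tree block larger than one page be handled as a run of consecutive page-cache pages.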
@@ -1986,7 +1995,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
 	if (!eb || IS_ERR(eb))
 		return NULL;
 
-	eb->alloc_addr = __builtin_return_address(0);
+	eb->alloc_addr = (unsigned long)__builtin_return_address(0);
 	eb->start = start;
 	eb->len = len;
 	atomic_set(&eb->refs, 1);
@@ -1994,6 +2003,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
 	for (i = 0; i < num_pages; i++, index++) {
 		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
 		if (!p) {
+			WARN_ON(1);
 			/* make sure the free only frees the pages we've
 			 * grabbed a reference on
 			 */
@@ -2021,8 +2031,7 @@ struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
 					 u64 start, unsigned long len,
 					 gfp_t mask)
 {
-	unsigned long num_pages = ((start + len - 1) >> PAGE_CACHE_SHIFT) -
-		(start >> PAGE_CACHE_SHIFT) + 1;
+	unsigned long num_pages = num_extent_pages(start, len);
 	unsigned long i;
 	unsigned long index = start >> PAGE_CACHE_SHIFT;
 	struct extent_buffer *eb;
@@ -2033,7 +2042,7 @@ struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
 	if (!eb || IS_ERR(eb))
 		return NULL;
 
-	eb->alloc_addr = __builtin_return_address(0);
+	eb->alloc_addr = (unsigned long)__builtin_return_address(0);
 	eb->start = start;
 	eb->len = len;
 	atomic_set(&eb->refs, 1);
@@ -2070,8 +2079,7 @@ void free_extent_buffer(struct extent_buffer *eb)
 	if (!atomic_dec_and_test(&eb->refs))
 		return;
 
-	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
-		(eb->start >> PAGE_CACHE_SHIFT) + 1;
+	num_pages = num_extent_pages(eb->start, eb->len);
 
 	if (eb->first_page)
 		page_cache_release(eb->first_page);
@@ -2094,8 +2102,7 @@ int clear_extent_buffer_dirty(struct extent_map_tree *tree,
 	u64 end = start + eb->len - 1;
 
 	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
-	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
-		(eb->start >> PAGE_CACHE_SHIFT) + 1;
+	num_pages = num_extent_pages(eb->start, eb->len);
 
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
@@ -2145,8 +2152,7 @@ int set_extent_buffer_uptodate(struct extent_map_tree *tree,
 	struct page *page;
 	unsigned long num_pages;
 
-	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
-		(eb->start >> PAGE_CACHE_SHIFT) + 1;
+	num_pages = num_extent_pages(eb->start, eb->len);
 
 	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
 			    GFP_NOFS);
@@ -2191,8 +2197,7 @@ int read_extent_buffer_pages(struct extent_map_tree *tree,
 		return 0;
 	}
 
-	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
-		(eb->start >> PAGE_CACHE_SHIFT) + 1;
+	num_pages = num_extent_pages(eb->start, eb->len);
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		if (PageUptodate(page)) {
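The per-page loops above fetch each page through extent_buffer_page(), which the first hunk extends with a last_page pointer: a lookup that repeats the previous index returns the cached page instead of calling find_get_page() again. A small userspace sketch of that single-slot cache pattern; the struct and function names and the array standing in for the page cache are illustrative only.

/*
 * Single-slot lookup cache: remember the last item returned and hand it
 * back when the same index is asked for again; otherwise fall through to
 * the "real" lookup (find_get_page() in the patch).
 */
#include <stdio.h>
#include <stddef.h>

struct item { unsigned long index; };

struct cache {
	struct item *slots;     /* backing store, stands in for the page cache */
	size_t nr;
	struct item *last;      /* most recently returned item, may be NULL */
	unsigned long hits;
	unsigned long misses;
};

static struct item *cache_lookup(struct cache *c, unsigned long index)
{
	if (c->last && c->last->index == index) {   /* fast path: same index */
		c->hits++;
		return c->last;
	}
	c->misses++;                                /* slow path: real lookup */
	c->last = (index < c->nr) ? &c->slots[index] : NULL;
	return c->last;
}

int main(void)
{
	struct item items[8];
	struct cache c = { .slots = items, .nr = 8 };

	for (unsigned long i = 0; i < 8; i++)
		items[i].index = i;

	/* Repeated lookups of the same index hit the cached slot. */
	for (int i = 0; i < 4; i++)
		cache_lookup(&c, 5);
	cache_lookup(&c, 6);

	printf("hits=%lu misses=%lu\n", c.hits, c.misses);  /* hits=3 misses=2 */
	return 0;
}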
@@ -2267,14 +2272,14 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
 }
 EXPORT_SYMBOL(read_extent_buffer);
 
-int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
-		      unsigned long min_len,
-		      char **token, char **map,
-		      unsigned long *map_start,
-		      unsigned long *map_len, int km)
+static int __map_extent_buffer(struct extent_buffer *eb, unsigned long start,
+			       unsigned long min_len, char **token, char **map,
+			       unsigned long *map_start,
+			       unsigned long *map_len, int km)
 {
 	size_t offset = start & (PAGE_CACHE_SIZE - 1);
 	char *kaddr;
+	struct page *p;
 	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
 	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
 	unsigned long end_i = (start_offset + start + min_len) >>
@@ -2283,21 +2288,59 @@ int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
 	if (i != end_i)
 		return -EINVAL;
 
-	WARN_ON(start > eb->len);
+	if (start >= eb->len) {
+		printk("bad start in map eb start %Lu len %lu caller start %lu min %lu\n", eb->start, eb->len, start, min_len);
+		WARN_ON(1);
+	}
 
 	if (i == 0) {
 		offset = start_offset;
 		*map_start = 0;
 	} else {
+		offset = 0;
 		*map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
 	}
 
-	kaddr = kmap_atomic(extent_buffer_page(eb, i), km);
+	p = extent_buffer_page(eb, i);
+	WARN_ON(!PageUptodate(p));
+	kaddr = kmap_atomic(p, km);
 	*token = kaddr;
 	*map = kaddr + offset;
 	*map_len = PAGE_CACHE_SIZE - offset;
 	return 0;
 }
+
+int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
+		      unsigned long min_len,
+		      char **token, char **map,
+		      unsigned long *map_start,
+		      unsigned long *map_len, int km)
+{
+	int err;
+	int save = 0;
+	if (eb->map_token) {
+		if (start >= eb->map_start &&
+		    start + min_len <= eb->map_start + eb->map_len) {
+			*token = eb->map_token;
+			*map = eb->kaddr;
+			*map_start = eb->map_start;
+			*map_len = eb->map_len;
+			return 0;
+		}
+		unmap_extent_buffer(eb, eb->map_token, km);
+		eb->map_token = NULL;
+		save = 1;
+	}
+	err = __map_extent_buffer(eb, start, min_len, token, map,
+				  map_start, map_len, km);
+	if (!err && save) {
+		eb->map_token = *token;
+		eb->kaddr = *map;
+		eb->map_start = *map_start;
+		eb->map_len = *map_len;
+	}
+	return err;
+}
 EXPORT_SYMBOL(map_extent_buffer);
 
 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
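The new map_extent_buffer() wrapper keeps the most recent kmap window in eb->map_token, eb->kaddr, eb->map_start and eb->map_len, reuses it when the requested [start, start + min_len) range still fits, and otherwise unmaps and remaps through __map_extent_buffer(). A simplified userspace sketch of that reuse-or-remap idea; the names, the WINDOW constant, and the byte array standing in for kmap_atomic() are illustrative, and unlike the patch this sketch caches every new window rather than only re-caching after dropping an old one.

/*
 * Reuse-or-remap: return a pointer inside the cached window when the
 * requested range fits, otherwise "remap" the page-sized chunk that
 * contains the start offset and cache that window instead.
 */
#include <stdio.h>

#define WINDOW 4096                     /* stands in for PAGE_CACHE_SIZE */

struct buffer {
	char data[4 * WINDOW];
	char *kaddr;                    /* cached window */
	unsigned long map_start;
	unsigned long map_len;
	int mapped;
};

static char *map_range(struct buffer *b, unsigned long start,
		       unsigned long min_len)
{
	if (b->mapped &&
	    start >= b->map_start &&
	    start + min_len <= b->map_start + b->map_len)
		return b->kaddr + (start - b->map_start);   /* reuse */

	/* "remap": window is the page-sized chunk containing start */
	b->map_start = start & ~(unsigned long)(WINDOW - 1);
	b->map_len = WINDOW;
	b->kaddr = b->data + b->map_start;
	b->mapped = 1;
	return b->kaddr + (start - b->map_start);
}

int main(void)
{
	struct buffer b = { .mapped = 0 };

	char *p = map_range(&b, 100, 8);     /* maps window [0, 4096)   */
	char *q = map_range(&b, 200, 8);     /* reused: same window     */
	char *r = map_range(&b, 5000, 8);    /* remaps: [4096, 8192)    */

	printf("reused=%d remapped=%d\n", q - p == 100, r != q);
	return 0;
}

Keeping the previous mapping around avoids a kmap_atomic()/kunmap_atomic() round trip for consecutive accesses that land in the same page, which matters once a tree block spans several pages.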
@@ -2574,7 +2617,6 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 		       ((unsigned long)PAGE_CACHE_SIZE - 1);
 		src_off_in_page = src_end &
 		       ((unsigned long)PAGE_CACHE_SIZE - 1);
-
 		if (src_i == 0)
 			src_off_in_page += start_offset;
 		if (dst_i == 0)
@@ -2582,14 +2624,13 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 
 		cur = min(len, src_off_in_page + 1);
 		cur = min(cur, dst_off_in_page + 1);
-
 		move_pages(extent_buffer_page(dst, dst_i),
 			   extent_buffer_page(dst, src_i),
 			   dst_off_in_page - cur + 1,
 			   src_off_in_page - cur + 1, cur);
 
-		dst_end -= cur - 1;
-		src_end -= cur - 1;
+		dst_end -= cur;
+		src_end -= cur;
 		len -= cur;
 	}
 }
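The final hunk adjusts the backwards-copy bookkeeping in memmove_extent_buffer(): each pass moves cur bytes ending at the inclusive offsets dst_end and src_end, so stepping those offsets by cur (rather than cur - 1) keeps consecutive chunks adjacent instead of overlapping by one byte. A standalone sketch of that stepping; the move_backwards() helper is hypothetical and memmove() on a local array stands in for move_pages().

/*
 * Copy backwards in chunks of up to `chunk` bytes. `end` is the inclusive
 * end offset of the chunk being copied; after copying cur bytes, the next
 * chunk ends cur bytes earlier.
 */
#include <stdio.h>
#include <string.h>

static void move_backwards(char *dst, const char *src, long len, long chunk)
{
	long end = len - 1;                   /* inclusive end offset */

	while (len > 0) {
		long cur = chunk < len ? chunk : len;

		memmove(dst + end - cur + 1, src + end - cur + 1, cur);
		end -= cur;                   /* the patch changes this from cur - 1 */
		len -= cur;
	}
}

int main(void)
{
	char src[] = "0123456789abcdef";
	char dst[sizeof(src)] = { 0 };

	move_backwards(dst, src, (long)sizeof(src) - 1, 5);
	printf("%s\n", dst);                  /* 0123456789abcdef */
	return 0;
}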