Diffstat (limited to 'fs/btrfs/extent_map.c')
 fs/btrfs/extent_map.c | 672 +++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 656 insertions(+), 16 deletions(-)
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 0ab368e091f9..55fefdfab84c 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -26,16 +26,6 @@ struct tree_entry {
 	struct rb_node rb_node;
 };
 
-/* bits for the extent state */
-#define EXTENT_DIRTY 1
-#define EXTENT_WRITEBACK (1 << 1)
-#define EXTENT_UPTODATE (1 << 2)
-#define EXTENT_LOCKED (1 << 3)
-#define EXTENT_NEW (1 << 4)
-#define EXTENT_DELALLOC (1 << 5)
-
-#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
-
 void __init extent_map_init(void)
 {
 	extent_map_cache = btrfs_cache_create("extent_map",
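
The extent-state bits removed here are single-bit flags, so one state
word can carry several at once and a mask like EXTENT_IOBITS tests a
whole group with a single AND. A minimal, userspace-compilable sketch
using the values from the removed lines (their new home, presumably
extent_map.h, is outside this file's diff):

#include <stdio.h>

/* values copied from the block removed above */
#define EXTENT_DIRTY		1
#define EXTENT_WRITEBACK	(1 << 1)
#define EXTENT_UPTODATE		(1 << 2)
#define EXTENT_LOCKED		(1 << 3)
#define EXTENT_NEW		(1 << 4)
#define EXTENT_DELALLOC		(1 << 5)
#define EXTENT_IOBITS		(EXTENT_LOCKED | EXTENT_WRITEBACK)

int main(void)
{
	int state = EXTENT_LOCKED | EXTENT_DIRTY;	/* 0x8 | 0x1 = 0x9 */

	/* one AND answers "is this range busy with IO?" */
	printf("io busy:  %d\n", (state & EXTENT_IOBITS) != 0);		/* 1 */
	printf("delalloc: %d\n", (state & EXTENT_DELALLOC) != 0);	/* 0 */
	return 0;
}
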
@@ -223,7 +213,8 @@ int add_extent_mapping(struct extent_map_tree *tree,
 	if (rb)
 		prev = rb_entry(rb, struct extent_map, rb_node);
 	if (prev && prev->end + 1 == em->start &&
-	    ((em->block_start == 0 && prev->block_start == 0) ||
+	    ((em->block_start == EXTENT_MAP_HOLE &&
+	      prev->block_start == EXTENT_MAP_HOLE) ||
 	     (em->block_start == prev->block_end + 1))) {
 		em->start = prev->start;
 		em->block_start = prev->block_start;
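
The rewritten condition merges a new mapping into its predecessor only
when the two are logically adjacent (prev->end + 1 == em->start) and
compatible on disk: either both are holes, or the new extent's first
block immediately follows the old one's last. A standalone sketch of
that predicate; the stub struct and the EXTENT_MAP_HOLE value are
assumptions for illustration, the real definitions live in
extent_map.h:

#include <stdbool.h>

typedef unsigned long long u64;

#define EXTENT_MAP_HOLE ((u64)-3)	/* assumed value, see extent_map.h */

struct em_stub {
	u64 start, end;			/* logical byte range, inclusive */
	u64 block_start, block_end;	/* on-disk byte range, inclusive */
};

/* mirrors the merge test in add_extent_mapping() above */
static bool mergeable(const struct em_stub *prev, const struct em_stub *em)
{
	if (prev->end + 1 != em->start)
		return false;				/* not adjacent */
	if (em->block_start == EXTENT_MAP_HOLE &&
	    prev->block_start == EXTENT_MAP_HOLE)
		return true;				/* hole next to hole */
	return em->block_start == prev->block_end + 1;	/* contiguous blocks */
}
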
@@ -926,6 +917,40 @@ int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
 }
 EXPORT_SYMBOL(set_range_writeback);
 
+int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
+			  u64 *start_ret, u64 *end_ret, int bits)
+{
+	struct rb_node *node;
+	struct extent_state *state;
+	int ret = 1;
+
+	write_lock_irq(&tree->lock);
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(&tree->state, start);
+	if (!node || IS_ERR(node)) {
+		goto out;
+	}
+
+	while(1) {
+		state = rb_entry(node, struct extent_state, rb_node);
+		if (state->state & bits) {
+			*start_ret = state->start;
+			*end_ret = state->end;
+			ret = 0;
+		}
+		node = rb_next(node);
+		if (!node)
+			break;
+	}
+out:
+	write_unlock_irq(&tree->lock);
+	return ret;
+}
+EXPORT_SYMBOL(find_first_extent_bit);
+
 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
 			     u64 start, u64 lock_start, u64 *end, u64 max_bytes)
 {
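
find_first_extent_bit() fills *start_ret and *end_ret with a range
whose state carries one of the requested bits and returns 0, or
returns 1 when nothing at or after `start` matches. A hedged sketch of
how a caller might sweep every matching range by restarting just past
the previous hit; the function name sweep_dirty_ranges and the choice
of EXTENT_DIRTY are placeholders, not code from this commit:

static void sweep_dirty_ranges(struct extent_map_tree *tree)
{
	u64 found_start;
	u64 found_end;
	u64 cur = 0;

	while (find_first_extent_bit(tree, cur, &found_start,
				     &found_end, EXTENT_DIRTY) == 0) {
		/* ... process [found_start, found_end] here ... */
		if (found_end == (u64)-1)
			break;		/* don't wrap past the last byte */
		cur = found_end + 1;	/* resume just past this range */
	}
}
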
@@ -1450,7 +1475,7 @@ int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
 		em = NULL;
 
 		/* we've found a hole, just zero and go on */
-		if (block_start == 0) {
+		if (block_start == EXTENT_MAP_HOLE) {
 			zero_user_page(page, page_offset, iosize, KM_USER0);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    GFP_NOFS);
@@ -1593,7 +1618,8 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
 		free_extent_map(em);
 		em = NULL;
 
-		if (block_start == 0 || block_start == EXTENT_MAP_INLINE) {
+		if (block_start == EXTENT_MAP_HOLE ||
+		    block_start == EXTENT_MAP_INLINE) {
 			clear_extent_dirty(tree, cur,
 					   cur + iosize - 1, GFP_NOFS);
 			cur = cur + iosize;
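
The two hunks above swap the bare 0 sentinel in block_start for the
named EXTENT_MAP_HOLE, which also frees block 0 to be a valid address
(the XXX(hch) comment in the extent_bmap hunk below notes why that
matters). A sketch of the sentinel pattern; the exact constant values
are assumptions, since their definitions sit in extent_map.h rather
than in this file:

typedef unsigned long long u64;

/* assumed values near the top of the u64 range, where no real byte
 * address can live */
#define EXTENT_MAP_INLINE ((u64)-2)	/* data packed inside the btree leaf */
#define EXTENT_MAP_HOLE   ((u64)-3)	/* no blocks allocated for the range */

/* true only when block_start is an actual on-disk byte address */
static int is_real_block(u64 block_start)
{
	return block_start != EXTENT_MAP_HOLE &&
	       block_start != EXTENT_MAP_INLINE;
}
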
@@ -1630,7 +1656,6 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
 		nr++;
 	}
 done:
-	WARN_ON(test_range_bit(tree, start, page_end, EXTENT_DIRTY, 0));
 	unlock_extent(tree, start, page_end, GFP_NOFS);
 	unlock_page(page);
 	return 0;
@@ -1827,8 +1852,623 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
 
 	// XXX(hch): block 0 is valid in some cases, e.g. XFS RT device
 	if (em->block_start == EXTENT_MAP_INLINE ||
-	    em->block_start == 0)
+	    em->block_start == EXTENT_MAP_HOLE)
 		return 0;
 
 	return (em->block_start + start - em->start) >> inode->i_blkbits;
 }
+
+struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
+					  u64 start, unsigned long len,
+					  gfp_t mask)
+{
+	unsigned long num_pages = ((start + len - 1) >> PAGE_CACHE_SHIFT) -
+				  (start >> PAGE_CACHE_SHIFT) + 1;
+	unsigned long i;
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	struct extent_buffer *eb;
+	struct page *p;
+	struct address_space *mapping = tree->mapping;
+	int uptodate = 1;
+
+	eb = kzalloc(EXTENT_BUFFER_SIZE(num_pages), mask);
+	if (!eb || IS_ERR(eb))
+		return NULL;
+
+	eb->start = start;
+	eb->len = len;
+	atomic_set(&eb->refs, 1);
+
+	for (i = 0; i < num_pages; i++, index++) {
+		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
+		if (!p)
+			goto fail;
+		eb->pages[i] = p;
+		if (!PageUptodate(p))
+			uptodate = 0;
+		unlock_page(p);
+	}
+	if (uptodate)
+		eb->flags |= EXTENT_UPTODATE;
+	return eb;
+fail:
+	free_extent_buffer(eb);
+	return NULL;
+}
+EXPORT_SYMBOL(alloc_extent_buffer);
+
+struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
+					 u64 start, unsigned long len,
+					 gfp_t mask)
+{
+	unsigned long num_pages = ((start + len - 1) >> PAGE_CACHE_SHIFT) -
+				  (start >> PAGE_CACHE_SHIFT) + 1;
+	unsigned long i;
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	struct extent_buffer *eb;
+	struct page *p;
+	struct address_space *mapping = tree->mapping;
+
+	eb = kzalloc(EXTENT_BUFFER_SIZE(num_pages), mask);
+	if (!eb || IS_ERR(eb))
+		return NULL;
+
+	eb->start = start;
+	eb->len = len;
+	atomic_set(&eb->refs, 1);
+
+	for (i = 0; i < num_pages; i++, index++) {
+		p = find_get_page(mapping, index);
+		if (!p)
+			goto fail;
+		eb->pages[i] = p;
+	}
+	return eb;
+fail:
+	free_extent_buffer(eb);
+	return NULL;
+}
+EXPORT_SYMBOL(find_extent_buffer);
+
+void free_extent_buffer(struct extent_buffer *eb)
+{
+	unsigned long i;
+	unsigned long num_pages;
+
+	if (!eb)
+		return;
+
+	if (!atomic_dec_and_test(&eb->refs))
+		return;
+
+	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
+		(eb->start >> PAGE_CACHE_SHIFT) + 1;
+
+	for (i = 0; i < num_pages; i++) {
+		if (eb->pages[i])
+			page_cache_release(eb->pages[i]);
+	}
+	kfree(eb);
+}
+EXPORT_SYMBOL(free_extent_buffer);
+
+int clear_extent_buffer_dirty(struct extent_map_tree *tree,
+			      struct extent_buffer *eb)
+{
+	int set;
+	unsigned long i;
+	unsigned long num_pages;
+	struct page *page;
+
+	u64 start = eb->start;
+	u64 end = start + eb->len - 1;
+
+	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
+	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
+		(eb->start >> PAGE_CACHE_SHIFT) + 1;
+
+	for (i = 0; i < num_pages; i++) {
+		page = eb->pages[i];
+		lock_page(page);
+		/*
+		 * if we're on the last page or the first page and the
+		 * block isn't aligned on a page boundary, do extra checks
+		 * to make sure we don't clean page that is partially dirty
+		 */
+		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
+		    ((i == num_pages - 1) &&
+		     ((eb->start + eb->len - 1) & (PAGE_CACHE_SIZE - 1)))) {
+			start = page->index << PAGE_CACHE_SHIFT;
+			end = start + PAGE_CACHE_SIZE - 1;
+			if (test_range_bit(tree, start, end,
+					   EXTENT_DIRTY, 0)) {
+				unlock_page(page);
+				continue;
+			}
+		}
+		clear_page_dirty_for_io(page);
+		unlock_page(page);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(clear_extent_buffer_dirty);
+
+int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
+				    struct extent_buffer *eb)
+{
+	return wait_on_extent_writeback(tree, eb->start,
+					eb->start + eb->len - 1);
+}
+EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
+
+int set_extent_buffer_dirty(struct extent_map_tree *tree,
+			    struct extent_buffer *eb)
+{
+	return set_range_dirty(tree, eb->start, eb->start + eb->len - 1);
+}
+EXPORT_SYMBOL(set_extent_buffer_dirty);
+
+int set_extent_buffer_uptodate(struct extent_map_tree *tree,
+			       struct extent_buffer *eb)
+{
+	unsigned long i;
+	struct page *page;
+	unsigned long num_pages;
+
+	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
+		(eb->start >> PAGE_CACHE_SHIFT) + 1;
+
+	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+			    GFP_NOFS);
+	for (i = 0; i < num_pages; i++) {
+		page = eb->pages[i];
+		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
+		    ((i == num_pages - 1) &&
+		     ((eb->start + eb->len - 1) & (PAGE_CACHE_SIZE - 1)))) {
+			check_page_uptodate(tree, page);
+			continue;
+		}
+		SetPageUptodate(page);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(set_extent_buffer_uptodate);
+
+int extent_buffer_uptodate(struct extent_map_tree *tree,
+			   struct extent_buffer *eb)
+{
+	if (eb->flags & EXTENT_UPTODATE)
+		return 1;
+	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+			      EXTENT_UPTODATE, 1);
+}
+EXPORT_SYMBOL(extent_buffer_uptodate);
+
+int read_extent_buffer_pages(struct extent_map_tree *tree,
+			     struct extent_buffer *eb, int wait)
+{
+	unsigned long i;
+	struct page *page;
+	int err;
+	int ret = 0;
+	unsigned long num_pages;
+
+	if (eb->flags & EXTENT_UPTODATE)
+		return 0;
+
+	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+			   EXTENT_UPTODATE, 1)) {
+		return 0;
+	}
+
+	num_pages = ((eb->start + eb->len - 1) >> PAGE_CACHE_SHIFT) -
+		(eb->start >> PAGE_CACHE_SHIFT) + 1;
+	for (i = 0; i < num_pages; i++) {
+		page = eb->pages[i];
+		if (PageUptodate(page)) {
+			continue;
+		}
+		if (!wait) {
+			if (TestSetPageLocked(page)) {
+				continue;
+			}
+		} else {
+			lock_page(page);
+		}
+		if (!PageUptodate(page)) {
+			err = page->mapping->a_ops->readpage(NULL, page);
+			if (err) {
+				ret = err;
+			}
+		} else {
+			unlock_page(page);
+		}
+	}
+
+	if (ret || !wait) {
+		return ret;
+	}
+
+	for (i = 0; i < num_pages; i++) {
+		page = eb->pages[i];
+		wait_on_page_locked(page);
+		if (!PageUptodate(page)) {
+			ret = -EIO;
+		}
+	}
+	eb->flags |= EXTENT_UPTODATE;
+	return ret;
+}
+EXPORT_SYMBOL(read_extent_buffer_pages);
+
+void read_extent_buffer(struct extent_buffer *eb, void *dstv,
+			unsigned long start,
+			unsigned long len)
+{
+	size_t cur;
+	size_t offset;
+	struct page *page;
+	char *kaddr;
+	char *dst = (char *)dstv;
+	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+
+	WARN_ON(start > eb->len);
+	WARN_ON(start + len > eb->start + eb->len);
+
+	page = eb->pages[i];
+	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
+	if (i == 0)
+		offset += start_offset;
+
+	while(len > 0) {
+		WARN_ON(!PageUptodate(page));
+
+		cur = min(len, (PAGE_CACHE_SIZE - offset));
+		// kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = page_address(page);
+		memcpy(dst, kaddr + offset, cur);
+		// kunmap_atomic(kaddr, KM_USER0);
+
+		dst += cur;
+		len -= cur;
+		offset = 0;
+		i++;
+		page = eb->pages[i];
+	}
+}
+EXPORT_SYMBOL(read_extent_buffer);
+
+int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
+		      char **token, char **map,
+		      unsigned long *map_start,
+		      unsigned long *map_len, int km)
+{
+	size_t offset;
+	char *kaddr;
+	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+
+	WARN_ON(start > eb->len);
+
+	if (i == 0) {
+		offset = start_offset;
+		*map_start = 0;
+	} else {
+		offset = 0;
+		*map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
+	}
+
+	// kaddr = kmap_atomic(eb->pages[i], km);
+	kaddr = page_address(eb->pages[i]);
+	*token = kaddr;
+	*map = kaddr + offset;
+	*map_len = PAGE_CACHE_SIZE - offset;
+	return 0;
+}
+EXPORT_SYMBOL(map_extent_buffer);
+
+void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
+{
+	// kunmap_atomic(token, km);
+}
+EXPORT_SYMBOL(unmap_extent_buffer);
+
+int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
+			 unsigned long start,
+			 unsigned long len)
+{
+	size_t cur;
+	size_t offset;
+	struct page *page;
+	char *kaddr;
+	char *ptr = (char *)ptrv;
+	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	int ret = 0;
+
+	WARN_ON(start > eb->len);
+	WARN_ON(start + len > eb->start + eb->len);
+
+	page = eb->pages[i];
+	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
+	if (i == 0)
+		offset += start_offset;
+
+	while(len > 0) {
+		WARN_ON(!PageUptodate(page));
+
+		cur = min(len, (PAGE_CACHE_SIZE - offset));
+
+		// kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = page_address(page);
+		ret = memcmp(ptr, kaddr + offset, cur);
+		// kunmap_atomic(kaddr, KM_USER0);
+		if (ret)
+			break;
+
+		ptr += cur;
+		len -= cur;
+		offset = 0;
+		i++;
+		page = eb->pages[i];
+	}
+	return ret;
+}
+EXPORT_SYMBOL(memcmp_extent_buffer);
+
+void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
+			 unsigned long start, unsigned long len)
+{
+	size_t cur;
+	size_t offset;
+	struct page *page;
+	char *kaddr;
+	char *src = (char *)srcv;
+	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+
+	WARN_ON(start > eb->len);
+	WARN_ON(start + len > eb->start + eb->len);
+
+	page = eb->pages[i];
+	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
+	if (i == 0)
+		offset += start_offset;
+
+	while(len > 0) {
+		WARN_ON(!PageUptodate(page));
+
+		cur = min(len, PAGE_CACHE_SIZE - offset);
+		// kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = page_address(page);
+		memcpy(kaddr + offset, src, cur);
+		// kunmap_atomic(kaddr, KM_USER0);
+
+		src += cur;
+		len -= cur;
+		offset = 0;
+		i++;
+		page = eb->pages[i];
+	}
+}
+EXPORT_SYMBOL(write_extent_buffer);
+
+void memset_extent_buffer(struct extent_buffer *eb, char c,
+			  unsigned long start, unsigned long len)
+{
+	size_t cur;
+	size_t offset;
+	struct page *page;
+	char *kaddr;
+	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+
+	WARN_ON(start > eb->len);
+	WARN_ON(start + len > eb->start + eb->len);
+
+	page = eb->pages[i];
+	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
+	if (i == 0)
+		offset += start_offset;
+
+	while(len > 0) {
+		WARN_ON(!PageUptodate(page));
+
+		cur = min(len, PAGE_CACHE_SIZE - offset);
+		// kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = page_address(page);
+		memset(kaddr + offset, c, cur);
+		// kunmap_atomic(kaddr, KM_USER0);
+
+		len -= cur;
+		offset = 0;
+		i++;
+		page = eb->pages[i];
+	}
+}
+EXPORT_SYMBOL(memset_extent_buffer);
+
+void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
+			unsigned long dst_offset, unsigned long src_offset,
+			unsigned long len)
+{
+	u64 dst_len = dst->len;
+	size_t cur;
+	size_t offset;
+	struct page *page;
+	char *kaddr;
+	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
+
+	WARN_ON(src->len != dst_len);
+
+	offset = dst_offset & ((unsigned long)PAGE_CACHE_SIZE - 1);
+	if (i == 0)
+		offset += start_offset;
+
+	while(len > 0) {
+		page = dst->pages[i];
+		WARN_ON(!PageUptodate(page));
+
+		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
+
+		// kaddr = kmap_atomic(page, KM_USER1);
+		kaddr = page_address(page);
+		read_extent_buffer(src, kaddr + offset, src_offset, cur);
+		// kunmap_atomic(kaddr, KM_USER1);
+
+		src_offset += cur;
+		len -= cur;
+		offset = 0;
+		i++;
+	}
+}
+EXPORT_SYMBOL(copy_extent_buffer);
+
+static void move_pages(struct page *dst_page, struct page *src_page,
+		       unsigned long dst_off, unsigned long src_off,
+		       unsigned long len)
+{
+	// char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
+	char *dst_kaddr = page_address(dst_page);
+	if (dst_page == src_page) {
+		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
+	} else {
+		// char *src_kaddr = kmap_atomic(src_page, KM_USER1);
+		char *src_kaddr = page_address(src_page);
+		char *p = dst_kaddr + dst_off + len;
+		char *s = src_kaddr + src_off + len;
+
+		while (len--)
+			*--p = *--s;
+
+		// kunmap_atomic(src_kaddr, KM_USER1);
+	}
+	// kunmap_atomic(dst_kaddr, KM_USER0);
+}
+
+static void copy_pages(struct page *dst_page, struct page *src_page,
+		       unsigned long dst_off, unsigned long src_off,
+		       unsigned long len)
+{
+	// kmap_atomic(dst_page, KM_USER0);
+	char *dst_kaddr = page_address(dst_page);
+	char *src_kaddr;
+
+	if (dst_page != src_page)
+		src_kaddr = page_address(src_page); // kmap_atomic(src_page, KM_USER1);
+	else
+		src_kaddr = dst_kaddr;
+
+	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
+	/*
+	kunmap_atomic(dst_kaddr, KM_USER0);
+	if (dst_page != src_page)
+		kunmap_atomic(src_kaddr, KM_USER1);
+	*/
+}
+
+void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
+			  unsigned long src_offset, unsigned long len)
+{
+	size_t cur;
+	size_t dst_off_in_page;
+	size_t src_off_in_page;
+	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long dst_i;
+	unsigned long src_i;
+
+	if (src_offset + len > dst->len) {
+		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
+		       src_offset, len, dst->len);
+		BUG_ON(1);
+	}
+	if (dst_offset + len > dst->len) {
+		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
+		       dst_offset, len, dst->len);
+		BUG_ON(1);
+	}
+
+	while(len > 0) {
+		dst_off_in_page = dst_offset &
+			((unsigned long)PAGE_CACHE_SIZE - 1);
+		src_off_in_page = src_offset &
+			((unsigned long)PAGE_CACHE_SIZE - 1);
+
+		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
+		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
+
+		if (src_i == 0)
+			src_off_in_page += start_offset;
+		if (dst_i == 0)
+			dst_off_in_page += start_offset;
+
+		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
+					       src_off_in_page));
+		cur = min(cur, (unsigned long)(PAGE_CACHE_SIZE -
+					       dst_off_in_page));
+
+		copy_pages(dst->pages[dst_i], dst->pages[src_i],
+			   dst_off_in_page, src_off_in_page, cur);
+
+		src_offset += cur;
+		dst_offset += cur;
+		len -= cur;
+	}
+}
+EXPORT_SYMBOL(memcpy_extent_buffer);
+
+void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
+			   unsigned long src_offset, unsigned long len)
+{
+	size_t cur;
+	size_t dst_off_in_page;
+	size_t src_off_in_page;
+	unsigned long dst_end = dst_offset + len - 1;
+	unsigned long src_end = src_offset + len - 1;
+	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+	unsigned long dst_i;
+	unsigned long src_i;
+
+	if (src_offset + len > dst->len) {
+		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
+		       src_offset, len, dst->len);
+		BUG_ON(1);
+	}
+	if (dst_offset + len > dst->len) {
+		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
+		       dst_offset, len, dst->len);
+		BUG_ON(1);
+	}
+	if (dst_offset < src_offset) {
+		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
+		return;
+	}
+	while(len > 0) {
+		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
+		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
+
+		dst_off_in_page = dst_end &
+			((unsigned long)PAGE_CACHE_SIZE - 1);
+		src_off_in_page = src_end &
+			((unsigned long)PAGE_CACHE_SIZE - 1);
+
+		if (src_i == 0)
+			src_off_in_page += start_offset;
+		if (dst_i == 0)
+			dst_off_in_page += start_offset;
+
+		cur = min(len, src_off_in_page + 1);
+		cur = min(cur, dst_off_in_page + 1);
+// printk("move pages orig dst %lu src %lu len %lu, this %lu %lu %lu\n", dst_offset, src_offset, len, dst_off_in_page - cur + 1, src_off_in_page - cur + 1, cur);
+		move_pages(dst->pages[dst_i], dst->pages[src_i],
+			   dst_off_in_page - cur + 1,
+			   src_off_in_page - cur + 1, cur);
+
+		dst_end -= cur - 1;
+		src_end -= cur - 1;
+		len -= cur;
+	}
+}
+EXPORT_SYMBOL(memmove_extent_buffer);
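
Taken together, the additions form a small API for metadata buffers
that can span several page-cache pages: allocate, read, copy bytes in
and out across page boundaries, dirty, and release. A hedged usage
sketch; process_block, the offsets, and the error strategy are
placeholders rather than code from this commit:

/*
 * Page-count rule used throughout the new code: index of the last
 * touched page minus index of the first, plus one. With 4 KiB pages,
 * start = 6144 and len = 16384 give
 * ((6144 + 16384 - 1) >> 12) - (6144 >> 12) + 1 = 5 - 1 + 1 = 5 pages.
 */
static int process_block(struct extent_map_tree *tree, u64 start,
			 unsigned long blocksize)
{
	struct extent_buffer *eb;
	char header[32];

	eb = alloc_extent_buffer(tree, start, blocksize, GFP_NOFS);
	if (!eb)
		return -ENOMEM;

	/* read the backing pages; wait == 1 blocks until the IO is done */
	if (read_extent_buffer_pages(tree, eb, 1)) {
		free_extent_buffer(eb);
		return -EIO;
	}

	/* copy out 32 bytes; page crossings are handled internally */
	read_extent_buffer(eb, header, 0, sizeof(header));

	/* zero the same bytes in place and mark the range dirty */
	memset_extent_buffer(eb, 0, 0, sizeof(header));
	set_extent_buffer_dirty(tree, eb);

	free_extent_buffer(eb);	/* drops our ref and the page references */
	return 0;
}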