author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2016-04-01 08:29:47 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>		2016-04-04 13:41:08 -0400
commit		09cbfeaf1a5a67bfb3201e0c83c810cecb2efa5a
tree		6cdf210c9c0f981cd22544feeba701892ec19464 /fs/btrfs/compression.c
parent		c05c2ec96bb8b7310da1055c7b9d786a3ec6dc0c
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time
ago with the promise that one day it would be possible to implement the
page cache with bigger chunks than PAGE_SIZE. This promise never
materialized, and it is unlikely that it ever will.

We have many places where PAGE_CACHE_SIZE is assumed to be equal to
PAGE_SIZE, and it is a constant source of confusion whether PAGE_CACHE_*
or PAGE_* constants should be used in a particular case, especially on
the border between fs and mm.

A global switch to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much
breakage to be doable.

Let's stop pretending that pages in the page cache are special. They
are not.

The changes are pretty straightforward:

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};

 - page_cache_get() -> get_page();

 - page_cache_release() -> put_page();

This patch contains automated changes generated with coccinelle using
the script below. For some reason, coccinelle doesn't patch header
files; I've called spatch for them manually. The only adjustment after
coccinelle is reverting the change to the PAGE_CACHE_ALIGN definition:
we are going to drop it later.

There are a few places in the code that coccinelle didn't reach; I'll
fix them manually in a separate patch. Comments and documentation will
also be addressed in a separate patch.

    virtual patch

    @@
    expression E;
    @@
    - E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
    + E

    @@
    expression E;
    @@
    - E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
    + E

    @@
    @@
    - PAGE_CACHE_SHIFT
    + PAGE_SHIFT

    @@
    @@
    - PAGE_CACHE_SIZE
    + PAGE_SIZE

    @@
    @@
    - PAGE_CACHE_MASK
    + PAGE_MASK

    @@
    expression E;
    @@
    - PAGE_CACHE_ALIGN(E)
    + PAGE_ALIGN(E)

    @@
    expression E;
    @@
    - page_cache_get(E)
    + get_page(E)

    @@
    expression E;
    @@
    - page_cache_release(E)
    + put_page(E)

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
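The semantic patch is purely mechanical, so its effect on a caller is easy to show. Below is a minimal sketch, not taken from the patch itself, of what a converted page-cache user looks like: example_touch_page and its arguments are hypothetical, while get_page(), put_page(), PAGE_SHIFT, and PAGE_SIZE are the real kernel interfaces the rules rewrite to. (For header files, one would presumably run spatch by hand, e.g. with --sp-file and --in-place; the exact invocation is an assumption, the commit does not record it.)

    #include <linux/mm.h>	/* get_page(), put_page(), PAGE_SIZE, PAGE_SHIFT */
    #include <linux/pagemap.h>	/* page cache helpers */

    /*
     * Hypothetical caller, shown only to illustrate the conversion.
     * Before the patch it would have used PAGE_CACHE_SHIFT,
     * page_cache_get() and page_cache_release().
     */
    static void example_touch_page(struct page *page, u64 file_offset)
    {
    	/* was: file_offset >> PAGE_CACHE_SHIFT */
    	pgoff_t index = file_offset >> PAGE_SHIFT;

    	/* was: page_cache_get(page) -- pin the page while we use it */
    	get_page(page);

    	/* ... operate on at most PAGE_SIZE bytes (was PAGE_CACHE_SIZE) ... */

    	/* was: page_cache_release(page) -- drop our reference */
    	put_page(page);

    	(void)index;	/* silence unused-variable warning in this sketch */
    }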
Diffstat (limited to 'fs/btrfs/compression.c')
-rw-r--r--	fs/btrfs/compression.c	84
1 file changed, 42 insertions(+), 42 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 3346cd8f9910..ff61a41ac90b 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -119,7 +119,7 @@ static int check_compressed_csum(struct inode *inode,
 		csum = ~(u32)0;
 
 		kaddr = kmap_atomic(page);
-		csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE);
+		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
 		btrfs_csum_final(csum, (char *)&csum);
 		kunmap_atomic(kaddr);
 
@@ -190,7 +190,7 @@ csum_failed:
 	for (index = 0; index < cb->nr_pages; index++) {
 		page = cb->compressed_pages[index];
 		page->mapping = NULL;
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	/* do io completion on the original bio */
@@ -224,8 +224,8 @@ out:
 static noinline void end_compressed_writeback(struct inode *inode,
 					      const struct compressed_bio *cb)
 {
-	unsigned long index = cb->start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_CACHE_SHIFT;
+	unsigned long index = cb->start >> PAGE_SHIFT;
+	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
 	struct page *pages[16];
 	unsigned long nr_pages = end_index - index + 1;
 	int i;
@@ -247,7 +247,7 @@ static noinline void end_compressed_writeback(struct inode *inode,
 			if (cb->errors)
 				SetPageError(pages[i]);
 			end_page_writeback(pages[i]);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 		}
 		nr_pages -= ret;
 		index += ret;
@@ -304,7 +304,7 @@ static void end_compressed_bio_write(struct bio *bio)
 	for (index = 0; index < cb->nr_pages; index++) {
 		page = cb->compressed_pages[index];
 		page->mapping = NULL;
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	/* finally free the cb struct */
@@ -341,7 +341,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	int ret;
 	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
-	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
+	WARN_ON(start & ((u64)PAGE_SIZE - 1));
 	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
 	if (!cb)
 		return -ENOMEM;
@@ -374,14 +374,14 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 		page->mapping = inode->i_mapping;
 		if (bio->bi_iter.bi_size)
 			ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
-							   PAGE_CACHE_SIZE,
+							   PAGE_SIZE,
 							   bio, 0);
 		else
 			ret = 0;
 
 		page->mapping = NULL;
-		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
-		    PAGE_CACHE_SIZE) {
+		if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
+		    PAGE_SIZE) {
 			bio_get(bio);
 
 			/*
@@ -410,15 +410,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 			BUG_ON(!bio);
 			bio->bi_private = cb;
 			bio->bi_end_io = end_compressed_bio_write;
-			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
+			bio_add_page(bio, page, PAGE_SIZE, 0);
 		}
-		if (bytes_left < PAGE_CACHE_SIZE) {
+		if (bytes_left < PAGE_SIZE) {
 			btrfs_info(BTRFS_I(inode)->root->fs_info,
 					"bytes left %lu compress len %lu nr %lu",
 			       bytes_left, cb->compressed_len, cb->nr_pages);
 		}
-		bytes_left -= PAGE_CACHE_SIZE;
-		first_byte += PAGE_CACHE_SIZE;
+		bytes_left -= PAGE_SIZE;
+		first_byte += PAGE_SIZE;
 		cond_resched();
 	}
 	bio_get(bio);
@@ -457,17 +457,17 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 	int misses = 0;
 
 	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
-	last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
+	last_offset = (page_offset(page) + PAGE_SIZE);
 	em_tree = &BTRFS_I(inode)->extent_tree;
 	tree = &BTRFS_I(inode)->io_tree;
 
 	if (isize == 0)
 		return 0;
 
-	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 
 	while (last_offset < compressed_end) {
-		pg_index = last_offset >> PAGE_CACHE_SHIFT;
+		pg_index = last_offset >> PAGE_SHIFT;
 
 		if (pg_index > end_index)
 			break;
@@ -488,11 +488,11 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			break;
 
 		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
-			page_cache_release(page);
+			put_page(page);
 			goto next;
 		}
 
-		end = last_offset + PAGE_CACHE_SIZE - 1;
+		end = last_offset + PAGE_SIZE - 1;
 		/*
 		 * at this point, we have a locked page in the page cache
 		 * for these bytes in the file.  But, we have to make
@@ -502,27 +502,27 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		lock_extent(tree, last_offset, end);
 		read_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, last_offset,
-					   PAGE_CACHE_SIZE);
+					   PAGE_SIZE);
 		read_unlock(&em_tree->lock);
 
 		if (!em || last_offset < em->start ||
-		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
+		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
 		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
 			free_extent_map(em);
 			unlock_extent(tree, last_offset, end);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			break;
 		}
 		free_extent_map(em);
 
 		if (page->index == end_index) {
 			char *userpage;
-			size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);
+			size_t zero_offset = isize & (PAGE_SIZE - 1);
 
 			if (zero_offset) {
 				int zeros;
-				zeros = PAGE_CACHE_SIZE - zero_offset;
+				zeros = PAGE_SIZE - zero_offset;
 				userpage = kmap_atomic(page);
 				memset(userpage + zero_offset, 0, zeros);
 				flush_dcache_page(page);
@@ -531,19 +531,19 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		}
 
 		ret = bio_add_page(cb->orig_bio, page,
-				   PAGE_CACHE_SIZE, 0);
+				   PAGE_SIZE, 0);
 
-		if (ret == PAGE_CACHE_SIZE) {
+		if (ret == PAGE_SIZE) {
 			nr_pages++;
-			page_cache_release(page);
+			put_page(page);
 		} else {
 			unlock_extent(tree, last_offset, end);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			break;
 		}
 next:
-		last_offset += PAGE_CACHE_SIZE;
+		last_offset += PAGE_SIZE;
 	}
 	return 0;
 }
@@ -567,7 +567,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	struct extent_map_tree *em_tree;
 	struct compressed_bio *cb;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
+	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
 	unsigned long compressed_len;
 	unsigned long nr_pages;
 	unsigned long pg_index;
@@ -589,7 +589,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree,
 				   page_offset(bio->bi_io_vec->bv_page),
-				   PAGE_CACHE_SIZE);
+				   PAGE_SIZE);
 	read_unlock(&em_tree->lock);
 	if (!em)
 		return -EIO;
@@ -617,7 +617,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	cb->compress_type = extent_compress_type(bio_flags);
 	cb->orig_bio = bio;
 
-	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE);
+	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
 	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
 				       GFP_NOFS);
 	if (!cb->compressed_pages)
@@ -640,7 +640,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	add_ra_bio_pages(inode, em_start + em_len, cb);
 
 	/* include any pages we added in add_ra-bio_pages */
-	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
+	uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
 	cb->len = uncompressed_len;
 
 	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
@@ -653,18 +653,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 		page = cb->compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
-		page->index = em_start >> PAGE_CACHE_SHIFT;
+		page->index = em_start >> PAGE_SHIFT;
 
 		if (comp_bio->bi_iter.bi_size)
 			ret = tree->ops->merge_bio_hook(READ, page, 0,
-							PAGE_CACHE_SIZE,
+							PAGE_SIZE,
 							comp_bio, 0);
 		else
 			ret = 0;
 
 		page->mapping = NULL;
-		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
-		    PAGE_CACHE_SIZE) {
+		if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
+		    PAGE_SIZE) {
 			bio_get(comp_bio);
 
 			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
@@ -702,9 +702,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 			comp_bio->bi_private = cb;
 			comp_bio->bi_end_io = end_compressed_bio_read;
 
-			bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
+			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
 		}
-		cur_disk_byte += PAGE_CACHE_SIZE;
+		cur_disk_byte += PAGE_SIZE;
 	}
 	bio_get(comp_bio);
 
@@ -1013,8 +1013,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 
 	/* copy bytes from the working buffer into the pages */
 	while (working_bytes > 0) {
-		bytes = min(PAGE_CACHE_SIZE - *pg_offset,
-			    PAGE_CACHE_SIZE - buf_offset);
+		bytes = min(PAGE_SIZE - *pg_offset,
+			    PAGE_SIZE - buf_offset);
 		bytes = min(bytes, working_bytes);
 		kaddr = kmap_atomic(page_out);
 		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
@@ -1027,7 +1027,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 		current_buf_start += bytes;
 
 		/* check if we need to pick another page */
-		if (*pg_offset == PAGE_CACHE_SIZE) {
+		if (*pg_offset == PAGE_SIZE) {
 			(*pg_index)++;
 			if (*pg_index >= vcnt)
 				return 0;