Diffstat (limited to 'fs/ntfs/compress.c')
-rw-r--r--	fs/ntfs/compress.c	77
1 file changed, 34 insertions, 43 deletions
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index f82498c35e78..f2b5e746f49b 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -104,16 +104,12 @@ static void zero_partial_compressed_page(struct page *page,
 	unsigned int kp_ofs;
 
 	ntfs_debug("Zeroing page region outside initialized size.");
-	if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
-		/*
-		 * FIXME: Using clear_page() will become wrong when we get
-		 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
-		 */
+	if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
 		clear_page(kp);
 		return;
 	}
-	kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
-	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
+	kp_ofs = initialized_size & ~PAGE_MASK;
+	memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
 	return;
 }
 
@@ -123,7 +119,7 @@ static void zero_partial_compressed_page(struct page *page,
 static inline void handle_bounds_compressed_page(struct page *page,
 		const loff_t i_size, const s64 initialized_size)
 {
-	if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
+	if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
 			(initialized_size < i_size))
 		zero_partial_compressed_page(page, initialized_size);
 	return;
@@ -160,7 +156,7 @@ static inline void handle_bounds_compressed_page(struct page *page,
  * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
  * completed during the decompression of the compression block (@cb_start).
  *
- * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up
+ * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
  * unpredicatbly! You have been warned!
  *
  * Note to hackers: This function may not sleep until it has finished accessing
@@ -241,7 +237,7 @@ return_error:
 			if (di == xpage)
 				*xpage_done = 1;
 			else
-				page_cache_release(dp);
+				put_page(dp);
 			dest_pages[di] = NULL;
 		}
 	}
@@ -274,7 +270,7 @@ return_error:
 		cb = cb_sb_end;
 
 		/* Advance destination position to next sub-block. */
-		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK;
+		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
 		if (!*dest_ofs && (++*dest_index > dest_max_index))
 			goto return_overflow;
 		goto do_next_sb;
@@ -301,7 +297,7 @@ return_error:
 
 		/* Advance destination position to next sub-block. */
 		*dest_ofs += NTFS_SB_SIZE;
-		if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) {
+		if (!(*dest_ofs &= ~PAGE_MASK)) {
 finalize_page:
 			/*
 			 * First stage: add current page index to array of
@@ -335,7 +331,7 @@ do_next_tag:
 			*dest_ofs += nr_bytes;
 		}
 		/* We have finished the current sub-block. */
-		if (!(*dest_ofs &= ~PAGE_CACHE_MASK))
+		if (!(*dest_ofs &= ~PAGE_MASK))
 			goto finalize_page;
 		goto do_next_sb;
 	}
@@ -462,7 +458,7 @@ return_overflow:
  * have been written to so that we would lose data if we were to just overwrite
  * them with the out-of-date uncompressed data.
  *
- * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at
+ * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
  * the end of the file I think. We need to detect this case and zero the out
  * of bounds remainder of the page in question and mark it as handled. At the
  * moment we would just return -EIO on such a page. This bug will only become
@@ -470,7 +466,7 @@ return_overflow:
  * clusters so is probably not going to be seen by anyone. Still this should
  * be fixed. (AIA)
  *
- * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in
+ * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
  * handling sparse and compressed cbs. (AIA)
  *
  * FIXME: At the moment we don't do any zeroing out in the case that
@@ -497,14 +493,14 @@ int ntfs_read_compressed_block(struct page *page)
 	u64 cb_size_mask = cb_size - 1UL;
 	VCN vcn;
 	LCN lcn;
-	/* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */
-	VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
+	/* The first wanted vcn (minimum alignment is PAGE_SIZE). */
+	VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
 			vol->cluster_size_bits;
 	/*
 	 * The first vcn after the last wanted vcn (minimum alignment is again
-	 * PAGE_CACHE_SIZE.
+	 * PAGE_SIZE.
 	 */
-	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
+	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
 			& ~cb_size_mask) >> vol->cluster_size_bits;
 	/* Number of compression blocks (cbs) in the wanted vcn range. */
 	unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
@@ -515,7 +511,7 @@ int ntfs_read_compressed_block(struct page *page)
 	 * guarantees of start_vcn and end_vcn, no need to round up here.
 	 */
 	unsigned int nr_pages = (end_vcn - start_vcn) <<
-			vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
+			vol->cluster_size_bits >> PAGE_SHIFT;
 	unsigned int xpage, max_page, cur_page, cur_ofs, i;
 	unsigned int cb_clusters, cb_max_ofs;
 	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
@@ -549,7 +545,7 @@ int ntfs_read_compressed_block(struct page *page)
 	 * We have already been given one page, this is the one we must do.
 	 * Once again, the alignment guarantees keep it simple.
 	 */
-	offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
+	offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
 	xpage = index - offset;
 	pages[xpage] = page;
 	/*
@@ -560,13 +556,13 @@ int ntfs_read_compressed_block(struct page *page)
 	i_size = i_size_read(VFS_I(ni));
 	initialized_size = ni->initialized_size;
 	read_unlock_irqrestore(&ni->size_lock, flags);
-	max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+	max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
 			offset;
 	/* Is the page fully outside i_size? (truncate in progress) */
 	if (xpage >= max_page) {
 		kfree(bhs);
 		kfree(pages);
-		zero_user(page, 0, PAGE_CACHE_SIZE);
+		zero_user(page, 0, PAGE_SIZE);
 		ntfs_debug("Compressed read outside i_size - truncated?");
 		SetPageUptodate(page);
 		unlock_page(page);
@@ -591,7 +587,7 @@ int ntfs_read_compressed_block(struct page *page)
 				continue;
 			}
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			pages[i] = NULL;
 		}
 	}
@@ -735,9 +731,9 @@ lock_retry_remap:
 	ntfs_debug("Successfully read the compression block.");
 
 	/* The last page and maximum offset within it for the current cb. */
-	cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size;
-	cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK;
-	cb_max_page >>= PAGE_CACHE_SHIFT;
+	cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
+	cb_max_ofs = cb_max_page & ~PAGE_MASK;
+	cb_max_page >>= PAGE_SHIFT;
 
 	/* Catch end of file inside a compression block. */
 	if (cb_max_page > max_page)
@@ -753,16 +749,11 @@ lock_retry_remap:
 		for (; cur_page < cb_max_page; cur_page++) {
 			page = pages[cur_page];
 			if (page) {
-				/*
-				 * FIXME: Using clear_page() will become wrong
-				 * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
-				 * for now there is no problem.
-				 */
 				if (likely(!cur_ofs))
 					clear_page(page_address(page));
 				else
 					memset(page_address(page) + cur_ofs, 0,
-							PAGE_CACHE_SIZE -
+							PAGE_SIZE -
 							cur_ofs);
 				flush_dcache_page(page);
 				kunmap(page);
@@ -771,10 +762,10 @@ lock_retry_remap:
 				if (cur_page == xpage)
 					xpage_done = 1;
 				else
-					page_cache_release(page);
+					put_page(page);
 				pages[cur_page] = NULL;
 			}
-			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
+			cb_pos += PAGE_SIZE - cur_ofs;
 			cur_ofs = 0;
 			if (cb_pos >= cb_end)
 				break;
@@ -807,7 +798,7 @@ lock_retry_remap:
 		 * synchronous io for the majority of pages.
 		 * Or if we choose not to do the read-ahead/-behind stuff, we
 		 * could just return block_read_full_page(pages[xpage]) as long
-		 * as PAGE_CACHE_SIZE <= cb_size.
+		 * as PAGE_SIZE <= cb_size.
 		 */
 		if (cb_max_ofs)
 			cb_max_page--;
@@ -816,8 +807,8 @@ lock_retry_remap:
 			page = pages[cur_page];
 			if (page)
 				memcpy(page_address(page) + cur_ofs, cb_pos,
-						PAGE_CACHE_SIZE - cur_ofs);
-			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
+						PAGE_SIZE - cur_ofs);
+			cb_pos += PAGE_SIZE - cur_ofs;
 			cur_ofs = 0;
 			if (cb_pos >= cb_end)
 				break;
@@ -850,10 +841,10 @@ lock_retry_remap:
 				if (cur2_page == xpage)
 					xpage_done = 1;
 				else
-					page_cache_release(page);
+					put_page(page);
 				pages[cur2_page] = NULL;
 			}
-			cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2;
+			cb_pos2 += PAGE_SIZE - cur_ofs2;
 			cur_ofs2 = 0;
 			if (cb_pos2 >= cb_end)
 				break;
@@ -884,7 +875,7 @@ lock_retry_remap:
 				kunmap(page);
 				unlock_page(page);
 				if (prev_cur_page != xpage)
-					page_cache_release(page);
+					put_page(page);
 				pages[prev_cur_page] = NULL;
 			}
 		}
@@ -914,7 +905,7 @@ lock_retry_remap:
 				kunmap(page);
 				unlock_page(page);
 				if (cur_page != xpage)
-					page_cache_release(page);
+					put_page(page);
 				pages[cur_page] = NULL;
 			}
 		}
@@ -961,7 +952,7 @@ err_out:
 			kunmap(page);
 			unlock_page(page);
 			if (i != xpage)
-				page_cache_release(page);
+				put_page(page);
 		}
 	}
 	kfree(pages);
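
Background note (not part of the patch): this change is purely mechanical. Before this cleanup, include/linux/pagemap.h defined the PAGE_CACHE_* names and page_cache_release() as straight aliases of the generic page macros, roughly as sketched below (an abridged sketch of the old header, not the full set; PAGE_SHIFT, PAGE_SIZE and PAGE_MASK come from the architecture's <asm/page.h>):

/* Abridged sketch of the old include/linux/pagemap.h aliases that this
 * patch folds away. Because each PAGE_CACHE_* name expanded to its
 * PAGE_* counterpart, every hunk above is a 1:1 textual substitution
 * with no change in behaviour. */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK

/* Dropping a page-cache reference was likewise just put_page(). */
#define page_cache_release(page)	put_page(page)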