author		Anton Altaparmakov <aia21@cantab.net>	2004-11-18 08:46:45 -0500
committer	Anton Altaparmakov <aia21@cantab.net>	2005-05-05 05:30:29 -0400
commit		367636772f094fd840d2d79e75257bcfaa28e70f (patch)
tree		a8d8f3154eea80710a8cad2b7de082046aa012f0 /fs/ntfs/compress.c
parent		899101aebb9ab3692aa8efe2805174ee0ee3edb5 (diff)
NTFS: - In fs/ntfs/compress.c, use i_size_read() at the start and then use
        the cached value everywhere.  Cache the initialized_size in the same
        way and protect the critical region where the two sizes are read
        using the new size_lock of the ntfs inode.
      - Add the new size_lock to the ntfs_inode structure (fs/ntfs/inode.h)
        and initialize it (fs/ntfs/inode.c).

Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
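For illustration, the pattern the commit describes reduces to the sketch
below. The helper name ntfs_snapshot_sizes() is hypothetical (the patch
open-codes this at the top of ntfs_read_compressed_block()), but size_lock,
i_size_read(), and ni->initialized_size are taken from the patch itself:

	/*
	 * Hypothetical helper showing the pattern: snapshot both sizes
	 * once, consistently, under the new ntfs inode size_lock, then
	 * pass the cached values around instead of rereading the live
	 * ntfs_inode fields.
	 */
	static void ntfs_snapshot_sizes(ntfs_inode *ni, loff_t *i_size,
			s64 *initialized_size)
	{
		unsigned long flags;

		read_lock_irqsave(&ni->size_lock, flags);
		/* i_size_read() avoids torn i_size reads on 32-bit SMP. */
		*i_size = i_size_read(VFS_I(ni));
		*initialized_size = ni->initialized_size;
		read_unlock_irqrestore(&ni->size_lock, flags);
	}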
Diffstat (limited to 'fs/ntfs/compress.c')
-rw-r--r--	fs/ntfs/compress.c | 46
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index ee5ae706f861..6d265cfd49aa 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -96,13 +96,14 @@ void free_compression_buffers(void)
 /**
  * zero_partial_compressed_page - zero out of bounds compressed page region
  */
-static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
+static void zero_partial_compressed_page(struct page *page,
+		const s64 initialized_size)
 {
 	u8 *kp = page_address(page);
 	unsigned int kp_ofs;
 
 	ntfs_debug("Zeroing page region outside initialized size.");
-	if (((s64)page->index << PAGE_CACHE_SHIFT) >= ni->initialized_size) {
+	if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
 		/*
 		 * FIXME: Using clear_page() will become wrong when we get
 		 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
@@ -110,7 +111,7 @@ static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
 		clear_page(kp);
 		return;
 	}
-	kp_ofs = ni->initialized_size & ~PAGE_CACHE_MASK;
+	kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
 	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
 	return;
 }
@@ -118,12 +119,12 @@ static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
 /**
  * handle_bounds_compressed_page - test for&handle out of bounds compressed page
  */
-static inline void handle_bounds_compressed_page(ntfs_inode *ni,
-		struct page *page)
+static inline void handle_bounds_compressed_page(struct page *page,
+		const loff_t i_size, const s64 initialized_size)
 {
-	if ((page->index >= (ni->initialized_size >> PAGE_CACHE_SHIFT)) &&
-			(ni->initialized_size < VFS_I(ni)->i_size))
-		zero_partial_compressed_page(ni, page);
+	if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
+			(initialized_size < i_size))
+		zero_partial_compressed_page(page, initialized_size);
 	return;
 }
 
@@ -138,6 +139,8 @@ static inline void handle_bounds_compressed_page(ntfs_inode *ni,
  * @xpage_done:	set to 1 if xpage was completed successfully (IN/OUT)
  * @cb_start:	compression block to decompress (IN)
  * @cb_size:	size of compression block @cb_start in bytes (IN)
+ * @i_size:	file size when we started the read (IN)
+ * @initialized_size:	initialized file size when we started the read (IN)
  *
  * The caller must have disabled preemption. ntfs_decompress() reenables it when
  * the critical section is finished.
@@ -165,7 +168,8 @@ static inline void handle_bounds_compressed_page(ntfs_inode *ni,
 static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 		int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
 		const int xpage, char *xpage_done, u8 *const cb_start,
-		const u32 cb_size)
+		const u32 cb_size, const loff_t i_size,
+		const s64 initialized_size)
 {
 	/*
 	 * Pointers into the compressed data, i.e. the compression block (cb),
@@ -219,9 +223,6 @@ return_error:
 	spin_unlock(&ntfs_cb_lock);
 	/* Second stage: finalize completed pages. */
 	if (nr_completed_pages > 0) {
-		struct page *page = dest_pages[completed_pages[0]];
-		ntfs_inode *ni = NTFS_I(page->mapping->host);
-
 		for (i = 0; i < nr_completed_pages; i++) {
 			int di = completed_pages[i];
 
@@ -230,7 +231,8 @@ return_error:
 			 * If we are outside the initialized size, zero
 			 * the out of bounds page range.
 			 */
-			handle_bounds_compressed_page(ni, dp);
+			handle_bounds_compressed_page(dp, i_size,
+					initialized_size);
 			flush_dcache_page(dp);
 			kunmap(dp);
 			SetPageUptodate(dp);
@@ -478,12 +480,14 @@ return_overflow:
  */
 int ntfs_read_compressed_block(struct page *page)
 {
+	loff_t i_size;
+	s64 initialized_size;
 	struct address_space *mapping = page->mapping;
 	ntfs_inode *ni = NTFS_I(mapping->host);
 	ntfs_volume *vol = ni->vol;
 	struct super_block *sb = vol->sb;
 	runlist_element *rl;
-	unsigned long block_size = sb->s_blocksize;
+	unsigned long flags, block_size = sb->s_blocksize;
 	unsigned char block_size_bits = sb->s_blocksize_bits;
 	u8 *cb, *cb_pos, *cb_end;
 	struct buffer_head **bhs;
@@ -552,8 +556,12 @@ int ntfs_read_compressed_block(struct page *page)
 	 * The remaining pages need to be allocated and inserted into the page
 	 * cache, alignment guarantees keep all the below much simpler. (-8
 	 */
-	max_page = ((VFS_I(ni)->i_size + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT) - offset;
+	read_lock_irqsave(&ni->size_lock, flags);
+	i_size = i_size_read(VFS_I(ni));
+	initialized_size = ni->initialized_size;
+	read_unlock_irqrestore(&ni->size_lock, flags);
+	max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+			offset;
 	if (nr_pages < max_page)
 		max_page = nr_pages;
 	for (i = 0; i < max_page; i++, offset++) {
@@ -824,7 +832,8 @@ lock_retry_remap:
 			 * If we are outside the initialized size, zero
 			 * the out of bounds page range.
 			 */
-			handle_bounds_compressed_page(ni, page);
+			handle_bounds_compressed_page(page, i_size,
+					initialized_size);
 			flush_dcache_page(page);
 			kunmap(page);
 			SetPageUptodate(page);
@@ -847,7 +856,8 @@ lock_retry_remap:
 		ntfs_debug("Found compressed compression block.");
 		err = ntfs_decompress(pages, &cur_page, &cur_ofs,
 				cb_max_page, cb_max_ofs, xpage, &xpage_done,
-				cb_pos, cb_size - (cb_pos - cb));
+				cb_pos, cb_size - (cb_pos - cb), i_size,
+				initialized_size);
 		/*
 		 * We can sleep from now on, lock already dropped by
 		 * ntfs_decompress().