author      Jan Kara <jack@suse.cz>   2009-09-23 08:44:56 -0400
committer   Jan Kara <jack@suse.cz>   2009-12-10 09:02:49 -0500
commit      59bc055211b8d266ab6089158058bf8268e02006 (patch)
tree        3c041c412fd798d321cae2550da2bce7c363eabf /fs/isofs/compress.c
parent      3067e02f8f3ae2f3f02ba76400d03b8bcb4942b0 (diff)
zisofs: Implement reading of compressed files when PAGE_CACHE_SIZE > compress block size
Also split and clean up zisofs_readpage() while we are changing it anyway.
Signed-off-by: Jan Kara <jack@suse.cz>
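
The case this patch adds is a compressed block that is smaller than a page, so several zisofs blocks are needed to fill one page cache page. For orientation, here is a standalone userspace sketch of the index arithmetic in the two cases the new zisofs_readpage() distinguishes; map_page() is an illustrative stand-in, and PAGE_SHIFT = 12 (4K pages) is an assumption replacing the kernel's PAGE_CACHE_SHIFT:

/*
 * Sketch only, not part of the patch: map a page index onto zisofs
 * compression blocks for both orderings of page size vs. block size.
 */
#include <stdio.h>

#define PAGE_SHIFT 12UL   /* assumed 4K pages, stand-in for PAGE_CACHE_SHIFT */

static void map_page(unsigned long index, unsigned int zisofs_block_shift)
{
        if (PAGE_SHIFT <= zisofs_block_shift) {
                /* Old, easy case: one zisofs block covers whole pages. */
                unsigned long pages_per_cblock =
                        1UL << (zisofs_block_shift - PAGE_SHIFT);
                unsigned long full_page = index & (pages_per_cblock - 1);
                unsigned long cblock = index >> (zisofs_block_shift - PAGE_SHIFT);

                printf("page %lu: block %lu, page %lu of %lu in that block\n",
                       index, cblock, full_page, pages_per_cblock);
        } else {
                /* New case: several compressed blocks are needed per page. */
                unsigned long blocks_per_page =
                        1UL << (PAGE_SHIFT - zisofs_block_shift);
                unsigned long first_cblock =
                        index << (PAGE_SHIFT - zisofs_block_shift);

                printf("page %lu: blocks %lu..%lu\n", index, first_cblock,
                       first_cblock + blocks_per_page - 1);
        }
}

int main(void)
{
        map_page(5, 15);  /* 32K zisofs blocks, 4K pages: 8 pages per block */
        map_page(5, 11);  /* 2K zisofs blocks, 4K pages: 2 blocks per page */
        return 0;
}

In the first branch one uncompressed block spans whole pages (the only case the old code handled); in the second, new branch a page straddles several compressed blocks, which is why the uncompressed output can land at a non-zero offset (poffset) within the first page.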
Diffstat (limited to 'fs/isofs/compress.c')
-rw-r--r--   fs/isofs/compress.c   533
1 file changed, 285 insertions, 248 deletions
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index defb932eee9a..0b3fa7974fa8 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -36,286 +36,323 @@ static void *zisofs_zlib_workspace;
 static DEFINE_MUTEX(zisofs_zlib_lock);
 
 /*
- * When decompressing, we typically obtain more than one page
- * per reference.  We inject the additional pages into the page
- * cache as a form of readahead.
+ * Read data of @inode from @block_start to @block_end and uncompress
+ * to one zisofs block. Store the data in the @pages array with @pcount
+ * entries. Start storing at offset @poffset of the first page.
  */
-static int zisofs_readpage(struct file *file, struct page *page)
+static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
+                                      loff_t block_end, int pcount,
+                                      struct page **pages, unsigned poffset,
+                                      int *errp)
 {
-        struct inode *inode = file->f_path.dentry->d_inode;
-        struct address_space *mapping = inode->i_mapping;
-        unsigned int maxpage, xpage, fpage, blockindex;
-        unsigned long offset;
-        unsigned long blockptr, blockendptr, cstart, cend, csize;
-        struct buffer_head *bh, *ptrbh[2];
-        unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
-        unsigned int bufshift = ISOFS_BUFFER_BITS(inode);
-        unsigned long bufmask = bufsize - 1;
-        int err = -EIO;
-        int i;
-        unsigned int header_size = ISOFS_I(inode)->i_format_parm[0];
         unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
-        /* unsigned long zisofs_block_size = 1UL << zisofs_block_shift; */
-        unsigned int zisofs_block_page_shift = zisofs_block_shift-PAGE_CACHE_SHIFT;
-        unsigned long zisofs_block_pages = 1UL << zisofs_block_page_shift;
-        unsigned long zisofs_block_page_mask = zisofs_block_pages-1;
-        struct page *pages[zisofs_block_pages];
-        unsigned long index = page->index;
-        int indexblocks;
-
-        /* We have already been given one page, this is the one
-           we must do. */
-        xpage = index & zisofs_block_page_mask;
-        pages[xpage] = page;
-
-        /* The remaining pages need to be allocated and inserted */
-        offset = index & ~zisofs_block_page_mask;
-        blockindex = offset >> zisofs_block_page_shift;
-        maxpage = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
-        /*
-         * If this page is wholly outside i_size we just return zero;
-         * do_generic_file_read() will handle this for us
-         */
-        if (page->index >= maxpage) {
-                SetPageUptodate(page);
-                unlock_page(page);
+        unsigned int bufsize = ISOFS_BUFFER_SIZE(inode);
+        unsigned int bufshift = ISOFS_BUFFER_BITS(inode);
+        unsigned int bufmask = bufsize - 1;
+        int i, block_size = block_end - block_start;
+        z_stream stream = { .total_out = 0,
+                            .avail_in = 0,
+                            .avail_out = 0, };
+        int zerr;
+        int needblocks = (block_size + (block_start & bufmask) + bufmask)
+                                >> bufshift;
+        int haveblocks;
+        blkcnt_t blocknum;
+        struct buffer_head *bhs[needblocks + 1];
+        int curbh, curpage;
+
+        if (block_size > deflateBound(1UL << zisofs_block_shift)) {
+                *errp = -EIO;
                 return 0;
         }
-
-        maxpage = min(zisofs_block_pages, maxpage-offset);
-
-        for ( i = 0 ; i < maxpage ; i++, offset++ ) {
-                if ( i != xpage ) {
-                        pages[i] = grab_cache_page_nowait(mapping, offset);
-                }
-                page = pages[i];
-                if ( page ) {
-                        ClearPageError(page);
-                        kmap(page);
+        /* Empty block? */
+        if (block_size == 0) {
+                for ( i = 0 ; i < pcount ; i++ ) {
+                        if (!pages[i])
+                                continue;
+                        memset(page_address(pages[i]), 0, PAGE_CACHE_SIZE);
+                        flush_dcache_page(pages[i]);
+                        SetPageUptodate(pages[i]);
                 }
+                return ((loff_t)pcount) << PAGE_CACHE_SHIFT;
         }
 
-        /* This is the last page filled, plus one; used in case of abort. */
-        fpage = 0;
+        /* Because zlib is not thread-safe, do all the I/O at the top. */
+        blocknum = block_start >> bufshift;
+        memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
+        haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
+        ll_rw_block(READ, haveblocks, bhs);
 
-        /* Find the pointer to this specific chunk */
-        /* Note: we're not using isonum_731() here because the data is known aligned */
-        /* Note: header_size is in 32-bit words (4 bytes) */
-        blockptr = (header_size + blockindex) << 2;
-        blockendptr = blockptr + 4;
+        curbh = 0;
+        curpage = 0;
+        /*
+         * First block is special since it may be fractional. We also wait for
+         * it before grabbing the zlib mutex; odds are that the subsequent
+         * blocks are going to come in in short order so we don't hold the zlib
+         * mutex longer than necessary.
+         */
 
-        indexblocks = ((blockptr^blockendptr) >> bufshift) ? 2 : 1;
-        ptrbh[0] = ptrbh[1] = NULL;
+        if (!bhs[0])
+                goto b_eio;
 
-        if ( isofs_get_blocks(inode, blockptr >> bufshift, ptrbh, indexblocks) != indexblocks ) {
-                if ( ptrbh[0] ) brelse(ptrbh[0]);
-                printk(KERN_DEBUG "zisofs: Null buffer on reading block table, inode = %lu, block = %lu\n",
-                       inode->i_ino, blockptr >> bufshift);
-                goto eio;
-        }
-        ll_rw_block(READ, indexblocks, ptrbh);
-
-        bh = ptrbh[0];
-        if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
-                printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
-                       inode->i_ino, blockptr >> bufshift);
-                if ( ptrbh[1] )
-                        brelse(ptrbh[1]);
-                goto eio;
-        }
-        cstart = le32_to_cpu(*(__le32 *)(bh->b_data + (blockptr & bufmask)));
-
-        if ( indexblocks == 2 ) {
-                /* We just crossed a block boundary.  Switch to the next block */
-                brelse(bh);
-                bh = ptrbh[1];
-                if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
-                        printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
-                               inode->i_ino, blockendptr >> bufshift);
-                        goto eio;
-                }
+        wait_on_buffer(bhs[0]);
+        if (!buffer_uptodate(bhs[0])) {
+                *errp = -EIO;
+                goto b_eio;
         }
-        cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
-        brelse(bh);
 
-        if (cstart > cend)
-                goto eio;
+        stream.workspace = zisofs_zlib_workspace;
+        mutex_lock(&zisofs_zlib_lock);
 
-        csize = cend-cstart;
-
-        if (csize > deflateBound(1UL << zisofs_block_shift))
-                goto eio;
-
-        /* Now page[] contains an array of pages, any of which can be NULL,
-           and the locks on which we hold.  We should now read the data and
-           release the pages.  If the pages are NULL the decompressed data
-           for that particular page should be discarded. */
-
-        if ( csize == 0 ) {
-                /* This data block is empty. */
-
-                for ( fpage = 0 ; fpage < maxpage ; fpage++ ) {
-                        if ( (page = pages[fpage]) != NULL ) {
-                                memset(page_address(page), 0, PAGE_CACHE_SIZE);
-
-                                flush_dcache_page(page);
-                                SetPageUptodate(page);
-                                kunmap(page);
-                                unlock_page(page);
-                                if ( fpage == xpage )
-                                        err = 0; /* The critical page */
-                                else
-                                        page_cache_release(page);
+        zerr = zlib_inflateInit(&stream);
+        if (zerr != Z_OK) {
+                if (zerr == Z_MEM_ERROR)
+                        *errp = -ENOMEM;
+                else
+                        *errp = -EIO;
+                printk(KERN_DEBUG "zisofs: zisofs_inflateInit returned %d\n",
+                       zerr);
+                goto z_eio;
+        }
+
+        while (curpage < pcount && curbh < haveblocks &&
+               zerr != Z_STREAM_END) {
+                if (!stream.avail_out) {
+                        if (pages[curpage]) {
+                                stream.next_out = page_address(pages[curpage])
+                                                + poffset;
+                                stream.avail_out = PAGE_CACHE_SIZE - poffset;
+                                poffset = 0;
+                        } else {
+                                stream.next_out = (void *)&zisofs_sink_page;
+                                stream.avail_out = PAGE_CACHE_SIZE;
                         }
                 }
-        } else {
-                /* This data block is compressed. */
-                z_stream stream;
-                int bail = 0, left_out = -1;
-                int zerr;
-                int needblocks = (csize + (cstart & bufmask) + bufmask) >> bufshift;
-                int haveblocks;
-                struct buffer_head *bhs[needblocks+1];
-                struct buffer_head **bhptr;
-
-                /* Because zlib is not thread-safe, do all the I/O at the top. */
-
-                blockptr = cstart >> bufshift;
-                memset(bhs, 0, (needblocks+1)*sizeof(struct buffer_head *));
-                haveblocks = isofs_get_blocks(inode, blockptr, bhs, needblocks);
-                ll_rw_block(READ, haveblocks, bhs);
-
-                bhptr = &bhs[0];
-                bh = *bhptr++;
-
-                /* First block is special since it may be fractional.
-                   We also wait for it before grabbing the zlib
-                   mutex; odds are that the subsequent blocks are
-                   going to come in in short order so we don't hold
-                   the zlib mutex longer than necessary. */
-
-                if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
-                        printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
-                               fpage, xpage, csize);
-                        goto b_eio;
-                }
-                stream.next_in = bh->b_data + (cstart & bufmask);
-                stream.avail_in = min(bufsize-(cstart & bufmask), csize);
-                csize -= stream.avail_in;
-
-                stream.workspace = zisofs_zlib_workspace;
-                mutex_lock(&zisofs_zlib_lock);
-
-                zerr = zlib_inflateInit(&stream);
-                if ( zerr != Z_OK ) {
-                        if ( err && zerr == Z_MEM_ERROR )
-                                err = -ENOMEM;
-                        printk(KERN_DEBUG "zisofs: zisofs_inflateInit returned %d\n",
-                               zerr);
-                        goto z_eio;
+                if (!stream.avail_in) {
+                        wait_on_buffer(bhs[curbh]);
+                        if (!buffer_uptodate(bhs[curbh])) {
+                                *errp = -EIO;
+                                break;
+                        }
+                        stream.next_in = bhs[curbh]->b_data +
+                                                (block_start & bufmask);
+                        stream.avail_in = min_t(unsigned, bufsize -
+                                                (block_start & bufmask),
+                                                block_size);
+                        block_size -= stream.avail_in;
+                        block_start = 0;
                 }
 
-                while ( !bail && fpage < maxpage ) {
-                        page = pages[fpage];
-                        if ( page )
-                                stream.next_out = page_address(page);
-                        else
-                                stream.next_out = (void *)&zisofs_sink_page;
-                        stream.avail_out = PAGE_CACHE_SIZE;
-
-                        while ( stream.avail_out ) {
-                                int ao, ai;
-                                if ( stream.avail_in == 0 && left_out ) {
-                                        if ( !csize ) {
-                                                printk(KERN_WARNING "zisofs: ZF read beyond end of input\n");
-                                                bail = 1;
-                                                break;
-                                        } else {
-                                                bh = *bhptr++;
-                                                if ( !bh ||
-                                                     (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
-                                                        /* Reached an EIO */
-                                                        printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
-                                                               fpage, xpage, csize);
-
-                                                        bail = 1;
-                                                        break;
-                                                }
-                                                stream.next_in = bh->b_data;
-                                                stream.avail_in = min(csize,bufsize);
-                                                csize -= stream.avail_in;
-                                        }
-                                }
-                                ao = stream.avail_out;  ai = stream.avail_in;
-                                zerr = zlib_inflate(&stream, Z_SYNC_FLUSH);
-                                left_out = stream.avail_out;
-                                if ( zerr == Z_BUF_ERROR && stream.avail_in == 0 )
-                                        continue;
-                                if ( zerr != Z_OK ) {
-                                        /* EOF, error, or trying to read beyond end of input */
-                                        if ( err && zerr == Z_MEM_ERROR )
-                                                err = -ENOMEM;
-                                        if ( zerr != Z_STREAM_END )
-                                                printk(KERN_DEBUG "zisofs: zisofs_inflate returned %d, inode = %lu, index = %lu, fpage = %d, xpage = %d, avail_in = %d, avail_out = %d, ai = %d, ao = %d\n",
-                                                       zerr, inode->i_ino, index,
-                                                       fpage, xpage,
-                                                       stream.avail_in, stream.avail_out,
-                                                       ai, ao);
-                                        bail = 1;
-                                        break;
+                while (stream.avail_out && stream.avail_in) {
+                        zerr = zlib_inflate(&stream, Z_SYNC_FLUSH);
+                        if (zerr == Z_BUF_ERROR && stream.avail_in == 0)
+                                break;
+                        if (zerr == Z_STREAM_END)
+                                break;
+                        if (zerr != Z_OK) {
+                                /* EOF, error, or trying to read beyond end of input */
+                                if (zerr == Z_MEM_ERROR)
+                                        *errp = -ENOMEM;
+                                else {
+                                        printk(KERN_DEBUG
+                                               "zisofs: zisofs_inflate returned"
+                                               " %d, inode = %lu,"
+                                               " page idx = %d, bh idx = %d,"
+                                               " avail_in = %d,"
+                                               " avail_out = %d\n",
+                                               zerr, inode->i_ino, curpage,
+                                               curbh, stream.avail_in,
+                                               stream.avail_out);
+                                        *errp = -EIO;
                                 }
+                                goto inflate_out;
                         }
+                }
 
-                        if ( stream.avail_out && zerr == Z_STREAM_END ) {
-                                /* Fractional page written before EOF.  This may
-                                   be the last page in the file. */
-                                memset(stream.next_out, 0, stream.avail_out);
-                                stream.avail_out = 0;
+                if (!stream.avail_out) {
+                        /* This page completed */
+                        if (pages[curpage]) {
+                                flush_dcache_page(pages[curpage]);
+                                SetPageUptodate(pages[curpage]);
                         }
+                        curpage++;
+                }
+                if (!stream.avail_in)
+                        curbh++;
+        }
+inflate_out:
+        zlib_inflateEnd(&stream);
 
-                        if ( !stream.avail_out ) {
-                                /* This page completed */
-                                if ( page ) {
-                                        flush_dcache_page(page);
-                                        SetPageUptodate(page);
-                                        kunmap(page);
-                                        unlock_page(page);
-                                        if ( fpage == xpage )
-                                                err = 0; /* The critical page */
-                                        else
-                                                page_cache_release(page);
-                                }
-                                fpage++;
-                        }
+z_eio:
+        mutex_unlock(&zisofs_zlib_lock);
+
+b_eio:
+        for (i = 0; i < haveblocks; i++)
+                brelse(bhs[i]);
+        return stream.total_out;
+}
+
+/*
+ * Uncompress data so that pages[full_page] is fully uptodate and possibly
+ * fills in other pages if we have data for them.
+ */
+static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
+                             struct page **pages)
+{
+        loff_t start_off, end_off;
+        loff_t block_start, block_end;
+        unsigned int header_size = ISOFS_I(inode)->i_format_parm[0];
+        unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
+        unsigned int blockptr;
+        loff_t poffset = 0;
+        blkcnt_t cstart_block, cend_block;
+        struct buffer_head *bh;
+        unsigned int blkbits = ISOFS_BUFFER_BITS(inode);
+        unsigned int blksize = 1 << blkbits;
+        int err;
+        loff_t ret;
+
+        BUG_ON(!pages[full_page]);
+
+        /*
+         * We want to read at least 'full_page' page. Because we have to
+         * uncompress the whole compression block anyway, fill the surrounding
+         * pages with the data we have anyway...
+         */
+        start_off = page_offset(pages[full_page]);
+        end_off = min_t(loff_t, start_off + PAGE_CACHE_SIZE, inode->i_size);
+
+        cstart_block = start_off >> zisofs_block_shift;
+        cend_block = (end_off + (1 << zisofs_block_shift) - 1)
+                        >> zisofs_block_shift;
+
+        WARN_ON(start_off - (full_page << PAGE_CACHE_SHIFT) !=
+                ((cstart_block << zisofs_block_shift) & PAGE_CACHE_MASK));
+
+        /* Find the pointer to this specific chunk */
+        /* Note: we're not using isonum_731() here because the data is known aligned */
+        /* Note: header_size is in 32-bit words (4 bytes) */
+        blockptr = (header_size + cstart_block) << 2;
+        bh = isofs_bread(inode, blockptr >> blkbits);
+        if (!bh)
+                return -EIO;
+        block_start = le32_to_cpu(*(__le32 *)
+                                (bh->b_data + (blockptr & (blksize - 1))));
+
+        while (cstart_block < cend_block && pcount > 0) {
+                /* Load end of the compressed block in the file */
+                blockptr += 4;
+                /* Traversed to next block? */
+                if (!(blockptr & (blksize - 1))) {
+                        brelse(bh);
+
+                        bh = isofs_bread(inode, blockptr >> blkbits);
+                        if (!bh)
+                                return -EIO;
+                }
+                block_end = le32_to_cpu(*(__le32 *)
+                                (bh->b_data + (blockptr & (blksize - 1))));
+                if (block_start > block_end) {
+                        brelse(bh);
+                        return -EIO;
+                }
+                err = 0;
+                ret = zisofs_uncompress_block(inode, block_start, block_end,
+                                              pcount, pages, poffset, &err);
+                poffset += ret;
+                pages += poffset >> PAGE_CACHE_SHIFT;
+                pcount -= poffset >> PAGE_CACHE_SHIFT;
+                full_page -= poffset >> PAGE_CACHE_SHIFT;
+                poffset &= ~PAGE_CACHE_MASK;
+
+                if (err) {
+                        brelse(bh);
+                        /*
+                         * Did we finish reading the page we really wanted
+                         * to read?
+                         */
+                        if (full_page < 0)
+                                return 0;
+                        return err;
                 }
-                zlib_inflateEnd(&stream);
 
-        z_eio:
-                mutex_unlock(&zisofs_zlib_lock);
+                block_start = block_end;
+                cstart_block++;
+        }
+
+        if (poffset && *pages) {
+                memset(page_address(*pages) + poffset, 0,
+                       PAGE_CACHE_SIZE - poffset);
+                flush_dcache_page(*pages);
+                SetPageUptodate(*pages);
+        }
+        return 0;
+}
 
-        b_eio:
-                for ( i = 0 ; i < haveblocks ; i++ ) {
-                        if ( bhs[i] )
-                                brelse(bhs[i]);
+/*
+ * When decompressing, we typically obtain more than one page
+ * per reference.  We inject the additional pages into the page
+ * cache as a form of readahead.
+ */
+static int zisofs_readpage(struct file *file, struct page *page)
+{
+        struct inode *inode = file->f_path.dentry->d_inode;
+        struct address_space *mapping = inode->i_mapping;
+        int err;
+        int i, pcount, full_page;
+        unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
+        unsigned int zisofs_pages_per_cblock =
+                PAGE_CACHE_SHIFT <= zisofs_block_shift ?
+                (1 << (zisofs_block_shift - PAGE_CACHE_SHIFT)) : 0;
+        struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
+        pgoff_t index = page->index, end_index;
+
+        end_index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+        /*
+         * If this page is wholly outside i_size we just return zero;
+         * do_generic_file_read() will handle this for us
+         */
+        if (index >= end_index) {
+                SetPageUptodate(page);
+                unlock_page(page);
+                return 0;
+        }
+
+        if (PAGE_CACHE_SHIFT <= zisofs_block_shift) {
+                /* We have already been given one page, this is the one
+                   we must do. */
+                full_page = index & (zisofs_pages_per_cblock - 1);
+                pcount = min_t(int, zisofs_pages_per_cblock,
+                        end_index - (index & ~(zisofs_pages_per_cblock - 1)));
+                index -= full_page;
+        } else {
+                full_page = 0;
+                pcount = 1;
+        }
+        pages[full_page] = page;
+
+        for (i = 0; i < pcount; i++, index++) {
+                if (i != full_page)
+                        pages[i] = grab_cache_page_nowait(mapping, index);
+                if (pages[i]) {
+                        ClearPageError(pages[i]);
+                        kmap(pages[i]);
                 }
         }
 
-eio:
+        err = zisofs_fill_pages(inode, full_page, pcount, pages);
 
         /* Release any residual pages, do not SetPageUptodate */
-        while ( fpage < maxpage ) {
-                page = pages[fpage];
-                if ( page ) {
-                        flush_dcache_page(page);
-                        if ( fpage == xpage )
-                                SetPageError(page);
-                        kunmap(page);
-                        unlock_page(page);
-                        if ( fpage != xpage )
-                                page_cache_release(page);
+        for (i = 0; i < pcount; i++) {
+                if (pages[i]) {
+                        flush_dcache_page(pages[i]);
+                        if (i == full_page && err)
+                                SetPageError(pages[i]);
+                        kunmap(pages[i]);
+                        unlock_page(pages[i]);
+                        if (i != full_page)
+                                page_cache_release(pages[i]);
                 }
-                fpage++;
         }
 
         /* At this point, err contains 0 or -EIO depending on the "critical" page */
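
For reference, the chunk lookup that zisofs_fill_pages() performs above can be exercised in isolation: the file begins with a header of header_size 32-bit words, followed by a table of little-endian 32-bit file offsets, where entry i marks the start of compressed block i and entry i+1 its end (equal entries denote a block of zeroes). A minimal userspace sketch under those assumptions; le32() and cblock_extent() are hypothetical helpers, not kernel API:

#include <stdint.h>
#include <stdio.h>

/* Decode a little-endian 32-bit value, as le32_to_cpu() does in the patch. */
static uint32_t le32(const unsigned char *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/*
 * Extent of compressed block i: same arithmetic as zisofs_fill_pages(),
 * i.e. blockptr = (header_size + i) << 2, entry i = start, entry i+1 = end.
 */
static uint32_t cblock_extent(const unsigned char *file,
                              unsigned int header_size, /* in 32-bit words */
                              unsigned int i, uint32_t *start)
{
        uint32_t blockptr = (header_size + i) << 2;

        *start = le32(file + blockptr);
        return le32(file + blockptr + 4) - *start;  /* 0 => all-zero block */
}

int main(void)
{
        /* Fabricated 4-word header plus a 3-entry pointer table:
           block 0 spans [28,100), block 1 is empty (a block of zeroes). */
        unsigned char file[28] = {
                0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0, 0,
                28, 0, 0, 0,  100, 0, 0, 0,  100, 0, 0, 0,
        };
        uint32_t start, len = cblock_extent(file, 4, 0, &start);

        printf("block 0: start %u, %u compressed bytes\n", start, len);
        return 0;
}

In the kernel code the table entries are read through buffer heads with le32_to_cpu() on __le32 pointers, and a zero-length extent takes the "Empty block?" path in zisofs_uncompress_block() that memsets the pages instead of inflating anything.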