Diffstat (limited to 'fs/isofs/compress.c')
-rw-r--r--  fs/isofs/compress.c  359
1 file changed, 359 insertions, 0 deletions
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
new file mode 100644
index 000000000000..fb42c3f3bf0d
--- /dev/null
+++ b/fs/isofs/compress.c
@@ -0,0 +1,359 @@
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2001 H. Peter Anvin - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 *   USA; either version 2 of the License, or (at your option) any later
 *   version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * linux/fs/isofs/compress.c
 *
 * Transparent decompression of files on an iso9660 filesystem
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/stat.h>
#include <linux/time.h>
#include <linux/iso_fs.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/nls.h>
#include <linux/ctype.h>
#include <linux/smp_lock.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/zlib.h>
#include <linux/buffer_head.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include "zisofs.h"

/* This should probably be global. */
static char zisofs_sink_page[PAGE_CACHE_SIZE];
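/*
 * Decompressed output for any page we fail to grab below is inflated
 * into this sink page and then simply discarded.
 */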

/*
 * This contains the zlib memory allocation and the mutex for the
 * allocation; this avoids failures at block-decompression time.
 */
static void *zisofs_zlib_workspace;
static struct semaphore zisofs_zlib_semaphore;

/*
 * When decompressing, we typically obtain more than one page
 * per reference.  We inject the additional pages into the page
 * cache as a form of readahead.
 */
static int zisofs_readpage(struct file *file, struct page *page)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
        unsigned int maxpage, xpage, fpage, blockindex;
        unsigned long offset;
        unsigned long blockptr, blockendptr, cstart, cend, csize;
        struct buffer_head *bh, *ptrbh[2];
        unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
        unsigned int bufshift = ISOFS_BUFFER_BITS(inode);
        unsigned long bufmask = bufsize - 1;
        int err = -EIO;
        int i;
        unsigned int header_size = ISOFS_I(inode)->i_format_parm[0];
        unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
        /* unsigned long zisofs_block_size = 1UL << zisofs_block_shift; */
        unsigned int zisofs_block_page_shift = zisofs_block_shift-PAGE_CACHE_SHIFT;
        unsigned long zisofs_block_pages = 1UL << zisofs_block_page_shift;
        unsigned long zisofs_block_page_mask = zisofs_block_pages-1;
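        /*
         * header_size and zisofs_block_shift are the zisofs format
         * parameters recorded in the isofs inode; one compressed block
         * covers 2^zisofs_block_page_shift page-cache pages.
         */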
        struct page *pages[zisofs_block_pages];
        unsigned long index = page->index;
        int indexblocks;

        /* We have already been given one page, this is the one
           we must do. */
        xpage = index & zisofs_block_page_mask;
        pages[xpage] = page;

        /* The remaining pages need to be allocated and inserted */
        offset = index & ~zisofs_block_page_mask;
        blockindex = offset >> zisofs_block_page_shift;
        maxpage = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        maxpage = min(zisofs_block_pages, maxpage-offset);
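        /*
         * maxpage is now the number of pages of this compressed block that
         * actually lie within the file, so we never touch pages past EOF.
         */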

        for ( i = 0 ; i < maxpage ; i++, offset++ ) {
                if ( i != xpage ) {
                        pages[i] = grab_cache_page_nowait(mapping, offset);
                }
                page = pages[i];
                if ( page ) {
                        ClearPageError(page);
                        kmap(page);
                }
        }

        /* This is the last page filled, plus one; used in case of abort. */
        fpage = 0;

        /* Find the pointer to this specific chunk */
        /* Note: we're not using isonum_731() here because the data is known aligned */
        /* Note: header_size is in 32-bit words (4 bytes) */
        blockptr = (header_size + blockindex) << 2;
        blockendptr = blockptr + 4;

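        /*
         * The two consecutive 32-bit block pointer entries we need may
         * straddle a device block boundary, in which case two buffer_heads
         * are required.
         */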
        indexblocks = ((blockptr^blockendptr) >> bufshift) ? 2 : 1;
        ptrbh[0] = ptrbh[1] = NULL;

        if ( isofs_get_blocks(inode, blockptr >> bufshift, ptrbh, indexblocks) != indexblocks ) {
                if ( ptrbh[0] ) brelse(ptrbh[0]);
                printk(KERN_DEBUG "zisofs: Null buffer on reading block table, inode = %lu, block = %lu\n",
                       inode->i_ino, blockptr >> bufshift);
                goto eio;
        }
        ll_rw_block(READ, indexblocks, ptrbh);

        bh = ptrbh[0];
        if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
                printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
                       inode->i_ino, blockptr >> bufshift);
                if ( ptrbh[1] )
                        brelse(ptrbh[1]);
                goto eio;
        }
        cstart = le32_to_cpu(*(__le32 *)(bh->b_data + (blockptr & bufmask)));

        if ( indexblocks == 2 ) {
                /* We just crossed a block boundary.  Switch to the next block */
                brelse(bh);
                bh = ptrbh[1];
                if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
                        printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
                               inode->i_ino, blockendptr >> bufshift);
                        goto eio;
                }
        }
        cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
        brelse(bh);

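        /*
         * The difference between two consecutive block pointer entries is
         * the compressed length of this block; a length of zero means the
         * block holds no compressed data and is handled as all zeroes below.
         */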
        csize = cend-cstart;

        /* Now page[] contains an array of pages, any of which can be NULL,
           and the locks on which we hold.  We should now read the data and
           release the pages.  If the pages are NULL the decompressed data
           for that particular page should be discarded. */

        if ( csize == 0 ) {
                /* This data block is empty. */

                for ( fpage = 0 ; fpage < maxpage ; fpage++ ) {
                        if ( (page = pages[fpage]) != NULL ) {
                                memset(page_address(page), 0, PAGE_CACHE_SIZE);

                                flush_dcache_page(page);
                                SetPageUptodate(page);
                                kunmap(page);
                                unlock_page(page);
                                if ( fpage == xpage )
                                        err = 0; /* The critical page */
                                else
                                        page_cache_release(page);
                        }
                }
        } else {
                /* This data block is compressed. */
                z_stream stream;
                int bail = 0, left_out = -1;
                int zerr;
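                /*
                 * Number of device blocks needed to cover the compressed
                 * data [cstart, cend), including the partial block that
                 * cstart may begin in.
                 */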
                int needblocks = (csize + (cstart & bufmask) + bufmask) >> bufshift;
                int haveblocks;
                struct buffer_head *bhs[needblocks+1];
                struct buffer_head **bhptr;

                /* Because zlib is not thread-safe, do all the I/O at the top. */

                blockptr = cstart >> bufshift;
                memset(bhs, 0, (needblocks+1)*sizeof(struct buffer_head *));
                haveblocks = isofs_get_blocks(inode, blockptr, bhs, needblocks);
                ll_rw_block(READ, haveblocks, bhs);

                bhptr = &bhs[0];
                bh = *bhptr++;

                /* First block is special since it may be fractional.
                   We also wait for it before grabbing the zlib
                   semaphore; odds are that the subsequent blocks are
                   going to come in in short order so we don't hold
                   the zlib semaphore longer than necessary. */

                if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
                        printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
                               fpage, xpage, csize);
                        goto b_eio;
                }
                stream.next_in = bh->b_data + (cstart & bufmask);
                stream.avail_in = min(bufsize-(cstart & bufmask), csize);
                csize -= stream.avail_in;

                stream.workspace = zisofs_zlib_workspace;
                down(&zisofs_zlib_semaphore);

                zerr = zlib_inflateInit(&stream);
                if ( zerr != Z_OK ) {
                        if ( err && zerr == Z_MEM_ERROR )
                                err = -ENOMEM;
                        printk(KERN_DEBUG "zisofs: zisofs_inflateInit returned %d\n",
                               zerr);
                        goto z_eio;
                }

                while ( !bail && fpage < maxpage ) {
                        page = pages[fpage];
                        if ( page )
                                stream.next_out = page_address(page);
                        else
                                stream.next_out = (void *)&zisofs_sink_page;
                        stream.avail_out = PAGE_CACHE_SIZE;

                        while ( stream.avail_out ) {
                                int ao, ai;
                                if ( stream.avail_in == 0 && left_out ) {
                                        if ( !csize ) {
                                                printk(KERN_WARNING "zisofs: ZF read beyond end of input\n");
                                                bail = 1;
                                                break;
                                        } else {
                                                bh = *bhptr++;
                                                if ( !bh ||
                                                     (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
                                                        /* Reached an EIO */
                                                        printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
                                                               fpage, xpage, csize);

                                                        bail = 1;
                                                        break;
                                                }
                                                stream.next_in = bh->b_data;
                                                stream.avail_in = min(csize,bufsize);
                                                csize -= stream.avail_in;
                                        }
                                }
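                                /*
                                 * Inflate whatever input we currently have;
                                 * Z_BUF_ERROR with the input exhausted just
                                 * means we go round the loop again and
                                 * refill it above.
                                 */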
                                ao = stream.avail_out; ai = stream.avail_in;
                                zerr = zlib_inflate(&stream, Z_SYNC_FLUSH);
                                left_out = stream.avail_out;
                                if ( zerr == Z_BUF_ERROR && stream.avail_in == 0 )
                                        continue;
                                if ( zerr != Z_OK ) {
                                        /* EOF, error, or trying to read beyond end of input */
                                        if ( err && zerr == Z_MEM_ERROR )
                                                err = -ENOMEM;
                                        if ( zerr != Z_STREAM_END )
                                                printk(KERN_DEBUG "zisofs: zisofs_inflate returned %d, inode = %lu, index = %lu, fpage = %d, xpage = %d, avail_in = %d, avail_out = %d, ai = %d, ao = %d\n",
                                                       zerr, inode->i_ino, index,
                                                       fpage, xpage,
                                                       stream.avail_in, stream.avail_out,
                                                       ai, ao);
                                        bail = 1;
                                        break;
                                }
                        }

                        if ( stream.avail_out && zerr == Z_STREAM_END ) {
                                /* Fractional page written before EOF.  This may
                                   be the last page in the file. */
                                memset(stream.next_out, 0, stream.avail_out);
                                stream.avail_out = 0;
                        }

                        if ( !stream.avail_out ) {
                                /* This page completed */
                                if ( page ) {
                                        flush_dcache_page(page);
                                        SetPageUptodate(page);
                                        kunmap(page);
                                        unlock_page(page);
                                        if ( fpage == xpage )
                                                err = 0; /* The critical page */
                                        else
                                                page_cache_release(page);
                                }
                                fpage++;
                        }
                }
                zlib_inflateEnd(&stream);

        z_eio:
                up(&zisofs_zlib_semaphore);

        b_eio:
                for ( i = 0 ; i < haveblocks ; i++ ) {
                        if ( bhs[i] )
                                brelse(bhs[i]);
                }
        }

eio:

        /* Release any residual pages, do not SetPageUptodate */
        while ( fpage < maxpage ) {
                page = pages[fpage];
                if ( page ) {
                        flush_dcache_page(page);
                        if ( fpage == xpage )
                                SetPageError(page);
                        kunmap(page);
                        unlock_page(page);
                        if ( fpage != xpage )
                                page_cache_release(page);
                }
                fpage++;
        }

        /* At this point, err contains 0 or -EIO depending on the "critical" page */
        return err;
}

struct address_space_operations zisofs_aops = {
        .readpage = zisofs_readpage,
        /* No sync_page operation supported? */
        /* No bmap operation supported */
};

static int initialized;

int __init zisofs_init(void)
{
        if ( initialized ) {
                printk("zisofs_init: called more than once\n");
                return 0;
        }

        zisofs_zlib_workspace = vmalloc(zlib_inflate_workspacesize());
        if ( !zisofs_zlib_workspace )
                return -ENOMEM;
        init_MUTEX(&zisofs_zlib_semaphore);
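        /*
         * A single preallocated workspace is shared by everyone; the
         * semaphore serializes its use, so only one block is ever being
         * decompressed at a time.
         */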

        initialized = 1;
        return 0;
}

void zisofs_cleanup(void)
{
        if ( !initialized ) {
                printk("zisofs_cleanup: called without initialization\n");
                return;
        }

        vfree(zisofs_zlib_workspace);
        initialized = 0;
}