-rw-r--r-- | include/linux/decompress/bunzip2.h | 10
-rw-r--r-- | include/linux/decompress/generic.h | 30
-rw-r--r-- | include/linux/decompress/inflate.h | 13
-rw-r--r-- | include/linux/decompress/mm.h | 87
-rw-r--r-- | include/linux/decompress/unlzma.h | 12
-rw-r--r-- | lib/decompress_bunzip2.c | 735
-rw-r--r-- | lib/decompress_inflate.c | 167
-rw-r--r-- | lib/decompress_unlzma.c | 647
-rw-r--r-- | lib/zlib_inflate/inflate.h | 4
-rw-r--r-- | lib/zlib_inflate/inftrees.h | 4
-rw-r--r-- | scripts/Makefile.lib | 14
-rw-r--r-- | scripts/bin_size | 10
12 files changed, 1733 insertions, 0 deletions
diff --git a/include/linux/decompress/bunzip2.h b/include/linux/decompress/bunzip2.h
new file mode 100644
index 000000000000..115272137a9c
--- /dev/null
+++ b/include/linux/decompress/bunzip2.h
@@ -0,0 +1,10 @@ | |||
1 | #ifndef DECOMPRESS_BUNZIP2_H | ||
2 | #define DECOMPRESS_BUNZIP2_H | ||
3 | |||
4 | int bunzip2(unsigned char *inbuf, int len, | ||
5 | int(*fill)(void*, unsigned int), | ||
6 | int(*flush)(void*, unsigned int), | ||
7 | unsigned char *output, | ||
8 | int *pos, | ||
9 | void(*error)(char *x)); | ||
10 | #endif | ||
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h
new file mode 100644
index 000000000000..f847f514f78e
--- /dev/null
+++ b/include/linux/decompress/generic.h
@@ -0,0 +1,30 @@ | |||
1 | #ifndef DECOMPRESS_GENERIC_H | ||
2 | #define DECOMPRESS_GENERIC_H | ||
3 | |||
4 | /* Minimal chunksize to be read. | ||
5 | *Bzip2 prefers at least 4096 | ||
6 | *Lzma prefers 0x10000 */ | ||
7 | #define COMPR_IOBUF_SIZE 4096 | ||
8 | |||
9 | typedef int (*decompress_fn) (unsigned char *inbuf, int len, | ||
10 | int(*fill)(void*, unsigned int), | ||
11 | int(*writebb)(void*, unsigned int), | ||
12 | unsigned char *output, | ||
13 | int *posp, | ||
14 | void(*error)(char *x)); | ||
15 | |||
16 | /* inbuf - input buffer | ||
17 | *len - len of pre-read data in inbuf | ||
18 | *fill - function to fill inbuf if empty | ||
19 | *writebb - function to write out the output buffer | ||
20 | *posp - if non-null, input position (number of bytes read) will be | ||
21 | * returned here | ||
22 | * | ||
23 | *If len != 0, the inbuf is already filled (with that much data), and fill | ||
24 | *should not be called | ||
25 | *If len = 0, the inbuf is allocated, but empty. Its size is COMPR_IOBUF_SIZE; | ||
26 | *fill should be called (repeatedly...) to read data, at most COMPR_IOBUF_SIZE bytes at a time | ||
27 | */ | ||
28 | |||
29 | |||
30 | #endif | ||
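To make the calling convention described above concrete, here is a minimal caller sketch, feeding a decompressor from one memory buffer into another. It is illustrative only and not part of this patch: the example_* names are invented, and real users (the initramfs unpacker, the pre-boot wrappers) supply their own callbacks and error handling.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/decompress/generic.h>

/* Hypothetical in-memory source and destination, for illustration only. */
static unsigned char *src, *dst;
static unsigned int src_len, src_pos, dst_pos;

static int example_fill(void *buf, unsigned int len)
{
	unsigned int n = min(len, src_len - src_pos);

	memcpy(buf, src + src_pos, n);
	src_pos += n;
	return n;			/* bytes now available in buf */
}

static int example_flush(void *buf, unsigned int len)
{
	memcpy(dst + dst_pos, buf, len);
	dst_pos += len;
	return len;			/* must equal len, or the decompressor aborts */
}

static void example_error(char *msg)
{
	/* A real caller would report msg and give up on the image. */
}

static int example_run(decompress_fn decompress)
{
	/* len == 0: the decompressor allocates its own input buffer and
	 * calls example_fill() repeatedly as the input runs dry. */
	return decompress(NULL, 0, example_fill, example_flush,
			  NULL, NULL, example_error);
}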
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h
new file mode 100644
index 000000000000..f9b06ccc3e5c
--- /dev/null
+++ b/include/linux/decompress/inflate.h
@@ -0,0 +1,13 @@ | |||
1 | #ifndef INFLATE_H | ||
2 | #define INFLATE_H | ||
3 | |||
4 | /* Other housekeeping constants */ | ||
5 | #define INBUFSIZ 4096 | ||
6 | |||
7 | int gunzip(unsigned char *inbuf, int len, | ||
8 | int(*fill)(void*, unsigned int), | ||
9 | int(*flush)(void*, unsigned int), | ||
10 | unsigned char *output, | ||
11 | int *pos, | ||
12 | void(*error_fn)(char *x)); | ||
13 | #endif | ||
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
new file mode 100644
index 000000000000..12ff8c3f1d05
--- /dev/null
+++ b/include/linux/decompress/mm.h
@@ -0,0 +1,87 @@ | |||
1 | /* | ||
2 | * linux/decompress/mm.h | ||
3 | * | ||
4 | * Memory management for pre-boot and ramdisk uncompressors | ||
5 | * | ||
6 | * Authors: Alain Knaff <alain@knaff.lu> | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #ifndef DECOMPR_MM_H | ||
11 | #define DECOMPR_MM_H | ||
12 | |||
13 | #ifdef STATIC | ||
14 | |||
15 | /* Code active when included from pre-boot environment: */ | ||
16 | |||
17 | /* A trivial malloc implementation, adapted from | ||
18 | * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 | ||
19 | */ | ||
20 | static unsigned long malloc_ptr; | ||
21 | static int malloc_count; | ||
22 | |||
23 | static void *malloc(int size) | ||
24 | { | ||
25 | void *p; | ||
26 | |||
27 | if (size < 0) | ||
28 | error("Malloc error"); | ||
29 | if (!malloc_ptr) | ||
30 | malloc_ptr = free_mem_ptr; | ||
31 | |||
32 | malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */ | ||
33 | |||
34 | p = (void *)malloc_ptr; | ||
35 | malloc_ptr += size; | ||
36 | |||
37 | if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) | ||
38 | error("Out of memory"); | ||
39 | |||
40 | malloc_count++; | ||
41 | return p; | ||
42 | } | ||
43 | |||
44 | static void free(void *where) | ||
45 | { | ||
46 | malloc_count--; | ||
47 | if (!malloc_count) | ||
48 | malloc_ptr = free_mem_ptr; | ||
49 | } | ||
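/* Added note (not part of the original patch): malloc()/free() above form a
 * simple bump allocator over [free_mem_ptr, free_mem_end_ptr).  free() never
 * releases an individual allocation; the arena is only reset, back to
 * free_mem_ptr, once every outstanding allocation has been freed
 * (malloc_count reaches zero), which is all the pre-boot decompressors need. */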
50 | |||
51 | #define large_malloc(a) malloc(a) | ||
52 | #define large_free(a) free(a) | ||
53 | |||
54 | #define set_error_fn(x) | ||
55 | |||
56 | #define INIT | ||
57 | |||
58 | #else /* STATIC */ | ||
59 | |||
60 | /* Code active when compiled standalone for use when loading ramdisk: */ | ||
61 | |||
62 | #include <linux/kernel.h> | ||
63 | #include <linux/fs.h> | ||
64 | #include <linux/string.h> | ||
65 | #include <linux/vmalloc.h> | ||
66 | |||
67 | /* Use defines rather than static inline in order to avoid spurious | ||
68 | * warnings when not needed (indeed large_malloc / large_free are not | ||
69 | * needed by inflate). */ | ||
70 | |||
71 | #define malloc(a) kmalloc(a, GFP_KERNEL) | ||
72 | #define free(a) kfree(a) | ||
73 | |||
74 | #define large_malloc(a) vmalloc(a) | ||
75 | #define large_free(a) vfree(a) | ||
76 | |||
77 | static void(*error)(char *m); | ||
78 | #define set_error_fn(x) error = x; | ||
79 | |||
80 | #define INIT __init | ||
81 | #define STATIC | ||
82 | |||
83 | #include <linux/init.h> | ||
84 | |||
85 | #endif /* STATIC */ | ||
86 | |||
87 | #endif /* DECOMPR_MM_H */ | ||
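As a rough sketch of how the STATIC branch above is meant to be consumed (illustrative only, not part of this patch; the include path and exact symbols are per-architecture choices): a pre-boot wrapper defines STATIC, provides the scratch-memory bounds and an error() routine, and then includes a decompressor's .c file directly, so the bump allocator above stands in for kmalloc()/vmalloc().

#define STATIC static

/* Scratch area for the bump allocator in decompress/mm.h; real values come
 * from the architecture's boot code or linker script (illustrative here). */
static unsigned long free_mem_ptr;
static unsigned long free_mem_end_ptr;	/* 0 means "no upper bound check" */

static void error(char *m)
{
	/* Typically: print m on the boot console and halt. */
}

/* Including (rather than linking) the decompressor makes it pick up the
 * pre-boot #ifdef STATIC half of decompress/mm.h. */
#include "../../lib/decompress_bunzip2.c"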
diff --git a/include/linux/decompress/unlzma.h b/include/linux/decompress/unlzma.h
new file mode 100644
index 000000000000..7796538f1bf4
--- /dev/null
+++ b/include/linux/decompress/unlzma.h
@@ -0,0 +1,12 @@ | |||
1 | #ifndef DECOMPRESS_UNLZMA_H | ||
2 | #define DECOMPRESS_UNLZMA_H | ||
3 | |||
4 | int unlzma(unsigned char *, int, | ||
5 | int(*fill)(void*, unsigned int), | ||
6 | int(*flush)(void*, unsigned int), | ||
7 | unsigned char *output, | ||
8 | int *posp, | ||
9 | void(*error)(char *x) | ||
10 | ); | ||
11 | |||
12 | #endif | ||
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
new file mode 100644
index 000000000000..5d3ddb5fcfd9
--- /dev/null
+++ b/lib/decompress_bunzip2.c
@@ -0,0 +1,735 @@ | |||
1 | /* vi: set sw = 4 ts = 4: */ | ||
2 | /* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net). | ||
3 | |||
4 | Based on bzip2 decompression code by Julian R Seward (jseward@acm.org), | ||
5 | which also acknowledges contributions by Mike Burrows, David Wheeler, | ||
6 | Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten, | ||
7 | Robert Sedgewick, and Jon L. Bentley. | ||
8 | |||
9 | This code is licensed under the LGPLv2: | ||
10 | LGPL (http://www.gnu.org/copyleft/lgpl.html) | ||
11 | */ | ||
12 | |||
13 | /* | ||
14 | Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org). | ||
15 | |||
16 | More efficient reading of Huffman codes, a streamlined read_bunzip() | ||
17 | function, and various other tweaks. In (limited) tests, approximately | ||
18 | 20% faster than bzcat on x86 and about 10% faster on arm. | ||
19 | |||
20 | Note that about 2/3 of the time is spent in read_bunzip() reversing | ||
21 | the Burrows-Wheeler transformation. Much of that time is delay | ||
22 | resulting from cache misses. | ||
23 | |||
24 | I would ask that anyone benefiting from this work, especially those | ||
25 | using it in commercial products, consider making a donation to my local | ||
26 | non-profit hospice organization in the name of the woman I loved, who | ||
27 | passed away Feb. 12, 2003. | ||
28 | |||
29 | In memory of Toni W. Hagan | ||
30 | |||
31 | Hospice of Acadiana, Inc. | ||
32 | 2600 Johnston St., Suite 200 | ||
33 | Lafayette, LA 70503-3240 | ||
34 | |||
35 | Phone (337) 232-1234 or 1-800-738-2226 | ||
36 | Fax (337) 232-1297 | ||
37 | |||
38 | http://www.hospiceacadiana.com/ | ||
39 | |||
40 | Manuel | ||
41 | */ | ||
42 | |||
43 | /* | ||
44 | Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu) | ||
45 | */ | ||
46 | |||
47 | |||
48 | #ifndef STATIC | ||
49 | #include <linux/decompress/bunzip2.h> | ||
50 | #endif /* !STATIC */ | ||
51 | |||
52 | #include <linux/decompress/mm.h> | ||
53 | |||
54 | #ifndef INT_MAX | ||
55 | #define INT_MAX 0x7fffffff | ||
56 | #endif | ||
57 | |||
58 | /* Constants for Huffman coding */ | ||
59 | #define MAX_GROUPS 6 | ||
60 | #define GROUP_SIZE 50 /* 64 would have been more efficient */ | ||
61 | #define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */ | ||
62 | #define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */ | ||
63 | #define SYMBOL_RUNA 0 | ||
64 | #define SYMBOL_RUNB 1 | ||
65 | |||
66 | /* Status return values */ | ||
67 | #define RETVAL_OK 0 | ||
68 | #define RETVAL_LAST_BLOCK (-1) | ||
69 | #define RETVAL_NOT_BZIP_DATA (-2) | ||
70 | #define RETVAL_UNEXPECTED_INPUT_EOF (-3) | ||
71 | #define RETVAL_UNEXPECTED_OUTPUT_EOF (-4) | ||
72 | #define RETVAL_DATA_ERROR (-5) | ||
73 | #define RETVAL_OUT_OF_MEMORY (-6) | ||
74 | #define RETVAL_OBSOLETE_INPUT (-7) | ||
75 | |||
76 | /* Other housekeeping constants */ | ||
77 | #define BZIP2_IOBUF_SIZE 4096 | ||
78 | |||
79 | /* This is what we know about each Huffman coding group */ | ||
80 | struct group_data { | ||
81 | /* We have an extra slot at the end of limit[] for a sentinel value. */ | ||
82 | int limit[MAX_HUFCODE_BITS+1]; | ||
83 | int base[MAX_HUFCODE_BITS]; | ||
84 | int permute[MAX_SYMBOLS]; | ||
85 | int minLen, maxLen; | ||
86 | }; | ||
87 | |||
88 | /* Structure holding all the housekeeping data, including IO buffers and | ||
89 | memory that persists between calls to bunzip */ | ||
90 | struct bunzip_data { | ||
91 | /* State for interrupting output loop */ | ||
92 | int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent; | ||
93 | /* I/O tracking data (file handles, buffers, positions, etc.) */ | ||
94 | int (*fill)(void*, unsigned int); | ||
95 | int inbufCount, inbufPos /*, outbufPos*/; | ||
96 | unsigned char *inbuf /*,*outbuf*/; | ||
97 | unsigned int inbufBitCount, inbufBits; | ||
98 | /* The CRC values stored in the block header and calculated from the | ||
99 | data */ | ||
100 | unsigned int crc32Table[256], headerCRC, totalCRC, writeCRC; | ||
101 | /* Intermediate buffer and its size (in bytes) */ | ||
102 | unsigned int *dbuf, dbufSize; | ||
103 | /* These things are a bit too big to go on the stack */ | ||
104 | unsigned char selectors[32768]; /* nSelectors = 15 bits */ | ||
105 | struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */ | ||
106 | int io_error; /* non-zero if we have IO error */ | ||
107 | }; | ||
108 | |||
109 | |||
110 | /* Return the next nnn bits of input. All reads from the compressed input | ||
111 | are done through this function. All reads are big endian */ | ||
112 | static unsigned int INIT get_bits(struct bunzip_data *bd, char bits_wanted) | ||
113 | { | ||
114 | unsigned int bits = 0; | ||
115 | |||
116 | /* If we need to get more data from the byte buffer, do so. | ||
117 | (Loop getting one byte at a time to enforce endianness and avoid | ||
118 | unaligned access.) */ | ||
119 | while (bd->inbufBitCount < bits_wanted) { | ||
120 | /* If we need to read more data from file into byte buffer, do | ||
121 | so */ | ||
122 | if (bd->inbufPos == bd->inbufCount) { | ||
123 | if (bd->io_error) | ||
124 | return 0; | ||
125 | bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE); | ||
126 | if (bd->inbufCount <= 0) { | ||
127 | bd->io_error = RETVAL_UNEXPECTED_INPUT_EOF; | ||
128 | return 0; | ||
129 | } | ||
130 | bd->inbufPos = 0; | ||
131 | } | ||
132 | /* Avoid 32-bit overflow (dump bit buffer to top of output) */ | ||
133 | if (bd->inbufBitCount >= 24) { | ||
134 | bits = bd->inbufBits&((1 << bd->inbufBitCount)-1); | ||
135 | bits_wanted -= bd->inbufBitCount; | ||
136 | bits <<= bits_wanted; | ||
137 | bd->inbufBitCount = 0; | ||
138 | } | ||
139 | /* Grab next 8 bits of input from buffer. */ | ||
140 | bd->inbufBits = (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++]; | ||
141 | bd->inbufBitCount += 8; | ||
142 | } | ||
143 | /* Calculate result */ | ||
144 | bd->inbufBitCount -= bits_wanted; | ||
145 | bits |= (bd->inbufBits >> bd->inbufBitCount)&((1 << bits_wanted)-1); | ||
146 | |||
147 | return bits; | ||
148 | } | ||
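/* Worked example (added for illustration, not part of the original patch):
 * bits are consumed most-significant-bit first.  If the next input byte is
 * 0xA5, get_bits(bd, 4) returns 0xA and leaves the low nibble 0x5 pending in
 * bd->inbufBits for the following call. */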
149 | |||
150 | /* Unpacks the next block and sets up for the inverse burrows-wheeler step. */ | ||
151 | |||
152 | static int INIT get_next_block(struct bunzip_data *bd) | ||
153 | { | ||
154 | struct group_data *hufGroup = NULL; | ||
155 | int *base = NULL; | ||
156 | int *limit = NULL; | ||
157 | int dbufCount, nextSym, dbufSize, groupCount, selector, | ||
158 | i, j, k, t, runPos, symCount, symTotal, nSelectors, | ||
159 | byteCount[256]; | ||
160 | unsigned char uc, symToByte[256], mtfSymbol[256], *selectors; | ||
161 | unsigned int *dbuf, origPtr; | ||
162 | |||
163 | dbuf = bd->dbuf; | ||
164 | dbufSize = bd->dbufSize; | ||
165 | selectors = bd->selectors; | ||
166 | |||
167 | /* Read in header signature and CRC, then validate signature. | ||
168 | (last block signature means CRC is for whole file, return now) */ | ||
169 | i = get_bits(bd, 24); | ||
170 | j = get_bits(bd, 24); | ||
171 | bd->headerCRC = get_bits(bd, 32); | ||
172 | if ((i == 0x177245) && (j == 0x385090)) | ||
173 | return RETVAL_LAST_BLOCK; | ||
174 | if ((i != 0x314159) || (j != 0x265359)) | ||
175 | return RETVAL_NOT_BZIP_DATA; | ||
176 | /* We can add support for blockRandomised if anybody complains. | ||
177 | There was some code for this in busybox 1.0.0-pre3, but nobody ever | ||
178 | noticed that it didn't actually work. */ | ||
179 | if (get_bits(bd, 1)) | ||
180 | return RETVAL_OBSOLETE_INPUT; | ||
181 | origPtr = get_bits(bd, 24); | ||
182 | if (origPtr > dbufSize) | ||
183 | return RETVAL_DATA_ERROR; | ||
184 | /* mapping table: if some byte values are never used (encoding things | ||
185 | like ascii text), the compression code removes the gaps to have fewer | ||
186 | symbols to deal with, and writes a sparse bitfield indicating which | ||
187 | values were present. We make a translation table to convert the | ||
188 | symbols back to the corresponding bytes. */ | ||
189 | t = get_bits(bd, 16); | ||
190 | symTotal = 0; | ||
191 | for (i = 0; i < 16; i++) { | ||
192 | if (t&(1 << (15-i))) { | ||
193 | k = get_bits(bd, 16); | ||
194 | for (j = 0; j < 16; j++) | ||
195 | if (k&(1 << (15-j))) | ||
196 | symToByte[symTotal++] = (16*i)+j; | ||
197 | } | ||
198 | } | ||
199 | /* How many different Huffman coding groups does this block use? */ | ||
200 | groupCount = get_bits(bd, 3); | ||
201 | if (groupCount < 2 || groupCount > MAX_GROUPS) | ||
202 | return RETVAL_DATA_ERROR; | ||
203 | /* nSelectors: Every GROUP_SIZE many symbols we select a new | ||
204 | Huffman coding group. Read in the group selector list, | ||
205 | which is stored as MTF encoded bit runs. (MTF = Move To | ||
206 | Front, as each value is used it's moved to the start of the | ||
207 | list.) */ | ||
208 | nSelectors = get_bits(bd, 15); | ||
209 | if (!nSelectors) | ||
210 | return RETVAL_DATA_ERROR; | ||
211 | for (i = 0; i < groupCount; i++) | ||
212 | mtfSymbol[i] = i; | ||
213 | for (i = 0; i < nSelectors; i++) { | ||
214 | /* Get next value */ | ||
215 | for (j = 0; get_bits(bd, 1); j++) | ||
216 | if (j >= groupCount) | ||
217 | return RETVAL_DATA_ERROR; | ||
218 | /* Decode MTF to get the next selector */ | ||
219 | uc = mtfSymbol[j]; | ||
220 | for (; j; j--) | ||
221 | mtfSymbol[j] = mtfSymbol[j-1]; | ||
222 | mtfSymbol[0] = selectors[i] = uc; | ||
223 | } | ||
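/* Worked example (added for illustration, not part of the original patch):
 * with groupCount == 3 the MTF list starts as {0, 1, 2}.  The bit run "110"
 * gives j == 2, so the selector is mtfSymbol[2] == 2 and the list is rotated
 * to {2, 0, 1}; a following single "0" bit (j == 0) would then select group 2
 * again, now at the front of the list. */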
224 | /* Read the Huffman coding tables for each group, which code | ||
225 | for symTotal literal symbols, plus two run symbols (RUNA, | ||
226 | RUNB) */ | ||
227 | symCount = symTotal+2; | ||
228 | for (j = 0; j < groupCount; j++) { | ||
229 | unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1]; | ||
230 | int minLen, maxLen, pp; | ||
231 | /* Read Huffman code lengths for each symbol. They're | ||
232 | stored in a way similar to mtf; record a starting | ||
233 | value for the first symbol, and an offset from the | ||
234 | previous value for every symbol after that. | ||
235 | (Subtracting 1 before the loop and then adding it | ||
236 | back at the end is an optimization that makes the | ||
237 | test inside the loop simpler: symbol length 0 | ||
238 | becomes negative, so an unsigned inequality catches | ||
239 | it.) */ | ||
240 | t = get_bits(bd, 5)-1; | ||
241 | for (i = 0; i < symCount; i++) { | ||
242 | for (;;) { | ||
243 | if (((unsigned)t) > (MAX_HUFCODE_BITS-1)) | ||
244 | return RETVAL_DATA_ERROR; | ||
245 | |||
246 | /* If first bit is 0, stop. Else | ||
247 | second bit indicates whether to | ||
248 | increment or decrement the value. | ||
249 | Optimization: grab 2 bits and unget | ||
250 | the second if the first was 0. */ | ||
251 | |||
252 | k = get_bits(bd, 2); | ||
253 | if (k < 2) { | ||
254 | bd->inbufBitCount++; | ||
255 | break; | ||
256 | } | ||
257 | /* Add one if second bit 1, else | ||
258 | * subtract 1. Avoids if/else */ | ||
259 | t += (((k+1)&2)-1); | ||
260 | } | ||
261 | /* Correct for the initial -1, to get the | ||
262 | * final symbol length */ | ||
263 | length[i] = t+1; | ||
264 | } | ||
265 | /* Find largest and smallest lengths in this group */ | ||
266 | minLen = maxLen = length[0]; | ||
267 | |||
268 | for (i = 1; i < symCount; i++) { | ||
269 | if (length[i] > maxLen) | ||
270 | maxLen = length[i]; | ||
271 | else if (length[i] < minLen) | ||
272 | minLen = length[i]; | ||
273 | } | ||
274 | |||
275 | /* Calculate permute[], base[], and limit[] tables from | ||
276 | * length[]. | ||
277 | * | ||
278 | * permute[] is the lookup table for converting | ||
279 | * Huffman coded symbols into decoded symbols. base[] | ||
280 | * is the amount to subtract from the value of a | ||
281 | * Huffman symbol of a given length when using | ||
282 | * permute[]. | ||
283 | * | ||
284 | * limit[] indicates the largest numerical value a | ||
285 | * symbol with a given number of bits can have. This | ||
286 | * is how the Huffman codes can vary in length: each | ||
287 | * code with a value > limit[length] needs another | ||
288 | * bit. | ||
289 | */ | ||
290 | hufGroup = bd->groups+j; | ||
291 | hufGroup->minLen = minLen; | ||
292 | hufGroup->maxLen = maxLen; | ||
293 | /* Note that minLen can't be smaller than 1, so we | ||
294 | adjust the base and limit array pointers so we're | ||
295 | not always wasting the first entry. We do this | ||
296 | again when using them (during symbol decoding).*/ | ||
297 | base = hufGroup->base-1; | ||
298 | limit = hufGroup->limit-1; | ||
299 | /* Calculate permute[]. Concurrently, initialize | ||
300 | * temp[] and limit[]. */ | ||
301 | pp = 0; | ||
302 | for (i = minLen; i <= maxLen; i++) { | ||
303 | temp[i] = limit[i] = 0; | ||
304 | for (t = 0; t < symCount; t++) | ||
305 | if (length[t] == i) | ||
306 | hufGroup->permute[pp++] = t; | ||
307 | } | ||
308 | /* Count symbols coded for at each bit length */ | ||
309 | for (i = 0; i < symCount; i++) | ||
310 | temp[length[i]]++; | ||
311 | /* Calculate limit[] (the largest symbol-coding value | ||
312 | *at each bit length, which is (previous limit << | ||
313 | *1)+symbols at this level), and base[] (number of | ||
314 | *symbols to ignore at each bit length, which is limit | ||
315 | *minus the cumulative count of symbols coded for | ||
316 | *already). */ | ||
317 | pp = t = 0; | ||
318 | for (i = minLen; i < maxLen; i++) { | ||
319 | pp += temp[i]; | ||
320 | /* We read the largest possible symbol size | ||
321 | and then unget bits after determining how | ||
322 | many we need, and those extra bits could be | ||
323 | set to anything. (They're noise from | ||
324 | future symbols.) At each level we're | ||
325 | really only interested in the first few | ||
326 | bits, so here we set all the trailing | ||
327 | to-be-ignored bits to 1 so they don't | ||
328 | affect the value > limit[length] | ||
329 | comparison. */ | ||
330 | limit[i] = (pp << (maxLen - i)) - 1; | ||
331 | pp <<= 1; | ||
332 | base[i+1] = pp-(t += temp[i]); | ||
333 | } | ||
334 | limit[maxLen+1] = INT_MAX; /* Sentinel value for | ||
335 | * reading next sym. */ | ||
336 | limit[maxLen] = pp+temp[maxLen]-1; | ||
337 | base[minLen] = 0; | ||
338 | } | ||
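/* Worked example (added for illustration, not part of the original patch):
 * with symCount == 4 and code lengths {1, 2, 3, 3}, permute[] becomes
 * {0, 1, 2, 3} and, as seen through a maxLen == 3 bit read, limit[1..3] ==
 * {3, 5, 7} and base[1..3] == {0, 1, 4}.  Reading the three bits "110" gives
 * j == 6; the first length with j <= limit[i] is i == 3, so no bits are
 * ungotten and the decoded symbol is permute[(6 >> 0) - base[3]] == permute[2]. */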
339 | /* We've finished reading and digesting the block header. Now | ||
340 | read this block's Huffman coded symbols from the file and | ||
341 | undo the Huffman coding and run length encoding, saving the | ||
342 | result into dbuf[dbufCount++] = uc */ | ||
343 | |||
344 | /* Initialize symbol occurrence counters and symbol Move To | ||
345 | * Front table */ | ||
346 | for (i = 0; i < 256; i++) { | ||
347 | byteCount[i] = 0; | ||
348 | mtfSymbol[i] = (unsigned char)i; | ||
349 | } | ||
350 | /* Loop through compressed symbols. */ | ||
351 | runPos = dbufCount = symCount = selector = 0; | ||
352 | for (;;) { | ||
353 | /* Determine which Huffman coding group to use. */ | ||
354 | if (!(symCount--)) { | ||
355 | symCount = GROUP_SIZE-1; | ||
356 | if (selector >= nSelectors) | ||
357 | return RETVAL_DATA_ERROR; | ||
358 | hufGroup = bd->groups+selectors[selector++]; | ||
359 | base = hufGroup->base-1; | ||
360 | limit = hufGroup->limit-1; | ||
361 | } | ||
362 | /* Read next Huffman-coded symbol. */ | ||
363 | /* Note: It is far cheaper to read maxLen bits and | ||
364 | back up than it is to read minLen bits and then an | ||
365 | additional bit at a time, testing as we go. | ||
366 | Because there is a trailing last block (with file | ||
367 | CRC), there is no danger of the overread causing an | ||
368 | unexpected EOF for a valid compressed file. As a | ||
369 | further optimization, we do the read inline | ||
370 | (falling back to a call to get_bits if the buffer | ||
371 | runs dry). The following (up to got_huff_bits:) is | ||
372 | equivalent to j = get_bits(bd, hufGroup->maxLen); | ||
373 | */ | ||
374 | while (bd->inbufBitCount < hufGroup->maxLen) { | ||
375 | if (bd->inbufPos == bd->inbufCount) { | ||
376 | j = get_bits(bd, hufGroup->maxLen); | ||
377 | goto got_huff_bits; | ||
378 | } | ||
379 | bd->inbufBits = | ||
380 | (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++]; | ||
381 | bd->inbufBitCount += 8; | ||
382 | }; | ||
383 | bd->inbufBitCount -= hufGroup->maxLen; | ||
384 | j = (bd->inbufBits >> bd->inbufBitCount)& | ||
385 | ((1 << hufGroup->maxLen)-1); | ||
386 | got_huff_bits: | ||
387 | /* Figure out how many bits are in next symbol and | ||
388 | * unget extras */ | ||
389 | i = hufGroup->minLen; | ||
390 | while (j > limit[i]) | ||
391 | ++i; | ||
392 | bd->inbufBitCount += (hufGroup->maxLen - i); | ||
393 | /* Huffman decode value to get nextSym (with bounds checking) */ | ||
394 | if ((i > hufGroup->maxLen) | ||
395 | || (((unsigned)(j = (j>>(hufGroup->maxLen-i))-base[i])) | ||
396 | >= MAX_SYMBOLS)) | ||
397 | return RETVAL_DATA_ERROR; | ||
398 | nextSym = hufGroup->permute[j]; | ||
399 | /* We have now decoded the symbol, which indicates | ||
400 | either a new literal byte, or a repeated run of the | ||
401 | most recent literal byte. First, check if nextSym | ||
402 | indicates a repeated run, and if so loop collecting | ||
403 | how many times to repeat the last literal. */ | ||
404 | if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */ | ||
405 | /* If this is the start of a new run, zero out | ||
406 | * counter */ | ||
407 | if (!runPos) { | ||
408 | runPos = 1; | ||
409 | t = 0; | ||
410 | } | ||
411 | /* Neat trick that saves 1 symbol: instead of | ||
412 | or-ing 0 or 1 at each bit position, add 1 | ||
413 | or 2 instead. For example, 1011 is 1 << 0 | ||
414 | + 1 << 1 + 2 << 2. 1010 is 2 << 0 + 2 << 1 | ||
415 | + 1 << 2. You can make any bit pattern | ||
416 | that way using 1 less symbol than the basic | ||
417 | or 0/1 method (except all bits 0, which | ||
418 | would use no symbols, but a run of length 0 | ||
419 | doesn't mean anything in this context). | ||
420 | Thus space is saved. */ | ||
421 | t += (runPos << nextSym); | ||
422 | /* +runPos if RUNA; +2*runPos if RUNB */ | ||
423 | |||
424 | runPos <<= 1; | ||
425 | continue; | ||
426 | } | ||
427 | /* When we hit the first non-run symbol after a run, | ||
428 | we now know how many times to repeat the last | ||
429 | literal, so append that many copies to our buffer | ||
430 | of decoded symbols (dbuf) now. (The last literal | ||
431 | used is the one at the head of the mtfSymbol | ||
432 | array.) */ | ||
433 | if (runPos) { | ||
434 | runPos = 0; | ||
435 | if (dbufCount+t >= dbufSize) | ||
436 | return RETVAL_DATA_ERROR; | ||
437 | |||
438 | uc = symToByte[mtfSymbol[0]]; | ||
439 | byteCount[uc] += t; | ||
440 | while (t--) | ||
441 | dbuf[dbufCount++] = uc; | ||
442 | } | ||
443 | /* Is this the terminating symbol? */ | ||
444 | if (nextSym > symTotal) | ||
445 | break; | ||
446 | /* At this point, nextSym indicates a new literal | ||
447 | character. Subtract one to get the position in the | ||
448 | MTF array at which this literal is currently to be | ||
449 | found. (Note that the result can't be -1 or 0, | ||
450 | because 0 and 1 are RUNA and RUNB. But another | ||
451 | instance of the first symbol in the mtf array, | ||
452 | position 0, would have been handled as part of a | ||
453 | run above. Therefore 1 unused mtf position minus 2 | ||
454 | non-literal nextSym values equals -1.) */ | ||
455 | if (dbufCount >= dbufSize) | ||
456 | return RETVAL_DATA_ERROR; | ||
457 | i = nextSym - 1; | ||
458 | uc = mtfSymbol[i]; | ||
459 | /* Adjust the MTF array. Since we typically expect to | ||
460 | *move only a small number of symbols, and are bound | ||
461 | *by 256 in any case, using memmove here would | ||
462 | *typically be bigger and slower due to function call | ||
463 | *overhead and other assorted setup costs. */ | ||
464 | do { | ||
465 | mtfSymbol[i] = mtfSymbol[i-1]; | ||
466 | } while (--i); | ||
467 | mtfSymbol[0] = uc; | ||
468 | uc = symToByte[uc]; | ||
469 | /* We have our literal byte. Save it into dbuf. */ | ||
470 | byteCount[uc]++; | ||
471 | dbuf[dbufCount++] = (unsigned int)uc; | ||
472 | } | ||
473 | /* At this point, we've read all the Huffman-coded symbols | ||
474 | (and repeated runs) for this block from the input stream, | ||
475 | and decoded them into the intermediate buffer. There are | ||
476 | dbufCount many decoded bytes in dbuf[]. Now undo the | ||
477 | Burrows-Wheeler transform on dbuf. See | ||
478 | http://dogma.net/markn/articles/bwt/bwt.htm | ||
479 | */ | ||
480 | /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */ | ||
481 | j = 0; | ||
482 | for (i = 0; i < 256; i++) { | ||
483 | k = j+byteCount[i]; | ||
484 | byteCount[i] = j; | ||
485 | j = k; | ||
486 | } | ||
487 | /* Figure out what order dbuf would be in if we sorted it. */ | ||
488 | for (i = 0; i < dbufCount; i++) { | ||
489 | uc = (unsigned char)(dbuf[i] & 0xff); | ||
490 | dbuf[byteCount[uc]] |= (i << 8); | ||
491 | byteCount[uc]++; | ||
492 | } | ||
493 | /* Decode first byte by hand to initialize "previous" byte. | ||
494 | Note that it doesn't get output, and if the first three | ||
495 | characters are identical it doesn't qualify as a run (hence | ||
496 | writeRunCountdown = 5). */ | ||
497 | if (dbufCount) { | ||
498 | if (origPtr >= dbufCount) | ||
499 | return RETVAL_DATA_ERROR; | ||
500 | bd->writePos = dbuf[origPtr]; | ||
501 | bd->writeCurrent = (unsigned char)(bd->writePos&0xff); | ||
502 | bd->writePos >>= 8; | ||
503 | bd->writeRunCountdown = 5; | ||
504 | } | ||
505 | bd->writeCount = dbufCount; | ||
506 | |||
507 | return RETVAL_OK; | ||
508 | } | ||
509 | |||
510 | /* Undo burrows-wheeler transform on intermediate buffer to produce output. | ||
511 | Up to len bytes of decoded data are written to outbuf. The return value | ||
512 | is the number of bytes written, or an error code (all error codes are | ||
513 | negative numbers). Once the final block has been consumed, subsequent | ||
514 | calls return RETVAL_LAST_BLOCK. | ||
515 | */ | ||
516 | |||
517 | static int INIT read_bunzip(struct bunzip_data *bd, char *outbuf, int len) | ||
518 | { | ||
519 | const unsigned int *dbuf; | ||
520 | int pos, xcurrent, previous, gotcount; | ||
521 | |||
522 | /* If last read was short due to end of file, return last block now */ | ||
523 | if (bd->writeCount < 0) | ||
524 | return bd->writeCount; | ||
525 | |||
526 | gotcount = 0; | ||
527 | dbuf = bd->dbuf; | ||
528 | pos = bd->writePos; | ||
529 | xcurrent = bd->writeCurrent; | ||
530 | |||
531 | /* We will always have pending decoded data to write into the output | ||
532 | buffer unless this is the very first call (in which case we haven't | ||
533 | Huffman-decoded a block into the intermediate buffer yet). */ | ||
534 | |||
535 | if (bd->writeCopies) { | ||
536 | /* Inside the loop, writeCopies means extra copies (beyond 1) */ | ||
537 | --bd->writeCopies; | ||
538 | /* Loop outputting bytes */ | ||
539 | for (;;) { | ||
540 | /* If the output buffer is full, snapshot | ||
541 | * state and return */ | ||
542 | if (gotcount >= len) { | ||
543 | bd->writePos = pos; | ||
544 | bd->writeCurrent = xcurrent; | ||
545 | bd->writeCopies++; | ||
546 | return len; | ||
547 | } | ||
548 | /* Write next byte into output buffer, updating CRC */ | ||
549 | outbuf[gotcount++] = xcurrent; | ||
550 | bd->writeCRC = (((bd->writeCRC) << 8) | ||
551 | ^bd->crc32Table[((bd->writeCRC) >> 24) | ||
552 | ^xcurrent]); | ||
553 | /* Loop now if we're outputting multiple | ||
554 | * copies of this byte */ | ||
555 | if (bd->writeCopies) { | ||
556 | --bd->writeCopies; | ||
557 | continue; | ||
558 | } | ||
559 | decode_next_byte: | ||
560 | if (!bd->writeCount--) | ||
561 | break; | ||
562 | /* Follow sequence vector to undo | ||
563 | * Burrows-Wheeler transform */ | ||
564 | previous = xcurrent; | ||
565 | pos = dbuf[pos]; | ||
566 | xcurrent = pos&0xff; | ||
567 | pos >>= 8; | ||
568 | /* After 3 consecutive copies of the same | ||
569 | byte, the 4th is a repeat count. We count | ||
570 | down from 4 instead *of counting up because | ||
571 | testing for non-zero is faster */ | ||
572 | if (--bd->writeRunCountdown) { | ||
573 | if (xcurrent != previous) | ||
574 | bd->writeRunCountdown = 4; | ||
575 | } else { | ||
576 | /* We have a repeated run, this byte | ||
577 | * indicates the count */ | ||
578 | bd->writeCopies = xcurrent; | ||
579 | xcurrent = previous; | ||
580 | bd->writeRunCountdown = 5; | ||
581 | /* Sometimes there are just 3 bytes | ||
582 | * (run length 0) */ | ||
583 | if (!bd->writeCopies) | ||
584 | goto decode_next_byte; | ||
585 | /* Subtract the 1 copy we'd output | ||
586 | * anyway to get extras */ | ||
587 | --bd->writeCopies; | ||
588 | } | ||
589 | } | ||
590 | /* Decompression of this block completed successfully */ | ||
591 | bd->writeCRC = ~bd->writeCRC; | ||
592 | bd->totalCRC = ((bd->totalCRC << 1) | | ||
593 | (bd->totalCRC >> 31)) ^ bd->writeCRC; | ||
594 | /* If this block had a CRC error, force file level CRC error. */ | ||
595 | if (bd->writeCRC != bd->headerCRC) { | ||
596 | bd->totalCRC = bd->headerCRC+1; | ||
597 | return RETVAL_LAST_BLOCK; | ||
598 | } | ||
599 | } | ||
600 | |||
601 | /* Refill the intermediate buffer by Huffman-decoding next | ||
602 | * block of input */ | ||
603 | /* (previous is just a convenient unused temp variable here) */ | ||
604 | previous = get_next_block(bd); | ||
605 | if (previous) { | ||
606 | bd->writeCount = previous; | ||
607 | return (previous != RETVAL_LAST_BLOCK) ? previous : gotcount; | ||
608 | } | ||
609 | bd->writeCRC = 0xffffffffUL; | ||
610 | pos = bd->writePos; | ||
611 | xcurrent = bd->writeCurrent; | ||
612 | goto decode_next_byte; | ||
613 | } | ||
614 | |||
615 | static int INIT nofill(void *buf, unsigned int len) | ||
616 | { | ||
617 | return -1; | ||
618 | } | ||
619 | |||
620 | /* Allocate the structure, read the stream header. If fill is NULL, inbuf must | ||
621 | contain a complete bzip2 stream (len bytes long). Otherwise fill will be | ||
622 | called (repeatedly) to refill inbuf whenever it runs dry. */ | ||
623 | static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len, | ||
624 | int (*fill)(void*, unsigned int)) | ||
625 | { | ||
626 | struct bunzip_data *bd; | ||
627 | unsigned int i, j, c; | ||
628 | const unsigned int BZh0 = | ||
629 | (((unsigned int)'B') << 24)+(((unsigned int)'Z') << 16) | ||
630 | +(((unsigned int)'h') << 8)+(unsigned int)'0'; | ||
631 | |||
632 | /* Figure out how much data to allocate */ | ||
633 | i = sizeof(struct bunzip_data); | ||
634 | |||
635 | /* Allocate bunzip_data. Most fields initialize to zero. */ | ||
636 | bd = *bdp = malloc(i); | ||
637 | memset(bd, 0, sizeof(struct bunzip_data)); | ||
638 | /* Setup input buffer */ | ||
639 | bd->inbuf = inbuf; | ||
640 | bd->inbufCount = len; | ||
641 | if (fill != NULL) | ||
642 | bd->fill = fill; | ||
643 | else | ||
644 | bd->fill = nofill; | ||
645 | |||
646 | /* Init the CRC32 table (big endian) */ | ||
647 | for (i = 0; i < 256; i++) { | ||
648 | c = i << 24; | ||
649 | for (j = 8; j; j--) | ||
650 | c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1); | ||
651 | bd->crc32Table[i] = c; | ||
652 | } | ||
653 | |||
654 | /* Ensure that file starts with "BZh['1'-'9']." */ | ||
655 | i = get_bits(bd, 32); | ||
656 | if (((unsigned int)(i-BZh0-1)) >= 9) | ||
657 | return RETVAL_NOT_BZIP_DATA; | ||
658 | |||
659 | /* Fourth byte (ascii '1'-'9') indicates block size in units of 100k of | ||
660 | uncompressed data. Allocate intermediate buffer for block. */ | ||
661 | bd->dbufSize = 100000*(i-BZh0); | ||
662 | |||
663 | bd->dbuf = large_malloc(bd->dbufSize * sizeof(int)); | ||
664 | return RETVAL_OK; | ||
665 | } | ||
666 | |||
667 | /* Decompress a buffer or a fill()-fed stream, writing the result through | ||
668 | flush() or into outbuf. (Stops at end of bzip2 data, not end of input.) */ | ||
669 | STATIC int INIT bunzip2(unsigned char *buf, int len, | ||
670 | int(*fill)(void*, unsigned int), | ||
671 | int(*flush)(void*, unsigned int), | ||
672 | unsigned char *outbuf, | ||
673 | int *pos, | ||
674 | void(*error_fn)(char *x)) | ||
675 | { | ||
676 | struct bunzip_data *bd; | ||
677 | int i = -1; | ||
678 | unsigned char *inbuf; | ||
679 | |||
680 | set_error_fn(error_fn); | ||
681 | if (flush) | ||
682 | outbuf = malloc(BZIP2_IOBUF_SIZE); | ||
683 | else | ||
684 | len -= 4; /* Uncompressed size hack active in pre-boot | ||
685 | environment */ | ||
686 | if (!outbuf) { | ||
687 | error("Could not allocate output bufer"); | ||
688 | return -1; | ||
689 | } | ||
690 | if (buf) | ||
691 | inbuf = buf; | ||
692 | else | ||
693 | inbuf = malloc(BZIP2_IOBUF_SIZE); | ||
694 | if (!inbuf) { | ||
695 | error("Could not allocate input bufer"); | ||
696 | goto exit_0; | ||
697 | } | ||
698 | i = start_bunzip(&bd, inbuf, len, fill); | ||
699 | if (!i) { | ||
700 | for (;;) { | ||
701 | i = read_bunzip(bd, outbuf, BZIP2_IOBUF_SIZE); | ||
702 | if (i <= 0) | ||
703 | break; | ||
704 | if (!flush) | ||
705 | outbuf += i; | ||
706 | else | ||
707 | if (i != flush(outbuf, i)) { | ||
708 | i = RETVAL_UNEXPECTED_OUTPUT_EOF; | ||
709 | break; | ||
710 | } | ||
711 | } | ||
712 | } | ||
713 | /* Check CRC and release memory */ | ||
714 | if (i == RETVAL_LAST_BLOCK) { | ||
715 | if (bd->headerCRC != bd->totalCRC) | ||
716 | error("Data integrity error when decompressing."); | ||
717 | else | ||
718 | i = RETVAL_OK; | ||
719 | } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) { | ||
720 | error("Compressed file ends unexpectedly"); | ||
721 | } | ||
722 | if (bd->dbuf) | ||
723 | large_free(bd->dbuf); | ||
724 | if (pos) | ||
725 | *pos = bd->inbufPos; | ||
726 | free(bd); | ||
727 | if (!buf) | ||
728 | free(inbuf); | ||
729 | exit_0: | ||
730 | if (flush) | ||
731 | free(outbuf); | ||
732 | return i; | ||
733 | } | ||
734 | |||
735 | #define decompress bunzip2 | ||
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
new file mode 100644
index 000000000000..163e66aea5f6
--- /dev/null
+++ b/lib/decompress_inflate.c
@@ -0,0 +1,167 @@ | |||
1 | #ifdef STATIC | ||
2 | /* Pre-boot environment: included */ | ||
3 | |||
4 | /* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots | ||
5 | * of errors about console_printk etc. on ARM */ | ||
6 | #define _LINUX_KERNEL_H | ||
7 | |||
8 | #include "zlib_inflate/inftrees.c" | ||
9 | #include "zlib_inflate/inffast.c" | ||
10 | #include "zlib_inflate/inflate.c" | ||
11 | |||
12 | #else /* STATIC */ | ||
13 | /* initramfs et al: linked */ | ||
14 | |||
15 | #include <linux/zutil.h> | ||
16 | |||
17 | #include "zlib_inflate/inftrees.h" | ||
18 | #include "zlib_inflate/inffast.h" | ||
19 | #include "zlib_inflate/inflate.h" | ||
20 | |||
21 | #include "zlib_inflate/infutil.h" | ||
22 | |||
23 | #endif /* STATIC */ | ||
24 | |||
25 | #include <linux/decompress/mm.h> | ||
26 | |||
27 | #define INBUF_LEN (16*1024) | ||
28 | |||
29 | /* Included from initramfs et al code */ | ||
30 | STATIC int INIT gunzip(unsigned char *buf, int len, | ||
31 | int(*fill)(void*, unsigned int), | ||
32 | int(*flush)(void*, unsigned int), | ||
33 | unsigned char *out_buf, | ||
34 | int *pos, | ||
35 | void(*error_fn)(char *x)) { | ||
36 | u8 *zbuf; | ||
37 | struct z_stream_s *strm; | ||
38 | int rc; | ||
39 | size_t out_len; | ||
40 | |||
41 | set_error_fn(error_fn); | ||
42 | rc = -1; | ||
43 | if (flush) { | ||
44 | out_len = 0x8100; /* 32K + 256 bytes */ | ||
45 | out_buf = malloc(out_len); | ||
46 | } else { | ||
47 | out_len = 0x7fffffff; /* no limit */ | ||
48 | } | ||
49 | if (!out_buf) { | ||
50 | error("Out of memory while allocating output buffer"); | ||
51 | goto gunzip_nomem1; | ||
52 | } | ||
53 | |||
54 | if (buf) | ||
55 | zbuf = buf; | ||
56 | else { | ||
57 | zbuf = malloc(INBUF_LEN); | ||
58 | len = 0; | ||
59 | } | ||
60 | if (!zbuf) { | ||
61 | error("Out of memory while allocating input buffer"); | ||
62 | goto gunzip_nomem2; | ||
63 | } | ||
64 | |||
65 | strm = malloc(sizeof(*strm)); | ||
66 | if (strm == NULL) { | ||
67 | error("Out of memory while allocating z_stream"); | ||
68 | goto gunzip_nomem3; | ||
69 | } | ||
70 | |||
71 | strm->workspace = malloc(flush ? zlib_inflate_workspacesize() : | ||
72 | sizeof(struct inflate_state)); | ||
73 | if (strm->workspace == NULL) { | ||
74 | error("Out of memory while allocating workspace"); | ||
75 | goto gunzip_nomem4; | ||
76 | } | ||
77 | |||
78 | if (len == 0) | ||
79 | len = fill(zbuf, INBUF_LEN); | ||
80 | |||
81 | /* verify the gzip header */ | ||
82 | if (len < 10 || | ||
83 | zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 0x08) { | ||
84 | if (pos) | ||
85 | *pos = 0; | ||
86 | error("Not a gzip file"); | ||
87 | goto gunzip_5; | ||
88 | } | ||
89 | |||
90 | /* skip over gzip header (1f,8b,08... 10 bytes total + | ||
91 | * possible asciz filename) | ||
92 | */ | ||
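/* Added note (not part of the original patch): per RFC 1952 the fixed part of
 * a gzip header is ID1 = 0x1f, ID2 = 0x8b, CM = 0x08 (deflate), FLG, 4 bytes
 * of MTIME, XFL and OS -- 10 bytes in all.  Bit 3 of FLG (0x08, FNAME)
 * announces the NUL-terminated original file name that is skipped below. */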
93 | strm->next_in = zbuf + 10; | ||
94 | /* skip over asciz filename */ | ||
95 | if (zbuf[3] & 0x8) { | ||
96 | while (strm->next_in[0]) | ||
97 | strm->next_in++; | ||
98 | strm->next_in++; | ||
99 | } | ||
100 | strm->avail_in = len - 10; | ||
101 | |||
102 | strm->next_out = out_buf; | ||
103 | strm->avail_out = out_len; | ||
104 | |||
105 | rc = zlib_inflateInit2(strm, -MAX_WBITS); | ||
106 | |||
107 | if (!flush) { | ||
108 | WS(strm)->inflate_state.wsize = 0; | ||
109 | WS(strm)->inflate_state.window = NULL; | ||
110 | } | ||
111 | |||
112 | while (rc == Z_OK) { | ||
113 | if (strm->avail_in == 0) { | ||
114 | /* TODO: handle case where both pos and fill are set */ | ||
115 | len = fill(zbuf, INBUF_LEN); | ||
116 | if (len < 0) { | ||
117 | rc = -1; | ||
118 | error("read error"); | ||
119 | break; | ||
120 | } | ||
121 | strm->next_in = zbuf; | ||
122 | strm->avail_in = len; | ||
123 | } | ||
124 | rc = zlib_inflate(strm, 0); | ||
125 | |||
126 | /* Write any data generated */ | ||
127 | if (flush && strm->next_out > out_buf) { | ||
128 | int l = strm->next_out - out_buf; | ||
129 | if (l != flush(out_buf, l)) { | ||
130 | rc = -1; | ||
131 | error("write error"); | ||
132 | break; | ||
133 | } | ||
134 | strm->next_out = out_buf; | ||
135 | strm->avail_out = out_len; | ||
136 | } | ||
137 | |||
138 | /* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */ | ||
139 | if (rc == Z_STREAM_END) { | ||
140 | rc = 0; | ||
141 | break; | ||
142 | } else if (rc != Z_OK) { | ||
143 | error("uncompression error"); | ||
144 | rc = -1; | ||
145 | } | ||
146 | } | ||
147 | |||
148 | zlib_inflateEnd(strm); | ||
149 | if (pos) | ||
150 | /* add 8 to skip over the trailer (CRC32 + ISIZE) */ | ||
151 | *pos = strm->next_in - zbuf+8; | ||
152 | |||
153 | gunzip_5: | ||
154 | free(strm->workspace); | ||
155 | gunzip_nomem4: | ||
156 | free(strm); | ||
157 | gunzip_nomem3: | ||
158 | if (!buf) | ||
159 | free(zbuf); | ||
160 | gunzip_nomem2: | ||
161 | if (flush) | ||
162 | free(out_buf); | ||
163 | gunzip_nomem1: | ||
164 | return rc; /* returns Z_OK (0) if successful */ | ||
165 | } | ||
166 | |||
167 | #define decompress gunzip | ||
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
new file mode 100644
index 000000000000..546f2f4c157e
--- /dev/null
+++ b/lib/decompress_unlzma.c
@@ -0,0 +1,647 @@ | |||
1 | /* Lzma decompressor for Linux kernel. Shamelessly snarfed | ||
2 | *from busybox 1.1.1 | ||
3 | * | ||
4 | *Linux kernel adaptation | ||
5 | *Copyright (C) 2006 Alain < alain@knaff.lu > | ||
6 | * | ||
7 | *Based on small lzma deflate implementation/Small range coder | ||
8 | *implementation for lzma. | ||
9 | *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org > | ||
10 | * | ||
11 | *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/) | ||
12 | *Copyright (C) 1999-2005 Igor Pavlov | ||
13 | * | ||
14 | *Copyrights of the parts, see headers below. | ||
15 | * | ||
16 | * | ||
17 | *This program is free software; you can redistribute it and/or | ||
18 | *modify it under the terms of the GNU Lesser General Public | ||
19 | *License as published by the Free Software Foundation; either | ||
20 | *version 2.1 of the License, or (at your option) any later version. | ||
21 | * | ||
22 | *This program is distributed in the hope that it will be useful, | ||
23 | *but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
24 | *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
25 | *Lesser General Public License for more details. | ||
26 | * | ||
27 | *You should have received a copy of the GNU Lesser General Public | ||
28 | *License along with this library; if not, write to the Free Software | ||
29 | *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
30 | */ | ||
31 | |||
32 | #ifndef STATIC | ||
33 | #include <linux/decompress/unlzma.h> | ||
34 | #endif /* STATIC */ | ||
35 | |||
36 | #include <linux/decompress/mm.h> | ||
37 | |||
38 | #define MIN(a, b) (((a) < (b)) ? (a) : (b)) | ||
39 | |||
40 | static long long INIT read_int(unsigned char *ptr, int size) | ||
41 | { | ||
42 | int i; | ||
43 | long long ret = 0; | ||
44 | |||
45 | for (i = 0; i < size; i++) | ||
46 | ret = (ret << 8) | ptr[size-i-1]; | ||
47 | return ret; | ||
48 | } | ||
49 | |||
50 | #define ENDIAN_CONVERT(x) \ | ||
51 | x = (typeof(x))read_int((unsigned char *)&x, sizeof(x)) | ||
52 | |||
53 | |||
54 | /* Small range coder implementation for lzma. | ||
55 | *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org > | ||
56 | * | ||
57 | *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/) | ||
58 | *Copyright (c) 1999-2005 Igor Pavlov | ||
59 | */ | ||
60 | |||
61 | #include <linux/compiler.h> | ||
62 | |||
63 | #define LZMA_IOBUF_SIZE 0x10000 | ||
64 | |||
65 | struct rc { | ||
66 | int (*fill)(void*, unsigned int); | ||
67 | uint8_t *ptr; | ||
68 | uint8_t *buffer; | ||
69 | uint8_t *buffer_end; | ||
70 | int buffer_size; | ||
71 | uint32_t code; | ||
72 | uint32_t range; | ||
73 | uint32_t bound; | ||
74 | }; | ||
75 | |||
76 | |||
77 | #define RC_TOP_BITS 24 | ||
78 | #define RC_MOVE_BITS 5 | ||
79 | #define RC_MODEL_TOTAL_BITS 11 | ||
80 | |||
81 | |||
82 | /* Called twice: once at startup and once in rc_normalize() */ | ||
83 | static void INIT rc_read(struct rc *rc) | ||
84 | { | ||
85 | rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE); | ||
86 | if (rc->buffer_size <= 0) | ||
87 | error("unexpected EOF"); | ||
88 | rc->ptr = rc->buffer; | ||
89 | rc->buffer_end = rc->buffer + rc->buffer_size; | ||
90 | } | ||
91 | |||
92 | /* Called once */ | ||
93 | static inline void INIT rc_init(struct rc *rc, | ||
94 | int (*fill)(void*, unsigned int), | ||
95 | char *buffer, int buffer_size) | ||
96 | { | ||
97 | rc->fill = fill; | ||
98 | rc->buffer = (uint8_t *)buffer; | ||
99 | rc->buffer_size = buffer_size; | ||
100 | rc->buffer_end = rc->buffer + rc->buffer_size; | ||
101 | rc->ptr = rc->buffer; | ||
102 | |||
103 | rc->code = 0; | ||
104 | rc->range = 0xFFFFFFFF; | ||
105 | } | ||
106 | |||
107 | static inline void INIT rc_init_code(struct rc *rc) | ||
108 | { | ||
109 | int i; | ||
110 | |||
111 | for (i = 0; i < 5; i++) { | ||
112 | if (rc->ptr >= rc->buffer_end) | ||
113 | rc_read(rc); | ||
114 | rc->code = (rc->code << 8) | *rc->ptr++; | ||
115 | } | ||
116 | } | ||
117 | |||
118 | |||
119 | /* Called once. TODO: bb_maybe_free() */ | ||
120 | static inline void INIT rc_free(struct rc *rc) | ||
121 | { | ||
122 | free(rc->buffer); | ||
123 | } | ||
124 | |||
125 | /* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */ | ||
126 | static void INIT rc_do_normalize(struct rc *rc) | ||
127 | { | ||
128 | if (rc->ptr >= rc->buffer_end) | ||
129 | rc_read(rc); | ||
130 | rc->range <<= 8; | ||
131 | rc->code = (rc->code << 8) | *rc->ptr++; | ||
132 | } | ||
133 | static inline void INIT rc_normalize(struct rc *rc) | ||
134 | { | ||
135 | if (rc->range < (1 << RC_TOP_BITS)) | ||
136 | rc_do_normalize(rc); | ||
137 | } | ||
138 | |||
139 | /* Called 9 times */ | ||
140 | /* Why does rc_is_bit_0_helper() exist? | ||
141 | *Because we want to always expose (rc->code < rc->bound) to the optimizer | ||
142 | */ | ||
143 | static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p) | ||
144 | { | ||
145 | rc_normalize(rc); | ||
146 | rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS); | ||
147 | return rc->bound; | ||
148 | } | ||
149 | static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p) | ||
150 | { | ||
151 | uint32_t t = rc_is_bit_0_helper(rc, p); | ||
152 | return rc->code < t; | ||
153 | } | ||
154 | |||
155 | /* Called ~10 times, but very small, thus inlined */ | ||
156 | static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p) | ||
157 | { | ||
158 | rc->range = rc->bound; | ||
159 | *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS; | ||
160 | } | ||
161 | static inline void rc_update_bit_1(struct rc *rc, uint16_t *p) | ||
162 | { | ||
163 | rc->range -= rc->bound; | ||
164 | rc->code -= rc->bound; | ||
165 | *p -= *p >> RC_MOVE_BITS; | ||
166 | } | ||
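/* Added note (not part of the original patch): each uint16_t probability *p
 * is scaled to 1 << RC_MODEL_TOTAL_BITS == 2048, and bound = *p *
 * (range >> 11) splits the current range in proportion to the probability
 * that the next bit is 0; with *p == 1024 the split is exactly half the
 * range.  After a 0-bit *p grows by (2048 - *p) >> RC_MOVE_BITS, after a
 * 1-bit it shrinks by *p >> RC_MOVE_BITS, so the model adapts to the data. */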
167 | |||
168 | /* Called 4 times in unlzma loop */ | ||
169 | static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol) | ||
170 | { | ||
171 | if (rc_is_bit_0(rc, p)) { | ||
172 | rc_update_bit_0(rc, p); | ||
173 | *symbol *= 2; | ||
174 | return 0; | ||
175 | } else { | ||
176 | rc_update_bit_1(rc, p); | ||
177 | *symbol = *symbol * 2 + 1; | ||
178 | return 1; | ||
179 | } | ||
180 | } | ||
181 | |||
182 | /* Called once */ | ||
183 | static inline int INIT rc_direct_bit(struct rc *rc) | ||
184 | { | ||
185 | rc_normalize(rc); | ||
186 | rc->range >>= 1; | ||
187 | if (rc->code >= rc->range) { | ||
188 | rc->code -= rc->range; | ||
189 | return 1; | ||
190 | } | ||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | /* Called twice */ | ||
195 | static inline void INIT | ||
196 | rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol) | ||
197 | { | ||
198 | int i = num_levels; | ||
199 | |||
200 | *symbol = 1; | ||
201 | while (i--) | ||
202 | rc_get_bit(rc, p + *symbol, symbol); | ||
203 | *symbol -= 1 << num_levels; | ||
204 | } | ||
205 | |||
206 | |||
207 | /* | ||
208 | * Small lzma deflate implementation. | ||
209 | * Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org > | ||
210 | * | ||
211 | * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/) | ||
212 | * Copyright (C) 1999-2005 Igor Pavlov | ||
213 | */ | ||
214 | |||
215 | |||
216 | struct lzma_header { | ||
217 | uint8_t pos; | ||
218 | uint32_t dict_size; | ||
219 | uint64_t dst_size; | ||
220 | } __attribute__ ((packed)) ; | ||
221 | |||
222 | |||
223 | #define LZMA_BASE_SIZE 1846 | ||
224 | #define LZMA_LIT_SIZE 768 | ||
225 | |||
226 | #define LZMA_NUM_POS_BITS_MAX 4 | ||
227 | |||
228 | #define LZMA_LEN_NUM_LOW_BITS 3 | ||
229 | #define LZMA_LEN_NUM_MID_BITS 3 | ||
230 | #define LZMA_LEN_NUM_HIGH_BITS 8 | ||
231 | |||
232 | #define LZMA_LEN_CHOICE 0 | ||
233 | #define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1) | ||
234 | #define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1) | ||
235 | #define LZMA_LEN_MID (LZMA_LEN_LOW \ | ||
236 | + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS))) | ||
237 | #define LZMA_LEN_HIGH (LZMA_LEN_MID \ | ||
238 | +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS))) | ||
239 | #define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS)) | ||
240 | |||
241 | #define LZMA_NUM_STATES 12 | ||
242 | #define LZMA_NUM_LIT_STATES 7 | ||
243 | |||
244 | #define LZMA_START_POS_MODEL_INDEX 4 | ||
245 | #define LZMA_END_POS_MODEL_INDEX 14 | ||
246 | #define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1)) | ||
247 | |||
248 | #define LZMA_NUM_POS_SLOT_BITS 6 | ||
249 | #define LZMA_NUM_LEN_TO_POS_STATES 4 | ||
250 | |||
251 | #define LZMA_NUM_ALIGN_BITS 4 | ||
252 | |||
253 | #define LZMA_MATCH_MIN_LEN 2 | ||
254 | |||
255 | #define LZMA_IS_MATCH 0 | ||
256 | #define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX)) | ||
257 | #define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES) | ||
258 | #define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES) | ||
259 | #define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES) | ||
260 | #define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES) | ||
261 | #define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \ | ||
262 | + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX)) | ||
263 | #define LZMA_SPEC_POS (LZMA_POS_SLOT \ | ||
264 | +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS)) | ||
265 | #define LZMA_ALIGN (LZMA_SPEC_POS \ | ||
266 | + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX) | ||
267 | #define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS)) | ||
268 | #define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS) | ||
269 | #define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS) | ||
270 | |||
271 | |||
272 | struct writer { | ||
273 | uint8_t *buffer; | ||
274 | uint8_t previous_byte; | ||
275 | size_t buffer_pos; | ||
276 | int bufsize; | ||
277 | size_t global_pos; | ||
278 | int(*flush)(void*, unsigned int); | ||
279 | struct lzma_header *header; | ||
280 | }; | ||
281 | |||
282 | struct cstate { | ||
283 | int state; | ||
284 | uint32_t rep0, rep1, rep2, rep3; | ||
285 | }; | ||
286 | |||
287 | static inline size_t INIT get_pos(struct writer *wr) | ||
288 | { | ||
289 | return | ||
290 | wr->global_pos + wr->buffer_pos; | ||
291 | } | ||
292 | |||
293 | static inline uint8_t INIT peek_old_byte(struct writer *wr, | ||
294 | uint32_t offs) | ||
295 | { | ||
296 | if (!wr->flush) { | ||
297 | int32_t pos; | ||
298 | while (offs > wr->header->dict_size) | ||
299 | offs -= wr->header->dict_size; | ||
300 | pos = wr->buffer_pos - offs; | ||
301 | return wr->buffer[pos]; | ||
302 | } else { | ||
303 | uint32_t pos = wr->buffer_pos - offs; | ||
304 | while (pos >= wr->header->dict_size) | ||
305 | pos += wr->header->dict_size; | ||
306 | return wr->buffer[pos]; | ||
307 | } | ||
308 | |||
309 | } | ||
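/* Added note (not part of the original patch): in the flush case wr->buffer
 * is a circular dictionary of dict_size bytes.  The unsigned subtraction
 * above wraps around when offs reaches back past buffer_pos, and repeatedly
 * adding dict_size (again relying on 32-bit wraparound) brings pos back into
 * [0, dict_size), i.e. the dictionary is indexed modulo its size. */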
310 | |||
311 | static inline void INIT write_byte(struct writer *wr, uint8_t byte) | ||
312 | { | ||
313 | wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte; | ||
314 | if (wr->flush && wr->buffer_pos == wr->header->dict_size) { | ||
315 | wr->buffer_pos = 0; | ||
316 | wr->global_pos += wr->header->dict_size; | ||
317 | wr->flush((char *)wr->buffer, wr->header->dict_size); | ||
318 | } | ||
319 | } | ||
320 | |||
321 | |||
322 | static inline void INIT copy_byte(struct writer *wr, uint32_t offs) | ||
323 | { | ||
324 | write_byte(wr, peek_old_byte(wr, offs)); | ||
325 | } | ||
326 | |||
327 | static inline void INIT copy_bytes(struct writer *wr, | ||
328 | uint32_t rep0, int len) | ||
329 | { | ||
330 | do { | ||
331 | copy_byte(wr, rep0); | ||
332 | len--; | ||
333 | } while (len != 0 && wr->buffer_pos < wr->header->dst_size); | ||
334 | } | ||
335 | |||
336 | static inline void INIT process_bit0(struct writer *wr, struct rc *rc, | ||
337 | struct cstate *cst, uint16_t *p, | ||
338 | int pos_state, uint16_t *prob, | ||
339 | int lc, uint32_t literal_pos_mask) { | ||
340 | int mi = 1; | ||
341 | rc_update_bit_0(rc, prob); | ||
342 | prob = (p + LZMA_LITERAL + | ||
343 | (LZMA_LIT_SIZE | ||
344 | * (((get_pos(wr) & literal_pos_mask) << lc) | ||
345 | + (wr->previous_byte >> (8 - lc)))) | ||
346 | ); | ||
347 | |||
348 | if (cst->state >= LZMA_NUM_LIT_STATES) { | ||
349 | int match_byte = peek_old_byte(wr, cst->rep0); | ||
350 | do { | ||
351 | int bit; | ||
352 | uint16_t *prob_lit; | ||
353 | |||
354 | match_byte <<= 1; | ||
355 | bit = match_byte & 0x100; | ||
356 | prob_lit = prob + 0x100 + bit + mi; | ||
357 | if (rc_get_bit(rc, prob_lit, &mi)) { | ||
358 | if (!bit) | ||
359 | break; | ||
360 | } else { | ||
361 | if (bit) | ||
362 | break; | ||
363 | } | ||
364 | } while (mi < 0x100); | ||
365 | } | ||
366 | while (mi < 0x100) { | ||
367 | uint16_t *prob_lit = prob + mi; | ||
368 | rc_get_bit(rc, prob_lit, &mi); | ||
369 | } | ||
370 | write_byte(wr, mi); | ||
371 | if (cst->state < 4) | ||
372 | cst->state = 0; | ||
373 | else if (cst->state < 10) | ||
374 | cst->state -= 3; | ||
375 | else | ||
376 | cst->state -= 6; | ||
377 | } | ||
378 | |||
379 | static inline void INIT process_bit1(struct writer *wr, struct rc *rc, | ||
380 | struct cstate *cst, uint16_t *p, | ||
381 | int pos_state, uint16_t *prob) { | ||
382 | int offset; | ||
383 | uint16_t *prob_len; | ||
384 | int num_bits; | ||
385 | int len; | ||
386 | |||
387 | rc_update_bit_1(rc, prob); | ||
388 | prob = p + LZMA_IS_REP + cst->state; | ||
389 | if (rc_is_bit_0(rc, prob)) { | ||
390 | rc_update_bit_0(rc, prob); | ||
391 | cst->rep3 = cst->rep2; | ||
392 | cst->rep2 = cst->rep1; | ||
393 | cst->rep1 = cst->rep0; | ||
394 | cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3; | ||
395 | prob = p + LZMA_LEN_CODER; | ||
396 | } else { | ||
397 | rc_update_bit_1(rc, prob); | ||
398 | prob = p + LZMA_IS_REP_G0 + cst->state; | ||
399 | if (rc_is_bit_0(rc, prob)) { | ||
400 | rc_update_bit_0(rc, prob); | ||
401 | prob = (p + LZMA_IS_REP_0_LONG | ||
402 | + (cst->state << | ||
403 | LZMA_NUM_POS_BITS_MAX) + | ||
404 | pos_state); | ||
405 | if (rc_is_bit_0(rc, prob)) { | ||
406 | rc_update_bit_0(rc, prob); | ||
407 | |||
408 | cst->state = cst->state < LZMA_NUM_LIT_STATES ? | ||
409 | 9 : 11; | ||
410 | copy_byte(wr, cst->rep0); | ||
411 | return; | ||
412 | } else { | ||
413 | rc_update_bit_1(rc, prob); | ||
414 | } | ||
415 | } else { | ||
416 | uint32_t distance; | ||
417 | |||
418 | rc_update_bit_1(rc, prob); | ||
419 | prob = p + LZMA_IS_REP_G1 + cst->state; | ||
420 | if (rc_is_bit_0(rc, prob)) { | ||
421 | rc_update_bit_0(rc, prob); | ||
422 | distance = cst->rep1; | ||
423 | } else { | ||
424 | rc_update_bit_1(rc, prob); | ||
425 | prob = p + LZMA_IS_REP_G2 + cst->state; | ||
426 | if (rc_is_bit_0(rc, prob)) { | ||
427 | rc_update_bit_0(rc, prob); | ||
428 | distance = cst->rep2; | ||
429 | } else { | ||
430 | rc_update_bit_1(rc, prob); | ||
431 | distance = cst->rep3; | ||
432 | cst->rep3 = cst->rep2; | ||
433 | } | ||
434 | cst->rep2 = cst->rep1; | ||
435 | } | ||
436 | cst->rep1 = cst->rep0; | ||
437 | cst->rep0 = distance; | ||
438 | } | ||
439 | cst->state = cst->state < LZMA_NUM_LIT_STATES ? 8 : 11; | ||
440 | prob = p + LZMA_REP_LEN_CODER; | ||
441 | } | ||
442 | |||
443 | prob_len = prob + LZMA_LEN_CHOICE; | ||
444 | if (rc_is_bit_0(rc, prob_len)) { | ||
445 | rc_update_bit_0(rc, prob_len); | ||
446 | prob_len = (prob + LZMA_LEN_LOW | ||
447 | + (pos_state << | ||
448 | LZMA_LEN_NUM_LOW_BITS)); | ||
449 | offset = 0; | ||
450 | num_bits = LZMA_LEN_NUM_LOW_BITS; | ||
451 | } else { | ||
452 | rc_update_bit_1(rc, prob_len); | ||
453 | prob_len = prob + LZMA_LEN_CHOICE_2; | ||
454 | if (rc_is_bit_0(rc, prob_len)) { | ||
455 | rc_update_bit_0(rc, prob_len); | ||
456 | prob_len = (prob + LZMA_LEN_MID | ||
457 | + (pos_state << | ||
458 | LZMA_LEN_NUM_MID_BITS)); | ||
459 | offset = 1 << LZMA_LEN_NUM_LOW_BITS; | ||
460 | num_bits = LZMA_LEN_NUM_MID_BITS; | ||
461 | } else { | ||
462 | rc_update_bit_1(rc, prob_len); | ||
463 | prob_len = prob + LZMA_LEN_HIGH; | ||
464 | offset = ((1 << LZMA_LEN_NUM_LOW_BITS) | ||
465 | + (1 << LZMA_LEN_NUM_MID_BITS)); | ||
466 | num_bits = LZMA_LEN_NUM_HIGH_BITS; | ||
467 | } | ||
468 | } | ||
469 | |||
470 | rc_bit_tree_decode(rc, prob_len, num_bits, &len); | ||
471 | len += offset; | ||
472 | |||
473 | if (cst->state < 4) { | ||
474 | int pos_slot; | ||
475 | |||
476 | cst->state += LZMA_NUM_LIT_STATES; | ||
477 | prob = | ||
478 | p + LZMA_POS_SLOT + | ||
479 | ((len < | ||
480 | LZMA_NUM_LEN_TO_POS_STATES ? len : | ||
481 | LZMA_NUM_LEN_TO_POS_STATES - 1) | ||
482 | << LZMA_NUM_POS_SLOT_BITS); | ||
483 | rc_bit_tree_decode(rc, prob, | ||
484 | LZMA_NUM_POS_SLOT_BITS, | ||
485 | &pos_slot); | ||
486 | if (pos_slot >= LZMA_START_POS_MODEL_INDEX) { | ||
487 | int i, mi; | ||
488 | num_bits = (pos_slot >> 1) - 1; | ||
489 | cst->rep0 = 2 | (pos_slot & 1); | ||
490 | if (pos_slot < LZMA_END_POS_MODEL_INDEX) { | ||
491 | cst->rep0 <<= num_bits; | ||
492 | prob = p + LZMA_SPEC_POS + | ||
493 | cst->rep0 - pos_slot - 1; | ||
494 | } else { | ||
495 | num_bits -= LZMA_NUM_ALIGN_BITS; | ||
496 | while (num_bits--) | ||
497 | cst->rep0 = (cst->rep0 << 1) | | ||
498 | rc_direct_bit(rc); | ||
499 | prob = p + LZMA_ALIGN; | ||
500 | cst->rep0 <<= LZMA_NUM_ALIGN_BITS; | ||
501 | num_bits = LZMA_NUM_ALIGN_BITS; | ||
502 | } | ||
503 | i = 1; | ||
504 | mi = 1; | ||
505 | while (num_bits--) { | ||
506 | if (rc_get_bit(rc, prob + mi, &mi)) | ||
507 | cst->rep0 |= i; | ||
508 | i <<= 1; | ||
509 | } | ||
510 | } else | ||
511 | cst->rep0 = pos_slot; | ||
512 | if (++(cst->rep0) == 0) | ||
513 | return; | ||
514 | } | ||
515 | |||
516 | len += LZMA_MATCH_MIN_LEN; | ||
517 | |||
518 | copy_bytes(wr, cst->rep0, len); | ||
519 | } | ||
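The length decode above selects one of three bit trees via the two choice bits; with the usual LZMA sizes (3 low bits, 3 mid bits, 8 high bits, and the minimum match length of 2 that LZMA_MATCH_MIN_LEN adds at the end) the branches cover match lengths 2-9, 10-17 and 18-273. For fresh matches (state < 4) the pos_slot tree then drives the distance: small slots are used as the distance directly, mid-range slots add bits from the reverse bit tree at LZMA_SPEC_POS, and the largest slots mix direct range-coder bits with the (normally 4-bit) align tree before the final rep0 increment.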
520 | |||
521 | |||
522 | |||
523 | STATIC inline int INIT unlzma(unsigned char *buf, int in_len, | ||
524 | int(*fill)(void*, unsigned int), | ||
525 | int(*flush)(void*, unsigned int), | ||
526 | unsigned char *output, | ||
527 | int *posp, | ||
528 | void(*error_fn)(char *x) | ||
529 | ) | ||
530 | { | ||
531 | struct lzma_header header; | ||
532 | int lc, pb, lp; | ||
533 | uint32_t pos_state_mask; | ||
534 | uint32_t literal_pos_mask; | ||
535 | uint16_t *p; | ||
536 | int num_probs; | ||
537 | struct rc rc; | ||
538 | int i, mi; | ||
539 | struct writer wr; | ||
540 | struct cstate cst; | ||
541 | unsigned char *inbuf; | ||
542 | int ret = -1; | ||
543 | |||
544 | set_error_fn(error_fn); | ||
545 | if (!flush) | ||
546 | in_len -= 4; /* Uncompressed size hack active in pre-boot | ||
547 | environment */ | ||
548 | if (buf) | ||
549 | inbuf = buf; | ||
550 | else | ||
551 | inbuf = malloc(LZMA_IOBUF_SIZE); | ||
552 | if (!inbuf) { | ||
553 | error("Could not allocate input buffer"); | ||
554 | goto exit_0; | ||
555 | } | ||
556 | |||
557 | cst.state = 0; | ||
558 | cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1; | ||
559 | |||
560 | wr.header = &header; | ||
561 | wr.flush = flush; | ||
562 | wr.global_pos = 0; | ||
563 | wr.previous_byte = 0; | ||
564 | wr.buffer_pos = 0; | ||
565 | |||
566 | rc_init(&rc, fill, inbuf, in_len); | ||
567 | |||
568 | for (i = 0; i < sizeof(header); i++) { | ||
569 | if (rc.ptr >= rc.buffer_end) | ||
570 | rc_read(&rc); | ||
571 | ((unsigned char *)&header)[i] = *rc.ptr++; | ||
572 | } | ||
573 | |||
574 | if (header.pos >= (9 * 5 * 5)) | ||
575 | error("bad header"); | ||
576 | |||
577 | mi = 0; | ||
578 | lc = header.pos; | ||
579 | while (lc >= 9) { | ||
580 | mi++; | ||
581 | lc -= 9; | ||
582 | } | ||
583 | pb = 0; | ||
584 | lp = mi; | ||
585 | while (lp >= 5) { | ||
586 | pb++; | ||
587 | lp -= 5; | ||
588 | } | ||
589 | pos_state_mask = (1 << pb) - 1; | ||
590 | literal_pos_mask = (1 << lp) - 1; | ||
591 | |||
592 | ENDIAN_CONVERT(header.dict_size); | ||
593 | ENDIAN_CONVERT(header.dst_size); | ||
594 | |||
595 | if (header.dict_size == 0) | ||
596 | header.dict_size = 1; | ||
597 | |||
598 | if (output) | ||
599 | wr.buffer = output; | ||
600 | else { | ||
601 | wr.bufsize = MIN(header.dst_size, header.dict_size); | ||
602 | wr.buffer = large_malloc(wr.bufsize); | ||
603 | } | ||
604 | if (wr.buffer == NULL) | ||
605 | goto exit_1; | ||
606 | |||
607 | num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)); | ||
608 | p = (uint16_t *) large_malloc(num_probs * sizeof(*p)); | ||
609 | if (p == 0) | ||
610 | goto exit_2; | ||
611 | num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp)); | ||
612 | for (i = 0; i < num_probs; i++) | ||
613 | p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1; | ||
614 | |||
615 | rc_init_code(&rc); | ||
616 | |||
617 | while (get_pos(&wr) < header.dst_size) { | ||
618 | int pos_state = get_pos(&wr) & pos_state_mask; | ||
619 | uint16_t *prob = p + LZMA_IS_MATCH + | ||
620 | (cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state; | ||
621 | if (rc_is_bit_0(&rc, prob)) | ||
622 | process_bit0(&wr, &rc, &cst, p, pos_state, prob, | ||
623 | lc, literal_pos_mask); | ||
624 | else { | ||
625 | process_bit1(&wr, &rc, &cst, p, pos_state, prob); | ||
626 | if (cst.rep0 == 0) | ||
627 | break; | ||
628 | } | ||
629 | } | ||
630 | |||
631 | if (posp) | ||
632 | *posp = rc.ptr-rc.buffer; | ||
633 | if (wr.flush) | ||
634 | wr.flush(wr.buffer, wr.buffer_pos); | ||
635 | ret = 0; | ||
636 | large_free(p); | ||
637 | exit_2: | ||
638 | if (!output) | ||
639 | large_free(wr.buffer); | ||
640 | exit_1: | ||
641 | if (!buf) | ||
642 | free(inbuf); | ||
643 | exit_0: | ||
644 | return ret; | ||
645 | } | ||
646 | |||
647 | #define decompress unlzma | ||
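In the pre-boot environment the whole compressed image is normally already in memory, flush is NULL (which is what triggers the in_len -= 4 trim of the appended size bytes above), and fill should never be reached because the stream never runs dry. The following is a minimal sketch of such a call; the function and buffer names are hypothetical, and the properties-byte remark assumes the common default of 0x5d, which the header parsing above splits into lc = 3, lp = 0, pb = 2.

/* Hypothetical pre-boot style caller; names are illustrative only.
 * 'image' holds the LZMA stream plus the 4 size bytes appended at
 * build time, and 'out' is a preallocated output buffer. */
static void decompress_error(char *x)
{
	/* a real pre-boot caller would print x and halt */
}

static int decompress_image(unsigned char *image, int image_len,
			    unsigned char *out)
{
	int pos = 0;

	/* flush == NULL makes unlzma() drop the 4 trailing size bytes;
	 * with everything already in 'image', fill is never called. */
	return unlzma(image, image_len, NULL, NULL, out, &pos,
		      decompress_error);
}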
diff --git a/lib/zlib_inflate/inflate.h b/lib/zlib_inflate/inflate.h index df8a6c92052d..3d17b3d1b21f 100644 --- a/lib/zlib_inflate/inflate.h +++ b/lib/zlib_inflate/inflate.h | |||
@@ -1,3 +1,6 @@ | |||
1 | #ifndef INFLATE_H | ||
2 | #define INFLATE_H | ||
3 | |||
1 | /* inflate.h -- internal inflate state definition | 4 | /* inflate.h -- internal inflate state definition |
2 | * Copyright (C) 1995-2004 Mark Adler | 5 | * Copyright (C) 1995-2004 Mark Adler |
3 | * For conditions of distribution and use, see copyright notice in zlib.h | 6 | * For conditions of distribution and use, see copyright notice in zlib.h |
@@ -105,3 +108,4 @@ struct inflate_state { | |||
105 | unsigned short work[288]; /* work area for code table building */ | 108 | unsigned short work[288]; /* work area for code table building */ |
106 | code codes[ENOUGH]; /* space for code tables */ | 109 | code codes[ENOUGH]; /* space for code tables */ |
107 | }; | 110 | }; |
111 | #endif | ||
diff --git a/lib/zlib_inflate/inftrees.h b/lib/zlib_inflate/inftrees.h index 5f5219b1240e..b70b4731ac7a 100644 --- a/lib/zlib_inflate/inftrees.h +++ b/lib/zlib_inflate/inftrees.h | |||
@@ -1,3 +1,6 @@ | |||
1 | #ifndef INFTREES_H | ||
2 | #define INFTREES_H | ||
3 | |||
1 | /* inftrees.h -- header to use inftrees.c | 4 | /* inftrees.h -- header to use inftrees.c |
2 | * Copyright (C) 1995-2005 Mark Adler | 5 | * Copyright (C) 1995-2005 Mark Adler |
3 | * For conditions of distribution and use, see copyright notice in zlib.h | 6 | * For conditions of distribution and use, see copyright notice in zlib.h |
@@ -53,3 +56,4 @@ typedef enum { | |||
53 | extern int zlib_inflate_table (codetype type, unsigned short *lens, | 56 | extern int zlib_inflate_table (codetype type, unsigned short *lens, |
54 | unsigned codes, code **table, | 57 | unsigned codes, code **table, |
55 | unsigned *bits, unsigned short *work); | 58 | unsigned *bits, unsigned short *work); |
59 | #endif | ||
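The only change to the existing zlib headers is the addition of include guards, presumably so that the new single-translation-unit decompressor builds can pull these internals in more than once without redefinition errors.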
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index e06365775bdf..70b4676e3b99 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib | |||
@@ -186,3 +186,17 @@ quiet_cmd_gzip = GZIP $@ | |||
186 | cmd_gzip = gzip -f -9 < $< > $@ | 186 | cmd_gzip = gzip -f -9 < $< > $@ |
187 | 187 | ||
188 | 188 | ||
189 | # Bzip2 | ||
190 | # --------------------------------------------------------------------------- | ||
191 | |||
192 | # Bzip2 does not store the uncompressed size in the file, so we have to append it ourselves | ||
193 | size_append=$(CONFIG_SHELL) $(srctree)/scripts/bin_size | ||
194 | |||
195 | quiet_cmd_bzip2 = BZIP2 $@ | ||
196 | cmd_bzip2 = (bzip2 -9 < $< ; $(size_append) $<) > $@ || (rm -f $@ ; false) | ||
197 | |||
198 | # Lzma | ||
199 | # --------------------------------------------------------------------------- | ||
200 | |||
201 | quiet_cmd_lzma = LZMA $@ | ||
202 | cmd_lzma = (lzma -9 -c $< ; $(size_append) $<) >$@ || (rm -f $@ ; false) | ||
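Both rules emit the compressed stream and then run $(size_append) on the original input, so the generated file ends with the uncompressed size of $< as four little-endian bytes; this is the trailer the pre-boot unlzma path above discards with in_len -= 4. A sketch of how a loader might read that trailer back, assuming img points at the complete payload (the helper name is illustrative):

/* Read the 4-byte little-endian size that scripts/bin_size appends
 * after the compressed stream. */
static unsigned int appended_size(const unsigned char *img, int img_len)
{
	const unsigned char *p = img + img_len - 4;

	return p[0] | (p[1] << 8) | ((unsigned int)p[2] << 16) |
	       ((unsigned int)p[3] << 24);
}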
diff --git a/scripts/bin_size b/scripts/bin_size new file mode 100644 index 000000000000..43e1b360cee6 --- /dev/null +++ b/scripts/bin_size | |||
@@ -0,0 +1,10 @@ | |||
1 | #!/bin/sh | ||
2 | |||
3 | if [ $# = 0 ] ; then | ||
4 | echo "Usage: $0 file" ; exit 1 | ||
5 | fi | ||
6 | |||
7 | size_dec=`stat -c "%s" $1` | ||
8 | size_hex_echo_string=`printf "%08x" $size_dec | | ||
9 | sed 's/\(..\)\(..\)\(..\)\(..\)/\\\\x\4\\\\x\3\\\\x\2\\\\x\1/g'` | ||
10 | /bin/echo -ne $size_hex_echo_string | ||
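As a worked example, a 4194304-byte (0x00400000) input makes printf emit 00400000; the sed expression regroups the four hex byte pairs in reverse order into \x00\x00\x40\x00, and /bin/echo -ne writes those as raw bytes, i.e. the size least-significant byte first, which is exactly the little-endian trailer the decompressors above expect.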