author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /lib
tag		v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'lib')
60 files changed, 15017 insertions, 0 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
new file mode 100644
index 000000000000..eeb45225248f
--- /dev/null
+++ b/lib/Kconfig
@@ -0,0 +1,61 @@
#
# Library configuration
#

menu "Library routines"

config CRC_CCITT
	tristate "CRC-CCITT functions"
	help
	  This option is provided for the case where no in-kernel-tree
	  modules require CRC-CCITT functions, but a module built outside
	  the kernel tree does. Such modules that use library CRC-CCITT
	  functions require M here.

config CRC32
	tristate "CRC32 functions"
	default y
	help
	  This option is provided for the case where no in-kernel-tree
	  modules require CRC32 functions, but a module built outside the
	  kernel tree does. Such modules that use library CRC32 functions
	  require M here.

config LIBCRC32C
	tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
	help
	  This option is provided for the case where no in-kernel-tree
	  modules require CRC32c functions, but a module built outside the
	  kernel tree does. Such modules that use library CRC32c functions
	  require M here. See Castagnoli93.
	  Module will be libcrc32c.

#
# compression support is select'ed if needed
#
config ZLIB_INFLATE
	tristate

config ZLIB_DEFLATE
	tristate

#
# reed solomon support is select'ed if needed
#
config REED_SOLOMON
	tristate

config REED_SOLOMON_ENC8
	boolean

config REED_SOLOMON_DEC8
	boolean

config REED_SOLOMON_ENC16
	boolean

config REED_SOLOMON_DEC16
	boolean

endmenu

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
new file mode 100644
index 000000000000..426a0cf7b11c
--- /dev/null
+++ b/lib/Kconfig.debug
@@ -0,0 +1,159 @@
1 | |||
2 | config PRINTK_TIME | ||
3 | bool "Show timing information on printks" | ||
4 | help | ||
5 | Selecting this option causes timing information to be | ||
6 | included in printk output. This allows you to measure | ||
7 | the interval between kernel operations, including bootup | ||
8 | operations. This is useful for identifying long delays | ||
9 | in kernel startup. | ||
10 | |||
11 | |||
12 | config DEBUG_KERNEL | ||
13 | bool "Kernel debugging" | ||
14 | help | ||
15 | Say Y here if you are developing drivers or trying to debug and | ||
16 | identify kernel problems. | ||
17 | |||
18 | config MAGIC_SYSRQ | ||
19 | bool "Magic SysRq key" | ||
20 | depends on DEBUG_KERNEL && !UML | ||
21 | help | ||
22 | If you say Y here, you will have some control over the system even | ||
23 | if the system crashes for example during kernel debugging (e.g., you | ||
24 | will be able to flush the buffer cache to disk, reboot the system | ||
25 | immediately or dump some status information). This is accomplished | ||
26 | by pressing various keys while holding SysRq (Alt+PrintScreen). It | ||
27 | also works on a serial console (on PC hardware at least), if you | ||
28 | send a BREAK and then within 5 seconds a command keypress. The | ||
29 | keys are documented in <file:Documentation/sysrq.txt>. Don't say Y | ||
30 | unless you really know what this hack does. | ||
31 | |||
32 | config LOG_BUF_SHIFT | ||
33 | int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL | ||
34 | range 12 21 | ||
35 | default 17 if ARCH_S390 | ||
36 | default 16 if X86_NUMAQ || IA64 | ||
37 | default 15 if SMP | ||
38 | default 14 | ||
39 | help | ||
40 | Select kernel log buffer size as a power of 2. | ||
41 | Defaults and Examples: | ||
42 | 17 => 128 KB for S/390 | ||
43 | 16 => 64 KB for x86 NUMAQ or IA-64 | ||
44 | 15 => 32 KB for SMP | ||
45 | 14 => 16 KB for uniprocessor | ||
46 | 13 => 8 KB | ||
47 | 12 => 4 KB | ||
48 | |||
49 | config SCHEDSTATS | ||
50 | bool "Collect scheduler statistics" | ||
51 | depends on DEBUG_KERNEL && PROC_FS | ||
52 | help | ||
53 | If you say Y here, additional code will be inserted into the | ||
54 | scheduler and related routines to collect statistics about | ||
55 | scheduler behavior and provide them in /proc/schedstat. These | ||
56 | stats may be useful for both tuning and debugging the scheduler | ||
57 | If you aren't debugging the scheduler or trying to tune a specific | ||
58 | application, you can say N to avoid the very slight overhead | ||
59 | this adds. | ||
60 | |||
61 | config DEBUG_SLAB | ||
62 | bool "Debug memory allocations" | ||
63 | depends on DEBUG_KERNEL | ||
64 | help | ||
65 | Say Y here to have the kernel do limited verification on memory | ||
66 | allocation as well as poisoning memory on free to catch use of freed | ||
67 | memory. This can make kmalloc/kfree-intensive workloads much slower. | ||
68 | |||
69 | config DEBUG_PREEMPT | ||
70 | bool "Debug preemptible kernel" | ||
71 | depends on DEBUG_KERNEL && PREEMPT | ||
72 | default y | ||
73 | help | ||
74 | If you say Y here then the kernel will use a debug variant of the | ||
75 | commonly used smp_processor_id() function and will print warnings | ||
76 | if kernel code uses it in a preemption-unsafe way. Also, the kernel | ||
77 | will detect preemption count underflows. | ||
78 | |||
79 | config DEBUG_SPINLOCK | ||
80 | bool "Spinlock debugging" | ||
81 | depends on DEBUG_KERNEL | ||
82 | help | ||
83 | Say Y here and build SMP to catch missing spinlock initialization | ||
84 | and certain other kinds of spinlock errors commonly made. This is | ||
85 | best used in conjunction with the NMI watchdog so that spinlock | ||
86 | deadlocks are also debuggable. | ||
87 | |||
88 | config DEBUG_SPINLOCK_SLEEP | ||
89 | bool "Sleep-inside-spinlock checking" | ||
90 | depends on DEBUG_KERNEL | ||
91 | help | ||
92 | If you say Y here, various routines which may sleep will become very | ||
93 | noisy if they are called with a spinlock held. | ||
94 | |||
95 | config DEBUG_KOBJECT | ||
96 | bool "kobject debugging" | ||
97 | depends on DEBUG_KERNEL | ||
98 | help | ||
99 | If you say Y here, some extra kobject debugging messages will be sent | ||
100 | to the syslog. | ||
101 | |||
102 | config DEBUG_HIGHMEM | ||
103 | bool "Highmem debugging" | ||
104 | depends on DEBUG_KERNEL && HIGHMEM | ||
105 | help | ||
106 | This options enables addition error checking for high memory systems. | ||
107 | Disable for production systems. | ||
108 | |||
109 | config DEBUG_BUGVERBOSE | ||
110 | bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED | ||
111 | depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || (X86 && !X86_64) || FRV | ||
112 | default !EMBEDDED | ||
113 | help | ||
114 | Say Y here to make BUG() panics output the file name and line number | ||
115 | of the BUG call as well as the EIP and oops trace. This aids | ||
116 | debugging but costs about 70-100K of memory. | ||
117 | |||
118 | config DEBUG_INFO | ||
119 | bool "Compile the kernel with debug info" | ||
120 | depends on DEBUG_KERNEL | ||
121 | help | ||
122 | If you say Y here the resulting kernel image will include | ||
123 | debugging info resulting in a larger kernel image. | ||
124 | Say Y here only if you plan to debug the kernel. | ||
125 | |||
126 | If unsure, say N. | ||
127 | |||
128 | config DEBUG_IOREMAP | ||
129 | bool "Enable ioremap() debugging" | ||
130 | depends on DEBUG_KERNEL && PARISC | ||
131 | help | ||
132 | Enabling this option will cause the kernel to distinguish between | ||
133 | ioremapped and physical addresses. It will print a backtrace (at | ||
134 | most one every 10 seconds), hopefully allowing you to see which | ||
135 | drivers need work. Fixing all these problems is a prerequisite | ||
136 | for turning on USE_HPPA_IOREMAP. The warnings are harmless; | ||
137 | the kernel has enough information to fix the broken drivers | ||
138 | automatically, but we'd like to make it more efficient by not | ||
139 | having to do that. | ||
140 | |||
141 | config DEBUG_FS | ||
142 | bool "Debug Filesystem" | ||
143 | depends on DEBUG_KERNEL | ||
144 | help | ||
145 | debugfs is a virtual file system that kernel developers use to put | ||
146 | debugging files into. Enable this option to be able to read and | ||
147 | write to these files. | ||
148 | |||
149 | If unsure, say N. | ||
150 | |||
151 | config FRAME_POINTER | ||
152 | bool "Compile the kernel with frame pointers" | ||
153 | depends on DEBUG_KERNEL && ((X86 && !X86_64) || CRIS || M68K || M68KNOMMU || FRV) | ||
154 | help | ||
155 | If you say Y here the resulting kernel image will be slightly larger | ||
156 | and slower, but it will give very useful debugging information. | ||
157 | If you don't debug the kernel, you can say N, but we may not be able | ||
158 | to solve problems without frame pointers. | ||
159 | |||
diff --git a/lib/Makefile b/lib/Makefile
new file mode 100644
index 000000000000..7c70db79c0e0
--- /dev/null
+++ b/lib/Makefile
@@ -0,0 +1,45 @@
#
# Makefile for some libs needed in the kernel.
#

lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
	 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
	 kobject.o kref.o idr.o div64.o int_sqrt.o \
	 bitmap.o extable.o kobject_uevent.o prio_tree.o sha1.o \
	 halfmd4.o

obj-y += sort.o parser.o

ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
CFLAGS_kobject_uevent.o += -DDEBUG
endif

lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o

ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
  lib-y += dec_and_lock.o
endif

obj-$(CONFIG_CRC_CCITT)	+= crc-ccitt.o
obj-$(CONFIG_CRC32)	+= crc32.o
obj-$(CONFIG_LIBCRC32C)	+= libcrc32c.o
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o

obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
obj-$(CONFIG_REED_SOLOMON) += reed_solomon/

hostprogs-y	:= gen_crc32table
clean-files	:= crc32table.h

$(obj)/crc32.o: $(obj)/crc32table.h

quiet_cmd_crc32 = GEN     $@
      cmd_crc32 = $< > $@

$(obj)/crc32table.h: $(obj)/gen_crc32table
	$(call cmd,crc32)
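
[Editorial note: gen_crc32table.c belongs to this commit but is not shown in this 'lib' excerpt. As a rough sketch only, not the file from the commit, a host program that emits a 256-entry little-endian table from the reflected CRC-32 polynomial 0xedb88320 could look like this:]

/* Editorial sketch of a crc32 table generator; assumes the reflected
 * (little-endian) Ethernet polynomial. */
#include <stdio.h>

#define CRCPOLY_LE 0xedb88320

int main(void)
{
	unsigned int crc, i, j;

	printf("static const u32 crc32table_le[256] = {\n");
	for (i = 0; i < 256; i++) {
		crc = i;
		/* bit-at-a-time division, LSB first */
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
		printf("\t0x%08x,\n", crc);
	}
	printf("};\n");
	return 0;
}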
diff --git a/lib/bitmap.c b/lib/bitmap.c
new file mode 100644
index 000000000000..d1388a5ce89c
--- /dev/null
+++ b/lib/bitmap.c
@@ -0,0 +1,595 @@
/*
 * lib/bitmap.c
 * Helper functions for bitmap.h.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>

/*
 * bitmaps provide an array of bits, implemented using an
 * array of unsigned longs. The number of valid bits in a
 * given bitmap does _not_ need to be an exact multiple of
 * BITS_PER_LONG.
 *
 * The possible unused bits in the last, partially used word
 * of a bitmap are 'don't care'. The implementation makes
 * no particular effort to keep them zero. It ensures that
 * their value will not affect the results of any operation.
 * The bitmap operations that return Boolean (bitmap_empty,
 * for example) or scalar (bitmap_weight, for example) results
 * carefully filter out these unused bits from impacting their
 * results.
 *
 * These operations actually hold to a slightly stronger rule:
 * if you don't input any bitmaps to these ops that have some
 * unused bits set, then they won't output any set unused bits
 * in output bitmaps.
 *
 * The byte ordering of bitmaps is more natural on little
 * endian architectures. See the big-endian headers
 * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
 * for the best explanations of this ordering.
 */
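
/*
 * Editorial worked example (not in the original file): with nbits = 70 on
 * a 64-bit machine, the bitmap occupies BITS_TO_LONGS(70) = 2 longs; only
 * the low 70 - 64 = 6 bits of the second word are valid, so
 * BITMAP_LAST_WORD_MASK(70) = (1UL << 6) - 1 = 0x3f is what these helpers
 * use to filter out the remaining 58 'don't care' bits.
 */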
40 | |||
41 | int __bitmap_empty(const unsigned long *bitmap, int bits) | ||
42 | { | ||
43 | int k, lim = bits/BITS_PER_LONG; | ||
44 | for (k = 0; k < lim; ++k) | ||
45 | if (bitmap[k]) | ||
46 | return 0; | ||
47 | |||
48 | if (bits % BITS_PER_LONG) | ||
49 | if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) | ||
50 | return 0; | ||
51 | |||
52 | return 1; | ||
53 | } | ||
54 | EXPORT_SYMBOL(__bitmap_empty); | ||
55 | |||
56 | int __bitmap_full(const unsigned long *bitmap, int bits) | ||
57 | { | ||
58 | int k, lim = bits/BITS_PER_LONG; | ||
59 | for (k = 0; k < lim; ++k) | ||
60 | if (~bitmap[k]) | ||
61 | return 0; | ||
62 | |||
63 | if (bits % BITS_PER_LONG) | ||
64 | if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) | ||
65 | return 0; | ||
66 | |||
67 | return 1; | ||
68 | } | ||
69 | EXPORT_SYMBOL(__bitmap_full); | ||
70 | |||
71 | int __bitmap_equal(const unsigned long *bitmap1, | ||
72 | const unsigned long *bitmap2, int bits) | ||
73 | { | ||
74 | int k, lim = bits/BITS_PER_LONG; | ||
75 | for (k = 0; k < lim; ++k) | ||
76 | if (bitmap1[k] != bitmap2[k]) | ||
77 | return 0; | ||
78 | |||
79 | if (bits % BITS_PER_LONG) | ||
80 | if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) | ||
81 | return 0; | ||
82 | |||
83 | return 1; | ||
84 | } | ||
85 | EXPORT_SYMBOL(__bitmap_equal); | ||
86 | |||
87 | void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits) | ||
88 | { | ||
89 | int k, lim = bits/BITS_PER_LONG; | ||
90 | for (k = 0; k < lim; ++k) | ||
91 | dst[k] = ~src[k]; | ||
92 | |||
93 | if (bits % BITS_PER_LONG) | ||
94 | dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits); | ||
95 | } | ||
96 | EXPORT_SYMBOL(__bitmap_complement); | ||
97 | |||
98 | /* | ||
99 | * __bitmap_shift_right - logical right shift of the bits in a bitmap | ||
100 | * @dst - destination bitmap | ||
101 | * @src - source bitmap | ||
102 | * @nbits - shift by this many bits | ||
103 | * @bits - bitmap size, in bits | ||
104 | * | ||
105 | * Shifting right (dividing) means moving bits in the MS -> LS bit | ||
106 | * direction. Zeros are fed into the vacated MS positions and the | ||
107 | * LS bits shifted off the bottom are lost. | ||
108 | */ | ||
109 | void __bitmap_shift_right(unsigned long *dst, | ||
110 | const unsigned long *src, int shift, int bits) | ||
111 | { | ||
112 | int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; | ||
113 | int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; | ||
114 | unsigned long mask = (1UL << left) - 1; | ||
115 | for (k = 0; off + k < lim; ++k) { | ||
116 | unsigned long upper, lower; | ||
117 | |||
118 | /* | ||
119 | * If shift is not word aligned, take lower rem bits of | ||
120 | * word above and make them the top rem bits of result. | ||
121 | */ | ||
122 | if (!rem || off + k + 1 >= lim) | ||
123 | upper = 0; | ||
124 | else { | ||
125 | upper = src[off + k + 1]; | ||
126 | if (off + k + 1 == lim - 1 && left) | ||
127 | upper &= mask; | ||
128 | } | ||
129 | lower = src[off + k]; | ||
130 | if (left && off + k == lim - 1) | ||
131 | lower &= mask; | ||
132 | dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem; | ||
133 | if (left && k == lim - 1) | ||
134 | dst[k] &= mask; | ||
135 | } | ||
136 | if (off) | ||
137 | memset(&dst[lim - off], 0, off*sizeof(unsigned long)); | ||
138 | } | ||
139 | EXPORT_SYMBOL(__bitmap_shift_right); | ||
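
/*
 * Editorial example (not in the original file): on a 32-bit machine, with
 * bits = 32, src[0] = 0x000000f0 and shift = 4, __bitmap_shift_right()
 * yields dst[0] = 0x0000000f; the four vacated MS bit positions are
 * filled with zeros.
 */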
140 | |||
141 | |||
142 | /* | ||
143 | * __bitmap_shift_left - logical left shift of the bits in a bitmap | ||
144 | * @dst - destination bitmap | ||
145 | * @src - source bitmap | ||
146 | * @nbits - shift by this many bits | ||
147 | * @bits - bitmap size, in bits | ||
148 | * | ||
149 | * Shifting left (multiplying) means moving bits in the LS -> MS | ||
150 | * direction. Zeros are fed into the vacated LS bit positions | ||
151 | * and those MS bits shifted off the top are lost. | ||
152 | */ | ||
153 | |||
154 | void __bitmap_shift_left(unsigned long *dst, | ||
155 | const unsigned long *src, int shift, int bits) | ||
156 | { | ||
157 | int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; | ||
158 | int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; | ||
159 | for (k = lim - off - 1; k >= 0; --k) { | ||
160 | unsigned long upper, lower; | ||
161 | |||
162 | /* | ||
163 | * If shift is not word aligned, take upper rem bits of | ||
164 | * word below and make them the bottom rem bits of result. | ||
165 | */ | ||
166 | if (rem && k > 0) | ||
167 | lower = src[k - 1]; | ||
168 | else | ||
169 | lower = 0; | ||
170 | upper = src[k]; | ||
171 | if (left && k == lim - 1) | ||
172 | upper &= (1UL << left) - 1; | ||
173 | dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem; | ||
174 | if (left && k + off == lim - 1) | ||
175 | dst[k + off] &= (1UL << left) - 1; | ||
176 | } | ||
177 | if (off) | ||
178 | memset(dst, 0, off*sizeof(unsigned long)); | ||
179 | } | ||
180 | EXPORT_SYMBOL(__bitmap_shift_left); | ||
181 | |||
182 | void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, | ||
183 | const unsigned long *bitmap2, int bits) | ||
184 | { | ||
185 | int k; | ||
186 | int nr = BITS_TO_LONGS(bits); | ||
187 | |||
188 | for (k = 0; k < nr; k++) | ||
189 | dst[k] = bitmap1[k] & bitmap2[k]; | ||
190 | } | ||
191 | EXPORT_SYMBOL(__bitmap_and); | ||
192 | |||
193 | void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, | ||
194 | const unsigned long *bitmap2, int bits) | ||
195 | { | ||
196 | int k; | ||
197 | int nr = BITS_TO_LONGS(bits); | ||
198 | |||
199 | for (k = 0; k < nr; k++) | ||
200 | dst[k] = bitmap1[k] | bitmap2[k]; | ||
201 | } | ||
202 | EXPORT_SYMBOL(__bitmap_or); | ||
203 | |||
204 | void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, | ||
205 | const unsigned long *bitmap2, int bits) | ||
206 | { | ||
207 | int k; | ||
208 | int nr = BITS_TO_LONGS(bits); | ||
209 | |||
210 | for (k = 0; k < nr; k++) | ||
211 | dst[k] = bitmap1[k] ^ bitmap2[k]; | ||
212 | } | ||
213 | EXPORT_SYMBOL(__bitmap_xor); | ||
214 | |||
215 | void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, | ||
216 | const unsigned long *bitmap2, int bits) | ||
217 | { | ||
218 | int k; | ||
219 | int nr = BITS_TO_LONGS(bits); | ||
220 | |||
221 | for (k = 0; k < nr; k++) | ||
222 | dst[k] = bitmap1[k] & ~bitmap2[k]; | ||
223 | } | ||
224 | EXPORT_SYMBOL(__bitmap_andnot); | ||
225 | |||
226 | int __bitmap_intersects(const unsigned long *bitmap1, | ||
227 | const unsigned long *bitmap2, int bits) | ||
228 | { | ||
229 | int k, lim = bits/BITS_PER_LONG; | ||
230 | for (k = 0; k < lim; ++k) | ||
231 | if (bitmap1[k] & bitmap2[k]) | ||
232 | return 1; | ||
233 | |||
234 | if (bits % BITS_PER_LONG) | ||
235 | if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) | ||
236 | return 1; | ||
237 | return 0; | ||
238 | } | ||
239 | EXPORT_SYMBOL(__bitmap_intersects); | ||
240 | |||
241 | int __bitmap_subset(const unsigned long *bitmap1, | ||
242 | const unsigned long *bitmap2, int bits) | ||
243 | { | ||
244 | int k, lim = bits/BITS_PER_LONG; | ||
245 | for (k = 0; k < lim; ++k) | ||
246 | if (bitmap1[k] & ~bitmap2[k]) | ||
247 | return 0; | ||
248 | |||
249 | if (bits % BITS_PER_LONG) | ||
250 | if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) | ||
251 | return 0; | ||
252 | return 1; | ||
253 | } | ||
254 | EXPORT_SYMBOL(__bitmap_subset); | ||
255 | |||
256 | #if BITS_PER_LONG == 32 | ||
257 | int __bitmap_weight(const unsigned long *bitmap, int bits) | ||
258 | { | ||
259 | int k, w = 0, lim = bits/BITS_PER_LONG; | ||
260 | |||
261 | for (k = 0; k < lim; k++) | ||
262 | w += hweight32(bitmap[k]); | ||
263 | |||
264 | if (bits % BITS_PER_LONG) | ||
265 | w += hweight32(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); | ||
266 | |||
267 | return w; | ||
268 | } | ||
269 | #else | ||
270 | int __bitmap_weight(const unsigned long *bitmap, int bits) | ||
271 | { | ||
272 | int k, w = 0, lim = bits/BITS_PER_LONG; | ||
273 | |||
274 | for (k = 0; k < lim; k++) | ||
275 | w += hweight64(bitmap[k]); | ||
276 | |||
277 | if (bits % BITS_PER_LONG) | ||
278 | w += hweight64(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); | ||
279 | |||
280 | return w; | ||
281 | } | ||
282 | #endif | ||
283 | EXPORT_SYMBOL(__bitmap_weight); | ||
284 | |||
285 | /* | ||
286 | * Bitmap printing & parsing functions: first version by Bill Irwin, | ||
287 | * second version by Paul Jackson, third by Joe Korty. | ||
288 | */ | ||
289 | |||
290 | #define CHUNKSZ 32 | ||
291 | #define nbits_to_hold_value(val) fls(val) | ||
292 | #define roundup_power2(val,modulus) (((val) + (modulus) - 1) & ~((modulus) - 1)) | ||
293 | #define unhex(c) (isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10)) | ||
294 | #define BASEDEC 10 /* fancier cpuset lists input in decimal */ | ||
295 | |||
296 | /** | ||
297 | * bitmap_scnprintf - convert bitmap to an ASCII hex string. | ||
298 | * @buf: byte buffer into which string is placed | ||
299 | * @buflen: reserved size of @buf, in bytes | ||
300 | * @maskp: pointer to bitmap to convert | ||
301 | * @nmaskbits: size of bitmap, in bits | ||
302 | * | ||
303 | * Exactly @nmaskbits bits are displayed. Hex digits are grouped into | ||
304 | * comma-separated sets of eight digits per set. | ||
305 | */ | ||
306 | int bitmap_scnprintf(char *buf, unsigned int buflen, | ||
307 | const unsigned long *maskp, int nmaskbits) | ||
308 | { | ||
309 | int i, word, bit, len = 0; | ||
310 | unsigned long val; | ||
311 | const char *sep = ""; | ||
312 | int chunksz; | ||
313 | u32 chunkmask; | ||
314 | |||
315 | chunksz = nmaskbits & (CHUNKSZ - 1); | ||
316 | if (chunksz == 0) | ||
317 | chunksz = CHUNKSZ; | ||
318 | |||
319 | i = roundup_power2(nmaskbits, CHUNKSZ) - CHUNKSZ; | ||
320 | for (; i >= 0; i -= CHUNKSZ) { | ||
321 | chunkmask = ((1ULL << chunksz) - 1); | ||
322 | word = i / BITS_PER_LONG; | ||
323 | bit = i % BITS_PER_LONG; | ||
324 | val = (maskp[word] >> bit) & chunkmask; | ||
325 | len += scnprintf(buf+len, buflen-len, "%s%0*lx", sep, | ||
326 | (chunksz+3)/4, val); | ||
327 | chunksz = CHUNKSZ; | ||
328 | sep = ","; | ||
329 | } | ||
330 | return len; | ||
331 | } | ||
332 | EXPORT_SYMBOL(bitmap_scnprintf); | ||
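
/*
 * Editorial example (not in the original file): with nmaskbits = 16 and a
 * mask whose bits 0-3 and 9 are set (word value 0x20f), bitmap_scnprintf()
 * writes the four hex digits "020f"; with nmaskbits = 40 it would emit two
 * comma-separated chunks, "00,0000020f".
 */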
333 | |||
334 | /** | ||
335 | * bitmap_parse - convert an ASCII hex string into a bitmap. | ||
336 | * @buf: pointer to buffer in user space containing string. | ||
337 | * @buflen: buffer size in bytes. If string is smaller than this | ||
338 | * then it must be terminated with a \0. | ||
339 | * @maskp: pointer to bitmap array that will contain result. | ||
340 | * @nmaskbits: size of bitmap, in bits. | ||
341 | * | ||
342 | * Commas group hex digits into chunks. Each chunk defines exactly 32 | ||
343 | * bits of the resultant bitmask. No chunk may specify a value larger | ||
344 | * than 32 bits (-EOVERFLOW), and if a chunk specifies a smaller value | ||
345 | * then leading 0-bits are prepended. -EINVAL is returned for illegal | ||
346 | * characters and for grouping errors such as "1,,5", ",44", "," and "". | ||
347 | * Leading and trailing whitespace accepted, but not embedded whitespace. | ||
348 | */ | ||
349 | int bitmap_parse(const char __user *ubuf, unsigned int ubuflen, | ||
350 | unsigned long *maskp, int nmaskbits) | ||
351 | { | ||
352 | int c, old_c, totaldigits, ndigits, nchunks, nbits; | ||
353 | u32 chunk; | ||
354 | |||
355 | bitmap_zero(maskp, nmaskbits); | ||
356 | |||
357 | nchunks = nbits = totaldigits = c = 0; | ||
358 | do { | ||
359 | chunk = ndigits = 0; | ||
360 | |||
361 | /* Get the next chunk of the bitmap */ | ||
362 | while (ubuflen) { | ||
363 | old_c = c; | ||
364 | if (get_user(c, ubuf++)) | ||
365 | return -EFAULT; | ||
366 | ubuflen--; | ||
367 | if (isspace(c)) | ||
368 | continue; | ||
369 | |||
370 | /* | ||
371 | * If the last character was a space and the current | ||
372 | * character isn't '\0', we've got embedded whitespace. | ||
373 | * This is a no-no, so throw an error. | ||
374 | */ | ||
375 | if (totaldigits && c && isspace(old_c)) | ||
376 | return -EINVAL; | ||
377 | |||
378 | /* A '\0' or a ',' signal the end of the chunk */ | ||
379 | if (c == '\0' || c == ',') | ||
380 | break; | ||
381 | |||
382 | if (!isxdigit(c)) | ||
383 | return -EINVAL; | ||
384 | |||
385 | /* | ||
386 | * Make sure there are at least 4 free bits in 'chunk'. | ||
387 | * If not, this hexdigit will overflow 'chunk', so | ||
388 | * throw an error. | ||
389 | */ | ||
390 | if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1)) | ||
391 | return -EOVERFLOW; | ||
392 | |||
393 | chunk = (chunk << 4) | unhex(c); | ||
394 | ndigits++; totaldigits++; | ||
395 | } | ||
396 | if (ndigits == 0) | ||
397 | return -EINVAL; | ||
398 | if (nchunks == 0 && chunk == 0) | ||
399 | continue; | ||
400 | |||
401 | __bitmap_shift_left(maskp, maskp, CHUNKSZ, nmaskbits); | ||
402 | *maskp |= chunk; | ||
403 | nchunks++; | ||
404 | nbits += (nchunks == 1) ? nbits_to_hold_value(chunk) : CHUNKSZ; | ||
405 | if (nbits > nmaskbits) | ||
406 | return -EOVERFLOW; | ||
407 | } while (ubuflen && c == ','); | ||
408 | |||
409 | return 0; | ||
410 | } | ||
411 | EXPORT_SYMBOL(bitmap_parse); | ||
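
/*
 * Editorial example (not in the original file): parsing the user string
 * "20f" with nmaskbits = 16 sets bits 0-3 and 9, the inverse of the
 * bitmap_scnprintf() output "020f" above; "1,,5" or ",44" fails with
 * -EINVAL, and a chunk of more than eight hex digits with -EOVERFLOW.
 */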
412 | |||
413 | /* | ||
414 | * bscnl_emit(buf, buflen, rbot, rtop, bp) | ||
415 | * | ||
416 | * Helper routine for bitmap_scnlistprintf(). Write decimal number | ||
417 | * or range to buf, suppressing output past buf+buflen, with optional | ||
418 | * comma-prefix. Return len of what would be written to buf, if it | ||
419 | * all fit. | ||
420 | */ | ||
421 | static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len) | ||
422 | { | ||
423 | if (len > 0) | ||
424 | len += scnprintf(buf + len, buflen - len, ","); | ||
425 | if (rbot == rtop) | ||
426 | len += scnprintf(buf + len, buflen - len, "%d", rbot); | ||
427 | else | ||
428 | len += scnprintf(buf + len, buflen - len, "%d-%d", rbot, rtop); | ||
429 | return len; | ||
430 | } | ||
431 | |||
432 | /** | ||
433 | * bitmap_scnlistprintf - convert bitmap to list format ASCII string | ||
434 | * @buf: byte buffer into which string is placed | ||
435 | * @buflen: reserved size of @buf, in bytes | ||
436 | * @maskp: pointer to bitmap to convert | ||
437 | * @nmaskbits: size of bitmap, in bits | ||
438 | * | ||
439 | * Output format is a comma-separated list of decimal numbers and | ||
440 | * ranges. Consecutively set bits are shown as two hyphen-separated | ||
441 | * decimal numbers, the smallest and largest bit numbers set in | ||
442 | * the range. Output format is compatible with the format | ||
443 | * accepted as input by bitmap_parselist(). | ||
444 | * | ||
445 | * The return value is the number of characters which would be | ||
446 | * generated for the given input, excluding the trailing '\0', as | ||
447 | * per ISO C99. | ||
448 | */ | ||
449 | int bitmap_scnlistprintf(char *buf, unsigned int buflen, | ||
450 | const unsigned long *maskp, int nmaskbits) | ||
451 | { | ||
452 | int len = 0; | ||
453 | /* current bit is 'cur', most recently seen range is [rbot, rtop] */ | ||
454 | int cur, rbot, rtop; | ||
455 | |||
456 | rbot = cur = find_first_bit(maskp, nmaskbits); | ||
457 | while (cur < nmaskbits) { | ||
458 | rtop = cur; | ||
459 | cur = find_next_bit(maskp, nmaskbits, cur+1); | ||
460 | if (cur >= nmaskbits || cur > rtop + 1) { | ||
461 | len = bscnl_emit(buf, buflen, rbot, rtop, len); | ||
462 | rbot = cur; | ||
463 | } | ||
464 | } | ||
465 | return len; | ||
466 | } | ||
467 | EXPORT_SYMBOL(bitmap_scnlistprintf); | ||
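
/*
 * Editorial example (not in the original file): the mask with bits 0-3
 * and 9 set is printed in list format as "0-3,9"; a single set bit, say
 * bit 5, prints simply as "5".
 */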
468 | |||
469 | /** | ||
470 | * bitmap_parselist - convert list format ASCII string to bitmap | ||
471 | * @buf: read nul-terminated user string from this buffer | ||
472 | * @mask: write resulting mask here | ||
473 | * @nmaskbits: number of bits in mask to be written | ||
474 | * | ||
475 | * Input format is a comma-separated list of decimal numbers and | ||
476 | * ranges. Consecutively set bits are shown as two hyphen-separated | ||
477 | * decimal numbers, the smallest and largest bit numbers set in | ||
478 | * the range. | ||
479 | * | ||
480 | * Returns 0 on success, -errno on invalid input strings: | ||
481 | * -EINVAL: second number in range smaller than first | ||
482 | * -EINVAL: invalid character in string | ||
483 | * -ERANGE: bit number specified too large for mask | ||
484 | */ | ||
485 | int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) | ||
486 | { | ||
487 | unsigned a, b; | ||
488 | |||
489 | bitmap_zero(maskp, nmaskbits); | ||
490 | do { | ||
491 | if (!isdigit(*bp)) | ||
492 | return -EINVAL; | ||
493 | b = a = simple_strtoul(bp, (char **)&bp, BASEDEC); | ||
494 | if (*bp == '-') { | ||
495 | bp++; | ||
496 | if (!isdigit(*bp)) | ||
497 | return -EINVAL; | ||
498 | b = simple_strtoul(bp, (char **)&bp, BASEDEC); | ||
499 | } | ||
500 | if (!(a <= b)) | ||
501 | return -EINVAL; | ||
502 | if (b >= nmaskbits) | ||
503 | return -ERANGE; | ||
504 | while (a <= b) { | ||
505 | set_bit(a, maskp); | ||
506 | a++; | ||
507 | } | ||
508 | if (*bp == ',') | ||
509 | bp++; | ||
510 | } while (*bp != '\0' && *bp != '\n'); | ||
511 | return 0; | ||
512 | } | ||
513 | EXPORT_SYMBOL(bitmap_parselist); | ||
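
/*
 * Editorial example (not in the original file): bitmap_parselist("0-3,9",
 * mask, 16) sets bits 0, 1, 2, 3 and 9. "3-1" returns -EINVAL because the
 * second number in the range is smaller than the first, and "0-20" with
 * nmaskbits = 16 returns -ERANGE.
 */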
514 | |||
515 | /** | ||
516 | * bitmap_find_free_region - find a contiguous aligned mem region | ||
517 | * @bitmap: an array of unsigned longs corresponding to the bitmap | ||
518 | * @bits: number of bits in the bitmap | ||
519 | * @order: region size to find (size is actually 1<<order) | ||
520 | * | ||
521 | * This is used to allocate a memory region from a bitmap. The idea is | ||
522 | * that the region has to be 1<<order sized and 1<<order aligned (this | ||
523 | * makes the search algorithm much faster). | ||
524 | * | ||
525 | * The region is marked as set bits in the bitmap if a free one is | ||
526 | * found. | ||
527 | * | ||
528 | * Returns either beginning of region or negative error | ||
529 | */ | ||
530 | int bitmap_find_free_region(unsigned long *bitmap, int bits, int order) | ||
531 | { | ||
532 | unsigned long mask; | ||
533 | int pages = 1 << order; | ||
534 | int i; | ||
535 | |||
536 | if(pages > BITS_PER_LONG) | ||
537 | return -EINVAL; | ||
538 | |||
539 | /* make a mask of the order */ | ||
540 | mask = (1ul << (pages - 1)); | ||
541 | mask += mask - 1; | ||
542 | |||
543 | /* run up the bitmap pages bits at a time */ | ||
544 | for (i = 0; i < bits; i += pages) { | ||
545 | int index = i/BITS_PER_LONG; | ||
546 | int offset = i - (index * BITS_PER_LONG); | ||
547 | if((bitmap[index] & (mask << offset)) == 0) { | ||
548 | /* set region in bimap */ | ||
549 | bitmap[index] |= (mask << offset); | ||
550 | return i; | ||
551 | } | ||
552 | } | ||
553 | return -ENOMEM; | ||
554 | } | ||
555 | EXPORT_SYMBOL(bitmap_find_free_region); | ||
556 | |||
557 | /** | ||
558 | * bitmap_release_region - release allocated bitmap region | ||
559 | * @bitmap: a pointer to the bitmap | ||
560 | * @pos: the beginning of the region | ||
561 | * @order: the order of the bits to release (number is 1<<order) | ||
562 | * | ||
563 | * This is the complement to __bitmap_find_free_region and releases | ||
564 | * the found region (by clearing it in the bitmap). | ||
565 | */ | ||
566 | void bitmap_release_region(unsigned long *bitmap, int pos, int order) | ||
567 | { | ||
568 | int pages = 1 << order; | ||
569 | unsigned long mask = (1ul << (pages - 1)); | ||
570 | int index = pos/BITS_PER_LONG; | ||
571 | int offset = pos - (index * BITS_PER_LONG); | ||
572 | mask += mask - 1; | ||
573 | bitmap[index] &= ~(mask << offset); | ||
574 | } | ||
575 | EXPORT_SYMBOL(bitmap_release_region); | ||
576 | |||
577 | int bitmap_allocate_region(unsigned long *bitmap, int pos, int order) | ||
578 | { | ||
579 | int pages = 1 << order; | ||
580 | unsigned long mask = (1ul << (pages - 1)); | ||
581 | int index = pos/BITS_PER_LONG; | ||
582 | int offset = pos - (index * BITS_PER_LONG); | ||
583 | |||
584 | /* We don't do regions of pages > BITS_PER_LONG. The | ||
585 | * algorithm would be a simple look for multiple zeros in the | ||
586 | * array, but there's no driver today that needs this. If you | ||
587 | * trip this BUG(), you get to code it... */ | ||
588 | BUG_ON(pages > BITS_PER_LONG); | ||
589 | mask += mask - 1; | ||
590 | if (bitmap[index] & (mask << offset)) | ||
591 | return -EBUSY; | ||
592 | bitmap[index] |= (mask << offset); | ||
593 | return 0; | ||
594 | } | ||
595 | EXPORT_SYMBOL(bitmap_allocate_region); | ||
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
new file mode 100644
index 000000000000..6bb7319e09a0
--- /dev/null
+++ b/lib/bust_spinlocks.c
@@ -0,0 +1,39 @@
/*
 * lib/bust_spinlocks.c
 *
 * Provides a minimal bust_spinlocks for architectures which don't
 * have one of their own.
 *
 * bust_spinlocks() clears any spinlocks which would prevent oops, die(), BUG()
 * and panic() information from reaching the user.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/wait.h>
#include <linux/vt_kern.h>


void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;
#ifdef CONFIG_VT
		unblank_screen();
#endif
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console. Now we call printk()
		 * without oops_in_progress set so that printk() will give klogd
		 * and the blanked console a poke. Hold onto your hats...
		 */
		console_loglevel = 15;	/* NMI oopser may have shut the console up */
		printk(" ");
		console_loglevel = loglevel_save;
	}
}


diff --git a/lib/cmdline.c b/lib/cmdline.c
new file mode 100644
index 000000000000..0331ed825ea7
--- /dev/null
+++ b/lib/cmdline.c
@@ -0,0 +1,120 @@
/*
 * linux/lib/cmdline.c
 * Helper functions generally used for parsing kernel command line
 * and module options.
 *
 * Code and copyrights come from init/main.c and arch/i386/kernel/setup.c.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 *
 * GNU Indent formatting options for this file: -kr -i8 -npsl -pcs
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>


/**
 * get_option - Parse integer from an option string
 * @str: option string
 * @pint: (output) integer value parsed from @str
 *
 * Read an int from an option string; if available accept a subsequent
 * comma as well.
 *
 * Return values:
 * 0 : no int in string
 * 1 : int found, no subsequent comma
 * 2 : int found including a subsequent comma
 */

int get_option (char **str, int *pint)
{
	char *cur = *str;

	if (!cur || !(*cur))
		return 0;
	*pint = simple_strtol (cur, str, 0);
	if (cur == *str)
		return 0;
	if (**str == ',') {
		(*str)++;
		return 2;
	}

	return 1;
}

/**
 * get_options - Parse a string into a list of integers
 * @str: String to be parsed
 * @nints: size of integer array
 * @ints: integer array
 *
 * This function parses a string containing a comma-separated
 * list of integers. The parse halts when the array is
 * full, or when no more numbers can be retrieved from the
 * string.
 *
 * Return value is the character in the string which caused
 * the parse to end (typically a null terminator, if @str is
 * completely parseable).
 */

char *get_options(const char *str, int nints, int *ints)
{
	int res, i = 1;

	while (i < nints) {
		res = get_option ((char **)&str, ints + i);
		if (res == 0)
			break;
		i++;
		if (res == 1)
			break;
	}
	ints[0] = i - 1;
	return (char *)str;
}

/**
 * memparse - parse a string with mem suffixes into a number
 * @ptr: Where parse begins
 * @retptr: (output) Pointer to next char after parse completes
 *
 * Parses a string into a number. The number stored at @ptr is
 * potentially suffixed with %K (for kilobytes, or 1024 bytes),
 * %M (for megabytes, or 1048576 bytes), or %G (for gigabytes, or
 * 1073741824). If the number is suffixed with K, M, or G, then
 * the return value is the number multiplied by one kilobyte, one
 * megabyte, or one gigabyte, respectively.
 */

unsigned long long memparse (char *ptr, char **retptr)
{
	unsigned long long ret = simple_strtoull (ptr, retptr, 0);

	switch (**retptr) {
	case 'G':
	case 'g':
		ret <<= 10;
		/* fall through: each smaller suffix multiplies by 1024 more */
	case 'M':
	case 'm':
		ret <<= 10;
		/* fall through */
	case 'K':
	case 'k':
		ret <<= 10;
		(*retptr)++;
	default:
		break;
	}
	return ret;
}


EXPORT_SYMBOL(memparse);
EXPORT_SYMBOL(get_option);
EXPORT_SYMBOL(get_options);
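[A quick editorial illustration of the cmdline helpers above; the input strings and expected values are hypothetical examples, not from the commit:]

/* Editorial sketch of the cmdline helpers above. */
static void cmdline_demo(void)
{
	int ints[5];	/* ints[0] receives the count, ints[1..] the values */
	char *rest = get_options("1,2,3 root=/dev/sda", 5, ints);
	/* ints[0] == 3, ints[1..3] == {1, 2, 3}; rest points at " root=..." */

	char *after;
	unsigned long long size = memparse((char *)"16M", &after);
	/* size == 16 << 20; after points just past the 'M' suffix */
}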
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
new file mode 100644
index 000000000000..115d149af407
--- /dev/null
+++ b/lib/crc-ccitt.c
@@ -0,0 +1,69 @@
/*
 *	linux/lib/crc-ccitt.c
 *
 *	This source code is licensed under the GNU General Public License,
 *	Version 2. See the file COPYING for more details.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-ccitt.h>

/*
 * This mysterious table is just the CRC of each possible byte. It can be
 * computed using the standard bit-at-a-time methods. The polynomial can
 * be seen in entry 128, 0x8408. This corresponds to x^0 + x^5 + x^12.
 * Add the implicit x^16, and you have the standard CRC-CCITT.
 */
u16 const crc_ccitt_table[256] = {
	0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
	0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
	0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
	0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
	0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
	0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
	0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
	0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
	0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
	0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
	0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
	0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
	0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
	0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
	0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
	0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
	0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
	0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
	0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
	0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
	0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
	0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
	0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
	0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
	0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
	0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
	0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
	0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
	0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
	0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
	0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
	0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
};
EXPORT_SYMBOL(crc_ccitt_table);

/**
 *	crc_ccitt - recompute the CRC for the data buffer
 *	@crc - previous CRC value
 *	@buffer - data pointer
 *	@len - number of bytes in the buffer
 */
u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
{
	while (len--)
		crc = crc_ccitt_byte(crc, *buffer++);
	return crc;
}
EXPORT_SYMBOL(crc_ccitt);

MODULE_DESCRIPTION("CRC-CCITT calculations");
MODULE_LICENSE("GPL");
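
[A short editorial usage sketch for crc_ccitt(); the 0xffff seed is the conventional initial value in HDLC-style framing, not something this file mandates, and frame_fcs is a hypothetical caller:]

/* Editorial sketch: CRC-CCITT over a buffer, seeded with 0xffff. */
static u16 frame_fcs(const u8 *data, size_t len)
{
	/* For incremental use, feed chunks by passing the previous
	 * result back in as the seed. */
	return crc_ccitt(0xffff, data, len);
}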
diff --git a/lib/crc32.c b/lib/crc32.c
new file mode 100644
index 000000000000..58b222783f9c
--- /dev/null
+++ b/lib/crc32.c
@@ -0,0 +1,529 @@
/*
 * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
 * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks!
 * Code was from the public domain, copyright abandoned. Code was
 * subsequently included in the kernel, thus was re-licensed under the
 * GNU GPL v2.
 *
 * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
 * Same crc32 function was used in 5 other places in the kernel.
 * I made one version, and deleted the others.
 * There are various incantations of crc32(). Some use a seed of 0 or ~0.
 * Some xor at the end with ~0. The generic crc32() function takes
 * seed as an argument, and doesn't xor at the end. Then individual
 * users can do whatever they need.
 *   drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
 *   fs/jffs2 uses seed 0, doesn't xor with ~0.
 *   fs/partitions/efi.c uses seed ~0, xor's with ~0.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include "crc32defs.h"
#if CRC_LE_BITS == 8
#define tole(x) __constant_cpu_to_le32(x)
#define tobe(x) __constant_cpu_to_be32(x)
#else
#define tole(x) (x)
#define tobe(x) (x)
#endif
#include "crc32table.h"

MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Ethernet CRC32 calculations");
MODULE_LICENSE("GPL");

#if CRC_LE_BITS == 1
/*
 * In fact, the table-based code will work in this case, but it can be
 * simplified by inlining the table in ?: form.
 */

/**
 * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
 * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
 *        other uses, or the previous crc32 value if computing incrementally.
 * @p   - pointer to buffer over which CRC is run
 * @len - length of buffer @p
 */
u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
{
	int i;
	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
	}
	return crc;
}
#else				/* Table-based approach */

/**
 * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
 * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
 *        other uses, or the previous crc32 value if computing incrementally.
 * @p   - pointer to buffer over which CRC is run
 * @len - length of buffer @p
 */
u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_LE_BITS == 8
	const u32 *b = (u32 *)p;
	const u32 *tab = crc32table_le;

# ifdef __LITTLE_ENDIAN
#  define DO_CRC(x) crc = tab[(crc ^ (x)) & 255] ^ (crc >> 8)
# else
#  define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
# endif

	crc = __cpu_to_le32(crc);
	/* Align it */
	if (unlikely(((long)b) & 3 && len)) {
		do {
			u8 *p = (u8 *)b;
			DO_CRC(*p++);
			b = (void *)p;
		} while ((--len) && ((long)b) & 3);
	}
	if (likely(len >= 4)) {
		/* load data 32 bits wide, xor data 32 bits wide. */
		size_t save_len = len & 3;
		len = len >> 2;
		--b; /* use pre increment below(*++b) for speed */
		do {
			crc ^= *++b;
			DO_CRC(0);
			DO_CRC(0);
			DO_CRC(0);
			DO_CRC(0);
		} while (--len);
		b++; /* point to next byte(s) */
		len = save_len;
	}
	/* And the last few bytes */
	if (len) {
		do {
			u8 *p = (u8 *)b;
			DO_CRC(*p++);
			b = (void *)p;
		} while (--len);
	}

	return __le32_to_cpu(crc);
#undef ENDIAN_SHIFT
#undef DO_CRC

# elif CRC_LE_BITS == 4
	while (len--) {
		crc ^= *p++;
		crc = (crc >> 4) ^ crc32table_le[crc & 15];
		crc = (crc >> 4) ^ crc32table_le[crc & 15];
	}
	return crc;
# elif CRC_LE_BITS == 2
	while (len--) {
		crc ^= *p++;
		crc = (crc >> 2) ^ crc32table_le[crc & 3];
		crc = (crc >> 2) ^ crc32table_le[crc & 3];
		crc = (crc >> 2) ^ crc32table_le[crc & 3];
		crc = (crc >> 2) ^ crc32table_le[crc & 3];
	}
	return crc;
# endif
}
#endif

#if CRC_BE_BITS == 1
/*
 * In fact, the table-based code will work in this case, but it can be
 * simplified by inlining the table in ?: form.
 */

/**
 * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
 * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
 *        other uses, or the previous crc32 value if computing incrementally.
 * @p   - pointer to buffer over which CRC is run
 * @len - length of buffer @p
 */
u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
{
	int i;
	while (len--) {
		crc ^= *p++ << 24;
		for (i = 0; i < 8; i++)
			crc =
			    (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE :
					  0);
	}
	return crc;
}

#else				/* Table-based approach */
/**
 * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
 * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
 *        other uses, or the previous crc32 value if computing incrementally.
 * @p   - pointer to buffer over which CRC is run
 * @len - length of buffer @p
 */
u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_BE_BITS == 8
	const u32 *b = (u32 *)p;
	const u32 *tab = crc32table_be;

# ifdef __LITTLE_ENDIAN
#  define DO_CRC(x) crc = tab[(crc ^ (x)) & 255] ^ (crc >> 8)
# else
#  define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
# endif

	crc = __cpu_to_be32(crc);
	/* Align it */
	if (unlikely(((long)b) & 3 && len)) {
		do {
			u8 *p = (u8 *)b;
			DO_CRC(*p++);
			b = (u32 *)p;
		} while ((--len) && ((long)b) & 3);
	}
	if (likely(len >= 4)) {
		/* load data 32 bits wide, xor data 32 bits wide. */
		size_t save_len = len & 3;
		len = len >> 2;
		--b; /* use pre increment below(*++b) for speed */
		do {
			crc ^= *++b;
			DO_CRC(0);
			DO_CRC(0);
			DO_CRC(0);
			DO_CRC(0);
		} while (--len);
		b++; /* point to next byte(s) */
		len = save_len;
	}
	/* And the last few bytes */
	if (len) {
		do {
			u8 *p = (u8 *)b;
			DO_CRC(*p++);
			b = (void *)p;
		} while (--len);
	}
	return __be32_to_cpu(crc);
#undef ENDIAN_SHIFT
#undef DO_CRC

# elif CRC_BE_BITS == 4
	while (len--) {
		crc ^= *p++ << 24;
		crc = (crc << 4) ^ crc32table_be[crc >> 28];
		crc = (crc << 4) ^ crc32table_be[crc >> 28];
	}
	return crc;
# elif CRC_BE_BITS == 2
	while (len--) {
		crc ^= *p++ << 24;
		crc = (crc << 2) ^ crc32table_be[crc >> 30];
		crc = (crc << 2) ^ crc32table_be[crc >> 30];
		crc = (crc << 2) ^ crc32table_be[crc >> 30];
		crc = (crc << 2) ^ crc32table_be[crc >> 30];
	}
	return crc;
# endif
}
#endif

u32 bitreverse(u32 x)
{
	x = (x >> 16) | (x << 16);
	x = (x >> 8 & 0x00ff00ff) | (x << 8 & 0xff00ff00);
	x = (x >> 4 & 0x0f0f0f0f) | (x << 4 & 0xf0f0f0f0);
	x = (x >> 2 & 0x33333333) | (x << 2 & 0xcccccccc);
	x = (x >> 1 & 0x55555555) | (x << 1 & 0xaaaaaaaa);
	return x;
}

EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(crc32_be);
EXPORT_SYMBOL(bitreverse);

266 | /* | ||
267 | * A brief CRC tutorial. | ||
268 | * | ||
269 | * A CRC is a long-division remainder. You add the CRC to the message, | ||
270 | * and the whole thing (message+CRC) is a multiple of the given | ||
271 | * CRC polynomial. To check the CRC, you can either check that the | ||
272 | * CRC matches the recomputed value, *or* you can check that the | ||
273 | * remainder computed on the message+CRC is 0. This latter approach | ||
274 | * is used by a lot of hardware implementations, and is why so many | ||
275 | * protocols put the end-of-frame flag after the CRC. | ||
276 | * | ||
277 | * It's actually the same long division you learned in school, except that | ||
278 | * - We're working in binary, so the digits are only 0 and 1, and | ||
279 | * - When dividing polynomials, there are no carries. Rather than add and | ||
280 | * subtract, we just xor. Thus, we tend to get a bit sloppy about | ||
281 | * the difference between adding and subtracting. | ||
282 | * | ||
283 | * A 32-bit CRC polynomial is actually 33 bits long. But since it's | ||
284 | * 33 bits long, bit 32 is always going to be set, so usually the CRC | ||
285 | * is written in hex with the most significant bit omitted. (If you're | ||
286 | * familiar with the IEEE 754 floating-point format, it's the same idea.) | ||
287 | * | ||
288 | * Note that a CRC is computed over a string of *bits*, so you have | ||
289 | * to decide on the endianness of the bits within each byte. To get | ||
290 | * the best error-detecting properties, this should correspond to the | ||
291 | * order they're actually sent. For example, standard RS-232 serial is | ||
292 | * little-endian; the most significant bit (sometimes used for parity) | ||
293 | * is sent last. And when appending a CRC word to a message, you should | ||
294 | * do it in the right order, matching the endianness. | ||
295 | * | ||
296 | * Just like with ordinary division, the remainder is always smaller than | ||
297 | * the divisor (the CRC polynomial) you're dividing by. Each step of the | ||
298 | * division, you take one more digit (bit) of the dividend and append it | ||
299 | * to the current remainder. Then you figure out the appropriate multiple | ||
300 | * of the divisor to subtract to being the remainder back into range. | ||
301 | * In binary, it's easy - it has to be either 0 or 1, and to make the | ||
302 | * XOR cancel, it's just a copy of bit 32 of the remainder. | ||
303 | * | ||
304 | * When computing a CRC, we don't care about the quotient, so we can | ||
305 | * throw the quotient bit away, but subtract the appropriate multiple of | ||
306 | * the polynomial from the remainder and we're back to where we started, | ||
307 | * ready to process the next bit. | ||
308 | * | ||
309 | * A big-endian CRC written this way would be coded like: | ||
310 | * for (i = 0; i < input_bits; i++) { | ||
311 | * multiple = remainder & 0x80000000 ? CRCPOLY : 0; | ||
312 | * remainder = (remainder << 1 | next_input_bit()) ^ multiple; | ||
313 | * } | ||
314 | * Notice how, to get at bit 32 of the shifted remainder, we look | ||
315 | * at bit 31 of the remainder *before* shifting it. | ||
316 | * | ||
317 | * But also notice how the next_input_bit() bits we're shifting into | ||
318 | * the remainder don't actually affect any decision-making until | ||
319 | * 32 bits later. Thus, the first 32 cycles of this are pretty boring. | ||
320 | * Also, to add the CRC to a message, we need a 32-bit-long hole for it at | ||
321 | * the end, so we have to add 32 extra cycles shifting in zeros at the | ||
322 | * end of every message, | ||
323 | * | ||
324 | * So the standard trick is to rearrage merging in the next_input_bit() | ||
325 | * until the moment it's needed. Then the first 32 cycles can be precomputed, | ||
326 | * and merging in the final 32 zero bits to make room for the CRC can be | ||
327 | * skipped entirely. | ||
328 | * This changes the code to: | ||
329 | * for (i = 0; i < input_bits; i++) { | ||
330 | * remainder ^= next_input_bit() << 31; | ||
331 | * multiple = (remainder & 0x80000000) ? CRCPOLY : 0; | ||
332 | * remainder = (remainder << 1) ^ multiple; | ||
333 | * } | ||
334 | * With this optimization, the little-endian code is simpler: | ||
335 | * for (i = 0; i < input_bits; i++) { | ||
336 | * remainder ^= next_input_bit(); | ||
337 | * multiple = (remainder & 1) ? CRCPOLY : 0; | ||
338 | * remainder = (remainder >> 1) ^ multiple; | ||
339 | * } | ||
340 | * | ||
341 | * Note that the other details of endianness have been hidden in CRCPOLY | ||
342 | * (which must be bit-reversed) and next_input_bit(). | ||
343 | * | ||
344 | * However, as long as next_input_bit() is returning the bits in a sensible | ||
345 | * order, we can actually do the merging 8 or more bits at a time rather | ||
346 | * than one bit at a time: | ||
347 | * for (i = 0; i < input_bytes; i++) { | ||
348 | * remainder ^= next_input_byte() << 24; | ||
349 | * for (j = 0; j < 8; j++) { | ||
350 | * multiple = (remainder & 0x80000000) ? CRCPOLY : 0; | ||
351 | * remainder = (remainder << 1) ^ multiple; | ||
352 | * } | ||
353 | * } | ||
354 | * Or in little-endian: | ||
355 | * for (i = 0; i < input_bytes; i++) { | ||
356 | * remainder ^= next_input_byte(); | ||
357 | * for (j = 0; j < 8; j++) { | ||
358 | * multiple = (remainder & 1) ? CRCPOLY : 0; | ||
359 | * remainder = (remainder >> 1) ^ multiple; | ||
360 | * } | ||
361 | * } | ||
362 | * If the input is a multiple of 32 bits, you can even XOR in a 32-bit | ||
363 | * word at a time and increase the inner loop count to 32. | ||
364 | * | ||
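* For example, a sketch of that word-at-a-time little-endian variant
* (next_input_word() here is hypothetical; it must deliver its 32 bits
* in the same order the byte-at-a-time loop would see them):
* for (i = 0; i < input_words; i++) {
* 	remainder ^= next_input_word();
* 	for (j = 0; j < 32; j++) {
* 		multiple = (remainder & 1) ? CRCPOLY : 0;
* 		remainder = (remainder >> 1) ^ multiple;
* 	}
* }
*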
365 | * You can also mix and match the two loop styles, for example doing the | ||
366 | * bulk of a message byte-at-a-time and adding bit-at-a-time processing | ||
367 | * for any fractional bytes at the end. | ||
368 | * | ||
369 | * The only remaining optimization is to the byte-at-a-time table method. | ||
370 | * Here, rather than just shifting one bit of the remainder to decide | ||
371 | * on the correct multiple to subtract, we can shift a byte at a time. | ||
372 | * This produces a 40-bit (rather than a 33-bit) intermediate remainder, | ||
373 | * but again the multiple of the polynomial to subtract depends only on | ||
374 | * the high bits, the high 8 bits in this case. | ||
375 | * | ||
376 | * The multiple we need in that case is the low 32 bits of a 40-bit | ||
377 | * value whose high 8 bits are given, and which is a multiple of the | ||
378 | * generator polynomial. This is simply the CRC-32 of the given | ||
379 | * one-byte message. | ||
380 | * | ||
381 | * Two more details: normally, appending zero bits to a message which | ||
382 | * is already a multiple of a polynomial produces a larger multiple of that | ||
383 | * polynomial. To enable a CRC to detect this condition, it's common to | ||
384 | * invert the CRC before appending it. This makes the remainder of the | ||
385 | * message+crc come out not as zero, but some fixed non-zero value. | ||
386 | * | ||
387 | * The same problem applies to zero bits prepended to the message, and | ||
388 | * a similar solution is used. Instead of starting with a remainder of | ||
389 | * 0, an initial remainder of all ones is used. As long as you start | ||
390 | * the same way on decoding, it doesn't make a difference. | ||
391 | */ | ||
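/*
 * Putting the pieces above together, a minimal sketch (an illustration,
 * not this file's actual implementation) of the byte-at-a-time,
 * table-driven little-endian CRC.  crc32table_le[] is assumed to be the
 * 256-entry table whose entry i is the CRC-32 of the one-byte message i,
 * e.g. as emitted by gen_crc32table:
 *
 *	static u32 crc32_le_sketch(u32 crc, unsigned char const *p, size_t len)
 *	{
 *		while (len--)
 *			crc = (crc >> 8) ^ crc32table_le[(crc ^ *p++) & 0xff];
 *		return crc;
 *	}
 *
 * Following the inversion convention described above, a typical caller
 * computes ~crc32_le_sketch(~0, buf, len).
 */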
392 | |||
393 | #ifdef UNITTEST | ||
394 | |||
395 | #include <stdlib.h> | ||
396 | #include <stdio.h> | ||
397 | |||
398 | #if 0 /* Not used at present */ | ||
399 | static void | ||
400 | buf_dump(char const *prefix, unsigned char const *buf, size_t len) | ||
401 | { | ||
402 | fputs(prefix, stdout); | ||
403 | while (len--) | ||
404 | printf(" %02x", *buf++); | ||
405 | putchar('\n'); | ||
406 | |||
407 | } | ||
408 | #endif | ||
409 | |||
410 | static void bytereverse(unsigned char *buf, size_t len) | ||
411 | { | ||
412 | while (len--) { | ||
413 | unsigned char x = *buf; | ||
414 | x = (x >> 4) | (x << 4); | ||
415 | x = (x >> 2 & 0x33) | (x << 2 & 0xcc); | ||
416 | x = (x >> 1 & 0x55) | (x << 1 & 0xaa); | ||
417 | *buf++ = x; | ||
418 | } | ||
419 | } | ||
420 | |||
421 | static void random_garbage(unsigned char *buf, size_t len) | ||
422 | { | ||
423 | while (len--) | ||
424 | *buf++ = (unsigned char) random(); | ||
425 | } | ||
426 | |||
427 | #if 0 /* Not used at present */ | ||
428 | static void store_le(u32 x, unsigned char *buf) | ||
429 | { | ||
430 | buf[0] = (unsigned char) x; | ||
431 | buf[1] = (unsigned char) (x >> 8); | ||
432 | buf[2] = (unsigned char) (x >> 16); | ||
433 | buf[3] = (unsigned char) (x >> 24); | ||
434 | } | ||
435 | #endif | ||
436 | |||
437 | static void store_be(u32 x, unsigned char *buf) | ||
438 | { | ||
439 | buf[0] = (unsigned char) (x >> 24); | ||
440 | buf[1] = (unsigned char) (x >> 16); | ||
441 | buf[2] = (unsigned char) (x >> 8); | ||
442 | buf[3] = (unsigned char) x; | ||
443 | } | ||
444 | |||
445 | /* | ||
446 | * This checks that CRC(buf + CRC(buf)) = 0, and that | ||
447 | * CRC commutes with bit-reversal. This has the side effect | ||
448 | * of bytewise bit-reversing the input buffer, and returns | ||
449 | * the CRC of the reversed buffer. | ||
450 | */ | ||
451 | static u32 test_step(u32 init, unsigned char *buf, size_t len) | ||
452 | { | ||
453 | u32 crc1, crc2; | ||
454 | size_t i; | ||
455 | |||
456 | crc1 = crc32_be(init, buf, len); | ||
457 | store_be(crc1, buf + len); | ||
458 | crc2 = crc32_be(init, buf, len + 4); | ||
459 | if (crc2) | ||
460 | printf("\nCRC cancellation fail: 0x%08x should be 0\n", | ||
461 | crc2); | ||
462 | |||
463 | for (i = 0; i <= len + 4; i++) { | ||
464 | crc2 = crc32_be(init, buf, i); | ||
465 | crc2 = crc32_be(crc2, buf + i, len + 4 - i); | ||
466 | if (crc2) | ||
467 | printf("\nCRC split fail: 0x%08x\n", crc2); | ||
468 | } | ||
469 | |||
470 | /* Now swap it around for the other test */ | ||
471 | |||
472 | bytereverse(buf, len + 4); | ||
473 | init = bitreverse(init); | ||
474 | crc2 = bitreverse(crc1); | ||
475 | if (crc1 != bitreverse(crc2)) | ||
476 | printf("\nBit reversal fail: 0x%08x -> %0x08x -> 0x%08x\n", | ||
477 | crc1, crc2, bitreverse(crc2)); | ||
478 | crc1 = crc32_le(init, buf, len); | ||
479 | if (crc1 != crc2) | ||
480 | printf("\nCRC endianness fail: 0x%08x != 0x%08x\n", crc1, | ||
481 | crc2); | ||
482 | crc2 = crc32_le(init, buf, len + 4); | ||
483 | if (crc2) | ||
484 | printf("\nCRC cancellation fail: 0x%08x should be 0\n", | ||
485 | crc2); | ||
486 | |||
487 | for (i = 0; i <= len + 4; i++) { | ||
488 | crc2 = crc32_le(init, buf, i); | ||
489 | crc2 = crc32_le(crc2, buf + i, len + 4 - i); | ||
490 | if (crc2) | ||
491 | printf("\nCRC split fail: 0x%08x\n", crc2); | ||
492 | } | ||
493 | |||
494 | return crc1; | ||
495 | } | ||
496 | |||
497 | #define SIZE 64 | ||
498 | #define INIT1 0 | ||
499 | #define INIT2 0 | ||
500 | |||
501 | int main(void) | ||
502 | { | ||
503 | unsigned char buf1[SIZE + 4]; | ||
504 | unsigned char buf2[SIZE + 4]; | ||
505 | unsigned char buf3[SIZE + 4]; | ||
506 | int i, j; | ||
507 | u32 crc1, crc2, crc3; | ||
508 | |||
509 | for (i = 0; i <= SIZE; i++) { | ||
510 | printf("\rTesting length %d...", i); | ||
511 | fflush(stdout); | ||
512 | random_garbage(buf1, i); | ||
513 | random_garbage(buf2, i); | ||
514 | for (j = 0; j < i; j++) | ||
515 | buf3[j] = buf1[j] ^ buf2[j]; | ||
516 | |||
517 | crc1 = test_step(INIT1, buf1, i); | ||
518 | crc2 = test_step(INIT2, buf2, i); | ||
519 | /* Now check that CRC(buf1 ^ buf2) = CRC(buf1) ^ CRC(buf2) */ | ||
520 | crc3 = test_step(INIT1 ^ INIT2, buf3, i); | ||
521 | if (crc3 != (crc1 ^ crc2)) | ||
522 | printf("CRC XOR fail: 0x%08x != 0x%08x ^ 0x%08x\n", | ||
523 | crc3, crc1, crc2); | ||
524 | } | ||
525 | printf("\nAll test complete. No failures expected.\n"); | ||
526 | return 0; | ||
527 | } | ||
528 | |||
529 | #endif /* UNITTEST */ | ||
diff --git a/lib/crc32defs.h b/lib/crc32defs.h new file mode 100644 index 000000000000..9b6773d73749 --- /dev/null +++ b/lib/crc32defs.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * There are multiple 16-bit CRC polynomials in common use, but this is | ||
3 | * *the* standard CRC-32 polynomial, first popularized by Ethernet. | ||
4 | * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0 | ||
5 | */ | ||
6 | #define CRCPOLY_LE 0xedb88320 | ||
7 | #define CRCPOLY_BE 0x04c11db7 | ||
8 | |||
9 | /* How many bits at a time to use. Requires a table of 4<<CRC_xx_BITS bytes. */ | ||
10 | /* For less performance-sensitive applications, use 4 */ | ||
11 | #ifndef CRC_LE_BITS | ||
12 | # define CRC_LE_BITS 8 | ||
13 | #endif | ||
14 | #ifndef CRC_BE_BITS | ||
15 | # define CRC_BE_BITS 8 | ||
16 | #endif | ||
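/* For example, CRC_LE_BITS == 8 implies a 4 << 8 == 1024-byte table,
 * while CRC_LE_BITS == 4 implies a 4 << 4 == 64-byte one.
 */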
17 | |||
18 | /* | ||
19 | * Little-endian CRC computation. Used with serial bit streams sent | ||
20 | * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC. | ||
21 | */ | ||
22 | #if CRC_LE_BITS > 8 || CRC_LE_BITS < 1 || CRC_LE_BITS & CRC_LE_BITS-1 | ||
23 | # error CRC_LE_BITS must be a power of 2 between 1 and 8 | ||
24 | #endif | ||
25 | |||
26 | /* | ||
27 | * Big-endian CRC computation. Used with serial bit streams sent | ||
28 | * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC. | ||
29 | */ | ||
30 | #if CRC_BE_BITS > 8 || CRC_BE_BITS < 1 || CRC_BE_BITS & CRC_BE_BITS-1 | ||
31 | # error CRC_BE_BITS must be a power of 2 between 1 and 8 | ||
32 | #endif | ||
diff --git a/lib/ctype.c b/lib/ctype.c new file mode 100644 index 000000000000..d02ace14a322 --- /dev/null +++ b/lib/ctype.c | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * linux/lib/ctype.c | ||
3 | * | ||
4 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
5 | */ | ||
6 | |||
7 | #include <linux/ctype.h> | ||
8 | #include <linux/module.h> | ||
9 | |||
10 | unsigned char _ctype[] = { | ||
11 | _C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ | ||
12 | _C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ | ||
13 | _C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ | ||
14 | _C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ | ||
15 | _S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ | ||
16 | _P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ | ||
17 | _D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ | ||
18 | _D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ | ||
19 | _P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ | ||
20 | _U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ | ||
21 | _U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ | ||
22 | _U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ | ||
23 | _P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ | ||
24 | _L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ | ||
25 | _L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ | ||
26 | _L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ | ||
27 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ | ||
28 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ | ||
29 | _S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ | ||
30 | _P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ | ||
31 | _U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ | ||
32 | _U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ | ||
33 | _L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ | ||
34 | _L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ | ||
35 | |||
36 | EXPORT_SYMBOL(_ctype); | ||
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c new file mode 100644 index 000000000000..6658d81e1836 --- /dev/null +++ b/lib/dec_and_lock.c | |||
@@ -0,0 +1,40 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/spinlock.h> | ||
3 | #include <asm/atomic.h> | ||
4 | |||
5 | /* | ||
6 | * This is an architecture-neutral, but slow, | ||
7 | * implementation of the notion of "decrement | ||
8 | * a reference count, and return locked if it | ||
9 | * decremented to zero". | ||
10 | * | ||
11 | * NOTE NOTE NOTE! This is _not_ equivalent to | ||
12 | * | ||
13 | * if (atomic_dec_and_test(&atomic)) { | ||
14 | * spin_lock(&lock); | ||
15 | * return 1; | ||
16 | * } | ||
17 | * return 0; | ||
18 | * | ||
19 | * because the spin-lock and the decrement must be | ||
20 | * "atomic". | ||
21 | * | ||
22 | * This slow version gets the spinlock unconditionally, | ||
23 | * and releases it if it isn't needed. Architectures | ||
24 | * are encouraged to come up with better approaches; | ||
25 | * for example, this is done trivially and efficiently | ||
26 | * using a load-locked/store-conditional approach. | ||
27 | */ | ||
28 | |||
29 | #ifndef ATOMIC_DEC_AND_LOCK | ||
30 | int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) | ||
31 | { | ||
32 | spin_lock(lock); | ||
33 | if (atomic_dec_and_test(atomic)) | ||
34 | return 1; | ||
35 | spin_unlock(lock); | ||
36 | return 0; | ||
37 | } | ||
38 | |||
39 | EXPORT_SYMBOL(_atomic_dec_and_lock); | ||
40 | #endif | ||
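/*
 * For illustration, the usual caller pattern looks like this (the
 * object and lock names are hypothetical, not part of this file).
 * A nonzero return means the count hit zero with the lock held, so
 * the caller can safely unlink and free the object:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->list);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */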
diff --git a/lib/div64.c b/lib/div64.c new file mode 100644 index 000000000000..365719f84832 --- /dev/null +++ b/lib/div64.c | |||
@@ -0,0 +1,61 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com> | ||
3 | * | ||
4 | * Based on former do_div() implementation from asm-parisc/div64.h: | ||
5 | * Copyright (C) 1999 Hewlett-Packard Co | ||
6 | * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com> | ||
7 | * | ||
8 | * | ||
9 | * Generic C version of 64bit/32bit division and modulo, with | ||
10 | * 64bit result and 32bit remainder. | ||
11 | * | ||
12 | * The fast case for (n>>32 == 0) is handled inline by do_div(). | ||
13 | * | ||
14 | * Code generated for this function might be very inefficient | ||
15 | * for some CPUs. __div64_32() can be overridden by linking arch-specific | ||
16 | * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S. | ||
17 | */ | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <asm/div64.h> | ||
22 | |||
23 | /* Not needed on 64bit architectures */ | ||
24 | #if BITS_PER_LONG == 32 | ||
25 | |||
26 | uint32_t __div64_32(uint64_t *n, uint32_t base) | ||
27 | { | ||
28 | uint64_t rem = *n; | ||
29 | uint64_t b = base; | ||
30 | uint64_t res, d = 1; | ||
31 | uint32_t high = rem >> 32; | ||
32 | |||
33 | /* Reduce the thing a bit first */ | ||
34 | res = 0; | ||
35 | if (high >= base) { | ||
36 | high /= base; | ||
37 | res = (uint64_t) high << 32; | ||
38 | rem -= (uint64_t) (high*base) << 32; | ||
39 | } | ||
40 | |||
41 | while ((int64_t)b > 0 && b < rem) { | ||
42 | b = b+b; | ||
43 | d = d+d; | ||
44 | } | ||
45 | |||
46 | do { | ||
47 | if (rem >= b) { | ||
48 | rem -= b; | ||
49 | res += d; | ||
50 | } | ||
51 | b >>= 1; | ||
52 | d >>= 1; | ||
53 | } while (d); | ||
54 | |||
55 | *n = res; | ||
56 | return rem; | ||
57 | } | ||
58 | |||
59 | EXPORT_SYMBOL(__div64_32); | ||
60 | |||
61 | #endif /* BITS_PER_LONG == 32 */ | ||
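/*
 * For reference, callers normally use the do_div() macro from
 * <asm/div64.h> rather than calling __div64_32() directly.
 * do_div(n, base) divides the 64-bit variable n in place and
 * evaluates to the 32-bit remainder.  A minimal sketch:
 *
 *	u64 ns = 1000000123ULL;
 *	u32 rem = do_div(ns, 1000000);
 *
 * after which ns is 1000 and rem is 123.
 */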
diff --git a/lib/dump_stack.c b/lib/dump_stack.c new file mode 100644 index 000000000000..53bff4c8452b --- /dev/null +++ b/lib/dump_stack.c | |||
@@ -0,0 +1,15 @@ | |||
1 | /* | ||
2 | * Provide a default dump_stack() function for architectures | ||
3 | * which don't implement their own. | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/module.h> | ||
8 | |||
9 | void dump_stack(void) | ||
10 | { | ||
11 | printk(KERN_NOTICE | ||
12 | "This architecture does not implement dump_stack()\n"); | ||
13 | } | ||
14 | |||
15 | EXPORT_SYMBOL(dump_stack); | ||
diff --git a/lib/errno.c b/lib/errno.c new file mode 100644 index 000000000000..41cb9d76c052 --- /dev/null +++ b/lib/errno.c | |||
@@ -0,0 +1,7 @@ | |||
1 | /* | ||
2 | * linux/lib/errno.c | ||
3 | * | ||
4 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
5 | */ | ||
6 | |||
7 | int errno; | ||
diff --git a/lib/extable.c b/lib/extable.c new file mode 100644 index 000000000000..3f677a8f0c3c --- /dev/null +++ b/lib/extable.c | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * lib/extable.c | ||
3 | * Derived from arch/ppc/mm/extable.c and arch/i386/mm/extable.c. | ||
4 | * | ||
5 | * Copyright (C) 2004 Paul Mackerras, IBM Corp. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/sort.h> | ||
17 | #include <asm/uaccess.h> | ||
18 | |||
19 | extern struct exception_table_entry __start___ex_table[]; | ||
20 | extern struct exception_table_entry __stop___ex_table[]; | ||
21 | |||
22 | #ifndef ARCH_HAS_SORT_EXTABLE | ||
23 | /* | ||
24 | * The exception table needs to be sorted so that the binary | ||
25 | * search that we use to find entries in it works properly. | ||
26 | * This is used both for the kernel exception table and for | ||
27 | * the exception tables of modules that get loaded. | ||
28 | */ | ||
29 | static int cmp_ex(const void *a, const void *b) | ||
30 | { | ||
31 | const struct exception_table_entry *x = a, *y = b; | ||
32 | |||
33 | /* avoid overflow */ | ||
34 | if (x->insn > y->insn) | ||
35 | return 1; | ||
36 | if (x->insn < y->insn) | ||
37 | return -1; | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | void sort_extable(struct exception_table_entry *start, | ||
42 | struct exception_table_entry *finish) | ||
43 | { | ||
44 | sort(start, finish - start, sizeof(struct exception_table_entry), | ||
45 | cmp_ex, NULL); | ||
46 | } | ||
47 | #endif | ||
48 | |||
49 | #ifndef ARCH_HAS_SEARCH_EXTABLE | ||
50 | /* | ||
51 | * Search one exception table for an entry corresponding to the | ||
52 | * given instruction address, and return the address of the entry, | ||
53 | * or NULL if none is found. | ||
54 | * We use a binary search, and thus we assume that the table is | ||
55 | * already sorted. | ||
56 | */ | ||
57 | const struct exception_table_entry * | ||
58 | search_extable(const struct exception_table_entry *first, | ||
59 | const struct exception_table_entry *last, | ||
60 | unsigned long value) | ||
61 | { | ||
62 | while (first <= last) { | ||
63 | const struct exception_table_entry *mid; | ||
64 | |||
65 | mid = (last - first) / 2 + first; | ||
66 | /* | ||
67 | * careful, the distance between entries can be | ||
68 | * larger than 2GB: | ||
69 | */ | ||
70 | if (mid->insn < value) | ||
71 | first = mid + 1; | ||
72 | else if (mid->insn > value) | ||
73 | last = mid - 1; | ||
74 | else | ||
75 | return mid; | ||
76 | } | ||
77 | return NULL; | ||
78 | } | ||
79 | #endif | ||
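/*
 * For illustration, architectures typically call this from their fault
 * handlers along these lines (regs and the eip/fixup fields are
 * stand-ins for whatever the architecture actually uses):
 *
 *	const struct exception_table_entry *fixup;
 *
 *	fixup = search_extable(__start___ex_table,
 *			       __stop___ex_table - 1, regs->eip);
 *	if (fixup)
 *		regs->eip = fixup->fixup;
 */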
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c new file mode 100644 index 000000000000..d08302d2a42c --- /dev/null +++ b/lib/find_next_bit.c | |||
@@ -0,0 +1,55 @@ | |||
1 | /* find_next_bit.c: fallback find next bit implementation | ||
2 | * | ||
3 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/bitops.h> | ||
13 | |||
14 | int find_next_bit(const unsigned long *addr, int size, int offset) | ||
15 | { | ||
16 | const unsigned long *base; | ||
17 | const int NBITS = sizeof(*addr) * 8; | ||
18 | unsigned long tmp; | ||
19 | |||
20 | base = addr; | ||
21 | if (offset) { | ||
22 | int suboffset; | ||
23 | |||
24 | addr += offset / NBITS; | ||
25 | |||
26 | suboffset = offset % NBITS; | ||
27 | if (suboffset) { | ||
28 | tmp = *addr; | ||
29 | tmp >>= suboffset; | ||
30 | if (tmp) | ||
31 | goto finish; | ||
32 | } | ||
33 | |||
34 | addr++; | ||
35 | } | ||
36 | |||
37 | while ((tmp = *addr) == 0) | ||
38 | addr++; | ||
39 | |||
40 | offset = (addr - base) * NBITS; | ||
41 | |||
42 | finish: | ||
43 | /* count the remaining bits without using __ffs() since that takes a 32-bit arg */ | ||
44 | while (!(tmp & 0xff)) { | ||
45 | offset += 8; | ||
46 | tmp >>= 8; | ||
47 | } | ||
48 | |||
49 | while (!(tmp & 1)) { | ||
50 | offset++; | ||
51 | tmp >>= 1; | ||
52 | } | ||
53 | |||
54 | return offset; | ||
55 | } | ||
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c new file mode 100644 index 000000000000..bea5d97df991 --- /dev/null +++ b/lib/gen_crc32table.c | |||
@@ -0,0 +1,82 @@ | |||
1 | #include <stdio.h> | ||
2 | #include "crc32defs.h" | ||
3 | #include <inttypes.h> | ||
4 | |||
5 | #define ENTRIES_PER_LINE 4 | ||
6 | |||
7 | #define LE_TABLE_SIZE (1 << CRC_LE_BITS) | ||
8 | #define BE_TABLE_SIZE (1 << CRC_BE_BITS) | ||
9 | |||
10 | static uint32_t crc32table_le[LE_TABLE_SIZE]; | ||
11 | static uint32_t crc32table_be[BE_TABLE_SIZE]; | ||
12 | |||
13 | /** | ||
14 | * crc32init_le() - initialize LE table data | ||
15 | * | ||
16 | * crc is the crc of the byte i; other entries are filled in based on the | ||
17 | * fact that crctable[i^j] = crctable[i] ^ crctable[j]. | ||
18 | * | ||
19 | */ | ||
20 | static void crc32init_le(void) | ||
21 | { | ||
22 | unsigned i, j; | ||
23 | uint32_t crc = 1; | ||
24 | |||
25 | crc32table_le[0] = 0; | ||
26 | |||
27 | for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) { | ||
28 | crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); | ||
29 | for (j = 0; j < LE_TABLE_SIZE; j += 2 * i) | ||
30 | crc32table_le[i + j] = crc ^ crc32table_le[j]; | ||
31 | } | ||
32 | } | ||
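/*
 * A hypothetical spot-check of the identity above: once the table is
 * filled in, crc32table_le[6] must equal
 * crc32table_le[4] ^ crc32table_le[2], since 6 == (4 ^ 2).
 */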
33 | |||
34 | /** | ||
35 | * crc32init_be() - initialize BE table data | ||
36 | */ | ||
37 | static void crc32init_be(void) | ||
38 | { | ||
39 | unsigned i, j; | ||
40 | uint32_t crc = 0x80000000; | ||
41 | |||
42 | crc32table_be[0] = 0; | ||
43 | |||
44 | for (i = 1; i < BE_TABLE_SIZE; i <<= 1) { | ||
45 | crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0); | ||
46 | for (j = 0; j < i; j++) | ||
47 | crc32table_be[i + j] = crc ^ crc32table_be[j]; | ||
48 | } | ||
49 | } | ||
50 | |||
51 | static void output_table(uint32_t table[], int len, char *trans) | ||
52 | { | ||
53 | int i; | ||
54 | |||
55 | for (i = 0; i < len - 1; i++) { | ||
56 | if (i % ENTRIES_PER_LINE == 0) | ||
57 | printf("\n"); | ||
58 | printf("%s(0x%8.8xL), ", trans, table[i]); | ||
59 | } | ||
60 | printf("%s(0x%8.8xL)\n", trans, table[len - 1]); | ||
61 | } | ||
62 | |||
63 | int main(int argc, char** argv) | ||
64 | { | ||
65 | printf("/* this file is generated - do not edit */\n\n"); | ||
66 | |||
67 | if (CRC_LE_BITS > 1) { | ||
68 | crc32init_le(); | ||
69 | printf("static const u32 crc32table_le[] = {"); | ||
70 | output_table(crc32table_le, LE_TABLE_SIZE, "tole"); | ||
71 | printf("};\n"); | ||
72 | } | ||
73 | |||
74 | if (CRC_BE_BITS > 1) { | ||
75 | crc32init_be(); | ||
76 | printf("static const u32 crc32table_be[] = {"); | ||
77 | output_table(crc32table_be, BE_TABLE_SIZE, "tobe"); | ||
78 | printf("};\n"); | ||
79 | } | ||
80 | |||
81 | return 0; | ||
82 | } | ||
diff --git a/lib/halfmd4.c b/lib/halfmd4.c new file mode 100644 index 000000000000..e11db26f8ae5 --- /dev/null +++ b/lib/halfmd4.c | |||
@@ -0,0 +1,66 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/cryptohash.h> | ||
4 | |||
5 | /* F, G and H are basic MD4 functions: selection, majority, parity */ | ||
6 | #define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) | ||
7 | #define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z))) | ||
8 | #define H(x, y, z) ((x) ^ (y) ^ (z)) | ||
9 | |||
10 | /* | ||
11 | * The generic round function. The application is so specific that | ||
12 | * we don't bother protecting all the arguments with parens, as is generally | ||
13 | * good macro practice, in favor of extra legibility. | ||
14 | * Rotation is separate from addition to prevent recomputation | ||
15 | */ | ||
16 | #define ROUND(f, a, b, c, d, x, s) \ | ||
17 | (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s))) | ||
18 | #define K1 0 | ||
19 | #define K2 013240474631UL | ||
20 | #define K3 015666365641UL | ||
21 | |||
22 | /* | ||
23 | * Basic cut-down MD4 transform. Returns only 32 bits of result. | ||
24 | */ | ||
25 | __u32 half_md4_transform(__u32 buf[4], __u32 const in[8]) | ||
26 | { | ||
27 | __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; | ||
28 | |||
29 | /* Round 1 */ | ||
30 | ROUND(F, a, b, c, d, in[0] + K1, 3); | ||
31 | ROUND(F, d, a, b, c, in[1] + K1, 7); | ||
32 | ROUND(F, c, d, a, b, in[2] + K1, 11); | ||
33 | ROUND(F, b, c, d, a, in[3] + K1, 19); | ||
34 | ROUND(F, a, b, c, d, in[4] + K1, 3); | ||
35 | ROUND(F, d, a, b, c, in[5] + K1, 7); | ||
36 | ROUND(F, c, d, a, b, in[6] + K1, 11); | ||
37 | ROUND(F, b, c, d, a, in[7] + K1, 19); | ||
38 | |||
39 | /* Round 2 */ | ||
40 | ROUND(G, a, b, c, d, in[1] + K2, 3); | ||
41 | ROUND(G, d, a, b, c, in[3] + K2, 5); | ||
42 | ROUND(G, c, d, a, b, in[5] + K2, 9); | ||
43 | ROUND(G, b, c, d, a, in[7] + K2, 13); | ||
44 | ROUND(G, a, b, c, d, in[0] + K2, 3); | ||
45 | ROUND(G, d, a, b, c, in[2] + K2, 5); | ||
46 | ROUND(G, c, d, a, b, in[4] + K2, 9); | ||
47 | ROUND(G, b, c, d, a, in[6] + K2, 13); | ||
48 | |||
49 | /* Round 3 */ | ||
50 | ROUND(H, a, b, c, d, in[3] + K3, 3); | ||
51 | ROUND(H, d, a, b, c, in[7] + K3, 9); | ||
52 | ROUND(H, c, d, a, b, in[2] + K3, 11); | ||
53 | ROUND(H, b, c, d, a, in[6] + K3, 15); | ||
54 | ROUND(H, a, b, c, d, in[1] + K3, 3); | ||
55 | ROUND(H, d, a, b, c, in[5] + K3, 9); | ||
56 | ROUND(H, c, d, a, b, in[0] + K3, 11); | ||
57 | ROUND(H, b, c, d, a, in[4] + K3, 15); | ||
58 | |||
59 | buf[0] += a; | ||
60 | buf[1] += b; | ||
61 | buf[2] += c; | ||
62 | buf[3] += d; | ||
63 | |||
64 | return buf[1]; /* "most hashed" word */ | ||
65 | } | ||
66 | EXPORT_SYMBOL(half_md4_transform); | ||
diff --git a/lib/idr.c b/lib/idr.c new file mode 100644 index 000000000000..81fc430602ee --- /dev/null +++ b/lib/idr.c | |||
@@ -0,0 +1,408 @@ | |||
1 | /* | ||
2 | * 2002-10-18 written by Jim Houston jim.houston@ccur.com | ||
3 | * Copyright (C) 2002 by Concurrent Computer Corporation | ||
4 | * Distributed under the GNU GPL license version 2. | ||
5 | * | ||
6 | * Modified by George Anzinger to reuse immediately and to use | ||
7 | * find bit instructions. Also removed _irq on spinlocks. | ||
8 | * | ||
9 | * Small id to pointer translation service. | ||
10 | * | ||
11 | * It uses a radix tree like structure as a sparse array indexed | ||
12 | * by the id to obtain the pointer. The bitmap makes allocating | ||
13 | * a new id quick. | ||
14 | * | ||
15 | * You call it to allocate an id (an int) and associate with that id a | ||
16 | * pointer or whatever; we treat it as a (void *). You can pass this | ||
17 | * id to a user for them to pass back at a later time. You then pass | ||
18 | * that id to this code and it returns your pointer. | ||
19 | * | ||
20 | * You can release ids at any time. When all ids are released, most of | ||
21 | * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we | ||
22 | * don't need to go to the memory "store" during an id allocate; that | ||
23 | * way you don't need to be too concerned about locking and conflicts | ||
24 | * with the slab allocator. | ||
25 | */ | ||
26 | |||
27 | #ifndef TEST // to test in user space... | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/module.h> | ||
31 | #endif | ||
32 | #include <linux/string.h> | ||
33 | #include <linux/idr.h> | ||
34 | |||
35 | static kmem_cache_t *idr_layer_cache; | ||
36 | |||
37 | static struct idr_layer *alloc_layer(struct idr *idp) | ||
38 | { | ||
39 | struct idr_layer *p; | ||
40 | |||
41 | spin_lock(&idp->lock); | ||
42 | if ((p = idp->id_free)) { | ||
43 | idp->id_free = p->ary[0]; | ||
44 | idp->id_free_cnt--; | ||
45 | p->ary[0] = NULL; | ||
46 | } | ||
47 | spin_unlock(&idp->lock); | ||
48 | return(p); | ||
49 | } | ||
50 | |||
51 | static void free_layer(struct idr *idp, struct idr_layer *p) | ||
52 | { | ||
53 | /* | ||
54 | * Depends on the return element being zeroed. | ||
55 | */ | ||
56 | spin_lock(&idp->lock); | ||
57 | p->ary[0] = idp->id_free; | ||
58 | idp->id_free = p; | ||
59 | idp->id_free_cnt++; | ||
60 | spin_unlock(&idp->lock); | ||
61 | } | ||
62 | |||
63 | /** | ||
64 | * idr_pre_get - reserve resources for idr allocation | ||
65 | * @idp: idr handle | ||
66 | * @gfp_mask: memory allocation flags | ||
67 | * | ||
68 | * This function should be called prior to locking and calling the | ||
69 | * following function. It preallocates enough memory to satisfy | ||
70 | * the worst possible allocation. | ||
71 | * | ||
72 | * If the system is REALLY out of memory this function returns 0, | ||
73 | * otherwise 1. | ||
74 | */ | ||
75 | int idr_pre_get(struct idr *idp, unsigned gfp_mask) | ||
76 | { | ||
77 | while (idp->id_free_cnt < IDR_FREE_MAX) { | ||
78 | struct idr_layer *new; | ||
79 | new = kmem_cache_alloc(idr_layer_cache, gfp_mask); | ||
80 | if (new == NULL) | ||
81 | return (0); | ||
82 | free_layer(idp, new); | ||
83 | } | ||
84 | return 1; | ||
85 | } | ||
86 | EXPORT_SYMBOL(idr_pre_get); | ||
87 | |||
88 | static int sub_alloc(struct idr *idp, void *ptr, int *starting_id) | ||
89 | { | ||
90 | int n, m, sh; | ||
91 | struct idr_layer *p, *new; | ||
92 | struct idr_layer *pa[MAX_LEVEL]; | ||
93 | int l, id; | ||
94 | long bm; | ||
95 | |||
96 | id = *starting_id; | ||
97 | p = idp->top; | ||
98 | l = idp->layers; | ||
99 | pa[l--] = NULL; | ||
100 | while (1) { | ||
101 | /* | ||
102 | * We run around this while loop until we reach the leaf node... | ||
103 | */ | ||
104 | n = (id >> (IDR_BITS*l)) & IDR_MASK; | ||
105 | bm = ~p->bitmap; | ||
106 | m = find_next_bit(&bm, IDR_SIZE, n); | ||
107 | if (m == IDR_SIZE) { | ||
108 | /* no space available, go back to previous layer. */ | ||
109 | l++; | ||
110 | id = (id | ((1 << (IDR_BITS*l))-1)) + 1; | ||
111 | if (!(p = pa[l])) { | ||
112 | *starting_id = id; | ||
113 | return -2; | ||
114 | } | ||
115 | continue; | ||
116 | } | ||
117 | if (m != n) { | ||
118 | sh = IDR_BITS*l; | ||
119 | id = ((id >> sh) ^ n ^ m) << sh; | ||
120 | } | ||
121 | if ((id >= MAX_ID_BIT) || (id < 0)) | ||
122 | return -3; | ||
123 | if (l == 0) | ||
124 | break; | ||
125 | /* | ||
126 | * Create the layer below if it is missing. | ||
127 | */ | ||
128 | if (!p->ary[m]) { | ||
129 | if (!(new = alloc_layer(idp))) | ||
130 | return -1; | ||
131 | p->ary[m] = new; | ||
132 | p->count++; | ||
133 | } | ||
134 | pa[l--] = p; | ||
135 | p = p->ary[m]; | ||
136 | } | ||
137 | /* | ||
138 | * We have reached the leaf node, plant the | ||
139 | * user's pointer and return the raw id. | ||
140 | */ | ||
141 | p->ary[m] = (struct idr_layer *)ptr; | ||
142 | __set_bit(m, &p->bitmap); | ||
143 | p->count++; | ||
144 | /* | ||
145 | * If this layer is full mark the bit in the layer above | ||
146 | * to show that this part of the radix tree is full. | ||
147 | * This may complete the layer above and require walking | ||
148 | * up the radix tree. | ||
149 | */ | ||
150 | n = id; | ||
151 | while (p->bitmap == IDR_FULL) { | ||
152 | if (!(p = pa[++l])) | ||
153 | break; | ||
154 | n = n >> IDR_BITS; | ||
155 | __set_bit((n & IDR_MASK), &p->bitmap); | ||
156 | } | ||
157 | return(id); | ||
158 | } | ||
159 | |||
160 | static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | ||
161 | { | ||
162 | struct idr_layer *p, *new; | ||
163 | int layers, v, id; | ||
164 | |||
165 | id = starting_id; | ||
166 | build_up: | ||
167 | p = idp->top; | ||
168 | layers = idp->layers; | ||
169 | if (unlikely(!p)) { | ||
170 | if (!(p = alloc_layer(idp))) | ||
171 | return -1; | ||
172 | layers = 1; | ||
173 | } | ||
174 | /* | ||
175 | * Add a new layer to the top of the tree if the requested | ||
176 | * id is larger than the currently allocated space. | ||
177 | */ | ||
178 | while ((layers < MAX_LEVEL) && (id >= (1 << (layers*IDR_BITS)))) { | ||
179 | layers++; | ||
180 | if (!p->count) | ||
181 | continue; | ||
182 | if (!(new = alloc_layer(idp))) { | ||
183 | /* | ||
184 | * The allocation failed. If we built part of | ||
185 | * the structure tear it down. | ||
186 | */ | ||
187 | for (new = p; p && p != idp->top; new = p) { | ||
188 | p = p->ary[0]; | ||
189 | new->ary[0] = NULL; | ||
190 | new->bitmap = new->count = 0; | ||
191 | free_layer(idp, new); | ||
192 | } | ||
193 | return -1; | ||
194 | } | ||
195 | new->ary[0] = p; | ||
196 | new->count = 1; | ||
197 | if (p->bitmap == IDR_FULL) | ||
198 | __set_bit(0, &new->bitmap); | ||
199 | p = new; | ||
200 | } | ||
201 | idp->top = p; | ||
202 | idp->layers = layers; | ||
203 | v = sub_alloc(idp, ptr, &id); | ||
204 | if (v == -2) | ||
205 | goto build_up; | ||
206 | return(v); | ||
207 | } | ||
208 | |||
209 | /** | ||
210 | * idr_get_new_above - allocate new idr entry above a start id | ||
211 | * @idp: idr handle | ||
212 | * @ptr: pointer you want associated with the id | ||
213 | * @starting_id: id to start search at | ||
214 | * @id: pointer to the allocated handle | ||
215 | * | ||
216 | * This is the allocate id function. It should be called with any | ||
217 | * required locks. | ||
218 | * | ||
219 | * If memory is required, it will return -EAGAIN, you should unlock | ||
220 | * and go back to the idr_pre_get() call. If the idr is full, it will | ||
221 | * return -ENOSPC. | ||
222 | * | ||
223 | * @id returns a value in the range 0 ... 0x7fffffff | ||
224 | */ | ||
225 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) | ||
226 | { | ||
227 | int rv; | ||
228 | rv = idr_get_new_above_int(idp, ptr, starting_id); | ||
229 | /* | ||
230 | * This is a cheap hack until the IDR code can be fixed to | ||
231 | * return proper error values. | ||
232 | */ | ||
233 | if (rv < 0) { | ||
234 | if (rv == -1) | ||
235 | return -EAGAIN; | ||
236 | else /* Will be -3 */ | ||
237 | return -ENOSPC; | ||
238 | } | ||
239 | *id = rv; | ||
240 | return 0; | ||
241 | } | ||
242 | EXPORT_SYMBOL(idr_get_new_above); | ||
243 | |||
244 | /** | ||
245 | * idr_get_new - allocate new idr entry | ||
246 | * @idp: idr handle | ||
247 | * @ptr: pointer you want associated with the id | ||
248 | * @id: pointer to the allocated handle | ||
249 | * | ||
250 | * This is the allocate id function. It should be called with any | ||
251 | * required locks. | ||
252 | * | ||
253 | * If memory is required, it will return -EAGAIN, you should unlock | ||
254 | * and go back to the idr_pre_get() call. If the idr is full, it will | ||
255 | * return -ENOSPC. | ||
256 | * | ||
257 | * @id returns a value in the range 0 ... 0x7fffffff | ||
258 | */ | ||
259 | int idr_get_new(struct idr *idp, void *ptr, int *id) | ||
260 | { | ||
261 | int rv; | ||
262 | rv = idr_get_new_above_int(idp, ptr, 0); | ||
263 | /* | ||
264 | * This is a cheap hack until the IDR code can be fixed to | ||
265 | * return proper error values. | ||
266 | */ | ||
267 | if (rv < 0) { | ||
268 | if (rv == -1) | ||
269 | return -EAGAIN; | ||
270 | else /* Will be -3 */ | ||
271 | return -ENOSPC; | ||
272 | } | ||
273 | *id = rv; | ||
274 | return 0; | ||
275 | } | ||
276 | EXPORT_SYMBOL(idr_get_new); | ||
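/*
 * For illustration, the allocation protocol documented above looks like
 * this from the caller's side (my_idr, my_lock and ptr are hypothetical):
 *
 *	int id, ret;
 * again:
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	ret = idr_get_new(&my_idr, ptr, &id);
 *	spin_unlock(&my_lock);
 *	if (ret == -EAGAIN)
 *		goto again;
 *	else if (ret)
 *		return ret;
 */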
277 | |||
278 | static void idr_remove_warning(int id) | ||
279 | { | ||
280 | printk("idr_remove called for id=%d which is not allocated.\n", id); | ||
281 | dump_stack(); | ||
282 | } | ||
283 | |||
284 | static void sub_remove(struct idr *idp, int shift, int id) | ||
285 | { | ||
286 | struct idr_layer *p = idp->top; | ||
287 | struct idr_layer **pa[MAX_LEVEL]; | ||
288 | struct idr_layer ***paa = &pa[0]; | ||
289 | int n; | ||
290 | |||
291 | *paa = NULL; | ||
292 | *++paa = &idp->top; | ||
293 | |||
294 | while ((shift > 0) && p) { | ||
295 | n = (id >> shift) & IDR_MASK; | ||
296 | __clear_bit(n, &p->bitmap); | ||
297 | *++paa = &p->ary[n]; | ||
298 | p = p->ary[n]; | ||
299 | shift -= IDR_BITS; | ||
300 | } | ||
301 | n = id & IDR_MASK; | ||
302 | if (likely(p != NULL && test_bit(n, &p->bitmap))){ | ||
303 | __clear_bit(n, &p->bitmap); | ||
304 | p->ary[n] = NULL; | ||
305 | while(*paa && ! --((**paa)->count)){ | ||
306 | free_layer(idp, **paa); | ||
307 | **paa-- = NULL; | ||
308 | } | ||
309 | if ( ! *paa ) | ||
310 | idp->layers = 0; | ||
311 | } else { | ||
312 | idr_remove_warning(id); | ||
313 | } | ||
314 | } | ||
315 | |||
316 | /** | ||
317 | * idr_remove - remove the given id and free its slot | ||
318 | * @idp: idr handle | ||
319 | * @id: unique key | ||
320 | */ | ||
321 | void idr_remove(struct idr *idp, int id) | ||
322 | { | ||
323 | struct idr_layer *p; | ||
324 | |||
325 | /* Mask off upper bits we don't use for the search. */ | ||
326 | id &= MAX_ID_MASK; | ||
327 | |||
328 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); | ||
329 | if ( idp->top && idp->top->count == 1 && | ||
330 | (idp->layers > 1) && | ||
331 | idp->top->ary[0]){ // We can drop a layer | ||
332 | |||
333 | p = idp->top->ary[0]; | ||
334 | idp->top->bitmap = idp->top->count = 0; | ||
335 | free_layer(idp, idp->top); | ||
336 | idp->top = p; | ||
337 | --idp->layers; | ||
338 | } | ||
339 | while (idp->id_free_cnt >= IDR_FREE_MAX) { | ||
340 | |||
341 | p = alloc_layer(idp); | ||
342 | kmem_cache_free(idr_layer_cache, p); | ||
343 | return; | ||
344 | } | ||
345 | } | ||
346 | EXPORT_SYMBOL(idr_remove); | ||
347 | |||
348 | /** | ||
349 | * idr_find - return pointer for given id | ||
350 | * @idp: idr handle | ||
351 | * @id: lookup key | ||
352 | * | ||
353 | * Return the pointer given the id it has been registered with. A %NULL | ||
354 | * return indicates that @id is not valid or you passed %NULL in | ||
355 | * idr_get_new(). | ||
356 | * | ||
357 | * The caller must serialize idr_find() vs idr_get_new() and idr_remove(). | ||
358 | */ | ||
359 | void *idr_find(struct idr *idp, int id) | ||
360 | { | ||
361 | int n; | ||
362 | struct idr_layer *p; | ||
363 | |||
364 | n = idp->layers * IDR_BITS; | ||
365 | p = idp->top; | ||
366 | |||
367 | /* Mask off upper bits we don't use for the search. */ | ||
368 | id &= MAX_ID_MASK; | ||
369 | |||
370 | if (id >= (1 << n)) | ||
371 | return NULL; | ||
372 | |||
373 | while (n > 0 && p) { | ||
374 | n -= IDR_BITS; | ||
375 | p = p->ary[(id >> n) & IDR_MASK]; | ||
376 | } | ||
377 | return((void *)p); | ||
378 | } | ||
379 | EXPORT_SYMBOL(idr_find); | ||
380 | |||
381 | static void idr_cache_ctor(void * idr_layer, | ||
382 | kmem_cache_t *idr_layer_cache, unsigned long flags) | ||
383 | { | ||
384 | memset(idr_layer, 0, sizeof(struct idr_layer)); | ||
385 | } | ||
386 | |||
387 | static int init_id_cache(void) | ||
388 | { | ||
389 | if (!idr_layer_cache) | ||
390 | idr_layer_cache = kmem_cache_create("idr_layer_cache", | ||
391 | sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL); | ||
392 | return 0; | ||
393 | } | ||
394 | |||
395 | /** | ||
396 | * idr_init - initialize idr handle | ||
397 | * @idp: idr handle | ||
398 | * | ||
399 | * This function is used to set up the handle (@idp) that you will pass | ||
400 | * to the rest of the functions. | ||
401 | */ | ||
402 | void idr_init(struct idr *idp) | ||
403 | { | ||
404 | init_id_cache(); | ||
405 | memset(idp, 0, sizeof(struct idr)); | ||
406 | spin_lock_init(&idp->lock); | ||
407 | } | ||
408 | EXPORT_SYMBOL(idr_init); | ||
diff --git a/lib/inflate.c b/lib/inflate.c new file mode 100644 index 000000000000..75e7d303c72e --- /dev/null +++ b/lib/inflate.c | |||
@@ -0,0 +1,1210 @@ | |||
1 | #define DEBG(x) | ||
2 | #define DEBG1(x) | ||
3 | /* inflate.c -- Not copyrighted 1992 by Mark Adler | ||
4 | version c10p1, 10 January 1993 */ | ||
5 | |||
6 | /* | ||
7 | * Adapted for booting Linux by Hannu Savolainen 1993 | ||
8 | * based on gzip-1.0.3 | ||
9 | * | ||
10 | * Nicolas Pitre <nico@cam.org>, 1999/04/14 : | ||
11 | * Little mods for all variable to reside either into rodata or bss segments | ||
12 | * by marking constant variables with 'const' and initializing all the others | ||
13 | * at run-time only. This allows for the kernel uncompressor to run | ||
14 | * directly from Flash or ROM memory on embedded systems. | ||
15 | */ | ||
16 | |||
17 | /* | ||
18 | Inflate deflated (PKZIP's method 8 compressed) data. The compression | ||
19 | method searches for as much of the current string of bytes (up to a | ||
20 | length of 258) in the previous 32 K bytes. If it doesn't find any | ||
21 | matches (of at least length 3), it codes the next byte. Otherwise, it | ||
22 | codes the length of the matched string and its distance backwards from | ||
23 | the current position. There is a single Huffman code that codes both | ||
24 | single bytes (called "literals") and match lengths. A second Huffman | ||
25 | code codes the distance information, which follows a length code. Each | ||
26 | length or distance code actually represents a base value and a number | ||
27 | of "extra" (sometimes zero) bits to get to add to the base value. At | ||
28 | the end of each deflated block is a special end-of-block (EOB) literal/ | ||
29 | length code. The decoding process is basically: get a literal/length | ||
30 | code; if EOB then done; if a literal, emit the decoded byte; if a | ||
31 | length then get the distance and emit the referred-to bytes from the | ||
32 | sliding window of previously emitted data. | ||
33 | |||
34 | There are (currently) three kinds of inflate blocks: stored, fixed, and | ||
35 | dynamic. The compressor deals with some chunk of data at a time, and | ||
36 | decides which method to use on a chunk-by-chunk basis. A chunk might | ||
37 | typically be 32 K or 64 K. If the chunk is incompressible, then the | ||
38 | "stored" method is used. In this case, the bytes are simply stored as | ||
39 | is, eight bits per byte, with none of the above coding. The bytes are | ||
40 | preceded by a count, since there is no longer an EOB code. | ||
41 | |||
42 | If the data is compressible, then either the fixed or dynamic methods | ||
43 | are used. In the dynamic method, the compressed data is preceded by | ||
44 | an encoding of the literal/length and distance Huffman codes that are | ||
45 | to be used to decode this block. The representation is itself Huffman | ||
46 | coded, and so is preceded by a description of that code. These code | ||
47 | descriptions take up a little space, and so for small blocks, there is | ||
48 | a predefined set of codes, called the fixed codes. The fixed method is | ||
49 | used if the block codes up smaller that way (usually for quite small | ||
50 | chunks), otherwise the dynamic method is used. In the latter case, the | ||
51 | codes are customized to the probabilities in the current block, and so | ||
52 | can code it much better than the pre-determined fixed codes. | ||
53 | |||
54 | The Huffman codes themselves are decoded using a multi-level table | ||
55 | lookup, in order to maximize the speed of decoding plus the speed of | ||
56 | building the decoding tables. See the comments below that precede the | ||
57 | lbits and dbits tuning parameters. | ||
58 | */ | ||
59 | |||
60 | |||
61 | /* | ||
62 | Notes beyond the 1.93a appnote.txt: | ||
63 | |||
64 | 1. Distance pointers never point before the beginning of the output | ||
65 | stream. | ||
66 | 2. Distance pointers can point back across blocks, up to 32k away. | ||
67 | 3. There is an implied maximum of 7 bits for the bit length table and | ||
68 | 15 bits for the actual data. | ||
69 | 4. If only one code exists, then it is encoded using one bit. (Zero | ||
70 | would be more efficient, but perhaps a little confusing.) If two | ||
71 | codes exist, they are coded using one bit each (0 and 1). | ||
72 | 5. There is no way of sending zero distance codes--a dummy must be | ||
73 | sent if there are none. (History: a pre 2.0 version of PKZIP would | ||
74 | store blocks with no distance codes, but this was discovered to be | ||
75 | too harsh a criterion.) Valid only for 1.93a. 2.04c does allow | ||
76 | zero distance codes, which is sent as one code of zero bits in | ||
77 | length. | ||
78 | 6. There are up to 286 literal/length codes. Code 256 represents the | ||
79 | end-of-block. Note however that the static length tree defines | ||
80 | 288 codes just to fill out the Huffman codes. Codes 286 and 287 | ||
81 | cannot be used though, since there is no length base or extra bits | ||
82 | defined for them. Similarly, there are up to 30 distance codes. | ||
83 | However, static trees define 32 codes (all 5 bits) to fill out the | ||
84 | Huffman codes, but the last two had better not show up in the data. | ||
85 | 7. Unzip can check dynamic Huffman blocks for complete code sets. | ||
86 | The exception is that a single code would not be complete (see #4). | ||
87 | 8. The five bits following the block type is really the number of | ||
88 | literal codes sent minus 257. | ||
89 | 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits | ||
90 | (1+6+6). Therefore, to output three times the length, you output | ||
91 | three codes (1+1+1), whereas to output four times the same length, | ||
92 | you only need two codes (1+3). Hmm. | ||
93 | 10. In the tree reconstruction algorithm, Code = Code + Increment | ||
94 | only if BitLength(i) is not zero. (Pretty obvious.) | ||
95 | 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19) | ||
96 | 12. Note: length code 284 can represent 227-258, but length code 285 | ||
97 | really is 258. The last length deserves its own, short code | ||
98 | since it gets used a lot in very redundant files. The length | ||
99 | 258 is special since 258 - 3 (the min match length) is 255. | ||
100 | 13. The literal/length and distance code bit lengths are read as a | ||
101 | single stream of lengths. It is possible (and advantageous) for | ||
102 | a repeat code (16, 17, or 18) to go across the boundary between | ||
103 | the two sets of lengths. | ||
104 | */ | ||
105 | #include <linux/compiler.h> | ||
106 | |||
107 | #ifdef RCSID | ||
108 | static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #"; | ||
109 | #endif | ||
110 | |||
111 | #ifndef STATIC | ||
112 | |||
113 | #if defined(STDC_HEADERS) || defined(HAVE_STDLIB_H) | ||
114 | # include <sys/types.h> | ||
115 | # include <stdlib.h> | ||
116 | #endif | ||
117 | |||
118 | #include "gzip.h" | ||
119 | #define STATIC | ||
120 | #endif /* !STATIC */ | ||
121 | |||
122 | #ifndef INIT | ||
123 | #define INIT | ||
124 | #endif | ||
125 | |||
126 | #define slide window | ||
127 | |||
128 | /* Huffman code lookup table entry--this entry is four bytes for machines | ||
129 | that have 16-bit pointers (e.g. PC's in the small or medium model). | ||
130 | Valid extra bits are 0..13. e == 15 is EOB (end of block), e == 16 | ||
131 | means that v is a literal, 16 < e < 32 means that v is a pointer to | ||
132 | the next table, which codes e - 16 bits, and lastly e == 99 indicates | ||
133 | an unused code. If a code with e == 99 is looked up, this implies an | ||
134 | error in the data. */ | ||
135 | struct huft { | ||
136 | uch e; /* number of extra bits or operation */ | ||
137 | uch b; /* number of bits in this code or subcode */ | ||
138 | union { | ||
139 | ush n; /* literal, length base, or distance base */ | ||
140 | struct huft *t; /* pointer to next level of table */ | ||
141 | } v; | ||
142 | }; | ||
143 | |||
144 | |||
145 | /* Function prototypes */ | ||
146 | STATIC int INIT huft_build OF((unsigned *, unsigned, unsigned, | ||
147 | const ush *, const ush *, struct huft **, int *)); | ||
148 | STATIC int INIT huft_free OF((struct huft *)); | ||
149 | STATIC int INIT inflate_codes OF((struct huft *, struct huft *, int, int)); | ||
150 | STATIC int INIT inflate_stored OF((void)); | ||
151 | STATIC int INIT inflate_fixed OF((void)); | ||
152 | STATIC int INIT inflate_dynamic OF((void)); | ||
153 | STATIC int INIT inflate_block OF((int *)); | ||
154 | STATIC int INIT inflate OF((void)); | ||
155 | |||
156 | |||
157 | /* The inflate algorithm uses a sliding 32 K byte window on the uncompressed | ||
158 | stream to find repeated byte strings. This is implemented here as a | ||
159 | circular buffer. The index is updated simply by incrementing and then | ||
160 | ANDing with 0x7fff (32K-1). */ | ||
161 | /* It is left to other modules to supply the 32 K area. It is assumed | ||
162 | to be usable as if it were declared "uch slide[32768];" or as just | ||
163 | "uch *slide;" and then malloc'ed in the latter case. The definition | ||
164 | must be in unzip.h, included above. */ | ||
165 | /* unsigned wp; current position in slide */ | ||
166 | #define wp outcnt | ||
167 | #define flush_output(w) (wp=(w),flush_window()) | ||
168 | |||
169 | /* Tables for deflate from PKZIP's appnote.txt. */ | ||
170 | static const unsigned border[] = { /* Order of the bit length code lengths */ | ||
171 | 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; | ||
172 | static const ush cplens[] = { /* Copy lengths for literal codes 257..285 */ | ||
173 | 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, | ||
174 | 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; | ||
175 | /* note: see note #13 above about the 258 in this list. */ | ||
176 | static const ush cplext[] = { /* Extra bits for literal codes 257..285 */ | ||
177 | 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, | ||
178 | 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */ | ||
179 | static const ush cpdist[] = { /* Copy offsets for distance codes 0..29 */ | ||
180 | 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, | ||
181 | 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, | ||
182 | 8193, 12289, 16385, 24577}; | ||
183 | static const ush cpdext[] = { /* Extra bits for distance codes */ | ||
184 | 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, | ||
185 | 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, | ||
186 | 12, 12, 13, 13}; | ||
187 | |||
188 | |||
189 | |||
190 | /* Macros for inflate() bit peeking and grabbing. | ||
191 | The usage is: | ||
192 | |||
193 | NEEDBITS(j) | ||
194 | x = b & mask_bits[j]; | ||
195 | DUMPBITS(j) | ||
196 | |||
197 | where NEEDBITS makes sure that b has at least j bits in it, and | ||
198 | DUMPBITS removes the bits from b. The macros use the variable k | ||
199 | for the number of bits in b. Normally, b and k are register | ||
200 | variables for speed, and are initialized at the beginning of a | ||
201 | routine that uses these macros from a global bit buffer and count. | ||
202 | |||
203 | If we assume that EOB will be the longest code, then we will never | ||
204 | ask for bits with NEEDBITS that are beyond the end of the stream. | ||
205 | So, NEEDBITS should not read any more bytes than are needed to | ||
206 | meet the request. Then no bytes need to be "returned" to the buffer | ||
207 | at the end of the last block. | ||
208 | |||
209 | However, this assumption is not true for fixed blocks--the EOB code | ||
210 | is 7 bits, but the other literal/length codes can be 8 or 9 bits. | ||
211 | (The EOB code is shorter than other codes because fixed blocks are | ||
212 | generally short. So, while a block always has an EOB, many other | ||
213 | literal/length codes have a significantly lower probability of | ||
214 | showing up at all.) However, by making the first table have a | ||
215 | lookup of seven bits, the EOB code will be found in that first | ||
216 | lookup, and so will not require that too many bits be pulled from | ||
217 | the stream. | ||
218 | */ | ||
219 | |||
220 | STATIC ulg bb; /* bit buffer */ | ||
221 | STATIC unsigned bk; /* bits in bit buffer */ | ||
222 | |||
223 | STATIC const ush mask_bits[] = { | ||
224 | 0x0000, | ||
225 | 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, | ||
226 | 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff | ||
227 | }; | ||
228 | |||
229 | #define NEXTBYTE() ({ int v = get_byte(); if (v < 0) goto underrun; (uch)v; }) | ||
230 | #define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}} | ||
231 | #define DUMPBITS(n) {b>>=(n);k-=(n);} | ||
232 | |||
233 | |||
234 | /* | ||
235 | Huffman code decoding is performed using a multi-level table lookup. | ||
236 | The fastest way to decode is to simply build a lookup table whose | ||
237 | size is determined by the longest code. However, the time it takes | ||
238 | to build this table can also be a factor if the data being decoded | ||
239 | is not very long. The most common codes are necessarily the | ||
240 | shortest codes, so those codes dominate the decoding time, and hence | ||
241 | the speed. The idea is you can have a shorter table that decodes the | ||
242 | shorter, more probable codes, and then point to subsidiary tables for | ||
243 | the longer codes. The time it costs to decode the longer codes is | ||
244 | then traded against the time it takes to make longer tables. | ||
245 | |||
246 | The results of this trade are in the variables lbits and dbits | ||
247 | below. lbits is the number of bits the first level table for literal/ | ||
248 | length codes can decode in one step, and dbits is the same thing for | ||
249 | the distance codes. Subsequent tables are also less than or equal to | ||
250 | those sizes. These values may be adjusted either when all of the | ||
251 | codes are shorter than that, in which case the longest code length in | ||
252 | bits is used, or when the shortest code is *longer* than the requested | ||
253 | table size, in which case the length of the shortest code in bits is | ||
254 | used. | ||
255 | |||
256 | There are two different values for the two tables, since they code a | ||
257 | different number of possibilities each. The literal/length table | ||
258 | codes 286 possible values, or in a flat code, a little over eight | ||
259 | bits. The distance table codes 30 possible values, or a little less | ||
260 | than five bits, flat. The optimum values for speed end up being | ||
261 | about one bit more than those, so lbits is 8+1 and dbits is 5+1. | ||
262 | The optimum values may differ though from machine to machine, and | ||
263 | possibly even between compilers. Your mileage may vary. | ||
264 | */ | ||
265 | |||
266 | |||
267 | STATIC const int lbits = 9; /* bits in base literal/length lookup table */ | ||
268 | STATIC const int dbits = 6; /* bits in base distance lookup table */ | ||
269 | |||
270 | |||
271 | /* If BMAX needs to be larger than 16, then h and x[] should be ulg. */ | ||
272 | #define BMAX 16 /* maximum bit length of any code (16 for explode) */ | ||
273 | #define N_MAX 288 /* maximum number of codes in any set */ | ||
274 | |||
275 | |||
276 | STATIC unsigned hufts; /* track memory usage */ | ||
277 | |||
278 | |||
279 | STATIC int INIT huft_build( | ||
280 | unsigned *b, /* code lengths in bits (all assumed <= BMAX) */ | ||
281 | unsigned n, /* number of codes (assumed <= N_MAX) */ | ||
282 | unsigned s, /* number of simple-valued codes (0..s-1) */ | ||
283 | const ush *d, /* list of base values for non-simple codes */ | ||
284 | const ush *e, /* list of extra bits for non-simple codes */ | ||
285 | struct huft **t, /* result: starting table */ | ||
286 | int *m /* maximum lookup bits, returns actual */ | ||
287 | ) | ||
288 | /* Given a list of code lengths and a maximum table size, make a set of | ||
289 | tables to decode that set of codes. Return zero on success, one if | ||
290 | the given code set is incomplete (the tables are still built in this | ||
291 | case), two if the input is invalid (all zero length codes or an | ||
292 | oversubscribed set of lengths), and three if not enough memory. */ | ||
293 | { | ||
294 | unsigned a; /* counter for codes of length k */ | ||
295 | unsigned c[BMAX+1]; /* bit length count table */ | ||
296 | unsigned f; /* i repeats in table every f entries */ | ||
297 | int g; /* maximum code length */ | ||
298 | int h; /* table level */ | ||
299 | register unsigned i; /* counter, current code */ | ||
300 | register unsigned j; /* counter */ | ||
301 | register int k; /* number of bits in current code */ | ||
302 | int l; /* bits per table (returned in m) */ | ||
303 | register unsigned *p; /* pointer into c[], b[], or v[] */ | ||
304 | register struct huft *q; /* points to current table */ | ||
305 | struct huft r; /* table entry for structure assignment */ | ||
306 | struct huft *u[BMAX]; /* table stack */ | ||
307 | unsigned v[N_MAX]; /* values in order of bit length */ | ||
308 | register int w; /* bits before this table == (l * h) */ | ||
309 | unsigned x[BMAX+1]; /* bit offsets, then code stack */ | ||
310 | unsigned *xp; /* pointer into x */ | ||
311 | int y; /* number of dummy codes added */ | ||
312 | unsigned z; /* number of entries in current table */ | ||
313 | |||
314 | DEBG("huft1 "); | ||
315 | |||
316 | /* Generate counts for each bit length */ | ||
317 | memzero(c, sizeof(c)); | ||
318 | p = b; i = n; | ||
319 | do { | ||
320 | Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? "%c %d\n" : "0x%x %d\n"), | ||
321 | n-i, *p)); | ||
322 | c[*p]++; /* assume all entries <= BMAX */ | ||
323 | p++; /* Can't combine with above line (Solaris bug) */ | ||
324 | } while (--i); | ||
325 | if (c[0] == n) /* null input--all zero length codes */ | ||
326 | { | ||
327 | *t = (struct huft *)NULL; | ||
328 | *m = 0; | ||
329 | return 0; | ||
330 | } | ||
331 | |||
332 | DEBG("huft2 "); | ||
333 | |||
334 | /* Find minimum and maximum length, bound *m by those */ | ||
335 | l = *m; | ||
336 | for (j = 1; j <= BMAX; j++) | ||
337 | if (c[j]) | ||
338 | break; | ||
339 | k = j; /* minimum code length */ | ||
340 | if ((unsigned)l < j) | ||
341 | l = j; | ||
342 | for (i = BMAX; i; i--) | ||
343 | if (c[i]) | ||
344 | break; | ||
345 | g = i; /* maximum code length */ | ||
346 | if ((unsigned)l > i) | ||
347 | l = i; | ||
348 | *m = l; | ||
349 | |||
350 | DEBG("huft3 "); | ||
351 | |||
352 | /* Adjust last length count to fill out codes, if needed */ | ||
353 | for (y = 1 << j; j < i; j++, y <<= 1) | ||
354 | if ((y -= c[j]) < 0) | ||
355 | return 2; /* bad input: more codes than bits */ | ||
356 | if ((y -= c[i]) < 0) | ||
357 | return 2; | ||
358 | c[i] += y; | ||
359 | |||
360 | DEBG("huft4 "); | ||
361 | |||
362 | /* Generate starting offsets into the value table for each length */ | ||
363 | x[1] = j = 0; | ||
364 | p = c + 1; xp = x + 2; | ||
365 | while (--i) { /* note that i == g from above */ | ||
366 | *xp++ = (j += *p++); | ||
367 | } | ||
368 | |||
369 | DEBG("huft5 "); | ||
370 | |||
371 | /* Make a table of values in order of bit lengths */ | ||
372 | p = b; i = 0; | ||
373 | do { | ||
374 | if ((j = *p++) != 0) | ||
375 | v[x[j]++] = i; | ||
376 | } while (++i < n); | ||
377 | |||
378 | DEBG("h6 "); | ||
379 | |||
380 | /* Generate the Huffman codes and for each, make the table entries */ | ||
381 | x[0] = i = 0; /* first Huffman code is zero */ | ||
382 | p = v; /* grab values in bit order */ | ||
383 | h = -1; /* no tables yet--level -1 */ | ||
384 | w = -l; /* bits decoded == (l * h) */ | ||
385 | u[0] = (struct huft *)NULL; /* just to keep compilers happy */ | ||
386 | q = (struct huft *)NULL; /* ditto */ | ||
387 | z = 0; /* ditto */ | ||
388 | DEBG("h6a "); | ||
389 | |||
390 | /* go through the bit lengths (k already is bits in shortest code) */ | ||
391 | for (; k <= g; k++) | ||
392 | { | ||
393 | DEBG("h6b "); | ||
394 | a = c[k]; | ||
395 | while (a--) | ||
396 | { | ||
397 | DEBG("h6b1 "); | ||
398 | /* here i is the Huffman code of length k bits for value *p */ | ||
399 | /* make tables up to required level */ | ||
400 | while (k > w + l) | ||
401 | { | ||
402 | DEBG1("1 "); | ||
403 | h++; | ||
404 | w += l; /* previous table always l bits */ | ||
405 | |||
406 | /* compute minimum size table less than or equal to l bits */ | ||
407 | z = (z = g - w) > (unsigned)l ? l : z; /* upper limit on table size */ | ||
408 | if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */ | ||
409 | { /* too few codes for k-w bit table */ | ||
410 | DEBG1("2 "); | ||
411 | f -= a + 1; /* deduct codes from patterns left */ | ||
412 | xp = c + k; | ||
413 | while (++j < z) /* try smaller tables up to z bits */ | ||
414 | { | ||
415 | if ((f <<= 1) <= *++xp) | ||
416 | break; /* enough codes to use up j bits */ | ||
417 | f -= *xp; /* else deduct codes from patterns */ | ||
418 | } | ||
419 | } | ||
420 | DEBG1("3 "); | ||
421 | z = 1 << j; /* table entries for j-bit table */ | ||
422 | |||
423 | /* allocate and link in new table */ | ||
424 | if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) == | ||
425 | (struct huft *)NULL) | ||
426 | { | ||
427 | if (h) | ||
428 | huft_free(u[0]); | ||
429 | return 3; /* not enough memory */ | ||
430 | } | ||
431 | DEBG1("4 "); | ||
432 | hufts += z + 1; /* track memory usage */ | ||
433 | *t = q + 1; /* link to list for huft_free() */ | ||
434 | *(t = &(q->v.t)) = (struct huft *)NULL; | ||
435 | u[h] = ++q; /* table starts after link */ | ||
436 | |||
437 | DEBG1("5 "); | ||
438 | /* connect to last table, if there is one */ | ||
439 | if (h) | ||
440 | { | ||
441 | x[h] = i; /* save pattern for backing up */ | ||
442 | r.b = (uch)l; /* bits to dump before this table */ | ||
443 | r.e = (uch)(16 + j); /* bits in this table */ | ||
444 | r.v.t = q; /* pointer to this table */ | ||
445 | j = i >> (w - l); /* (get around Turbo C bug) */ | ||
446 | u[h-1][j] = r; /* connect to last table */ | ||
447 | } | ||
448 | DEBG1("6 "); | ||
449 | } | ||
450 | DEBG("h6c "); | ||
451 | |||
452 | /* set up table entry in r */ | ||
453 | r.b = (uch)(k - w); | ||
454 | if (p >= v + n) | ||
455 | r.e = 99; /* out of values--invalid code */ | ||
456 | else if (*p < s) | ||
457 | { | ||
458 | r.e = (uch)(*p < 256 ? 16 : 15); /* 256 is end-of-block code */ | ||
459 | r.v.n = (ush)(*p); /* simple code is just the value */ | ||
460 | p++; /* one compiler does not like *p++ */ | ||
461 | } | ||
462 | else | ||
463 | { | ||
464 | r.e = (uch)e[*p - s]; /* non-simple--look up in lists */ | ||
465 | r.v.n = d[*p++ - s]; | ||
466 | } | ||
467 | DEBG("h6d "); | ||
468 | |||
469 | /* fill code-like entries with r */ | ||
470 | f = 1 << (k - w); | ||
471 | for (j = i >> w; j < z; j += f) | ||
472 | q[j] = r; | ||
473 | |||
474 | /* backwards increment the k-bit code i */ | ||
475 | for (j = 1 << (k - 1); i & j; j >>= 1) | ||
476 | i ^= j; | ||
477 | i ^= j; | ||
478 | |||
479 | /* backup over finished tables */ | ||
480 | while ((i & ((1 << w) - 1)) != x[h]) | ||
481 | { | ||
482 | h--; /* don't need to update q */ | ||
483 | w -= l; | ||
484 | } | ||
485 | DEBG("h6e "); | ||
486 | } | ||
487 | DEBG("h6f "); | ||
488 | } | ||
489 | |||
490 | DEBG("huft7 "); | ||
491 | |||
492 | /* Return true (1) if we were given an incomplete table */ | ||
493 | return y != 0 && g != 1; | ||
494 | } | ||
495 | |||
496 | |||
497 | |||
498 | STATIC int INIT huft_free( | ||
499 | struct huft *t /* table to free */ | ||
500 | ) | ||
501 | /* Free the malloc'ed tables built by huft_build(), which makes a linked | ||
502 | list of the tables it made, with the links in a dummy first entry of | ||
503 | each table. */ | ||
504 | { | ||
505 | register struct huft *p, *q; | ||
506 | |||
507 | |||
508 | /* Go through linked list, freeing from the malloced (t[-1]) address. */ | ||
509 | p = t; | ||
510 | while (p != (struct huft *)NULL) | ||
511 | { | ||
512 | q = (--p)->v.t; | ||
513 | free((char*)p); | ||
514 | p = q; | ||
515 | } | ||
516 | return 0; | ||
517 | } | ||
518 | |||
519 | |||
520 | STATIC int INIT inflate_codes( | ||
521 | struct huft *tl, /* literal/length decoder tables */ | ||
522 | struct huft *td, /* distance decoder tables */ | ||
523 | int bl, /* number of bits decoded by tl[] */ | ||
524 | int bd /* number of bits decoded by td[] */ | ||
525 | ) | ||
526 | /* inflate (decompress) the codes in a deflated (compressed) block. | ||
527 | Return an error code or zero if it all goes ok. */ | ||
528 | { | ||
529 | register unsigned e; /* table entry flag/number of extra bits */ | ||
530 | unsigned n, d; /* length and index for copy */ | ||
531 | unsigned w; /* current window position */ | ||
532 | struct huft *t; /* pointer to table entry */ | ||
533 | unsigned ml, md; /* masks for bl and bd bits */ | ||
534 | register ulg b; /* bit buffer */ | ||
535 | register unsigned k; /* number of bits in bit buffer */ | ||
536 | |||
537 | |||
538 | /* make local copies of globals */ | ||
539 | b = bb; /* initialize bit buffer */ | ||
540 | k = bk; | ||
541 | w = wp; /* initialize window position */ | ||
542 | |||
543 | /* inflate the coded data */ | ||
544 | ml = mask_bits[bl]; /* precompute masks for speed */ | ||
545 | md = mask_bits[bd]; | ||
546 | for (;;) /* do until end of block */ | ||
547 | { | ||
548 | NEEDBITS((unsigned)bl) | ||
549 | if ((e = (t = tl + ((unsigned)b & ml))->e) > 16) | ||
550 | do { | ||
551 | if (e == 99) | ||
552 | return 1; | ||
553 | DUMPBITS(t->b) | ||
554 | e -= 16; | ||
555 | NEEDBITS(e) | ||
556 | } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); | ||
557 | DUMPBITS(t->b) | ||
558 | if (e == 16) /* then it's a literal */ | ||
559 | { | ||
560 | slide[w++] = (uch)t->v.n; | ||
561 | Tracevv((stderr, "%c", slide[w-1])); | ||
562 | if (w == WSIZE) | ||
563 | { | ||
564 | flush_output(w); | ||
565 | w = 0; | ||
566 | } | ||
567 | } | ||
568 | else /* it's an EOB or a length */ | ||
569 | { | ||
570 | /* exit if end of block */ | ||
571 | if (e == 15) | ||
572 | break; | ||
573 | |||
574 | /* get length of block to copy */ | ||
575 | NEEDBITS(e) | ||
576 | n = t->v.n + ((unsigned)b & mask_bits[e]); | ||
577 | DUMPBITS(e); | ||
578 | |||
579 | /* decode distance of block to copy */ | ||
580 | NEEDBITS((unsigned)bd) | ||
581 | if ((e = (t = td + ((unsigned)b & md))->e) > 16) | ||
582 | do { | ||
583 | if (e == 99) | ||
584 | return 1; | ||
585 | DUMPBITS(t->b) | ||
586 | e -= 16; | ||
587 | NEEDBITS(e) | ||
588 | } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); | ||
589 | DUMPBITS(t->b) | ||
590 | NEEDBITS(e) | ||
591 | d = w - t->v.n - ((unsigned)b & mask_bits[e]); | ||
592 | DUMPBITS(e) | ||
593 | Tracevv((stderr,"\\[%d,%d]", w-d, n)); | ||
594 | |||
595 | /* do the copy */ | ||
596 | do { | ||
597 | n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? n : e); | ||
598 | #if !defined(NOMEMCPY) && !defined(DEBUG) | ||
599 | if (w - d >= e) /* (this test assumes unsigned comparison) */ | ||
600 | { | ||
601 | memcpy(slide + w, slide + d, e); | ||
602 | w += e; | ||
603 | d += e; | ||
604 | } | ||
605 | else /* do it slow to avoid memcpy() overlap */ | ||
606 | #endif /* !NOMEMCPY */ | ||
607 | do { | ||
608 | slide[w++] = slide[d++]; | ||
609 | Tracevv((stderr, "%c", slide[w-1])); | ||
610 | } while (--e); | ||
611 | if (w == WSIZE) | ||
612 | { | ||
613 | flush_output(w); | ||
614 | w = 0; | ||
615 | } | ||
616 | } while (n); | ||
617 | } | ||
618 | } | ||
619 | |||
620 | |||
621 | /* restore the globals from the locals */ | ||
622 | wp = w; /* restore global window pointer */ | ||
623 | bb = b; /* restore global bit buffer */ | ||
624 | bk = k; | ||
625 | |||
626 | /* done */ | ||
627 | return 0; | ||
628 | |||
629 | underrun: | ||
630 | return 4; /* Input underrun */ | ||
631 | } | ||
632 | |||
633 | |||
634 | |||
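/*
 * The copy loop in inflate_codes() above falls back to a byte-at-a-time
 * loop whenever the source and destination ranges may overlap (w - d < e),
 * because a match whose distance is smaller than its length deliberately
 * re-reads bytes it has just written.  A minimal user-space sketch of that
 * effect, assuming nothing from this file:
 *
 *	#include <assert.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		char win[8] = "ab";
 *		int i;
 *
 *		// copy 6 bytes from distance 2: expands "ab" to "abababab"
 *		for (i = 2; i < 8; i++)
 *			win[i] = win[i - 2];
 *		assert(memcmp(win, "abababab", 8) == 0);
 *		return 0;
 *	}
 *
 * A single memcpy() over that overlapping range would have undefined
 * behaviour, which is exactly why the non-overlapping case is tested
 * before using it.
 */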
635 | STATIC int INIT inflate_stored(void) | ||
636 | /* "decompress" an inflated type 0 (stored) block. */ | ||
637 | { | ||
638 | unsigned n; /* number of bytes in block */ | ||
639 | unsigned w; /* current window position */ | ||
640 | register ulg b; /* bit buffer */ | ||
641 | register unsigned k; /* number of bits in bit buffer */ | ||
642 | |||
643 | DEBG("<stor"); | ||
644 | |||
645 | /* make local copies of globals */ | ||
646 | b = bb; /* initialize bit buffer */ | ||
647 | k = bk; | ||
648 | w = wp; /* initialize window position */ | ||
649 | |||
650 | |||
651 | /* go to byte boundary */ | ||
652 | n = k & 7; | ||
653 | DUMPBITS(n); | ||
654 | |||
655 | |||
656 | /* get the length and its complement */ | ||
657 | NEEDBITS(16) | ||
658 | n = ((unsigned)b & 0xffff); | ||
659 | DUMPBITS(16) | ||
660 | NEEDBITS(16) | ||
661 | if (n != (unsigned)((~b) & 0xffff)) | ||
662 | return 1; /* error in compressed data */ | ||
663 | DUMPBITS(16) | ||
664 | |||
665 | |||
666 | /* read and output the compressed data */ | ||
667 | while (n--) | ||
668 | { | ||
669 | NEEDBITS(8) | ||
670 | slide[w++] = (uch)b; | ||
671 | if (w == WSIZE) | ||
672 | { | ||
673 | flush_output(w); | ||
674 | w = 0; | ||
675 | } | ||
676 | DUMPBITS(8) | ||
677 | } | ||
678 | |||
679 | |||
680 | /* restore the globals from the locals */ | ||
681 | wp = w; /* restore global window pointer */ | ||
682 | bb = b; /* restore global bit buffer */ | ||
683 | bk = k; | ||
684 | |||
685 | DEBG(">"); | ||
686 | return 0; | ||
687 | |||
688 | underrun: | ||
689 | return 4; /* Input underrun */ | ||
690 | } | ||
691 | |||
692 | |||
693 | /* | ||
694 | * We use `noinline' here to prevent gcc-3.5 from using too much stack space | ||
695 | */ | ||
696 | STATIC int noinline INIT inflate_fixed(void) | ||
697 | /* decompress an inflated type 1 (fixed Huffman codes) block. We should | ||
698 | either replace this with a custom decoder, or at least precompute the | ||
699 | Huffman tables. */ | ||
700 | { | ||
701 | int i; /* temporary variable */ | ||
702 | struct huft *tl; /* literal/length code table */ | ||
703 | struct huft *td; /* distance code table */ | ||
704 | int bl; /* lookup bits for tl */ | ||
705 | int bd; /* lookup bits for td */ | ||
706 | unsigned l[288]; /* length list for huft_build */ | ||
707 | |||
708 | DEBG("<fix"); | ||
709 | |||
710 | /* set up literal table */ | ||
711 | for (i = 0; i < 144; i++) | ||
712 | l[i] = 8; | ||
713 | for (; i < 256; i++) | ||
714 | l[i] = 9; | ||
715 | for (; i < 280; i++) | ||
716 | l[i] = 7; | ||
717 | for (; i < 288; i++) /* make a complete, but wrong code set */ | ||
718 | l[i] = 8; | ||
719 | bl = 7; | ||
720 | if ((i = huft_build(l, 288, 257, cplens, cplext, &tl, &bl)) != 0) | ||
721 | return i; | ||
722 | |||
723 | |||
724 | /* set up distance table */ | ||
725 | for (i = 0; i < 30; i++) /* make an incomplete code set */ | ||
726 | l[i] = 5; | ||
727 | bd = 5; | ||
728 | if ((i = huft_build(l, 30, 0, cpdist, cpdext, &td, &bd)) > 1) | ||
729 | { | ||
730 | huft_free(tl); | ||
731 | |||
732 | DEBG(">"); | ||
733 | return i; | ||
734 | } | ||
735 | |||
736 | |||
737 | /* decompress until an end-of-block code */ | ||
738 | if (inflate_codes(tl, td, bl, bd)) | ||
739 | return 1; | ||
740 | |||
741 | |||
742 | /* free the decoding tables, return */ | ||
743 | huft_free(tl); | ||
744 | huft_free(td); | ||
745 | return 0; | ||
746 | } | ||
747 | |||
748 | |||
749 | /* | ||
750 | * We use `noinline' here to prevent gcc-3.5 from using too much stack space | ||
751 | */ | ||
752 | STATIC int noinline INIT inflate_dynamic(void) | ||
753 | /* decompress an inflated type 2 (dynamic Huffman codes) block. */ | ||
754 | { | ||
755 | int i; /* temporary variables */ | ||
756 | unsigned j; | ||
757 | unsigned l; /* last length */ | ||
758 | unsigned m; /* mask for bit lengths table */ | ||
759 | unsigned n; /* number of lengths to get */ | ||
760 | struct huft *tl; /* literal/length code table */ | ||
761 | struct huft *td; /* distance code table */ | ||
762 | int bl; /* lookup bits for tl */ | ||
763 | int bd; /* lookup bits for td */ | ||
764 | unsigned nb; /* number of bit length codes */ | ||
765 | unsigned nl; /* number of literal/length codes */ | ||
766 | unsigned nd; /* number of distance codes */ | ||
767 | #ifdef PKZIP_BUG_WORKAROUND | ||
768 | unsigned ll[288+32]; /* literal/length and distance code lengths */ | ||
769 | #else | ||
770 | unsigned ll[286+30]; /* literal/length and distance code lengths */ | ||
771 | #endif | ||
772 | register ulg b; /* bit buffer */ | ||
773 | register unsigned k; /* number of bits in bit buffer */ | ||
774 | |||
775 | DEBG("<dyn"); | ||
776 | |||
777 | /* make local bit buffer */ | ||
778 | b = bb; | ||
779 | k = bk; | ||
780 | |||
781 | |||
782 | /* read in table lengths */ | ||
783 | NEEDBITS(5) | ||
784 | nl = 257 + ((unsigned)b & 0x1f); /* number of literal/length codes */ | ||
785 | DUMPBITS(5) | ||
786 | NEEDBITS(5) | ||
787 | nd = 1 + ((unsigned)b & 0x1f); /* number of distance codes */ | ||
788 | DUMPBITS(5) | ||
789 | NEEDBITS(4) | ||
790 | nb = 4 + ((unsigned)b & 0xf); /* number of bit length codes */ | ||
791 | DUMPBITS(4) | ||
792 | #ifdef PKZIP_BUG_WORKAROUND | ||
793 | if (nl > 288 || nd > 32) | ||
794 | #else | ||
795 | if (nl > 286 || nd > 30) | ||
796 | #endif | ||
797 | return 1; /* bad lengths */ | ||
798 | |||
799 | DEBG("dyn1 "); | ||
800 | |||
801 | /* read in bit-length-code lengths */ | ||
802 | for (j = 0; j < nb; j++) | ||
803 | { | ||
804 | NEEDBITS(3) | ||
805 | ll[border[j]] = (unsigned)b & 7; | ||
806 | DUMPBITS(3) | ||
807 | } | ||
808 | for (; j < 19; j++) | ||
809 | ll[border[j]] = 0; | ||
810 | |||
811 | DEBG("dyn2 "); | ||
812 | |||
813 | /* build decoding table for trees--single level, 7 bit lookup */ | ||
814 | bl = 7; | ||
815 | if ((i = huft_build(ll, 19, 19, NULL, NULL, &tl, &bl)) != 0) | ||
816 | { | ||
817 | if (i == 1) | ||
818 | huft_free(tl); | ||
819 | return i; /* incomplete code set */ | ||
820 | } | ||
821 | |||
822 | DEBG("dyn3 "); | ||
823 | |||
824 | /* read in literal and distance code lengths */ | ||
825 | n = nl + nd; | ||
826 | m = mask_bits[bl]; | ||
827 | i = l = 0; | ||
828 | while ((unsigned)i < n) | ||
829 | { | ||
830 | NEEDBITS((unsigned)bl) | ||
831 | j = (td = tl + ((unsigned)b & m))->b; | ||
832 | DUMPBITS(j) | ||
833 | j = td->v.n; | ||
834 | if (j < 16) /* length of code in bits (0..15) */ | ||
835 | ll[i++] = l = j; /* save last length in l */ | ||
836 | else if (j == 16) /* repeat last length 3 to 6 times */ | ||
837 | { | ||
838 | NEEDBITS(2) | ||
839 | j = 3 + ((unsigned)b & 3); | ||
840 | DUMPBITS(2) | ||
841 | if ((unsigned)i + j > n) | ||
842 | return 1; | ||
843 | while (j--) | ||
844 | ll[i++] = l; | ||
845 | } | ||
846 | else if (j == 17) /* 3 to 10 zero length codes */ | ||
847 | { | ||
848 | NEEDBITS(3) | ||
849 | j = 3 + ((unsigned)b & 7); | ||
850 | DUMPBITS(3) | ||
851 | if ((unsigned)i + j > n) | ||
852 | return 1; | ||
853 | while (j--) | ||
854 | ll[i++] = 0; | ||
855 | l = 0; | ||
856 | } | ||
857 | else /* j == 18: 11 to 138 zero length codes */ | ||
858 | { | ||
859 | NEEDBITS(7) | ||
860 | j = 11 + ((unsigned)b & 0x7f); | ||
861 | DUMPBITS(7) | ||
862 | if ((unsigned)i + j > n) | ||
863 | return 1; | ||
864 | while (j--) | ||
865 | ll[i++] = 0; | ||
866 | l = 0; | ||
867 | } | ||
868 | } | ||
869 | |||
870 | DEBG("dyn4 "); | ||
871 | |||
872 | /* free decoding table for trees */ | ||
873 | huft_free(tl); | ||
874 | |||
875 | DEBG("dyn5 "); | ||
876 | |||
877 | /* restore the global bit buffer */ | ||
878 | bb = b; | ||
879 | bk = k; | ||
880 | |||
881 | DEBG("dyn5a "); | ||
882 | |||
883 | /* build the decoding tables for literal/length and distance codes */ | ||
884 | bl = lbits; | ||
885 | if ((i = huft_build(ll, nl, 257, cplens, cplext, &tl, &bl)) != 0) | ||
886 | { | ||
887 | DEBG("dyn5b "); | ||
888 | if (i == 1) { | ||
889 | error("incomplete literal tree"); | ||
890 | huft_free(tl); | ||
891 | } | ||
892 | return i; /* incomplete code set */ | ||
893 | } | ||
894 | DEBG("dyn5c "); | ||
895 | bd = dbits; | ||
896 | if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &td, &bd)) != 0) | ||
897 | { | ||
898 | DEBG("dyn5d "); | ||
899 | if (i == 1) { | ||
900 | error("incomplete distance tree"); | ||
901 | #ifdef PKZIP_BUG_WORKAROUND | ||
902 | i = 0; | ||
903 | } | ||
904 | #else | ||
905 | huft_free(td); | ||
906 | } | ||
907 | huft_free(tl); | ||
908 | return i; /* incomplete code set */ | ||
909 | #endif | ||
910 | } | ||
911 | |||
912 | DEBG("dyn6 "); | ||
913 | |||
914 | /* decompress until an end-of-block code */ | ||
915 | if (inflate_codes(tl, td, bl, bd)) | ||
916 | return 1; | ||
917 | |||
918 | DEBG("dyn7 "); | ||
919 | |||
920 | /* free the decoding tables, return */ | ||
921 | huft_free(tl); | ||
922 | huft_free(td); | ||
923 | |||
924 | DEBG(">"); | ||
925 | return 0; | ||
926 | |||
927 | underrun: | ||
928 | return 4; /* Input underrun */ | ||
929 | } | ||
930 | |||
931 | |||
932 | |||
933 | STATIC int INIT inflate_block( | ||
934 | int *e /* last block flag */ | ||
935 | ) | ||
936 | /* decompress an inflated block */ | ||
937 | { | ||
938 | unsigned t; /* block type */ | ||
939 | register ulg b; /* bit buffer */ | ||
940 | register unsigned k; /* number of bits in bit buffer */ | ||
941 | |||
942 | DEBG("<blk"); | ||
943 | |||
944 | /* make local bit buffer */ | ||
945 | b = bb; | ||
946 | k = bk; | ||
947 | |||
948 | |||
949 | /* read in last block bit */ | ||
950 | NEEDBITS(1) | ||
951 | *e = (int)b & 1; | ||
952 | DUMPBITS(1) | ||
953 | |||
954 | |||
955 | /* read in block type */ | ||
956 | NEEDBITS(2) | ||
957 | t = (unsigned)b & 3; | ||
958 | DUMPBITS(2) | ||
959 | |||
960 | |||
961 | /* restore the global bit buffer */ | ||
962 | bb = b; | ||
963 | bk = k; | ||
964 | |||
965 | /* inflate that block type */ | ||
966 | if (t == 2) | ||
967 | return inflate_dynamic(); | ||
968 | if (t == 0) | ||
969 | return inflate_stored(); | ||
970 | if (t == 1) | ||
971 | return inflate_fixed(); | ||
972 | |||
973 | DEBG(">"); | ||
974 | |||
975 | /* bad block type */ | ||
976 | return 2; | ||
977 | |||
978 | underrun: | ||
979 | return 4; /* Input underrun */ | ||
980 | } | ||
981 | |||
982 | |||
983 | |||
984 | STATIC int INIT inflate(void) | ||
985 | /* decompress an inflated entry */ | ||
986 | { | ||
987 | int e; /* last block flag */ | ||
988 | int r; /* result code */ | ||
989 | unsigned h; /* maximum struct huft's malloc'ed */ | ||
990 | void *ptr; | ||
991 | |||
992 | /* initialize window, bit buffer */ | ||
993 | wp = 0; | ||
994 | bk = 0; | ||
995 | bb = 0; | ||
996 | |||
997 | |||
998 | /* decompress until the last block */ | ||
999 | h = 0; | ||
1000 | do { | ||
1001 | hufts = 0; | ||
1002 | gzip_mark(&ptr); | ||
1003 | if ((r = inflate_block(&e)) != 0) { | ||
1004 | gzip_release(&ptr); | ||
1005 | return r; | ||
1006 | } | ||
1007 | gzip_release(&ptr); | ||
1008 | if (hufts > h) | ||
1009 | h = hufts; | ||
1010 | } while (!e); | ||
1011 | |||
1012 | /* Undo too much lookahead. The next read will be byte aligned so we | ||
1013 | * can discard unused bits in the last meaningful byte. | ||
1014 | */ | ||
1015 | while (bk >= 8) { | ||
1016 | bk -= 8; | ||
1017 | inptr--; | ||
1018 | } | ||
1019 | |||
1020 | /* flush out slide */ | ||
1021 | flush_output(wp); | ||
1022 | |||
1023 | |||
1024 | /* return success */ | ||
1025 | #ifdef DEBUG | ||
1026 | fprintf(stderr, "<%u> ", h); | ||
1027 | #endif /* DEBUG */ | ||
1028 | return 0; | ||
1029 | } | ||
1030 | |||
1031 | /********************************************************************** | ||
1032 | * | ||
1033 | * The following are support routines for inflate.c | ||
1034 | * | ||
1035 | **********************************************************************/ | ||
1036 | |||
1037 | static ulg crc_32_tab[256]; | ||
1038 | static ulg crc; /* initialized in makecrc() so it'll reside in bss */ | ||
1039 | #define CRC_VALUE (crc ^ 0xffffffffUL) | ||
1040 | |||
1041 | /* | ||
1042 | * Code to compute the CRC-32 table. Borrowed from | ||
1043 | * gzip-1.0.3/makecrc.c. | ||
1044 | */ | ||
1045 | |||
1046 | static void INIT | ||
1047 | makecrc(void) | ||
1048 | { | ||
1049 | /* Not copyrighted 1990 Mark Adler */ | ||
1050 | |||
1051 | unsigned long c; /* crc shift register */ | ||
1052 | unsigned long e; /* polynomial exclusive-or pattern */ | ||
1053 | int i; /* counter for all possible eight bit values */ | ||
1054 | int k; /* byte being shifted into crc apparatus */ | ||
1055 | |||
1056 | /* terms of polynomial defining this crc (except x^32): */ | ||
1057 | static const int p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26}; | ||
1058 | |||
1059 | /* Make exclusive-or pattern from polynomial */ | ||
1060 | e = 0; | ||
1061 | for (i = 0; i < sizeof(p)/sizeof(int); i++) | ||
1062 | e |= 1L << (31 - p[i]); | ||
1063 | |||
1064 | crc_32_tab[0] = 0; | ||
1065 | |||
1066 | for (i = 1; i < 256; i++) | ||
1067 | { | ||
1068 | c = 0; | ||
1069 | for (k = i | 256; k != 1; k >>= 1) | ||
1070 | { | ||
1071 | c = c & 1 ? (c >> 1) ^ e : c >> 1; | ||
1072 | if (k & 1) | ||
1073 | c ^= e; | ||
1074 | } | ||
1075 | crc_32_tab[i] = c; | ||
1076 | } | ||
1077 | |||
1078 | /* this is initialized here so this code could reside in ROM */ | ||
1079 | crc = (ulg)0xffffffffUL; /* shift register contents */ | ||
1080 | } | ||
1081 | |||
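/*
 * For reference, the exclusive-or pattern built from p[] above is the
 * usual reflected CRC-32 polynomial: setting bit (31 - p[i]) for each
 * listed term yields e == 0xedb88320, so crc_32_tab[] ends up identical
 * to the standard table used by gzip and zlib.  A stand-alone check:
 *
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		static const int p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26};
 *		unsigned long e = 0;
 *		unsigned i;
 *
 *		for (i = 0; i < sizeof(p)/sizeof(p[0]); i++)
 *			e |= 1UL << (31 - p[i]);
 *		assert(e == 0xedb88320UL);
 *		return 0;
 *	}
 */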
1082 | /* gzip flag byte */ | ||
1083 | #define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ | ||
1084 | #define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ | ||
1085 | #define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ | ||
1086 | #define ORIG_NAME 0x08 /* bit 3 set: original file name present */ | ||
1087 | #define COMMENT 0x10 /* bit 4 set: file comment present */ | ||
1088 | #define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ | ||
1089 | #define RESERVED 0xC0 /* bit 6,7: reserved */ | ||
1090 | |||
1091 | /* | ||
1092 | * Do the uncompression! | ||
1093 | */ | ||
1094 | static int INIT gunzip(void) | ||
1095 | { | ||
1096 | uch flags; | ||
1097 | unsigned char magic[2]; /* magic header */ | ||
1098 | char method; | ||
1099 | ulg orig_crc = 0; /* original crc */ | ||
1100 | ulg orig_len = 0; /* original uncompressed length */ | ||
1101 | int res; | ||
1102 | |||
1103 | magic[0] = NEXTBYTE(); | ||
1104 | magic[1] = NEXTBYTE(); | ||
1105 | method = NEXTBYTE(); | ||
1106 | |||
1107 | if (magic[0] != 037 || | ||
1108 | ((magic[1] != 0213) && (magic[1] != 0236))) { | ||
1109 | error("bad gzip magic numbers"); | ||
1110 | return -1; | ||
1111 | } | ||
1112 | |||
1113 | /* We only support method #8, DEFLATED */ | ||
1114 | if (method != 8) { | ||
1115 | error("internal error, invalid method"); | ||
1116 | return -1; | ||
1117 | } | ||
1118 | |||
1119 | flags = (uch)get_byte(); | ||
1120 | if ((flags & ENCRYPTED) != 0) { | ||
1121 | error("Input is encrypted"); | ||
1122 | return -1; | ||
1123 | } | ||
1124 | if ((flags & CONTINUATION) != 0) { | ||
1125 | error("Multi part input"); | ||
1126 | return -1; | ||
1127 | } | ||
1128 | if ((flags & RESERVED) != 0) { | ||
1129 | error("Input has invalid flags"); | ||
1130 | return -1; | ||
1131 | } | ||
1132 | NEXTBYTE(); /* Get timestamp */ | ||
1133 | NEXTBYTE(); | ||
1134 | NEXTBYTE(); | ||
1135 | NEXTBYTE(); | ||
1136 | |||
1137 | (void)NEXTBYTE(); /* Ignore extra flags for the moment */ | ||
1138 | (void)NEXTBYTE(); /* Ignore OS type for the moment */ | ||
1139 | |||
1140 | if ((flags & EXTRA_FIELD) != 0) { | ||
1141 | unsigned len = (unsigned)NEXTBYTE(); | ||
1142 | len |= ((unsigned)NEXTBYTE())<<8; | ||
1143 | while (len--) (void)NEXTBYTE(); | ||
1144 | } | ||
1145 | |||
1146 | /* Skip the original file name if present */ | ||

1147 | if ((flags & ORIG_NAME) != 0) { | ||
1148 | /* Discard the old name */ | ||
1149 | while (NEXTBYTE() != 0) /* null */ ; | ||
1150 | } | ||
1151 | |||
1152 | /* Discard file comment if any */ | ||
1153 | if ((flags & COMMENT) != 0) { | ||
1154 | while (NEXTBYTE() != 0) /* null */ ; | ||
1155 | } | ||
1156 | |||
1157 | /* Decompress */ | ||
1158 | if ((res = inflate())) { | ||
1159 | switch (res) { | ||
1160 | case 0: | ||
1161 | break; | ||
1162 | case 1: | ||
1163 | error("invalid compressed format (err=1)"); | ||
1164 | break; | ||
1165 | case 2: | ||
1166 | error("invalid compressed format (err=2)"); | ||
1167 | break; | ||
1168 | case 3: | ||
1169 | error("out of memory"); | ||
1170 | break; | ||
1171 | case 4: | ||
1172 | error("out of input data"); | ||
1173 | break; | ||
1174 | default: | ||
1175 | error("invalid compressed format (other)"); | ||
1176 | } | ||
1177 | return -1; | ||
1178 | } | ||
1179 | |||
1180 | /* Get the crc and original length */ | ||
1181 | /* crc32 (see algorithm.doc) | ||
1182 | * uncompressed input size modulo 2^32 | ||
1183 | */ | ||
1184 | orig_crc = (ulg) NEXTBYTE(); | ||
1185 | orig_crc |= (ulg) NEXTBYTE() << 8; | ||
1186 | orig_crc |= (ulg) NEXTBYTE() << 16; | ||
1187 | orig_crc |= (ulg) NEXTBYTE() << 24; | ||
1188 | |||
1189 | orig_len = (ulg) NEXTBYTE(); | ||
1190 | orig_len |= (ulg) NEXTBYTE() << 8; | ||
1191 | orig_len |= (ulg) NEXTBYTE() << 16; | ||
1192 | orig_len |= (ulg) NEXTBYTE() << 24; | ||
1193 | |||
1194 | /* Validate decompression */ | ||
1195 | if (orig_crc != CRC_VALUE) { | ||
1196 | error("crc error"); | ||
1197 | return -1; | ||
1198 | } | ||
1199 | if (orig_len != bytes_out) { | ||
1200 | error("length error"); | ||
1201 | return -1; | ||
1202 | } | ||
1203 | return 0; | ||
1204 | |||
1205 | underrun: /* NEXTBYTE() goto's here if needed */ | ||
1206 | error("out of input data"); | ||
1207 | return -1; | ||
1208 | } | ||
1209 | |||
1210 | |||
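The NEEDBITS()/DUMPBITS() pairs used throughout the file above implement an
LSB-first bit buffer: whole bytes are shifted in above the k bits already
held, codes are read from the low end of b with a mask, and consumed bits
are shifted out. A stand-alone sketch of the same technique (hypothetical
helper names and a fixed example input; nothing taken verbatim from the
file's macro definitions):

#include <assert.h>

static const unsigned char in[] = { 0xb5, 0x01 };	/* example input */
static unsigned pos;

static unsigned long b;	/* bit buffer */
static unsigned k;	/* bits currently held in b */

static void needbits(unsigned n)
{
	while (k < n) {			/* refill a byte at a time */
		b |= (unsigned long)in[pos++] << k;
		k += 8;
	}
}

static void dumpbits(unsigned n)
{
	b >>= n;			/* discard the bits just decoded */
	k -= n;
}

int main(void)
{
	needbits(3);
	assert((b & 7) == 5);		/* low 3 bits of 0xb5 are 101 */
	dumpbits(3);
	needbits(5);
	assert((b & 31) == 22);		/* next 5 bits are 10110 */
	dumpbits(5);
	return 0;
}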
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c new file mode 100644 index 000000000000..a5d2cdc5684c --- /dev/null +++ b/lib/int_sqrt.c | |||
@@ -0,0 +1,32 @@ | |||
1 | |||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/module.h> | ||
4 | |||
5 | /** | ||
6 | * int_sqrt - rough approximation to sqrt | ||
7 | * @x: integer of which to calculate the sqrt | ||
8 | * | ||
9 | * A very rough approximation to the sqrt() function. | ||
10 | */ | ||
11 | unsigned long int_sqrt(unsigned long x) | ||
12 | { | ||
13 | unsigned long op, res, one; | ||
14 | |||
15 | op = x; | ||
16 | res = 0; | ||
17 | |||
18 | one = 1 << 30; | ||
19 | while (one > op) | ||
20 | one >>= 2; | ||
21 | |||
22 | while (one != 0) { | ||
23 | if (op >= res + one) { | ||
24 | op = op - (res + one); | ||
25 | res = res + 2 * one; | ||
26 | } | ||
27 | res /= 2; | ||
28 | one /= 4; | ||
29 | } | ||
30 | return res; | ||
31 | } | ||
32 | EXPORT_SYMBOL(int_sqrt); | ||
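The loop above is the classic digit-by-digit (base-4) integer square root:
it returns floor(sqrt(x)) exactly for any x that fits in 32 bits, while
inputs at or above 2^32 on 64-bit machines are mishandled because `one`
starts at 1 << 30 rather than at the top bit of unsigned long. A user-space
transcription with a few spot checks, for illustration only:

#include <assert.h>

static unsigned long int_sqrt(unsigned long x)
{
	unsigned long op = x, res = 0, one = 1UL << 30;

	while (one > op)		/* largest power of 4 <= x */
		one >>= 2;
	while (one != 0) {
		if (op >= res + one) {
			op -= res + one;
			res += 2 * one;
		}
		res /= 2;
		one /= 4;
	}
	return res;
}

int main(void)
{
	assert(int_sqrt(0) == 0);
	assert(int_sqrt(16) == 4);
	assert(int_sqrt(17) == 4);		/* floor, not rounding */
	assert(int_sqrt(1000000) == 1000);
	return 0;
}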
diff --git a/lib/iomap.c b/lib/iomap.c new file mode 100644 index 000000000000..5e74390852b0 --- /dev/null +++ b/lib/iomap.c | |||
@@ -0,0 +1,212 @@ | |||
1 | /* | ||
2 | * Implement the default iomap interfaces | ||
3 | * | ||
4 | * (C) Copyright 2004 Linus Torvalds | ||
5 | */ | ||
6 | #include <linux/pci.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <asm/io.h> | ||
9 | |||
10 | /* | ||
11 | * Read/write from/to an (offsettable) iomem cookie. It might be a PIO | ||
12 | * access or an MMIO access, these functions don't care. The info is | ||
13 | * encoded in the hardware mapping set up by the mapping functions | ||
14 | * (or the cookie itself, depending on implementation and hw). | ||
15 | * | ||
16 | * The generic routines don't assume any hardware mappings, and just | ||
17 | * encode the PIO/MMIO as part of the cookie. They coldly assume that | ||
18 | * the MMIO IO mappings are not in the low address range. | ||
19 | * | ||
20 | * Architectures for which this is not true can't use this generic | ||
21 | * implementation and should do their own copy. | ||
22 | */ | ||
23 | |||
24 | #ifndef HAVE_ARCH_PIO_SIZE | ||
25 | /* | ||
26 | * We encode the physical PIO addresses (0-0xffff) into the | ||
27 | * pointer by offsetting them with a constant (0x10000) and | ||
28 | * assuming that all the low addresses are always PIO. That means | ||
29 | * we can do some sanity checks on the low bits, and don't | ||
30 | * need to just take things for granted. | ||
31 | */ | ||
32 | #define PIO_OFFSET 0x10000UL | ||
33 | #define PIO_MASK 0x0ffffUL | ||
34 | #define PIO_RESERVED 0x40000UL | ||
35 | #endif | ||
36 | |||
37 | /* | ||
38 | * Ugly macros are a way of life. | ||
39 | */ | ||
40 | #define VERIFY_PIO(port) BUG_ON((port & ~PIO_MASK) != PIO_OFFSET) | ||
41 | |||
42 | #define IO_COND(addr, is_pio, is_mmio) do { \ | ||
43 | unsigned long port = (unsigned long __force)addr; \ | ||
44 | if (port < PIO_RESERVED) { \ | ||
45 | VERIFY_PIO(port); \ | ||
46 | port &= PIO_MASK; \ | ||
47 | is_pio; \ | ||
48 | } else { \ | ||
49 | is_mmio; \ | ||
50 | } \ | ||
51 | } while (0) | ||
52 | |||
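/*
 * Worked example of the generic encoding: ioport_map(0x3f8, 8) returns
 * the cookie 0x103f8.  On a later ioread8(), 0x103f8 < PIO_RESERVED
 * selects the PIO branch, VERIFY_PIO() checks that the bits above
 * PIO_MASK are exactly PIO_OFFSET (0x103f8 & ~0xffff == 0x10000), and
 * "port &= PIO_MASK" recovers 0x3f8 for inb().  Anything at or above
 * PIO_RESERVED is assumed to be an ioremap()ed MMIO address and is
 * passed to readb() untouched.
 */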
53 | unsigned int fastcall ioread8(void __iomem *addr) | ||
54 | { | ||
55 | IO_COND(addr, return inb(port), return readb(addr)); | ||
56 | } | ||
57 | unsigned int fastcall ioread16(void __iomem *addr) | ||
58 | { | ||
59 | IO_COND(addr, return inw(port), return readw(addr)); | ||
60 | } | ||
61 | unsigned int fastcall ioread32(void __iomem *addr) | ||
62 | { | ||
63 | IO_COND(addr, return inl(port), return readl(addr)); | ||
64 | } | ||
65 | EXPORT_SYMBOL(ioread8); | ||
66 | EXPORT_SYMBOL(ioread16); | ||
67 | EXPORT_SYMBOL(ioread32); | ||
68 | |||
69 | void fastcall iowrite8(u8 val, void __iomem *addr) | ||
70 | { | ||
71 | IO_COND(addr, outb(val,port), writeb(val, addr)); | ||
72 | } | ||
73 | void fastcall iowrite16(u16 val, void __iomem *addr) | ||
74 | { | ||
75 | IO_COND(addr, outw(val,port), writew(val, addr)); | ||
76 | } | ||
77 | void fastcall iowrite32(u32 val, void __iomem *addr) | ||
78 | { | ||
79 | IO_COND(addr, outl(val,port), writel(val, addr)); | ||
80 | } | ||
81 | EXPORT_SYMBOL(iowrite8); | ||
82 | EXPORT_SYMBOL(iowrite16); | ||
83 | EXPORT_SYMBOL(iowrite32); | ||
84 | |||
85 | /* | ||
86 | * These are the "repeat MMIO read/write" functions. | ||
87 | * Note the "__raw" accesses, since we don't want to | ||
88 | * convert to CPU byte order. We write in "IO byte | ||
89 | * order" (we also don't have IO barriers). | ||
90 | */ | ||
91 | static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) | ||
92 | { | ||
93 | while (--count >= 0) { | ||
94 | u8 data = __raw_readb(addr); | ||
95 | *dst = data; | ||
96 | dst++; | ||
97 | } | ||
98 | } | ||
99 | static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) | ||
100 | { | ||
101 | while (--count >= 0) { | ||
102 | u16 data = __raw_readw(addr); | ||
103 | *dst = data; | ||
104 | dst++; | ||
105 | } | ||
106 | } | ||
107 | static inline void mmio_insl(void __iomem *addr, u32 *dst, int count) | ||
108 | { | ||
109 | while (--count >= 0) { | ||
110 | u32 data = __raw_readl(addr); | ||
111 | *dst = data; | ||
112 | dst++; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count) | ||
117 | { | ||
118 | while (--count >= 0) { | ||
119 | __raw_writeb(*src, addr); | ||
120 | src++; | ||
121 | } | ||
122 | } | ||
123 | static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count) | ||
124 | { | ||
125 | while (--count >= 0) { | ||
126 | __raw_writew(*src, addr); | ||
127 | src++; | ||
128 | } | ||
129 | } | ||
130 | static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count) | ||
131 | { | ||
132 | while (--count >= 0) { | ||
133 | __raw_writel(*src, addr); | ||
134 | src++; | ||
135 | } | ||
136 | } | ||
137 | |||
138 | void fastcall ioread8_rep(void __iomem *addr, void *dst, unsigned long count) | ||
139 | { | ||
140 | IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count)); | ||
141 | } | ||
142 | void fastcall ioread16_rep(void __iomem *addr, void *dst, unsigned long count) | ||
143 | { | ||
144 | IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count)); | ||
145 | } | ||
146 | void fastcall ioread32_rep(void __iomem *addr, void *dst, unsigned long count) | ||
147 | { | ||
148 | IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count)); | ||
149 | } | ||
150 | EXPORT_SYMBOL(ioread8_rep); | ||
151 | EXPORT_SYMBOL(ioread16_rep); | ||
152 | EXPORT_SYMBOL(ioread32_rep); | ||
153 | |||
154 | void fastcall iowrite8_rep(void __iomem *addr, const void *src, unsigned long count) | ||
155 | { | ||
156 | IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count)); | ||
157 | } | ||
158 | void fastcall iowrite16_rep(void __iomem *addr, const void *src, unsigned long count) | ||
159 | { | ||
160 | IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count)); | ||
161 | } | ||
162 | void fastcall iowrite32_rep(void __iomem *addr, const void *src, unsigned long count) | ||
163 | { | ||
164 | IO_COND(addr, outsl(port, src,count), mmio_outsl(addr, src, count)); | ||
165 | } | ||
166 | EXPORT_SYMBOL(iowrite8_rep); | ||
167 | EXPORT_SYMBOL(iowrite16_rep); | ||
168 | EXPORT_SYMBOL(iowrite32_rep); | ||
169 | |||
170 | /* Create a virtual mapping cookie for an IO port range */ | ||
171 | void __iomem *ioport_map(unsigned long port, unsigned int nr) | ||
172 | { | ||
173 | if (port > PIO_MASK) | ||
174 | return NULL; | ||
175 | return (void __iomem *) (unsigned long) (port + PIO_OFFSET); | ||
176 | } | ||
177 | |||
178 | void ioport_unmap(void __iomem *addr) | ||
179 | { | ||
180 | /* Nothing to do */ | ||
181 | } | ||
182 | EXPORT_SYMBOL(ioport_map); | ||
183 | EXPORT_SYMBOL(ioport_unmap); | ||
184 | |||
185 | /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ | ||
186 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | ||
187 | { | ||
188 | unsigned long start = pci_resource_start(dev, bar); | ||
189 | unsigned long len = pci_resource_len(dev, bar); | ||
190 | unsigned long flags = pci_resource_flags(dev, bar); | ||
191 | |||
192 | if (!len || !start) | ||
193 | return NULL; | ||
194 | if (maxlen && len > maxlen) | ||
195 | len = maxlen; | ||
196 | if (flags & IORESOURCE_IO) | ||
197 | return ioport_map(start, len); | ||
198 | if (flags & IORESOURCE_MEM) { | ||
199 | if (flags & IORESOURCE_CACHEABLE) | ||
200 | return ioremap(start, len); | ||
201 | return ioremap_nocache(start, len); | ||
202 | } | ||
203 | /* What? */ | ||
204 | return NULL; | ||
205 | } | ||
206 | |||
207 | void pci_iounmap(struct pci_dev *dev, void __iomem * addr) | ||
208 | { | ||
209 | IO_COND(addr, /* nothing */, iounmap(addr)); | ||
210 | } | ||
211 | EXPORT_SYMBOL(pci_iomap); | ||
212 | EXPORT_SYMBOL(pci_iounmap); | ||
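A typical consumer of these exports, sketched with a hypothetical driver
and a made-up register offset (nothing below is taken from this file):

#include <linux/pci.h>

/* hypothetical probe fragment: map BAR 0 and touch a 32-bit register */
static int example_probe(struct pci_dev *dev)
{
	void __iomem *regs;
	u32 status;

	regs = pci_iomap(dev, 0, 0);	/* maxlen 0: map the whole BAR */
	if (!regs)
		return -ENOMEM;

	status = ioread32(regs + 0x10);	/* offset 0x10 is illustrative */
	iowrite32(status, regs + 0x10);

	pci_iounmap(dev, regs);
	return 0;
}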
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c new file mode 100644 index 000000000000..99b0ae3d51dd --- /dev/null +++ b/lib/kernel_lock.c | |||
@@ -0,0 +1,264 @@ | |||
1 | /* | ||
2 | * lib/kernel_lock.c | ||
3 | * | ||
4 | * This is the traditional BKL - big kernel lock. Largely | ||
5 | * relegated to obsolescense, but used by various less | ||
6 | * important (or lazy) subsystems. | ||
7 | */ | ||
8 | #include <linux/smp_lock.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/kallsyms.h> | ||
11 | |||
12 | #if defined(CONFIG_PREEMPT) && defined(__smp_processor_id) && \ | ||
13 | defined(CONFIG_DEBUG_PREEMPT) | ||
14 | |||
15 | /* | ||
16 | * Debugging check. | ||
17 | */ | ||
18 | unsigned int smp_processor_id(void) | ||
19 | { | ||
20 | unsigned long preempt_count = preempt_count(); | ||
21 | int this_cpu = __smp_processor_id(); | ||
22 | cpumask_t this_mask; | ||
23 | |||
24 | if (likely(preempt_count)) | ||
25 | goto out; | ||
26 | |||
27 | if (irqs_disabled()) | ||
28 | goto out; | ||
29 | |||
30 | /* | ||
31 | * Kernel threads bound to a single CPU can safely use | ||
32 | * smp_processor_id(): | ||
33 | */ | ||
34 | this_mask = cpumask_of_cpu(this_cpu); | ||
35 | |||
36 | if (cpus_equal(current->cpus_allowed, this_mask)) | ||
37 | goto out; | ||
38 | |||
39 | /* | ||
40 | * It is valid to assume CPU-locality during early bootup: | ||
41 | */ | ||
42 | if (system_state != SYSTEM_RUNNING) | ||
43 | goto out; | ||
44 | |||
45 | /* | ||
46 | * Avoid recursion: | ||
47 | */ | ||
48 | preempt_disable(); | ||
49 | |||
50 | if (!printk_ratelimit()) | ||
51 | goto out_enable; | ||
52 | |||
53 | printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid); | ||
54 | print_symbol("caller is %s\n", (long)__builtin_return_address(0)); | ||
55 | dump_stack(); | ||
56 | |||
57 | out_enable: | ||
58 | preempt_enable_no_resched(); | ||
59 | out: | ||
60 | return this_cpu; | ||
61 | } | ||
62 | |||
63 | EXPORT_SYMBOL(smp_processor_id); | ||
64 | |||
65 | #endif /* PREEMPT && __smp_processor_id && DEBUG_PREEMPT */ | ||
66 | |||
67 | #ifdef CONFIG_PREEMPT_BKL | ||
68 | /* | ||
69 | * The 'big kernel semaphore' | ||
70 | * | ||
71 | * This mutex is taken and released recursively by lock_kernel() | ||
72 | * and unlock_kernel(). It is transparently dropped and reacquired | ||
73 | * over schedule(). It is used to protect legacy code that hasn't | ||
74 | * been migrated to a proper locking design yet. | ||
75 | * | ||
76 | * Note: code locked by this semaphore will only be serialized against | ||
77 | * other code using the same locking facility. The code guarantees that | ||
78 | * the task remains on the same CPU. | ||
79 | * | ||
80 | * Don't use in new code. | ||
81 | */ | ||
82 | static DECLARE_MUTEX(kernel_sem); | ||
83 | |||
84 | /* | ||
85 | * Re-acquire the kernel semaphore. | ||
86 | * | ||
87 | * This function is called with preemption off. | ||
88 | * | ||
89 | * We are executing in schedule() so the code must be extremely careful | ||
90 | * about recursion, both due to the down() and due to the enabling of | ||
91 | * preemption. schedule() will re-check the preemption flag after | ||
92 | * reacquiring the semaphore. | ||
93 | */ | ||
94 | int __lockfunc __reacquire_kernel_lock(void) | ||
95 | { | ||
96 | struct task_struct *task = current; | ||
97 | int saved_lock_depth = task->lock_depth; | ||
98 | |||
99 | BUG_ON(saved_lock_depth < 0); | ||
100 | |||
101 | task->lock_depth = -1; | ||
102 | preempt_enable_no_resched(); | ||
103 | |||
104 | down(&kernel_sem); | ||
105 | |||
106 | preempt_disable(); | ||
107 | task->lock_depth = saved_lock_depth; | ||
108 | |||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | void __lockfunc __release_kernel_lock(void) | ||
113 | { | ||
114 | up(&kernel_sem); | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * Getting the big kernel semaphore. | ||
119 | */ | ||
120 | void __lockfunc lock_kernel(void) | ||
121 | { | ||
122 | struct task_struct *task = current; | ||
123 | int depth = task->lock_depth + 1; | ||
124 | |||
125 | if (likely(!depth)) | ||
126 | /* | ||
127 | * No recursion worries - we set up lock_depth _after_ the down() | ||
128 | */ | ||
129 | down(&kernel_sem); | ||
130 | |||
131 | task->lock_depth = depth; | ||
132 | } | ||
133 | |||
134 | void __lockfunc unlock_kernel(void) | ||
135 | { | ||
136 | struct task_struct *task = current; | ||
137 | |||
138 | BUG_ON(task->lock_depth < 0); | ||
139 | |||
140 | if (likely(--task->lock_depth < 0)) | ||
141 | up(&kernel_sem); | ||
142 | } | ||
143 | |||
144 | #else | ||
145 | |||
146 | /* | ||
147 | * The 'big kernel lock' | ||
148 | * | ||
149 | * This spinlock is taken and released recursively by lock_kernel() | ||
150 | * and unlock_kernel(). It is transparently dropped and reacquired | ||
151 | * over schedule(). It is used to protect legacy code that hasn't | ||
152 | * been migrated to a proper locking design yet. | ||
153 | * | ||
154 | * Don't use in new code. | ||
155 | */ | ||
156 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); | ||
157 | |||
158 | |||
159 | /* | ||
160 | * Acquire/release the underlying lock from the scheduler. | ||
161 | * | ||
162 | * This is called with preemption disabled, and should | ||
163 | * return an error value if it cannot get the lock and | ||
164 | * TIF_NEED_RESCHED gets set. | ||
165 | * | ||
166 | * If it successfully gets the lock, it should increment | ||
167 | * the preemption count like any spinlock does. | ||
168 | * | ||
169 | * (This works on UP too - _raw_spin_trylock will never | ||
170 | * return false in that case) | ||
171 | */ | ||
172 | int __lockfunc __reacquire_kernel_lock(void) | ||
173 | { | ||
174 | while (!_raw_spin_trylock(&kernel_flag)) { | ||
175 | if (test_thread_flag(TIF_NEED_RESCHED)) | ||
176 | return -EAGAIN; | ||
177 | cpu_relax(); | ||
178 | } | ||
179 | preempt_disable(); | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | void __lockfunc __release_kernel_lock(void) | ||
184 | { | ||
185 | _raw_spin_unlock(&kernel_flag); | ||
186 | preempt_enable_no_resched(); | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * These are the BKL spinlocks - we try to be polite about preemption. | ||
191 | * If SMP is not on (ie UP preemption), this all goes away because the | ||
192 | * _raw_spin_trylock() will always succeed. | ||
193 | */ | ||
194 | #ifdef CONFIG_PREEMPT | ||
195 | static inline void __lock_kernel(void) | ||
196 | { | ||
197 | preempt_disable(); | ||
198 | if (unlikely(!_raw_spin_trylock(&kernel_flag))) { | ||
199 | /* | ||
200 | * If preemption was disabled even before this | ||
201 | * was called, there's nothing we can be polite | ||
202 | * about - just spin. | ||
203 | */ | ||
204 | if (preempt_count() > 1) { | ||
205 | _raw_spin_lock(&kernel_flag); | ||
206 | return; | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * Otherwise, let's wait for the kernel lock | ||
211 | * with preemption enabled.. | ||
212 | */ | ||
213 | do { | ||
214 | preempt_enable(); | ||
215 | while (spin_is_locked(&kernel_flag)) | ||
216 | cpu_relax(); | ||
217 | preempt_disable(); | ||
218 | } while (!_raw_spin_trylock(&kernel_flag)); | ||
219 | } | ||
220 | } | ||
221 | |||
222 | #else | ||
223 | |||
224 | /* | ||
225 | * Non-preemption case - just get the spinlock | ||
226 | */ | ||
227 | static inline void __lock_kernel(void) | ||
228 | { | ||
229 | _raw_spin_lock(&kernel_flag); | ||
230 | } | ||
231 | #endif | ||
232 | |||
233 | static inline void __unlock_kernel(void) | ||
234 | { | ||
235 | _raw_spin_unlock(&kernel_flag); | ||
236 | preempt_enable(); | ||
237 | } | ||
238 | |||
239 | /* | ||
240 | * Getting the big kernel lock. | ||
241 | * | ||
242 | * This cannot happen asynchronously, so we only need to | ||
243 | * worry about other CPUs. | ||
244 | */ | ||
245 | void __lockfunc lock_kernel(void) | ||
246 | { | ||
247 | int depth = current->lock_depth+1; | ||
248 | if (likely(!depth)) | ||
249 | __lock_kernel(); | ||
250 | current->lock_depth = depth; | ||
251 | } | ||
252 | |||
253 | void __lockfunc unlock_kernel(void) | ||
254 | { | ||
255 | BUG_ON(current->lock_depth < 0); | ||
256 | if (likely(--current->lock_depth < 0)) | ||
257 | __unlock_kernel(); | ||
258 | } | ||
259 | |||
260 | #endif | ||
261 | |||
262 | EXPORT_SYMBOL(lock_kernel); | ||
263 | EXPORT_SYMBOL(unlock_kernel); | ||
264 | |||
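Both configurations export the same two entry points, so legacy callers
look identical either way; a minimal (and, per the comments above,
discouraged) usage sketch with a hypothetical caller:

#include <linux/smp_lock.h>

/* hypothetical legacy ioctl body serialized under the BKL */
static int example_legacy_op(void)
{
	int ret;

	lock_kernel();		/* recursion-safe: nests via lock_depth */
	ret = 0;		/* ... touch BKL-protected legacy state ... */
	unlock_kernel();
	return ret;
}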
diff --git a/lib/kobject.c b/lib/kobject.c new file mode 100644 index 000000000000..ff9491986b38 --- /dev/null +++ b/lib/kobject.c | |||
@@ -0,0 +1,544 @@ | |||
1 | /* | ||
2 | * kobject.c - library routines for handling generic kernel objects | ||
3 | * | ||
4 | * Copyright (c) 2002-2003 Patrick Mochel <mochel@osdl.org> | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | * | ||
8 | * | ||
9 | * Please see the file Documentation/kobject.txt for critical information | ||
10 | * about using the kobject interface. | ||
11 | */ | ||
12 | |||
13 | #include <linux/kobject.h> | ||
14 | #include <linux/string.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/stat.h> | ||
17 | |||
18 | /** | ||
19 | * populate_dir - populate directory with attributes. | ||
20 | * @kobj: object we're working on. | ||
21 | * | ||
22 | * Most subsystems have a set of default attributes that | ||
23 | * are associated with an object that registers with them. | ||
24 | * This is a helper called during object registration that | ||
25 | * loops through the default attributes of the subsystem | ||
26 | * and creates attribute files for them in sysfs. | ||
27 | * | ||
28 | */ | ||
29 | |||
30 | static int populate_dir(struct kobject * kobj) | ||
31 | { | ||
32 | struct kobj_type * t = get_ktype(kobj); | ||
33 | struct attribute * attr; | ||
34 | int error = 0; | ||
35 | int i; | ||
36 | |||
37 | if (t && t->default_attrs) { | ||
38 | for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) { | ||
39 | if ((error = sysfs_create_file(kobj,attr))) | ||
40 | break; | ||
41 | } | ||
42 | } | ||
43 | return error; | ||
44 | } | ||
45 | |||
46 | static int create_dir(struct kobject * kobj) | ||
47 | { | ||
48 | int error = 0; | ||
49 | if (kobject_name(kobj)) { | ||
50 | error = sysfs_create_dir(kobj); | ||
51 | if (!error) { | ||
52 | if ((error = populate_dir(kobj))) | ||
53 | sysfs_remove_dir(kobj); | ||
54 | } | ||
55 | } | ||
56 | return error; | ||
57 | } | ||
58 | |||
59 | static inline struct kobject * to_kobj(struct list_head * entry) | ||
60 | { | ||
61 | return container_of(entry,struct kobject,entry); | ||
62 | } | ||
63 | |||
64 | static int get_kobj_path_length(struct kobject *kobj) | ||
65 | { | ||
66 | int length = 1; | ||
67 | struct kobject * parent = kobj; | ||
68 | |||
69 | /* walk up the ancestors until we hit the one pointing to the | ||
70 | * root. | ||
71 | * Add 1 to strlen for leading '/' of each level. | ||
72 | */ | ||
73 | do { | ||
74 | length += strlen(kobject_name(parent)) + 1; | ||
75 | parent = parent->parent; | ||
76 | } while (parent); | ||
77 | return length; | ||
78 | } | ||
79 | |||
80 | static void fill_kobj_path(struct kobject *kobj, char *path, int length) | ||
81 | { | ||
82 | struct kobject * parent; | ||
83 | |||
84 | --length; | ||
85 | for (parent = kobj; parent; parent = parent->parent) { | ||
86 | int cur = strlen(kobject_name(parent)); | ||
87 | /* back up enough to print this name with '/' */ | ||
88 | length -= cur; | ||
89 | strncpy (path + length, kobject_name(parent), cur); | ||
90 | *(path + --length) = '/'; | ||
91 | } | ||
92 | |||
93 | pr_debug("%s: path = '%s'\n",__FUNCTION__,path); | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * kobject_get_path - generate and return the path associated with a given kobj | ||
98 | * and kset pair. The result must be freed by the caller with kfree(). | ||
99 | * | ||
100 | * @kobj: kobject in question, with which to build the path | ||
101 | * @gfp_mask: the allocation type used to allocate the path | ||
102 | */ | ||
103 | char *kobject_get_path(struct kobject *kobj, int gfp_mask) | ||
104 | { | ||
105 | char *path; | ||
106 | int len; | ||
107 | |||
108 | len = get_kobj_path_length(kobj); | ||
109 | path = kmalloc(len, gfp_mask); | ||
110 | if (!path) | ||
111 | return NULL; | ||
112 | memset(path, 0x00, len); | ||
113 | fill_kobj_path(kobj, path, len); | ||
114 | |||
115 | return path; | ||
116 | } | ||
117 | |||
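/*
 * Example (hypothetical caller): the returned buffer is kmalloc()ed and
 * must be freed by the caller:
 *
 *	char *path = kobject_get_path(kobj, GFP_KERNEL);
 *	if (path) {
 *		pr_debug("kobject at %s\n", path);
 *		kfree(path);
 *	}
 */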
118 | /** | ||
119 | * kobject_init - initialize object. | ||
120 | * @kobj: object in question. | ||
121 | */ | ||
122 | void kobject_init(struct kobject * kobj) | ||
123 | { | ||
124 | kref_init(&kobj->kref); | ||
125 | INIT_LIST_HEAD(&kobj->entry); | ||
126 | kobj->kset = kset_get(kobj->kset); | ||
127 | } | ||
128 | |||
129 | |||
130 | /** | ||
131 | * unlink - remove kobject from kset list. | ||
132 | * @kobj: kobject. | ||
133 | * | ||
134 | * Remove the kobject from the kset list and decrement | ||
135 | * its parent's refcount. | ||
136 | * This is separated out, so we can use it in both | ||
137 | * kobject_del() and kobject_add() on error. | ||
138 | */ | ||
139 | |||
140 | static void unlink(struct kobject * kobj) | ||
141 | { | ||
142 | if (kobj->kset) { | ||
143 | spin_lock(&kobj->kset->list_lock); | ||
144 | list_del_init(&kobj->entry); | ||
145 | spin_unlock(&kobj->kset->list_lock); | ||
146 | } | ||
147 | kobject_put(kobj); | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * kobject_add - add an object to the hierarchy. | ||
152 | * @kobj: object. | ||
153 | */ | ||
154 | |||
155 | int kobject_add(struct kobject * kobj) | ||
156 | { | ||
157 | int error = 0; | ||
158 | struct kobject * parent; | ||
159 | |||
160 | if (!(kobj = kobject_get(kobj))) | ||
161 | return -ENOENT; | ||
162 | if (!kobj->k_name) | ||
163 | kobj->k_name = kobj->name; | ||
164 | parent = kobject_get(kobj->parent); | ||
165 | |||
166 | pr_debug("kobject %s: registering. parent: %s, set: %s\n", | ||
167 | kobject_name(kobj), parent ? kobject_name(parent) : "<NULL>", | ||
168 | kobj->kset ? kobj->kset->kobj.name : "<NULL>" ); | ||
169 | |||
170 | if (kobj->kset) { | ||
171 | spin_lock(&kobj->kset->list_lock); | ||
172 | |||
173 | if (!parent) | ||
174 | parent = kobject_get(&kobj->kset->kobj); | ||
175 | |||
176 | list_add_tail(&kobj->entry,&kobj->kset->list); | ||
177 | spin_unlock(&kobj->kset->list_lock); | ||
178 | } | ||
179 | kobj->parent = parent; | ||
180 | |||
181 | error = create_dir(kobj); | ||
182 | if (error) { | ||
183 | /* unlink does the kobject_put() for us */ | ||
184 | unlink(kobj); | ||
185 | if (parent) | ||
186 | kobject_put(parent); | ||
187 | } else { | ||
188 | kobject_hotplug(kobj, KOBJ_ADD); | ||
189 | } | ||
190 | |||
191 | return error; | ||
192 | } | ||
193 | |||
194 | |||
195 | /** | ||
196 | * kobject_register - initialize and add an object. | ||
197 | * @kobj: object in question. | ||
198 | */ | ||
199 | |||
200 | int kobject_register(struct kobject * kobj) | ||
201 | { | ||
202 | int error = 0; | ||
203 | if (kobj) { | ||
204 | kobject_init(kobj); | ||
205 | error = kobject_add(kobj); | ||
206 | if (error) { | ||
207 | printk("kobject_register failed for %s (%d)\n", | ||
208 | kobject_name(kobj),error); | ||
209 | dump_stack(); | ||
210 | } | ||
211 | } else | ||
212 | error = -EINVAL; | ||
213 | return error; | ||
214 | } | ||
215 | |||
216 | |||
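/*
 * Typical life cycle, using only calls defined in this file
 * (hypothetical object, embedded statically for brevity):
 *
 *	static struct kobject example_kobj;
 *
 *	kobject_set_name(&example_kobj, "example%d", 0);
 *	if (kobject_register(&example_kobj))
 *		...bail out...
 *	...
 *	kobject_unregister(&example_kobj);	// del + final put
 */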
217 | /** | ||
218 | * kobject_set_name - Set the name of an object | ||
219 | * @kobj: object. | ||
220 | * @name: name. | ||
221 | * | ||
222 | * If strlen(name) >= KOBJ_NAME_LEN, then use a dynamically allocated | ||
223 | * string that @kobj->k_name points to. Otherwise, use the static | ||
224 | * @kobj->name array. | ||
225 | */ | ||
226 | |||
227 | int kobject_set_name(struct kobject * kobj, const char * fmt, ...) | ||
228 | { | ||
229 | int error = 0; | ||
230 | int limit = KOBJ_NAME_LEN; | ||
231 | int need; | ||
232 | va_list args; | ||
233 | char * name; | ||
234 | |||
235 | /* | ||
236 | * First, try the static array | ||
237 | */ | ||
238 | va_start(args,fmt); | ||
239 | need = vsnprintf(kobj->name,limit,fmt,args); | ||
240 | va_end(args); | ||
241 | if (need < limit) | ||
242 | name = kobj->name; | ||
243 | else { | ||
244 | /* | ||
245 | * Need more space? Allocate it and try again | ||
246 | */ | ||
247 | limit = need + 1; | ||
248 | name = kmalloc(limit,GFP_KERNEL); | ||
249 | if (!name) { | ||
250 | error = -ENOMEM; | ||
251 | goto Done; | ||
252 | } | ||
253 | va_start(args,fmt); | ||
254 | need = vsnprintf(name,limit,fmt,args); | ||
255 | va_end(args); | ||
256 | |||
257 | /* Still? Give up. */ | ||
258 | if (need >= limit) { | ||
259 | kfree(name); | ||
260 | error = -EFAULT; | ||
261 | goto Done; | ||
262 | } | ||
263 | } | ||
264 | |||
265 | /* Free the old name, if necessary. */ | ||
266 | if (kobj->k_name && kobj->k_name != kobj->name) | ||
267 | kfree(kobj->k_name); | ||
268 | |||
269 | /* Now, set the new name */ | ||
270 | kobj->k_name = name; | ||
271 | Done: | ||
272 | return error; | ||
273 | } | ||
274 | |||
275 | EXPORT_SYMBOL(kobject_set_name); | ||
276 | |||
277 | |||
278 | /** | ||
279 | * kobject_rename - change the name of an object | ||
280 | * @kobj: object in question. | ||
281 | * @new_name: object's new name | ||
282 | */ | ||
283 | |||
284 | int kobject_rename(struct kobject * kobj, char *new_name) | ||
285 | { | ||
286 | int error = 0; | ||
287 | |||
288 | kobj = kobject_get(kobj); | ||
289 | if (!kobj) | ||
290 | return -EINVAL; | ||
291 | error = sysfs_rename_dir(kobj, new_name); | ||
292 | kobject_put(kobj); | ||
293 | |||
294 | return error; | ||
295 | } | ||
296 | |||
297 | /** | ||
298 | * kobject_del - unlink kobject from hierarchy. | ||
299 | * @kobj: object. | ||
300 | */ | ||
301 | |||
302 | void kobject_del(struct kobject * kobj) | ||
303 | { | ||
304 | kobject_hotplug(kobj, KOBJ_REMOVE); | ||
305 | sysfs_remove_dir(kobj); | ||
306 | unlink(kobj); | ||
307 | } | ||
308 | |||
309 | /** | ||
310 | * kobject_unregister - remove object from hierarchy and decrement refcount. | ||
311 | * @kobj: object going away. | ||
312 | */ | ||
313 | |||
314 | void kobject_unregister(struct kobject * kobj) | ||
315 | { | ||
316 | pr_debug("kobject %s: unregistering\n",kobject_name(kobj)); | ||
317 | kobject_del(kobj); | ||
318 | kobject_put(kobj); | ||
319 | } | ||
320 | |||
321 | /** | ||
322 | * kobject_get - increment refcount for object. | ||
323 | * @kobj: object. | ||
324 | */ | ||
325 | |||
326 | struct kobject * kobject_get(struct kobject * kobj) | ||
327 | { | ||
328 | if (kobj) | ||
329 | kref_get(&kobj->kref); | ||
330 | return kobj; | ||
331 | } | ||
332 | |||
333 | /** | ||
334 | * kobject_cleanup - free kobject resources. | ||
335 | * @kobj: object. | ||
336 | */ | ||
337 | |||
338 | void kobject_cleanup(struct kobject * kobj) | ||
339 | { | ||
340 | struct kobj_type * t = get_ktype(kobj); | ||
341 | struct kset * s = kobj->kset; | ||
342 | struct kobject * parent = kobj->parent; | ||
343 | |||
344 | pr_debug("kobject %s: cleaning up\n",kobject_name(kobj)); | ||
345 | if (kobj->k_name != kobj->name) | ||
346 | kfree(kobj->k_name); | ||
347 | kobj->k_name = NULL; | ||
348 | if (t && t->release) | ||
349 | t->release(kobj); | ||
350 | if (s) | ||
351 | kset_put(s); | ||
352 | if (parent) | ||
353 | kobject_put(parent); | ||
354 | } | ||
355 | |||
356 | static void kobject_release(struct kref *kref) | ||
357 | { | ||
358 | kobject_cleanup(container_of(kref, struct kobject, kref)); | ||
359 | } | ||
360 | |||
361 | /** | ||
362 | * kobject_put - decrement refcount for object. | ||
363 | * @kobj: object. | ||
364 | * | ||
365 | * Decrement the refcount, and if 0, call kobject_cleanup(). | ||
366 | */ | ||
367 | void kobject_put(struct kobject * kobj) | ||
368 | { | ||
369 | if (kobj) | ||
370 | kref_put(&kobj->kref, kobject_release); | ||
371 | } | ||
372 | |||
373 | |||
374 | /** | ||
375 | * kset_init - initialize a kset for use | ||
376 | * @k: kset | ||
377 | */ | ||
378 | |||
379 | void kset_init(struct kset * k) | ||
380 | { | ||
381 | kobject_init(&k->kobj); | ||
382 | INIT_LIST_HEAD(&k->list); | ||
383 | spin_lock_init(&k->list_lock); | ||
384 | } | ||
385 | |||
386 | |||
387 | /** | ||
388 | * kset_add - add a kset object to the hierarchy. | ||
389 | * @k: kset. | ||
390 | * | ||
391 | * Simply, this adds the kset's embedded kobject to the | ||
392 | * hierarchy. | ||
393 | * We also try to make sure that the kset's embedded kobject | ||
394 | * has a parent before it is added. We only care when the embedded | ||
395 | * kobject is not itself a member of a kset, since kobject_add() | ||
396 | * assigns a parent in that case. | ||
397 | * If the kobject has neither a parent nor a kset, and the kset has a | ||
398 | * controlling subsystem, we set the kset's parent to that subsystem. | ||
399 | */ | ||
400 | |||
401 | int kset_add(struct kset * k) | ||
402 | { | ||
403 | if (!k->kobj.parent && !k->kobj.kset && k->subsys) | ||
404 | k->kobj.parent = &k->subsys->kset.kobj; | ||
405 | |||
406 | return kobject_add(&k->kobj); | ||
407 | } | ||
408 | |||
409 | |||
410 | /** | ||
411 | * kset_register - initialize and add a kset. | ||
412 | * @k: kset. | ||
413 | */ | ||
414 | |||
415 | int kset_register(struct kset * k) | ||
416 | { | ||
417 | kset_init(k); | ||
418 | return kset_add(k); | ||
419 | } | ||
420 | |||
421 | |||
422 | /** | ||
423 | * kset_unregister - remove a kset. | ||
424 | * @k: kset. | ||
425 | */ | ||
426 | |||
427 | void kset_unregister(struct kset * k) | ||
428 | { | ||
429 | kobject_unregister(&k->kobj); | ||
430 | } | ||
431 | |||
432 | |||
433 | /** | ||
434 | * kset_find_obj - search for object in kset. | ||
435 | * @kset: kset we're looking in. | ||
436 | * @name: object's name. | ||
437 | * | ||
438 | * Iterate over @kset->list under @kset->list_lock, looking for a | ||
439 | * matching kobject. If a matching object is found, take a reference | ||
440 | * and return the object. | ||
441 | */ | ||
442 | |||
443 | struct kobject * kset_find_obj(struct kset * kset, const char * name) | ||
444 | { | ||
445 | struct list_head * entry; | ||
446 | struct kobject * ret = NULL; | ||
447 | |||
448 | spin_lock(&kset->list_lock); | ||
449 | list_for_each(entry,&kset->list) { | ||
450 | struct kobject * k = to_kobj(entry); | ||
451 | if (kobject_name(k) && !strcmp(kobject_name(k),name)) { | ||
452 | ret = kobject_get(k); | ||
453 | break; | ||
454 | } | ||
455 | } | ||
456 | spin_unlock(&kset->list_lock); | ||
457 | return ret; | ||
458 | } | ||
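A usage sketch for kset_find_obj(); the kset pointer and name are hypothetical. The reference it takes must be balanced by the caller:

	struct kobject *k;

	k = kset_find_obj(my_kset, "eth0");
	if (k) {
		/* ... use k; it cannot go away while we hold the ref ... */
		kobject_put(k);		/* drop the ref kset_find_obj() took */
	}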
459 | |||
460 | |||
461 | void subsystem_init(struct subsystem * s) | ||
462 | { | ||
463 | init_rwsem(&s->rwsem); | ||
464 | kset_init(&s->kset); | ||
465 | } | ||
466 | |||
467 | /** | ||
468 | * subsystem_register - register a subsystem. | ||
469 | * @s: the subsystem we're registering. | ||
470 | * | ||
471 | * Once we register the subsystem, we want to make sure that | ||
472 | * the kset points back to this subsystem for correct usage of | ||
473 | * the rwsem. | ||
474 | */ | ||
475 | |||
476 | int subsystem_register(struct subsystem * s) | ||
477 | { | ||
478 | int error; | ||
479 | |||
480 | subsystem_init(s); | ||
481 | pr_debug("subsystem %s: registering\n",s->kset.kobj.name); | ||
482 | |||
483 | if (!(error = kset_add(&s->kset))) { | ||
484 | if (!s->kset.subsys) | ||
485 | s->kset.subsys = s; | ||
486 | } | ||
487 | return error; | ||
488 | } | ||
489 | |||
490 | void subsystem_unregister(struct subsystem * s) | ||
491 | { | ||
492 | pr_debug("subsystem %s: unregistering\n",s->kset.kobj.name); | ||
493 | kset_unregister(&s->kset); | ||
494 | } | ||
495 | |||
496 | |||
497 | /** | ||
498 | * subsys_create_file - export sysfs attribute file. | ||
499 | * @s: subsystem. | ||
500 | * @a: subsystem attribute descriptor. | ||
501 | */ | ||
502 | |||
503 | int subsys_create_file(struct subsystem * s, struct subsys_attribute * a) | ||
504 | { | ||
505 | int error = 0; | ||
506 | if (subsys_get(s)) { | ||
507 | error = sysfs_create_file(&s->kset.kobj,&a->attr); | ||
508 | subsys_put(s); | ||
509 | } | ||
510 | return error; | ||
511 | } | ||
512 | |||
513 | |||
514 | /** | ||
515 | * subsys_remove_file - remove sysfs attribute file. | ||
516 | * @s: subsystem. | ||
517 | * @a: attribute descriptor. | ||
518 | */ | ||
519 | |||
520 | void subsys_remove_file(struct subsystem * s, struct subsys_attribute * a) | ||
521 | { | ||
522 | if (subsys_get(s)) { | ||
523 | sysfs_remove_file(&s->kset.kobj,&a->attr); | ||
524 | subsys_put(s); | ||
525 | } | ||
526 | } | ||
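A sketch of exporting a read-only subsystem attribute with the helper above; the subsystem, attribute name, and value shown are illustrative assumptions:

	static ssize_t foo_show(struct subsystem *s, char *page)
	{
		return sprintf(page, "%d\n", 42);	/* illustrative value */
	}

	static struct subsys_attribute foo_attr = {
		.attr = { .name = "foo", .mode = 0444 },
		.show = foo_show,
	};

	/* after subsystem_register(&my_subsys) succeeds: */
	error = subsys_create_file(&my_subsys, &foo_attr);

The matching subsys_remove_file(&my_subsys, &foo_attr) removes it again before the subsystem is unregistered.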
527 | |||
528 | EXPORT_SYMBOL(kobject_init); | ||
529 | EXPORT_SYMBOL(kobject_register); | ||
530 | EXPORT_SYMBOL(kobject_unregister); | ||
531 | EXPORT_SYMBOL(kobject_get); | ||
532 | EXPORT_SYMBOL(kobject_put); | ||
533 | EXPORT_SYMBOL(kobject_add); | ||
534 | EXPORT_SYMBOL(kobject_del); | ||
535 | |||
536 | EXPORT_SYMBOL(kset_register); | ||
537 | EXPORT_SYMBOL(kset_unregister); | ||
538 | EXPORT_SYMBOL(kset_find_obj); | ||
539 | |||
540 | EXPORT_SYMBOL(subsystem_init); | ||
541 | EXPORT_SYMBOL(subsystem_register); | ||
542 | EXPORT_SYMBOL(subsystem_unregister); | ||
543 | EXPORT_SYMBOL(subsys_create_file); | ||
544 | EXPORT_SYMBOL(subsys_remove_file); | ||
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c new file mode 100644 index 000000000000..2a4e7671eaf4 --- /dev/null +++ b/lib/kobject_uevent.c | |||
@@ -0,0 +1,369 @@ | |||
1 | /* | ||
2 | * kernel userspace event delivery | ||
3 | * | ||
4 | * Copyright (C) 2004 Red Hat, Inc. All rights reserved. | ||
5 | * Copyright (C) 2004 Novell, Inc. All rights reserved. | ||
6 | * Copyright (C) 2004 IBM, Inc. All rights reserved. | ||
7 | * | ||
8 | * Licensed under the GNU GPL v2. | ||
9 | * | ||
10 | * Authors: | ||
11 | * Robert Love <rml@novell.com> | ||
12 | * Kay Sievers <kay.sievers@vrfy.org> | ||
13 | * Arjan van de Ven <arjanv@redhat.com> | ||
14 | * Greg Kroah-Hartman <greg@kroah.com> | ||
15 | */ | ||
16 | |||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/socket.h> | ||
19 | #include <linux/skbuff.h> | ||
20 | #include <linux/netlink.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/kobject_uevent.h> | ||
23 | #include <linux/kobject.h> | ||
24 | #include <net/sock.h> | ||
25 | |||
26 | #define BUFFER_SIZE 1024 /* buffer for the hotplug env */ | ||
27 | #define NUM_ENVP 32 /* number of env pointers */ | ||
28 | |||
29 | #if defined(CONFIG_KOBJECT_UEVENT) || defined(CONFIG_HOTPLUG) | ||
30 | static char *action_to_string(enum kobject_action action) | ||
31 | { | ||
32 | switch (action) { | ||
33 | case KOBJ_ADD: | ||
34 | return "add"; | ||
35 | case KOBJ_REMOVE: | ||
36 | return "remove"; | ||
37 | case KOBJ_CHANGE: | ||
38 | return "change"; | ||
39 | case KOBJ_MOUNT: | ||
40 | return "mount"; | ||
41 | case KOBJ_UMOUNT: | ||
42 | return "umount"; | ||
43 | case KOBJ_OFFLINE: | ||
44 | return "offline"; | ||
45 | case KOBJ_ONLINE: | ||
46 | return "online"; | ||
47 | default: | ||
48 | return NULL; | ||
49 | } | ||
50 | } | ||
51 | #endif | ||
52 | |||
53 | #ifdef CONFIG_KOBJECT_UEVENT | ||
54 | static struct sock *uevent_sock; | ||
55 | |||
56 | /** | ||
57 | * send_uevent - notify userspace by sending an event through a netlink socket | ||
58 | * | ||
59 | * @signal: signal name | ||
60 | * @obj: object path (kobject) | ||
61 | * @envp: possible hotplug environment to pass with the message | ||
62 | * @gfp_mask: allocation flags used for the skb | ||
63 | */ | ||
64 | static int send_uevent(const char *signal, const char *obj, | ||
65 | char **envp, int gfp_mask) | ||
66 | { | ||
67 | struct sk_buff *skb; | ||
68 | char *pos; | ||
69 | int len; | ||
70 | |||
71 | if (!uevent_sock) | ||
72 | return -EIO; | ||
73 | |||
74 | len = strlen(signal) + 1; | ||
75 | len += strlen(obj) + 1; | ||
76 | |||
77 | /* allocate buffer with the maximum possible message size */ | ||
78 | skb = alloc_skb(len + BUFFER_SIZE, gfp_mask); | ||
79 | if (!skb) | ||
80 | return -ENOMEM; | ||
81 | |||
82 | pos = skb_put(skb, len); | ||
83 | sprintf(pos, "%s@%s", signal, obj); | ||
84 | |||
85 | /* copy env key by key into our contiguous buffer; envp[0] and envp[1] hold HOME/PATH, skip them */ | ||
86 | if (envp) { | ||
87 | int i; | ||
88 | |||
89 | for (i = 2; envp[i]; i++) { | ||
90 | len = strlen(envp[i]) + 1; | ||
91 | pos = skb_put(skb, len); | ||
92 | strcpy(pos, envp[i]); | ||
93 | } | ||
94 | } | ||
95 | |||
96 | return netlink_broadcast(uevent_sock, skb, 0, 1, gfp_mask); | ||
97 | } | ||
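The skb payload built above is the "signal@devpath" header followed by the NUL-terminated environment strings; the copy loop starts at envp[2] because kobject_hotplug() below places HOME= and PATH= in the first two slots, and those are only meant for the usermode helper. For a hypothetical network-device add event the flattened message would look like:

	add@/class/net/eth0\0ACTION=add\0DEVPATH=/class/net/eth0\0SUBSYSTEM=net\0SEQNUM=42\0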
98 | |||
99 | static int do_kobject_uevent(struct kobject *kobj, enum kobject_action action, | ||
100 | struct attribute *attr, int gfp_mask) | ||
101 | { | ||
102 | char *path; | ||
103 | char *attrpath; | ||
104 | char *signal; | ||
105 | int len; | ||
106 | int rc = -ENOMEM; | ||
107 | |||
108 | path = kobject_get_path(kobj, gfp_mask); | ||
109 | if (!path) | ||
110 | return -ENOMEM; | ||
111 | |||
112 | signal = action_to_string(action); | ||
113 | if (!signal) /* must not leak path */ | ||
114 | { rc = -EINVAL; goto exit; } | ||
115 | |||
116 | if (attr) { | ||
117 | len = strlen(path); | ||
118 | len += strlen(attr->name) + 2; | ||
119 | attrpath = kmalloc(len, gfp_mask); | ||
120 | if (!attrpath) | ||
121 | goto exit; | ||
122 | sprintf(attrpath, "%s/%s", path, attr->name); | ||
123 | rc = send_uevent(signal, attrpath, NULL, gfp_mask); | ||
124 | kfree(attrpath); | ||
125 | } else | ||
126 | rc = send_uevent(signal, path, NULL, gfp_mask); | ||
127 | |||
128 | exit: | ||
129 | kfree(path); | ||
130 | return rc; | ||
131 | } | ||
132 | |||
133 | /** | ||
134 | * kobject_uevent - notify userspace by sending event through netlink socket | ||
135 | * | ||
136 | * @kobj: struct kobject that the event is happening to | ||
137 | * @action: action that is happening (KOBJ_ADD, KOBJ_REMOVE, ...) | ||
138 | * @attr: optional struct attribute the event belongs to | ||
139 | */ | ||
140 | int kobject_uevent(struct kobject *kobj, enum kobject_action action, | ||
141 | struct attribute *attr) | ||
142 | { | ||
143 | return do_kobject_uevent(kobj, action, attr, GFP_KERNEL); | ||
144 | } | ||
145 | EXPORT_SYMBOL_GPL(kobject_uevent); | ||
146 | |||
147 | int kobject_uevent_atomic(struct kobject *kobj, enum kobject_action action, | ||
148 | struct attribute *attr) | ||
149 | { | ||
150 | return do_kobject_uevent(kobj, action, attr, GFP_ATOMIC); | ||
151 | } | ||
152 | EXPORT_SYMBOL_GPL(kobject_uevent_atomic); | ||
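Typical call sites pass the embedded kobject and a NULL attribute; a short sketch (mydev is a hypothetical driver object):

	/* process context, GFP_KERNEL allocation is fine */
	kobject_uevent(&mydev->kobj, KOBJ_CHANGE, NULL);

	/* atomic context, e.g. under a spinlock: use the GFP_ATOMIC variant */
	kobject_uevent_atomic(&mydev->kobj, KOBJ_OFFLINE, NULL);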
153 | |||
154 | static int __init kobject_uevent_init(void) | ||
155 | { | ||
156 | uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, NULL); | ||
157 | |||
158 | if (!uevent_sock) { | ||
159 | printk(KERN_ERR | ||
160 | "kobject_uevent: unable to create netlink socket!\n"); | ||
161 | return -ENODEV; | ||
162 | } | ||
163 | |||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | postcore_initcall(kobject_uevent_init); | ||
168 | |||
169 | #else | ||
170 | static inline int send_uevent(const char *signal, const char *obj, | ||
171 | char **envp, int gfp_mask) | ||
172 | { | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | #endif /* CONFIG_KOBJECT_UEVENT */ | ||
177 | |||
178 | |||
179 | #ifdef CONFIG_HOTPLUG | ||
180 | char hotplug_path[HOTPLUG_PATH_LEN] = "/sbin/hotplug"; | ||
181 | u64 hotplug_seqnum; | ||
182 | static DEFINE_SPINLOCK(sequence_lock); | ||
183 | |||
184 | /** | ||
185 | * kobject_hotplug - notify userspace by executing /sbin/hotplug | ||
186 | * | ||
187 | * @action: action that is happening (usually KOBJ_ADD or KOBJ_REMOVE) | ||
188 | * @kobj: struct kobject that the action is happening to | ||
189 | */ | ||
190 | void kobject_hotplug(struct kobject *kobj, enum kobject_action action) | ||
191 | { | ||
192 | char *argv [3]; | ||
193 | char **envp = NULL; | ||
194 | char *buffer = NULL; | ||
195 | char *seq_buff; | ||
196 | char *scratch; | ||
197 | int i = 0; | ||
198 | int retval; | ||
199 | char *kobj_path = NULL; | ||
200 | char *name = NULL; | ||
201 | char *action_string; | ||
202 | u64 seq; | ||
203 | struct kobject *top_kobj = kobj; | ||
204 | struct kset *kset; | ||
205 | static struct kset_hotplug_ops null_hotplug_ops; | ||
206 | struct kset_hotplug_ops *hotplug_ops = &null_hotplug_ops; | ||
207 | |||
208 | /* If this kobj does not belong to a kset, | ||
209 | try to find a parent that does. */ | ||
210 | if (!top_kobj->kset && top_kobj->parent) { | ||
211 | do { | ||
212 | top_kobj = top_kobj->parent; | ||
213 | } while (!top_kobj->kset && top_kobj->parent); | ||
214 | } | ||
215 | |||
216 | if (top_kobj->kset) | ||
217 | kset = top_kobj->kset; | ||
218 | else | ||
219 | return; | ||
220 | |||
221 | if (kset->hotplug_ops) | ||
222 | hotplug_ops = kset->hotplug_ops; | ||
223 | |||
224 | /* If the kset has a filter operation, call it. | ||
225 | Skip the event, if the filter returns zero. */ | ||
226 | if (hotplug_ops->filter) { | ||
227 | if (!hotplug_ops->filter(kset, kobj)) | ||
228 | return; | ||
229 | } | ||
230 | |||
231 | pr_debug ("%s\n", __FUNCTION__); | ||
232 | |||
233 | action_string = action_to_string(action); | ||
234 | if (!action_string) | ||
235 | return; | ||
236 | |||
237 | envp = kmalloc(NUM_ENVP * sizeof (char *), GFP_KERNEL); | ||
238 | if (!envp) | ||
239 | return; | ||
240 | memset (envp, 0x00, NUM_ENVP * sizeof (char *)); | ||
241 | |||
242 | buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL); | ||
243 | if (!buffer) | ||
244 | goto exit; | ||
245 | |||
246 | if (hotplug_ops->name) | ||
247 | name = hotplug_ops->name(kset, kobj); | ||
248 | if (name == NULL) | ||
249 | name = kset->kobj.name; | ||
250 | |||
251 | argv [0] = hotplug_path; | ||
252 | argv [1] = name; | ||
253 | argv [2] = NULL; | ||
254 | |||
255 | /* minimal command environment */ | ||
256 | envp [i++] = "HOME=/"; | ||
257 | envp [i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; | ||
258 | |||
259 | scratch = buffer; | ||
260 | |||
261 | envp [i++] = scratch; | ||
262 | scratch += sprintf(scratch, "ACTION=%s", action_string) + 1; | ||
263 | |||
264 | kobj_path = kobject_get_path(kobj, GFP_KERNEL); | ||
265 | if (!kobj_path) | ||
266 | goto exit; | ||
267 | |||
268 | envp [i++] = scratch; | ||
269 | scratch += sprintf (scratch, "DEVPATH=%s", kobj_path) + 1; | ||
270 | |||
271 | envp [i++] = scratch; | ||
272 | scratch += sprintf(scratch, "SUBSYSTEM=%s", name) + 1; | ||
273 | |||
274 | /* reserve space for the sequence, | ||
275 | * put the real one in after the hotplug call */ | ||
276 | envp[i++] = seq_buff = scratch; | ||
277 | scratch += strlen("SEQNUM=18446744073709551616") + 1; | ||
278 | |||
279 | if (hotplug_ops->hotplug) { | ||
280 | /* have the kset specific function add its stuff */ | ||
281 | retval = hotplug_ops->hotplug (kset, kobj, | ||
282 | &envp[i], NUM_ENVP - i, scratch, | ||
283 | BUFFER_SIZE - (scratch - buffer)); | ||
284 | if (retval) { | ||
285 | pr_debug ("%s - hotplug() returned %d\n", | ||
286 | __FUNCTION__, retval); | ||
287 | goto exit; | ||
288 | } | ||
289 | } | ||
290 | |||
291 | spin_lock(&sequence_lock); | ||
292 | seq = ++hotplug_seqnum; | ||
293 | spin_unlock(&sequence_lock); | ||
294 | sprintf(seq_buff, "SEQNUM=%llu", (unsigned long long)seq); | ||
295 | |||
296 | pr_debug ("%s: %s %s seq=%llu %s %s %s %s %s\n", | ||
297 | __FUNCTION__, argv[0], argv[1], (unsigned long long)seq, | ||
298 | envp[0], envp[1], envp[2], envp[3], envp[4]); | ||
299 | |||
300 | send_uevent(action_string, kobj_path, envp, GFP_KERNEL); | ||
301 | |||
302 | if (!hotplug_path[0]) | ||
303 | goto exit; | ||
304 | |||
305 | retval = call_usermodehelper (argv[0], argv, envp, 0); | ||
306 | if (retval) | ||
307 | pr_debug ("%s - call_usermodehelper returned %d\n", | ||
308 | __FUNCTION__, retval); | ||
309 | |||
310 | exit: | ||
311 | kfree(kobj_path); | ||
312 | kfree(buffer); | ||
313 | kfree(envp); | ||
314 | return; | ||
315 | } | ||
316 | EXPORT_SYMBOL(kobject_hotplug); | ||
317 | |||
318 | /** | ||
319 | * add_hotplug_env_var - helper for creating hotplug environment variables | ||
320 | * @envp: Pointer to table of environment variables, as passed into | ||
321 | * hotplug() method. | ||
322 | * @num_envp: Number of environment variable slots available, as | ||
323 | * passed into hotplug() method. | ||
324 | * @cur_index: Pointer to current index into @envp. It should be | ||
325 | * initialized to 0 before the first call to add_hotplug_env_var(), | ||
326 | * and will be incremented on success. | ||
327 | * @buffer: Pointer to buffer for environment variables, as passed | ||
328 | * into hotplug() method. | ||
329 | * @buffer_size: Length of @buffer, as passed into hotplug() method. | ||
330 | * @cur_len: Pointer to current length of space used in @buffer. | ||
331 | * Should be initialized to 0 before the first call to | ||
332 | * add_hotplug_env_var(), and will be incremented on success. | ||
333 | * @format: Format for creating environment variable (of the form | ||
334 | * "XXX=%x") for snprintf(). | ||
335 | * | ||
336 | * Returns 0 if environment variable was added successfully or -ENOMEM | ||
337 | * if no space was available. | ||
338 | */ | ||
339 | int add_hotplug_env_var(char **envp, int num_envp, int *cur_index, | ||
340 | char *buffer, int buffer_size, int *cur_len, | ||
341 | const char *format, ...) | ||
342 | { | ||
343 | va_list args; | ||
344 | |||
345 | /* | ||
346 | * We check against num_envp - 1 to make sure there is at | ||
347 | * least one slot left after we return, since the hotplug | ||
348 | * method needs to set the last slot to NULL. | ||
349 | */ | ||
350 | if (*cur_index >= num_envp - 1) | ||
351 | return -ENOMEM; | ||
352 | |||
353 | envp[*cur_index] = buffer + *cur_len; | ||
354 | |||
355 | va_start(args, format); | ||
356 | *cur_len += vsnprintf(envp[*cur_index], | ||
357 | max(buffer_size - *cur_len, 0), | ||
358 | format, args) + 1; | ||
359 | va_end(args); | ||
360 | |||
361 | if (*cur_len > buffer_size) | ||
362 | return -ENOMEM; | ||
363 | |||
364 | (*cur_index)++; | ||
365 | return 0; | ||
366 | } | ||
367 | EXPORT_SYMBOL(add_hotplug_env_var); | ||
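A sketch of a kset hotplug() method built on add_hotplug_env_var(), matching the callback signature kobject_hotplug() above invokes; the method name and variable are hypothetical:

	static int my_hotplug(struct kset *kset, struct kobject *kobj,
			      char **envp, int num_envp, char *buffer,
			      int buffer_size)
	{
		int i = 0;
		int len = 0;

		if (add_hotplug_env_var(envp, num_envp, &i,
					buffer, buffer_size, &len,
					"MYVAR=%s", kobject_name(kobj)))
			return -ENOMEM;

		envp[i] = NULL;	/* keep the table NULL-terminated */
		return 0;
	}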
368 | |||
369 | #endif /* CONFIG_HOTPLUG */ | ||
diff --git a/lib/kref.c b/lib/kref.c new file mode 100644 index 000000000000..0d07cc31c818 --- /dev/null +++ b/lib/kref.c | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * kref.c - library routines for handling generic reference counted objects | ||
3 | * | ||
4 | * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> | ||
5 | * Copyright (C) 2004 IBM Corp. | ||
6 | * | ||
7 | * based on lib/kobject.c which was: | ||
8 | * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org> | ||
9 | * | ||
10 | * This file is released under the GPLv2. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kref.h> | ||
15 | #include <linux/module.h> | ||
16 | |||
17 | /** | ||
18 | * kref_init - initialize object. | ||
19 | * @kref: object in question. | ||
20 | */ | ||
21 | void kref_init(struct kref *kref) | ||
22 | { | ||
23 | atomic_set(&kref->refcount,1); | ||
24 | } | ||
25 | |||
26 | /** | ||
27 | * kref_get - increment refcount for object. | ||
28 | * @kref: object. | ||
29 | */ | ||
30 | void kref_get(struct kref *kref) | ||
31 | { | ||
32 | WARN_ON(!atomic_read(&kref->refcount)); | ||
33 | atomic_inc(&kref->refcount); | ||
34 | } | ||
35 | |||
36 | /** | ||
37 | * kref_put - decrement refcount for object. | ||
38 | * @kref: object. | ||
39 | * @release: pointer to the function that will clean up the object when the | ||
40 | * last reference to the object is released. | ||
41 | * This pointer is required, and it is not acceptable to pass kfree | ||
42 | * in as this function. | ||
43 | * | ||
44 | * Decrement the refcount, and if 0, call release(). | ||
45 | * Return 1 if the object was removed, otherwise return 0. Beware: if this | ||
46 | * function returns 0, you still cannot count on the kref remaining in | ||
47 | * memory. Only use the return value to learn that the kref is now gone, | ||
48 | * never to conclude that it is still present. | ||
49 | */ | ||
50 | int kref_put(struct kref *kref, void (*release)(struct kref *kref)) | ||
51 | { | ||
52 | WARN_ON(release == NULL); | ||
53 | WARN_ON(release == (void (*)(struct kref *))kfree); | ||
54 | |||
55 | if (atomic_dec_and_test(&kref->refcount)) { | ||
56 | release(kref); | ||
57 | return 1; | ||
58 | } | ||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | EXPORT_SYMBOL(kref_init); | ||
63 | EXPORT_SYMBOL(kref_get); | ||
64 | EXPORT_SYMBOL(kref_put); | ||
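The intended kref pattern, sketched with a hypothetical wrapper struct: embed the kref, and let kref_put() call the release function through container_of() rather than freeing directly (assumes <linux/kref.h> and <linux/slab.h>):

	struct my_data {
		struct kref refcount;
		char payload[64];
	};

	static void my_data_release(struct kref *kref)
	{
		kfree(container_of(kref, struct my_data, refcount));
	}

	/* creation: refcount starts at 1 */
	struct my_data *d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (d)
		kref_init(&d->refcount);

	/* each new user */
	kref_get(&d->refcount);

	/* every user, including the creator, eventually */
	kref_put(&d->refcount, my_data_release);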
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c new file mode 100644 index 000000000000..52b6dc144ce3 --- /dev/null +++ b/lib/libcrc32c.c | |||
@@ -0,0 +1,200 @@ | |||
1 | /* | ||
2 | * CRC32C | ||
3 | * @Article{castagnoli-crc, | ||
4 | * author = {Guy Castagnoli and Stefan Braeuer and Martin Herrmann}, | ||
5 | * title = {{Optimization of Cyclic Redundancy-Check Codes with 24 | ||
6 | * and 32 Parity Bits}}, | ||
7 | * journal = {IEEE Transactions on Communications}, | ||
8 | * year = {1993}, | ||
9 | * volume = {41}, | ||
10 | * number = {6}, | ||
11 | * pages = {}, | ||
12 | * month = {June}, | ||
13 | *} | ||
14 | * Used by the iSCSI driver, possibly others, and derived from | ||
15 | * the iscsi-crc.c module of the linux-iscsi driver at | ||
16 | * http://linux-iscsi.sourceforge.net. | ||
17 | * | ||
18 | * Following the example of lib/crc32, this function is intended to be | ||
19 | * flexible and useful for all users. Modules that currently have their | ||
20 | * own crc32c, but hopefully may be able to use this one are: | ||
21 | * net/sctp (please add your module here if you change it to | ||
22 | * use this one!) | ||
23 | * <endoflist> | ||
24 | * | ||
25 | * Copyright (c) 2004 Cisco Systems, Inc. | ||
26 | * | ||
27 | * This program is free software; you can redistribute it and/or modify it | ||
28 | * under the terms of the GNU General Public License as published by the Free | ||
29 | * Software Foundation; either version 2 of the License, or (at your option) | ||
30 | * any later version. | ||
31 | * | ||
32 | */ | ||
33 | #include <linux/crc32c.h> | ||
34 | #include <linux/compiler.h> | ||
35 | #include <linux/module.h> | ||
36 | #include <asm/byteorder.h> | ||
37 | |||
38 | MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>"); | ||
39 | MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations"); | ||
40 | MODULE_LICENSE("GPL"); | ||
41 | |||
42 | #define CRC32C_POLY_BE 0x1EDC6F41 | ||
43 | #define CRC32C_POLY_LE 0x82F63B78 | ||
44 | |||
45 | #ifndef CRC_LE_BITS | ||
46 | # define CRC_LE_BITS 8 | ||
47 | #endif | ||
48 | |||
49 | |||
50 | /* | ||
51 | * Haven't generated a big-endian table yet, but the bit-wise version | ||
52 | * should at least work. | ||
53 | */ | ||
54 | #if defined CRC_BE_BITS && CRC_BE_BITS != 1 | ||
55 | #undef CRC_BE_BITS | ||
56 | #endif | ||
57 | #ifndef CRC_BE_BITS | ||
58 | # define CRC_BE_BITS 1 | ||
59 | #endif | ||
60 | |||
61 | EXPORT_SYMBOL(crc32c_le); | ||
62 | |||
63 | #if CRC_LE_BITS == 1 | ||
64 | /* | ||
65 | * Compute things bit-wise, as done in crc32.c. We could share the tight | ||
66 | * loop below with crc32 and vary the POLY if keeping the two modules | ||
67 | * separate turns out not to be worthwhile in terms of space and | ||
68 | * maintainability. | ||
69 | u32 __attribute_pure__ | ||
70 | crc32c_le(u32 crc, unsigned char const *p, size_t len) | ||
71 | { | ||
72 | int i; | ||
73 | while (len--) { | ||
74 | crc ^= *p++; | ||
75 | for (i = 0; i < 8; i++) | ||
76 | crc = (crc >> 1) ^ ((crc & 1) ? CRC32C_POLY_LE : 0); | ||
77 | } | ||
78 | return crc; | ||
79 | } | ||
80 | #else | ||
81 | |||
82 | /* | ||
83 | * This is the CRC-32C table | ||
84 | * Generated with: | ||
85 | * width = 32 bits | ||
86 | * poly = 0x1EDC6F41 | ||
87 | * reflect input bytes = true | ||
88 | * reflect output bytes = true | ||
89 | */ | ||
90 | |||
91 | static u32 crc32c_table[256] = { | ||
92 | 0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L, | ||
93 | 0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL, | ||
94 | 0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL, | ||
95 | 0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L, 0x5E133C24L, | ||
96 | 0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL, | ||
97 | 0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L, | ||
98 | 0x9A879FA0L, 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L, | ||
99 | 0x5D1D08BFL, 0xAF768BBCL, 0xBC267848L, 0x4E4DFB4BL, | ||
100 | 0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L, 0x33ED7D2AL, | ||
101 | 0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L, | ||
102 | 0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L, | ||
103 | 0x6DFE410EL, 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL, | ||
104 | 0x30E349B1L, 0xC288CAB2L, 0xD1D83946L, 0x23B3BA45L, | ||
105 | 0xF779DEAEL, 0x05125DADL, 0x1642AE59L, 0xE4292D5AL, | ||
106 | 0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL, | ||
107 | 0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L, | ||
108 | 0x417B1DBCL, 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L, | ||
109 | 0x86E18AA3L, 0x748A09A0L, 0x67DAFA54L, 0x95B17957L, | ||
110 | 0xCBA24573L, 0x39C9C670L, 0x2A993584L, 0xD8F2B687L, | ||
111 | 0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L, | ||
112 | 0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L, | ||
113 | 0x96BF4DCCL, 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L, | ||
114 | 0xDBFC821CL, 0x2997011FL, 0x3AC7F2EBL, 0xC8AC71E8L, | ||
115 | 0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L, 0x0F36E6F7L, | ||
116 | 0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L, | ||
117 | 0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L, | ||
118 | 0xEB1FCBADL, 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L, | ||
119 | 0x2C855CB2L, 0xDEEEDFB1L, 0xCDBE2C45L, 0x3FD5AF46L, | ||
120 | 0x7198540DL, 0x83F3D70EL, 0x90A324FAL, 0x62C8A7F9L, | ||
121 | 0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L, | ||
122 | 0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L, | ||
123 | 0x3CDB9BDDL, 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L, | ||
124 | 0x82F63B78L, 0x709DB87BL, 0x63CD4B8FL, 0x91A6C88CL, | ||
125 | 0x456CAC67L, 0xB7072F64L, 0xA457DC90L, 0x563C5F93L, | ||
126 | 0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L, | ||
127 | 0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL, | ||
128 | 0x92A8FC17L, 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L, | ||
129 | 0x55326B08L, 0xA759E80BL, 0xB4091BFFL, 0x466298FCL, | ||
130 | 0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL, 0x0B21572CL, | ||
131 | 0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L, | ||
132 | 0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L, | ||
133 | 0x65D122B9L, 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL, | ||
134 | 0x2892ED69L, 0xDAF96E6AL, 0xC9A99D9EL, 0x3BC21E9DL, | ||
135 | 0xEF087A76L, 0x1D63F975L, 0x0E330A81L, 0xFC588982L, | ||
136 | 0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL, | ||
137 | 0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L, | ||
138 | 0x38CC2A06L, 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L, | ||
139 | 0xFF56BD19L, 0x0D3D3E1AL, 0x1E6DCDEEL, 0xEC064EEDL, | ||
140 | 0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L, 0xD0DDD530L, | ||
141 | 0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL, | ||
142 | 0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL, | ||
143 | 0x8ECEE914L, 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L, | ||
144 | 0xD3D3E1ABL, 0x21B862A8L, 0x32E8915CL, 0xC083125FL, | ||
145 | 0x144976B4L, 0xE622F5B7L, 0xF5720643L, 0x07198540L, | ||
146 | 0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L, | ||
147 | 0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL, | ||
148 | 0xE330A81AL, 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL, | ||
149 | 0x24AA3F05L, 0xD6C1BC06L, 0xC5914FF2L, 0x37FACCF1L, | ||
150 | 0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L, 0x7AB90321L, | ||
151 | 0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL, | ||
152 | 0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L, | ||
153 | 0x34F4F86AL, 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL, | ||
154 | 0x79B737BAL, 0x8BDCB4B9L, 0x988C474DL, 0x6AE7C44EL, | ||
155 | 0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L, 0xAD7D5351L | ||
156 | }; | ||
157 | |||
158 | /* | ||
159 | * Steps through the buffer one byte at a time, calculating the | ||
160 | * reflected CRC using the table. | ||
161 | */ | ||
162 | |||
163 | u32 __attribute_pure__ | ||
164 | crc32c_le(u32 seed, unsigned char const *data, size_t length) | ||
165 | { | ||
166 | u32 crc = __cpu_to_le32(seed); | ||
167 | |||
168 | while (length--) | ||
169 | crc = | ||
170 | crc32c_table[(crc ^ *data++) & 0xFFL] ^ (crc >> 8); | ||
171 | |||
172 | return __le32_to_cpu(crc); | ||
173 | } | ||
174 | |||
175 | #endif /* CRC_LE_BITS == 1 */ | ||
176 | |||
177 | EXPORT_SYMBOL(crc32c_be); | ||
178 | |||
179 | #if CRC_BE_BITS == 1 | ||
180 | u32 __attribute_pure__ | ||
181 | crc32c_be(u32 crc, unsigned char const *p, size_t len) | ||
182 | { | ||
183 | int i; | ||
184 | while (len--) { | ||
185 | crc ^= *p++ << 24; | ||
186 | for (i = 0; i < 8; i++) | ||
187 | crc = | ||
188 | (crc << 1) ^ ((crc & 0x80000000) ? CRC32C_POLY_BE : | ||
189 | 0); | ||
190 | } | ||
191 | return crc; | ||
192 | } | ||
193 | #endif | ||
194 | |||
195 | /* | ||
196 | * Unit test | ||
197 | * | ||
198 | * A small unit test suite is implemented as part of the crypto suite. | ||
199 | * Select CRYPTO_CRC32C and use the tcrypt module to run the tests. | ||
200 | */ | ||
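A usage sketch: like lib/crc32, callers conventionally seed with all ones and invert the result, though the seed is the caller's choice rather than something this module enforces. The value below is the standard CRC-32C check result for the nine-byte test vector "123456789":

	unsigned char buf[] = "123456789";
	u32 crc;

	crc = crc32c_le(~(u32)0, buf, sizeof(buf) - 1) ^ ~(u32)0;
	/* crc == 0xE3069283 for this input */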
diff --git a/lib/parser.c b/lib/parser.c new file mode 100644 index 000000000000..7ad2a48abc5e --- /dev/null +++ b/lib/parser.c | |||
@@ -0,0 +1,220 @@ | |||
1 | /* | ||
2 | * lib/parser.c - simple parser for mount, etc. options. | ||
3 | * | ||
4 | * This source code is licensed under the GNU General Public License, | ||
5 | * Version 2. See the file COPYING for more details. | ||
6 | */ | ||
7 | |||
8 | #include <linux/ctype.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/parser.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/string.h> | ||
13 | |||
14 | /** | ||
15 | * match_one: - Determines if a string matches a simple pattern | ||
16 | * @s: the string to examine for presence of the pattern | ||
17 | * @p: the string containing the pattern | ||
18 | * @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match | ||
19 | * locations. | ||
20 | * | ||
21 | * Description: Determines if the pattern @p is present in string @s. Can only | ||
22 | * match extremely simple token=arg style patterns. If the pattern is found, | ||
23 | * the location(s) of the arguments will be returned in the @args array. | ||
24 | */ | ||
25 | static int match_one(char *s, char *p, substring_t args[]) | ||
26 | { | ||
27 | char *meta; | ||
28 | int argc = 0; | ||
29 | |||
30 | if (!p) | ||
31 | return 1; | ||
32 | |||
33 | while(1) { | ||
34 | int len = -1; | ||
35 | meta = strchr(p, '%'); | ||
36 | if (!meta) | ||
37 | return strcmp(p, s) == 0; | ||
38 | |||
39 | if (strncmp(p, s, meta-p)) | ||
40 | return 0; | ||
41 | |||
42 | s += meta - p; | ||
43 | p = meta + 1; | ||
44 | |||
45 | if (isdigit(*p)) | ||
46 | len = simple_strtoul(p, &p, 10); | ||
47 | else if (*p == '%') { | ||
48 | if (*s++ != '%') | ||
49 | return 0; | ||
50 | p++; | ||
51 | continue; | ||
52 | } | ||
53 | |||
54 | if (argc >= MAX_OPT_ARGS) | ||
55 | return 0; | ||
56 | |||
57 | args[argc].from = s; | ||
58 | switch (*p++) { | ||
59 | case 's': | ||
60 | if (strlen(s) == 0) | ||
61 | return 0; | ||
62 | else if (len == -1 || len > strlen(s)) | ||
63 | len = strlen(s); | ||
64 | args[argc].to = s + len; | ||
65 | break; | ||
66 | case 'd': | ||
67 | simple_strtol(s, &args[argc].to, 0); | ||
68 | goto num; | ||
69 | case 'u': | ||
70 | simple_strtoul(s, &args[argc].to, 0); | ||
71 | goto num; | ||
72 | case 'o': | ||
73 | simple_strtoul(s, &args[argc].to, 8); | ||
74 | goto num; | ||
75 | case 'x': | ||
76 | simple_strtoul(s, &args[argc].to, 16); | ||
77 | num: | ||
78 | if (args[argc].to == args[argc].from) | ||
79 | return 0; | ||
80 | break; | ||
81 | default: | ||
82 | return 0; | ||
83 | } | ||
84 | s = args[argc].to; | ||
85 | argc++; | ||
86 | } | ||
87 | } | ||
88 | |||
89 | /** | ||
90 | * match_token: - Find a token (and optional args) in a string | ||
91 | * @s: the string to examine for token/argument pairs | ||
92 | * @table: match_table_t describing the set of allowed option tokens and the | ||
93 | * arguments that may be associated with them. Must be terminated with a | ||
94 | * &struct match_token whose pattern is set to the NULL pointer. | ||
95 | * @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match | ||
96 | * locations. | ||
97 | * | ||
98 | * Description: Detects which, if any, of a set of token strings has been | ||
99 | * passed to it. Tokens can include up to MAX_OPT_ARGS instances of basic | ||
100 | * C-style format identifiers which will be taken into account when matching | ||
101 | * the tokens, and whose locations will be returned in the @args array. | ||
102 | */ | ||
103 | int match_token(char *s, match_table_t table, substring_t args[]) | ||
104 | { | ||
105 | struct match_token *p; | ||
106 | |||
107 | for (p = table; !match_one(s, p->pattern, args) ; p++) | ||
108 | ; | ||
109 | |||
110 | return p->token; | ||
111 | } | ||
112 | |||
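A sketch of the usual consumer pattern for match_token() and the match_int() helper defined below; the option table and names are illustrative, and the strsep() loop mirrors how filesystems walk comma-separated mount options:

	enum { Opt_uid, Opt_ro, Opt_err };

	static match_table_t tokens = {
		{Opt_uid, "uid=%u"},
		{Opt_ro,  "ro"},
		{Opt_err, NULL}		/* table must end with a NULL pattern */
	};

	static int parse_options(char *options)
	{
		substring_t args[MAX_OPT_ARGS];
		char *p;
		int option;

		while ((p = strsep(&options, ",")) != NULL) {
			if (!*p)
				continue;
			switch (match_token(p, tokens, args)) {
			case Opt_uid:
				if (match_int(&args[0], &option))
					return -EINVAL;
				/* ... use option ... */
				break;
			case Opt_ro:
				/* ... mark read-only ... */
				break;
			default:
				return -EINVAL;
			}
		}
		return 0;
	}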
113 | /** | ||
114 | * match_number: scan a number in the given base from a substring_t | ||
115 | * @s: substring to be scanned | ||
116 | * @result: resulting integer on success | ||
117 | * @base: base to use when converting string | ||
118 | * | ||
119 | * Description: Given a &substring_t and a base, attempts to parse the substring | ||
120 | * as a number in that base. On success, sets @result to the integer represented | ||
121 | * by the string and returns 0. Returns either -ENOMEM or -EINVAL on failure. | ||
122 | */ | ||
123 | static int match_number(substring_t *s, int *result, int base) | ||
124 | { | ||
125 | char *endp; | ||
126 | char *buf; | ||
127 | int ret; | ||
128 | |||
129 | buf = kmalloc(s->to - s->from + 1, GFP_KERNEL); | ||
130 | if (!buf) | ||
131 | return -ENOMEM; | ||
132 | memcpy(buf, s->from, s->to - s->from); | ||
133 | buf[s->to - s->from] = '\0'; | ||
134 | *result = simple_strtol(buf, &endp, base); | ||
135 | ret = 0; | ||
136 | if (endp == buf) | ||
137 | ret = -EINVAL; | ||
138 | kfree(buf); | ||
139 | return ret; | ||
140 | } | ||
141 | |||
142 | /** | ||
143 | * match_int: - scan a decimal representation of an integer from a substring_t | ||
144 | * @s: substring_t to be scanned | ||
145 | * @result: resulting integer on success | ||
146 | * | ||
147 | * Description: Attempts to parse the &substring_t @s as a decimal integer. On | ||
148 | * success, sets @result to the integer represented by the string and returns 0. | ||
149 | * Returns either -ENOMEM or -EINVAL on failure. | ||
150 | */ | ||
151 | int match_int(substring_t *s, int *result) | ||
152 | { | ||
153 | return match_number(s, result, 0); | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * match_octal: - scan an octal representation of an integer from a substring_t | ||
158 | * @s: substring_t to be scanned | ||
159 | * @result: resulting integer on success | ||
160 | * | ||
161 | * Description: Attempts to parse the &substring_t @s as an octal integer. On | ||
162 | * success, sets @result to the integer represented by the string and returns | ||
163 | * 0. Returns either -ENOMEM or -EINVAL on failure. | ||
164 | */ | ||
165 | int match_octal(substring_t *s, int *result) | ||
166 | { | ||
167 | return match_number(s, result, 8); | ||
168 | } | ||
169 | |||
170 | /** | ||
171 | * match_hex: - scan a hex representation of an integer from a substring_t | ||
172 | * @s: substring_t to be scanned | ||
173 | * @result: resulting integer on success | ||
174 | * | ||
175 | * Description: Attempts to parse the &substring_t @s as a hexadecimal integer. | ||
176 | * On success, sets @result to the integer represented by the string and | ||
177 | * returns 0. Returns either -ENOMEM or -EINVAL on failure. | ||
178 | */ | ||
179 | int match_hex(substring_t *s, int *result) | ||
180 | { | ||
181 | return match_number(s, result, 16); | ||
182 | } | ||
183 | |||
184 | /** | ||
185 | * match_strcpy: - copies the characters from a substring_t to a string | ||
186 | * @to: string to copy characters to. | ||
187 | * @s: &substring_t to copy | ||
188 | * | ||
189 | * Description: Copies the set of characters represented by the given | ||
190 | * &substring_t @s to the C-style string @to. Caller guarantees that @to is | ||
191 | * large enough to hold the characters of @s. | ||
192 | */ | ||
193 | void match_strcpy(char *to, substring_t *s) | ||
194 | { | ||
195 | memcpy(to, s->from, s->to - s->from); | ||
196 | to[s->to - s->from] = '\0'; | ||
197 | } | ||
198 | |||
199 | /** | ||
200 | * match_strdup: - allocate a new string with the contents of a substring_t | ||
201 | * @s: &substring_t to copy | ||
202 | * | ||
203 | * Description: Allocates and returns a string filled with the contents of | ||
204 | * the &substring_t @s. The caller is responsible for freeing the returned | ||
205 | * string with kfree(). | ||
206 | */ | ||
207 | char *match_strdup(substring_t *s) | ||
208 | { | ||
209 | char *p = kmalloc(s->to - s->from + 1, GFP_KERNEL); | ||
210 | if (p) | ||
211 | match_strcpy(p, s); | ||
212 | return p; | ||
213 | } | ||
214 | |||
215 | EXPORT_SYMBOL(match_token); | ||
216 | EXPORT_SYMBOL(match_int); | ||
217 | EXPORT_SYMBOL(match_octal); | ||
218 | EXPORT_SYMBOL(match_hex); | ||
219 | EXPORT_SYMBOL(match_strcpy); | ||
220 | EXPORT_SYMBOL(match_strdup); | ||
diff --git a/lib/prio_tree.c b/lib/prio_tree.c new file mode 100644 index 000000000000..ccfd850b0dec --- /dev/null +++ b/lib/prio_tree.c | |||
@@ -0,0 +1,484 @@ | |||
1 | /* | ||
2 | * lib/prio_tree.c - priority search tree | ||
3 | * | ||
4 | * Copyright (C) 2004, Rajesh Venkatasubramanian <vrajesh@umich.edu> | ||
5 | * | ||
6 | * This file is released under the GPL v2. | ||
7 | * | ||
8 | * Based on the radix priority search tree proposed by Edward M. McCreight | ||
9 | * SIAM Journal of Computing, vol. 14, no.2, pages 257-276, May 1985 | ||
10 | * | ||
11 | * 02Feb2004 Initial version | ||
12 | */ | ||
13 | |||
14 | #include <linux/init.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/prio_tree.h> | ||
17 | |||
18 | /* | ||
19 | * A clever mix of heap and radix trees forms a radix priority search tree (PST) | ||
20 | * which is useful for storing intervals, e.g., we can consider a vma as a closed | ||
21 | * interval of file pages [offset_begin, offset_end], and store all vmas that | ||
22 | * map a file in a PST. Then, using the PST, we can answer a stabbing query, | ||
23 | * i.e., selecting a set of stored intervals (vmas) that overlap with (map) a | ||
24 | * given input interval X (a set of consecutive file pages), in "O(log n + m)" | ||
25 | * time where 'log n' is the height of the PST, and 'm' is the number of stored | ||
26 | * intervals (vmas) that overlap (map) with the input interval X (the set of | ||
27 | * consecutive file pages). | ||
28 | * | ||
29 | * In our implementation, we store closed intervals of the form [radix_index, | ||
30 | * heap_index], where radix_index <= heap_index always holds. McCreight's PST | ||
31 | * is designed for storing intervals with unique radix indices, i.e., each | ||
32 | * interval has a different radix_index. However, this limitation can be easily | ||
33 | * overcome by using the size, i.e., heap_index - radix_index, as part of the | ||
34 | * index, so we index the tree using [(radix_index,size), heap_index]. | ||
35 | * | ||
36 | * When the above-mentioned indexing scheme is used, theoretically, in a 32 bit | ||
37 | * machine, the maximum height of a PST can be 64. We can use a balanced version | ||
38 | * of the priority search tree to optimize the tree height, but the balanced | ||
39 | * tree proposed by McCreight is too complex and memory-hungry for our purpose. | ||
40 | */ | ||
41 | |||
42 | /* | ||
43 | * The following macros are used for implementing prio_tree for i_mmap | ||
44 | */ | ||
45 | |||
46 | #define RADIX_INDEX(vma) ((vma)->vm_pgoff) | ||
47 | #define VMA_SIZE(vma) (((vma)->vm_end - (vma)->vm_start) >> PAGE_SHIFT) | ||
48 | /* avoid overflow */ | ||
49 | #define HEAP_INDEX(vma) ((vma)->vm_pgoff + (VMA_SIZE(vma) - 1)) | ||
50 | |||
51 | |||
52 | static void get_index(const struct prio_tree_root *root, | ||
53 | const struct prio_tree_node *node, | ||
54 | unsigned long *radix, unsigned long *heap) | ||
55 | { | ||
56 | if (root->raw) { | ||
57 | struct vm_area_struct *vma = prio_tree_entry( | ||
58 | node, struct vm_area_struct, shared.prio_tree_node); | ||
59 | |||
60 | *radix = RADIX_INDEX(vma); | ||
61 | *heap = HEAP_INDEX(vma); | ||
62 | } | ||
63 | else { | ||
64 | *radix = node->start; | ||
65 | *heap = node->last; | ||
66 | } | ||
67 | } | ||
68 | |||
69 | static unsigned long index_bits_to_maxindex[BITS_PER_LONG]; | ||
70 | |||
71 | void __init prio_tree_init(void) | ||
72 | { | ||
73 | unsigned int i; | ||
74 | |||
75 | for (i = 0; i < ARRAY_SIZE(index_bits_to_maxindex) - 1; i++) | ||
76 | index_bits_to_maxindex[i] = (1UL << (i + 1)) - 1; | ||
77 | index_bits_to_maxindex[ARRAY_SIZE(index_bits_to_maxindex) - 1] = ~0UL; | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * Maximum heap_index that can be stored in a PST with index_bits bits | ||
82 | */ | ||
83 | static inline unsigned long prio_tree_maxindex(unsigned int bits) | ||
84 | { | ||
85 | return index_bits_to_maxindex[bits - 1]; | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * Extend a priority search tree so that it can store a node with heap_index | ||
90 | * max_heap_index. In the worst case, this algorithm takes O((log n)^2). | ||
91 | * However, this function is rarely used and the common-case performance is | ||
92 | * not bad. | ||
93 | */ | ||
94 | static struct prio_tree_node *prio_tree_expand(struct prio_tree_root *root, | ||
95 | struct prio_tree_node *node, unsigned long max_heap_index) | ||
96 | { | ||
97 | struct prio_tree_node *first = NULL, *prev, *last = NULL; | ||
98 | |||
99 | if (max_heap_index > prio_tree_maxindex(root->index_bits)) | ||
100 | root->index_bits++; | ||
101 | |||
102 | while (max_heap_index > prio_tree_maxindex(root->index_bits)) { | ||
103 | root->index_bits++; | ||
104 | |||
105 | if (prio_tree_empty(root)) | ||
106 | continue; | ||
107 | |||
108 | if (first == NULL) { | ||
109 | first = root->prio_tree_node; | ||
110 | prio_tree_remove(root, root->prio_tree_node); | ||
111 | INIT_PRIO_TREE_NODE(first); | ||
112 | last = first; | ||
113 | } else { | ||
114 | prev = last; | ||
115 | last = root->prio_tree_node; | ||
116 | prio_tree_remove(root, root->prio_tree_node); | ||
117 | INIT_PRIO_TREE_NODE(last); | ||
118 | prev->left = last; | ||
119 | last->parent = prev; | ||
120 | } | ||
121 | } | ||
122 | |||
123 | INIT_PRIO_TREE_NODE(node); | ||
124 | |||
125 | if (first) { | ||
126 | node->left = first; | ||
127 | first->parent = node; | ||
128 | } else | ||
129 | last = node; | ||
130 | |||
131 | if (!prio_tree_empty(root)) { | ||
132 | last->left = root->prio_tree_node; | ||
133 | last->left->parent = last; | ||
134 | } | ||
135 | |||
136 | root->prio_tree_node = node; | ||
137 | return node; | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * Replace a prio_tree_node with a new node and return the old node | ||
142 | */ | ||
143 | struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root, | ||
144 | struct prio_tree_node *old, struct prio_tree_node *node) | ||
145 | { | ||
146 | INIT_PRIO_TREE_NODE(node); | ||
147 | |||
148 | if (prio_tree_root(old)) { | ||
149 | BUG_ON(root->prio_tree_node != old); | ||
150 | /* | ||
151 | * We can reduce root->index_bits here. However, it is complex | ||
152 | * and does not help much to improve performance (IMO). | ||
153 | */ | ||
154 | node->parent = node; | ||
155 | root->prio_tree_node = node; | ||
156 | } else { | ||
157 | node->parent = old->parent; | ||
158 | if (old->parent->left == old) | ||
159 | old->parent->left = node; | ||
160 | else | ||
161 | old->parent->right = node; | ||
162 | } | ||
163 | |||
164 | if (!prio_tree_left_empty(old)) { | ||
165 | node->left = old->left; | ||
166 | old->left->parent = node; | ||
167 | } | ||
168 | |||
169 | if (!prio_tree_right_empty(old)) { | ||
170 | node->right = old->right; | ||
171 | old->right->parent = node; | ||
172 | } | ||
173 | |||
174 | return old; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * Insert a prio_tree_node @node into a radix priority search tree @root. The | ||
179 | * algorithm typically takes O(log n) time where 'log n' is the number of bits | ||
180 | * required to represent the maximum heap_index. In the worst case, the | ||
181 | * algorithm can take O((log n)^2); see prio_tree_expand(). | ||
182 | * | ||
183 | * If a node with the same radix_index and heap_index already exists in | ||
184 | * the tree, then the address of the existing node is returned. Otherwise, | ||
185 | * @node is inserted into the tree and returned. | ||
186 | */ | ||
187 | struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root, | ||
188 | struct prio_tree_node *node) | ||
189 | { | ||
190 | struct prio_tree_node *cur, *res = node; | ||
191 | unsigned long radix_index, heap_index; | ||
192 | unsigned long r_index, h_index, index, mask; | ||
193 | int size_flag = 0; | ||
194 | |||
195 | get_index(root, node, &radix_index, &heap_index); | ||
196 | |||
197 | if (prio_tree_empty(root) || | ||
198 | heap_index > prio_tree_maxindex(root->index_bits)) | ||
199 | return prio_tree_expand(root, node, heap_index); | ||
200 | |||
201 | cur = root->prio_tree_node; | ||
202 | mask = 1UL << (root->index_bits - 1); | ||
203 | |||
204 | while (mask) { | ||
205 | get_index(root, cur, &r_index, &h_index); | ||
206 | |||
207 | if (r_index == radix_index && h_index == heap_index) | ||
208 | return cur; | ||
209 | |||
210 | if (h_index < heap_index || | ||
211 | (h_index == heap_index && r_index > radix_index)) { | ||
212 | struct prio_tree_node *tmp = node; | ||
213 | node = prio_tree_replace(root, cur, node); | ||
214 | cur = tmp; | ||
215 | /* swap indices */ | ||
216 | index = r_index; | ||
217 | r_index = radix_index; | ||
218 | radix_index = index; | ||
219 | index = h_index; | ||
220 | h_index = heap_index; | ||
221 | heap_index = index; | ||
222 | } | ||
223 | |||
224 | if (size_flag) | ||
225 | index = heap_index - radix_index; | ||
226 | else | ||
227 | index = radix_index; | ||
228 | |||
229 | if (index & mask) { | ||
230 | if (prio_tree_right_empty(cur)) { | ||
231 | INIT_PRIO_TREE_NODE(node); | ||
232 | cur->right = node; | ||
233 | node->parent = cur; | ||
234 | return res; | ||
235 | } else | ||
236 | cur = cur->right; | ||
237 | } else { | ||
238 | if (prio_tree_left_empty(cur)) { | ||
239 | INIT_PRIO_TREE_NODE(node); | ||
240 | cur->left = node; | ||
241 | node->parent = cur; | ||
242 | return res; | ||
243 | } else | ||
244 | cur = cur->left; | ||
245 | } | ||
246 | |||
247 | mask >>= 1; | ||
248 | |||
249 | if (!mask) { | ||
250 | mask = 1UL << (BITS_PER_LONG - 1); | ||
251 | size_flag = 1; | ||
252 | } | ||
253 | } | ||
254 | /* Should not reach here */ | ||
255 | BUG(); | ||
256 | return NULL; | ||
257 | } | ||
258 | |||
259 | /* | ||
260 | * Remove a prio_tree_node @node from a radix priority search tree @root. The | ||
261 | * algorithm takes O(log n) time where 'log n' is the number of bits required | ||
262 | * to represent the maximum heap_index. | ||
263 | */ | ||
264 | void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node) | ||
265 | { | ||
266 | struct prio_tree_node *cur; | ||
267 | unsigned long r_index, h_index_right, h_index_left; | ||
268 | |||
269 | cur = node; | ||
270 | |||
271 | while (!prio_tree_left_empty(cur) || !prio_tree_right_empty(cur)) { | ||
272 | if (!prio_tree_left_empty(cur)) | ||
273 | get_index(root, cur->left, &r_index, &h_index_left); | ||
274 | else { | ||
275 | cur = cur->right; | ||
276 | continue; | ||
277 | } | ||
278 | |||
279 | if (!prio_tree_right_empty(cur)) | ||
280 | get_index(root, cur->right, &r_index, &h_index_right); | ||
281 | else { | ||
282 | cur = cur->left; | ||
283 | continue; | ||
284 | } | ||
285 | |||
286 | /* h_index_left and h_index_right cannot both be 0 */ | ||
287 | if (h_index_left >= h_index_right) | ||
288 | cur = cur->left; | ||
289 | else | ||
290 | cur = cur->right; | ||
291 | } | ||
292 | |||
293 | if (prio_tree_root(cur)) { | ||
294 | BUG_ON(root->prio_tree_node != cur); | ||
295 | __INIT_PRIO_TREE_ROOT(root, root->raw); | ||
296 | return; | ||
297 | } | ||
298 | |||
299 | if (cur->parent->right == cur) | ||
300 | cur->parent->right = cur->parent; | ||
301 | else | ||
302 | cur->parent->left = cur->parent; | ||
303 | |||
304 | while (cur != node) | ||
305 | cur = prio_tree_replace(root, cur->parent, cur); | ||
306 | } | ||
307 | |||
308 | /* | ||
309 | * Following functions help to enumerate all prio_tree_nodes in the tree that | ||
310 | * overlap with the input interval X [radix_index, heap_index]. The enumeration | ||
311 | * takes O(log n + m) time where 'log n' is the height of the tree (which is | ||
312 | * proportional to # of bits required to represent the maximum heap_index) and | ||
313 | * 'm' is the number of prio_tree_nodes that overlap the interval X. | ||
314 | */ | ||
315 | |||
316 | static struct prio_tree_node *prio_tree_left(struct prio_tree_iter *iter, | ||
317 | unsigned long *r_index, unsigned long *h_index) | ||
318 | { | ||
319 | if (prio_tree_left_empty(iter->cur)) | ||
320 | return NULL; | ||
321 | |||
322 | get_index(iter->root, iter->cur->left, r_index, h_index); | ||
323 | |||
324 | if (iter->r_index <= *h_index) { | ||
325 | iter->cur = iter->cur->left; | ||
326 | iter->mask >>= 1; | ||
327 | if (iter->mask) { | ||
328 | if (iter->size_level) | ||
329 | iter->size_level++; | ||
330 | } else { | ||
331 | if (iter->size_level) { | ||
332 | BUG_ON(!prio_tree_left_empty(iter->cur)); | ||
333 | BUG_ON(!prio_tree_right_empty(iter->cur)); | ||
334 | iter->size_level++; | ||
335 | iter->mask = ULONG_MAX; | ||
336 | } else { | ||
337 | iter->size_level = 1; | ||
338 | iter->mask = 1UL << (BITS_PER_LONG - 1); | ||
339 | } | ||
340 | } | ||
341 | return iter->cur; | ||
342 | } | ||
343 | |||
344 | return NULL; | ||
345 | } | ||
346 | |||
347 | static struct prio_tree_node *prio_tree_right(struct prio_tree_iter *iter, | ||
348 | unsigned long *r_index, unsigned long *h_index) | ||
349 | { | ||
350 | unsigned long value; | ||
351 | |||
352 | if (prio_tree_right_empty(iter->cur)) | ||
353 | return NULL; | ||
354 | |||
355 | if (iter->size_level) | ||
356 | value = iter->value; | ||
357 | else | ||
358 | value = iter->value | iter->mask; | ||
359 | |||
360 | if (iter->h_index < value) | ||
361 | return NULL; | ||
362 | |||
363 | get_index(iter->root, iter->cur->right, r_index, h_index); | ||
364 | |||
365 | if (iter->r_index <= *h_index) { | ||
366 | iter->cur = iter->cur->right; | ||
367 | iter->mask >>= 1; | ||
368 | iter->value = value; | ||
369 | if (iter->mask) { | ||
370 | if (iter->size_level) | ||
371 | iter->size_level++; | ||
372 | } else { | ||
373 | if (iter->size_level) { | ||
374 | BUG_ON(!prio_tree_left_empty(iter->cur)); | ||
375 | BUG_ON(!prio_tree_right_empty(iter->cur)); | ||
376 | iter->size_level++; | ||
377 | iter->mask = ULONG_MAX; | ||
378 | } else { | ||
379 | iter->size_level = 1; | ||
380 | iter->mask = 1UL << (BITS_PER_LONG - 1); | ||
381 | } | ||
382 | } | ||
383 | return iter->cur; | ||
384 | } | ||
385 | |||
386 | return NULL; | ||
387 | } | ||
388 | |||
389 | static struct prio_tree_node *prio_tree_parent(struct prio_tree_iter *iter) | ||
390 | { | ||
391 | iter->cur = iter->cur->parent; | ||
392 | if (iter->mask == ULONG_MAX) | ||
393 | iter->mask = 1UL; | ||
394 | else if (iter->size_level == 1) | ||
395 | iter->mask = 1UL; | ||
396 | else | ||
397 | iter->mask <<= 1; | ||
398 | if (iter->size_level) | ||
399 | iter->size_level--; | ||
400 | if (!iter->size_level && (iter->value & iter->mask)) | ||
401 | iter->value ^= iter->mask; | ||
402 | return iter->cur; | ||
403 | } | ||
404 | |||
405 | static inline int overlap(struct prio_tree_iter *iter, | ||
406 | unsigned long r_index, unsigned long h_index) | ||
407 | { | ||
408 | return iter->h_index >= r_index && iter->r_index <= h_index; | ||
409 | } | ||
410 | |||
411 | /* | ||
412 | * prio_tree_first: | ||
413 | * | ||
414 | * Get the first prio_tree_node that overlaps with the interval [radix_index, | ||
415 | * heap_index]. Note that radix_index <= heap_index always holds. We do a pre-order | ||
416 | * traversal of the tree. | ||
417 | */ | ||
418 | static struct prio_tree_node *prio_tree_first(struct prio_tree_iter *iter) | ||
419 | { | ||
420 | struct prio_tree_root *root; | ||
421 | unsigned long r_index, h_index; | ||
422 | |||
423 | INIT_PRIO_TREE_ITER(iter); | ||
424 | |||
425 | root = iter->root; | ||
426 | if (prio_tree_empty(root)) | ||
427 | return NULL; | ||
428 | |||
429 | get_index(root, root->prio_tree_node, &r_index, &h_index); | ||
430 | |||
431 | if (iter->r_index > h_index) | ||
432 | return NULL; | ||
433 | |||
434 | iter->mask = 1UL << (root->index_bits - 1); | ||
435 | iter->cur = root->prio_tree_node; | ||
436 | |||
437 | while (1) { | ||
438 | if (overlap(iter, r_index, h_index)) | ||
439 | return iter->cur; | ||
440 | |||
441 | if (prio_tree_left(iter, &r_index, &h_index)) | ||
442 | continue; | ||
443 | |||
444 | if (prio_tree_right(iter, &r_index, &h_index)) | ||
445 | continue; | ||
446 | |||
447 | break; | ||
448 | } | ||
449 | return NULL; | ||
450 | } | ||
451 | |||
452 | /* | ||
453 | * prio_tree_next: | ||
454 | * | ||
455 | * Get the next prio_tree_node that overlaps with the input interval in iter | ||
456 | */ | ||
457 | struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter) | ||
458 | { | ||
459 | unsigned long r_index, h_index; | ||
460 | |||
461 | if (iter->cur == NULL) | ||
462 | return prio_tree_first(iter); | ||
463 | |||
464 | repeat: | ||
465 | while (prio_tree_left(iter, &r_index, &h_index)) | ||
466 | if (overlap(iter, r_index, h_index)) | ||
467 | return iter->cur; | ||
468 | |||
469 | while (!prio_tree_right(iter, &r_index, &h_index)) { | ||
470 | while (!prio_tree_root(iter->cur) && | ||
471 | iter->cur->parent->right == iter->cur) | ||
472 | prio_tree_parent(iter); | ||
473 | |||
474 | if (prio_tree_root(iter->cur)) | ||
475 | return NULL; | ||
476 | |||
477 | prio_tree_parent(iter); | ||
478 | } | ||
479 | |||
480 | if (overlap(iter, r_index, h_index)) | ||
481 | return iter->cur; | ||
482 | |||
483 | goto repeat; | ||
484 | } | ||
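A sketch of a stabbing query driven by prio_tree_next(). Judging from the code above, the iterator is primed by filling in the root, the query interval, and a NULL cur; the field names come from this file, while handle_overlap() is a hypothetical per-match callback:

	struct prio_tree_iter iter;
	struct prio_tree_node *n;

	iter.root = root;		/* the PST to query */
	iter.r_index = start;		/* query interval [start, last] */
	iter.h_index = last;
	iter.cur = NULL;		/* first call falls through to prio_tree_first() */

	while ((n = prio_tree_next(&iter)) != NULL)
		handle_overlap(n);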
diff --git a/lib/radix-tree.c b/lib/radix-tree.c new file mode 100644 index 000000000000..04d664377f2c --- /dev/null +++ b/lib/radix-tree.c | |||
@@ -0,0 +1,807 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2001 Momchil Velikov | ||
3 | * Portions Copyright (C) 2001 Christoph Hellwig | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation; either version 2, or (at | ||
8 | * your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
18 | */ | ||
19 | |||
20 | #include <linux/errno.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/radix-tree.h> | ||
25 | #include <linux/percpu.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/notifier.h> | ||
28 | #include <linux/cpu.h> | ||
29 | #include <linux/gfp.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <linux/bitops.h> | ||
32 | |||
33 | |||
34 | #ifdef __KERNEL__ | ||
35 | #define RADIX_TREE_MAP_SHIFT 6 | ||
36 | #else | ||
37 | #define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */ | ||
38 | #endif | ||
39 | #define RADIX_TREE_TAGS 2 | ||
40 | |||
41 | #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) | ||
42 | #define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) | ||
43 | |||
44 | #define RADIX_TREE_TAG_LONGS \ | ||
45 | ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG) | ||
46 | |||
47 | struct radix_tree_node { | ||
48 | unsigned int count; | ||
49 | void *slots[RADIX_TREE_MAP_SIZE]; | ||
50 | unsigned long tags[RADIX_TREE_TAGS][RADIX_TREE_TAG_LONGS]; | ||
51 | }; | ||
52 | |||
53 | struct radix_tree_path { | ||
54 | struct radix_tree_node *node, **slot; | ||
55 | int offset; | ||
56 | }; | ||
57 | |||
58 | #define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) | ||
59 | #define RADIX_TREE_MAX_PATH (RADIX_TREE_INDEX_BITS/RADIX_TREE_MAP_SHIFT + 2) | ||
60 | |||
61 | static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH]; | ||
62 | |||
63 | /* | ||
64 | * Radix tree node cache. | ||
65 | */ | ||
66 | static kmem_cache_t *radix_tree_node_cachep; | ||
67 | |||
68 | /* | ||
69 | * Per-cpu pool of preloaded nodes | ||
70 | */ | ||
71 | struct radix_tree_preload { | ||
72 | int nr; | ||
73 | struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH]; | ||
74 | }; | ||
75 | DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; | ||
76 | |||
77 | /* | ||
78 | * This assumes that the caller has performed appropriate preallocation, and | ||
79 | * that the caller has pinned this thread of control to the current CPU. | ||
80 | */ | ||
81 | static struct radix_tree_node * | ||
82 | radix_tree_node_alloc(struct radix_tree_root *root) | ||
83 | { | ||
84 | struct radix_tree_node *ret; | ||
85 | |||
86 | ret = kmem_cache_alloc(radix_tree_node_cachep, root->gfp_mask); | ||
87 | if (ret == NULL && !(root->gfp_mask & __GFP_WAIT)) { | ||
88 | struct radix_tree_preload *rtp; | ||
89 | |||
90 | rtp = &__get_cpu_var(radix_tree_preloads); | ||
91 | if (rtp->nr) { | ||
92 | ret = rtp->nodes[rtp->nr - 1]; | ||
93 | rtp->nodes[rtp->nr - 1] = NULL; | ||
94 | rtp->nr--; | ||
95 | } | ||
96 | } | ||
97 | return ret; | ||
98 | } | ||
99 | |||
100 | static inline void | ||
101 | radix_tree_node_free(struct radix_tree_node *node) | ||
102 | { | ||
103 | kmem_cache_free(radix_tree_node_cachep, node); | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Load up this CPU's radix_tree_node buffer with sufficient objects to | ||
108 | * ensure that the addition of a single element in the tree cannot fail. On | ||
109 | * success, return zero, with preemption disabled. On error, return -ENOMEM | ||
110 | * with preemption not disabled. | ||
111 | */ | ||
112 | int radix_tree_preload(int gfp_mask) | ||
113 | { | ||
114 | struct radix_tree_preload *rtp; | ||
115 | struct radix_tree_node *node; | ||
116 | int ret = -ENOMEM; | ||
117 | |||
118 | preempt_disable(); | ||
119 | rtp = &__get_cpu_var(radix_tree_preloads); | ||
120 | while (rtp->nr < ARRAY_SIZE(rtp->nodes)) { | ||
121 | preempt_enable(); | ||
122 | node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); | ||
123 | if (node == NULL) | ||
124 | goto out; | ||
125 | preempt_disable(); | ||
126 | rtp = &__get_cpu_var(radix_tree_preloads); | ||
127 | if (rtp->nr < ARRAY_SIZE(rtp->nodes)) | ||
128 | rtp->nodes[rtp->nr++] = node; | ||
129 | else | ||
130 | kmem_cache_free(radix_tree_node_cachep, node); | ||
131 | } | ||
132 | ret = 0; | ||
133 | out: | ||
134 | return ret; | ||
135 | } | ||
136 | |||
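A minimal usage sketch of the preload protocol described above (illustrative only; the tree, lock, and GFP choice are hypothetical, not taken from this file):

	/*
	 * Illustrative sketch: preload outside the lock, insert under it.
	 * my_tree and my_lock are hypothetical.
	 */
	static int add_item(struct radix_tree_root *my_tree, spinlock_t *my_lock,
			    unsigned long index, void *item)
	{
		int err;

		err = radix_tree_preload(GFP_KERNEL);	/* may sleep */
		if (err)
			return err;	/* -ENOMEM, preemption not disabled */
		spin_lock(my_lock);
		err = radix_tree_insert(my_tree, index, item);
		spin_unlock(my_lock);
		preempt_enable();	/* pairs with preload's preempt_disable() */
		return err;
	}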
137 | static inline void tag_set(struct radix_tree_node *node, int tag, int offset) | ||
138 | { | ||
139 | if (!test_bit(offset, &node->tags[tag][0])) | ||
140 | __set_bit(offset, &node->tags[tag][0]); | ||
141 | } | ||
142 | |||
143 | static inline void tag_clear(struct radix_tree_node *node, int tag, int offset) | ||
144 | { | ||
145 | __clear_bit(offset, &node->tags[tag][0]); | ||
146 | } | ||
147 | |||
148 | static inline int tag_get(struct radix_tree_node *node, int tag, int offset) | ||
149 | { | ||
150 | return test_bit(offset, &node->tags[tag][0]); | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * Return the maximum key which can be stored in a | ||
155 | * radix tree with height HEIGHT. | ||
156 | */ | ||
157 | static inline unsigned long radix_tree_maxindex(unsigned int height) | ||
158 | { | ||
159 | return height_to_maxindex[height]; | ||
160 | } | ||
161 | |||
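A worked illustration of the height/index relationship (derived from __maxindex() further down; assumes the in-kernel RADIX_TREE_MAP_SHIFT of 6):

	/*
	 * With a 64-way fanout (shift 6), maxindex(h) = 2^(6h) - 1:
	 *   height 1: indices 0 .. 63
	 *   height 2: indices 0 .. 4095
	 *   height 3: indices 0 .. 262143
	 * saturating at ~0UL once 6h reaches RADIX_TREE_INDEX_BITS.
	 */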
162 | /* | ||
163 | * Extend a radix tree so it can store key @index. | ||
164 | */ | ||
165 | static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) | ||
166 | { | ||
167 | struct radix_tree_node *node; | ||
168 | unsigned int height; | ||
169 | char tags[RADIX_TREE_TAGS]; | ||
170 | int tag; | ||
171 | |||
172 | /* Figure out what the height should be. */ | ||
173 | height = root->height + 1; | ||
174 | while (index > radix_tree_maxindex(height)) | ||
175 | height++; | ||
176 | |||
177 | if (root->rnode == NULL) { | ||
178 | root->height = height; | ||
179 | goto out; | ||
180 | } | ||
181 | |||
182 | /* | ||
183 | * Prepare the tag status of the top-level node for propagation | ||
184 | * into the newly-pushed top-level node(s) | ||
185 | */ | ||
186 | for (tag = 0; tag < RADIX_TREE_TAGS; tag++) { | ||
187 | int idx; | ||
188 | |||
189 | tags[tag] = 0; | ||
190 | for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { | ||
191 | if (root->rnode->tags[tag][idx]) { | ||
192 | tags[tag] = 1; | ||
193 | break; | ||
194 | } | ||
195 | } | ||
196 | } | ||
197 | |||
198 | do { | ||
199 | if (!(node = radix_tree_node_alloc(root))) | ||
200 | return -ENOMEM; | ||
201 | |||
202 | /* Increase the height. */ | ||
203 | node->slots[0] = root->rnode; | ||
204 | |||
205 | /* Propagate the aggregated tag info into the new root */ | ||
206 | for (tag = 0; tag < RADIX_TREE_TAGS; tag++) { | ||
207 | if (tags[tag]) | ||
208 | tag_set(node, tag, 0); | ||
209 | } | ||
210 | |||
211 | node->count = 1; | ||
212 | root->rnode = node; | ||
213 | root->height++; | ||
214 | } while (height > root->height); | ||
215 | out: | ||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | /** | ||
220 | * radix_tree_insert - insert into a radix tree | ||
221 | * @root: radix tree root | ||
222 | * @index: index key | ||
223 | * @item: item to insert | ||
224 | * | ||
225 | * Insert an item into the radix tree at position @index. | ||
226 | */ | ||
227 | int radix_tree_insert(struct radix_tree_root *root, | ||
228 | unsigned long index, void *item) | ||
229 | { | ||
230 | struct radix_tree_node *node = NULL, *tmp, **slot; | ||
231 | unsigned int height, shift; | ||
232 | int offset; | ||
233 | int error; | ||
234 | |||
235 | /* Make sure the tree is high enough. */ | ||
236 | if ((!index && !root->rnode) || | ||
237 | index > radix_tree_maxindex(root->height)) { | ||
238 | error = radix_tree_extend(root, index); | ||
239 | if (error) | ||
240 | return error; | ||
241 | } | ||
242 | |||
243 | slot = &root->rnode; | ||
244 | height = root->height; | ||
245 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; | ||
246 | |||
247 | offset = 0; /* uninitialised var warning */ | ||
248 | while (height > 0) { | ||
249 | if (*slot == NULL) { | ||
250 | /* Have to add a child node. */ | ||
251 | if (!(tmp = radix_tree_node_alloc(root))) | ||
252 | return -ENOMEM; | ||
253 | *slot = tmp; | ||
254 | if (node) | ||
255 | node->count++; | ||
256 | } | ||
257 | |||
258 | /* Go a level down */ | ||
259 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | ||
260 | node = *slot; | ||
261 | slot = (struct radix_tree_node **)(node->slots + offset); | ||
262 | shift -= RADIX_TREE_MAP_SHIFT; | ||
263 | height--; | ||
264 | } | ||
265 | |||
266 | if (*slot != NULL) | ||
267 | return -EEXIST; | ||
268 | if (node) { | ||
269 | node->count++; | ||
270 | BUG_ON(tag_get(node, 0, offset)); | ||
271 | BUG_ON(tag_get(node, 1, offset)); | ||
272 | } | ||
273 | |||
274 | *slot = item; | ||
275 | return 0; | ||
276 | } | ||
277 | EXPORT_SYMBOL(radix_tree_insert); | ||
278 | |||
279 | /** | ||
280 | * radix_tree_lookup - perform lookup operation on a radix tree | ||
281 | * @root: radix tree root | ||
282 | * @index: index key | ||
283 | * | ||
284 | * Look up the item at position @index in the radix tree @root. | ||
285 | */ | ||
286 | void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) | ||
287 | { | ||
288 | unsigned int height, shift; | ||
289 | struct radix_tree_node **slot; | ||
290 | |||
291 | height = root->height; | ||
292 | if (index > radix_tree_maxindex(height)) | ||
293 | return NULL; | ||
294 | |||
295 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; | ||
296 | slot = &root->rnode; | ||
297 | |||
298 | while (height > 0) { | ||
299 | if (*slot == NULL) | ||
300 | return NULL; | ||
301 | |||
302 | slot = (struct radix_tree_node **) | ||
303 | ((*slot)->slots + | ||
304 | ((index >> shift) & RADIX_TREE_MAP_MASK)); | ||
305 | shift -= RADIX_TREE_MAP_SHIFT; | ||
306 | height--; | ||
307 | } | ||
308 | |||
309 | return *slot; | ||
310 | } | ||
311 | EXPORT_SYMBOL(radix_tree_lookup); | ||
312 | |||
313 | /** | ||
314 | * radix_tree_tag_set - set a tag on a radix tree node | ||
315 | * @root: radix tree root | ||
316 | * @index: index key | ||
317 | * @tag: tag index | ||
318 | * | ||
319 | * Set the search tag corresponding to @index in the radix tree, from | ||
320 | * the root all the way down to the leaf node. | ||
321 | * | ||
322 | * Returns the address of the tagged item. Setting a tag on a not-present | ||
323 | * item is a bug. | ||
324 | */ | ||
325 | void *radix_tree_tag_set(struct radix_tree_root *root, | ||
326 | unsigned long index, int tag) | ||
327 | { | ||
328 | unsigned int height, shift; | ||
329 | struct radix_tree_node **slot; | ||
330 | |||
331 | height = root->height; | ||
332 | if (index > radix_tree_maxindex(height)) | ||
333 | return NULL; | ||
334 | |||
335 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | ||
336 | slot = &root->rnode; | ||
337 | |||
338 | while (height > 0) { | ||
339 | int offset; | ||
340 | |||
341 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | ||
342 | tag_set(*slot, tag, offset); | ||
343 | slot = (struct radix_tree_node **)((*slot)->slots + offset); | ||
344 | BUG_ON(*slot == NULL); | ||
345 | shift -= RADIX_TREE_MAP_SHIFT; | ||
346 | height--; | ||
347 | } | ||
348 | |||
349 | return *slot; | ||
350 | } | ||
351 | EXPORT_SYMBOL(radix_tree_tag_set); | ||
352 | |||
353 | /** | ||
354 | * radix_tree_tag_clear - clear a tag on a radix tree node | ||
355 | * @root: radix tree root | ||
356 | * @index: index key | ||
357 | * @tag: tag index | ||
358 | * | ||
359 | * Clear the search tag corresponding to @index in the radix tree. If | ||
360 | * this causes the leaf node to have no tags set then clear the tag in the | ||
361 | * next-to-leaf node, etc. | ||
362 | * | ||
363 | * Returns the address of the tagged item on success, else NULL; i.e. it | ||
364 | * has the same return value and semantics as radix_tree_lookup(). | ||
365 | */ | ||
366 | void *radix_tree_tag_clear(struct radix_tree_root *root, | ||
367 | unsigned long index, int tag) | ||
368 | { | ||
369 | struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path; | ||
370 | unsigned int height, shift; | ||
371 | void *ret = NULL; | ||
372 | |||
373 | height = root->height; | ||
374 | if (index > radix_tree_maxindex(height)) | ||
375 | goto out; | ||
376 | |||
377 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | ||
378 | pathp->node = NULL; | ||
379 | pathp->slot = &root->rnode; | ||
380 | |||
381 | while (height > 0) { | ||
382 | int offset; | ||
383 | |||
384 | if (*pathp->slot == NULL) | ||
385 | goto out; | ||
386 | |||
387 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | ||
388 | pathp[1].offset = offset; | ||
389 | pathp[1].node = *pathp[0].slot; | ||
390 | pathp[1].slot = (struct radix_tree_node **) | ||
391 | (pathp[1].node->slots + offset); | ||
392 | pathp++; | ||
393 | shift -= RADIX_TREE_MAP_SHIFT; | ||
394 | height--; | ||
395 | } | ||
396 | |||
397 | ret = *pathp[0].slot; | ||
398 | if (ret == NULL) | ||
399 | goto out; | ||
400 | |||
401 | do { | ||
402 | int idx; | ||
403 | |||
404 | tag_clear(pathp[0].node, tag, pathp[0].offset); | ||
405 | for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { | ||
406 | if (pathp[0].node->tags[tag][idx]) | ||
407 | goto out; | ||
408 | } | ||
409 | pathp--; | ||
410 | } while (pathp[0].node); | ||
411 | out: | ||
412 | return ret; | ||
413 | } | ||
414 | EXPORT_SYMBOL(radix_tree_tag_clear); | ||
415 | |||
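An illustrative sequence showing the upward propagation described above (tree and index are hypothetical; the item at index 42 must already be present, since tagging a missing item is a bug):

	radix_tree_tag_set(&tree, 42, 0);	/* tags the leaf and every ancestor */
	radix_tree_tag_clear(&tree, 42, 0);	/* if no sibling under a node still
						 * carries tag 0, the node's own tag
						 * bit is cleared on the way back up */
	/* afterwards radix_tree_tagged(&tree, 0) returns 0 */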
416 | #ifndef __KERNEL__ /* Only the test harness uses this at present */ | ||
417 | /** | ||
418 | * radix_tree_tag_get - get a tag on a radix tree node | ||
419 | * @root: radix tree root | ||
420 | * @index: index key | ||
421 | * @tag: tag index | ||
422 | * | ||
423 | * Return the search tag corresponding to @index in the radix tree. | ||
424 | * | ||
425 | * Returns zero if the tag is unset, or if there is no corresponding item | ||
426 | * in the tree. | ||
427 | */ | ||
428 | int radix_tree_tag_get(struct radix_tree_root *root, | ||
429 | unsigned long index, int tag) | ||
430 | { | ||
431 | unsigned int height, shift; | ||
432 | struct radix_tree_node **slot; | ||
433 | int saw_unset_tag = 0; | ||
434 | |||
435 | height = root->height; | ||
436 | if (index > radix_tree_maxindex(height)) | ||
437 | return 0; | ||
438 | |||
439 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | ||
440 | slot = &root->rnode; | ||
441 | |||
442 | for ( ; ; ) { | ||
443 | int offset; | ||
444 | |||
445 | if (*slot == NULL) | ||
446 | return 0; | ||
447 | |||
448 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | ||
449 | |||
450 | /* | ||
451 | * This is just a debug check. Later, we can bail as soon as | ||
452 | * we see an unset tag. | ||
453 | */ | ||
454 | if (!tag_get(*slot, tag, offset)) | ||
455 | saw_unset_tag = 1; | ||
456 | if (height == 1) { | ||
457 | int ret = tag_get(*slot, tag, offset); | ||
458 | |||
459 | BUG_ON(ret && saw_unset_tag); | ||
460 | return ret; | ||
461 | } | ||
462 | slot = (struct radix_tree_node **)((*slot)->slots + offset); | ||
463 | shift -= RADIX_TREE_MAP_SHIFT; | ||
464 | height--; | ||
465 | } | ||
466 | } | ||
467 | EXPORT_SYMBOL(radix_tree_tag_get); | ||
468 | #endif | ||
469 | |||
470 | static unsigned int | ||
471 | __lookup(struct radix_tree_root *root, void **results, unsigned long index, | ||
472 | unsigned int max_items, unsigned long *next_index) | ||
473 | { | ||
474 | unsigned int nr_found = 0; | ||
475 | unsigned int shift; | ||
476 | unsigned int height = root->height; | ||
477 | struct radix_tree_node *slot; | ||
478 | |||
479 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; | ||
480 | slot = root->rnode; | ||
481 | |||
482 | while (height > 0) { | ||
483 | unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK; | ||
484 | |||
485 | for ( ; i < RADIX_TREE_MAP_SIZE; i++) { | ||
486 | if (slot->slots[i] != NULL) | ||
487 | break; | ||
488 | index &= ~((1UL << shift) - 1); | ||
489 | index += 1UL << shift; | ||
490 | if (index == 0) | ||
491 | goto out; /* 32-bit wraparound */ | ||
492 | } | ||
493 | if (i == RADIX_TREE_MAP_SIZE) | ||
494 | goto out; | ||
495 | height--; | ||
496 | if (height == 0) { /* Bottom level: grab some items */ | ||
497 | unsigned long j = index & RADIX_TREE_MAP_MASK; | ||
498 | |||
499 | for ( ; j < RADIX_TREE_MAP_SIZE; j++) { | ||
500 | index++; | ||
501 | if (slot->slots[j]) { | ||
502 | results[nr_found++] = slot->slots[j]; | ||
503 | if (nr_found == max_items) | ||
504 | goto out; | ||
505 | } | ||
506 | } | ||
507 | } | ||
508 | shift -= RADIX_TREE_MAP_SHIFT; | ||
509 | slot = slot->slots[i]; | ||
510 | } | ||
511 | out: | ||
512 | *next_index = index; | ||
513 | return nr_found; | ||
514 | } | ||
515 | |||
516 | /** | ||
517 | * radix_tree_gang_lookup - perform multiple lookups on a radix tree | ||
518 | * @root: radix tree root | ||
519 | * @results: where the results of the lookup are placed | ||
520 | * @first_index: start the lookup from this key | ||
521 | * @max_items: place up to this many items at *results | ||
522 | * | ||
523 | * Performs an index-ascending scan of the tree for present items. Places | ||
524 | * them at *@results and returns the number of items which were placed at | ||
525 | * *@results. | ||
526 | * | ||
527 | * The implementation is naive. | ||
528 | */ | ||
529 | unsigned int | ||
530 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | ||
531 | unsigned long first_index, unsigned int max_items) | ||
532 | { | ||
533 | const unsigned long max_index = radix_tree_maxindex(root->height); | ||
534 | unsigned long cur_index = first_index; | ||
535 | unsigned int ret = 0; | ||
536 | |||
537 | while (ret < max_items) { | ||
538 | unsigned int nr_found; | ||
539 | unsigned long next_index; /* Index of next search */ | ||
540 | |||
541 | if (cur_index > max_index) | ||
542 | break; | ||
543 | nr_found = __lookup(root, results + ret, cur_index, | ||
544 | max_items - ret, &next_index); | ||
545 | ret += nr_found; | ||
546 | if (next_index == 0) | ||
547 | break; | ||
548 | cur_index = next_index; | ||
549 | } | ||
550 | return ret; | ||
551 | } | ||
552 | EXPORT_SYMBOL(radix_tree_gang_lookup); | ||
553 | |||
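A hedged sketch of the intended batching pattern. Since the gang lookup returns pointers rather than indices, the loop assumes each stored object records its own index, as struct page does in the page cache; my_item and process() are hypothetical:

	static void walk_all(struct radix_tree_root *tree)
	{
		void *results[16];
		unsigned long next = 0;
		unsigned int i, n;

		do {
			n = radix_tree_gang_lookup(tree, results, next, 16);
			for (i = 0; i < n; i++) {
				struct my_item *it = results[i];	/* hypothetical type */
				process(it);				/* hypothetical */
				next = it->index + 1;			/* assumed member */
			}
		} while (n == 16);	/* a short batch means the scan is done */
	}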
554 | /* | ||
555 | * FIXME: the two tag_get()s here should use find_next_bit() instead of | ||
556 | * open-coding the search. | ||
557 | */ | ||
558 | static unsigned int | ||
559 | __lookup_tag(struct radix_tree_root *root, void **results, unsigned long index, | ||
560 | unsigned int max_items, unsigned long *next_index, int tag) | ||
561 | { | ||
562 | unsigned int nr_found = 0; | ||
563 | unsigned int shift; | ||
564 | unsigned int height = root->height; | ||
565 | struct radix_tree_node *slot; | ||
566 | |||
567 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | ||
568 | slot = root->rnode; | ||
569 | |||
570 | while (height > 0) { | ||
571 | unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK; | ||
572 | |||
573 | for ( ; i < RADIX_TREE_MAP_SIZE; i++) { | ||
574 | if (tag_get(slot, tag, i)) { | ||
575 | BUG_ON(slot->slots[i] == NULL); | ||
576 | break; | ||
577 | } | ||
578 | index &= ~((1UL << shift) - 1); | ||
579 | index += 1UL << shift; | ||
580 | if (index == 0) | ||
581 | goto out; /* 32-bit wraparound */ | ||
582 | } | ||
583 | if (i == RADIX_TREE_MAP_SIZE) | ||
584 | goto out; | ||
585 | height--; | ||
586 | if (height == 0) { /* Bottom level: grab some items */ | ||
587 | unsigned long j = index & RADIX_TREE_MAP_MASK; | ||
588 | |||
589 | for ( ; j < RADIX_TREE_MAP_SIZE; j++) { | ||
590 | index++; | ||
591 | if (tag_get(slot, tag, j)) { | ||
592 | BUG_ON(slot->slots[j] == NULL); | ||
593 | results[nr_found++] = slot->slots[j]; | ||
594 | if (nr_found == max_items) | ||
595 | goto out; | ||
596 | } | ||
597 | } | ||
598 | } | ||
599 | shift -= RADIX_TREE_MAP_SHIFT; | ||
600 | slot = slot->slots[i]; | ||
601 | } | ||
602 | out: | ||
603 | *next_index = index; | ||
604 | return nr_found; | ||
605 | } | ||
606 | |||
607 | /** | ||
608 | * radix_tree_gang_lookup_tag - perform multiple lookups on a radix tree | ||
609 | * based on a tag | ||
610 | * @root: radix tree root | ||
611 | * @results: where the results of the lookup are placed | ||
612 | * @first_index: start the lookup from this key | ||
613 | * @max_items: place up to this many items at *results | ||
614 | * @tag: the tag index | ||
615 | * | ||
616 | * Performs an index-ascending scan of the tree for present items which | ||
617 | * have the tag indexed by @tag set. Places the items at *@results and | ||
618 | * returns the number of items which were placed at *@results. | ||
619 | */ | ||
620 | unsigned int | ||
621 | radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | ||
622 | unsigned long first_index, unsigned int max_items, int tag) | ||
623 | { | ||
624 | const unsigned long max_index = radix_tree_maxindex(root->height); | ||
625 | unsigned long cur_index = first_index; | ||
626 | unsigned int ret = 0; | ||
627 | |||
628 | while (ret < max_items) { | ||
629 | unsigned int nr_found; | ||
630 | unsigned long next_index; /* Index of next search */ | ||
631 | |||
632 | if (cur_index > max_index) | ||
633 | break; | ||
634 | nr_found = __lookup_tag(root, results + ret, cur_index, | ||
635 | max_items - ret, &next_index, tag); | ||
636 | ret += nr_found; | ||
637 | if (next_index == 0) | ||
638 | break; | ||
639 | cur_index = next_index; | ||
640 | } | ||
641 | return ret; | ||
642 | } | ||
643 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag); | ||
644 | |||
645 | /** | ||
646 | * radix_tree_delete - delete an item from a radix tree | ||
647 | * @root: radix tree root | ||
648 | * @index: index key | ||
649 | * | ||
650 | * Remove the item at @index from the radix tree rooted at @root. | ||
651 | * | ||
652 | * Returns the address of the deleted item, or NULL if it was not present. | ||
653 | */ | ||
654 | void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | ||
655 | { | ||
656 | struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path; | ||
657 | struct radix_tree_path *orig_pathp; | ||
658 | unsigned int height, shift; | ||
659 | void *ret = NULL; | ||
660 | char tags[RADIX_TREE_TAGS]; | ||
661 | int nr_cleared_tags; | ||
662 | |||
663 | height = root->height; | ||
664 | if (index > radix_tree_maxindex(height)) | ||
665 | goto out; | ||
666 | |||
667 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | ||
668 | pathp->node = NULL; | ||
669 | pathp->slot = &root->rnode; | ||
670 | |||
671 | while (height > 0) { | ||
672 | int offset; | ||
673 | |||
674 | if (*pathp->slot == NULL) | ||
675 | goto out; | ||
676 | |||
677 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | ||
678 | pathp[1].offset = offset; | ||
679 | pathp[1].node = *pathp[0].slot; | ||
680 | pathp[1].slot = (struct radix_tree_node **) | ||
681 | (pathp[1].node->slots + offset); | ||
682 | pathp++; | ||
683 | shift -= RADIX_TREE_MAP_SHIFT; | ||
684 | height--; | ||
685 | } | ||
686 | |||
687 | ret = *pathp[0].slot; | ||
688 | if (ret == NULL) | ||
689 | goto out; | ||
690 | |||
691 | orig_pathp = pathp; | ||
692 | |||
693 | /* | ||
694 | * Clear all tags associated with the just-deleted item | ||
695 | */ | ||
696 | memset(tags, 0, sizeof(tags)); | ||
697 | do { | ||
698 | int tag; | ||
699 | |||
700 | nr_cleared_tags = RADIX_TREE_TAGS; | ||
701 | for (tag = 0; tag < RADIX_TREE_TAGS; tag++) { | ||
702 | int idx; | ||
703 | |||
704 | if (tags[tag]) | ||
705 | continue; | ||
706 | |||
707 | tag_clear(pathp[0].node, tag, pathp[0].offset); | ||
708 | |||
709 | for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { | ||
710 | if (pathp[0].node->tags[tag][idx]) { | ||
711 | tags[tag] = 1; | ||
712 | nr_cleared_tags--; | ||
713 | break; | ||
714 | } | ||
715 | } | ||
716 | } | ||
717 | pathp--; | ||
718 | } while (pathp[0].node && nr_cleared_tags); | ||
719 | |||
720 | pathp = orig_pathp; | ||
721 | *pathp[0].slot = NULL; | ||
722 | while (pathp[0].node && --pathp[0].node->count == 0) { | ||
723 | pathp--; | ||
724 | BUG_ON(*pathp[0].slot == NULL); | ||
725 | *pathp[0].slot = NULL; | ||
726 | radix_tree_node_free(pathp[1].node); | ||
727 | } | ||
728 | if (root->rnode == NULL) | ||
729 | root->height = 0; | ||
730 | out: | ||
731 | return ret; | ||
732 | } | ||
733 | EXPORT_SYMBOL(radix_tree_delete); | ||
734 | |||
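An illustrative pairing of deletion with cleanup (names hypothetical; assumes the stored item was kmalloc()ed by its owner):

	struct my_item *it = radix_tree_delete(&tree, index);
	if (it)			/* NULL means nothing was stored there */
		kfree(it);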
735 | /** | ||
736 | * radix_tree_tagged - test whether any items in the tree are tagged | ||
737 | * @root: radix tree root | ||
738 | * @tag: tag to test | ||
739 | */ | ||
740 | int radix_tree_tagged(struct radix_tree_root *root, int tag) | ||
741 | { | ||
742 | int idx; | ||
743 | |||
744 | if (!root->rnode) | ||
745 | return 0; | ||
746 | for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { | ||
747 | if (root->rnode->tags[tag][idx]) | ||
748 | return 1; | ||
749 | } | ||
750 | return 0; | ||
751 | } | ||
752 | EXPORT_SYMBOL(radix_tree_tagged); | ||
753 | |||
754 | static void | ||
755 | radix_tree_node_ctor(void *node, kmem_cache_t *cachep, unsigned long flags) | ||
756 | { | ||
757 | memset(node, 0, sizeof(struct radix_tree_node)); | ||
758 | } | ||
759 | |||
760 | static __init unsigned long __maxindex(unsigned int height) | ||
761 | { | ||
762 | unsigned int tmp = height * RADIX_TREE_MAP_SHIFT; | ||
763 | unsigned long index = (~0UL >> (RADIX_TREE_INDEX_BITS - tmp - 1)) >> 1; | ||
764 | |||
765 | if (tmp >= RADIX_TREE_INDEX_BITS) | ||
766 | index = ~0UL; | ||
767 | return index; | ||
768 | } | ||
769 | |||
770 | static __init void radix_tree_init_maxindex(void) | ||
771 | { | ||
772 | unsigned int i; | ||
773 | |||
774 | for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++) | ||
775 | height_to_maxindex[i] = __maxindex(i); | ||
776 | } | ||
777 | |||
778 | #ifdef CONFIG_HOTPLUG_CPU | ||
779 | static int radix_tree_callback(struct notifier_block *nfb, | ||
780 | unsigned long action, | ||
781 | void *hcpu) | ||
782 | { | ||
783 | int cpu = (long)hcpu; | ||
784 | struct radix_tree_preload *rtp; | ||
785 | |||
786 | /* Free per-cpu pool of preloaded nodes */ | ||
787 | if (action == CPU_DEAD) { | ||
788 | rtp = &per_cpu(radix_tree_preloads, cpu); | ||
789 | while (rtp->nr) { | ||
790 | kmem_cache_free(radix_tree_node_cachep, | ||
791 | rtp->nodes[rtp->nr-1]); | ||
792 | rtp->nodes[rtp->nr-1] = NULL; | ||
793 | rtp->nr--; | ||
794 | } | ||
795 | } | ||
796 | return NOTIFY_OK; | ||
797 | } | ||
798 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
799 | |||
800 | void __init radix_tree_init(void) | ||
801 | { | ||
802 | radix_tree_node_cachep = kmem_cache_create("radix_tree_node", | ||
803 | sizeof(struct radix_tree_node), 0, | ||
804 | SLAB_PANIC, radix_tree_node_ctor, NULL); | ||
805 | radix_tree_init_maxindex(); | ||
806 | hotcpu_notifier(radix_tree_callback, 0); | ||
807 | } | ||
diff --git a/lib/rbtree.c b/lib/rbtree.c new file mode 100644 index 000000000000..14b791ac5089 --- /dev/null +++ b/lib/rbtree.c | |||
@@ -0,0 +1,394 @@ | |||
1 | /* | ||
2 | Red Black Trees | ||
3 | (C) 1999 Andrea Arcangeli <andrea@suse.de> | ||
4 | (C) 2002 David Woodhouse <dwmw2@infradead.org> | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify | ||
7 | it under the terms of the GNU General Public License as published by | ||
8 | the Free Software Foundation; either version 2 of the License, or | ||
9 | (at your option) any later version. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, | ||
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | GNU General Public License for more details. | ||
15 | |||
16 | You should have received a copy of the GNU General Public License | ||
17 | along with this program; if not, write to the Free Software | ||
18 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | |||
20 | linux/lib/rbtree.c | ||
21 | */ | ||
22 | |||
23 | #include <linux/rbtree.h> | ||
24 | #include <linux/module.h> | ||
25 | |||
26 | static void __rb_rotate_left(struct rb_node *node, struct rb_root *root) | ||
27 | { | ||
28 | struct rb_node *right = node->rb_right; | ||
29 | |||
30 | if ((node->rb_right = right->rb_left)) | ||
31 | right->rb_left->rb_parent = node; | ||
32 | right->rb_left = node; | ||
33 | |||
34 | if ((right->rb_parent = node->rb_parent)) | ||
35 | { | ||
36 | if (node == node->rb_parent->rb_left) | ||
37 | node->rb_parent->rb_left = right; | ||
38 | else | ||
39 | node->rb_parent->rb_right = right; | ||
40 | } | ||
41 | else | ||
42 | root->rb_node = right; | ||
43 | node->rb_parent = right; | ||
44 | } | ||
45 | |||
46 | static void __rb_rotate_right(struct rb_node *node, struct rb_root *root) | ||
47 | { | ||
48 | struct rb_node *left = node->rb_left; | ||
49 | |||
50 | if ((node->rb_left = left->rb_right)) | ||
51 | left->rb_right->rb_parent = node; | ||
52 | left->rb_right = node; | ||
53 | |||
54 | if ((left->rb_parent = node->rb_parent)) | ||
55 | { | ||
56 | if (node == node->rb_parent->rb_right) | ||
57 | node->rb_parent->rb_right = left; | ||
58 | else | ||
59 | node->rb_parent->rb_left = left; | ||
60 | } | ||
61 | else | ||
62 | root->rb_node = left; | ||
63 | node->rb_parent = left; | ||
64 | } | ||
65 | |||
66 | void rb_insert_color(struct rb_node *node, struct rb_root *root) | ||
67 | { | ||
68 | struct rb_node *parent, *gparent; | ||
69 | |||
70 | while ((parent = node->rb_parent) && parent->rb_color == RB_RED) | ||
71 | { | ||
72 | gparent = parent->rb_parent; | ||
73 | |||
74 | if (parent == gparent->rb_left) | ||
75 | { | ||
76 | { | ||
77 | register struct rb_node *uncle = gparent->rb_right; | ||
78 | if (uncle && uncle->rb_color == RB_RED) | ||
79 | { | ||
80 | uncle->rb_color = RB_BLACK; | ||
81 | parent->rb_color = RB_BLACK; | ||
82 | gparent->rb_color = RB_RED; | ||
83 | node = gparent; | ||
84 | continue; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | if (parent->rb_right == node) | ||
89 | { | ||
90 | register struct rb_node *tmp; | ||
91 | __rb_rotate_left(parent, root); | ||
92 | tmp = parent; | ||
93 | parent = node; | ||
94 | node = tmp; | ||
95 | } | ||
96 | |||
97 | parent->rb_color = RB_BLACK; | ||
98 | gparent->rb_color = RB_RED; | ||
99 | __rb_rotate_right(gparent, root); | ||
100 | } else { | ||
101 | { | ||
102 | register struct rb_node *uncle = gparent->rb_left; | ||
103 | if (uncle && uncle->rb_color == RB_RED) | ||
104 | { | ||
105 | uncle->rb_color = RB_BLACK; | ||
106 | parent->rb_color = RB_BLACK; | ||
107 | gparent->rb_color = RB_RED; | ||
108 | node = gparent; | ||
109 | continue; | ||
110 | } | ||
111 | } | ||
112 | |||
113 | if (parent->rb_left == node) | ||
114 | { | ||
115 | register struct rb_node *tmp; | ||
116 | __rb_rotate_right(parent, root); | ||
117 | tmp = parent; | ||
118 | parent = node; | ||
119 | node = tmp; | ||
120 | } | ||
121 | |||
122 | parent->rb_color = RB_BLACK; | ||
123 | gparent->rb_color = RB_RED; | ||
124 | __rb_rotate_left(gparent, root); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | root->rb_node->rb_color = RB_BLACK; | ||
129 | } | ||
130 | EXPORT_SYMBOL(rb_insert_color); | ||
131 | |||
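The library supplies only the rebalancing half; every user open-codes the ordered descent. A minimal sketch of the canonical pattern, assuming rb_link_node() and rb_entry() from <linux/rbtree.h> and a hypothetical keyed container:

	struct mytype {			/* hypothetical container */
		struct rb_node node;
		unsigned long key;
	};

	static int my_insert(struct rb_root *root, struct mytype *new)
	{
		struct rb_node **link = &root->rb_node, *parent = NULL;

		while (*link) {
			struct mytype *this = rb_entry(*link, struct mytype, node);

			parent = *link;
			if (new->key < this->key)
				link = &(*link)->rb_left;
			else if (new->key > this->key)
				link = &(*link)->rb_right;
			else
				return -EEXIST;		/* duplicate key */
		}
		rb_link_node(&new->node, parent, link);	/* attach as red leaf */
		rb_insert_color(&new->node, root);	/* recolor / rotate */
		return 0;
	}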
132 | static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, | ||
133 | struct rb_root *root) | ||
134 | { | ||
135 | struct rb_node *other; | ||
136 | |||
137 | while ((!node || node->rb_color == RB_BLACK) && node != root->rb_node) | ||
138 | { | ||
139 | if (parent->rb_left == node) | ||
140 | { | ||
141 | other = parent->rb_right; | ||
142 | if (other->rb_color == RB_RED) | ||
143 | { | ||
144 | other->rb_color = RB_BLACK; | ||
145 | parent->rb_color = RB_RED; | ||
146 | __rb_rotate_left(parent, root); | ||
147 | other = parent->rb_right; | ||
148 | } | ||
149 | if ((!other->rb_left || | ||
150 | other->rb_left->rb_color == RB_BLACK) | ||
151 | && (!other->rb_right || | ||
152 | other->rb_right->rb_color == RB_BLACK)) | ||
153 | { | ||
154 | other->rb_color = RB_RED; | ||
155 | node = parent; | ||
156 | parent = node->rb_parent; | ||
157 | } | ||
158 | else | ||
159 | { | ||
160 | if (!other->rb_right || | ||
161 | other->rb_right->rb_color == RB_BLACK) | ||
162 | { | ||
163 | register struct rb_node *o_left; | ||
164 | if ((o_left = other->rb_left)) | ||
165 | o_left->rb_color = RB_BLACK; | ||
166 | other->rb_color = RB_RED; | ||
167 | __rb_rotate_right(other, root); | ||
168 | other = parent->rb_right; | ||
169 | } | ||
170 | other->rb_color = parent->rb_color; | ||
171 | parent->rb_color = RB_BLACK; | ||
172 | if (other->rb_right) | ||
173 | other->rb_right->rb_color = RB_BLACK; | ||
174 | __rb_rotate_left(parent, root); | ||
175 | node = root->rb_node; | ||
176 | break; | ||
177 | } | ||
178 | } | ||
179 | else | ||
180 | { | ||
181 | other = parent->rb_left; | ||
182 | if (other->rb_color == RB_RED) | ||
183 | { | ||
184 | other->rb_color = RB_BLACK; | ||
185 | parent->rb_color = RB_RED; | ||
186 | __rb_rotate_right(parent, root); | ||
187 | other = parent->rb_left; | ||
188 | } | ||
189 | if ((!other->rb_left || | ||
190 | other->rb_left->rb_color == RB_BLACK) | ||
191 | && (!other->rb_right || | ||
192 | other->rb_right->rb_color == RB_BLACK)) | ||
193 | { | ||
194 | other->rb_color = RB_RED; | ||
195 | node = parent; | ||
196 | parent = node->rb_parent; | ||
197 | } | ||
198 | else | ||
199 | { | ||
200 | if (!other->rb_left || | ||
201 | other->rb_left->rb_color == RB_BLACK) | ||
202 | { | ||
203 | register struct rb_node *o_right; | ||
204 | if ((o_right = other->rb_right)) | ||
205 | o_right->rb_color = RB_BLACK; | ||
206 | other->rb_color = RB_RED; | ||
207 | __rb_rotate_left(other, root); | ||
208 | other = parent->rb_left; | ||
209 | } | ||
210 | other->rb_color = parent->rb_color; | ||
211 | parent->rb_color = RB_BLACK; | ||
212 | if (other->rb_left) | ||
213 | other->rb_left->rb_color = RB_BLACK; | ||
214 | __rb_rotate_right(parent, root); | ||
215 | node = root->rb_node; | ||
216 | break; | ||
217 | } | ||
218 | } | ||
219 | } | ||
220 | if (node) | ||
221 | node->rb_color = RB_BLACK; | ||
222 | } | ||
223 | |||
224 | void rb_erase(struct rb_node *node, struct rb_root *root) | ||
225 | { | ||
226 | struct rb_node *child, *parent; | ||
227 | int color; | ||
228 | |||
229 | if (!node->rb_left) | ||
230 | child = node->rb_right; | ||
231 | else if (!node->rb_right) | ||
232 | child = node->rb_left; | ||
233 | else | ||
234 | { | ||
235 | struct rb_node *old = node, *left; | ||
236 | |||
237 | node = node->rb_right; | ||
238 | while ((left = node->rb_left) != NULL) | ||
239 | node = left; | ||
240 | child = node->rb_right; | ||
241 | parent = node->rb_parent; | ||
242 | color = node->rb_color; | ||
243 | |||
244 | if (child) | ||
245 | child->rb_parent = parent; | ||
246 | if (parent) | ||
247 | { | ||
248 | if (parent->rb_left == node) | ||
249 | parent->rb_left = child; | ||
250 | else | ||
251 | parent->rb_right = child; | ||
252 | } | ||
253 | else | ||
254 | root->rb_node = child; | ||
255 | |||
256 | if (node->rb_parent == old) | ||
257 | parent = node; | ||
258 | node->rb_parent = old->rb_parent; | ||
259 | node->rb_color = old->rb_color; | ||
260 | node->rb_right = old->rb_right; | ||
261 | node->rb_left = old->rb_left; | ||
262 | |||
263 | if (old->rb_parent) | ||
264 | { | ||
265 | if (old->rb_parent->rb_left == old) | ||
266 | old->rb_parent->rb_left = node; | ||
267 | else | ||
268 | old->rb_parent->rb_right = node; | ||
269 | } else | ||
270 | root->rb_node = node; | ||
271 | |||
272 | old->rb_left->rb_parent = node; | ||
273 | if (old->rb_right) | ||
274 | old->rb_right->rb_parent = node; | ||
275 | goto color; | ||
276 | } | ||
277 | |||
278 | parent = node->rb_parent; | ||
279 | color = node->rb_color; | ||
280 | |||
281 | if (child) | ||
282 | child->rb_parent = parent; | ||
283 | if (parent) | ||
284 | { | ||
285 | if (parent->rb_left == node) | ||
286 | parent->rb_left = child; | ||
287 | else | ||
288 | parent->rb_right = child; | ||
289 | } | ||
290 | else | ||
291 | root->rb_node = child; | ||
292 | |||
293 | color: | ||
294 | if (color == RB_BLACK) | ||
295 | __rb_erase_color(child, parent, root); | ||
296 | } | ||
297 | EXPORT_SYMBOL(rb_erase); | ||
298 | |||
299 | /* | ||
300 | * This function returns the first node (in sort order) of the tree. | ||
301 | */ | ||
302 | struct rb_node *rb_first(struct rb_root *root) | ||
303 | { | ||
304 | struct rb_node *n; | ||
305 | |||
306 | n = root->rb_node; | ||
307 | if (!n) | ||
308 | return NULL; | ||
309 | while (n->rb_left) | ||
310 | n = n->rb_left; | ||
311 | return n; | ||
312 | } | ||
313 | EXPORT_SYMBOL(rb_first); | ||
314 | |||
315 | struct rb_node *rb_last(struct rb_root *root) | ||
316 | { | ||
317 | struct rb_node *n; | ||
318 | |||
319 | n = root->rb_node; | ||
320 | if (!n) | ||
321 | return NULL; | ||
322 | while (n->rb_right) | ||
323 | n = n->rb_right; | ||
324 | return n; | ||
325 | } | ||
326 | EXPORT_SYMBOL(rb_last); | ||
327 | |||
328 | struct rb_node *rb_next(struct rb_node *node) | ||
329 | { | ||
330 | /* If we have a right-hand child, go down and then left as far | ||
331 | as we can. */ | ||
332 | if (node->rb_right) { | ||
333 | node = node->rb_right; | ||
334 | while (node->rb_left) | ||
335 | node = node->rb_left; | ||
336 | return node; | ||
337 | } | ||
338 | |||
339 | /* No right-hand children. Everything down and left is | ||
340 | smaller than us, so any 'next' node must be in the general | ||
341 | direction of our parent. Go up the tree; any time the | ||
342 | ancestor is a right-hand child of its parent, keep going | ||
343 | up. First time it's a left-hand child of its parent, said | ||
344 | parent is our 'next' node. */ | ||
345 | while (node->rb_parent && node == node->rb_parent->rb_right) | ||
346 | node = node->rb_parent; | ||
347 | |||
348 | return node->rb_parent; | ||
349 | } | ||
350 | EXPORT_SYMBOL(rb_next); | ||
351 | |||
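Together rb_first() and rb_next() give an in-order walk; an illustrative loop over the hypothetical struct mytype from the insertion sketch above:

	struct rb_node *n;

	for (n = rb_first(root); n; n = rb_next(n)) {
		struct mytype *entry = rb_entry(n, struct mytype, node);
		/* entries arrive in ascending ->key order */
	}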
352 | struct rb_node *rb_prev(struct rb_node *node) | ||
353 | { | ||
354 | /* If we have a left-hand child, go down and then right as far | ||
355 | as we can. */ | ||
356 | if (node->rb_left) { | ||
357 | node = node->rb_left; | ||
358 | while (node->rb_right) | ||
359 | node = node->rb_right; | ||
360 | return node; | ||
361 | } | ||
362 | |||
363 | /* No left-hand children. Go up till we find an ancestor which | ||
364 | is a right-hand child of its parent */ | ||
365 | while (node->rb_parent && node == node->rb_parent->rb_left) | ||
366 | node = node->rb_parent; | ||
367 | |||
368 | return node->rb_parent; | ||
369 | } | ||
370 | EXPORT_SYMBOL(rb_prev); | ||
371 | |||
372 | void rb_replace_node(struct rb_node *victim, struct rb_node *new, | ||
373 | struct rb_root *root) | ||
374 | { | ||
375 | struct rb_node *parent = victim->rb_parent; | ||
376 | |||
377 | /* Set the surrounding nodes to point to the replacement */ | ||
378 | if (parent) { | ||
379 | if (victim == parent->rb_left) | ||
380 | parent->rb_left = new; | ||
381 | else | ||
382 | parent->rb_right = new; | ||
383 | } else { | ||
384 | root->rb_node = new; | ||
385 | } | ||
386 | if (victim->rb_left) | ||
387 | victim->rb_left->rb_parent = new; | ||
388 | if (victim->rb_right) | ||
389 | victim->rb_right->rb_parent = new; | ||
390 | |||
391 | /* Copy the pointers/colour from the victim to the replacement */ | ||
392 | *new = *victim; | ||
393 | } | ||
394 | EXPORT_SYMBOL(rb_replace_node); | ||
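Note that rb_replace_node() does no rebalancing, so it is only safe when the replacement sorts exactly where the victim did; an illustrative use with the hypothetical container above:

	/* new must keep the victim's position in the sort order */
	new->key = victim->key;
	rb_replace_node(&victim->node, &new->node, root);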
diff --git a/lib/reed_solomon/Makefile b/lib/reed_solomon/Makefile new file mode 100644 index 000000000000..747a2de29346 --- /dev/null +++ b/lib/reed_solomon/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | # | ||
2 | # This is a modified version of the Reed-Solomon library. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_REED_SOLOMON) += reed_solomon.o | ||
6 | |||
diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c new file mode 100644 index 000000000000..d401decd6289 --- /dev/null +++ b/lib/reed_solomon/decode_rs.c | |||
@@ -0,0 +1,272 @@ | |||
1 | /* | ||
2 | * lib/reed_solomon/decode_rs.c | ||
3 | * | ||
4 | * Overview: | ||
5 | * Generic Reed Solomon encoder / decoder library | ||
6 | * | ||
7 | * Copyright 2002, Phil Karn, KA9Q | ||
8 | * May be used under the terms of the GNU General Public License (GPL) | ||
9 | * | ||
10 | * Adaptation to the kernel by Thomas Gleixner (tglx@linutronix.de) | ||
11 | * | ||
12 | * $Id: decode_rs.c,v 1.6 2004/10/22 15:41:47 gleixner Exp $ | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | /* Generic, data-width-independent code which is included by the | ||
17 | * wrappers. | ||
18 | */ | ||
19 | { | ||
20 | int deg_lambda, el, deg_omega; | ||
21 | int i, j, r, k, pad; | ||
22 | int nn = rs->nn; | ||
23 | int nroots = rs->nroots; | ||
24 | int fcr = rs->fcr; | ||
25 | int prim = rs->prim; | ||
26 | int iprim = rs->iprim; | ||
27 | uint16_t *alpha_to = rs->alpha_to; | ||
28 | uint16_t *index_of = rs->index_of; | ||
29 | uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error; | ||
30 | /* Err+Eras Locator poly and syndrome poly. The maximum value | ||
31 | * of nroots is 8, so the necessary stack size will be about | ||
32 | * 220 bytes max. | ||
33 | */ | ||
34 | uint16_t lambda[nroots + 1], syn[nroots]; | ||
35 | uint16_t b[nroots + 1], t[nroots + 1], omega[nroots + 1]; | ||
36 | uint16_t root[nroots], reg[nroots + 1], loc[nroots]; | ||
37 | int count = 0; | ||
38 | uint16_t msk = (uint16_t) rs->nn; | ||
39 | |||
40 | /* Check length parameter for validity */ | ||
41 | pad = nn - nroots - len; | ||
42 | if (pad < 0 || pad >= nn) | ||
43 | return -ERANGE; | ||
44 | |||
45 | /* Does the caller provide the syndrome? */ | ||
46 | if (s != NULL) | ||
47 | goto decode; | ||
48 | |||
49 | /* form the syndromes; i.e., evaluate data(x) at roots of | ||
50 | * g(x) */ | ||
51 | for (i = 0; i < nroots; i++) | ||
52 | syn[i] = (((uint16_t) data[0]) ^ invmsk) & msk; | ||
53 | |||
54 | for (j = 1; j < len; j++) { | ||
55 | for (i = 0; i < nroots; i++) { | ||
56 | if (syn[i] == 0) { | ||
57 | syn[i] = (((uint16_t) data[j]) ^ | ||
58 | invmsk) & msk; | ||
59 | } else { | ||
60 | syn[i] = ((((uint16_t) data[j]) ^ | ||
61 | invmsk) & msk) ^ | ||
62 | alpha_to[rs_modnn(rs, index_of[syn[i]] + | ||
63 | (fcr + i) * prim)]; | ||
64 | } | ||
65 | } | ||
66 | } | ||
67 | |||
68 | for (j = 0; j < nroots; j++) { | ||
69 | for (i = 0; i < nroots; i++) { | ||
70 | if (syn[i] == 0) { | ||
71 | syn[i] = ((uint16_t) par[j]) & msk; | ||
72 | } else { | ||
73 | syn[i] = (((uint16_t) par[j]) & msk) ^ | ||
74 | alpha_to[rs_modnn(rs, index_of[syn[i]] + | ||
75 | (fcr+i)*prim)]; | ||
76 | } | ||
77 | } | ||
78 | } | ||
79 | s = syn; | ||
80 | |||
81 | /* Convert syndromes to index form, checking for nonzero condition */ | ||
82 | syn_error = 0; | ||
83 | for (i = 0; i < nroots; i++) { | ||
84 | syn_error |= s[i]; | ||
85 | s[i] = index_of[s[i]]; | ||
86 | } | ||
87 | |||
88 | if (!syn_error) { | ||
89 | /* if syndrome is zero, data[] is a codeword and there are no | ||
90 | * errors to correct. So return data[] unmodified | ||
91 | */ | ||
92 | count = 0; | ||
93 | goto finish; | ||
94 | } | ||
95 | |||
96 | decode: | ||
97 | memset(&lambda[1], 0, nroots * sizeof(lambda[0])); | ||
98 | lambda[0] = 1; | ||
99 | |||
100 | if (no_eras > 0) { | ||
101 | /* Init lambda to be the erasure locator polynomial */ | ||
102 | lambda[1] = alpha_to[rs_modnn(rs, | ||
103 | prim * (nn - 1 - eras_pos[0]))]; | ||
104 | for (i = 1; i < no_eras; i++) { | ||
105 | u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i])); | ||
106 | for (j = i + 1; j > 0; j--) { | ||
107 | tmp = index_of[lambda[j - 1]]; | ||
108 | if (tmp != nn) { | ||
109 | lambda[j] ^= | ||
110 | alpha_to[rs_modnn(rs, u + tmp)]; | ||
111 | } | ||
112 | } | ||
113 | } | ||
114 | } | ||
115 | |||
116 | for (i = 0; i < nroots + 1; i++) | ||
117 | b[i] = index_of[lambda[i]]; | ||
118 | |||
119 | /* | ||
120 | * Begin Berlekamp-Massey algorithm to determine error+erasure | ||
121 | * locator polynomial | ||
122 | */ | ||
123 | r = no_eras; | ||
124 | el = no_eras; | ||
125 | while (++r <= nroots) { /* r is the step number */ | ||
126 | /* Compute discrepancy at the r-th step in poly-form */ | ||
127 | discr_r = 0; | ||
128 | for (i = 0; i < r; i++) { | ||
129 | if ((lambda[i] != 0) && (s[r - i - 1] != nn)) { | ||
130 | discr_r ^= | ||
131 | alpha_to[rs_modnn(rs, | ||
132 | index_of[lambda[i]] + | ||
133 | s[r - i - 1])]; | ||
134 | } | ||
135 | } | ||
136 | discr_r = index_of[discr_r]; /* Index form */ | ||
137 | if (discr_r == nn) { | ||
138 | /* 2 lines below: B(x) <-- x*B(x) */ | ||
139 | memmove (&b[1], b, nroots * sizeof (b[0])); | ||
140 | b[0] = nn; | ||
141 | } else { | ||
142 | /* 7 lines below: T(x) <-- lambda(x)-discr_r*x*b(x) */ | ||
143 | t[0] = lambda[0]; | ||
144 | for (i = 0; i < nroots; i++) { | ||
145 | if (b[i] != nn) { | ||
146 | t[i + 1] = lambda[i + 1] ^ | ||
147 | alpha_to[rs_modnn(rs, discr_r + | ||
148 | b[i])]; | ||
149 | } else | ||
150 | t[i + 1] = lambda[i + 1]; | ||
151 | } | ||
152 | if (2 * el <= r + no_eras - 1) { | ||
153 | el = r + no_eras - el; | ||
154 | /* | ||
155 | * 2 lines below: B(x) <-- inv(discr_r) * | ||
156 | * lambda(x) | ||
157 | */ | ||
158 | for (i = 0; i <= nroots; i++) { | ||
159 | b[i] = (lambda[i] == 0) ? nn : | ||
160 | rs_modnn(rs, index_of[lambda[i]] | ||
161 | - discr_r + nn); | ||
162 | } | ||
163 | } else { | ||
164 | /* 2 lines below: B(x) <-- x*B(x) */ | ||
165 | memmove(&b[1], b, nroots * sizeof(b[0])); | ||
166 | b[0] = nn; | ||
167 | } | ||
168 | memcpy(lambda, t, (nroots + 1) * sizeof(t[0])); | ||
169 | } | ||
170 | } | ||
171 | |||
172 | /* Convert lambda to index form and compute deg(lambda(x)) */ | ||
173 | deg_lambda = 0; | ||
174 | for (i = 0; i < nroots + 1; i++) { | ||
175 | lambda[i] = index_of[lambda[i]]; | ||
176 | if (lambda[i] != nn) | ||
177 | deg_lambda = i; | ||
178 | } | ||
179 | /* Find roots of error+erasure locator polynomial by Chien search */ | ||
180 | memcpy(®[1], &lambda[1], nroots * sizeof(reg[0])); | ||
181 | count = 0; /* Number of roots of lambda(x) */ | ||
182 | for (i = 1, k = iprim - 1; i <= nn; i++, k = rs_modnn(rs, k + iprim)) { | ||
183 | q = 1; /* lambda[0] is always 0 (index form of 1) */ | ||
184 | for (j = deg_lambda; j > 0; j--) { | ||
185 | if (reg[j] != nn) { | ||
186 | reg[j] = rs_modnn(rs, reg[j] + j); | ||
187 | q ^= alpha_to[reg[j]]; | ||
188 | } | ||
189 | } | ||
190 | if (q != 0) | ||
191 | continue; /* Not a root */ | ||
192 | /* store root (index-form) and error location number */ | ||
193 | root[count] = i; | ||
194 | loc[count] = k; | ||
195 | /* If we've already found max possible roots, | ||
196 | * abort the search to save time | ||
197 | */ | ||
198 | if (++count == deg_lambda) | ||
199 | break; | ||
200 | } | ||
201 | if (deg_lambda != count) { | ||
202 | /* | ||
203 | * deg(lambda) unequal to number of roots => uncorrectable | ||
204 | * error detected | ||
205 | */ | ||
206 | count = -1; | ||
207 | goto finish; | ||
208 | } | ||
209 | /* | ||
210 | * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo | ||
211 | * x**nroots), in index form. Also find deg(omega). | ||
212 | */ | ||
213 | deg_omega = deg_lambda - 1; | ||
214 | for (i = 0; i <= deg_omega; i++) { | ||
215 | tmp = 0; | ||
216 | for (j = i; j >= 0; j--) { | ||
217 | if ((s[i - j] != nn) && (lambda[j] != nn)) | ||
218 | tmp ^= | ||
219 | alpha_to[rs_modnn(rs, s[i - j] + lambda[j])]; | ||
220 | } | ||
221 | omega[i] = index_of[tmp]; | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 = | ||
226 | * inv(X(l))**(fcr-1) and den = lambda_pr(inv(X(l))) all in poly-form | ||
227 | */ | ||
228 | for (j = count - 1; j >= 0; j--) { | ||
229 | num1 = 0; | ||
230 | for (i = deg_omega; i >= 0; i--) { | ||
231 | if (omega[i] != nn) | ||
232 | num1 ^= alpha_to[rs_modnn(rs, omega[i] + | ||
233 | i * root[j])]; | ||
234 | } | ||
235 | num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)]; | ||
236 | den = 0; | ||
237 | |||
238 | /* lambda[i+1] for i even is the formal derivative | ||
239 | * lambda_pr of lambda[i] */ | ||
240 | for (i = min(deg_lambda, nroots - 1) & ~1; i >= 0; i -= 2) { | ||
241 | if (lambda[i + 1] != nn) { | ||
242 | den ^= alpha_to[rs_modnn(rs, lambda[i + 1] + | ||
243 | i * root[j])]; | ||
244 | } | ||
245 | } | ||
246 | /* Apply error to data */ | ||
247 | if (num1 != 0 && loc[j] >= pad) { | ||
248 | uint16_t cor = alpha_to[rs_modnn(rs, index_of[num1] + | ||
249 | index_of[num2] + | ||
250 | nn - index_of[den])]; | ||
251 | /* Store the error correction pattern, if a | ||
252 | * correction buffer is available */ | ||
253 | if (corr) { | ||
254 | corr[j] = cor; | ||
255 | } else { | ||
256 | /* If a data buffer is given and the | ||
257 | * error is inside the message, | ||
258 | * correct it */ | ||
259 | if (data && (loc[j] < (nn - nroots))) | ||
260 | data[loc[j] - pad] ^= cor; | ||
261 | } | ||
262 | } | ||
263 | } | ||
264 | |||
265 | finish: | ||
266 | if (eras_pos != NULL) { | ||
267 | for (i = 0; i < count; i++) | ||
268 | eras_pos[i] = loc[i] - pad; | ||
269 | } | ||
270 | return count; | ||
271 | |||
272 | } | ||
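A worked illustration of the length check shared by the encoder and decoder: pad = nn - nroots - len measures how far the code is shortened (the numbers below are examples, not from this file):

	/*
	 * symsize = 8, so nn = 255; with nroots = 8:
	 *   full-length block: len = 247 -> pad = 0
	 *   shortened block:   len = 100 -> pad = 147
	 * pad < 0 (data too long) or pad >= nn (no data) fails with -ERANGE.
	 */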
diff --git a/lib/reed_solomon/encode_rs.c b/lib/reed_solomon/encode_rs.c new file mode 100644 index 000000000000..237bf65ae886 --- /dev/null +++ b/lib/reed_solomon/encode_rs.c | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * lib/reed_solomon/encode_rs.c | ||
3 | * | ||
4 | * Overview: | ||
5 | * Generic Reed Solomon encoder / decoder library | ||
6 | * | ||
7 | * Copyright 2002, Phil Karn, KA9Q | ||
8 | * May be used under the terms of the GNU General Public License (GPL) | ||
9 | * | ||
10 | * Adaptation to the kernel by Thomas Gleixner (tglx@linutronix.de) | ||
11 | * | ||
12 | * $Id: encode_rs.c,v 1.4 2004/10/22 15:41:47 gleixner Exp $ | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | /* Generic, data-width-independent code which is included by the | ||
17 | * wrappers. | ||
18 | * int encode_rsX (struct rs_control *rs, uintX_t *data, int len, uintY_t *par) | ||
19 | */ | ||
20 | { | ||
21 | int i, j, pad; | ||
22 | int nn = rs->nn; | ||
23 | int nroots = rs->nroots; | ||
24 | uint16_t *alpha_to = rs->alpha_to; | ||
25 | uint16_t *index_of = rs->index_of; | ||
26 | uint16_t *genpoly = rs->genpoly; | ||
27 | uint16_t fb; | ||
28 | uint16_t msk = (uint16_t) rs->nn; | ||
29 | |||
30 | /* Check length parameter for validity */ | ||
31 | pad = nn - nroots - len; | ||
32 | if (pad < 0 || pad >= nn) | ||
33 | return -ERANGE; | ||
34 | |||
35 | for (i = 0; i < len; i++) { | ||
36 | fb = index_of[((((uint16_t) data[i])^invmsk) & msk) ^ par[0]]; | ||
37 | /* feedback term is non-zero */ | ||
38 | if (fb != nn) { | ||
39 | for (j = 1; j < nroots; j++) { | ||
40 | par[j] ^= alpha_to[rs_modnn(rs, fb + | ||
41 | genpoly[nroots - j])]; | ||
42 | } | ||
43 | } | ||
44 | /* Shift */ | ||
45 | memmove(&par[0], &par[1], sizeof(uint16_t) * (nroots - 1)); | ||
46 | if (fb != nn) { | ||
47 | par[nroots - 1] = alpha_to[rs_modnn(rs, | ||
48 | fb + genpoly[0])]; | ||
49 | } else { | ||
50 | par[nroots - 1] = 0; | ||
51 | } | ||
52 | } | ||
53 | return 0; | ||
54 | } | ||
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c new file mode 100644 index 000000000000..6604e3b1940c --- /dev/null +++ b/lib/reed_solomon/reed_solomon.c | |||
@@ -0,0 +1,335 @@ | |||
1 | /* | ||
2 | * lib/reed_solomon/rslib.c | ||
3 | * | ||
4 | * Overview: | ||
5 | * Generic Reed Solomon encoder / decoder library | ||
6 | * | ||
7 | * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de) | ||
8 | * | ||
9 | * Reed Solomon code lifted from reed solomon library written by Phil Karn | ||
10 | * Copyright 2002 Phil Karn, KA9Q | ||
11 | * | ||
12 | * $Id: rslib.c,v 1.5 2004/10/22 15:41:47 gleixner Exp $ | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License version 2 as | ||
16 | * published by the Free Software Foundation. | ||
17 | * | ||
18 | * Description: | ||
19 | * | ||
20 | * The generic Reed Solomon library provides runtime configurable | ||
21 | * encoding / decoding of RS codes. | ||
22 | * Each user must call init_rs to get a pointer to an rs_control | ||
23 | * structure for the given rs parameters. This structure is either | ||
24 | * generated, or an already available matching control structure is used. | ||
25 | * If a structure is generated then the polynomial arrays for | ||
26 | * fast encoding / decoding are built. This can take some time, so | ||
27 | * make sure not to call this function from a time-critical path. | ||
28 | * Usually a module / driver should initialize the necessary | ||
29 | * rs_control structure on module / driver init and release it | ||
30 | * on exit. | ||
31 | * The encoding puts the calculated parity into a given parity | ||
32 | * buffer. | ||
33 | * The decoding is a two step process. The first step calculates | ||
34 | * the syndrome over the received (data + parity) and calls the | ||
35 | * second stage, which does the decoding / error correction itself. | ||
36 | * Many hw encoders provide a syndrome calculation over the received | ||
37 | * data + parity and can call the second stage directly. | ||
38 | * | ||
39 | */ | ||
40 | |||
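A hedged end-to-end sketch of the lifecycle described above (illustrative: the GF(256) polynomial 0x11d, the choice of 8 parity symbols, and the my_* names are assumptions, not taken from this file):

	static struct rs_control *rs_decoder;

	static int __init my_init(void)
	{
		/* symsize 8, gfpoly x^8+x^4+x^3+x^2+1, fcr 0, prim 1, 8 roots */
		rs_decoder = init_rs(8, 0x11d, 0, 1, 8);
		return rs_decoder ? 0 : -ENOMEM;
	}

	static void my_encode(uint8_t *data, int len, uint16_t *par)
	{
		/* parity buffer must be zeroed by the caller */
		memset(par, 0, 8 * sizeof(uint16_t));
		encode_rs8(rs_decoder, data, len, par, 0);
	}

	static int my_decode(uint8_t *data, int len, uint16_t *par)
	{
		/* no caller-supplied syndrome, no erasures, no correction
		 * buffer; returns corrected symbol count, -1 or -ERANGE */
		return decode_rs8(rs_decoder, data, par, len,
				  NULL, 0, NULL, 0, NULL);
	}

	static void __exit my_exit(void)
	{
		free_rs(rs_decoder);
	}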
41 | #include <linux/errno.h> | ||
42 | #include <linux/kernel.h> | ||
43 | #include <linux/init.h> | ||
44 | #include <linux/module.h> | ||
45 | #include <linux/rslib.h> | ||
46 | #include <linux/slab.h> | ||
47 | #include <asm/semaphore.h> | ||
48 | |||
49 | /* This list holds all currently allocated rs control structures */ | ||
50 | static LIST_HEAD (rslist); | ||
51 | /* Protection for the list */ | ||
52 | static DECLARE_MUTEX(rslistlock); | ||
53 | |||
54 | /** | ||
55 | * rs_init - Initialize a Reed-Solomon codec | ||
56 | * | ||
57 | * @symsize: symbol size, bits (1-8) | ||
58 | * @gfpoly: Field generator polynomial coefficients | ||
59 | * @fcr: first root of RS code generator polynomial, index form | ||
60 | * @prim: primitive element to generate polynomial roots | ||
61 | * @nroots: RS code generator polynomial degree (number of roots) | ||
62 | * | ||
63 | * Allocate a control structure and the polynomial arrays for faster | ||
64 | * en/decoding. Fill the arrays according to the given parameters. | ||
65 | */ | ||
66 | static struct rs_control *rs_init(int symsize, int gfpoly, int fcr, | ||
67 | int prim, int nroots) | ||
68 | { | ||
69 | struct rs_control *rs; | ||
70 | int i, j, sr, root, iprim; | ||
71 | |||
72 | /* Allocate the control structure */ | ||
73 | rs = kmalloc(sizeof (struct rs_control), GFP_KERNEL); | ||
74 | if (rs == NULL) | ||
75 | return NULL; | ||
76 | |||
77 | INIT_LIST_HEAD(&rs->list); | ||
78 | |||
79 | rs->mm = symsize; | ||
80 | rs->nn = (1 << symsize) - 1; | ||
81 | rs->fcr = fcr; | ||
82 | rs->prim = prim; | ||
83 | rs->nroots = nroots; | ||
84 | rs->gfpoly = gfpoly; | ||
85 | |||
86 | /* Allocate the arrays */ | ||
87 | rs->alpha_to = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL); | ||
88 | if (rs->alpha_to == NULL) | ||
89 | goto errrs; | ||
90 | |||
91 | rs->index_of = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL); | ||
92 | if (rs->index_of == NULL) | ||
93 | goto erralp; | ||
94 | |||
95 | rs->genpoly = kmalloc(sizeof(uint16_t) * (rs->nroots + 1), GFP_KERNEL); | ||
96 | if(rs->genpoly == NULL) | ||
97 | goto erridx; | ||
98 | |||
99 | /* Generate Galois field lookup tables */ | ||
100 | rs->index_of[0] = rs->nn; /* log(zero) = -inf */ | ||
101 | rs->alpha_to[rs->nn] = 0; /* alpha**-inf = 0 */ | ||
102 | sr = 1; | ||
103 | for (i = 0; i < rs->nn; i++) { | ||
104 | rs->index_of[sr] = i; | ||
105 | rs->alpha_to[i] = sr; | ||
106 | sr <<= 1; | ||
107 | if (sr & (1 << symsize)) | ||
108 | sr ^= gfpoly; | ||
109 | sr &= rs->nn; | ||
110 | } | ||
111 | /* If it's not primitive, exit */ | ||
112 | if(sr != 1) | ||
113 | goto errpol; | ||
114 | |||
115 | /* Find prim-th root of 1, used in decoding */ | ||
116 | for(iprim = 1; (iprim % prim) != 0; iprim += rs->nn); | ||
117 | /* prim-th root of 1, index form */ | ||
118 | rs->iprim = iprim / prim; | ||
119 | |||
120 | /* Form RS code generator polynomial from its roots */ | ||
121 | rs->genpoly[0] = 1; | ||
122 | for (i = 0, root = fcr * prim; i < nroots; i++, root += prim) { | ||
123 | rs->genpoly[i + 1] = 1; | ||
124 | /* Multiply rs->genpoly[] by @**(root + x) */ | ||
125 | for (j = i; j > 0; j--) { | ||
126 | if (rs->genpoly[j] != 0) { | ||
127 | rs->genpoly[j] = rs->genpoly[j -1] ^ | ||
128 | rs->alpha_to[rs_modnn(rs, | ||
129 | rs->index_of[rs->genpoly[j]] + root)]; | ||
130 | } else | ||
131 | rs->genpoly[j] = rs->genpoly[j - 1]; | ||
132 | } | ||
133 | /* rs->genpoly[0] can never be zero */ | ||
134 | rs->genpoly[0] = | ||
135 | rs->alpha_to[rs_modnn(rs, | ||
136 | rs->index_of[rs->genpoly[0]] + root)]; | ||
137 | } | ||
138 | /* convert rs->genpoly[] to index form for quicker encoding */ | ||
139 | for (i = 0; i <= nroots; i++) | ||
140 | rs->genpoly[i] = rs->index_of[rs->genpoly[i]]; | ||
141 | return rs; | ||
142 | |||
143 | /* Error exit */ | ||
144 | errpol: | ||
145 | kfree(rs->genpoly); | ||
146 | erridx: | ||
147 | kfree(rs->index_of); | ||
148 | erralp: | ||
149 | kfree(rs->alpha_to); | ||
150 | errrs: | ||
151 | kfree(rs); | ||
152 | return NULL; | ||
153 | } | ||
154 | |||
155 | |||
156 | /** | ||
157 | * free_rs - Free the rs control structure, if it is no longer used | ||
158 | * | ||
159 | * @rs: the control structure which is no longer used by the | ||
160 | * caller | ||
161 | */ | ||
162 | void free_rs(struct rs_control *rs) | ||
163 | { | ||
164 | down(&rslistlock); | ||
165 | rs->users--; | ||
166 | if(!rs->users) { | ||
167 | list_del(&rs->list); | ||
168 | kfree(rs->alpha_to); | ||
169 | kfree(rs->index_of); | ||
170 | kfree(rs->genpoly); | ||
171 | kfree(rs); | ||
172 | } | ||
173 | up(&rslistlock); | ||
174 | } | ||
175 | |||
176 | /** | ||
177 | * init_rs - Find a matching or allocate a new rs control structure | ||
178 | * | ||
179 | * @symsize: the symbol size (number of bits) | ||
180 | * @gfpoly: the extended Galois field generator polynomial coefficients, | ||
181 | * with the 0th coefficient in the low order bit. The polynomial | ||
182 | * must be primitive; | ||
183 | * @fcr: the first consecutive root of the rs code generator polynomial | ||
184 | * in index form | ||
185 | * @prim: primitive element to generate polynomial roots | ||
186 | * @nroots: RS code generator polynomial degree (number of roots) | ||
187 | */ | ||
188 | struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, | ||
189 | int nroots) | ||
190 | { | ||
191 | struct list_head *tmp; | ||
192 | struct rs_control *rs; | ||
193 | |||
194 | /* Sanity checks */ | ||
195 | if (symsize < 1) | ||
196 | return NULL; | ||
197 | if (fcr < 0 || fcr >= (1<<symsize)) | ||
198 | return NULL; | ||
199 | if (prim <= 0 || prim >= (1<<symsize)) | ||
200 | return NULL; | ||
201 | if (nroots < 0 || nroots >= (1<<symsize) || nroots > 8) | ||
202 | return NULL; | ||
203 | |||
204 | down(&rslistlock); | ||
205 | |||
206 | /* Walk through the list and look for a matching entry */ | ||
207 | list_for_each(tmp, &rslist) { | ||
208 | rs = list_entry(tmp, struct rs_control, list); | ||
209 | if (symsize != rs->mm) | ||
210 | continue; | ||
211 | if (gfpoly != rs->gfpoly) | ||
212 | continue; | ||
213 | if (fcr != rs->fcr) | ||
214 | continue; | ||
215 | if (prim != rs->prim) | ||
216 | continue; | ||
217 | if (nroots != rs->nroots) | ||
218 | continue; | ||
219 | /* We have a matching one already */ | ||
220 | rs->users++; | ||
221 | goto out; | ||
222 | } | ||
223 | |||
224 | /* Create a new one */ | ||
225 | rs = rs_init(symsize, gfpoly, fcr, prim, nroots); | ||
226 | if (rs) { | ||
227 | rs->users = 1; | ||
228 | list_add(&rs->list, &rslist); | ||
229 | } | ||
230 | out: | ||
231 | up(&rslistlock); | ||
232 | return rs; | ||
233 | } | ||
234 | |||
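A hedged usage sketch (editorial, not part of this patch): a driver typically obtains a shared rs_control once at init time and drops its reference on exit. The field polynomial 0x11d over GF(2^8), the four parity symbols, and the my_ecc_* names are illustrative, not requirements of the API.

#include <linux/init.h>
#include <linux/rslib.h>

static struct rs_control *rs_decoder;

static int __init my_ecc_init(void)	/* hypothetical driver hook */
{
	/* 8-bit symbols, GF(2^8) poly 0x11d, fcr 0, prim 1, 4 roots */
	rs_decoder = init_rs(8, 0x11d, 0, 1, 4);
	return rs_decoder ? 0 : -ENOMEM;
}

static void __exit my_ecc_exit(void)
{
	free_rs(rs_decoder);
}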
235 | #ifdef CONFIG_REED_SOLOMON_ENC8 | ||
236 | /** | ||
237 | * encode_rs8 - Calculate the parity for data values (8bit data width) | ||
238 | * | ||
239 | * @rs: the rs control structure | ||
240 | * @data: data field of a given type | ||
241 | * @len: data length | ||
242 | * @par: parity data, must be initialized by caller (usually all 0) | ||
243 | * @invmsk: invert data mask (will be xored on data) | ||
244 | * | ||
245 | * The parity uses a uint16_t data type to enable | ||
246 | * symbol sizes > 8. The calling code itself must take care of | ||
247 | * encoding the syndrome result for storage. | ||
248 | */ | ||
249 | int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par, | ||
250 | uint16_t invmsk) | ||
251 | { | ||
252 | #include "encode_rs.c" | ||
253 | } | ||
254 | EXPORT_SYMBOL_GPL(encode_rs8); | ||
255 | #endif | ||
256 | |||
257 | #ifdef CONFIG_REED_SOLOMON_DEC8 | ||
258 | /** | ||
259 | * decode_rs8 - Decode codeword (8bit data width) | ||
260 | * | ||
261 | * @rs: the rs control structure | ||
262 | * @data: data field of a given type | ||
263 | * @par: received parity data field | ||
264 | * @len: data length | ||
265 | * @s: syndrome data field (if NULL, syndrome is calculated) | ||
266 | * @no_eras: number of erasures | ||
267 | * @eras_pos: position of erasures, can be NULL | ||
268 | * @invmsk: invert data mask (will be xored on data, not on parity!) | ||
269 | * @corr: buffer to store correction bitmask on eras_pos | ||
270 | * | ||
271 | * The syndrome and parity use a uint16_t data type to enable | ||
272 | * symbol sizes > 8. The calling code itself must take care of decoding | ||
273 | * the syndrome result and the received parity before calling this function. | ||
274 | */ | ||
275 | int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len, | ||
276 | uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, | ||
277 | uint16_t *corr) | ||
278 | { | ||
279 | #include "decode_rs.c" | ||
280 | } | ||
281 | EXPORT_SYMBOL_GPL(decode_rs8); | ||
282 | #endif | ||
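A hedged round-trip sketch (editorial), reusing the rs_decoder from the init_rs() example above: the parity buffer must be zeroed by the caller before encoding, and passing a NULL syndrome makes decode_rs8() compute the syndrome itself. A negative return value indicates an uncorrectable block.

	uint8_t data[512];
	uint16_t par[4];			/* one entry per generator root */
	int corrected;

	memset(par, 0, sizeof(par));		/* caller-initialized, see above */
	encode_rs8(rs_decoder, data, 512, par, 0);
	/* ... store data and par, read them back ... */
	corrected = decode_rs8(rs_decoder, data, par, 512,
			       NULL, 0, NULL, 0, NULL);
	if (corrected < 0)
		printk(KERN_ERR "uncorrectable RS block\n");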
283 | |||
284 | #ifdef CONFIG_REED_SOLOMON_ENC16 | ||
285 | /** | ||
286 | * encode_rs16 - Calculate the parity for data values (16bit data width) | ||
287 | * | ||
288 | * @rs: the rs control structure | ||
289 | * @data: data field of a given type | ||
290 | * @len: data length | ||
291 | * @par: parity data, must be initialized by caller (usually all 0) | ||
292 | * @invmsk: invert data mask (will be xored on data, not on parity!) | ||
293 | * | ||
294 | * Each field in the data array contains up to symbol size bits of valid data. | ||
295 | */ | ||
296 | int encode_rs16(struct rs_control *rs, uint16_t *data, int len, uint16_t *par, | ||
297 | uint16_t invmsk) | ||
298 | { | ||
299 | #include "encode_rs.c" | ||
300 | } | ||
301 | EXPORT_SYMBOL_GPL(encode_rs16); | ||
302 | #endif | ||
303 | |||
304 | #ifdef CONFIG_REED_SOLOMON_DEC16 | ||
305 | /** | ||
306 | * decode_rs16 - Decode codeword (16bit data width) | ||
307 | * | ||
308 | * @rs: the rs control structure | ||
309 | * @data: data field of a given type | ||
310 | * @par: received parity data field | ||
311 | * @len: data length | ||
312 | * @s: syndrome data field (if NULL, syndrome is calculated) | ||
313 | * @no_eras: number of erasures | ||
314 | * @eras_pos: position of erasures, can be NULL | ||
315 | * @invmsk: invert data mask (will be xored on data, not on parity!) | ||
316 | * @corr: buffer to store correction bitmask on eras_pos | ||
317 | * | ||
318 | * Each field in the data array contains up to symbol size bits of valid data. | ||
319 | */ | ||
320 | int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len, | ||
321 | uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, | ||
322 | uint16_t *corr) | ||
323 | { | ||
324 | #include "decode_rs.c" | ||
325 | } | ||
326 | EXPORT_SYMBOL_GPL(decode_rs16); | ||
327 | #endif | ||
328 | |||
329 | EXPORT_SYMBOL_GPL(init_rs); | ||
330 | EXPORT_SYMBOL_GPL(free_rs); | ||
331 | |||
332 | MODULE_LICENSE("GPL"); | ||
333 | MODULE_DESCRIPTION("Reed Solomon encoder/decoder"); | ||
334 | MODULE_AUTHOR("Phil Karn, Thomas Gleixner"); | ||
335 | |||
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c new file mode 100644 index 000000000000..21f0db2c9711 --- /dev/null +++ b/lib/rwsem-spinlock.c | |||
@@ -0,0 +1,344 @@ | |||
1 | /* rwsem-spinlock.c: R/W semaphores: contention handling functions for | ||
2 | * generic spinlock implementation | ||
3 | * | ||
4 | * Copyright (c) 2001 David Howells (dhowells@redhat.com). | ||
5 | * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de> | ||
6 | * - Derived also from comments by Linus | ||
7 | */ | ||
8 | #include <linux/rwsem.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/module.h> | ||
11 | |||
12 | struct rwsem_waiter { | ||
13 | struct list_head list; | ||
14 | struct task_struct *task; | ||
15 | unsigned int flags; | ||
16 | #define RWSEM_WAITING_FOR_READ 0x00000001 | ||
17 | #define RWSEM_WAITING_FOR_WRITE 0x00000002 | ||
18 | }; | ||
19 | |||
20 | #if RWSEM_DEBUG | ||
21 | void rwsemtrace(struct rw_semaphore *sem, const char *str) | ||
22 | { | ||
23 | if (sem->debug) | ||
24 | printk("[%d] %s({%d,%d})\n", | ||
25 | current->pid, str, sem->activity, | ||
26 | list_empty(&sem->wait_list) ? 0 : 1); | ||
27 | } | ||
28 | #endif | ||
29 | |||
30 | /* | ||
31 | * initialise the semaphore | ||
32 | */ | ||
33 | void fastcall init_rwsem(struct rw_semaphore *sem) | ||
34 | { | ||
35 | sem->activity = 0; | ||
36 | spin_lock_init(&sem->wait_lock); | ||
37 | INIT_LIST_HEAD(&sem->wait_list); | ||
38 | #if RWSEM_DEBUG | ||
39 | sem->debug = 0; | ||
40 | #endif | ||
41 | } | ||
42 | |||
43 | /* | ||
44 | * handle the lock release when there are processes blocked on it that can now run | ||
45 | * - if we come here, then: | ||
46 | * - the 'active count' _reached_ zero | ||
47 | * - the 'waiting count' is non-zero | ||
48 | * - the spinlock must be held by the caller | ||
49 | * - woken process blocks are discarded from the list after having task zeroed | ||
50 | * - writers are only woken if wakewrite is non-zero | ||
51 | */ | ||
52 | static inline struct rw_semaphore * | ||
53 | __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite) | ||
54 | { | ||
55 | struct rwsem_waiter *waiter; | ||
56 | struct task_struct *tsk; | ||
57 | int woken; | ||
58 | |||
59 | rwsemtrace(sem, "Entering __rwsem_do_wake"); | ||
60 | |||
61 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); | ||
62 | |||
63 | if (!wakewrite) { | ||
64 | if (waiter->flags & RWSEM_WAITING_FOR_WRITE) | ||
65 | goto out; | ||
66 | goto dont_wake_writers; | ||
67 | } | ||
68 | |||
69 | /* if we are allowed to wake writers try to grant a single write lock | ||
70 | * if there's a writer at the front of the queue | ||
71 | * - we leave the 'waiting count' incremented to signify potential | ||
72 | * contention | ||
73 | */ | ||
74 | if (waiter->flags & RWSEM_WAITING_FOR_WRITE) { | ||
75 | sem->activity = -1; | ||
76 | list_del(&waiter->list); | ||
77 | tsk = waiter->task; | ||
78 | /* Don't touch waiter after ->task has been NULLed */ | ||
79 | mb(); | ||
80 | waiter->task = NULL; | ||
81 | wake_up_process(tsk); | ||
82 | put_task_struct(tsk); | ||
83 | goto out; | ||
84 | } | ||
85 | |||
86 | /* grant an infinite number of read locks to the front of the queue */ | ||
87 | dont_wake_writers: | ||
88 | woken = 0; | ||
89 | while (waiter->flags & RWSEM_WAITING_FOR_READ) { | ||
90 | struct list_head *next = waiter->list.next; | ||
91 | |||
92 | list_del(&waiter->list); | ||
93 | tsk = waiter->task; | ||
94 | mb(); | ||
95 | waiter->task = NULL; | ||
96 | wake_up_process(tsk); | ||
97 | put_task_struct(tsk); | ||
98 | woken++; | ||
99 | if (list_empty(&sem->wait_list)) | ||
100 | break; | ||
101 | waiter = list_entry(next, struct rwsem_waiter, list); | ||
102 | } | ||
103 | |||
104 | sem->activity += woken; | ||
105 | |||
106 | out: | ||
107 | rwsemtrace(sem, "Leaving __rwsem_do_wake"); | ||
108 | return sem; | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * wake a single writer | ||
113 | */ | ||
114 | static inline struct rw_semaphore * | ||
115 | __rwsem_wake_one_writer(struct rw_semaphore *sem) | ||
116 | { | ||
117 | struct rwsem_waiter *waiter; | ||
118 | struct task_struct *tsk; | ||
119 | |||
120 | sem->activity = -1; | ||
121 | |||
122 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); | ||
123 | list_del(&waiter->list); | ||
124 | |||
125 | tsk = waiter->task; | ||
126 | mb(); | ||
127 | waiter->task = NULL; | ||
128 | wake_up_process(tsk); | ||
129 | put_task_struct(tsk); | ||
130 | return sem; | ||
131 | } | ||
132 | |||
133 | /* | ||
134 | * get a read lock on the semaphore | ||
135 | */ | ||
136 | void fastcall __sched __down_read(struct rw_semaphore *sem) | ||
137 | { | ||
138 | struct rwsem_waiter waiter; | ||
139 | struct task_struct *tsk; | ||
140 | |||
141 | rwsemtrace(sem, "Entering __down_read"); | ||
142 | |||
143 | spin_lock_irq(&sem->wait_lock); | ||
144 | |||
145 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { | ||
146 | /* granted */ | ||
147 | sem->activity++; | ||
148 | spin_unlock_irq(&sem->wait_lock); | ||
149 | goto out; | ||
150 | } | ||
151 | |||
152 | tsk = current; | ||
153 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
154 | |||
155 | /* set up my own style of waitqueue */ | ||
156 | waiter.task = tsk; | ||
157 | waiter.flags = RWSEM_WAITING_FOR_READ; | ||
158 | get_task_struct(tsk); | ||
159 | |||
160 | list_add_tail(&waiter.list, &sem->wait_list); | ||
161 | |||
162 | /* we don't need to touch the semaphore struct anymore */ | ||
163 | spin_unlock_irq(&sem->wait_lock); | ||
164 | |||
165 | /* wait to be given the lock */ | ||
166 | for (;;) { | ||
167 | if (!waiter.task) | ||
168 | break; | ||
169 | schedule(); | ||
170 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
171 | } | ||
172 | |||
173 | tsk->state = TASK_RUNNING; | ||
174 | |||
175 | out: | ||
176 | rwsemtrace(sem, "Leaving __down_read"); | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * trylock for reading -- returns 1 if successful, 0 if contention | ||
181 | */ | ||
182 | int fastcall __down_read_trylock(struct rw_semaphore *sem) | ||
183 | { | ||
184 | unsigned long flags; | ||
185 | int ret = 0; | ||
186 | |||
187 | rwsemtrace(sem, "Entering __down_read_trylock"); | ||
188 | |||
189 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
190 | |||
191 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { | ||
192 | /* granted */ | ||
193 | sem->activity++; | ||
194 | ret = 1; | ||
195 | } | ||
196 | |||
197 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
198 | |||
199 | rwsemtrace(sem, "Leaving __down_read_trylock"); | ||
200 | return ret; | ||
201 | } | ||
202 | |||
203 | /* | ||
204 | * get a write lock on the semaphore | ||
205 | * - we increment the waiting count anyway to indicate an exclusive lock | ||
206 | */ | ||
207 | void fastcall __sched __down_write(struct rw_semaphore *sem) | ||
208 | { | ||
209 | struct rwsem_waiter waiter; | ||
210 | struct task_struct *tsk; | ||
211 | |||
212 | rwsemtrace(sem, "Entering __down_write"); | ||
213 | |||
214 | spin_lock_irq(&sem->wait_lock); | ||
215 | |||
216 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { | ||
217 | /* granted */ | ||
218 | sem->activity = -1; | ||
219 | spin_unlock_irq(&sem->wait_lock); | ||
220 | goto out; | ||
221 | } | ||
222 | |||
223 | tsk = current; | ||
224 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
225 | |||
226 | /* set up my own style of waitqueue */ | ||
227 | waiter.task = tsk; | ||
228 | waiter.flags = RWSEM_WAITING_FOR_WRITE; | ||
229 | get_task_struct(tsk); | ||
230 | |||
231 | list_add_tail(&waiter.list, &sem->wait_list); | ||
232 | |||
233 | /* we don't need to touch the semaphore struct anymore */ | ||
234 | spin_unlock_irq(&sem->wait_lock); | ||
235 | |||
236 | /* wait to be given the lock */ | ||
237 | for (;;) { | ||
238 | if (!waiter.task) | ||
239 | break; | ||
240 | schedule(); | ||
241 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
242 | } | ||
243 | |||
244 | tsk->state = TASK_RUNNING; | ||
245 | |||
246 | out: | ||
247 | rwsemtrace(sem, "Leaving __down_write"); | ||
248 | } | ||
249 | |||
250 | /* | ||
251 | * trylock for writing -- returns 1 if successful, 0 if contention | ||
252 | */ | ||
253 | int fastcall __down_write_trylock(struct rw_semaphore *sem) | ||
254 | { | ||
255 | unsigned long flags; | ||
256 | int ret = 0; | ||
257 | |||
258 | rwsemtrace(sem, "Entering __down_write_trylock"); | ||
259 | |||
260 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
261 | |||
262 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { | ||
263 | /* granted */ | ||
264 | sem->activity = -1; | ||
265 | ret = 1; | ||
266 | } | ||
267 | |||
268 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
269 | |||
270 | rwsemtrace(sem, "Leaving __down_write_trylock"); | ||
271 | return ret; | ||
272 | } | ||
273 | |||
274 | /* | ||
275 | * release a read lock on the semaphore | ||
276 | */ | ||
277 | void fastcall __up_read(struct rw_semaphore *sem) | ||
278 | { | ||
279 | unsigned long flags; | ||
280 | |||
281 | rwsemtrace(sem, "Entering __up_read"); | ||
282 | |||
283 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
284 | |||
285 | if (--sem->activity == 0 && !list_empty(&sem->wait_list)) | ||
286 | sem = __rwsem_wake_one_writer(sem); | ||
287 | |||
288 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
289 | |||
290 | rwsemtrace(sem, "Leaving __up_read"); | ||
291 | } | ||
292 | |||
293 | /* | ||
294 | * release a write lock on the semaphore | ||
295 | */ | ||
296 | void fastcall __up_write(struct rw_semaphore *sem) | ||
297 | { | ||
298 | unsigned long flags; | ||
299 | |||
300 | rwsemtrace(sem, "Entering __up_write"); | ||
301 | |||
302 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
303 | |||
304 | sem->activity = 0; | ||
305 | if (!list_empty(&sem->wait_list)) | ||
306 | sem = __rwsem_do_wake(sem, 1); | ||
307 | |||
308 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
309 | |||
310 | rwsemtrace(sem, "Leaving __up_write"); | ||
311 | } | ||
312 | |||
313 | /* | ||
314 | * downgrade a write lock into a read lock | ||
315 | * - just wake up any readers at the front of the queue | ||
316 | */ | ||
317 | void fastcall __downgrade_write(struct rw_semaphore *sem) | ||
318 | { | ||
319 | unsigned long flags; | ||
320 | |||
321 | rwsemtrace(sem, "Entering __downgrade_write"); | ||
322 | |||
323 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
324 | |||
325 | sem->activity = 1; | ||
326 | if (!list_empty(&sem->wait_list)) | ||
327 | sem = __rwsem_do_wake(sem, 0); | ||
328 | |||
329 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
330 | |||
331 | rwsemtrace(sem, "Leaving __downgrade_write"); | ||
332 | } | ||
333 | |||
334 | EXPORT_SYMBOL(init_rwsem); | ||
335 | EXPORT_SYMBOL(__down_read); | ||
336 | EXPORT_SYMBOL(__down_read_trylock); | ||
337 | EXPORT_SYMBOL(__down_write); | ||
338 | EXPORT_SYMBOL(__down_write_trylock); | ||
339 | EXPORT_SYMBOL(__up_read); | ||
340 | EXPORT_SYMBOL(__up_write); | ||
341 | EXPORT_SYMBOL(__downgrade_write); | ||
342 | #if RWSEM_DEBUG | ||
343 | EXPORT_SYMBOL(rwsemtrace); | ||
344 | #endif | ||
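For orientation (editorial note): these __down_*()/__up_*() slow paths back the generic rw_semaphore wrappers declared in <linux/rwsem.h>. A caller's view is roughly as follows; the cfg_* names are invented for the sketch.

#include <linux/rwsem.h>

static DECLARE_RWSEM(cfg_sem);		/* hypothetical example lock */

void cfg_read(void)
{
	down_read(&cfg_sem);		/* many readers may hold it at once */
	/* ... read shared state ... */
	up_read(&cfg_sem);
}

void cfg_update(void)
{
	down_write(&cfg_sem);		/* exclusive */
	/* ... modify shared state ... */
	downgrade_write(&cfg_sem);	/* let readers in, keep reading */
	up_read(&cfg_sem);
}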
diff --git a/lib/rwsem.c b/lib/rwsem.c new file mode 100644 index 000000000000..7644089ec8fa --- /dev/null +++ b/lib/rwsem.c | |||
@@ -0,0 +1,268 @@ | |||
1 | /* rwsem.c: R/W semaphores: contention handling functions | ||
2 | * | ||
3 | * Written by David Howells (dhowells@redhat.com). | ||
4 | * Derived from arch/i386/kernel/semaphore.c | ||
5 | */ | ||
6 | #include <linux/rwsem.h> | ||
7 | #include <linux/sched.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/module.h> | ||
10 | |||
11 | struct rwsem_waiter { | ||
12 | struct list_head list; | ||
13 | struct task_struct *task; | ||
14 | unsigned int flags; | ||
15 | #define RWSEM_WAITING_FOR_READ 0x00000001 | ||
16 | #define RWSEM_WAITING_FOR_WRITE 0x00000002 | ||
17 | }; | ||
18 | |||
19 | #if RWSEM_DEBUG | ||
20 | #undef rwsemtrace | ||
21 | void rwsemtrace(struct rw_semaphore *sem, const char *str) | ||
22 | { | ||
23 | printk("sem=%p\n", sem); | ||
24 | printk("(sem)=%08lx\n", sem->count); | ||
25 | if (sem->debug) | ||
26 | printk("[%d] %s({%08lx})\n", current->pid, str, sem->count); | ||
27 | } | ||
28 | #endif | ||
29 | |||
30 | /* | ||
31 | * handle the lock release when there are processes blocked on it that can now run | ||
32 | * - if we come here from up_xxxx(), then: | ||
33 | * - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed) | ||
34 | * - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so) | ||
35 | * - there must be someone on the queue | ||
36 | * - the spinlock must be held by the caller | ||
37 | * - woken process blocks are discarded from the list after having task zeroed | ||
38 | * - writers are only woken if downgrading is false | ||
39 | */ | ||
40 | static inline struct rw_semaphore * | ||
41 | __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) | ||
42 | { | ||
43 | struct rwsem_waiter *waiter; | ||
44 | struct task_struct *tsk; | ||
45 | struct list_head *next; | ||
46 | signed long oldcount, woken, loop; | ||
47 | |||
48 | rwsemtrace(sem, "Entering __rwsem_do_wake"); | ||
49 | |||
50 | if (downgrading) | ||
51 | goto dont_wake_writers; | ||
52 | |||
53 | /* if we came through an up_xxxx() call, we only wake someone up | ||
54 | * if we can transition the active part of the count from 0 -> 1 | ||
55 | */ | ||
56 | try_again: | ||
57 | oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem) | ||
58 | - RWSEM_ACTIVE_BIAS; | ||
59 | if (oldcount & RWSEM_ACTIVE_MASK) | ||
60 | goto undo; | ||
61 | |||
62 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); | ||
63 | |||
64 | /* try to grant a single write lock if there's a writer at the front | ||
65 | * of the queue - note we leave the 'active part' of the count | ||
66 | * incremented by 1 and the waiting part incremented by 0x00010000 | ||
67 | */ | ||
68 | if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE)) | ||
69 | goto readers_only; | ||
70 | |||
71 | /* We must be careful not to touch 'waiter' after we set ->task = NULL. | ||
72 | * It is allocated on the waiter's stack and may become invalid at | ||
73 | * any time after that point (due to a wakeup from another source). | ||
74 | */ | ||
75 | list_del(&waiter->list); | ||
76 | tsk = waiter->task; | ||
77 | mb(); | ||
78 | waiter->task = NULL; | ||
79 | wake_up_process(tsk); | ||
80 | put_task_struct(tsk); | ||
81 | goto out; | ||
82 | |||
83 | /* don't want to wake any writers */ | ||
84 | dont_wake_writers: | ||
85 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); | ||
86 | if (waiter->flags & RWSEM_WAITING_FOR_WRITE) | ||
87 | goto out; | ||
88 | |||
89 | /* grant an infinite number of read locks to the readers at the front | ||
90 | * of the queue | ||
91 | * - note we increment the 'active part' of the count by the number of | ||
92 | * readers before waking any processes up | ||
93 | */ | ||
94 | readers_only: | ||
95 | woken = 0; | ||
96 | do { | ||
97 | woken++; | ||
98 | |||
99 | if (waiter->list.next == &sem->wait_list) | ||
100 | break; | ||
101 | |||
102 | waiter = list_entry(waiter->list.next, | ||
103 | struct rwsem_waiter, list); | ||
104 | |||
105 | } while (waiter->flags & RWSEM_WAITING_FOR_READ); | ||
106 | |||
107 | loop = woken; | ||
108 | woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS; | ||
109 | if (!downgrading) | ||
110 | /* we'd already done one increment earlier */ | ||
111 | woken -= RWSEM_ACTIVE_BIAS; | ||
112 | |||
113 | rwsem_atomic_add(woken, sem); | ||
114 | |||
115 | next = sem->wait_list.next; | ||
116 | for (; loop > 0; loop--) { | ||
117 | waiter = list_entry(next, struct rwsem_waiter, list); | ||
118 | next = waiter->list.next; | ||
119 | tsk = waiter->task; | ||
120 | mb(); | ||
121 | waiter->task = NULL; | ||
122 | wake_up_process(tsk); | ||
123 | put_task_struct(tsk); | ||
124 | } | ||
125 | |||
126 | sem->wait_list.next = next; | ||
127 | next->prev = &sem->wait_list; | ||
128 | |||
129 | out: | ||
130 | rwsemtrace(sem, "Leaving __rwsem_do_wake"); | ||
131 | return sem; | ||
132 | |||
133 | /* undo the change to count, but check for a transition 1->0 */ | ||
134 | undo: | ||
135 | if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0) | ||
136 | goto out; | ||
137 | goto try_again; | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * wait for a lock to be granted | ||
142 | */ | ||
143 | static inline struct rw_semaphore * | ||
144 | rwsem_down_failed_common(struct rw_semaphore *sem, | ||
145 | struct rwsem_waiter *waiter, signed long adjustment) | ||
146 | { | ||
147 | struct task_struct *tsk = current; | ||
148 | signed long count; | ||
149 | |||
150 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
151 | |||
152 | /* set up my own style of waitqueue */ | ||
153 | spin_lock_irq(&sem->wait_lock); | ||
154 | waiter->task = tsk; | ||
155 | get_task_struct(tsk); | ||
156 | |||
157 | list_add_tail(&waiter->list, &sem->wait_list); | ||
158 | |||
159 | /* we're now waiting on the lock, but no longer actively read-locking */ | ||
160 | count = rwsem_atomic_update(adjustment, sem); | ||
161 | |||
162 | /* if there are no active locks, wake the front queued process(es) up */ | ||
163 | if (!(count & RWSEM_ACTIVE_MASK)) | ||
164 | sem = __rwsem_do_wake(sem, 0); | ||
165 | |||
166 | spin_unlock_irq(&sem->wait_lock); | ||
167 | |||
168 | /* wait to be given the lock */ | ||
169 | for (;;) { | ||
170 | if (!waiter->task) | ||
171 | break; | ||
172 | schedule(); | ||
173 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
174 | } | ||
175 | |||
176 | tsk->state = TASK_RUNNING; | ||
177 | |||
178 | return sem; | ||
179 | } | ||
180 | |||
181 | /* | ||
182 | * wait for the read lock to be granted | ||
183 | */ | ||
184 | struct rw_semaphore fastcall __sched * | ||
185 | rwsem_down_read_failed(struct rw_semaphore *sem) | ||
186 | { | ||
187 | struct rwsem_waiter waiter; | ||
188 | |||
189 | rwsemtrace(sem, "Entering rwsem_down_read_failed"); | ||
190 | |||
191 | waiter.flags = RWSEM_WAITING_FOR_READ; | ||
192 | rwsem_down_failed_common(sem, &waiter, | ||
193 | RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS); | ||
194 | |||
195 | rwsemtrace(sem, "Leaving rwsem_down_read_failed"); | ||
196 | return sem; | ||
197 | } | ||
198 | |||
199 | /* | ||
200 | * wait for the write lock to be granted | ||
201 | */ | ||
202 | struct rw_semaphore fastcall __sched * | ||
203 | rwsem_down_write_failed(struct rw_semaphore *sem) | ||
204 | { | ||
205 | struct rwsem_waiter waiter; | ||
206 | |||
207 | rwsemtrace(sem, "Entering rwsem_down_write_failed"); | ||
208 | |||
209 | waiter.flags = RWSEM_WAITING_FOR_WRITE; | ||
210 | rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS); | ||
211 | |||
212 | rwsemtrace(sem, "Leaving rwsem_down_write_failed"); | ||
213 | return sem; | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * handle waking up a waiter on the semaphore | ||
218 | * - up_read/up_write has decremented the active part of count if we come here | ||
219 | */ | ||
220 | struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem) | ||
221 | { | ||
222 | unsigned long flags; | ||
223 | |||
224 | rwsemtrace(sem, "Entering rwsem_wake"); | ||
225 | |||
226 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
227 | |||
228 | /* do nothing if list empty */ | ||
229 | if (!list_empty(&sem->wait_list)) | ||
230 | sem = __rwsem_do_wake(sem, 0); | ||
231 | |||
232 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
233 | |||
234 | rwsemtrace(sem, "Leaving rwsem_wake"); | ||
235 | |||
236 | return sem; | ||
237 | } | ||
238 | |||
239 | /* | ||
240 | * downgrade a write lock into a read lock | ||
241 | * - caller incremented waiting part of count and discovered it still negative | ||
242 | * - just wake up any readers at the front of the queue | ||
243 | */ | ||
244 | struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem) | ||
245 | { | ||
246 | unsigned long flags; | ||
247 | |||
248 | rwsemtrace(sem, "Entering rwsem_downgrade_wake"); | ||
249 | |||
250 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
251 | |||
252 | /* do nothing if list empty */ | ||
253 | if (!list_empty(&sem->wait_list)) | ||
254 | sem = __rwsem_do_wake(sem, 1); | ||
255 | |||
256 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
257 | |||
258 | rwsemtrace(sem, "Leaving rwsem_downgrade_wake"); | ||
259 | return sem; | ||
260 | } | ||
261 | |||
262 | EXPORT_SYMBOL(rwsem_down_read_failed); | ||
263 | EXPORT_SYMBOL(rwsem_down_write_failed); | ||
264 | EXPORT_SYMBOL(rwsem_wake); | ||
265 | EXPORT_SYMBOL(rwsem_downgrade_wake); | ||
266 | #if RWSEM_DEBUG | ||
267 | EXPORT_SYMBOL(rwsemtrace); | ||
268 | #endif | ||
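For reference (editorial; these constants live in the per-arch <asm/rwsem.h>, not in this file, and the values below are the common 32-bit ones of this era):

/*
 *   RWSEM_ACTIVE_BIAS	  0x00000001
 *   RWSEM_ACTIVE_MASK	  0x0000ffff
 *   RWSEM_WAITING_BIAS	 (-0x00010000)
 *
 * One reader holds count 0x00000001; a writer holds
 * RWSEM_ACTIVE_BIAS + RWSEM_WAITING_BIAS = 0xffff0001.  Each blocked
 * waiter contributes RWSEM_WAITING_BIAS, which is what makes the
 * 'waiting part' of the count negative in the comments above.
 */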
diff --git a/lib/sha1.c b/lib/sha1.c new file mode 100644 index 000000000000..2f7f1148dfde --- /dev/null +++ b/lib/sha1.c | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * SHA transform algorithm, originally taken from code written by | ||
3 | * Peter Gutmann, and placed in the public domain. | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/cryptohash.h> | ||
9 | |||
10 | /* The SHA f()-functions. */ | ||
11 | |||
12 | #define f1(x,y,z) (z ^ (x & (y ^ z))) /* x ? y : z */ | ||
13 | #define f2(x,y,z) (x ^ y ^ z) /* XOR */ | ||
14 | #define f3(x,y,z) ((x & y) + (z & (x ^ y))) /* majority */ | ||
15 | |||
16 | /* The SHA Mysterious Constants */ | ||
17 | |||
18 | #define K1 0x5A827999L /* Rounds 0-19: sqrt(2) * 2^30 */ | ||
19 | #define K2 0x6ED9EBA1L /* Rounds 20-39: sqrt(3) * 2^30 */ | ||
20 | #define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */ | ||
21 | #define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */ | ||
22 | |||
23 | /* | ||
24 | * sha_transform: single block SHA1 transform | ||
25 | * | ||
26 | * @digest: 160 bit digest to update | ||
27 | * @data: 512 bits of data to hash | ||
28 | * @W: 80 words of workspace (see note) | ||
29 | * | ||
30 | * This function generates a SHA1 digest for a single 512-bit block. | ||
31 | * Be warned, it does not handle padding and message digest finalization; | ||
32 | * do not confuse it with the full FIPS 180-1 digest algorithm for variable | ||
33 | * length messages. | ||
34 | * | ||
35 | * Note: If the hash is security sensitive, the caller should be sure | ||
36 | * to clear the workspace. This is left to the caller to avoid | ||
37 | * unnecessary clears between chained hashing operations. | ||
38 | */ | ||
39 | void sha_transform(__u32 *digest, const char *in, __u32 *W) | ||
40 | { | ||
41 | __u32 a, b, c, d, e, t, i; | ||
42 | |||
43 | for (i = 0; i < 16; i++) | ||
44 | W[i] = be32_to_cpu(((const __u32 *)in)[i]); | ||
45 | |||
46 | for (i = 0; i < 64; i++) | ||
47 | W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1); | ||
48 | |||
49 | a = digest[0]; | ||
50 | b = digest[1]; | ||
51 | c = digest[2]; | ||
52 | d = digest[3]; | ||
53 | e = digest[4]; | ||
54 | |||
55 | for (i = 0; i < 20; i++) { | ||
56 | t = f1(b, c, d) + K1 + rol32(a, 5) + e + W[i]; | ||
57 | e = d; d = c; c = rol32(b, 30); b = a; a = t; | ||
58 | } | ||
59 | |||
60 | for (; i < 40; i ++) { | ||
61 | t = f2(b, c, d) + K2 + rol32(a, 5) + e + W[i]; | ||
62 | e = d; d = c; c = rol32(b, 30); b = a; a = t; | ||
63 | } | ||
64 | |||
65 | for (; i < 60; i ++) { | ||
66 | t = f3(b, c, d) + K3 + rol32(a, 5) + e + W[i]; | ||
67 | e = d; d = c; c = rol32(b, 30); b = a; a = t; | ||
68 | } | ||
69 | |||
70 | for (; i < 80; i ++) { | ||
71 | t = f2(b, c, d) + K4 + rol32(a, 5) + e + W[i]; | ||
72 | e = d; d = c; c = rol32(b, 30); b = a; a = t; | ||
73 | } | ||
74 | |||
75 | digest[0] += a; | ||
76 | digest[1] += b; | ||
77 | digest[2] += c; | ||
78 | digest[3] += d; | ||
79 | digest[4] += e; | ||
80 | } | ||
81 | EXPORT_SYMBOL(sha_transform); | ||
82 | |||
83 | /* | ||
84 | * sha_init: initialize the vectors for a SHA1 digest | ||
85 | * | ||
86 | * @buf: vector to initialize | ||
87 | */ | ||
88 | void sha_init(__u32 *buf) | ||
89 | { | ||
90 | buf[0] = 0x67452301; | ||
91 | buf[1] = 0xefcdab89; | ||
92 | buf[2] = 0x98badcfe; | ||
93 | buf[3] = 0x10325476; | ||
94 | buf[4] = 0xc3d2e1f0; | ||
95 | } | ||
96 | |||
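A hedged caller sketch (editorial): hashing one already padded 512-bit block. Appending the 0x80 pad byte and the 64-bit message length is the caller's job, as the note above warns.

#include <linux/cryptohash.h>
#include <linux/string.h>

__u32 digest[5];
__u32 W[80];		/* workspace; wipe it when hashing secrets */
char block[64];		/* 512 bits of already padded input, filled by caller */

sha_init(digest);
sha_transform(digest, block, W);
memset(W, 0, sizeof(W));	/* per the security note above */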
diff --git a/lib/sort.c b/lib/sort.c new file mode 100644 index 000000000000..ea3caedeabdb --- /dev/null +++ b/lib/sort.c | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | * A fast, small, non-recursive O(n log n) sort for the Linux kernel | ||
3 | * | ||
4 | * Jan 23 2005 Matt Mackall <mpm@selenic.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/module.h> | ||
9 | |||
10 | void u32_swap(void *a, void *b, int size) | ||
11 | { | ||
12 | u32 t = *(u32 *)a; | ||
13 | *(u32 *)a = *(u32 *)b; | ||
14 | *(u32 *)b = t; | ||
15 | } | ||
16 | |||
17 | void generic_swap(void *a, void *b, int size) | ||
18 | { | ||
19 | char t; | ||
20 | |||
21 | do { | ||
22 | t = *(char *)a; | ||
23 | *(char *)a++ = *(char *)b; | ||
24 | *(char *)b++ = t; | ||
25 | } while (--size > 0); | ||
26 | } | ||
27 | |||
28 | /* | ||
29 | * sort - sort an array of elements | ||
30 | * @base: pointer to data to sort | ||
31 | * @num: number of elements | ||
32 | * @size: size of each element | ||
33 | * @cmp: pointer to comparison function | ||
34 | * @swap: pointer to swap function or NULL | ||
35 | * | ||
36 | * This function does a heapsort on the given array. You may provide a | ||
37 | * swap function optimized to your element type. | ||
38 | * | ||
39 | * Sorting time is O(n log n) both on average and worst-case. While | ||
40 | * qsort is about 20% faster on average, it suffers from exploitable | ||
41 | * O(n*n) worst-case behavior and extra memory requirements that make | ||
42 | * it less suitable for kernel use. | ||
43 | */ | ||
44 | |||
45 | void sort(void *base, size_t num, size_t size, | ||
46 | int (*cmp)(const void *, const void *), | ||
47 | void (*swap)(void *, void *, int size)) | ||
48 | { | ||
49 | /* pre-scale counters for performance */ | ||
50 | int i = (num/2) * size, n = num * size, c, r; | ||
51 | |||
52 | if (!swap) | ||
53 | swap = (size == 4 ? u32_swap : generic_swap); | ||
54 | |||
55 | /* heapify */ | ||
56 | for ( ; i >= 0; i -= size) { | ||
57 | for (r = i; r * 2 < n; r = c) { | ||
58 | c = r * 2; | ||
59 | if (c < n - size && cmp(base + c, base + c + size) < 0) | ||
60 | c += size; | ||
61 | if (cmp(base + r, base + c) >= 0) | ||
62 | break; | ||
63 | swap(base + r, base + c, size); | ||
64 | } | ||
65 | } | ||
66 | |||
67 | /* sort */ | ||
68 | for (i = n - size; i >= 0; i -= size) { | ||
69 | swap(base, base + i, size); | ||
70 | for (r = 0; r * 2 < i; r = c) { | ||
71 | c = r * 2; | ||
72 | if (c < i - size && cmp(base + c, base + c + size) < 0) | ||
73 | c += size; | ||
74 | if (cmp(base + r, base + c) >= 0) | ||
75 | break; | ||
76 | swap(base + r, base + c, size); | ||
77 | } | ||
78 | } | ||
79 | } | ||
80 | |||
81 | EXPORT_SYMBOL(sort); | ||
82 | |||
83 | #if 0 | ||
84 | /* a simple boot-time regression test */ | ||
85 | |||
86 | int cmpint(const void *a, const void *b) | ||
87 | { | ||
88 | return *(int *)a - *(int *)b; | ||
89 | } | ||
90 | |||
91 | static int sort_test(void) | ||
92 | { | ||
93 | int *a, i, r = 0; | ||
94 | |||
95 | a = kmalloc(1000 * sizeof(int), GFP_KERNEL); | ||
96 | BUG_ON(!a); | ||
97 | |||
98 | printk("testing sort()\n"); | ||
99 | |||
100 | for (i = 0; i < 1000; i++) { | ||
101 | r = (r * 725861) % 6599; | ||
102 | a[i] = r; | ||
103 | } | ||
104 | |||
105 | sort(a, 1000, sizeof(int), cmpint, NULL); | ||
106 | |||
107 | for (i = 0; i < 999; i++) | ||
108 | if (a[i] > a[i+1]) { | ||
109 | printk("sort() failed!\n"); | ||
110 | break; | ||
111 | } | ||
112 | |||
113 | kfree(a); | ||
114 | |||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | module_init(sort_test); | ||
119 | #endif | ||
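Complementing the boot-time test above, a hedged sketch (editorial, element type invented) of the struct case the element-size/comparator interface is meant for; passing a NULL swap lets sort() pick u32_swap() or generic_swap() by element size:

struct entry {			/* hypothetical element type */
	int key;
	void *payload;
};

static int cmp_entry(const void *a, const void *b)
{
	return ((const struct entry *)a)->key -
	       ((const struct entry *)b)->key;
}

/* ... */
struct entry table[32];
sort(table, 32, sizeof(struct entry), cmp_entry, NULL);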
diff --git a/lib/string.c b/lib/string.c new file mode 100644 index 000000000000..4bb93ad23c60 --- /dev/null +++ b/lib/string.c | |||
@@ -0,0 +1,601 @@ | |||
1 | /* | ||
2 | * linux/lib/string.c | ||
3 | * | ||
4 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * stupid library routines.. The optimized versions should generally be found | ||
9 | * as inline code in <asm-xx/string.h> | ||
10 | * | ||
11 | * These are buggy as well.. | ||
12 | * | ||
13 | * * Fri Jun 25 1999, Ingo Oeser <ioe@informatik.tu-chemnitz.de> | ||
14 | * - Added strsep() which will replace strtok() soon (because strsep() is | ||
15 | * reentrant and should be faster). Use only strsep() in new code, please. | ||
16 | * | ||
17 | * * Sat Feb 09 2002, Jason Thomas <jason@topic.com.au>, | ||
18 | * Matthew Hawkins <matt@mh.dropbear.id.au> | ||
19 | * - Kissed strtok() goodbye | ||
20 | */ | ||
21 | |||
22 | #include <linux/types.h> | ||
23 | #include <linux/string.h> | ||
24 | #include <linux/ctype.h> | ||
25 | #include <linux/module.h> | ||
26 | |||
27 | #ifndef __HAVE_ARCH_STRNICMP | ||
28 | /** | ||
29 | * strnicmp - Case insensitive, length-limited string comparison | ||
30 | * @s1: One string | ||
31 | * @s2: The other string | ||
32 | * @len: the maximum number of characters to compare | ||
33 | */ | ||
34 | int strnicmp(const char *s1, const char *s2, size_t len) | ||
35 | { | ||
36 | /* Yes, Virginia, it had better be unsigned */ | ||
37 | unsigned char c1, c2; | ||
38 | |||
39 | c1 = 0; c2 = 0; | ||
40 | if (len) { | ||
41 | do { | ||
42 | c1 = *s1; c2 = *s2; | ||
43 | s1++; s2++; | ||
44 | if (!c1) | ||
45 | break; | ||
46 | if (!c2) | ||
47 | break; | ||
48 | if (c1 == c2) | ||
49 | continue; | ||
50 | c1 = tolower(c1); | ||
51 | c2 = tolower(c2); | ||
52 | if (c1 != c2) | ||
53 | break; | ||
54 | } while (--len); | ||
55 | } | ||
56 | return (int)c1 - (int)c2; | ||
57 | } | ||
58 | |||
59 | EXPORT_SYMBOL(strnicmp); | ||
60 | #endif | ||
61 | |||
62 | #ifndef __HAVE_ARCH_STRCPY | ||
63 | /** | ||
64 | * strcpy - Copy a %NUL terminated string | ||
65 | * @dest: Where to copy the string to | ||
66 | * @src: Where to copy the string from | ||
67 | */ | ||
68 | char * strcpy(char * dest,const char *src) | ||
69 | { | ||
70 | char *tmp = dest; | ||
71 | |||
72 | while ((*dest++ = *src++) != '\0') | ||
73 | /* nothing */; | ||
74 | return tmp; | ||
75 | } | ||
76 | EXPORT_SYMBOL(strcpy); | ||
77 | #endif | ||
78 | |||
79 | #ifndef __HAVE_ARCH_STRNCPY | ||
80 | /** | ||
81 | * strncpy - Copy a length-limited, %NUL-terminated string | ||
82 | * @dest: Where to copy the string to | ||
83 | * @src: Where to copy the string from | ||
84 | * @count: The maximum number of bytes to copy | ||
85 | * | ||
86 | * The result is not %NUL-terminated if the source exceeds | ||
87 | * @count bytes. | ||
88 | */ | ||
89 | char * strncpy(char * dest,const char *src,size_t count) | ||
90 | { | ||
91 | char *tmp = dest; | ||
92 | |||
93 | while (count) { | ||
94 | if ((*tmp = *src) != 0) src++; | ||
95 | tmp++; | ||
96 | count--; | ||
97 | } | ||
98 | return dest; | ||
99 | } | ||
100 | EXPORT_SYMBOL(strncpy); | ||
101 | #endif | ||
102 | |||
103 | #ifndef __HAVE_ARCH_STRLCPY | ||
104 | /** | ||
105 | * strlcpy - Copy a %NUL terminated string into a sized buffer | ||
106 | * @dest: Where to copy the string to | ||
107 | * @src: Where to copy the string from | ||
108 | * @size: size of destination buffer | ||
109 | * | ||
110 | * Compatible with *BSD: the result is always a valid | ||
111 | * NUL-terminated string that fits in the buffer (unless, | ||
112 | * of course, the buffer size is zero). It does not pad | ||
113 | * out the result like strncpy() does. | ||
114 | */ | ||
115 | size_t strlcpy(char *dest, const char *src, size_t size) | ||
116 | { | ||
117 | size_t ret = strlen(src); | ||
118 | |||
119 | if (size) { | ||
120 | size_t len = (ret >= size) ? size-1 : ret; | ||
121 | memcpy(dest, src, len); | ||
122 | dest[len] = '\0'; | ||
123 | } | ||
124 | return ret; | ||
125 | } | ||
126 | EXPORT_SYMBOL(strlcpy); | ||
127 | #endif | ||
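An editorial illustration of the contrast documented above; resulting buffer contents are shown in the comments:

char buf[4];

strncpy(buf, "kernel", sizeof(buf));	/* buf = 'k','e','r','n' - unterminated! */
strlcpy(buf, "kernel", sizeof(buf));	/* buf = "ker", returns 6 == strlen(src) */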
128 | |||
129 | #ifndef __HAVE_ARCH_STRCAT | ||
130 | /** | ||
131 | * strcat - Append one %NUL-terminated string to another | ||
132 | * @dest: The string to be appended to | ||
133 | * @src: The string to append to it | ||
134 | */ | ||
135 | char * strcat(char * dest, const char * src) | ||
136 | { | ||
137 | char *tmp = dest; | ||
138 | |||
139 | while (*dest) | ||
140 | dest++; | ||
141 | while ((*dest++ = *src++) != '\0') | ||
142 | ; | ||
143 | |||
144 | return tmp; | ||
145 | } | ||
146 | EXPORT_SYMBOL(strcat); | ||
147 | #endif | ||
148 | |||
149 | #ifndef __HAVE_ARCH_STRNCAT | ||
150 | /** | ||
151 | * strncat - Append a length-limited, %NUL-terminated string to another | ||
152 | * @dest: The string to be appended to | ||
153 | * @src: The string to append to it | ||
154 | * @count: The maximum numbers of bytes to copy | ||
155 | * | ||
156 | * Note that in contrast to strncpy, strncat ensures the result is | ||
157 | * terminated. | ||
158 | */ | ||
159 | char * strncat(char *dest, const char *src, size_t count) | ||
160 | { | ||
161 | char *tmp = dest; | ||
162 | |||
163 | if (count) { | ||
164 | while (*dest) | ||
165 | dest++; | ||
166 | while ((*dest++ = *src++) != 0) { | ||
167 | if (--count == 0) { | ||
168 | *dest = '\0'; | ||
169 | break; | ||
170 | } | ||
171 | } | ||
172 | } | ||
173 | |||
174 | return tmp; | ||
175 | } | ||
176 | EXPORT_SYMBOL(strncat); | ||
177 | #endif | ||
178 | |||
179 | #ifndef __HAVE_ARCH_STRLCAT | ||
180 | /** | ||
181 | * strlcat - Append a length-limited, %NUL-terminated string to another | ||
182 | * @dest: The string to be appended to | ||
183 | * @src: The string to append to it | ||
184 | * @count: The size of the destination buffer. | ||
185 | */ | ||
186 | size_t strlcat(char *dest, const char *src, size_t count) | ||
187 | { | ||
188 | size_t dsize = strlen(dest); | ||
189 | size_t len = strlen(src); | ||
190 | size_t res = dsize + len; | ||
191 | |||
192 | /* This would be a bug */ | ||
193 | BUG_ON(dsize >= count); | ||
194 | |||
195 | dest += dsize; | ||
196 | count -= dsize; | ||
197 | if (len >= count) | ||
198 | len = count-1; | ||
199 | memcpy(dest, src, len); | ||
200 | dest[len] = 0; | ||
201 | return res; | ||
202 | } | ||
203 | EXPORT_SYMBOL(strlcat); | ||
204 | #endif | ||
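A hedged sketch (editorial): since strlcat() returns the length the combined string would have had, a return value >= the buffer size signals truncation. Note the BUG_ON() above: @dest must already be NUL-terminated within @count.

char label[16] = "irq:";

if (strlcat(label, "edge-triggered", sizeof(label)) >= sizeof(label))
	printk(KERN_WARNING "label truncated\n");	/* 4 + 14 = 18 >= 16 */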
205 | |||
206 | #ifndef __HAVE_ARCH_STRCMP | ||
207 | /** | ||
208 | * strcmp - Compare two strings | ||
209 | * @cs: One string | ||
210 | * @ct: Another string | ||
211 | */ | ||
212 | int strcmp(const char * cs,const char * ct) | ||
213 | { | ||
214 | register signed char __res; | ||
215 | |||
216 | while (1) { | ||
217 | if ((__res = *cs - *ct++) != 0 || !*cs++) | ||
218 | break; | ||
219 | } | ||
220 | |||
221 | return __res; | ||
222 | } | ||
223 | EXPORT_SYMBOL(strcmp); | ||
224 | #endif | ||
225 | |||
226 | #ifndef __HAVE_ARCH_STRNCMP | ||
227 | /** | ||
228 | * strncmp - Compare two length-limited strings | ||
229 | * @cs: One string | ||
230 | * @ct: Another string | ||
231 | * @count: The maximum number of bytes to compare | ||
232 | */ | ||
233 | int strncmp(const char * cs,const char * ct,size_t count) | ||
234 | { | ||
235 | register signed char __res = 0; | ||
236 | |||
237 | while (count) { | ||
238 | if ((__res = *cs - *ct++) != 0 || !*cs++) | ||
239 | break; | ||
240 | count--; | ||
241 | } | ||
242 | |||
243 | return __res; | ||
244 | } | ||
245 | EXPORT_SYMBOL(strncmp); | ||
246 | #endif | ||
247 | |||
248 | #ifndef __HAVE_ARCH_STRCHR | ||
249 | /** | ||
250 | * strchr - Find the first occurrence of a character in a string | ||
251 | * @s: The string to be searched | ||
252 | * @c: The character to search for | ||
253 | */ | ||
254 | char * strchr(const char * s, int c) | ||
255 | { | ||
256 | for(; *s != (char) c; ++s) | ||
257 | if (*s == '\0') | ||
258 | return NULL; | ||
259 | return (char *) s; | ||
260 | } | ||
261 | EXPORT_SYMBOL(strchr); | ||
262 | #endif | ||
263 | |||
264 | #ifndef __HAVE_ARCH_STRRCHR | ||
265 | /** | ||
266 | * strrchr - Find the last occurrence of a character in a string | ||
267 | * @s: The string to be searched | ||
268 | * @c: The character to search for | ||
269 | */ | ||
270 | char * strrchr(const char * s, int c) | ||
271 | { | ||
272 | const char *p = s + strlen(s); | ||
273 | do { | ||
274 | if (*p == (char)c) | ||
275 | return (char *)p; | ||
276 | } while (--p >= s); | ||
277 | return NULL; | ||
278 | } | ||
279 | EXPORT_SYMBOL(strrchr); | ||
280 | #endif | ||
281 | |||
282 | #ifndef __HAVE_ARCH_STRNCHR | ||
283 | /** | ||
284 | * strnchr - Find a character in a length limited string | ||
285 | * @s: The string to be searched | ||
286 | * @count: The number of characters to be searched | ||
287 | * @c: The character to search for | ||
288 | */ | ||
289 | char *strnchr(const char *s, size_t count, int c) | ||
290 | { | ||
291 | for (; count-- && *s != '\0'; ++s) | ||
292 | if (*s == (char) c) | ||
293 | return (char *) s; | ||
294 | return NULL; | ||
295 | } | ||
296 | EXPORT_SYMBOL(strnchr); | ||
297 | #endif | ||
298 | |||
299 | #ifndef __HAVE_ARCH_STRLEN | ||
300 | /** | ||
301 | * strlen - Find the length of a string | ||
302 | * @s: The string to be sized | ||
303 | */ | ||
304 | size_t strlen(const char * s) | ||
305 | { | ||
306 | const char *sc; | ||
307 | |||
308 | for (sc = s; *sc != '\0'; ++sc) | ||
309 | /* nothing */; | ||
310 | return sc - s; | ||
311 | } | ||
312 | EXPORT_SYMBOL(strlen); | ||
313 | #endif | ||
314 | |||
315 | #ifndef __HAVE_ARCH_STRNLEN | ||
316 | /** | ||
317 | * strnlen - Find the length of a length-limited string | ||
318 | * @s: The string to be sized | ||
319 | * @count: The maximum number of bytes to search | ||
320 | */ | ||
321 | size_t strnlen(const char * s, size_t count) | ||
322 | { | ||
323 | const char *sc; | ||
324 | |||
325 | for (sc = s; count-- && *sc != '\0'; ++sc) | ||
326 | /* nothing */; | ||
327 | return sc - s; | ||
328 | } | ||
329 | EXPORT_SYMBOL(strnlen); | ||
330 | #endif | ||
331 | |||
332 | #ifndef __HAVE_ARCH_STRSPN | ||
333 | /** | ||
334 | * strspn - Calculate the length of the initial substring of @s which only | ||
335 | * contains characters in @accept | ||
336 | * @s: The string to be searched | ||
337 | * @accept: The string to search for | ||
338 | */ | ||
339 | size_t strspn(const char *s, const char *accept) | ||
340 | { | ||
341 | const char *p; | ||
342 | const char *a; | ||
343 | size_t count = 0; | ||
344 | |||
345 | for (p = s; *p != '\0'; ++p) { | ||
346 | for (a = accept; *a != '\0'; ++a) { | ||
347 | if (*p == *a) | ||
348 | break; | ||
349 | } | ||
350 | if (*a == '\0') | ||
351 | return count; | ||
352 | ++count; | ||
353 | } | ||
354 | |||
355 | return count; | ||
356 | } | ||
357 | |||
358 | EXPORT_SYMBOL(strspn); | ||
359 | #endif | ||
360 | |||
361 | /** | ||
362 | * strcspn - Calculate the length of the initial substring of @s which does | ||
363 | * not contain characters in @reject | ||
364 | * @s: The string to be searched | ||
365 | * @reject: The string to avoid | ||
366 | */ | ||
367 | size_t strcspn(const char *s, const char *reject) | ||
368 | { | ||
369 | const char *p; | ||
370 | const char *r; | ||
371 | size_t count = 0; | ||
372 | |||
373 | for (p = s; *p != '\0'; ++p) { | ||
374 | for (r = reject; *r != '\0'; ++r) { | ||
375 | if (*p == *r) | ||
376 | return count; | ||
377 | } | ||
378 | ++count; | ||
379 | } | ||
380 | |||
381 | return count; | ||
382 | } | ||
383 | EXPORT_SYMBOL(strcspn); | ||
384 | |||
385 | #ifndef __HAVE_ARCH_STRPBRK | ||
386 | /** | ||
387 | * strpbrk - Find the first occurrence of a set of characters | ||
388 | * @cs: The string to be searched | ||
389 | * @ct: The characters to search for | ||
390 | */ | ||
391 | char * strpbrk(const char * cs,const char * ct) | ||
392 | { | ||
393 | const char *sc1,*sc2; | ||
394 | |||
395 | for( sc1 = cs; *sc1 != '\0'; ++sc1) { | ||
396 | for( sc2 = ct; *sc2 != '\0'; ++sc2) { | ||
397 | if (*sc1 == *sc2) | ||
398 | return (char *) sc1; | ||
399 | } | ||
400 | } | ||
401 | return NULL; | ||
402 | } | ||
403 | EXPORT_SYMBOL(strpbrk); | ||
404 | #endif | ||
405 | |||
406 | #ifndef __HAVE_ARCH_STRSEP | ||
407 | /** | ||
408 | * strsep - Split a string into tokens | ||
409 | * @s: The string to be searched | ||
410 | * @ct: The characters to search for | ||
411 | * | ||
412 | * strsep() updates @s to point after the token, ready for the next call. | ||
413 | * | ||
414 | * It returns empty tokens, too, behaving exactly like the libc function | ||
415 | * of that name. In fact, it was stolen from glibc2 and de-fancy-fied. | ||
416 | * Same semantics, slimmer shape. ;) | ||
417 | */ | ||
418 | char * strsep(char **s, const char *ct) | ||
419 | { | ||
420 | char *sbegin = *s, *end; | ||
421 | |||
422 | if (sbegin == NULL) | ||
423 | return NULL; | ||
424 | |||
425 | end = strpbrk(sbegin, ct); | ||
426 | if (end) | ||
427 | *end++ = '\0'; | ||
428 | *s = end; | ||
429 | |||
430 | return sbegin; | ||
431 | } | ||
432 | |||
433 | EXPORT_SYMBOL(strsep); | ||
434 | #endif | ||
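An editorial usage sketch: strsep() consumes a writable buffer and, as documented above, empty tokens come back too.

char opts[] = "ro,noatime,,sync";	/* note the empty third token */
char *s = opts, *tok;

while ((tok = strsep(&s, ",")) != NULL)
	printk(KERN_DEBUG "option '%s'\n", tok);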
435 | |||
436 | #ifndef __HAVE_ARCH_MEMSET | ||
437 | /** | ||
438 | * memset - Fill a region of memory with the given value | ||
439 | * @s: Pointer to the start of the area. | ||
440 | * @c: The byte to fill the area with | ||
441 | * @count: The size of the area. | ||
442 | * | ||
443 | * Do not use memset() to access IO space, use memset_io() instead. | ||
444 | */ | ||
445 | void * memset(void * s,int c,size_t count) | ||
446 | { | ||
447 | char *xs = (char *) s; | ||
448 | |||
449 | while (count--) | ||
450 | *xs++ = c; | ||
451 | |||
452 | return s; | ||
453 | } | ||
454 | EXPORT_SYMBOL(memset); | ||
455 | #endif | ||
456 | |||
457 | #ifndef __HAVE_ARCH_MEMCPY | ||
458 | /** | ||
459 | * memcpy - Copy one area of memory to another | ||
460 | * @dest: Where to copy to | ||
461 | * @src: Where to copy from | ||
462 | * @count: The size of the area. | ||
463 | * | ||
464 | * You should not use this function to access IO space, use memcpy_toio() | ||
465 | * or memcpy_fromio() instead. | ||
466 | */ | ||
467 | void * memcpy(void * dest,const void *src,size_t count) | ||
468 | { | ||
469 | char *tmp = (char *) dest, *s = (char *) src; | ||
470 | |||
471 | while (count--) | ||
472 | *tmp++ = *s++; | ||
473 | |||
474 | return dest; | ||
475 | } | ||
476 | EXPORT_SYMBOL(memcpy); | ||
477 | #endif | ||
478 | |||
479 | #ifndef __HAVE_ARCH_MEMMOVE | ||
480 | /** | ||
481 | * memmove - Copy one area of memory to another | ||
482 | * @dest: Where to copy to | ||
483 | * @src: Where to copy from | ||
484 | * @count: The size of the area. | ||
485 | * | ||
486 | * Unlike memcpy(), memmove() copes with overlapping areas. | ||
487 | */ | ||
488 | void * memmove(void * dest,const void *src,size_t count) | ||
489 | { | ||
490 | char *tmp, *s; | ||
491 | |||
492 | if (dest <= src) { | ||
493 | tmp = (char *) dest; | ||
494 | s = (char *) src; | ||
495 | while (count--) | ||
496 | *tmp++ = *s++; | ||
497 | } | ||
498 | else { | ||
499 | tmp = (char *) dest + count; | ||
500 | s = (char *) src + count; | ||
501 | while (count--) | ||
502 | *--tmp = *--s; | ||
503 | } | ||
504 | |||
505 | return dest; | ||
506 | } | ||
507 | EXPORT_SYMBOL(memmove); | ||
508 | #endif | ||
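An editorial one-liner for the overlapping case that separates memmove() from memcpy():

char buf[8] = "abcdef";

memmove(buf + 1, buf, 6);	/* shift right: buf is now "aabcdef" */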
509 | |||
510 | #ifndef __HAVE_ARCH_MEMCMP | ||
511 | /** | ||
512 | * memcmp - Compare two areas of memory | ||
513 | * @cs: One area of memory | ||
514 | * @ct: Another area of memory | ||
515 | * @count: The size of the area. | ||
516 | */ | ||
517 | int memcmp(const void * cs,const void * ct,size_t count) | ||
518 | { | ||
519 | const unsigned char *su1, *su2; | ||
520 | int res = 0; | ||
521 | |||
522 | for( su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) | ||
523 | if ((res = *su1 - *su2) != 0) | ||
524 | break; | ||
525 | return res; | ||
526 | } | ||
527 | EXPORT_SYMBOL(memcmp); | ||
528 | #endif | ||
529 | |||
530 | #ifndef __HAVE_ARCH_MEMSCAN | ||
531 | /** | ||
532 | * memscan - Find a character in an area of memory. | ||
533 | * @addr: The memory area | ||
534 | * @c: The byte to search for | ||
535 | * @size: The size of the area. | ||
536 | * | ||
537 | * returns the address of the first occurrence of @c, or 1 byte past | ||
538 | * the area if @c is not found | ||
539 | */ | ||
540 | void * memscan(void * addr, int c, size_t size) | ||
541 | { | ||
542 | unsigned char * p = (unsigned char *) addr; | ||
543 | |||
544 | while (size) { | ||
545 | if (*p == c) | ||
546 | return (void *) p; | ||
547 | p++; | ||
548 | size--; | ||
549 | } | ||
550 | return (void *) p; | ||
551 | } | ||
552 | EXPORT_SYMBOL(memscan); | ||
553 | #endif | ||
554 | |||
555 | #ifndef __HAVE_ARCH_STRSTR | ||
556 | /** | ||
557 | * strstr - Find the first substring in a %NUL terminated string | ||
558 | * @s1: The string to be searched | ||
559 | * @s2: The string to search for | ||
560 | */ | ||
561 | char * strstr(const char * s1,const char * s2) | ||
562 | { | ||
563 | int l1, l2; | ||
564 | |||
565 | l2 = strlen(s2); | ||
566 | if (!l2) | ||
567 | return (char *) s1; | ||
568 | l1 = strlen(s1); | ||
569 | while (l1 >= l2) { | ||
570 | l1--; | ||
571 | if (!memcmp(s1,s2,l2)) | ||
572 | return (char *) s1; | ||
573 | s1++; | ||
574 | } | ||
575 | return NULL; | ||
576 | } | ||
577 | EXPORT_SYMBOL(strstr); | ||
578 | #endif | ||
579 | |||
580 | #ifndef __HAVE_ARCH_MEMCHR | ||
581 | /** | ||
582 | * memchr - Find a character in an area of memory. | ||
583 | * @s: The memory area | ||
584 | * @c: The byte to search for | ||
585 | * @n: The size of the area. | ||
586 | * | ||
587 | * returns the address of the first occurrence of @c, or %NULL | ||
588 | * if @c is not found | ||
589 | */ | ||
590 | void *memchr(const void *s, int c, size_t n) | ||
591 | { | ||
592 | const unsigned char *p = s; | ||
593 | while (n-- != 0) { | ||
594 | if ((unsigned char)c == *p++) { | ||
595 | return (void *)(p-1); | ||
596 | } | ||
597 | } | ||
598 | return NULL; | ||
599 | } | ||
600 | EXPORT_SYMBOL(memchr); | ||
601 | #endif | ||
diff --git a/lib/vsprintf.c b/lib/vsprintf.c new file mode 100644 index 000000000000..a9bda0a361f3 --- /dev/null +++ b/lib/vsprintf.c | |||
@@ -0,0 +1,846 @@ | |||
1 | /* | ||
2 | * linux/lib/vsprintf.c | ||
3 | * | ||
4 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
5 | */ | ||
6 | |||
7 | /* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */ | ||
8 | /* | ||
9 | * Wirzenius wrote this portably, Torvalds fucked it up :-) | ||
10 | */ | ||
11 | |||
12 | /* | ||
13 | * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com> | ||
14 | * - changed to provide snprintf and vsnprintf functions | ||
15 | * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de> | ||
16 | * - scnprintf and vscnprintf | ||
17 | */ | ||
18 | |||
19 | #include <stdarg.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/ctype.h> | ||
24 | #include <linux/kernel.h> | ||
25 | |||
26 | #include <asm/div64.h> | ||
27 | |||
28 | /** | ||
29 | * simple_strtoul - convert a string to an unsigned long | ||
30 | * @cp: The start of the string | ||
31 | * @endp: A pointer to the end of the parsed string will be placed here | ||
32 | * @base: The number base to use | ||
33 | */ | ||
34 | unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base) | ||
35 | { | ||
36 | unsigned long result = 0,value; | ||
37 | |||
38 | if (!base) { | ||
39 | base = 10; | ||
40 | if (*cp == '0') { | ||
41 | base = 8; | ||
42 | cp++; | ||
43 | if ((toupper(*cp) == 'X') && isxdigit(cp[1])) { | ||
44 | cp++; | ||
45 | base = 16; | ||
46 | } | ||
47 | } | ||
48 | } else if (base == 16) { | ||
49 | if (cp[0] == '0' && toupper(cp[1]) == 'X') | ||
50 | cp += 2; | ||
51 | } | ||
52 | while (isxdigit(*cp) && | ||
53 | (value = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10) < base) { | ||
54 | result = result*base + value; | ||
55 | cp++; | ||
56 | } | ||
57 | if (endp) | ||
58 | *endp = (char *)cp; | ||
59 | return result; | ||
60 | } | ||
61 | |||
62 | EXPORT_SYMBOL(simple_strtoul); | ||
63 | |||
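An editorial example of the prefix handling above: with @base 0 the radix is auto-detected, and @endp (when non-NULL) is left pointing at the first unparsed character.

char *end;
unsigned long v;

v = simple_strtoul("0x1a9k", &end, 0);	/* v == 0x1a9, *end == 'k' */
v = simple_strtoul("0755", &end, 0);	/* leading 0: octal, v == 493 */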
64 | /** | ||
65 | * simple_strtol - convert a string to a signed long | ||
66 | * @cp: The start of the string | ||
67 | * @endp: A pointer to the end of the parsed string will be placed here | ||
68 | * @base: The number base to use | ||
69 | */ | ||
70 | long simple_strtol(const char *cp,char **endp,unsigned int base) | ||
71 | { | ||
72 | if (*cp == '-') | ||
73 | return -simple_strtoul(cp+1,endp,base); | ||
74 | return simple_strtoul(cp,endp,base); | ||
75 | } | ||
76 | |||
77 | EXPORT_SYMBOL(simple_strtol); | ||
78 | |||
79 | /** | ||
80 | * simple_strtoull - convert a string to an unsigned long long | ||
81 | * @cp: The start of the string | ||
82 | * @endp: A pointer to the end of the parsed string will be placed here | ||
83 | * @base: The number base to use | ||
84 | */ | ||
85 | unsigned long long simple_strtoull(const char *cp,char **endp,unsigned int base) | ||
86 | { | ||
87 | unsigned long long result = 0,value; | ||
88 | |||
89 | if (!base) { | ||
90 | base = 10; | ||
91 | if (*cp == '0') { | ||
92 | base = 8; | ||
93 | cp++; | ||
94 | if ((toupper(*cp) == 'X') && isxdigit(cp[1])) { | ||
95 | cp++; | ||
96 | base = 16; | ||
97 | } | ||
98 | } | ||
99 | } else if (base == 16) { | ||
100 | if (cp[0] == '0' && toupper(cp[1]) == 'X') | ||
101 | cp += 2; | ||
102 | } | ||
103 | while (isxdigit(*cp) && (value = isdigit(*cp) ? *cp-'0' : (islower(*cp) | ||
104 | ? toupper(*cp) : *cp)-'A'+10) < base) { | ||
105 | result = result*base + value; | ||
106 | cp++; | ||
107 | } | ||
108 | if (endp) | ||
109 | *endp = (char *)cp; | ||
110 | return result; | ||
111 | } | ||
112 | |||
113 | EXPORT_SYMBOL(simple_strtoull); | ||
114 | |||
115 | /** | ||
116 | * simple_strtoll - convert a string to a signed long long | ||
117 | * @cp: The start of the string | ||
118 | * @endp: A pointer to the end of the parsed string will be placed here | ||
119 | * @base: The number base to use | ||
120 | */ | ||
121 | long long simple_strtoll(const char *cp,char **endp,unsigned int base) | ||
122 | { | ||
123 | if (*cp == '-') | ||
124 | return -simple_strtoull(cp+1,endp,base); | ||
125 | return simple_strtoull(cp,endp,base); | ||
126 | } | ||
127 | |||
128 | static int skip_atoi(const char **s) | ||
129 | { | ||
130 | int i=0; | ||
131 | |||
132 | while (isdigit(**s)) | ||
133 | i = i*10 + *((*s)++) - '0'; | ||
134 | return i; | ||
135 | } | ||
136 | |||
137 | #define ZEROPAD 1 /* pad with zero */ | ||
138 | #define SIGN 2 /* unsigned/signed long */ | ||
139 | #define PLUS 4 /* show plus */ | ||
140 | #define SPACE 8 /* space if plus */ | ||
141 | #define LEFT 16 /* left justified */ | ||
142 | #define SPECIAL 32 /* 0x */ | ||
143 | #define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ | ||
144 | |||
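For orientation (editorial): the format parser below maps printf-style flag characters and conversions onto these bits, for example:

/*
 *   "%-12ld"  ->  LEFT | SIGN,        field_width = 12
 *   "%+d"     ->  PLUS | SIGN
 *   "%#010x"  ->  SPECIAL | ZEROPAD,  field_width = 10
 *   "%08X"    ->  ZEROPAD | LARGE,    field_width = 8
 */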
145 | static char * number(char * buf, char * end, unsigned long long num, int base, int size, int precision, int type) | ||
146 | { | ||
147 | char c,sign,tmp[66]; | ||
148 | const char *digits; | ||
149 | static const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; | ||
150 | static const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; | ||
151 | int i; | ||
152 | |||
153 | digits = (type & LARGE) ? large_digits : small_digits; | ||
154 | if (type & LEFT) | ||
155 | type &= ~ZEROPAD; | ||
156 | if (base < 2 || base > 36) | ||
157 | return NULL; | ||
158 | c = (type & ZEROPAD) ? '0' : ' '; | ||
159 | sign = 0; | ||
160 | if (type & SIGN) { | ||
161 | if ((signed long long) num < 0) { | ||
162 | sign = '-'; | ||
163 | num = - (signed long long) num; | ||
164 | size--; | ||
165 | } else if (type & PLUS) { | ||
166 | sign = '+'; | ||
167 | size--; | ||
168 | } else if (type & SPACE) { | ||
169 | sign = ' '; | ||
170 | size--; | ||
171 | } | ||
172 | } | ||
173 | if (type & SPECIAL) { | ||
174 | if (base == 16) | ||
175 | size -= 2; | ||
176 | else if (base == 8) | ||
177 | size--; | ||
178 | } | ||
179 | i = 0; | ||
180 | if (num == 0) | ||
181 | tmp[i++]='0'; | ||
182 | else while (num != 0) | ||
183 | tmp[i++] = digits[do_div(num,base)]; | ||
184 | if (i > precision) | ||
185 | precision = i; | ||
186 | size -= precision; | ||
187 | if (!(type&(ZEROPAD+LEFT))) { | ||
188 | while(size-->0) { | ||
189 | if (buf <= end) | ||
190 | *buf = ' '; | ||
191 | ++buf; | ||
192 | } | ||
193 | } | ||
194 | if (sign) { | ||
195 | if (buf <= end) | ||
196 | *buf = sign; | ||
197 | ++buf; | ||
198 | } | ||
199 | if (type & SPECIAL) { | ||
200 | if (base==8) { | ||
201 | if (buf <= end) | ||
202 | *buf = '0'; | ||
203 | ++buf; | ||
204 | } else if (base==16) { | ||
205 | if (buf <= end) | ||
206 | *buf = '0'; | ||
207 | ++buf; | ||
208 | if (buf <= end) | ||
209 | *buf = digits[33]; | ||
210 | ++buf; | ||
211 | } | ||
212 | } | ||
213 | if (!(type & LEFT)) { | ||
214 | while (size-- > 0) { | ||
215 | if (buf <= end) | ||
216 | *buf = c; | ||
217 | ++buf; | ||
218 | } | ||
219 | } | ||
220 | while (i < precision--) { | ||
221 | if (buf <= end) | ||
222 | *buf = '0'; | ||
223 | ++buf; | ||
224 | } | ||
225 | while (i-- > 0) { | ||
226 | if (buf <= end) | ||
227 | *buf = tmp[i]; | ||
228 | ++buf; | ||
229 | } | ||
230 | while (size-- > 0) { | ||
231 | if (buf <= end) | ||
232 | *buf = ' '; | ||
233 | ++buf; | ||
234 | } | ||
235 | return buf; | ||
236 | } | ||
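/*
 * Editor's note, not in the original file: a worked example of the
 * emission order implemented above (sign, then the "0"/"0x" prefix,
 * then zero padding, then precision zeroes, then the digits, then any
 * right padding).  number(buf, end, 0x2a, 16, 10, -1, SPECIAL|ZEROPAD)
 * produces "0x" + six pad '0's + "2a", i.e. "0x0000002a", exactly 10
 * characters wide.
 */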
237 | |||
238 | /** | ||
239 | * vsnprintf - Format a string and place it in a buffer | ||
240 | * @buf: The buffer to place the result into | ||
241 | * @size: The size of the buffer, including the trailing null space | ||
242 | * @fmt: The format string to use | ||
243 | * @args: Arguments for the format string | ||
244 | * | ||
245 | * The return value is the number of characters which would | ||
246 | * be generated for the given input, excluding the trailing | ||
247 | * '\0', as per ISO C99. If you want to have the exact | ||
248 | * number of characters written into @buf as return value | ||
249 | * (not including the trailing '\0'), use vscnprintf. If the | ||
250 | * return is greater than or equal to @size, the resulting | ||
251 | * string is truncated. | ||
252 | * | ||
253 | * Call this function if you are already dealing with a va_list. | ||
254 | * You probably want snprintf instead. | ||
255 | */ | ||
256 | int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | ||
257 | { | ||
258 | int len; | ||
259 | unsigned long long num; | ||
260 | int i, base; | ||
261 | char *str, *end, c; | ||
262 | const char *s; | ||
263 | |||
264 | int flags; /* flags to number() */ | ||
265 | |||
266 | int field_width; /* width of output field */ | ||
267 | int precision; /* min. # of digits for integers; max | ||
268 | number of chars from string */ | ||
269 | int qualifier; /* 'h', 'l', or 'L' for integer fields */ | ||
270 | /* 'z' support added 23/7/1999 S.H. */ | ||
271 | /* 'z' changed to 'Z' --davidm 1/25/99 */ | ||
272 | |||
273 | /* Reject out-of-range values early */ | ||
274 | if (unlikely((int) size < 0)) { | ||
275 | /* There can be only one.. */ | ||
276 | static int warn = 1; | ||
277 | WARN_ON(warn); | ||
278 | warn = 0; | ||
279 | return 0; | ||
280 | } | ||
281 | |||
282 | str = buf; | ||
283 | end = buf + size - 1; | ||
284 | |||
285 | if (end < buf - 1) { | ||
286 | end = ((void *) -1); | ||
287 | size = end - buf + 1; | ||
288 | } | ||
289 | |||
290 | for (; *fmt ; ++fmt) { | ||
291 | if (*fmt != '%') { | ||
292 | if (str <= end) | ||
293 | *str = *fmt; | ||
294 | ++str; | ||
295 | continue; | ||
296 | } | ||
297 | |||
298 | /* process flags */ | ||
299 | flags = 0; | ||
300 | repeat: | ||
301 | ++fmt; /* this also skips first '%' */ | ||
302 | switch (*fmt) { | ||
303 | case '-': flags |= LEFT; goto repeat; | ||
304 | case '+': flags |= PLUS; goto repeat; | ||
305 | case ' ': flags |= SPACE; goto repeat; | ||
306 | case '#': flags |= SPECIAL; goto repeat; | ||
307 | case '0': flags |= ZEROPAD; goto repeat; | ||
308 | } | ||
309 | |||
310 | /* get field width */ | ||
311 | field_width = -1; | ||
312 | if (isdigit(*fmt)) | ||
313 | field_width = skip_atoi(&fmt); | ||
314 | else if (*fmt == '*') { | ||
315 | ++fmt; | ||
316 | /* it's the next argument */ | ||
317 | field_width = va_arg(args, int); | ||
318 | if (field_width < 0) { | ||
319 | field_width = -field_width; | ||
320 | flags |= LEFT; | ||
321 | } | ||
322 | } | ||
323 | |||
324 | /* get the precision */ | ||
325 | precision = -1; | ||
326 | if (*fmt == '.') { | ||
327 | ++fmt; | ||
328 | if (isdigit(*fmt)) | ||
329 | precision = skip_atoi(&fmt); | ||
330 | else if (*fmt == '*') { | ||
331 | ++fmt; | ||
332 | /* it's the next argument */ | ||
333 | precision = va_arg(args, int); | ||
334 | } | ||
335 | if (precision < 0) | ||
336 | precision = 0; | ||
337 | } | ||
338 | |||
339 | /* get the conversion qualifier */ | ||
340 | qualifier = -1; | ||
341 | if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || | ||
342 | *fmt =='Z' || *fmt == 'z') { | ||
343 | qualifier = *fmt; | ||
344 | ++fmt; | ||
345 | if (qualifier == 'l' && *fmt == 'l') { | ||
346 | qualifier = 'L'; | ||
347 | ++fmt; | ||
348 | } | ||
349 | } | ||
350 | |||
351 | /* default base */ | ||
352 | base = 10; | ||
353 | |||
354 | switch (*fmt) { | ||
355 | case 'c': | ||
356 | if (!(flags & LEFT)) { | ||
357 | while (--field_width > 0) { | ||
358 | if (str <= end) | ||
359 | *str = ' '; | ||
360 | ++str; | ||
361 | } | ||
362 | } | ||
363 | c = (unsigned char) va_arg(args, int); | ||
364 | if (str <= end) | ||
365 | *str = c; | ||
366 | ++str; | ||
367 | while (--field_width > 0) { | ||
368 | if (str <= end) | ||
369 | *str = ' '; | ||
370 | ++str; | ||
371 | } | ||
372 | continue; | ||
373 | |||
374 | case 's': | ||
375 | s = va_arg(args, char *); | ||
376 | if ((unsigned long)s < PAGE_SIZE) | ||
377 | s = "<NULL>"; | ||
378 | |||
379 | len = strnlen(s, precision); | ||
380 | |||
381 | if (!(flags & LEFT)) { | ||
382 | while (len < field_width--) { | ||
383 | if (str <= end) | ||
384 | *str = ' '; | ||
385 | ++str; | ||
386 | } | ||
387 | } | ||
388 | for (i = 0; i < len; ++i) { | ||
389 | if (str <= end) | ||
390 | *str = *s; | ||
391 | ++str; ++s; | ||
392 | } | ||
393 | while (len < field_width--) { | ||
394 | if (str <= end) | ||
395 | *str = ' '; | ||
396 | ++str; | ||
397 | } | ||
398 | continue; | ||
399 | |||
400 | case 'p': | ||
401 | if (field_width == -1) { | ||
402 | field_width = 2*sizeof(void *); | ||
403 | flags |= ZEROPAD; | ||
404 | } | ||
405 | str = number(str, end, | ||
406 | (unsigned long) va_arg(args, void *), | ||
407 | 16, field_width, precision, flags); | ||
408 | continue; | ||
409 | |||
410 | |||
411 | case 'n': | ||
412 | /* FIXME: | ||
413 | * What does C99 say about the overflow case here? */ | ||
414 | if (qualifier == 'l') { | ||
415 | long * ip = va_arg(args, long *); | ||
416 | *ip = (str - buf); | ||
417 | } else if (qualifier == 'Z' || qualifier == 'z') { | ||
418 | size_t * ip = va_arg(args, size_t *); | ||
419 | *ip = (str - buf); | ||
420 | } else { | ||
421 | int * ip = va_arg(args, int *); | ||
422 | *ip = (str - buf); | ||
423 | } | ||
424 | continue; | ||
425 | |||
426 | case '%': | ||
427 | if (str <= end) | ||
428 | *str = '%'; | ||
429 | ++str; | ||
430 | continue; | ||
431 | |||
432 | /* integer number formats - set up the flags and "break" */ | ||
433 | case 'o': | ||
434 | base = 8; | ||
435 | break; | ||
436 | |||
437 | case 'X': | ||
438 | flags |= LARGE; | ||
439 | case 'x': | ||
440 | base = 16; | ||
441 | break; | ||
442 | |||
443 | case 'd': | ||
444 | case 'i': | ||
445 | flags |= SIGN; | ||
446 | case 'u': | ||
447 | break; | ||
448 | |||
449 | default: | ||
450 | if (str <= end) | ||
451 | *str = '%'; | ||
452 | ++str; | ||
453 | if (*fmt) { | ||
454 | if (str <= end) | ||
455 | *str = *fmt; | ||
456 | ++str; | ||
457 | } else { | ||
458 | --fmt; | ||
459 | } | ||
460 | continue; | ||
461 | } | ||
462 | if (qualifier == 'L') | ||
463 | num = va_arg(args, long long); | ||
464 | else if (qualifier == 'l') { | ||
465 | num = va_arg(args, unsigned long); | ||
466 | if (flags & SIGN) | ||
467 | num = (signed long) num; | ||
468 | } else if (qualifier == 'Z' || qualifier == 'z') { | ||
469 | num = va_arg(args, size_t); | ||
470 | } else if (qualifier == 'h') { | ||
471 | num = (unsigned short) va_arg(args, int); | ||
472 | if (flags & SIGN) | ||
473 | num = (signed short) num; | ||
474 | } else { | ||
475 | num = va_arg(args, unsigned int); | ||
476 | if (flags & SIGN) | ||
477 | num = (signed int) num; | ||
478 | } | ||
479 | str = number(str, end, num, base, | ||
480 | field_width, precision, flags); | ||
481 | } | ||
482 | if (str <= end) | ||
483 | *str = '\0'; | ||
484 | else if (size > 0) | ||
485 | /* don't write out a null byte if the buf size is zero */ | ||
486 | *end = '\0'; | ||
487 | /* the trailing null byte doesn't count towards the total, | ||
488 | * so there is deliberately no final "++str" here | ||
489 | */ | ||
490 | return str-buf; | ||
491 | } | ||
492 | |||
493 | EXPORT_SYMBOL(vsnprintf); | ||
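/*
 * Editor's sketch, not part of the original file: the canonical way to
 * wrap vsnprintf() in a varargs helper (snprintf() below is exactly
 * this shape).  The hypothetical helper reports the would-be length,
 * which may be >= @len when the output was truncated.
 */
static int format_into_example(char *buf, size_t len, const char *fmt, ...)
{
	va_list args;
	int n;

	va_start(args, fmt);
	n = vsnprintf(buf, len, fmt, args);
	va_end(args);
	return n;
}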
494 | |||
495 | /** | ||
496 | * vscnprintf - Format a string and place it in a buffer | ||
497 | * @buf: The buffer to place the result into | ||
498 | * @size: The size of the buffer, including the trailing null space | ||
499 | * @fmt: The format string to use | ||
500 | * @args: Arguments for the format string | ||
501 | * | ||
502 | * The return value is the number of characters which have been written into | ||
503 | * the @buf not including the trailing '\0'. If @size is <= 0 the function | ||
504 | * returns 0. | ||
505 | * | ||
506 | * Call this function if you are already dealing with a va_list. | ||
507 | * You probably want scnprintf instead. | ||
508 | */ | ||
509 | int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) | ||
510 | { | ||
511 | int i; | ||
512 | |||
513 | i=vsnprintf(buf,size,fmt,args); | ||
514 | return (i >= size) ? (size - 1) : i; | ||
515 | } | ||
516 | |||
517 | EXPORT_SYMBOL(vscnprintf); | ||
518 | |||
519 | /** | ||
520 | * snprintf - Format a string and place it in a buffer | ||
521 | * @buf: The buffer to place the result into | ||
522 | * @size: The size of the buffer, including the trailing null space | ||
523 | * @fmt: The format string to use | ||
524 | * @...: Arguments for the format string | ||
525 | * | ||
526 | * The return value is the number of characters which would be | ||
527 | * generated for the given input, excluding the trailing null, | ||
528 | * as per ISO C99. If the return is greater than or equal to | ||
529 | * @size, the resulting string is truncated. | ||
530 | */ | ||
531 | int snprintf(char * buf, size_t size, const char *fmt, ...) | ||
532 | { | ||
533 | va_list args; | ||
534 | int i; | ||
535 | |||
536 | va_start(args, fmt); | ||
537 | i=vsnprintf(buf,size,fmt,args); | ||
538 | va_end(args); | ||
539 | return i; | ||
540 | } | ||
541 | |||
542 | EXPORT_SYMBOL(snprintf); | ||
543 | |||
544 | /** | ||
545 | * scnprintf - Format a string and place it in a buffer | ||
546 | * @buf: The buffer to place the result into | ||
547 | * @size: The size of the buffer, including the trailing null space | ||
548 | * @fmt: The format string to use | ||
549 | * @...: Arguments for the format string | ||
550 | * | ||
551 | * The return value is the number of characters written into @buf not including | ||
552 | * the trailing '\0'. If @size is <= 0 the function returns 0. If the return is | ||
553 | * greater than or equal to @size, the resulting string is truncated. | ||
554 | */ | ||
555 | |||
556 | int scnprintf(char * buf, size_t size, const char *fmt, ...) | ||
557 | { | ||
558 | va_list args; | ||
559 | int i; | ||
560 | |||
561 | va_start(args, fmt); | ||
562 | i = vsnprintf(buf, size, fmt, args); | ||
563 | va_end(args); | ||
564 | return (i >= size) ? (size - 1) : i; | ||
565 | } | ||
566 | EXPORT_SYMBOL(scnprintf); | ||
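/*
 * Editor's sketch, not in the original file: the append pattern that
 * scnprintf() is designed for.  Because the return value is the number
 * of characters actually stored (always < @size), successive calls can
 * advance through one fixed buffer without overflowing it.  Assumes
 * size > 0.
 */
static void scnprintf_append_example(char *buf, size_t size)
{
	int len = 0;

	len += scnprintf(buf + len, size - len, "pid=%d ", 1);
	len += scnprintf(buf + len, size - len, "state=%s", "running");
	/* buf now holds "pid=1 state=running", truncated if size is small */
}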
567 | |||
568 | /** | ||
569 | * vsprintf - Format a string and place it in a buffer | ||
570 | * @buf: The buffer to place the result into | ||
571 | * @fmt: The format string to use | ||
572 | * @args: Arguments for the format string | ||
573 | * | ||
574 | * The function returns the number of characters written | ||
575 | * into @buf. Use vsnprintf or vscnprintf in order to avoid | ||
576 | * buffer overflows. | ||
577 | * | ||
578 | * Call this function if you are already dealing with a va_list. | ||
579 | * You probably want sprintf instead. | ||
580 | */ | ||
581 | int vsprintf(char *buf, const char *fmt, va_list args) | ||
582 | { | ||
583 | return vsnprintf(buf, INT_MAX, fmt, args); | ||
584 | } | ||
585 | |||
586 | EXPORT_SYMBOL(vsprintf); | ||
587 | |||
588 | /** | ||
589 | * sprintf - Format a string and place it in a buffer | ||
590 | * @buf: The buffer to place the result into | ||
591 | * @fmt: The format string to use | ||
592 | * @...: Arguments for the format string | ||
593 | * | ||
594 | * The function returns the number of characters written | ||
595 | * into @buf. Use snprintf or scnprintf in order to avoid | ||
596 | * buffer overflows. | ||
597 | */ | ||
598 | int sprintf(char * buf, const char *fmt, ...) | ||
599 | { | ||
600 | va_list args; | ||
601 | int i; | ||
602 | |||
603 | va_start(args, fmt); | ||
604 | i=vsnprintf(buf, INT_MAX, fmt, args); | ||
605 | va_end(args); | ||
606 | return i; | ||
607 | } | ||
608 | |||
609 | EXPORT_SYMBOL(sprintf); | ||
610 | |||
611 | /** | ||
612 | * vsscanf - Unformat a buffer into a list of arguments | ||
613 | * @buf: input buffer | ||
614 | * @fmt: format of buffer | ||
615 | * @args: arguments | ||
616 | */ | ||
617 | int vsscanf(const char * buf, const char * fmt, va_list args) | ||
618 | { | ||
619 | const char *str = buf; | ||
620 | char *next; | ||
621 | char digit; | ||
622 | int num = 0; | ||
623 | int qualifier; | ||
624 | int base; | ||
625 | int field_width; | ||
626 | int is_sign = 0; | ||
627 | |||
628 | while(*fmt && *str) { | ||
629 | /* skip any white space in format */ | ||
630 | /* white space in format matches any amount of | ||
631 | * white space, including none, in the input. | ||
632 | */ | ||
633 | if (isspace(*fmt)) { | ||
634 | while (isspace(*fmt)) | ||
635 | ++fmt; | ||
636 | while (isspace(*str)) | ||
637 | ++str; | ||
638 | } | ||
639 | |||
640 | /* anything that is not a conversion must match exactly */ | ||
641 | if (*fmt != '%' && *fmt) { | ||
642 | if (*fmt++ != *str++) | ||
643 | break; | ||
644 | continue; | ||
645 | } | ||
646 | |||
647 | if (!*fmt) | ||
648 | break; | ||
649 | ++fmt; | ||
650 | |||
651 | /* skip this conversion. | ||
652 | * advance both strings to next white space | ||
653 | */ | ||
654 | if (*fmt == '*') { | ||
655 | while (!isspace(*fmt) && *fmt) | ||
656 | fmt++; | ||
657 | while (!isspace(*str) && *str) | ||
658 | str++; | ||
659 | continue; | ||
660 | } | ||
661 | |||
662 | /* get field width */ | ||
663 | field_width = -1; | ||
664 | if (isdigit(*fmt)) | ||
665 | field_width = skip_atoi(&fmt); | ||
666 | |||
667 | /* get conversion qualifier */ | ||
668 | qualifier = -1; | ||
669 | if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || | ||
670 | *fmt == 'Z' || *fmt == 'z') { | ||
671 | qualifier = *fmt++; | ||
672 | if (unlikely(qualifier == *fmt)) { | ||
673 | if (qualifier == 'h') { | ||
674 | qualifier = 'H'; | ||
675 | fmt++; | ||
676 | } else if (qualifier == 'l') { | ||
677 | qualifier = 'L'; | ||
678 | fmt++; | ||
679 | } | ||
680 | } | ||
681 | } | ||
682 | base = 10; | ||
683 | is_sign = 0; | ||
684 | |||
685 | if (!*fmt || !*str) | ||
686 | break; | ||
687 | |||
688 | switch(*fmt++) { | ||
689 | case 'c': | ||
690 | { | ||
691 | char *s = (char *) va_arg(args,char*); | ||
692 | if (field_width == -1) | ||
693 | field_width = 1; | ||
694 | do { | ||
695 | *s++ = *str++; | ||
696 | } while (--field_width > 0 && *str); | ||
697 | num++; | ||
698 | } | ||
699 | continue; | ||
700 | case 's': | ||
701 | { | ||
702 | char *s = (char *) va_arg(args, char *); | ||
703 | if(field_width == -1) | ||
704 | field_width = INT_MAX; | ||
705 | /* first, skip leading white space in buffer */ | ||
706 | while (isspace(*str)) | ||
707 | str++; | ||
708 | |||
709 | /* now copy until next white space */ | ||
710 | while (*str && !isspace(*str) && field_width--) { | ||
711 | *s++ = *str++; | ||
712 | } | ||
713 | *s = '\0'; | ||
714 | num++; | ||
715 | } | ||
716 | continue; | ||
717 | case 'n': | ||
718 | /* return number of characters read so far */ | ||
719 | { | ||
720 | int *i = (int *)va_arg(args,int*); | ||
721 | *i = str - buf; | ||
722 | } | ||
723 | continue; | ||
724 | case 'o': | ||
725 | base = 8; | ||
726 | break; | ||
727 | case 'x': | ||
728 | case 'X': | ||
729 | base = 16; | ||
730 | break; | ||
731 | case 'i': | ||
732 | base = 0; | ||
733 | case 'd': | ||
734 | is_sign = 1; | ||
735 | case 'u': | ||
736 | break; | ||
737 | case '%': | ||
738 | /* looking for '%' in str */ | ||
739 | if (*str++ != '%') | ||
740 | return num; | ||
741 | continue; | ||
742 | default: | ||
743 | /* invalid format; stop here */ | ||
744 | return num; | ||
745 | } | ||
746 | |||
747 | /* have some sort of integer conversion. | ||
748 | * first, skip white space in buffer. | ||
749 | */ | ||
750 | while (isspace(*str)) | ||
751 | str++; | ||
752 | |||
753 | digit = *str; | ||
754 | if (is_sign && digit == '-') | ||
755 | digit = *(str + 1); | ||
756 | |||
757 | if (!digit | ||
758 | || (base == 16 && !isxdigit(digit)) | ||
759 | || (base == 10 && !isdigit(digit)) | ||
760 | || (base == 8 && (!isdigit(digit) || digit > '7')) | ||
761 | || (base == 0 && !isdigit(digit))) | ||
762 | break; | ||
763 | |||
764 | switch(qualifier) { | ||
765 | case 'H': /* that's 'hh' in format */ | ||
766 | if (is_sign) { | ||
767 | signed char *s = (signed char *) va_arg(args,signed char *); | ||
768 | *s = (signed char) simple_strtol(str,&next,base); | ||
769 | } else { | ||
770 | unsigned char *s = (unsigned char *) va_arg(args, unsigned char *); | ||
771 | *s = (unsigned char) simple_strtoul(str, &next, base); | ||
772 | } | ||
773 | break; | ||
774 | case 'h': | ||
775 | if (is_sign) { | ||
776 | short *s = (short *) va_arg(args,short *); | ||
777 | *s = (short) simple_strtol(str,&next,base); | ||
778 | } else { | ||
779 | unsigned short *s = (unsigned short *) va_arg(args, unsigned short *); | ||
780 | *s = (unsigned short) simple_strtoul(str, &next, base); | ||
781 | } | ||
782 | break; | ||
783 | case 'l': | ||
784 | if (is_sign) { | ||
785 | long *l = (long *) va_arg(args,long *); | ||
786 | *l = simple_strtol(str,&next,base); | ||
787 | } else { | ||
788 | unsigned long *l = (unsigned long*) va_arg(args,unsigned long*); | ||
789 | *l = simple_strtoul(str,&next,base); | ||
790 | } | ||
791 | break; | ||
792 | case 'L': | ||
793 | if (is_sign) { | ||
794 | long long *l = (long long*) va_arg(args,long long *); | ||
795 | *l = simple_strtoll(str,&next,base); | ||
796 | } else { | ||
797 | unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*); | ||
798 | *l = simple_strtoull(str,&next,base); | ||
799 | } | ||
800 | break; | ||
801 | case 'Z': | ||
802 | case 'z': | ||
803 | { | ||
804 | size_t *s = (size_t*) va_arg(args,size_t*); | ||
805 | *s = (size_t) simple_strtoul(str,&next,base); | ||
806 | } | ||
807 | break; | ||
808 | default: | ||
809 | if (is_sign) { | ||
810 | int *i = (int *) va_arg(args, int*); | ||
811 | *i = (int) simple_strtol(str,&next,base); | ||
812 | } else { | ||
813 | unsigned int *i = (unsigned int*) va_arg(args, unsigned int*); | ||
814 | *i = (unsigned int) simple_strtoul(str,&next,base); | ||
815 | } | ||
816 | break; | ||
817 | } | ||
818 | num++; | ||
819 | |||
820 | if (!next) | ||
821 | break; | ||
822 | str = next; | ||
823 | } | ||
824 | return num; | ||
825 | } | ||
826 | |||
827 | EXPORT_SYMBOL(vsscanf); | ||
828 | |||
829 | /** | ||
830 | * sscanf - Unformat a buffer into a list of arguments | ||
831 | * @buf: input buffer | ||
832 | * @fmt: format of buffer | ||
833 | * @...: resulting arguments | ||
834 | */ | ||
835 | int sscanf(const char * buf, const char * fmt, ...) | ||
836 | { | ||
837 | va_list args; | ||
838 | int i; | ||
839 | |||
840 | va_start(args,fmt); | ||
841 | i = vsscanf(buf,fmt,args); | ||
842 | va_end(args); | ||
843 | return i; | ||
844 | } | ||
845 | |||
846 | EXPORT_SYMBOL(sscanf); | ||
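/*
 * Editor's sketch, not in the original file: a typical sscanf() call.
 * The return value counts successful assignments, so callers should
 * check it before trusting the outputs; '%n' and suppressed '%*'
 * conversions do not contribute to that count.
 */
static void sscanf_example(void)
{
	int major, minor;

	if (sscanf("2.6", "%d.%d", &major, &minor) == 2) {
		/* major == 2, minor == 6 */
	}
}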
diff --git a/lib/zlib_deflate/Makefile b/lib/zlib_deflate/Makefile new file mode 100644 index 000000000000..86275e3fdcbc --- /dev/null +++ b/lib/zlib_deflate/Makefile | |||
@@ -0,0 +1,11 @@ | |||
1 | # | ||
2 | # This is a modified version of zlib, which does all memory | ||
3 | # allocation ahead of time. | ||
4 | # | ||
5 | # This is the compression code, see zlib_inflate for the | ||
6 | # decompression code. | ||
7 | # | ||
8 | |||
9 | obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate.o | ||
10 | |||
11 | zlib_deflate-objs := deflate.o deftree.o deflate_syms.o | ||
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c new file mode 100644 index 000000000000..ad9a1bf4fc63 --- /dev/null +++ b/lib/zlib_deflate/deflate.c | |||
@@ -0,0 +1,1268 @@ | |||
1 | /* +++ deflate.c */ | ||
2 | /* deflate.c -- compress data using the deflation algorithm | ||
3 | * Copyright (C) 1995-1996 Jean-loup Gailly. | ||
4 | * For conditions of distribution and use, see copyright notice in zlib.h | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * ALGORITHM | ||
9 | * | ||
10 | * The "deflation" process depends on being able to identify portions | ||
11 | * of the input text which are identical to earlier input (within a | ||
12 | * sliding window trailing behind the input currently being processed). | ||
13 | * | ||
14 | * The most straightforward technique turns out to be the fastest for | ||
15 | * most input files: try all possible matches and select the longest. | ||
16 | * The key feature of this algorithm is that insertions into the string | ||
17 | * dictionary are very simple and thus fast, and deletions are avoided | ||
18 | * completely. Insertions are performed at each input character, whereas | ||
19 | * string matches are performed only when the previous match ends. So it | ||
20 | * is preferable to spend more time in matches to allow very fast string | ||
21 | * insertions and avoid deletions. The matching algorithm for small | ||
22 | * strings is inspired by that of Rabin & Karp. A brute force approach | ||
23 | * is used to find longer strings when a small match has been found. | ||
24 | * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze | ||
25 | * (by Leonid Broukhis). | ||
26 | * A previous version of this file used a more sophisticated algorithm | ||
27 | * (by Fiala and Greene) which is guaranteed to run in linear amortized | ||
28 | * time, but has a larger average cost, uses more memory and is patented. | ||
29 | * However the F&G algorithm may be faster for some highly redundant | ||
30 | * files if the parameter max_chain_length (described below) is too large. | ||
31 | * | ||
32 | * ACKNOWLEDGEMENTS | ||
33 | * | ||
34 | * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and | ||
35 | * I found it in 'freeze' written by Leonid Broukhis. | ||
36 | * Thanks to many people for bug reports and testing. | ||
37 | * | ||
38 | * REFERENCES | ||
39 | * | ||
40 | * Deutsch, L.P., "DEFLATE Compressed Data Format Specification". | ||
41 | * Available in ftp://ds.internic.net/rfc/rfc1951.txt | ||
42 | * | ||
43 | * A description of the Rabin and Karp algorithm is given in the book | ||
44 | * "Algorithms" by R. Sedgewick, Addison-Wesley, p252. | ||
45 | * | ||
46 | * Fiala, E.R., and Greene, D.H. | ||
47 | * Data Compression with Finite Windows, Comm. ACM, 32,4 (1989) 490-505 | ||
48 | * | ||
49 | */ | ||
50 | |||
51 | #include <linux/module.h> | ||
52 | #include <linux/zutil.h> | ||
53 | #include "defutil.h" | ||
54 | |||
55 | |||
56 | /* =========================================================================== | ||
57 | * Function prototypes. | ||
58 | */ | ||
59 | typedef enum { | ||
60 | need_more, /* block not completed, need more input or more output */ | ||
61 | block_done, /* block flush performed */ | ||
62 | finish_started, /* finish started, need only more output at next deflate */ | ||
63 | finish_done /* finish done, accept no more input or output */ | ||
64 | } block_state; | ||
65 | |||
66 | typedef block_state (*compress_func) (deflate_state *s, int flush); | ||
67 | /* Compression function. Returns the block state after the call. */ | ||
68 | |||
69 | static void fill_window (deflate_state *s); | ||
70 | static block_state deflate_stored (deflate_state *s, int flush); | ||
71 | static block_state deflate_fast (deflate_state *s, int flush); | ||
72 | static block_state deflate_slow (deflate_state *s, int flush); | ||
73 | static void lm_init (deflate_state *s); | ||
74 | static void putShortMSB (deflate_state *s, uInt b); | ||
75 | static void flush_pending (z_streamp strm); | ||
76 | static int read_buf (z_streamp strm, Byte *buf, unsigned size); | ||
77 | static uInt longest_match (deflate_state *s, IPos cur_match); | ||
78 | |||
79 | #ifdef DEBUG_ZLIB | ||
80 | static void check_match (deflate_state *s, IPos start, IPos match, | ||
81 | int length); | ||
82 | #endif | ||
83 | |||
84 | /* =========================================================================== | ||
85 | * Local data | ||
86 | */ | ||
87 | |||
88 | #define NIL 0 | ||
89 | /* Tail of hash chains */ | ||
90 | |||
91 | #ifndef TOO_FAR | ||
92 | # define TOO_FAR 4096 | ||
93 | #endif | ||
94 | /* Matches of length 3 are discarded if their distance exceeds TOO_FAR */ | ||
95 | |||
96 | #define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) | ||
97 | /* Minimum amount of lookahead, except at the end of the input file. | ||
98 | * See deflate.c for comments about the MIN_MATCH+1. | ||
99 | */ | ||
100 | |||
101 | /* Values for max_lazy_match, good_match and max_chain_length, depending on | ||
102 | * the desired pack level (0..9). The values given below have been tuned to | ||
103 | * exclude worst case performance for pathological files. Better values may be | ||
104 | * found for specific files. | ||
105 | */ | ||
106 | typedef struct config_s { | ||
107 | ush good_length; /* reduce lazy search above this match length */ | ||
108 | ush max_lazy; /* do not perform lazy search above this match length */ | ||
109 | ush nice_length; /* quit search above this match length */ | ||
110 | ush max_chain; | ||
111 | compress_func func; | ||
112 | } config; | ||
113 | |||
114 | static const config configuration_table[10] = { | ||
115 | /* good lazy nice chain */ | ||
116 | /* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ | ||
117 | /* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */ | ||
118 | /* 2 */ {4, 5, 16, 8, deflate_fast}, | ||
119 | /* 3 */ {4, 6, 32, 32, deflate_fast}, | ||
120 | |||
121 | /* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */ | ||
122 | /* 5 */ {8, 16, 32, 32, deflate_slow}, | ||
123 | /* 6 */ {8, 16, 128, 128, deflate_slow}, | ||
124 | /* 7 */ {8, 32, 128, 256, deflate_slow}, | ||
125 | /* 8 */ {32, 128, 258, 1024, deflate_slow}, | ||
126 | /* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */ | ||
127 | |||
128 | /* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4 | ||
129 | * For deflate_fast() (levels <= 3) good is ignored and lazy has a different | ||
130 | * meaning. | ||
131 | */ | ||
132 | |||
133 | #define EQUAL 0 | ||
134 | /* result of memcmp for equal strings */ | ||
135 | |||
136 | /* =========================================================================== | ||
137 | * Update a hash value with the given input byte | ||
138 | * IN assertion: all calls to UPDATE_HASH are made with consecutive | ||
139 | * input characters, so that a running hash key can be computed from the | ||
140 | * previous key instead of complete recalculation each time. | ||
141 | */ | ||
142 | #define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask) | ||
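/*
 * Editor's note, not in the original file: after MIN_MATCH (== 3)
 * consecutive UPDATE_HASH() calls on bytes b0, b1, b2 the key is
 *
 *   h = ((b0 << 2*hash_shift) ^ (b1 << hash_shift) ^ b2) & hash_mask
 *
 * and because hash_shift == (hash_bits+MIN_MATCH-1)/MIN_MATCH, one
 * further update shifts b0 by 3*hash_shift >= hash_bits, dropping it
 * from the key entirely -- which is what makes this a rolling hash
 * over the most recent MIN_MATCH input bytes.
 */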
143 | |||
144 | |||
145 | /* =========================================================================== | ||
146 | * Insert string str in the dictionary and set match_head to the previous head | ||
147 | * of the hash chain (the most recent string with same hash key). Return | ||
148 | * the previous length of the hash chain. | ||
149 | * IN assertion: all calls to INSERT_STRING are made with consecutive | ||
150 | * input characters and the first MIN_MATCH bytes of str are valid | ||
151 | * (except for the last MIN_MATCH-1 bytes of the input file). | ||
152 | */ | ||
153 | #define INSERT_STRING(s, str, match_head) \ | ||
154 | (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ | ||
155 | s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \ | ||
156 | s->head[s->ins_h] = (Pos)(str)) | ||
157 | |||
158 | /* =========================================================================== | ||
159 | * Initialize the hash table (avoiding 64K overflow for 16 bit systems). | ||
160 | * prev[] will be initialized on the fly. | ||
161 | */ | ||
162 | #define CLEAR_HASH(s) \ | ||
163 | s->head[s->hash_size-1] = NIL; \ | ||
164 | memset((char *)s->head, 0, (unsigned)(s->hash_size-1)*sizeof(*s->head)); | ||
165 | |||
166 | /* ========================================================================= */ | ||
167 | int zlib_deflateInit_( | ||
168 | z_streamp strm, | ||
169 | int level, | ||
170 | const char *version, | ||
171 | int stream_size | ||
172 | ) | ||
173 | { | ||
174 | return zlib_deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, | ||
175 | DEF_MEM_LEVEL, | ||
176 | Z_DEFAULT_STRATEGY, version, stream_size); | ||
177 | /* To do: ignore strm->next_in if we use it as window */ | ||
178 | } | ||
179 | |||
180 | /* ========================================================================= */ | ||
181 | int zlib_deflateInit2_( | ||
182 | z_streamp strm, | ||
183 | int level, | ||
184 | int method, | ||
185 | int windowBits, | ||
186 | int memLevel, | ||
187 | int strategy, | ||
188 | const char *version, | ||
189 | int stream_size | ||
190 | ) | ||
191 | { | ||
192 | deflate_state *s; | ||
193 | int noheader = 0; | ||
194 | static char* my_version = ZLIB_VERSION; | ||
195 | deflate_workspace *mem; | ||
196 | |||
197 | ush *overlay; | ||
198 | /* We overlay pending_buf and d_buf+l_buf. This works since the average | ||
199 | * output size for (length,distance) codes is <= 24 bits. | ||
200 | */ | ||
201 | |||
202 | if (version == NULL || version[0] != my_version[0] || | ||
203 | stream_size != sizeof(z_stream)) { | ||
204 | return Z_VERSION_ERROR; | ||
205 | } | ||
206 | if (strm == NULL) return Z_STREAM_ERROR; | ||
207 | |||
208 | strm->msg = NULL; | ||
209 | |||
210 | if (level == Z_DEFAULT_COMPRESSION) level = 6; | ||
211 | |||
212 | mem = (deflate_workspace *) strm->workspace; | ||
213 | |||
214 | if (windowBits < 0) { /* undocumented feature: suppress zlib header */ | ||
215 | noheader = 1; | ||
216 | windowBits = -windowBits; | ||
217 | } | ||
218 | if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED || | ||
219 | windowBits < 9 || windowBits > 15 || level < 0 || level > 9 || | ||
220 | strategy < 0 || strategy > Z_HUFFMAN_ONLY) { | ||
221 | return Z_STREAM_ERROR; | ||
222 | } | ||
223 | s = (deflate_state *) &(mem->deflate_memory); | ||
224 | strm->state = (struct internal_state *)s; | ||
225 | s->strm = strm; | ||
226 | |||
227 | s->noheader = noheader; | ||
228 | s->w_bits = windowBits; | ||
229 | s->w_size = 1 << s->w_bits; | ||
230 | s->w_mask = s->w_size - 1; | ||
231 | |||
232 | s->hash_bits = memLevel + 7; | ||
233 | s->hash_size = 1 << s->hash_bits; | ||
234 | s->hash_mask = s->hash_size - 1; | ||
235 | s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH); | ||
236 | |||
237 | s->window = (Byte *) mem->window_memory; | ||
238 | s->prev = (Pos *) mem->prev_memory; | ||
239 | s->head = (Pos *) mem->head_memory; | ||
240 | |||
241 | s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ | ||
242 | |||
243 | overlay = (ush *) mem->overlay_memory; | ||
244 | s->pending_buf = (uch *) overlay; | ||
245 | s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); | ||
246 | |||
247 | s->d_buf = overlay + s->lit_bufsize/sizeof(ush); | ||
248 | s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; | ||
249 | |||
250 | s->level = level; | ||
251 | s->strategy = strategy; | ||
252 | s->method = (Byte)method; | ||
253 | |||
254 | return zlib_deflateReset(strm); | ||
255 | } | ||
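/*
 * Editor's sketch, not part of the original file: setting up a kernel
 * deflate stream.  Unlike userland zlib, the caller preallocates the
 * entire workspace up front (zlib_deflate_workspacesize() plus
 * vmalloc() is the usual pairing elsewhere in the tree).
 * zlib_deflateInit() is the <linux/zlib.h> wrapper macro that supplies
 * the version/stream-size arguments checked above.
 */
static int deflate_init_example(z_streamp strm, void *workspace)
{
	strm->workspace = workspace;	/* must be workspacesize() bytes */
	return zlib_deflateInit(strm, Z_DEFAULT_COMPRESSION);
}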
256 | |||
257 | /* ========================================================================= */ | ||
258 | int zlib_deflateSetDictionary( | ||
259 | z_streamp strm, | ||
260 | const Byte *dictionary, | ||
261 | uInt dictLength | ||
262 | ) | ||
263 | { | ||
264 | deflate_state *s; | ||
265 | uInt length = dictLength; | ||
266 | uInt n; | ||
267 | IPos hash_head = 0; | ||
268 | |||
269 | if (strm == NULL || strm->state == NULL || dictionary == NULL) | ||
270 | return Z_STREAM_ERROR; | ||
271 | |||
272 | s = (deflate_state *) strm->state; | ||
273 | if (s->status != INIT_STATE) return Z_STREAM_ERROR; | ||
274 | |||
275 | strm->adler = zlib_adler32(strm->adler, dictionary, dictLength); | ||
276 | |||
277 | if (length < MIN_MATCH) return Z_OK; | ||
278 | if (length > MAX_DIST(s)) { | ||
279 | length = MAX_DIST(s); | ||
280 | #ifndef USE_DICT_HEAD | ||
281 | dictionary += dictLength - length; /* use the tail of the dictionary */ | ||
282 | #endif | ||
283 | } | ||
284 | memcpy((char *)s->window, dictionary, length); | ||
285 | s->strstart = length; | ||
286 | s->block_start = (long)length; | ||
287 | |||
288 | /* Insert all strings in the hash table (except for the last two bytes). | ||
289 | * s->lookahead stays null, so s->ins_h will be recomputed at the next | ||
290 | * call of fill_window. | ||
291 | */ | ||
292 | s->ins_h = s->window[0]; | ||
293 | UPDATE_HASH(s, s->ins_h, s->window[1]); | ||
294 | for (n = 0; n <= length - MIN_MATCH; n++) { | ||
295 | INSERT_STRING(s, n, hash_head); | ||
296 | } | ||
297 | if (hash_head) hash_head = 0; /* to make compiler happy */ | ||
298 | return Z_OK; | ||
299 | } | ||
300 | |||
301 | /* ========================================================================= */ | ||
302 | int zlib_deflateReset( | ||
303 | z_streamp strm | ||
304 | ) | ||
305 | { | ||
306 | deflate_state *s; | ||
307 | |||
308 | if (strm == NULL || strm->state == NULL) | ||
309 | return Z_STREAM_ERROR; | ||
310 | |||
311 | strm->total_in = strm->total_out = 0; | ||
312 | strm->msg = NULL; | ||
313 | strm->data_type = Z_UNKNOWN; | ||
314 | |||
315 | s = (deflate_state *)strm->state; | ||
316 | s->pending = 0; | ||
317 | s->pending_out = s->pending_buf; | ||
318 | |||
319 | if (s->noheader < 0) { | ||
320 | s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */ | ||
321 | } | ||
322 | s->status = s->noheader ? BUSY_STATE : INIT_STATE; | ||
323 | strm->adler = 1; | ||
324 | s->last_flush = Z_NO_FLUSH; | ||
325 | |||
326 | zlib_tr_init(s); | ||
327 | lm_init(s); | ||
328 | |||
329 | return Z_OK; | ||
330 | } | ||
331 | |||
332 | /* ========================================================================= */ | ||
333 | int zlib_deflateParams( | ||
334 | z_streamp strm, | ||
335 | int level, | ||
336 | int strategy | ||
337 | ) | ||
338 | { | ||
339 | deflate_state *s; | ||
340 | compress_func func; | ||
341 | int err = Z_OK; | ||
342 | |||
343 | if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; | ||
344 | s = (deflate_state *) strm->state; | ||
345 | |||
346 | if (level == Z_DEFAULT_COMPRESSION) { | ||
347 | level = 6; | ||
348 | } | ||
349 | if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) { | ||
350 | return Z_STREAM_ERROR; | ||
351 | } | ||
352 | func = configuration_table[s->level].func; | ||
353 | |||
354 | if (func != configuration_table[level].func && strm->total_in != 0) { | ||
355 | /* Flush the last buffer: */ | ||
356 | err = zlib_deflate(strm, Z_PARTIAL_FLUSH); | ||
357 | } | ||
358 | if (s->level != level) { | ||
359 | s->level = level; | ||
360 | s->max_lazy_match = configuration_table[level].max_lazy; | ||
361 | s->good_match = configuration_table[level].good_length; | ||
362 | s->nice_match = configuration_table[level].nice_length; | ||
363 | s->max_chain_length = configuration_table[level].max_chain; | ||
364 | } | ||
365 | s->strategy = strategy; | ||
366 | return err; | ||
367 | } | ||
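/*
 * Editor's sketch, not in the original file: retuning a live stream.
 * Any output buffered under the old parameters is flushed first (the
 * Z_PARTIAL_FLUSH above), so switching levels mid-stream is safe as
 * long as the caller keeps draining strm->next_out.
 */
static int deflate_speed_up_example(z_streamp strm)
{
	/* drop to level 1 (deflate_fast) without resetting the stream */
	return zlib_deflateParams(strm, 1, Z_DEFAULT_STRATEGY);
}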
368 | |||
369 | /* ========================================================================= | ||
370 | * Put a short in the pending buffer. The 16-bit value is put in MSB order. | ||
371 | * IN assertion: the stream state is correct and there is enough room in | ||
372 | * pending_buf. | ||
373 | */ | ||
374 | static void putShortMSB( | ||
375 | deflate_state *s, | ||
376 | uInt b | ||
377 | ) | ||
378 | { | ||
379 | put_byte(s, (Byte)(b >> 8)); | ||
380 | put_byte(s, (Byte)(b & 0xff)); | ||
381 | } | ||
382 | |||
383 | /* ========================================================================= | ||
384 | * Flush as much pending output as possible. All deflate() output goes | ||
385 | * through this function so some applications may wish to modify it | ||
386 | * to avoid allocating a large strm->next_out buffer and copying into it. | ||
387 | * (See also read_buf()). | ||
388 | */ | ||
389 | static void flush_pending( | ||
390 | z_streamp strm | ||
391 | ) | ||
392 | { | ||
393 | deflate_state *s = (deflate_state *) strm->state; | ||
394 | unsigned len = s->pending; | ||
395 | |||
396 | if (len > strm->avail_out) len = strm->avail_out; | ||
397 | if (len == 0) return; | ||
398 | |||
399 | if (strm->next_out != NULL) { | ||
400 | memcpy(strm->next_out, s->pending_out, len); | ||
401 | strm->next_out += len; | ||
402 | } | ||
403 | s->pending_out += len; | ||
404 | strm->total_out += len; | ||
405 | strm->avail_out -= len; | ||
406 | s->pending -= len; | ||
407 | if (s->pending == 0) { | ||
408 | s->pending_out = s->pending_buf; | ||
409 | } | ||
410 | } | ||
411 | |||
412 | /* ========================================================================= */ | ||
413 | int zlib_deflate( | ||
414 | z_streamp strm, | ||
415 | int flush | ||
416 | ) | ||
417 | { | ||
418 | int old_flush; /* value of flush param for previous deflate call */ | ||
419 | deflate_state *s; | ||
420 | |||
421 | if (strm == NULL || strm->state == NULL || | ||
422 | flush > Z_FINISH || flush < 0) { | ||
423 | return Z_STREAM_ERROR; | ||
424 | } | ||
425 | s = (deflate_state *) strm->state; | ||
426 | |||
427 | if ((strm->next_in == NULL && strm->avail_in != 0) || | ||
428 | (s->status == FINISH_STATE && flush != Z_FINISH)) { | ||
429 | return Z_STREAM_ERROR; | ||
430 | } | ||
431 | if (strm->avail_out == 0) return Z_BUF_ERROR; | ||
432 | |||
433 | s->strm = strm; /* just in case */ | ||
434 | old_flush = s->last_flush; | ||
435 | s->last_flush = flush; | ||
436 | |||
437 | /* Write the zlib header */ | ||
438 | if (s->status == INIT_STATE) { | ||
439 | |||
440 | uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8; | ||
441 | uInt level_flags = (s->level-1) >> 1; | ||
442 | |||
443 | if (level_flags > 3) level_flags = 3; | ||
444 | header |= (level_flags << 6); | ||
445 | if (s->strstart != 0) header |= PRESET_DICT; | ||
446 | header += 31 - (header % 31); | ||
447 | |||
448 | s->status = BUSY_STATE; | ||
449 | putShortMSB(s, header); | ||
450 | |||
451 | /* Save the adler32 of the preset dictionary: */ | ||
452 | if (s->strstart != 0) { | ||
453 | putShortMSB(s, (uInt)(strm->adler >> 16)); | ||
454 | putShortMSB(s, (uInt)(strm->adler & 0xffff)); | ||
455 | } | ||
456 | strm->adler = 1L; | ||
457 | } | ||
458 | |||
459 | /* Flush as much pending output as possible */ | ||
460 | if (s->pending != 0) { | ||
461 | flush_pending(strm); | ||
462 | if (strm->avail_out == 0) { | ||
463 | /* Since avail_out is 0, deflate will be called again with | ||
464 | * more output space, but possibly with both pending and | ||
465 | * avail_in equal to zero. There won't be anything to do, | ||
466 | * but this is not an error situation so make sure we | ||
467 | * return OK instead of BUF_ERROR at next call of deflate: | ||
468 | */ | ||
469 | s->last_flush = -1; | ||
470 | return Z_OK; | ||
471 | } | ||
472 | |||
473 | /* Make sure there is something to do and avoid duplicate consecutive | ||
474 | * flushes. For repeated and useless calls with Z_FINISH, we keep | ||
475 | * returning Z_STREAM_END instead of Z_BUF_ERROR. | ||
476 | */ | ||
477 | } else if (strm->avail_in == 0 && flush <= old_flush && | ||
478 | flush != Z_FINISH) { | ||
479 | return Z_BUF_ERROR; | ||
480 | } | ||
481 | |||
482 | /* User must not provide more input after the first FINISH: */ | ||
483 | if (s->status == FINISH_STATE && strm->avail_in != 0) { | ||
484 | return Z_BUF_ERROR; | ||
485 | } | ||
486 | |||
487 | /* Start a new block or continue the current one. | ||
488 | */ | ||
489 | if (strm->avail_in != 0 || s->lookahead != 0 || | ||
490 | (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) { | ||
491 | block_state bstate; | ||
492 | |||
493 | bstate = (*(configuration_table[s->level].func))(s, flush); | ||
494 | |||
495 | if (bstate == finish_started || bstate == finish_done) { | ||
496 | s->status = FINISH_STATE; | ||
497 | } | ||
498 | if (bstate == need_more || bstate == finish_started) { | ||
499 | if (strm->avail_out == 0) { | ||
500 | s->last_flush = -1; /* avoid BUF_ERROR next call, see above */ | ||
501 | } | ||
502 | return Z_OK; | ||
503 | /* If flush != Z_NO_FLUSH && avail_out == 0, the next call | ||
504 | * of deflate should use the same flush parameter to make sure | ||
505 | * that the flush is complete. So we don't have to output an | ||
506 | * empty block here, this will be done at next call. This also | ||
507 | * ensures that for a very small output buffer, we emit at most | ||
508 | * one empty block. | ||
509 | */ | ||
510 | } | ||
511 | if (bstate == block_done) { | ||
512 | if (flush == Z_PARTIAL_FLUSH) { | ||
513 | zlib_tr_align(s); | ||
514 | } else if (flush == Z_PACKET_FLUSH) { | ||
515 | /* Output just the 3-bit `stored' block type value, | ||
516 | but not a zero length. */ | ||
517 | zlib_tr_stored_type_only(s); | ||
518 | } else { /* FULL_FLUSH or SYNC_FLUSH */ | ||
519 | zlib_tr_stored_block(s, (char*)0, 0L, 0); | ||
520 | /* For a full flush, this empty block will be recognized | ||
521 | * as a special marker by inflate_sync(). | ||
522 | */ | ||
523 | if (flush == Z_FULL_FLUSH) { | ||
524 | CLEAR_HASH(s); /* forget history */ | ||
525 | } | ||
526 | } | ||
527 | flush_pending(strm); | ||
528 | if (strm->avail_out == 0) { | ||
529 | s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */ | ||
530 | return Z_OK; | ||
531 | } | ||
532 | } | ||
533 | } | ||
534 | Assert(strm->avail_out > 0, "bug2"); | ||
535 | |||
536 | if (flush != Z_FINISH) return Z_OK; | ||
537 | if (s->noheader) return Z_STREAM_END; | ||
538 | |||
539 | /* Write the zlib trailer (adler32) */ | ||
540 | putShortMSB(s, (uInt)(strm->adler >> 16)); | ||
541 | putShortMSB(s, (uInt)(strm->adler & 0xffff)); | ||
542 | flush_pending(strm); | ||
543 | /* If avail_out is zero, the application will call deflate again | ||
544 | * to flush the rest. | ||
545 | */ | ||
546 | s->noheader = -1; /* write the trailer only once! */ | ||
547 | return s->pending != 0 ? Z_OK : Z_STREAM_END; | ||
548 | } | ||
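/*
 * Editor's sketch, not part of the original file: the standard
 * zlib_deflate() drain loop for one input buffer.  Assumes an
 * already-initialized stream (see the init sketch earlier); 'out' is a
 * hypothetical staging buffer the caller empties on every iteration.
 */
static int deflate_all_example(z_streamp strm, Byte *in, uInt in_len,
			       Byte *out, uInt out_len)
{
	int rc;

	strm->next_in = in;
	strm->avail_in = in_len;
	do {
		strm->next_out = out;
		strm->avail_out = out_len;
		rc = zlib_deflate(strm, Z_FINISH);
		/* consume out_len - strm->avail_out bytes of 'out' here */
	} while (rc == Z_OK);

	return rc == Z_STREAM_END ? 0 : rc;
}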
549 | |||
550 | /* ========================================================================= */ | ||
551 | int zlib_deflateEnd( | ||
552 | z_streamp strm | ||
553 | ) | ||
554 | { | ||
555 | int status; | ||
556 | deflate_state *s; | ||
557 | |||
558 | if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; | ||
559 | s = (deflate_state *) strm->state; | ||
560 | |||
561 | status = s->status; | ||
562 | if (status != INIT_STATE && status != BUSY_STATE && | ||
563 | status != FINISH_STATE) { | ||
564 | return Z_STREAM_ERROR; | ||
565 | } | ||
566 | |||
567 | strm->state = NULL; | ||
568 | |||
569 | return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; | ||
570 | } | ||
571 | |||
572 | /* ========================================================================= | ||
573 | * Copy the source state to the destination state. | ||
574 | */ | ||
575 | int zlib_deflateCopy ( | ||
576 | z_streamp dest, | ||
577 | z_streamp source | ||
578 | ) | ||
579 | { | ||
580 | #ifdef MAXSEG_64K | ||
581 | return Z_STREAM_ERROR; | ||
582 | #else | ||
583 | deflate_state *ds; | ||
584 | deflate_state *ss; | ||
585 | ush *overlay; | ||
586 | deflate_workspace *mem; | ||
587 | |||
588 | |||
589 | if (source == NULL || dest == NULL || source->state == NULL) { | ||
590 | return Z_STREAM_ERROR; | ||
591 | } | ||
592 | |||
593 | ss = (deflate_state *) source->state; | ||
594 | |||
595 | *dest = *source; | ||
596 | |||
597 | mem = (deflate_workspace *) dest->workspace; | ||
598 | |||
599 | ds = &(mem->deflate_memory); | ||
600 | |||
601 | dest->state = (struct internal_state *) ds; | ||
602 | *ds = *ss; | ||
603 | ds->strm = dest; | ||
604 | |||
605 | ds->window = (Byte *) mem->window_memory; | ||
606 | ds->prev = (Pos *) mem->prev_memory; | ||
607 | ds->head = (Pos *) mem->head_memory; | ||
608 | overlay = (ush *) mem->overlay_memory; | ||
609 | ds->pending_buf = (uch *) overlay; | ||
610 | |||
611 | memcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); | ||
612 | memcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos)); | ||
613 | memcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos)); | ||
614 | memcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); | ||
615 | |||
616 | ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); | ||
617 | ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); | ||
618 | ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; | ||
619 | |||
620 | ds->l_desc.dyn_tree = ds->dyn_ltree; | ||
621 | ds->d_desc.dyn_tree = ds->dyn_dtree; | ||
622 | ds->bl_desc.dyn_tree = ds->bl_tree; | ||
623 | |||
624 | return Z_OK; | ||
625 | #endif | ||
626 | } | ||
627 | |||
628 | /* =========================================================================== | ||
629 | * Read a new buffer from the current input stream, update the adler32 | ||
630 | * and total number of bytes read. All deflate() input goes through | ||
631 | * this function so some applications may wish to modify it to avoid | ||
632 | * allocating a large strm->next_in buffer and copying from it. | ||
633 | * (See also flush_pending()). | ||
634 | */ | ||
635 | static int read_buf( | ||
636 | z_streamp strm, | ||
637 | Byte *buf, | ||
638 | unsigned size | ||
639 | ) | ||
640 | { | ||
641 | unsigned len = strm->avail_in; | ||
642 | |||
643 | if (len > size) len = size; | ||
644 | if (len == 0) return 0; | ||
645 | |||
646 | strm->avail_in -= len; | ||
647 | |||
648 | if (!((deflate_state *)(strm->state))->noheader) { | ||
649 | strm->adler = zlib_adler32(strm->adler, strm->next_in, len); | ||
650 | } | ||
651 | memcpy(buf, strm->next_in, len); | ||
652 | strm->next_in += len; | ||
653 | strm->total_in += len; | ||
654 | |||
655 | return (int)len; | ||
656 | } | ||
657 | |||
658 | /* =========================================================================== | ||
659 | * Initialize the "longest match" routines for a new zlib stream | ||
660 | */ | ||
661 | static void lm_init( | ||
662 | deflate_state *s | ||
663 | ) | ||
664 | { | ||
665 | s->window_size = (ulg)2L*s->w_size; | ||
666 | |||
667 | CLEAR_HASH(s); | ||
668 | |||
669 | /* Set the default configuration parameters: | ||
670 | */ | ||
671 | s->max_lazy_match = configuration_table[s->level].max_lazy; | ||
672 | s->good_match = configuration_table[s->level].good_length; | ||
673 | s->nice_match = configuration_table[s->level].nice_length; | ||
674 | s->max_chain_length = configuration_table[s->level].max_chain; | ||
675 | |||
676 | s->strstart = 0; | ||
677 | s->block_start = 0L; | ||
678 | s->lookahead = 0; | ||
679 | s->match_length = s->prev_length = MIN_MATCH-1; | ||
680 | s->match_available = 0; | ||
681 | s->ins_h = 0; | ||
682 | } | ||
683 | |||
684 | /* =========================================================================== | ||
685 | * Set match_start to the longest match starting at the given string and | ||
686 | * return its length. Matches shorter or equal to prev_length are discarded, | ||
687 | * in which case the result is equal to prev_length and match_start is | ||
688 | * garbage. | ||
689 | * IN assertions: cur_match is the head of the hash chain for the current | ||
690 | * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 | ||
691 | * OUT assertion: the match length is not greater than s->lookahead. | ||
692 | */ | ||
693 | /* For 80x86 and 680x0, an optimized version will be provided in match.asm or | ||
694 | * match.S. The code will be functionally equivalent. | ||
695 | */ | ||
696 | static uInt longest_match( | ||
697 | deflate_state *s, | ||
698 | IPos cur_match /* current match */ | ||
699 | ) | ||
700 | { | ||
701 | unsigned chain_length = s->max_chain_length;/* max hash chain length */ | ||
702 | register Byte *scan = s->window + s->strstart; /* current string */ | ||
703 | register Byte *match; /* matched string */ | ||
704 | register int len; /* length of current match */ | ||
705 | int best_len = s->prev_length; /* best match length so far */ | ||
706 | int nice_match = s->nice_match; /* stop if match long enough */ | ||
707 | IPos limit = s->strstart > (IPos)MAX_DIST(s) ? | ||
708 | s->strstart - (IPos)MAX_DIST(s) : NIL; | ||
709 | /* Stop when cur_match becomes <= limit. To simplify the code, | ||
710 | * we prevent matches with the string of window index 0. | ||
711 | */ | ||
712 | Pos *prev = s->prev; | ||
713 | uInt wmask = s->w_mask; | ||
714 | |||
715 | #ifdef UNALIGNED_OK | ||
716 | /* Compare two bytes at a time. Note: this is not always beneficial. | ||
717 | * Try with and without -DUNALIGNED_OK to check. | ||
718 | */ | ||
719 | register Byte *strend = s->window + s->strstart + MAX_MATCH - 1; | ||
720 | register ush scan_start = *(ush*)scan; | ||
721 | register ush scan_end = *(ush*)(scan+best_len-1); | ||
722 | #else | ||
723 | register Byte *strend = s->window + s->strstart + MAX_MATCH; | ||
724 | register Byte scan_end1 = scan[best_len-1]; | ||
725 | register Byte scan_end = scan[best_len]; | ||
726 | #endif | ||
727 | |||
728 | /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. | ||
729 | * It is easy to get rid of this optimization if necessary. | ||
730 | */ | ||
731 | Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); | ||
732 | |||
733 | /* Do not waste too much time if we already have a good match: */ | ||
734 | if (s->prev_length >= s->good_match) { | ||
735 | chain_length >>= 2; | ||
736 | } | ||
737 | /* Do not look for matches beyond the end of the input. This is necessary | ||
738 | * to make deflate deterministic. | ||
739 | */ | ||
740 | if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; | ||
741 | |||
742 | Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); | ||
743 | |||
744 | do { | ||
745 | Assert(cur_match < s->strstart, "no future"); | ||
746 | match = s->window + cur_match; | ||
747 | |||
748 | /* Skip to next match if the match length cannot increase | ||
749 | * or if the match length is less than 2: | ||
750 | */ | ||
751 | #if (defined(UNALIGNED_OK) && MAX_MATCH == 258) | ||
752 | /* This code assumes sizeof(unsigned short) == 2. Do not use | ||
753 | * UNALIGNED_OK if your compiler uses a different size. | ||
754 | */ | ||
755 | if (*(ush*)(match+best_len-1) != scan_end || | ||
756 | *(ush*)match != scan_start) continue; | ||
757 | |||
758 | /* It is not necessary to compare scan[2] and match[2] since they are | ||
759 | * always equal when the other bytes match, given that the hash keys | ||
760 | * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at | ||
761 | * strstart+3, +5, ... up to strstart+257. We check for insufficient | ||
762 | * lookahead only every 4th comparison; the 128th check will be made | ||
763 | * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is | ||
764 | * necessary to put more guard bytes at the end of the window, or | ||
765 | * to check more often for insufficient lookahead. | ||
766 | */ | ||
767 | Assert(scan[2] == match[2], "scan[2]?"); | ||
768 | scan++, match++; | ||
769 | do { | ||
770 | } while (*(ush*)(scan+=2) == *(ush*)(match+=2) && | ||
771 | *(ush*)(scan+=2) == *(ush*)(match+=2) && | ||
772 | *(ush*)(scan+=2) == *(ush*)(match+=2) && | ||
773 | *(ush*)(scan+=2) == *(ush*)(match+=2) && | ||
774 | scan < strend); | ||
775 | /* The funny "do {}" generates better code on most compilers */ | ||
776 | |||
777 | /* Here, scan <= window+strstart+257 */ | ||
778 | Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); | ||
779 | if (*scan == *match) scan++; | ||
780 | |||
781 | len = (MAX_MATCH - 1) - (int)(strend-scan); | ||
782 | scan = strend - (MAX_MATCH-1); | ||
783 | |||
784 | #else /* UNALIGNED_OK */ | ||
785 | |||
786 | if (match[best_len] != scan_end || | ||
787 | match[best_len-1] != scan_end1 || | ||
788 | *match != *scan || | ||
789 | *++match != scan[1]) continue; | ||
790 | |||
791 | /* The check at best_len-1 can be removed because it will be made | ||
792 | * again later. (This heuristic is not always a win.) | ||
793 | * It is not necessary to compare scan[2] and match[2] since they | ||
794 | * are always equal when the other bytes match, given that | ||
795 | * the hash keys are equal and that HASH_BITS >= 8. | ||
796 | */ | ||
797 | scan += 2, match++; | ||
798 | Assert(*scan == *match, "match[2]?"); | ||
799 | |||
800 | /* We check for insufficient lookahead only every 8th comparison; | ||
801 | * the 256th check will be made at strstart+258. | ||
802 | */ | ||
803 | do { | ||
804 | } while (*++scan == *++match && *++scan == *++match && | ||
805 | *++scan == *++match && *++scan == *++match && | ||
806 | *++scan == *++match && *++scan == *++match && | ||
807 | *++scan == *++match && *++scan == *++match && | ||
808 | scan < strend); | ||
809 | |||
810 | Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); | ||
811 | |||
812 | len = MAX_MATCH - (int)(strend - scan); | ||
813 | scan = strend - MAX_MATCH; | ||
814 | |||
815 | #endif /* UNALIGNED_OK */ | ||
816 | |||
817 | if (len > best_len) { | ||
818 | s->match_start = cur_match; | ||
819 | best_len = len; | ||
820 | if (len >= nice_match) break; | ||
821 | #ifdef UNALIGNED_OK | ||
822 | scan_end = *(ush*)(scan+best_len-1); | ||
823 | #else | ||
824 | scan_end1 = scan[best_len-1]; | ||
825 | scan_end = scan[best_len]; | ||
826 | #endif | ||
827 | } | ||
828 | } while ((cur_match = prev[cur_match & wmask]) > limit | ||
829 | && --chain_length != 0); | ||
830 | |||
831 | if ((uInt)best_len <= s->lookahead) return best_len; | ||
832 | return s->lookahead; | ||
833 | } | ||
834 | |||
835 | #ifdef DEBUG_ZLIB | ||
836 | /* =========================================================================== | ||
837 | * Check that the match at match_start is indeed a match. | ||
838 | */ | ||
839 | static void check_match( | ||
840 | deflate_state *s, | ||
841 | IPos start, | ||
842 | IPos match, | ||
843 | int length | ||
844 | ) | ||
845 | { | ||
846 | /* check that the match is indeed a match */ | ||
847 | if (memcmp((char *)s->window + match, | ||
848 | (char *)s->window + start, length) != EQUAL) { | ||
849 | fprintf(stderr, " start %u, match %u, length %d\n", | ||
850 | start, match, length); | ||
851 | do { | ||
852 | fprintf(stderr, "%c%c", s->window[match++], s->window[start++]); | ||
853 | } while (--length != 0); | ||
854 | z_error("invalid match"); | ||
855 | } | ||
856 | if (z_verbose > 1) { | ||
857 | fprintf(stderr,"\\[%d,%d]", start-match, length); | ||
858 | do { putc(s->window[start++], stderr); } while (--length != 0); | ||
859 | } | ||
860 | } | ||
861 | #else | ||
862 | # define check_match(s, start, match, length) | ||
863 | #endif | ||
864 | |||
865 | /* =========================================================================== | ||
866 | * Fill the window when the lookahead becomes insufficient. | ||
867 | * Updates strstart and lookahead. | ||
868 | * | ||
869 | * IN assertion: lookahead < MIN_LOOKAHEAD | ||
870 | * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD | ||
871 | * At least one byte has been read, or avail_in == 0; reads are | ||
872 | * performed for at least two bytes (required for the zip translate_eol | ||
873 | * option -- not supported here). | ||
874 | */ | ||
875 | static void fill_window( | ||
876 | deflate_state *s | ||
877 | ) | ||
878 | { | ||
879 | register unsigned n, m; | ||
880 | register Pos *p; | ||
881 | unsigned more; /* Amount of free space at the end of the window. */ | ||
882 | uInt wsize = s->w_size; | ||
883 | |||
884 | do { | ||
885 | more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart); | ||
886 | |||
887 | /* Deal with !@#$% 64K limit: */ | ||
888 | if (more == 0 && s->strstart == 0 && s->lookahead == 0) { | ||
889 | more = wsize; | ||
890 | |||
891 | } else if (more == (unsigned)(-1)) { | ||
892 | /* Very unlikely, but possible on 16 bit machine if strstart == 0 | ||
893 | * and lookahead == 1 (input done one byte at time) | ||
894 | */ | ||
895 | more--; | ||
896 | |||
897 | /* If the window is almost full and there is insufficient lookahead, | ||
898 | * move the upper half to the lower one to make room in the upper half. | ||
899 | */ | ||
900 | } else if (s->strstart >= wsize+MAX_DIST(s)) { | ||
901 | |||
902 | memcpy((char *)s->window, (char *)s->window+wsize, | ||
903 | (unsigned)wsize); | ||
904 | s->match_start -= wsize; | ||
905 | s->strstart -= wsize; /* we now have strstart >= MAX_DIST */ | ||
906 | s->block_start -= (long) wsize; | ||
907 | |||
908 | /* Slide the hash table (could be avoided with 32 bit values | ||
909 | at the expense of memory usage). We slide even when level == 0 | ||
910 | to keep the hash table consistent if we switch back to level > 0 | ||
911 | later. (Using level 0 permanently is not an optimal usage of | ||
912 | zlib, so we don't care about this pathological case.) | ||
913 | */ | ||
914 | n = s->hash_size; | ||
915 | p = &s->head[n]; | ||
916 | do { | ||
917 | m = *--p; | ||
918 | *p = (Pos)(m >= wsize ? m-wsize : NIL); | ||
919 | } while (--n); | ||
920 | |||
921 | n = wsize; | ||
922 | p = &s->prev[n]; | ||
923 | do { | ||
924 | m = *--p; | ||
925 | *p = (Pos)(m >= wsize ? m-wsize : NIL); | ||
926 | /* If n is not on any hash chain, prev[n] is garbage but | ||
927 | * its value will never be used. | ||
928 | */ | ||
929 | } while (--n); | ||
930 | more += wsize; | ||
931 | } | ||
932 | if (s->strm->avail_in == 0) return; | ||
933 | |||
934 | /* If there was no sliding: | ||
935 | * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && | ||
936 | * more == window_size - lookahead - strstart | ||
937 | * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) | ||
938 | * => more >= window_size - 2*WSIZE + 2 | ||
939 | * In the BIG_MEM or MMAP case (not yet supported), | ||
940 | * window_size == input_size + MIN_LOOKAHEAD && | ||
941 | * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. | ||
942 | * Otherwise, window_size == 2*WSIZE so more >= 2. | ||
943 | * If there was sliding, more >= WSIZE. So in all cases, more >= 2. | ||
944 | */ | ||
945 | Assert(more >= 2, "more < 2"); | ||
946 | |||
947 | n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more); | ||
948 | s->lookahead += n; | ||
949 | |||
950 | /* Initialize the hash value now that we have some input: */ | ||
951 | if (s->lookahead >= MIN_MATCH) { | ||
952 | s->ins_h = s->window[s->strstart]; | ||
953 | UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); | ||
954 | #if MIN_MATCH != 3 | ||
955 | Call UPDATE_HASH() MIN_MATCH-3 more times | ||
956 | #endif | ||
957 | } | ||
958 | /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, | ||
959 | * but this is not important since only literal bytes will be emitted. | ||
960 | */ | ||
961 | |||
962 | } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0); | ||
963 | } | ||
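To make the slide in fill_window() concrete: once strstart crosses wsize+MAX_DIST, the upper half of the window is copied down and every stored hash position is rebased by wsize, clamping to NIL when a position falls off the kept half. A minimal editorial sketch of just that step (not part of the kernel source; all names are hypothetical, memcpy assumed from <string.h>):

  #include <string.h>

  #define NIL 0   /* tail of hash chains, as in deflate */

  /* Hypothetical standalone version of the slide performed above:
   * drop the oldest wsize bytes and rebase head[]/prev[] links. */
  static void slide_window_sketch(unsigned char *window, unsigned wsize,
                                  unsigned short *head, unsigned hash_size,
                                  unsigned short *prev)
  {
      unsigned n;

      memcpy(window, window + wsize, wsize);        /* keep the newer half */
      for (n = 0; n < hash_size; n++)               /* rebase hash heads */
          head[n] = head[n] >= wsize ? head[n] - wsize : NIL;
      for (n = 0; n < wsize; n++)                   /* rebase chain links */
          prev[n] = prev[n] >= wsize ? prev[n] - wsize : NIL;
  }

As in the original, links that pointed into the discarded half become NIL rather than going negative, which is why 16-bit Pos values suffice.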
964 | |||
965 | /* =========================================================================== | ||
966 | * Flush the current block, with given end-of-file flag. | ||
967 | * IN assertion: strstart is set to the end of the current match. | ||
968 | */ | ||
969 | #define FLUSH_BLOCK_ONLY(s, eof) { \ | ||
970 | zlib_tr_flush_block(s, (s->block_start >= 0L ? \ | ||
971 | (char *)&s->window[(unsigned)s->block_start] : \ | ||
972 | NULL), \ | ||
973 | (ulg)((long)s->strstart - s->block_start), \ | ||
974 | (eof)); \ | ||
975 | s->block_start = s->strstart; \ | ||
976 | flush_pending(s->strm); \ | ||
977 | Tracev((stderr,"[FLUSH]")); \ | ||
978 | } | ||
979 | |||
980 | /* Same but force premature exit if necessary. */ | ||
981 | #define FLUSH_BLOCK(s, eof) { \ | ||
982 | FLUSH_BLOCK_ONLY(s, eof); \ | ||
983 | if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \ | ||
984 | } | ||
985 | |||
986 | /* =========================================================================== | ||
987 | * Copy without compression as much as possible from the input stream, return | ||
988 | * the current block state. | ||
989 | * This function does not insert new strings in the dictionary since | ||
990 | * incompressible data is probably not useful. This function is used | ||
991 | * only for the level=0 compression option. | ||
992 | * NOTE: this function should be optimized to avoid extra copying from | ||
993 | * window to pending_buf. | ||
994 | */ | ||
995 | static block_state deflate_stored( | ||
996 | deflate_state *s, | ||
997 | int flush | ||
998 | ) | ||
999 | { | ||
1000 | /* Stored blocks are limited to 0xffff bytes, pending_buf is limited | ||
1001 | * to pending_buf_size, and each stored block has a 5 byte header: | ||
1002 | */ | ||
1003 | ulg max_block_size = 0xffff; | ||
1004 | ulg max_start; | ||
1005 | |||
1006 | if (max_block_size > s->pending_buf_size - 5) { | ||
1007 | max_block_size = s->pending_buf_size - 5; | ||
1008 | } | ||
1009 | |||
1010 | /* Copy as much as possible from input to output: */ | ||
1011 | for (;;) { | ||
1012 | /* Fill the window as much as possible: */ | ||
1013 | if (s->lookahead <= 1) { | ||
1014 | |||
1015 | Assert(s->strstart < s->w_size+MAX_DIST(s) || | ||
1016 | s->block_start >= (long)s->w_size, "slide too late"); | ||
1017 | |||
1018 | fill_window(s); | ||
1019 | if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more; | ||
1020 | |||
1021 | if (s->lookahead == 0) break; /* flush the current block */ | ||
1022 | } | ||
1023 | Assert(s->block_start >= 0L, "block gone"); | ||
1024 | |||
1025 | s->strstart += s->lookahead; | ||
1026 | s->lookahead = 0; | ||
1027 | |||
1028 | /* Emit a stored block if pending_buf will be full: */ | ||
1029 | max_start = s->block_start + max_block_size; | ||
1030 | if (s->strstart == 0 || (ulg)s->strstart >= max_start) { | ||
1031 | /* strstart == 0 is possible when wraparound on 16-bit machine */ | ||
1032 | s->lookahead = (uInt)(s->strstart - max_start); | ||
1033 | s->strstart = (uInt)max_start; | ||
1034 | FLUSH_BLOCK(s, 0); | ||
1035 | } | ||
1036 | /* Flush if we may have to slide, otherwise block_start may become | ||
1037 | * negative and the data will be gone: | ||
1038 | */ | ||
1039 | if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) { | ||
1040 | FLUSH_BLOCK(s, 0); | ||
1041 | } | ||
1042 | } | ||
1043 | FLUSH_BLOCK(s, flush == Z_FINISH); | ||
1044 | return flush == Z_FINISH ? finish_done : block_done; | ||
1045 | } | ||
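Each stored block carries roughly 5 bytes of framing: the 3-bit block type, padding to a byte boundary, then the 16-bit LEN and its one's complement NLEN. That is why max_block_size is capped both at 0xffff and at pending_buf_size - 5. An editorial sketch of the resulting worst case for level-0 output (the helper name is hypothetical):

  /* Worst-case level-0 output size for in_len input bytes when each
   * stored block holds at most max_block payload bytes. */
  static unsigned long stored_output_bound(unsigned long in_len,
                                           unsigned long max_block)
  {
      unsigned long blocks = (in_len + max_block - 1) / max_block;

      if (blocks == 0)
          blocks = 1;               /* even empty input emits one block */
      return in_len + 5 * blocks;   /* ~5 framing bytes per block */
  }

With max_block = 0xffff this is under 0.008% overhead on large inputs.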
1046 | |||
1047 | /* =========================================================================== | ||
1048 | * Compress as much as possible from the input stream, return the current | ||
1049 | * block state. | ||
1050 | * This function does not perform lazy evaluation of matches and inserts | ||
1051 | * new strings in the dictionary only for unmatched strings or for short | ||
1052 | * matches. It is used only for the fast compression options. | ||
1053 | */ | ||
1054 | static block_state deflate_fast( | ||
1055 | deflate_state *s, | ||
1056 | int flush | ||
1057 | ) | ||
1058 | { | ||
1059 | IPos hash_head = NIL; /* head of the hash chain */ | ||
1060 | int bflush; /* set if current block must be flushed */ | ||
1061 | |||
1062 | for (;;) { | ||
1063 | /* Make sure that we always have enough lookahead, except | ||
1064 | * at the end of the input file. We need MAX_MATCH bytes | ||
1065 | * for the next match, plus MIN_MATCH bytes to insert the | ||
1066 | * string following the next match. | ||
1067 | */ | ||
1068 | if (s->lookahead < MIN_LOOKAHEAD) { | ||
1069 | fill_window(s); | ||
1070 | if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { | ||
1071 | return need_more; | ||
1072 | } | ||
1073 | if (s->lookahead == 0) break; /* flush the current block */ | ||
1074 | } | ||
1075 | |||
1076 | /* Insert the string window[strstart .. strstart+2] in the | ||
1077 | * dictionary, and set hash_head to the head of the hash chain: | ||
1078 | */ | ||
1079 | if (s->lookahead >= MIN_MATCH) { | ||
1080 | INSERT_STRING(s, s->strstart, hash_head); | ||
1081 | } | ||
1082 | |||
1083 | /* Find the longest match, discarding those <= prev_length. | ||
1084 | * At this point we have always match_length < MIN_MATCH | ||
1085 | */ | ||
1086 | if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) { | ||
1087 | /* To simplify the code, we prevent matches with the string | ||
1088 | * of window index 0 (in particular we have to avoid a match | ||
1089 | * of the string with itself at the start of the input file). | ||
1090 | */ | ||
1091 | if (s->strategy != Z_HUFFMAN_ONLY) { | ||
1092 | s->match_length = longest_match (s, hash_head); | ||
1093 | } | ||
1094 | /* longest_match() sets match_start */ | ||
1095 | } | ||
1096 | if (s->match_length >= MIN_MATCH) { | ||
1097 | check_match(s, s->strstart, s->match_start, s->match_length); | ||
1098 | |||
1099 | bflush = zlib_tr_tally(s, s->strstart - s->match_start, | ||
1100 | s->match_length - MIN_MATCH); | ||
1101 | |||
1102 | s->lookahead -= s->match_length; | ||
1103 | |||
1104 | /* Insert new strings in the hash table only if the match length | ||
1105 | * is not too large. This saves time but degrades compression. | ||
1106 | */ | ||
1107 | if (s->match_length <= s->max_insert_length && | ||
1108 | s->lookahead >= MIN_MATCH) { | ||
1109 | s->match_length--; /* string at strstart already in hash table */ | ||
1110 | do { | ||
1111 | s->strstart++; | ||
1112 | INSERT_STRING(s, s->strstart, hash_head); | ||
1113 | /* strstart never exceeds WSIZE-MAX_MATCH, so there are | ||
1114 | * always MIN_MATCH bytes ahead. | ||
1115 | */ | ||
1116 | } while (--s->match_length != 0); | ||
1117 | s->strstart++; | ||
1118 | } else { | ||
1119 | s->strstart += s->match_length; | ||
1120 | s->match_length = 0; | ||
1121 | s->ins_h = s->window[s->strstart]; | ||
1122 | UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); | ||
1123 | #if MIN_MATCH != 3 | ||
1124 | Call UPDATE_HASH() MIN_MATCH-3 more times | ||
1125 | #endif | ||
1126 | /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not | ||
1127 | * matter since it will be recomputed at next deflate call. | ||
1128 | */ | ||
1129 | } | ||
1130 | } else { | ||
1131 | /* No match, output a literal byte */ | ||
1132 | Tracevv((stderr,"%c", s->window[s->strstart])); | ||
1133 | bflush = zlib_tr_tally (s, 0, s->window[s->strstart]); | ||
1134 | s->lookahead--; | ||
1135 | s->strstart++; | ||
1136 | } | ||
1137 | if (bflush) FLUSH_BLOCK(s, 0); | ||
1138 | } | ||
1139 | FLUSH_BLOCK(s, flush == Z_FINISH); | ||
1140 | return flush == Z_FINISH ? finish_done : block_done; | ||
1141 | } | ||
1142 | |||
1143 | /* =========================================================================== | ||
1144 | * Same as above, but achieves better compression. We use a lazy | ||
1145 | * evaluation for matches: a match is finally adopted only if there is | ||
1146 | * no better match at the next window position. | ||
1147 | */ | ||
1148 | static block_state deflate_slow( | ||
1149 | deflate_state *s, | ||
1150 | int flush | ||
1151 | ) | ||
1152 | { | ||
1153 | IPos hash_head = NIL; /* head of hash chain */ | ||
1154 | int bflush; /* set if current block must be flushed */ | ||
1155 | |||
1156 | /* Process the input block. */ | ||
1157 | for (;;) { | ||
1158 | /* Make sure that we always have enough lookahead, except | ||
1159 | * at the end of the input file. We need MAX_MATCH bytes | ||
1160 | * for the next match, plus MIN_MATCH bytes to insert the | ||
1161 | * string following the next match. | ||
1162 | */ | ||
1163 | if (s->lookahead < MIN_LOOKAHEAD) { | ||
1164 | fill_window(s); | ||
1165 | if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { | ||
1166 | return need_more; | ||
1167 | } | ||
1168 | if (s->lookahead == 0) break; /* flush the current block */ | ||
1169 | } | ||
1170 | |||
1171 | /* Insert the string window[strstart .. strstart+2] in the | ||
1172 | * dictionary, and set hash_head to the head of the hash chain: | ||
1173 | */ | ||
1174 | if (s->lookahead >= MIN_MATCH) { | ||
1175 | INSERT_STRING(s, s->strstart, hash_head); | ||
1176 | } | ||
1177 | |||
1178 | /* Find the longest match, discarding those <= prev_length. | ||
1179 | */ | ||
1180 | s->prev_length = s->match_length, s->prev_match = s->match_start; | ||
1181 | s->match_length = MIN_MATCH-1; | ||
1182 | |||
1183 | if (hash_head != NIL && s->prev_length < s->max_lazy_match && | ||
1184 | s->strstart - hash_head <= MAX_DIST(s)) { | ||
1185 | /* To simplify the code, we prevent matches with the string | ||
1186 | * of window index 0 (in particular we have to avoid a match | ||
1187 | * of the string with itself at the start of the input file). | ||
1188 | */ | ||
1189 | if (s->strategy != Z_HUFFMAN_ONLY) { | ||
1190 | s->match_length = longest_match (s, hash_head); | ||
1191 | } | ||
1192 | /* longest_match() sets match_start */ | ||
1193 | |||
1194 | if (s->match_length <= 5 && (s->strategy == Z_FILTERED || | ||
1195 | (s->match_length == MIN_MATCH && | ||
1196 | s->strstart - s->match_start > TOO_FAR))) { | ||
1197 | |||
1198 | /* If prev_match is also MIN_MATCH, match_start is garbage | ||
1199 | * but we will ignore the current match anyway. | ||
1200 | */ | ||
1201 | s->match_length = MIN_MATCH-1; | ||
1202 | } | ||
1203 | } | ||
1204 | /* If there was a match at the previous step and the current | ||
1205 | * match is not better, output the previous match: | ||
1206 | */ | ||
1207 | if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) { | ||
1208 | uInt max_insert = s->strstart + s->lookahead - MIN_MATCH; | ||
1209 | /* Do not insert strings in hash table beyond this. */ | ||
1210 | |||
1211 | check_match(s, s->strstart-1, s->prev_match, s->prev_length); | ||
1212 | |||
1213 | bflush = zlib_tr_tally(s, s->strstart -1 - s->prev_match, | ||
1214 | s->prev_length - MIN_MATCH); | ||
1215 | |||
1216 | /* Insert in hash table all strings up to the end of the match. | ||
1217 | * strstart-1 and strstart are already inserted. If there is not | ||
1218 | * enough lookahead, the last two strings are not inserted in | ||
1219 | * the hash table. | ||
1220 | */ | ||
1221 | s->lookahead -= s->prev_length-1; | ||
1222 | s->prev_length -= 2; | ||
1223 | do { | ||
1224 | if (++s->strstart <= max_insert) { | ||
1225 | INSERT_STRING(s, s->strstart, hash_head); | ||
1226 | } | ||
1227 | } while (--s->prev_length != 0); | ||
1228 | s->match_available = 0; | ||
1229 | s->match_length = MIN_MATCH-1; | ||
1230 | s->strstart++; | ||
1231 | |||
1232 | if (bflush) FLUSH_BLOCK(s, 0); | ||
1233 | |||
1234 | } else if (s->match_available) { | ||
1235 | /* If there was no match at the previous position, output a | ||
1236 | * single literal. If there was a match but the current match | ||
1237 | * is longer, truncate the previous match to a single literal. | ||
1238 | */ | ||
1239 | Tracevv((stderr,"%c", s->window[s->strstart-1])); | ||
1240 | if (zlib_tr_tally (s, 0, s->window[s->strstart-1])) { | ||
1241 | FLUSH_BLOCK_ONLY(s, 0); | ||
1242 | } | ||
1243 | s->strstart++; | ||
1244 | s->lookahead--; | ||
1245 | if (s->strm->avail_out == 0) return need_more; | ||
1246 | } else { | ||
1247 | /* There is no previous match to compare with, wait for | ||
1248 | * the next step to decide. | ||
1249 | */ | ||
1250 | s->match_available = 1; | ||
1251 | s->strstart++; | ||
1252 | s->lookahead--; | ||
1253 | } | ||
1254 | } | ||
1255 | Assert (flush != Z_NO_FLUSH, "no flush?"); | ||
1256 | if (s->match_available) { | ||
1257 | Tracevv((stderr,"%c", s->window[s->strstart-1])); | ||
1258 | zlib_tr_tally (s, 0, s->window[s->strstart-1]); | ||
1259 | s->match_available = 0; | ||
1260 | } | ||
1261 | FLUSH_BLOCK(s, flush == Z_FINISH); | ||
1262 | return flush == Z_FINISH ? finish_done : block_done; | ||
1263 | } | ||
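The difference from deflate_fast() is the one-position deferral: the match found at strstart-1 is emitted only if the match just found at strstart is no longer; otherwise the earlier match is truncated to a single literal and the longer match is considered on the next iteration. An editorial sketch of just that decision, with hypothetical names, the match_available bookkeeping omitted, and MIN_MATCH fixed at its deflate value of 3:

  #include <stdio.h>

  /* Trace one lazy-evaluation step: prev_len is the match length found
   * at position pos-1, cur_len the one just found at pos. */
  static void lazy_step_sketch(int prev_len, int cur_len, int pos)
  {
      if (prev_len >= 3 && cur_len <= prev_len)
          printf("emit match at %d, length %d\n", pos - 1, prev_len);
      else if (prev_len >= 3)
          printf("truncate %d to a literal; the match at %d wins\n",
                 pos - 1, pos);
      else
          printf("no previous match at %d; defer one position\n", pos - 1);
  }

So when a 3-byte match at one position overlaps a 5-byte match starting one byte later, the lazy path emits one literal plus the 5-byte match, where the greedy path would have committed to the 3-byte match.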
1264 | |||
1265 | int zlib_deflate_workspacesize(void) | ||
1266 | { | ||
1267 | return sizeof(deflate_workspace); | ||
1268 | } | ||
diff --git a/lib/zlib_deflate/deflate_syms.c b/lib/zlib_deflate/deflate_syms.c new file mode 100644 index 000000000000..5985b28c8e30 --- /dev/null +++ b/lib/zlib_deflate/deflate_syms.c | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * linux/lib/zlib_deflate/deflate_syms.c | ||
3 | * | ||
4 | * Exported symbols for the deflate functionality. | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/init.h> | ||
10 | |||
11 | #include <linux/zlib.h> | ||
12 | |||
13 | EXPORT_SYMBOL(zlib_deflate_workspacesize); | ||
14 | EXPORT_SYMBOL(zlib_deflate); | ||
15 | EXPORT_SYMBOL(zlib_deflateInit_); | ||
16 | EXPORT_SYMBOL(zlib_deflateInit2_); | ||
17 | EXPORT_SYMBOL(zlib_deflateEnd); | ||
18 | EXPORT_SYMBOL(zlib_deflateReset); | ||
19 | EXPORT_SYMBOL(zlib_deflateCopy); | ||
20 | EXPORT_SYMBOL(zlib_deflateParams); | ||
21 | MODULE_LICENSE("GPL"); | ||
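For reference, a minimal editorial sketch of how a caller might drive these exports for one-shot compression. It assumes the zlib_deflateInit() convenience macro from <linux/zlib.h> and the vmalloc'ed-workspace convention that zlib_deflate_workspacesize() implies; error handling is abbreviated and the function name is hypothetical:

  #include <linux/zlib.h>
  #include <linux/vmalloc.h>
  #include <linux/errno.h>

  /* Hypothetical one-shot compressor: deflate in[0..in_len) into out. */
  static int deflate_buf_sketch(void *in, unsigned int in_len,
                                void *out, unsigned int out_len)
  {
      z_stream strm;
      int ret;

      strm.workspace = vmalloc(zlib_deflate_workspacesize());
      if (!strm.workspace)
          return -ENOMEM;
      ret = zlib_deflateInit(&strm, Z_DEFAULT_COMPRESSION);
      if (ret == Z_OK) {
          strm.next_in = in;
          strm.avail_in = in_len;
          strm.next_out = out;
          strm.avail_out = out_len;
          ret = zlib_deflate(&strm, Z_FINISH); /* expect Z_STREAM_END */
          zlib_deflateEnd(&strm);
      }
      vfree(strm.workspace);
      return ret == Z_STREAM_END ? (int)strm.total_out : -EIO;
  }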
diff --git a/lib/zlib_deflate/deftree.c b/lib/zlib_deflate/deftree.c new file mode 100644 index 000000000000..ddf348299f24 --- /dev/null +++ b/lib/zlib_deflate/deftree.c | |||
@@ -0,0 +1,1113 @@ | |||
1 | /* +++ trees.c */ | ||
2 | /* trees.c -- output deflated data using Huffman coding | ||
3 | * Copyright (C) 1995-1996 Jean-loup Gailly | ||
4 | * For conditions of distribution and use, see copyright notice in zlib.h | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * ALGORITHM | ||
9 | * | ||
10 | * The "deflation" process uses several Huffman trees. The more | ||
11 | * common source values are represented by shorter bit sequences. | ||
12 | * | ||
13 | * Each code tree is stored in a compressed form which is itself | ||
14 | * a Huffman encoding of the lengths of all the code strings (in | ||
15 | * ascending order by source values). The actual code strings are | ||
16 | * reconstructed from the lengths in the inflate process, as described | ||
17 | * in the deflate specification. | ||
18 | * | ||
19 | * REFERENCES | ||
20 | * | ||
21 | * Deutsch, L.P., "'Deflate' Compressed Data Format Specification". | ||
22 | * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc | ||
23 | * | ||
24 | * Storer, James A. | ||
25 | * Data Compression: Methods and Theory, pp. 49-50. | ||
26 | * Computer Science Press, 1988. ISBN 0-7167-8156-5. | ||
27 | * | ||
28 | * Sedgewick, R. | ||
29 | * Algorithms, p290. | ||
30 | * Addison-Wesley, 1983. ISBN 0-201-06672-6. | ||
31 | */ | ||
32 | |||
33 | /* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */ | ||
34 | |||
35 | /* #include "deflate.h" */ | ||
36 | |||
37 | #include <linux/zutil.h> | ||
38 | #include "defutil.h" | ||
39 | |||
40 | #ifdef DEBUG_ZLIB | ||
41 | # include <ctype.h> | ||
42 | #endif | ||
43 | |||
44 | /* =========================================================================== | ||
45 | * Constants | ||
46 | */ | ||
47 | |||
48 | #define MAX_BL_BITS 7 | ||
49 | /* Bit length codes must not exceed MAX_BL_BITS bits */ | ||
50 | |||
51 | #define END_BLOCK 256 | ||
52 | /* end of block literal code */ | ||
53 | |||
54 | #define REP_3_6 16 | ||
55 | /* repeat previous bit length 3-6 times (2 bits of repeat count) */ | ||
56 | |||
57 | #define REPZ_3_10 17 | ||
58 | /* repeat a zero length 3-10 times (3 bits of repeat count) */ | ||
59 | |||
60 | #define REPZ_11_138 18 | ||
61 | /* repeat a zero length 11-138 times (7 bits of repeat count) */ | ||
62 | |||
63 | static const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ | ||
64 | = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0}; | ||
65 | |||
66 | static const int extra_dbits[D_CODES] /* extra bits for each distance code */ | ||
67 | = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; | ||
68 | |||
69 | static const int extra_blbits[BL_CODES]/* extra bits for each bit length code */ | ||
70 | = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7}; | ||
71 | |||
72 | static const uch bl_order[BL_CODES] | ||
73 | = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15}; | ||
74 | /* The lengths of the bit length codes are sent in order of decreasing | ||
75 | * probability, to avoid transmitting the lengths for unused bit length codes. | ||
76 | */ | ||
77 | |||
78 | #define Buf_size (8 * 2*sizeof(char)) | ||
79 | /* Number of bits used within bi_buf. (bi_buf might be implemented on | ||
80 | * more than 16 bits on some systems.) | ||
81 | */ | ||
82 | |||
83 | /* =========================================================================== | ||
84 | * Local data. These are initialized only once. | ||
85 | */ | ||
86 | |||
87 | static ct_data static_ltree[L_CODES+2]; | ||
88 | /* The static literal tree. Since the bit lengths are imposed, there is no | ||
89 | * need for the L_CODES extra codes used during heap construction. However, | ||
90 | * the codes 286 and 287 are needed to build a canonical tree (see zlib_tr_init | ||
91 | * below). | ||
92 | */ | ||
93 | |||
94 | static ct_data static_dtree[D_CODES]; | ||
95 | /* The static distance tree. (Actually a trivial tree since all codes use | ||
96 | * 5 bits.) | ||
97 | */ | ||
98 | |||
99 | static uch dist_code[512]; | ||
100 | /* distance codes. The first 256 values correspond to the distances | ||
101 | * 3 .. 258, the last 256 values correspond to the top 8 bits of | ||
102 | * the 15 bit distances. | ||
103 | */ | ||
104 | |||
105 | static uch length_code[MAX_MATCH-MIN_MATCH+1]; | ||
106 | /* length code for each normalized match length (0 == MIN_MATCH) */ | ||
107 | |||
108 | static int base_length[LENGTH_CODES]; | ||
109 | /* First normalized length for each code (0 = MIN_MATCH) */ | ||
110 | |||
111 | static int base_dist[D_CODES]; | ||
112 | /* First normalized distance for each code (0 = distance of 1) */ | ||
113 | |||
114 | struct static_tree_desc_s { | ||
115 | const ct_data *static_tree; /* static tree or NULL */ | ||
116 | const int *extra_bits; /* extra bits for each code or NULL */ | ||
117 | int extra_base; /* base index for extra_bits */ | ||
118 | int elems; /* max number of elements in the tree */ | ||
119 | int max_length; /* max bit length for the codes */ | ||
120 | }; | ||
121 | |||
122 | static static_tree_desc static_l_desc = | ||
123 | {static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; | ||
124 | |||
125 | static static_tree_desc static_d_desc = | ||
126 | {static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; | ||
127 | |||
128 | static static_tree_desc static_bl_desc = | ||
129 | {(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; | ||
130 | |||
131 | /* =========================================================================== | ||
132 | * Local (static) routines in this file. | ||
133 | */ | ||
134 | |||
135 | static void tr_static_init (void); | ||
136 | static void init_block (deflate_state *s); | ||
137 | static void pqdownheap (deflate_state *s, ct_data *tree, int k); | ||
138 | static void gen_bitlen (deflate_state *s, tree_desc *desc); | ||
139 | static void gen_codes (ct_data *tree, int max_code, ush *bl_count); | ||
140 | static void build_tree (deflate_state *s, tree_desc *desc); | ||
141 | static void scan_tree (deflate_state *s, ct_data *tree, int max_code); | ||
142 | static void send_tree (deflate_state *s, ct_data *tree, int max_code); | ||
143 | static int build_bl_tree (deflate_state *s); | ||
144 | static void send_all_trees (deflate_state *s, int lcodes, int dcodes, | ||
145 | int blcodes); | ||
146 | static void compress_block (deflate_state *s, ct_data *ltree, | ||
147 | ct_data *dtree); | ||
148 | static void set_data_type (deflate_state *s); | ||
149 | static unsigned bi_reverse (unsigned value, int length); | ||
150 | static void bi_windup (deflate_state *s); | ||
151 | static void bi_flush (deflate_state *s); | ||
152 | static void copy_block (deflate_state *s, char *buf, unsigned len, | ||
153 | int header); | ||
154 | |||
155 | #ifndef DEBUG_ZLIB | ||
156 | # define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len) | ||
157 | /* Send a code of the given tree. c and tree must not have side effects */ | ||
158 | |||
159 | #else /* DEBUG_ZLIB */ | ||
160 | # define send_code(s, c, tree) \ | ||
161 | { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \ | ||
162 | send_bits(s, tree[c].Code, tree[c].Len); } | ||
163 | #endif | ||
164 | |||
165 | #define d_code(dist) \ | ||
166 | ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)]) | ||
167 | /* Mapping from a distance to a distance code. dist is the distance - 1 and | ||
168 | * must not have side effects. dist_code[256] and dist_code[257] are never | ||
169 | * used. | ||
170 | */ | ||
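To make the split concrete, an editorial wrapper (hypothetical name) showing both halves of the lookup:

  /* Distance-to-code lookup as performed by d_code(), for a raw match
   * distance of 1..32768. */
  static unsigned dist_to_code_example(unsigned match_distance)
  {
      unsigned dist = match_distance - 1;         /* callers pass distance-1 */

      return dist < 256 ? dist_code[dist]               /* distances 1..256 */
                        : dist_code[256 + (dist >> 7)]; /* top 8 of 15 bits */
  }

For example, a match distance of 300 gives dist = 299, and 299 >> 7 == 2, so the code comes from dist_code[258] in the upper half of the table.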
171 | |||
172 | /* =========================================================================== | ||
173 | * Send a value on a given number of bits. | ||
174 | * IN assertion: length <= 16 and value fits in length bits. | ||
175 | */ | ||
176 | #ifdef DEBUG_ZLIB | ||
177 | static void send_bits (deflate_state *s, int value, int length); | ||
178 | |||
179 | static void send_bits( | ||
180 | deflate_state *s, | ||
181 | int value, /* value to send */ | ||
182 | int length /* number of bits */ | ||
183 | ) | ||
184 | { | ||
185 | Tracevv((stderr," l %2d v %4x ", length, value)); | ||
186 | Assert(length > 0 && length <= 15, "invalid length"); | ||
187 | s->bits_sent += (ulg)length; | ||
188 | |||
189 | /* If not enough room in bi_buf, use (valid) bits from bi_buf and | ||
190 | * (16 - bi_valid) bits from value, leaving (length - (16-bi_valid)) | ||
191 | * unused bits in value. | ||
192 | */ | ||
193 | if (s->bi_valid > (int)Buf_size - length) { | ||
194 | s->bi_buf |= (value << s->bi_valid); | ||
195 | put_short(s, s->bi_buf); | ||
196 | s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); | ||
197 | s->bi_valid += length - Buf_size; | ||
198 | } else { | ||
199 | s->bi_buf |= value << s->bi_valid; | ||
200 | s->bi_valid += length; | ||
201 | } | ||
202 | } | ||
203 | #else /* !DEBUG_ZLIB */ | ||
204 | |||
205 | #define send_bits(s, value, length) \ | ||
206 | { int len = length;\ | ||
207 | if (s->bi_valid > (int)Buf_size - len) {\ | ||
208 | int val = value;\ | ||
209 | s->bi_buf |= (val << s->bi_valid);\ | ||
210 | put_short(s, s->bi_buf);\ | ||
211 | s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\ | ||
212 | s->bi_valid += len - Buf_size;\ | ||
213 | } else {\ | ||
214 | s->bi_buf |= (value) << s->bi_valid;\ | ||
215 | s->bi_valid += len;\ | ||
216 | }\ | ||
217 | } | ||
218 | #endif /* DEBUG_ZLIB */ | ||
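The discipline here is LSB-first packing into a small holding register that is flushed as it fills. An editorial standalone sketch (hypothetical names; it flushes byte-at-a-time where the code above flushes 16 bits via put_short()):

  /* Pack the `length` low bits of `value` after any pending bits and
   * hand completed bytes to the caller's sink. */
  static unsigned int bit_buf;   /* pending bits, low bits first */
  static int bit_count;          /* number of pending bits, 0..7 */

  static void put_bits_sketch(unsigned int value, int length,
                              void (*emit_byte)(unsigned char))
  {
      bit_buf |= value << bit_count;
      bit_count += length;
      while (bit_count >= 8) {           /* flush whole bytes */
          emit_byte(bit_buf & 0xff);
          bit_buf >>= 8;
          bit_count -= 8;
      }
  }

This matches RFC 1951's bit order, where output bits fill each byte starting at the least significant bit (which is also why gen_codes() bit-reverses the Huffman codes before they are sent).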
219 | |||
220 | /* =========================================================================== | ||
221 | * Initialize the various 'constant' tables. In a multi-threaded environment, | ||
222 | * this function may be called by two threads concurrently, but this is | ||
223 | * harmless since both invocations do exactly the same thing. | ||
224 | */ | ||
225 | static void tr_static_init(void) | ||
226 | { | ||
227 | static int static_init_done; | ||
228 | int n; /* iterates over tree elements */ | ||
229 | int bits; /* bit counter */ | ||
230 | int length; /* length value */ | ||
231 | int code; /* code value */ | ||
232 | int dist; /* distance index */ | ||
233 | ush bl_count[MAX_BITS+1]; | ||
234 | /* number of codes at each bit length for an optimal tree */ | ||
235 | |||
236 | if (static_init_done) return; | ||
237 | |||
238 | /* Initialize the mapping length (0..255) -> length code (0..28) */ | ||
239 | length = 0; | ||
240 | for (code = 0; code < LENGTH_CODES-1; code++) { | ||
241 | base_length[code] = length; | ||
242 | for (n = 0; n < (1<<extra_lbits[code]); n++) { | ||
243 | length_code[length++] = (uch)code; | ||
244 | } | ||
245 | } | ||
246 | Assert (length == 256, "tr_static_init: length != 256"); | ||
247 | /* Note that the length 255 (match length 258) can be represented | ||
248 | * in two different ways: code 284 + 5 bits or code 285, so we | ||
249 | * overwrite length_code[255] to use the best encoding: | ||
250 | */ | ||
251 | length_code[length-1] = (uch)code; | ||
252 | |||
253 | /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ | ||
254 | dist = 0; | ||
255 | for (code = 0 ; code < 16; code++) { | ||
256 | base_dist[code] = dist; | ||
257 | for (n = 0; n < (1<<extra_dbits[code]); n++) { | ||
258 | dist_code[dist++] = (uch)code; | ||
259 | } | ||
260 | } | ||
261 | Assert (dist == 256, "tr_static_init: dist != 256"); | ||
262 | dist >>= 7; /* from now on, all distances are divided by 128 */ | ||
263 | for ( ; code < D_CODES; code++) { | ||
264 | base_dist[code] = dist << 7; | ||
265 | for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) { | ||
266 | dist_code[256 + dist++] = (uch)code; | ||
267 | } | ||
268 | } | ||
269 | Assert (dist == 256, "tr_static_init: 256+dist != 512"); | ||
270 | |||
271 | /* Construct the codes of the static literal tree */ | ||
272 | for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0; | ||
273 | n = 0; | ||
274 | while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++; | ||
275 | while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++; | ||
276 | while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++; | ||
277 | while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++; | ||
278 | /* Codes 286 and 287 do not exist, but we must include them in the | ||
279 | * tree construction to get a canonical Huffman tree (longest code | ||
280 | * all ones) | ||
281 | */ | ||
282 | gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count); | ||
283 | |||
284 | /* The static distance tree is trivial: */ | ||
285 | for (n = 0; n < D_CODES; n++) { | ||
286 | static_dtree[n].Len = 5; | ||
287 | static_dtree[n].Code = bi_reverse((unsigned)n, 5); | ||
288 | } | ||
289 | static_init_done = 1; | ||
290 | } | ||
291 | |||
292 | /* =========================================================================== | ||
293 | * Initialize the tree data structures for a new zlib stream. | ||
294 | */ | ||
295 | void zlib_tr_init( | ||
296 | deflate_state *s | ||
297 | ) | ||
298 | { | ||
299 | tr_static_init(); | ||
300 | |||
301 | s->compressed_len = 0L; | ||
302 | |||
303 | s->l_desc.dyn_tree = s->dyn_ltree; | ||
304 | s->l_desc.stat_desc = &static_l_desc; | ||
305 | |||
306 | s->d_desc.dyn_tree = s->dyn_dtree; | ||
307 | s->d_desc.stat_desc = &static_d_desc; | ||
308 | |||
309 | s->bl_desc.dyn_tree = s->bl_tree; | ||
310 | s->bl_desc.stat_desc = &static_bl_desc; | ||
311 | |||
312 | s->bi_buf = 0; | ||
313 | s->bi_valid = 0; | ||
314 | s->last_eob_len = 8; /* enough lookahead for inflate */ | ||
315 | #ifdef DEBUG_ZLIB | ||
316 | s->bits_sent = 0L; | ||
317 | #endif | ||
318 | |||
319 | /* Initialize the first block of the first file: */ | ||
320 | init_block(s); | ||
321 | } | ||
322 | |||
323 | /* =========================================================================== | ||
324 | * Initialize a new block. | ||
325 | */ | ||
326 | static void init_block( | ||
327 | deflate_state *s | ||
328 | ) | ||
329 | { | ||
330 | int n; /* iterates over tree elements */ | ||
331 | |||
332 | /* Initialize the trees. */ | ||
333 | for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0; | ||
334 | for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0; | ||
335 | for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0; | ||
336 | |||
337 | s->dyn_ltree[END_BLOCK].Freq = 1; | ||
338 | s->opt_len = s->static_len = 0L; | ||
339 | s->last_lit = s->matches = 0; | ||
340 | } | ||
341 | |||
342 | #define SMALLEST 1 | ||
343 | /* Index within the heap array of least frequent node in the Huffman tree */ | ||
344 | |||
345 | |||
346 | /* =========================================================================== | ||
347 | * Remove the smallest element from the heap and recreate the heap with | ||
348 | * one less element. Updates heap and heap_len. | ||
349 | */ | ||
350 | #define pqremove(s, tree, top) \ | ||
351 | {\ | ||
352 | top = s->heap[SMALLEST]; \ | ||
353 | s->heap[SMALLEST] = s->heap[s->heap_len--]; \ | ||
354 | pqdownheap(s, tree, SMALLEST); \ | ||
355 | } | ||
356 | |||
357 | /* =========================================================================== | ||
358 | * Compares two subtrees, using the tree depth as tie breaker when | ||
359 | * the subtrees have equal frequency. This minimizes the worst case length. | ||
360 | */ | ||
361 | #define smaller(tree, n, m, depth) \ | ||
362 | (tree[n].Freq < tree[m].Freq || \ | ||
363 | (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m])) | ||
364 | |||
365 | /* =========================================================================== | ||
366 | * Restore the heap property by moving down the tree starting at node k, | ||
367 | * exchanging a node with the smallest of its two sons if necessary, stopping | ||
368 | * when the heap property is re-established (each father smaller than its | ||
369 | * two sons). | ||
370 | */ | ||
371 | static void pqdownheap( | ||
372 | deflate_state *s, | ||
373 | ct_data *tree, /* the tree to restore */ | ||
374 | int k /* node to move down */ | ||
375 | ) | ||
376 | { | ||
377 | int v = s->heap[k]; | ||
378 | int j = k << 1; /* left son of k */ | ||
379 | while (j <= s->heap_len) { | ||
380 | /* Set j to the smallest of the two sons: */ | ||
381 | if (j < s->heap_len && | ||
382 | smaller(tree, s->heap[j+1], s->heap[j], s->depth)) { | ||
383 | j++; | ||
384 | } | ||
385 | /* Exit if v is smaller than both sons */ | ||
386 | if (smaller(tree, v, s->heap[j], s->depth)) break; | ||
387 | |||
388 | /* Exchange v with the smallest son */ | ||
389 | s->heap[k] = s->heap[j]; k = j; | ||
390 | |||
391 | /* And continue down the tree, setting j to the left son of k */ | ||
392 | j <<= 1; | ||
393 | } | ||
394 | s->heap[k] = v; | ||
395 | } | ||
396 | |||
397 | /* =========================================================================== | ||
398 | * Compute the optimal bit lengths for a tree and update the total bit length | ||
399 | * for the current block. | ||
400 | * IN assertion: the fields freq and dad are set, heap[heap_max] and | ||
401 | * above are the tree nodes sorted by increasing frequency. | ||
402 | * OUT assertions: the field len is set to the optimal bit length, the | ||
403 | * array bl_count contains the frequencies for each bit length. | ||
404 | * The length opt_len is updated; static_len is also updated if stree is | ||
405 | * not null. | ||
406 | */ | ||
407 | static void gen_bitlen( | ||
408 | deflate_state *s, | ||
409 | tree_desc *desc /* the tree descriptor */ | ||
410 | ) | ||
411 | { | ||
412 | ct_data *tree = desc->dyn_tree; | ||
413 | int max_code = desc->max_code; | ||
414 | const ct_data *stree = desc->stat_desc->static_tree; | ||
415 | const int *extra = desc->stat_desc->extra_bits; | ||
416 | int base = desc->stat_desc->extra_base; | ||
417 | int max_length = desc->stat_desc->max_length; | ||
418 | int h; /* heap index */ | ||
419 | int n, m; /* iterate over the tree elements */ | ||
420 | int bits; /* bit length */ | ||
421 | int xbits; /* extra bits */ | ||
422 | ush f; /* frequency */ | ||
423 | int overflow = 0; /* number of elements with bit length too large */ | ||
424 | |||
425 | for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0; | ||
426 | |||
427 | /* In a first pass, compute the optimal bit lengths (which may | ||
428 | * overflow in the case of the bit length tree). | ||
429 | */ | ||
430 | tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */ | ||
431 | |||
432 | for (h = s->heap_max+1; h < HEAP_SIZE; h++) { | ||
433 | n = s->heap[h]; | ||
434 | bits = tree[tree[n].Dad].Len + 1; | ||
435 | if (bits > max_length) bits = max_length, overflow++; | ||
436 | tree[n].Len = (ush)bits; | ||
437 | /* We overwrite tree[n].Dad which is no longer needed */ | ||
438 | |||
439 | if (n > max_code) continue; /* not a leaf node */ | ||
440 | |||
441 | s->bl_count[bits]++; | ||
442 | xbits = 0; | ||
443 | if (n >= base) xbits = extra[n-base]; | ||
444 | f = tree[n].Freq; | ||
445 | s->opt_len += (ulg)f * (bits + xbits); | ||
446 | if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits); | ||
447 | } | ||
448 | if (overflow == 0) return; | ||
449 | |||
450 | Trace((stderr,"\nbit length overflow\n")); | ||
451 | /* This happens for example on obj2 and pic of the Calgary corpus */ | ||
452 | |||
453 | /* Find the first bit length which could increase: */ | ||
454 | do { | ||
455 | bits = max_length-1; | ||
456 | while (s->bl_count[bits] == 0) bits--; | ||
457 | s->bl_count[bits]--; /* move one leaf down the tree */ | ||
458 | s->bl_count[bits+1] += 2; /* move one overflow item as its brother */ | ||
459 | s->bl_count[max_length]--; | ||
460 | /* The brother of the overflow item also moves one step up, | ||
461 | * but this does not affect bl_count[max_length] | ||
462 | */ | ||
463 | overflow -= 2; | ||
464 | } while (overflow > 0); | ||
465 | |||
466 | /* Now recompute all bit lengths, scanning in increasing frequency. | ||
467 | * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all | ||
468 | * lengths instead of fixing only the wrong ones. This idea is taken | ||
469 | * from 'ar' written by Haruhiko Okumura.) | ||
470 | */ | ||
471 | for (bits = max_length; bits != 0; bits--) { | ||
472 | n = s->bl_count[bits]; | ||
473 | while (n != 0) { | ||
474 | m = s->heap[--h]; | ||
475 | if (m > max_code) continue; | ||
476 | if (tree[m].Len != (unsigned) bits) { | ||
477 | Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); | ||
478 | s->opt_len += ((long)bits - (long)tree[m].Len) | ||
479 | *(long)tree[m].Freq; | ||
480 | tree[m].Len = (ush)bits; | ||
481 | } | ||
482 | n--; | ||
483 | } | ||
484 | } | ||
485 | } | ||
486 | |||
487 | /* =========================================================================== | ||
488 | * Generate the codes for a given tree and bit counts (which need not be | ||
489 | * optimal). | ||
490 | * IN assertion: the array bl_count contains the bit length statistics for | ||
491 | * the given tree and the field len is set for all tree elements. | ||
492 | * OUT assertion: the field code is set for all tree elements of non | ||
493 | * zero code length. | ||
494 | */ | ||
495 | static void gen_codes( | ||
496 | ct_data *tree, /* the tree to decorate */ | ||
497 | int max_code, /* largest code with non zero frequency */ | ||
498 | ush *bl_count /* number of codes at each bit length */ | ||
499 | ) | ||
500 | { | ||
501 | ush next_code[MAX_BITS+1]; /* next code value for each bit length */ | ||
502 | ush code = 0; /* running code value */ | ||
503 | int bits; /* bit index */ | ||
504 | int n; /* code index */ | ||
505 | |||
506 | /* The distribution counts are first used to generate the code values | ||
507 | * without bit reversal. | ||
508 | */ | ||
509 | for (bits = 1; bits <= MAX_BITS; bits++) { | ||
510 | next_code[bits] = code = (code + bl_count[bits-1]) << 1; | ||
511 | } | ||
512 | /* Check that the bit counts in bl_count are consistent. The last code | ||
513 | * must be all ones. | ||
514 | */ | ||
515 | Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1, | ||
516 | "inconsistent bit counts"); | ||
517 | Tracev((stderr,"\ngen_codes: max_code %d ", max_code)); | ||
518 | |||
519 | for (n = 0; n <= max_code; n++) { | ||
520 | int len = tree[n].Len; | ||
521 | if (len == 0) continue; | ||
522 | /* Now reverse the bits */ | ||
523 | tree[n].Code = bi_reverse(next_code[len]++, len); | ||
524 | |||
525 | Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ", | ||
526 | n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1)); | ||
527 | } | ||
528 | } | ||
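A worked example of the recurrence above: for bit lengths A:2, B:1, C:3, D:3 we have bl_count = {one 1-bit, one 2-bit, two 3-bit codes}, and

  next_code[1] = (0 + 0) << 1 = 0
  next_code[2] = (0 + 1) << 1 = 2
  next_code[3] = (2 + 1) << 1 = 6

so, assigning in symbol order: B -> 0 (binary 0), A -> 2 (binary 10), C -> 6 (110), D -> 7 (111). The last code is all ones, exactly what the consistency Assert demands, and bi_reverse() then flips each code because deflate emits bits LSB-first.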
529 | |||
530 | /* =========================================================================== | ||
531 | * Construct one Huffman tree and assign the code bit strings and lengths. | ||
532 | * Update the total bit length for the current block. | ||
533 | * IN assertion: the field freq is set for all tree elements. | ||
534 | * OUT assertions: the fields len and code are set to the optimal bit length | ||
535 | * and corresponding code. The length opt_len is updated; static_len is | ||
536 | * also updated if stree is not null. The field max_code is set. | ||
537 | */ | ||
538 | static void build_tree( | ||
539 | deflate_state *s, | ||
540 | tree_desc *desc /* the tree descriptor */ | ||
541 | ) | ||
542 | { | ||
543 | ct_data *tree = desc->dyn_tree; | ||
544 | const ct_data *stree = desc->stat_desc->static_tree; | ||
545 | int elems = desc->stat_desc->elems; | ||
546 | int n, m; /* iterate over heap elements */ | ||
547 | int max_code = -1; /* largest code with non zero frequency */ | ||
548 | int node; /* new node being created */ | ||
549 | |||
550 | /* Construct the initial heap, with least frequent element in | ||
551 | * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. | ||
552 | * heap[0] is not used. | ||
553 | */ | ||
554 | s->heap_len = 0, s->heap_max = HEAP_SIZE; | ||
555 | |||
556 | for (n = 0; n < elems; n++) { | ||
557 | if (tree[n].Freq != 0) { | ||
558 | s->heap[++(s->heap_len)] = max_code = n; | ||
559 | s->depth[n] = 0; | ||
560 | } else { | ||
561 | tree[n].Len = 0; | ||
562 | } | ||
563 | } | ||
564 | |||
565 | /* The pkzip format requires that at least one distance code exists, | ||
566 | * and that at least one bit should be sent even if there is only one | ||
567 | * possible code. So to avoid special checks later on we force at least | ||
568 | * two codes of non zero frequency. | ||
569 | */ | ||
570 | while (s->heap_len < 2) { | ||
571 | node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0); | ||
572 | tree[node].Freq = 1; | ||
573 | s->depth[node] = 0; | ||
574 | s->opt_len--; if (stree) s->static_len -= stree[node].Len; | ||
575 | /* node is 0 or 1 so it does not have extra bits */ | ||
576 | } | ||
577 | desc->max_code = max_code; | ||
578 | |||
579 | /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, | ||
580 | * establish sub-heaps of increasing lengths: | ||
581 | */ | ||
582 | for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n); | ||
583 | |||
584 | /* Construct the Huffman tree by repeatedly combining the least two | ||
585 | * frequent nodes. | ||
586 | */ | ||
587 | node = elems; /* next internal node of the tree */ | ||
588 | do { | ||
589 | pqremove(s, tree, n); /* n = node of least frequency */ | ||
590 | m = s->heap[SMALLEST]; /* m = node of next least frequency */ | ||
591 | |||
592 | s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */ | ||
593 | s->heap[--(s->heap_max)] = m; | ||
594 | |||
595 | /* Create a new node father of n and m */ | ||
596 | tree[node].Freq = tree[n].Freq + tree[m].Freq; | ||
597 | s->depth[node] = (uch) (max(s->depth[n], s->depth[m]) + 1); | ||
598 | tree[n].Dad = tree[m].Dad = (ush)node; | ||
599 | #ifdef DUMP_BL_TREE | ||
600 | if (tree == s->bl_tree) { | ||
601 | fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)", | ||
602 | node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq); | ||
603 | } | ||
604 | #endif | ||
605 | /* and insert the new node in the heap */ | ||
606 | s->heap[SMALLEST] = node++; | ||
607 | pqdownheap(s, tree, SMALLEST); | ||
608 | |||
609 | } while (s->heap_len >= 2); | ||
610 | |||
611 | s->heap[--(s->heap_max)] = s->heap[SMALLEST]; | ||
612 | |||
613 | /* At this point, the fields freq and dad are set. We can now | ||
614 | * generate the bit lengths. | ||
615 | */ | ||
616 | gen_bitlen(s, (tree_desc *)desc); | ||
617 | |||
618 | /* The field len is now set, we can generate the bit codes */ | ||
619 | gen_codes ((ct_data *)tree, max_code, s->bl_count); | ||
620 | } | ||
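A worked example of the merge loop: frequencies A:45, B:13, C:12, D:16, E:9, F:5 combine smallest-two-first as

  F+E = 14,  C+B = 25,  14+D = 30,  25+30 = 55,  55+A = 100

leaving A at depth 1, B, C and D at depth 3, and E and F at depth 4 once gen_bitlen() walks the Dad links; gen_codes() then turns those lengths into canonical bit strings as shown above.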
621 | |||
622 | /* =========================================================================== | ||
623 | * Scan a literal or distance tree to determine the frequencies of the codes | ||
624 | * in the bit length tree. | ||
625 | */ | ||
626 | static void scan_tree( | ||
627 | deflate_state *s, | ||
628 | ct_data *tree, /* the tree to be scanned */ | ||
629 | int max_code /* and its largest code of non zero frequency */ | ||
630 | ) | ||
631 | { | ||
632 | int n; /* iterates over all tree elements */ | ||
633 | int prevlen = -1; /* last emitted length */ | ||
634 | int curlen; /* length of current code */ | ||
635 | int nextlen = tree[0].Len; /* length of next code */ | ||
636 | int count = 0; /* repeat count of the current code */ | ||
637 | int max_count = 7; /* max repeat count */ | ||
638 | int min_count = 4; /* min repeat count */ | ||
639 | |||
640 | if (nextlen == 0) max_count = 138, min_count = 3; | ||
641 | tree[max_code+1].Len = (ush)0xffff; /* guard */ | ||
642 | |||
643 | for (n = 0; n <= max_code; n++) { | ||
644 | curlen = nextlen; nextlen = tree[n+1].Len; | ||
645 | if (++count < max_count && curlen == nextlen) { | ||
646 | continue; | ||
647 | } else if (count < min_count) { | ||
648 | s->bl_tree[curlen].Freq += count; | ||
649 | } else if (curlen != 0) { | ||
650 | if (curlen != prevlen) s->bl_tree[curlen].Freq++; | ||
651 | s->bl_tree[REP_3_6].Freq++; | ||
652 | } else if (count <= 10) { | ||
653 | s->bl_tree[REPZ_3_10].Freq++; | ||
654 | } else { | ||
655 | s->bl_tree[REPZ_11_138].Freq++; | ||
656 | } | ||
657 | count = 0; prevlen = curlen; | ||
658 | if (nextlen == 0) { | ||
659 | max_count = 138, min_count = 3; | ||
660 | } else if (curlen == nextlen) { | ||
661 | max_count = 6, min_count = 3; | ||
662 | } else { | ||
663 | max_count = 7, min_count = 4; | ||
664 | } | ||
665 | } | ||
666 | } | ||
667 | |||
668 | /* =========================================================================== | ||
669 | * Send a literal or distance tree in compressed form, using the codes in | ||
670 | * bl_tree. | ||
671 | */ | ||
672 | static void send_tree( | ||
673 | deflate_state *s, | ||
674 | ct_data *tree, /* the tree to be scanned */ | ||
675 | int max_code /* and its largest code of non zero frequency */ | ||
676 | ) | ||
677 | { | ||
678 | int n; /* iterates over all tree elements */ | ||
679 | int prevlen = -1; /* last emitted length */ | ||
680 | int curlen; /* length of current code */ | ||
681 | int nextlen = tree[0].Len; /* length of next code */ | ||
682 | int count = 0; /* repeat count of the current code */ | ||
683 | int max_count = 7; /* max repeat count */ | ||
684 | int min_count = 4; /* min repeat count */ | ||
685 | |||
686 | /* tree[max_code+1].Len = -1; */ /* guard already set */ | ||
687 | if (nextlen == 0) max_count = 138, min_count = 3; | ||
688 | |||
689 | for (n = 0; n <= max_code; n++) { | ||
690 | curlen = nextlen; nextlen = tree[n+1].Len; | ||
691 | if (++count < max_count && curlen == nextlen) { | ||
692 | continue; | ||
693 | } else if (count < min_count) { | ||
694 | do { send_code(s, curlen, s->bl_tree); } while (--count != 0); | ||
695 | |||
696 | } else if (curlen != 0) { | ||
697 | if (curlen != prevlen) { | ||
698 | send_code(s, curlen, s->bl_tree); count--; | ||
699 | } | ||
700 | Assert(count >= 3 && count <= 6, " 3_6?"); | ||
701 | send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2); | ||
702 | |||
703 | } else if (count <= 10) { | ||
704 | send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3); | ||
705 | |||
706 | } else { | ||
707 | send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7); | ||
708 | } | ||
709 | count = 0; prevlen = curlen; | ||
710 | if (nextlen == 0) { | ||
711 | max_count = 138, min_count = 3; | ||
712 | } else if (curlen == nextlen) { | ||
713 | max_count = 6, min_count = 3; | ||
714 | } else { | ||
715 | max_count = 7, min_count = 4; | ||
716 | } | ||
717 | } | ||
718 | } | ||
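A worked example of these escapes: the code-length run

  8 8 8 8 8 8 0 0 0 0 0 0 0 0 0 0 0 0 5

is sent as just four items: the literal length 8, REP_3_6 with 2-bit count 5-3 (five more eights), REPZ_11_138 with 7-bit count 12-11 (twelve zeros), and the literal length 5. Nineteen entries collapse into four bit-length-tree symbols plus their repeat counts.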
719 | |||
720 | /* =========================================================================== | ||
721 | * Construct the Huffman tree for the bit lengths and return the index in | ||
722 | * bl_order of the last bit length code to send. | ||
723 | */ | ||
724 | static int build_bl_tree( | ||
725 | deflate_state *s | ||
726 | ) | ||
727 | { | ||
728 | int max_blindex; /* index of last bit length code of non zero freq */ | ||
729 | |||
730 | /* Determine the bit length frequencies for literal and distance trees */ | ||
731 | scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code); | ||
732 | scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code); | ||
733 | |||
734 | /* Build the bit length tree: */ | ||
735 | build_tree(s, (tree_desc *)(&(s->bl_desc))); | ||
736 | /* opt_len now includes the length of the tree representations, except | ||
737 | * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. | ||
738 | */ | ||
739 | |||
740 | /* Determine the number of bit length codes to send. The pkzip format | ||
741 | * requires that at least 4 bit length codes be sent. (appnote.txt says | ||
742 | * 3 but the actual value used is 4.) | ||
743 | */ | ||
744 | for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) { | ||
745 | if (s->bl_tree[bl_order[max_blindex]].Len != 0) break; | ||
746 | } | ||
747 | /* Update opt_len to include the bit length tree and counts */ | ||
748 | s->opt_len += 3*(max_blindex+1) + 5+5+4; | ||
749 | Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", | ||
750 | s->opt_len, s->static_len)); | ||
751 | |||
752 | return max_blindex; | ||
753 | } | ||
754 | |||
755 | /* =========================================================================== | ||
756 | * Send the header for a block using dynamic Huffman trees: the counts, the | ||
757 | * lengths of the bit length codes, the literal tree and the distance tree. | ||
758 | * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. | ||
759 | */ | ||
760 | static void send_all_trees( | ||
761 | deflate_state *s, | ||
762 | int lcodes, /* number of literal/length codes */ | ||
763 | int dcodes, /* number of distance codes */ | ||
764 | int blcodes /* number of bit length codes */ | ||
765 | ) | ||
766 | { | ||
767 | int rank; /* index in bl_order */ | ||
768 | |||
769 | Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); | ||
770 | Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, | ||
771 | "too many codes"); | ||
772 | Tracev((stderr, "\nbl counts: ")); | ||
773 | send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */ | ||
774 | send_bits(s, dcodes-1, 5); | ||
775 | send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */ | ||
776 | for (rank = 0; rank < blcodes; rank++) { | ||
777 | Tracev((stderr, "\nbl code %2d ", bl_order[rank])); | ||
778 | send_bits(s, s->bl_tree[bl_order[rank]].Len, 3); | ||
779 | } | ||
780 | Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); | ||
781 | |||
782 | send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */ | ||
783 | Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); | ||
784 | |||
785 | send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */ | ||
786 | Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); | ||
787 | } | ||
788 | |||
789 | /* =========================================================================== | ||
790 | * Send a stored block | ||
791 | */ | ||
792 | void zlib_tr_stored_block( | ||
793 | deflate_state *s, | ||
794 | char *buf, /* input block */ | ||
795 | ulg stored_len, /* length of input block */ | ||
796 | int eof /* true if this is the last block for a file */ | ||
797 | ) | ||
798 | { | ||
799 | send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */ | ||
800 | s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L; | ||
801 | s->compressed_len += (stored_len + 4) << 3; | ||
802 | |||
803 | copy_block(s, buf, (unsigned)stored_len, 1); /* with header */ | ||
804 | } | ||
805 | |||
806 | /* Send just the `stored block' type code without any length bytes or data. | ||
807 | */ | ||
808 | void zlib_tr_stored_type_only( | ||
809 | deflate_state *s | ||
810 | ) | ||
811 | { | ||
812 | send_bits(s, (STORED_BLOCK << 1), 3); | ||
813 | bi_windup(s); | ||
814 | s->compressed_len = (s->compressed_len + 3) & ~7L; | ||
815 | } | ||
816 | |||
817 | |||
818 | /* =========================================================================== | ||
819 | * Send one empty static block to give enough lookahead for inflate. | ||
820 | * This takes 10 bits, of which 7 may remain in the bit buffer. | ||
821 | * The current inflate code requires 9 bits of lookahead. If the | ||
822 | * last two codes for the previous block (real code plus EOB) were coded | ||
823 | * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode | ||
824 | * the last real code. In this case we send two empty static blocks instead | ||
825 | * of one. (There are no problems if the previous block is stored or fixed.) | ||
826 | * To simplify the code, we assume the worst case of last real code encoded | ||
827 | * on one bit only. | ||
828 | */ | ||
829 | void zlib_tr_align( | ||
830 | deflate_state *s | ||
831 | ) | ||
832 | { | ||
833 | send_bits(s, STATIC_TREES<<1, 3); | ||
834 | send_code(s, END_BLOCK, static_ltree); | ||
835 | s->compressed_len += 10L; /* 3 for block type, 7 for EOB */ | ||
836 | bi_flush(s); | ||
837 | /* Of the 10 bits for the empty block, we have already sent | ||
838 | * (10 - bi_valid) bits. The lookahead for the last real code (before | ||
839 | * the EOB of the previous block) was thus at least one plus the length | ||
840 | * of the EOB plus what we have just sent of the empty static block. | ||
841 | */ | ||
842 | if (1 + s->last_eob_len + 10 - s->bi_valid < 9) { | ||
843 | send_bits(s, STATIC_TREES<<1, 3); | ||
844 | send_code(s, END_BLOCK, static_ltree); | ||
845 | s->compressed_len += 10L; | ||
846 | bi_flush(s); | ||
847 | } | ||
848 | s->last_eob_len = 7; | ||
849 | } | ||
850 | |||
851 | /* =========================================================================== | ||
852 | * Determine the best encoding for the current block: dynamic trees, static | ||
853 | * trees or store, and output the encoded block to the zip file. This function | ||
854 | * returns the total compressed length for the file so far. | ||
855 | */ | ||
856 | ulg zlib_tr_flush_block( | ||
857 | deflate_state *s, | ||
858 | char *buf, /* input block, or NULL if too old */ | ||
859 | ulg stored_len, /* length of input block */ | ||
860 | int eof /* true if this is the last block for a file */ | ||
861 | ) | ||
862 | { | ||
863 | ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */ | ||
864 | int max_blindex = 0; /* index of last bit length code of non zero freq */ | ||
865 | |||
866 | /* Build the Huffman trees unless a stored block is forced */ | ||
867 | if (s->level > 0) { | ||
868 | |||
869 | /* Check if the file is ascii or binary */ | ||
870 | if (s->data_type == Z_UNKNOWN) set_data_type(s); | ||
871 | |||
872 | /* Construct the literal and distance trees */ | ||
873 | build_tree(s, (tree_desc *)(&(s->l_desc))); | ||
874 | Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, | ||
875 | s->static_len)); | ||
876 | |||
877 | build_tree(s, (tree_desc *)(&(s->d_desc))); | ||
878 | Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, | ||
879 | s->static_len)); | ||
880 | /* At this point, opt_len and static_len are the total bit lengths of | ||
881 | * the compressed block data, excluding the tree representations. | ||
882 | */ | ||
883 | |||
884 | /* Build the bit length tree for the above two trees, and get the index | ||
885 | * in bl_order of the last bit length code to send. | ||
886 | */ | ||
887 | max_blindex = build_bl_tree(s); | ||
888 | |||
889 | /* Determine the best encoding. Compute first the block length in bytes*/ | ||
890 | opt_lenb = (s->opt_len+3+7)>>3; | ||
891 | static_lenb = (s->static_len+3+7)>>3; | ||
892 | |||
893 | Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", | ||
894 | opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, | ||
895 | s->last_lit)); | ||
896 | |||
897 | if (static_lenb <= opt_lenb) opt_lenb = static_lenb; | ||
898 | |||
899 | } else { | ||
900 | Assert(buf != (char*)0, "lost buf"); | ||
901 | opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ | ||
902 | } | ||
903 | |||
904 | /* If compression failed and this is the first and last block, | ||
905 | * and if the .zip file supports seeking (to rewrite the local header), | ||
906 | * the whole file is transformed into a stored file: | ||
907 | */ | ||
908 | #ifdef STORED_FILE_OK | ||
909 | # ifdef FORCE_STORED_FILE | ||
910 | if (eof && s->compressed_len == 0L) { /* force stored file */ | ||
911 | # else | ||
912 | if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) { | ||
913 | # endif | ||
914 | /* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */ | ||
915 | if (buf == (char*)0) error ("block vanished"); | ||
916 | |||
917 | copy_block(s, buf, (unsigned)stored_len, 0); /* without header */ | ||
918 | s->compressed_len = stored_len << 3; | ||
919 | s->method = STORED; | ||
920 | } else | ||
921 | #endif /* STORED_FILE_OK */ | ||
922 | |||
923 | #ifdef FORCE_STORED | ||
924 | if (buf != (char*)0) { /* force stored block */ | ||
925 | #else | ||
926 | if (stored_len+4 <= opt_lenb && buf != (char*)0) { | ||
927 | /* 4: two words for the lengths */ | ||
928 | #endif | ||
929 | /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. | ||
930 | * Otherwise we can't have processed more than WSIZE input bytes since | ||
931 | * the last block flush, because compression would have been | ||
932 | * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to | ||
933 | * transform a block into a stored block. | ||
934 | */ | ||
935 | zlib_tr_stored_block(s, buf, stored_len, eof); | ||
936 | |||
937 | #ifdef FORCE_STATIC | ||
938 | } else if (static_lenb >= 0) { /* force static trees */ | ||
939 | #else | ||
940 | } else if (static_lenb == opt_lenb) { | ||
941 | #endif | ||
942 | send_bits(s, (STATIC_TREES<<1)+eof, 3); | ||
943 | compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree); | ||
944 | s->compressed_len += 3 + s->static_len; | ||
945 | } else { | ||
946 | send_bits(s, (DYN_TREES<<1)+eof, 3); | ||
947 | send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1, | ||
948 | max_blindex+1); | ||
949 | compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree); | ||
950 | s->compressed_len += 3 + s->opt_len; | ||
951 | } | ||
952 | Assert (s->compressed_len == s->bits_sent, "bad compressed size"); | ||
953 | init_block(s); | ||
954 | |||
955 | if (eof) { | ||
956 | bi_windup(s); | ||
957 | s->compressed_len += 7; /* align on byte boundary */ | ||
958 | } | ||
959 | Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, | ||
960 | s->compressed_len-7*eof)); | ||
961 | |||
962 | return s->compressed_len >> 3; | ||
963 | } | ||
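The decision above compares three byte costs and keeps the cheapest, preferring the simpler encoding on ties. A minimal restatement of that rule (editor's sketch; the enum and function are illustrative, not part of this file):

	/* Mirrors the choice in zlib_tr_flush_block(): opt_lenb and
	 * static_lenb already include the 3-bit block header rounded
	 * up to bytes; stored_len + 4 adds the LEN/NLEN length words.
	 * The real code also requires the input buffer to still be
	 * valid before it can choose a stored block.
	 */
	enum block_choice { USE_STORED, USE_STATIC, USE_DYNAMIC };

	static enum block_choice choose_encoding(unsigned long opt_lenb,
						 unsigned long static_lenb,
						 unsigned long stored_len)
	{
		if (static_lenb <= opt_lenb)
			opt_lenb = static_lenb;	/* static wins ties vs dynamic */
		if (stored_len + 4 <= opt_lenb)
			return USE_STORED;
		if (static_lenb == opt_lenb)
			return USE_STATIC;
		return USE_DYNAMIC;
	}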
964 | |||
965 | /* =========================================================================== | ||
966 | * Save the match info and tally the frequency counts. Return true if | ||
967 | * the current block must be flushed. | ||
968 | */ | ||
969 | int zlib_tr_tally( | ||
970 | deflate_state *s, | ||
971 | unsigned dist, /* distance of matched string */ | ||
972 | unsigned lc /* match length-MIN_MATCH or unmatched char (if dist==0) */ | ||
973 | ) | ||
974 | { | ||
975 | s->d_buf[s->last_lit] = (ush)dist; | ||
976 | s->l_buf[s->last_lit++] = (uch)lc; | ||
977 | if (dist == 0) { | ||
978 | /* lc is the unmatched char */ | ||
979 | s->dyn_ltree[lc].Freq++; | ||
980 | } else { | ||
981 | s->matches++; | ||
982 | /* Here, lc is the match length - MIN_MATCH */ | ||
983 | dist--; /* dist = match distance - 1 */ | ||
984 | Assert((ush)dist < (ush)MAX_DIST(s) && | ||
985 | (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && | ||
986 | (ush)d_code(dist) < (ush)D_CODES, "zlib_tr_tally: bad match"); | ||
987 | |||
988 | s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++; | ||
989 | s->dyn_dtree[d_code(dist)].Freq++; | ||
990 | } | ||
991 | |||
992 | /* Try to guess if it is profitable to stop the current block here */ | ||
993 | if ((s->last_lit & 0xfff) == 0 && s->level > 2) { | ||
994 | /* Compute an upper bound for the compressed length */ | ||
995 | ulg out_length = (ulg)s->last_lit*8L; | ||
996 | ulg in_length = (ulg)((long)s->strstart - s->block_start); | ||
997 | int dcode; | ||
998 | for (dcode = 0; dcode < D_CODES; dcode++) { | ||
999 | out_length += (ulg)s->dyn_dtree[dcode].Freq * | ||
1000 | (5L+extra_dbits[dcode]); | ||
1001 | } | ||
1002 | out_length >>= 3; | ||
1003 | Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", | ||
1004 | s->last_lit, in_length, out_length, | ||
1005 | 100L - out_length*100L/in_length)); | ||
1006 | if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; | ||
1007 | } | ||
1008 | return (s->last_lit == s->lit_bufsize-1); | ||
1009 | /* We avoid equality with lit_bufsize because of wraparound at 64K | ||
1010 | * on 16 bit machines and because stored blocks are restricted to | ||
1011 | * 64K-1 bytes. | ||
1012 | */ | ||
1013 | } | ||
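Callers record one symbol per call: dist == 0 tallies a plain literal, anything else a (distance, length - MIN_MATCH) pair. A sketch of the two call shapes as deflate.c typically issues them (variable spelling is illustrative):

	/* literal byte at the current window position */
	flush = zlib_tr_tally(s, 0, s->window[s->strstart]);

	/* match: distance back to the match, length biased by MIN_MATCH */
	flush = zlib_tr_tally(s, s->strstart - s->match_start,
			      s->match_length - MIN_MATCH);

	/* a nonzero return from either call means: flush the block now */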
1014 | |||
1015 | /* =========================================================================== | ||
1016 | * Send the block data compressed using the given Huffman trees | ||
1017 | */ | ||
1018 | static void compress_block( | ||
1019 | deflate_state *s, | ||
1020 | ct_data *ltree, /* literal tree */ | ||
1021 | ct_data *dtree /* distance tree */ | ||
1022 | ) | ||
1023 | { | ||
1024 | unsigned dist; /* distance of matched string */ | ||
1025 | int lc; /* match length or unmatched char (if dist == 0) */ | ||
1026 | unsigned lx = 0; /* running index in l_buf */ | ||
1027 | unsigned code; /* the code to send */ | ||
1028 | int extra; /* number of extra bits to send */ | ||
1029 | |||
1030 | if (s->last_lit != 0) do { | ||
1031 | dist = s->d_buf[lx]; | ||
1032 | lc = s->l_buf[lx++]; | ||
1033 | if (dist == 0) { | ||
1034 | send_code(s, lc, ltree); /* send a literal byte */ | ||
1035 | Tracecv(isgraph(lc), (stderr," '%c' ", lc)); | ||
1036 | } else { | ||
1037 | /* Here, lc is the match length - MIN_MATCH */ | ||
1038 | code = length_code[lc]; | ||
1039 | send_code(s, code+LITERALS+1, ltree); /* send the length code */ | ||
1040 | extra = extra_lbits[code]; | ||
1041 | if (extra != 0) { | ||
1042 | lc -= base_length[code]; | ||
1043 | send_bits(s, lc, extra); /* send the extra length bits */ | ||
1044 | } | ||
1045 | dist--; /* dist is now the match distance - 1 */ | ||
1046 | code = d_code(dist); | ||
1047 | Assert (code < D_CODES, "bad d_code"); | ||
1048 | |||
1049 | send_code(s, code, dtree); /* send the distance code */ | ||
1050 | extra = extra_dbits[code]; | ||
1051 | if (extra != 0) { | ||
1052 | dist -= base_dist[code]; | ||
1053 | send_bits(s, dist, extra); /* send the extra distance bits */ | ||
1054 | } | ||
1055 | } /* literal or match pair ? */ | ||
1056 | |||
1057 | /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ | ||
1058 | Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow"); | ||
1059 | |||
1060 | } while (lx < s->last_lit); | ||
1061 | |||
1062 | send_code(s, END_BLOCK, ltree); | ||
1063 | s->last_eob_len = ltree[END_BLOCK].Len; | ||
1064 | } | ||
1065 | |||
1066 | /* =========================================================================== | ||
1067 | * Set the data type to ASCII or BINARY, using a crude approximation: | ||
1068 | * binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise. | ||
1069 | * IN assertion: the fields freq of dyn_ltree are set and the total of all | ||
1070 | * frequencies does not exceed 64K (to fit in an int on 16 bit machines). | ||
1071 | */ | ||
1072 | static void set_data_type( | ||
1073 | deflate_state *s | ||
1074 | ) | ||
1075 | { | ||
1076 | int n = 0; | ||
1077 | unsigned ascii_freq = 0; | ||
1078 | unsigned bin_freq = 0; | ||
1079 | while (n < 7) bin_freq += s->dyn_ltree[n++].Freq; | ||
1080 | while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq; | ||
1081 | while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq; | ||
1082 | s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII); | ||
1083 | } | ||
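Worked example of the threshold: after tallying 1000 bytes of which 300 fall in the binary ranges (<= 6 or >= 128) and 700 in 7..127, the test 300 > (700 >> 2) = 175 holds, so the block is tagged Z_BINARY; with exactly 200 binary bytes (20%), 200 > 200 fails and the type stays Z_ASCII, matching the "more than 20%" rule in the comment above.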
1084 | |||
1085 | /* =========================================================================== | ||
1086 | * Copy a stored block, storing first the length and its | ||
1087 | * one's complement if requested. | ||
1088 | */ | ||
1089 | static void copy_block( | ||
1090 | deflate_state *s, | ||
1091 | char *buf, /* the input data */ | ||
1092 | unsigned len, /* its length */ | ||
1093 | int header /* true if block header must be written */ | ||
1094 | ) | ||
1095 | { | ||
1096 | bi_windup(s); /* align on byte boundary */ | ||
1097 | s->last_eob_len = 8; /* enough lookahead for inflate */ | ||
1098 | |||
1099 | if (header) { | ||
1100 | put_short(s, (ush)len); | ||
1101 | put_short(s, (ush)~len); | ||
1102 | #ifdef DEBUG_ZLIB | ||
1103 | s->bits_sent += 2*16; | ||
1104 | #endif | ||
1105 | } | ||
1106 | #ifdef DEBUG_ZLIB | ||
1107 | s->bits_sent += (ulg)len<<3; | ||
1108 | #endif | ||
1109 | /* bundle up the put_byte(s, *buf++) calls */ | ||
1110 | memcpy(&s->pending_buf[s->pending], buf, len); | ||
1111 | s->pending += len; | ||
1112 | } | ||
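Worked example of the header path: for len = 5 the two put_short() calls emit LEN and then its one's complement NLEN, least significant byte first, so the output is 05 00 FA FF followed by the five raw bytes. This is the stored-block framing that inflate later verifies with (((~b) >> 16) & 0xffff) == (b & 0xffff).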
1113 | |||
diff --git a/lib/zlib_deflate/defutil.h b/lib/zlib_deflate/defutil.h new file mode 100644 index 000000000000..d9feaf638608 --- /dev/null +++ b/lib/zlib_deflate/defutil.h | |||
@@ -0,0 +1,334 @@ | |||
1 | |||
2 | |||
3 | |||
4 | #define Assert(err, str) | ||
5 | #define Trace(dummy) | ||
6 | #define Tracev(dummy) | ||
7 | #define Tracecv(err, dummy) | ||
8 | #define Tracevv(dummy) | ||
9 | |||
10 | |||
11 | |||
12 | #define LENGTH_CODES 29 | ||
13 | /* number of length codes, not counting the special END_BLOCK code */ | ||
14 | |||
15 | #define LITERALS 256 | ||
16 | /* number of literal bytes 0..255 */ | ||
17 | |||
18 | #define L_CODES (LITERALS+1+LENGTH_CODES) | ||
19 | /* number of Literal or Length codes, including the END_BLOCK code */ | ||
20 | |||
21 | #define D_CODES 30 | ||
22 | /* number of distance codes */ | ||
23 | |||
24 | #define BL_CODES 19 | ||
25 | /* number of codes used to transfer the bit lengths */ | ||
26 | |||
27 | #define HEAP_SIZE (2*L_CODES+1) | ||
28 | /* maximum heap size */ | ||
29 | |||
30 | #define MAX_BITS 15 | ||
31 | /* All codes must not exceed MAX_BITS bits */ | ||
32 | |||
33 | #define INIT_STATE 42 | ||
34 | #define BUSY_STATE 113 | ||
35 | #define FINISH_STATE 666 | ||
36 | /* Stream status */ | ||
37 | |||
38 | |||
39 | /* Data structure describing a single value and its code string. */ | ||
40 | typedef struct ct_data_s { | ||
41 | union { | ||
42 | ush freq; /* frequency count */ | ||
43 | ush code; /* bit string */ | ||
44 | } fc; | ||
45 | union { | ||
46 | ush dad; /* father node in Huffman tree */ | ||
47 | ush len; /* length of bit string */ | ||
48 | } dl; | ||
49 | } ct_data; | ||
50 | |||
51 | #define Freq fc.freq | ||
52 | #define Code fc.code | ||
53 | #define Dad dl.dad | ||
54 | #define Len dl.len | ||
55 | |||
56 | typedef struct static_tree_desc_s static_tree_desc; | ||
57 | |||
58 | typedef struct tree_desc_s { | ||
59 | ct_data *dyn_tree; /* the dynamic tree */ | ||
60 | int max_code; /* largest code with non zero frequency */ | ||
61 | static_tree_desc *stat_desc; /* the corresponding static tree */ | ||
62 | } tree_desc; | ||
63 | |||
64 | typedef ush Pos; | ||
65 | typedef unsigned IPos; | ||
66 | |||
67 | /* A Pos is an index in the character window. We use short instead of int to | ||
68 | * save space in the various tables. IPos is used only for parameter passing. | ||
69 | */ | ||
70 | |||
71 | typedef struct deflate_state { | ||
72 | z_streamp strm; /* pointer back to this zlib stream */ | ||
73 | int status; /* as the name implies */ | ||
74 | Byte *pending_buf; /* output still pending */ | ||
75 | ulg pending_buf_size; /* size of pending_buf */ | ||
76 | Byte *pending_out; /* next pending byte to output to the stream */ | ||
77 | int pending; /* nb of bytes in the pending buffer */ | ||
78 | int noheader; /* suppress zlib header and adler32 */ | ||
79 | Byte data_type; /* UNKNOWN, BINARY or ASCII */ | ||
80 | Byte method; /* STORED (for zip only) or DEFLATED */ | ||
81 | int last_flush; /* value of flush param for previous deflate call */ | ||
82 | |||
83 | /* used by deflate.c: */ | ||
84 | |||
85 | uInt w_size; /* LZ77 window size (32K by default) */ | ||
86 | uInt w_bits; /* log2(w_size) (8..16) */ | ||
87 | uInt w_mask; /* w_size - 1 */ | ||
88 | |||
89 | Byte *window; | ||
90 | /* Sliding window. Input bytes are read into the second half of the window, | ||
91 | * and move to the first half later to keep a dictionary of at least wSize | ||
92 | * bytes. With this organization, matches are limited to a distance of | ||
93 | * wSize-MAX_MATCH bytes, but this ensures that IO is always | ||
94 | * performed with a length multiple of the block size. Also, it limits | ||
95 | * the window size to 64K, which is quite useful on MSDOS. | ||
96 | * To do: use the user input buffer as sliding window. | ||
97 | */ | ||
98 | |||
99 | ulg window_size; | ||
100 | /* Actual size of window: 2*wSize, except when the user input buffer | ||
101 | * is directly used as sliding window. | ||
102 | */ | ||
103 | |||
104 | Pos *prev; | ||
105 | /* Link to older string with same hash index. To limit the size of this | ||
106 | * array to 64K, this link is maintained only for the last 32K strings. | ||
107 | * An index in this array is thus a window index modulo 32K. | ||
108 | */ | ||
109 | |||
110 | Pos *head; /* Heads of the hash chains or NIL. */ | ||
111 | |||
112 | uInt ins_h; /* hash index of string to be inserted */ | ||
113 | uInt hash_size; /* number of elements in hash table */ | ||
114 | uInt hash_bits; /* log2(hash_size) */ | ||
115 | uInt hash_mask; /* hash_size-1 */ | ||
116 | |||
117 | uInt hash_shift; | ||
118 | /* Number of bits by which ins_h must be shifted at each input | ||
119 | * step. It must be such that after MIN_MATCH steps, the oldest | ||
120 | * byte no longer takes part in the hash key, that is: | ||
121 | * hash_shift * MIN_MATCH >= hash_bits (a worked example follows | ||
122 | * this struct definition). */ | ||
123 | |||
124 | long block_start; | ||
125 | /* Window position at the beginning of the current output block. Gets | ||
126 | * negative when the window is moved backwards. | ||
127 | */ | ||
128 | |||
129 | uInt match_length; /* length of best match */ | ||
130 | IPos prev_match; /* previous match */ | ||
131 | int match_available; /* set if previous match exists */ | ||
132 | uInt strstart; /* start of string to insert */ | ||
133 | uInt match_start; /* start of matching string */ | ||
134 | uInt lookahead; /* number of valid bytes ahead in window */ | ||
135 | |||
136 | uInt prev_length; | ||
137 | /* Length of the best match at previous step. Matches not greater than this | ||
138 | * are discarded. This is used in the lazy match evaluation. | ||
139 | */ | ||
140 | |||
141 | uInt max_chain_length; | ||
142 | /* To speed up deflation, hash chains are never searched beyond this | ||
143 | * length. A higher limit improves compression ratio but degrades the | ||
144 | * speed. | ||
145 | */ | ||
146 | |||
147 | uInt max_lazy_match; | ||
148 | /* Attempt to find a better match only when the current match is strictly | ||
149 | * smaller than this value. This mechanism is used only for compression | ||
150 | * levels >= 4. | ||
151 | */ | ||
152 | # define max_insert_length max_lazy_match | ||
153 | /* Insert new strings in the hash table only if the match length is not | ||
154 | * greater than this length. This saves time but degrades compression. | ||
155 | * max_insert_length is used only for compression levels <= 3. | ||
156 | */ | ||
157 | |||
158 | int level; /* compression level (1..9) */ | ||
159 | int strategy; /* favor or force Huffman coding */ | ||
160 | |||
161 | uInt good_match; | ||
162 | /* Use a faster search when the previous match is longer than this */ | ||
163 | |||
164 | int nice_match; /* Stop searching when current match exceeds this */ | ||
165 | |||
166 | /* used by trees.c: */ | ||
167 | /* Didn't use ct_data typedef below to suppress compiler warning */ | ||
168 | struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ | ||
169 | struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ | ||
170 | struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ | ||
171 | |||
172 | struct tree_desc_s l_desc; /* desc. for literal tree */ | ||
173 | struct tree_desc_s d_desc; /* desc. for distance tree */ | ||
174 | struct tree_desc_s bl_desc; /* desc. for bit length tree */ | ||
175 | |||
176 | ush bl_count[MAX_BITS+1]; | ||
177 | /* number of codes at each bit length for an optimal tree */ | ||
178 | |||
179 | int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ | ||
180 | int heap_len; /* number of elements in the heap */ | ||
181 | int heap_max; /* element of largest frequency */ | ||
182 | /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. | ||
183 | * The same heap array is used to build all trees. | ||
184 | */ | ||
185 | |||
186 | uch depth[2*L_CODES+1]; | ||
187 | /* Depth of each subtree used as tie breaker for trees of equal frequency | ||
188 | */ | ||
189 | |||
190 | uch *l_buf; /* buffer for literals or lengths */ | ||
191 | |||
192 | uInt lit_bufsize; | ||
193 | /* Size of match buffer for literals/lengths. There are 4 reasons for | ||
194 | * limiting lit_bufsize to 64K: | ||
195 | * - frequencies can be kept in 16 bit counters | ||
196 | * - if compression is not successful for the first block, all input | ||
197 | * data is still in the window so we can still emit a stored block even | ||
198 | * when input comes from standard input. (This can also be done for | ||
199 | * all blocks if lit_bufsize is not greater than 32K.) | ||
200 | * - if compression is not successful for a file smaller than 64K, we can | ||
201 | * even emit a stored file instead of a stored block (saving 5 bytes). | ||
202 | * This is applicable only for zip (not gzip or zlib). | ||
203 | * - creating new Huffman trees less frequently may not provide fast | ||
204 | * adaptation to changes in the input data statistics. (Take for | ||
205 | * example a binary file with poorly compressible code followed by | ||
206 | * a highly compressible string table.) Smaller buffer sizes give | ||
207 | * fast adaptation but have of course the overhead of transmitting | ||
208 | * trees more frequently. | ||
209 | * - I can't count above 4 | ||
210 | */ | ||
211 | |||
212 | uInt last_lit; /* running index in l_buf */ | ||
213 | |||
214 | ush *d_buf; | ||
215 | /* Buffer for distances. To simplify the code, d_buf and l_buf have | ||
216 | * the same number of elements. To use different lengths, an extra flag | ||
217 | * array would be necessary. | ||
218 | */ | ||
219 | |||
220 | ulg opt_len; /* bit length of current block with optimal trees */ | ||
221 | ulg static_len; /* bit length of current block with static trees */ | ||
222 | ulg compressed_len; /* total bit length of compressed file */ | ||
223 | uInt matches; /* number of string matches in current block */ | ||
224 | int last_eob_len; /* bit length of EOB code for last block */ | ||
225 | |||
226 | #ifdef DEBUG_ZLIB | ||
227 | ulg bits_sent; /* bit length of the compressed data */ | ||
228 | #endif | ||
229 | |||
230 | ush bi_buf; | ||
231 | /* Output buffer. bits are inserted starting at the bottom (least | ||
232 | * significant bits). | ||
233 | */ | ||
234 | int bi_valid; | ||
235 | /* Number of valid bits in bi_buf. All bits above the last valid bit | ||
236 | * are always zero. | ||
237 | */ | ||
238 | |||
239 | } deflate_state; | ||
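Worked example for the hash_shift constraint noted inside the struct above (the default values and the update macro mirror stock zlib and are assumptions here, not spelled out in this file): with hash_bits = 15 and MIN_MATCH = 3, hash_shift = (15 + 3 - 1) / 3 = 5, and 5 * 3 = 15 >= 15 holds, so after three updates the oldest byte has been shifted entirely out of the masked hash value:

	/* rolling hash: fold in one input byte c per step */
	#define UPDATE_HASH(h, c) \
		((h) = (((h) << 5) ^ (c)) & ((1U << 15) - 1))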
240 | |||
241 | typedef struct deflate_workspace { | ||
242 | /* State memory for the deflator */ | ||
243 | deflate_state deflate_memory; | ||
244 | Byte window_memory[2 * (1 << MAX_WBITS)]; | ||
245 | Pos prev_memory[1 << MAX_WBITS]; | ||
246 | Pos head_memory[1 << (MAX_MEM_LEVEL + 7)]; | ||
247 | char overlay_memory[(1 << (MAX_MEM_LEVEL + 6)) * (sizeof(ush)+2)]; | ||
248 | } deflate_workspace; | ||
249 | |||
250 | /* Output a byte on the stream. | ||
251 | * IN assertion: there is enough room in pending_buf. | ||
252 | */ | ||
253 | #define put_byte(s, c) {s->pending_buf[s->pending++] = (c);} | ||
254 | |||
255 | |||
256 | #define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) | ||
257 | /* Minimum amount of lookahead, except at the end of the input file. | ||
258 | * See deflate.c for comments about the MIN_MATCH+1. | ||
259 | */ | ||
260 | |||
261 | #define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD) | ||
262 | /* In order to simplify the code, particularly on 16 bit machines, match | ||
263 | * distances are limited to MAX_DIST instead of WSIZE. | ||
264 | */ | ||
265 | |||
266 | /* in trees.c */ | ||
267 | void zlib_tr_init (deflate_state *s); | ||
268 | int zlib_tr_tally (deflate_state *s, unsigned dist, unsigned lc); | ||
269 | ulg zlib_tr_flush_block (deflate_state *s, char *buf, ulg stored_len, | ||
270 | int eof); | ||
271 | void zlib_tr_align (deflate_state *s); | ||
272 | void zlib_tr_stored_block (deflate_state *s, char *buf, ulg stored_len, | ||
273 | int eof); | ||
274 | void zlib_tr_stored_type_only (deflate_state *); | ||
275 | |||
276 | |||
277 | /* =========================================================================== | ||
278 | * Output a short LSB first on the stream. | ||
279 | * IN assertion: there is enough room in pendingBuf. | ||
280 | */ | ||
281 | #define put_short(s, w) { \ | ||
282 | put_byte(s, (uch)((w) & 0xff)); \ | ||
283 | put_byte(s, (uch)((ush)(w) >> 8)); \ | ||
284 | } | ||
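Example: put_short(s, 0x1234) appends 0x34 and then 0x12, the little-endian order the stored-block LEN/NLEN words require.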
285 | |||
286 | /* =========================================================================== | ||
287 | * Reverse the first len bits of a code, using straightforward code (a faster | ||
288 | * method would use a table) | ||
289 | * IN assertion: 1 <= len <= 15 | ||
290 | */ | ||
291 | static inline unsigned bi_reverse(unsigned code, /* the value to invert */ | ||
292 | int len) /* its bit length */ | ||
293 | { | ||
294 | register unsigned res = 0; | ||
295 | do { | ||
296 | res |= code & 1; | ||
297 | code >>= 1, res <<= 1; | ||
298 | } while (--len > 0); | ||
299 | return res >> 1; | ||
300 | } | ||
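Worked example: bi_reverse(0x05, 5) walks the bits of 00101 and returns 0x14 (10100). Huffman codes are constructed most-significant-bit first but the bit buffer fills from the least significant end, hence the reversal when codes are generated.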
301 | |||
302 | /* =========================================================================== | ||
303 | * Flush the bit buffer, keeping at most 7 bits in it. | ||
304 | */ | ||
305 | static inline void bi_flush(deflate_state *s) | ||
306 | { | ||
307 | if (s->bi_valid == 16) { | ||
308 | put_short(s, s->bi_buf); | ||
309 | s->bi_buf = 0; | ||
310 | s->bi_valid = 0; | ||
311 | } else if (s->bi_valid >= 8) { | ||
312 | put_byte(s, (Byte)s->bi_buf); | ||
313 | s->bi_buf >>= 8; | ||
314 | s->bi_valid -= 8; | ||
315 | } | ||
316 | } | ||
317 | |||
318 | /* =========================================================================== | ||
319 | * Flush the bit buffer and align the output on a byte boundary | ||
320 | */ | ||
321 | static inline void bi_windup(deflate_state *s) | ||
322 | { | ||
323 | if (s->bi_valid > 8) { | ||
324 | put_short(s, s->bi_buf); | ||
325 | } else if (s->bi_valid > 0) { | ||
326 | put_byte(s, (Byte)s->bi_buf); | ||
327 | } | ||
328 | s->bi_buf = 0; | ||
329 | s->bi_valid = 0; | ||
330 | #ifdef DEBUG_ZLIB | ||
331 | s->bits_sent = (s->bits_sent+7) & ~7; | ||
332 | #endif | ||
333 | } | ||
334 | |||
diff --git a/lib/zlib_inflate/Makefile b/lib/zlib_inflate/Makefile new file mode 100644 index 000000000000..221c139e0df1 --- /dev/null +++ b/lib/zlib_inflate/Makefile | |||
@@ -0,0 +1,19 @@ | |||
1 | # | ||
2 | # This is a modified version of zlib, which does all memory | ||
3 | # allocation ahead of time. | ||
4 | # | ||
5 | # This is only the decompression, see zlib_deflate for | ||
6 | # the compression. | ||
7 | # | ||
8 | # Decompression needs to be serialized for each memory | ||
9 | # allocation. | ||
10 | # | ||
11 | # (The upside of the simplification is that you can't get into | ||
12 | # any nasty situations wrt memory management, and that the | ||
13 | # decompression can be done without blocking on allocation.) | ||
14 | # | ||
15 | |||
16 | obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate.o | ||
17 | |||
18 | zlib_inflate-objs := infblock.o infcodes.o inffast.o inflate.o \ | ||
19 | inflate_sync.o inftrees.o infutil.o inflate_syms.o | ||
diff --git a/lib/zlib_inflate/infblock.c b/lib/zlib_inflate/infblock.c new file mode 100644 index 000000000000..50f21ca4ef7f --- /dev/null +++ b/lib/zlib_inflate/infblock.c | |||
@@ -0,0 +1,361 @@ | |||
1 | /* infblock.c -- interpret and process block types to last block | ||
2 | * Copyright (C) 1995-1998 Mark Adler | ||
3 | * For conditions of distribution and use, see copyright notice in zlib.h | ||
4 | */ | ||
5 | |||
6 | #include <linux/zutil.h> | ||
7 | #include "infblock.h" | ||
8 | #include "inftrees.h" | ||
9 | #include "infcodes.h" | ||
10 | #include "infutil.h" | ||
11 | |||
12 | struct inflate_codes_state; | ||
13 | |||
14 | /* simplify the use of the inflate_huft type with some defines */ | ||
15 | #define exop word.what.Exop | ||
16 | #define bits word.what.Bits | ||
17 | |||
18 | /* Table for deflate from PKZIP's appnote.txt. */ | ||
19 | static const uInt border[] = { /* Order of the bit length code lengths */ | ||
20 | 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; | ||
21 | |||
22 | /* | ||
23 | Notes beyond the 1.93a appnote.txt: | ||
24 | |||
25 | 1. Distance pointers never point before the beginning of the output | ||
26 | stream. | ||
27 | 2. Distance pointers can point back across blocks, up to 32k away. | ||
28 | 3. There is an implied maximum of 7 bits for the bit length table and | ||
29 | 15 bits for the actual data. | ||
30 | 4. If only one code exists, then it is encoded using one bit. (Zero | ||
31 | would be more efficient, but perhaps a little confusing.) If two | ||
32 | codes exist, they are coded using one bit each (0 and 1). | ||
33 | 5. There is no way of sending zero distance codes--a dummy must be | ||
34 | sent if there are none. (History: a pre 2.0 version of PKZIP would | ||
35 | store blocks with no distance codes, but this was discovered to be | ||
36 | too harsh a criterion.) Valid only for 1.93a. 2.04c does allow | ||
37 | zero distance codes, which is sent as one code of zero bits in | ||
38 | length. | ||
39 | 6. There are up to 286 literal/length codes. Code 256 represents the | ||
40 | end-of-block. Note however that the static length tree defines | ||
41 | 288 codes just to fill out the Huffman codes. Codes 286 and 287 | ||
42 | cannot be used though, since there is no length base or extra bits | ||
43 | defined for them. Similarily, there are up to 30 distance codes. | ||
44 | However, static trees define 32 codes (all 5 bits) to fill out the | ||
45 | Huffman codes, but the last two had better not show up in the data. | ||
46 | 7. Unzip can check dynamic Huffman blocks for complete code sets. | ||
47 | The exception is that a single code would not be complete (see #4). | ||
48 | 8. The five bits following the block type is really the number of | ||
49 | literal codes sent minus 257. | ||
50 | 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits | ||
51 | (1+6+6). Therefore, to output three times the length, you output | ||
52 | three codes (1+1+1), whereas to output four times the same length, | ||
53 | you only need two codes (1+3). Hmm. | ||
54 | 10. In the tree reconstruction algorithm, Code = Code + Increment | ||
55 | only if BitLength(i) is not zero. (Pretty obvious.) | ||
56 | 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19) | ||
57 | 12. Note: length code 284 can represent 227-258, but length code 285 | ||
58 | really is 258. The last length deserves its own, short code | ||
59 | since it gets used a lot in very redundant files. The length | ||
60 | 258 is special since 258 - 3 (the min match length) is 255. | ||
61 | 13. The literal/length and distance code bit lengths are read as a | ||
62 | single stream of lengths. It is possible (and advantageous) for | ||
63 | a repeat code (16, 17, or 18) to go across the boundary between | ||
64 | the two sets of lengths. | ||
65 | */ | ||
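Notes 8 and 11 describe the 14 header bits that the TABLE state below consumes with a single NEEDBITS(14); unpacked, they give the three table sizes (editor's sketch, illustrative names):

	/* t = low 14 bits of the dynamic block header, as in the TABLE case */
	unsigned nlit  = 257 + (t & 0x1f);	/* HLIT:  # literal/length codes */
	unsigned ndist = 1 + ((t >> 5) & 0x1f);	/* HDIST: # distance codes */
	unsigned nblen = 4 + ((t >> 10) & 0xf);	/* HCLEN: # bit length codes */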
66 | |||
67 | |||
68 | void zlib_inflate_blocks_reset( | ||
69 | inflate_blocks_statef *s, | ||
70 | z_streamp z, | ||
71 | uLong *c | ||
72 | ) | ||
73 | { | ||
74 | if (c != NULL) | ||
75 | *c = s->check; | ||
76 | if (s->mode == CODES) | ||
77 | zlib_inflate_codes_free(s->sub.decode.codes, z); | ||
78 | s->mode = TYPE; | ||
79 | s->bitk = 0; | ||
80 | s->bitb = 0; | ||
81 | s->read = s->write = s->window; | ||
82 | if (s->checkfn != NULL) | ||
83 | z->adler = s->check = (*s->checkfn)(0L, NULL, 0); | ||
84 | } | ||
85 | |||
86 | inflate_blocks_statef *zlib_inflate_blocks_new( | ||
87 | z_streamp z, | ||
88 | check_func c, | ||
89 | uInt w | ||
90 | ) | ||
91 | { | ||
92 | inflate_blocks_statef *s; | ||
93 | |||
94 | s = &WS(z)->working_blocks_state; | ||
95 | s->hufts = WS(z)->working_hufts; | ||
96 | s->window = WS(z)->working_window; | ||
97 | s->end = s->window + w; | ||
98 | s->checkfn = c; | ||
99 | s->mode = TYPE; | ||
100 | zlib_inflate_blocks_reset(s, z, NULL); | ||
101 | return s; | ||
102 | } | ||
103 | |||
104 | |||
105 | int zlib_inflate_blocks( | ||
106 | inflate_blocks_statef *s, | ||
107 | z_streamp z, | ||
108 | int r | ||
109 | ) | ||
110 | { | ||
111 | uInt t; /* temporary storage */ | ||
112 | uLong b; /* bit buffer */ | ||
113 | uInt k; /* bits in bit buffer */ | ||
114 | Byte *p; /* input data pointer */ | ||
115 | uInt n; /* bytes available there */ | ||
116 | Byte *q; /* output window write pointer */ | ||
117 | uInt m; /* bytes to end of window or read pointer */ | ||
118 | |||
119 | /* copy input/output information to locals (UPDATE macro restores) */ | ||
120 | LOAD | ||
121 | |||
122 | /* process input based on current state */ | ||
123 | while (1) switch (s->mode) | ||
124 | { | ||
125 | case TYPE: | ||
126 | NEEDBITS(3) | ||
127 | t = (uInt)b & 7; | ||
128 | s->last = t & 1; | ||
129 | switch (t >> 1) | ||
130 | { | ||
131 | case 0: /* stored */ | ||
132 | DUMPBITS(3) | ||
133 | t = k & 7; /* go to byte boundary */ | ||
134 | DUMPBITS(t) | ||
135 | s->mode = LENS; /* get length of stored block */ | ||
136 | break; | ||
137 | case 1: /* fixed */ | ||
138 | { | ||
139 | uInt bl, bd; | ||
140 | inflate_huft *tl, *td; | ||
141 | |||
142 | zlib_inflate_trees_fixed(&bl, &bd, &tl, &td, s->hufts, z); | ||
143 | s->sub.decode.codes = zlib_inflate_codes_new(bl, bd, tl, td, z); | ||
144 | if (s->sub.decode.codes == NULL) | ||
145 | { | ||
146 | r = Z_MEM_ERROR; | ||
147 | LEAVE | ||
148 | } | ||
149 | } | ||
150 | DUMPBITS(3) | ||
151 | s->mode = CODES; | ||
152 | break; | ||
153 | case 2: /* dynamic */ | ||
154 | DUMPBITS(3) | ||
155 | s->mode = TABLE; | ||
156 | break; | ||
157 | case 3: /* illegal */ | ||
158 | DUMPBITS(3) | ||
159 | s->mode = B_BAD; | ||
160 | z->msg = (char*)"invalid block type"; | ||
161 | r = Z_DATA_ERROR; | ||
162 | LEAVE | ||
163 | } | ||
164 | break; | ||
165 | case LENS: | ||
166 | NEEDBITS(32) | ||
167 | if ((((~b) >> 16) & 0xffff) != (b & 0xffff)) | ||
168 | { | ||
169 | s->mode = B_BAD; | ||
170 | z->msg = (char*)"invalid stored block lengths"; | ||
171 | r = Z_DATA_ERROR; | ||
172 | LEAVE | ||
173 | } | ||
174 | s->sub.left = (uInt)b & 0xffff; | ||
175 | b = k = 0; /* dump bits */ | ||
176 | s->mode = s->sub.left ? STORED : (s->last ? DRY : TYPE); | ||
177 | break; | ||
178 | case STORED: | ||
179 | if (n == 0) | ||
180 | LEAVE | ||
181 | NEEDOUT | ||
182 | t = s->sub.left; | ||
183 | if (t > n) t = n; | ||
184 | if (t > m) t = m; | ||
185 | memcpy(q, p, t); | ||
186 | p += t; n -= t; | ||
187 | q += t; m -= t; | ||
188 | if ((s->sub.left -= t) != 0) | ||
189 | break; | ||
190 | s->mode = s->last ? DRY : TYPE; | ||
191 | break; | ||
192 | case TABLE: | ||
193 | NEEDBITS(14) | ||
194 | s->sub.trees.table = t = (uInt)b & 0x3fff; | ||
195 | #ifndef PKZIP_BUG_WORKAROUND | ||
196 | if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29) | ||
197 | { | ||
198 | s->mode = B_BAD; | ||
199 | z->msg = (char*)"too many length or distance symbols"; | ||
200 | r = Z_DATA_ERROR; | ||
201 | LEAVE | ||
202 | } | ||
203 | #endif | ||
204 | { | ||
205 | s->sub.trees.blens = WS(z)->working_blens; | ||
206 | } | ||
207 | DUMPBITS(14) | ||
208 | s->sub.trees.index = 0; | ||
209 | s->mode = BTREE; | ||
210 | case BTREE: | ||
211 | while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10)) | ||
212 | { | ||
213 | NEEDBITS(3) | ||
214 | s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7; | ||
215 | DUMPBITS(3) | ||
216 | } | ||
217 | while (s->sub.trees.index < 19) | ||
218 | s->sub.trees.blens[border[s->sub.trees.index++]] = 0; | ||
219 | s->sub.trees.bb = 7; | ||
220 | t = zlib_inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb, | ||
221 | &s->sub.trees.tb, s->hufts, z); | ||
222 | if (t != Z_OK) | ||
223 | { | ||
224 | r = t; | ||
225 | if (r == Z_DATA_ERROR) | ||
226 | s->mode = B_BAD; | ||
227 | LEAVE | ||
228 | } | ||
229 | s->sub.trees.index = 0; | ||
230 | s->mode = DTREE; | ||
231 | case DTREE: | ||
232 | while (t = s->sub.trees.table, | ||
233 | s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f)) | ||
234 | { | ||
235 | inflate_huft *h; | ||
236 | uInt i, j, c; | ||
237 | |||
238 | t = s->sub.trees.bb; | ||
239 | NEEDBITS(t) | ||
240 | h = s->sub.trees.tb + ((uInt)b & zlib_inflate_mask[t]); | ||
241 | t = h->bits; | ||
242 | c = h->base; | ||
243 | if (c < 16) | ||
244 | { | ||
245 | DUMPBITS(t) | ||
246 | s->sub.trees.blens[s->sub.trees.index++] = c; | ||
247 | } | ||
248 | else /* c == 16..18 */ | ||
249 | { | ||
250 | i = c == 18 ? 7 : c - 14; | ||
251 | j = c == 18 ? 11 : 3; | ||
252 | NEEDBITS(t + i) | ||
253 | DUMPBITS(t) | ||
254 | j += (uInt)b & zlib_inflate_mask[i]; | ||
255 | DUMPBITS(i) | ||
256 | i = s->sub.trees.index; | ||
257 | t = s->sub.trees.table; | ||
258 | if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) || | ||
259 | (c == 16 && i < 1)) | ||
260 | { | ||
261 | s->mode = B_BAD; | ||
262 | z->msg = (char*)"invalid bit length repeat"; | ||
263 | r = Z_DATA_ERROR; | ||
264 | LEAVE | ||
265 | } | ||
266 | c = c == 16 ? s->sub.trees.blens[i - 1] : 0; | ||
267 | do { | ||
268 | s->sub.trees.blens[i++] = c; | ||
269 | } while (--j); | ||
270 | s->sub.trees.index = i; | ||
271 | } | ||
272 | } | ||
273 | s->sub.trees.tb = NULL; | ||
274 | { | ||
275 | uInt bl, bd; | ||
276 | inflate_huft *tl, *td; | ||
277 | inflate_codes_statef *c; | ||
278 | |||
279 | bl = 9; /* must be <= 9 for lookahead assumptions */ | ||
280 | bd = 6; /* must be <= 9 for lookahead assumptions */ | ||
281 | t = s->sub.trees.table; | ||
282 | t = zlib_inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f), | ||
283 | s->sub.trees.blens, &bl, &bd, &tl, &td, | ||
284 | s->hufts, z); | ||
285 | if (t != Z_OK) | ||
286 | { | ||
287 | if (t == (uInt)Z_DATA_ERROR) | ||
288 | s->mode = B_BAD; | ||
289 | r = t; | ||
290 | LEAVE | ||
291 | } | ||
292 | if ((c = zlib_inflate_codes_new(bl, bd, tl, td, z)) == NULL) | ||
293 | { | ||
294 | r = Z_MEM_ERROR; | ||
295 | LEAVE | ||
296 | } | ||
297 | s->sub.decode.codes = c; | ||
298 | } | ||
299 | s->mode = CODES; | ||
300 | case CODES: | ||
301 | UPDATE | ||
302 | if ((r = zlib_inflate_codes(s, z, r)) != Z_STREAM_END) | ||
303 | return zlib_inflate_flush(s, z, r); | ||
304 | r = Z_OK; | ||
305 | zlib_inflate_codes_free(s->sub.decode.codes, z); | ||
306 | LOAD | ||
307 | if (!s->last) | ||
308 | { | ||
309 | s->mode = TYPE; | ||
310 | break; | ||
311 | } | ||
312 | s->mode = DRY; | ||
313 | case DRY: | ||
314 | FLUSH | ||
315 | if (s->read != s->write) | ||
316 | LEAVE | ||
317 | s->mode = B_DONE; | ||
318 | case B_DONE: | ||
319 | r = Z_STREAM_END; | ||
320 | LEAVE | ||
321 | case B_BAD: | ||
322 | r = Z_DATA_ERROR; | ||
323 | LEAVE | ||
324 | default: | ||
325 | r = Z_STREAM_ERROR; | ||
326 | LEAVE | ||
327 | } | ||
328 | } | ||
329 | |||
330 | |||
331 | int zlib_inflate_blocks_free( | ||
332 | inflate_blocks_statef *s, | ||
333 | z_streamp z | ||
334 | ) | ||
335 | { | ||
336 | zlib_inflate_blocks_reset(s, z, NULL); | ||
337 | return Z_OK; | ||
338 | } | ||
339 | |||
340 | |||
341 | void zlib_inflate_set_dictionary( | ||
342 | inflate_blocks_statef *s, | ||
343 | const Byte *d, | ||
344 | uInt n | ||
345 | ) | ||
346 | { | ||
347 | memcpy(s->window, d, n); | ||
348 | s->read = s->write = s->window + n; | ||
349 | } | ||
350 | |||
351 | |||
352 | /* Returns true if inflate is currently at the end of a block generated | ||
353 | * by Z_SYNC_FLUSH or Z_FULL_FLUSH. | ||
354 | * IN assertion: s != NULL | ||
355 | */ | ||
356 | int zlib_inflate_blocks_sync_point( | ||
357 | inflate_blocks_statef *s | ||
358 | ) | ||
359 | { | ||
360 | return s->mode == LENS; | ||
361 | } | ||
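In practice a sync point arises because Z_SYNC_FLUSH appends an empty stored block: its LEN/NLEN words are the well-known 00 00 FF FF marker, so a decoder parked in LENS (type bits consumed, length words still pending) is sitting exactly on such a boundary.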
diff --git a/lib/zlib_inflate/infblock.h b/lib/zlib_inflate/infblock.h new file mode 100644 index 000000000000..f5221ddf6054 --- /dev/null +++ b/lib/zlib_inflate/infblock.h | |||
@@ -0,0 +1,44 @@ | |||
1 | /* infblock.h -- header to use infblock.c | ||
2 | * Copyright (C) 1995-1998 Mark Adler | ||
3 | * For conditions of distribution and use, see copyright notice in zlib.h | ||
4 | */ | ||
5 | |||
6 | /* WARNING: this file should *not* be used by applications. It is | ||
7 | part of the implementation of the compression library and is | ||
8 | subject to change. Applications should only use zlib.h. | ||
9 | */ | ||
10 | |||
11 | #ifndef _INFBLOCK_H | ||
12 | #define _INFBLOCK_H | ||
13 | |||
14 | struct inflate_blocks_state; | ||
15 | typedef struct inflate_blocks_state inflate_blocks_statef; | ||
16 | |||
17 | extern inflate_blocks_statef * zlib_inflate_blocks_new ( | ||
18 | z_streamp z, | ||
19 | check_func c, /* check function */ | ||
20 | uInt w); /* window size */ | ||
21 | |||
22 | extern int zlib_inflate_blocks ( | ||
23 | inflate_blocks_statef *, | ||
24 | z_streamp , | ||
25 | int); /* initial return code */ | ||
26 | |||
27 | extern void zlib_inflate_blocks_reset ( | ||
28 | inflate_blocks_statef *, | ||
29 | z_streamp , | ||
30 | uLong *); /* check value on output */ | ||
31 | |||
32 | extern int zlib_inflate_blocks_free ( | ||
33 | inflate_blocks_statef *, | ||
34 | z_streamp); | ||
35 | |||
36 | extern void zlib_inflate_set_dictionary ( | ||
37 | inflate_blocks_statef *s, | ||
38 | const Byte *d, /* dictionary */ | ||
39 | uInt n); /* dictionary length */ | ||
40 | |||
41 | extern int zlib_inflate_blocks_sync_point ( | ||
42 | inflate_blocks_statef *s); | ||
43 | |||
44 | #endif /* _INFBLOCK_H */ | ||
diff --git a/lib/zlib_inflate/infcodes.c b/lib/zlib_inflate/infcodes.c new file mode 100644 index 000000000000..07cd7591cbb7 --- /dev/null +++ b/lib/zlib_inflate/infcodes.c | |||
@@ -0,0 +1,202 @@ | |||
1 | /* infcodes.c -- process literals and length/distance pairs | ||
2 | * Copyright (C) 1995-1998 Mark Adler | ||
3 | * For conditions of distribution and use, see copyright notice in zlib.h | ||
4 | */ | ||
5 | |||
6 | #include <linux/zutil.h> | ||
7 | #include "inftrees.h" | ||
8 | #include "infblock.h" | ||
9 | #include "infcodes.h" | ||
10 | #include "infutil.h" | ||
11 | #include "inffast.h" | ||
12 | |||
13 | /* simplify the use of the inflate_huft type with some defines */ | ||
14 | #define exop word.what.Exop | ||
15 | #define bits word.what.Bits | ||
16 | |||
17 | inflate_codes_statef *zlib_inflate_codes_new( | ||
18 | uInt bl, | ||
19 | uInt bd, | ||
20 | inflate_huft *tl, | ||
21 | inflate_huft *td, /* need separate declaration for Borland C++ */ | ||
22 | z_streamp z | ||
23 | ) | ||
24 | { | ||
25 | inflate_codes_statef *c; | ||
26 | |||
27 | c = &WS(z)->working_state; | ||
28 | { | ||
29 | c->mode = START; | ||
30 | c->lbits = (Byte)bl; | ||
31 | c->dbits = (Byte)bd; | ||
32 | c->ltree = tl; | ||
33 | c->dtree = td; | ||
34 | } | ||
35 | return c; | ||
36 | } | ||
37 | |||
38 | |||
39 | int zlib_inflate_codes( | ||
40 | inflate_blocks_statef *s, | ||
41 | z_streamp z, | ||
42 | int r | ||
43 | ) | ||
44 | { | ||
45 | uInt j; /* temporary storage */ | ||
46 | inflate_huft *t; /* temporary pointer */ | ||
47 | uInt e; /* extra bits or operation */ | ||
48 | uLong b; /* bit buffer */ | ||
49 | uInt k; /* bits in bit buffer */ | ||
50 | Byte *p; /* input data pointer */ | ||
51 | uInt n; /* bytes available there */ | ||
52 | Byte *q; /* output window write pointer */ | ||
53 | uInt m; /* bytes to end of window or read pointer */ | ||
54 | Byte *f; /* pointer to copy strings from */ | ||
55 | inflate_codes_statef *c = s->sub.decode.codes; /* codes state */ | ||
56 | |||
57 | /* copy input/output information to locals (UPDATE macro restores) */ | ||
58 | LOAD | ||
59 | |||
60 | /* process input and output based on current state */ | ||
61 | while (1) switch (c->mode) | ||
62 | { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */ | ||
63 | case START: /* x: set up for LEN */ | ||
64 | #ifndef SLOW | ||
65 | if (m >= 258 && n >= 10) | ||
66 | { | ||
67 | UPDATE | ||
68 | r = zlib_inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z); | ||
69 | LOAD | ||
70 | if (r != Z_OK) | ||
71 | { | ||
72 | c->mode = r == Z_STREAM_END ? WASH : BADCODE; | ||
73 | break; | ||
74 | } | ||
75 | } | ||
76 | #endif /* !SLOW */ | ||
77 | c->sub.code.need = c->lbits; | ||
78 | c->sub.code.tree = c->ltree; | ||
79 | c->mode = LEN; | ||
80 | case LEN: /* i: get length/literal/eob next */ | ||
81 | j = c->sub.code.need; | ||
82 | NEEDBITS(j) | ||
83 | t = c->sub.code.tree + ((uInt)b & zlib_inflate_mask[j]); | ||
84 | DUMPBITS(t->bits) | ||
85 | e = (uInt)(t->exop); | ||
86 | if (e == 0) /* literal */ | ||
87 | { | ||
88 | c->sub.lit = t->base; | ||
89 | c->mode = LIT; | ||
90 | break; | ||
91 | } | ||
92 | if (e & 16) /* length */ | ||
93 | { | ||
94 | c->sub.copy.get = e & 15; | ||
95 | c->len = t->base; | ||
96 | c->mode = LENEXT; | ||
97 | break; | ||
98 | } | ||
99 | if ((e & 64) == 0) /* next table */ | ||
100 | { | ||
101 | c->sub.code.need = e; | ||
102 | c->sub.code.tree = t + t->base; | ||
103 | break; | ||
104 | } | ||
105 | if (e & 32) /* end of block */ | ||
106 | { | ||
107 | c->mode = WASH; | ||
108 | break; | ||
109 | } | ||
110 | c->mode = BADCODE; /* invalid code */ | ||
111 | z->msg = (char*)"invalid literal/length code"; | ||
112 | r = Z_DATA_ERROR; | ||
113 | LEAVE | ||
114 | case LENEXT: /* i: getting length extra (have base) */ | ||
115 | j = c->sub.copy.get; | ||
116 | NEEDBITS(j) | ||
117 | c->len += (uInt)b & zlib_inflate_mask[j]; | ||
118 | DUMPBITS(j) | ||
119 | c->sub.code.need = c->dbits; | ||
120 | c->sub.code.tree = c->dtree; | ||
121 | c->mode = DIST; | ||
122 | case DIST: /* i: get distance next */ | ||
123 | j = c->sub.code.need; | ||
124 | NEEDBITS(j) | ||
125 | t = c->sub.code.tree + ((uInt)b & zlib_inflate_mask[j]); | ||
126 | DUMPBITS(t->bits) | ||
127 | e = (uInt)(t->exop); | ||
128 | if (e & 16) /* distance */ | ||
129 | { | ||
130 | c->sub.copy.get = e & 15; | ||
131 | c->sub.copy.dist = t->base; | ||
132 | c->mode = DISTEXT; | ||
133 | break; | ||
134 | } | ||
135 | if ((e & 64) == 0) /* next table */ | ||
136 | { | ||
137 | c->sub.code.need = e; | ||
138 | c->sub.code.tree = t + t->base; | ||
139 | break; | ||
140 | } | ||
141 | c->mode = BADCODE; /* invalid code */ | ||
142 | z->msg = (char*)"invalid distance code"; | ||
143 | r = Z_DATA_ERROR; | ||
144 | LEAVE | ||
145 | case DISTEXT: /* i: getting distance extra */ | ||
146 | j = c->sub.copy.get; | ||
147 | NEEDBITS(j) | ||
148 | c->sub.copy.dist += (uInt)b & zlib_inflate_mask[j]; | ||
149 | DUMPBITS(j) | ||
150 | c->mode = COPY; | ||
151 | case COPY: /* o: copying bytes in window, waiting for space */ | ||
152 | f = q - c->sub.copy.dist; | ||
153 | while (f < s->window) /* modulo window size - "while" instead */ | ||
154 | f += s->end - s->window; /* of "if" handles invalid distances */ | ||
155 | while (c->len) | ||
156 | { | ||
157 | NEEDOUT | ||
158 | OUTBYTE(*f++) | ||
159 | if (f == s->end) | ||
160 | f = s->window; | ||
161 | c->len--; | ||
162 | } | ||
163 | c->mode = START; | ||
164 | break; | ||
165 | case LIT: /* o: got literal, waiting for output space */ | ||
166 | NEEDOUT | ||
167 | OUTBYTE(c->sub.lit) | ||
168 | c->mode = START; | ||
169 | break; | ||
170 | case WASH: /* o: got eob, possibly more output */ | ||
171 | if (k > 7) /* return unused byte, if any */ | ||
172 | { | ||
173 | k -= 8; | ||
174 | n++; | ||
175 | p--; /* can always return one */ | ||
176 | } | ||
177 | FLUSH | ||
178 | if (s->read != s->write) | ||
179 | LEAVE | ||
180 | c->mode = END; | ||
181 | case END: | ||
182 | r = Z_STREAM_END; | ||
183 | LEAVE | ||
184 | case BADCODE: /* x: got error */ | ||
185 | r = Z_DATA_ERROR; | ||
186 | LEAVE | ||
187 | default: | ||
188 | r = Z_STREAM_ERROR; | ||
189 | LEAVE | ||
190 | } | ||
191 | #ifdef NEED_DUMMY_RETURN | ||
192 | return Z_STREAM_ERROR; /* Some dumb compilers complain without this */ | ||
193 | #endif | ||
194 | } | ||
195 | |||
196 | |||
197 | void zlib_inflate_codes_free( | ||
198 | inflate_codes_statef *c, | ||
199 | z_streamp z | ||
200 | ) | ||
201 | { | ||
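	/* nothing to free: the codes state came from the caller's
	 * preallocated workspace (see zlib_inflate_codes_new() above) */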
202 | } | ||
diff --git a/lib/zlib_inflate/infcodes.h b/lib/zlib_inflate/infcodes.h new file mode 100644 index 000000000000..5cff417523b0 --- /dev/null +++ b/lib/zlib_inflate/infcodes.h | |||
@@ -0,0 +1,33 @@ | |||
1 | /* infcodes.h -- header to use infcodes.c | ||
2 | * Copyright (C) 1995-1998 Mark Adler | ||
3 | * For conditions of distribution and use, see copyright notice in zlib.h | ||
4 | */ | ||
5 | |||
6 | /* WARNING: this file should *not* be used by applications. It is | ||
7 | part of the implementation of the compression library and is | ||
8 | subject to change. Applications should only use zlib.h. | ||
9 | */ | ||
10 | |||
11 | #ifndef _INFCODES_H | ||
12 | #define _INFCODES_H | ||
13 | |||
14 | #include "infblock.h" | ||
15 | |||
16 | struct inflate_codes_state; | ||
17 | typedef struct inflate_codes_state inflate_codes_statef; | ||
18 | |||
19 | extern inflate_codes_statef *zlib_inflate_codes_new ( | ||
20 | uInt, uInt, | ||
21 | inflate_huft *, inflate_huft *, | ||
22 | z_streamp ); | ||
23 | |||
24 | extern int zlib_inflate_codes ( | ||
25 | inflate_blocks_statef *, | ||
26 | z_streamp , | ||
27 | int); | ||
28 | |||
29 | extern void zlib_inflate_codes_free ( | ||
30 | inflate_codes_statef *, | ||
31 | z_streamp ); | ||
32 | |||
33 | #endif /* _INFCODES_H */ | ||
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c new file mode 100644 index 000000000000..0bd7623fc85a --- /dev/null +++ b/lib/zlib_inflate/inffast.c | |||
@@ -0,0 +1,176 @@ | |||
1 | /* inffast.c -- process literals and length/distance pairs fast | ||
2 | * Copyright (C) 1995-1998 Mark Adler | ||
3 | * For conditions of distribution and use, see copyright notice in zlib.h | ||
4 | */ | ||
5 | |||
6 | #include <linux/zutil.h> | ||
7 | #include "inftrees.h" | ||
8 | #include "infblock.h" | ||
9 | #include "infcodes.h" | ||
10 | #include "infutil.h" | ||
11 | #include "inffast.h" | ||
12 | |||
13 | struct inflate_codes_state; | ||
14 | |||
15 | /* simplify the use of the inflate_huft type with some defines */ | ||
16 | #define exop word.what.Exop | ||
17 | #define bits word.what.Bits | ||
18 | |||
19 | /* macros for bit input with no checking and for returning unused bytes */ | ||
20 | #define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}} | ||
21 | #define UNGRAB {c=z->avail_in-n;c=(k>>3)<c?k>>3:c;n+=c;p-=c;k-=c<<3;} | ||
22 | |||
23 | /* Called with number of bytes left to write in window at least 258 | ||
24 | (the maximum string length) and number of input bytes available | ||
25 | at least ten. The ten bytes are six bytes for the longest length/ | ||
26 | distance pair plus four bytes for overloading the bit buffer. */ | ||
27 | |||
28 | int zlib_inflate_fast( | ||
29 | uInt bl, | ||
30 | uInt bd, | ||
31 | inflate_huft *tl, | ||
32 | inflate_huft *td, /* need separate declaration for Borland C++ */ | ||
33 | inflate_blocks_statef *s, | ||
34 | z_streamp z | ||
35 | ) | ||
36 | { | ||
37 | inflate_huft *t; /* temporary pointer */ | ||
38 | uInt e; /* extra bits or operation */ | ||
39 | uLong b; /* bit buffer */ | ||
40 | uInt k; /* bits in bit buffer */ | ||
41 | Byte *p; /* input data pointer */ | ||
42 | uInt n; /* bytes available there */ | ||
43 | Byte *q; /* output window write pointer */ | ||
44 | uInt m; /* bytes to end of window or read pointer */ | ||
45 | uInt ml; /* mask for literal/length tree */ | ||
46 | uInt md; /* mask for distance tree */ | ||
47 | uInt c; /* bytes to copy */ | ||
48 | uInt d; /* distance back to copy from */ | ||
49 | Byte *r; /* copy source pointer */ | ||
50 | |||
51 | /* load input, output, bit values */ | ||
52 | LOAD | ||
53 | |||
54 | /* initialize masks */ | ||
55 | ml = zlib_inflate_mask[bl]; | ||
56 | md = zlib_inflate_mask[bd]; | ||
57 | |||
58 | /* do until not enough input or output space for fast loop */ | ||
59 | do { /* assume called with m >= 258 && n >= 10 */ | ||
60 | /* get literal/length code */ | ||
61 | GRABBITS(20) /* max bits for literal/length code */ | ||
62 | if ((e = (t = tl + ((uInt)b & ml))->exop) == 0) | ||
63 | { | ||
64 | DUMPBITS(t->bits) | ||
65 | *q++ = (Byte)t->base; | ||
66 | m--; | ||
67 | continue; | ||
68 | } | ||
69 | do { | ||
70 | DUMPBITS(t->bits) | ||
71 | if (e & 16) | ||
72 | { | ||
73 | /* get extra bits for length */ | ||
74 | e &= 15; | ||
75 | c = t->base + ((uInt)b & zlib_inflate_mask[e]); | ||
76 | DUMPBITS(e) | ||
77 | |||
78 | /* decode distance base of block to copy */ | ||
79 | GRABBITS(15); /* max bits for distance code */ | ||
80 | e = (t = td + ((uInt)b & md))->exop; | ||
81 | do { | ||
82 | DUMPBITS(t->bits) | ||
83 | if (e & 16) | ||
84 | { | ||
85 | /* get extra bits to add to distance base */ | ||
86 | e &= 15; | ||
87 | GRABBITS(e) /* get extra bits (up to 13) */ | ||
88 | d = t->base + ((uInt)b & zlib_inflate_mask[e]); | ||
89 | DUMPBITS(e) | ||
90 | |||
91 | /* do the copy */ | ||
92 | m -= c; | ||
93 | r = q - d; | ||
94 | if (r < s->window) /* wrap if needed */ | ||
95 | { | ||
96 | do { | ||
97 | r += s->end - s->window; /* force pointer in window */ | ||
98 | } while (r < s->window); /* covers invalid distances */ | ||
99 | e = s->end - r; | ||
100 | if (c > e) | ||
101 | { | ||
102 | c -= e; /* wrapped copy */ | ||
103 | do { | ||
104 | *q++ = *r++; | ||
105 | } while (--e); | ||
106 | r = s->window; | ||
107 | do { | ||
108 | *q++ = *r++; | ||
109 | } while (--c); | ||
110 | } | ||
111 | else /* normal copy */ | ||
112 | { | ||
113 | *q++ = *r++; c--; | ||
114 | *q++ = *r++; c--; | ||
115 | do { | ||
116 | *q++ = *r++; | ||
117 | } while (--c); | ||
118 | } | ||
119 | } | ||
120 | else /* normal copy */ | ||
121 | { | ||
122 | *q++ = *r++; c--; | ||
123 | *q++ = *r++; c--; | ||
124 | do { | ||
125 | *q++ = *r++; | ||
126 | } while (--c); | ||
127 | } | ||
128 | break; | ||
129 | } | ||
130 | else if ((e & 64) == 0) | ||
131 | { | ||
132 | t += t->base; | ||
133 | e = (t += ((uInt)b & zlib_inflate_mask[e]))->exop; | ||
134 | } | ||
135 | else | ||
136 | { | ||
137 | z->msg = (char*)"invalid distance code"; | ||
138 | UNGRAB | ||
139 | UPDATE | ||
140 | return Z_DATA_ERROR; | ||
141 | } | ||
142 | } while (1); | ||
143 | break; | ||
144 | } | ||
145 | if ((e & 64) == 0) | ||
146 | { | ||
147 | t += t->base; | ||
148 | if ((e = (t += ((uInt)b & zlib_inflate_mask[e]))->exop) == 0) | ||
149 | { | ||
150 | DUMPBITS(t->bits) | ||
151 | *q++ = (Byte)t->base; | ||
152 | m--; | ||
153 | break; | ||
154 | } | ||
155 | } | ||
156 | else if (e & 32) | ||
157 | { | ||
158 | UNGRAB | ||
159 | UPDATE | ||
160 | return Z_STREAM_END; | ||
161 | } | ||
162 | else | ||
163 | { | ||
164 | z->msg = (char*)"invalid literal/length code"; | ||
165 | UNGRAB | ||
166 | UPDATE | ||
167 | return Z_DATA_ERROR; | ||
168 | } | ||
169 | } while (1); | ||
170 | } while (m >= 258 && n >= 10); | ||
171 | |||
172 | /* not enough input or output--restore pointers and return */ | ||
173 | UNGRAB | ||
174 | UPDATE | ||
175 | return Z_OK; | ||
176 | } | ||
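The loop guard (m >= 258 && n >= 10) is the header comment's arithmetic made concrete: GRABBITS(20) for the literal/length code, GRABBITS(15) for the distance code, and up to 13 extra distance bits come to at most 48 bits, i.e. six input bytes, while the uLong bit buffer may already hold up to four bytes beyond what the last code consumed; 258 is MAX_MATCH, the longest copy a single pair can produce.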
diff --git a/lib/zlib_inflate/inffast.h b/lib/zlib_inflate/inffast.h new file mode 100644 index 000000000000..fc720f0fa7f5 --- /dev/null +++ b/lib/zlib_inflate/inffast.h | |||
@@ -0,0 +1,17 @@ | |||
1 | /* inffast.h -- header to use inffast.c | ||
2 | * Copyright (C) 1995-1998 Mark Adler | ||
3 | * For conditions of distribution and use, see copyright notice in zlib.h | ||
4 | */ | ||
5 | |||
6 | /* WARNING: this file should *not* be used by applications. It is | ||
7 | part of the implementation of the compression library and is | ||
8 | subject to change. Applications should only use zlib.h. | ||
9 | */ | ||
10 | |||
11 | extern int zlib_inflate_fast ( | ||
12 | uInt, | ||
13 | uInt, | ||
14 | inflate_huft *, | ||
15 | inflate_huft *, | ||
16 | inflate_blocks_statef *, | ||
17 | z_streamp ); | ||
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c new file mode 100644 index 000000000000..3d94cb90c1d3 --- /dev/null +++ b/lib/zlib_inflate/inflate.c | |||
@@ -0,0 +1,248 @@ | |||
1 | /* inflate.c -- zlib interface to inflate modules | ||
2 | * Copyright (C) 1995-1998 Mark Adler | ||
3 | * For conditions of distribution and use, see copyright notice in zlib.h | ||
4 | */ | ||
5 | |||
6 | #include <linux/module.h> | ||
7 | #include <linux/zutil.h> | ||
8 | #include "infblock.h" | ||
9 | #include "infutil.h" | ||
10 | |||
11 | int zlib_inflate_workspacesize(void) | ||
12 | { | ||
13 | return sizeof(struct inflate_workspace); | ||
14 | } | ||
15 | |||
16 | |||
17 | int zlib_inflateReset( | ||
18 | z_streamp z | ||
19 | ) | ||
20 | { | ||
21 | if (z == NULL || z->state == NULL || z->workspace == NULL) | ||
22 | return Z_STREAM_ERROR; | ||
23 | z->total_in = z->total_out = 0; | ||
24 | z->msg = NULL; | ||
25 | z->state->mode = z->state->nowrap ? BLOCKS : METHOD; | ||
26 | zlib_inflate_blocks_reset(z->state->blocks, z, NULL); | ||
27 | return Z_OK; | ||
28 | } | ||
29 | |||
30 | |||
31 | int zlib_inflateEnd( | ||
32 | z_streamp z | ||
33 | ) | ||
34 | { | ||
35 | if (z == NULL || z->state == NULL || z->workspace == NULL) | ||
36 | return Z_STREAM_ERROR; | ||
37 | if (z->state->blocks != NULL) | ||
38 | zlib_inflate_blocks_free(z->state->blocks, z); | ||
39 | z->state = NULL; | ||
40 | return Z_OK; | ||
41 | } | ||
42 | |||
43 | |||
44 | int zlib_inflateInit2_( | ||
45 | z_streamp z, | ||
46 | int w, | ||
47 | const char *version, | ||
48 | int stream_size | ||
49 | ) | ||
50 | { | ||
51 | if (version == NULL || version[0] != ZLIB_VERSION[0] || | ||
52 | stream_size != sizeof(z_stream) || z->workspace == NULL) | ||
53 | return Z_VERSION_ERROR; | ||
54 | |||
55 | /* initialize state */ | ||
56 | z->msg = NULL; | ||
57 | z->state = &WS(z)->internal_state; | ||
58 | z->state->blocks = NULL; | ||
59 | |||
60 | /* handle undocumented nowrap option (no zlib header or check) */ | ||
61 | z->state->nowrap = 0; | ||
62 | if (w < 0) | ||
63 | { | ||
64 | w = - w; | ||
65 | z->state->nowrap = 1; | ||
66 | } | ||
67 | |||
68 | /* set window size */ | ||
69 | if (w < 8 || w > 15) | ||
70 | { | ||
71 | zlib_inflateEnd(z); | ||
72 | return Z_STREAM_ERROR; | ||
73 | } | ||
74 | z->state->wbits = (uInt)w; | ||
75 | |||
76 | /* create inflate_blocks state */ | ||
77 | if ((z->state->blocks = | ||
78 | zlib_inflate_blocks_new(z, z->state->nowrap ? NULL : zlib_adler32, (uInt)1 << w)) | ||
79 | == NULL) | ||
80 | { | ||
81 | zlib_inflateEnd(z); | ||
82 | return Z_MEM_ERROR; | ||
83 | } | ||
84 | |||
85 | /* reset state */ | ||
86 | zlib_inflateReset(z); | ||
87 | return Z_OK; | ||
88 | } | ||
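A caller sketch under this preallocated-workspace convention (hedged: zlib_inflateInit() is assumed to be the usual wrapper macro around zlib_inflateInit_(), and the allocation flags and error codes are illustrative):

	#include <linux/zlib.h>
	#include <linux/slab.h>

	static int inflate_one_shot(Byte *dst, uInt dlen, Byte *src, uInt slen)
	{
		z_stream strm;
		int rc;

		/* the caller, not zlib, provides all working memory */
		strm.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
		if (strm.workspace == NULL)
			return -ENOMEM;

		rc = zlib_inflateInit(&strm);
		if (rc == Z_OK) {
			strm.next_in = src;
			strm.avail_in = slen;
			strm.next_out = dst;
			strm.avail_out = dlen;
			rc = zlib_inflate(&strm, Z_FINISH);
			zlib_inflateEnd(&strm);
		}
		kfree(strm.workspace);
		return rc == Z_STREAM_END ? 0 : -EIO;
	}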
89 | |||
90 | |||
91 | /* | ||
92 | * At the end of a Deflate-compressed PPP packet, we expect to have seen | ||
93 | * a `stored' block type value but not the (zero) length bytes. | ||
94 | */ | ||
95 | static int zlib_inflate_packet_flush(inflate_blocks_statef *s) | ||
96 | { | ||
97 | if (s->mode != LENS) | ||
98 | return Z_DATA_ERROR; | ||
99 | s->mode = TYPE; | ||
100 | return Z_OK; | ||
101 | } | ||
102 | |||
103 | |||
104 | int zlib_inflateInit_( | ||
105 | z_streamp z, | ||
106 | const char *version, | ||
107 | int stream_size | ||
108 | ) | ||
109 | { | ||
110 | return zlib_inflateInit2_(z, DEF_WBITS, version, stream_size); | ||
111 | } | ||
112 | |||
113 | #undef NEEDBYTE | ||
114 | #undef NEXTBYTE | ||
115 | #define NEEDBYTE {if(z->avail_in==0)goto empty;r=trv;} | ||
116 | #define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++) | ||
117 | |||
118 | int zlib_inflate( | ||
119 | z_streamp z, | ||
120 | int f | ||
121 | ) | ||
122 | { | ||
123 | int r, trv; | ||
124 | uInt b; | ||
125 | |||
126 | if (z == NULL || z->state == NULL || z->next_in == NULL) | ||
127 | return Z_STREAM_ERROR; | ||
128 | trv = f == Z_FINISH ? Z_BUF_ERROR : Z_OK; | ||
129 | r = Z_BUF_ERROR; | ||
130 | while (1) switch (z->state->mode) | ||
131 | { | ||
132 | case METHOD: | ||
133 | NEEDBYTE | ||
134 | if (((z->state->sub.method = NEXTBYTE) & 0xf) != Z_DEFLATED) | ||
135 | { | ||
136 | z->state->mode = I_BAD; | ||
137 | z->msg = (char*)"unknown compression method"; | ||
138 | z->state->sub.marker = 5; /* can't try inflateSync */ | ||
139 | break; | ||
140 | } | ||
141 | if ((z->state->sub.method >> 4) + 8 > z->state->wbits) | ||
142 | { | ||
143 | z->state->mode = I_BAD; | ||
144 | z->msg = (char*)"invalid window size"; | ||
145 | z->state->sub.marker = 5; /* can't try inflateSync */ | ||
146 | break; | ||
147 | } | ||
148 | z->state->mode = FLAG; | ||
149 | case FLAG: | ||
150 | NEEDBYTE | ||
151 | b = NEXTBYTE; | ||
152 | if (((z->state->sub.method << 8) + b) % 31) | ||
153 | { | ||
154 | z->state->mode = I_BAD; | ||
155 | z->msg = (char*)"incorrect header check"; | ||
156 | z->state->sub.marker = 5; /* can't try inflateSync */ | ||
157 | break; | ||
158 | } | ||
159 | if (!(b & PRESET_DICT)) | ||
160 | { | ||
161 | z->state->mode = BLOCKS; | ||
162 | break; | ||
163 | } | ||
164 | z->state->mode = DICT4; | ||
165 | case DICT4: | ||
166 | NEEDBYTE | ||
167 | z->state->sub.check.need = (uLong)NEXTBYTE << 24; | ||
168 | z->state->mode = DICT3; | ||
169 | case DICT3: | ||
170 | NEEDBYTE | ||
171 | z->state->sub.check.need += (uLong)NEXTBYTE << 16; | ||
172 | z->state->mode = DICT2; | ||
173 | case DICT2: | ||
174 | NEEDBYTE | ||
175 | z->state->sub.check.need += (uLong)NEXTBYTE << 8; | ||
176 | z->state->mode = DICT1; | ||
177 | case DICT1: | ||
178 | NEEDBYTE | ||
179 | z->state->sub.check.need += (uLong)NEXTBYTE; | ||
180 | z->adler = z->state->sub.check.need; | ||
181 | z->state->mode = DICT0; | ||
182 | return Z_NEED_DICT; | ||
183 | case DICT0: | ||
184 | z->state->mode = I_BAD; | ||
185 | z->msg = (char*)"need dictionary"; | ||
186 | z->state->sub.marker = 0; /* can try inflateSync */ | ||
187 | return Z_STREAM_ERROR; | ||
188 | case BLOCKS: | ||
189 | r = zlib_inflate_blocks(z->state->blocks, z, r); | ||
190 | if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0) | ||
191 | r = zlib_inflate_packet_flush(z->state->blocks); | ||
192 | if (r == Z_DATA_ERROR) | ||
193 | { | ||
194 | z->state->mode = I_BAD; | ||
195 | z->state->sub.marker = 0; /* can try inflateSync */ | ||
196 | break; | ||
197 | } | ||
198 | if (r == Z_OK) | ||
199 | r = trv; | ||
200 | if (r != Z_STREAM_END) | ||
201 | return r; | ||
202 | r = trv; | ||
203 | zlib_inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was); | ||
204 | if (z->state->nowrap) | ||
205 | { | ||
206 | z->state->mode = I_DONE; | ||
207 | break; | ||
208 | } | ||
209 | z->state->mode = CHECK4; | ||
210 | case CHECK4: | ||
211 | NEEDBYTE | ||
212 | z->state->sub.check.need = (uLong)NEXTBYTE << 24; | ||
213 | z->state->mode = CHECK3; | ||
214 | case CHECK3: | ||
215 | NEEDBYTE | ||
216 | z->state->sub.check.need += (uLong)NEXTBYTE << 16; | ||
217 | z->state->mode = CHECK2; | ||
218 | case CHECK2: | ||
219 | NEEDBYTE | ||
220 | z->state->sub.check.need += (uLong)NEXTBYTE << 8; | ||
221 | z->state->mode = CHECK1; | ||
222 | case CHECK1: | ||
223 | NEEDBYTE | ||
224 | z->state->sub.check.need += (uLong)NEXTBYTE; | ||
225 | |||
226 | if (z->state->sub.check.was != z->state->sub.check.need) | ||
227 | { | ||
228 | z->state->mode = I_BAD; | ||
229 | z->msg = (char*)"incorrect data check"; | ||
230 | z->state->sub.marker = 5; /* can't try inflateSync */ | ||
231 | break; | ||
232 | } | ||
233 | z->state->mode = I_DONE; | ||
234 | case I_DONE: | ||
235 | return Z_STREAM_END; | ||
236 | case I_BAD: | ||
237 | return Z_DATA_ERROR; | ||
238 | default: | ||
239 | return Z_STREAM_ERROR; | ||
240 | } | ||
241 | empty: | ||
242 | if (f != Z_PACKET_FLUSH) | ||
243 | return r; | ||
244 | z->state->mode = I_BAD; | ||
245 | z->msg = (char *)"need more for packet flush"; | ||
246 | z->state->sub.marker = 0; /* can try inflateSync */ | ||
247 | return Z_DATA_ERROR; | ||
248 | } | ||
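/*
 * Worked example for the METHOD/FLAG checks above (editorial note, not
 * part of the original patch): the two zlib header bytes, read as a
 * big-endian 16-bit value, must be divisible by 31.  For the common
 * header 0x78 0x9c:
 *
 *	(0x78 & 0xf) == 8 == Z_DEFLATED, (0x78 >> 4) + 8 == 15 window bits,
 *	(0x78 << 8) + 0x9c == 0x789c == 30876 == 31 * 996,
 *
 * so METHOD and FLAG both pass; and since (0x9c & PRESET_DICT) == 0,
 * decoding skips the DICT states and proceeds directly to BLOCKS.
 */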
diff --git a/lib/zlib_inflate/inflate_syms.c b/lib/zlib_inflate/inflate_syms.c new file mode 100644 index 000000000000..aa1b08189121 --- /dev/null +++ b/lib/zlib_inflate/inflate_syms.c | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * linux/lib/zlib_inflate/inflate_syms.c | ||
3 | * | ||
4 | * Exported symbols for the inflate functionality. | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/init.h> | ||
10 | |||
11 | #include <linux/zlib.h> | ||
12 | |||
13 | EXPORT_SYMBOL(zlib_inflate_workspacesize); | ||
14 | EXPORT_SYMBOL(zlib_inflate); | ||
15 | EXPORT_SYMBOL(zlib_inflateInit_); | ||
16 | EXPORT_SYMBOL(zlib_inflateInit2_); | ||
17 | EXPORT_SYMBOL(zlib_inflateEnd); | ||
18 | EXPORT_SYMBOL(zlib_inflateSync); | ||
19 | EXPORT_SYMBOL(zlib_inflateReset); | ||
20 | EXPORT_SYMBOL(zlib_inflateSyncPoint); | ||
21 | EXPORT_SYMBOL(zlib_inflateIncomp); | ||
22 | MODULE_LICENSE("GPL"); | ||
diff --git a/lib/zlib_inflate/inflate_sync.c b/lib/zlib_inflate/inflate_sync.c new file mode 100644 index 000000000000..e07bdb21f55c --- /dev/null +++ b/lib/zlib_inflate/inflate_sync.c | |||
@@ -0,0 +1,148 @@ | |||
1 | /* inflate_sync.c -- zlib interface to inflate modules | ||
2 | * Copyright (C) 1995-1998 Mark Adler | ||
3 | * For conditions of distribution and use, see copyright notice in zlib.h | ||
4 | */ | ||
5 | |||
6 | #include <linux/zutil.h> | ||
7 | #include "infblock.h" | ||
8 | #include "infutil.h" | ||
9 | |||
10 | int zlib_inflateSync( | ||
11 | z_streamp z | ||
12 | ) | ||
13 | { | ||
14 | uInt n; /* number of bytes to look at */ | ||
15 | Byte *p; /* pointer to bytes */ | ||
16 | uInt m; /* number of marker bytes found in a row */ | ||
17 | uLong r, w; /* temporaries to save total_in and total_out */ | ||
18 | |||
19 | /* set up */ | ||
20 | if (z == NULL || z->state == NULL) | ||
21 | return Z_STREAM_ERROR; | ||
22 | if (z->state->mode != I_BAD) | ||
23 | { | ||
24 | z->state->mode = I_BAD; | ||
25 | z->state->sub.marker = 0; | ||
26 | } | ||
27 | if ((n = z->avail_in) == 0) | ||
28 | return Z_BUF_ERROR; | ||
29 | p = z->next_in; | ||
30 | m = z->state->sub.marker; | ||
31 | |||
32 | /* search */ | ||
33 | while (n && m < 4) | ||
34 | { | ||
35 | static const Byte mark[4] = {0, 0, 0xff, 0xff}; | ||
36 | if (*p == mark[m]) /* matched the next marker byte */ | ||
37 | m++; | ||
38 | else if (*p) /* nonzero mismatch: start over */ | ||
39 | m = 0; | ||
40 | else /* a zero may begin a new marker */ | ||
41 | m = 4 - m; /* (m==3 -> 1, m==2 -> 2) */ | ||
42 | p++, n--; | ||
43 | } | ||
44 | |||
45 | /* restore */ | ||
46 | z->total_in += p - z->next_in; | ||
47 | z->next_in = p; | ||
48 | z->avail_in = n; | ||
49 | z->state->sub.marker = m; | ||
50 | |||
51 | /* return no joy or set up to restart on a new block */ | ||
52 | if (m != 4) | ||
53 | return Z_DATA_ERROR; | ||
54 | r = z->total_in; w = z->total_out; | ||
55 | zlib_inflateReset(z); | ||
56 | z->total_in = r; z->total_out = w; | ||
57 | z->state->mode = BLOCKS; | ||
58 | return Z_OK; | ||
59 | } | ||
60 | |||
61 | |||
62 | /* Returns true if inflate is currently at the end of a block generated | ||
63 | * by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP | ||
64 | * implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH | ||
65 | * but removes the length bytes of the resulting empty stored block. When | ||
66 | * decompressing, PPP checks that at the end of an input packet, inflate is | ||
67 | * waiting for these length bytes. | ||
68 | */ | ||
69 | int zlib_inflateSyncPoint( | ||
70 | z_streamp z | ||
71 | ) | ||
72 | { | ||
73 | if (z == NULL || z->state == NULL || z->state->blocks == NULL) | ||
74 | return Z_STREAM_ERROR; | ||
75 | return zlib_inflate_blocks_sync_point(z->state->blocks); | ||
76 | } | ||
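/*
 * Illustrative sketch, not part of the original patch: a PPP
 * implementation of the kind described above might verify, once a whole
 * packet has been consumed, that inflate stopped exactly where the
 * stripped length bytes would have been:
 *
 *	zlib_inflate(z, Z_NO_FLUSH);                 (consume the packet)
 *	if (z->avail_in == 0 && !zlib_inflateSyncPoint(z))
 *		goto bad_packet;                     (hypothetical label)
 */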
77 | |||
78 | /* | ||
79 | * This subroutine adds the data at next_in/avail_in to the output history | ||
80 | * without performing any output. The output buffer must be "caught up"; | ||
81 | * i.e. no pending output (hence s->read equals s->write), and the state must | ||
82 | * be BLOCKS (i.e. we should be willing to see the start of a series of | ||
83 | * BLOCKS). On exit, the output will also be caught up, and the checksum | ||
84 | * will have been updated if need be. | ||
85 | */ | ||
86 | static int zlib_inflate_addhistory(inflate_blocks_statef *s, | ||
87 | z_stream *z) | ||
88 | { | ||
89 | uLong b; /* bit buffer (loaded/saved by the macros, unused here) */ | ||
90 | uInt k; /* bits in bit buffer (loaded/saved by the macros, unused here) */ | ||
91 | uInt t; /* temporary storage */ | ||
92 | Byte *p; /* input data pointer */ | ||
93 | uInt n; /* bytes available there */ | ||
94 | Byte *q; /* output window write pointer */ | ||
95 | uInt m; /* bytes to end of window or read pointer */ | ||
96 | |||
97 | if (s->read != s->write) | ||
98 | return Z_STREAM_ERROR; | ||
99 | if (s->mode != TYPE) | ||
100 | return Z_DATA_ERROR; | ||
101 | |||
102 | /* we're ready to rock */ | ||
103 | LOAD | ||
104 | /* while there is input ready, copy to output buffer, moving | ||
105 | * pointers as needed. | ||
106 | */ | ||
107 | while (n) { | ||
108 | t = n; /* how many to do */ | ||
109 | /* is there room until end of buffer? */ | ||
110 | if (t > m) t = m; | ||
111 | /* update check information */ | ||
112 | if (s->checkfn != NULL) | ||
113 | s->check = (*s->checkfn)(s->check, q, t); | ||
114 | memcpy(q, p, t); | ||
115 | q += t; | ||
116 | p += t; | ||
117 | n -= t; | ||
118 | z->total_out += t; | ||
119 | s->read = q; /* drag read pointer forward */ | ||
120 | /* WWRAP */ /* expand WWRAP macro by hand to handle s->read */ | ||
121 | if (q == s->end) { | ||
122 | s->read = q = s->window; | ||
123 | m = WAVAIL; | ||
124 | } | ||
125 | } | ||
126 | UPDATE | ||
127 | return Z_OK; | ||
128 | } | ||
129 | |||
130 | |||
131 | /* | ||
132 | * Entry point for the subroutine above: add the incompressible data at | ||
133 | * next_in/avail_in to the output history without producing any output. | ||
134 | * The stream must be between blocks (mode BLOCKS, with the block decoder | ||
135 | * at a block boundary); zlib_inflate_addhistory() verifies this before | ||
136 | * copying the bytes into the sliding window and updating the checksum | ||
137 | * if need be. | ||
138 | */ | ||
139 | |||
140 | int zlib_inflateIncomp( | ||
141 | z_stream *z | ||
142 | ) | ||
144 | { | ||
145 | if (z->state->mode != BLOCKS) | ||
146 | return Z_DATA_ERROR; | ||
147 | return zlib_inflate_addhistory(z->state->blocks, z); | ||
148 | } | ||
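/*
 * Illustrative sketch, not part of the original patch: a driver that
 * transmitted a packet uncompressed can keep the decompressor's history
 * in sync by pushing the raw bytes through zlib_inflateIncomp().  The
 * function name below is hypothetical.
 */
static int note_incompressible(z_stream *z, unsigned char *pkt, int len)
{
	z->next_in = pkt;
	z->avail_in = len;
	/* no output is produced; the bytes only enter the sliding window */
	return zlib_inflateIncomp(z);
}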
diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c new file mode 100644 index 000000000000..874950ec4858 --- /dev/null +++ b/lib/zlib_inflate/inftrees.c | |||
@@ -0,0 +1,412 @@ | |||
1 | /* inftrees.c -- generate Huffman trees for efficient decoding | ||
2 | * Copyright (C) 1995-1998 Mark Adler | ||
3 | * For conditions of distribution and use, see copyright notice in zlib.h | ||
4 | */ | ||
5 | |||
6 | #include <linux/zutil.h> | ||
7 | #include "inftrees.h" | ||
8 | #include "infutil.h" | ||
9 | |||
10 | static const char inflate_copyright[] __attribute_used__ = | ||
11 | " inflate 1.1.3 Copyright 1995-1998 Mark Adler "; | ||
12 | /* | ||
13 | If you use the zlib library in a product, an acknowledgment is welcome | ||
14 | in the documentation of your product. If for some reason you cannot | ||
15 | include such an acknowledgment, I would appreciate that you keep this | ||
16 | copyright string in the executable of your product. | ||
17 | */ | ||
18 | struct internal_state; | ||
19 | |||
20 | /* simplify the use of the inflate_huft type with some defines */ | ||
21 | #define exop word.what.Exop | ||
22 | #define bits word.what.Bits | ||
23 | |||
24 | |||
25 | static int huft_build ( | ||
26 | uInt *, /* code lengths in bits */ | ||
27 | uInt, /* number of codes */ | ||
28 | uInt, /* number of "simple" codes */ | ||
29 | const uInt *, /* list of base values for non-simple codes */ | ||
30 | const uInt *, /* list of extra bits for non-simple codes */ | ||
31 | inflate_huft **, /* result: starting table */ | ||
32 | uInt *, /* maximum lookup bits (returns actual) */ | ||
33 | inflate_huft *, /* space for trees */ | ||
34 | uInt *, /* hufts used in space */ | ||
35 | uInt * ); /* space for values */ | ||
36 | |||
37 | /* Tables for deflate from PKZIP's appnote.txt. */ | ||
38 | static const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */ | ||
39 | 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, | ||
40 | 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; | ||
41 | /* see note #13 above about 258 */ | ||
42 | static const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */ | ||
43 | 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, | ||
44 | 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; /* 112==invalid */ | ||
45 | static const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */ | ||
46 | 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, | ||
47 | 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, | ||
48 | 8193, 12289, 16385, 24577}; | ||
49 | static const uInt cpdext[30] = { /* Extra bits for distance codes */ | ||
50 | 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, | ||
51 | 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, | ||
52 | 12, 12, 13, 13}; | ||
53 | |||
54 | /* | ||
55 | Huffman code decoding is performed using a multi-level table lookup. | ||
56 | The fastest way to decode is to simply build a lookup table whose | ||
57 | size is determined by the longest code. However, the time it takes | ||
58 | to build this table can also be a factor if the data being decoded | ||
59 | is not very long. The most common codes are necessarily the | ||
60 | shortest codes, so those codes dominate the decoding time, and hence | ||
61 | the speed. The idea is you can have a shorter table that decodes the | ||
62 | shorter, more probable codes, and then point to subsidiary tables for | ||
63 | the longer codes. The time it costs to decode the longer codes is | ||
64 | then traded against the time it takes to make longer tables. | ||
65 | |||
66 | The results of this trade-off are in the variables lbits and dbits | ||
67 | below. lbits is the number of bits the first level table for literal/ | ||
68 | length codes can decode in one step, and dbits is the same thing for | ||
69 | the distance codes. Subsequent tables are also less than or equal to | ||
70 | those sizes. These values may be adjusted either when all of the | ||
71 | codes are shorter than that, in which case the longest code length in | ||
72 | bits is used, or when the shortest code is *longer* than the requested | ||
73 | table size, in which case the length of the shortest code in bits is | ||
74 | used. | ||
75 | |||
76 | There are two different values for the two tables, since they code a | ||
77 | different number of possibilities each. The literal/length table | ||
78 | codes 286 possible values, or in a flat code, a little over eight | ||
79 | bits. The distance table codes 30 possible values, or a little less | ||
80 | than five bits, flat. The optimum values for speed end up being | ||
81 | about one bit more than those, so lbits is 8+1 and dbits is 5+1. | ||
82 | The optimum values may differ though from machine to machine, and | ||
83 | possibly even between compilers. Your mileage may vary. | ||
84 | */ | ||
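/*
 * Illustrative sketch of the two-level lookup described above (editorial
 * note, not part of the original patch; the real tables are inflate_huft
 * structures built by huft_build() below, and the real decode loop lives
 * in infcodes.c).  All names here are hypothetical.
 */
struct example_entry {
	unsigned short val;	/* decoded symbol, or sub-table offset */
	unsigned char bits;	/* index bits used by the sub-table */
	unsigned char link;	/* nonzero: val is a sub-table offset */
};

static unsigned example_lookup(const struct example_entry *tab,
			       unsigned lbits, unsigned long bitbuf)
{
	/* first level: index by the next lbits bits of input */
	const struct example_entry *e = &tab[bitbuf & ((1u << lbits) - 1)];

	/* longer codes fall through to a sub-table fed the following bits */
	if (e->link)
		e = &tab[e->val + ((bitbuf >> lbits) & ((1u << e->bits) - 1))];
	return e->val;
}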
85 | |||
86 | |||
87 | /* If BMAX needs to be larger than 16, then h and x[] should be uLong. */ | ||
88 | #define BMAX 15 /* maximum bit length of any code */ | ||
89 | |||
90 | static int huft_build( | ||
91 | uInt *b, /* code lengths in bits (all assumed <= BMAX) */ | ||
92 | uInt n, /* number of codes (assumed <= 288) */ | ||
93 | uInt s, /* number of simple-valued codes (0..s-1) */ | ||
94 | const uInt *d, /* list of base values for non-simple codes */ | ||
95 | const uInt *e, /* list of extra bits for non-simple codes */ | ||
96 | inflate_huft **t, /* result: starting table */ | ||
97 | uInt *m, /* maximum lookup bits, returns actual */ | ||
98 | inflate_huft *hp, /* space for trees */ | ||
99 | uInt *hn, /* hufts used in space */ | ||
100 | uInt *v /* working area: values in order of bit length */ | ||
101 | ) | ||
102 | /* Given a list of code lengths and a maximum table size, make a set of | ||
103 | tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR | ||
104 | if the given code set is incomplete (the tables are still built in this | ||
105 | case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of | ||
106 | lengths), or Z_MEM_ERROR if not enough memory. */ | ||
107 | { | ||
108 | |||
109 | uInt a; /* counter for codes of length k */ | ||
110 | uInt c[BMAX+1]; /* bit length count table */ | ||
111 | uInt f; /* i repeats in table every f entries */ | ||
112 | int g; /* maximum code length */ | ||
113 | int h; /* table level */ | ||
114 | register uInt i; /* counter, current code */ | ||
115 | register uInt j; /* counter */ | ||
116 | register int k; /* number of bits in current code */ | ||
117 | int l; /* bits per table (returned in m) */ | ||
118 | uInt mask; /* (1 << w) - 1, to avoid cc -O bug on HP */ | ||
119 | register uInt *p; /* pointer into c[], b[], or v[] */ | ||
120 | inflate_huft *q; /* points to current table */ | ||
121 | struct inflate_huft_s r; /* table entry for structure assignment */ | ||
122 | inflate_huft *u[BMAX]; /* table stack */ | ||
123 | register int w; /* bits before this table == (l * h) */ | ||
124 | uInt x[BMAX+1]; /* bit offsets, then code stack */ | ||
125 | uInt *xp; /* pointer into x */ | ||
126 | int y; /* number of dummy codes added */ | ||
127 | uInt z; /* number of entries in current table */ | ||
128 | |||
129 | |||
130 | /* Generate counts for each bit length */ | ||
131 | p = c; | ||
132 | #define C0 *p++ = 0; | ||
133 | #define C2 C0 C0 C0 C0 | ||
134 | #define C4 C2 C2 C2 C2 | ||
135 | C4 /* clear c[]--assume BMAX+1 is 16 */ | ||
136 | p = b; i = n; | ||
137 | do { | ||
138 | c[*p++]++; /* assume all entries <= BMAX */ | ||
139 | } while (--i); | ||
140 | if (c[0] == n) /* null input--all zero length codes */ | ||
141 | { | ||
142 | *t = NULL; | ||
143 | *m = 0; | ||
144 | return Z_OK; | ||
145 | } | ||
146 | |||
147 | |||
148 | /* Find minimum and maximum length, bound *m by those */ | ||
149 | l = *m; | ||
150 | for (j = 1; j <= BMAX; j++) | ||
151 | if (c[j]) | ||
152 | break; | ||
153 | k = j; /* minimum code length */ | ||
154 | if ((uInt)l < j) | ||
155 | l = j; | ||
156 | for (i = BMAX; i; i--) | ||
157 | if (c[i]) | ||
158 | break; | ||
159 | g = i; /* maximum code length */ | ||
160 | if ((uInt)l > i) | ||
161 | l = i; | ||
162 | *m = l; | ||
163 | |||
164 | |||
165 | /* Adjust last length count to fill out codes, if needed */ | ||
166 | for (y = 1 << j; j < i; j++, y <<= 1) | ||
167 | if ((y -= c[j]) < 0) | ||
168 | return Z_DATA_ERROR; | ||
169 | if ((y -= c[i]) < 0) | ||
170 | return Z_DATA_ERROR; | ||
171 | c[i] += y; | ||
172 | |||
173 | |||
174 | /* Generate starting offsets into the value table for each length */ | ||
175 | x[1] = j = 0; | ||
176 | p = c + 1; xp = x + 2; | ||
177 | while (--i) { /* note that i == g from above */ | ||
178 | *xp++ = (j += *p++); | ||
179 | } | ||
180 | |||
181 | |||
182 | /* Make a table of values in order of bit lengths */ | ||
183 | p = b; i = 0; | ||
184 | do { | ||
185 | if ((j = *p++) != 0) | ||
186 | v[x[j]++] = i; | ||
187 | } while (++i < n); | ||
188 | n = x[g]; /* set n to length of v */ | ||
189 | |||
190 | |||
191 | /* Generate the Huffman codes and for each, make the table entries */ | ||
192 | x[0] = i = 0; /* first Huffman code is zero */ | ||
193 | p = v; /* grab values in bit order */ | ||
194 | h = -1; /* no tables yet--level -1 */ | ||
195 | w = -l; /* bits decoded == (l * h) */ | ||
196 | u[0] = NULL; /* just to keep compilers happy */ | ||
197 | q = NULL; /* ditto */ | ||
198 | z = 0; /* ditto */ | ||
199 | |||
200 | /* go through the bit lengths (k already is bits in shortest code) */ | ||
201 | for (; k <= g; k++) | ||
202 | { | ||
203 | a = c[k]; | ||
204 | while (a--) | ||
205 | { | ||
206 | /* here i is the Huffman code of length k bits for value *p */ | ||
207 | /* make tables up to required level */ | ||
208 | while (k > w + l) | ||
209 | { | ||
210 | h++; | ||
211 | w += l; /* previous table always l bits */ | ||
212 | |||
213 | /* compute minimum size table less than or equal to l bits */ | ||
214 | z = g - w; | ||
215 | z = z > (uInt)l ? l : z; /* table size upper limit */ | ||
216 | if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */ | ||
217 | { /* too few codes for k-w bit table */ | ||
218 | f -= a + 1; /* deduct codes from patterns left */ | ||
219 | xp = c + k; | ||
220 | if (j < z) | ||
221 | while (++j < z) /* try smaller tables up to z bits */ | ||
222 | { | ||
223 | if ((f <<= 1) <= *++xp) | ||
224 | break; /* enough codes to use up j bits */ | ||
225 | f -= *xp; /* else deduct codes from patterns */ | ||
226 | } | ||
227 | } | ||
228 | z = 1 << j; /* table entries for j-bit table */ | ||
229 | |||
230 | /* allocate new table */ | ||
231 | if (*hn + z > MANY) /* (note: doesn't matter for fixed) */ | ||
232 | return Z_DATA_ERROR; /* overflow of MANY */ | ||
233 | u[h] = q = hp + *hn; | ||
234 | *hn += z; | ||
235 | |||
236 | /* connect to last table, if there is one */ | ||
237 | if (h) | ||
238 | { | ||
239 | x[h] = i; /* save pattern for backing up */ | ||
240 | r.bits = (Byte)l; /* bits to dump before this table */ | ||
241 | r.exop = (Byte)j; /* bits in this table */ | ||
242 | j = i >> (w - l); | ||
243 | r.base = (uInt)(q - u[h-1] - j); /* offset to this table */ | ||
244 | u[h-1][j] = r; /* connect to last table */ | ||
245 | } | ||
246 | else | ||
247 | *t = q; /* first table is returned result */ | ||
248 | } | ||
249 | |||
250 | /* set up table entry in r */ | ||
251 | r.bits = (Byte)(k - w); | ||
252 | if (p >= v + n) | ||
253 | r.exop = 128 + 64; /* out of values--invalid code */ | ||
254 | else if (*p < s) | ||
255 | { | ||
256 | r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */ | ||
257 | r.base = *p++; /* simple code is just the value */ | ||
258 | } | ||
259 | else | ||
260 | { | ||
261 | r.exop = (Byte)(e[*p - s] + 16 + 64);/* non-simple--look up in lists */ | ||
262 | r.base = d[*p++ - s]; | ||
263 | } | ||
264 | |||
265 | /* fill code-like entries with r */ | ||
266 | f = 1 << (k - w); | ||
267 | for (j = i >> w; j < z; j += f) | ||
268 | q[j] = r; | ||
269 | |||
270 | /* backwards increment the k-bit code i */ | ||
271 | for (j = 1 << (k - 1); i & j; j >>= 1) | ||
272 | i ^= j; | ||
273 | i ^= j; | ||
274 | |||
275 | /* backup over finished tables */ | ||
276 | mask = (1 << w) - 1; /* needed on HP, cc -O bug */ | ||
277 | while ((i & mask) != x[h]) | ||
278 | { | ||
279 | h--; /* don't need to update q */ | ||
280 | w -= l; | ||
281 | mask = (1 << w) - 1; | ||
282 | } | ||
283 | } | ||
284 | } | ||
285 | |||
286 | |||
287 | /* Return Z_BUF_ERROR if we were given an incomplete table */ | ||
288 | return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK; | ||
289 | } | ||
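/*
 * Worked example for the steps above (editorial note, not part of the
 * original patch): for code lengths b[] = {3,3,3,3,3,2,4,4} over values
 * 0..7, the counts come out as c[2]=1, c[3]=5, c[4]=2 (a complete set:
 * 1/4 + 5/8 + 2/16 = 1), the offsets as x[2]=0, x[3]=1, x[4]=6, and v[]
 * in order of bit length is {5, 0,1,2,3,4, 6,7}.  The canonical codes
 * assigned are
 *
 *	value 5:     00
 *	values 0-4:  010 .. 110
 *	values 6-7:  1110, 1111
 *
 * (huft_build indexes its tables with these bits reversed, since inflate
 * reads bits from the stream least-significant first).
 */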
290 | |||
291 | |||
292 | int zlib_inflate_trees_bits( | ||
293 | uInt *c, /* 19 code lengths */ | ||
294 | uInt *bb, /* bits tree desired/actual depth */ | ||
295 | inflate_huft **tb, /* bits tree result */ | ||
296 | inflate_huft *hp, /* space for trees */ | ||
297 | z_streamp z /* for messages */ | ||
298 | ) | ||
299 | { | ||
300 | int r; | ||
301 | uInt hn = 0; /* hufts used in space */ | ||
302 | uInt *v; /* work area for huft_build */ | ||
303 | |||
304 | v = WS(z)->tree_work_area_1; | ||
305 | r = huft_build(c, 19, 19, NULL, NULL, tb, bb, hp, &hn, v); | ||
306 | if (r == Z_DATA_ERROR) | ||
307 | z->msg = (char*)"oversubscribed dynamic bit lengths tree"; | ||
308 | else if (r == Z_BUF_ERROR || *bb == 0) | ||
309 | { | ||
310 | z->msg = (char*)"incomplete dynamic bit lengths tree"; | ||
311 | r = Z_DATA_ERROR; | ||
312 | } | ||
313 | return r; | ||
314 | } | ||
315 | |||
316 | int zlib_inflate_trees_dynamic( | ||
317 | uInt nl, /* number of literal/length codes */ | ||
318 | uInt nd, /* number of distance codes */ | ||
319 | uInt *c, /* that many (total) code lengths */ | ||
320 | uInt *bl, /* literal desired/actual bit depth */ | ||
321 | uInt *bd, /* distance desired/actual bit depth */ | ||
322 | inflate_huft **tl, /* literal/length tree result */ | ||
323 | inflate_huft **td, /* distance tree result */ | ||
324 | inflate_huft *hp, /* space for trees */ | ||
325 | z_streamp z /* for messages */ | ||
326 | ) | ||
327 | { | ||
328 | int r; | ||
329 | uInt hn = 0; /* hufts used in space */ | ||
330 | uInt *v; /* work area for huft_build */ | ||
331 | |||
332 | /* allocate work area */ | ||
333 | v = WS(z)->tree_work_area_2; | ||
334 | |||
335 | /* build literal/length tree */ | ||
336 | r = huft_build(c, nl, 257, cplens, cplext, tl, bl, hp, &hn, v); | ||
337 | if (r != Z_OK || *bl == 0) | ||
338 | { | ||
339 | if (r == Z_DATA_ERROR) | ||
340 | z->msg = (char*)"oversubscribed literal/length tree"; | ||
341 | else if (r != Z_MEM_ERROR) | ||
342 | { | ||
343 | z->msg = (char*)"incomplete literal/length tree"; | ||
344 | r = Z_DATA_ERROR; | ||
345 | } | ||
346 | return r; | ||
347 | } | ||
348 | |||
349 | /* build distance tree */ | ||
350 | r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, hp, &hn, v); | ||
351 | if (r != Z_OK || (*bd == 0 && nl > 257)) | ||
352 | { | ||
353 | if (r == Z_DATA_ERROR) | ||
354 | z->msg = (char*)"oversubscribed distance tree"; | ||
355 | else if (r == Z_BUF_ERROR) { | ||
356 | #ifdef PKZIP_BUG_WORKAROUND /* note: braces balance within each arm of this #ifdef */ | ||
357 | r = Z_OK; | ||
358 | } | ||
359 | #else | ||
360 | z->msg = (char*)"incomplete distance tree"; | ||
361 | r = Z_DATA_ERROR; | ||
362 | } | ||
363 | else if (r != Z_MEM_ERROR) | ||
364 | { | ||
365 | z->msg = (char*)"empty distance tree with lengths"; | ||
366 | r = Z_DATA_ERROR; | ||
367 | } | ||
368 | return r; | ||
369 | #endif | ||
370 | } | ||
371 | |||
372 | /* done */ | ||
373 | return Z_OK; | ||
374 | } | ||
375 | |||
376 | |||
377 | int zlib_inflate_trees_fixed( | ||
378 | uInt *bl, /* literal desired/actual bit depth */ | ||
379 | uInt *bd, /* distance desired/actual bit depth */ | ||
380 | inflate_huft **tl, /* literal/length tree result */ | ||
381 | inflate_huft **td, /* distance tree result */ | ||
382 | inflate_huft *hp, /* space for trees */ | ||
383 | z_streamp z /* for memory allocation */ | ||
384 | ) | ||
385 | { | ||
386 | int i; /* temporary variable */ | ||
387 | unsigned l[288]; /* length list for huft_build */ | ||
388 | uInt *v; /* work area for huft_build */ | ||
389 | |||
390 | /* set up literal table */ | ||
391 | for (i = 0; i < 144; i++) | ||
392 | l[i] = 8; | ||
393 | for (; i < 256; i++) | ||
394 | l[i] = 9; | ||
395 | for (; i < 280; i++) | ||
396 | l[i] = 7; | ||
397 | for (; i < 288; i++) /* make a complete, but wrong code set */ | ||
398 | l[i] = 8; | ||
399 | *bl = 9; | ||
400 | v = WS(z)->tree_work_area_1; | ||
401 | if ((i = huft_build(l, 288, 257, cplens, cplext, tl, bl, hp, &i, v)) != 0) | ||
402 | return i; | ||
403 | |||
404 | /* set up distance table */ | ||
405 | for (i = 0; i < 30; i++) /* make an incomplete code set */ | ||
406 | l[i] = 5; | ||
407 | *bd = 5; | ||
408 | if ((i = huft_build(l, 30, 0, cpdist, cpdext, td, bd, hp, &i, v)) > 1) | ||
409 | return i; | ||
410 | |||
411 | return Z_OK; | ||
412 | } | ||
diff --git a/lib/zlib_inflate/inftrees.h b/lib/zlib_inflate/inftrees.h new file mode 100644 index 000000000000..e37705adc008 --- /dev/null +++ b/lib/zlib_inflate/inftrees.h | |||
@@ -0,0 +1,64 @@ | |||
1 | /* inftrees.h -- header to use inftrees.c | ||
2 | * Copyright (C) 1995-1998 Mark Adler | ||
3 | * For conditions of distribution and use, see copyright notice in zlib.h | ||
4 | */ | ||
5 | |||
6 | /* WARNING: this file should *not* be used by applications. It is | ||
7 | part of the implementation of the compression library and is | ||
8 | subject to change. Applications should only use zlib.h. | ||
9 | */ | ||
10 | |||
11 | /* Huffman code lookup table entry--this entry is four bytes for machines | ||
12 | that have 16-bit pointers (e.g. PC's in the small or medium model). */ | ||
13 | |||
14 | #ifndef _INFTREES_H | ||
15 | #define _INFTREES_H | ||
16 | |||
17 | typedef struct inflate_huft_s inflate_huft; | ||
18 | |||
19 | struct inflate_huft_s { | ||
20 | union { | ||
21 | struct { | ||
22 | Byte Exop; /* number of extra bits or operation */ | ||
23 | Byte Bits; /* number of bits in this code or subcode */ | ||
24 | } what; | ||
25 | uInt pad; /* pad structure to a power of 2 (4 bytes for */ | ||
26 | } word; /* 16-bit, 8 bytes for 32-bit int's) */ | ||
27 | uInt base; /* literal, length base, distance base, | ||
28 | or table offset */ | ||
29 | }; | ||
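/*
 * Editorial note, not part of the original patch, derived from the
 * r.exop assignments in inftrees.c and their consumers in infcodes.c:
 * if Exop == 0 the entry is a literal in `base'; otherwise bit 16 marks
 * a length/distance base in `base' with the low four bits giving the
 * extra-bit count; failing that, a clear bit 64 marks a link whose
 * `base' is a sub-table offset and whose Exop gives that sub-table's
 * index width in bits; bit 32 marks end-of-block; anything else is an
 * invalid code.
 */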
30 | |||
31 | /* Maximum size of dynamic tree. The maximum found in a long but non- | ||
32 | exhaustive search was 1004 huft structures (850 for length/literals | ||
33 | and 154 for distances, the latter actually the result of an | ||
34 | exhaustive search). The actual maximum is not known, but the | ||
35 | value below is more than safe. */ | ||
36 | #define MANY 1440 | ||
37 | |||
38 | extern int zlib_inflate_trees_bits ( | ||
39 | uInt *, /* 19 code lengths */ | ||
40 | uInt *, /* bits tree desired/actual depth */ | ||
41 | inflate_huft **, /* bits tree result */ | ||
42 | inflate_huft *, /* space for trees */ | ||
43 | z_streamp); /* for messages */ | ||
44 | |||
45 | extern int zlib_inflate_trees_dynamic ( | ||
46 | uInt, /* number of literal/length codes */ | ||
47 | uInt, /* number of distance codes */ | ||
48 | uInt *, /* that many (total) code lengths */ | ||
49 | uInt *, /* literal desired/actual bit depth */ | ||
50 | uInt *, /* distance desired/actual bit depth */ | ||
51 | inflate_huft **, /* literal/length tree result */ | ||
52 | inflate_huft **, /* distance tree result */ | ||
53 | inflate_huft *, /* space for trees */ | ||
54 | z_streamp); /* for messages */ | ||
55 | |||
56 | extern int zlib_inflate_trees_fixed ( | ||
57 | uInt *, /* literal desired/actual bit depth */ | ||
58 | uInt *, /* distance desired/actual bit depth */ | ||
59 | inflate_huft **, /* literal/length tree result */ | ||
60 | inflate_huft **, /* distance tree result */ | ||
61 | inflate_huft *, /* space for trees */ | ||
62 | z_streamp); /* for memory allocation */ | ||
63 | |||
64 | #endif /* _INFTREES_H */ | ||
diff --git a/lib/zlib_inflate/infutil.c b/lib/zlib_inflate/infutil.c new file mode 100644 index 000000000000..00202b3438e1 --- /dev/null +++ b/lib/zlib_inflate/infutil.c | |||
@@ -0,0 +1,88 @@ | |||
1 | /* infutil.c -- data and routines common to blocks and codes | ||
2 | * Copyright (C) 1995-1998 Mark Adler | ||
3 | * For conditions of distribution and use, see copyright notice in zlib.h | ||
4 | */ | ||
5 | |||
6 | #include <linux/zutil.h> | ||
7 | #include "infblock.h" | ||
8 | #include "inftrees.h" | ||
9 | #include "infcodes.h" | ||
10 | #include "infutil.h" | ||
11 | |||
12 | struct inflate_codes_state; | ||
13 | |||
14 | /* And'ing with mask[n] masks the lower n bits */ | ||
15 | uInt zlib_inflate_mask[17] = { | ||
16 | 0x0000, | ||
17 | 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, | ||
18 | 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff | ||
19 | }; | ||
20 | |||
21 | |||
22 | /* copy as much as possible from the sliding window to the output area */ | ||
23 | int zlib_inflate_flush( | ||
24 | inflate_blocks_statef *s, | ||
25 | z_streamp z, | ||
26 | int r | ||
27 | ) | ||
28 | { | ||
29 | uInt n; | ||
30 | Byte *p; | ||
31 | Byte *q; | ||
32 | |||
33 | /* local copies of source and destination pointers */ | ||
34 | p = z->next_out; | ||
35 | q = s->read; | ||
36 | |||
37 | /* compute number of bytes to copy as far as end of window */ | ||
38 | n = (uInt)((q <= s->write ? s->write : s->end) - q); | ||
39 | if (n > z->avail_out) n = z->avail_out; | ||
40 | if (n && r == Z_BUF_ERROR) r = Z_OK; | ||
41 | |||
42 | /* update counters */ | ||
43 | z->avail_out -= n; | ||
44 | z->total_out += n; | ||
45 | |||
46 | /* update check information */ | ||
47 | if (s->checkfn != NULL) | ||
48 | z->adler = s->check = (*s->checkfn)(s->check, q, n); | ||
49 | |||
50 | /* copy as far as end of window */ | ||
51 | memcpy(p, q, n); | ||
52 | p += n; | ||
53 | q += n; | ||
54 | |||
55 | /* see if more to copy at beginning of window */ | ||
56 | if (q == s->end) | ||
57 | { | ||
58 | /* wrap pointers */ | ||
59 | q = s->window; | ||
60 | if (s->write == s->end) | ||
61 | s->write = s->window; | ||
62 | |||
63 | /* compute bytes to copy */ | ||
64 | n = (uInt)(s->write - q); | ||
65 | if (n > z->avail_out) n = z->avail_out; | ||
66 | if (n && r == Z_BUF_ERROR) r = Z_OK; | ||
67 | |||
68 | /* update counters */ | ||
69 | z->avail_out -= n; | ||
70 | z->total_out += n; | ||
71 | |||
72 | /* update check information */ | ||
73 | if (s->checkfn != NULL) | ||
74 | z->adler = s->check = (*s->checkfn)(s->check, q, n); | ||
75 | |||
76 | /* copy */ | ||
77 | memcpy(p, q, n); | ||
78 | p += n; | ||
79 | q += n; | ||
80 | } | ||
81 | |||
82 | /* update pointers */ | ||
83 | z->next_out = p; | ||
84 | s->read = q; | ||
85 | |||
86 | /* done */ | ||
87 | return r; | ||
88 | } | ||
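/*
 * Worked example for the wraparound above (editorial note, not part of
 * the original patch): with an 8-byte window, read == window+6 and
 * write == window+2, the first memcpy moves the two bytes up to `end',
 * q wraps back to `window', the second pass computes n = write - q = 2
 * and moves the remaining two bytes, leaving read == write (all output
 * flushed).
 */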
diff --git a/lib/zlib_inflate/infutil.h b/lib/zlib_inflate/infutil.h new file mode 100644 index 000000000000..a15875fc5f72 --- /dev/null +++ b/lib/zlib_inflate/infutil.h | |||
@@ -0,0 +1,197 @@ | |||
1 | /* infutil.h -- types and macros common to blocks and codes | ||
2 | * Copyright (C) 1995-1998 Mark Adler | ||
3 | * For conditions of distribution and use, see copyright notice in zlib.h | ||
4 | */ | ||
5 | |||
6 | /* WARNING: this file should *not* be used by applications. It is | ||
7 | part of the implementation of the compression library and is | ||
8 | subject to change. Applications should only use zlib.h. | ||
9 | */ | ||
10 | |||
11 | #ifndef _INFUTIL_H | ||
12 | #define _INFUTIL_H | ||
13 | |||
14 | #include <linux/zconf.h> | ||
15 | #include "inftrees.h" | ||
16 | #include "infcodes.h" | ||
17 | |||
18 | typedef enum { | ||
19 | TYPE, /* get type bits (3, including end bit) */ | ||
20 | LENS, /* get lengths for stored */ | ||
21 | STORED, /* processing stored block */ | ||
22 | TABLE, /* get table lengths */ | ||
23 | BTREE, /* get bit lengths tree for a dynamic block */ | ||
24 | DTREE, /* get length, distance trees for a dynamic block */ | ||
25 | CODES, /* processing fixed or dynamic block */ | ||
26 | DRY, /* output remaining window bytes */ | ||
27 | B_DONE, /* finished last block, done */ | ||
28 | B_BAD} /* got a data error--stuck here */ | ||
29 | inflate_block_mode; | ||
30 | |||
31 | /* inflate blocks semi-private state */ | ||
32 | struct inflate_blocks_state { | ||
33 | |||
34 | /* mode */ | ||
35 | inflate_block_mode mode; /* current inflate_block mode */ | ||
36 | |||
37 | /* mode dependent information */ | ||
38 | union { | ||
39 | uInt left; /* if STORED, bytes left to copy */ | ||
40 | struct { | ||
41 | uInt table; /* table lengths (14 bits) */ | ||
42 | uInt index; /* index into blens (or border) */ | ||
43 | uInt *blens; /* bit lengths of codes */ | ||
44 | uInt bb; /* bit length tree depth */ | ||
45 | inflate_huft *tb; /* bit length decoding tree */ | ||
46 | } trees; /* if DTREE, decoding info for trees */ | ||
47 | struct { | ||
48 | inflate_codes_statef | ||
49 | *codes; | ||
50 | } decode; /* if CODES, current state */ | ||
51 | } sub; /* submode */ | ||
52 | uInt last; /* true if this block is the last block */ | ||
53 | |||
54 | /* mode independent information */ | ||
55 | uInt bitk; /* bits in bit buffer */ | ||
56 | uLong bitb; /* bit buffer */ | ||
57 | inflate_huft *hufts; /* single malloc for tree space */ | ||
58 | Byte *window; /* sliding window */ | ||
59 | Byte *end; /* one byte after sliding window */ | ||
60 | Byte *read; /* window read pointer */ | ||
61 | Byte *write; /* window write pointer */ | ||
62 | check_func checkfn; /* check function */ | ||
63 | uLong check; /* check on output */ | ||
64 | |||
65 | }; | ||
66 | |||
67 | |||
68 | /* defines for inflate input/output */ | ||
69 | /* update pointers and return */ | ||
70 | #define UPDBITS {s->bitb=b;s->bitk=k;} | ||
71 | #define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;} | ||
72 | #define UPDOUT {s->write=q;} | ||
73 | #define UPDATE {UPDBITS UPDIN UPDOUT} | ||
74 | #define LEAVE {UPDATE return zlib_inflate_flush(s,z,r);} | ||
75 | /* get bytes and bits */ | ||
76 | #define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;} | ||
77 | #define NEEDBYTE {if(n)r=Z_OK;else LEAVE} | ||
78 | #define NEXTBYTE (n--,*p++) | ||
79 | #define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}} | ||
80 | #define DUMPBITS(j) {b>>=(j);k-=(j);} | ||
81 | /* output bytes */ | ||
82 | #define WAVAIL (uInt)(q<s->read?s->read-q-1:s->end-q) | ||
83 | #define LOADOUT {q=s->write;m=(uInt)WAVAIL;} | ||
84 | #define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=(uInt)WAVAIL;}} | ||
85 | #define FLUSH {UPDOUT r=zlib_inflate_flush(s,z,r); LOADOUT} | ||
86 | #define NEEDOUT {if(m==0){WRAP if(m==0){FLUSH WRAP if(m==0) LEAVE}}r=Z_OK;} | ||
87 | #define OUTBYTE(a) {*q++=(Byte)(a);m--;} | ||
88 | /* load local pointers */ | ||
89 | #define LOAD {LOADIN LOADOUT} | ||
90 | |||
91 | /* masks for lower bits (size given to avoid silly warnings with Visual C++) */ | ||
92 | extern uInt zlib_inflate_mask[17]; | ||
93 | |||
94 | /* copy as much as possible from the sliding window to the output area */ | ||
95 | extern int zlib_inflate_flush ( | ||
96 | inflate_blocks_statef *, | ||
97 | z_streamp , | ||
98 | int); | ||
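/*
 * Illustrative sketch, not part of the original patch: how the macros
 * above compose in a decode routine (the real users are infblock.c and
 * infcodes.c).  The local variable names are fixed by the macro bodies:
 */
static inline int example_get3(inflate_blocks_statef *s, z_streamp z, int r)
{
	uLong b;	/* bit buffer */
	uInt k;		/* bits in bit buffer */
	Byte *p;	/* input pointer */
	uInt n;		/* input bytes available */
	Byte *q;	/* output window write pointer */
	uInt m;		/* output bytes available */
	uInt t;		/* the three bits we peek at */

	LOAD				/* pick up stream and window state */
	NEEDBITS(3)			/* buffer at least 3 bits (may LEAVE) */
	t = (uInt)b & 7;		/* peek, e.g. at a block header */
	DUMPBITS(3)			/* consume them */
	(void)t;
	LEAVE				/* save state, flush the window */
}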
99 | |||
100 | /* inflate private state */ | ||
101 | typedef enum { | ||
102 | METHOD, /* waiting for method byte */ | ||
103 | FLAG, /* waiting for flag byte */ | ||
104 | DICT4, /* four dictionary check bytes to go */ | ||
105 | DICT3, /* three dictionary check bytes to go */ | ||
106 | DICT2, /* two dictionary check bytes to go */ | ||
107 | DICT1, /* one dictionary check byte to go */ | ||
108 | DICT0, /* waiting for inflateSetDictionary */ | ||
109 | BLOCKS, /* decompressing blocks */ | ||
110 | CHECK4, /* four check bytes to go */ | ||
111 | CHECK3, /* three check bytes to go */ | ||
112 | CHECK2, /* two check bytes to go */ | ||
113 | CHECK1, /* one check byte to go */ | ||
114 | I_DONE, /* finished check, done */ | ||
115 | I_BAD} /* got an error--stay here */ | ||
116 | inflate_mode; | ||
117 | |||
118 | struct internal_state { | ||
119 | |||
120 | /* mode */ | ||
121 | inflate_mode mode; /* current inflate mode */ | ||
122 | |||
123 | /* mode dependent information */ | ||
124 | union { | ||
125 | uInt method; /* if FLAGS, method byte */ | ||
126 | struct { | ||
127 | uLong was; /* computed check value */ | ||
128 | uLong need; /* stream check value */ | ||
129 | } check; /* if CHECK, check values to compare */ | ||
130 | uInt marker; /* if BAD, inflateSync's marker bytes count */ | ||
131 | } sub; /* submode */ | ||
132 | |||
133 | /* mode independent information */ | ||
134 | int nowrap; /* flag for no wrapper */ | ||
135 | uInt wbits; /* log2(window size) (8..15, defaults to 15) */ | ||
136 | inflate_blocks_statef | ||
137 | *blocks; /* current inflate_blocks state */ | ||
138 | |||
139 | }; | ||
140 | |||
141 | /* inflate codes private state */ | ||
142 | typedef enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */ | ||
143 | START, /* x: set up for LEN */ | ||
144 | LEN, /* i: get length/literal/eob next */ | ||
145 | LENEXT, /* i: getting length extra (have base) */ | ||
146 | DIST, /* i: get distance next */ | ||
147 | DISTEXT, /* i: getting distance extra */ | ||
148 | COPY, /* o: copying bytes in window, waiting for space */ | ||
149 | LIT, /* o: got literal, waiting for output space */ | ||
150 | WASH, /* o: got eob, possibly still output waiting */ | ||
151 | END, /* x: got eob and all data flushed */ | ||
152 | BADCODE} /* x: got error */ | ||
153 | inflate_codes_mode; | ||
154 | |||
155 | struct inflate_codes_state { | ||
156 | |||
157 | /* mode */ | ||
158 | inflate_codes_mode mode; /* current inflate_codes mode */ | ||
159 | |||
160 | /* mode dependent information */ | ||
161 | uInt len; | ||
162 | union { | ||
163 | struct { | ||
164 | inflate_huft *tree; /* pointer into tree */ | ||
165 | uInt need; /* bits needed */ | ||
166 | } code; /* if LEN or DIST, where in tree */ | ||
167 | uInt lit; /* if LIT, literal */ | ||
168 | struct { | ||
169 | uInt get; /* bits to get for extra */ | ||
170 | uInt dist; /* distance back to copy from */ | ||
171 | } copy; /* if EXT or COPY, where and how much */ | ||
172 | } sub; /* submode */ | ||
173 | |||
174 | /* mode independent information */ | ||
175 | Byte lbits; /* ltree bits decoded per branch */ | ||
176 | Byte dbits; /* dtree bits decoded per branch */ | ||
177 | inflate_huft *ltree; /* literal/length/eob tree */ | ||
178 | inflate_huft *dtree; /* distance tree */ | ||
179 | |||
180 | }; | ||
181 | |||
182 | /* memory allocation for inflation */ | ||
183 | |||
184 | struct inflate_workspace { | ||
185 | inflate_codes_statef working_state; | ||
186 | struct inflate_blocks_state working_blocks_state; | ||
187 | struct internal_state internal_state; | ||
188 | unsigned int tree_work_area_1[19]; | ||
189 | unsigned int tree_work_area_2[288]; | ||
190 | unsigned working_blens[258 + 0x1f + 0x1f]; | ||
191 | inflate_huft working_hufts[MANY]; | ||
192 | unsigned char working_window[1 << MAX_WBITS]; | ||
193 | }; | ||
194 | |||
195 | #define WS(z) ((struct inflate_workspace *)(z->workspace)) | ||
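/*
 * Illustrative sketch, not part of the original patch: kernel users
 * allocate this workspace themselves and attach it to the stream before
 * initialization, e.g.
 *
 *	z_stream z;
 *	z.workspace = vmalloc(zlib_inflate_workspacesize());
 *	if (z.workspace == NULL || zlib_inflateInit(&z) != Z_OK)
 *		(handle the error)
 *
 * WS(z) then overlays struct inflate_workspace on that allocation.
 */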
196 | |||
197 | #endif | ||