path: root/lib
author    Ingo Molnar <mingo@elte.hu>  2009-03-28 18:05:50 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-03-28 18:05:50 -0400
commit    b0d44c0dbbd52effb731b1c0af9afd56215c48de (patch)
tree      3237c0087d91a5390aed05689b9f610ba16fa116 /lib
parent    9537a48ed4b9e4b738943d6da0a0fd4278adf905 (diff)
parent    7c730ccdc1188b97f5c8cb690906242c7ed75c22 (diff)
Merge branch 'linus' into core/iommu
Conflicts:
	arch/x86/Kconfig
Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig                    26
-rw-r--r--   lib/Kconfig.debug              74
-rw-r--r--   lib/Makefile                   12
-rw-r--r--   lib/decompress.c               54
-rw-r--r--   lib/decompress_bunzip2.c      735
-rw-r--r--   lib/decompress_inflate.c      167
-rw-r--r--   lib/decompress_unlzma.c       647
-rw-r--r--   lib/dynamic_debug.c           769
-rw-r--r--   lib/dynamic_printk.c          414
-rw-r--r--   lib/kernel_lock.c               2
-rw-r--r--   lib/kobject.c                   2
-rw-r--r--   lib/kobject_uevent.c           12
-rw-r--r--   lib/nlattr.c                  502
-rw-r--r--   lib/zlib_inflate/inflate.h      4
-rw-r--r--   lib/zlib_inflate/inftrees.h     4
15 files changed, 2966 insertions(+), 458 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 03c2c24b9083..2a9c69f34482 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -98,6 +98,20 @@ config LZO_DECOMPRESS
 	tristate
 
 #
+# These all provide a common interface (hence the apparent duplication with
+# ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
+#
+config DECOMPRESS_GZIP
+	select ZLIB_INFLATE
+	tristate
+
+config DECOMPRESS_BZIP2
+	tristate
+
+config DECOMPRESS_LZMA
+	tristate
+
+#
 # Generic allocator support is selected if needed
 #
 config GENERIC_ALLOCATOR
@@ -136,12 +150,6 @@ config TEXTSEARCH_BM
 config TEXTSEARCH_FSM
 	tristate
 
-#
-# plist support is select#ed if needed
-#
-config PLIST
-	boolean
-
 config HAS_IOMEM
 	boolean
 	depends on !NO_IOMEM
@@ -174,4 +182,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
174 bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS 182 bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
175 depends on EXPERIMENTAL && BROKEN 183 depends on EXPERIMENTAL && BROKEN
176 184
185#
186# Netlink attribute parsing support is select'ed if needed
187#
188config NLATTR
189 bool
190
177endmenu 191endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d9cbada7e2f8..251fa7ba3014 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -847,60 +847,70 @@ config BUILD_DOCSRC
 
 	  Say N if you are unsure.
 
-config DYNAMIC_PRINTK_DEBUG
-	bool "Enable dynamic printk() call support"
+config DYNAMIC_DEBUG
+	bool "Enable dynamic printk() support"
 	default n
 	depends on PRINTK
+	depends on DEBUG_FS
 	select PRINTK_DEBUG
 	help
 
 	  Compiles debug level messages into the kernel, which would not
 	  otherwise be available at runtime. These messages can then be
-	  enabled/disabled on a per module basis. This mechanism implicitly
-	  enables all pr_debug() and dev_dbg() calls. The impact of this
-	  compile option is a larger kernel text size of about 2%.
+	  enabled/disabled based on various levels of scope - per source file,
+	  function, module, format string, and line number. This mechanism
+	  implicitly enables all pr_debug() and dev_dbg() calls. The impact of
+	  this compile option is a larger kernel text size of about 2%.
 
 	  Usage:
 
-	  Dynamic debugging is controlled by the debugfs file,
-	  dynamic_printk/modules. This file contains a list of the modules that
-	  can be enabled. The format of the file is the module name, followed
-	  by a set of flags that can be enabled. The first flag is always the
-	  'enabled' flag. For example:
+	  Dynamic debugging is controlled via the 'dynamic_debug/ddebug' file,
+	  which is contained in the 'debugfs' filesystem. Thus, the debugfs
+	  filesystem must first be mounted before making use of this feature.
+	  We refer to the control file as: <debugfs>/dynamic_debug/ddebug. This
+	  file contains a list of the debug statements that can be enabled. The
+	  format for each line of the file is:
 
-		<module_name> <enabled=0/1>
-		.
-		.
-		.
+		filename:lineno [module]function flags format
 
-	  <module_name> : Name of the module in which the debug call resides
-	  <enabled=0/1> : whether the messages are enabled or not
+	  filename : source file of the debug statement
+	  lineno : line number of the debug statement
+	  module : module that contains the debug statement
+	  function : function that contains the debug statement
+	  flags : 'p' means the line is turned 'on' for printing
+	  format : the format used for the debug statement
 
 	  From a live system:
 
-		snd_hda_intel enabled=0
-		fixup enabled=0
-		driver enabled=0
+		nullarbor:~ # cat <debugfs>/dynamic_debug/ddebug
+		# filename:lineno [module]function flags format
+		fs/aio.c:222 [aio]__put_ioctx - "__put_ioctx:\040freeing\040%p\012"
+		fs/aio.c:248 [aio]ioctx_alloc - "ENOMEM:\040nr_events\040too\040high\012"
+		fs/aio.c:1770 [aio]sys_io_cancel - "calling\040cancel\012"
 
-	  Enable a module:
+	  Example usage:
 
-		$echo "set enabled=1 <module_name>" > dynamic_printk/modules
+		// enable the message at line 1603 of file svcsock.c
+		nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
+						<debugfs>/dynamic_debug/ddebug
 
-	  Disable a module:
+		// enable all the messages in file svcsock.c
+		nullarbor:~ # echo -n 'file svcsock.c +p' >
+						<debugfs>/dynamic_debug/ddebug
 
-		$echo "set enabled=0 <module_name>" > dynamic_printk/modules
+		// enable all the messages in the NFS server module
+		nullarbor:~ # echo -n 'module nfsd +p' >
+						<debugfs>/dynamic_debug/ddebug
 
-	  Enable all modules:
+		// enable all 12 messages in the function svc_process()
+		nullarbor:~ # echo -n 'func svc_process +p' >
+						<debugfs>/dynamic_debug/ddebug
 
-		$echo "set enabled=1 all" > dynamic_printk/modules
+		// disable all 12 messages in the function svc_process()
+		nullarbor:~ # echo -n 'func svc_process -p' >
+						<debugfs>/dynamic_debug/ddebug
 
-	  Disable all modules:
-
-		$echo "set enabled=0 all" > dynamic_printk/modules
-
-	  Finally, passing "dynamic_printk" at the command line enables
-	  debugging for all modules. This mode can be turned off via the above
-	  disable command.
+	  See Documentation/dynamic-debug-howto.txt for additional information.
 
 config DMA_API_DEBUG
 	bool "Enable debugging of DMA-API usage"
diff --git a/lib/Makefile b/lib/Makefile
index 50b48cf63e4a..d6edd6753f40 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,7 +11,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
11 rbtree.o radix-tree.o dump_stack.o \ 11 rbtree.o radix-tree.o dump_stack.o \
12 idr.o int_sqrt.o extable.o prio_tree.o \ 12 idr.o int_sqrt.o extable.o prio_tree.o \
13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
14 proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o 14 proportions.o prio_heap.o ratelimit.o show_mem.o \
15 is_single_threaded.o plist.o decompress.o
15 16
16lib-$(CONFIG_MMU) += ioremap.o 17lib-$(CONFIG_MMU) += ioremap.o
17lib-$(CONFIG_SMP) += cpumask.o 18lib-$(CONFIG_SMP) += cpumask.o
@@ -40,7 +41,6 @@ lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
40lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o 41lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
41obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 42obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
42obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o 43obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
43obj-$(CONFIG_PLIST) += plist.o
44obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o 44obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
45obj-$(CONFIG_DEBUG_LIST) += list_debug.o 45obj-$(CONFIG_DEBUG_LIST) += list_debug.o
46obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o 46obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
@@ -65,6 +65,10 @@ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
65obj-$(CONFIG_LZO_COMPRESS) += lzo/ 65obj-$(CONFIG_LZO_COMPRESS) += lzo/
66obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ 66obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
67 67
68lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
69lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
70lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
71
68obj-$(CONFIG_TEXTSEARCH) += textsearch.o 72obj-$(CONFIG_TEXTSEARCH) += textsearch.o
69obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o 73obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
70obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o 74obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
@@ -82,7 +86,9 @@ obj-$(CONFIG_HAVE_LMB) += lmb.o
82 86
83obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o 87obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
84 88
85obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o 89obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
90
91obj-$(CONFIG_NLATTR) += nlattr.o
86 92
87obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o 93obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
88 94
diff --git a/lib/decompress.c b/lib/decompress.c
new file mode 100644
index 000000000000..d2842f571674
--- /dev/null
+++ b/lib/decompress.c
@@ -0,0 +1,54 @@
1/*
2 * decompress.c
3 *
4 * Detect the decompression method based on magic number
5 */
6
7#include <linux/decompress/generic.h>
8
9#include <linux/decompress/bunzip2.h>
10#include <linux/decompress/unlzma.h>
11#include <linux/decompress/inflate.h>
12
13#include <linux/types.h>
14#include <linux/string.h>
15
16#ifndef CONFIG_DECOMPRESS_GZIP
17# define gunzip NULL
18#endif
19#ifndef CONFIG_DECOMPRESS_BZIP2
20# define bunzip2 NULL
21#endif
22#ifndef CONFIG_DECOMPRESS_LZMA
23# define unlzma NULL
24#endif
25
26static const struct compress_format {
27 unsigned char magic[2];
28 const char *name;
29 decompress_fn decompressor;
30} compressed_formats[] = {
31 { {037, 0213}, "gzip", gunzip },
32 { {037, 0236}, "gzip", gunzip },
33 { {0x42, 0x5a}, "bzip2", bunzip2 },
34 { {0x5d, 0x00}, "lzma", unlzma },
35 { {0, 0}, NULL, NULL }
36};
37
38decompress_fn decompress_method(const unsigned char *inbuf, int len,
39 const char **name)
40{
41 const struct compress_format *cf;
42
43 if (len < 2)
44 return NULL; /* Need at least this much... */
45
46 for (cf = compressed_formats; cf->name; cf++) {
47 if (!memcmp(inbuf, cf->magic, 2))
48 break;
49
50 }
51 if (name)
52 *name = cf->name;
53 return cf->decompressor;
54}
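For context, a hedged sketch of how a caller might consume decompress_method(); the caller, buffer and error handler below are hypothetical (the real user is the initramfs/pre-boot unpacking code), but the decompress_fn signature is the one introduced by this series.

	#include <linux/decompress/generic.h>

	static void unpack_error(char *msg)
	{
		/* decompressors report failures through this callback */
	}

	static int try_unpack(unsigned char *image, int len, unsigned char *out)
	{
		const char *name;
		decompress_fn decomp = decompress_method(image, len, &name);

		if (!decomp)
			return -1;	/* unknown magic, or method compiled out */
		/* gunzip/bunzip2/unlzma all share this signature */
		return decomp(image, len, NULL, NULL, out, NULL, unpack_error);
	}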
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
new file mode 100644
index 000000000000..5d3ddb5fcfd9
--- /dev/null
+++ b/lib/decompress_bunzip2.c
@@ -0,0 +1,735 @@
1/* vi: set sw = 4 ts = 4: */
2/* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
3
4 Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
5 which also acknowledges contributions by Mike Burrows, David Wheeler,
6 Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
7 Robert Sedgewick, and Jon L. Bentley.
8
9 This code is licensed under the LGPLv2:
10 LGPL (http://www.gnu.org/copyleft/lgpl.html)
11*/
12
13/*
14 Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org).
15
16 More efficient reading of Huffman codes, a streamlined read_bunzip()
17 function, and various other tweaks. In (limited) tests, approximately
18 20% faster than bzcat on x86 and about 10% faster on arm.
19
20 Note that about 2/3 of the time is spent in read_bunzip() reversing
21 the Burrows-Wheeler transformation. Much of that time is delay
22 resulting from cache misses.
23
24 I would ask that anyone benefiting from this work, especially those
25 using it in commercial products, consider making a donation to my local
26 non-profit hospice organization in the name of the woman I loved, who
27 passed away Feb. 12, 2003.
28
29 In memory of Toni W. Hagan
30
31 Hospice of Acadiana, Inc.
32 2600 Johnston St., Suite 200
33 Lafayette, LA 70503-3240
34
35 Phone (337) 232-1234 or 1-800-738-2226
36 Fax (337) 232-1297
37
38 http://www.hospiceacadiana.com/
39
40 Manuel
41 */
42
43/*
44 Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu)
45*/
46
47
48#ifndef STATIC
49#include <linux/decompress/bunzip2.h>
50#endif /* !STATIC */
51
52#include <linux/decompress/mm.h>
53
54#ifndef INT_MAX
55#define INT_MAX 0x7fffffff
56#endif
57
58/* Constants for Huffman coding */
59#define MAX_GROUPS 6
60#define GROUP_SIZE 50 /* 64 would have been more efficient */
61#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */
62#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */
63#define SYMBOL_RUNA 0
64#define SYMBOL_RUNB 1
65
66/* Status return values */
67#define RETVAL_OK 0
68#define RETVAL_LAST_BLOCK (-1)
69#define RETVAL_NOT_BZIP_DATA (-2)
70#define RETVAL_UNEXPECTED_INPUT_EOF (-3)
71#define RETVAL_UNEXPECTED_OUTPUT_EOF (-4)
72#define RETVAL_DATA_ERROR (-5)
73#define RETVAL_OUT_OF_MEMORY (-6)
74#define RETVAL_OBSOLETE_INPUT (-7)
75
76/* Other housekeeping constants */
77#define BZIP2_IOBUF_SIZE 4096
78
79/* This is what we know about each Huffman coding group */
80struct group_data {
81 /* We have an extra slot at the end of limit[] for a sentinel value. */
82 int limit[MAX_HUFCODE_BITS+1];
83 int base[MAX_HUFCODE_BITS];
84 int permute[MAX_SYMBOLS];
85 int minLen, maxLen;
86};
87
88/* Structure holding all the housekeeping data, including IO buffers and
89 memory that persists between calls to bunzip */
90struct bunzip_data {
91 /* State for interrupting output loop */
92 int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent;
93 /* I/O tracking data (file handles, buffers, positions, etc.) */
94 int (*fill)(void*, unsigned int);
95 int inbufCount, inbufPos /*, outbufPos*/;
96 unsigned char *inbuf /*,*outbuf*/;
97 unsigned int inbufBitCount, inbufBits;
98 /* The CRC values stored in the block header and calculated from the
99 data */
100 unsigned int crc32Table[256], headerCRC, totalCRC, writeCRC;
101 /* Intermediate buffer and its size (in bytes) */
102 unsigned int *dbuf, dbufSize;
103 /* These things are a bit too big to go on the stack */
104 unsigned char selectors[32768]; /* nSelectors = 15 bits */
105 struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */
106 int io_error; /* non-zero if we have IO error */
107};
108
109
110/* Return the next nnn bits of input. All reads from the compressed input
111 are done through this function. All reads are big endian */
112static unsigned int INIT get_bits(struct bunzip_data *bd, char bits_wanted)
113{
114 unsigned int bits = 0;
115
116 /* If we need to get more data from the byte buffer, do so.
117 (Loop getting one byte at a time to enforce endianness and avoid
118 unaligned access.) */
119 while (bd->inbufBitCount < bits_wanted) {
120 /* If we need to read more data from file into byte buffer, do
121 so */
122 if (bd->inbufPos == bd->inbufCount) {
123 if (bd->io_error)
124 return 0;
125 bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE);
126 if (bd->inbufCount <= 0) {
127 bd->io_error = RETVAL_UNEXPECTED_INPUT_EOF;
128 return 0;
129 }
130 bd->inbufPos = 0;
131 }
132 /* Avoid 32-bit overflow (dump bit buffer to top of output) */
133 if (bd->inbufBitCount >= 24) {
134 bits = bd->inbufBits&((1 << bd->inbufBitCount)-1);
135 bits_wanted -= bd->inbufBitCount;
136 bits <<= bits_wanted;
137 bd->inbufBitCount = 0;
138 }
139 /* Grab next 8 bits of input from buffer. */
140 bd->inbufBits = (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
141 bd->inbufBitCount += 8;
142 }
143 /* Calculate result */
144 bd->inbufBitCount -= bits_wanted;
145 bits |= (bd->inbufBits >> bd->inbufBitCount)&((1 << bits_wanted)-1);
146
147 return bits;
148}
149
150/* Unpacks the next block and sets up for the inverse burrows-wheeler step. */
151
152static int INIT get_next_block(struct bunzip_data *bd)
153{
154 struct group_data *hufGroup = NULL;
155 int *base = NULL;
156 int *limit = NULL;
157 int dbufCount, nextSym, dbufSize, groupCount, selector,
158 i, j, k, t, runPos, symCount, symTotal, nSelectors,
159 byteCount[256];
160 unsigned char uc, symToByte[256], mtfSymbol[256], *selectors;
161 unsigned int *dbuf, origPtr;
162
163 dbuf = bd->dbuf;
164 dbufSize = bd->dbufSize;
165 selectors = bd->selectors;
166
167 /* Read in header signature and CRC, then validate signature.
168 (last block signature means CRC is for whole file, return now) */
169 i = get_bits(bd, 24);
170 j = get_bits(bd, 24);
171 bd->headerCRC = get_bits(bd, 32);
172 if ((i == 0x177245) && (j == 0x385090))
173 return RETVAL_LAST_BLOCK;
174 if ((i != 0x314159) || (j != 0x265359))
175 return RETVAL_NOT_BZIP_DATA;
176 /* We can add support for blockRandomised if anybody complains.
177 There was some code for this in busybox 1.0.0-pre3, but nobody ever
178 noticed that it didn't actually work. */
179 if (get_bits(bd, 1))
180 return RETVAL_OBSOLETE_INPUT;
181 origPtr = get_bits(bd, 24);
182 if (origPtr > dbufSize)
183 return RETVAL_DATA_ERROR;
184 /* mapping table: if some byte values are never used (encoding things
185 like ascii text), the compression code removes the gaps to have fewer
186 symbols to deal with, and writes a sparse bitfield indicating which
187 values were present. We make a translation table to convert the
188 symbols back to the corresponding bytes. */
189 t = get_bits(bd, 16);
190 symTotal = 0;
191 for (i = 0; i < 16; i++) {
192 if (t&(1 << (15-i))) {
193 k = get_bits(bd, 16);
194 for (j = 0; j < 16; j++)
195 if (k&(1 << (15-j)))
196 symToByte[symTotal++] = (16*i)+j;
197 }
198 }
199 /* How many different Huffman coding groups does this block use? */
200 groupCount = get_bits(bd, 3);
201 if (groupCount < 2 || groupCount > MAX_GROUPS)
202 return RETVAL_DATA_ERROR;
203 /* nSelectors: Every GROUP_SIZE many symbols we select a new
204 Huffman coding group. Read in the group selector list,
205 which is stored as MTF encoded bit runs. (MTF = Move To
206 Front, as each value is used it's moved to the start of the
207 list.) */
208 nSelectors = get_bits(bd, 15);
209 if (!nSelectors)
210 return RETVAL_DATA_ERROR;
211 for (i = 0; i < groupCount; i++)
212 mtfSymbol[i] = i;
213 for (i = 0; i < nSelectors; i++) {
214 /* Get next value */
215 for (j = 0; get_bits(bd, 1); j++)
216 if (j >= groupCount)
217 return RETVAL_DATA_ERROR;
218 /* Decode MTF to get the next selector */
219 uc = mtfSymbol[j];
220 for (; j; j--)
221 mtfSymbol[j] = mtfSymbol[j-1];
222 mtfSymbol[0] = selectors[i] = uc;
223 }
224 /* Read the Huffman coding tables for each group, which code
225 for symTotal literal symbols, plus two run symbols (RUNA,
226 RUNB) */
227 symCount = symTotal+2;
228 for (j = 0; j < groupCount; j++) {
229 unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
230 int minLen, maxLen, pp;
231 /* Read Huffman code lengths for each symbol. They're
232 stored in a way similar to mtf; record a starting
233 value for the first symbol, and an offset from the
234 previous value for every symbol after that.
235 (Subtracting 1 before the loop and then adding it
236 back at the end is an optimization that makes the
237 test inside the loop simpler: symbol length 0
238 becomes negative, so an unsigned inequality catches
239 it.) */
240 t = get_bits(bd, 5)-1;
241 for (i = 0; i < symCount; i++) {
242 for (;;) {
243 if (((unsigned)t) > (MAX_HUFCODE_BITS-1))
244 return RETVAL_DATA_ERROR;
245
246 /* If first bit is 0, stop. Else
247 second bit indicates whether to
248 increment or decrement the value.
249 Optimization: grab 2 bits and unget
250 the second if the first was 0. */
251
252 k = get_bits(bd, 2);
253 if (k < 2) {
254 bd->inbufBitCount++;
255 break;
256 }
257 /* Add one if second bit 1, else
258 * subtract 1. Avoids if/else */
259 t += (((k+1)&2)-1);
260 }
261 /* Correct for the initial -1, to get the
262 * final symbol length */
263 length[i] = t+1;
264 }
265 /* Find largest and smallest lengths in this group */
266 minLen = maxLen = length[0];
267
268 for (i = 1; i < symCount; i++) {
269 if (length[i] > maxLen)
270 maxLen = length[i];
271 else if (length[i] < minLen)
272 minLen = length[i];
273 }
274
275 /* Calculate permute[], base[], and limit[] tables from
276 * length[].
277 *
278 * permute[] is the lookup table for converting
279 * Huffman coded symbols into decoded symbols. base[]
280 * is the amount to subtract from the value of a
281 * Huffman symbol of a given length when using
282 * permute[].
283 *
284 * limit[] indicates the largest numerical value a
285 * symbol with a given number of bits can have. This
286 * is how the Huffman codes can vary in length: each
287 * code with a value > limit[length] needs another
288 * bit.
289 */
290 hufGroup = bd->groups+j;
291 hufGroup->minLen = minLen;
292 hufGroup->maxLen = maxLen;
293 /* Note that minLen can't be smaller than 1, so we
294 adjust the base and limit array pointers so we're
295 not always wasting the first entry. We do this
296 again when using them (during symbol decoding).*/
297 base = hufGroup->base-1;
298 limit = hufGroup->limit-1;
299 /* Calculate permute[]. Concurrently, initialize
300 * temp[] and limit[]. */
301 pp = 0;
302 for (i = minLen; i <= maxLen; i++) {
303 temp[i] = limit[i] = 0;
304 for (t = 0; t < symCount; t++)
305 if (length[t] == i)
306 hufGroup->permute[pp++] = t;
307 }
308 /* Count symbols coded for at each bit length */
309 for (i = 0; i < symCount; i++)
310 temp[length[i]]++;
311 /* Calculate limit[] (the largest symbol-coding value
312 *at each bit length, which is (previous limit <<
313 *1)+symbols at this level), and base[] (number of
314 *symbols to ignore at each bit length, which is limit
315 *minus the cumulative count of symbols coded for
316 *already). */
317 pp = t = 0;
318 for (i = minLen; i < maxLen; i++) {
319 pp += temp[i];
320 /* We read the largest possible symbol size
321 and then unget bits after determining how
322 many we need, and those extra bits could be
323 set to anything. (They're noise from
324 future symbols.) At each level we're
325 really only interested in the first few
326 bits, so here we set all the trailing
327 to-be-ignored bits to 1 so they don't
328 affect the value > limit[length]
329 comparison. */
330 limit[i] = (pp << (maxLen - i)) - 1;
331 pp <<= 1;
332 base[i+1] = pp-(t += temp[i]);
333 }
334 limit[maxLen+1] = INT_MAX; /* Sentinel value for
335 * reading next sym. */
336 limit[maxLen] = pp+temp[maxLen]-1;
337 base[minLen] = 0;
338 }
339 /* We've finished reading and digesting the block header. Now
340 read this block's Huffman coded symbols from the file and
341 undo the Huffman coding and run length encoding, saving the
342 result into dbuf[dbufCount++] = uc */
343
344 /* Initialize symbol occurrence counters and symbol Move To
345 * Front table */
346 for (i = 0; i < 256; i++) {
347 byteCount[i] = 0;
348 mtfSymbol[i] = (unsigned char)i;
349 }
350 /* Loop through compressed symbols. */
351 runPos = dbufCount = symCount = selector = 0;
352 for (;;) {
353 /* Determine which Huffman coding group to use. */
354 if (!(symCount--)) {
355 symCount = GROUP_SIZE-1;
356 if (selector >= nSelectors)
357 return RETVAL_DATA_ERROR;
358 hufGroup = bd->groups+selectors[selector++];
359 base = hufGroup->base-1;
360 limit = hufGroup->limit-1;
361 }
362 /* Read next Huffman-coded symbol. */
363 /* Note: It is far cheaper to read maxLen bits and
364 back up than it is to read minLen bits and then an
365 additional bit at a time, testing as we go.
366 Because there is a trailing last block (with file
367 CRC), there is no danger of the overread causing an
368 unexpected EOF for a valid compressed file. As a
369 further optimization, we do the read inline
370 (falling back to a call to get_bits if the buffer
371 runs dry). The following (up to got_huff_bits:) is
372 equivalent to j = get_bits(bd, hufGroup->maxLen);
373 */
374 while (bd->inbufBitCount < hufGroup->maxLen) {
375 if (bd->inbufPos == bd->inbufCount) {
376 j = get_bits(bd, hufGroup->maxLen);
377 goto got_huff_bits;
378 }
379 bd->inbufBits =
380 (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
381 bd->inbufBitCount += 8;
382 };
383 bd->inbufBitCount -= hufGroup->maxLen;
384 j = (bd->inbufBits >> bd->inbufBitCount)&
385 ((1 << hufGroup->maxLen)-1);
386got_huff_bits:
387 /* Figure out how many bits are in next symbol and
388 * unget extras */
389 i = hufGroup->minLen;
390 while (j > limit[i])
391 ++i;
392 bd->inbufBitCount += (hufGroup->maxLen - i);
393 /* Huffman decode value to get nextSym (with bounds checking) */
394 if ((i > hufGroup->maxLen)
395 || (((unsigned)(j = (j>>(hufGroup->maxLen-i))-base[i]))
396 >= MAX_SYMBOLS))
397 return RETVAL_DATA_ERROR;
398 nextSym = hufGroup->permute[j];
399 /* We have now decoded the symbol, which indicates
400 either a new literal byte, or a repeated run of the
401 most recent literal byte. First, check if nextSym
402 indicates a repeated run, and if so loop collecting
403 how many times to repeat the last literal. */
404 if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */
405 /* If this is the start of a new run, zero out
406 * counter */
407 if (!runPos) {
408 runPos = 1;
409 t = 0;
410 }
411 /* Neat trick that saves 1 symbol: instead of
412 or-ing 0 or 1 at each bit position, add 1
413 or 2 instead. For example, 1011 is 1 << 0
414 + 1 << 1 + 2 << 2. 1010 is 2 << 0 + 2 << 1
415 + 1 << 2. You can make any bit pattern
416 that way using 1 less symbol than the basic
417 or 0/1 method (except all bits 0, which
418 would use no symbols, but a run of length 0
419 doesn't mean anything in this context).
420 Thus space is saved. */
421 t += (runPos << nextSym);
422 /* +runPos if RUNA; +2*runPos if RUNB */
423
424 runPos <<= 1;
425 continue;
426 }
427 /* When we hit the first non-run symbol after a run,
428 we now know how many times to repeat the last
429 literal, so append that many copies to our buffer
430 of decoded symbols (dbuf) now. (The last literal
431 used is the one at the head of the mtfSymbol
432 array.) */
433 if (runPos) {
434 runPos = 0;
435 if (dbufCount+t >= dbufSize)
436 return RETVAL_DATA_ERROR;
437
438 uc = symToByte[mtfSymbol[0]];
439 byteCount[uc] += t;
440 while (t--)
441 dbuf[dbufCount++] = uc;
442 }
443 /* Is this the terminating symbol? */
444 if (nextSym > symTotal)
445 break;
446 /* At this point, nextSym indicates a new literal
447 character. Subtract one to get the position in the
448 MTF array at which this literal is currently to be
449 found. (Note that the result can't be -1 or 0,
450 because 0 and 1 are RUNA and RUNB. But another
451 instance of the first symbol in the mtf array,
452 position 0, would have been handled as part of a
453 run above. Therefore 1 unused mtf position minus 2
454 non-literal nextSym values equals -1.) */
455 if (dbufCount >= dbufSize)
456 return RETVAL_DATA_ERROR;
457 i = nextSym - 1;
458 uc = mtfSymbol[i];
459 /* Adjust the MTF array. Since we typically expect to
460 *move only a small number of symbols, and are bound
461 *by 256 in any case, using memmove here would
462 *typically be bigger and slower due to function call
463 *overhead and other assorted setup costs. */
464 do {
465 mtfSymbol[i] = mtfSymbol[i-1];
466 } while (--i);
467 mtfSymbol[0] = uc;
468 uc = symToByte[uc];
469 /* We have our literal byte. Save it into dbuf. */
470 byteCount[uc]++;
471 dbuf[dbufCount++] = (unsigned int)uc;
472 }
473 /* At this point, we've read all the Huffman-coded symbols
474 (and repeated runs) for this block from the input stream,
475 and decoded them into the intermediate buffer. There are
476 dbufCount many decoded bytes in dbuf[]. Now undo the
477 Burrows-Wheeler transform on dbuf. See
478 http://dogma.net/markn/articles/bwt/bwt.htm
479 */
480 /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
481 j = 0;
482 for (i = 0; i < 256; i++) {
483 k = j+byteCount[i];
484 byteCount[i] = j;
485 j = k;
486 }
487 /* Figure out what order dbuf would be in if we sorted it. */
488 for (i = 0; i < dbufCount; i++) {
489 uc = (unsigned char)(dbuf[i] & 0xff);
490 dbuf[byteCount[uc]] |= (i << 8);
491 byteCount[uc]++;
492 }
493 /* Decode first byte by hand to initialize "previous" byte.
494 Note that it doesn't get output, and if the first three
495 characters are identical it doesn't qualify as a run (hence
496 writeRunCountdown = 5). */
497 if (dbufCount) {
498 if (origPtr >= dbufCount)
499 return RETVAL_DATA_ERROR;
500 bd->writePos = dbuf[origPtr];
501 bd->writeCurrent = (unsigned char)(bd->writePos&0xff);
502 bd->writePos >>= 8;
503 bd->writeRunCountdown = 5;
504 }
505 bd->writeCount = dbufCount;
506
507 return RETVAL_OK;
508}
509
510/* Undo burrows-wheeler transform on intermediate buffer to produce output.
511 If start_bunzip was initialized with out_fd =-1, then up to len bytes of
512 data are written to outbuf. Return value is number of bytes written or
513 error (all errors are negative numbers). If out_fd!=-1, outbuf and len
514 are ignored, data is written to out_fd and return is RETVAL_OK or error.
515*/
516
517static int INIT read_bunzip(struct bunzip_data *bd, char *outbuf, int len)
518{
519 const unsigned int *dbuf;
520 int pos, xcurrent, previous, gotcount;
521
522 /* If last read was short due to end of file, return last block now */
523 if (bd->writeCount < 0)
524 return bd->writeCount;
525
526 gotcount = 0;
527 dbuf = bd->dbuf;
528 pos = bd->writePos;
529 xcurrent = bd->writeCurrent;
530
531 /* We will always have pending decoded data to write into the output
532 buffer unless this is the very first call (in which case we haven't
533 Huffman-decoded a block into the intermediate buffer yet). */
534
535 if (bd->writeCopies) {
536 /* Inside the loop, writeCopies means extra copies (beyond 1) */
537 --bd->writeCopies;
538 /* Loop outputting bytes */
539 for (;;) {
540 /* If the output buffer is full, snapshot
541 * state and return */
542 if (gotcount >= len) {
543 bd->writePos = pos;
544 bd->writeCurrent = xcurrent;
545 bd->writeCopies++;
546 return len;
547 }
548 /* Write next byte into output buffer, updating CRC */
549 outbuf[gotcount++] = xcurrent;
550 bd->writeCRC = (((bd->writeCRC) << 8)
551 ^bd->crc32Table[((bd->writeCRC) >> 24)
552 ^xcurrent]);
553 /* Loop now if we're outputting multiple
554 * copies of this byte */
555 if (bd->writeCopies) {
556 --bd->writeCopies;
557 continue;
558 }
559decode_next_byte:
560 if (!bd->writeCount--)
561 break;
562 /* Follow sequence vector to undo
563 * Burrows-Wheeler transform */
564 previous = xcurrent;
565 pos = dbuf[pos];
566 xcurrent = pos&0xff;
567 pos >>= 8;
568 /* After 3 consecutive copies of the same
569 byte, the 4th is a repeat count. We count
570 down from 4 instead *of counting up because
571 testing for non-zero is faster */
572 if (--bd->writeRunCountdown) {
573 if (xcurrent != previous)
574 bd->writeRunCountdown = 4;
575 } else {
576 /* We have a repeated run, this byte
577 * indicates the count */
578 bd->writeCopies = xcurrent;
579 xcurrent = previous;
580 bd->writeRunCountdown = 5;
581 /* Sometimes there are just 3 bytes
582 * (run length 0) */
583 if (!bd->writeCopies)
584 goto decode_next_byte;
585 /* Subtract the 1 copy we'd output
586 * anyway to get extras */
587 --bd->writeCopies;
588 }
589 }
590 /* Decompression of this block completed successfully */
591 bd->writeCRC = ~bd->writeCRC;
592 bd->totalCRC = ((bd->totalCRC << 1) |
593 (bd->totalCRC >> 31)) ^ bd->writeCRC;
594 /* If this block had a CRC error, force file level CRC error. */
595 if (bd->writeCRC != bd->headerCRC) {
596 bd->totalCRC = bd->headerCRC+1;
597 return RETVAL_LAST_BLOCK;
598 }
599 }
600
601 /* Refill the intermediate buffer by Huffman-decoding next
602 * block of input */
603 /* (previous is just a convenient unused temp variable here) */
604 previous = get_next_block(bd);
605 if (previous) {
606 bd->writeCount = previous;
607 return (previous != RETVAL_LAST_BLOCK) ? previous : gotcount;
608 }
609 bd->writeCRC = 0xffffffffUL;
610 pos = bd->writePos;
611 xcurrent = bd->writeCurrent;
612 goto decode_next_byte;
613}
614
615static int INIT nofill(void *buf, unsigned int len)
616{
617 return -1;
618}
619
620/* Allocate the structure, read file header. If in_fd ==-1, inbuf must contain
621 a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are
622 ignored, and data is read from file handle into temporary buffer. */
623static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
624 int (*fill)(void*, unsigned int))
625{
626 struct bunzip_data *bd;
627 unsigned int i, j, c;
628 const unsigned int BZh0 =
629 (((unsigned int)'B') << 24)+(((unsigned int)'Z') << 16)
630 +(((unsigned int)'h') << 8)+(unsigned int)'0';
631
632 /* Figure out how much data to allocate */
633 i = sizeof(struct bunzip_data);
634
635 /* Allocate bunzip_data. Most fields initialize to zero. */
636 bd = *bdp = malloc(i);
637 memset(bd, 0, sizeof(struct bunzip_data));
638 /* Setup input buffer */
639 bd->inbuf = inbuf;
640 bd->inbufCount = len;
641 if (fill != NULL)
642 bd->fill = fill;
643 else
644 bd->fill = nofill;
645
646 /* Init the CRC32 table (big endian) */
647 for (i = 0; i < 256; i++) {
648 c = i << 24;
649 for (j = 8; j; j--)
650 c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1);
651 bd->crc32Table[i] = c;
652 }
653
654 /* Ensure that file starts with "BZh['1'-'9']." */
655 i = get_bits(bd, 32);
656 if (((unsigned int)(i-BZh0-1)) >= 9)
657 return RETVAL_NOT_BZIP_DATA;
658
659 /* Fourth byte (ascii '1'-'9') indicates block size in units of 100k of
660 uncompressed data. Allocate intermediate buffer for block. */
661 bd->dbufSize = 100000*(i-BZh0);
662
663 bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
664 return RETVAL_OK;
665}
666
667/* Example usage: decompress src_fd to dst_fd. (Stops at end of bzip2 data,
668 not end of file.) */
669STATIC int INIT bunzip2(unsigned char *buf, int len,
670 int(*fill)(void*, unsigned int),
671 int(*flush)(void*, unsigned int),
672 unsigned char *outbuf,
673 int *pos,
674 void(*error_fn)(char *x))
675{
676 struct bunzip_data *bd;
677 int i = -1;
678 unsigned char *inbuf;
679
680 set_error_fn(error_fn);
681 if (flush)
682 outbuf = malloc(BZIP2_IOBUF_SIZE);
683 else
684 len -= 4; /* Uncompressed size hack active in pre-boot
685 environment */
686 if (!outbuf) {
687 error("Could not allocate output bufer");
688 return -1;
689 }
690 if (buf)
691 inbuf = buf;
692 else
693 inbuf = malloc(BZIP2_IOBUF_SIZE);
694 if (!inbuf) {
695 error("Could not allocate input bufer");
696 goto exit_0;
697 }
698 i = start_bunzip(&bd, inbuf, len, fill);
699 if (!i) {
700 for (;;) {
701 i = read_bunzip(bd, outbuf, BZIP2_IOBUF_SIZE);
702 if (i <= 0)
703 break;
704 if (!flush)
705 outbuf += i;
706 else
707 if (i != flush(outbuf, i)) {
708 i = RETVAL_UNEXPECTED_OUTPUT_EOF;
709 break;
710 }
711 }
712 }
713 /* Check CRC and release memory */
714 if (i == RETVAL_LAST_BLOCK) {
715 if (bd->headerCRC != bd->totalCRC)
716 error("Data integrity error when decompressing.");
717 else
718 i = RETVAL_OK;
719 } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) {
720 error("Compressed file ends unexpectedly");
721 }
722 if (bd->dbuf)
723 large_free(bd->dbuf);
724 if (pos)
725 *pos = bd->inbufPos;
726 free(bd);
727 if (!buf)
728 free(inbuf);
729exit_0:
730 if (flush)
731 free(outbuf);
732 return i;
733}
734
735#define decompress bunzip2
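A standalone worked example of the RUNA/RUNB trick described in the dbuf-decoding comments above (a user-space sketch, not kernel code): each RUNA adds runPos and each RUNB adds 2*runPos while runPos doubles each step, so the sequence RUNB, RUNA, RUNA encodes a run of 2+2+4 = 8 bytes.

	#include <stdio.h>

	int main(void)
	{
		int syms[] = {1, 0, 0};	/* SYMBOL_RUNB, SYMBOL_RUNA, SYMBOL_RUNA */
		int runPos = 1, t = 0, i;

		for (i = 0; i < 3; i++) {
			t += runPos << syms[i];	/* +runPos if RUNA, +2*runPos if RUNB */
			runPos <<= 1;
		}
		printf("run length = %d\n", t);	/* prints 8 */
		return 0;
	}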
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
new file mode 100644
index 000000000000..839a329b4fc4
--- /dev/null
+++ b/lib/decompress_inflate.c
@@ -0,0 +1,167 @@
1#ifdef STATIC
2/* Pre-boot environment: included */
3
4/* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots of
5 * errors about console_printk etc... on ARM */
6#define _LINUX_KERNEL_H
7
8#include "zlib_inflate/inftrees.c"
9#include "zlib_inflate/inffast.c"
10#include "zlib_inflate/inflate.c"
11
12#else /* STATIC */
13/* initramfs et al: linked */
14
15#include <linux/zutil.h>
16
17#include "zlib_inflate/inftrees.h"
18#include "zlib_inflate/inffast.h"
19#include "zlib_inflate/inflate.h"
20
21#include "zlib_inflate/infutil.h"
22
23#endif /* STATIC */
24
25#include <linux/decompress/mm.h>
26
27#define INBUF_LEN (16*1024)
28
29/* Included from initramfs et al code */
30STATIC int INIT gunzip(unsigned char *buf, int len,
31 int(*fill)(void*, unsigned int),
32 int(*flush)(void*, unsigned int),
33 unsigned char *out_buf,
34 int *pos,
35 void(*error_fn)(char *x)) {
36 u8 *zbuf;
37 struct z_stream_s *strm;
38 int rc;
39 size_t out_len;
40
41 set_error_fn(error_fn);
42 rc = -1;
43 if (flush) {
44 out_len = 0x8000; /* 32 K */
45 out_buf = malloc(out_len);
46 } else {
47 out_len = 0x7fffffff; /* no limit */
48 }
49 if (!out_buf) {
50 error("Out of memory while allocating output buffer");
51 goto gunzip_nomem1;
52 }
53
54 if (buf)
55 zbuf = buf;
56 else {
57 zbuf = malloc(INBUF_LEN);
58 len = 0;
59 }
60 if (!zbuf) {
61 error("Out of memory while allocating input buffer");
62 goto gunzip_nomem2;
63 }
64
65 strm = malloc(sizeof(*strm));
66 if (strm == NULL) {
67 error("Out of memory while allocating z_stream");
68 goto gunzip_nomem3;
69 }
70
71 strm->workspace = malloc(flush ? zlib_inflate_workspacesize() :
72 sizeof(struct inflate_state));
73 if (strm->workspace == NULL) {
74 error("Out of memory while allocating workspace");
75 goto gunzip_nomem4;
76 }
77
78 if (len == 0)
79 len = fill(zbuf, INBUF_LEN);
80
81 /* verify the gzip header */
82 if (len < 10 ||
83 zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 0x08) {
84 if (pos)
85 *pos = 0;
86 error("Not a gzip file");
87 goto gunzip_5;
88 }
89
90 /* skip over gzip header (1f,8b,08... 10 bytes total +
91 * possible asciz filename)
92 */
93 strm->next_in = zbuf + 10;
94 /* skip over asciz filename */
95 if (zbuf[3] & 0x8) {
96 while (strm->next_in[0])
97 strm->next_in++;
98 strm->next_in++;
99 }
100 strm->avail_in = len - (strm->next_in - zbuf);
101
102 strm->next_out = out_buf;
103 strm->avail_out = out_len;
104
105 rc = zlib_inflateInit2(strm, -MAX_WBITS);
106
107 if (!flush) {
108 WS(strm)->inflate_state.wsize = 0;
109 WS(strm)->inflate_state.window = NULL;
110 }
111
112 while (rc == Z_OK) {
113 if (strm->avail_in == 0) {
114 /* TODO: handle case where both pos and fill are set */
115 len = fill(zbuf, INBUF_LEN);
116 if (len < 0) {
117 rc = -1;
118 error("read error");
119 break;
120 }
121 strm->next_in = zbuf;
122 strm->avail_in = len;
123 }
124 rc = zlib_inflate(strm, 0);
125
126 /* Write any data generated */
127 if (flush && strm->next_out > out_buf) {
128 int l = strm->next_out - out_buf;
129 if (l != flush(out_buf, l)) {
130 rc = -1;
131 error("write error");
132 break;
133 }
134 strm->next_out = out_buf;
135 strm->avail_out = out_len;
136 }
137
138 /* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
139 if (rc == Z_STREAM_END) {
140 rc = 0;
141 break;
142 } else if (rc != Z_OK) {
143 error("uncompression error");
144 rc = -1;
145 }
146 }
147
148 zlib_inflateEnd(strm);
149 if (pos)
150 /* add + 8 to skip over trailer */
151 *pos = strm->next_in - zbuf+8;
152
153gunzip_5:
154 free(strm->workspace);
155gunzip_nomem4:
156 free(strm);
157gunzip_nomem3:
158 if (!buf)
159 free(zbuf);
160gunzip_nomem2:
161 if (flush)
162 free(out_buf);
163gunzip_nomem1:
164 return rc; /* returns Z_OK (0) if successful */
165}
166
167#define decompress gunzip
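The header check above follows the gzip layout from RFC 1952. A self-contained sketch of the same skip logic (the helper name is hypothetical), which, like the kernel code, ignores the optional FEXTRA/FCOMMENT/FHCRC fields:

	#include <stddef.h>

	static const unsigned char *gzip_payload(const unsigned char *buf,
						 size_t len)
	{
		const unsigned char *p;

		if (len < 10 || buf[0] != 0x1f || buf[1] != 0x8b || buf[2] != 0x08)
			return NULL;		/* not a gzip/deflate member */
		p = buf + 10;			/* fixed 10-byte header */
		if (buf[3] & 0x08)		/* FNAME flag: skip asciz name */
			while (p < buf + len && *p++)
				;
		return p;			/* first byte of deflate data */
	}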
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
new file mode 100644
index 000000000000..546f2f4c157e
--- /dev/null
+++ b/lib/decompress_unlzma.c
@@ -0,0 +1,647 @@
1/* Lzma decompressor for Linux kernel. Shamelessly snarfed
2 *from busybox 1.1.1
3 *
4 *Linux kernel adaptation
5 *Copyright (C) 2006 Alain < alain@knaff.lu >
6 *
7 *Based on small lzma deflate implementation/Small range coder
8 *implementation for lzma.
9 *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
10 *
11 *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
12 *Copyright (C) 1999-2005 Igor Pavlov
13 *
14 *Copyrights of the parts, see headers below.
15 *
16 *
17 *This program is free software; you can redistribute it and/or
18 *modify it under the terms of the GNU Lesser General Public
19 *License as published by the Free Software Foundation; either
20 *version 2.1 of the License, or (at your option) any later version.
21 *
22 *This program is distributed in the hope that it will be useful,
23 *but WITHOUT ANY WARRANTY; without even the implied warranty of
24 *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
25 *Lesser General Public License for more details.
26 *
27 *You should have received a copy of the GNU Lesser General Public
28 *License along with this library; if not, write to the Free Software
29 *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
30 */
31
32#ifndef STATIC
33#include <linux/decompress/unlzma.h>
34#endif /* STATIC */
35
36#include <linux/decompress/mm.h>
37
38#define MIN(a, b) (((a) < (b)) ? (a) : (b))
39
40static long long INIT read_int(unsigned char *ptr, int size)
41{
42 int i;
43 long long ret = 0;
44
45 for (i = 0; i < size; i++)
46 ret = (ret << 8) | ptr[size-i-1];
47 return ret;
48}
49
50#define ENDIAN_CONVERT(x) \
51 x = (typeof(x))read_int((unsigned char *)&x, sizeof(x))
52
53
54/* Small range coder implementation for lzma.
55 *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
56 *
57 *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
58 *Copyright (c) 1999-2005 Igor Pavlov
59 */
60
61#include <linux/compiler.h>
62
63#define LZMA_IOBUF_SIZE 0x10000
64
65struct rc {
66 int (*fill)(void*, unsigned int);
67 uint8_t *ptr;
68 uint8_t *buffer;
69 uint8_t *buffer_end;
70 int buffer_size;
71 uint32_t code;
72 uint32_t range;
73 uint32_t bound;
74};
75
76
77#define RC_TOP_BITS 24
78#define RC_MOVE_BITS 5
79#define RC_MODEL_TOTAL_BITS 11
80
81
82/* Called twice: once at startup and once in rc_normalize() */
83static void INIT rc_read(struct rc *rc)
84{
85 rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE);
86 if (rc->buffer_size <= 0)
87 error("unexpected EOF");
88 rc->ptr = rc->buffer;
89 rc->buffer_end = rc->buffer + rc->buffer_size;
90}
91
92/* Called once */
93static inline void INIT rc_init(struct rc *rc,
94 int (*fill)(void*, unsigned int),
95 char *buffer, int buffer_size)
96{
97 rc->fill = fill;
98 rc->buffer = (uint8_t *)buffer;
99 rc->buffer_size = buffer_size;
100 rc->buffer_end = rc->buffer + rc->buffer_size;
101 rc->ptr = rc->buffer;
102
103 rc->code = 0;
104 rc->range = 0xFFFFFFFF;
105}
106
107static inline void INIT rc_init_code(struct rc *rc)
108{
109 int i;
110
111 for (i = 0; i < 5; i++) {
112 if (rc->ptr >= rc->buffer_end)
113 rc_read(rc);
114 rc->code = (rc->code << 8) | *rc->ptr++;
115 }
116}
117
118
119/* Called once. TODO: bb_maybe_free() */
120static inline void INIT rc_free(struct rc *rc)
121{
122 free(rc->buffer);
123}
124
125/* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */
126static void INIT rc_do_normalize(struct rc *rc)
127{
128 if (rc->ptr >= rc->buffer_end)
129 rc_read(rc);
130 rc->range <<= 8;
131 rc->code = (rc->code << 8) | *rc->ptr++;
132}
133static inline void INIT rc_normalize(struct rc *rc)
134{
135 if (rc->range < (1 << RC_TOP_BITS))
136 rc_do_normalize(rc);
137}
138
139/* Called 9 times */
140/* Why rc_is_bit_0_helper exists?
141 *Because we want to always expose (rc->code < rc->bound) to optimizer
142 */
143static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p)
144{
145 rc_normalize(rc);
146 rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
147 return rc->bound;
148}
149static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p)
150{
151 uint32_t t = rc_is_bit_0_helper(rc, p);
152 return rc->code < t;
153}
154
155/* Called ~10 times, but very small, thus inlined */
156static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p)
157{
158 rc->range = rc->bound;
159 *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
160}
161static inline void rc_update_bit_1(struct rc *rc, uint16_t *p)
162{
163 rc->range -= rc->bound;
164 rc->code -= rc->bound;
165 *p -= *p >> RC_MOVE_BITS;
166}
167
168/* Called 4 times in unlzma loop */
169static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol)
170{
171 if (rc_is_bit_0(rc, p)) {
172 rc_update_bit_0(rc, p);
173 *symbol *= 2;
174 return 0;
175 } else {
176 rc_update_bit_1(rc, p);
177 *symbol = *symbol * 2 + 1;
178 return 1;
179 }
180}
181
182/* Called once */
183static inline int INIT rc_direct_bit(struct rc *rc)
184{
185 rc_normalize(rc);
186 rc->range >>= 1;
187 if (rc->code >= rc->range) {
188 rc->code -= rc->range;
189 return 1;
190 }
191 return 0;
192}
193
194/* Called twice */
195static inline void INIT
196rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
197{
198 int i = num_levels;
199
200 *symbol = 1;
201 while (i--)
202 rc_get_bit(rc, p + *symbol, symbol);
203 *symbol -= 1 << num_levels;
204}
205
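To make the range-coder arithmetic above concrete, a user-space sketch of one decode step with made-up values: the model is 11-bit (probabilities out of 2048, initialized to 1024), bound = p * (range >> 11), a code below bound decodes a 0 bit and nudges p up by 1/32 of the remaining headroom (RC_MOVE_BITS = 5), otherwise a 1 bit is decoded and p shrinks.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t range = 0x10000000, code = 0x07ffffff, bound;
		uint16_t p = 1 << 10;			/* 1024/2048 = 50% */

		bound = p * (range >> 11);		/* 0x08000000 */
		if (code < bound) {			/* decode bit 0 */
			range = bound;
			p += ((1 << 11) - p) >> 5;	/* p becomes 1056 */
		} else {				/* decode bit 1 */
			range -= bound;
			code -= bound;
			p -= p >> 5;
		}
		printf("p = %u, range = 0x%08x\n", (unsigned)p, (unsigned)range);
		return 0;
	}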
206
207/*
208 * Small lzma deflate implementation.
209 * Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
210 *
211 * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
212 * Copyright (C) 1999-2005 Igor Pavlov
213 */
214
215
216struct lzma_header {
217 uint8_t pos;
218 uint32_t dict_size;
219 uint64_t dst_size;
220} __attribute__ ((packed)) ;
221
222
223#define LZMA_BASE_SIZE 1846
224#define LZMA_LIT_SIZE 768
225
226#define LZMA_NUM_POS_BITS_MAX 4
227
228#define LZMA_LEN_NUM_LOW_BITS 3
229#define LZMA_LEN_NUM_MID_BITS 3
230#define LZMA_LEN_NUM_HIGH_BITS 8
231
232#define LZMA_LEN_CHOICE 0
233#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
234#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
235#define LZMA_LEN_MID (LZMA_LEN_LOW \
236 + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
237#define LZMA_LEN_HIGH (LZMA_LEN_MID \
238 +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
239#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))
240
241#define LZMA_NUM_STATES 12
242#define LZMA_NUM_LIT_STATES 7
243
244#define LZMA_START_POS_MODEL_INDEX 4
245#define LZMA_END_POS_MODEL_INDEX 14
246#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))
247
248#define LZMA_NUM_POS_SLOT_BITS 6
249#define LZMA_NUM_LEN_TO_POS_STATES 4
250
251#define LZMA_NUM_ALIGN_BITS 4
252
253#define LZMA_MATCH_MIN_LEN 2
254
255#define LZMA_IS_MATCH 0
256#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
257#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
258#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
259#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
260#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
261#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
262 + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
263#define LZMA_SPEC_POS (LZMA_POS_SLOT \
264 +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
265#define LZMA_ALIGN (LZMA_SPEC_POS \
266 + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
267#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
268#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
269#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)
270
271
272struct writer {
273 uint8_t *buffer;
274 uint8_t previous_byte;
275 size_t buffer_pos;
276 int bufsize;
277 size_t global_pos;
278 int(*flush)(void*, unsigned int);
279 struct lzma_header *header;
280};
281
282struct cstate {
283 int state;
284 uint32_t rep0, rep1, rep2, rep3;
285};
286
287static inline size_t INIT get_pos(struct writer *wr)
288{
289 return
290 wr->global_pos + wr->buffer_pos;
291}
292
293static inline uint8_t INIT peek_old_byte(struct writer *wr,
294 uint32_t offs)
295{
296 if (!wr->flush) {
297 int32_t pos;
298 while (offs > wr->header->dict_size)
299 offs -= wr->header->dict_size;
300 pos = wr->buffer_pos - offs;
301 return wr->buffer[pos];
302 } else {
303 uint32_t pos = wr->buffer_pos - offs;
304 while (pos >= wr->header->dict_size)
305 pos += wr->header->dict_size;
306 return wr->buffer[pos];
307 }
308
309}
310
311static inline void INIT write_byte(struct writer *wr, uint8_t byte)
312{
313 wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte;
314 if (wr->flush && wr->buffer_pos == wr->header->dict_size) {
315 wr->buffer_pos = 0;
316 wr->global_pos += wr->header->dict_size;
317 wr->flush((char *)wr->buffer, wr->header->dict_size);
318 }
319}
320
321
322static inline void INIT copy_byte(struct writer *wr, uint32_t offs)
323{
324 write_byte(wr, peek_old_byte(wr, offs));
325}
326
327static inline void INIT copy_bytes(struct writer *wr,
328 uint32_t rep0, int len)
329{
330 do {
331 copy_byte(wr, rep0);
332 len--;
333 } while (len != 0 && wr->buffer_pos < wr->header->dst_size);
334}
335
336static inline void INIT process_bit0(struct writer *wr, struct rc *rc,
337 struct cstate *cst, uint16_t *p,
338 int pos_state, uint16_t *prob,
339 int lc, uint32_t literal_pos_mask) {
340 int mi = 1;
341 rc_update_bit_0(rc, prob);
342 prob = (p + LZMA_LITERAL +
343 (LZMA_LIT_SIZE
344 * (((get_pos(wr) & literal_pos_mask) << lc)
345 + (wr->previous_byte >> (8 - lc))))
346 );
347
348 if (cst->state >= LZMA_NUM_LIT_STATES) {
349 int match_byte = peek_old_byte(wr, cst->rep0);
350 do {
351 int bit;
352 uint16_t *prob_lit;
353
354 match_byte <<= 1;
355 bit = match_byte & 0x100;
356 prob_lit = prob + 0x100 + bit + mi;
357 if (rc_get_bit(rc, prob_lit, &mi)) {
358 if (!bit)
359 break;
360 } else {
361 if (bit)
362 break;
363 }
364 } while (mi < 0x100);
365 }
366 while (mi < 0x100) {
367 uint16_t *prob_lit = prob + mi;
368 rc_get_bit(rc, prob_lit, &mi);
369 }
370 write_byte(wr, mi);
371 if (cst->state < 4)
372 cst->state = 0;
373 else if (cst->state < 10)
374 cst->state -= 3;
375 else
376 cst->state -= 6;
377}
378
379static inline void INIT process_bit1(struct writer *wr, struct rc *rc,
380 struct cstate *cst, uint16_t *p,
381 int pos_state, uint16_t *prob) {
382 int offset;
383 uint16_t *prob_len;
384 int num_bits;
385 int len;
386
387 rc_update_bit_1(rc, prob);
388 prob = p + LZMA_IS_REP + cst->state;
389 if (rc_is_bit_0(rc, prob)) {
390 rc_update_bit_0(rc, prob);
391 cst->rep3 = cst->rep2;
392 cst->rep2 = cst->rep1;
393 cst->rep1 = cst->rep0;
394 cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3;
395 prob = p + LZMA_LEN_CODER;
396 } else {
397 rc_update_bit_1(rc, prob);
398 prob = p + LZMA_IS_REP_G0 + cst->state;
399 if (rc_is_bit_0(rc, prob)) {
400 rc_update_bit_0(rc, prob);
401 prob = (p + LZMA_IS_REP_0_LONG
402 + (cst->state <<
403 LZMA_NUM_POS_BITS_MAX) +
404 pos_state);
405 if (rc_is_bit_0(rc, prob)) {
406 rc_update_bit_0(rc, prob);
407
408 cst->state = cst->state < LZMA_NUM_LIT_STATES ?
409 9 : 11;
410 copy_byte(wr, cst->rep0);
411 return;
412 } else {
413 rc_update_bit_1(rc, prob);
414 }
415 } else {
416 uint32_t distance;
417
418 rc_update_bit_1(rc, prob);
419 prob = p + LZMA_IS_REP_G1 + cst->state;
420 if (rc_is_bit_0(rc, prob)) {
421 rc_update_bit_0(rc, prob);
422 distance = cst->rep1;
423 } else {
424 rc_update_bit_1(rc, prob);
425 prob = p + LZMA_IS_REP_G2 + cst->state;
426 if (rc_is_bit_0(rc, prob)) {
427 rc_update_bit_0(rc, prob);
428 distance = cst->rep2;
429 } else {
430 rc_update_bit_1(rc, prob);
431 distance = cst->rep3;
432 cst->rep3 = cst->rep2;
433 }
434 cst->rep2 = cst->rep1;
435 }
436 cst->rep1 = cst->rep0;
437 cst->rep0 = distance;
438 }
439 cst->state = cst->state < LZMA_NUM_LIT_STATES ? 8 : 11;
440 prob = p + LZMA_REP_LEN_CODER;
441 }
442
443 prob_len = prob + LZMA_LEN_CHOICE;
444 if (rc_is_bit_0(rc, prob_len)) {
445 rc_update_bit_0(rc, prob_len);
446 prob_len = (prob + LZMA_LEN_LOW
447 + (pos_state <<
448 LZMA_LEN_NUM_LOW_BITS));
449 offset = 0;
450 num_bits = LZMA_LEN_NUM_LOW_BITS;
451 } else {
452 rc_update_bit_1(rc, prob_len);
453 prob_len = prob + LZMA_LEN_CHOICE_2;
454 if (rc_is_bit_0(rc, prob_len)) {
455 rc_update_bit_0(rc, prob_len);
456 prob_len = (prob + LZMA_LEN_MID
457 + (pos_state <<
458 LZMA_LEN_NUM_MID_BITS));
459 offset = 1 << LZMA_LEN_NUM_LOW_BITS;
460 num_bits = LZMA_LEN_NUM_MID_BITS;
461 } else {
462 rc_update_bit_1(rc, prob_len);
463 prob_len = prob + LZMA_LEN_HIGH;
464 offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
465 + (1 << LZMA_LEN_NUM_MID_BITS));
466 num_bits = LZMA_LEN_NUM_HIGH_BITS;
467 }
468 }
469
470 rc_bit_tree_decode(rc, prob_len, num_bits, &len);
471 len += offset;
472
473 if (cst->state < 4) {
474 int pos_slot;
475
476 cst->state += LZMA_NUM_LIT_STATES;
477 prob =
478 p + LZMA_POS_SLOT +
479 ((len <
480 LZMA_NUM_LEN_TO_POS_STATES ? len :
481 LZMA_NUM_LEN_TO_POS_STATES - 1)
482 << LZMA_NUM_POS_SLOT_BITS);
483 rc_bit_tree_decode(rc, prob,
484 LZMA_NUM_POS_SLOT_BITS,
485 &pos_slot);
486 if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
487 int i, mi;
488 num_bits = (pos_slot >> 1) - 1;
489 cst->rep0 = 2 | (pos_slot & 1);
490 if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
491 cst->rep0 <<= num_bits;
492 prob = p + LZMA_SPEC_POS +
493 cst->rep0 - pos_slot - 1;
494 } else {
495 num_bits -= LZMA_NUM_ALIGN_BITS;
496 while (num_bits--)
497 cst->rep0 = (cst->rep0 << 1) |
498 rc_direct_bit(rc);
499 prob = p + LZMA_ALIGN;
500 cst->rep0 <<= LZMA_NUM_ALIGN_BITS;
501 num_bits = LZMA_NUM_ALIGN_BITS;
502 }
503 i = 1;
504 mi = 1;
505 while (num_bits--) {
506 if (rc_get_bit(rc, prob + mi, &mi))
507 cst->rep0 |= i;
508 i <<= 1;
509 }
510 } else
511 cst->rep0 = pos_slot;
512 if (++(cst->rep0) == 0)
513 return;
514 }
515
516 len += LZMA_MATCH_MIN_LEN;
517
518 copy_bytes(wr, cst->rep0, len);
519}
520
521
522
523STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
524 int(*fill)(void*, unsigned int),
525 int(*flush)(void*, unsigned int),
526 unsigned char *output,
527 int *posp,
528 void(*error_fn)(char *x)
529 )
530{
531 struct lzma_header header;
532 int lc, pb, lp;
533 uint32_t pos_state_mask;
534 uint32_t literal_pos_mask;
535 uint16_t *p;
536 int num_probs;
537 struct rc rc;
538 int i, mi;
539 struct writer wr;
540 struct cstate cst;
541 unsigned char *inbuf;
542 int ret = -1;
543
544 set_error_fn(error_fn);
545 if (!flush)
546 in_len -= 4; /* Uncompressed size hack active in pre-boot
547 environment */
548 if (buf)
549 inbuf = buf;
550 else
551 inbuf = malloc(LZMA_IOBUF_SIZE);
552 if (!inbuf) {
553 error("Could not allocate input bufer");
554 goto exit_0;
555 }
556
557 cst.state = 0;
558 cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1;
559
560 wr.header = &header;
561 wr.flush = flush;
562 wr.global_pos = 0;
563 wr.previous_byte = 0;
564 wr.buffer_pos = 0;
565
566 rc_init(&rc, fill, inbuf, in_len);
567
568 for (i = 0; i < sizeof(header); i++) {
569 if (rc.ptr >= rc.buffer_end)
570 rc_read(&rc);
571 ((unsigned char *)&header)[i] = *rc.ptr++;
572 }
573
574 if (header.pos >= (9 * 5 * 5))
575 error("bad header");
576
577 mi = 0;
578 lc = header.pos;
579 while (lc >= 9) {
580 mi++;
581 lc -= 9;
582 }
583 pb = 0;
584 lp = mi;
585 while (lp >= 5) {
586 pb++;
587 lp -= 5;
588 }
589 pos_state_mask = (1 << pb) - 1;
590 literal_pos_mask = (1 << lp) - 1;
591
592 ENDIAN_CONVERT(header.dict_size);
593 ENDIAN_CONVERT(header.dst_size);
594
595 if (header.dict_size == 0)
596 header.dict_size = 1;
597
598 if (output)
599 wr.buffer = output;
600 else {
601 wr.bufsize = MIN(header.dst_size, header.dict_size);
602 wr.buffer = large_malloc(wr.bufsize);
603 }
604 if (wr.buffer == NULL)
605 goto exit_1;
606
607 num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
608 p = (uint16_t *) large_malloc(num_probs * sizeof(*p));
609 if (p == 0)
610 goto exit_2;
611 num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
612 for (i = 0; i < num_probs; i++)
613 p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
614
615 rc_init_code(&rc);
616
617 while (get_pos(&wr) < header.dst_size) {
618 int pos_state = get_pos(&wr) & pos_state_mask;
619 uint16_t *prob = p + LZMA_IS_MATCH +
620 (cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state;
621 if (rc_is_bit_0(&rc, prob))
622 process_bit0(&wr, &rc, &cst, p, pos_state, prob,
623 lc, literal_pos_mask);
624 else {
625 process_bit1(&wr, &rc, &cst, p, pos_state, prob);
626 if (cst.rep0 == 0)
627 break;
628 }
629 }
630
631 if (posp)
632 *posp = rc.ptr-rc.buffer;
633 if (wr.flush)
634 wr.flush(wr.buffer, wr.buffer_pos);
635 ret = 0;
636 large_free(p);
637exit_2:
638 if (!output)
639 large_free(wr.buffer);
640exit_1:
641 if (!buf)
642 free(inbuf);
643exit_0:
644 return ret;
645}
646
647#define decompress unlzma
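A worked decode of the LZMA properties byte handled by the two while-loops above (user-space sketch): pos = (pb*5 + lp)*9 + lc, so the common default 0x5d (93) yields lc=3, lp=0, pb=2, which is also why the magic table in lib/decompress.c keys LZMA detection on a leading 0x5d byte.

	#include <stdio.h>

	int main(void)
	{
		int pos = 0x5d, lc, lp, pb;

		lc = pos % 9;		/* literal context bits */
		lp = (pos / 9) % 5;	/* literal position bits */
		pb = (pos / 9) / 5;	/* position state bits */
		printf("lc=%d lp=%d pb=%d\n", lc, lp, pb);	/* lc=3 lp=0 pb=2 */
		return 0;
	}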
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
new file mode 100644
index 000000000000..833139ce1e22
--- /dev/null
+++ b/lib/dynamic_debug.c
@@ -0,0 +1,769 @@
1/*
2 * lib/dynamic_debug.c
3 *
4 * make pr_debug()/dev_dbg() calls runtime configurable based upon their
5 * source module.
6 *
7 * Copyright (C) 2008 Jason Baron <jbaron@redhat.com>
8 * By Greg Banks <gnb@melbourne.sgi.com>
9 * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/kallsyms.h>
16#include <linux/version.h>
17#include <linux/types.h>
18#include <linux/mutex.h>
19#include <linux/proc_fs.h>
20#include <linux/seq_file.h>
21#include <linux/list.h>
22#include <linux/sysctl.h>
23#include <linux/ctype.h>
24#include <linux/uaccess.h>
25#include <linux/dynamic_debug.h>
26#include <linux/debugfs.h>
27
28extern struct _ddebug __start___verbose[];
29extern struct _ddebug __stop___verbose[];
30
31/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
32 * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
33 * use independent hash functions, to reduce the chance of false positives.
34 */
35long long dynamic_debug_enabled;
36EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
37long long dynamic_debug_enabled2;
38EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
39
40struct ddebug_table {
41 struct list_head link;
42 char *mod_name;
43 unsigned int num_ddebugs;
44 unsigned int num_enabled;
45 struct _ddebug *ddebugs;
46};
47
48struct ddebug_query {
49 const char *filename;
50 const char *module;
51 const char *function;
52 const char *format;
53 unsigned int first_lineno, last_lineno;
54};
55
56struct ddebug_iter {
57 struct ddebug_table *table;
58 unsigned int idx;
59};
60
61static DEFINE_MUTEX(ddebug_lock);
62static LIST_HEAD(ddebug_tables);
63static int verbose = 0;
64
65/* Return the last part of a pathname */
66static inline const char *basename(const char *path)
67{
68 const char *tail = strrchr(path, '/');
69 return tail ? tail+1 : path;
70}
71
72/* format a string into buf[] which describes the _ddebug's flags */
73static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
74 size_t maxlen)
75{
76 char *p = buf;
77
78 BUG_ON(maxlen < 4);
79 if (dp->flags & _DPRINTK_FLAGS_PRINT)
80 *p++ = 'p';
81 if (p == buf)
82 *p++ = '-';
83 *p = '\0';
84
85 return buf;
86}
87
88/*
89 * must be called with ddebug_lock held
90 */
91
92static int disabled_hash(char hash, bool first_table)
93{
94 struct ddebug_table *dt;
95 char table_hash_value;
96
97 list_for_each_entry(dt, &ddebug_tables, link) {
98 if (first_table)
99 table_hash_value = dt->ddebugs->primary_hash;
100 else
101 table_hash_value = dt->ddebugs->secondary_hash;
102 if (dt->num_enabled && (hash == table_hash_value))
103 return 0;
104 }
105 return 1;
106}
107
108/*
109 * Search the tables for _ddebug's which match the given
110 * `query' and apply the `flags' and `mask' to them. Tells
111 * the user which ddebug's were changed, or whether none
112 * were matched.
113 */
114static void ddebug_change(const struct ddebug_query *query,
115 unsigned int flags, unsigned int mask)
116{
117 int i;
118 struct ddebug_table *dt;
119 unsigned int newflags;
120 unsigned int nfound = 0;
121 char flagbuf[8];
122
123 /* search for matching ddebugs */
124 mutex_lock(&ddebug_lock);
125 list_for_each_entry(dt, &ddebug_tables, link) {
126
127 /* match against the module name */
128 if (query->module != NULL &&
129 strcmp(query->module, dt->mod_name))
130 continue;
131
132 for (i = 0 ; i < dt->num_ddebugs ; i++) {
133 struct _ddebug *dp = &dt->ddebugs[i];
134
135 /* match against the source filename */
136 if (query->filename != NULL &&
137 strcmp(query->filename, dp->filename) &&
138 strcmp(query->filename, basename(dp->filename)))
139 continue;
140
141 /* match against the function */
142 if (query->function != NULL &&
143 strcmp(query->function, dp->function))
144 continue;
145
146 /* match against the format */
147 if (query->format != NULL &&
148 strstr(dp->format, query->format) == NULL)
149 continue;
150
151 /* match against the line number range */
152 if (query->first_lineno &&
153 dp->lineno < query->first_lineno)
154 continue;
155 if (query->last_lineno &&
156 dp->lineno > query->last_lineno)
157 continue;
158
159 nfound++;
160
161 newflags = (dp->flags & mask) | flags;
162 if (newflags == dp->flags)
163 continue;
164
165 if (!newflags)
166 dt->num_enabled--;
167			else if (!dp->flags)
168 dt->num_enabled++;
169 dp->flags = newflags;
170 if (newflags) {
171 dynamic_debug_enabled |=
172 (1LL << dp->primary_hash);
173 dynamic_debug_enabled2 |=
174 (1LL << dp->secondary_hash);
175 } else {
176 if (disabled_hash(dp->primary_hash, true))
177 dynamic_debug_enabled &=
178 ~(1LL << dp->primary_hash);
179 if (disabled_hash(dp->secondary_hash, false))
180 dynamic_debug_enabled2 &=
181 ~(1LL << dp->secondary_hash);
182 }
183 if (verbose)
184 printk(KERN_INFO
185 "ddebug: changed %s:%d [%s]%s %s\n",
186 dp->filename, dp->lineno,
187 dt->mod_name, dp->function,
188 ddebug_describe_flags(dp, flagbuf,
189 sizeof(flagbuf)));
190 }
191 }
192 mutex_unlock(&ddebug_lock);
193
194 if (!nfound && verbose)
195 printk(KERN_INFO "ddebug: no matches for query\n");
196}
197
198/*
199 * Split the buffer `buf' into space-separated words.
200 * Handles simple " and ' quoting, i.e. without nested,
201 * embedded or escaped \". Return the number of words
202 * or <0 on error.
203 */
204static int ddebug_tokenize(char *buf, char *words[], int maxwords)
205{
206 int nwords = 0;
207
208 while (*buf) {
209 char *end;
210
211 /* Skip leading whitespace */
212 while (*buf && isspace(*buf))
213 buf++;
214 if (!*buf)
215 break; /* oh, it was trailing whitespace */
216
217 /* Run `end' over a word, either whitespace separated or quoted */
218 if (*buf == '"' || *buf == '\'') {
219 int quote = *buf++;
220 for (end = buf ; *end && *end != quote ; end++)
221 ;
222 if (!*end)
223 return -EINVAL; /* unclosed quote */
224 } else {
225 for (end = buf ; *end && !isspace(*end) ; end++)
226 ;
227 BUG_ON(end == buf);
228 }
229 /* Here `buf' is the start of the word, `end' is one past the end */
230
231 if (nwords == maxwords)
232 return -EINVAL; /* ran out of words[] before bytes */
233 if (*end)
234 *end++ = '\0'; /* terminate the word */
235 words[nwords++] = buf;
236 buf = end;
237 }
238
239 if (verbose) {
240 int i;
241 printk(KERN_INFO "%s: split into words:", __func__);
242 for (i = 0 ; i < nwords ; i++)
243 printk(" \"%s\"", words[i]);
244 printk("\n");
245 }
246
247 return nwords;
248}
249
250/*
251 * Parse a single line number. Note that the empty string ""
252 * is treated as a special case and converted to zero, which
253 * is later treated as a "don't care" value.
254 */
255static inline int parse_lineno(const char *str, unsigned int *val)
256{
257 char *end = NULL;
258 BUG_ON(str == NULL);
259 if (*str == '\0') {
260 *val = 0;
261 return 0;
262 }
263 *val = simple_strtoul(str, &end, 10);
264 return end == NULL || end == str || *end != '\0' ? -EINVAL : 0;
265}
266
267/*
268 * Undo octal escaping in a string, in place. This is useful to
269 * allow the user to express a query which matches a format
270 * containing embedded spaces.
271 */
272#define isodigit(c) ((c) >= '0' && (c) <= '7')
273static char *unescape(char *str)
274{
275 char *in = str;
276 char *out = str;
277
278 while (*in) {
279 if (*in == '\\') {
280 if (in[1] == '\\') {
281 *out++ = '\\';
282 in += 2;
283 continue;
284 } else if (in[1] == 't') {
285 *out++ = '\t';
286 in += 2;
287 continue;
288 } else if (in[1] == 'n') {
289 *out++ = '\n';
290 in += 2;
291 continue;
292 } else if (isodigit(in[1]) &&
293 isodigit(in[2]) &&
294 isodigit(in[3])) {
295 *out++ = ((in[1] - '0')<<6) |
296 ((in[2] - '0')<<3) |
297 (in[3] - '0');
298 in += 4;
299 continue;
300 }
301 }
302 *out++ = *in++;
303 }
304 *out = '\0';
305
306 return str;
307}
308
309/*
310 * Parse words[] as a ddebug query specification, which is a series
311 * of (keyword, value) pairs chosen from these possibilities:
312 *
313 * func <function-name>
314 * file <full-pathname>
315 * file <base-filename>
316 * module <module-name>
317 * format <escaped-string-to-find-in-format>
318 * line <lineno>
319 * line <first-lineno>-<last-lineno> // where either may be empty
320 */
321static int ddebug_parse_query(char *words[], int nwords,
322 struct ddebug_query *query)
323{
324 unsigned int i;
325
326 /* check we have an even number of words */
327 if (nwords % 2 != 0)
328 return -EINVAL;
329 memset(query, 0, sizeof(*query));
330
331 for (i = 0 ; i < nwords ; i += 2) {
332 if (!strcmp(words[i], "func"))
333 query->function = words[i+1];
334 else if (!strcmp(words[i], "file"))
335 query->filename = words[i+1];
336 else if (!strcmp(words[i], "module"))
337 query->module = words[i+1];
338 else if (!strcmp(words[i], "format"))
339 query->format = unescape(words[i+1]);
340 else if (!strcmp(words[i], "line")) {
341 char *first = words[i+1];
342 char *last = strchr(first, '-');
343 if (last)
344 *last++ = '\0';
345 if (parse_lineno(first, &query->first_lineno) < 0)
346 return -EINVAL;
347 if (last != NULL) {
348 /* range <first>-<last> */
349 if (parse_lineno(last, &query->last_lineno) < 0)
350 return -EINVAL;
351 } else {
352 query->last_lineno = query->first_lineno;
353 }
354 } else {
355 if (verbose)
356 printk(KERN_ERR "%s: unknown keyword \"%s\"\n",
357 __func__, words[i]);
358 return -EINVAL;
359 }
360 }
361
362 if (verbose)
363 printk(KERN_INFO "%s: q->function=\"%s\" q->filename=\"%s\" "
364 "q->module=\"%s\" q->format=\"%s\" q->lineno=%u-%u\n",
365 __func__, query->function, query->filename,
366 query->module, query->format, query->first_lineno,
367 query->last_lineno);
368
369 return 0;
370}
371
372/*
373 * Parse `str' as a flags specification, format [-+=][p]+.
374 * Sets up *maskp and *flagsp to be used when changing the
375 * flags fields of matched _ddebug's. Returns 0 on success
376 * or <0 on error.
377 */
378static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
379 unsigned int *maskp)
380{
381 unsigned flags = 0;
382 int op = '=';
383
384 switch (*str) {
385 case '+':
386 case '-':
387 case '=':
388 op = *str++;
389 break;
390 default:
391 return -EINVAL;
392 }
393 if (verbose)
394 printk(KERN_INFO "%s: op='%c'\n", __func__, op);
395
396 for ( ; *str ; ++str) {
397 switch (*str) {
398 case 'p':
399 flags |= _DPRINTK_FLAGS_PRINT;
400 break;
401 default:
402 return -EINVAL;
403 }
404 }
405 if (flags == 0)
406 return -EINVAL;
407 if (verbose)
408 printk(KERN_INFO "%s: flags=0x%x\n", __func__, flags);
409
410 /* calculate final *flagsp, *maskp according to mask and op */
411 switch (op) {
412 case '=':
413 *maskp = 0;
414 *flagsp = flags;
415 break;
416 case '+':
417 *maskp = ~0U;
418 *flagsp = flags;
419 break;
420 case '-':
421 *maskp = ~flags;
422 *flagsp = 0;
423 break;
424 }
425 if (verbose)
426 printk(KERN_INFO "%s: *flagsp=0x%x *maskp=0x%x\n",
427 __func__, *flagsp, *maskp);
428 return 0;
429}
430
431/*
432 * File_ops->write method for <debugfs>/dynamic_debug/control. Gathers the
433 * command text from userspace, parses and executes it.
434 */
435static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
436 size_t len, loff_t *offp)
437{
438 unsigned int flags = 0, mask = 0;
439 struct ddebug_query query;
440#define MAXWORDS 9
441 int nwords;
442 char *words[MAXWORDS];
443 char tmpbuf[256];
444
445 if (len == 0)
446 return 0;
447 /* we don't check *offp -- multiple writes() are allowed */
448 if (len > sizeof(tmpbuf)-1)
449 return -E2BIG;
450 if (copy_from_user(tmpbuf, ubuf, len))
451 return -EFAULT;
452 tmpbuf[len] = '\0';
453 if (verbose)
454 printk(KERN_INFO "%s: read %d bytes from userspace\n",
455 __func__, (int)len);
456
457 nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS);
458 if (nwords < 0)
459 return -EINVAL;
460 if (ddebug_parse_query(words, nwords-1, &query))
461 return -EINVAL;
462 if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
463 return -EINVAL;
464
465 /* actually go and implement the change */
466 ddebug_change(&query, flags, mask);
467
468 *offp += len;
469 return len;
470}
471
472/*
473 * Set the iterator to point to the first _ddebug object
474 * and return a pointer to that first object. Returns
475 * NULL if there are no _ddebugs at all.
476 */
477static struct _ddebug *ddebug_iter_first(struct ddebug_iter *iter)
478{
479 if (list_empty(&ddebug_tables)) {
480 iter->table = NULL;
481 iter->idx = 0;
482 return NULL;
483 }
484 iter->table = list_entry(ddebug_tables.next,
485 struct ddebug_table, link);
486 iter->idx = 0;
487 return &iter->table->ddebugs[iter->idx];
488}
489
490/*
491 * Advance the iterator to point to the next _ddebug
492 * object from the one the iterator currently points at,
493 * and returns a pointer to the new _ddebug. Returns
494 * NULL if the iterator has seen all the _ddebugs.
495 */
496static struct _ddebug *ddebug_iter_next(struct ddebug_iter *iter)
497{
498 if (iter->table == NULL)
499 return NULL;
500 if (++iter->idx == iter->table->num_ddebugs) {
501 /* iterate to next table */
502 iter->idx = 0;
503 if (list_is_last(&iter->table->link, &ddebug_tables)) {
504 iter->table = NULL;
505 return NULL;
506 }
507 iter->table = list_entry(iter->table->link.next,
508 struct ddebug_table, link);
509 }
510 return &iter->table->ddebugs[iter->idx];
511}
512
513/*
514 * Seq_ops start method. Called at the start of every
515 * read() call from userspace. Takes the ddebug_lock and
516 * seeks the seq_file's iterator to the given position.
517 */
518static void *ddebug_proc_start(struct seq_file *m, loff_t *pos)
519{
520 struct ddebug_iter *iter = m->private;
521 struct _ddebug *dp;
522 int n = *pos;
523
524 if (verbose)
525 printk(KERN_INFO "%s: called m=%p *pos=%lld\n",
526 __func__, m, (unsigned long long)*pos);
527
528 mutex_lock(&ddebug_lock);
529
530 if (!n)
531 return SEQ_START_TOKEN;
532 if (n < 0)
533 return NULL;
534 dp = ddebug_iter_first(iter);
535 while (dp != NULL && --n > 0)
536 dp = ddebug_iter_next(iter);
537 return dp;
538}
539
540/*
541 * Seq_ops next method. Called several times within a read()
542 * call from userspace, with ddebug_lock held. Walks to the
543 * next _ddebug object with a special case for the header line.
544 */
545static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
546{
547 struct ddebug_iter *iter = m->private;
548 struct _ddebug *dp;
549
550 if (verbose)
551 printk(KERN_INFO "%s: called m=%p p=%p *pos=%lld\n",
552 __func__, m, p, (unsigned long long)*pos);
553
554 if (p == SEQ_START_TOKEN)
555 dp = ddebug_iter_first(iter);
556 else
557 dp = ddebug_iter_next(iter);
558 ++*pos;
559 return dp;
560}
561
562/*
563 * Seq_ops show method. Called several times within a read()
564 * call from userspace, with ddebug_lock held. Formats the
565 * current _ddebug as a single human-readable line, with a
566 * special case for the header line.
567 */
568static int ddebug_proc_show(struct seq_file *m, void *p)
569{
570 struct ddebug_iter *iter = m->private;
571 struct _ddebug *dp = p;
572 char flagsbuf[8];
573
574 if (verbose)
575 printk(KERN_INFO "%s: called m=%p p=%p\n",
576 __func__, m, p);
577
578 if (p == SEQ_START_TOKEN) {
579 seq_puts(m,
580 "# filename:lineno [module]function flags format\n");
581 return 0;
582 }
583
584 seq_printf(m, "%s:%u [%s]%s %s \"",
585 dp->filename, dp->lineno,
586 iter->table->mod_name, dp->function,
587 ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf)));
588 seq_escape(m, dp->format, "\t\r\n\"");
589 seq_puts(m, "\"\n");
590
591 return 0;
592}
593
594/*
595 * Seq_ops stop method. Called at the end of each read()
596 * call from userspace. Drops ddebug_lock.
597 */
598static void ddebug_proc_stop(struct seq_file *m, void *p)
599{
600 if (verbose)
601 printk(KERN_INFO "%s: called m=%p p=%p\n",
602 __func__, m, p);
603 mutex_unlock(&ddebug_lock);
604}
605
606static const struct seq_operations ddebug_proc_seqops = {
607 .start = ddebug_proc_start,
608 .next = ddebug_proc_next,
609 .show = ddebug_proc_show,
610 .stop = ddebug_proc_stop
611};
612
613/*
614 * File_ops->open method for <debugfs>/dynamic_debug/control. Does the seq_file
615 * setup dance, and also creates an iterator to walk the _ddebugs.
616 * Note that we create a seq_file always, even for O_WRONLY files
617 * where it's not needed, as doing so simplifies the ->release method.
618 */
619static int ddebug_proc_open(struct inode *inode, struct file *file)
620{
621 struct ddebug_iter *iter;
622 int err;
623
624 if (verbose)
625 printk(KERN_INFO "%s: called\n", __func__);
626
627 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
628 if (iter == NULL)
629 return -ENOMEM;
630
631 err = seq_open(file, &ddebug_proc_seqops);
632 if (err) {
633 kfree(iter);
634 return err;
635 }
636 ((struct seq_file *) file->private_data)->private = iter;
637 return 0;
638}
639
640static const struct file_operations ddebug_proc_fops = {
641 .owner = THIS_MODULE,
642 .open = ddebug_proc_open,
643 .read = seq_read,
644 .llseek = seq_lseek,
645 .release = seq_release_private,
646 .write = ddebug_proc_write
647};
648
649/*
650 * Allocate a new ddebug_table for the given module
651 * and add it to the global list.
652 */
653int ddebug_add_module(struct _ddebug *tab, unsigned int n,
654 const char *name)
655{
656 struct ddebug_table *dt;
657 char *new_name;
658
659 dt = kzalloc(sizeof(*dt), GFP_KERNEL);
660 if (dt == NULL)
661 return -ENOMEM;
662 new_name = kstrdup(name, GFP_KERNEL);
663 if (new_name == NULL) {
664 kfree(dt);
665 return -ENOMEM;
666 }
667 dt->mod_name = new_name;
668 dt->num_ddebugs = n;
669 dt->num_enabled = 0;
670 dt->ddebugs = tab;
671
672 mutex_lock(&ddebug_lock);
673 list_add_tail(&dt->link, &ddebug_tables);
674 mutex_unlock(&ddebug_lock);
675
676 if (verbose)
677 printk(KERN_INFO "%u debug prints in module %s\n",
678 n, dt->mod_name);
679 return 0;
680}
681EXPORT_SYMBOL_GPL(ddebug_add_module);
682
683static void ddebug_table_free(struct ddebug_table *dt)
684{
685 list_del_init(&dt->link);
686 kfree(dt->mod_name);
687 kfree(dt);
688}
689
690/*
691 * Called in response to a module being unloaded. Removes
692 * any ddebug_table's which point at the module.
693 */
694int ddebug_remove_module(char *mod_name)
695{
696 struct ddebug_table *dt, *nextdt;
697 int ret = -ENOENT;
698
699 if (verbose)
700 printk(KERN_INFO "%s: removing module \"%s\"\n",
701 __func__, mod_name);
702
703 mutex_lock(&ddebug_lock);
704 list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
705 if (!strcmp(dt->mod_name, mod_name)) {
706 ddebug_table_free(dt);
707 ret = 0;
708 }
709 }
710 mutex_unlock(&ddebug_lock);
711 return ret;
712}
713EXPORT_SYMBOL_GPL(ddebug_remove_module);
714
715static void ddebug_remove_all_tables(void)
716{
717 mutex_lock(&ddebug_lock);
718 while (!list_empty(&ddebug_tables)) {
719 struct ddebug_table *dt = list_entry(ddebug_tables.next,
720 struct ddebug_table,
721 link);
722 ddebug_table_free(dt);
723 }
724 mutex_unlock(&ddebug_lock);
725}
726
727static int __init dynamic_debug_init(void)
728{
729 struct dentry *dir, *file;
730 struct _ddebug *iter, *iter_start;
731 const char *modname = NULL;
732 int ret = 0;
733 int n = 0;
734
735 dir = debugfs_create_dir("dynamic_debug", NULL);
736 if (!dir)
737 return -ENOMEM;
738 file = debugfs_create_file("control", 0644, dir, NULL,
739 &ddebug_proc_fops);
740 if (!file) {
741 debugfs_remove(dir);
742 return -ENOMEM;
743 }
744 if (__start___verbose != __stop___verbose) {
745 iter = __start___verbose;
746 modname = iter->modname;
747 iter_start = iter;
748 for (; iter < __stop___verbose; iter++) {
749 if (strcmp(modname, iter->modname)) {
750 ret = ddebug_add_module(iter_start, n, modname);
751 if (ret)
752 goto out_free;
753 n = 0;
754 modname = iter->modname;
755 iter_start = iter;
756 }
757 n++;
758 }
759 ret = ddebug_add_module(iter_start, n, modname);
760 }
761out_free:
762 if (ret) {
763 ddebug_remove_all_tables();
764		debugfs_remove(file);
765		debugfs_remove(dir);
766	}
767	return ret;
768}
769module_init(dynamic_debug_init);
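Taken together, ddebug_tokenize(), ddebug_parse_query() and ddebug_parse_flags() above accept a line of keyword/value pairs followed by a [-+=]p flags word. A user-space sketch of driving the control file (debugfs mount point and target module are assumptions, not from the patch):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Enable the pr_debug() calls at lines 1600-1605 of svcsock.c. */
	const char *query = "module sunrpc file svcsock.c line 1600-1605 +p\n";
	FILE *f = fopen("/sys/kernel/debug/dynamic_debug/control", "w");

	if (!f) {
		perror("control");
		return EXIT_FAILURE;
	}
	if (fputs(query, f) == EOF)
		perror("write");
	return fclose(f) ? EXIT_FAILURE : EXIT_SUCCESS;
}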
diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c
deleted file mode 100644
index 165a19763dc9..000000000000
--- a/lib/dynamic_printk.c
+++ /dev/null
@@ -1,414 +0,0 @@
1/*
2 * lib/dynamic_printk.c
3 *
4 * make pr_debug()/dev_dbg() calls runtime configurable based upon
5 * their source module.
6 *
7 * Copyright (C) 2008 Red Hat, Inc., Jason Baron <jbaron@redhat.com>
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/uaccess.h>
13#include <linux/seq_file.h>
14#include <linux/debugfs.h>
15#include <linux/fs.h>
16
17extern struct mod_debug __start___verbose[];
18extern struct mod_debug __stop___verbose[];
19
20struct debug_name {
21 struct hlist_node hlist;
22 struct hlist_node hlist2;
23 int hash1;
24 int hash2;
25 char *name;
26 int enable;
27 int type;
28};
29
30static int nr_entries;
31static int num_enabled;
32int dynamic_enabled = DYNAMIC_ENABLED_NONE;
33static struct hlist_head module_table[DEBUG_HASH_TABLE_SIZE] =
34 { [0 ... DEBUG_HASH_TABLE_SIZE-1] = HLIST_HEAD_INIT };
35static struct hlist_head module_table2[DEBUG_HASH_TABLE_SIZE] =
36 { [0 ... DEBUG_HASH_TABLE_SIZE-1] = HLIST_HEAD_INIT };
37static DECLARE_MUTEX(debug_list_mutex);
38
39/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
40 * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
41 * use independent hash functions, to reduce the chance of false positives.
42 */
43long long dynamic_printk_enabled;
44EXPORT_SYMBOL_GPL(dynamic_printk_enabled);
45long long dynamic_printk_enabled2;
46EXPORT_SYMBOL_GPL(dynamic_printk_enabled2);
47
48/* returns the debug module pointer. */
49static struct debug_name *find_debug_module(char *module_name)
50{
51 int i;
52 struct hlist_head *head;
53 struct hlist_node *node;
54 struct debug_name *element;
55
56 element = NULL;
57 for (i = 0; i < DEBUG_HASH_TABLE_SIZE; i++) {
58 head = &module_table[i];
59 hlist_for_each_entry_rcu(element, node, head, hlist)
60 if (!strcmp(element->name, module_name))
61 return element;
62 }
63 return NULL;
64}
65
66/* returns the debug module pointer. */
67static struct debug_name *find_debug_module_hash(char *module_name, int hash)
68{
69 struct hlist_head *head;
70 struct hlist_node *node;
71 struct debug_name *element;
72
73 element = NULL;
74 head = &module_table[hash];
75 hlist_for_each_entry_rcu(element, node, head, hlist)
76 if (!strcmp(element->name, module_name))
77 return element;
78 return NULL;
79}
80
81/* caller must hold mutex*/
82static int __add_debug_module(char *mod_name, int hash, int hash2)
83{
84 struct debug_name *new;
85 char *module_name;
86 int ret = 0;
87
88 if (find_debug_module(mod_name)) {
89 ret = -EINVAL;
90 goto out;
91 }
92 module_name = kmalloc(strlen(mod_name) + 1, GFP_KERNEL);
93 if (!module_name) {
94 ret = -ENOMEM;
95 goto out;
96 }
97 module_name = strcpy(module_name, mod_name);
98 module_name[strlen(mod_name)] = '\0';
99 new = kzalloc(sizeof(struct debug_name), GFP_KERNEL);
100 if (!new) {
101 kfree(module_name);
102 ret = -ENOMEM;
103 goto out;
104 }
105 INIT_HLIST_NODE(&new->hlist);
106 INIT_HLIST_NODE(&new->hlist2);
107 new->name = module_name;
108 new->hash1 = hash;
109 new->hash2 = hash2;
110 hlist_add_head_rcu(&new->hlist, &module_table[hash]);
111 hlist_add_head_rcu(&new->hlist2, &module_table2[hash2]);
112 nr_entries++;
113out:
114 return ret;
115}
116
117int unregister_dynamic_debug_module(char *mod_name)
118{
119 struct debug_name *element;
120 int ret = 0;
121
122 down(&debug_list_mutex);
123 element = find_debug_module(mod_name);
124 if (!element) {
125 ret = -EINVAL;
126 goto out;
127 }
128 hlist_del_rcu(&element->hlist);
129 hlist_del_rcu(&element->hlist2);
130 synchronize_rcu();
131 kfree(element->name);
132 if (element->enable)
133 num_enabled--;
134 kfree(element);
135 nr_entries--;
136out:
137 up(&debug_list_mutex);
138 return ret;
139}
140EXPORT_SYMBOL_GPL(unregister_dynamic_debug_module);
141
142int register_dynamic_debug_module(char *mod_name, int type, char *share_name,
143 char *flags, int hash, int hash2)
144{
145 struct debug_name *elem;
146 int ret = 0;
147
148 down(&debug_list_mutex);
149 elem = find_debug_module(mod_name);
150 if (!elem) {
151 if (__add_debug_module(mod_name, hash, hash2))
152 goto out;
153 elem = find_debug_module(mod_name);
154 if (dynamic_enabled == DYNAMIC_ENABLED_ALL &&
155 !strcmp(mod_name, share_name)) {
156 elem->enable = true;
157 num_enabled++;
158 }
159 }
160 elem->type |= type;
161out:
162 up(&debug_list_mutex);
163 return ret;
164}
165EXPORT_SYMBOL_GPL(register_dynamic_debug_module);
166
167int __dynamic_dbg_enabled_helper(char *mod_name, int type, int value, int hash)
168{
169 struct debug_name *elem;
170 int ret = 0;
171
172 if (dynamic_enabled == DYNAMIC_ENABLED_ALL)
173 return 1;
174 rcu_read_lock();
175 elem = find_debug_module_hash(mod_name, hash);
176 if (elem && elem->enable)
177 ret = 1;
178 rcu_read_unlock();
179 return ret;
180}
181EXPORT_SYMBOL_GPL(__dynamic_dbg_enabled_helper);
182
183static void set_all(bool enable)
184{
185 struct debug_name *e;
186 struct hlist_node *node;
187 int i;
188 long long enable_mask;
189
190 for (i = 0; i < DEBUG_HASH_TABLE_SIZE; i++) {
191 if (module_table[i].first != NULL) {
192 hlist_for_each_entry(e, node, &module_table[i], hlist) {
193 e->enable = enable;
194 }
195 }
196 }
197 if (enable)
198 enable_mask = ULLONG_MAX;
199 else
200 enable_mask = 0;
201 dynamic_printk_enabled = enable_mask;
202 dynamic_printk_enabled2 = enable_mask;
203}
204
205static int disabled_hash(int i, bool first_table)
206{
207 struct debug_name *e;
208 struct hlist_node *node;
209
210 if (first_table) {
211 hlist_for_each_entry(e, node, &module_table[i], hlist) {
212 if (e->enable)
213 return 0;
214 }
215 } else {
216 hlist_for_each_entry(e, node, &module_table2[i], hlist2) {
217 if (e->enable)
218 return 0;
219 }
220 }
221 return 1;
222}
223
224static ssize_t pr_debug_write(struct file *file, const char __user *buf,
225 size_t length, loff_t *ppos)
226{
227 char *buffer, *s, *value_str, *setting_str;
228 int err, value;
229 struct debug_name *elem = NULL;
230 int all = 0;
231
232 if (length > PAGE_SIZE || length < 0)
233 return -EINVAL;
234
235 buffer = (char *)__get_free_page(GFP_KERNEL);
236 if (!buffer)
237 return -ENOMEM;
238
239 err = -EFAULT;
240 if (copy_from_user(buffer, buf, length))
241 goto out;
242
243 err = -EINVAL;
244 if (length < PAGE_SIZE)
245 buffer[length] = '\0';
246 else if (buffer[PAGE_SIZE-1])
247 goto out;
248
249 err = -EINVAL;
250 down(&debug_list_mutex);
251
252 if (strncmp("set", buffer, 3))
253 goto out_up;
254 s = buffer + 3;
255 setting_str = strsep(&s, "=");
256 if (s == NULL)
257 goto out_up;
258 setting_str = strstrip(setting_str);
259 value_str = strsep(&s, " ");
260 if (s == NULL)
261 goto out_up;
262 s = strstrip(s);
263 if (!strncmp(s, "all", 3))
264 all = 1;
265 else
266 elem = find_debug_module(s);
267 if (!strncmp(setting_str, "enable", 6)) {
268 value = !!simple_strtol(value_str, NULL, 10);
269 if (all) {
270 if (value) {
271 set_all(true);
272 num_enabled = nr_entries;
273 dynamic_enabled = DYNAMIC_ENABLED_ALL;
274 } else {
275 set_all(false);
276 num_enabled = 0;
277 dynamic_enabled = DYNAMIC_ENABLED_NONE;
278 }
279 err = 0;
280 } else if (elem) {
281 if (value && (elem->enable == 0)) {
282 dynamic_printk_enabled |= (1LL << elem->hash1);
283 dynamic_printk_enabled2 |= (1LL << elem->hash2);
284 elem->enable = 1;
285 num_enabled++;
286 dynamic_enabled = DYNAMIC_ENABLED_SOME;
287 err = 0;
288 printk(KERN_DEBUG
289 "debugging enabled for module %s\n",
290 elem->name);
291 } else if (!value && (elem->enable == 1)) {
292 elem->enable = 0;
293 num_enabled--;
294 if (disabled_hash(elem->hash1, true))
295 dynamic_printk_enabled &=
296 ~(1LL << elem->hash1);
297 if (disabled_hash(elem->hash2, false))
298 dynamic_printk_enabled2 &=
299 ~(1LL << elem->hash2);
300 if (num_enabled)
301 dynamic_enabled = DYNAMIC_ENABLED_SOME;
302 else
303 dynamic_enabled = DYNAMIC_ENABLED_NONE;
304 err = 0;
305 printk(KERN_DEBUG
306 "debugging disabled for module %s\n",
307 elem->name);
308 }
309 }
310 }
311 if (!err)
312 err = length;
313out_up:
314 up(&debug_list_mutex);
315out:
316 free_page((unsigned long)buffer);
317 return err;
318}
319
320static void *pr_debug_seq_start(struct seq_file *f, loff_t *pos)
321{
322 return (*pos < DEBUG_HASH_TABLE_SIZE) ? pos : NULL;
323}
324
325static void *pr_debug_seq_next(struct seq_file *s, void *v, loff_t *pos)
326{
327 (*pos)++;
328 if (*pos >= DEBUG_HASH_TABLE_SIZE)
329 return NULL;
330 return pos;
331}
332
333static void pr_debug_seq_stop(struct seq_file *s, void *v)
334{
335 /* Nothing to do */
336}
337
338static int pr_debug_seq_show(struct seq_file *s, void *v)
339{
340 struct hlist_head *head;
341 struct hlist_node *node;
342 struct debug_name *elem;
343 unsigned int i = *(loff_t *) v;
344
345 rcu_read_lock();
346 head = &module_table[i];
347 hlist_for_each_entry_rcu(elem, node, head, hlist) {
348 seq_printf(s, "%s enabled=%d", elem->name, elem->enable);
349 seq_printf(s, "\n");
350 }
351 rcu_read_unlock();
352 return 0;
353}
354
355static struct seq_operations pr_debug_seq_ops = {
356 .start = pr_debug_seq_start,
357 .next = pr_debug_seq_next,
358 .stop = pr_debug_seq_stop,
359 .show = pr_debug_seq_show
360};
361
362static int pr_debug_open(struct inode *inode, struct file *filp)
363{
364 return seq_open(filp, &pr_debug_seq_ops);
365}
366
367static const struct file_operations pr_debug_operations = {
368 .open = pr_debug_open,
369 .read = seq_read,
370 .write = pr_debug_write,
371 .llseek = seq_lseek,
372 .release = seq_release,
373};
374
375static int __init dynamic_printk_init(void)
376{
377 struct dentry *dir, *file;
378 struct mod_debug *iter;
379 unsigned long value;
380
381 dir = debugfs_create_dir("dynamic_printk", NULL);
382 if (!dir)
383 return -ENOMEM;
384 file = debugfs_create_file("modules", 0644, dir, NULL,
385 &pr_debug_operations);
386 if (!file) {
387 debugfs_remove(dir);
388 return -ENOMEM;
389 }
390 for (value = (unsigned long)__start___verbose;
391 value < (unsigned long)__stop___verbose;
392 value += sizeof(struct mod_debug)) {
393 iter = (struct mod_debug *)value;
394 register_dynamic_debug_module(iter->modname,
395 iter->type,
396 iter->logical_modname,
397 iter->flag_names, iter->hash, iter->hash2);
398 }
399 if (dynamic_enabled == DYNAMIC_ENABLED_ALL)
400 set_all(true);
401 return 0;
402}
403module_init(dynamic_printk_init);
404/* may want to move this earlier so we can get traces as early as possible */
405
406static int __init dynamic_printk_setup(char *str)
407{
408 if (str)
409 return -ENOENT;
410 dynamic_enabled = DYNAMIC_ENABLED_ALL;
411 return 0;
412}
413/* Use early_param(), so we can get debug output as early as possible */
414early_param("dynamic_printk", dynamic_printk_setup);
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 01a3c22c1b5a..39f1029e3525 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -39,7 +39,7 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
39int __lockfunc __reacquire_kernel_lock(void) 39int __lockfunc __reacquire_kernel_lock(void)
40{ 40{
41 while (!_raw_spin_trylock(&kernel_flag)) { 41 while (!_raw_spin_trylock(&kernel_flag)) {
42 if (test_thread_flag(TIF_NEED_RESCHED)) 42 if (need_resched())
43 return -EAGAIN; 43 return -EAGAIN;
44 cpu_relax(); 44 cpu_relax();
45 } 45 }
diff --git a/lib/kobject.c b/lib/kobject.c
index 0487d1f64806..a6dec32f2ddd 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -212,7 +212,7 @@ static int kobject_add_internal(struct kobject *kobj)
212 * @fmt: format string used to build the name 212 * @fmt: format string used to build the name
213 * @vargs: vargs to format the string. 213 * @vargs: vargs to format the string.
214 */ 214 */
215static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, 215int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
216 va_list vargs) 216 va_list vargs)
217{ 217{
218 const char *old_name = kobj->name; 218 const char *old_name = kobj->name;
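Dropping static from kobject_set_name_vargs() lets callers outside lib/kobject.c build kobject names from a va_list. The pattern it enables is the same varargs-wrapper shape kobject_set_name() itself uses; a kernel-context sketch (hypothetical wrapper, error handling elided):

int example_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list vargs;
	int retval;

	va_start(vargs, fmt);
	retval = kobject_set_name_vargs(kobj, fmt, vargs);
	va_end(vargs);

	return retval;
}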
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 318328ddbd1c..97a777ad4f59 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -118,6 +118,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
118 kset = top_kobj->kset; 118 kset = top_kobj->kset;
119 uevent_ops = kset->uevent_ops; 119 uevent_ops = kset->uevent_ops;
120 120
121	/* skip the event, if uevent_suppress is set */
122 if (kobj->uevent_suppress) {
123 pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
124 "caused the event to drop!\n",
125 kobject_name(kobj), kobj, __func__);
126 return 0;
127 }
121 /* skip the event, if the filter returns zero. */ 128 /* skip the event, if the filter returns zero. */
122 if (uevent_ops && uevent_ops->filter) 129 if (uevent_ops && uevent_ops->filter)
123 if (!uevent_ops->filter(kset, kobj)) { 130 if (!uevent_ops->filter(kset, kobj)) {
@@ -227,6 +234,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
227 NETLINK_CB(skb).dst_group = 1; 234 NETLINK_CB(skb).dst_group = 1;
228 retval = netlink_broadcast(uevent_sock, skb, 0, 1, 235 retval = netlink_broadcast(uevent_sock, skb, 0, 1,
229 GFP_KERNEL); 236 GFP_KERNEL);
237 /* ENOBUFS should be handled in userspace */
238 if (retval == -ENOBUFS)
239 retval = 0;
230 } else 240 } else
231 retval = -ENOMEM; 241 retval = -ENOMEM;
232 } 242 }
@@ -248,7 +258,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
248 goto exit; 258 goto exit;
249 259
250 retval = call_usermodehelper(argv[0], argv, 260 retval = call_usermodehelper(argv[0], argv,
251 env->envp, UMH_WAIT_EXEC); 261 env->envp, UMH_NO_WAIT);
252 } 262 }
253 263
254exit: 264exit:
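The new uevent_suppress check gives subsystems a way to keep half-constructed kobjects quiet and announce them once, in a consistent state. A kernel-context sketch of the intended use (hypothetical helper, not from the patch):

static void example_quiet_setup(struct kobject *kobj)
{
	kobj->uevent_suppress = 1;	/* kobject_uevent() now drops events */
	/* ... rename/reparent/populate the object ... */
	kobj->uevent_suppress = 0;
	kobject_uevent(kobj, KOBJ_ADD);	/* announce once, when consistent */
}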
diff --git a/lib/nlattr.c b/lib/nlattr.c
new file mode 100644
index 000000000000..c4706eb98d3d
--- /dev/null
+++ b/lib/nlattr.c
@@ -0,0 +1,502 @@
1/*
2 * NETLINK Netlink attributes
3 *
4 * Authors: Thomas Graf <tgraf@suug.ch>
5 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6 */
7
8#include <linux/module.h>
9#include <linux/kernel.h>
10#include <linux/errno.h>
11#include <linux/jiffies.h>
12#include <linux/netdevice.h>
13#include <linux/skbuff.h>
14#include <linux/string.h>
15#include <linux/types.h>
16#include <net/netlink.h>
17
18static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = {
19 [NLA_U8] = sizeof(u8),
20 [NLA_U16] = sizeof(u16),
21 [NLA_U32] = sizeof(u32),
22 [NLA_U64] = sizeof(u64),
23 [NLA_NESTED] = NLA_HDRLEN,
24};
25
26static int validate_nla(struct nlattr *nla, int maxtype,
27 const struct nla_policy *policy)
28{
29 const struct nla_policy *pt;
30 int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
31
32 if (type <= 0 || type > maxtype)
33 return 0;
34
35 pt = &policy[type];
36
37 BUG_ON(pt->type > NLA_TYPE_MAX);
38
39 switch (pt->type) {
40 case NLA_FLAG:
41 if (attrlen > 0)
42 return -ERANGE;
43 break;
44
45 case NLA_NUL_STRING:
46 if (pt->len)
47 minlen = min_t(int, attrlen, pt->len + 1);
48 else
49 minlen = attrlen;
50
51 if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL)
52 return -EINVAL;
53 /* fall through */
54
55 case NLA_STRING:
56 if (attrlen < 1)
57 return -ERANGE;
58
59 if (pt->len) {
60 char *buf = nla_data(nla);
61
62 if (buf[attrlen - 1] == '\0')
63 attrlen--;
64
65 if (attrlen > pt->len)
66 return -ERANGE;
67 }
68 break;
69
70 case NLA_BINARY:
71 if (pt->len && attrlen > pt->len)
72 return -ERANGE;
73 break;
74
75 case NLA_NESTED_COMPAT:
76 if (attrlen < pt->len)
77 return -ERANGE;
78 if (attrlen < NLA_ALIGN(pt->len))
79 break;
80 if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
81 return -ERANGE;
82 nla = nla_data(nla) + NLA_ALIGN(pt->len);
83 if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
84 return -ERANGE;
85 break;
86 case NLA_NESTED:
87	/* a nested attribute is allowed to be empty; if it's not,
88 * it must have a size of at least NLA_HDRLEN.
89 */
90 if (attrlen == 0)
91 break;
92 default:
93 if (pt->len)
94 minlen = pt->len;
95 else if (pt->type != NLA_UNSPEC)
96 minlen = nla_attr_minlen[pt->type];
97
98 if (attrlen < minlen)
99 return -ERANGE;
100 }
101
102 return 0;
103}
104
105/**
106 * nla_validate - Validate a stream of attributes
107 * @head: head of attribute stream
108 * @len: length of attribute stream
109 * @maxtype: maximum attribute type to be expected
110 * @policy: validation policy
111 *
112 * Validates all attributes in the specified attribute stream against the
113 * specified policy. Attributes with a type exceeding maxtype will be
114 * ignored. See documentation of struct nla_policy for more details.
115 *
116 * Returns 0 on success or a negative error code.
117 */
118int nla_validate(struct nlattr *head, int len, int maxtype,
119 const struct nla_policy *policy)
120{
121 struct nlattr *nla;
122 int rem, err;
123
124 nla_for_each_attr(nla, head, len, rem) {
125 err = validate_nla(nla, maxtype, policy);
126 if (err < 0)
127 goto errout;
128 }
129
130 err = 0;
131errout:
132 return err;
133}
134
135/**
136 * nla_policy_len - Determine the max. length of a policy
137 * @policy: policy to use
138 * @n: number of policies
139 *
140 * Determines the max. length of the policy. It is currently used
141 * to allocate Netlink buffers roughly the size of the actual
142 * message.
143 *
144 * Returns the max. length of the policy.
145 */
146int
147nla_policy_len(const struct nla_policy *p, int n)
148{
149 int i, len = 0;
150
151	for (i = 0; i < n; i++, p++) {
152 if (p->len)
153 len += nla_total_size(p->len);
154 else if (nla_attr_minlen[p->type])
155 len += nla_total_size(nla_attr_minlen[p->type]);
156 }
157
158 return len;
159}
160
161/**
162 * nla_parse - Parse a stream of attributes into a tb buffer
163 * @tb: destination array with maxtype+1 elements
164 * @maxtype: maximum attribute type to be expected
165 * @head: head of attribute stream
166 * @len: length of attribute stream
167 * @policy: validation policy
168 *
169 * Parses a stream of attributes and stores a pointer to each attribute in
170 * the tb array accessible via the attribute type. Attributes with a type
171 * exceeding maxtype will be silently ignored for backwards compatibility
172 * reasons. policy may be set to NULL if no validation is required.
173 *
174 * Returns 0 on success or a negative error code.
175 */
176int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
177 const struct nla_policy *policy)
178{
179 struct nlattr *nla;
180 int rem, err;
181
182 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
183
184 nla_for_each_attr(nla, head, len, rem) {
185 u16 type = nla_type(nla);
186
187 if (type > 0 && type <= maxtype) {
188 if (policy) {
189 err = validate_nla(nla, maxtype, policy);
190 if (err < 0)
191 goto errout;
192 }
193
194 tb[type] = nla;
195 }
196 }
197
198 if (unlikely(rem > 0))
199 printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
200 "attributes.\n", rem);
201
202 err = 0;
203errout:
204 return err;
205}
206
207/**
208 * nla_find - Find a specific attribute in a stream of attributes
209 * @head: head of attribute stream
210 * @len: length of attribute stream
211 * @attrtype: type of attribute to look for
212 *
213 * Returns the first attribute in the stream matching the specified type.
214 */
215struct nlattr *nla_find(struct nlattr *head, int len, int attrtype)
216{
217 struct nlattr *nla;
218 int rem;
219
220 nla_for_each_attr(nla, head, len, rem)
221 if (nla_type(nla) == attrtype)
222 return nla;
223
224 return NULL;
225}
226
227/**
228 * nla_strlcpy - Copy string attribute payload into a sized buffer
229 * @dst: where to copy the string to
230 * @nla: attribute to copy the string from
231 * @dstsize: size of destination buffer
232 *
233 * Copies at most dstsize - 1 bytes into the destination buffer.
234 * The result is always a valid NUL-terminated string. Unlike
235 * strlcpy, the destination buffer is always padded out.
236 *
237 * Returns the length of the source buffer.
238 */
239size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
240{
241 size_t srclen = nla_len(nla);
242 char *src = nla_data(nla);
243
244 if (srclen > 0 && src[srclen - 1] == '\0')
245 srclen--;
246
247 if (dstsize > 0) {
248 size_t len = (srclen >= dstsize) ? dstsize - 1 : srclen;
249
250 memset(dst, 0, dstsize);
251 memcpy(dst, src, len);
252 }
253
254 return srclen;
255}
256
257/**
258 * nla_memcpy - Copy a netlink attribute into another memory area
259 * @dest: where to copy to
260 * @src: netlink attribute to copy from
261 * @count: size of the destination area
262 *
263 * Note: The number of bytes copied is limited by the length of
264 * the attribute's payload.
265 *
266 * Returns the number of bytes copied.
267 */
268int nla_memcpy(void *dest, const struct nlattr *src, int count)
269{
270 int minlen = min_t(int, count, nla_len(src));
271
272 memcpy(dest, nla_data(src), minlen);
273
274 return minlen;
275}
276
277/**
278 * nla_memcmp - Compare an attribute with sized memory area
279 * @nla: netlink attribute
280 * @data: memory area
281 * @size: size of memory area
282 */
283int nla_memcmp(const struct nlattr *nla, const void *data,
284 size_t size)
285{
286 int d = nla_len(nla) - size;
287
288 if (d == 0)
289 d = memcmp(nla_data(nla), data, size);
290
291 return d;
292}
293
294/**
295 * nla_strcmp - Compare a string attribute against a string
296 * @nla: netlink string attribute
297 * @str: another string
298 */
299int nla_strcmp(const struct nlattr *nla, const char *str)
300{
301 int len = strlen(str) + 1;
302 int d = nla_len(nla) - len;
303
304 if (d == 0)
305 d = memcmp(nla_data(nla), str, len);
306
307 return d;
308}
309
310#ifdef CONFIG_NET
311/**
312 * __nla_reserve - reserve room for attribute on the skb
313 * @skb: socket buffer to reserve room on
314 * @attrtype: attribute type
315 * @attrlen: length of attribute payload
316 *
317 * Adds a netlink attribute header to a socket buffer and reserves
318 * room for the payload but does not copy it.
319 *
320 * The caller is responsible for ensuring that the skb provides enough
321 * tailroom for the attribute header and payload.
322 */
323struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
324{
325 struct nlattr *nla;
326
327 nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
328 nla->nla_type = attrtype;
329 nla->nla_len = nla_attr_size(attrlen);
330
331 memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen));
332
333 return nla;
334}
335EXPORT_SYMBOL(__nla_reserve);
336
337/**
338 * __nla_reserve_nohdr - reserve room for attribute without header
339 * @skb: socket buffer to reserve room on
340 * @attrlen: length of attribute payload
341 *
342 * Reserves room for attribute payload without a header.
343 *
344 * The caller is responsible for ensuring that the skb provides enough
345 * tailroom for the payload.
346 */
347void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
348{
349 void *start;
350
351 start = skb_put(skb, NLA_ALIGN(attrlen));
352 memset(start, 0, NLA_ALIGN(attrlen));
353
354 return start;
355}
356EXPORT_SYMBOL(__nla_reserve_nohdr);
357
358/**
359 * nla_reserve - reserve room for attribute on the skb
360 * @skb: socket buffer to reserve room on
361 * @attrtype: attribute type
362 * @attrlen: length of attribute payload
363 *
364 * Adds a netlink attribute header to a socket buffer and reserves
365 * room for the payload but does not copy it.
366 *
367 * Returns NULL if the tailroom of the skb is insufficient to store
368 * the attribute header and payload.
369 */
370struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
371{
372 if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
373 return NULL;
374
375 return __nla_reserve(skb, attrtype, attrlen);
376}
377EXPORT_SYMBOL(nla_reserve);
378
379/**
380 * nla_reserve_nohdr - reserve room for attribute without header
381 * @skb: socket buffer to reserve room on
382 * @attrlen: length of attribute payload
383 *
384 * Reserves room for attribute payload without a header.
385 *
386 * Returns NULL if the tailroom of the skb is insufficient to store
387 * the attribute payload.
388 */
389void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
390{
391 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
392 return NULL;
393
394 return __nla_reserve_nohdr(skb, attrlen);
395}
396EXPORT_SYMBOL(nla_reserve_nohdr);
397
398/**
399 * __nla_put - Add a netlink attribute to a socket buffer
400 * @skb: socket buffer to add attribute to
401 * @attrtype: attribute type
402 * @attrlen: length of attribute payload
403 * @data: head of attribute payload
404 *
405 * The caller is responsible for ensuring that the skb provides enough
406 * tailroom for the attribute header and payload.
407 */
408void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
409 const void *data)
410{
411 struct nlattr *nla;
412
413 nla = __nla_reserve(skb, attrtype, attrlen);
414 memcpy(nla_data(nla), data, attrlen);
415}
416EXPORT_SYMBOL(__nla_put);
417
418/**
419 * __nla_put_nohdr - Add a netlink attribute without header
420 * @skb: socket buffer to add attribute to
421 * @attrlen: length of attribute payload
422 * @data: head of attribute payload
423 *
424 * The caller is responsible for ensuring that the skb provides enough
425 * tailroom for the attribute payload.
426 */
427void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
428{
429 void *start;
430
431 start = __nla_reserve_nohdr(skb, attrlen);
432 memcpy(start, data, attrlen);
433}
434EXPORT_SYMBOL(__nla_put_nohdr);
435
436/**
437 * nla_put - Add a netlink attribute to a socket buffer
438 * @skb: socket buffer to add attribute to
439 * @attrtype: attribute type
440 * @attrlen: length of attribute payload
441 * @data: head of attribute payload
442 *
443 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
444 * the attribute header and payload.
445 */
446int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
447{
448 if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
449 return -EMSGSIZE;
450
451 __nla_put(skb, attrtype, attrlen, data);
452 return 0;
453}
454EXPORT_SYMBOL(nla_put);
455
456/**
457 * nla_put_nohdr - Add a netlink attribute without header
458 * @skb: socket buffer to add attribute to
459 * @attrlen: length of attribute payload
460 * @data: head of attribute payload
461 *
462 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
463 * the attribute payload.
464 */
465int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
466{
467 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
468 return -EMSGSIZE;
469
470 __nla_put_nohdr(skb, attrlen, data);
471 return 0;
472}
473EXPORT_SYMBOL(nla_put_nohdr);
474
475/**
476 * nla_append - Add a netlink attribute without header or padding
477 * @skb: socket buffer to add attribute to
478 * @attrlen: length of attribute payload
479 * @data: head of attribute payload
480 *
481 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
482 * the attribute payload.
483 */
484int nla_append(struct sk_buff *skb, int attrlen, const void *data)
485{
486 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
487 return -EMSGSIZE;
488
489 memcpy(skb_put(skb, attrlen), data, attrlen);
490 return 0;
491}
492EXPORT_SYMBOL(nla_append);
493#endif
494
495EXPORT_SYMBOL(nla_validate);
496EXPORT_SYMBOL(nla_policy_len);
497EXPORT_SYMBOL(nla_parse);
498EXPORT_SYMBOL(nla_find);
499EXPORT_SYMBOL(nla_strlcpy);
500EXPORT_SYMBOL(nla_memcpy);
501EXPORT_SYMBOL(nla_memcmp);
502EXPORT_SYMBOL(nla_strcmp);
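The typical consumer of nla_parse() above fills a tb[] array indexed by attribute type, pre-validated against a policy, then reads whichever attributes arrived. A kernel-context sketch with a hypothetical attribute enum and policy (not from the patch):

enum {
	EX_ATTR_UNSPEC,
	EX_ATTR_PORT,			/* NLA_U16 */
	EX_ATTR_NAME,			/* NLA_NUL_STRING */
	__EX_ATTR_MAX,
};
#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
	[EX_ATTR_PORT] = { .type = NLA_U16 },
	[EX_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = 15 },
};

static int ex_parse(struct nlattr *head, int len)
{
	struct nlattr *tb[EX_ATTR_MAX + 1];
	int err;

	err = nla_parse(tb, EX_ATTR_MAX, head, len, ex_policy);
	if (err < 0)
		return err;

	if (tb[EX_ATTR_PORT])		/* attribute was present and valid */
		printk(KERN_DEBUG "port=%u\n",
		       nla_get_u16(tb[EX_ATTR_PORT]));
	return 0;
}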
diff --git a/lib/zlib_inflate/inflate.h b/lib/zlib_inflate/inflate.h
index df8a6c92052d..3d17b3d1b21f 100644
--- a/lib/zlib_inflate/inflate.h
+++ b/lib/zlib_inflate/inflate.h
@@ -1,3 +1,6 @@
1#ifndef INFLATE_H
2#define INFLATE_H
3
1/* inflate.h -- internal inflate state definition 4/* inflate.h -- internal inflate state definition
2 * Copyright (C) 1995-2004 Mark Adler 5 * Copyright (C) 1995-2004 Mark Adler
3 * For conditions of distribution and use, see copyright notice in zlib.h 6 * For conditions of distribution and use, see copyright notice in zlib.h
@@ -105,3 +108,4 @@ struct inflate_state {
105 unsigned short work[288]; /* work area for code table building */ 108 unsigned short work[288]; /* work area for code table building */
106 code codes[ENOUGH]; /* space for code tables */ 109 code codes[ENOUGH]; /* space for code tables */
107}; 110};
111#endif
diff --git a/lib/zlib_inflate/inftrees.h b/lib/zlib_inflate/inftrees.h
index 5f5219b1240e..b70b4731ac7a 100644
--- a/lib/zlib_inflate/inftrees.h
+++ b/lib/zlib_inflate/inftrees.h
@@ -1,3 +1,6 @@
1#ifndef INFTREES_H
2#define INFTREES_H
3
1/* inftrees.h -- header to use inftrees.c 4/* inftrees.h -- header to use inftrees.c
2 * Copyright (C) 1995-2005 Mark Adler 5 * Copyright (C) 1995-2005 Mark Adler
3 * For conditions of distribution and use, see copyright notice in zlib.h 6 * For conditions of distribution and use, see copyright notice in zlib.h
@@ -53,3 +56,4 @@ typedef enum {
53extern int zlib_inflate_table (codetype type, unsigned short *lens, 56extern int zlib_inflate_table (codetype type, unsigned short *lens,
54 unsigned codes, code **table, 57 unsigned codes, code **table,
55 unsigned *bits, unsigned short *work); 58 unsigned *bits, unsigned short *work);
59#endif