aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-10-31 12:25:15 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-10-31 12:25:15 -0400
commit59fc453b21f767f2fb0ff4dc0a947e9b9c9e6d14 (patch)
tree42029c432982ebabb462bd16a413a033125fd793 /lib
parent310c7585e8300ddc46211df0757c11e4299ec482 (diff)
parent2ebe82288b3278b8e538ee8adce4142dbdedd8f6 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton: - the rest of MM - lib/bitmap updates - hfs updates - fatfs updates - various other misc things * emailed patches from Andrew Morton <akpm@linux-foundation.org>: (94 commits) mm/gup.c: fix __get_user_pages_fast() comment mm: Fix warning in insert_pfn() memory-hotplug.rst: add some details about locking internals powerpc/powernv: hold device_hotplug_lock when calling memtrace_offline_pages() powerpc/powernv: hold device_hotplug_lock when calling device_online() mm/memory_hotplug: fix online/offline_pages called w.o. mem_hotplug_lock mm/memory_hotplug: make add_memory() take the device_hotplug_lock mm/memory_hotplug: make remove_memory() take the device_hotplug_lock mm/memblock.c: warn if zero alignment was requested memblock: stop using implicit alignment to SMP_CACHE_BYTES docs/boot-time-mm: remove bootmem documentation mm: remove include/linux/bootmem.h memblock: replace BOOTMEM_ALLOC_* with MEMBLOCK variants mm: remove nobootmem memblock: rename __free_pages_bootmem to memblock_free_pages memblock: rename free_all_bootmem to memblock_free_all memblock: replace free_bootmem_late with memblock_free_late memblock: replace free_bootmem{_node} with memblock_free mm: nobootmem: remove bootmem allocation APIs memblock: replace alloc_bootmem with memblock_alloc ...
Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig.debug3
-rw-r--r--lib/bitmap.c22
-rw-r--r--lib/cpumask.c4
-rw-r--r--lib/kstrtox.c16
-rw-r--r--lib/lz4/lz4_decompress.c481
-rw-r--r--lib/lz4/lz4defs.h9
-rw-r--r--lib/parser.c16
-rw-r--r--lib/sg_pool.c7
-rw-r--r--lib/zlib_inflate/inflate.c12
9 files changed, 386 insertions, 184 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e0ba05e6f6bd..1af29b8224fd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1292,7 +1292,7 @@ config DEBUG_KOBJECT
1292 depends on DEBUG_KERNEL 1292 depends on DEBUG_KERNEL
1293 help 1293 help
1294 If you say Y here, some extra kobject debugging messages will be sent 1294 If you say Y here, some extra kobject debugging messages will be sent
1295 to the syslog. 1295 to the syslog.
1296 1296
1297config DEBUG_KOBJECT_RELEASE 1297config DEBUG_KOBJECT_RELEASE
1298 bool "kobject release debugging" 1298 bool "kobject release debugging"
@@ -1980,7 +1980,6 @@ endif # RUNTIME_TESTING_MENU
1980 1980
1981config MEMTEST 1981config MEMTEST
1982 bool "Memtest" 1982 bool "Memtest"
1983 depends on HAVE_MEMBLOCK
1984 ---help--- 1983 ---help---
1985 This option adds a kernel parameter 'memtest', which allows memtest 1984 This option adds a kernel parameter 'memtest', which allows memtest
1986 to be set. 1985 to be set.
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 2fd07f6df0b8..eead55aa7170 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -13,6 +13,7 @@
13#include <linux/bitops.h> 13#include <linux/bitops.h>
14#include <linux/bug.h> 14#include <linux/bug.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/mm.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/string.h> 18#include <linux/string.h>
18#include <linux/uaccess.h> 19#include <linux/uaccess.h>
@@ -36,11 +37,6 @@
36 * carefully filter out these unused bits from impacting their 37 * carefully filter out these unused bits from impacting their
37 * results. 38 * results.
38 * 39 *
39 * These operations actually hold to a slightly stronger rule:
40 * if you don't input any bitmaps to these ops that have some
41 * unused bits set, then they won't output any set unused bits
42 * in output bitmaps.
43 *
44 * The byte ordering of bitmaps is more natural on little 40 * The byte ordering of bitmaps is more natural on little
45 * endian architectures. See the big-endian headers 41 * endian architectures. See the big-endian headers
46 * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h 42 * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
@@ -466,20 +462,18 @@ EXPORT_SYMBOL(bitmap_parse_user);
466 * ranges if list is specified or hex digits grouped into comma-separated 462 * ranges if list is specified or hex digits grouped into comma-separated
467 * sets of 8 digits/set. Returns the number of characters written to buf. 463 * sets of 8 digits/set. Returns the number of characters written to buf.
468 * 464 *
469 * It is assumed that @buf is a pointer into a PAGE_SIZE area and that 465 * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
470 * sufficient storage remains at @buf to accommodate the 466 * area and that sufficient storage remains at @buf to accommodate the
471 * bitmap_print_to_pagebuf() output. 467 * bitmap_print_to_pagebuf() output. Returns the number of characters
468 * actually printed to @buf, excluding terminating '\0'.
472 */ 469 */
473int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, 470int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
474 int nmaskbits) 471 int nmaskbits)
475{ 472{
476 ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf; 473 ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
477 int n = 0;
478 474
479 if (len > 1) 475 return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
480 n = list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) : 476 scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
481 scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
482 return n;
483} 477}
484EXPORT_SYMBOL(bitmap_print_to_pagebuf); 478EXPORT_SYMBOL(bitmap_print_to_pagebuf);
485 479
diff --git a/lib/cpumask.c b/lib/cpumask.c
index beca6244671a..8d666ab84b5c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -4,7 +4,7 @@
4#include <linux/bitops.h> 4#include <linux/bitops.h>
5#include <linux/cpumask.h> 5#include <linux/cpumask.h>
6#include <linux/export.h> 6#include <linux/export.h>
7#include <linux/bootmem.h> 7#include <linux/memblock.h>
8 8
9/** 9/**
10 * cpumask_next - get the next cpu in a cpumask 10 * cpumask_next - get the next cpu in a cpumask
@@ -163,7 +163,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var);
163 */ 163 */
164void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) 164void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
165{ 165{
166 *mask = memblock_virt_alloc(cpumask_size(), 0); 166 *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
167} 167}
168 168
169/** 169/**
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 661a1e807bd1..1006bf70bf74 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -175,7 +175,7 @@ int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
175 rv = kstrtoull(s, base, &tmp); 175 rv = kstrtoull(s, base, &tmp);
176 if (rv < 0) 176 if (rv < 0)
177 return rv; 177 return rv;
178 if (tmp != (unsigned long long)(unsigned long)tmp) 178 if (tmp != (unsigned long)tmp)
179 return -ERANGE; 179 return -ERANGE;
180 *res = tmp; 180 *res = tmp;
181 return 0; 181 return 0;
@@ -191,7 +191,7 @@ int _kstrtol(const char *s, unsigned int base, long *res)
191 rv = kstrtoll(s, base, &tmp); 191 rv = kstrtoll(s, base, &tmp);
192 if (rv < 0) 192 if (rv < 0)
193 return rv; 193 return rv;
194 if (tmp != (long long)(long)tmp) 194 if (tmp != (long)tmp)
195 return -ERANGE; 195 return -ERANGE;
196 *res = tmp; 196 *res = tmp;
197 return 0; 197 return 0;
@@ -222,7 +222,7 @@ int kstrtouint(const char *s, unsigned int base, unsigned int *res)
222 rv = kstrtoull(s, base, &tmp); 222 rv = kstrtoull(s, base, &tmp);
223 if (rv < 0) 223 if (rv < 0)
224 return rv; 224 return rv;
225 if (tmp != (unsigned long long)(unsigned int)tmp) 225 if (tmp != (unsigned int)tmp)
226 return -ERANGE; 226 return -ERANGE;
227 *res = tmp; 227 *res = tmp;
228 return 0; 228 return 0;
@@ -253,7 +253,7 @@ int kstrtoint(const char *s, unsigned int base, int *res)
253 rv = kstrtoll(s, base, &tmp); 253 rv = kstrtoll(s, base, &tmp);
254 if (rv < 0) 254 if (rv < 0)
255 return rv; 255 return rv;
256 if (tmp != (long long)(int)tmp) 256 if (tmp != (int)tmp)
257 return -ERANGE; 257 return -ERANGE;
258 *res = tmp; 258 *res = tmp;
259 return 0; 259 return 0;
@@ -268,7 +268,7 @@ int kstrtou16(const char *s, unsigned int base, u16 *res)
268 rv = kstrtoull(s, base, &tmp); 268 rv = kstrtoull(s, base, &tmp);
269 if (rv < 0) 269 if (rv < 0)
270 return rv; 270 return rv;
271 if (tmp != (unsigned long long)(u16)tmp) 271 if (tmp != (u16)tmp)
272 return -ERANGE; 272 return -ERANGE;
273 *res = tmp; 273 *res = tmp;
274 return 0; 274 return 0;
@@ -283,7 +283,7 @@ int kstrtos16(const char *s, unsigned int base, s16 *res)
283 rv = kstrtoll(s, base, &tmp); 283 rv = kstrtoll(s, base, &tmp);
284 if (rv < 0) 284 if (rv < 0)
285 return rv; 285 return rv;
286 if (tmp != (long long)(s16)tmp) 286 if (tmp != (s16)tmp)
287 return -ERANGE; 287 return -ERANGE;
288 *res = tmp; 288 *res = tmp;
289 return 0; 289 return 0;
@@ -298,7 +298,7 @@ int kstrtou8(const char *s, unsigned int base, u8 *res)
298 rv = kstrtoull(s, base, &tmp); 298 rv = kstrtoull(s, base, &tmp);
299 if (rv < 0) 299 if (rv < 0)
300 return rv; 300 return rv;
301 if (tmp != (unsigned long long)(u8)tmp) 301 if (tmp != (u8)tmp)
302 return -ERANGE; 302 return -ERANGE;
303 *res = tmp; 303 *res = tmp;
304 return 0; 304 return 0;
@@ -313,7 +313,7 @@ int kstrtos8(const char *s, unsigned int base, s8 *res)
313 rv = kstrtoll(s, base, &tmp); 313 rv = kstrtoll(s, base, &tmp);
314 if (rv < 0) 314 if (rv < 0)
315 return rv; 315 return rv;
316 if (tmp != (long long)(s8)tmp) 316 if (tmp != (s8)tmp)
317 return -ERANGE; 317 return -ERANGE;
318 *res = tmp; 318 *res = tmp;
319 return 0; 319 return 0;
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 141734d255e4..0c9d3ad17e0f 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -43,30 +43,36 @@
43/*-***************************** 43/*-*****************************
44 * Decompression functions 44 * Decompression functions
45 *******************************/ 45 *******************************/
46/* LZ4_decompress_generic() : 46
47 * This generic decompression function cover all use cases. 47#define DEBUGLOG(l, ...) {} /* disabled */
48 * It shall be instantiated several times, using different sets of directives 48
49 * Note that it is important this generic function is really inlined, 49#ifndef assert
50#define assert(condition) ((void)0)
51#endif
52
53/*
54 * LZ4_decompress_generic() :
55 * This generic decompression function covers all use cases.
56 * It shall be instantiated several times, using different sets of directives.
57 * Note that it is important for performance that this function really get inlined,
50 * in order to remove useless branches during compilation optimization. 58 * in order to remove useless branches during compilation optimization.
51 */ 59 */
52static FORCE_INLINE int LZ4_decompress_generic( 60static FORCE_INLINE int LZ4_decompress_generic(
53 const char * const source, 61 const char * const src,
54 char * const dest, 62 char * const dst,
55 int inputSize, 63 int srcSize,
56 /* 64 /*
57 * If endOnInput == endOnInputSize, 65 * If endOnInput == endOnInputSize,
58 * this value is the max size of Output Buffer. 66 * this value is `dstCapacity`
59 */ 67 */
60 int outputSize, 68 int outputSize,
61 /* endOnOutputSize, endOnInputSize */ 69 /* endOnOutputSize, endOnInputSize */
62 int endOnInput, 70 endCondition_directive endOnInput,
63 /* full, partial */ 71 /* full, partial */
64 int partialDecoding, 72 earlyEnd_directive partialDecoding,
65 /* only used if partialDecoding == partial */
66 int targetOutputSize,
67 /* noDict, withPrefix64k, usingExtDict */ 73 /* noDict, withPrefix64k, usingExtDict */
68 int dict, 74 dict_directive dict,
69 /* == dest when no prefix */ 75 /* always <= dst, == dst when no prefix */
70 const BYTE * const lowPrefix, 76 const BYTE * const lowPrefix,
71 /* only if dict == usingExtDict */ 77 /* only if dict == usingExtDict */
72 const BYTE * const dictStart, 78 const BYTE * const dictStart,
@@ -74,35 +80,43 @@ static FORCE_INLINE int LZ4_decompress_generic(
74 const size_t dictSize 80 const size_t dictSize
75 ) 81 )
76{ 82{
77 /* Local Variables */ 83 const BYTE *ip = (const BYTE *) src;
78 const BYTE *ip = (const BYTE *) source; 84 const BYTE * const iend = ip + srcSize;
79 const BYTE * const iend = ip + inputSize;
80 85
81 BYTE *op = (BYTE *) dest; 86 BYTE *op = (BYTE *) dst;
82 BYTE * const oend = op + outputSize; 87 BYTE * const oend = op + outputSize;
83 BYTE *cpy; 88 BYTE *cpy;
84 BYTE *oexit = op + targetOutputSize;
85 const BYTE * const lowLimit = lowPrefix - dictSize;
86 89
87 const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize; 90 const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
88 static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; 91 static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
89 static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 }; 92 static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
90 93
91 const int safeDecode = (endOnInput == endOnInputSize); 94 const int safeDecode = (endOnInput == endOnInputSize);
92 const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB))); 95 const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
93 96
97 /* Set up the "end" pointers for the shortcut. */
98 const BYTE *const shortiend = iend -
99 (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
100 const BYTE *const shortoend = oend -
101 (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
102
103 DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__,
104 srcSize, outputSize);
105
94 /* Special cases */ 106 /* Special cases */
95 /* targetOutputSize too high => decode everything */ 107 assert(lowPrefix <= op);
96 if ((partialDecoding) && (oexit > oend - MFLIMIT)) 108 assert(src != NULL);
97 oexit = oend - MFLIMIT;
98 109
99 /* Empty output buffer */ 110 /* Empty output buffer */
100 if ((endOnInput) && (unlikely(outputSize == 0))) 111 if ((endOnInput) && (unlikely(outputSize == 0)))
101 return ((inputSize == 1) && (*ip == 0)) ? 0 : -1; 112 return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;
102 113
103 if ((!endOnInput) && (unlikely(outputSize == 0))) 114 if ((!endOnInput) && (unlikely(outputSize == 0)))
104 return (*ip == 0 ? 1 : -1); 115 return (*ip == 0 ? 1 : -1);
105 116
117 if ((endOnInput) && unlikely(srcSize == 0))
118 return -1;
119
106 /* Main Loop : decode sequences */ 120 /* Main Loop : decode sequences */
107 while (1) { 121 while (1) {
108 size_t length; 122 size_t length;
@@ -111,12 +125,74 @@ static FORCE_INLINE int LZ4_decompress_generic(
111 125
112 /* get literal length */ 126 /* get literal length */
113 unsigned int const token = *ip++; 127 unsigned int const token = *ip++;
114
115 length = token>>ML_BITS; 128 length = token>>ML_BITS;
116 129
130 /* ip < iend before the increment */
131 assert(!endOnInput || ip <= iend);
132
133 /*
134 * A two-stage shortcut for the most common case:
135 * 1) If the literal length is 0..14, and there is enough
136 * space, enter the shortcut and copy 16 bytes on behalf
137 * of the literals (in the fast mode, only 8 bytes can be
138 * safely copied this way).
139 * 2) Further if the match length is 4..18, copy 18 bytes
140 * in a similar manner; but we ensure that there's enough
141 * space in the output for those 18 bytes earlier, upon
142 * entering the shortcut (in other words, there is a
143 * combined check for both stages).
144 */
145 if ((endOnInput ? length != RUN_MASK : length <= 8)
146 /*
147 * strictly "less than" on input, to re-enter
148 * the loop with at least one byte
149 */
150 && likely((endOnInput ? ip < shortiend : 1) &
151 (op <= shortoend))) {
152 /* Copy the literals */
153 memcpy(op, ip, endOnInput ? 16 : 8);
154 op += length; ip += length;
155
156 /*
157 * The second stage:
158 * prepare for match copying, decode full info.
159 * If it doesn't work out, the info won't be wasted.
160 */
161 length = token & ML_MASK; /* match length */
162 offset = LZ4_readLE16(ip);
163 ip += 2;
164 match = op - offset;
165 assert(match <= op); /* check overflow */
166
167 /* Do not deal with overlapping matches. */
168 if ((length != ML_MASK) &&
169 (offset >= 8) &&
170 (dict == withPrefix64k || match >= lowPrefix)) {
171 /* Copy the match. */
172 memcpy(op + 0, match + 0, 8);
173 memcpy(op + 8, match + 8, 8);
174 memcpy(op + 16, match + 16, 2);
175 op += length + MINMATCH;
176 /* Both stages worked, load the next token. */
177 continue;
178 }
179
180 /*
181 * The second stage didn't work out, but the info
182 * is ready. Propel it right to the point of match
183 * copying.
184 */
185 goto _copy_match;
186 }
187
188 /* decode literal length */
117 if (length == RUN_MASK) { 189 if (length == RUN_MASK) {
118 unsigned int s; 190 unsigned int s;
119 191
192 if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {
193 /* overflow detection */
194 goto _output_error;
195 }
120 do { 196 do {
121 s = *ip++; 197 s = *ip++;
122 length += s; 198 length += s;
@@ -125,14 +201,14 @@ static FORCE_INLINE int LZ4_decompress_generic(
125 : 1) & (s == 255)); 201 : 1) & (s == 255));
126 202
127 if ((safeDecode) 203 if ((safeDecode)
128 && unlikely( 204 && unlikely((uptrval)(op) +
129 (size_t)(op + length) < (size_t)(op))) { 205 length < (uptrval)(op))) {
130 /* overflow detection */ 206 /* overflow detection */
131 goto _output_error; 207 goto _output_error;
132 } 208 }
133 if ((safeDecode) 209 if ((safeDecode)
134 && unlikely( 210 && unlikely((uptrval)(ip) +
135 (size_t)(ip + length) < (size_t)(ip))) { 211 length < (uptrval)(ip))) {
136 /* overflow detection */ 212 /* overflow detection */
137 goto _output_error; 213 goto _output_error;
138 } 214 }
@@ -140,16 +216,19 @@ static FORCE_INLINE int LZ4_decompress_generic(
140 216
141 /* copy literals */ 217 /* copy literals */
142 cpy = op + length; 218 cpy = op + length;
143 if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT)) 219 LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
220
221 if (((endOnInput) && ((cpy > oend - MFLIMIT)
144 || (ip + length > iend - (2 + 1 + LASTLITERALS)))) 222 || (ip + length > iend - (2 + 1 + LASTLITERALS))))
145 || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) { 223 || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
146 if (partialDecoding) { 224 if (partialDecoding) {
147 if (cpy > oend) { 225 if (cpy > oend) {
148 /* 226 /*
149 * Error : 227 * Partial decoding :
150 * write attempt beyond end of output buffer 228 * stop in the middle of literal segment
151 */ 229 */
152 goto _output_error; 230 cpy = oend;
231 length = oend - op;
153 } 232 }
154 if ((endOnInput) 233 if ((endOnInput)
155 && (ip + length > iend)) { 234 && (ip + length > iend)) {
@@ -184,29 +263,43 @@ static FORCE_INLINE int LZ4_decompress_generic(
184 memcpy(op, ip, length); 263 memcpy(op, ip, length);
185 ip += length; 264 ip += length;
186 op += length; 265 op += length;
266
187 /* Necessarily EOF, due to parsing restrictions */ 267 /* Necessarily EOF, due to parsing restrictions */
188 break; 268 if (!partialDecoding || (cpy == oend))
269 break;
270 } else {
271 /* may overwrite up to WILDCOPYLENGTH beyond cpy */
272 LZ4_wildCopy(op, ip, cpy);
273 ip += length;
274 op = cpy;
189 } 275 }
190 276
191 LZ4_wildCopy(op, ip, cpy);
192 ip += length;
193 op = cpy;
194
195 /* get offset */ 277 /* get offset */
196 offset = LZ4_readLE16(ip); 278 offset = LZ4_readLE16(ip);
197 ip += 2; 279 ip += 2;
198 match = op - offset; 280 match = op - offset;
199 281
200 if ((checkOffset) && (unlikely(match < lowLimit))) { 282 /* get matchlength */
283 length = token & ML_MASK;
284
285_copy_match:
286 if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
201 /* Error : offset outside buffers */ 287 /* Error : offset outside buffers */
202 goto _output_error; 288 goto _output_error;
203 } 289 }
204 290
205 /* costs ~1%; silence an msan warning when offset == 0 */ 291 /* costs ~1%; silence an msan warning when offset == 0 */
206 LZ4_write32(op, (U32)offset); 292 /*
293 * note : when partialDecoding, there is no guarantee that
294 * at least 4 bytes remain available in output buffer
295 */
296 if (!partialDecoding) {
297 assert(oend > op);
298 assert(oend - op >= 4);
299
300 LZ4_write32(op, (U32)offset);
301 }
207 302
208 /* get matchlength */
209 length = token & ML_MASK;
210 if (length == ML_MASK) { 303 if (length == ML_MASK) {
211 unsigned int s; 304 unsigned int s;
212 305
@@ -221,7 +314,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
221 314
222 if ((safeDecode) 315 if ((safeDecode)
223 && unlikely( 316 && unlikely(
224 (size_t)(op + length) < (size_t)op)) { 317 (uptrval)(op) + length < (uptrval)op)) {
225 /* overflow detection */ 318 /* overflow detection */
226 goto _output_error; 319 goto _output_error;
227 } 320 }
@@ -229,24 +322,26 @@ static FORCE_INLINE int LZ4_decompress_generic(
229 322
230 length += MINMATCH; 323 length += MINMATCH;
231 324
232 /* check external dictionary */ 325 /* match starting within external dictionary */
233 if ((dict == usingExtDict) && (match < lowPrefix)) { 326 if ((dict == usingExtDict) && (match < lowPrefix)) {
234 if (unlikely(op + length > oend - LASTLITERALS)) { 327 if (unlikely(op + length > oend - LASTLITERALS)) {
235 /* doesn't respect parsing restriction */ 328 /* doesn't respect parsing restriction */
236 goto _output_error; 329 if (!partialDecoding)
330 goto _output_error;
331 length = min(length, (size_t)(oend - op));
237 } 332 }
238 333
239 if (length <= (size_t)(lowPrefix - match)) { 334 if (length <= (size_t)(lowPrefix - match)) {
240 /* 335 /*
241 * match can be copied as a single segment 336 * match fits entirely within external
242 * from external dictionary 337 * dictionary : just copy
243 */ 338 */
244 memmove(op, dictEnd - (lowPrefix - match), 339 memmove(op, dictEnd - (lowPrefix - match),
245 length); 340 length);
246 op += length; 341 op += length;
247 } else { 342 } else {
248 /* 343 /*
249 * match encompass external 344 * match stretches into both external
250 * dictionary and current block 345 * dictionary and current block
251 */ 346 */
252 size_t const copySize = (size_t)(lowPrefix - match); 347 size_t const copySize = (size_t)(lowPrefix - match);
@@ -254,7 +349,6 @@ static FORCE_INLINE int LZ4_decompress_generic(
254 349
255 memcpy(op, dictEnd - copySize, copySize); 350 memcpy(op, dictEnd - copySize, copySize);
256 op += copySize; 351 op += copySize;
257
258 if (restSize > (size_t)(op - lowPrefix)) { 352 if (restSize > (size_t)(op - lowPrefix)) {
259 /* overlap copy */ 353 /* overlap copy */
260 BYTE * const endOfMatch = op + restSize; 354 BYTE * const endOfMatch = op + restSize;
@@ -267,23 +361,44 @@ static FORCE_INLINE int LZ4_decompress_generic(
267 op += restSize; 361 op += restSize;
268 } 362 }
269 } 363 }
270
271 continue; 364 continue;
272 } 365 }
273 366
274 /* copy match within block */ 367 /* copy match within block */
275 cpy = op + length; 368 cpy = op + length;
276 369
277 if (unlikely(offset < 8)) { 370 /*
278 const int dec64 = dec64table[offset]; 371 * partialDecoding :
372 * may not respect endBlock parsing restrictions
373 */
374 assert(op <= oend);
375 if (partialDecoding &&
376 (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
377 size_t const mlen = min(length, (size_t)(oend - op));
378 const BYTE * const matchEnd = match + mlen;
379 BYTE * const copyEnd = op + mlen;
380
381 if (matchEnd > op) {
382 /* overlap copy */
383 while (op < copyEnd)
384 *op++ = *match++;
385 } else {
386 memcpy(op, match, mlen);
387 }
388 op = copyEnd;
389 if (op == oend)
390 break;
391 continue;
392 }
279 393
394 if (unlikely(offset < 8)) {
280 op[0] = match[0]; 395 op[0] = match[0];
281 op[1] = match[1]; 396 op[1] = match[1];
282 op[2] = match[2]; 397 op[2] = match[2];
283 op[3] = match[3]; 398 op[3] = match[3];
284 match += dec32table[offset]; 399 match += inc32table[offset];
285 memcpy(op + 4, match, 4); 400 memcpy(op + 4, match, 4);
286 match -= dec64; 401 match -= dec64table[offset];
287 } else { 402 } else {
288 LZ4_copy8(op, match); 403 LZ4_copy8(op, match);
289 match += 8; 404 match += 8;
@@ -291,7 +406,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
291 406
292 op += 8; 407 op += 8;
293 408
294 if (unlikely(cpy > oend - 12)) { 409 if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
295 BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1); 410 BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
296 411
297 if (cpy > oend - LASTLITERALS) { 412 if (cpy > oend - LASTLITERALS) {
@@ -307,60 +422,139 @@ static FORCE_INLINE int LZ4_decompress_generic(
307 match += oCopyLimit - op; 422 match += oCopyLimit - op;
308 op = oCopyLimit; 423 op = oCopyLimit;
309 } 424 }
310
311 while (op < cpy) 425 while (op < cpy)
312 *op++ = *match++; 426 *op++ = *match++;
313 } else { 427 } else {
314 LZ4_copy8(op, match); 428 LZ4_copy8(op, match);
315
316 if (length > 16) 429 if (length > 16)
317 LZ4_wildCopy(op + 8, match + 8, cpy); 430 LZ4_wildCopy(op + 8, match + 8, cpy);
318 } 431 }
319 432 op = cpy; /* wildcopy correction */
320 op = cpy; /* correction */
321 } 433 }
322 434
323 /* end of decoding */ 435 /* end of decoding */
324 if (endOnInput) { 436 if (endOnInput) {
325 /* Nb of output bytes decoded */ 437 /* Nb of output bytes decoded */
326 return (int) (((char *)op) - dest); 438 return (int) (((char *)op) - dst);
327 } else { 439 } else {
328 /* Nb of input bytes read */ 440 /* Nb of input bytes read */
329 return (int) (((const char *)ip) - source); 441 return (int) (((const char *)ip) - src);
330 } 442 }
331 443
332 /* Overflow error detected */ 444 /* Overflow error detected */
333_output_error: 445_output_error:
334 return -1; 446 return (int) (-(((const char *)ip) - src)) - 1;
335} 447}
336 448
337int LZ4_decompress_safe(const char *source, char *dest, 449int LZ4_decompress_safe(const char *source, char *dest,
338 int compressedSize, int maxDecompressedSize) 450 int compressedSize, int maxDecompressedSize)
339{ 451{
340 return LZ4_decompress_generic(source, dest, compressedSize, 452 return LZ4_decompress_generic(source, dest,
341 maxDecompressedSize, endOnInputSize, full, 0, 453 compressedSize, maxDecompressedSize,
342 noDict, (BYTE *)dest, NULL, 0); 454 endOnInputSize, decode_full_block,
455 noDict, (BYTE *)dest, NULL, 0);
343} 456}
344 457
345int LZ4_decompress_safe_partial(const char *source, char *dest, 458int LZ4_decompress_safe_partial(const char *src, char *dst,
346 int compressedSize, int targetOutputSize, int maxDecompressedSize) 459 int compressedSize, int targetOutputSize, int dstCapacity)
347{ 460{
348 return LZ4_decompress_generic(source, dest, compressedSize, 461 dstCapacity = min(targetOutputSize, dstCapacity);
349 maxDecompressedSize, endOnInputSize, partial, 462 return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
350 targetOutputSize, noDict, (BYTE *)dest, NULL, 0); 463 endOnInputSize, partial_decode,
464 noDict, (BYTE *)dst, NULL, 0);
351} 465}
352 466
353int LZ4_decompress_fast(const char *source, char *dest, int originalSize) 467int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
354{ 468{
355 return LZ4_decompress_generic(source, dest, 0, originalSize, 469 return LZ4_decompress_generic(source, dest, 0, originalSize,
356 endOnOutputSize, full, 0, withPrefix64k, 470 endOnOutputSize, decode_full_block,
357 (BYTE *)(dest - 64 * KB), NULL, 64 * KB); 471 withPrefix64k,
472 (BYTE *)dest - 64 * KB, NULL, 0);
473}
474
475/* ===== Instantiate a few more decoding cases, used more than once. ===== */
476
477int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
478 int compressedSize, int maxOutputSize)
479{
480 return LZ4_decompress_generic(source, dest,
481 compressedSize, maxOutputSize,
482 endOnInputSize, decode_full_block,
483 withPrefix64k,
484 (BYTE *)dest - 64 * KB, NULL, 0);
485}
486
487static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
488 int compressedSize,
489 int maxOutputSize,
490 size_t prefixSize)
491{
492 return LZ4_decompress_generic(source, dest,
493 compressedSize, maxOutputSize,
494 endOnInputSize, decode_full_block,
495 noDict,
496 (BYTE *)dest - prefixSize, NULL, 0);
497}
498
499int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
500 int compressedSize, int maxOutputSize,
501 const void *dictStart, size_t dictSize)
502{
503 return LZ4_decompress_generic(source, dest,
504 compressedSize, maxOutputSize,
505 endOnInputSize, decode_full_block,
506 usingExtDict, (BYTE *)dest,
507 (const BYTE *)dictStart, dictSize);
358} 508}
359 509
510static int LZ4_decompress_fast_extDict(const char *source, char *dest,
511 int originalSize,
512 const void *dictStart, size_t dictSize)
513{
514 return LZ4_decompress_generic(source, dest,
515 0, originalSize,
516 endOnOutputSize, decode_full_block,
517 usingExtDict, (BYTE *)dest,
518 (const BYTE *)dictStart, dictSize);
519}
520
521/*
522 * The "double dictionary" mode, for use with e.g. ring buffers: the first part
523 * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
524 * These routines are used only once, in LZ4_decompress_*_continue().
525 */
526static FORCE_INLINE
527int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
528 int compressedSize, int maxOutputSize,
529 size_t prefixSize,
530 const void *dictStart, size_t dictSize)
531{
532 return LZ4_decompress_generic(source, dest,
533 compressedSize, maxOutputSize,
534 endOnInputSize, decode_full_block,
535 usingExtDict, (BYTE *)dest - prefixSize,
536 (const BYTE *)dictStart, dictSize);
537}
538
539static FORCE_INLINE
540int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
541 int originalSize, size_t prefixSize,
542 const void *dictStart, size_t dictSize)
543{
544 return LZ4_decompress_generic(source, dest,
545 0, originalSize,
546 endOnOutputSize, decode_full_block,
547 usingExtDict, (BYTE *)dest - prefixSize,
548 (const BYTE *)dictStart, dictSize);
549}
550
551/* ===== streaming decompression functions ===== */
552
360int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, 553int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
361 const char *dictionary, int dictSize) 554 const char *dictionary, int dictSize)
362{ 555{
363 LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *) LZ4_streamDecode; 556 LZ4_streamDecode_t_internal *lz4sd =
557 &LZ4_streamDecode->internal_donotuse;
364 558
365 lz4sd->prefixSize = (size_t) dictSize; 559 lz4sd->prefixSize = (size_t) dictSize;
366 lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize; 560 lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
@@ -382,35 +576,51 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
382int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, 576int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
383 const char *source, char *dest, int compressedSize, int maxOutputSize) 577 const char *source, char *dest, int compressedSize, int maxOutputSize)
384{ 578{
385 LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse; 579 LZ4_streamDecode_t_internal *lz4sd =
580 &LZ4_streamDecode->internal_donotuse;
386 int result; 581 int result;
387 582
388 if (lz4sd->prefixEnd == (BYTE *)dest) { 583 if (lz4sd->prefixSize == 0) {
389 result = LZ4_decompress_generic(source, dest, 584 /* The first call, no dictionary yet. */
390 compressedSize, 585 assert(lz4sd->extDictSize == 0);
391 maxOutputSize, 586 result = LZ4_decompress_safe(source, dest,
392 endOnInputSize, full, 0, 587 compressedSize, maxOutputSize);
393 usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, 588 if (result <= 0)
394 lz4sd->externalDict, 589 return result;
395 lz4sd->extDictSize); 590 lz4sd->prefixSize = result;
396 591 lz4sd->prefixEnd = (BYTE *)dest + result;
592 } else if (lz4sd->prefixEnd == (BYTE *)dest) {
593 /* They're rolling the current segment. */
594 if (lz4sd->prefixSize >= 64 * KB - 1)
595 result = LZ4_decompress_safe_withPrefix64k(source, dest,
596 compressedSize, maxOutputSize);
597 else if (lz4sd->extDictSize == 0)
598 result = LZ4_decompress_safe_withSmallPrefix(source,
599 dest, compressedSize, maxOutputSize,
600 lz4sd->prefixSize);
601 else
602 result = LZ4_decompress_safe_doubleDict(source, dest,
603 compressedSize, maxOutputSize,
604 lz4sd->prefixSize,
605 lz4sd->externalDict, lz4sd->extDictSize);
397 if (result <= 0) 606 if (result <= 0)
398 return result; 607 return result;
399
400 lz4sd->prefixSize += result; 608 lz4sd->prefixSize += result;
401 lz4sd->prefixEnd += result; 609 lz4sd->prefixEnd += result;
402 } else { 610 } else {
611 /*
612 * The buffer wraps around, or they're
613 * switching to another buffer.
614 */
403 lz4sd->extDictSize = lz4sd->prefixSize; 615 lz4sd->extDictSize = lz4sd->prefixSize;
404 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; 616 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
405 result = LZ4_decompress_generic(source, dest, 617 result = LZ4_decompress_safe_forceExtDict(source, dest,
406 compressedSize, maxOutputSize, 618 compressedSize, maxOutputSize,
407 endOnInputSize, full, 0,
408 usingExtDict, (BYTE *)dest,
409 lz4sd->externalDict, lz4sd->extDictSize); 619 lz4sd->externalDict, lz4sd->extDictSize);
410 if (result <= 0) 620 if (result <= 0)
411 return result; 621 return result;
412 lz4sd->prefixSize = result; 622 lz4sd->prefixSize = result;
413 lz4sd->prefixEnd = (BYTE *)dest + result; 623 lz4sd->prefixEnd = (BYTE *)dest + result;
414 } 624 }
415 625
416 return result; 626 return result;
@@ -422,75 +632,66 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
422 LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse; 632 LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
423 int result; 633 int result;
424 634
425 if (lz4sd->prefixEnd == (BYTE *)dest) { 635 if (lz4sd->prefixSize == 0) {
426 result = LZ4_decompress_generic(source, dest, 0, originalSize, 636 assert(lz4sd->extDictSize == 0);
427 endOnOutputSize, full, 0, 637 result = LZ4_decompress_fast(source, dest, originalSize);
428 usingExtDict, 638 if (result <= 0)
429 lz4sd->prefixEnd - lz4sd->prefixSize, 639 return result;
430 lz4sd->externalDict, lz4sd->extDictSize); 640 lz4sd->prefixSize = originalSize;
431 641 lz4sd->prefixEnd = (BYTE *)dest + originalSize;
642 } else if (lz4sd->prefixEnd == (BYTE *)dest) {
643 if (lz4sd->prefixSize >= 64 * KB - 1 ||
644 lz4sd->extDictSize == 0)
645 result = LZ4_decompress_fast(source, dest,
646 originalSize);
647 else
648 result = LZ4_decompress_fast_doubleDict(source, dest,
649 originalSize, lz4sd->prefixSize,
650 lz4sd->externalDict, lz4sd->extDictSize);
432 if (result <= 0) 651 if (result <= 0)
433 return result; 652 return result;
434
435 lz4sd->prefixSize += originalSize; 653 lz4sd->prefixSize += originalSize;
436 lz4sd->prefixEnd += originalSize; 654 lz4sd->prefixEnd += originalSize;
437 } else { 655 } else {
438 lz4sd->extDictSize = lz4sd->prefixSize; 656 lz4sd->extDictSize = lz4sd->prefixSize;
439 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; 657 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
440 result = LZ4_decompress_generic(source, dest, 0, originalSize, 658 result = LZ4_decompress_fast_extDict(source, dest,
441 endOnOutputSize, full, 0, 659 originalSize, lz4sd->externalDict, lz4sd->extDictSize);
442 usingExtDict, (BYTE *)dest,
443 lz4sd->externalDict, lz4sd->extDictSize);
444 if (result <= 0) 660 if (result <= 0)
445 return result; 661 return result;
446 lz4sd->prefixSize = originalSize; 662 lz4sd->prefixSize = originalSize;
447 lz4sd->prefixEnd = (BYTE *)dest + originalSize; 663 lz4sd->prefixEnd = (BYTE *)dest + originalSize;
448 } 664 }
449
450 return result; 665 return result;
451} 666}
452 667
453/* 668int LZ4_decompress_safe_usingDict(const char *source, char *dest,
454 * Advanced decoding functions : 669 int compressedSize, int maxOutputSize,
455 * *_usingDict() : 670 const char *dictStart, int dictSize)
456 * These decoding functions work the same as "_continue" ones,
457 * the dictionary must be explicitly provided within parameters
458 */
459static FORCE_INLINE int LZ4_decompress_usingDict_generic(const char *source,
460 char *dest, int compressedSize, int maxOutputSize, int safe,
461 const char *dictStart, int dictSize)
462{ 671{
463 if (dictSize == 0) 672 if (dictSize == 0)
464 return LZ4_decompress_generic(source, dest, 673 return LZ4_decompress_safe(source, dest,
465 compressedSize, maxOutputSize, safe, full, 0, 674 compressedSize, maxOutputSize);
466 noDict, (BYTE *)dest, NULL, 0); 675 if (dictStart+dictSize == dest) {
467 if (dictStart + dictSize == dest) { 676 if (dictSize >= 64 * KB - 1)
468 if (dictSize >= (int)(64 * KB - 1)) 677 return LZ4_decompress_safe_withPrefix64k(source, dest,
469 return LZ4_decompress_generic(source, dest, 678 compressedSize, maxOutputSize);
470 compressedSize, maxOutputSize, safe, full, 0, 679 return LZ4_decompress_safe_withSmallPrefix(source, dest,
471 withPrefix64k, (BYTE *)dest - 64 * KB, NULL, 0); 680 compressedSize, maxOutputSize, dictSize);
472 return LZ4_decompress_generic(source, dest, compressedSize,
473 maxOutputSize, safe, full, 0, noDict,
474 (BYTE *)dest - dictSize, NULL, 0);
475 } 681 }
476 return LZ4_decompress_generic(source, dest, compressedSize, 682 return LZ4_decompress_safe_forceExtDict(source, dest,
477 maxOutputSize, safe, full, 0, usingExtDict, 683 compressedSize, maxOutputSize, dictStart, dictSize);
478 (BYTE *)dest, (const BYTE *)dictStart, dictSize);
479}
480
481int LZ4_decompress_safe_usingDict(const char *source, char *dest,
482 int compressedSize, int maxOutputSize,
483 const char *dictStart, int dictSize)
484{
485 return LZ4_decompress_usingDict_generic(source, dest,
486 compressedSize, maxOutputSize, 1, dictStart, dictSize);
487} 684}
488 685
/*
 * One-shot fast decompression with an explicit dictionary.  A dictionary
 * that is empty or sits immediately before dest behaves like a plain
 * prefix, so the ordinary fast decoder suffices; otherwise the
 * external-dictionary path is taken.
 */
int LZ4_decompress_fast_usingDict(const char *source, char *dest,
				  int originalSize,
				  const char *dictStart, int dictSize)
{
	if (dictSize == 0 || dictStart + dictSize == dest)
		return LZ4_decompress_fast(source, dest, originalSize);

	return LZ4_decompress_fast_extDict(source, dest, originalSize,
					   dictStart, dictSize);
}
495 696
496#ifndef STATIC 697#ifndef STATIC
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index 00a0b58a0871..1a7fa9d9170f 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -75,6 +75,11 @@ typedef uintptr_t uptrval;
75#define WILDCOPYLENGTH 8 75#define WILDCOPYLENGTH 8
76#define LASTLITERALS 5 76#define LASTLITERALS 5
77#define MFLIMIT (WILDCOPYLENGTH + MINMATCH) 77#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
78/*
79 * ensure it's possible to write 2 x wildcopyLength
80 * without overflowing output buffer
81 */
82#define MATCH_SAFEGUARD_DISTANCE ((2 * WILDCOPYLENGTH) - MINMATCH)
78 83
79/* Increase this value ==> compression run slower on incompressible data */ 84/* Increase this value ==> compression run slower on incompressible data */
80#define LZ4_SKIPTRIGGER 6 85#define LZ4_SKIPTRIGGER 6
@@ -222,6 +227,8 @@ typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
222typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; 227typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
223 228
224typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive; 229typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
225typedef enum { full = 0, partial = 1 } earlyEnd_directive; 230typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
231
232#define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c))
226 233
227#endif 234#endif
diff --git a/lib/parser.c b/lib/parser.c
index 3278958b472a..dd70e5e6c9e2 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -131,13 +131,10 @@ static int match_number(substring_t *s, int *result, int base)
131 char *buf; 131 char *buf;
132 int ret; 132 int ret;
133 long val; 133 long val;
134 size_t len = s->to - s->from;
135 134
136 buf = kmalloc(len + 1, GFP_KERNEL); 135 buf = match_strdup(s);
137 if (!buf) 136 if (!buf)
138 return -ENOMEM; 137 return -ENOMEM;
139 memcpy(buf, s->from, len);
140 buf[len] = '\0';
141 138
142 ret = 0; 139 ret = 0;
143 val = simple_strtol(buf, &endp, base); 140 val = simple_strtol(buf, &endp, base);
@@ -166,13 +163,10 @@ static int match_u64int(substring_t *s, u64 *result, int base)
166 char *buf; 163 char *buf;
167 int ret; 164 int ret;
168 u64 val; 165 u64 val;
169 size_t len = s->to - s->from;
170 166
171 buf = kmalloc(len + 1, GFP_KERNEL); 167 buf = match_strdup(s);
172 if (!buf) 168 if (!buf)
173 return -ENOMEM; 169 return -ENOMEM;
174 memcpy(buf, s->from, len);
175 buf[len] = '\0';
176 170
177 ret = kstrtoull(buf, base, &val); 171 ret = kstrtoull(buf, base, &val);
178 if (!ret) 172 if (!ret)
@@ -327,10 +321,6 @@ EXPORT_SYMBOL(match_strlcpy);
327 */ 321 */
328char *match_strdup(const substring_t *s) 322char *match_strdup(const substring_t *s)
329{ 323{
330 size_t sz = s->to - s->from + 1; 324 return kmemdup_nul(s->from, s->to - s->from, GFP_KERNEL);
331 char *p = kmalloc(sz, GFP_KERNEL);
332 if (p)
333 match_strlcpy(p, s, sz);
334 return p;
335} 325}
336EXPORT_SYMBOL(match_strdup); 326EXPORT_SYMBOL(match_strdup);
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index 6dd30615a201..d1c1e6388eaa 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -148,10 +148,9 @@ static __init int sg_pool_init(void)
148cleanup_sdb: 148cleanup_sdb:
149 for (i = 0; i < SG_MEMPOOL_NR; i++) { 149 for (i = 0; i < SG_MEMPOOL_NR; i++) {
150 struct sg_pool *sgp = sg_pools + i; 150 struct sg_pool *sgp = sg_pools + i;
151 if (sgp->pool) 151
152 mempool_destroy(sgp->pool); 152 mempool_destroy(sgp->pool);
153 if (sgp->slab) 153 kmem_cache_destroy(sgp->slab);
154 kmem_cache_destroy(sgp->slab);
155 } 154 }
156 155
157 return -ENOMEM; 156 return -ENOMEM;
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 58a733b10387..48f14cd58c77 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -382,6 +382,7 @@ int zlib_inflate(z_streamp strm, int flush)
382 strm->adler = state->check = REVERSE(hold); 382 strm->adler = state->check = REVERSE(hold);
383 INITBITS(); 383 INITBITS();
384 state->mode = DICT; 384 state->mode = DICT;
385 /* fall through */
385 case DICT: 386 case DICT:
386 if (state->havedict == 0) { 387 if (state->havedict == 0) {
387 RESTORE(); 388 RESTORE();
@@ -389,8 +390,10 @@ int zlib_inflate(z_streamp strm, int flush)
389 } 390 }
390 strm->adler = state->check = zlib_adler32(0L, NULL, 0); 391 strm->adler = state->check = zlib_adler32(0L, NULL, 0);
391 state->mode = TYPE; 392 state->mode = TYPE;
393 /* fall through */
392 case TYPE: 394 case TYPE:
393 if (flush == Z_BLOCK) goto inf_leave; 395 if (flush == Z_BLOCK) goto inf_leave;
396 /* fall through */
394 case TYPEDO: 397 case TYPEDO:
395 if (state->last) { 398 if (state->last) {
396 BYTEBITS(); 399 BYTEBITS();
@@ -428,6 +431,7 @@ int zlib_inflate(z_streamp strm, int flush)
428 state->length = (unsigned)hold & 0xffff; 431 state->length = (unsigned)hold & 0xffff;
429 INITBITS(); 432 INITBITS();
430 state->mode = COPY; 433 state->mode = COPY;
434 /* fall through */
431 case COPY: 435 case COPY:
432 copy = state->length; 436 copy = state->length;
433 if (copy) { 437 if (copy) {
@@ -461,6 +465,7 @@ int zlib_inflate(z_streamp strm, int flush)
461#endif 465#endif
462 state->have = 0; 466 state->have = 0;
463 state->mode = LENLENS; 467 state->mode = LENLENS;
468 /* fall through */
464 case LENLENS: 469 case LENLENS:
465 while (state->have < state->ncode) { 470 while (state->have < state->ncode) {
466 NEEDBITS(3); 471 NEEDBITS(3);
@@ -481,6 +486,7 @@ int zlib_inflate(z_streamp strm, int flush)
481 } 486 }
482 state->have = 0; 487 state->have = 0;
483 state->mode = CODELENS; 488 state->mode = CODELENS;
489 /* fall through */
484 case CODELENS: 490 case CODELENS:
485 while (state->have < state->nlen + state->ndist) { 491 while (state->have < state->nlen + state->ndist) {
486 for (;;) { 492 for (;;) {
@@ -554,6 +560,7 @@ int zlib_inflate(z_streamp strm, int flush)
554 break; 560 break;
555 } 561 }
556 state->mode = LEN; 562 state->mode = LEN;
563 /* fall through */
557 case LEN: 564 case LEN:
558 if (have >= 6 && left >= 258) { 565 if (have >= 6 && left >= 258) {
559 RESTORE(); 566 RESTORE();
@@ -593,6 +600,7 @@ int zlib_inflate(z_streamp strm, int flush)
593 } 600 }
594 state->extra = (unsigned)(this.op) & 15; 601 state->extra = (unsigned)(this.op) & 15;
595 state->mode = LENEXT; 602 state->mode = LENEXT;
603 /* fall through */
596 case LENEXT: 604 case LENEXT:
597 if (state->extra) { 605 if (state->extra) {
598 NEEDBITS(state->extra); 606 NEEDBITS(state->extra);
@@ -600,6 +608,7 @@ int zlib_inflate(z_streamp strm, int flush)
600 DROPBITS(state->extra); 608 DROPBITS(state->extra);
601 } 609 }
602 state->mode = DIST; 610 state->mode = DIST;
611 /* fall through */
603 case DIST: 612 case DIST:
604 for (;;) { 613 for (;;) {
605 this = state->distcode[BITS(state->distbits)]; 614 this = state->distcode[BITS(state->distbits)];
@@ -625,6 +634,7 @@ int zlib_inflate(z_streamp strm, int flush)
625 state->offset = (unsigned)this.val; 634 state->offset = (unsigned)this.val;
626 state->extra = (unsigned)(this.op) & 15; 635 state->extra = (unsigned)(this.op) & 15;
627 state->mode = DISTEXT; 636 state->mode = DISTEXT;
637 /* fall through */
628 case DISTEXT: 638 case DISTEXT:
629 if (state->extra) { 639 if (state->extra) {
630 NEEDBITS(state->extra); 640 NEEDBITS(state->extra);
@@ -644,6 +654,7 @@ int zlib_inflate(z_streamp strm, int flush)
644 break; 654 break;
645 } 655 }
646 state->mode = MATCH; 656 state->mode = MATCH;
657 /* fall through */
647 case MATCH: 658 case MATCH:
648 if (left == 0) goto inf_leave; 659 if (left == 0) goto inf_leave;
649 copy = out - left; 660 copy = out - left;
@@ -694,6 +705,7 @@ int zlib_inflate(z_streamp strm, int flush)
694 INITBITS(); 705 INITBITS();
695 } 706 }
696 state->mode = DONE; 707 state->mode = DONE;
708 /* fall through */
697 case DONE: 709 case DONE:
698 ret = Z_STREAM_END; 710 ret = Z_STREAM_END;
699 goto inf_leave; 711 goto inf_leave;