aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig15
-rw-r--r--lib/Kconfig.debug4
-rw-r--r--lib/Makefile6
-rw-r--r--lib/clz_ctz.c58
-rw-r--r--lib/crc-t10dif.c73
-rw-r--r--lib/decompress.c5
-rw-r--r--lib/decompress_unlz4.c187
-rw-r--r--lib/earlycpio.c2
-rw-r--r--lib/llist.c15
-rw-r--r--lib/locking-selftest.c1
-rw-r--r--lib/lz4/Makefile3
-rw-r--r--lib/lz4/lz4_compress.c443
-rw-r--r--lib/lz4/lz4_decompress.c326
-rw-r--r--lib/lz4/lz4defs.h156
-rw-r--r--lib/lz4/lz4hc_compress.c539
-rw-r--r--lib/mpi/longlong.h17
-rw-r--r--lib/percpu_counter.c2
-rw-r--r--lib/scatterlist.c133
-rw-r--r--lib/vsprintf.c124
19 files changed, 2028 insertions, 81 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index f1ed53c3aa44..71d9f81f6eed 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -66,8 +66,6 @@ config CRC16
66 66
67config CRC_T10DIF 67config CRC_T10DIF
68 tristate "CRC calculation for the T10 Data Integrity Field" 68 tristate "CRC calculation for the T10 Data Integrity Field"
69 select CRYPTO
70 select CRYPTO_CRCT10DIF
71 help 69 help
72 This option is only needed if a module that's not in the 70 This option is only needed if a module that's not in the
73 kernel tree needs to calculate CRC checks for use with the 71 kernel tree needs to calculate CRC checks for use with the
@@ -194,6 +192,15 @@ config LZO_COMPRESS
194config LZO_DECOMPRESS 192config LZO_DECOMPRESS
195 tristate 193 tristate
196 194
195config LZ4_COMPRESS
196 tristate
197
198config LZ4HC_COMPRESS
199 tristate
200
201config LZ4_DECOMPRESS
202 tristate
203
197source "lib/xz/Kconfig" 204source "lib/xz/Kconfig"
198 205
199# 206#
@@ -218,6 +225,10 @@ config DECOMPRESS_LZO
218 select LZO_DECOMPRESS 225 select LZO_DECOMPRESS
219 tristate 226 tristate
220 227
228config DECOMPRESS_LZ4
229 select LZ4_DECOMPRESS
230 tristate
231
221# 232#
222# Generic allocator support is selected if needed 233# Generic allocator support is selected if needed
223# 234#
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 88c8d9876702..1501aa553221 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -238,7 +238,7 @@ config DEBUG_SECTION_MISMATCH
238 any use of code/data previously in these sections would 238 any use of code/data previously in these sections would
239 most likely result in an oops. 239 most likely result in an oops.
240 In the code, functions and variables are annotated with 240 In the code, functions and variables are annotated with
241 __init, __cpuinit, etc. (see the full list in include/linux/init.h), 241 __init,, etc. (see the full list in include/linux/init.h),
242 which results in the code/data being placed in specific sections. 242 which results in the code/data being placed in specific sections.
243 The section mismatch analysis is always performed after a full 243 The section mismatch analysis is always performed after a full
244 kernel build, and enabling this option causes the following 244 kernel build, and enabling this option causes the following
@@ -1347,7 +1347,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
1347 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT 1347 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
1348 depends on !X86_64 1348 depends on !X86_64
1349 select STACKTRACE 1349 select STACKTRACE
1350 select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND 1350 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
1351 help 1351 help
1352 Provide stacktrace filter for fault-injection capabilities 1352 Provide stacktrace filter for fault-injection capabilities
1353 1353
diff --git a/lib/Makefile b/lib/Makefile
index c09e38eca87a..7baccfd8a4e9 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -23,7 +23,7 @@ lib-y += kobject.o klist.o
23 23
24obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 24obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
25 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 25 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
26 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o \ 26 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
27 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o 27 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
28obj-y += string_helpers.o 28obj-y += string_helpers.o
29obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o 29obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
@@ -75,6 +75,9 @@ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
75obj-$(CONFIG_BCH) += bch.o 75obj-$(CONFIG_BCH) += bch.o
76obj-$(CONFIG_LZO_COMPRESS) += lzo/ 76obj-$(CONFIG_LZO_COMPRESS) += lzo/
77obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ 77obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
78obj-$(CONFIG_LZ4_COMPRESS) += lz4/
79obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/
80obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/
78obj-$(CONFIG_XZ_DEC) += xz/ 81obj-$(CONFIG_XZ_DEC) += xz/
79obj-$(CONFIG_RAID6_PQ) += raid6/ 82obj-$(CONFIG_RAID6_PQ) += raid6/
80 83
@@ -83,6 +86,7 @@ lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
83lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o 86lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
84lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o 87lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o
85lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o 88lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o
89lib-$(CONFIG_DECOMPRESS_LZ4) += decompress_unlz4.o
86 90
87obj-$(CONFIG_TEXTSEARCH) += textsearch.o 91obj-$(CONFIG_TEXTSEARCH) += textsearch.o
88obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o 92obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
new file mode 100644
index 000000000000..a8f8379eb49f
--- /dev/null
+++ b/lib/clz_ctz.c
@@ -0,0 +1,58 @@
1/*
2 * lib/clz_ctz.c
3 *
4 * Copyright (C) 2013 Chanho Min <chanho.min@lge.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * __c[lt]z[sd]i2 can be overridden by linking arch-specific versions.
11 */
12
13#include <linux/export.h>
14#include <linux/kernel.h>
15
16int __weak __ctzsi2(int val)
17{
18 return __ffs(val);
19}
20EXPORT_SYMBOL(__ctzsi2);
21
22int __weak __clzsi2(int val)
23{
24 return 32 - fls(val);
25}
26EXPORT_SYMBOL(__clzsi2);
27
28#if BITS_PER_LONG == 32
29
30int __weak __clzdi2(long val)
31{
32 return 32 - fls((int)val);
33}
34EXPORT_SYMBOL(__clzdi2);
35
36int __weak __ctzdi2(long val)
37{
38 return __ffs((u32)val);
39}
40EXPORT_SYMBOL(__ctzdi2);
41
42#elif BITS_PER_LONG == 64
43
44int __weak __clzdi2(long val)
45{
46 return 64 - fls64((u64)val);
47}
48EXPORT_SYMBOL(__clzdi2);
49
50int __weak __ctzdi2(long val)
51{
52 return __ffs64((u64)val);
53}
54EXPORT_SYMBOL(__ctzdi2);
55
56#else
57#error BITS_PER_LONG not 32 or 64
58#endif
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index fe3428c07b47..fbbd66ed86cd 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -11,44 +11,57 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/crc-t10dif.h> 13#include <linux/crc-t10dif.h>
14#include <linux/err.h>
15#include <linux/init.h>
16#include <crypto/hash.h>
17 14
18static struct crypto_shash *crct10dif_tfm; 15/* Table generated using the following polynomium:
16 * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
17 * gt: 0x8bb7
18 */
19static const __u16 t10_dif_crc_table[256] = {
20 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
21 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
22 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
23 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
24 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
25 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
26 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
27 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
28 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
29 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
30 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
31 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
32 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
33 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
34 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
35 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
36 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
37 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
38 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
39 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
40 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
41 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
42 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
43 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
44 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
45 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
46 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
47 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
48 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
49 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
50 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
51 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
52};
19 53
20__u16 crc_t10dif(const unsigned char *buffer, size_t len) 54__u16 crc_t10dif(const unsigned char *buffer, size_t len)
21{ 55{
22 struct { 56 __u16 crc = 0;
23 struct shash_desc shash; 57 unsigned int i;
24 char ctx[2];
25 } desc;
26 int err;
27
28 desc.shash.tfm = crct10dif_tfm;
29 desc.shash.flags = 0;
30 *(__u16 *)desc.ctx = 0;
31 58
32 err = crypto_shash_update(&desc.shash, buffer, len); 59 for (i = 0 ; i < len ; i++)
33 BUG_ON(err); 60 crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
34 61
35 return *(__u16 *)desc.ctx; 62 return crc;
36} 63}
37EXPORT_SYMBOL(crc_t10dif); 64EXPORT_SYMBOL(crc_t10dif);
38 65
39static int __init crc_t10dif_mod_init(void)
40{
41 crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
42 return PTR_RET(crct10dif_tfm);
43}
44
45static void __exit crc_t10dif_mod_fini(void)
46{
47 crypto_free_shash(crct10dif_tfm);
48}
49
50module_init(crc_t10dif_mod_init);
51module_exit(crc_t10dif_mod_fini);
52
53MODULE_DESCRIPTION("T10 DIF CRC calculation"); 66MODULE_DESCRIPTION("T10 DIF CRC calculation");
54MODULE_LICENSE("GPL"); 67MODULE_LICENSE("GPL");
diff --git a/lib/decompress.c b/lib/decompress.c
index f8fdedaf7b3d..4d1cd0397aab 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -11,6 +11,7 @@
11#include <linux/decompress/unxz.h> 11#include <linux/decompress/unxz.h>
12#include <linux/decompress/inflate.h> 12#include <linux/decompress/inflate.h>
13#include <linux/decompress/unlzo.h> 13#include <linux/decompress/unlzo.h>
14#include <linux/decompress/unlz4.h>
14 15
15#include <linux/types.h> 16#include <linux/types.h>
16#include <linux/string.h> 17#include <linux/string.h>
@@ -31,6 +32,9 @@
31#ifndef CONFIG_DECOMPRESS_LZO 32#ifndef CONFIG_DECOMPRESS_LZO
32# define unlzo NULL 33# define unlzo NULL
33#endif 34#endif
35#ifndef CONFIG_DECOMPRESS_LZ4
36# define unlz4 NULL
37#endif
34 38
35struct compress_format { 39struct compress_format {
36 unsigned char magic[2]; 40 unsigned char magic[2];
@@ -45,6 +49,7 @@ static const struct compress_format compressed_formats[] __initconst = {
45 { {0x5d, 0x00}, "lzma", unlzma }, 49 { {0x5d, 0x00}, "lzma", unlzma },
46 { {0xfd, 0x37}, "xz", unxz }, 50 { {0xfd, 0x37}, "xz", unxz },
47 { {0x89, 0x4c}, "lzo", unlzo }, 51 { {0x89, 0x4c}, "lzo", unlzo },
52 { {0x02, 0x21}, "lz4", unlz4 },
48 { {0, 0}, NULL, NULL } 53 { {0, 0}, NULL, NULL }
49}; 54};
50 55
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
new file mode 100644
index 000000000000..3e67cfad16ad
--- /dev/null
+++ b/lib/decompress_unlz4.c
@@ -0,0 +1,187 @@
1/*
2 * Wrapper for decompressing LZ4-compressed kernel, initramfs, and initrd
3 *
4 * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifdef STATIC
12#define PREBOOT
13#include "lz4/lz4_decompress.c"
14#else
15#include <linux/decompress/unlz4.h>
16#endif
17#include <linux/types.h>
18#include <linux/lz4.h>
19#include <linux/decompress/mm.h>
20#include <linux/compiler.h>
21
22#include <asm/unaligned.h>
23
24/*
25 * Note: Uncompressed chunk size is used in the compressor side
26 * (userspace side for compression).
27 * It is hardcoded because there is not proper way to extract it
28 * from the binary stream which is generated by the preliminary
29 * version of LZ4 tool so far.
30 */
31#define LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE (8 << 20)
32#define ARCHIVE_MAGICNUMBER 0x184C2102
33
34STATIC inline int INIT unlz4(u8 *input, int in_len,
35 int (*fill) (void *, unsigned int),
36 int (*flush) (void *, unsigned int),
37 u8 *output, int *posp,
38 void (*error) (char *x))
39{
40 int ret = -1;
41 size_t chunksize = 0;
42 size_t uncomp_chunksize = LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE;
43 u8 *inp;
44 u8 *inp_start;
45 u8 *outp;
46 int size = in_len;
47#ifdef PREBOOT
48 size_t out_len = get_unaligned_le32(input + in_len);
49#endif
50 size_t dest_len;
51
52
53 if (output) {
54 outp = output;
55 } else if (!flush) {
56 error("NULL output pointer and no flush function provided");
57 goto exit_0;
58 } else {
59 outp = large_malloc(uncomp_chunksize);
60 if (!outp) {
61 error("Could not allocate output buffer");
62 goto exit_0;
63 }
64 }
65
66 if (input && fill) {
67 error("Both input pointer and fill function provided,");
68 goto exit_1;
69 } else if (input) {
70 inp = input;
71 } else if (!fill) {
72 error("NULL input pointer and missing fill function");
73 goto exit_1;
74 } else {
75 inp = large_malloc(lz4_compressbound(uncomp_chunksize));
76 if (!inp) {
77 error("Could not allocate input buffer");
78 goto exit_1;
79 }
80 }
81 inp_start = inp;
82
83 if (posp)
84 *posp = 0;
85
86 if (fill)
87 fill(inp, 4);
88
89 chunksize = get_unaligned_le32(inp);
90 if (chunksize == ARCHIVE_MAGICNUMBER) {
91 inp += 4;
92 size -= 4;
93 } else {
94 error("invalid header");
95 goto exit_2;
96 }
97
98 if (posp)
99 *posp += 4;
100
101 for (;;) {
102
103 if (fill)
104 fill(inp, 4);
105
106 chunksize = get_unaligned_le32(inp);
107 if (chunksize == ARCHIVE_MAGICNUMBER) {
108 inp += 4;
109 size -= 4;
110 if (posp)
111 *posp += 4;
112 continue;
113 }
114 inp += 4;
115 size -= 4;
116
117 if (posp)
118 *posp += 4;
119
120 if (fill) {
121 if (chunksize > lz4_compressbound(uncomp_chunksize)) {
122 error("chunk length is longer than allocated");
123 goto exit_2;
124 }
125 fill(inp, chunksize);
126 }
127#ifdef PREBOOT
128 if (out_len >= uncomp_chunksize) {
129 dest_len = uncomp_chunksize;
130 out_len -= dest_len;
131 } else
132 dest_len = out_len;
133 ret = lz4_decompress(inp, &chunksize, outp, dest_len);
134#else
135 dest_len = uncomp_chunksize;
136 ret = lz4_decompress_unknownoutputsize(inp, chunksize, outp,
137 &dest_len);
138#endif
139 if (ret < 0) {
140 error("Decoding failed");
141 goto exit_2;
142 }
143
144 if (flush && flush(outp, dest_len) != dest_len)
145 goto exit_2;
146 if (output)
147 outp += dest_len;
148 if (posp)
149 *posp += chunksize;
150
151 size -= chunksize;
152
153 if (size == 0)
154 break;
155 else if (size < 0) {
156 error("data corrupted");
157 goto exit_2;
158 }
159
160 inp += chunksize;
161 if (fill)
162 inp = inp_start;
163 }
164
165 ret = 0;
166exit_2:
167 if (!input)
168 large_free(inp_start);
169exit_1:
170 if (!output)
171 large_free(outp);
172exit_0:
173 return ret;
174}
175
176#ifdef PREBOOT
177STATIC int INIT decompress(unsigned char *buf, int in_len,
178 int(*fill)(void*, unsigned int),
179 int(*flush)(void*, unsigned int),
180 unsigned char *output,
181 int *posp,
182 void(*error)(char *x)
183 )
184{
185 return unlz4(buf, in_len - 4, fill, flush, output, posp, error);
186}
187#endif
diff --git a/lib/earlycpio.c b/lib/earlycpio.c
index 8078ef49cb79..7aa7ce250c94 100644
--- a/lib/earlycpio.c
+++ b/lib/earlycpio.c
@@ -63,7 +63,7 @@ enum cpio_fields {
63 * the match returned an empty filename string. 63 * the match returned an empty filename string.
64 */ 64 */
65 65
66struct cpio_data __cpuinit find_cpio_data(const char *path, void *data, 66struct cpio_data find_cpio_data(const char *path, void *data,
67 size_t len, long *offset) 67 size_t len, long *offset)
68{ 68{
69 const size_t cpio_header_len = 8*C_NFIELDS - 2; 69 const size_t cpio_header_len = 8*C_NFIELDS - 2;
diff --git a/lib/llist.c b/lib/llist.c
index 4a15115e90f8..4a70d120138c 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -39,18 +39,13 @@
39bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, 39bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
40 struct llist_head *head) 40 struct llist_head *head)
41{ 41{
42 struct llist_node *entry, *old_entry; 42 struct llist_node *first;
43 43
44 entry = head->first; 44 do {
45 for (;;) { 45 new_last->next = first = ACCESS_ONCE(head->first);
46 old_entry = entry; 46 } while (cmpxchg(&head->first, first, new_first) != first);
47 new_last->next = entry;
48 entry = cmpxchg(&head->first, old_entry, new_first);
49 if (entry == old_entry)
50 break;
51 }
52 47
53 return old_entry == NULL; 48 return !first;
54} 49}
55EXPORT_SYMBOL_GPL(llist_add_batch); 50EXPORT_SYMBOL_GPL(llist_add_batch);
56 51
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index aad024dde3c4..6dc09d8f4c24 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -12,6 +12,7 @@
12 */ 12 */
13#include <linux/rwsem.h> 13#include <linux/rwsem.h>
14#include <linux/mutex.h> 14#include <linux/mutex.h>
15#include <linux/ww_mutex.h>
15#include <linux/sched.h> 16#include <linux/sched.h>
16#include <linux/delay.h> 17#include <linux/delay.h>
17#include <linux/lockdep.h> 18#include <linux/lockdep.h>
diff --git a/lib/lz4/Makefile b/lib/lz4/Makefile
new file mode 100644
index 000000000000..8085d04e9309
--- /dev/null
+++ b/lib/lz4/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o
2obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o
3obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
new file mode 100644
index 000000000000..fd94058bd7f9
--- /dev/null
+++ b/lib/lz4/lz4_compress.c
@@ -0,0 +1,443 @@
1/*
2 * LZ4 - Fast LZ compression algorithm
3 * Copyright (C) 2011-2012, Yann Collet.
4 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
5
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * You can contact the author at :
30 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
31 * - LZ4 source repository : http://code.google.com/p/lz4/
32 *
33 * Changed for kernel use by:
34 * Chanho Min <chanho.min@lge.com>
35 */
36
37#include <linux/module.h>
38#include <linux/kernel.h>
39#include <linux/lz4.h>
40#include <asm/unaligned.h>
41#include "lz4defs.h"
42
43/*
44 * LZ4_compressCtx :
45 * -----------------
46 * Compress 'isize' bytes from 'source' into an output buffer 'dest' of
47 * maximum size 'maxOutputSize'. * If it cannot achieve it, compression
48 * will stop, and result of the function will be zero.
49 * return : the number of bytes written in buffer 'dest', or 0 if the
50 * compression fails
51 */
52static inline int lz4_compressctx(void *ctx,
53 const char *source,
54 char *dest,
55 int isize,
56 int maxoutputsize)
57{
58 HTYPE *hashtable = (HTYPE *)ctx;
59 const u8 *ip = (u8 *)source;
60#if LZ4_ARCH64
61 const BYTE * const base = ip;
62#else
63 const int base = 0;
64#endif
65 const u8 *anchor = ip;
66 const u8 *const iend = ip + isize;
67 const u8 *const mflimit = iend - MFLIMIT;
68 #define MATCHLIMIT (iend - LASTLITERALS)
69
70 u8 *op = (u8 *) dest;
71 u8 *const oend = op + maxoutputsize;
72 int length;
73 const int skipstrength = SKIPSTRENGTH;
74 u32 forwardh;
75 int lastrun;
76
77 /* Init */
78 if (isize < MINLENGTH)
79 goto _last_literals;
80
81 memset((void *)hashtable, 0, LZ4_MEM_COMPRESS);
82
83 /* First Byte */
84 hashtable[LZ4_HASH_VALUE(ip)] = ip - base;
85 ip++;
86 forwardh = LZ4_HASH_VALUE(ip);
87
88 /* Main Loop */
89 for (;;) {
90 int findmatchattempts = (1U << skipstrength) + 3;
91 const u8 *forwardip = ip;
92 const u8 *ref;
93 u8 *token;
94
95 /* Find a match */
96 do {
97 u32 h = forwardh;
98 int step = findmatchattempts++ >> skipstrength;
99 ip = forwardip;
100 forwardip = ip + step;
101
102 if (unlikely(forwardip > mflimit))
103 goto _last_literals;
104
105 forwardh = LZ4_HASH_VALUE(forwardip);
106 ref = base + hashtable[h];
107 hashtable[h] = ip - base;
108 } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
109
110 /* Catch up */
111 while ((ip > anchor) && (ref > (u8 *)source) &&
112 unlikely(ip[-1] == ref[-1])) {
113 ip--;
114 ref--;
115 }
116
117 /* Encode Literal length */
118 length = (int)(ip - anchor);
119 token = op++;
120 /* check output limit */
121 if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
122 (length >> 8) > oend))
123 return 0;
124
125 if (length >= (int)RUN_MASK) {
126 int len;
127 *token = (RUN_MASK << ML_BITS);
128 len = length - RUN_MASK;
129 for (; len > 254 ; len -= 255)
130 *op++ = 255;
131 *op++ = (u8)len;
132 } else
133 *token = (length << ML_BITS);
134
135 /* Copy Literals */
136 LZ4_BLINDCOPY(anchor, op, length);
137_next_match:
138 /* Encode Offset */
139 LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref));
140
141 /* Start Counting */
142 ip += MINMATCH;
143 /* MinMatch verified */
144 ref += MINMATCH;
145 anchor = ip;
146 while (likely(ip < MATCHLIMIT - (STEPSIZE - 1))) {
147 #if LZ4_ARCH64
148 u64 diff = A64(ref) ^ A64(ip);
149 #else
150 u32 diff = A32(ref) ^ A32(ip);
151 #endif
152 if (!diff) {
153 ip += STEPSIZE;
154 ref += STEPSIZE;
155 continue;
156 }
157 ip += LZ4_NBCOMMONBYTES(diff);
158 goto _endcount;
159 }
160 #if LZ4_ARCH64
161 if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) {
162 ip += 4;
163 ref += 4;
164 }
165 #endif
166 if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) {
167 ip += 2;
168 ref += 2;
169 }
170 if ((ip < MATCHLIMIT) && (*ref == *ip))
171 ip++;
172_endcount:
173 /* Encode MatchLength */
174 length = (int)(ip - anchor);
175 /* Check output limit */
176 if (unlikely(op + (1 + LASTLITERALS) + (length >> 8) > oend))
177 return 0;
178 if (length >= (int)ML_MASK) {
179 *token += ML_MASK;
180 length -= ML_MASK;
181 for (; length > 509 ; length -= 510) {
182 *op++ = 255;
183 *op++ = 255;
184 }
185 if (length > 254) {
186 length -= 255;
187 *op++ = 255;
188 }
189 *op++ = (u8)length;
190 } else
191 *token += length;
192
193 /* Test end of chunk */
194 if (ip > mflimit) {
195 anchor = ip;
196 break;
197 }
198
199 /* Fill table */
200 hashtable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base;
201
202 /* Test next position */
203 ref = base + hashtable[LZ4_HASH_VALUE(ip)];
204 hashtable[LZ4_HASH_VALUE(ip)] = ip - base;
205 if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {
206 token = op++;
207 *token = 0;
208 goto _next_match;
209 }
210
211 /* Prepare next loop */
212 anchor = ip++;
213 forwardh = LZ4_HASH_VALUE(ip);
214 }
215
216_last_literals:
217 /* Encode Last Literals */
218 lastrun = (int)(iend - anchor);
219 if (((char *)op - dest) + lastrun + 1
220 + ((lastrun + 255 - RUN_MASK) / 255) > (u32)maxoutputsize)
221 return 0;
222
223 if (lastrun >= (int)RUN_MASK) {
224 *op++ = (RUN_MASK << ML_BITS);
225 lastrun -= RUN_MASK;
226 for (; lastrun > 254 ; lastrun -= 255)
227 *op++ = 255;
228 *op++ = (u8)lastrun;
229 } else
230 *op++ = (lastrun << ML_BITS);
231 memcpy(op, anchor, iend - anchor);
232 op += iend - anchor;
233
234 /* End */
235 return (int)(((char *)op) - dest);
236}
237
238static inline int lz4_compress64kctx(void *ctx,
239 const char *source,
240 char *dest,
241 int isize,
242 int maxoutputsize)
243{
244 u16 *hashtable = (u16 *)ctx;
245 const u8 *ip = (u8 *) source;
246 const u8 *anchor = ip;
247 const u8 *const base = ip;
248 const u8 *const iend = ip + isize;
249 const u8 *const mflimit = iend - MFLIMIT;
250 #define MATCHLIMIT (iend - LASTLITERALS)
251
252 u8 *op = (u8 *) dest;
253 u8 *const oend = op + maxoutputsize;
254 int len, length;
255 const int skipstrength = SKIPSTRENGTH;
256 u32 forwardh;
257 int lastrun;
258
259 /* Init */
260 if (isize < MINLENGTH)
261 goto _last_literals;
262
263 memset((void *)hashtable, 0, LZ4_MEM_COMPRESS);
264
265 /* First Byte */
266 ip++;
267 forwardh = LZ4_HASH64K_VALUE(ip);
268
269 /* Main Loop */
270 for (;;) {
271 int findmatchattempts = (1U << skipstrength) + 3;
272 const u8 *forwardip = ip;
273 const u8 *ref;
274 u8 *token;
275
276 /* Find a match */
277 do {
278 u32 h = forwardh;
279 int step = findmatchattempts++ >> skipstrength;
280 ip = forwardip;
281 forwardip = ip + step;
282
283 if (forwardip > mflimit)
284 goto _last_literals;
285
286 forwardh = LZ4_HASH64K_VALUE(forwardip);
287 ref = base + hashtable[h];
288 hashtable[h] = (u16)(ip - base);
289 } while (A32(ref) != A32(ip));
290
291 /* Catch up */
292 while ((ip > anchor) && (ref > (u8 *)source)
293 && (ip[-1] == ref[-1])) {
294 ip--;
295 ref--;
296 }
297
298 /* Encode Literal length */
299 length = (int)(ip - anchor);
300 token = op++;
301 /* Check output limit */
302 if (unlikely(op + length + (2 + 1 + LASTLITERALS)
303 + (length >> 8) > oend))
304 return 0;
305 if (length >= (int)RUN_MASK) {
306 *token = (RUN_MASK << ML_BITS);
307 len = length - RUN_MASK;
308 for (; len > 254 ; len -= 255)
309 *op++ = 255;
310 *op++ = (u8)len;
311 } else
312 *token = (length << ML_BITS);
313
314 /* Copy Literals */
315 LZ4_BLINDCOPY(anchor, op, length);
316
317_next_match:
318 /* Encode Offset */
319 LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref));
320
321 /* Start Counting */
322 ip += MINMATCH;
323 /* MinMatch verified */
324 ref += MINMATCH;
325 anchor = ip;
326
327 while (ip < MATCHLIMIT - (STEPSIZE - 1)) {
328 #if LZ4_ARCH64
329 u64 diff = A64(ref) ^ A64(ip);
330 #else
331 u32 diff = A32(ref) ^ A32(ip);
332 #endif
333
334 if (!diff) {
335 ip += STEPSIZE;
336 ref += STEPSIZE;
337 continue;
338 }
339 ip += LZ4_NBCOMMONBYTES(diff);
340 goto _endcount;
341 }
342 #if LZ4_ARCH64
343 if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) {
344 ip += 4;
345 ref += 4;
346 }
347 #endif
348 if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) {
349 ip += 2;
350 ref += 2;
351 }
352 if ((ip < MATCHLIMIT) && (*ref == *ip))
353 ip++;
354_endcount:
355
356 /* Encode MatchLength */
357 len = (int)(ip - anchor);
358 /* Check output limit */
359 if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))
360 return 0;
361 if (len >= (int)ML_MASK) {
362 *token += ML_MASK;
363 len -= ML_MASK;
364 for (; len > 509 ; len -= 510) {
365 *op++ = 255;
366 *op++ = 255;
367 }
368 if (len > 254) {
369 len -= 255;
370 *op++ = 255;
371 }
372 *op++ = (u8)len;
373 } else
374 *token += len;
375
376 /* Test end of chunk */
377 if (ip > mflimit) {
378 anchor = ip;
379 break;
380 }
381
382 /* Fill table */
383 hashtable[LZ4_HASH64K_VALUE(ip-2)] = (u16)(ip - 2 - base);
384
385 /* Test next position */
386 ref = base + hashtable[LZ4_HASH64K_VALUE(ip)];
387 hashtable[LZ4_HASH64K_VALUE(ip)] = (u16)(ip - base);
388 if (A32(ref) == A32(ip)) {
389 token = op++;
390 *token = 0;
391 goto _next_match;
392 }
393
394 /* Prepare next loop */
395 anchor = ip++;
396 forwardh = LZ4_HASH64K_VALUE(ip);
397 }
398
399_last_literals:
400 /* Encode Last Literals */
401 lastrun = (int)(iend - anchor);
402 if (op + lastrun + 1 + (lastrun - RUN_MASK + 255) / 255 > oend)
403 return 0;
404 if (lastrun >= (int)RUN_MASK) {
405 *op++ = (RUN_MASK << ML_BITS);
406 lastrun -= RUN_MASK;
407 for (; lastrun > 254 ; lastrun -= 255)
408 *op++ = 255;
409 *op++ = (u8)lastrun;
410 } else
411 *op++ = (lastrun << ML_BITS);
412 memcpy(op, anchor, iend - anchor);
413 op += iend - anchor;
414 /* End */
415 return (int)(((char *)op) - dest);
416}
417
418int lz4_compress(const unsigned char *src, size_t src_len,
419 unsigned char *dst, size_t *dst_len, void *wrkmem)
420{
421 int ret = -1;
422 int out_len = 0;
423
424 if (src_len < LZ4_64KLIMIT)
425 out_len = lz4_compress64kctx(wrkmem, src, dst, src_len,
426 lz4_compressbound(src_len));
427 else
428 out_len = lz4_compressctx(wrkmem, src, dst, src_len,
429 lz4_compressbound(src_len));
430
431 if (out_len < 0)
432 goto exit;
433
434 *dst_len = out_len;
435
436 return 0;
437exit:
438 return ret;
439}
440EXPORT_SYMBOL_GPL(lz4_compress);
441
442MODULE_LICENSE("GPL");
443MODULE_DESCRIPTION("LZ4 compressor");
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
new file mode 100644
index 000000000000..d3414eae73a1
--- /dev/null
+++ b/lib/lz4/lz4_decompress.c
@@ -0,0 +1,326 @@
1/*
2 * LZ4 Decompressor for Linux kernel
3 *
4 * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
5 *
6 * Based on LZ4 implementation by Yann Collet.
7 *
8 * LZ4 - Fast LZ compression algorithm
9 * Copyright (C) 2011-2012, Yann Collet.
10 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions are
14 * met:
15 *
16 * * Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * * Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following disclaimer
20 * in the documentation and/or other materials provided with the
21 * distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
29 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
30 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
31 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
33 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 * You can contact the author at :
36 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
37 * - LZ4 source repository : http://code.google.com/p/lz4/
38 */
39
40#ifndef STATIC
41#include <linux/module.h>
42#include <linux/kernel.h>
43#endif
44#include <linux/lz4.h>
45
46#include <asm/unaligned.h>
47
48#include "lz4defs.h"
49
50static int lz4_uncompress(const char *source, char *dest, int osize)
51{
52 const BYTE *ip = (const BYTE *) source;
53 const BYTE *ref;
54 BYTE *op = (BYTE *) dest;
55 BYTE * const oend = op + osize;
56 BYTE *cpy;
57 unsigned token;
58 size_t length;
59 size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
60#if LZ4_ARCH64
61 size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
62#endif
63
64 while (1) {
65
66 /* get runlength */
67 token = *ip++;
68 length = (token >> ML_BITS);
69 if (length == RUN_MASK) {
70 size_t len;
71
72 len = *ip++;
73 for (; len == 255; length += 255)
74 len = *ip++;
75 length += len;
76 }
77
78 /* copy literals */
79 cpy = op + length;
80 if (unlikely(cpy > oend - COPYLENGTH)) {
81 /*
82 * Error: not enough place for another match
83 * (min 4) + 5 literals
84 */
85 if (cpy != oend)
86 goto _output_error;
87
88 memcpy(op, ip, length);
89 ip += length;
90 break; /* EOF */
91 }
92 LZ4_WILDCOPY(ip, op, cpy);
93 ip -= (op - cpy);
94 op = cpy;
95
96 /* get offset */
97 LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
98 ip += 2;
99
100 /* Error: offset create reference outside destination buffer */
101 if (unlikely(ref < (BYTE *const) dest))
102 goto _output_error;
103
104 /* get matchlength */
105 length = token & ML_MASK;
106 if (length == ML_MASK) {
107 for (; *ip == 255; length += 255)
108 ip++;
109 length += *ip++;
110 }
111
112 /* copy repeated sequence */
113 if (unlikely((op - ref) < STEPSIZE)) {
114#if LZ4_ARCH64
115 size_t dec64 = dec64table[op - ref];
116#else
117 const int dec64 = 0;
118#endif
119 op[0] = ref[0];
120 op[1] = ref[1];
121 op[2] = ref[2];
122 op[3] = ref[3];
123 op += 4;
124 ref += 4;
125 ref -= dec32table[op-ref];
126 PUT4(ref, op);
127 op += STEPSIZE - 4;
128 ref -= dec64;
129 } else {
130 LZ4_COPYSTEP(ref, op);
131 }
132 cpy = op + length - (STEPSIZE - 4);
133 if (cpy > (oend - COPYLENGTH)) {
134
135 /* Error: request to write beyond destination buffer */
136 if (cpy > oend)
137 goto _output_error;
138 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
139 while (op < cpy)
140 *op++ = *ref++;
141 op = cpy;
142 /*
143 * Check EOF (should never happen, since last 5 bytes
144 * are supposed to be literals)
145 */
146 if (op == oend)
147 goto _output_error;
148 continue;
149 }
150 LZ4_SECURECOPY(ref, op, cpy);
151 op = cpy; /* correction */
152 }
153 /* end of decoding */
154 return (int) (((char *)ip) - source);
155
156 /* write overflow error detected */
157_output_error:
158 return (int) (-(((char *)ip) - source));
159}
160
161static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
162 int isize, size_t maxoutputsize)
163{
164 const BYTE *ip = (const BYTE *) source;
165 const BYTE *const iend = ip + isize;
166 const BYTE *ref;
167
168
169 BYTE *op = (BYTE *) dest;
170 BYTE * const oend = op + maxoutputsize;
171 BYTE *cpy;
172
173 size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
174#if LZ4_ARCH64
175 size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
176#endif
177
178 /* Main Loop */
179 while (ip < iend) {
180
181 unsigned token;
182 size_t length;
183
184 /* get runlength */
185 token = *ip++;
186 length = (token >> ML_BITS);
187 if (length == RUN_MASK) {
188 int s = 255;
189 while ((ip < iend) && (s == 255)) {
190 s = *ip++;
191 length += s;
192 }
193 }
194 /* copy literals */
195 cpy = op + length;
196 if ((cpy > oend - COPYLENGTH) ||
197 (ip + length > iend - COPYLENGTH)) {
198
199 if (cpy > oend)
200 goto _output_error;/* writes beyond buffer */
201
202 if (ip + length != iend)
203 goto _output_error;/*
204 * Error: LZ4 format requires
205 * to consume all input
206 * at this stage
207 */
208 memcpy(op, ip, length);
209 op += length;
210 break;/* Necessarily EOF, due to parsing restrictions */
211 }
212 LZ4_WILDCOPY(ip, op, cpy);
213 ip -= (op - cpy);
214 op = cpy;
215
216 /* get offset */
217 LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
218 ip += 2;
219 if (ref < (BYTE * const) dest)
220 goto _output_error;
221 /*
222 * Error : offset creates reference
223 * outside of destination buffer
224 */
225
226 /* get matchlength */
227 length = (token & ML_MASK);
228 if (length == ML_MASK) {
229 while (ip < iend) {
230 int s = *ip++;
231 length += s;
232 if (s == 255)
233 continue;
234 break;
235 }
236 }
237
238 /* copy repeated sequence */
239 if (unlikely((op - ref) < STEPSIZE)) {
240#if LZ4_ARCH64
241 size_t dec64 = dec64table[op - ref];
242#else
243 const int dec64 = 0;
244#endif
245 op[0] = ref[0];
246 op[1] = ref[1];
247 op[2] = ref[2];
248 op[3] = ref[3];
249 op += 4;
250 ref += 4;
251 ref -= dec32table[op - ref];
252 PUT4(ref, op);
253 op += STEPSIZE - 4;
254 ref -= dec64;
255 } else {
256 LZ4_COPYSTEP(ref, op);
257 }
258 cpy = op + length - (STEPSIZE-4);
259 if (cpy > oend - COPYLENGTH) {
260 if (cpy > oend)
261 goto _output_error; /* write outside of buf */
262
263 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
264 while (op < cpy)
265 *op++ = *ref++;
266 op = cpy;
267 /*
268 * Check EOF (should never happen, since last 5 bytes
269 * are supposed to be literals)
270 */
271 if (op == oend)
272 goto _output_error;
273 continue;
274 }
275 LZ4_SECURECOPY(ref, op, cpy);
276 op = cpy; /* correction */
277 }
278 /* end of decoding */
279 return (int) (((char *) op) - dest);
280
281 /* write overflow error detected */
282_output_error:
283 return (int) (-(((char *) ip) - source));
284}
285
/*
 * lz4_decompress() - decompress when the original (decompressed) size
 * is known; *@src_len receives how many compressed bytes were consumed.
 *
 * Returns 0 on success, -1 on a malformed stream.
 */
int lz4_decompress(const char *src, size_t *src_len, char *dest,
		size_t actual_dest_len)
{
	int input_len = lz4_uncompress(src, dest, actual_dest_len);

	if (input_len < 0)
		return -1;

	*src_len = input_len;
	return 0;
}
301#ifndef STATIC
302EXPORT_SYMBOL_GPL(lz4_decompress);
303#endif
304
/*
 * lz4_decompress_unknownoutputsize() - decompress @src_len compressed
 * bytes into @dest, which holds at most *@dest_len bytes on entry;
 * *@dest_len receives the actual decompressed size.
 *
 * Returns 0 on success, -1 on a malformed or oversized stream.
 */
int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
		char *dest, size_t *dest_len)
{
	int out_len = lz4_uncompress_unknownoutputsize(src, dest, src_len,
			*dest_len);

	if (out_len < 0)
		return -1;

	*dest_len = out_len;
	return 0;
}
321#ifndef STATIC
322EXPORT_SYMBOL_GPL(lz4_decompress_unknownoutputsize);
323
324MODULE_LICENSE("GPL");
325MODULE_DESCRIPTION("LZ4 Decompressor");
326#endif
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
new file mode 100644
index 000000000000..abcecdc2d0f2
--- /dev/null
+++ b/lib/lz4/lz4defs.h
@@ -0,0 +1,156 @@
1/*
2 * lz4defs.h -- architecture specific defines
3 *
4 * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
/*
 * Detects 64 bits mode
 */
#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
	|| defined(__ppc64__) || defined(__LP64__))
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif

/*
 * Architecture-specific macros
 *
 * Unaligned loads/stores go through wrapper structs (or get_unaligned)
 * so the compiler never assumes natural alignment of LZ4 stream data.
 */
#define BYTE	u8
typedef struct _U16_S { u16 v; } U16_S;
typedef struct _U32_S { u32 v; } U32_S;
typedef struct _U64_S { u64 v; } U64_S;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)	\
	|| defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6	\
	&& defined(ARM_EFFICIENT_UNALIGNED_ACCESS)

/* Direct (possibly unaligned) accesses: cheap on these architectures */
#define A16(x) (((U16_S *)(x))->v)
#define A32(x) (((U32_S *)(x))->v)
#define A64(x) (((U64_S *)(x))->v)

#define PUT4(s, d) (A32(d) = A32(s))
#define PUT8(s, d) (A64(d) = A64(s))
#define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
	do {	\
		A16(p) = v; \
		p += 2; \
	} while (0)
#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */

/*
 * NOTE(review): A64/A32 cast through U16_S rather than U64_S/U32_S.
 * This works since only the field's address is taken before recasting,
 * but U64_S/U32_S would be clearer — confirm before changing.
 */
#define A64(x) get_unaligned((u64 *)&(((U16_S *)(x))->v))
#define A32(x) get_unaligned((u32 *)&(((U16_S *)(x))->v))
#define A16(x) get_unaligned((u16 *)&(((U16_S *)(x))->v))

#define PUT4(s, d) \
	put_unaligned(get_unaligned((const u32 *) s), (u32 *) d)
#define PUT8(s, d) \
	put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)

#define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
	do {	\
		put_unaligned(v, (u16 *)(p)); \
		p += 2; \
	} while (0)
#endif

/* Stream-format constants: a token byte packs a literal-run length
 * (high RUN_BITS) and a match length (low ML_BITS). */
#define COPYLENGTH 8
#define ML_BITS  4
#define ML_MASK  ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)
#define MEMORY_USAGE	14
#define MINMATCH	4
#define SKIPSTRENGTH	6
#define LASTLITERALS	5
#define MFLIMIT		(COPYLENGTH + MINMATCH)
#define MINLENGTH	(MFLIMIT + 1)
#define MAXD_LOG	16
#define MAXD		(1 << MAXD_LOG)
#define MAXD_MASK	(u32)(MAXD - 1)
#define MAX_DISTANCE	(MAXD - 1)
#define HASH_LOG	(MAXD_LOG - 1)
#define HASHTABLESIZE	(1 << HASH_LOG)
#define MAX_NB_ATTEMPTS	256
#define OPTIMAL_ML	(int)((ML_MASK-1)+MINMATCH)
#define LZ4_64KLIMIT	((1<<16) + (MFLIMIT - 1))
#define HASHLOG64K	((MEMORY_USAGE - 2) + 1)
#define HASH64KTABLESIZE	(1U << HASHLOG64K)
/* Fibonacci-style multiplicative hashes over the first 4 input bytes */
#define LZ4_HASH_VALUE(p)	(((A32(p)) * 2654435761U) >> \
				((MINMATCH * 8) - (MEMORY_USAGE-2)))
#define LZ4_HASH64K_VALUE(p)	(((A32(p)) * 2654435761U) >> \
				((MINMATCH * 8) - HASHLOG64K))
#define HASH_VALUE(p)		(((A32(p)) * 2654435761U) >> \
				((MINMATCH * 8) - HASH_LOG))

#if LZ4_ARCH64/* 64-bit */
#define STEPSIZE 8

#define LZ4_COPYSTEP(s, d)	\
	do {	\
		PUT8(s, d);	\
		d += 8;	\
		s += 8;	\
	} while (0)

#define LZ4_COPYPACKET(s, d)	LZ4_COPYSTEP(s, d)

/* Bounded variant: never starts a copy step at or past `e` */
#define LZ4_SECURECOPY(s, d, e)	\
	do {				\
		if (d < e) {		\
			LZ4_WILDCOPY(s, d, e);	\
		}			\
	} while (0)
#define HTYPE u32

/* Count matching leading bytes from a XOR of two words */
#ifdef __BIG_ENDIAN
#define LZ4_NBCOMMONBYTES(val) (__builtin_clzll(val) >> 3)
#else
#define LZ4_NBCOMMONBYTES(val) (__builtin_ctzll(val) >> 3)
#endif

#else	/* 32-bit */
#define STEPSIZE 4

#define LZ4_COPYSTEP(s, d)	\
	do {	\
		PUT4(s, d);	\
		d += 4;	\
		s += 4;	\
	} while (0)

#define LZ4_COPYPACKET(s, d)		\
	do {		\
		LZ4_COPYSTEP(s, d);	\
		LZ4_COPYSTEP(s, d);	\
	} while (0)

#define LZ4_SECURECOPY	LZ4_WILDCOPY
#define HTYPE const u8*

#ifdef __BIG_ENDIAN
#define LZ4_NBCOMMONBYTES(val) (__builtin_clz(val) >> 3)
#else
#define LZ4_NBCOMMONBYTES(val) (__builtin_ctz(val) >> 3)
#endif

#endif

/* d = s minus the little-endian 16-bit offset stored at p */
#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
	(d = s - get_unaligned_le16(p))

/* Copy in STEPSIZE chunks; may overrun `e` by up to STEPSIZE-1 bytes */
#define LZ4_WILDCOPY(s, d, e)		\
	do {				\
		LZ4_COPYPACKET(s, d);	\
	} while (d < e)

#define LZ4_BLINDCOPY(s, d, l)	\
	do {	\
		u8 *e = (d) + l;	\
		LZ4_WILDCOPY(s, d, e);	\
		d = e;	\
	} while (0)
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
new file mode 100644
index 000000000000..eb1a74f5e368
--- /dev/null
+++ b/lib/lz4/lz4hc_compress.c
@@ -0,0 +1,539 @@
1/*
2 * LZ4 HC - High Compression Mode of LZ4
3 * Copyright (C) 2011-2012, Yann Collet.
4 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * You can contact the author at :
30 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
31 * - LZ4 source repository : http://code.google.com/p/lz4/
32 *
33 * Changed for kernel use by:
34 * Chanho Min <chanho.min@lge.com>
35 */
36
37#include <linux/module.h>
38#include <linux/kernel.h>
39#include <linux/lz4.h>
40#include <asm/unaligned.h>
41#include "lz4defs.h"
42
/*
 * Work memory for the HC match finder: a hash table holding the most
 * recent position for each 4-byte hash, plus a 64K chain table of
 * backward deltas linking earlier occurrences of the same hash.
 */
struct lz4hc_data {
	const u8 *base;		/* start of the input buffer */
	HTYPE hashtable[HASHTABLESIZE];	/* hash -> newest position */
	u16 chaintable[MAXD];	/* position -> delta to previous position */
	const u8 *nexttoupdate;	/* first input position not yet inserted */
} __attribute__((__packed__));
49
/*
 * Reset the match-finder tables for a new input buffer starting at @base.
 * Chain entries are primed with 0xFFFF (maximum delta) so uninitialised
 * chains terminate quickly.  Always returns 1.
 */
static inline int lz4hc_init(struct lz4hc_data *hc4, const u8 *base)
{
	memset((void *)hc4->hashtable, 0, sizeof(hc4->hashtable));
	memset(hc4->chaintable, 0xFF, sizeof(hc4->chaintable));

#if LZ4_ARCH64
	/*
	 * On 64-bit, hash entries store offsets from base; skip position 0
	 * so an offset of 0 keeps meaning "empty slot".
	 */
	hc4->nexttoupdate = base + 1;
#else
	hc4->nexttoupdate = base;
#endif
	hc4->base = base;
	return 1;
}
63
64/* Update chains up to ip (excluded) */
65static inline void lz4hc_insert(struct lz4hc_data *hc4, const u8 *ip)
66{
67 u16 *chaintable = hc4->chaintable;
68 HTYPE *hashtable = hc4->hashtable;
69#if LZ4_ARCH64
70 const BYTE * const base = hc4->base;
71#else
72 const int base = 0;
73#endif
74
75 while (hc4->nexttoupdate < ip) {
76 const u8 *p = hc4->nexttoupdate;
77 size_t delta = p - (hashtable[HASH_VALUE(p)] + base);
78 if (delta > MAX_DISTANCE)
79 delta = MAX_DISTANCE;
80 chaintable[(size_t)(p) & MAXD_MASK] = (u16)delta;
81 hashtable[HASH_VALUE(p)] = (p) - base;
82 hc4->nexttoupdate++;
83 }
84}
85
/*
 * Return the length of the common prefix of @p1 and @p2, scanning no
 * further than @matchlimit.  Compares a machine word at a time, then
 * finishes with narrowing 4/2/1-byte tail comparisons.
 */
static inline size_t lz4hc_commonlength(const u8 *p1, const u8 *p2,
		const u8 *const matchlimit)
{
	const u8 *p1t = p1;

	while (p1t < matchlimit - (STEPSIZE - 1)) {
#if LZ4_ARCH64
		u64 diff = A64(p2) ^ A64(p1t);
#else
		u32 diff = A32(p2) ^ A32(p1t);
#endif
		if (!diff) {
			p1t += STEPSIZE;
			p2 += STEPSIZE;
			continue;
		}
		/* first differing byte index within the word */
		p1t += LZ4_NBCOMMONBYTES(diff);
		return p1t - p1;
	}
#if LZ4_ARCH64
	/* 64-bit steps by 8, so a 4-byte tail compare is still possible */
	if ((p1t < (matchlimit-3)) && (A32(p2) == A32(p1t))) {
		p1t += 4;
		p2 += 4;
	}
#endif

	if ((p1t < (matchlimit - 1)) && (A16(p2) == A16(p1t))) {
		p1t += 2;
		p2 += 2;
	}
	if ((p1t < matchlimit) && (*p2 == *p1t))
		p1t++;
	return p1t - p1;
}
120
/*
 * Insert @ip into the tables, then walk its hash chain (at most
 * MAX_NB_ATTEMPTS candidates) looking for the longest match ending
 * before @matchlimit.  On success *@matchpos points at the match and
 * the match length is returned; returns 0 when no match was found.
 */
static inline int lz4hc_insertandfindbestmatch(struct lz4hc_data *hc4,
		const u8 *ip, const u8 *const matchlimit, const u8 **matchpos)
{
	u16 *const chaintable = hc4->chaintable;
	HTYPE *const hashtable = hc4->hashtable;
	const u8 *ref;
#if LZ4_ARCH64
	const BYTE * const base = hc4->base;
#else
	const int base = 0;
#endif
	int nbattempts = MAX_NB_ATTEMPTS;
	size_t repl = 0, ml = 0;
	u16 delta;

	/* HC4 match finder */
	lz4hc_insert(hc4, ip);
	ref = hashtable[HASH_VALUE(ip)] + base;

	/* potential repetition */
	if (ref >= ip-4) {
		/* confirmed */
		if (A32(ref) == A32(ip)) {
			delta = (u16)(ip-ref);
			repl = ml = lz4hc_commonlength(ip + MINMATCH,
					ref + MINMATCH, matchlimit) + MINMATCH;
			*matchpos = ref;
		}
		ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
	}

	/* walk the chain of earlier positions with the same hash */
	while ((ref >= ip - MAX_DISTANCE) && nbattempts) {
		nbattempts--;
		/* cheap reject: byte at current-best length must match */
		if (*(ref + ml) == *(ip + ml)) {
			if (A32(ref) == A32(ip)) {
				size_t mlt =
					lz4hc_commonlength(ip + MINMATCH,
					ref + MINMATCH, matchlimit) + MINMATCH;
				if (mlt > ml) {
					ml = mlt;
					*matchpos = ref;
				}
			}
		}
		ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
	}

	/* Complete table */
	if (repl) {
		/* a repetition was found: pre-fill the chain entries the
		 * repeated region would otherwise insert one by one */
		const BYTE *ptr = ip;
		const BYTE *end;
		end = ip + repl - (MINMATCH-1);
		/* Pre-Load */
		while (ptr < end - delta) {
			chaintable[(size_t)(ptr) & MAXD_MASK] = delta;
			ptr++;
		}
		do {
			chaintable[(size_t)(ptr) & MAXD_MASK] = delta;
			/* Head of chain */
			hashtable[HASH_VALUE(ptr)] = (ptr) - base;
			ptr++;
		} while (ptr < end);
		hc4->nexttoupdate = end;
	}

	return (int)ml;
}
189
190static inline int lz4hc_insertandgetwidermatch(struct lz4hc_data *hc4,
191 const u8 *ip, const u8 *startlimit, const u8 *matchlimit, int longest,
192 const u8 **matchpos, const u8 **startpos)
193{
194 u16 *const chaintable = hc4->chaintable;
195 HTYPE *const hashtable = hc4->hashtable;
196#if LZ4_ARCH64
197 const BYTE * const base = hc4->base;
198#else
199 const int base = 0;
200#endif
201 const u8 *ref;
202 int nbattempts = MAX_NB_ATTEMPTS;
203 int delta = (int)(ip - startlimit);
204
205 /* First Match */
206 lz4hc_insert(hc4, ip);
207 ref = hashtable[HASH_VALUE(ip)] + base;
208
209 while ((ref >= ip - MAX_DISTANCE) && (ref >= hc4->base)
210 && (nbattempts)) {
211 nbattempts--;
212 if (*(startlimit + longest) == *(ref - delta + longest)) {
213 if (A32(ref) == A32(ip)) {
214 const u8 *reft = ref + MINMATCH;
215 const u8 *ipt = ip + MINMATCH;
216 const u8 *startt = ip;
217
218 while (ipt < matchlimit-(STEPSIZE - 1)) {
219 #if LZ4_ARCH64
220 u64 diff = A64(reft) ^ A64(ipt);
221 #else
222 u32 diff = A32(reft) ^ A32(ipt);
223 #endif
224
225 if (!diff) {
226 ipt += STEPSIZE;
227 reft += STEPSIZE;
228 continue;
229 }
230 ipt += LZ4_NBCOMMONBYTES(diff);
231 goto _endcount;
232 }
233 #if LZ4_ARCH64
234 if ((ipt < (matchlimit - 3))
235 && (A32(reft) == A32(ipt))) {
236 ipt += 4;
237 reft += 4;
238 }
239 ipt += 2;
240 #endif
241 if ((ipt < (matchlimit - 1))
242 && (A16(reft) == A16(ipt))) {
243 reft += 2;
244 }
245 if ((ipt < matchlimit) && (*reft == *ipt))
246 ipt++;
247_endcount:
248 reft = ref;
249
250 while ((startt > startlimit)
251 && (reft > hc4->base)
252 && (startt[-1] == reft[-1])) {
253 startt--;
254 reft--;
255 }
256
257 if ((ipt - startt) > longest) {
258 longest = (int)(ipt - startt);
259 *matchpos = reft;
260 *startpos = startt;
261 }
262 }
263 }
264 ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
265 }
266 return longest;
267}
268
/*
 * Emit one LZ4 sequence (literal run from *anchor to *ip, then a match
 * of length @ml at offset *ip - @ref) into *op, advancing *ip, *op and
 * *anchor past the encoded data.  Always returns 0.
 *
 * NOTE(review): no output bounds checking here — callers must have
 * sized the destination with lz4_compressbound(); confirm at call sites.
 */
static inline int lz4_encodesequence(const u8 **ip, u8 **op, const u8 **anchor,
		int ml, const u8 *ref)
{
	int length, len;
	u8 *token;

	/* Encode Literal length */
	length = (int)(*ip - *anchor);
	token = (*op)++;
	if (length >= (int)RUN_MASK) {
		/* long run: RUN_MASK in the token, remainder in 255-bytes */
		*token = (RUN_MASK << ML_BITS);
		len = length - RUN_MASK;
		for (; len > 254 ; len -= 255)
			*(*op)++ = 255;
		*(*op)++ = (u8)len;
	} else
		*token = (length << ML_BITS);

	/* Copy Literals */
	LZ4_BLINDCOPY(*anchor, *op, length);

	/* Encode Offset */
	LZ4_WRITE_LITTLEENDIAN_16(*op, (u16)(*ip - ref));

	/* Encode MatchLength */
	len = (int)(ml - MINMATCH);
	if (len >= (int)ML_MASK) {
		/* long match: ML_MASK in the token, remainder in 255-bytes */
		*token += ML_MASK;
		len -= ML_MASK;
		for (; len > 509 ; len -= 510) {
			*(*op)++ = 255;
			*(*op)++ = 255;
		}
		if (len > 254) {
			len -= 255;
			*(*op)++ = 255;
		}
		*(*op)++ = (u8)len;
	} else
		*token += len;

	/* Prepare next loop */
	*ip += ml;
	*anchor = *ip;

	return 0;
}
316
/*
 * High-compression LZ4 core: greedily finds a match at each position,
 * then speculatively searches for overlapping wider matches (up to
 * three candidate sequences at a time) before committing to an
 * encoding.  Returns the compressed size in bytes.
 *
 * NOTE(review): unlike lz4_compressctx, no output-bounds checks are
 * performed — the destination must be lz4_compressbound(isize) bytes.
 */
static int lz4_compresshcctx(struct lz4hc_data *ctx,
		const char *source,
		char *dest,
		int isize)
{
	const u8 *ip = (const u8 *)source;
	const u8 *anchor = ip;	/* start of pending literal run */
	const u8 *const iend = ip + isize;
	const u8 *const mflimit = iend - MFLIMIT;
	const u8 *const matchlimit = (iend - LASTLITERALS);

	u8 *op = (u8 *)dest;

	/* current / second / third candidate match lengths, plus a saved
	 * copy (ml0) in case a speculative skip must be undone */
	int ml, ml2, ml3, ml0;
	const u8 *ref = NULL;
	const u8 *start2 = NULL;
	const u8 *ref2 = NULL;
	const u8 *start3 = NULL;
	const u8 *ref3 = NULL;
	const u8 *start0;
	const u8 *ref0;
	int lastrun;

	ip++;

	/* Main Loop */
	while (ip < mflimit) {
		ml = lz4hc_insertandfindbestmatch(ctx, ip, matchlimit, (&ref));
		if (!ml) {
			ip++;
			continue;
		}

		/* saved, in case we would skip too much */
		start0 = ip;
		ref0 = ref;
		ml0 = ml;
_search2:
		/* look for a wider match overlapping the current one */
		if (ip+ml < mflimit)
			ml2 = lz4hc_insertandgetwidermatch(ctx, ip + ml - 2,
				ip + 1, matchlimit, ml, &ref2, &start2);
		else
			ml2 = ml;
		/* No better match */
		if (ml2 == ml) {
			lz4_encodesequence(&ip, &op, &anchor, ml, ref);
			continue;
		}

		if (start0 < ip) {
			/* empirical */
			if (start2 < ip + ml0) {
				ip = start0;
				ref = ref0;
				ml = ml0;
			}
		}
		/*
		 * Here, start0==ip
		 * First Match too small : removed
		 */
		if ((start2 - ip) < 3) {
			ml = ml2;
			ip = start2;
			ref = ref2;
			goto _search2;
		}

_search3:
		/*
		 * Currently we have :
		 * ml2 > ml1, and
		 * ip1+3 <= ip2 (usually < ip1+ml1)
		 */
		if ((start2 - ip) < OPTIMAL_ML) {
			/* shrink match 1 so match 2 keeps >= MINMATCH bytes */
			int correction;
			int new_ml = ml;
			if (new_ml > OPTIMAL_ML)
				new_ml = OPTIMAL_ML;
			if (ip + new_ml > start2 + ml2 - MINMATCH)
				new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
			correction = new_ml - (int)(start2 - ip);
			if (correction > 0) {
				start2 += correction;
				ref2 += correction;
				ml2 -= correction;
			}
		}
		/*
		 * Now, we have start2 = ip+new_ml,
		 * with new_ml=min(ml, OPTIMAL_ML=18)
		 */
		if (start2 + ml2 < mflimit)
			ml3 = lz4hc_insertandgetwidermatch(ctx,
				start2 + ml2 - 3, start2, matchlimit,
				ml2, &ref3, &start3);
		else
			ml3 = ml2;

		/* No better match : 2 sequences to encode */
		if (ml3 == ml2) {
			/* ip & ref are known; Now for ml */
			if (start2 < ip+ml)
				ml = (int)(start2 - ip);

			/* Now, encode 2 sequences */
			lz4_encodesequence(&ip, &op, &anchor, ml, ref);
			ip = start2;
			lz4_encodesequence(&ip, &op, &anchor, ml2, ref2);
			continue;
		}

		/* Not enough space for match 2 : remove it */
		if (start3 < ip + ml + 3) {
			/*
			 * can write Seq1 immediately ==> Seq2 is removed,
			 * so Seq3 becomes Seq1
			 */
			if (start3 >= (ip + ml)) {
				if (start2 < ip + ml) {
					int correction =
						(int)(ip + ml - start2);
					start2 += correction;
					ref2 += correction;
					ml2 -= correction;
					if (ml2 < MINMATCH) {
						start2 = start3;
						ref2 = ref3;
						ml2 = ml3;
					}
				}

				lz4_encodesequence(&ip, &op, &anchor, ml, ref);
				ip  = start3;
				ref = ref3;
				ml  = ml3;

				start0 = start2;
				ref0 = ref2;
				ml0 = ml2;
				goto _search2;
			}

			start2 = start3;
			ref2 = ref3;
			ml2 = ml3;
			goto _search3;
		}

		/*
		 * OK, now we have 3 ascending matches; let's write at least
		 * the first one ip & ref are known; Now for ml
		 */
		if (start2 < ip + ml) {
			if ((start2 - ip) < (int)ML_MASK) {
				int correction;
				if (ml > OPTIMAL_ML)
					ml = OPTIMAL_ML;
				if (ip + ml > start2 + ml2 - MINMATCH)
					ml = (int)(start2 - ip) + ml2
						- MINMATCH;
				correction = ml - (int)(start2 - ip);
				if (correction > 0) {
					start2 += correction;
					ref2 += correction;
					ml2 -= correction;
				}
			} else
				ml = (int)(start2 - ip);
		}
		lz4_encodesequence(&ip, &op, &anchor, ml, ref);

		/* rotate: match 2 becomes current, match 3 becomes match 2 */
		ip = start2;
		ref = ref2;
		ml = ml2;

		start2 = start3;
		ref2 = ref3;
		ml2 = ml3;

		goto _search3;
	}

	/* Encode Last Literals */
	lastrun = (int)(iend - anchor);
	if (lastrun >= (int)RUN_MASK) {
		*op++ = (RUN_MASK << ML_BITS);
		lastrun -= RUN_MASK;
		for (; lastrun > 254 ; lastrun -= 255)
			*op++ = 255;
		*op++ = (u8) lastrun;
	} else
		*op++ = (lastrun << ML_BITS);
	memcpy(op, anchor, iend - anchor);
	op += iend - anchor;
	/* End */
	return (int) (((char *)op) - dest);
}
515
516int lz4hc_compress(const unsigned char *src, size_t src_len,
517 unsigned char *dst, size_t *dst_len, void *wrkmem)
518{
519 int ret = -1;
520 int out_len = 0;
521
522 struct lz4hc_data *hc4 = (struct lz4hc_data *)wrkmem;
523 lz4hc_init(hc4, (const u8 *)src);
524 out_len = lz4_compresshcctx((struct lz4hc_data *)hc4, (const u8 *)src,
525 (char *)dst, (int)src_len);
526
527 if (out_len < 0)
528 goto exit;
529
530 *dst_len = out_len;
531 return 0;
532
533exit:
534 return ret;
535}
536EXPORT_SYMBOL_GPL(lz4hc_compress);
537
538MODULE_LICENSE("GPL");
539MODULE_DESCRIPTION("LZ4HC compressor");
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index d411355f238e..aac511417ad1 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -151,15 +151,12 @@ do { \
151#endif /* __a29k__ */ 151#endif /* __a29k__ */
152 152
153#if defined(__alpha) && W_TYPE_SIZE == 64 153#if defined(__alpha) && W_TYPE_SIZE == 64
154#define umul_ppmm(ph, pl, m0, m1) \ 154#define umul_ppmm(ph, pl, m0, m1) \
155do { \ 155do { \
156 UDItype __m0 = (m0), __m1 = (m1); \ 156 UDItype __m0 = (m0), __m1 = (m1); \
157 __asm__ ("umulh %r1,%2,%0" \ 157 (ph) = __builtin_alpha_umulh(__m0, __m1); \
158 : "=r" ((UDItype) ph) \ 158 (pl) = __m0 * __m1; \
159 : "%rJ" (__m0), \ 159} while (0)
160 "rI" (__m1)); \
161 (pl) = __m0 * __m1; \
162 } while (0)
163#define UMUL_TIME 46 160#define UMUL_TIME 46
164#ifndef LONGLONG_STANDALONE 161#ifndef LONGLONG_STANDALONE
165#define udiv_qrnnd(q, r, n1, n0, d) \ 162#define udiv_qrnnd(q, r, n1, n0, d) \
@@ -167,7 +164,7 @@ do { UDItype __r; \
167 (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \ 164 (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
168 (r) = __r; \ 165 (r) = __r; \
169} while (0) 166} while (0)
170extern UDItype __udiv_qrnnd(); 167extern UDItype __udiv_qrnnd(UDItype *, UDItype, UDItype, UDItype);
171#define UDIV_TIME 220 168#define UDIV_TIME 220
172#endif /* LONGLONG_STANDALONE */ 169#endif /* LONGLONG_STANDALONE */
173#endif /* __alpha */ 170#endif /* __alpha */
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 1fc23a3277e1..93c5d5ecff4e 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -158,7 +158,7 @@ static void compute_batch_value(void)
158 percpu_counter_batch = max(32, nr*2); 158 percpu_counter_batch = max(32, nr*2);
159} 159}
160 160
161static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb, 161static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
162 unsigned long action, void *hcpu) 162 unsigned long action, void *hcpu)
163{ 163{
164#ifdef CONFIG_HOTPLUG_CPU 164#ifdef CONFIG_HOTPLUG_CPU
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a1cf8cae60e7..a685c8a79578 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -247,13 +247,15 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
247 struct scatterlist *sg, *prv; 247 struct scatterlist *sg, *prv;
248 unsigned int left; 248 unsigned int left;
249 249
250 memset(table, 0, sizeof(*table));
251
252 if (nents == 0)
253 return -EINVAL;
250#ifndef ARCH_HAS_SG_CHAIN 254#ifndef ARCH_HAS_SG_CHAIN
251 if (WARN_ON_ONCE(nents > max_ents)) 255 if (WARN_ON_ONCE(nents > max_ents))
252 return -EINVAL; 256 return -EINVAL;
253#endif 257#endif
254 258
255 memset(table, 0, sizeof(*table));
256
257 left = nents; 259 left = nents;
258 prv = NULL; 260 prv = NULL;
259 do { 261 do {
@@ -453,6 +455,65 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
453} 455}
454EXPORT_SYMBOL(sg_miter_start); 456EXPORT_SYMBOL(sg_miter_start);
455 457
458static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
459{
460 if (!miter->__remaining) {
461 struct scatterlist *sg;
462 unsigned long pgoffset;
463
464 if (!__sg_page_iter_next(&miter->piter))
465 return false;
466
467 sg = miter->piter.sg;
468 pgoffset = miter->piter.sg_pgoffset;
469
470 miter->__offset = pgoffset ? 0 : sg->offset;
471 miter->__remaining = sg->offset + sg->length -
472 (pgoffset << PAGE_SHIFT) - miter->__offset;
473 miter->__remaining = min_t(unsigned long, miter->__remaining,
474 PAGE_SIZE - miter->__offset);
475 }
476
477 return true;
478}
479
480/**
481 * sg_miter_skip - reposition mapping iterator
482 * @miter: sg mapping iter to be skipped
483 * @offset: number of bytes to plus the current location
484 *
485 * Description:
486 * Sets the offset of @miter to its current location plus @offset bytes.
487 * If mapping iterator @miter has been proceeded by sg_miter_next(), this
488 * stops @miter.
489 *
490 * Context:
491 * Don't care if @miter is stopped, or not proceeded yet.
492 * Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
493 *
494 * Returns:
495 * true if @miter contains the valid mapping. false if end of sg
496 * list is reached.
497 */
498static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
499{
500 sg_miter_stop(miter);
501
502 while (offset) {
503 off_t consumed;
504
505 if (!sg_miter_get_next_page(miter))
506 return false;
507
508 consumed = min_t(off_t, offset, miter->__remaining);
509 miter->__offset += consumed;
510 miter->__remaining -= consumed;
511 offset -= consumed;
512 }
513
514 return true;
515}
516
456/** 517/**
457 * sg_miter_next - proceed mapping iterator to the next mapping 518 * sg_miter_next - proceed mapping iterator to the next mapping
458 * @miter: sg mapping iter to proceed 519 * @miter: sg mapping iter to proceed
@@ -478,22 +539,9 @@ bool sg_miter_next(struct sg_mapping_iter *miter)
478 * Get to the next page if necessary. 539 * Get to the next page if necessary.
479 * __remaining, __offset is adjusted by sg_miter_stop 540 * __remaining, __offset is adjusted by sg_miter_stop
480 */ 541 */
481 if (!miter->__remaining) { 542 if (!sg_miter_get_next_page(miter))
482 struct scatterlist *sg; 543 return false;
483 unsigned long pgoffset;
484
485 if (!__sg_page_iter_next(&miter->piter))
486 return false;
487
488 sg = miter->piter.sg;
489 pgoffset = miter->piter.sg_pgoffset;
490 544
491 miter->__offset = pgoffset ? 0 : sg->offset;
492 miter->__remaining = sg->offset + sg->length -
493 (pgoffset << PAGE_SHIFT) - miter->__offset;
494 miter->__remaining = min_t(unsigned long, miter->__remaining,
495 PAGE_SIZE - miter->__offset);
496 }
497 miter->page = sg_page_iter_page(&miter->piter); 545 miter->page = sg_page_iter_page(&miter->piter);
498 miter->consumed = miter->length = miter->__remaining; 546 miter->consumed = miter->length = miter->__remaining;
499 547
@@ -552,14 +600,16 @@ EXPORT_SYMBOL(sg_miter_stop);
552 * @nents: Number of SG entries 600 * @nents: Number of SG entries
553 * @buf: Where to copy from 601 * @buf: Where to copy from
554 * @buflen: The number of bytes to copy 602 * @buflen: The number of bytes to copy
555 * @to_buffer: transfer direction (non zero == from an sg list to a 603 * @skip: Number of bytes to skip before copying
556 * buffer, 0 == from a buffer to an sg list 604 * @to_buffer: transfer direction (true == from an sg list to a
605 * buffer, false == from a buffer to an sg list
557 * 606 *
558 * Returns the number of copied bytes. 607 * Returns the number of copied bytes.
559 * 608 *
560 **/ 609 **/
561static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, 610static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
562 void *buf, size_t buflen, int to_buffer) 611 void *buf, size_t buflen, off_t skip,
612 bool to_buffer)
563{ 613{
564 unsigned int offset = 0; 614 unsigned int offset = 0;
565 struct sg_mapping_iter miter; 615 struct sg_mapping_iter miter;
@@ -573,6 +623,9 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
573 623
574 sg_miter_start(&miter, sgl, nents, sg_flags); 624 sg_miter_start(&miter, sgl, nents, sg_flags);
575 625
626 if (!sg_miter_skip(&miter, skip))
627 return false;
628
576 local_irq_save(flags); 629 local_irq_save(flags);
577 630
578 while (sg_miter_next(&miter) && offset < buflen) { 631 while (sg_miter_next(&miter) && offset < buflen) {
@@ -607,7 +660,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
607size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, 660size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
608 void *buf, size_t buflen) 661 void *buf, size_t buflen)
609{ 662{
610 return sg_copy_buffer(sgl, nents, buf, buflen, 0); 663 return sg_copy_buffer(sgl, nents, buf, buflen, 0, false);
611} 664}
612EXPORT_SYMBOL(sg_copy_from_buffer); 665EXPORT_SYMBOL(sg_copy_from_buffer);
613 666
@@ -624,6 +677,42 @@ EXPORT_SYMBOL(sg_copy_from_buffer);
624size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, 677size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
625 void *buf, size_t buflen) 678 void *buf, size_t buflen)
626{ 679{
627 return sg_copy_buffer(sgl, nents, buf, buflen, 1); 680 return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
628} 681}
629EXPORT_SYMBOL(sg_copy_to_buffer); 682EXPORT_SYMBOL(sg_copy_to_buffer);
683
/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);
701
/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 7d8467645d2e..739a36366b79 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -923,6 +923,103 @@ char *ip4_addr_string(char *buf, char *end, const u8 *addr,
923} 923}
924 924
/*
 * Format an IPv6 socket address for the "%p[Ii]S" family (sa_family ==
 * AF_INET6).  Trailing format flags: 'p' appends ":port", 'f' appends
 * "/flowinfo", 's' appends "%scope-id", 'c' selects the RFC 5952
 * compressed form (honored only for the uppercase 'I' variant).  When
 * any of p/f/s is present the address itself is wrapped in brackets.
 */
static noinline_for_stack
char *ip6_addr_string_sa(char *buf, char *end, const struct sockaddr_in6 *sa,
			 struct printf_spec spec, const char *fmt)
{
	bool have_p = false, have_s = false, have_f = false, have_c = false;
	/*
	 * Worst case: bracketed full-length v4-mapped address plus the
	 * ":port", "/flowinfo" and "%scope-id" suffixes.
	 */
	char ip6_addr[sizeof("[xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255]") +
		      sizeof(":12345") + sizeof("/123456789") +
		      sizeof("%1234567890")];
	char *p = ip6_addr, *pend = ip6_addr + sizeof(ip6_addr);
	const u8 *addr = (const u8 *) &sa->sin6_addr;
	/* Synthesized two-char specifier ("I6"/"i6") for the ip6 helpers. */
	char fmt6[2] = { fmt[0], '6' };
	u8 off = 0;	/* 1 once a leading '[' has been written */

	/* Skip the 'I'/'i' and the 'S', then collect the trailing flags. */
	fmt++;
	while (isalpha(*++fmt)) {
		switch (*fmt) {
		case 'p':
			have_p = true;
			break;
		case 'f':
			have_f = true;
			break;
		case 's':
			have_s = true;
			break;
		case 'c':
			have_c = true;
			break;
		}
	}

	/* Any suffix means the address portion gets bracketed. */
	if (have_p || have_s || have_f) {
		*p = '[';
		off = 1;
	}

	if (fmt6[0] == 'I' && have_c)
		p = ip6_compressed_string(ip6_addr + off, addr);
	else
		p = ip6_string(ip6_addr + off, addr, fmt6);

	if (have_p || have_s || have_f)
		*p++ = ']';

	if (have_p) {
		*p++ = ':';
		p = number(p, pend, ntohs(sa->sin6_port), spec);
	}
	if (have_f) {
		/* Mask off the traffic-class bits before printing flowinfo. */
		*p++ = '/';
		p = number(p, pend, ntohl(sa->sin6_flowinfo &
					  IPV6_FLOWINFO_MASK), spec);
	}
	if (have_s) {
		*p++ = '%';
		p = number(p, pend, sa->sin6_scope_id, spec);
	}
	*p = '\0';

	return string(buf, end, ip6_addr, spec);
}
986
/*
 * Format an IPv4 socket address for the "%p[Ii]S" family (sa_family ==
 * AF_INET).  Trailing format flags: 'p' appends ":port"; 'h'/'l'/'n'/'b'
 * pass the byte-order modifier through to ip4_string().
 */
static noinline_for_stack
char *ip4_addr_string_sa(char *buf, char *end, const struct sockaddr_in *sa,
			 struct printf_spec spec, const char *fmt)
{
	bool have_p = false;
	/* Worst case: dotted-quad address plus a ":port" suffix. */
	char *p, ip4_addr[sizeof("255.255.255.255") + sizeof(":12345")];
	char *pend = ip4_addr + sizeof(ip4_addr);
	const u8 *addr = (const u8 *) &sa->sin_addr.s_addr;
	/* Synthesized specifier: 'I'/'i', '4', optional endianness flag. */
	char fmt4[3] = { fmt[0], '4', 0 };

	/* Skip the 'I'/'i' and the 'S', then collect the trailing flags. */
	fmt++;
	while (isalpha(*++fmt)) {
		switch (*fmt) {
		case 'p':
			have_p = true;
			break;
		case 'h':
		case 'l':
		case 'n':
		case 'b':
			fmt4[2] = *fmt;
			break;
		}
	}

	p = ip4_string(ip4_addr, addr, fmt4);
	if (have_p) {
		*p++ = ':';
		p = number(p, pend, ntohs(sa->sin_port), spec);
	}
	*p = '\0';

	return string(buf, end, ip4_addr, spec);
}
1021
1022static noinline_for_stack
926char *uuid_string(char *buf, char *end, const u8 *addr, 1023char *uuid_string(char *buf, char *end, const u8 *addr,
927 struct printf_spec spec, const char *fmt) 1024 struct printf_spec spec, const char *fmt)
928{ 1025{
@@ -1007,11 +1104,17 @@ int kptr_restrict __read_mostly;
1007 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way 1104 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
1008 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) 1105 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
1009 * IPv6 uses colon separated network-order 16 bit hex with leading 0's 1106 * IPv6 uses colon separated network-order 16 bit hex with leading 0's
1107 * [S][pfs]
1108 * Generic IPv4/IPv6 address (struct sockaddr *) that falls back to
1109 * [4] or [6] and is able to print port [p], flowinfo [f], scope [s]
1010 * - 'i' [46] for 'raw' IPv4/IPv6 addresses 1110 * - 'i' [46] for 'raw' IPv4/IPv6 addresses
1011 * IPv6 omits the colons (01020304...0f) 1111 * IPv6 omits the colons (01020304...0f)
1012 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) 1112 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
1013 * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order 1113 * [S][pfs]
1014 * - 'I6c' for IPv6 addresses printed as specified by 1114 * Generic IPv4/IPv6 address (struct sockaddr *) that falls back to
1115 * [4] or [6] and is able to print port [p], flowinfo [f], scope [s]
1116 * - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order
1117 * - 'I[6S]c' for IPv6 addresses printed as specified by
1015 * http://tools.ietf.org/html/rfc5952 1118 * http://tools.ietf.org/html/rfc5952
1016 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form 1119 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
1017 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" 1120 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
@@ -1093,6 +1196,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
1093 return ip6_addr_string(buf, end, ptr, spec, fmt); 1196 return ip6_addr_string(buf, end, ptr, spec, fmt);
1094 case '4': 1197 case '4':
1095 return ip4_addr_string(buf, end, ptr, spec, fmt); 1198 return ip4_addr_string(buf, end, ptr, spec, fmt);
1199 case 'S': {
1200 const union {
1201 struct sockaddr raw;
1202 struct sockaddr_in v4;
1203 struct sockaddr_in6 v6;
1204 } *sa = ptr;
1205
1206 switch (sa->raw.sa_family) {
1207 case AF_INET:
1208 return ip4_addr_string_sa(buf, end, &sa->v4, spec, fmt);
1209 case AF_INET6:
1210 return ip6_addr_string_sa(buf, end, &sa->v6, spec, fmt);
1211 default:
1212 return string(buf, end, "(invalid address)", spec);
1213 }}
1096 } 1214 }
1097 break; 1215 break;
1098 case 'U': 1216 case 'U':
@@ -1370,6 +1488,8 @@ qualifier:
1370 * %pI6 print an IPv6 address with colons 1488 * %pI6 print an IPv6 address with colons
1371 * %pi6 print an IPv6 address without colons 1489 * %pi6 print an IPv6 address without colons
1372 * %pI6c print an IPv6 address as specified by RFC 5952 1490 * %pI6c print an IPv6 address as specified by RFC 5952
1491 * %pIS depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address
1492 * %piS depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address
1373 * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper 1493 * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper
1374 * case. 1494 * case.
1375 * %*ph[CDN] a variable-length hex string with a separator (supports up to 64 1495 * %*ph[CDN] a variable-length hex string with a separator (supports up to 64