author     Kyungsik Lee <kyungsik.lee@lge.com>              2013-07-08 19:01:45 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-07-09 13:33:30 -0400
commit     cffb78b0e0b3a30b059b27a1d97500cf6464efa9 (patch)
tree       e20e06f58e90d3a6f3c3ff91547a9e9d51a8f230 /lib/lz4/lz4defs.h
parent     4df87bb7b6a22dfc6fdd5abb3dd362b3af2c164d (diff)
decompressor: add LZ4 decompressor module
Add support for LZ4 decompression in the Linux kernel. The LZ4 decompression
APIs for the kernel are based on the LZ4 implementation by Yann Collet.
Benchmark Results (PATCH v3)
Compiler: Linaro ARM gcc 4.6.2

1. ARMv7, 1.5GHz based board
   Kernel: linux 3.4
   Uncompressed Kernel Size: 14MB

        Compressed Size   Decompression Speed
   LZO  6.7MB             20.1MB/s, 25.2MB/s(UA)
   LZ4  7.3MB             29.1MB/s, 45.6MB/s(UA)

2. ARMv7, 1.7GHz based board
   Kernel: linux 3.7
   Uncompressed Kernel Size: 14MB

        Compressed Size   Decompression Speed
   LZO  6.0MB             34.1MB/s, 52.2MB/s(UA)
   LZ4  6.5MB             86.7MB/s

- UA: Unaligned memory Access support
- Latest patch set for LZO applied
This patch set adds support for an LZ4-compressed kernel. LZ4 is a very
fast lossless compression algorithm that also features an extremely fast
decoder [1].
However, the kernel already has five decompressors, which raises the
question of where we stop adding new ones. This issue was discussed
earlier and reached the following conclusion [2].
Russell King said that we should have:
- one decompressor which is the fastest
- one decompressor for the highest compression ratio
- one popular decompressor (eg conventional gzip)
If we have a replacement for one of these, then it should do exactly
that: replace it.
The benchmark shows an 8% increase in image size versus a 66% increase in
decompression speed compared to LZO (previously known as the fastest
decompressor in the kernel). The "fast but may not be small" compression
title has therefore clearly been taken by LZ4 [3].
[1] http://code.google.com/p/lz4/
[2] http://thread.gmane.org/gmane.linux.kbuild.devel/9157
[3] http://thread.gmane.org/gmane.linux.kbuild.devel/9347
LZ4 homepage: http://fastcompression.blogspot.com/p/lz4.html
LZ4 source repository: http://code.google.com/p/lz4/
Signed-off-by: Kyungsik Lee <kyungsik.lee@lge.com>
Signed-off-by: Yann Collet <yann.collet.73@gmail.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Florian Fainelli <florian@openwrt.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'lib/lz4/lz4defs.h')
-rw-r--r--   lib/lz4/lz4defs.h   94
1 files changed, 94 insertions, 0 deletions
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
new file mode 100644
index 000000000000..43ac31d63f36
--- /dev/null
+++ b/lib/lz4/lz4defs.h
@@ -0,0 +1,94 @@
/*
 * lz4defs.h -- architecture specific defines
 *
 * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * Detects 64 bits mode
 */
#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
	|| defined(__ppc64__) || defined(__LP64__))
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif

/*
 * Architecture-specific macros
 */
#define BYTE u8
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
	|| defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6 \
	&& defined(ARM_EFFICIENT_UNALIGNED_ACCESS)
typedef struct _U32_S { u32 v; } U32_S;
typedef struct _U64_S { u64 v; } U64_S;

#define A32(x) (((U32_S *)(x))->v)
#define A64(x) (((U64_S *)(x))->v)

#define PUT4(s, d) (A32(d) = A32(s))
#define PUT8(s, d) (A64(d) = A64(s))
#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */

#define PUT4(s, d) \
	put_unaligned(get_unaligned((const u32 *) s), (u32 *) d)
#define PUT8(s, d) \
	put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
#endif

#define COPYLENGTH 8
#define ML_BITS 4
#define ML_MASK ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)

#if LZ4_ARCH64/* 64-bit */
#define STEPSIZE 8

#define LZ4_COPYSTEP(s, d)	\
	do {			\
		PUT8(s, d);	\
		d += 8;		\
		s += 8;		\
	} while (0)

#define LZ4_COPYPACKET(s, d)	LZ4_COPYSTEP(s, d)

#define LZ4_SECURECOPY(s, d, e)			\
	do {					\
		if (d < e) {			\
			LZ4_WILDCOPY(s, d, e);	\
		}				\
	} while (0)

#else	/* 32-bit */
#define STEPSIZE 4

#define LZ4_COPYSTEP(s, d)	\
	do {			\
		PUT4(s, d);	\
		d += 4;		\
		s += 4;		\
	} while (0)

#define LZ4_COPYPACKET(s, d)		\
	do {				\
		LZ4_COPYSTEP(s, d);	\
		LZ4_COPYSTEP(s, d);	\
	} while (0)

#define LZ4_SECURECOPY	LZ4_WILDCOPY
#endif

#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
	(d = s - get_unaligned_le16(p))

#define LZ4_WILDCOPY(s, d, e)		\
	do {				\
		LZ4_COPYPACKET(s, d);	\
	} while (d < e)
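
To make the copy idiom above concrete, here is a small standalone C sketch of
the 64-bit LZ4_COPYSTEP/LZ4_WILDCOPY pattern. It is an illustration only, not
kernel code: the stdint typedefs, the memcpy-based get_unaligned64/
put_unaligned64 helpers and the userspace main() are assumptions made so the
snippet compiles outside the kernel tree.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint8_t u8;
typedef uint64_t u64;

/* Portable stand-ins for the kernel's unaligned access helpers,
 * emulated with memcpy here (an assumption for this sketch). */
static u64 get_unaligned64(const void *p) { u64 v; memcpy(&v, p, 8); return v; }
static void put_unaligned64(u64 v, void *p) { memcpy(p, &v, 8); }

#define PUT8(s, d) put_unaligned64(get_unaligned64(s), (d))

/* 64-bit flavour of the macros above: copy 8 bytes per step and keep
 * going until the destination cursor reaches 'e'.  The final step may
 * run up to 7 bytes past 'e', so the destination needs slack. */
#define LZ4_COPYSTEP(s, d)	do { PUT8(s, d); (d) += 8; (s) += 8; } while (0)
#define LZ4_WILDCOPY(s, d, e)	do { LZ4_COPYSTEP(s, d); } while ((d) < (e))

int main(void)
{
	u8 src[32] = "wildcopy: fast, bounded overrun";
	u8 dst[40] = { 0 };	/* extra slack for the bounded overrun */
	const u8 *s = src;
	u8 *d = dst;
	u8 *e = dst + 20;	/* want at least 20 bytes copied */

	LZ4_WILDCOPY(s, d, e);	/* actually copies 24 bytes (3 x 8) */
	printf("copied %zu bytes: %.20s\n",
	       (size_t)(d - dst), (const char *)dst);
	return 0;
}

The point of the wildcopy is that the loop contains no per-byte branch; the
trade-off is that the last step may write up to STEPSIZE - 1 bytes past 'e',
so callers must leave slack in the destination, which is why the header also
provides LZ4_SECURECOPY and why a decompressor is expected to fall back to a
more careful copy near the very end of the output buffer.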