aboutsummaryrefslogtreecommitdiffstats
path: root/lib/lz4/lz4defs.h
diff options
context:
space:
mode:
authorChanho Min <chanho.min@lge.com>2013-07-08 19:01:49 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-07-09 13:33:30 -0400
commitc72ac7a1a926dbffb59daf0f275450e5eecce16f (patch)
tree11350b56ad27c7001bdd45ce35d95666c355dfa8 /lib/lz4/lz4defs.h
parentf9b493ac9b833fd9dd3bbd50460adb33f29e1238 (diff)
lib: add lz4 compressor module
This patchset is for supporting LZ4 compression and the crypto API using it. As shown below, the size of the compressed data is a little bit bigger but the compression speed is faster when unaligned memory access is enabled. We can use LZ4 de/compression through the crypto API as well. Also, it will be useful for other potential users of LZ4 compression. lz4 Compression Benchmark: Compiler: ARM gcc 4.6.4 ARMv7, 1 GHz based board Kernel: linux 3.4 Uncompressed data Size: 101 MB Compressed Size compression Speed LZO 72.1MB 32.1MB/s, 33.0MB/s(UA) LZ4 75.1MB 30.4MB/s, 35.9MB/s(UA) LZ4HC 59.8MB 2.4MB/s, 2.5MB/s(UA) - UA: Unaligned memory Access support - Latest patch set for LZO applied This patch: Add support for LZ4 compression in the Linux Kernel. LZ4 Compression APIs for kernel are based on the LZ4 implementation by Yann Collet and were changed for kernel coding style. LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html LZ4 source repository : http://code.google.com/p/lz4/ svn revision : r90 Two APIs are added: lz4_compress() supports basic LZ4 compression, whereas lz4hc_compress() supports high compression mode, where CPU performance is lower but the compression ratio is higher. Also, we require pre-allocated working memory of the defined size, and the destination buffer must be allocated with the size given by lz4_compressbound. [akpm@linux-foundation.org: make lz4_compresshcctx() static] Signed-off-by: Chanho Min <chanho.min@lge.com> Cc: "Darrick J. Wong" <djwong@us.ibm.com> Cc: Bob Pearson <rpearson@systemfabricworks.com> Cc: Richard Weinberger <richard@nod.at> Cc: Herbert Xu <herbert@gondor.hengli.com.au> Cc: Yann Collet <yann.collet.73@gmail.com> Cc: Kyungsik Lee <kyungsik.lee@lge.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'lib/lz4/lz4defs.h')
-rw-r--r--lib/lz4/lz4defs.h66
1 file changed, 64 insertions, 2 deletions
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index 43ac31d63f36..abcecdc2d0f2 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -22,23 +22,40 @@
22 * Architecture-specific macros 22 * Architecture-specific macros
23 */ 23 */
24#define BYTE u8 24#define BYTE u8
25typedef struct _U16_S { u16 v; } U16_S;
26typedef struct _U32_S { u32 v; } U32_S;
27typedef struct _U64_S { u64 v; } U64_S;
25#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \ 28#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
26 || defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6 \ 29 || defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6 \
27 && defined(ARM_EFFICIENT_UNALIGNED_ACCESS) 30 && defined(ARM_EFFICIENT_UNALIGNED_ACCESS)
28typedef struct _U32_S { u32 v; } U32_S;
29typedef struct _U64_S { u64 v; } U64_S;
30 31
32#define A16(x) (((U16_S *)(x))->v)
31#define A32(x) (((U32_S *)(x))->v) 33#define A32(x) (((U32_S *)(x))->v)
32#define A64(x) (((U64_S *)(x))->v) 34#define A64(x) (((U64_S *)(x))->v)
33 35
34#define PUT4(s, d) (A32(d) = A32(s)) 36#define PUT4(s, d) (A32(d) = A32(s))
35#define PUT8(s, d) (A64(d) = A64(s)) 37#define PUT8(s, d) (A64(d) = A64(s))
38#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
39 do { \
40 A16(p) = v; \
41 p += 2; \
42 } while (0)
36#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ 43#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
37 44
45#define A64(x) get_unaligned((u64 *)&(((U16_S *)(x))->v))
46#define A32(x) get_unaligned((u32 *)&(((U16_S *)(x))->v))
47#define A16(x) get_unaligned((u16 *)&(((U16_S *)(x))->v))
48
38#define PUT4(s, d) \ 49#define PUT4(s, d) \
39 put_unaligned(get_unaligned((const u32 *) s), (u32 *) d) 50 put_unaligned(get_unaligned((const u32 *) s), (u32 *) d)
40#define PUT8(s, d) \ 51#define PUT8(s, d) \
41 put_unaligned(get_unaligned((const u64 *) s), (u64 *) d) 52 put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
53
54#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
55 do { \
56 put_unaligned(v, (u16 *)(p)); \
57 p += 2; \
58 } while (0)
42#endif 59#endif
43 60
44#define COPYLENGTH 8 61#define COPYLENGTH 8
@@ -46,6 +63,29 @@ typedef struct _U64_S { u64 v; } U64_S;
46#define ML_MASK ((1U << ML_BITS) - 1) 63#define ML_MASK ((1U << ML_BITS) - 1)
47#define RUN_BITS (8 - ML_BITS) 64#define RUN_BITS (8 - ML_BITS)
48#define RUN_MASK ((1U << RUN_BITS) - 1) 65#define RUN_MASK ((1U << RUN_BITS) - 1)
66#define MEMORY_USAGE 14
67#define MINMATCH 4
68#define SKIPSTRENGTH 6
69#define LASTLITERALS 5
70#define MFLIMIT (COPYLENGTH + MINMATCH)
71#define MINLENGTH (MFLIMIT + 1)
72#define MAXD_LOG 16
73#define MAXD (1 << MAXD_LOG)
74#define MAXD_MASK (u32)(MAXD - 1)
75#define MAX_DISTANCE (MAXD - 1)
76#define HASH_LOG (MAXD_LOG - 1)
77#define HASHTABLESIZE (1 << HASH_LOG)
78#define MAX_NB_ATTEMPTS 256
79#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
80#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT - 1))
81#define HASHLOG64K ((MEMORY_USAGE - 2) + 1)
82#define HASH64KTABLESIZE (1U << HASHLOG64K)
83#define LZ4_HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \
84 ((MINMATCH * 8) - (MEMORY_USAGE-2)))
85#define LZ4_HASH64K_VALUE(p) (((A32(p)) * 2654435761U) >> \
86 ((MINMATCH * 8) - HASHLOG64K))
87#define HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \
88 ((MINMATCH * 8) - HASH_LOG))
49 89
50#if LZ4_ARCH64/* 64-bit */ 90#if LZ4_ARCH64/* 64-bit */
51#define STEPSIZE 8 91#define STEPSIZE 8
@@ -65,6 +105,13 @@ typedef struct _U64_S { u64 v; } U64_S;
65 LZ4_WILDCOPY(s, d, e); \ 105 LZ4_WILDCOPY(s, d, e); \
66 } \ 106 } \
67 } while (0) 107 } while (0)
108#define HTYPE u32
109
110#ifdef __BIG_ENDIAN
111#define LZ4_NBCOMMONBYTES(val) (__builtin_clzll(val) >> 3)
112#else
113#define LZ4_NBCOMMONBYTES(val) (__builtin_ctzll(val) >> 3)
114#endif
68 115
69#else /* 32-bit */ 116#else /* 32-bit */
70#define STEPSIZE 4 117#define STEPSIZE 4
@@ -83,6 +130,14 @@ typedef struct _U64_S { u64 v; } U64_S;
83 } while (0) 130 } while (0)
84 131
85#define LZ4_SECURECOPY LZ4_WILDCOPY 132#define LZ4_SECURECOPY LZ4_WILDCOPY
133#define HTYPE const u8*
134
135#ifdef __BIG_ENDIAN
136#define LZ4_NBCOMMONBYTES(val) (__builtin_clz(val) >> 3)
137#else
138#define LZ4_NBCOMMONBYTES(val) (__builtin_ctz(val) >> 3)
139#endif
140
86#endif 141#endif
87 142
88#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \ 143#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
@@ -92,3 +147,10 @@ typedef struct _U64_S { u64 v; } U64_S;
92 do { \ 147 do { \
93 LZ4_COPYPACKET(s, d); \ 148 LZ4_COPYPACKET(s, d); \
94 } while (d < e) 149 } while (d < e)
150
151#define LZ4_BLINDCOPY(s, d, l) \
152 do { \
153 u8 *e = (d) + l; \
154 LZ4_WILDCOPY(s, d, e); \
155 d = e; \
156 } while (0)