aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig54
-rw-r--r--lib/Kconfig.debug25
-rw-r--r--lib/Makefile5
-rw-r--r--lib/bitmap.c22
-rw-r--r--lib/btree.c4
-rw-r--r--lib/crc32.c1279
-rw-r--r--lib/crc32defs.h56
-rw-r--r--lib/gen_crc32table.c81
-rw-r--r--lib/genalloc.c190
-rw-r--r--lib/kobject_uevent.c21
-rw-r--r--lib/md5.c95
-rw-r--r--lib/memcopy.c403
-rw-r--r--lib/nlattr.c1
-rw-r--r--lib/plist.c7
-rw-r--r--lib/sha1.c212
-rw-r--r--lib/string.c29
-rw-r--r--lib/xz/xz_dec_bcj.c27
-rw-r--r--lib/xz/xz_dec_stream.c1
-rw-r--r--lib/xz/xz_private.h2
19 files changed, 1966 insertions, 548 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 830181cc7a8..8991c57c196 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -51,14 +51,60 @@ config CRC_ITU_T
51 functions require M here. 51 functions require M here.
52 52
53config CRC32 53config CRC32
54 tristate "CRC32 functions" 54 tristate "CRC32/CRC32c functions"
55 default y 55 default y
56 select BITREVERSE 56 select BITREVERSE
57 help 57 help
58 This option is provided for the case where no in-kernel-tree 58 This option is provided for the case where no in-kernel-tree
59 modules require CRC32 functions, but a module built outside the 59 modules require CRC32/CRC32c functions, but a module built outside
60 kernel tree does. Such modules that use library CRC32 functions 60 the kernel tree does. Such modules that use library CRC32/CRC32c
61 require M here. 61 functions require M here.
62
63config CRC32_SELFTEST
64 bool "CRC32 perform self test on init"
65 default n
66 depends on CRC32
67 help
68 This option enables the CRC32 library functions to perform a
69 self test on initialization. The self test computes crc32_le
70 and crc32_be over byte strings with random alignment and length
71 and computes the total elapsed time and number of bytes processed.
72
73choice
74 prompt "CRC32 implementation"
75 depends on CRC32
76 default CRC32_SLICEBY8
77
78config CRC32_SLICEBY8
79 bool "Slice by 8 bytes"
80 help
81 Calculate checksum 8 bytes at a time with a clever slicing algorithm.
82 This is the fastest algorithm, but comes with a 8KiB lookup table.
83 Most modern processors have enough cache that this shouldn't be
84 a problem.
85
86 If you don't know which to choose, choose this one.
87
88config CRC32_SLICEBY4
89 bool "Slice by 4 bytes"
90 help
91 Calculate checksum 4 bytes at a time with a clever slicing algorithm.
92 This is a bit slower than slice by 8, but has a smaller 4KiB lookup
93 table.
94
95config CRC32_SARWATE
96 bool "Sarwate's Algorithm (one byte at a time)"
97 help
98 Calculate checksum a byte at a time using Sarwate's algorithm. This
99 is not particularly fast, but has a small 256 byte lookup table.
100
101config CRC32_BIT
102 bool "Classic Algorithm (one bit at a time)"
103 help
104 Calculate checksum one bit at a time. This is VERY slow, but has
105 no lookup table. This is provided as a debugging option.
106
107endchoice
62 108
63config CRC7 109config CRC7
64 tristate "CRC7 functions" 110 tristate "CRC7 functions"
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dd373c8ee94..37a94aea5b7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -411,6 +411,26 @@ config SLUB_STATS
411 out which slabs are relevant to a particular load. 411 out which slabs are relevant to a particular load.
412 Try running: slabinfo -DA 412 Try running: slabinfo -DA
413 413
414config SLQB_DEBUG
415 default y
416 bool "Enable SLQB debugging support"
417 depends on SLQB
418
419config SLQB_DEBUG_ON
420 default n
421 bool "SLQB debugging on by default"
422 depends on SLQB_DEBUG
423
424config SLQB_SYSFS
425 bool "Create SYSFS entries for slab caches"
426 default n
427 depends on SLQB
428
429config SLQB_STATS
430 bool "Enable SLQB performance statistics"
431 default n
432 depends on SLQB_SYSFS
433
414config DEBUG_KMEMLEAK 434config DEBUG_KMEMLEAK
415 bool "Kernel memory leak detector" 435 bool "Kernel memory leak detector"
416 depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ 436 depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
@@ -667,8 +687,9 @@ config DEBUG_LOCKING_API_SELFTESTS
667 mutexes and rwsems. 687 mutexes and rwsems.
668 688
669config STACKTRACE 689config STACKTRACE
670 bool 690 bool "Stacktrace"
671 depends on STACKTRACE_SUPPORT 691 depends on STACKTRACE_SUPPORT
692 default y
672 693
673config DEBUG_STACK_USAGE 694config DEBUG_STACK_USAGE
674 bool "Stack utilization instrumentation" 695 bool "Stack utilization instrumentation"
@@ -1040,7 +1061,7 @@ config FAULT_INJECTION
1040config FAILSLAB 1061config FAILSLAB
1041 bool "Fault-injection capability for kmalloc" 1062 bool "Fault-injection capability for kmalloc"
1042 depends on FAULT_INJECTION 1063 depends on FAULT_INJECTION
1043 depends on SLAB || SLUB 1064 depends on SLAB || SLUB || SLQB
1044 help 1065 help
1045 Provide fault-injection capability for kmalloc. 1066 Provide fault-injection capability for kmalloc.
1046 1067
diff --git a/lib/Makefile b/lib/Makefile
index 6b597fdb189..7bd1afa496c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -10,9 +10,10 @@ endif
10lib-y := ctype.o string.o vsprintf.o cmdline.o \ 10lib-y := ctype.o string.o vsprintf.o cmdline.o \
11 rbtree.o radix-tree.o dump_stack.o timerqueue.o\ 11 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
12 idr.o int_sqrt.o extable.o prio_tree.o \ 12 idr.o int_sqrt.o extable.o prio_tree.o \
13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
14 proportions.o prio_heap.o ratelimit.o show_mem.o \ 14 proportions.o prio_heap.o ratelimit.o show_mem.o \
15 is_single_threaded.o plist.o decompress.o find_next_bit.o 15 is_single_threaded.o plist.o decompress.o find_next_bit.o \
16 memcopy.o
16 17
17lib-$(CONFIG_MMU) += ioremap.o 18lib-$(CONFIG_MMU) += ioremap.o
18lib-$(CONFIG_SMP) += cpumask.o 19lib-$(CONFIG_SMP) += cpumask.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 3f3b68199d7..cf12bb86d7c 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -315,30 +315,32 @@ void bitmap_clear(unsigned long *map, int start, int nr)
315} 315}
316EXPORT_SYMBOL(bitmap_clear); 316EXPORT_SYMBOL(bitmap_clear);
317 317
318/* 318/**
319 * bitmap_find_next_zero_area - find a contiguous aligned zero area 319 * bitmap_find_next_zero_area - find a contiguous aligned zero area
320 * @map: The address to base the search on 320 * @map: The address to base the search on
321 * @size: The bitmap size in bits 321 * @size: The bitmap size in bits
322 * @start: The bitnumber to start searching at 322 * @start: The bitnumber to start searching at
323 * @nr: The number of zeroed bits we're looking for 323 * @nr: The number of zeroed bits we're looking for
324 * @align_mask: Alignment mask for zero area 324 * @align_mask: Alignment mask for zero area
325 * @align_offset: Alignment offset for zero area.
325 * 326 *
326 * The @align_mask should be one less than a power of 2; the effect is that 327 * The @align_mask should be one less than a power of 2; the effect is that
327 * the bit offset of all zero areas this function finds is multiples of that 328 * the bit offset of all zero areas this function finds plus @align_offset
328 * power of 2. A @align_mask of 0 means no alignment is required. 329 * is multiple of that power of 2.
329 */ 330 */
330unsigned long bitmap_find_next_zero_area(unsigned long *map, 331unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
331 unsigned long size, 332 unsigned long size,
332 unsigned long start, 333 unsigned long start,
333 unsigned int nr, 334 unsigned int nr,
334 unsigned long align_mask) 335 unsigned long align_mask,
336 unsigned long align_offset)
335{ 337{
336 unsigned long index, end, i; 338 unsigned long index, end, i;
337again: 339again:
338 index = find_next_zero_bit(map, size, start); 340 index = find_next_zero_bit(map, size, start);
339 341
340 /* Align allocation */ 342 /* Align allocation */
341 index = __ALIGN_MASK(index, align_mask); 343 index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
342 344
343 end = index + nr; 345 end = index + nr;
344 if (end > size) 346 if (end > size)
@@ -350,7 +352,7 @@ again:
350 } 352 }
351 return index; 353 return index;
352} 354}
353EXPORT_SYMBOL(bitmap_find_next_zero_area); 355EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
354 356
355/* 357/*
356 * Bitmap printing & parsing functions: first version by Bill Irwin, 358 * Bitmap printing & parsing functions: first version by Bill Irwin,
diff --git a/lib/btree.c b/lib/btree.c
index 2a34392bcec..297124d4d8d 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -319,8 +319,8 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
319 319
320 if (head->height == 0) 320 if (head->height == 0)
321 return NULL; 321 return NULL;
322retry:
323 longcpy(key, __key, geo->keylen); 322 longcpy(key, __key, geo->keylen);
323retry:
324 dec_key(geo, key); 324 dec_key(geo, key);
325 325
326 node = head->node; 326 node = head->node;
@@ -351,7 +351,7 @@ retry:
351 } 351 }
352miss: 352miss:
353 if (retry_key) { 353 if (retry_key) {
354 __key = retry_key; 354 longcpy(key, retry_key, geo->keylen);
355 retry_key = NULL; 355 retry_key = NULL;
356 goto retry; 356 goto retry;
357 } 357 }
diff --git a/lib/crc32.c b/lib/crc32.c
index 4855995fcde..382fa767a65 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -1,4 +1,8 @@
1/* 1/*
2 * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin
3 * cleaned up code to current version of sparse and added the slicing-by-8
4 * algorithm to the closely similar existing slicing-by-4 algorithm.
5 *
2 * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com> 6 * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
3 * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks! 7 * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks!
4 * Code was from the public domain, copyright abandoned. Code was 8 * Code was from the public domain, copyright abandoned. Code was
@@ -20,51 +24,58 @@
20 * Version 2. See the file COPYING for more details. 24 * Version 2. See the file COPYING for more details.
21 */ 25 */
22 26
27/* see: Documentation/crc32.txt for a description of algorithms */
28
23#include <linux/crc32.h> 29#include <linux/crc32.h>
24#include <linux/kernel.h>
25#include <linux/module.h> 30#include <linux/module.h>
26#include <linux/compiler.h>
27#include <linux/types.h> 31#include <linux/types.h>
28#include <linux/init.h>
29#include <asm/atomic.h>
30#include "crc32defs.h" 32#include "crc32defs.h"
31#if CRC_LE_BITS == 8 33
32# define tole(x) __constant_cpu_to_le32(x) 34#if CRC_LE_BITS > 8
35# define tole(x) (__force u32) __constant_cpu_to_le32(x)
33#else 36#else
34# define tole(x) (x) 37# define tole(x) (x)
35#endif 38#endif
36 39
37#if CRC_BE_BITS == 8 40#if CRC_BE_BITS > 8
38# define tobe(x) __constant_cpu_to_be32(x) 41# define tobe(x) (__force u32) __constant_cpu_to_be32(x)
39#else 42#else
40# define tobe(x) (x) 43# define tobe(x) (x)
41#endif 44#endif
45
42#include "crc32table.h" 46#include "crc32table.h"
43 47
44MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>"); 48MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
45MODULE_DESCRIPTION("Ethernet CRC32 calculations"); 49MODULE_DESCRIPTION("Various CRC32 calculations");
46MODULE_LICENSE("GPL"); 50MODULE_LICENSE("GPL");
47 51
48#if CRC_LE_BITS == 8 || CRC_BE_BITS == 8 52#if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
49 53
54/* implements slicing-by-4 or slicing-by-8 algorithm */
50static inline u32 55static inline u32
51crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) 56crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
52{ 57{
53# ifdef __LITTLE_ENDIAN 58# ifdef __LITTLE_ENDIAN
54# define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8) 59# define DO_CRC(x) (crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8))
55# define DO_CRC4 crc = tab[3][(crc) & 255] ^ \ 60# define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \
56 tab[2][(crc >> 8) & 255] ^ \ 61 t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255])
57 tab[1][(crc >> 16) & 255] ^ \ 62# define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \
58 tab[0][(crc >> 24) & 255] 63 t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255])
59# else 64# else
60# define DO_CRC(x) crc = tab[0][((crc >> 24) ^ (x)) & 255] ^ (crc << 8) 65# define DO_CRC(x) (crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8))
61# define DO_CRC4 crc = tab[0][(crc) & 255] ^ \ 66# define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \
62 tab[1][(crc >> 8) & 255] ^ \ 67 t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255])
63 tab[2][(crc >> 16) & 255] ^ \ 68# define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \
64 tab[3][(crc >> 24) & 255] 69 t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255])
65# endif 70# endif
66 const u32 *b; 71 const u32 *b;
67 size_t rem_len; 72 size_t rem_len;
73# ifdef CONFIG_X86
74 size_t i;
75# endif
76 const u32 *t0 = tab[0], *t1 = tab[1], *t2 = tab[2], *t3 = tab[3];
77 const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7];
78 u32 q;
68 79
69 /* Align it */ 80 /* Align it */
70 if (unlikely((long)buf & 3 && len)) { 81 if (unlikely((long)buf & 3 && len)) {
@@ -72,27 +83,51 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
72 DO_CRC(*buf++); 83 DO_CRC(*buf++);
73 } while ((--len) && ((long)buf)&3); 84 } while ((--len) && ((long)buf)&3);
74 } 85 }
86
87# if CRC_LE_BITS == 32
75 rem_len = len & 3; 88 rem_len = len & 3;
76 /* load data 32 bits wide, xor data 32 bits wide. */
77 len = len >> 2; 89 len = len >> 2;
90# else
91 rem_len = len & 7;
92 len = len >> 3;
93# endif
94
78 b = (const u32 *)buf; 95 b = (const u32 *)buf;
96# ifdef CONFIG_X86
97 --b;
98 for (i = 0; i < len; i++) {
99# else
79 for (--b; len; --len) { 100 for (--b; len; --len) {
80 crc ^= *++b; /* use pre increment for speed */ 101# endif
81 DO_CRC4; 102 q = crc ^ *++b; /* use pre increment for speed */
103# if CRC_LE_BITS == 32
104 crc = DO_CRC4;
105# else
106 crc = DO_CRC8;
107 q = *++b;
108 crc ^= DO_CRC4;
109# endif
82 } 110 }
83 len = rem_len; 111 len = rem_len;
84 /* And the last few bytes */ 112 /* And the last few bytes */
85 if (len) { 113 if (len) {
86 u8 *p = (u8 *)(b + 1) - 1; 114 u8 *p = (u8 *)(b + 1) - 1;
115# ifdef CONFIG_X86
116 for (i = 0; i < len; i++)
117 DO_CRC(*++p); /* use pre increment for speed */
118# else
87 do { 119 do {
88 DO_CRC(*++p); /* use pre increment for speed */ 120 DO_CRC(*++p); /* use pre increment for speed */
89 } while (--len); 121 } while (--len);
122# endif
90 } 123 }
91 return crc; 124 return crc;
92#undef DO_CRC 125#undef DO_CRC
93#undef DO_CRC4 126#undef DO_CRC4
127#undef DO_CRC8
94} 128}
95#endif 129#endif
130
96/** 131/**
97 * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 132 * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
98 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for 133 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
@@ -100,53 +135,56 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
100 * @p: pointer to buffer over which CRC is run 135 * @p: pointer to buffer over which CRC is run
101 * @len: length of buffer @p 136 * @len: length of buffer @p
102 */ 137 */
103u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len); 138static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
104 139 size_t len, const u32 (*tab)[256],
105#if CRC_LE_BITS == 1 140 u32 polynomial)
106/*
107 * In fact, the table-based code will work in this case, but it can be
108 * simplified by inlining the table in ?: form.
109 */
110
111u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
112{ 141{
142#if CRC_LE_BITS == 1
113 int i; 143 int i;
114 while (len--) { 144 while (len--) {
115 crc ^= *p++; 145 crc ^= *p++;
116 for (i = 0; i < 8; i++) 146 for (i = 0; i < 8; i++)
117 crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); 147 crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
148 }
149# elif CRC_LE_BITS == 2
150 while (len--) {
151 crc ^= *p++;
152 crc = (crc >> 2) ^ tab[0][crc & 3];
153 crc = (crc >> 2) ^ tab[0][crc & 3];
154 crc = (crc >> 2) ^ tab[0][crc & 3];
155 crc = (crc >> 2) ^ tab[0][crc & 3];
118 } 156 }
119 return crc;
120}
121#else /* Table-based approach */
122
123u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
124{
125# if CRC_LE_BITS == 8
126 const u32 (*tab)[] = crc32table_le;
127
128 crc = __cpu_to_le32(crc);
129 crc = crc32_body(crc, p, len, tab);
130 return __le32_to_cpu(crc);
131# elif CRC_LE_BITS == 4 157# elif CRC_LE_BITS == 4
132 while (len--) { 158 while (len--) {
133 crc ^= *p++; 159 crc ^= *p++;
134 crc = (crc >> 4) ^ crc32table_le[crc & 15]; 160 crc = (crc >> 4) ^ tab[0][crc & 15];
135 crc = (crc >> 4) ^ crc32table_le[crc & 15]; 161 crc = (crc >> 4) ^ tab[0][crc & 15];
136 } 162 }
137 return crc; 163# elif CRC_LE_BITS == 8
138# elif CRC_LE_BITS == 2 164 /* aka Sarwate algorithm */
139 while (len--) { 165 while (len--) {
140 crc ^= *p++; 166 crc ^= *p++;
141 crc = (crc >> 2) ^ crc32table_le[crc & 3]; 167 crc = (crc >> 8) ^ tab[0][crc & 255];
142 crc = (crc >> 2) ^ crc32table_le[crc & 3];
143 crc = (crc >> 2) ^ crc32table_le[crc & 3];
144 crc = (crc >> 2) ^ crc32table_le[crc & 3];
145 } 168 }
169# else
170 crc = (__force u32) __cpu_to_le32(crc);
171 crc = crc32_body(crc, p, len, tab);
172 crc = __le32_to_cpu((__force __le32)crc);
173#endif
146 return crc; 174 return crc;
147# endif
148} 175}
149#endif 176
177u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
178{
179 return crc32_le_generic(crc, p, len, crc32table_le, CRCPOLY_LE);
180}
181EXPORT_SYMBOL(crc32_le);
182
183u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
184{
185 return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
186}
187EXPORT_SYMBOL(__crc32c_le);
150 188
151/** 189/**
152 * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 190 * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
@@ -155,317 +193,906 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
155 * @p: pointer to buffer over which CRC is run 193 * @p: pointer to buffer over which CRC is run
156 * @len: length of buffer @p 194 * @len: length of buffer @p
157 */ 195 */
158u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len); 196static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
159 197 size_t len, const u32 (*tab)[256],
160#if CRC_BE_BITS == 1 198 u32 polynomial)
161/*
162 * In fact, the table-based code will work in this case, but it can be
163 * simplified by inlining the table in ?: form.
164 */
165
166u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
167{ 199{
200#if CRC_BE_BITS == 1
168 int i; 201 int i;
169 while (len--) { 202 while (len--) {
170 crc ^= *p++ << 24; 203 crc ^= *p++ << 24;
171 for (i = 0; i < 8; i++) 204 for (i = 0; i < 8; i++)
172 crc = 205 crc =
173 (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 206 (crc << 1) ^ ((crc & 0x80000000) ? polynomial :
174 0); 207 0);
175 } 208 }
176 return crc; 209# elif CRC_BE_BITS == 2
177} 210 while (len--) {
178 211 crc ^= *p++ << 24;
179#else /* Table-based approach */ 212 crc = (crc << 2) ^ tab[0][crc >> 30];
180u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) 213 crc = (crc << 2) ^ tab[0][crc >> 30];
181{ 214 crc = (crc << 2) ^ tab[0][crc >> 30];
182# if CRC_BE_BITS == 8 215 crc = (crc << 2) ^ tab[0][crc >> 30];
183 const u32 (*tab)[] = crc32table_be; 216 }
184
185 crc = __cpu_to_be32(crc);
186 crc = crc32_body(crc, p, len, tab);
187 return __be32_to_cpu(crc);
188# elif CRC_BE_BITS == 4 217# elif CRC_BE_BITS == 4
189 while (len--) { 218 while (len--) {
190 crc ^= *p++ << 24; 219 crc ^= *p++ << 24;
191 crc = (crc << 4) ^ crc32table_be[crc >> 28]; 220 crc = (crc << 4) ^ tab[0][crc >> 28];
192 crc = (crc << 4) ^ crc32table_be[crc >> 28]; 221 crc = (crc << 4) ^ tab[0][crc >> 28];
193 } 222 }
194 return crc; 223# elif CRC_BE_BITS == 8
195# elif CRC_BE_BITS == 2
196 while (len--) { 224 while (len--) {
197 crc ^= *p++ << 24; 225 crc ^= *p++ << 24;
198 crc = (crc << 2) ^ crc32table_be[crc >> 30]; 226 crc = (crc << 8) ^ tab[0][crc >> 24];
199 crc = (crc << 2) ^ crc32table_be[crc >> 30];
200 crc = (crc << 2) ^ crc32table_be[crc >> 30];
201 crc = (crc << 2) ^ crc32table_be[crc >> 30];
202 } 227 }
203 return crc; 228# else
229 crc = (__force u32) __cpu_to_be32(crc);
230 crc = crc32_body(crc, p, len, tab);
231 crc = __be32_to_cpu((__force __be32)crc);
204# endif 232# endif
233 return crc;
205} 234}
206#endif
207 235
208EXPORT_SYMBOL(crc32_le); 236u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
237{
238 return crc32_be_generic(crc, p, len, crc32table_be, CRCPOLY_BE);
239}
209EXPORT_SYMBOL(crc32_be); 240EXPORT_SYMBOL(crc32_be);
210 241
211/* 242#ifdef CONFIG_CRC32_SELFTEST
212 * A brief CRC tutorial.
213 *
214 * A CRC is a long-division remainder. You add the CRC to the message,
215 * and the whole thing (message+CRC) is a multiple of the given
216 * CRC polynomial. To check the CRC, you can either check that the
217 * CRC matches the recomputed value, *or* you can check that the
218 * remainder computed on the message+CRC is 0. This latter approach
219 * is used by a lot of hardware implementations, and is why so many
220 * protocols put the end-of-frame flag after the CRC.
221 *
222 * It's actually the same long division you learned in school, except that
223 * - We're working in binary, so the digits are only 0 and 1, and
224 * - When dividing polynomials, there are no carries. Rather than add and
225 * subtract, we just xor. Thus, we tend to get a bit sloppy about
226 * the difference between adding and subtracting.
227 *
228 * A 32-bit CRC polynomial is actually 33 bits long. But since it's
229 * 33 bits long, bit 32 is always going to be set, so usually the CRC
230 * is written in hex with the most significant bit omitted. (If you're
231 * familiar with the IEEE 754 floating-point format, it's the same idea.)
232 *
233 * Note that a CRC is computed over a string of *bits*, so you have
234 * to decide on the endianness of the bits within each byte. To get
235 * the best error-detecting properties, this should correspond to the
236 * order they're actually sent. For example, standard RS-232 serial is
237 * little-endian; the most significant bit (sometimes used for parity)
238 * is sent last. And when appending a CRC word to a message, you should
239 * do it in the right order, matching the endianness.
240 *
241 * Just like with ordinary division, the remainder is always smaller than
242 * the divisor (the CRC polynomial) you're dividing by. Each step of the
243 * division, you take one more digit (bit) of the dividend and append it
244 * to the current remainder. Then you figure out the appropriate multiple
245 * of the divisor to subtract to being the remainder back into range.
246 * In binary, it's easy - it has to be either 0 or 1, and to make the
247 * XOR cancel, it's just a copy of bit 32 of the remainder.
248 *
249 * When computing a CRC, we don't care about the quotient, so we can
250 * throw the quotient bit away, but subtract the appropriate multiple of
251 * the polynomial from the remainder and we're back to where we started,
252 * ready to process the next bit.
253 *
254 * A big-endian CRC written this way would be coded like:
255 * for (i = 0; i < input_bits; i++) {
256 * multiple = remainder & 0x80000000 ? CRCPOLY : 0;
257 * remainder = (remainder << 1 | next_input_bit()) ^ multiple;
258 * }
259 * Notice how, to get at bit 32 of the shifted remainder, we look
260 * at bit 31 of the remainder *before* shifting it.
261 *
262 * But also notice how the next_input_bit() bits we're shifting into
263 * the remainder don't actually affect any decision-making until
264 * 32 bits later. Thus, the first 32 cycles of this are pretty boring.
265 * Also, to add the CRC to a message, we need a 32-bit-long hole for it at
266 * the end, so we have to add 32 extra cycles shifting in zeros at the
267 * end of every message,
268 *
269 * So the standard trick is to rearrage merging in the next_input_bit()
270 * until the moment it's needed. Then the first 32 cycles can be precomputed,
271 * and merging in the final 32 zero bits to make room for the CRC can be
272 * skipped entirely.
273 * This changes the code to:
274 * for (i = 0; i < input_bits; i++) {
275 * remainder ^= next_input_bit() << 31;
276 * multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
277 * remainder = (remainder << 1) ^ multiple;
278 * }
279 * With this optimization, the little-endian code is simpler:
280 * for (i = 0; i < input_bits; i++) {
281 * remainder ^= next_input_bit();
282 * multiple = (remainder & 1) ? CRCPOLY : 0;
283 * remainder = (remainder >> 1) ^ multiple;
284 * }
285 *
286 * Note that the other details of endianness have been hidden in CRCPOLY
287 * (which must be bit-reversed) and next_input_bit().
288 *
289 * However, as long as next_input_bit is returning the bits in a sensible
290 * order, we can actually do the merging 8 or more bits at a time rather
291 * than one bit at a time:
292 * for (i = 0; i < input_bytes; i++) {
293 * remainder ^= next_input_byte() << 24;
294 * for (j = 0; j < 8; j++) {
295 * multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
296 * remainder = (remainder << 1) ^ multiple;
297 * }
298 * }
299 * Or in little-endian:
300 * for (i = 0; i < input_bytes; i++) {
301 * remainder ^= next_input_byte();
302 * for (j = 0; j < 8; j++) {
303 * multiple = (remainder & 1) ? CRCPOLY : 0;
304 * remainder = (remainder << 1) ^ multiple;
305 * }
306 * }
307 * If the input is a multiple of 32 bits, you can even XOR in a 32-bit
308 * word at a time and increase the inner loop count to 32.
309 *
310 * You can also mix and match the two loop styles, for example doing the
311 * bulk of a message byte-at-a-time and adding bit-at-a-time processing
312 * for any fractional bytes at the end.
313 *
314 * The only remaining optimization is to the byte-at-a-time table method.
315 * Here, rather than just shifting one bit of the remainder to decide
316 * in the correct multiple to subtract, we can shift a byte at a time.
317 * This produces a 40-bit (rather than a 33-bit) intermediate remainder,
318 * but again the multiple of the polynomial to subtract depends only on
319 * the high bits, the high 8 bits in this case.
320 *
321 * The multiple we need in that case is the low 32 bits of a 40-bit
322 * value whose high 8 bits are given, and which is a multiple of the
323 * generator polynomial. This is simply the CRC-32 of the given
324 * one-byte message.
325 *
326 * Two more details: normally, appending zero bits to a message which
327 * is already a multiple of a polynomial produces a larger multiple of that
328 * polynomial. To enable a CRC to detect this condition, it's common to
329 * invert the CRC before appending it. This makes the remainder of the
330 * message+crc come out not as zero, but some fixed non-zero value.
331 *
332 * The same problem applies to zero bits prepended to the message, and
333 * a similar solution is used. Instead of starting with a remainder of
334 * 0, an initial remainder of all ones is used. As long as you start
335 * the same way on decoding, it doesn't make a difference.
336 */
337 243
338#ifdef UNITTEST 244/* 4096 random bytes */
339 245static u8 __attribute__((__aligned__(8))) test_buf[] =
340#include <stdlib.h> 246{
341#include <stdio.h> 247 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
248 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
249 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60,
250 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c,
251 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4,
252 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a,
253 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a,
254 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4,
255 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9,
256 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4,
257 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca,
258 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61,
259 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e,
260 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a,
261 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f,
262 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd,
263 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c,
264 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88,
265 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53,
266 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f,
267 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4,
268 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74,
269 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60,
270 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09,
271 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07,
272 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1,
273 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f,
274 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2,
275 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0,
276 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95,
277 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22,
278 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93,
279 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86,
280 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d,
281 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40,
282 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b,
283 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35,
284 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40,
285 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63,
286 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b,
287 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8,
288 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72,
289 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86,
290 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff,
291 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed,
292 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c,
293 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed,
294 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30,
295 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99,
296 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4,
297 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80,
298 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37,
299 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04,
300 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e,
301 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd,
302 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c,
303 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09,
304 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb,
305 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b,
306 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53,
307 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b,
308 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f,
309 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff,
310 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40,
311 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6,
312 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb,
313 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73,
314 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f,
315 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4,
316 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66,
317 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1,
318 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80,
319 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f,
320 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5,
321 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7,
322 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce,
323 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff,
324 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48,
325 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26,
326 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72,
327 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88,
328 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9,
329 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc,
330 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8,
331 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09,
332 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8,
333 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c,
334 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48,
335 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d,
336 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f,
337 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae,
338 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97,
339 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8,
340 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75,
341 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc,
342 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27,
343 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf,
344 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7,
345 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0,
346 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8,
347 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c,
348 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44,
349 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54,
350 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38,
351 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f,
352 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b,
353 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7,
354 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef,
355 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e,
356 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c,
357 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c,
358 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0,
359 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37,
360 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf,
361 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e,
362 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4,
363 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60,
364 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe,
365 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61,
366 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3,
367 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe,
368 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40,
369 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec,
370 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f,
371 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7,
372 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79,
373 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c,
374 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f,
375 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21,
376 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9,
377 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30,
378 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b,
379 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee,
380 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6,
381 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3,
382 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09,
383 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd,
384 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f,
385 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9,
386 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc,
387 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59,
388 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60,
389 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5,
390 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1,
391 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8,
392 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9,
393 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab,
394 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80,
395 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01,
396 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e,
397 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d,
398 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35,
399 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38,
400 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a,
401 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac,
402 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca,
403 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57,
404 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed,
405 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20,
406 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef,
407 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c,
408 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a,
409 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64,
410 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4,
411 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54,
412 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16,
413 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26,
414 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc,
415 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87,
416 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60,
417 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d,
418 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54,
419 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13,
420 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59,
421 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb,
422 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f,
423 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15,
424 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78,
425 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93,
426 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e,
427 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31,
428 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1,
429 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37,
430 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15,
431 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78,
432 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f,
433 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31,
434 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f,
435 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc,
436 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9,
437 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3,
438 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe,
439 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4,
440 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24,
441 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1,
442 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85,
443 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8,
444 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09,
445 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c,
446 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46,
447 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5,
448 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39,
449 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2,
450 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc,
451 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35,
452 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde,
453 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80,
454 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15,
455 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63,
456 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58,
457 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d,
458 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf,
459 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12,
460 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c,
461 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b,
462 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1,
463 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6,
464 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73,
465 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9,
466 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e,
467 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22,
468 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb,
469 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2,
470 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c,
471 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c,
472 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93,
473 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f,
474 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38,
475 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57,
476 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03,
477 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90,
478 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8,
479 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4,
480 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36,
481 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7,
482 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47,
483 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46,
484 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73,
485 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72,
486 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23,
487 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a,
488 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58,
489 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f,
490 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96,
491 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9,
492 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b,
493 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c,
494 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef,
495 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3,
496 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4,
497 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f,
498 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17,
499 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18,
500 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8,
501 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98,
502 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42,
503 0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97,
504 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97,
505 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1,
506 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77,
507 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb,
508 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c,
509 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb,
510 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56,
511 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04,
512 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48,
513 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe,
514 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d,
515 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97,
516 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8,
517 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f,
518 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e,
519 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca,
520 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44,
521 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f,
522 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6,
523 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63,
524 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19,
525 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58,
526 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b,
527 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28,
528 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf,
529 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6,
530 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3,
531 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe,
532 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f,
533 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf,
534 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9,
535 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e,
536 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7,
537 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70,
538 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0,
539 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d,
540 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4,
541 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5,
542 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85,
543 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc,
544 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f,
545 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56,
546 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb,
547 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b,
548 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5,
549 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03,
550 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23,
551 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03,
552 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87,
553 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4,
554 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43,
555 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11,
556 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40,
557 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59,
558 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9,
559 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30,
560 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd,
561 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45,
562 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83,
563 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b,
564 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5,
565 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3,
566 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84,
567 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8,
568 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34,
569 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b,
570 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31,
571 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b,
572 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40,
573 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b,
574 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e,
575 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38,
576 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb,
577 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2,
578 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c,
579 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1,
580 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc,
581 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec,
582 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34,
583 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95,
584 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92,
585 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f,
586 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c,
587 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b,
588 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c,
589 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5,
590 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb,
591 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4,
592 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9,
593 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4,
594 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41,
595 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a,
596 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8,
597 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06,
598 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62,
599 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47,
600 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4,
601 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00,
602 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67,
603 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81,
604 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0,
605 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10,
606 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79,
607 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19,
608 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8,
609 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1,
610 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83,
611 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86,
612 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55,
613 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66,
614 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0,
615 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49,
616 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea,
617 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24,
618 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e,
619 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88,
620 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87,
621 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34,
622 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f,
623 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a,
624 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a,
625 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93,
626 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37,
627 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38,
628 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4,
629 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48,
630 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65,
631 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09,
632 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e,
633 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5,
634 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b,
635 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4,
636 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e,
637 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d,
638 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0,
639 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5,
640 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48,
641 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e,
642 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f,
643 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a,
644 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d,
645 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14,
646 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69,
647 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53,
648 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56,
649 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48,
650 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4,
651 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26,
652 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e,
653 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40,
654 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7,
655 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62,
656 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe,
657 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf,
658 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2,
659 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d,
660 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32,
661 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa,
662 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45,
663 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04,
664 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33,
665 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad,
666 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4,
667 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c,
668 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b,
669 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36,
670 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa,
671 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9,
672 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28,
673 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b,
674 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03,
675 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d,
676 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff,
677 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39,
678 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b,
679 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2,
680 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34,
681 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe,
682 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0,
683 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27,
684 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86,
685 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90,
686 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03,
687 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb,
688 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57,
689 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9,
690 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5,
691 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16,
692 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5,
693 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a,
694 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d,
695 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0,
696 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f,
697 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48,
698 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1,
699 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09,
700 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51,
701 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b,
702 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf,
703 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe,
704 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad,
705 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e,
706 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57,
707 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f,
708 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef,
709 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8,
710 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69,
711 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d,
712 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59,
713 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9,
714 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d,
715 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea,
716 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56,
717 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4,
718 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8,
719 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78,
720 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f,
721 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4,
722 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91,
723 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f,
724 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c,
725 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57,
726 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4,
727 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23,
728 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17,
729 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66,
730 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39,
731 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36,
732 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00,
733 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7,
734 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60,
735 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c,
736 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e,
737 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7,
738 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a,
739 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d,
740 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37,
741 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82,
742 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8,
743 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e,
744 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85,
745 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98,
746 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22,
747 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7,
748 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49,
749 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33,
750 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc,
751 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8,
752 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f,
753 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3,
754 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98,
755 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c,
756 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6,
757 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc,
758 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d,
759};
342 760
343#if 0 /*Not used at present */ 761/* 100 test cases */
344static void 762static struct crc_test {
345buf_dump(char const *prefix, unsigned char const *buf, size_t len) 763 u32 crc; /* random starting crc */
764 u32 start; /* random 6 bit offset in buf */
765 u32 length; /* random 11 bit length of test */
766 u32 crc_le; /* expected crc32_le result */
767 u32 crc_be; /* expected crc32_be result */
768 u32 crc32c_le; /* expected crc32c_le result */
769} test[] =
346{ 770{
347 fputs(prefix, stdout); 771 {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1,
348 while (len--) 772 0xf6e93d6c},
349 printf(" %02x", *buf++); 773 {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad,
350 putchar('\n'); 774 0x0fe92aca},
775 {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f,
776 0x52e1ebb8},
777 {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a,
778 0x0798af9a},
779 {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2,
780 0x18eb3152},
781 {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793,
782 0xd00d08c7},
783 {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed,
784 0x8ba966bc},
785 {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35,
786 0x11d694a2},
787 {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2,
788 0x6ab3208d},
789 {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10,
790 0xba4603c5},
791 {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb,
792 0xe6071c6f},
793 {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0,
794 0x179ec30a},
795 {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb,
796 0x0903beb8},
797 {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed,
798 0x6a7cb4fa},
799 {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591,
800 0xdb535801},
801 {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67,
802 0x92bed597},
803 {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd,
804 0x192a3f1b},
805 {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a,
806 0xccbaec1a},
807 {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b,
808 0x7eabae4d},
809 {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f,
810 0x28c72982},
811 {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d,
812 0xc3cd4d18},
813 {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a,
814 0xbca8f0e7},
815 {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97,
816 0x713f60b3},
817 {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2,
818 0xebd08fd5},
819 {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138,
820 0x64406c59},
821 {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032,
822 0x7421890e},
823 {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f,
824 0xe9347603},
825 {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f,
826 0x1bef9060},
827 {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32,
828 0x34720072},
829 {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef,
830 0x48310f59},
831 {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0,
832 0x783a4213},
833 {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59,
834 0x9e8efd41},
835 {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4,
836 0xfc3d34a5},
837 {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c,
838 0x17a52ae2},
839 {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51,
840 0x886d935a},
841 {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11,
842 0xeaaeaeb2},
843 {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659,
844 0x8e900a4b},
845 {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af,
846 0xd74662b1},
847 {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99,
848 0xd26752ba},
849 {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b,
850 0x8b1fcd62},
851 {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521,
852 0xf54342fe},
853 {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3,
854 0x5b95b988},
855 {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d,
856 0x2e1176be},
857 {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f,
858 0x66120546},
859 {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b,
860 0xf256a5cc},
861 {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0,
862 0x4af1dd69},
863 {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195,
864 0x56f0a04a},
865 {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d,
866 0x74f6b6b2},
867 {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4,
868 0x085951fd},
869 {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3,
870 0xc65387eb},
871 {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643,
872 0x1ca9257b},
873 {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10,
874 0xfd196d76},
875 {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d,
876 0x5ef88339},
877 {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5,
878 0x2c3714d9},
879 {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b,
880 0x58576548},
881 {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee,
882 0xfd7c57de},
883 {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14,
884 0xd5fedd59},
885 {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a,
886 0x1cc3b17b},
887 {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b,
888 0x270eed73},
889 {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3,
890 0x91ecbb11},
891 {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826,
892 0x05ed8d0c},
893 {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06,
894 0x0b09ad5b},
895 {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35,
896 0xf8d511fb},
897 {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801,
898 0x5ad832cc},
899 {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2,
900 0x1214d196},
901 {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d,
902 0x5747218a},
903 {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c,
904 0xde8f14de},
905 {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba,
906 0x3563b7b9},
907 {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5,
908 0x071475d0},
909 {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b,
910 0x54c79d60},
911 {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178,
912 0x4c53eee6},
913 {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3,
914 0x10137a3c},
915 {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605,
916 0xaa9d6c73},
917 {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1,
918 0xb63d23e7},
919 {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9,
920 0x7f53e9cf},
921 {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78,
922 0x13c1cd83},
923 {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9,
924 0x49ff5867},
925 {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd,
926 0x8467f211},
927 {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab,
928 0x3f9683b2},
929 {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb,
930 0x76a3f874},
931 {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77,
932 0x863b702f},
933 {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da,
934 0xdc6c58ff},
935 {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39,
936 0x0622cc95},
937 {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16,
938 0xe85605cd},
939 {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208,
940 0x31da5f06},
941 {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e,
942 0xa1f2e784},
943 {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5,
944 0xb07cc616},
945 {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892,
946 0xbf943b6c},
947 {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db,
948 0x2c01af1c},
949 {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43,
950 0x0fe5f56d},
951 {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac,
952 0xf8943b2d},
953 {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7,
954 0xe4d89272},
955 {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2,
956 0x7c2f6bbb},
957 {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2,
958 0xabbf388b},
959 {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640,
960 0x1dca1f4e},
961 {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f,
962 0x5c170e23},
963 {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99,
964 0xc0e9d672},
965 {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7,
966 0xc18bdc86},
967 {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499,
968 0xa874fcdd},
969 {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a,
970 0x9dc0bb48},
971};
351 972
352} 973#include <linux/time.h>
353#endif
354 974
355static void bytereverse(unsigned char *buf, size_t len) 975static int __init crc32c_test(void)
356{ 976{
357 while (len--) { 977 int i;
358 unsigned char x = bitrev8(*buf); 978 int errors = 0;
359 *buf++ = x; 979 int bytes = 0;
980 struct timespec start, stop;
981 u64 nsec;
982 unsigned long flags;
983
984 /* keep static to prevent cache warming code from
985 * getting eliminated by the compiler */
986 static u32 crc;
987
988 /* pre-warm the cache */
989 for (i = 0; i < 100; i++) {
990 bytes += 2*test[i].length;
991
992 crc ^= __crc32c_le(test[i].crc, test_buf +
993 test[i].start, test[i].length);
360 } 994 }
361}
362 995
363static void random_garbage(unsigned char *buf, size_t len) 996 /* reduce OS noise */
364{ 997 local_irq_save(flags);
365 while (len--) 998 local_irq_disable();
366 *buf++ = (unsigned char) random();
367}
368 999
369#if 0 /* Not used at present */ 1000 getnstimeofday(&start);
370static void store_le(u32 x, unsigned char *buf) 1001 for (i = 0; i < 100; i++) {
371{ 1002 if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf +
372 buf[0] = (unsigned char) x; 1003 test[i].start, test[i].length))
373 buf[1] = (unsigned char) (x >> 8); 1004 errors++;
374 buf[2] = (unsigned char) (x >> 16); 1005 }
375 buf[3] = (unsigned char) (x >> 24); 1006 getnstimeofday(&stop);
376}
377#endif
378 1007
379static void store_be(u32 x, unsigned char *buf) 1008 local_irq_restore(flags);
380{ 1009 local_irq_enable();
381 buf[0] = (unsigned char) (x >> 24); 1010
382 buf[1] = (unsigned char) (x >> 16); 1011 nsec = stop.tv_nsec - start.tv_nsec +
383 buf[2] = (unsigned char) (x >> 8); 1012 1000000000 * (stop.tv_sec - start.tv_sec);
384 buf[3] = (unsigned char) x; 1013
1014 pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
1015
1016 if (errors)
1017 pr_warn("crc32c: %d self tests failed\n", errors);
1018 else {
1019 pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n",
1020 bytes, nsec);
1021 }
1022
1023 return 0;
385} 1024}
386 1025
387/* 1026static int __init crc32_test(void)
388 * This checks that CRC(buf + CRC(buf)) = 0, and that
389 * CRC commutes with bit-reversal. This has the side effect
390 * of bytewise bit-reversing the input buffer, and returns
391 * the CRC of the reversed buffer.
392 */
393static u32 test_step(u32 init, unsigned char *buf, size_t len)
394{ 1027{
395 u32 crc1, crc2; 1028 int i;
396 size_t i; 1029 int errors = 0;
1030 int bytes = 0;
1031 struct timespec start, stop;
1032 u64 nsec;
1033 unsigned long flags;
1034
1035 /* keep static to prevent cache warming code from
1036 * getting eliminated by the compiler */
1037 static u32 crc;
1038
1039 /* pre-warm the cache */
1040 for (i = 0; i < 100; i++) {
1041 bytes += 2*test[i].length;
397 1042
398 crc1 = crc32_be(init, buf, len); 1043 crc ^= crc32_le(test[i].crc, test_buf +
399 store_be(crc1, buf + len); 1044 test[i].start, test[i].length);
400 crc2 = crc32_be(init, buf, len + 4); 1045
401 if (crc2) 1046 crc ^= crc32_be(test[i].crc, test_buf +
402 printf("\nCRC cancellation fail: 0x%08x should be 0\n", 1047 test[i].start, test[i].length);
403 crc2);
404
405 for (i = 0; i <= len + 4; i++) {
406 crc2 = crc32_be(init, buf, i);
407 crc2 = crc32_be(crc2, buf + i, len + 4 - i);
408 if (crc2)
409 printf("\nCRC split fail: 0x%08x\n", crc2);
410 } 1048 }
411 1049
412 /* Now swap it around for the other test */ 1050 /* reduce OS noise */
413 1051 local_irq_save(flags);
414 bytereverse(buf, len + 4); 1052 local_irq_disable();
415 init = bitrev32(init); 1053
416 crc2 = bitrev32(crc1); 1054 getnstimeofday(&start);
417 if (crc1 != bitrev32(crc2)) 1055 for (i = 0; i < 100; i++) {
418 printf("\nBit reversal fail: 0x%08x -> 0x%08x -> 0x%08x\n", 1056 if (test[i].crc_le != crc32_le(test[i].crc, test_buf +
419 crc1, crc2, bitrev32(crc2)); 1057 test[i].start, test[i].length))
420 crc1 = crc32_le(init, buf, len); 1058 errors++;
421 if (crc1 != crc2) 1059
422 printf("\nCRC endianness fail: 0x%08x != 0x%08x\n", crc1, 1060 if (test[i].crc_be != crc32_be(test[i].crc, test_buf +
423 crc2); 1061 test[i].start, test[i].length))
424 crc2 = crc32_le(init, buf, len + 4); 1062 errors++;
425 if (crc2)
426 printf("\nCRC cancellation fail: 0x%08x should be 0\n",
427 crc2);
428
429 for (i = 0; i <= len + 4; i++) {
430 crc2 = crc32_le(init, buf, i);
431 crc2 = crc32_le(crc2, buf + i, len + 4 - i);
432 if (crc2)
433 printf("\nCRC split fail: 0x%08x\n", crc2);
434 } 1063 }
1064 getnstimeofday(&stop);
435 1065
436 return crc1; 1066 local_irq_restore(flags);
437} 1067 local_irq_enable();
438 1068
439#define SIZE 64 1069 nsec = stop.tv_nsec - start.tv_nsec +
440#define INIT1 0 1070 1000000000 * (stop.tv_sec - start.tv_sec);
441#define INIT2 0
442 1071
443int main(void) 1072 pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
444{ 1073 CRC_LE_BITS, CRC_BE_BITS);
445 unsigned char buf1[SIZE + 4]; 1074
446 unsigned char buf2[SIZE + 4]; 1075 if (errors)
447 unsigned char buf3[SIZE + 4]; 1076 pr_warn("crc32: %d self tests failed\n", errors);
448 int i, j; 1077 else {
449 u32 crc1, crc2, crc3; 1078 pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n",
450 1079 bytes, nsec);
451 for (i = 0; i <= SIZE; i++) {
452 printf("\rTesting length %d...", i);
453 fflush(stdout);
454 random_garbage(buf1, i);
455 random_garbage(buf2, i);
456 for (j = 0; j < i; j++)
457 buf3[j] = buf1[j] ^ buf2[j];
458
459 crc1 = test_step(INIT1, buf1, i);
460 crc2 = test_step(INIT2, buf2, i);
461 /* Now check that CRC(buf1 ^ buf2) = CRC(buf1) ^ CRC(buf2) */
462 crc3 = test_step(INIT1 ^ INIT2, buf3, i);
463 if (crc3 != (crc1 ^ crc2))
464 printf("CRC XOR fail: 0x%08x != 0x%08x ^ 0x%08x\n",
465 crc3, crc1, crc2);
466 } 1080 }
467 printf("\nAll test complete. No failures expected.\n"); 1081
1082 return 0;
1083}
1084
1085static int __init crc32test_init(void)
1086{
1087 crc32_test();
1088 crc32c_test();
468 return 0; 1089 return 0;
469} 1090}
470 1091
471#endif /* UNITTEST */ 1092static void __exit crc32_exit(void)
1093{
1094}
1095
1096module_init(crc32test_init);
1097module_exit(crc32_exit);
1098#endif /* CONFIG_CRC32_SELFTEST */
diff --git a/lib/crc32defs.h b/lib/crc32defs.h
index 9b6773d7374..64cba2c3c70 100644
--- a/lib/crc32defs.h
+++ b/lib/crc32defs.h
@@ -6,27 +6,67 @@
6#define CRCPOLY_LE 0xedb88320 6#define CRCPOLY_LE 0xedb88320
7#define CRCPOLY_BE 0x04c11db7 7#define CRCPOLY_BE 0x04c11db7
8 8
9/* How many bits at a time to use. Requires a table of 4<<CRC_xx_BITS bytes. */ 9/*
10/* For less performance-sensitive, use 4 */ 10 * This is the CRC32c polynomial, as outlined by Castagnoli.
11#ifndef CRC_LE_BITS 11 * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
12 * x^8+x^6+x^0
13 */
14#define CRC32C_POLY_LE 0x82F63B78
15
16/* Try to choose an implementation variant via Kconfig */
17#ifdef CONFIG_CRC32_SLICEBY8
18# define CRC_LE_BITS 64
19# define CRC_BE_BITS 64
20#endif
21#ifdef CONFIG_CRC32_SLICEBY4
22# define CRC_LE_BITS 32
23# define CRC_BE_BITS 32
24#endif
25#ifdef CONFIG_CRC32_SARWATE
12# define CRC_LE_BITS 8 26# define CRC_LE_BITS 8
27# define CRC_BE_BITS 8
28#endif
29#ifdef CONFIG_CRC32_BIT
30# define CRC_LE_BITS 1
31# define CRC_BE_BITS 1
32#endif
33
34/*
35 * How many bits at a time to use. Valid values are 1, 2, 4, 8, 32 and 64.
36 * For less performance-sensitive, use 4 or 8 to save table size.
37 * For larger systems choose same as CPU architecture as default.
38 * This works well on X86_64, SPARC64 systems. This may require some
39 * elaboration after experiments with other architectures.
40 */
41#ifndef CRC_LE_BITS
42# ifdef CONFIG_64BIT
43# define CRC_LE_BITS 64
44# else
45# define CRC_LE_BITS 32
46# endif
13#endif 47#endif
14#ifndef CRC_BE_BITS 48#ifndef CRC_BE_BITS
15# define CRC_BE_BITS 8 49# ifdef CONFIG_64BIT
50# define CRC_BE_BITS 64
51# else
52# define CRC_BE_BITS 32
53# endif
16#endif 54#endif
17 55
18/* 56/*
19 * Little-endian CRC computation. Used with serial bit streams sent 57 * Little-endian CRC computation. Used with serial bit streams sent
20 * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC. 58 * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC.
21 */ 59 */
22#if CRC_LE_BITS > 8 || CRC_LE_BITS < 1 || CRC_LE_BITS & CRC_LE_BITS-1 60#if CRC_LE_BITS > 64 || CRC_LE_BITS < 1 || CRC_LE_BITS == 16 || \
23# error CRC_LE_BITS must be a power of 2 between 1 and 8 61 CRC_LE_BITS & CRC_LE_BITS-1
62# error "CRC_LE_BITS must be one of {1, 2, 4, 8, 32, 64}"
24#endif 63#endif
25 64
26/* 65/*
27 * Big-endian CRC computation. Used with serial bit streams sent 66 * Big-endian CRC computation. Used with serial bit streams sent
28 * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC. 67 * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC.
29 */ 68 */
30#if CRC_BE_BITS > 8 || CRC_BE_BITS < 1 || CRC_BE_BITS & CRC_BE_BITS-1 69#if CRC_BE_BITS > 64 || CRC_BE_BITS < 1 || CRC_BE_BITS == 16 || \
31# error CRC_BE_BITS must be a power of 2 between 1 and 8 70 CRC_BE_BITS & CRC_BE_BITS-1
71# error "CRC_BE_BITS must be one of {1, 2, 4, 8, 32, 64}"
32#endif 72#endif
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 85d0e412a04..8f8d5439e2d 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -1,14 +1,29 @@
1#include <stdio.h> 1#include <stdio.h>
2#include "../include/generated/autoconf.h"
2#include "crc32defs.h" 3#include "crc32defs.h"
3#include <inttypes.h> 4#include <inttypes.h>
4 5
5#define ENTRIES_PER_LINE 4 6#define ENTRIES_PER_LINE 4
6 7
7#define LE_TABLE_SIZE (1 << CRC_LE_BITS) 8#if CRC_LE_BITS > 8
8#define BE_TABLE_SIZE (1 << CRC_BE_BITS) 9# define LE_TABLE_ROWS (CRC_LE_BITS/8)
10# define LE_TABLE_SIZE 256
11#else
12# define LE_TABLE_ROWS 1
13# define LE_TABLE_SIZE (1 << CRC_LE_BITS)
14#endif
9 15
10static uint32_t crc32table_le[4][LE_TABLE_SIZE]; 16#if CRC_BE_BITS > 8
11static uint32_t crc32table_be[4][BE_TABLE_SIZE]; 17# define BE_TABLE_ROWS (CRC_BE_BITS/8)
18# define BE_TABLE_SIZE 256
19#else
20# define BE_TABLE_ROWS 1
21# define BE_TABLE_SIZE (1 << CRC_BE_BITS)
22#endif
23
24static uint32_t crc32table_le[LE_TABLE_ROWS][256];
25static uint32_t crc32table_be[BE_TABLE_ROWS][256];
26static uint32_t crc32ctable_le[LE_TABLE_ROWS][256];
12 27
13/** 28/**
14 * crc32init_le() - allocate and initialize LE table data 29 * crc32init_le() - allocate and initialize LE table data
@@ -17,27 +32,38 @@ static uint32_t crc32table_be[4][BE_TABLE_SIZE];
17 * fact that crctable[i^j] = crctable[i] ^ crctable[j]. 32 * fact that crctable[i^j] = crctable[i] ^ crctable[j].
18 * 33 *
19 */ 34 */
20static void crc32init_le(void) 35static void crc32init_le_generic(const uint32_t polynomial,
36 uint32_t (*tab)[256])
21{ 37{
22 unsigned i, j; 38 unsigned i, j;
23 uint32_t crc = 1; 39 uint32_t crc = 1;
24 40
25 crc32table_le[0][0] = 0; 41 tab[0][0] = 0;
26 42
27 for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) { 43 for (i = LE_TABLE_SIZE >> 1; i; i >>= 1) {
28 crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); 44 crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
29 for (j = 0; j < LE_TABLE_SIZE; j += 2 * i) 45 for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
30 crc32table_le[0][i + j] = crc ^ crc32table_le[0][j]; 46 tab[0][i + j] = crc ^ tab[0][j];
31 } 47 }
32 for (i = 0; i < LE_TABLE_SIZE; i++) { 48 for (i = 0; i < LE_TABLE_SIZE; i++) {
33 crc = crc32table_le[0][i]; 49 crc = tab[0][i];
34 for (j = 1; j < 4; j++) { 50 for (j = 1; j < LE_TABLE_ROWS; j++) {
35 crc = crc32table_le[0][crc & 0xff] ^ (crc >> 8); 51 crc = tab[0][crc & 0xff] ^ (crc >> 8);
36 crc32table_le[j][i] = crc; 52 tab[j][i] = crc;
37 } 53 }
38 } 54 }
39} 55}
40 56
57static void crc32init_le(void)
58{
59 crc32init_le_generic(CRCPOLY_LE, crc32table_le);
60}
61
62static void crc32cinit_le(void)
63{
64 crc32init_le_generic(CRC32C_POLY_LE, crc32ctable_le);
65}
66
41/** 67/**
42 * crc32init_be() - allocate and initialize BE table data 68 * crc32init_be() - allocate and initialize BE table data
43 */ 69 */
@@ -55,18 +81,18 @@ static void crc32init_be(void)
55 } 81 }
56 for (i = 0; i < BE_TABLE_SIZE; i++) { 82 for (i = 0; i < BE_TABLE_SIZE; i++) {
57 crc = crc32table_be[0][i]; 83 crc = crc32table_be[0][i];
58 for (j = 1; j < 4; j++) { 84 for (j = 1; j < BE_TABLE_ROWS; j++) {
59 crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8); 85 crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8);
60 crc32table_be[j][i] = crc; 86 crc32table_be[j][i] = crc;
61 } 87 }
62 } 88 }
63} 89}
64 90
65static void output_table(uint32_t table[4][256], int len, char *trans) 91static void output_table(uint32_t (*table)[256], int rows, int len, char *trans)
66{ 92{
67 int i, j; 93 int i, j;
68 94
69 for (j = 0 ; j < 4; j++) { 95 for (j = 0 ; j < rows; j++) {
70 printf("{"); 96 printf("{");
71 for (i = 0; i < len - 1; i++) { 97 for (i = 0; i < len - 1; i++) {
72 if (i % ENTRIES_PER_LINE == 0) 98 if (i % ENTRIES_PER_LINE == 0)
@@ -83,15 +109,30 @@ int main(int argc, char** argv)
83 109
84 if (CRC_LE_BITS > 1) { 110 if (CRC_LE_BITS > 1) {
85 crc32init_le(); 111 crc32init_le();
86 printf("static const u32 crc32table_le[4][256] = {"); 112 printf("static const u32 __cacheline_aligned "
87 output_table(crc32table_le, LE_TABLE_SIZE, "tole"); 113 "crc32table_le[%d][%d] = {",
114 LE_TABLE_ROWS, LE_TABLE_SIZE);
115 output_table(crc32table_le, LE_TABLE_ROWS,
116 LE_TABLE_SIZE, "tole");
88 printf("};\n"); 117 printf("};\n");
89 } 118 }
90 119
91 if (CRC_BE_BITS > 1) { 120 if (CRC_BE_BITS > 1) {
92 crc32init_be(); 121 crc32init_be();
93 printf("static const u32 crc32table_be[4][256] = {"); 122 printf("static const u32 __cacheline_aligned "
94 output_table(crc32table_be, BE_TABLE_SIZE, "tobe"); 123 "crc32table_be[%d][%d] = {",
124 BE_TABLE_ROWS, BE_TABLE_SIZE);
125 output_table(crc32table_be, LE_TABLE_ROWS,
126 BE_TABLE_SIZE, "tobe");
127 printf("};\n");
128 }
129 if (CRC_LE_BITS > 1) {
130 crc32cinit_le();
131 printf("static const u32 __cacheline_aligned "
132 "crc32ctable_le[%d][%d] = {",
133 LE_TABLE_ROWS, LE_TABLE_SIZE);
134 output_table(crc32ctable_le, LE_TABLE_ROWS,
135 LE_TABLE_SIZE, "tole");
95 printf("};\n"); 136 printf("};\n");
96 } 137 }
97 138
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 577ddf80597..b41dd90f290 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -16,23 +16,46 @@
16#include <linux/genalloc.h> 16#include <linux/genalloc.h>
17 17
18 18
19/* General purpose special memory pool descriptor. */
20struct gen_pool {
21 rwlock_t lock; /* protects chunks list */
22 struct list_head chunks; /* list of chunks in this pool */
23 unsigned order; /* minimum allocation order */
24};
25
26/* General purpose special memory pool chunk descriptor. */
27struct gen_pool_chunk {
28 spinlock_t lock; /* protects bits */
29 struct list_head next_chunk; /* next chunk in pool */
30 phys_addr_t phys_addr; /* physical starting address of memory chunk */
31 unsigned long start; /* start of memory chunk */
32 unsigned long size; /* number of bits */
33 unsigned long bits[0]; /* bitmap for allocating memory chunk */
34};
35
36
19/** 37/**
20 * gen_pool_create - create a new special memory pool 38 * gen_pool_create() - create a new special memory pool
21 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents 39 * @order: Log base 2 of number of bytes each bitmap bit
22 * @nid: node id of the node the pool structure should be allocated on, or -1 40 * represents.
41 * @nid: Node id of the node the pool structure should be allocated
42 * on, or -1. This will be also used for other allocations.
23 * 43 *
24 * Create a new special memory pool that can be used to manage special purpose 44 * Create a new special memory pool that can be used to manage special purpose
25 * memory not managed by the regular kmalloc/kfree interface. 45 * memory not managed by the regular kmalloc/kfree interface.
26 */ 46 */
27struct gen_pool *gen_pool_create(int min_alloc_order, int nid) 47struct gen_pool *__must_check gen_pool_create(unsigned order, int nid)
28{ 48{
29 struct gen_pool *pool; 49 struct gen_pool *pool;
30 50
31 pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid); 51 if (WARN_ON(order >= BITS_PER_LONG))
32 if (pool != NULL) { 52 return NULL;
53
54 pool = kmalloc_node(sizeof *pool, GFP_KERNEL, nid);
55 if (pool) {
33 rwlock_init(&pool->lock); 56 rwlock_init(&pool->lock);
34 INIT_LIST_HEAD(&pool->chunks); 57 INIT_LIST_HEAD(&pool->chunks);
35 pool->min_alloc_order = min_alloc_order; 58 pool->order = order;
36 } 59 }
37 return pool; 60 return pool;
38} 61}
@@ -40,33 +63,41 @@ EXPORT_SYMBOL(gen_pool_create);
40 63
41/** 64/**
42 * gen_pool_add_virt - add a new chunk of special memory to the pool 65 * gen_pool_add_virt - add a new chunk of special memory to the pool
43 * @pool: pool to add new memory chunk to 66 * @pool: Pool to add new memory chunk to
44 * @virt: virtual starting address of memory chunk to add to pool 67 * @virt: Virtual starting address of memory chunk to add to pool
45 * @phys: physical starting address of memory chunk to add to pool 68 * @phys: Physical starting address of memory chunk to add to pool
46 * @size: size in bytes of the memory chunk to add to pool 69 * @size: Size in bytes of the memory chunk to add to pool
47 * @nid: node id of the node the chunk structure and bitmap should be 70 * @nid: Node id of the node the chunk structure and bitmap should be
48 * allocated on, or -1 71 * allocated on, or -1
49 * 72 *
50 * Add a new chunk of special memory to the specified pool. 73 * Add a new chunk of special memory to the specified pool.
51 * 74 *
52 * Returns 0 on success or a -ve errno on failure. 75 * Returns 0 on success or a -ve errno on failure.
53 */ 76 */
54int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys, 77int __must_check
55 size_t size, int nid) 78gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
79 size_t size, int nid)
56{ 80{
57 struct gen_pool_chunk *chunk; 81 struct gen_pool_chunk *chunk;
58 int nbits = size >> pool->min_alloc_order; 82 size_t nbytes;
59 int nbytes = sizeof(struct gen_pool_chunk) +
60 (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
61 83
62 chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid); 84 if (WARN_ON(!virt || virt + size < virt ||
63 if (unlikely(chunk == NULL)) 85 (virt & ((1 << pool->order) - 1))))
86 return -EINVAL;
87
88 size = size >> pool->order;
89 if (WARN_ON(!size))
90 return -EINVAL;
91
92 nbytes = sizeof *chunk + BITS_TO_LONGS(size) * sizeof *chunk->bits;
93 chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
94 if (!chunk)
64 return -ENOMEM; 95 return -ENOMEM;
65 96
66 spin_lock_init(&chunk->lock); 97 spin_lock_init(&chunk->lock);
98 chunk->start = virt >> pool->order;
99 chunk->size = size;
67 chunk->phys_addr = phys; 100 chunk->phys_addr = phys;
68 chunk->start_addr = virt;
69 chunk->end_addr = virt + size;
70 101
71 write_lock(&pool->lock); 102 write_lock(&pool->lock);
72 list_add(&chunk->next_chunk, &pool->chunks); 103 list_add(&chunk->next_chunk, &pool->chunks);
@@ -90,10 +121,12 @@ phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
90 121
91 read_lock(&pool->lock); 122 read_lock(&pool->lock);
92 list_for_each(_chunk, &pool->chunks) { 123 list_for_each(_chunk, &pool->chunks) {
124 unsigned long start_addr;
93 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); 125 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
94 126
95 if (addr >= chunk->start_addr && addr < chunk->end_addr) 127 start_addr = chunk->start << pool->order;
96 return chunk->phys_addr + addr - chunk->start_addr; 128 if (addr >= start_addr && addr < start_addr + chunk->size)
129 return chunk->phys_addr + addr - start_addr;
97 } 130 }
98 read_unlock(&pool->lock); 131 read_unlock(&pool->lock);
99 132
@@ -102,115 +135,116 @@ phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
102EXPORT_SYMBOL(gen_pool_virt_to_phys); 135EXPORT_SYMBOL(gen_pool_virt_to_phys);
103 136
104/** 137/**
105 * gen_pool_destroy - destroy a special memory pool 138 * gen_pool_destroy() - destroy a special memory pool
106 * @pool: pool to destroy 139 * @pool: Pool to destroy.
107 * 140 *
108 * Destroy the specified special memory pool. Verifies that there are no 141 * Destroy the specified special memory pool. Verifies that there are no
109 * outstanding allocations. 142 * outstanding allocations.
110 */ 143 */
111void gen_pool_destroy(struct gen_pool *pool) 144void gen_pool_destroy(struct gen_pool *pool)
112{ 145{
113 struct list_head *_chunk, *_next_chunk;
114 struct gen_pool_chunk *chunk; 146 struct gen_pool_chunk *chunk;
115 int order = pool->min_alloc_order; 147 int bit;
116 int bit, end_bit;
117
118 148
119 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { 149 while (!list_empty(&pool->chunks)) {
120 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); 150 chunk = list_entry(pool->chunks.next, struct gen_pool_chunk,
151 next_chunk);
121 list_del(&chunk->next_chunk); 152 list_del(&chunk->next_chunk);
122 153
123 end_bit = (chunk->end_addr - chunk->start_addr) >> order; 154 bit = find_next_bit(chunk->bits, chunk->size, 0);
124 bit = find_next_bit(chunk->bits, end_bit, 0); 155 BUG_ON(bit < chunk->size);
125 BUG_ON(bit < end_bit);
126 156
127 kfree(chunk); 157 kfree(chunk);
128 } 158 }
129 kfree(pool); 159 kfree(pool);
130 return;
131} 160}
132EXPORT_SYMBOL(gen_pool_destroy); 161EXPORT_SYMBOL(gen_pool_destroy);
133 162
134/** 163/**
135 * gen_pool_alloc - allocate special memory from the pool 164 * gen_pool_alloc_aligned() - allocate special memory from the pool
136 * @pool: pool to allocate from 165 * @pool: Pool to allocate from.
137 * @size: number of bytes to allocate from the pool 166 * @size: Number of bytes to allocate from the pool.
167 * @alignment_order: Order the allocated space should be
168 * aligned to (eg. 20 means allocated space
169 * must be aligned to 1MiB).
138 * 170 *
139 * Allocate the requested number of bytes from the specified pool. 171 * Allocate the requested number of bytes from the specified pool.
140 * Uses a first-fit algorithm. 172 * Uses a first-fit algorithm.
141 */ 173 */
142unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) 174unsigned long __must_check
175gen_pool_alloc_aligned(struct gen_pool *pool, size_t size,
176 unsigned alignment_order)
143{ 177{
144 struct list_head *_chunk; 178 unsigned long addr, align_mask = 0, flags, start;
145 struct gen_pool_chunk *chunk; 179 struct gen_pool_chunk *chunk;
146 unsigned long addr, flags;
147 int order = pool->min_alloc_order;
148 int nbits, start_bit, end_bit;
149 180
150 if (size == 0) 181 if (size == 0)
151 return 0; 182 return 0;
152 183
153 nbits = (size + (1UL << order) - 1) >> order; 184 if (alignment_order > pool->order)
185 align_mask = (1 << (alignment_order - pool->order)) - 1;
154 186
155 read_lock(&pool->lock); 187 size = (size + (1UL << pool->order) - 1) >> pool->order;
156 list_for_each(_chunk, &pool->chunks) {
157 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
158 188
159 end_bit = (chunk->end_addr - chunk->start_addr) >> order; 189 read_lock(&pool->lock);
190 list_for_each_entry(chunk, &pool->chunks, next_chunk) {
191 if (chunk->size < size)
192 continue;
160 193
161 spin_lock_irqsave(&chunk->lock, flags); 194 spin_lock_irqsave(&chunk->lock, flags);
162 start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0, 195 start = bitmap_find_next_zero_area_off(chunk->bits, chunk->size,
163 nbits, 0); 196 0, size, align_mask,
164 if (start_bit >= end_bit) { 197 chunk->start);
198 if (start >= chunk->size) {
165 spin_unlock_irqrestore(&chunk->lock, flags); 199 spin_unlock_irqrestore(&chunk->lock, flags);
166 continue; 200 continue;
167 } 201 }
168 202
169 addr = chunk->start_addr + ((unsigned long)start_bit << order); 203 bitmap_set(chunk->bits, start, size);
170
171 bitmap_set(chunk->bits, start_bit, nbits);
172 spin_unlock_irqrestore(&chunk->lock, flags); 204 spin_unlock_irqrestore(&chunk->lock, flags);
173 read_unlock(&pool->lock); 205 addr = (chunk->start + start) << pool->order;
174 return addr; 206 goto done;
175 } 207 }
208
209 addr = 0;
210done:
176 read_unlock(&pool->lock); 211 read_unlock(&pool->lock);
177 return 0; 212 return addr;
178} 213}
179EXPORT_SYMBOL(gen_pool_alloc); 214EXPORT_SYMBOL(gen_pool_alloc_aligned);
180 215
181/** 216/**
182 * gen_pool_free - free allocated special memory back to the pool 217 * gen_pool_free() - free allocated special memory back to the pool
183 * @pool: pool to free to 218 * @pool: Pool to free to.
184 * @addr: starting address of memory to free back to pool 219 * @addr: Starting address of memory to free back to pool.
185 * @size: size in bytes of memory to free 220 * @size: Size in bytes of memory to free.
186 * 221 *
187 * Free previously allocated special memory back to the specified pool. 222 * Free previously allocated special memory back to the specified pool.
188 */ 223 */
189void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) 224void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
190{ 225{
191 struct list_head *_chunk;
192 struct gen_pool_chunk *chunk; 226 struct gen_pool_chunk *chunk;
193 unsigned long flags; 227 unsigned long flags;
194 int order = pool->min_alloc_order;
195 int bit, nbits;
196 228
197 nbits = (size + (1UL << order) - 1) >> order; 229 if (!size)
230 return;
198 231
199 read_lock(&pool->lock); 232 addr = addr >> pool->order;
200 list_for_each(_chunk, &pool->chunks) { 233 size = (size + (1UL << pool->order) - 1) >> pool->order;
201 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
202 234
203 if (addr >= chunk->start_addr && addr < chunk->end_addr) { 235 BUG_ON(addr + size < addr);
204 BUG_ON(addr + size > chunk->end_addr); 236
237 read_lock(&pool->lock);
238 list_for_each_entry(chunk, &pool->chunks, next_chunk)
239 if (addr >= chunk->start &&
240 addr + size <= chunk->start + chunk->size) {
205 spin_lock_irqsave(&chunk->lock, flags); 241 spin_lock_irqsave(&chunk->lock, flags);
206 bit = (addr - chunk->start_addr) >> order; 242 bitmap_clear(chunk->bits, addr - chunk->start, size);
207 while (nbits--)
208 __clear_bit(bit++, chunk->bits);
209 spin_unlock_irqrestore(&chunk->lock, flags); 243 spin_unlock_irqrestore(&chunk->lock, flags);
210 break; 244 goto done;
211 } 245 }
212 } 246 BUG_ON(1);
213 BUG_ON(nbits > 0); 247done:
214 read_unlock(&pool->lock); 248 read_unlock(&pool->lock);
215} 249}
216EXPORT_SYMBOL(gen_pool_free); 250EXPORT_SYMBOL(gen_pool_free);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 70af0a7f97c..6d40244e801 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -29,16 +29,17 @@
29 29
30u64 uevent_seqnum; 30u64 uevent_seqnum;
31char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; 31char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
32static DEFINE_SPINLOCK(sequence_lock);
33#ifdef CONFIG_NET 32#ifdef CONFIG_NET
34struct uevent_sock { 33struct uevent_sock {
35 struct list_head list; 34 struct list_head list;
36 struct sock *sk; 35 struct sock *sk;
37}; 36};
38static LIST_HEAD(uevent_sock_list); 37static LIST_HEAD(uevent_sock_list);
39static DEFINE_MUTEX(uevent_sock_mutex);
40#endif 38#endif
41 39
40/* This lock protects uevent_seqnum and uevent_sock_list */
41static DEFINE_MUTEX(uevent_sock_mutex);
42
42/* the strings here must match the enum in include/linux/kobject.h */ 43/* the strings here must match the enum in include/linux/kobject.h */
43static const char *kobject_actions[] = { 44static const char *kobject_actions[] = {
44 [KOBJ_ADD] = "add", 45 [KOBJ_ADD] = "add",
@@ -136,7 +137,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
136 struct kobject *top_kobj; 137 struct kobject *top_kobj;
137 struct kset *kset; 138 struct kset *kset;
138 const struct kset_uevent_ops *uevent_ops; 139 const struct kset_uevent_ops *uevent_ops;
139 u64 seq;
140 int i = 0; 140 int i = 0;
141 int retval = 0; 141 int retval = 0;
142#ifdef CONFIG_NET 142#ifdef CONFIG_NET
@@ -243,17 +243,16 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
243 else if (action == KOBJ_REMOVE) 243 else if (action == KOBJ_REMOVE)
244 kobj->state_remove_uevent_sent = 1; 244 kobj->state_remove_uevent_sent = 1;
245 245
246 mutex_lock(&uevent_sock_mutex);
246 /* we will send an event, so request a new sequence number */ 247 /* we will send an event, so request a new sequence number */
247 spin_lock(&sequence_lock); 248 retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum);
248 seq = ++uevent_seqnum; 249 if (retval) {
249 spin_unlock(&sequence_lock); 250 mutex_unlock(&uevent_sock_mutex);
250 retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq);
251 if (retval)
252 goto exit; 251 goto exit;
252 }
253 253
254#if defined(CONFIG_NET) 254#if defined(CONFIG_NET)
255 /* send netlink message */ 255 /* send netlink message */
256 mutex_lock(&uevent_sock_mutex);
257 list_for_each_entry(ue_sk, &uevent_sock_list, list) { 256 list_for_each_entry(ue_sk, &uevent_sock_list, list) {
258 struct sock *uevent_sock = ue_sk->sk; 257 struct sock *uevent_sock = ue_sk->sk;
259 struct sk_buff *skb; 258 struct sk_buff *skb;
@@ -282,13 +281,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
282 kobj_bcast_filter, 281 kobj_bcast_filter,
283 kobj); 282 kobj);
284 /* ENOBUFS should be handled in userspace */ 283 /* ENOBUFS should be handled in userspace */
285 if (retval == -ENOBUFS) 284 if (retval == -ENOBUFS || retval == -ESRCH)
286 retval = 0; 285 retval = 0;
287 } else 286 } else
288 retval = -ENOMEM; 287 retval = -ENOMEM;
289 } 288 }
290 mutex_unlock(&uevent_sock_mutex);
291#endif 289#endif
290 mutex_unlock(&uevent_sock_mutex);
292 291
293 /* call uevent_helper, usually only enabled during early boot */ 292 /* call uevent_helper, usually only enabled during early boot */
294 if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { 293 if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
diff --git a/lib/md5.c b/lib/md5.c
new file mode 100644
index 00000000000..c777180e1f2
--- /dev/null
+++ b/lib/md5.c
@@ -0,0 +1,95 @@
1#include <linux/kernel.h>
2#include <linux/module.h>
3#include <linux/cryptohash.h>
4
5#define F1(x, y, z) (z ^ (x & (y ^ z)))
6#define F2(x, y, z) F1(z, x, y)
7#define F3(x, y, z) (x ^ y ^ z)
8#define F4(x, y, z) (y ^ (x | ~z))
9
10#define MD5STEP(f, w, x, y, z, in, s) \
11 (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
12
13void md5_transform(__u32 *hash, __u32 const *in)
14{
15 u32 a, b, c, d;
16
17 a = hash[0];
18 b = hash[1];
19 c = hash[2];
20 d = hash[3];
21
22 MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
23 MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
24 MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
25 MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
26 MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
27 MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
28 MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
29 MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
30 MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
31 MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
32 MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
33 MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
34 MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
35 MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
36 MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
37 MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
38
39 MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
40 MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
41 MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
42 MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
43 MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
44 MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
45 MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
46 MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
47 MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
48 MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
49 MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
50 MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
51 MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
52 MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
53 MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
54 MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
55
56 MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
57 MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
58 MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
59 MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
60 MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
61 MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
62 MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
63 MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
64 MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
65 MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
66 MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
67 MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
68 MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
69 MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
70 MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
71 MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
72
73 MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
74 MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
75 MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
76 MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
77 MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
78 MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
79 MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
80 MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
81 MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
82 MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
83 MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
84 MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
85 MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
86 MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
87 MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
88 MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
89
90 hash[0] += a;
91 hash[1] += b;
92 hash[2] += c;
93 hash[3] += d;
94}
95EXPORT_SYMBOL(md5_transform);
diff --git a/lib/memcopy.c b/lib/memcopy.c
new file mode 100644
index 00000000000..d2d3376f738
--- /dev/null
+++ b/lib/memcopy.c
@@ -0,0 +1,403 @@
1/*
2 * memcopy.c -- subroutines for memory copy functions.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2.1 of the License, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
12 * Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
17 *
18 * The code is derived from the GNU C Library.
19 * Copyright (C) 1991, 1992, 1993, 1997, 2004 Free Software Foundation, Inc.
20 */
21
22/* BE VERY CAREFUL IF YOU CHANGE THIS CODE...! */
23
24#include <linux/memcopy.h>
25
/*
 * _wordcopy_fwd_aligned -- Copy block beginning at SRCP to block beginning
 * at DSTP with LEN `op_t' words (not LEN bytes!).
 * Both SRCP and DSTP should be aligned for memory operations on `op_t's.
 *
 * The copy loop below is unrolled eight words per iteration.  The switch
 * jumps into the middle of that loop (Duff's-device style) so the
 * LEN % 8 leftover words need no separate cleanup loop.  To make the
 * fixed [0..7] indices inside the loop address the right words, each
 * case first biases SRCP/DSTP downward and rounds LEN up to a multiple
 * of eight before jumping in.
 */
void _wordcopy_fwd_aligned (long int dstp, long int srcp, size_t len)
{
	op_t a0, a1;

	switch (len % 8) {
	case 2:
		a0 = ((op_t *) srcp)[0];
		srcp -= 6 * OPSIZ;
		dstp -= 7 * OPSIZ;
		len += 6;
		goto do1;
	case 3:
		a1 = ((op_t *) srcp)[0];
		srcp -= 5 * OPSIZ;
		dstp -= 6 * OPSIZ;
		len += 5;
		goto do2;
	case 4:
		a0 = ((op_t *) srcp)[0];
		srcp -= 4 * OPSIZ;
		dstp -= 5 * OPSIZ;
		len += 4;
		goto do3;
	case 5:
		a1 = ((op_t *) srcp)[0];
		srcp -= 3 * OPSIZ;
		dstp -= 4 * OPSIZ;
		len += 3;
		goto do4;
	case 6:
		a0 = ((op_t *) srcp)[0];
		srcp -= 2 * OPSIZ;
		dstp -= 3 * OPSIZ;
		len += 2;
		goto do5;
	case 7:
		a1 = ((op_t *) srcp)[0];
		srcp -= 1 * OPSIZ;
		dstp -= 2 * OPSIZ;
		len += 1;
		goto do6;
	case 0:
		/*
		 * NOTE(review): LEN == 0 appears to be possible here only
		 * when OP_T_THRESHOLD <= 3 * OPSIZ; otherwise callers
		 * guarantee a non-empty copy -- confirm against
		 * <linux/memcopy.h> callers.
		 */
		if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
			return;
		a0 = ((op_t *) srcp)[0];
		srcp -= 0 * OPSIZ;
		dstp -= 1 * OPSIZ;
		goto do7;
	case 1:
		a1 = ((op_t *) srcp)[0];
		srcp -=-1 * OPSIZ;
		dstp -= 0 * OPSIZ;
		len -= 1;
		if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
			goto do0;
		goto do8;	/* No-op. */
	}

	/*
	 * Main unrolled loop.  Loads run one word ahead of stores: each
	 * statement stores the value fetched by the previous statement
	 * while issuing the next load (hand-written software pipelining
	 * alternating between a0 and a1).
	 */
	do {
do8:
		a0 = ((op_t *) srcp)[0];
		((op_t *) dstp)[0] = a1;
do7:
		a1 = ((op_t *) srcp)[1];
		((op_t *) dstp)[1] = a0;
do6:
		a0 = ((op_t *) srcp)[2];
		((op_t *) dstp)[2] = a1;
do5:
		a1 = ((op_t *) srcp)[3];
		((op_t *) dstp)[3] = a0;
do4:
		a0 = ((op_t *) srcp)[4];
		((op_t *) dstp)[4] = a1;
do3:
		a1 = ((op_t *) srcp)[5];
		((op_t *) dstp)[5] = a0;
do2:
		a0 = ((op_t *) srcp)[6];
		((op_t *) dstp)[6] = a1;
do1:
		a1 = ((op_t *) srcp)[7];
		((op_t *) dstp)[7] = a0;

		srcp += 8 * OPSIZ;
		dstp += 8 * OPSIZ;
		len -= 8;
	} while (len != 0);

	/*
	 * This is the right position for do0. Please don't move it into
	 * the loop.  (The final pipelined store must happen exactly once,
	 * after the loop exits.)
	 */
do0:
	((op_t *) dstp)[0] = a1;
}
127
/*
 * _wordcopy_fwd_dest_aligned -- Copy block beginning at SRCP to block
 * beginning at DSTP with LEN `op_t' words (not LEN bytes!).  DSTP should
 * be aligned for memory operations on `op_t's, but SRCP must *not* be aligned.
 *
 * Every source read is done from an aligned word boundary; each
 * destination word is then assembled with MERGE() from two consecutive
 * aligned source words (the tail of the earlier word shifted by sh_1,
 * plus the head of the later word shifted by sh_2).  The copy loop is
 * unrolled four words per iteration and entered Duff's-device style via
 * the switch, which biases SRCP/DSTP and rounds LEN up to a multiple of
 * four, pre-loading the words the entry point expects in a0..a3.
 */

void _wordcopy_fwd_dest_aligned (long int dstp, long int srcp, size_t len)
{
	op_t a0, a1, a2, a3;
	int sh_1, sh_2;

	/*
	 * Calculate how to shift a word read at the memory operation aligned
	 * srcp to make it aligned for copy.  (sh_1 is the bit offset of the
	 * misalignment; sh_2 is its complement within one op_t.)
	 */
	sh_1 = 8 * (srcp % OPSIZ);
	sh_2 = 8 * OPSIZ - sh_1;

	/*
	 * Make SRCP aligned by rounding it down to the beginning of the `op_t'
	 * it points in the middle of.
	 */
	srcp &= -OPSIZ;

	switch (len % 4) {
	case 2:
		a1 = ((op_t *) srcp)[0];
		a2 = ((op_t *) srcp)[1];
		srcp -= 1 * OPSIZ;
		dstp -= 3 * OPSIZ;
		len += 2;
		goto do1;
	case 3:
		a0 = ((op_t *) srcp)[0];
		a1 = ((op_t *) srcp)[1];
		srcp -= 0 * OPSIZ;
		dstp -= 2 * OPSIZ;
		len += 1;
		goto do2;
	case 0:
		/*
		 * NOTE(review): LEN == 0 appears to be possible here only
		 * when OP_T_THRESHOLD <= 3 * OPSIZ -- confirm against
		 * callers in <linux/memcopy.h>.
		 */
		if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
			return;
		a3 = ((op_t *) srcp)[0];
		a0 = ((op_t *) srcp)[1];
		srcp -=-1 * OPSIZ;
		dstp -= 1 * OPSIZ;
		len += 0;
		goto do3;
	case 1:
		a2 = ((op_t *) srcp)[0];
		a3 = ((op_t *) srcp)[1];
		srcp -=-2 * OPSIZ;
		dstp -= 0 * OPSIZ;
		len -= 1;
		if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
			goto do0;
		goto do4;	/* No-op. */
	}

	/*
	 * a0..a3 act as a rotating four-entry register file: each step
	 * loads one new aligned source word and stores the MERGE of the
	 * two oldest ones.
	 */
	do {
do4:
		a0 = ((op_t *) srcp)[0];
		((op_t *) dstp)[0] = MERGE (a2, sh_1, a3, sh_2);
do3:
		a1 = ((op_t *) srcp)[1];
		((op_t *) dstp)[1] = MERGE (a3, sh_1, a0, sh_2);
do2:
		a2 = ((op_t *) srcp)[2];
		((op_t *) dstp)[2] = MERGE (a0, sh_1, a1, sh_2);
do1:
		a3 = ((op_t *) srcp)[3];
		((op_t *) dstp)[3] = MERGE (a1, sh_1, a2, sh_2);

		srcp += 4 * OPSIZ;
		dstp += 4 * OPSIZ;
		len -= 4;
	} while (len != 0);

	/*
	 * This is the right position for do0. Please don't move it into
	 * the loop.  (The final merged store must happen exactly once,
	 * after the loop exits.)
	 */
do0:
	((op_t *) dstp)[0] = MERGE (a2, sh_1, a3, sh_2);
}
213
/*
 * _wordcopy_bwd_aligned -- Copy block finishing right before
 * SRCP to block finishing right before DSTP with LEN `op_t' words (not LEN
 * bytes!).  Both SRCP and DSTP should be aligned for memory operations
 * on `op_t's.
 *
 * Backward (high-to-low address) mirror of _wordcopy_fwd_aligned: SRCP
 * and DSTP point one past the END of their blocks, the unrolled loop
 * copies eight words per iteration moving downward, and the switch
 * jumps into the loop Duff's-device style after biasing the pointers
 * and rounding LEN up to a multiple of eight.
 */
void _wordcopy_bwd_aligned (long int dstp, long int srcp, size_t len)
{
	op_t a0, a1;

	switch (len % 8) {
	case 2:
		srcp -= 2 * OPSIZ;
		dstp -= 1 * OPSIZ;
		a0 = ((op_t *) srcp)[1];
		len += 6;
		goto do1;
	case 3:
		srcp -= 3 * OPSIZ;
		dstp -= 2 * OPSIZ;
		a1 = ((op_t *) srcp)[2];
		len += 5;
		goto do2;
	case 4:
		srcp -= 4 * OPSIZ;
		dstp -= 3 * OPSIZ;
		a0 = ((op_t *) srcp)[3];
		len += 4;
		goto do3;
	case 5:
		srcp -= 5 * OPSIZ;
		dstp -= 4 * OPSIZ;
		a1 = ((op_t *) srcp)[4];
		len += 3;
		goto do4;
	case 6:
		srcp -= 6 * OPSIZ;
		dstp -= 5 * OPSIZ;
		a0 = ((op_t *) srcp)[5];
		len += 2;
		goto do5;
	case 7:
		srcp -= 7 * OPSIZ;
		dstp -= 6 * OPSIZ;
		a1 = ((op_t *) srcp)[6];
		len += 1;
		goto do6;
	case 0:
		/*
		 * NOTE(review): LEN == 0 appears to be possible here only
		 * when OP_T_THRESHOLD <= 3 * OPSIZ -- confirm against
		 * callers in <linux/memcopy.h>.
		 */
		if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
			return;
		srcp -= 8 * OPSIZ;
		dstp -= 7 * OPSIZ;
		a0 = ((op_t *) srcp)[7];
		goto do7;
	case 1:
		srcp -= 9 * OPSIZ;
		dstp -= 8 * OPSIZ;
		a1 = ((op_t *) srcp)[8];
		len -= 1;
		if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
			goto do0;
		goto do8;	/* No-op. */
	}

	/*
	 * Main unrolled loop, descending through indices [7..0].  Loads
	 * run one word ahead of stores, alternating a0/a1 (hand-written
	 * software pipelining, mirrored from the forward variant).
	 */
	do {
do8:
		a0 = ((op_t *) srcp)[7];
		((op_t *) dstp)[7] = a1;
do7:
		a1 = ((op_t *) srcp)[6];
		((op_t *) dstp)[6] = a0;
do6:
		a0 = ((op_t *) srcp)[5];
		((op_t *) dstp)[5] = a1;
do5:
		a1 = ((op_t *) srcp)[4];
		((op_t *) dstp)[4] = a0;
do4:
		a0 = ((op_t *) srcp)[3];
		((op_t *) dstp)[3] = a1;
do3:
		a1 = ((op_t *) srcp)[2];
		((op_t *) dstp)[2] = a0;
do2:
		a0 = ((op_t *) srcp)[1];
		((op_t *) dstp)[1] = a1;
do1:
		a1 = ((op_t *) srcp)[0];
		((op_t *) dstp)[0] = a0;

		srcp -= 8 * OPSIZ;
		dstp -= 8 * OPSIZ;
		len -= 8;
	} while (len != 0);

	/*
	 * This is the right position for do0. Please don't move it into
	 * the loop.  (The final pipelined store must happen exactly once,
	 * after the loop exits.)
	 */
do0:
	((op_t *) dstp)[7] = a1;
}
316
/*
 * _wordcopy_bwd_dest_aligned -- Copy block finishing right before SRCP to
 * block finishing right before DSTP with LEN `op_t' words (not LEN bytes!).
 * DSTP should be aligned for memory operations on `op_t', but SRCP must *not*
 * be aligned.
 *
 * Backward (high-to-low address) mirror of _wordcopy_fwd_dest_aligned:
 * SRCP and DSTP point one past the END of their blocks.  Source reads
 * are done from aligned word boundaries and each destination word is
 * assembled with MERGE() from two consecutive aligned source words.
 * The loop is unrolled four words per iteration and entered
 * Duff's-device style via the switch, which biases the pointers, rounds
 * LEN up to a multiple of four, and pre-loads the words the entry point
 * expects in a0..a3.
 */
void _wordcopy_bwd_dest_aligned (long int dstp, long int srcp, size_t len)
{
	op_t a0, a1, a2, a3;
	int sh_1, sh_2;

	/*
	 * Calculate how to shift a word read at the memory operation aligned
	 * srcp to make it aligned for copy.  (sh_1 is the bit offset of the
	 * misalignment; sh_2 is its complement within one op_t.)
	 */

	sh_1 = 8 * (srcp % OPSIZ);
	sh_2 = 8 * OPSIZ - sh_1;

	/*
	 * Make srcp aligned by rounding it down to the beginning of the op_t
	 * it points in the middle of, then up one word: since we copy
	 * backward, the first aligned word read lies BELOW srcp.
	 */
	srcp &= -OPSIZ;
	srcp += OPSIZ;

	switch (len % 4) {
	case 2:
		srcp -= 3 * OPSIZ;
		dstp -= 1 * OPSIZ;
		a2 = ((op_t *) srcp)[2];
		a1 = ((op_t *) srcp)[1];
		len += 2;
		goto do1;
	case 3:
		srcp -= 4 * OPSIZ;
		dstp -= 2 * OPSIZ;
		a3 = ((op_t *) srcp)[3];
		a2 = ((op_t *) srcp)[2];
		len += 1;
		goto do2;
	case 0:
		/*
		 * NOTE(review): LEN == 0 appears to be possible here only
		 * when OP_T_THRESHOLD <= 3 * OPSIZ -- confirm against
		 * callers in <linux/memcopy.h>.
		 */
		if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
			return;
		srcp -= 5 * OPSIZ;
		dstp -= 3 * OPSIZ;
		a0 = ((op_t *) srcp)[4];
		a3 = ((op_t *) srcp)[3];
		goto do3;
	case 1:
		srcp -= 6 * OPSIZ;
		dstp -= 4 * OPSIZ;
		a1 = ((op_t *) srcp)[5];
		a0 = ((op_t *) srcp)[4];
		len -= 1;
		if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
			goto do0;
		goto do4;	/* No-op. */
	}

	/*
	 * a0..a3 act as a rotating four-entry register file, descending
	 * through indices [3..0]: each step loads one new aligned source
	 * word and stores the MERGE of the two most recently consumed.
	 */
	do {
do4:
		a3 = ((op_t *) srcp)[3];
		((op_t *) dstp)[3] = MERGE (a0, sh_1, a1, sh_2);
do3:
		a2 = ((op_t *) srcp)[2];
		((op_t *) dstp)[2] = MERGE (a3, sh_1, a0, sh_2);
do2:
		a1 = ((op_t *) srcp)[1];
		((op_t *) dstp)[1] = MERGE (a2, sh_1, a3, sh_2);
do1:
		a0 = ((op_t *) srcp)[0];
		((op_t *) dstp)[0] = MERGE (a1, sh_1, a2, sh_2);

		srcp -= 4 * OPSIZ;
		dstp -= 4 * OPSIZ;
		len -= 4;
	} while (len != 0);

	/*
	 * This is the right position for do0. Please don't move it into
	 * the loop.  (The final merged store must happen exactly once,
	 * after the loop exits.)
	 */
do0:
	((op_t *) dstp)[3] = MERGE (a0, sh_1, a1, sh_2);
}
403
diff --git a/lib/nlattr.c b/lib/nlattr.c
index ac09f2226dc..a8408b6cacd 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -20,6 +20,7 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
20 [NLA_U16] = sizeof(u16), 20 [NLA_U16] = sizeof(u16),
21 [NLA_U32] = sizeof(u32), 21 [NLA_U32] = sizeof(u32),
22 [NLA_U64] = sizeof(u64), 22 [NLA_U64] = sizeof(u64),
23 [NLA_MSECS] = sizeof(u64),
23 [NLA_NESTED] = NLA_HDRLEN, 24 [NLA_NESTED] = NLA_HDRLEN,
24}; 25};
25 26
diff --git a/lib/plist.c b/lib/plist.c
index 0ae7e643172..a0a4da489c2 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -56,11 +56,6 @@ static void plist_check_list(struct list_head *top)
56 56
57static void plist_check_head(struct plist_head *head) 57static void plist_check_head(struct plist_head *head)
58{ 58{
59 WARN_ON(head != &test_head && !head->rawlock && !head->spinlock);
60 if (head->rawlock)
61 WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
62 if (head->spinlock)
63 WARN_ON_SMP(!spin_is_locked(head->spinlock));
64 if (!plist_head_empty(head)) 59 if (!plist_head_empty(head))
65 plist_check_list(&plist_first(head)->prio_list); 60 plist_check_list(&plist_first(head)->prio_list);
66 plist_check_list(&head->node_list); 61 plist_check_list(&head->node_list);
@@ -180,7 +175,7 @@ static int __init plist_test(void)
180 unsigned int r = local_clock(); 175 unsigned int r = local_clock();
181 176
182 printk(KERN_INFO "start plist test\n"); 177 printk(KERN_INFO "start plist test\n");
183 plist_head_init(&test_head, NULL); 178 plist_head_init(&test_head);
184 for (i = 0; i < ARRAY_SIZE(test_node); i++) 179 for (i = 0; i < ARRAY_SIZE(test_node); i++)
185 plist_node_init(test_node + i, 0); 180 plist_node_init(test_node + i, 0);
186 181
diff --git a/lib/sha1.c b/lib/sha1.c
index 4c45fd50e91..f33271dd00c 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -1,31 +1,72 @@
1/* 1/*
2 * SHA transform algorithm, originally taken from code written by 2 * SHA1 routine optimized to do word accesses rather than byte accesses,
3 * Peter Gutmann, and placed in the public domain. 3 * and to avoid unnecessary copies into the context array.
4 *
5 * This was based on the git SHA1 implementation.
4 */ 6 */
5 7
6#include <linux/kernel.h> 8#include <linux/kernel.h>
7#include <linux/module.h> 9#include <linux/module.h>
8#include <linux/cryptohash.h> 10#include <linux/bitops.h>
11#include <asm/unaligned.h>
9 12
10/* The SHA f()-functions. */ 13/*
14 * If you have 32 registers or more, the compiler can (and should)
15 * try to change the array[] accesses into registers. However, on
16 * machines with less than ~25 registers, that won't really work,
17 * and at least gcc will make an unholy mess of it.
18 *
19 * So to avoid that mess which just slows things down, we force
20 * the stores to memory to actually happen (we might be better off
21 * with a 'W(t)=(val);asm("":"+m" (W(t))' there instead, as
22 * suggested by Artur Skawina - that will also make gcc unable to
23 * try to do the silly "optimize away loads" part because it won't
24 * see what the value will be).
25 *
26 * Ben Herrenschmidt reports that on PPC, the C version comes close
27 * to the optimized asm with this (ie on PPC you don't want that
28 * 'volatile', since there are lots of registers).
29 *
30 * On ARM we get the best code generation by forcing a full memory barrier
31 * between each SHA_ROUND, otherwise gcc happily get wild with spilling and
32 * the stack frame size simply explode and performance goes down the drain.
33 */
11 34
12#define f1(x,y,z) (z ^ (x & (y ^ z))) /* x ? y : z */ 35#ifdef CONFIG_X86
13#define f2(x,y,z) (x ^ y ^ z) /* XOR */ 36 #define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
14#define f3(x,y,z) ((x & y) + (z & (x ^ y))) /* majority */ 37#elif defined(CONFIG_ARM)
38 #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
39#else
40 #define setW(x, val) (W(x) = (val))
41#endif
15 42
16/* The SHA Mysterious Constants */ 43/* This "rolls" over the 512-bit array */
44#define W(x) (array[(x)&15])
17 45
18#define K1 0x5A827999L /* Rounds 0-19: sqrt(2) * 2^30 */ 46/*
19#define K2 0x6ED9EBA1L /* Rounds 20-39: sqrt(3) * 2^30 */ 47 * Where do we get the source from? The first 16 iterations get it from
20#define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */ 48 * the input data, the next mix it from the 512-bit array.
21#define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */ 49 */
50#define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t)
51#define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
52
53#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
54 __u32 TEMP = input(t); setW(t, TEMP); \
55 E += TEMP + rol32(A,5) + (fn) + (constant); \
56 B = ror32(B, 2); } while (0)
57
58#define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
59#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
60#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
61#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
62#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
22 63
23/** 64/**
24 * sha_transform - single block SHA1 transform 65 * sha_transform - single block SHA1 transform
25 * 66 *
26 * @digest: 160 bit digest to update 67 * @digest: 160 bit digest to update
27 * @data: 512 bits of data to hash 68 * @data: 512 bits of data to hash
28 * @W: 80 words of workspace (see note) 69 * @array: 16 words of workspace (see note)
29 * 70 *
30 * This function generates a SHA1 digest for a single 512-bit block. 71 * This function generates a SHA1 digest for a single 512-bit block.
31 * Be warned, it does not handle padding and message digest, do not 72 * Be warned, it does not handle padding and message digest, do not
@@ -36,47 +77,111 @@
36 * to clear the workspace. This is left to the caller to avoid 77 * to clear the workspace. This is left to the caller to avoid
37 * unnecessary clears between chained hashing operations. 78 * unnecessary clears between chained hashing operations.
38 */ 79 */
39void sha_transform(__u32 *digest, const char *in, __u32 *W) 80void sha_transform(__u32 *digest, const char *data, __u32 *array)
40{ 81{
41 __u32 a, b, c, d, e, t, i; 82 __u32 A, B, C, D, E;
42 83
43 for (i = 0; i < 16; i++) 84 A = digest[0];
44 W[i] = be32_to_cpu(((const __be32 *)in)[i]); 85 B = digest[1];
45 86 C = digest[2];
46 for (i = 0; i < 64; i++) 87 D = digest[3];
47 W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1); 88 E = digest[4];
48 89
49 a = digest[0]; 90 /* Round 1 - iterations 0-16 take their input from 'data' */
50 b = digest[1]; 91 T_0_15( 0, A, B, C, D, E);
51 c = digest[2]; 92 T_0_15( 1, E, A, B, C, D);
52 d = digest[3]; 93 T_0_15( 2, D, E, A, B, C);
53 e = digest[4]; 94 T_0_15( 3, C, D, E, A, B);
54 95 T_0_15( 4, B, C, D, E, A);
55 for (i = 0; i < 20; i++) { 96 T_0_15( 5, A, B, C, D, E);
56 t = f1(b, c, d) + K1 + rol32(a, 5) + e + W[i]; 97 T_0_15( 6, E, A, B, C, D);
57 e = d; d = c; c = rol32(b, 30); b = a; a = t; 98 T_0_15( 7, D, E, A, B, C);
58 } 99 T_0_15( 8, C, D, E, A, B);
59 100 T_0_15( 9, B, C, D, E, A);
60 for (; i < 40; i ++) { 101 T_0_15(10, A, B, C, D, E);
61 t = f2(b, c, d) + K2 + rol32(a, 5) + e + W[i]; 102 T_0_15(11, E, A, B, C, D);
62 e = d; d = c; c = rol32(b, 30); b = a; a = t; 103 T_0_15(12, D, E, A, B, C);
63 } 104 T_0_15(13, C, D, E, A, B);
64 105 T_0_15(14, B, C, D, E, A);
65 for (; i < 60; i ++) { 106 T_0_15(15, A, B, C, D, E);
66 t = f3(b, c, d) + K3 + rol32(a, 5) + e + W[i]; 107
67 e = d; d = c; c = rol32(b, 30); b = a; a = t; 108 /* Round 1 - tail. Input from 512-bit mixing array */
68 } 109 T_16_19(16, E, A, B, C, D);
69 110 T_16_19(17, D, E, A, B, C);
70 for (; i < 80; i ++) { 111 T_16_19(18, C, D, E, A, B);
71 t = f2(b, c, d) + K4 + rol32(a, 5) + e + W[i]; 112 T_16_19(19, B, C, D, E, A);
72 e = d; d = c; c = rol32(b, 30); b = a; a = t; 113
73 } 114 /* Round 2 */
74 115 T_20_39(20, A, B, C, D, E);
75 digest[0] += a; 116 T_20_39(21, E, A, B, C, D);
76 digest[1] += b; 117 T_20_39(22, D, E, A, B, C);
77 digest[2] += c; 118 T_20_39(23, C, D, E, A, B);
78 digest[3] += d; 119 T_20_39(24, B, C, D, E, A);
79 digest[4] += e; 120 T_20_39(25, A, B, C, D, E);
121 T_20_39(26, E, A, B, C, D);
122 T_20_39(27, D, E, A, B, C);
123 T_20_39(28, C, D, E, A, B);
124 T_20_39(29, B, C, D, E, A);
125 T_20_39(30, A, B, C, D, E);
126 T_20_39(31, E, A, B, C, D);
127 T_20_39(32, D, E, A, B, C);
128 T_20_39(33, C, D, E, A, B);
129 T_20_39(34, B, C, D, E, A);
130 T_20_39(35, A, B, C, D, E);
131 T_20_39(36, E, A, B, C, D);
132 T_20_39(37, D, E, A, B, C);
133 T_20_39(38, C, D, E, A, B);
134 T_20_39(39, B, C, D, E, A);
135
136 /* Round 3 */
137 T_40_59(40, A, B, C, D, E);
138 T_40_59(41, E, A, B, C, D);
139 T_40_59(42, D, E, A, B, C);
140 T_40_59(43, C, D, E, A, B);
141 T_40_59(44, B, C, D, E, A);
142 T_40_59(45, A, B, C, D, E);
143 T_40_59(46, E, A, B, C, D);
144 T_40_59(47, D, E, A, B, C);
145 T_40_59(48, C, D, E, A, B);
146 T_40_59(49, B, C, D, E, A);
147 T_40_59(50, A, B, C, D, E);
148 T_40_59(51, E, A, B, C, D);
149 T_40_59(52, D, E, A, B, C);
150 T_40_59(53, C, D, E, A, B);
151 T_40_59(54, B, C, D, E, A);
152 T_40_59(55, A, B, C, D, E);
153 T_40_59(56, E, A, B, C, D);
154 T_40_59(57, D, E, A, B, C);
155 T_40_59(58, C, D, E, A, B);
156 T_40_59(59, B, C, D, E, A);
157
158 /* Round 4 */
159 T_60_79(60, A, B, C, D, E);
160 T_60_79(61, E, A, B, C, D);
161 T_60_79(62, D, E, A, B, C);
162 T_60_79(63, C, D, E, A, B);
163 T_60_79(64, B, C, D, E, A);
164 T_60_79(65, A, B, C, D, E);
165 T_60_79(66, E, A, B, C, D);
166 T_60_79(67, D, E, A, B, C);
167 T_60_79(68, C, D, E, A, B);
168 T_60_79(69, B, C, D, E, A);
169 T_60_79(70, A, B, C, D, E);
170 T_60_79(71, E, A, B, C, D);
171 T_60_79(72, D, E, A, B, C);
172 T_60_79(73, C, D, E, A, B);
173 T_60_79(74, B, C, D, E, A);
174 T_60_79(75, A, B, C, D, E);
175 T_60_79(76, E, A, B, C, D);
176 T_60_79(77, D, E, A, B, C);
177 T_60_79(78, C, D, E, A, B);
178 T_60_79(79, B, C, D, E, A);
179
180 digest[0] += A;
181 digest[1] += B;
182 digest[2] += C;
183 digest[3] += D;
184 digest[4] += E;
80} 185}
81EXPORT_SYMBOL(sha_transform); 186EXPORT_SYMBOL(sha_transform);
82 187
@@ -92,4 +197,3 @@ void sha_init(__u32 *buf)
92 buf[3] = 0x10325476; 197 buf[3] = 0x10325476;
93 buf[4] = 0xc3d2e1f0; 198 buf[4] = 0xc3d2e1f0;
94} 199}
95
diff --git a/lib/string.c b/lib/string.c
index 01fad9b203e..ed166883858 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -23,6 +23,7 @@
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/ctype.h> 24#include <linux/ctype.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/memcopy.h>
26 27
27#ifndef __HAVE_ARCH_STRNICMP 28#ifndef __HAVE_ARCH_STRNICMP
28/** 29/**
@@ -596,11 +597,11 @@ EXPORT_SYMBOL(memset);
596 */ 597 */
597void *memcpy(void *dest, const void *src, size_t count) 598void *memcpy(void *dest, const void *src, size_t count)
598{ 599{
599 char *tmp = dest; 600 unsigned long dstp = (unsigned long)dest;
600 const char *s = src; 601 unsigned long srcp = (unsigned long)src;
601 602
602 while (count--) 603 /* Copy from the beginning to the end */
603 *tmp++ = *s++; 604 mem_copy_fwd(dstp, srcp, count);
604 return dest; 605 return dest;
605} 606}
606EXPORT_SYMBOL(memcpy); 607EXPORT_SYMBOL(memcpy);
@@ -617,21 +618,15 @@ EXPORT_SYMBOL(memcpy);
617 */ 618 */
618void *memmove(void *dest, const void *src, size_t count) 619void *memmove(void *dest, const void *src, size_t count)
619{ 620{
620 char *tmp; 621 unsigned long dstp = (unsigned long)dest;
621 const char *s; 622 unsigned long srcp = (unsigned long)src;
622 623
623 if (dest <= src) { 624 if (dest - src >= count) {
624 tmp = dest; 625 /* Copy from the beginning to the end */
625 s = src; 626 mem_copy_fwd(dstp, srcp, count);
626 while (count--)
627 *tmp++ = *s++;
628 } else { 627 } else {
629 tmp = dest; 628 /* Copy from the end to the beginning */
630 tmp += count; 629 mem_copy_bwd(dstp, srcp, count);
631 s = src;
632 s += count;
633 while (count--)
634 *--tmp = *--s;
635 } 630 }
636 return dest; 631 return dest;
637} 632}
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index e51e2558ca9..a768e6d28bb 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -441,8 +441,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
441 * next filter in the chain. Apply the BCJ filter on the new data 441 * next filter in the chain. Apply the BCJ filter on the new data
442 * in the output buffer. If everything cannot be filtered, copy it 442 * in the output buffer. If everything cannot be filtered, copy it
443 * to temp and rewind the output buffer position accordingly. 443 * to temp and rewind the output buffer position accordingly.
444 *
445 * This needs to be always run when temp.size == 0 to handle a special
446 * case where the output buffer is full and the next filter has no
447 * more output coming but hasn't returned XZ_STREAM_END yet.
444 */ 448 */
445 if (s->temp.size < b->out_size - b->out_pos) { 449 if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) {
446 out_start = b->out_pos; 450 out_start = b->out_pos;
447 memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); 451 memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
448 b->out_pos += s->temp.size; 452 b->out_pos += s->temp.size;
@@ -465,16 +469,25 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
465 s->temp.size = b->out_pos - out_start; 469 s->temp.size = b->out_pos - out_start;
466 b->out_pos -= s->temp.size; 470 b->out_pos -= s->temp.size;
467 memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); 471 memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
472
473 /*
474 * If there wasn't enough input to the next filter to fill
475 * the output buffer with unfiltered data, there's no point
476 * to try decoding more data to temp.
477 */
478 if (b->out_pos + s->temp.size < b->out_size)
479 return XZ_OK;
468 } 480 }
469 481
470 /* 482 /*
471 * If we have unfiltered data in temp, try to fill by decoding more 483 * We have unfiltered data in temp. If the output buffer isn't full
472 * data from the next filter. Apply the BCJ filter on temp. Then we 484 * yet, try to fill the temp buffer by decoding more data from the
473 * hopefully can fill the actual output buffer by copying filtered 485 * next filter. Apply the BCJ filter on temp. Then we hopefully can
474 * data from temp. A mix of filtered and unfiltered data may be left 486 * fill the actual output buffer by copying filtered data from temp.
475 * in temp; it will be taken care on the next call to this function. 487 * A mix of filtered and unfiltered data may be left in temp; it will
488 * be taken care on the next call to this function.
476 */ 489 */
477 if (s->temp.size > 0) { 490 if (b->out_pos < b->out_size) {
478 /* Make b->out{,_pos,_size} temporarily point to s->temp. */ 491 /* Make b->out{,_pos,_size} temporarily point to s->temp. */
479 s->out = b->out; 492 s->out = b->out;
480 s->out_pos = b->out_pos; 493 s->out_pos = b->out_pos;
diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c
index ac809b1e64f..9a60cc21964 100644
--- a/lib/xz/xz_dec_stream.c
+++ b/lib/xz/xz_dec_stream.c
@@ -9,6 +9,7 @@
9 9
10#include "xz_private.h" 10#include "xz_private.h"
11#include "xz_stream.h" 11#include "xz_stream.h"
12#include <linux/kernel.h>
12 13
13/* Hash used to validate the Index field */ 14/* Hash used to validate the Index field */
14struct xz_dec_hash { 15struct xz_dec_hash {
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
index a65633e0696..482b90f363f 100644
--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h
@@ -12,7 +12,7 @@
12 12
13#ifdef __KERNEL__ 13#ifdef __KERNEL__
14# include <linux/xz.h> 14# include <linux/xz.h>
15# include <asm/byteorder.h> 15# include <linux/kernel.h>
16# include <asm/unaligned.h> 16# include <asm/unaligned.h>
17 /* XZ_PREBOOT may be defined only via decompress_unxz.c. */ 17 /* XZ_PREBOOT may be defined only via decompress_unxz.c. */
18# ifndef XZ_PREBOOT 18# ifndef XZ_PREBOOT