Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig | 3
-rw-r--r-- | lib/Kconfig.debug | 70
-rw-r--r-- | lib/Makefile | 1
-rw-r--r-- | lib/atomic64_test.c | 2
-rw-r--r-- | lib/bug.c | 12
-rw-r--r-- | lib/decompress_bunzip2.c | 10
-rw-r--r-- | lib/devres.c | 2
-rw-r--r-- | lib/flex_array.c | 25
-rw-r--r-- | lib/inflate.c | 2
-rw-r--r-- | lib/iommu-helper.c | 9
-rw-r--r-- | lib/ioremap.c | 10
-rw-r--r-- | lib/list_debug.c | 6
-rw-r--r-- | lib/percpu_counter.c | 27
-rw-r--r-- | lib/radix-tree.c | 94
-rw-r--r-- | lib/raid6/Makefile | 75
-rw-r--r-- | lib/raid6/algos.c | 154
-rw-r--r-- | lib/raid6/altivec.uc | 130
-rw-r--r-- | lib/raid6/int.uc | 117
-rw-r--r-- | lib/raid6/mktables.c | 132
-rw-r--r-- | lib/raid6/mmx.c | 142
-rw-r--r-- | lib/raid6/recov.c | 132
-rw-r--r-- | lib/raid6/sse1.c | 162
-rw-r--r-- | lib/raid6/sse2.c | 262
-rw-r--r-- | lib/raid6/test/Makefile | 72
-rw-r--r-- | lib/raid6/test/test.c | 124
-rw-r--r-- | lib/raid6/unroll.awk | 20
-rw-r--r-- | lib/raid6/x86.h | 61
-rw-r--r-- | lib/random32.c | 2
-rw-r--r-- | lib/rwsem.c | 150
-rw-r--r-- | lib/scatterlist.c | 23
-rw-r--r-- | lib/swiotlb.c | 137
-rw-r--r-- | lib/vsprintf.c | 23
32 files changed, 2013 insertions, 178 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 5b916bc0fbae..fa9bf2c06199 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -7,6 +7,9 @@ config BINARY_PRINTF
7 | 7 | ||
8 | menu "Library routines" | 8 | menu "Library routines" |
9 | 9 | ||
10 | config RAID6_PQ | ||
11 | tristate | ||
12 | |||
10 | config BITREVERSE | 13 | config BITREVERSE |
11 | tristate | 14 | tristate |
12 | 15 | ||
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e722e9d62221..9e06b7f5ecf1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -76,7 +76,6 @@ config UNUSED_SYMBOLS
76 | 76 | ||
77 | config DEBUG_FS | 77 | config DEBUG_FS |
78 | bool "Debug Filesystem" | 78 | bool "Debug Filesystem" |
79 | depends on SYSFS | ||
80 | help | 79 | help |
81 | debugfs is a virtual file system that kernel developers use to put | 80 | debugfs is a virtual file system that kernel developers use to put |
82 | debugging files into. Enable this option to be able to read and | 81 | debugging files into. Enable this option to be able to read and |
@@ -152,28 +151,33 @@ config DEBUG_SHIRQ
152 | Drivers ought to be able to handle interrupts coming in at those | 151 | Drivers ought to be able to handle interrupts coming in at those |
153 | points; some don't and need to be caught. | 152 | points; some don't and need to be caught. |
154 | 153 | ||
155 | config DETECT_SOFTLOCKUP | 154 | config LOCKUP_DETECTOR |
156 | bool "Detect Soft Lockups" | 155 | bool "Detect Hard and Soft Lockups" |
157 | depends on DEBUG_KERNEL && !S390 | 156 | depends on DEBUG_KERNEL && !S390 |
158 | default y | ||
159 | help | 157 | help |
160 | Say Y here to enable the kernel to detect "soft lockups", | 158 | Say Y here to enable the kernel to act as a watchdog to detect |
161 | which are bugs that cause the kernel to loop in kernel | 159 | hard and soft lockups. |
160 | |||
161 | Softlockups are bugs that cause the kernel to loop in kernel | ||
162 | mode for more than 60 seconds, without giving other tasks a | 162 | mode for more than 60 seconds, without giving other tasks a |
163 | chance to run. | 163 | chance to run. The current stack trace is displayed upon |
164 | detection and the system will stay locked up. | ||
164 | 165 | ||
165 | When a soft-lockup is detected, the kernel will print the | 166 | Hardlockups are bugs that cause the CPU to loop in kernel mode |
166 | current stack trace (which you should report), but the | 167 | for more than 60 seconds, without letting other interrupts have a |
167 | system will stay locked up. This feature has negligible | 168 | chance to run. The current stack trace is displayed upon detection |
168 | overhead. | 169 | and the system will stay locked up. |
170 | |||
171 | The overhead should be minimal. A periodic hrtimer runs to | ||
172 | generate interrupts and kick the watchdog task every 10-12 seconds. | ||
173 | An NMI is generated every 60 seconds or so to check for hardlockups. | ||
169 | 174 | ||
170 | (Note that "hard lockups" are separate type of bugs that | 175 | config HARDLOCKUP_DETECTOR |
171 | can be detected via the NMI-watchdog, on platforms that | 176 | def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI |
172 | support it.) | ||
173 | 177 | ||
174 | config BOOTPARAM_SOFTLOCKUP_PANIC | 178 | config BOOTPARAM_SOFTLOCKUP_PANIC |
175 | bool "Panic (Reboot) On Soft Lockups" | 179 | bool "Panic (Reboot) On Soft Lockups" |
176 | depends on DETECT_SOFTLOCKUP | 180 | depends on LOCKUP_DETECTOR |
177 | help | 181 | help |
178 | Say Y here to enable the kernel to panic on "soft lockups", | 182 | Say Y here to enable the kernel to panic on "soft lockups", |
179 | which are bugs that cause the kernel to loop in kernel | 183 | which are bugs that cause the kernel to loop in kernel |
@@ -190,7 +194,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC
190 | 194 | ||
191 | config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE | 195 | config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE |
192 | int | 196 | int |
193 | depends on DETECT_SOFTLOCKUP | 197 | depends on LOCKUP_DETECTOR |
194 | range 0 1 | 198 | range 0 1 |
195 | default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC | 199 | default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC |
196 | default 1 if BOOTPARAM_SOFTLOCKUP_PANIC | 200 | default 1 if BOOTPARAM_SOFTLOCKUP_PANIC |
@@ -307,6 +311,12 @@ config DEBUG_OBJECTS_WORK
307 | work queue routines to track the life time of work objects and | 311 | work queue routines to track the life time of work objects and |
308 | validate the work operations. | 312 | validate the work operations. |
309 | 313 | ||
314 | config DEBUG_OBJECTS_RCU_HEAD | ||
315 | bool "Debug RCU callbacks objects" | ||
316 | depends on DEBUG_OBJECTS && PREEMPT | ||
317 | help | ||
318 | Enable this to turn on debugging of RCU list heads (call_rcu() usage). | ||
319 | |||
310 | config DEBUG_OBJECTS_ENABLE_DEFAULT | 320 | config DEBUG_OBJECTS_ENABLE_DEFAULT |
311 | int "debug_objects bootup default value (0-1)" | 321 | int "debug_objects bootup default value (0-1)" |
312 | range 0 1 | 322 | range 0 1 |
@@ -400,6 +410,13 @@ config DEBUG_KMEMLEAK_TEST
400 | 410 | ||
401 | If unsure, say N. | 411 | If unsure, say N. |
402 | 412 | ||
413 | config DEBUG_KMEMLEAK_DEFAULT_OFF | ||
414 | bool "Default kmemleak to off" | ||
415 | depends on DEBUG_KMEMLEAK | ||
416 | help | ||
417 | Say Y here to disable kmemleak by default. It can then be enabled | ||
418 | on the command line via kmemleak=on. | ||
419 | |||
403 | config DEBUG_PREEMPT | 420 | config DEBUG_PREEMPT |
404 | bool "Debug preemptible kernel" | 421 | bool "Debug preemptible kernel" |
405 | depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT | 422 | depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT |
@@ -528,7 +545,7 @@ config LOCKDEP
528 | bool | 545 | bool |
529 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | 546 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT |
530 | select STACKTRACE | 547 | select STACKTRACE |
531 | select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 | 548 | select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE |
532 | select KALLSYMS | 549 | select KALLSYMS |
533 | select KALLSYMS_ALL | 550 | select KALLSYMS_ALL |
534 | 551 | ||
@@ -628,6 +645,19 @@ config DEBUG_INFO
628 | 645 | ||
629 | If unsure, say N. | 646 | If unsure, say N. |
630 | 647 | ||
648 | config DEBUG_INFO_REDUCED | ||
649 | bool "Reduce debugging information" | ||
650 | depends on DEBUG_INFO | ||
651 | help | ||
652 | If you say Y here gcc is instructed to generate less debugging | ||
653 | information for structure types. This means that tools that | ||
654 | need full debugging information (like kgdb or systemtap) won't | ||
655 | be happy. But if you merely need debugging information to | ||
656 | resolve line numbers there is no loss. Advantage is that | ||
657 | build directory object sizes shrink dramatically over a full | ||
658 | DEBUG_INFO build and compile times are reduced too. | ||
659 | Only works with newer gcc versions. | ||
660 | |||
631 | config DEBUG_VM | 661 | config DEBUG_VM |
632 | bool "Debug VM" | 662 | bool "Debug VM" |
633 | depends on DEBUG_KERNEL | 663 | depends on DEBUG_KERNEL |
@@ -937,7 +967,7 @@ config FAIL_MAKE_REQUEST
937 | Provide fault-injection capability for disk IO. | 967 | Provide fault-injection capability for disk IO. |
938 | 968 | ||
939 | config FAIL_IO_TIMEOUT | 969 | config FAIL_IO_TIMEOUT |
940 | bool "Faul-injection capability for faking disk interrupts" | 970 | bool "Fault-injection capability for faking disk interrupts" |
941 | depends on FAULT_INJECTION && BLOCK | 971 | depends on FAULT_INJECTION && BLOCK |
942 | help | 972 | help |
943 | Provide fault-injection capability on end IO handling. This | 973 | Provide fault-injection capability on end IO handling. This |
@@ -958,13 +988,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
958 | depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT | 988 | depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT |
959 | depends on !X86_64 | 989 | depends on !X86_64 |
960 | select STACKTRACE | 990 | select STACKTRACE |
961 | select FRAME_POINTER if !PPC && !S390 | 991 | select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE |
962 | help | 992 | help |
963 | Provide stacktrace filter for fault-injection capabilities | 993 | Provide stacktrace filter for fault-injection capabilities |
964 | 994 | ||
965 | config LATENCYTOP | 995 | config LATENCYTOP |
966 | bool "Latency measuring infrastructure" | 996 | bool "Latency measuring infrastructure" |
967 | select FRAME_POINTER if !MIPS && !PPC && !S390 | 997 | select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE |
968 | select KALLSYMS | 998 | select KALLSYMS |
969 | select KALLSYMS_ALL | 999 | select KALLSYMS_ALL |
970 | select STACKTRACE | 1000 | select STACKTRACE |
diff --git a/lib/Makefile b/lib/Makefile
index 0bfabba1bb32..e6a3763b8212 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
69 | obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ | 69 | obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ |
70 | obj-$(CONFIG_LZO_COMPRESS) += lzo/ | 70 | obj-$(CONFIG_LZO_COMPRESS) += lzo/ |
71 | obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ | 71 | obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ |
72 | obj-$(CONFIG_RAID6_PQ) += raid6/ | ||
72 | 73 | ||
73 | lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o | 74 | lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o |
74 | lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o | 75 | lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o |
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 250ed11d3ed2..44524cc8c32a 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -114,7 +114,7 @@ static __init int test_atomic64(void)
114 | BUG_ON(v.counter != r); | 114 | BUG_ON(v.counter != r); |
115 | 115 | ||
116 | #if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \ | 116 | #if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \ |
117 | defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) | 117 | defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) || defined(CONFIG_ARM) |
118 | INIT(onestwos); | 118 | INIT(onestwos); |
119 | BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1)); | 119 | BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1)); |
120 | r -= one; | 120 | r -= one; |
diff --git a/lib/bug.c b/lib/bug.c
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -136,8 +136,6 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
136 | 136 | ||
137 | bug = find_bug(bugaddr); | 137 | bug = find_bug(bugaddr); |
138 | 138 | ||
139 | printk(KERN_EMERG "------------[ cut here ]------------\n"); | ||
140 | |||
141 | file = NULL; | 139 | file = NULL; |
142 | line = 0; | 140 | line = 0; |
143 | warning = 0; | 141 | warning = 0; |
@@ -156,19 +154,25 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
156 | 154 | ||
157 | if (warning) { | 155 | if (warning) { |
158 | /* this is a WARN_ON rather than BUG/BUG_ON */ | 156 | /* this is a WARN_ON rather than BUG/BUG_ON */ |
157 | printk(KERN_WARNING "------------[ cut here ]------------\n"); | ||
158 | |||
159 | if (file) | 159 | if (file) |
160 | printk(KERN_ERR "Badness at %s:%u\n", | 160 | printk(KERN_WARNING "WARNING: at %s:%u\n", |
161 | file, line); | 161 | file, line); |
162 | else | 162 | else |
163 | printk(KERN_ERR "Badness at %p " | 163 | printk(KERN_WARNING "WARNING: at %p " |
164 | "[verbose debug info unavailable]\n", | 164 | "[verbose debug info unavailable]\n", |
165 | (void *)bugaddr); | 165 | (void *)bugaddr); |
166 | 166 | ||
167 | print_modules(); | ||
167 | show_regs(regs); | 168 | show_regs(regs); |
169 | print_oops_end_marker(); | ||
168 | add_taint(BUG_GET_TAINT(bug)); | 170 | add_taint(BUG_GET_TAINT(bug)); |
169 | return BUG_TRAP_TYPE_WARN; | 171 | return BUG_TRAP_TYPE_WARN; |
170 | } | 172 | } |
171 | 173 | ||
174 | printk(KERN_EMERG "------------[ cut here ]------------\n"); | ||
175 | |||
172 | if (file) | 176 | if (file) |
173 | printk(KERN_CRIT "kernel BUG at %s:%u!\n", | 177 | printk(KERN_CRIT "kernel BUG at %s:%u!\n", |
174 | file, line); | 178 | file, line); |
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index a4e971dee102..81c8bb1cc6aa 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -107,6 +107,8 @@ struct bunzip_data {
107 | unsigned char selectors[32768]; /* nSelectors = 15 bits */ | 107 | unsigned char selectors[32768]; /* nSelectors = 15 bits */ |
108 | struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */ | 108 | struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */ |
109 | int io_error; /* non-zero if we have IO error */ | 109 | int io_error; /* non-zero if we have IO error */ |
110 | int byteCount[256]; | ||
111 | unsigned char symToByte[256], mtfSymbol[256]; | ||
110 | }; | 112 | }; |
111 | 113 | ||
112 | 114 | ||
@@ -158,14 +160,16 @@ static int INIT get_next_block(struct bunzip_data *bd)
158 | int *base = NULL; | 160 | int *base = NULL; |
159 | int *limit = NULL; | 161 | int *limit = NULL; |
160 | int dbufCount, nextSym, dbufSize, groupCount, selector, | 162 | int dbufCount, nextSym, dbufSize, groupCount, selector, |
161 | i, j, k, t, runPos, symCount, symTotal, nSelectors, | 163 | i, j, k, t, runPos, symCount, symTotal, nSelectors, *byteCount; |
162 | byteCount[256]; | 164 | unsigned char uc, *symToByte, *mtfSymbol, *selectors; |
163 | unsigned char uc, symToByte[256], mtfSymbol[256], *selectors; | ||
164 | unsigned int *dbuf, origPtr; | 165 | unsigned int *dbuf, origPtr; |
165 | 166 | ||
166 | dbuf = bd->dbuf; | 167 | dbuf = bd->dbuf; |
167 | dbufSize = bd->dbufSize; | 168 | dbufSize = bd->dbufSize; |
168 | selectors = bd->selectors; | 169 | selectors = bd->selectors; |
170 | byteCount = bd->byteCount; | ||
171 | symToByte = bd->symToByte; | ||
172 | mtfSymbol = bd->mtfSymbol; | ||
169 | 173 | ||
170 | /* Read in header signature and CRC, then validate signature. | 174 | /* Read in header signature and CRC, then validate signature. |
171 | (last block signature means CRC is for whole file, return now) */ | 175 | (last block signature means CRC is for whole file, return now) */ |
diff --git a/lib/devres.c b/lib/devres.c
index 49368608f988..6efddf53b90c 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -328,7 +328,7 @@ EXPORT_SYMBOL(pcim_iomap_regions_request_all);
328 | * @pdev: PCI device to map IO resources for | 328 | * @pdev: PCI device to map IO resources for |
329 | * @mask: Mask of BARs to unmap and release | 329 | * @mask: Mask of BARs to unmap and release |
330 | * | 330 | * |
331 | * Unamp and release regions specified by @mask. | 331 | * Unmap and release regions specified by @mask. |
332 | */ | 332 | */ |
333 | void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask) | 333 | void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask) |
334 | { | 334 | { |
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 41b1804fa728..77a6fea7481e 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -171,6 +171,8 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
171 | * Note that this *copies* the contents of @src into | 171 | * Note that this *copies* the contents of @src into |
172 | * the array. If you are trying to store an array of | 172 | * the array. If you are trying to store an array of |
173 | * pointers, make sure to pass in &ptr instead of ptr. | 173 | * pointers, make sure to pass in &ptr instead of ptr. |
174 | * You may instead wish to use the flex_array_put_ptr() | ||
175 | * helper function. | ||
174 | * | 176 | * |
175 | * Locking must be provided by the caller. | 177 | * Locking must be provided by the caller. |
176 | */ | 178 | */ |
@@ -265,7 +267,8 @@ int flex_array_prealloc(struct flex_array *fa, unsigned int start,
265 | * | 267 | * |
266 | * Returns a pointer to the data at index @element_nr. Note | 268 | * Returns a pointer to the data at index @element_nr. Note |
267 | * that this is a copy of the data that was passed in. If you | 269 | * that this is a copy of the data that was passed in. If you |
268 | * are using this to store pointers, you'll get back &ptr. | 270 | * are using this to store pointers, you'll get back &ptr. You |
271 | * may instead wish to use the flex_array_get_ptr helper. | ||
269 | * | 272 | * |
270 | * Locking must be provided by the caller. | 273 | * Locking must be provided by the caller. |
271 | */ | 274 | */ |
@@ -286,6 +289,26 @@ void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
286 | return &part->elements[index_inside_part(fa, element_nr)]; | 289 | return &part->elements[index_inside_part(fa, element_nr)]; |
287 | } | 290 | } |
288 | 291 | ||
292 | /** | ||
293 | * flex_array_get_ptr - pull a ptr back out of the array | ||
294 | * @fa: the flex array from which to extract data | ||
295 | * @element_nr: index of the element to fetch from the array | ||
296 | * | ||
297 | * Returns the pointer placed in the flex array at element_nr using | ||
298 | * flex_array_put_ptr(). This function should not be called if the | ||
299 | * element in question was not set using the _put_ptr() helper. | ||
300 | */ | ||
301 | void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr) | ||
302 | { | ||
303 | void **tmp; | ||
304 | |||
305 | tmp = flex_array_get(fa, element_nr); | ||
306 | if (!tmp) | ||
307 | return NULL; | ||
308 | |||
309 | return *tmp; | ||
310 | } | ||
311 | |||
289 | static int part_is_free(struct flex_array_part *part) | 312 | static int part_is_free(struct flex_array_part *part) |
290 | { | 313 | { |
291 | int i; | 314 | int i; |
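A minimal usage sketch of the new pointer helpers (assuming the flex_array_put_ptr() counterpart referenced in the comments above; demo_store() and struct foo are illustrative only, not part of the patch):

#include <linux/flex_array.h>
#include <linux/slab.h>
#include <linux/kernel.h>

struct foo;	/* hypothetical payload type, illustration only */

static int demo_store(struct foo *f)
{
	struct flex_array *fa;
	int err;

	/* An array of pointers stores void *-sized elements */
	fa = flex_array_alloc(sizeof(void *), 128, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	err = flex_array_put_ptr(fa, 0, f, GFP_KERNEL);
	if (!err)
		/* get_ptr returns the stored pointer itself, not &ptr */
		WARN_ON(flex_array_get_ptr(fa, 0) != f);

	flex_array_free(fa);
	return err;
}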
diff --git a/lib/inflate.c b/lib/inflate.c
index 677b738c2204..013a76193481 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -103,7 +103,9 @@
103 | the two sets of lengths. | 103 | the two sets of lengths. |
104 | */ | 104 | */ |
105 | #include <linux/compiler.h> | 105 | #include <linux/compiler.h> |
106 | #ifdef NO_INFLATE_MALLOC | ||
106 | #include <linux/slab.h> | 107 | #include <linux/slab.h> |
108 | #endif | ||
107 | 109 | ||
108 | #ifdef RCSID | 110 | #ifdef RCSID |
109 | static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #"; | 111 | static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #"; |
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index c0251f4ad08b..da053313ee5c 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -38,12 +38,3 @@ again:
38 | return -1; | 38 | return -1; |
39 | } | 39 | } |
40 | EXPORT_SYMBOL(iommu_area_alloc); | 40 | EXPORT_SYMBOL(iommu_area_alloc); |
41 | |||
42 | unsigned long iommu_num_pages(unsigned long addr, unsigned long len, | ||
43 | unsigned long io_page_size) | ||
44 | { | ||
45 | unsigned long size = (addr & (io_page_size - 1)) + len; | ||
46 | |||
47 | return DIV_ROUND_UP(size, io_page_size); | ||
48 | } | ||
49 | EXPORT_SYMBOL(iommu_num_pages); | ||
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 14c6078f17a2..5730ecd3eb66 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -13,10 +13,10 @@
13 | #include <asm/pgtable.h> | 13 | #include <asm/pgtable.h> |
14 | 14 | ||
15 | static int ioremap_pte_range(pmd_t *pmd, unsigned long addr, | 15 | static int ioremap_pte_range(pmd_t *pmd, unsigned long addr, |
16 | unsigned long end, unsigned long phys_addr, pgprot_t prot) | 16 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) |
17 | { | 17 | { |
18 | pte_t *pte; | 18 | pte_t *pte; |
19 | unsigned long pfn; | 19 | u64 pfn; |
20 | 20 | ||
21 | pfn = phys_addr >> PAGE_SHIFT; | 21 | pfn = phys_addr >> PAGE_SHIFT; |
22 | pte = pte_alloc_kernel(pmd, addr); | 22 | pte = pte_alloc_kernel(pmd, addr); |
@@ -31,7 +31,7 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
31 | } | 31 | } |
32 | 32 | ||
33 | static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, | 33 | static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, |
34 | unsigned long end, unsigned long phys_addr, pgprot_t prot) | 34 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) |
35 | { | 35 | { |
36 | pmd_t *pmd; | 36 | pmd_t *pmd; |
37 | unsigned long next; | 37 | unsigned long next; |
@@ -49,7 +49,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
49 | } | 49 | } |
50 | 50 | ||
51 | static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, | 51 | static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, |
52 | unsigned long end, unsigned long phys_addr, pgprot_t prot) | 52 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) |
53 | { | 53 | { |
54 | pud_t *pud; | 54 | pud_t *pud; |
55 | unsigned long next; | 55 | unsigned long next; |
@@ -67,7 +67,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
67 | } | 67 | } |
68 | 68 | ||
69 | int ioremap_page_range(unsigned long addr, | 69 | int ioremap_page_range(unsigned long addr, |
70 | unsigned long end, unsigned long phys_addr, pgprot_t prot) | 70 | unsigned long end, phys_addr_t phys_addr, pgprot_t prot) |
71 | { | 71 | { |
72 | pgd_t *pgd; | 72 | pgd_t *pgd; |
73 | unsigned long start; | 73 | unsigned long start; |
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 1a39f4e3ae1f..344c710d16ca 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -43,6 +43,12 @@ EXPORT_SYMBOL(__list_add);
43 | */ | 43 | */ |
44 | void list_del(struct list_head *entry) | 44 | void list_del(struct list_head *entry) |
45 | { | 45 | { |
46 | WARN(entry->next == LIST_POISON1, | ||
47 | "list_del corruption, next is LIST_POISON1 (%p)\n", | ||
48 | LIST_POISON1); | ||
49 | WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2, | ||
50 | "list_del corruption, prev is LIST_POISON2 (%p)\n", | ||
51 | LIST_POISON2); | ||
46 | WARN(entry->prev->next != entry, | 52 | WARN(entry->prev->next != entry, |
47 | "list_del corruption. prev->next should be %p, " | 53 | "list_del corruption. prev->next should be %p, " |
48 | "but was %p\n", entry, entry->prev->next); | 54 | "but was %p\n", entry, entry->prev->next); |
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index aeaa6d734447..ec9048e74f44 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -137,6 +137,33 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
137 | return NOTIFY_OK; | 137 | return NOTIFY_OK; |
138 | } | 138 | } |
139 | 139 | ||
140 | /* | ||
141 | * Compare counter against given value. | ||
142 | * Return 1 if greater, 0 if equal and -1 if less | ||
143 | */ | ||
144 | int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) | ||
145 | { | ||
146 | s64 count; | ||
147 | |||
148 | count = percpu_counter_read(fbc); | ||
149 | /* Check to see if rough count will be sufficient for comparison */ | ||
150 | if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) { | ||
151 | if (count > rhs) | ||
152 | return 1; | ||
153 | else | ||
154 | return -1; | ||
155 | } | ||
156 | /* Need to use precise count */ | ||
157 | count = percpu_counter_sum(fbc); | ||
158 | if (count > rhs) | ||
159 | return 1; | ||
160 | else if (count < rhs) | ||
161 | return -1; | ||
162 | else | ||
163 | return 0; | ||
164 | } | ||
165 | EXPORT_SYMBOL(percpu_counter_compare); | ||
166 | |||
140 | static int __init percpu_counter_startup(void) | 167 | static int __init percpu_counter_startup(void) |
141 | { | 168 | { |
142 | compute_batch_value(); | 169 | compute_batch_value(); |
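A sketch of the intended calling pattern (demo_reserve() is illustrative, not from the patch): the caller gets a cheap answer from the per-CPU approximation, and percpu_counter_compare() only pays for a precise percpu_counter_sum() when the counter is within batch*num_online_cpus() of the comparison value:

#include <linux/percpu_counter.h>

static int demo_reserve(struct percpu_counter *free_units, s64 nr)
{
	/* -1 means the count (approximate or precise) is below nr */
	if (percpu_counter_compare(free_units, nr) < 0)
		return -ENOSPC;

	percpu_counter_sub(free_units, nr);
	return 0;
}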
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 05da38bcc298..e907858498a6 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -609,6 +609,100 @@ int radix_tree_tag_get(struct radix_tree_root *root,
609 | EXPORT_SYMBOL(radix_tree_tag_get); | 609 | EXPORT_SYMBOL(radix_tree_tag_get); |
610 | 610 | ||
611 | /** | 611 | /** |
612 | * radix_tree_range_tag_if_tagged - for each item in given range set given | ||
613 | * tag if item has another tag set | ||
614 | * @root: radix tree root | ||
615 | * @first_indexp: pointer to a starting index of a range to scan | ||
616 | * @last_index: last index of a range to scan | ||
617 | * @nr_to_tag: maximum number of items to tag | ||
618 | * @iftag: tag index to test | ||
619 | * @settag: tag index to set if tested tag is set | ||
620 | * | ||
621 | * This function scans the range of the radix tree from first_index to last_index | ||
622 | * (inclusive). For each item in the range on which iftag is set, the function | ||
623 | * also sets settag. The function stops either after tagging nr_to_tag items or | ||
624 | * after reaching last_index. | ||
625 | * | ||
626 | * The function returns the number of leaves where the tag was set and sets | ||
627 | * *first_indexp to the first unscanned index. | ||
628 | */ | ||
629 | unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | ||
630 | unsigned long *first_indexp, unsigned long last_index, | ||
631 | unsigned long nr_to_tag, | ||
632 | unsigned int iftag, unsigned int settag) | ||
633 | { | ||
634 | unsigned int height = root->height, shift; | ||
635 | unsigned long tagged = 0, index = *first_indexp; | ||
636 | struct radix_tree_node *open_slots[height], *slot; | ||
637 | |||
638 | last_index = min(last_index, radix_tree_maxindex(height)); | ||
639 | if (index > last_index) | ||
640 | return 0; | ||
641 | if (!nr_to_tag) | ||
642 | return 0; | ||
643 | if (!root_tag_get(root, iftag)) { | ||
644 | *first_indexp = last_index + 1; | ||
645 | return 0; | ||
646 | } | ||
647 | if (height == 0) { | ||
648 | *first_indexp = last_index + 1; | ||
649 | root_tag_set(root, settag); | ||
650 | return 1; | ||
651 | } | ||
652 | |||
653 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | ||
654 | slot = radix_tree_indirect_to_ptr(root->rnode); | ||
655 | |||
656 | for (;;) { | ||
657 | int offset; | ||
658 | |||
659 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | ||
660 | if (!slot->slots[offset]) | ||
661 | goto next; | ||
662 | if (!tag_get(slot, iftag, offset)) | ||
663 | goto next; | ||
664 | tag_set(slot, settag, offset); | ||
665 | if (height == 1) { | ||
666 | tagged++; | ||
667 | goto next; | ||
668 | } | ||
669 | /* Go down one level */ | ||
670 | height--; | ||
671 | shift -= RADIX_TREE_MAP_SHIFT; | ||
672 | open_slots[height] = slot; | ||
673 | slot = slot->slots[offset]; | ||
674 | continue; | ||
675 | next: | ||
676 | /* Go to next item at level determined by 'shift' */ | ||
677 | index = ((index >> shift) + 1) << shift; | ||
678 | if (index > last_index) | ||
679 | break; | ||
680 | if (tagged >= nr_to_tag) | ||
681 | break; | ||
682 | while (((index >> shift) & RADIX_TREE_MAP_MASK) == 0) { | ||
683 | /* | ||
684 | * We've fully scanned this node. Go up. Because | ||
685 | * last_index is guaranteed to be in the tree, what | ||
686 | * we do below cannot wander astray. | ||
687 | */ | ||
688 | slot = open_slots[height]; | ||
689 | height++; | ||
690 | shift += RADIX_TREE_MAP_SHIFT; | ||
691 | } | ||
692 | } | ||
693 | /* | ||
694 | * The iftag must have been set somewhere because otherwise | ||
695 | * we would have returned immediately at the beginning of the function | ||
696 | */ | ||
697 | root_tag_set(root, settag); | ||
698 | *first_indexp = index; | ||
699 | |||
700 | return tagged; | ||
701 | } | ||
702 | EXPORT_SYMBOL(radix_tree_range_tag_if_tagged); | ||
703 | |||
704 | |||
705 | /** | ||
612 | * radix_tree_next_hole - find the next hole (not-present entry) | 706 | * radix_tree_next_hole - find the next hole (not-present entry) |
613 | * @root: tree root | 707 | * @root: tree root |
614 | * @index: index key | 708 | * @index: index key |
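A sketch of the batched calling pattern radix_tree_range_tag_if_tagged() is designed for (names and batch size illustrative; a real caller, such as page-cache writeback retagging, would hold the tree lock and drop it between batches):

#include <linux/radix-tree.h>

static void demo_retag_range(struct radix_tree_root *root,
			     unsigned long start, unsigned long end,
			     unsigned int iftag, unsigned int settag)
{
	unsigned long index = start;

	while (index <= end) {
		unsigned long tagged;

		tagged = radix_tree_range_tag_if_tagged(root, &index, end,
							64, iftag, settag);
		if (!tagged)	/* range exhausted or iftag not set anywhere */
			break;
		/* index now holds the first unscanned slot; release and
		 * retake the tree lock here before the next batch */
	}
}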
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
new file mode 100644
index 000000000000..8a38102770f3
--- /dev/null
+++ b/lib/raid6/Makefile
@@ -0,0 +1,75 @@
1 | obj-$(CONFIG_RAID6_PQ) += raid6_pq.o | ||
2 | |||
3 | raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \ | ||
4 | int8.o int16.o int32.o altivec1.o altivec2.o altivec4.o \ | ||
5 | altivec8.o mmx.o sse1.o sse2.o | ||
6 | hostprogs-y += mktables | ||
7 | |||
8 | quiet_cmd_unroll = UNROLL $@ | ||
9 | cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \ | ||
10 | < $< > $@ || ( rm -f $@ && exit 1 ) | ||
11 | |||
12 | ifeq ($(CONFIG_ALTIVEC),y) | ||
13 | altivec_flags := -maltivec -mabi=altivec | ||
14 | endif | ||
15 | |||
16 | targets += int1.c | ||
17 | $(obj)/int1.c: UNROLL := 1 | ||
18 | $(obj)/int1.c: $(src)/int.uc $(src)/unroll.awk FORCE | ||
19 | $(call if_changed,unroll) | ||
20 | |||
21 | targets += int2.c | ||
22 | $(obj)/int2.c: UNROLL := 2 | ||
23 | $(obj)/int2.c: $(src)/int.uc $(src)/unroll.awk FORCE | ||
24 | $(call if_changed,unroll) | ||
25 | |||
26 | targets += int4.c | ||
27 | $(obj)/int4.c: UNROLL := 4 | ||
28 | $(obj)/int4.c: $(src)/int.uc $(src)/unroll.awk FORCE | ||
29 | $(call if_changed,unroll) | ||
30 | |||
31 | targets += int8.c | ||
32 | $(obj)/int8.c: UNROLL := 8 | ||
33 | $(obj)/int8.c: $(src)/int.uc $(src)/unroll.awk FORCE | ||
34 | $(call if_changed,unroll) | ||
35 | |||
36 | targets += int16.c | ||
37 | $(obj)/int16.c: UNROLL := 16 | ||
38 | $(obj)/int16.c: $(src)/int.uc $(src)/unroll.awk FORCE | ||
39 | $(call if_changed,unroll) | ||
40 | |||
41 | targets += int32.c | ||
42 | $(obj)/int32.c: UNROLL := 32 | ||
43 | $(obj)/int32.c: $(src)/int.uc $(src)/unroll.awk FORCE | ||
44 | $(call if_changed,unroll) | ||
45 | |||
46 | CFLAGS_altivec1.o += $(altivec_flags) | ||
47 | targets += altivec1.c | ||
48 | $(obj)/altivec1.c: UNROLL := 1 | ||
49 | $(obj)/altivec1.c: $(src)/altivec.uc $(src)/unroll.awk FORCE | ||
50 | $(call if_changed,unroll) | ||
51 | |||
52 | CFLAGS_altivec2.o += $(altivec_flags) | ||
53 | targets += altivec2.c | ||
54 | $(obj)/altivec2.c: UNROLL := 2 | ||
55 | $(obj)/altivec2.c: $(src)/altivec.uc $(src)/unroll.awk FORCE | ||
56 | $(call if_changed,unroll) | ||
57 | |||
58 | CFLAGS_altivec4.o += $(altivec_flags) | ||
59 | targets += altivec4.c | ||
60 | $(obj)/altivec4.c: UNROLL := 4 | ||
61 | $(obj)/altivec4.c: $(src)/altivec.uc $(src)/unroll.awk FORCE | ||
62 | $(call if_changed,unroll) | ||
63 | |||
64 | CFLAGS_altivec8.o += $(altivec_flags) | ||
65 | targets += altivec8.c | ||
66 | $(obj)/altivec8.c: UNROLL := 8 | ||
67 | $(obj)/altivec8.c: $(src)/altivec.uc $(src)/unroll.awk FORCE | ||
68 | $(call if_changed,unroll) | ||
69 | |||
70 | quiet_cmd_mktable = TABLE $@ | ||
71 | cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 ) | ||
72 | |||
73 | targets += tables.c | ||
74 | $(obj)/tables.c: $(obj)/mktables FORCE | ||
75 | $(call if_changed,mktable) | ||
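unroll.awk itself appears in the diffstat but its body is not shown in this section. Assuming the usual semantics of the kernel's unroll script ($# replaced by N, and each line containing $$ re-emitted once per value 0..N-1), the UNROLL := 2 rule above would turn a template line from int.uc into two concrete statements:

/* Template line in int.uc: */
wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];

/* Assumed expansion in the generated int2.c (N = 2): */
wd0 = *(unative_t *)&dptr[z][d+0*NSIZE];
wd1 = *(unative_t *)&dptr[z][d+1*NSIZE];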
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
new file mode 100644
index 000000000000..b595f560bee7
--- /dev/null
+++ b/lib/raid6/algos.c
@@ -0,0 +1,154 @@
1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright 2002 H. Peter Anvin - All Rights Reserved | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation, Inc., 53 Temple Place Ste 330, | ||
8 | * Boston MA 02111-1307, USA; either version 2 of the License, or | ||
9 | * (at your option) any later version; incorporated herein by reference. | ||
10 | * | ||
11 | * ----------------------------------------------------------------------- */ | ||
12 | |||
13 | /* | ||
14 | * raid6/algos.c | ||
15 | * | ||
16 | * Algorithm list and algorithm selection for RAID-6 | ||
17 | */ | ||
18 | |||
19 | #include <linux/raid/pq.h> | ||
20 | #ifndef __KERNEL__ | ||
21 | #include <sys/mman.h> | ||
22 | #include <stdio.h> | ||
23 | #else | ||
24 | #include <linux/gfp.h> | ||
25 | #if !RAID6_USE_EMPTY_ZERO_PAGE | ||
26 | /* In .bss so it's zeroed */ | ||
27 | const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256))); | ||
28 | EXPORT_SYMBOL(raid6_empty_zero_page); | ||
29 | #endif | ||
30 | #endif | ||
31 | |||
32 | struct raid6_calls raid6_call; | ||
33 | EXPORT_SYMBOL_GPL(raid6_call); | ||
34 | |||
35 | const struct raid6_calls * const raid6_algos[] = { | ||
36 | &raid6_intx1, | ||
37 | &raid6_intx2, | ||
38 | &raid6_intx4, | ||
39 | &raid6_intx8, | ||
40 | #if defined(__ia64__) | ||
41 | &raid6_intx16, | ||
42 | &raid6_intx32, | ||
43 | #endif | ||
44 | #if defined(__i386__) && !defined(__arch_um__) | ||
45 | &raid6_mmxx1, | ||
46 | &raid6_mmxx2, | ||
47 | &raid6_sse1x1, | ||
48 | &raid6_sse1x2, | ||
49 | &raid6_sse2x1, | ||
50 | &raid6_sse2x2, | ||
51 | #endif | ||
52 | #if defined(__x86_64__) && !defined(__arch_um__) | ||
53 | &raid6_sse2x1, | ||
54 | &raid6_sse2x2, | ||
55 | &raid6_sse2x4, | ||
56 | #endif | ||
57 | #ifdef CONFIG_ALTIVEC | ||
58 | &raid6_altivec1, | ||
59 | &raid6_altivec2, | ||
60 | &raid6_altivec4, | ||
61 | &raid6_altivec8, | ||
62 | #endif | ||
63 | NULL | ||
64 | }; | ||
65 | |||
66 | #ifdef __KERNEL__ | ||
67 | #define RAID6_TIME_JIFFIES_LG2 4 | ||
68 | #else | ||
69 | /* Need more time to be stable in userspace */ | ||
70 | #define RAID6_TIME_JIFFIES_LG2 9 | ||
71 | #define time_before(x, y) ((x) < (y)) | ||
72 | #endif | ||
73 | |||
74 | /* Try to pick the best algorithm */ | ||
75 | /* This code uses the gfmul table as convenient data set to abuse */ | ||
76 | |||
77 | int __init raid6_select_algo(void) | ||
78 | { | ||
79 | const struct raid6_calls * const * algo; | ||
80 | const struct raid6_calls * best; | ||
81 | char *syndromes; | ||
82 | void *dptrs[(65536/PAGE_SIZE)+2]; | ||
83 | int i, disks; | ||
84 | unsigned long perf, bestperf; | ||
85 | int bestprefer; | ||
86 | unsigned long j0, j1; | ||
87 | |||
88 | disks = (65536/PAGE_SIZE)+2; | ||
89 | for ( i = 0 ; i < disks-2 ; i++ ) { | ||
90 | dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i; | ||
91 | } | ||
92 | |||
93 | /* Normal code - use a 2-page allocation to avoid D$ conflict */ | ||
94 | syndromes = (void *) __get_free_pages(GFP_KERNEL, 1); | ||
95 | |||
96 | if ( !syndromes ) { | ||
97 | printk("raid6: Yikes! No memory available.\n"); | ||
98 | return -ENOMEM; | ||
99 | } | ||
100 | |||
101 | dptrs[disks-2] = syndromes; | ||
102 | dptrs[disks-1] = syndromes + PAGE_SIZE; | ||
103 | |||
104 | bestperf = 0; bestprefer = 0; best = NULL; | ||
105 | |||
106 | for ( algo = raid6_algos ; *algo ; algo++ ) { | ||
107 | if ( !(*algo)->valid || (*algo)->valid() ) { | ||
108 | perf = 0; | ||
109 | |||
110 | preempt_disable(); | ||
111 | j0 = jiffies; | ||
112 | while ( (j1 = jiffies) == j0 ) | ||
113 | cpu_relax(); | ||
114 | while (time_before(jiffies, | ||
115 | j1 + (1<<RAID6_TIME_JIFFIES_LG2))) { | ||
116 | (*algo)->gen_syndrome(disks, PAGE_SIZE, dptrs); | ||
117 | perf++; | ||
118 | } | ||
119 | preempt_enable(); | ||
120 | |||
121 | if ( (*algo)->prefer > bestprefer || | ||
122 | ((*algo)->prefer == bestprefer && | ||
123 | perf > bestperf) ) { | ||
124 | best = *algo; | ||
125 | bestprefer = best->prefer; | ||
126 | bestperf = perf; | ||
127 | } | ||
128 | printk("raid6: %-8s %5ld MB/s\n", (*algo)->name, | ||
129 | (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2)); | ||
130 | } | ||
131 | } | ||
132 | |||
133 | if (best) { | ||
134 | printk("raid6: using algorithm %s (%ld MB/s)\n", | ||
135 | best->name, | ||
136 | (bestperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2)); | ||
137 | raid6_call = *best; | ||
138 | } else | ||
139 | printk("raid6: Yikes! No algorithm found!\n"); | ||
140 | |||
141 | free_pages((unsigned long)syndromes, 1); | ||
142 | |||
143 | return best ? 0 : -EINVAL; | ||
144 | } | ||
145 | |||
146 | static void raid6_exit(void) | ||
147 | { | ||
148 | do { } while (0); | ||
149 | } | ||
150 | |||
151 | subsys_initcall(raid6_select_algo); | ||
152 | module_exit(raid6_exit); | ||
153 | MODULE_LICENSE("GPL"); | ||
154 | MODULE_DESCRIPTION("RAID6 Q-syndrome calculations"); | ||
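A note on the throughput arithmetic in the printk() calls above: each gen_syndrome() pass touches (disks-2)*PAGE_SIZE = 65536 = 2^16 bytes of data, and the timed window is (1 << RAID6_TIME_JIFFIES_LG2) jiffies, i.e. 2^LG2/HZ seconds. That is where the shift comes from (raid6_mbs() is an illustrative restatement, not part of the patch):

/* bytes/s = perf * 2^16 * HZ / 2^LG2
 * MB/s    = bytes/s / 2^20
 *         = (perf * HZ) >> (20 - 16 + RAID6_TIME_JIFFIES_LG2)
 */
static unsigned long raid6_mbs(unsigned long perf)
{
	return (perf * HZ) >> (20 - 16 + RAID6_TIME_JIFFIES_LG2);
}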
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
new file mode 100644
index 000000000000..2654d5c854be
--- /dev/null
+++ b/lib/raid6/altivec.uc
@@ -0,0 +1,130 @@
1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation, Inc., 53 Temple Place Ste 330, | ||
8 | * Boston MA 02111-1307, USA; either version 2 of the License, or | ||
9 | * (at your option) any later version; incorporated herein by reference. | ||
10 | * | ||
11 | * ----------------------------------------------------------------------- */ | ||
12 | |||
13 | /* | ||
14 | * raid6altivec$#.c | ||
15 | * | ||
16 | * $#-way unrolled Altivec implementation of the RAID-6 syndrome functions | ||
17 | * | ||
18 | * This file is postprocessed using unroll.awk | ||
19 | * | ||
20 | * <benh> hpa: in process, | ||
21 | * you can just "steal" the vec unit with enable_kernel_altivec() (but | ||
22 | * bracket this with preempt_disable/enable or in a lock) | ||
23 | */ | ||
24 | |||
25 | #include <linux/raid/pq.h> | ||
26 | |||
27 | #ifdef CONFIG_ALTIVEC | ||
28 | |||
29 | #include <altivec.h> | ||
30 | #ifdef __KERNEL__ | ||
31 | # include <asm/system.h> | ||
32 | # include <asm/cputable.h> | ||
33 | #endif | ||
34 | |||
35 | /* | ||
36 | * This is the C data type to use. We use a vector of | ||
37 | * signed char so vec_cmpgt() will generate the right | ||
38 | * instruction. | ||
39 | */ | ||
40 | |||
41 | typedef vector signed char unative_t; | ||
42 | |||
43 | #define NBYTES(x) ((vector signed char) {x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x}) | ||
44 | #define NSIZE sizeof(unative_t) | ||
45 | |||
46 | /* | ||
47 | * The SHLBYTE() operation shifts each byte left by 1, *not* | ||
48 | * rolling over into the next byte | ||
49 | */ | ||
50 | static inline __attribute_const__ unative_t SHLBYTE(unative_t v) | ||
51 | { | ||
52 | return vec_add(v,v); | ||
53 | } | ||
54 | |||
55 | /* | ||
56 | * The MASK() operation returns 0xFF in any byte for which the high | ||
57 | * bit is 1, 0x00 for any byte for which the high bit is 0. | ||
58 | */ | ||
59 | static inline __attribute_const__ unative_t MASK(unative_t v) | ||
60 | { | ||
61 | unative_t zv = NBYTES(0); | ||
62 | |||
63 | /* vec_cmpgt returns a vector bool char; thus the need for the cast */ | ||
64 | return (unative_t)vec_cmpgt(zv, v); | ||
65 | } | ||
66 | |||
67 | |||
68 | /* This is noinline to make damned sure that gcc doesn't move any of the | ||
69 | Altivec code around the enable/disable code */ | ||
70 | static void noinline | ||
71 | raid6_altivec$#_gen_syndrome_real(int disks, size_t bytes, void **ptrs) | ||
72 | { | ||
73 | u8 **dptr = (u8 **)ptrs; | ||
74 | u8 *p, *q; | ||
75 | int d, z, z0; | ||
76 | |||
77 | unative_t wd$$, wq$$, wp$$, w1$$, w2$$; | ||
78 | unative_t x1d = NBYTES(0x1d); | ||
79 | |||
80 | z0 = disks - 3; /* Highest data disk */ | ||
81 | p = dptr[z0+1]; /* XOR parity */ | ||
82 | q = dptr[z0+2]; /* RS syndrome */ | ||
83 | |||
84 | for ( d = 0 ; d < bytes ; d += NSIZE*$# ) { | ||
85 | wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; | ||
86 | for ( z = z0-1 ; z >= 0 ; z-- ) { | ||
87 | wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; | ||
88 | wp$$ = vec_xor(wp$$, wd$$); | ||
89 | w2$$ = MASK(wq$$); | ||
90 | w1$$ = SHLBYTE(wq$$); | ||
91 | w2$$ = vec_and(w2$$, x1d); | ||
92 | w1$$ = vec_xor(w1$$, w2$$); | ||
93 | wq$$ = vec_xor(w1$$, wd$$); | ||
94 | } | ||
95 | *(unative_t *)&p[d+NSIZE*$$] = wp$$; | ||
96 | *(unative_t *)&q[d+NSIZE*$$] = wq$$; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs) | ||
101 | { | ||
102 | preempt_disable(); | ||
103 | enable_kernel_altivec(); | ||
104 | |||
105 | raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs); | ||
106 | |||
107 | preempt_enable(); | ||
108 | } | ||
109 | |||
110 | int raid6_have_altivec(void); | ||
111 | #if $# == 1 | ||
112 | int raid6_have_altivec(void) | ||
113 | { | ||
114 | /* This assumes either all CPUs have Altivec or none does */ | ||
115 | # ifdef __KERNEL__ | ||
116 | return cpu_has_feature(CPU_FTR_ALTIVEC); | ||
117 | # else | ||
118 | return 1; | ||
119 | # endif | ||
120 | } | ||
121 | #endif | ||
122 | |||
123 | const struct raid6_calls raid6_altivec$# = { | ||
124 | raid6_altivec$#_gen_syndrome, | ||
125 | raid6_have_altivec, | ||
126 | "altivecx$#", | ||
127 | 0 | ||
128 | }; | ||
129 | |||
130 | #endif /* CONFIG_ALTIVEC */ | ||
diff --git a/lib/raid6/int.uc b/lib/raid6/int.uc
new file mode 100644
index 000000000000..d1e276a14fab
--- /dev/null
+++ b/lib/raid6/int.uc
@@ -0,0 +1,117 @@
1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation, Inc., 53 Temple Place Ste 330, | ||
8 | * Boston MA 02111-1307, USA; either version 2 of the License, or | ||
9 | * (at your option) any later version; incorporated herein by reference. | ||
10 | * | ||
11 | * ----------------------------------------------------------------------- */ | ||
12 | |||
13 | /* | ||
14 | * raid6int$#.c | ||
15 | * | ||
16 | * $#-way unrolled portable integer math RAID-6 instruction set | ||
17 | * | ||
18 | * This file is postprocessed using unroll.awk | ||
19 | */ | ||
20 | |||
21 | #include <linux/raid/pq.h> | ||
22 | |||
23 | /* | ||
24 | * This is the C data type to use | ||
25 | */ | ||
26 | |||
27 | /* Change this from BITS_PER_LONG if there is something better... */ | ||
28 | #if BITS_PER_LONG == 64 | ||
29 | # define NBYTES(x) ((x) * 0x0101010101010101UL) | ||
30 | # define NSIZE 8 | ||
31 | # define NSHIFT 3 | ||
32 | # define NSTRING "64" | ||
33 | typedef u64 unative_t; | ||
34 | #else | ||
35 | # define NBYTES(x) ((x) * 0x01010101U) | ||
36 | # define NSIZE 4 | ||
37 | # define NSHIFT 2 | ||
38 | # define NSTRING "32" | ||
39 | typedef u32 unative_t; | ||
40 | #endif | ||
41 | |||
42 | |||
43 | |||
44 | /* | ||
45 | * IA-64 wants insane amounts of unrolling. On other architectures that | ||
46 | * is just a waste of space. | ||
47 | */ | ||
48 | #if ($# <= 8) || defined(__ia64__) | ||
49 | |||
50 | |||
51 | /* | ||
52 | * These sub-operations are separate inlines since they can sometimes be | ||
53 | * specially optimized using architecture-specific hacks. | ||
54 | */ | ||
55 | |||
56 | /* | ||
57 | * The SHLBYTE() operation shifts each byte left by 1, *not* | ||
58 | * rolling over into the next byte | ||
59 | */ | ||
60 | static inline __attribute_const__ unative_t SHLBYTE(unative_t v) | ||
61 | { | ||
62 | unative_t vv; | ||
63 | |||
64 | vv = (v << 1) & NBYTES(0xfe); | ||
65 | return vv; | ||
66 | } | ||
67 | |||
68 | /* | ||
69 | * The MASK() operation returns 0xFF in any byte for which the high | ||
70 | * bit is 1, 0x00 for any byte for which the high bit is 0. | ||
71 | */ | ||
72 | static inline __attribute_const__ unative_t MASK(unative_t v) | ||
73 | { | ||
74 | unative_t vv; | ||
75 | |||
76 | vv = v & NBYTES(0x80); | ||
77 | vv = (vv << 1) - (vv >> 7); /* Overflow on the top bit is OK */ | ||
78 | return vv; | ||
79 | } | ||
80 | |||
81 | |||
82 | static void raid6_int$#_gen_syndrome(int disks, size_t bytes, void **ptrs) | ||
83 | { | ||
84 | u8 **dptr = (u8 **)ptrs; | ||
85 | u8 *p, *q; | ||
86 | int d, z, z0; | ||
87 | |||
88 | unative_t wd$$, wq$$, wp$$, w1$$, w2$$; | ||
89 | |||
90 | z0 = disks - 3; /* Highest data disk */ | ||
91 | p = dptr[z0+1]; /* XOR parity */ | ||
92 | q = dptr[z0+2]; /* RS syndrome */ | ||
93 | |||
94 | for ( d = 0 ; d < bytes ; d += NSIZE*$# ) { | ||
95 | wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; | ||
96 | for ( z = z0-1 ; z >= 0 ; z-- ) { | ||
97 | wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; | ||
98 | wp$$ ^= wd$$; | ||
99 | w2$$ = MASK(wq$$); | ||
100 | w1$$ = SHLBYTE(wq$$); | ||
101 | w2$$ &= NBYTES(0x1d); | ||
102 | w1$$ ^= w2$$; | ||
103 | wq$$ = w1$$ ^ wd$$; | ||
104 | } | ||
105 | *(unative_t *)&p[d+NSIZE*$$] = wp$$; | ||
106 | *(unative_t *)&q[d+NSIZE*$$] = wq$$; | ||
107 | } | ||
108 | } | ||
109 | |||
110 | const struct raid6_calls raid6_intx$# = { | ||
111 | raid6_int$#_gen_syndrome, | ||
112 | NULL, /* always valid */ | ||
113 | "int" NSTRING "x$#", | ||
114 | 0 | ||
115 | }; | ||
116 | |||
117 | #endif | ||
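The MASK()/SHLBYTE() pair above implements multiplication by 2 in GF(2^8) with the 0x11d polynomial, one byte lane at a time. A worked single-byte example, with a scalar equivalent (gf256_mul2() is illustrative, not part of the patch):

#include <linux/types.h>

/* For v = 0xc3: MASK(v) = 0xff (high bit set), SHLBYTE(v) = 0x86,
 * and 0x86 ^ (0xff & 0x1d) = 0x9b, which is 2 * 0xc3 in GF(2^8). */
static inline u8 gf256_mul2(u8 v)
{
	return (v << 1) ^ ((v & 0x80) ? 0x1d : 0);
}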
diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c
new file mode 100644
index 000000000000..3b1500843bba
--- /dev/null
+++ b/lib/raid6/mktables.c
@@ -0,0 +1,132 @@
1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved | ||
4 | * | ||
5 | * This file is part of the Linux kernel, and is made available under | ||
6 | * the terms of the GNU General Public License version 2 or (at your | ||
7 | * option) any later version; incorporated herein by reference. | ||
8 | * | ||
9 | * ----------------------------------------------------------------------- */ | ||
10 | |||
11 | /* | ||
12 | * mktables.c | ||
13 | * | ||
14 | * Make RAID-6 tables. This is a host user space program to be run at | ||
15 | * compile time. | ||
16 | */ | ||
17 | |||
18 | #include <stdio.h> | ||
19 | #include <string.h> | ||
20 | #include <inttypes.h> | ||
21 | #include <stdlib.h> | ||
22 | #include <time.h> | ||
23 | |||
24 | static uint8_t gfmul(uint8_t a, uint8_t b) | ||
25 | { | ||
26 | uint8_t v = 0; | ||
27 | |||
28 | while (b) { | ||
29 | if (b & 1) | ||
30 | v ^= a; | ||
31 | a = (a << 1) ^ (a & 0x80 ? 0x1d : 0); | ||
32 | b >>= 1; | ||
33 | } | ||
34 | |||
35 | return v; | ||
36 | } | ||
37 | |||
38 | static uint8_t gfpow(uint8_t a, int b) | ||
39 | { | ||
40 | uint8_t v = 1; | ||
41 | |||
42 | b %= 255; | ||
43 | if (b < 0) | ||
44 | b += 255; | ||
45 | |||
46 | while (b) { | ||
47 | if (b & 1) | ||
48 | v = gfmul(v, a); | ||
49 | a = gfmul(a, a); | ||
50 | b >>= 1; | ||
51 | } | ||
52 | |||
53 | return v; | ||
54 | } | ||
55 | |||
56 | int main(int argc, char *argv[]) | ||
57 | { | ||
58 | int i, j, k; | ||
59 | uint8_t v; | ||
60 | uint8_t exptbl[256], invtbl[256]; | ||
61 | |||
62 | printf("#include <linux/raid/pq.h>\n"); | ||
63 | |||
64 | /* Compute multiplication table */ | ||
65 | printf("\nconst u8 __attribute__((aligned(256)))\n" | ||
66 | "raid6_gfmul[256][256] =\n" | ||
67 | "{\n"); | ||
68 | for (i = 0; i < 256; i++) { | ||
69 | printf("\t{\n"); | ||
70 | for (j = 0; j < 256; j += 8) { | ||
71 | printf("\t\t"); | ||
72 | for (k = 0; k < 8; k++) | ||
73 | printf("0x%02x,%c", gfmul(i, j + k), | ||
74 | (k == 7) ? '\n' : ' '); | ||
75 | } | ||
76 | printf("\t},\n"); | ||
77 | } | ||
78 | printf("};\n"); | ||
79 | printf("#ifdef __KERNEL__\n"); | ||
80 | printf("EXPORT_SYMBOL(raid6_gfmul);\n"); | ||
81 | printf("#endif\n"); | ||
82 | |||
83 | /* Compute power-of-2 table (exponent) */ | ||
84 | v = 1; | ||
85 | printf("\nconst u8 __attribute__((aligned(256)))\n" | ||
86 | "raid6_gfexp[256] =\n" "{\n"); | ||
87 | for (i = 0; i < 256; i += 8) { | ||
88 | printf("\t"); | ||
89 | for (j = 0; j < 8; j++) { | ||
90 | exptbl[i + j] = v; | ||
91 | printf("0x%02x,%c", v, (j == 7) ? '\n' : ' '); | ||
92 | v = gfmul(v, 2); | ||
93 | if (v == 1) | ||
94 | v = 0; /* For entry 255, not a real entry */ | ||
95 | } | ||
96 | } | ||
97 | printf("};\n"); | ||
98 | printf("#ifdef __KERNEL__\n"); | ||
99 | printf("EXPORT_SYMBOL(raid6_gfexp);\n"); | ||
100 | printf("#endif\n"); | ||
101 | |||
102 | /* Compute inverse table x^-1 == x^254 */ | ||
103 | printf("\nconst u8 __attribute__((aligned(256)))\n" | ||
104 | "raid6_gfinv[256] =\n" "{\n"); | ||
105 | for (i = 0; i < 256; i += 8) { | ||
106 | printf("\t"); | ||
107 | for (j = 0; j < 8; j++) { | ||
108 | invtbl[i + j] = v = gfpow(i + j, 254); | ||
109 | printf("0x%02x,%c", v, (j == 7) ? '\n' : ' '); | ||
110 | } | ||
111 | } | ||
112 | printf("};\n"); | ||
113 | printf("#ifdef __KERNEL__\n"); | ||
114 | printf("EXPORT_SYMBOL(raid6_gfinv);\n"); | ||
115 | printf("#endif\n"); | ||
116 | |||
117 | /* Compute inv(2^x + 1) (exponent-xor-inverse) table */ | ||
118 | printf("\nconst u8 __attribute__((aligned(256)))\n" | ||
119 | "raid6_gfexi[256] =\n" "{\n"); | ||
120 | for (i = 0; i < 256; i += 8) { | ||
121 | printf("\t"); | ||
122 | for (j = 0; j < 8; j++) | ||
123 | printf("0x%02x,%c", invtbl[exptbl[i + j] ^ 1], | ||
124 | (j == 7) ? '\n' : ' '); | ||
125 | } | ||
126 | printf("};\n"); | ||
127 | printf("#ifdef __KERNEL__\n"); | ||
128 | printf("EXPORT_SYMBOL(raid6_gfexi);\n"); | ||
129 | printf("#endif\n"); | ||
130 | |||
131 | return 0; | ||
132 | } | ||
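Since gfpow(x, 254) serves as the inverse (x^255 == 1 for every nonzero element of GF(2^8)), a small self-check could be appended inside mktables.c to confirm the emitted tables are mutually consistent (a sketch only; check_tables() is not part of the patch and assumes it sits next to the static gfmul()/gfpow() above):

#include <assert.h>

static void check_tables(void)
{
	int x;

	/* x * x^-1 == 1 for every nonzero field element */
	for (x = 1; x < 256; x++)
		assert(gfmul(x, gfpow(x, 254)) == 1);
}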
diff --git a/lib/raid6/mmx.c b/lib/raid6/mmx.c
new file mode 100644
index 000000000000..279347f23094
--- /dev/null
+++ b/lib/raid6/mmx.c
@@ -0,0 +1,142 @@
1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright 2002 H. Peter Anvin - All Rights Reserved | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation, Inc., 53 Temple Place Ste 330, | ||
8 | * Boston MA 02111-1307, USA; either version 2 of the License, or | ||
9 | * (at your option) any later version; incorporated herein by reference. | ||
10 | * | ||
11 | * ----------------------------------------------------------------------- */ | ||
12 | |||
13 | /* | ||
14 | * raid6/mmx.c | ||
15 | * | ||
16 | * MMX implementation of RAID-6 syndrome functions | ||
17 | */ | ||
18 | |||
19 | #if defined(__i386__) && !defined(__arch_um__) | ||
20 | |||
21 | #include <linux/raid/pq.h> | ||
22 | #include "x86.h" | ||
23 | |||
24 | /* Shared with raid6/sse1.c */ | ||
25 | const struct raid6_mmx_constants { | ||
26 | u64 x1d; | ||
27 | } raid6_mmx_constants = { | ||
28 | 0x1d1d1d1d1d1d1d1dULL, | ||
29 | }; | ||
30 | |||
31 | static int raid6_have_mmx(void) | ||
32 | { | ||
33 | /* Not really "boot_cpu" but "all_cpus" */ | ||
34 | return boot_cpu_has(X86_FEATURE_MMX); | ||
35 | } | ||
36 | |||
37 | /* | ||
38 | * Plain MMX implementation | ||
39 | */ | ||
40 | static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs) | ||
41 | { | ||
42 | u8 **dptr = (u8 **)ptrs; | ||
43 | u8 *p, *q; | ||
44 | int d, z, z0; | ||
45 | |||
46 | z0 = disks - 3; /* Highest data disk */ | ||
47 | p = dptr[z0+1]; /* XOR parity */ | ||
48 | q = dptr[z0+2]; /* RS syndrome */ | ||
49 | |||
50 | kernel_fpu_begin(); | ||
51 | |||
52 | asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d)); | ||
53 | asm volatile("pxor %mm5,%mm5"); /* Zero temp */ | ||
54 | |||
55 | for ( d = 0 ; d < bytes ; d += 8 ) { | ||
56 | asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */ | ||
57 | asm volatile("movq %mm2,%mm4"); /* Q[0] */ | ||
58 | for ( z = z0-1 ; z >= 0 ; z-- ) { | ||
59 | asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d])); | ||
60 | asm volatile("pcmpgtb %mm4,%mm5"); | ||
61 | asm volatile("paddb %mm4,%mm4"); | ||
62 | asm volatile("pand %mm0,%mm5"); | ||
63 | asm volatile("pxor %mm5,%mm4"); | ||
64 | asm volatile("pxor %mm5,%mm5"); | ||
65 | asm volatile("pxor %mm6,%mm2"); | ||
66 | asm volatile("pxor %mm6,%mm4"); | ||
67 | } | ||
68 | asm volatile("movq %%mm2,%0" : "=m" (p[d])); | ||
69 | asm volatile("pxor %mm2,%mm2"); | ||
70 | asm volatile("movq %%mm4,%0" : "=m" (q[d])); | ||
71 | asm volatile("pxor %mm4,%mm4"); | ||
72 | } | ||
73 | |||
74 | kernel_fpu_end(); | ||
75 | } | ||
76 | |||
77 | const struct raid6_calls raid6_mmxx1 = { | ||
78 | raid6_mmx1_gen_syndrome, | ||
79 | raid6_have_mmx, | ||
80 | "mmxx1", | ||
81 | 0 | ||
82 | }; | ||
83 | |||
84 | /* | ||
85 | * Unrolled-by-2 MMX implementation | ||
86 | */ | ||
87 | static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs) | ||
88 | { | ||
89 | u8 **dptr = (u8 **)ptrs; | ||
90 | u8 *p, *q; | ||
91 | int d, z, z0; | ||
92 | |||
93 | z0 = disks - 3; /* Highest data disk */ | ||
94 | p = dptr[z0+1]; /* XOR parity */ | ||
95 | q = dptr[z0+2]; /* RS syndrome */ | ||
96 | |||
97 | kernel_fpu_begin(); | ||
98 | |||
99 | asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d)); | ||
100 | asm volatile("pxor %mm5,%mm5"); /* Zero temp */ | ||
101 | asm volatile("pxor %mm7,%mm7"); /* Zero temp */ | ||
102 | |||
103 | for ( d = 0 ; d < bytes ; d += 16 ) { | ||
104 | asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */ | ||
105 | asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8])); | ||
106 | asm volatile("movq %mm2,%mm4"); /* Q[0] */ | ||
107 | asm volatile("movq %mm3,%mm6"); /* Q[1] */ | ||
108 | for ( z = z0-1 ; z >= 0 ; z-- ) { | ||
109 | asm volatile("pcmpgtb %mm4,%mm5"); | ||
110 | asm volatile("pcmpgtb %mm6,%mm7"); | ||
111 | asm volatile("paddb %mm4,%mm4"); | ||
112 | asm volatile("paddb %mm6,%mm6"); | ||
113 | asm volatile("pand %mm0,%mm5"); | ||
114 | asm volatile("pand %mm0,%mm7"); | ||
115 | asm volatile("pxor %mm5,%mm4"); | ||
116 | asm volatile("pxor %mm7,%mm6"); | ||
117 | asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d])); | ||
118 | asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8])); | ||
119 | asm volatile("pxor %mm5,%mm2"); | ||
120 | asm volatile("pxor %mm7,%mm3"); | ||
121 | asm volatile("pxor %mm5,%mm4"); | ||
122 | asm volatile("pxor %mm7,%mm6"); | ||
123 | asm volatile("pxor %mm5,%mm5"); | ||
124 | asm volatile("pxor %mm7,%mm7"); | ||
125 | } | ||
126 | asm volatile("movq %%mm2,%0" : "=m" (p[d])); | ||
127 | asm volatile("movq %%mm3,%0" : "=m" (p[d+8])); | ||
128 | asm volatile("movq %%mm4,%0" : "=m" (q[d])); | ||
129 | asm volatile("movq %%mm6,%0" : "=m" (q[d+8])); | ||
130 | } | ||
131 | |||
132 | kernel_fpu_end(); | ||
133 | } | ||
134 | |||
135 | const struct raid6_calls raid6_mmxx2 = { | ||
136 | raid6_mmx2_gen_syndrome, | ||
137 | raid6_have_mmx, | ||
138 | "mmxx2", | ||
139 | 0 | ||
140 | }; | ||
141 | |||
142 | #endif | ||
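For readers mapping the MMX sequence back to the scalar algorithm in int.uc, the inner instructions correspond step for step to the multiply-by-2 of the running Q value (my annotation, not from the patch):

/* With mm0 = 0x1d1d... and mm5 pre-zeroed:
 *
 *	pcmpgtb %mm4,%mm5	w2 = MASK(wq)     (0 > byte iff high bit set)
 *	paddb   %mm4,%mm4	w1 = SHLBYTE(wq)  (bytewise v+v == v<<1)
 *	pand    %mm0,%mm5	w2 &= 0x1d1d...
 *	pxor    %mm5,%mm4	wq = w1 ^ w2
 *	pxor    %mm6,%mm4	wq ^= wd          (fold in the next data disk)
 */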
diff --git a/lib/raid6/recov.c b/lib/raid6/recov.c
new file mode 100644
index 000000000000..8590d19cf522
--- /dev/null
+++ b/lib/raid6/recov.c
@@ -0,0 +1,132 @@
1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright 2002 H. Peter Anvin - All Rights Reserved | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation, Inc., 53 Temple Place Ste 330, | ||
8 | * Boston MA 02111-1307, USA; either version 2 of the License, or | ||
9 | * (at your option) any later version; incorporated herein by reference. | ||
10 | * | ||
11 | * ----------------------------------------------------------------------- */ | ||
12 | |||
13 | /* | ||
14 | * raid6/recov.c | ||
15 | * | ||
16 | * RAID-6 data recovery in dual failure mode. In single failure mode, | ||
17 | * use the RAID-5 algorithm (or, in the case of Q failure, just reconstruct | ||
18 | * the syndrome.) | ||
19 | */ | ||
20 | |||
21 | #include <linux/raid/pq.h> | ||
22 | |||
23 | /* Recover two failed data blocks. */ | ||
24 | void raid6_2data_recov(int disks, size_t bytes, int faila, int failb, | ||
25 | void **ptrs) | ||
26 | { | ||
27 | u8 *p, *q, *dp, *dq; | ||
28 | u8 px, qx, db; | ||
29 | const u8 *pbmul; /* P multiplier table for B data */ | ||
30 | const u8 *qmul; /* Q multiplier table (for both) */ | ||
31 | |||
32 | p = (u8 *)ptrs[disks-2]; | ||
33 | q = (u8 *)ptrs[disks-1]; | ||
34 | |||
35 | /* Compute syndrome with zero for the missing data pages | ||
36 | Use the dead data pages as temporary storage for | ||
37 | delta p and delta q */ | ||
38 | dp = (u8 *)ptrs[faila]; | ||
39 | ptrs[faila] = (void *)raid6_empty_zero_page; | ||
40 | ptrs[disks-2] = dp; | ||
41 | dq = (u8 *)ptrs[failb]; | ||
42 | ptrs[failb] = (void *)raid6_empty_zero_page; | ||
43 | ptrs[disks-1] = dq; | ||
44 | |||
45 | raid6_call.gen_syndrome(disks, bytes, ptrs); | ||
46 | |||
47 | /* Restore pointer table */ | ||
48 | ptrs[faila] = dp; | ||
49 | ptrs[failb] = dq; | ||
50 | ptrs[disks-2] = p; | ||
51 | ptrs[disks-1] = q; | ||
52 | |||
53 | /* Now, pick the proper data tables */ | ||
54 | pbmul = raid6_gfmul[raid6_gfexi[failb-faila]]; | ||
55 | qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]]; | ||
56 | |||
57 | /* Now do it... */ | ||
58 | while ( bytes-- ) { | ||
59 | px = *p ^ *dp; | ||
60 | qx = qmul[*q ^ *dq]; | ||
61 | *dq++ = db = pbmul[px] ^ qx; /* Reconstructed B */ | ||
62 | *dp++ = db ^ px; /* Reconstructed A */ | ||
63 | p++; q++; | ||
64 | } | ||
65 | } | ||
66 | EXPORT_SYMBOL_GPL(raid6_2data_recov); | ||
67 | |||
68 | /* Recover failure of one data block plus the P block */ | ||
69 | void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs) | ||
70 | { | ||
71 | u8 *p, *q, *dq; | ||
72 | const u8 *qmul; /* Q multiplier table */ | ||
73 | |||
74 | p = (u8 *)ptrs[disks-2]; | ||
75 | q = (u8 *)ptrs[disks-1]; | ||
76 | |||
77 | /* Compute syndrome with zero for the missing data page | ||
78 | Use the dead data page as temporary storage for delta q */ | ||
79 | dq = (u8 *)ptrs[faila]; | ||
80 | ptrs[faila] = (void *)raid6_empty_zero_page; | ||
81 | ptrs[disks-1] = dq; | ||
82 | |||
83 | raid6_call.gen_syndrome(disks, bytes, ptrs); | ||
84 | |||
85 | /* Restore pointer table */ | ||
86 | ptrs[faila] = dq; | ||
87 | ptrs[disks-1] = q; | ||
88 | |||
89 | /* Now, pick the proper data tables */ | ||
90 | qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]]; | ||
91 | |||
92 | /* Now do it... */ | ||
93 | while ( bytes-- ) { | ||
94 | *p++ ^= *dq = qmul[*q ^ *dq]; | ||
95 | q++; dq++; | ||
96 | } | ||
97 | } | ||
98 | EXPORT_SYMBOL_GPL(raid6_datap_recov); | ||
99 | |||
100 | #ifndef __KERNEL__ | ||
101 | /* Testing only */ | ||
102 | |||
103 | /* Recover two failed blocks. */ | ||
104 | void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs) | ||
105 | { | ||
106 | if ( faila > failb ) { | ||
107 | int tmp = faila; | ||
108 | faila = failb; | ||
109 | failb = tmp; | ||
110 | } | ||
111 | |||
112 | if ( failb == disks-1 ) { | ||
113 | if ( faila == disks-2 ) { | ||
114 | /* P+Q failure. Just rebuild the syndrome. */ | ||
115 | raid6_call.gen_syndrome(disks, bytes, ptrs); | ||
116 | } else { | ||
117 | /* data+Q failure. Reconstruct data from P, | ||
118 | then rebuild syndrome. */ | ||
119 | /* NOT IMPLEMENTED - equivalent to RAID-5 */ | ||
120 | } | ||
121 | } else { | ||
122 | if ( failb == disks-2 ) { | ||
123 | /* data+P failure. */ | ||
124 | raid6_datap_recov(disks, bytes, faila, ptrs); | ||
125 | } else { | ||
126 | /* data+data failure. */ | ||
127 | raid6_2data_recov(disks, bytes, faila, failb, ptrs); | ||
128 | } | ||
129 | } | ||
130 | } | ||
131 | |||
132 | #endif | ||
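The table choices in raid6_2data_recov() come from solving a 2x2 system over GF(2^8) for each byte. With failed data disks a = faila < b = failb, generator g = {02}, and Pxy/Qxy the syndromes recomputed with the failed blocks zeroed:

	px = P ^ Pxy = Da ^ Db
	qd = Q ^ Qxy = g^a*Da ^ g^b*Db

	Db = pbmul[px] ^ qmul[qd]
	   = (g^(b-a) + 1)^-1 * px  ^  (g^a + g^b)^-1 * qd
	Da = Db ^ px

Since (g^a + g^b)^-1 = g^-a * (g^(b-a) + 1)^-1, substituting px and qd makes the Da terms cancel and leaves exactly Db; pbmul and qmul are the matching rows of the precomputed raid6_gfmul multiplication table.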
diff --git a/lib/raid6/sse1.c b/lib/raid6/sse1.c new file mode 100644 index 000000000000..10dd91948c07 --- /dev/null +++ b/lib/raid6/sse1.c | |||
@@ -0,0 +1,162 @@ | |||
1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright 2002 H. Peter Anvin - All Rights Reserved | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation, Inc., 53 Temple Place Ste 330, | ||
8 | * Boston MA 02111-1307, USA; either version 2 of the License, or | ||
9 | * (at your option) any later version; incorporated herein by reference. | ||
10 | * | ||
11 | * ----------------------------------------------------------------------- */ | ||
12 | |||
13 | /* | ||
14 | * raid6/sse1.c | ||
15 | * | ||
16 | * SSE-1/MMXEXT implementation of RAID-6 syndrome functions | ||
17 | * | ||
18 | * This is really an MMX implementation, but it requires SSE-1 or | ||
19 | * AMD MMXEXT for prefetch support and a few other features. The | ||
20 | * support for nontemporal memory accesses is enough to make this | ||
21 | * worthwhile as a separate implementation. | ||
22 | */ | ||
23 | |||
24 | #if defined(__i386__) && !defined(__arch_um__) | ||
25 | |||
26 | #include <linux/raid/pq.h> | ||
27 | #include "x86.h" | ||
28 | |||
29 | /* Defined in raid6/mmx.c */ | ||
30 | extern const struct raid6_mmx_constants { | ||
31 | u64 x1d; | ||
32 | } raid6_mmx_constants; | ||
33 | |||
34 | static int raid6_have_sse1_or_mmxext(void) | ||
35 | { | ||
36 | /* Not really boot_cpu but "all_cpus" */ | ||
37 | return boot_cpu_has(X86_FEATURE_MMX) && | ||
38 | (boot_cpu_has(X86_FEATURE_XMM) || | ||
39 | boot_cpu_has(X86_FEATURE_MMXEXT)); | ||
40 | } | ||
41 | |||
42 | /* | ||
43 | * Plain SSE1 implementation | ||
44 | */ | ||
45 | static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs) | ||
46 | { | ||
47 | u8 **dptr = (u8 **)ptrs; | ||
48 | u8 *p, *q; | ||
49 | int d, z, z0; | ||
50 | |||
51 | z0 = disks - 3; /* Highest data disk */ | ||
52 | p = dptr[z0+1]; /* XOR parity */ | ||
53 | q = dptr[z0+2]; /* RS syndrome */ | ||
54 | |||
55 | kernel_fpu_begin(); | ||
56 | |||
57 | asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d)); | ||
58 | asm volatile("pxor %mm5,%mm5"); /* Zero temp */ | ||
59 | |||
60 | for ( d = 0 ; d < bytes ; d += 8 ) { | ||
61 | asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); | ||
62 | asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */ | ||
63 | asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d])); | ||
64 | asm volatile("movq %mm2,%mm4"); /* Q[0] */ | ||
65 | asm volatile("movq %0,%%mm6" : : "m" (dptr[z0-1][d])); | ||
66 | for ( z = z0-2 ; z >= 0 ; z-- ) { | ||
67 | asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); | ||
68 | asm volatile("pcmpgtb %mm4,%mm5"); | ||
69 | asm volatile("paddb %mm4,%mm4"); | ||
70 | asm volatile("pand %mm0,%mm5"); | ||
71 | asm volatile("pxor %mm5,%mm4"); | ||
72 | asm volatile("pxor %mm5,%mm5"); | ||
73 | asm volatile("pxor %mm6,%mm2"); | ||
74 | asm volatile("pxor %mm6,%mm4"); | ||
75 | asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d])); | ||
76 | } | ||
77 | asm volatile("pcmpgtb %mm4,%mm5"); | ||
78 | asm volatile("paddb %mm4,%mm4"); | ||
79 | asm volatile("pand %mm0,%mm5"); | ||
80 | asm volatile("pxor %mm5,%mm4"); | ||
81 | asm volatile("pxor %mm5,%mm5"); | ||
82 | asm volatile("pxor %mm6,%mm2"); | ||
83 | asm volatile("pxor %mm6,%mm4"); | ||
84 | |||
85 | asm volatile("movntq %%mm2,%0" : "=m" (p[d])); | ||
86 | asm volatile("movntq %%mm4,%0" : "=m" (q[d])); | ||
87 | } | ||
88 | |||
89 | asm volatile("sfence" : : : "memory"); | ||
90 | kernel_fpu_end(); | ||
91 | } | ||
92 | |||
93 | const struct raid6_calls raid6_sse1x1 = { | ||
94 | raid6_sse11_gen_syndrome, | ||
95 | raid6_have_sse1_or_mmxext, | ||
96 | "sse1x1", | ||
97 | 1 /* Has cache hints */ | ||
98 | }; | ||
99 | |||
100 | /* | ||
101 | * Unrolled-by-2 SSE1 implementation | ||
102 | */ | ||
103 | static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs) | ||
104 | { | ||
105 | u8 **dptr = (u8 **)ptrs; | ||
106 | u8 *p, *q; | ||
107 | int d, z, z0; | ||
108 | |||
109 | z0 = disks - 3; /* Highest data disk */ | ||
110 | p = dptr[z0+1]; /* XOR parity */ | ||
111 | q = dptr[z0+2]; /* RS syndrome */ | ||
112 | |||
113 | kernel_fpu_begin(); | ||
114 | |||
115 | asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d)); | ||
116 | asm volatile("pxor %mm5,%mm5"); /* Zero temp */ | ||
117 | asm volatile("pxor %mm7,%mm7"); /* Zero temp */ | ||
118 | |||
119 | /* We uniformly assume a single prefetch covers at least 16 bytes */ | ||
120 | for ( d = 0 ; d < bytes ; d += 16 ) { | ||
121 | asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); | ||
122 | asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */ | ||
123 | asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8])); /* P[1] */ | ||
124 | asm volatile("movq %mm2,%mm4"); /* Q[0] */ | ||
125 | asm volatile("movq %mm3,%mm6"); /* Q[1] */ | ||
126 | for ( z = z0-1 ; z >= 0 ; z-- ) { | ||
127 | asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); | ||
128 | asm volatile("pcmpgtb %mm4,%mm5"); | ||
129 | asm volatile("pcmpgtb %mm6,%mm7"); | ||
130 | asm volatile("paddb %mm4,%mm4"); | ||
131 | asm volatile("paddb %mm6,%mm6"); | ||
132 | asm volatile("pand %mm0,%mm5"); | ||
133 | asm volatile("pand %mm0,%mm7"); | ||
134 | asm volatile("pxor %mm5,%mm4"); | ||
135 | asm volatile("pxor %mm7,%mm6"); | ||
136 | asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d])); | ||
137 | asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8])); | ||
138 | asm volatile("pxor %mm5,%mm2"); | ||
139 | asm volatile("pxor %mm7,%mm3"); | ||
140 | asm volatile("pxor %mm5,%mm4"); | ||
141 | asm volatile("pxor %mm7,%mm6"); | ||
142 | asm volatile("pxor %mm5,%mm5"); | ||
143 | asm volatile("pxor %mm7,%mm7"); | ||
144 | } | ||
145 | asm volatile("movntq %%mm2,%0" : "=m" (p[d])); | ||
146 | asm volatile("movntq %%mm3,%0" : "=m" (p[d+8])); | ||
147 | asm volatile("movntq %%mm4,%0" : "=m" (q[d])); | ||
148 | asm volatile("movntq %%mm6,%0" : "=m" (q[d+8])); | ||
149 | } | ||
150 | |||
151 | asm volatile("sfence" : :: "memory"); | ||
152 | kernel_fpu_end(); | ||
153 | } | ||
154 | |||
155 | const struct raid6_calls raid6_sse1x2 = { | ||
156 | raid6_sse12_gen_syndrome, | ||
157 | raid6_have_sse1_or_mmxext, | ||
158 | "sse1x2", | ||
159 | 1 /* Has cache hints */ | ||
160 | }; | ||
161 | |||
162 | #endif | ||
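Relative to the plain MMX version, the gain here is memory behaviour rather than arithmetic: prefetchnta pulls the next data block in ahead of use, the movntq nontemporal stores write P and Q without polluting the cache, and the closing sfence makes those weakly-ordered stores globally visible before the FPU context is released. That is what the final 1 ("Has cache hints") in the raid6_calls entries advertises.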
diff --git a/lib/raid6/sse2.c b/lib/raid6/sse2.c new file mode 100644 index 000000000000..bc2d57daa589 --- /dev/null +++ b/lib/raid6/sse2.c | |||
@@ -0,0 +1,262 @@ | |||
1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright 2002 H. Peter Anvin - All Rights Reserved | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation, Inc., 53 Temple Place Ste 330, | ||
8 | * Boston MA 02111-1307, USA; either version 2 of the License, or | ||
9 | * (at your option) any later version; incorporated herein by reference. | ||
10 | * | ||
11 | * ----------------------------------------------------------------------- */ | ||
12 | |||
13 | /* | ||
14 | * raid6/sse2.c | ||
15 | * | ||
16 | * SSE-2 implementation of RAID-6 syndrome functions | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__) | ||
21 | |||
22 | #include <linux/raid/pq.h> | ||
23 | #include "x86.h" | ||
24 | |||
25 | static const struct raid6_sse_constants { | ||
26 | u64 x1d[2]; | ||
27 | } raid6_sse_constants __attribute__((aligned(16))) = { | ||
28 | { 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL }, | ||
29 | }; | ||
30 | |||
31 | static int raid6_have_sse2(void) | ||
32 | { | ||
33 | /* Not really boot_cpu but "all_cpus" */ | ||
34 | return boot_cpu_has(X86_FEATURE_MMX) && | ||
35 | boot_cpu_has(X86_FEATURE_FXSR) && | ||
36 | boot_cpu_has(X86_FEATURE_XMM) && | ||
37 | boot_cpu_has(X86_FEATURE_XMM2); | ||
38 | } | ||
39 | |||
40 | /* | ||
41 | * Plain SSE2 implementation | ||
42 | */ | ||
43 | static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs) | ||
44 | { | ||
45 | u8 **dptr = (u8 **)ptrs; | ||
46 | u8 *p, *q; | ||
47 | int d, z, z0; | ||
48 | |||
49 | z0 = disks - 3; /* Highest data disk */ | ||
50 | p = dptr[z0+1]; /* XOR parity */ | ||
51 | q = dptr[z0+2]; /* RS syndrome */ | ||
52 | |||
53 | kernel_fpu_begin(); | ||
54 | |||
55 | asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0])); | ||
56 | asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */ | ||
57 | |||
58 | for ( d = 0 ; d < bytes ; d += 16 ) { | ||
59 | asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); | ||
60 | asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */ | ||
61 | asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d])); | ||
62 | asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */ | ||
63 | asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d])); | ||
64 | for ( z = z0-2 ; z >= 0 ; z-- ) { | ||
65 | asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); | ||
66 | asm volatile("pcmpgtb %xmm4,%xmm5"); | ||
67 | asm volatile("paddb %xmm4,%xmm4"); | ||
68 | asm volatile("pand %xmm0,%xmm5"); | ||
69 | asm volatile("pxor %xmm5,%xmm4"); | ||
70 | asm volatile("pxor %xmm5,%xmm5"); | ||
71 | asm volatile("pxor %xmm6,%xmm2"); | ||
72 | asm volatile("pxor %xmm6,%xmm4"); | ||
73 | asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d])); | ||
74 | } | ||
75 | asm volatile("pcmpgtb %xmm4,%xmm5"); | ||
76 | asm volatile("paddb %xmm4,%xmm4"); | ||
77 | asm volatile("pand %xmm0,%xmm5"); | ||
78 | asm volatile("pxor %xmm5,%xmm4"); | ||
79 | asm volatile("pxor %xmm5,%xmm5"); | ||
80 | asm volatile("pxor %xmm6,%xmm2"); | ||
81 | asm volatile("pxor %xmm6,%xmm4"); | ||
82 | |||
83 | asm volatile("movntdq %%xmm2,%0" : "=m" (p[d])); | ||
84 | asm volatile("pxor %xmm2,%xmm2"); | ||
85 | asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); | ||
86 | asm volatile("pxor %xmm4,%xmm4"); | ||
87 | } | ||
88 | |||
89 | asm volatile("sfence" : : : "memory"); | ||
90 | kernel_fpu_end(); | ||
91 | } | ||
92 | |||
93 | const struct raid6_calls raid6_sse2x1 = { | ||
94 | raid6_sse21_gen_syndrome, | ||
95 | raid6_have_sse2, | ||
96 | "sse2x1", | ||
97 | 1 /* Has cache hints */ | ||
98 | }; | ||
99 | |||
100 | /* | ||
101 | * Unrolled-by-2 SSE2 implementation | ||
102 | */ | ||
103 | static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs) | ||
104 | { | ||
105 | u8 **dptr = (u8 **)ptrs; | ||
106 | u8 *p, *q; | ||
107 | int d, z, z0; | ||
108 | |||
109 | z0 = disks - 3; /* Highest data disk */ | ||
110 | p = dptr[z0+1]; /* XOR parity */ | ||
111 | q = dptr[z0+2]; /* RS syndrome */ | ||
112 | |||
113 | kernel_fpu_begin(); | ||
114 | |||
115 | asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0])); | ||
116 | asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */ | ||
117 | asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */ | ||
118 | |||
119 | /* We uniformly assume a single prefetch covers at least 32 bytes */ | ||
120 | for ( d = 0 ; d < bytes ; d += 32 ) { | ||
121 | asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); | ||
122 | asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */ | ||
123 | asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */ | ||
124 | asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */ | ||
125 | asm volatile("movdqa %xmm3,%xmm6"); /* Q[1] */ | ||
126 | for ( z = z0-1 ; z >= 0 ; z-- ) { | ||
127 | asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); | ||
128 | asm volatile("pcmpgtb %xmm4,%xmm5"); | ||
129 | asm volatile("pcmpgtb %xmm6,%xmm7"); | ||
130 | asm volatile("paddb %xmm4,%xmm4"); | ||
131 | asm volatile("paddb %xmm6,%xmm6"); | ||
132 | asm volatile("pand %xmm0,%xmm5"); | ||
133 | asm volatile("pand %xmm0,%xmm7"); | ||
134 | asm volatile("pxor %xmm5,%xmm4"); | ||
135 | asm volatile("pxor %xmm7,%xmm6"); | ||
136 | asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d])); | ||
137 | asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16])); | ||
138 | asm volatile("pxor %xmm5,%xmm2"); | ||
139 | asm volatile("pxor %xmm7,%xmm3"); | ||
140 | asm volatile("pxor %xmm5,%xmm4"); | ||
141 | asm volatile("pxor %xmm7,%xmm6"); | ||
142 | asm volatile("pxor %xmm5,%xmm5"); | ||
143 | asm volatile("pxor %xmm7,%xmm7"); | ||
144 | } | ||
145 | asm volatile("movntdq %%xmm2,%0" : "=m" (p[d])); | ||
146 | asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16])); | ||
147 | asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); | ||
148 | asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16])); | ||
149 | } | ||
150 | |||
151 | asm volatile("sfence" : : : "memory"); | ||
152 | kernel_fpu_end(); | ||
153 | } | ||
154 | |||
155 | const struct raid6_calls raid6_sse2x2 = { | ||
156 | raid6_sse22_gen_syndrome, | ||
157 | raid6_have_sse2, | ||
158 | "sse2x2", | ||
159 | 1 /* Has cache hints */ | ||
160 | }; | ||
161 | |||
162 | #endif | ||
163 | |||
164 | #if defined(__x86_64__) && !defined(__arch_um__) | ||
165 | |||
166 | /* | ||
167 | * Unrolled-by-4 SSE2 implementation | ||
168 | */ | ||
169 | static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs) | ||
170 | { | ||
171 | u8 **dptr = (u8 **)ptrs; | ||
172 | u8 *p, *q; | ||
173 | int d, z, z0; | ||
174 | |||
175 | z0 = disks - 3; /* Highest data disk */ | ||
176 | p = dptr[z0+1]; /* XOR parity */ | ||
177 | q = dptr[z0+2]; /* RS syndrome */ | ||
178 | |||
179 | kernel_fpu_begin(); | ||
180 | |||
181 | asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0])); | ||
182 | asm volatile("pxor %xmm2,%xmm2"); /* P[0] */ | ||
183 | asm volatile("pxor %xmm3,%xmm3"); /* P[1] */ | ||
184 | asm volatile("pxor %xmm4,%xmm4"); /* Q[0] */ | ||
185 | asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */ | ||
186 | asm volatile("pxor %xmm6,%xmm6"); /* Q[1] */ | ||
187 | asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */ | ||
188 | asm volatile("pxor %xmm10,%xmm10"); /* P[2] */ | ||
189 | asm volatile("pxor %xmm11,%xmm11"); /* P[3] */ | ||
190 | asm volatile("pxor %xmm12,%xmm12"); /* Q[2] */ | ||
191 | asm volatile("pxor %xmm13,%xmm13"); /* Zero temp */ | ||
192 | asm volatile("pxor %xmm14,%xmm14"); /* Q[3] */ | ||
193 | asm volatile("pxor %xmm15,%xmm15"); /* Zero temp */ | ||
194 | |||
195 | for ( d = 0 ; d < bytes ; d += 64 ) { | ||
196 | for ( z = z0 ; z >= 0 ; z-- ) { | ||
197 | /* The second prefetch seems to improve performance... */ | ||
198 | asm volatile("prefetchnta %0" :: "m" (dptr[z][d])); | ||
199 | asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32])); | ||
200 | asm volatile("pcmpgtb %xmm4,%xmm5"); | ||
201 | asm volatile("pcmpgtb %xmm6,%xmm7"); | ||
202 | asm volatile("pcmpgtb %xmm12,%xmm13"); | ||
203 | asm volatile("pcmpgtb %xmm14,%xmm15"); | ||
204 | asm volatile("paddb %xmm4,%xmm4"); | ||
205 | asm volatile("paddb %xmm6,%xmm6"); | ||
206 | asm volatile("paddb %xmm12,%xmm12"); | ||
207 | asm volatile("paddb %xmm14,%xmm14"); | ||
208 | asm volatile("pand %xmm0,%xmm5"); | ||
209 | asm volatile("pand %xmm0,%xmm7"); | ||
210 | asm volatile("pand %xmm0,%xmm13"); | ||
211 | asm volatile("pand %xmm0,%xmm15"); | ||
212 | asm volatile("pxor %xmm5,%xmm4"); | ||
213 | asm volatile("pxor %xmm7,%xmm6"); | ||
214 | asm volatile("pxor %xmm13,%xmm12"); | ||
215 | asm volatile("pxor %xmm15,%xmm14"); | ||
216 | asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d])); | ||
217 | asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16])); | ||
218 | asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32])); | ||
219 | asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48])); | ||
220 | asm volatile("pxor %xmm5,%xmm2"); | ||
221 | asm volatile("pxor %xmm7,%xmm3"); | ||
222 | asm volatile("pxor %xmm13,%xmm10"); | ||
223 | asm volatile("pxor %xmm15,%xmm11"); | ||
224 | asm volatile("pxor %xmm5,%xmm4"); | ||
225 | asm volatile("pxor %xmm7,%xmm6"); | ||
226 | asm volatile("pxor %xmm13,%xmm12"); | ||
227 | asm volatile("pxor %xmm15,%xmm14"); | ||
228 | asm volatile("pxor %xmm5,%xmm5"); | ||
229 | asm volatile("pxor %xmm7,%xmm7"); | ||
230 | asm volatile("pxor %xmm13,%xmm13"); | ||
231 | asm volatile("pxor %xmm15,%xmm15"); | ||
232 | } | ||
233 | asm volatile("movntdq %%xmm2,%0" : "=m" (p[d])); | ||
234 | asm volatile("pxor %xmm2,%xmm2"); | ||
235 | asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16])); | ||
236 | asm volatile("pxor %xmm3,%xmm3"); | ||
237 | asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32])); | ||
238 | asm volatile("pxor %xmm10,%xmm10"); | ||
239 | asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48])); | ||
240 | asm volatile("pxor %xmm11,%xmm11"); | ||
241 | asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); | ||
242 | asm volatile("pxor %xmm4,%xmm4"); | ||
243 | asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16])); | ||
244 | asm volatile("pxor %xmm6,%xmm6"); | ||
245 | asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32])); | ||
246 | asm volatile("pxor %xmm12,%xmm12"); | ||
247 | asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48])); | ||
248 | asm volatile("pxor %xmm14,%xmm14"); | ||
249 | } | ||
250 | |||
251 | asm volatile("sfence" : : : "memory"); | ||
252 | kernel_fpu_end(); | ||
253 | } | ||
254 | |||
255 | const struct raid6_calls raid6_sse2x4 = { | ||
256 | raid6_sse24_gen_syndrome, | ||
257 | raid6_have_sse2, | ||
258 | "sse2x4", | ||
259 | 1 /* Has cache hints */ | ||
260 | }; | ||
261 | |||
262 | #endif | ||
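The unrolled-by-4 variant exists only under __x86_64__ because it needs xmm8-xmm15, which are unavailable in 32-bit mode. Note also the pxor following each movntdq: unlike the x1/x2 loops, which load the first data disk directly, this loop starts every 64-byte block with the P/Q accumulators already zeroed, so each register is cleared again immediately after its value is written out.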
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile new file mode 100644 index 000000000000..aa651697b6dc --- /dev/null +++ b/lib/raid6/test/Makefile | |||
@@ -0,0 +1,72 @@ | |||
1 | # | ||
2 | # This is a simple Makefile to test some of the RAID-6 code | ||
3 | # from userspace. | ||
4 | # | ||
5 | |||
6 | CC = gcc | ||
7 | OPTFLAGS = -O2 # Adjust as desired | ||
8 | CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS) | ||
9 | LD = ld | ||
10 | AWK = awk -f | ||
11 | AR = ar | ||
12 | RANLIB = ranlib | ||
13 | |||
14 | .c.o: | ||
15 | $(CC) $(CFLAGS) -c -o $@ $< | ||
16 | |||
17 | %.c: ../%.c | ||
18 | cp -f $< $@ | ||
19 | |||
20 | %.uc: ../%.uc | ||
21 | cp -f $< $@ | ||
22 | |||
23 | all: raid6.a raid6test | ||
24 | |||
25 | raid6.a: int1.o int2.o int4.o int8.o int16.o int32.o mmx.o sse1.o sse2.o \ | ||
26 | altivec1.o altivec2.o altivec4.o altivec8.o recov.o algos.o \ | ||
27 | tables.o | ||
28 | rm -f $@ | ||
29 | $(AR) cq $@ $^ | ||
30 | $(RANLIB) $@ | ||
31 | |||
32 | raid6test: test.c raid6.a | ||
33 | $(CC) $(CFLAGS) -o raid6test $^ | ||
34 | |||
35 | altivec1.c: altivec.uc ../unroll.awk | ||
36 | $(AWK) ../unroll.awk -vN=1 < altivec.uc > $@ | ||
37 | |||
38 | altivec2.c: altivec.uc ../unroll.awk | ||
39 | $(AWK) ../unroll.awk -vN=2 < altivec.uc > $@ | ||
40 | |||
41 | altivec4.c: altivec.uc ../unroll.awk | ||
42 | $(AWK) ../unroll.awk -vN=4 < altivec.uc > $@ | ||
43 | |||
44 | altivec8.c: altivec.uc ../unroll.awk | ||
45 | $(AWK) ../unroll.awk -vN=8 < altivec.uc > $@ | ||
46 | |||
47 | int1.c: int.uc ../unroll.awk | ||
48 | $(AWK) ../unroll.awk -vN=1 < int.uc > $@ | ||
49 | |||
50 | int2.c: int.uc ../unroll.awk | ||
51 | $(AWK) ../unroll.awk -vN=2 < int.uc > $@ | ||
52 | |||
53 | int4.c: int.uc ../unroll.awk | ||
54 | $(AWK) ../unroll.awk -vN=4 < int.uc > $@ | ||
55 | |||
56 | int8.c: int.uc ../unroll.awk | ||
57 | $(AWK) ../unroll.awk -vN=8 < int.uc > $@ | ||
58 | |||
59 | int16.c: int.uc ../unroll.awk | ||
60 | $(AWK) ../unroll.awk -vN=16 < int.uc > $@ | ||
61 | |||
62 | int32.c: int.uc ../unroll.awk | ||
63 | $(AWK) ../unroll.awk -vN=32 < int.uc > $@ | ||
64 | |||
65 | tables.c: mktables | ||
66 | ./mktables > tables.c | ||
67 | |||
68 | clean: | ||
69 | rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c tables.c raid6test | ||
70 | |||
71 | spotless: clean | ||
72 | rm -f *~ | ||
diff --git a/lib/raid6/test/test.c b/lib/raid6/test/test.c new file mode 100644 index 000000000000..7a930318b17d --- /dev/null +++ b/lib/raid6/test/test.c | |||
@@ -0,0 +1,124 @@ | |||
1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved | ||
4 | * | ||
5 | * This file is part of the Linux kernel, and is made available under | ||
6 | * the terms of the GNU General Public License version 2 or (at your | ||
7 | * option) any later version; incorporated herein by reference. | ||
8 | * | ||
9 | * ----------------------------------------------------------------------- */ | ||
10 | |||
11 | /* | ||
12 | * raid6test.c | ||
13 | * | ||
14 | * Test RAID-6 recovery with various algorithms | ||
15 | */ | ||
16 | |||
17 | #include <stdlib.h> | ||
18 | #include <stdio.h> | ||
19 | #include <string.h> | ||
20 | #include <linux/raid/pq.h> | ||
21 | |||
22 | #define NDISKS 16 /* Including P and Q */ | ||
23 | |||
24 | const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256))); | ||
25 | struct raid6_calls raid6_call; | ||
26 | |||
27 | char *dataptrs[NDISKS]; | ||
28 | char data[NDISKS][PAGE_SIZE]; | ||
29 | char recovi[PAGE_SIZE], recovj[PAGE_SIZE]; | ||
30 | |||
31 | static void makedata(void) | ||
32 | { | ||
33 | int i, j; | ||
34 | |||
35 | for (i = 0; i < NDISKS; i++) { | ||
36 | for (j = 0; j < PAGE_SIZE; j++) | ||
37 | data[i][j] = rand(); | ||
38 | |||
39 | dataptrs[i] = data[i]; | ||
40 | } | ||
41 | } | ||
42 | |||
43 | static char disk_type(int d) | ||
44 | { | ||
45 | switch (d) { | ||
46 | case NDISKS-2: | ||
47 | return 'P'; | ||
48 | case NDISKS-1: | ||
49 | return 'Q'; | ||
50 | default: | ||
51 | return 'D'; | ||
52 | } | ||
53 | } | ||
54 | |||
55 | static int test_disks(int i, int j) | ||
56 | { | ||
57 | int erra, errb; | ||
58 | |||
59 | memset(recovi, 0xf0, PAGE_SIZE); | ||
60 | memset(recovj, 0xba, PAGE_SIZE); | ||
61 | |||
62 | dataptrs[i] = recovi; | ||
63 | dataptrs[j] = recovj; | ||
64 | |||
65 | raid6_dual_recov(NDISKS, PAGE_SIZE, i, j, (void **)&dataptrs); | ||
66 | |||
67 | erra = memcmp(data[i], recovi, PAGE_SIZE); | ||
68 | errb = memcmp(data[j], recovj, PAGE_SIZE); | ||
69 | |||
70 | if (i < NDISKS-2 && j == NDISKS-1) { | ||
71 | /* We don't implement the DQ failure scenario, since it's | ||
72 | equivalent to a RAID-5 failure (XOR, then recompute Q) */ | ||
73 | erra = errb = 0; | ||
74 | } else { | ||
75 | printf("algo=%-8s faila=%3d(%c) failb=%3d(%c) %s\n", | ||
76 | raid6_call.name, | ||
77 | i, disk_type(i), | ||
78 | j, disk_type(j), | ||
79 | (!erra && !errb) ? "OK" : | ||
80 | !erra ? "ERRB" : | ||
81 | !errb ? "ERRA" : "ERRAB"); | ||
82 | } | ||
83 | |||
84 | dataptrs[i] = data[i]; | ||
85 | dataptrs[j] = data[j]; | ||
86 | |||
87 | return erra || errb; | ||
88 | } | ||
89 | |||
90 | int main(int argc, char *argv[]) | ||
91 | { | ||
92 | const struct raid6_calls *const *algo; | ||
93 | int i, j; | ||
94 | int err = 0; | ||
95 | |||
96 | makedata(); | ||
97 | |||
98 | for (algo = raid6_algos; *algo; algo++) { | ||
99 | if (!(*algo)->valid || (*algo)->valid()) { | ||
100 | raid6_call = **algo; | ||
101 | |||
102 | /* Nuke syndromes */ | ||
103 | memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE); | ||
104 | |||
105 | /* Generate assumed good syndrome */ | ||
106 | raid6_call.gen_syndrome(NDISKS, PAGE_SIZE, | ||
107 | (void **)&dataptrs); | ||
108 | |||
109 | for (i = 0; i < NDISKS-1; i++) | ||
110 | for (j = i+1; j < NDISKS; j++) | ||
111 | err += test_disks(i, j); | ||
112 | } | ||
113 | printf("\n"); | ||
114 | } | ||
115 | |||
116 | printf("\n"); | ||
117 | /* Test the best-algorithm selection path */ | ||
118 | raid6_select_algo(); | ||
119 | |||
120 | if (err) | ||
121 | printf("\n*** ERRORS FOUND ***\n"); | ||
122 | |||
123 | return err; | ||
124 | } | ||
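One subtlety above: data[][] is a single contiguous array, so the memset of 2*PAGE_SIZE starting at data[NDISKS-2] deliberately clobbers both the P row and the adjacent Q row before each algorithm regenerates them.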
diff --git a/lib/raid6/unroll.awk b/lib/raid6/unroll.awk new file mode 100644 index 000000000000..c6aa03631df8 --- /dev/null +++ b/lib/raid6/unroll.awk | |||
@@ -0,0 +1,20 @@ | |||
1 | |||
2 | # This filter requires one command line option of form -vN=n | ||
3 | # where n must be a decimal number. | ||
4 | # | ||
5 | # Repeat each input line containing $$ n times, replacing $$ with 0...n-1. | ||
6 | # Replace each $# with n, and each $* with a single $. | ||
7 | |||
8 | BEGIN { | ||
9 | n = N + 0 | ||
10 | } | ||
11 | { | ||
12 | if (/\$\$/) { rep = n } else { rep = 1 } | ||
13 | for (i = 0; i < rep; ++i) { | ||
14 | tmp = $0 | ||
15 | gsub(/\$\$/, i, tmp) | ||
16 | gsub(/\$\#/, n, tmp) | ||
17 | gsub(/\$\*/, "$", tmp) | ||
18 | print tmp | ||
19 | } | ||
20 | } | ||
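For example, run as awk -f unroll.awk -vN=2, a template line such as

	wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];

is emitted twice, once with $$ -> 0 and once with $$ -> 1, while $# would expand to 2 and $* to a literal $. This is how the Makefiles above generate intN.c and altivecN.c from the .uc templates.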
diff --git a/lib/raid6/x86.h b/lib/raid6/x86.h new file mode 100644 index 000000000000..cb2a8c91c886 --- /dev/null +++ b/lib/raid6/x86.h | |||
@@ -0,0 +1,61 @@ | |||
1 | /* ----------------------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation, Inc., 53 Temple Place Ste 330, | ||
8 | * Boston MA 02111-1307, USA; either version 2 of the License, or | ||
9 | * (at your option) any later version; incorporated herein by reference. | ||
10 | * | ||
11 | * ----------------------------------------------------------------------- */ | ||
12 | |||
13 | /* | ||
14 | * raid6/x86.h | ||
15 | * | ||
16 | * Definitions common to x86 and x86-64 RAID-6 code only | ||
17 | */ | ||
18 | |||
19 | #ifndef LINUX_RAID_RAID6X86_H | ||
20 | #define LINUX_RAID_RAID6X86_H | ||
21 | |||
22 | #if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__) | ||
23 | |||
24 | #ifdef __KERNEL__ /* Real code */ | ||
25 | |||
26 | #include <asm/i387.h> | ||
27 | |||
28 | #else /* Dummy code for user space testing */ | ||
29 | |||
30 | static inline void kernel_fpu_begin(void) | ||
31 | { | ||
32 | } | ||
33 | |||
34 | static inline void kernel_fpu_end(void) | ||
35 | { | ||
36 | } | ||
37 | |||
38 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ | ||
39 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions | ||
40 | * (fast save and restore) */ | ||
41 | #define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ | ||
42 | #define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ | ||
43 | #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ | ||
44 | |||
45 | /* Should work well enough on modern CPUs for testing */ | ||
46 | static inline int boot_cpu_has(int flag) | ||
47 | { | ||
48 | u32 eax = (flag >> 5) ? 0x80000001 : 1; | ||
49 | u32 edx; | ||
50 | |||
51 | asm volatile("cpuid" | ||
52 | : "+a" (eax), "=d" (edx) | ||
53 | : : "ecx", "ebx"); | ||
54 | |||
55 | return (edx >> (flag & 31)) & 1; | ||
56 | } | ||
57 | |||
58 | #endif /* ndef __KERNEL__ */ | ||
59 | |||
60 | #endif | ||
61 | #endif | ||
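In the user-space fallback above, the word*32+bit feature encoding selects the CPUID leaf: flag >> 5 is nonzero only for word 1, so boot_cpu_has(X86_FEATURE_MMXEXT) queries leaf 0x80000001 and tests EDX bit 22, while the word-0 features (MMX, FXSR, XMM, XMM2) are read from EDX of leaf 1.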
diff --git a/lib/random32.c b/lib/random32.c index 870dc3fc0f0f..fc3545a32771 100644 --- a/lib/random32.c +++ b/lib/random32.c | |||
@@ -127,7 +127,7 @@ core_initcall(random32_init); | |||
127 | 127 | ||
128 | /* | 128 | /* |
129 | * Generate better values after random number generator | 129 | * Generate better values after random number generator |
130 | * is fully initalized. | 130 | * is fully initialized. |
131 | */ | 131 | */ |
132 | static int __init random32_reseed(void) | 132 | static int __init random32_reseed(void) |
133 | { | 133 | { |
diff --git a/lib/rwsem.c b/lib/rwsem.c index ceba8e28807a..f236d7cd5cf3 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c | |||
@@ -36,45 +36,56 @@ struct rwsem_waiter { | |||
36 | #define RWSEM_WAITING_FOR_WRITE 0x00000002 | 36 | #define RWSEM_WAITING_FOR_WRITE 0x00000002 |
37 | }; | 37 | }; |
38 | 38 | ||
39 | /* Wake types for __rwsem_do_wake(). Note that RWSEM_WAKE_NO_ACTIVE and | ||
40 | * RWSEM_WAKE_READ_OWNED imply that the spinlock must have been kept held | ||
41 | * since the rwsem value was observed. | ||
42 | */ | ||
43 | #define RWSEM_WAKE_ANY 0 /* Wake whatever's at head of wait list */ | ||
44 | #define RWSEM_WAKE_NO_ACTIVE 1 /* rwsem was observed with no active thread */ | ||
45 | #define RWSEM_WAKE_READ_OWNED 2 /* rwsem was observed to be read owned */ | ||
46 | |||
39 | /* | 47 | /* |
40 | * handle the lock release when there are processes blocked on it that can now run | 48 |
41 | * - if we come here from up_xxxx(), then: | 49 | * - if we come here from up_xxxx(), then: |
42 | * - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed) | 50 | * - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed) |
43 | * - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so) | 51 | * - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so) |
44 | * - there must be someone on the queue | 52 | * - there must be someone on the queue |
45 | * - the spinlock must be held by the caller | 53 | * - the spinlock must be held by the caller |
46 | * - woken process blocks are discarded from the list after having task zeroed | 54 | * - woken process blocks are discarded from the list after having task zeroed |
47 | * - writers are only woken if downgrading is false | 55 | * - writers are only woken if downgrading is false |
48 | */ | 56 | */ |
49 | static inline struct rw_semaphore * | 57 | static struct rw_semaphore * |
50 | __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) | 58 | __rwsem_do_wake(struct rw_semaphore *sem, int wake_type) |
51 | { | 59 | { |
52 | struct rwsem_waiter *waiter; | 60 | struct rwsem_waiter *waiter; |
53 | struct task_struct *tsk; | 61 | struct task_struct *tsk; |
54 | struct list_head *next; | 62 | struct list_head *next; |
55 | signed long oldcount, woken, loop; | 63 | signed long oldcount, woken, loop, adjustment; |
56 | |||
57 | if (downgrading) | ||
58 | goto dont_wake_writers; | ||
59 | |||
60 | /* if we came through an up_xxxx() call, we only wake someone up | ||
61 | * if we can transition the active part of the count from 0 -> 1 | ||
62 | */ | ||
63 | try_again: | ||
64 | oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem) | ||
65 | - RWSEM_ACTIVE_BIAS; | ||
66 | if (oldcount & RWSEM_ACTIVE_MASK) | ||
67 | goto undo; | ||
68 | 64 | ||
69 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); | 65 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); |
70 | |||
71 | /* try to grant a single write lock if there's a writer at the front | ||
72 | * of the queue - note we leave the 'active part' of the count | ||
73 | * incremented by 1 and the waiting part incremented by 0x00010000 | ||
74 | */ | ||
75 | if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE)) | 66 | if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE)) |
76 | goto readers_only; | 67 | goto readers_only; |
77 | 68 | ||
69 | if (wake_type == RWSEM_WAKE_READ_OWNED) | ||
70 | /* Another active reader was observed, so wakeup is not | ||
71 | * likely to succeed. Save the atomic op. | ||
72 | */ | ||
73 | goto out; | ||
74 | |||
75 | /* There's a writer at the front of the queue - try to grant it the | ||
76 | * write lock. However, we only wake this writer if we can transition | ||
77 | * the active part of the count from 0 -> 1 | ||
78 | */ | ||
79 | adjustment = RWSEM_ACTIVE_WRITE_BIAS; | ||
80 | if (waiter->list.next == &sem->wait_list) | ||
81 | adjustment -= RWSEM_WAITING_BIAS; | ||
82 | |||
83 | try_again_write: | ||
84 | oldcount = rwsem_atomic_update(adjustment, sem) - adjustment; | ||
85 | if (oldcount & RWSEM_ACTIVE_MASK) | ||
86 | /* Someone grabbed the sem already */ | ||
87 | goto undo_write; | ||
88 | |||
78 | /* We must be careful not to touch 'waiter' after we set ->task = NULL. | 89 | /* We must be careful not to touch 'waiter' after we set ->task = NULL. |
79 | * It is allocated on the waiter's stack and may become invalid at | 90 |
80 | * any time after that point (due to a wakeup from another source). | 91 | * any time after that point (due to a wakeup from another source). |
@@ -87,18 +98,30 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) | |||
87 | put_task_struct(tsk); | 98 | put_task_struct(tsk); |
88 | goto out; | 99 | goto out; |
89 | 100 | ||
90 | /* don't want to wake any writers */ | 101 | readers_only: |
91 | dont_wake_writers: | 102 | /* If we come here from up_xxxx(), another thread might have reached |
92 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); | 103 | * rwsem_down_failed_common() before we acquired the spinlock and |
93 | if (waiter->flags & RWSEM_WAITING_FOR_WRITE) | 104 | * woken up a waiter, making it now active. We prefer to check for |
105 | * this first in order to not spend too much time with the spinlock | ||
106 | * held if we're not going to be able to wake up readers in the end. | ||
107 | * | ||
108 | * Note that we do not need to update the rwsem count: any writer | ||
109 | * trying to acquire rwsem will run rwsem_down_write_failed() due | ||
110 | * to the waiting threads and block trying to acquire the spinlock. | ||
111 | * | ||
112 | * We use a dummy atomic update in order to acquire the cache line | ||
113 | * exclusively since we expect to succeed and run the final rwsem | ||
114 | * count adjustment pretty soon. | ||
115 | */ | ||
116 | if (wake_type == RWSEM_WAKE_ANY && | ||
117 | rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS) | ||
118 | /* Someone grabbed the sem for write already */ | ||
94 | goto out; | 119 | goto out; |
95 | 120 | ||
96 | /* grant an infinite number of read locks to the readers at the front | 121 | /* Grant an infinite number of read locks to the readers at the front |
97 | * of the queue | 122 | * of the queue. Note we increment the 'active part' of the count by |
98 | * - note we increment the 'active part' of the count by the number of | 123 | * the number of readers before waking any processes up. |
99 | * readers before waking any processes up | ||
100 | */ | 124 | */ |
101 | readers_only: | ||
102 | woken = 0; | 125 | woken = 0; |
103 | do { | 126 | do { |
104 | woken++; | 127 | woken++; |
@@ -111,16 +134,15 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) | |||
111 | 134 | ||
112 | } while (waiter->flags & RWSEM_WAITING_FOR_READ); | 135 | } while (waiter->flags & RWSEM_WAITING_FOR_READ); |
113 | 136 | ||
114 | loop = woken; | 137 | adjustment = woken * RWSEM_ACTIVE_READ_BIAS; |
115 | woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS; | 138 | if (waiter->flags & RWSEM_WAITING_FOR_READ) |
116 | if (!downgrading) | 139 | /* hit end of list above */ |
117 | /* we'd already done one increment earlier */ | 140 | adjustment -= RWSEM_WAITING_BIAS; |
118 | woken -= RWSEM_ACTIVE_BIAS; | ||
119 | 141 | ||
120 | rwsem_atomic_add(woken, sem); | 142 | rwsem_atomic_add(adjustment, sem); |
121 | 143 | ||
122 | next = sem->wait_list.next; | 144 | next = sem->wait_list.next; |
123 | for (; loop > 0; loop--) { | 145 | for (loop = woken; loop > 0; loop--) { |
124 | waiter = list_entry(next, struct rwsem_waiter, list); | 146 | waiter = list_entry(next, struct rwsem_waiter, list); |
125 | next = waiter->list.next; | 147 | next = waiter->list.next; |
126 | tsk = waiter->task; | 148 | tsk = waiter->task; |
@@ -138,10 +160,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) | |||
138 | 160 | ||
139 | /* undo the change to the active count, but check for a transition | 161 | /* undo the change to the active count, but check for a transition |
140 | * 1->0 */ | 162 | * 1->0 */ |
141 | undo: | 163 | undo_write: |
142 | if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK) | 164 | if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK) |
143 | goto out; | 165 | goto out; |
144 | goto try_again; | 166 | goto try_again_write; |
145 | } | 167 | } |
146 | 168 | ||
147 | /* | 169 | /* |
@@ -149,8 +171,9 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) | |||
149 | */ | 171 | */ |
150 | static struct rw_semaphore __sched * | 172 | static struct rw_semaphore __sched * |
151 | rwsem_down_failed_common(struct rw_semaphore *sem, | 173 | rwsem_down_failed_common(struct rw_semaphore *sem, |
152 | struct rwsem_waiter *waiter, signed long adjustment) | 174 | unsigned int flags, signed long adjustment) |
153 | { | 175 | { |
176 | struct rwsem_waiter waiter; | ||
154 | struct task_struct *tsk = current; | 177 | struct task_struct *tsk = current; |
155 | signed long count; | 178 | signed long count; |
156 | 179 | ||
@@ -158,23 +181,34 @@ rwsem_down_failed_common(struct rw_semaphore *sem, | |||
158 | 181 | ||
159 | /* set up my own style of waitqueue */ | 182 | /* set up my own style of waitqueue */ |
160 | spin_lock_irq(&sem->wait_lock); | 183 | spin_lock_irq(&sem->wait_lock); |
161 | waiter->task = tsk; | 184 | waiter.task = tsk; |
185 | waiter.flags = flags; | ||
162 | get_task_struct(tsk); | 186 | get_task_struct(tsk); |
163 | 187 | ||
164 | list_add_tail(&waiter->list, &sem->wait_list); | 188 | if (list_empty(&sem->wait_list)) |
189 | adjustment += RWSEM_WAITING_BIAS; | ||
190 | list_add_tail(&waiter.list, &sem->wait_list); | ||
165 | 191 | ||
166 | /* we're now waiting on the lock, but no longer actively read-locking */ | 192 | /* we're now waiting on the lock, but no longer actively locking */ |
167 | count = rwsem_atomic_update(adjustment, sem); | 193 | count = rwsem_atomic_update(adjustment, sem); |
168 | 194 | ||
169 | /* if there are no active locks, wake the front queued process(es) up */ | 195 | /* If there are no active locks, wake the front queued process(es) up. |
170 | if (!(count & RWSEM_ACTIVE_MASK)) | 196 | * |
171 | sem = __rwsem_do_wake(sem, 0); | 197 | * Alternatively, if we're called from a failed down_write(), there |
198 | * were already threads queued before us and there are no active | ||
199 | * writers, the lock must be read owned; so we try to wake any read | ||
200 | * locks that were queued ahead of us. */ | ||
201 | if (count == RWSEM_WAITING_BIAS) | ||
202 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE); | ||
203 | else if (count > RWSEM_WAITING_BIAS && | ||
204 | adjustment == -RWSEM_ACTIVE_WRITE_BIAS) | ||
205 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); | ||
172 | 206 | ||
173 | spin_unlock_irq(&sem->wait_lock); | 207 | spin_unlock_irq(&sem->wait_lock); |
174 | 208 | ||
175 | /* wait to be given the lock */ | 209 | /* wait to be given the lock */ |
176 | for (;;) { | 210 | for (;;) { |
177 | if (!waiter->task) | 211 | if (!waiter.task) |
178 | break; | 212 | break; |
179 | schedule(); | 213 | schedule(); |
180 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | 214 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); |
@@ -191,12 +225,8 @@ rwsem_down_failed_common(struct rw_semaphore *sem, | |||
191 | asmregparm struct rw_semaphore __sched * | 225 | asmregparm struct rw_semaphore __sched * |
192 | rwsem_down_read_failed(struct rw_semaphore *sem) | 226 | rwsem_down_read_failed(struct rw_semaphore *sem) |
193 | { | 227 | { |
194 | struct rwsem_waiter waiter; | 228 | return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ, |
195 | 229 | -RWSEM_ACTIVE_READ_BIAS); | |
196 | waiter.flags = RWSEM_WAITING_FOR_READ; | ||
197 | rwsem_down_failed_common(sem, &waiter, | ||
198 | RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS); | ||
199 | return sem; | ||
200 | } | 230 | } |
201 | 231 | ||
202 | /* | 232 | /* |
@@ -205,12 +235,8 @@ rwsem_down_read_failed(struct rw_semaphore *sem) | |||
205 | asmregparm struct rw_semaphore __sched * | 235 | asmregparm struct rw_semaphore __sched * |
206 | rwsem_down_write_failed(struct rw_semaphore *sem) | 236 | rwsem_down_write_failed(struct rw_semaphore *sem) |
207 | { | 237 | { |
208 | struct rwsem_waiter waiter; | 238 | return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE, |
209 | 239 | -RWSEM_ACTIVE_WRITE_BIAS); | |
210 | waiter.flags = RWSEM_WAITING_FOR_WRITE; | ||
211 | rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS); | ||
212 | |||
213 | return sem; | ||
214 | } | 240 | } |
215 | 241 | ||
216 | /* | 242 | /* |
@@ -225,7 +251,7 @@ asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) | |||
225 | 251 | ||
226 | /* do nothing if list empty */ | 252 | /* do nothing if list empty */ |
227 | if (!list_empty(&sem->wait_list)) | 253 | if (!list_empty(&sem->wait_list)) |
228 | sem = __rwsem_do_wake(sem, 0); | 254 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); |
229 | 255 | ||
230 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 256 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
231 | 257 | ||
@@ -245,7 +271,7 @@ asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) | |||
245 | 271 | ||
246 | /* do nothing if list empty */ | 272 | /* do nothing if list empty */ |
247 | if (!list_empty(&sem->wait_list)) | 273 | if (!list_empty(&sem->wait_list)) |
248 | sem = __rwsem_do_wake(sem, 1); | 274 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); |
249 | 275 | ||
250 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 276 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
251 | 277 | ||
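The wake-type decisions above read straight off the rwsem count layout. A sketch of the 32-bit bias constants this code assumes (taken from the architecture's asm/rwsem.h; 64-bit layouts widen the fields but the logic is identical):

	/* Sketch of the i386 asm/rwsem.h values, for reference only. */
	#define RWSEM_ACTIVE_BIAS	0x00000001L	/* one active reader or writer */
	#define RWSEM_ACTIVE_MASK	0x0000ffffL
	#define RWSEM_WAITING_BIAS	(-0x00010000L)	/* non-empty wait list */
	#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
	#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

After a blocked thread has queued itself, count == RWSEM_WAITING_BIAS therefore means there are waiters but no active holders (the RWSEM_WAKE_NO_ACTIVE case), while count > RWSEM_WAITING_BIAS on a failed down_write() (adjustment == -RWSEM_ACTIVE_WRITE_BIAS) implies the lock is still read owned (the RWSEM_WAKE_READ_OWNED case).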
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 9afa25b52a83..a5ec42868f99 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <linux/scatterlist.h> | 11 | #include <linux/scatterlist.h> |
12 | #include <linux/highmem.h> | 12 | #include <linux/highmem.h> |
13 | #include <linux/kmemleak.h> | ||
13 | 14 | ||
14 | /** | 15 | /** |
15 | * sg_next - return the next scatterlist entry in a list | 16 | * sg_next - return the next scatterlist entry in a list |
@@ -115,17 +116,29 @@ EXPORT_SYMBOL(sg_init_one); | |||
115 | */ | 116 | */ |
116 | static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) | 117 | static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) |
117 | { | 118 | { |
118 | if (nents == SG_MAX_SINGLE_ALLOC) | 119 | if (nents == SG_MAX_SINGLE_ALLOC) { |
119 | return (struct scatterlist *) __get_free_page(gfp_mask); | 120 | /* |
120 | else | 121 | * Kmemleak doesn't track page allocations as they are not |
122 | * commonly used (in a raw form) for kernel data structures. | ||
123 | * As we chain together a list of pages and then a normal | ||
124 | * kmalloc (tracked by kmemleak), in order for that last | ||
125 | * allocation not to become decoupled (and thus a | ||
126 | * false-positive) we need to inform kmemleak of all the | ||
127 | * intermediate allocations. | ||
128 | */ | ||
129 | void *ptr = (void *) __get_free_page(gfp_mask); | ||
130 | kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask); | ||
131 | return ptr; | ||
132 | } else | ||
121 | return kmalloc(nents * sizeof(struct scatterlist), gfp_mask); | 133 | return kmalloc(nents * sizeof(struct scatterlist), gfp_mask); |
122 | } | 134 | } |
123 | 135 | ||
124 | static void sg_kfree(struct scatterlist *sg, unsigned int nents) | 136 | static void sg_kfree(struct scatterlist *sg, unsigned int nents) |
125 | { | 137 | { |
126 | if (nents == SG_MAX_SINGLE_ALLOC) | 138 | if (nents == SG_MAX_SINGLE_ALLOC) { |
139 | kmemleak_free(sg); | ||
127 | free_page((unsigned long) sg); | 140 | free_page((unsigned long) sg); |
128 | else | 141 | } else |
129 | kfree(sg); | 142 | kfree(sg); |
130 | } | 143 | } |
131 | 144 | ||
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index a009055140ec..34e3082632d8 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -50,19 +50,11 @@ | |||
50 | */ | 50 | */ |
51 | #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) | 51 | #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) |
52 | 52 | ||
53 | /* | ||
54 | * Enumeration for sync targets | ||
55 | */ | ||
56 | enum dma_sync_target { | ||
57 | SYNC_FOR_CPU = 0, | ||
58 | SYNC_FOR_DEVICE = 1, | ||
59 | }; | ||
60 | |||
61 | int swiotlb_force; | 53 | int swiotlb_force; |
62 | 54 | ||
63 | /* | 55 | /* |
64 | * Used to do a quick range check in unmap_single and | 56 | * Used to do a quick range check in swiotlb_tbl_unmap_single and |
65 | * sync_single_*, to see if the memory was in fact allocated by this | 57 | * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this |
66 | * API. | 58 | * API. |
67 | */ | 59 | */ |
68 | static char *io_tlb_start, *io_tlb_end; | 60 | static char *io_tlb_start, *io_tlb_end; |
@@ -140,28 +132,14 @@ void swiotlb_print_info(void) | |||
140 | (unsigned long long)pend); | 132 | (unsigned long long)pend); |
141 | } | 133 | } |
142 | 134 | ||
143 | /* | 135 | void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) |
144 | * Statically reserve bounce buffer space and initialize bounce buffer data | ||
145 | * structures for the software IO TLB used to implement the DMA API. | ||
146 | */ | ||
147 | void __init | ||
148 | swiotlb_init_with_default_size(size_t default_size, int verbose) | ||
149 | { | 136 | { |
150 | unsigned long i, bytes; | 137 | unsigned long i, bytes; |
151 | 138 | ||
152 | if (!io_tlb_nslabs) { | 139 | bytes = nslabs << IO_TLB_SHIFT; |
153 | io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); | ||
154 | io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); | ||
155 | } | ||
156 | |||
157 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; | ||
158 | 140 | ||
159 | /* | 141 | io_tlb_nslabs = nslabs; |
160 | * Get IO TLB memory from the low pages | 142 | io_tlb_start = tlb; |
161 | */ | ||
162 | io_tlb_start = alloc_bootmem_low_pages(bytes); | ||
163 | if (!io_tlb_start) | ||
164 | panic("Cannot allocate SWIOTLB buffer"); | ||
165 | io_tlb_end = io_tlb_start + bytes; | 143 | io_tlb_end = io_tlb_start + bytes; |
166 | 144 | ||
167 | /* | 145 | /* |
@@ -185,6 +163,32 @@ swiotlb_init_with_default_size(size_t default_size, int verbose) | |||
185 | swiotlb_print_info(); | 163 | swiotlb_print_info(); |
186 | } | 164 | } |
187 | 165 | ||
166 | /* | ||
167 | * Statically reserve bounce buffer space and initialize bounce buffer data | ||
168 | * structures for the software IO TLB used to implement the DMA API. | ||
169 | */ | ||
170 | void __init | ||
171 | swiotlb_init_with_default_size(size_t default_size, int verbose) | ||
172 | { | ||
173 | unsigned long bytes; | ||
174 | |||
175 | if (!io_tlb_nslabs) { | ||
176 | io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); | ||
177 | io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); | ||
178 | } | ||
179 | |||
180 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; | ||
181 | |||
182 | /* | ||
183 | * Get IO TLB memory from the low pages | ||
184 | */ | ||
185 | io_tlb_start = alloc_bootmem_low_pages(bytes); | ||
186 | if (!io_tlb_start) | ||
187 | panic("Cannot allocate SWIOTLB buffer"); | ||
188 | |||
189 | swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose); | ||
190 | } | ||
191 | |||
188 | void __init | 192 | void __init |
189 | swiotlb_init(int verbose) | 193 | swiotlb_init(int verbose) |
190 | { | 194 | { |
@@ -323,8 +327,8 @@ static int is_swiotlb_buffer(phys_addr_t paddr) | |||
323 | /* | 327 | /* |
324 | * Bounce: copy the swiotlb buffer back to the original dma location | 328 | * Bounce: copy the swiotlb buffer back to the original dma location |
325 | */ | 329 | */ |
326 | static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, | 330 | void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, |
327 | enum dma_data_direction dir) | 331 | enum dma_data_direction dir) |
328 | { | 332 | { |
329 | unsigned long pfn = PFN_DOWN(phys); | 333 | unsigned long pfn = PFN_DOWN(phys); |
330 | 334 | ||
@@ -360,26 +364,25 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, | |||
360 | memcpy(phys_to_virt(phys), dma_addr, size); | 364 | memcpy(phys_to_virt(phys), dma_addr, size); |
361 | } | 365 | } |
362 | } | 366 | } |
367 | EXPORT_SYMBOL_GPL(swiotlb_bounce); | ||
363 | 368 | ||
364 | /* | 369 | void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, |
365 | * Allocates bounce buffer and returns its kernel virtual address. | 370 | phys_addr_t phys, size_t size, |
366 | */ | 371 | enum dma_data_direction dir) |
367 | static void * | ||
368 | map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir) | ||
369 | { | 372 | { |
370 | unsigned long flags; | 373 | unsigned long flags; |
371 | char *dma_addr; | 374 | char *dma_addr; |
372 | unsigned int nslots, stride, index, wrap; | 375 | unsigned int nslots, stride, index, wrap; |
373 | int i; | 376 | int i; |
374 | unsigned long start_dma_addr; | ||
375 | unsigned long mask; | 377 | unsigned long mask; |
376 | unsigned long offset_slots; | 378 | unsigned long offset_slots; |
377 | unsigned long max_slots; | 379 | unsigned long max_slots; |
378 | 380 | ||
379 | mask = dma_get_seg_boundary(hwdev); | 381 | mask = dma_get_seg_boundary(hwdev); |
380 | start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask; | ||
381 | 382 | ||
382 | offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; | 383 | tbl_dma_addr &= mask; |
384 | |||
385 | offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; | ||
383 | 386 | ||
384 | /* | 387 | /* |
385 | * Carefully handle integer overflow which can occur when mask == ~0UL. | 388 | * Carefully handle integer overflow which can occur when mask == ~0UL. |
@@ -466,12 +469,27 @@ found: | |||
466 | 469 | ||
467 | return dma_addr; | 470 | return dma_addr; |
468 | } | 471 | } |
472 | EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single); | ||
473 | |||
474 | /* | ||
475 | * Allocates bounce buffer and returns its kernel virtual address. | ||
476 | */ | ||
477 | |||
478 | static void * | ||
479 | map_single(struct device *hwdev, phys_addr_t phys, size_t size, | ||
480 | enum dma_data_direction dir) | ||
481 | { | ||
482 | dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start); | ||
483 | |||
484 | return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir); | ||
485 | } | ||
469 | 486 | ||
470 | /* | 487 | /* |
471 | * dma_addr is the kernel virtual address of the bounce buffer to unmap. | 488 | * dma_addr is the kernel virtual address of the bounce buffer to unmap. |
472 | */ | 489 | */ |
473 | static void | 490 | void |
474 | do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) | 491 | swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size, |
492 | enum dma_data_direction dir) | ||
475 | { | 493 | { |
476 | unsigned long flags; | 494 | unsigned long flags; |
477 | int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; | 495 | int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; |
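Passing the table's DMA address into swiotlb_tbl_map_single() explicitly, together with swiotlb_init_with_tbl() earlier in this diff, is what lets a secondary user (the Xen swiotlb work these EXPORT_SYMBOL_GPLs prepare for) run the slot allocator against an IO TLB it set up itself. A hypothetical caller sketch, not part of this patch:

	/* 'my_tbl_dma' is assumed: the bus address of a caller-owned
	 * table previously handed to swiotlb_init_with_tbl(). */
	void *vaddr = swiotlb_tbl_map_single(hwdev, my_tbl_dma, phys,
					     size, DMA_TO_DEVICE);
	if (vaddr) {
		/* ... program the device with the bus address, do the I/O ... */
		swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
	}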
@@ -509,10 +527,12 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) | |||
509 | } | 527 | } |
510 | spin_unlock_irqrestore(&io_tlb_lock, flags); | 528 | spin_unlock_irqrestore(&io_tlb_lock, flags); |
511 | } | 529 | } |
530 | EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single); | ||
512 | 531 | ||
513 | static void | 532 | void |
514 | sync_single(struct device *hwdev, char *dma_addr, size_t size, | 533 | swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size, |
515 | int dir, int target) | 534 | enum dma_data_direction dir, |
535 | enum dma_sync_target target) | ||
516 | { | 536 | { |
517 | int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; | 537 | int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; |
518 | phys_addr_t phys = io_tlb_orig_addr[index]; | 538 | phys_addr_t phys = io_tlb_orig_addr[index]; |
@@ -536,6 +556,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size, | |||
536 | BUG(); | 556 | BUG(); |
537 | } | 557 | } |
538 | } | 558 | } |
559 | EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single); | ||
539 | 560 | ||
540 | void * | 561 | void * |
541 | swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 562 | swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
@@ -559,8 +580,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
559 | } | 580 | } |
560 | if (!ret) { | 581 | if (!ret) { |
561 | /* | 582 | /* |
562 | * We are either out of memory or the device can't DMA | 583 | * We are either out of memory or the device can't DMA to |
563 | * to GFP_DMA memory; fall back on map_single(), which | 584 | * GFP_DMA memory; fall back on map_single(), which |
564 | * will grab memory from the lowest available address range. | 585 | * will grab memory from the lowest available address range. |
565 | */ | 586 | */ |
566 | ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE); | 587 | ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE); |
@@ -578,7 +599,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
578 | (unsigned long long)dev_addr); | 599 | (unsigned long long)dev_addr); |
579 | 600 | ||
580 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ | 601 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ |
581 | do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE); | 602 | swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE); |
582 | return NULL; | 603 | return NULL; |
583 | } | 604 | } |
584 | *dma_handle = dev_addr; | 605 | *dma_handle = dev_addr; |
@@ -596,13 +617,14 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, | |||
596 | if (!is_swiotlb_buffer(paddr)) | 617 | if (!is_swiotlb_buffer(paddr)) |
597 | free_pages((unsigned long)vaddr, get_order(size)); | 618 | free_pages((unsigned long)vaddr, get_order(size)); |
598 | else | 619 | else |
599 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ | 620 | /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */ |
600 | do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); | 621 | swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); |
601 | } | 622 | } |
602 | EXPORT_SYMBOL(swiotlb_free_coherent); | 623 | EXPORT_SYMBOL(swiotlb_free_coherent); |
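Both "DMA_TO_DEVICE to avoid memcpy" comments refer to the same trick: the unmap path copies bounce data back to the original buffer only for the from-device directions, so passing DMA_TO_DEVICE when freeing skips a pointless copy. The guard inside swiotlb_tbl_unmap_single() looks roughly like this (paraphrased from this file):

        /* Copy back only if the device may have written into the slot. */
        if (phys && (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
                swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);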
603 | 624 | ||
604 | static void | 625 | static void |
605 | swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) | 626 | swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, |
627 | int do_panic) | ||
606 | { | 628 | { |
607 | /* | 629 | /* |
608 | * Ran out of IOMMU space for this operation. This is very bad. | 630 | * Ran out of IOMMU space for this operation. This is very bad. |
@@ -680,14 +702,14 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page); | |||
680 | * whatever the device wrote there. | 702 | * whatever the device wrote there. |
681 | */ | 703 | */ |
682 | static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, | 704 | static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, |
683 | size_t size, int dir) | 705 | size_t size, enum dma_data_direction dir) |
684 | { | 706 | { |
685 | phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); | 707 | phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); |
686 | 708 | ||
687 | BUG_ON(dir == DMA_NONE); | 709 | BUG_ON(dir == DMA_NONE); |
688 | 710 | ||
689 | if (is_swiotlb_buffer(paddr)) { | 711 | if (is_swiotlb_buffer(paddr)) { |
690 | do_unmap_single(hwdev, phys_to_virt(paddr), size, dir); | 712 | swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir); |
691 | return; | 713 | return; |
692 | } | 714 | } |
693 | 715 | ||
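unmap_single() dispatches on whether the bus address actually points into the bounce pool; the same test guards every sync and unmap path below. is_swiotlb_buffer() is essentially a range check against the pool bounds (paraphrased from this file's definition):

        /* A physical address is ours iff it lies in [io_tlb_start, io_tlb_end). */
        static int is_swiotlb_buffer(phys_addr_t paddr)
        {
                return paddr >= virt_to_phys(io_tlb_start) &&
                       paddr < virt_to_phys(io_tlb_end);
        }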
@@ -723,14 +745,16 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page); | |||
723 | */ | 745 | */ |
724 | static void | 746 | static void |
725 | swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, | 747 | swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, |
726 | size_t size, int dir, int target) | 748 | size_t size, enum dma_data_direction dir, |
749 | enum dma_sync_target target) | ||
727 | { | 750 | { |
728 | phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); | 751 | phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); |
729 | 752 | ||
730 | BUG_ON(dir == DMA_NONE); | 753 | BUG_ON(dir == DMA_NONE); |
731 | 754 | ||
732 | if (is_swiotlb_buffer(paddr)) { | 755 | if (is_swiotlb_buffer(paddr)) { |
733 | sync_single(hwdev, phys_to_virt(paddr), size, dir, target); | 756 | swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir, |
757 | target); | ||
734 | return; | 758 | return; |
735 | } | 759 | } |
736 | 760 | ||
@@ -809,7 +833,7 @@ EXPORT_SYMBOL(swiotlb_map_sg_attrs); | |||
809 | 833 | ||
810 | int | 834 | int |
811 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | 835 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, |
812 | int dir) | 836 | enum dma_data_direction dir) |
813 | { | 837 | { |
814 | return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); | 838 | return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); |
815 | } | 839 | } |
@@ -836,7 +860,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); | |||
836 | 860 | ||
837 | void | 861 | void |
838 | swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | 862 | swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, |
839 | int dir) | 863 | enum dma_data_direction dir) |
840 | { | 864 | { |
841 | return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); | 865 | return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); |
842 | } | 866 | } |
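swiotlb_map_sg() and swiotlb_unmap_sg() remain thin wrappers over the *_attrs variants with NULL attributes; only their direction parameter gains the enum type. A hedged usage sketch ('sgl' and 'nents' are hypothetical):

        /* Sketch: map a scatterlist, do I/O, unmap with the same direction. */
        int mapped = swiotlb_map_sg(hwdev, sgl, nents, DMA_TO_DEVICE);

        if (!mapped)
                return -ENOMEM;                 /* nothing could be mapped */
        /* ... issue I/O over the 'mapped' entries ... */
        swiotlb_unmap_sg(hwdev, sgl, nents, DMA_TO_DEVICE);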
@@ -851,7 +875,8 @@ EXPORT_SYMBOL(swiotlb_unmap_sg); | |||
851 | */ | 875 | */ |
852 | static void | 876 | static void |
853 | swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, | 877 | swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, |
854 | int nelems, int dir, int target) | 878 | int nelems, enum dma_data_direction dir, |
879 | enum dma_sync_target target) | ||
855 | { | 880 | { |
856 | struct scatterlist *sg; | 881 | struct scatterlist *sg; |
857 | int i; | 882 | int i; |
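The thread running through all of these hunks is the replacement of bare int direction/target parameters with the kernel's existing enums, so a mismatched argument is caught by the compiler instead of silently accepted. For reference, the values involved (quoted from the headers of this era; treat as illustrative):

        enum dma_data_direction {
                DMA_BIDIRECTIONAL = 0,
                DMA_TO_DEVICE = 1,
                DMA_FROM_DEVICE = 2,
                DMA_NONE = 3,
        };

        enum dma_sync_target {
                SYNC_FOR_CPU = 0,
                SYNC_FOR_DEVICE = 1,
        };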
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index b8a2f549ab0e..7af9d841c43b 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -146,19 +146,16 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res) | |||
146 | { | 146 | { |
147 | char *tail; | 147 | char *tail; |
148 | unsigned long val; | 148 | unsigned long val; |
149 | size_t len; | ||
150 | 149 | ||
151 | *res = 0; | 150 | *res = 0; |
152 | len = strlen(cp); | 151 | if (!*cp) |
153 | if (len == 0) | ||
154 | return -EINVAL; | 152 | return -EINVAL; |
155 | 153 | ||
156 | val = simple_strtoul(cp, &tail, base); | 154 | val = simple_strtoul(cp, &tail, base); |
157 | if (tail == cp) | 155 | if (tail == cp) |
158 | return -EINVAL; | 156 | return -EINVAL; |
159 | 157 | ||
160 | if ((*tail == '\0') || | 158 | if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) { |
161 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { | ||
162 | *res = val; | 159 | *res = val; |
163 | return 0; | 160 | return 0; |
164 | } | 161 | } |
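The rewrite drops the strlen() pass entirely: an empty string is caught by !*cp, and a trailing newline is accepted only when it is the very last character, via the tail[0]/tail[1] pair. A standalone userspace analogue of the same logic (hypothetical name, plain libc, with an added errno overflow check):

        #include <errno.h>
        #include <stdlib.h>

        /* Sketch: accepts "123" and "123\n", rejects everything else. */
        static int strict_parse_ul(const char *cp, int base, unsigned long *res)
        {
                char *tail;
                unsigned long val;

                *res = 0;
                if (!*cp)
                        return -EINVAL;         /* empty string */
                errno = 0;
                val = strtoul(cp, &tail, base);
                if (tail == cp || errno)
                        return -EINVAL;         /* no digits, or overflow */
                if (tail[0] == '\0' || (tail[0] == '\n' && tail[1] == '\0')) {
                        *res = val;
                        return 0;
                }
                return -EINVAL;                 /* trailing garbage */
        }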
@@ -220,18 +217,15 @@ int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res) | |||
220 | { | 217 | { |
221 | char *tail; | 218 | char *tail; |
222 | unsigned long long val; | 219 | unsigned long long val; |
223 | size_t len; | ||
224 | 220 | ||
225 | *res = 0; | 221 | *res = 0; |
226 | len = strlen(cp); | 222 | if (!*cp) |
227 | if (len == 0) | ||
228 | return -EINVAL; | 223 | return -EINVAL; |
229 | 224 | ||
230 | val = simple_strtoull(cp, &tail, base); | 225 | val = simple_strtoull(cp, &tail, base); |
231 | if (tail == cp) | 226 | if (tail == cp) |
232 | return -EINVAL; | 227 | return -EINVAL; |
233 | if ((*tail == '\0') || | 228 | if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) { |
234 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { | ||
235 | *res = val; | 229 | *res = val; |
236 | return 0; | 230 | return 0; |
237 | } | 231 | } |
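strict_strtoull() gets the identical simplification. The behavior the two helpers now share, by example (illustrative):

        /* Accepted:  "42"      -> 42
         * Accepted:  "42\n"    -> 42      (single trailing newline, as sysfs writes produce)
         * Rejected:  ""        -> -EINVAL
         * Rejected:  "42 "     -> -EINVAL (trailing space)
         * Rejected:  "42\n\n"  -> -EINVAL (newline is not the final character)
         */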
@@ -980,6 +974,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr, | |||
980 | * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15] | 974 | * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15] |
981 | * little endian output byte order is: | 975 | * little endian output byte order is: |
982 | * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15] | 976 | * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15] |
977 | * - 'V' For a struct va_format, which contains a format string pointer and a | ||
978 | * va_list pointer; call vsnprintf(->format, *->va_list). | ||
979 | * Implements a "recursive vsnprintf". | ||
980 | * Do not use this feature without some mechanism to verify the | ||
981 | * correctness of the format string and va_list arguments. | ||
983 | * | 982 | * |
984 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 | 983 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 |
985 | * function pointers are really function descriptors, which contain a | 984 | * function pointers are really function descriptors, which contain a |
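A minimal sketch of what the new 'V' extension enables, assuming the struct va_format layout this code dereferences (a 'fmt' string pointer plus a 'va' va_list pointer); the helper name is hypothetical:

        /* Sketch: forward a caller's format and arguments via a single %pV. */
        void my_dev_err(struct device *dev, const char *fmt, ...)
        {
                struct va_format vaf;
                va_list args;

                va_start(args, fmt);
                vaf.fmt = fmt;
                vaf.va = &args;
                printk(KERN_ERR "%s: %pV", dev_name(dev), &vaf);
                va_end(args);
        }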
@@ -1025,6 +1024,10 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
1025 | break; | 1024 | break; |
1026 | case 'U': | 1025 | case 'U': |
1027 | return uuid_string(buf, end, ptr, spec, fmt); | 1026 | return uuid_string(buf, end, ptr, spec, fmt); |
1027 | case 'V': | ||
1028 | return buf + vsnprintf(buf, end - buf, | ||
1029 | ((struct va_format *)ptr)->fmt, | ||
1030 | *(((struct va_format *)ptr)->va)); | ||
1028 | } | 1031 | } |
1029 | spec.flags |= SMALL; | 1032 | spec.flags |= SMALL; |
1030 | if (spec.field_width == -1) { | 1033 | if (spec.field_width == -1) { |
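The implementation really is a recursive vsnprintf() on the embedded format and va_list, which is why the documentation above insists the arguments be trusted. One general C caveat worth noting (not specific to this patch): a va_list may only be traversed once, so a caller that still needs the arguments afterwards should hand %pV a va_copy:

        va_list args, cpy;
        struct va_format vaf;

        va_start(args, fmt);
        va_copy(cpy, args);                     /* let %pV consume the copy */
        vaf.fmt = fmt;
        vaf.va = &cpy;
        printk(KERN_INFO "%pV", &vaf);
        va_end(cpy);
        /* 'args' remains usable here */
        va_end(args);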