Diffstat (limited to 'lib')
147 files changed, 10969 insertions, 2619 deletions
diff --git a/lib/.gitignore b/lib/.gitignore
index 3bef1ea94c99..09aae85418ab 100644
--- a/lib/.gitignore
+++ b/lib/.gitignore
@@ -3,4 +3,4 @@ | |||
3 | # | 3 | # |
4 | gen_crc32table | 4 | gen_crc32table |
5 | crc32table.h | 5 | crc32table.h |
6 | 6 | oid_registry_data.c | |
diff --git a/lib/Kconfig b/lib/Kconfig
index 6c695ff9caba..4b31a46fb307 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -16,9 +16,32 @@ config BITREVERSE | |||
16 | config RATIONAL | 16 | config RATIONAL |
17 | boolean | 17 | boolean |
18 | 18 | ||
19 | config GENERIC_STRNCPY_FROM_USER | ||
20 | bool | ||
21 | |||
22 | config GENERIC_STRNLEN_USER | ||
23 | bool | ||
24 | |||
19 | config GENERIC_FIND_FIRST_BIT | 25 | config GENERIC_FIND_FIRST_BIT |
20 | bool | 26 | bool |
21 | 27 | ||
28 | config NO_GENERIC_PCI_IOPORT_MAP | ||
29 | bool | ||
30 | |||
31 | config GENERIC_PCI_IOMAP | ||
32 | bool | ||
33 | |||
34 | config GENERIC_IOMAP | ||
35 | bool | ||
36 | select GENERIC_PCI_IOMAP | ||
37 | |||
38 | config GENERIC_IO | ||
39 | boolean | ||
40 | default n | ||
41 | |||
42 | config STMP_DEVICE | ||
43 | bool | ||
44 | |||
22 | config CRC_CCITT | 45 | config CRC_CCITT |
23 | tristate "CRC-CCITT functions" | 46 | tristate "CRC-CCITT functions" |
24 | help | 47 | help |
@@ -51,14 +74,71 @@ config CRC_ITU_T | |||
51 | functions require M here. | 74 | functions require M here. |
52 | 75 | ||
53 | config CRC32 | 76 | config CRC32 |
54 | tristate "CRC32 functions" | 77 | tristate "CRC32/CRC32c functions" |
55 | default y | 78 | default y |
56 | select BITREVERSE | 79 | select BITREVERSE |
57 | help | 80 | help |
58 | This option is provided for the case where no in-kernel-tree | 81 | This option is provided for the case where no in-kernel-tree |
59 | modules require CRC32 functions, but a module built outside the | 82 | modules require CRC32/CRC32c functions, but a module built outside |
60 | kernel tree does. Such modules that use library CRC32 functions | 83 | the kernel tree does. Such modules that use library CRC32/CRC32c |
61 | require M here. | 84 | functions require M here. |
85 | |||
86 | config CRC32_SELFTEST | ||
87 | bool "CRC32 perform self test on init" | ||
88 | default n | ||
89 | depends on CRC32 | ||
90 | help | ||
91 | This option enables the CRC32 library functions to perform a | ||
92 | self test on initialization. The self test computes crc32_le | ||
93 | and crc32_be over byte strings with random alignment and length | ||
94 | and computes the total elapsed time and number of bytes processed. | ||
95 | |||
96 | choice | ||
97 | prompt "CRC32 implementation" | ||
98 | depends on CRC32 | ||
99 | default CRC32_SLICEBY8 | ||
100 | help | ||
101 | This option allows a kernel builder to override the default choice | ||
102 | of CRC32 algorithm. Choose the default ("slice by 8") unless you | ||
103 | know that you need one of the others. | ||
104 | |||
105 | config CRC32_SLICEBY8 | ||
106 | bool "Slice by 8 bytes" | ||
107 | help | ||
108 | Calculate checksum 8 bytes at a time with a clever slicing algorithm. | ||
109 | This is the fastest algorithm, but comes with an 8KiB lookup table. | ||
110 | Most modern processors have enough cache to hold this table without | ||
111 | thrashing the cache. | ||
112 | |||
113 | This is the default implementation choice. Choose this one unless | ||
114 | you have a good reason not to. | ||
115 | |||
116 | config CRC32_SLICEBY4 | ||
117 | bool "Slice by 4 bytes" | ||
118 | help | ||
119 | Calculate checksum 4 bytes at a time with a clever slicing algorithm. | ||
120 | This is a bit slower than slice by 8, but has a smaller 4KiB lookup | ||
121 | table. | ||
122 | |||
123 | Only choose this option if you know what you are doing. | ||
124 | |||
125 | config CRC32_SARWATE | ||
126 | bool "Sarwate's Algorithm (one byte at a time)" | ||
127 | help | ||
128 | Calculate checksum a byte at a time using Sarwate's algorithm. This | ||
129 | is not particularly fast, but has a small 256 byte lookup table. | ||
130 | |||
131 | Only choose this option if you know what you are doing. | ||
132 | |||
133 | config CRC32_BIT | ||
134 | bool "Classic Algorithm (one bit at a time)" | ||
135 | help | ||
136 | Calculate checksum one bit at a time. This is VERY slow, but has | ||
137 | no lookup table. This is provided as a debugging option. | ||
138 | |||
139 | Only choose this option if you are debugging crc32. | ||
140 | |||
141 | endchoice | ||
62 | 142 | ||
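For readers weighing the table-size/speed trade-off described in the choice above, here is a minimal userspace sketch of the two extremes: the classic bit-at-a-time loop (no table) and Sarwate's byte-at-a-time lookup (one 256-entry table). The slice-by-4/8 options extend the same table idea to several tables consumed per iteration. Names and the seeding convention below are illustrative only; they are not the symbols used by lib/crc32.c.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define CRC32_POLY_LE 0xedb88320u          /* reflected CRC-32 polynomial */

/* "Classic" bit-at-a-time update: no lookup table, eight steps per byte. */
static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? CRC32_POLY_LE : 0);
        }
        return crc;
}

/* Sarwate: one 256-entry table, a single lookup/shift/XOR per byte. */
static uint32_t crc32_tab[256];

static void crc32_tab_init(void)
{
        for (uint32_t i = 0; i < 256; i++) {
                uint32_t crc = i;
                for (int j = 0; j < 8; j++)
                        crc = (crc >> 1) ^ ((crc & 1) ? CRC32_POLY_LE : 0);
                crc32_tab[i] = crc;
        }
}

static uint32_t crc32_le_sarwate(uint32_t crc, const uint8_t *p, size_t len)
{
        while (len--)
                crc = (crc >> 8) ^ crc32_tab[(crc ^ *p++) & 0xff];
        return crc;
}

int main(void)
{
        const uint8_t msg[] = "123456789";

        crc32_tab_init();
        /* Seeding with ~0 and inverting the result gives the well-known
         * CRC-32 check value 0xcbf43926 for "123456789". */
        printf("bitwise %08x, sarwate %08x\n",
               ~crc32_le_bitwise(~0u, msg, 9),
               ~crc32_le_sarwate(~0u, msg, 9));
        return 0;
}

Both helpers produce the same value; the Kconfig choice only selects how the per-byte update is computed.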
63 | config CRC7 | 143 | config CRC7 |
64 | tristate "CRC7 functions" | 144 | tristate "CRC7 functions" |
@@ -214,6 +294,7 @@ config BTREE | |||
214 | config HAS_IOMEM | 294 | config HAS_IOMEM |
215 | boolean | 295 | boolean |
216 | depends on !NO_IOMEM | 296 | depends on !NO_IOMEM |
297 | select GENERIC_IO | ||
217 | default y | 298 | default y |
218 | 299 | ||
219 | config HAS_IOPORT | 300 | config HAS_IOPORT |
@@ -244,6 +325,9 @@ config CPU_RMAP | |||
244 | bool | 325 | bool |
245 | depends on SMP | 326 | depends on SMP |
246 | 327 | ||
328 | config DQL | ||
329 | bool | ||
330 | |||
247 | # | 331 | # |
248 | # Netlink attribute parsing support is select'ed if needed | 332 | # Netlink attribute parsing support is select'ed if needed |
249 | # | 333 | # |
@@ -256,6 +340,9 @@ config NLATTR | |||
256 | config GENERIC_ATOMIC64 | 340 | config GENERIC_ATOMIC64 |
257 | bool | 341 | bool |
258 | 342 | ||
343 | config ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | ||
344 | def_bool y if GENERIC_ATOMIC64 | ||
345 | |||
259 | config LRU_CACHE | 346 | config LRU_CACHE |
260 | tristate | 347 | tristate |
261 | 348 | ||
@@ -269,14 +356,49 @@ config AVERAGE | |||
269 | 356 | ||
270 | If unsure, say N. | 357 | If unsure, say N. |
271 | 358 | ||
359 | config CLZ_TAB | ||
360 | bool | ||
361 | |||
272 | config CORDIC | 362 | config CORDIC |
273 | tristate "Cordic function" | 363 | tristate "CORDIC algorithm" |
364 | help | ||
365 | This option provides an implementation of the CORDIC algorithm; | ||
366 | calculations are in fixed point. Module will be called cordic. | ||
367 | |||
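To make the fixed-point arithmetic concrete, a small standalone sketch of CORDIC in rotation mode using Q16.16 values. This is illustrative only; the in-kernel module has its own interface and would use a precomputed integer arctangent table rather than building one with libm. Build with -lm.

#include <stdint.h>
#include <stdio.h>
#include <math.h>

#define FIXED(x)    ((int32_t)((x) * 65536.0))   /* Q16.16 */
#define ITERATIONS  16

int main(void)
{
        /* atan(2^-i) in Q16.16 radians; a kernel version would precompute this. */
        int32_t atan_tab[ITERATIONS];
        for (int i = 0; i < ITERATIONS; i++)
                atan_tab[i] = FIXED(atan(pow(2.0, -i)));

        int32_t x = FIXED(0.6072529350);   /* 1/K, cancels the CORDIC gain */
        int32_t y = 0;
        int32_t z = FIXED(0.5235988);      /* target angle: ~30 degrees in radians */

        for (int i = 0; i < ITERATIONS; i++) {
                /* shifts replace multiplies; for this input x and y stay
                 * non-negative, so the right shifts are well behaved */
                int32_t dx = x >> i, dy = y >> i;
                if (z >= 0) {
                        x -= dy; y += dx; z -= atan_tab[i];
                } else {
                        x += dy; y -= dx; z += atan_tab[i];
                }
        }

        /* x converges to cos(z0), y to sin(z0), both still in Q16.16 */
        printf("cos=%.5f sin=%.5f\n", x / 65536.0, y / 65536.0);
        return 0;
}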
368 | config DDR | ||
369 | bool "JEDEC DDR data" | ||
370 | help | ||
371 | Data from JEDEC specs for DDR SDRAM memories, | ||
372 | particularly the AC timing parameters and addressing | ||
373 | information. This data is useful for drivers handling | ||
374 | DDR SDRAM controllers. | ||
375 | |||
376 | config MPILIB | ||
377 | tristate | ||
378 | select CLZ_TAB | ||
379 | help | ||
380 | Multiprecision maths library from GnuPG. | ||
381 | It is used to implement RSA digital signature verification, | ||
382 | which is used by IMA/EVM digital signature extension. | ||
383 | |||
384 | config SIGNATURE | ||
385 | tristate | ||
386 | depends on KEYS && CRYPTO | ||
387 | select CRYPTO_SHA1 | ||
388 | select MPILIB | ||
274 | help | 389 | help |
275 | The option provides arithmetic function using cordic algorithm | 390 | Digital signature verification. Currently only RSA is supported. |
276 | so its calculations are in fixed point. Modules can select this | 391 | Implementation is done using GnuPG MPI library |
277 | when they require this function. Module will be called cordic. | ||
278 | 392 | ||
279 | config LLIST | 393 | # |
394 | # libfdt files, only selected if needed. | ||
395 | # | ||
396 | config LIBFDT | ||
280 | bool | 397 | bool |
281 | 398 | ||
399 | config OID_REGISTRY | ||
400 | tristate | ||
401 | help | ||
402 | Enable fast lookup object identifier registry. | ||
403 | |||
282 | endmenu | 404 | endmenu |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c0cb9c4bc46d..28e9d6c98941 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -3,12 +3,16 @@ config PRINTK_TIME | |||
3 | bool "Show timing information on printks" | 3 | bool "Show timing information on printks" |
4 | depends on PRINTK | 4 | depends on PRINTK |
5 | help | 5 | help |
6 | Selecting this option causes timing information to be | 6 | Selecting this option causes time stamps of the printk() |
7 | included in printk output. This allows you to measure | 7 | messages to be added to the output of the syslog() system |
8 | the interval between kernel operations, including bootup | 8 | call and at the console. |
9 | operations. This is useful for identifying long delays | 9 | |
10 | in kernel startup. Or add printk.time=1 at boot-time. | 10 | The timestamp is always recorded internally, and exported |
11 | See Documentation/kernel-parameters.txt | 11 | to /dev/kmsg. This flag just specifies if the timestamp should |
12 | be included, not that the timestamp is recorded. | ||
13 | |||
14 | The behavior is also controlled by the kernel command line | ||
15 | parameter printk.time=1. See Documentation/kernel-parameters.txt | ||
12 | 16 | ||
13 | config DEFAULT_MESSAGE_LOGLEVEL | 17 | config DEFAULT_MESSAGE_LOGLEVEL |
14 | int "Default message log level (1-7)" | 18 | int "Default message log level (1-7)" |
@@ -70,6 +74,15 @@ config STRIP_ASM_SYMS | |||
70 | that look like '.Lxxx') so they don't pollute the output of | 74 | that look like '.Lxxx') so they don't pollute the output of |
71 | get_wchan() and suchlike. | 75 | get_wchan() and suchlike. |
72 | 76 | ||
77 | config READABLE_ASM | ||
78 | bool "Generate readable assembler code" | ||
79 | depends on DEBUG_KERNEL | ||
80 | help | ||
81 | Disable some compiler optimizations that tend to generate human unreadable | ||
82 | assembler output. This may make the kernel slightly slower, but it helps | ||
83 | to keep kernel developers who have to stare a lot at assembler listings | ||
84 | sane. | ||
85 | |||
73 | config UNUSED_SYMBOLS | 86 | config UNUSED_SYMBOLS |
74 | bool "Enable unused/obsolete exported symbols" | 87 | bool "Enable unused/obsolete exported symbols" |
75 | default y if X86 | 88 | default y if X86 |
@@ -117,31 +130,31 @@ config DEBUG_SECTION_MISMATCH | |||
117 | help | 130 | help |
118 | The section mismatch analysis checks if there are illegal | 131 | The section mismatch analysis checks if there are illegal |
119 | references from one section to another section. | 132 | references from one section to another section. |
120 | Linux will during link or during runtime drop some sections | 133 | During linktime or runtime, some sections are dropped; |
121 | and any use of code/data previously in these sections will | 134 | any use of code/data previously in these sections would |
122 | most likely result in an oops. | 135 | most likely result in an oops. |
123 | In the code functions and variables are annotated with | 136 | In the code, functions and variables are annotated with |
124 | __init, __devinit etc. (see full list in include/linux/init.h) | 137 | __init, __devinit, etc. (see the full list in include/linux/init.h), |
125 | which results in the code/data being placed in specific sections. | 138 | which results in the code/data being placed in specific sections. |
126 | The section mismatch analysis is always done after a full | 139 | The section mismatch analysis is always performed after a full |
127 | kernel build but enabling this option will in addition | 140 | kernel build, and enabling this option causes the following |
128 | do the following: | 141 | additional steps to occur: |
129 | - Add the option -fno-inline-functions-called-once to gcc | 142 | - Add the option -fno-inline-functions-called-once to gcc commands. |
130 | When inlining a function annotated __init in a non-init | 143 | When inlining a function annotated with __init in a non-init |
131 | function we would lose the section information and thus | 144 | function, we would lose the section information and thus |
132 | the analysis would not catch the illegal reference. | 145 | the analysis would not catch the illegal reference. |
133 | This option tells gcc to inline less but will also | 146 | This option tells gcc to inline less (but it does result in |
134 | result in a larger kernel. | 147 | a larger kernel). |
135 | - Run the section mismatch analysis for each module/built-in.o | 148 | - Run the section mismatch analysis for each module/built-in.o file. |
136 | When we run the section mismatch analysis on vmlinux.o we | 149 | When we run the section mismatch analysis on vmlinux.o, we |
137 | lose valuable information about where the mismatch was | 150 | lose valuable information about where the mismatch was |
138 | introduced. | 151 | introduced. |
139 | Running the analysis for each module/built-in.o file | 152 | Running the analysis for each module/built-in.o file |
140 | will tell where the mismatch happens much closer to the | 153 | tells where the mismatch happens much closer to the |
141 | source. The drawback is that we will report the same | 154 | source. The drawback is that the same mismatch is |
142 | mismatch at least twice. | 155 | reported at least twice. |
143 | - Enable verbose reporting from modpost to help solving | 156 | - Enable verbose reporting from modpost in order to help resolve |
144 | the section mismatches reported. | 157 | the section mismatches that are reported. |
145 | 158 | ||
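To make the kind of bug being hunted concrete, a minimal hypothetical example of an illegal cross-section reference (function names invented; this is not code from the tree):

#include <linux/init.h>

static int __init early_setup(void)     /* placed in .init.text, freed after boot */
{
        return 0;
}

int runtime_path(void)                  /* ordinary .text, may run at any time */
{
        /*
         * Illegal reference from .text into .init.text: by the time this
         * runs, early_setup() may already have been discarded. If gcc
         * inlines the single call site, the cross-section reference (and
         * the warning) disappears; that is why the option adds
         * -fno-inline-functions-called-once, as described above.
         */
        return early_setup();
}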
146 | config DEBUG_KERNEL | 159 | config DEBUG_KERNEL |
147 | bool "Kernel debugging" | 160 | bool "Kernel debugging" |
@@ -166,36 +179,41 @@ config LOCKUP_DETECTOR | |||
166 | hard and soft lockups. | 179 | hard and soft lockups. |
167 | 180 | ||
168 | Softlockups are bugs that cause the kernel to loop in kernel | 181 | Softlockups are bugs that cause the kernel to loop in kernel |
169 | mode for more than 60 seconds, without giving other tasks a | 182 | mode for more than 20 seconds, without giving other tasks a |
170 | chance to run. The current stack trace is displayed upon | 183 | chance to run. The current stack trace is displayed upon |
171 | detection and the system will stay locked up. | 184 | detection and the system will stay locked up. |
172 | 185 | ||
173 | Hardlockups are bugs that cause the CPU to loop in kernel mode | 186 | Hardlockups are bugs that cause the CPU to loop in kernel mode |
174 | for more than 60 seconds, without letting other interrupts have a | 187 | for more than 10 seconds, without letting other interrupts have a |
175 | chance to run. The current stack trace is displayed upon detection | 188 | chance to run. The current stack trace is displayed upon detection |
176 | and the system will stay locked up. | 189 | and the system will stay locked up. |
177 | 190 | ||
178 | The overhead should be minimal. A periodic hrtimer runs to | 191 | The overhead should be minimal. A periodic hrtimer runs to |
179 | generate interrupts and kick the watchdog task every 10-12 seconds. | 192 | generate interrupts and kick the watchdog task every 4 seconds. |
180 | An NMI is generated every 60 seconds or so to check for hardlockups. | 193 | An NMI is generated every 10 seconds or so to check for hardlockups. |
194 | |||
195 | The frequency of hrtimer and NMI events and the soft and hard lockup | ||
196 | thresholds can be controlled through the sysctl watchdog_thresh. | ||
181 | 197 | ||
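As a worked example of the numbers quoted above (20 s, 10 s, 4 s), a tiny sketch of how they can all be derived from one watchdog_thresh value. The 2x and divide-by-5 factors are an assumption about the derivation, not something stated in this help text; the authoritative formula lives in kernel/watchdog.c.

#include <stdio.h>

int main(void)
{
        /* Assumed derivation (see note above): all three intervals follow
         * from the single watchdog_thresh sysctl, which defaults to 10 s. */
        int watchdog_thresh   = 10;                     /* seconds */
        int hardlockup_thresh = watchdog_thresh;        /* NMI check: ~10 s  */
        int softlockup_thresh = 2 * watchdog_thresh;    /* stuck task: ~20 s */
        int sample_period     = softlockup_thresh / 5;  /* hrtimer kick: ~4 s */

        printf("hard=%ds soft=%ds period=%ds\n",
               hardlockup_thresh, softlockup_thresh, sample_period);
        return 0;
}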
182 | config HARDLOCKUP_DETECTOR | 198 | config HARDLOCKUP_DETECTOR |
183 | def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \ | 199 | def_bool y |
184 | !ARCH_HAS_NMI_WATCHDOG | 200 | depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG |
201 | depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI | ||
185 | 202 | ||
186 | config BOOTPARAM_HARDLOCKUP_PANIC | 203 | config BOOTPARAM_HARDLOCKUP_PANIC |
187 | bool "Panic (Reboot) On Hard Lockups" | 204 | bool "Panic (Reboot) On Hard Lockups" |
188 | depends on LOCKUP_DETECTOR | 205 | depends on HARDLOCKUP_DETECTOR |
189 | help | 206 | help |
190 | Say Y here to enable the kernel to panic on "hard lockups", | 207 | Say Y here to enable the kernel to panic on "hard lockups", |
191 | which are bugs that cause the kernel to loop in kernel | 208 | which are bugs that cause the kernel to loop in kernel |
192 | mode with interrupts disabled for more than 60 seconds. | 209 | mode with interrupts disabled for more than 10 seconds (configurable |
210 | using the watchdog_thresh sysctl). | ||
193 | 211 | ||
194 | Say N if unsure. | 212 | Say N if unsure. |
195 | 213 | ||
196 | config BOOTPARAM_HARDLOCKUP_PANIC_VALUE | 214 | config BOOTPARAM_HARDLOCKUP_PANIC_VALUE |
197 | int | 215 | int |
198 | depends on LOCKUP_DETECTOR | 216 | depends on HARDLOCKUP_DETECTOR |
199 | range 0 1 | 217 | range 0 1 |
200 | default 0 if !BOOTPARAM_HARDLOCKUP_PANIC | 218 | default 0 if !BOOTPARAM_HARDLOCKUP_PANIC |
201 | default 1 if BOOTPARAM_HARDLOCKUP_PANIC | 219 | default 1 if BOOTPARAM_HARDLOCKUP_PANIC |
@@ -206,8 +224,8 @@ config BOOTPARAM_SOFTLOCKUP_PANIC | |||
206 | help | 224 | help |
207 | Say Y here to enable the kernel to panic on "soft lockups", | 225 | Say Y here to enable the kernel to panic on "soft lockups", |
208 | which are bugs that cause the kernel to loop in kernel | 226 | which are bugs that cause the kernel to loop in kernel |
209 | mode for more than 60 seconds, without giving other tasks a | 227 | mode for more than 20 seconds (configurable using the watchdog_thresh |
210 | chance to run. | 228 | sysctl), without giving other tasks a chance to run. |
211 | 229 | ||
212 | The panic can be used in combination with panic_timeout, | 230 | The panic can be used in combination with panic_timeout, |
213 | to cause the system to reboot automatically after a | 231 | to cause the system to reboot automatically after a |
@@ -224,6 +242,26 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE | |||
224 | default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC | 242 | default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC |
225 | default 1 if BOOTPARAM_SOFTLOCKUP_PANIC | 243 | default 1 if BOOTPARAM_SOFTLOCKUP_PANIC |
226 | 244 | ||
245 | config PANIC_ON_OOPS | ||
246 | bool "Panic on Oops" if EXPERT | ||
247 | default n | ||
248 | help | ||
249 | Say Y here to enable the kernel to panic when it oopses. This | ||
250 | has the same effect as setting oops=panic on the kernel command | ||
251 | line. | ||
252 | |||
253 | This feature is useful to ensure that the kernel does not do | ||
254 | anything erroneous after an oops which could result in data | ||
255 | corruption or other issues. | ||
256 | |||
257 | Say N if unsure. | ||
258 | |||
259 | config PANIC_ON_OOPS_VALUE | ||
260 | int | ||
261 | range 0 1 | ||
262 | default 0 if !PANIC_ON_OOPS | ||
263 | default 1 if PANIC_ON_OOPS | ||
264 | |||
227 | config DETECT_HUNG_TASK | 265 | config DETECT_HUNG_TASK |
228 | bool "Detect Hung Tasks" | 266 | bool "Detect Hung Tasks" |
229 | depends on DEBUG_KERNEL | 267 | depends on DEBUG_KERNEL |
@@ -248,8 +286,9 @@ config DEFAULT_HUNG_TASK_TIMEOUT | |||
248 | to determine when a task has become non-responsive and should | 286 | to determine when a task has become non-responsive and should |
249 | be considered hung. | 287 | be considered hung. |
250 | 288 | ||
251 | It can be adjusted at runtime via the kernel.hung_task_timeout | 289 | It can be adjusted at runtime via the kernel.hung_task_timeout_secs |
252 | sysctl or by writing a value to /proc/sys/kernel/hung_task_timeout. | 290 | sysctl or by writing a value to |
291 | /proc/sys/kernel/hung_task_timeout_secs. | ||
253 | 292 | ||
254 | A timeout of 0 disables the check. The default is two minutes. | 293 | A timeout of 0 disables the check. The default is two minutes. |
255 | Keeping the default should be fine in most cases. | 294 | Keeping the default should be fine in most cases. |
@@ -411,11 +450,12 @@ config SLUB_STATS | |||
411 | out which slabs are relevant to a particular load. | 450 | out which slabs are relevant to a particular load. |
412 | Try running: slabinfo -DA | 451 | Try running: slabinfo -DA |
413 | 452 | ||
453 | config HAVE_DEBUG_KMEMLEAK | ||
454 | bool | ||
455 | |||
414 | config DEBUG_KMEMLEAK | 456 | config DEBUG_KMEMLEAK |
415 | bool "Kernel memory leak detector" | 457 | bool "Kernel memory leak detector" |
416 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ | 458 | depends on DEBUG_KERNEL && EXPERIMENTAL && HAVE_DEBUG_KMEMLEAK |
417 | (X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE) | ||
418 | |||
419 | select DEBUG_FS | 459 | select DEBUG_FS |
420 | select STACKTRACE if STACKTRACE_SUPPORT | 460 | select STACKTRACE if STACKTRACE_SUPPORT |
421 | select KALLSYMS | 461 | select KALLSYMS |
@@ -494,6 +534,7 @@ config RT_MUTEX_TESTER | |||
494 | config DEBUG_SPINLOCK | 534 | config DEBUG_SPINLOCK |
495 | bool "Spinlock and rw-lock debugging: basic checks" | 535 | bool "Spinlock and rw-lock debugging: basic checks" |
496 | depends on DEBUG_KERNEL | 536 | depends on DEBUG_KERNEL |
537 | select UNINLINE_SPIN_UNLOCK | ||
497 | help | 538 | help |
498 | Say Y here and build SMP to catch missing spinlock initialization | 539 | Say Y here and build SMP to catch missing spinlock initialization |
499 | and certain other kinds of spinlock errors commonly made. This is | 540 | and certain other kinds of spinlock errors commonly made. This is |
@@ -590,6 +631,20 @@ config PROVE_RCU_REPEATEDLY | |||
590 | 631 | ||
591 | Say N if you are unsure. | 632 | Say N if you are unsure. |
592 | 633 | ||
634 | config PROVE_RCU_DELAY | ||
635 | bool "RCU debugging: preemptible RCU race provocation" | ||
636 | depends on DEBUG_KERNEL && PREEMPT_RCU | ||
637 | default n | ||
638 | help | ||
639 | There is a class of races that involve an unlikely preemption | ||
640 | of __rcu_read_unlock() just after ->rcu_read_lock_nesting has | ||
641 | been set to INT_MIN. This feature inserts a delay at that | ||
642 | point to increase the probability of these races. | ||
643 | |||
644 | Say Y to increase probability of preemption of __rcu_read_unlock(). | ||
645 | |||
646 | Say N if you are unsure. | ||
647 | |||
593 | config SPARSE_RCU_POINTER | 648 | config SPARSE_RCU_POINTER |
594 | bool "RCU debugging: sparse-based checks for pointer usage" | 649 | bool "RCU debugging: sparse-based checks for pointer usage" |
595 | default n | 650 | default n |
@@ -675,7 +730,7 @@ config STACKTRACE | |||
675 | 730 | ||
676 | config DEBUG_STACK_USAGE | 731 | config DEBUG_STACK_USAGE |
677 | bool "Stack utilization instrumentation" | 732 | bool "Stack utilization instrumentation" |
678 | depends on DEBUG_KERNEL | 733 | depends on DEBUG_KERNEL && !IA64 && !PARISC |
679 | help | 734 | help |
680 | Enables the display of the minimum amount of free stack which each | 735 | Enables the display of the minimum amount of free stack which each |
681 | task has ever had available in the sysrq-T and sysrq-P debug output. | 736 | task has ever had available in the sysrq-T and sysrq-P debug output. |
@@ -696,11 +751,12 @@ config DEBUG_HIGHMEM | |||
696 | This option enables additional error checking for high memory systems. | 751 | This option enables additional error checking for high memory systems. |
697 | Disable for production systems. | 752 | Disable for production systems. |
698 | 753 | ||
754 | config HAVE_DEBUG_BUGVERBOSE | ||
755 | bool | ||
756 | |||
699 | config DEBUG_BUGVERBOSE | 757 | config DEBUG_BUGVERBOSE |
700 | bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT | 758 | bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT |
701 | depends on BUG | 759 | depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE) |
702 | depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ | ||
703 | FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || TILE | ||
704 | default y | 760 | default y |
705 | help | 761 | help |
706 | Say Y here to make BUG() panics output the file name and line number | 762 | Say Y here to make BUG() panics output the file name and line number |
@@ -742,6 +798,15 @@ config DEBUG_VM | |||
742 | 798 | ||
743 | If unsure, say N. | 799 | If unsure, say N. |
744 | 800 | ||
801 | config DEBUG_VM_RB | ||
802 | bool "Debug VM red-black trees" | ||
803 | depends on DEBUG_VM | ||
804 | help | ||
805 | Enable this to turn on more extended checks in the virtual-memory | ||
806 | system that may impact performance. | ||
807 | |||
808 | If unsure, say N. | ||
809 | |||
745 | config DEBUG_VIRTUAL | 810 | config DEBUG_VIRTUAL |
746 | bool "Debug VM translations" | 811 | bool "Debug VM translations" |
747 | depends on DEBUG_KERNEL && X86 | 812 | depends on DEBUG_KERNEL && X86 |
@@ -835,7 +900,7 @@ config DEBUG_CREDENTIALS | |||
835 | 900 | ||
836 | # | 901 | # |
837 | # Select this config option from the architecture Kconfig, if it | 902 | # Select this config option from the architecture Kconfig, if it |
838 | # it is preferred to always offer frame pointers as a config | 903 | # is preferred to always offer frame pointers as a config |
839 | # option on the architecture (regardless of KERNEL_DEBUG): | 904 | # option on the architecture (regardless of KERNEL_DEBUG): |
840 | # | 905 | # |
841 | config ARCH_WANT_FRAME_POINTERS | 906 | config ARCH_WANT_FRAME_POINTERS |
@@ -926,6 +991,30 @@ config RCU_CPU_STALL_VERBOSE | |||
926 | 991 | ||
927 | Say Y if you want to enable such checks. | 992 | Say Y if you want to enable such checks. |
928 | 993 | ||
994 | config RCU_CPU_STALL_INFO | ||
995 | bool "Print additional diagnostics on RCU CPU stall" | ||
996 | depends on (TREE_RCU || TREE_PREEMPT_RCU) && DEBUG_KERNEL | ||
997 | default n | ||
998 | help | ||
999 | For each stalled CPU that is aware of the current RCU grace | ||
1000 | period, print out additional per-CPU diagnostic information | ||
1001 | regarding scheduling-clock ticks, idle state, and, | ||
1002 | for RCU_FAST_NO_HZ kernels, idle-entry state. | ||
1003 | |||
1004 | Say N if you are unsure. | ||
1005 | |||
1006 | Say Y if you want to enable such diagnostics. | ||
1007 | |||
1008 | config RCU_TRACE | ||
1009 | bool "Enable tracing for RCU" | ||
1010 | depends on DEBUG_KERNEL | ||
1011 | help | ||
1012 | This option provides tracing in RCU which presents stats | ||
1013 | in debugfs for debugging RCU implementation. | ||
1014 | |||
1015 | Say Y here if you want to enable RCU tracing | ||
1016 | Say N if you are unsure. | ||
1017 | |||
929 | config KPROBES_SANITY_TEST | 1018 | config KPROBES_SANITY_TEST |
930 | bool "Kprobes sanity tests" | 1019 | bool "Kprobes sanity tests" |
931 | depends on DEBUG_KERNEL | 1020 | depends on DEBUG_KERNEL |
@@ -1021,18 +1110,105 @@ config LKDTM | |||
1021 | Documentation on how to use the module can be found in | 1110 | Documentation on how to use the module can be found in |
1022 | Documentation/fault-injection/provoke-crashes.txt | 1111 | Documentation/fault-injection/provoke-crashes.txt |
1023 | 1112 | ||
1113 | config NOTIFIER_ERROR_INJECTION | ||
1114 | tristate "Notifier error injection" | ||
1115 | depends on DEBUG_KERNEL | ||
1116 | select DEBUG_FS | ||
1117 | help | ||
1118 | This option provides the ability to inject artificial errors to | ||
1119 | specified notifier chain callbacks. It is useful to test the error | ||
1120 | handling of notifier call chain failures. | ||
1121 | |||
1122 | Say N if unsure. | ||
1123 | |||
1024 | config CPU_NOTIFIER_ERROR_INJECT | 1124 | config CPU_NOTIFIER_ERROR_INJECT |
1025 | tristate "CPU notifier error injection module" | 1125 | tristate "CPU notifier error injection module" |
1026 | depends on HOTPLUG_CPU && DEBUG_KERNEL | 1126 | depends on HOTPLUG_CPU && NOTIFIER_ERROR_INJECTION |
1027 | help | 1127 | help |
1028 | This option provides a kernel module that can be used to test | 1128 | This option provides a kernel module that can be used to test |
1029 | the error handling of the cpu notifiers | 1129 | the error handling of the cpu notifiers by injecting artificial |
1130 | errors to CPU notifier chain callbacks. It is controlled through | ||
1131 | debugfs interface under /sys/kernel/debug/notifier-error-inject/cpu | ||
1132 | |||
1133 | If the notifier call chain should be failed with some events | ||
1134 | notified, write the error code to "actions/<notifier event>/error". | ||
1135 | |||
1136 | Example: Inject CPU offline error (-1 == -EPERM) | ||
1137 | |||
1138 | # cd /sys/kernel/debug/notifier-error-inject/cpu | ||
1139 | # echo -1 > actions/CPU_DOWN_PREPARE/error | ||
1140 | # echo 0 > /sys/devices/system/cpu/cpu1/online | ||
1141 | bash: echo: write error: Operation not permitted | ||
1030 | 1142 | ||
1031 | To compile this code as a module, choose M here: the module will | 1143 | To compile this code as a module, choose M here: the module will |
1032 | be called cpu-notifier-error-inject. | 1144 | be called cpu-notifier-error-inject. |
1033 | 1145 | ||
1034 | If unsure, say N. | 1146 | If unsure, say N. |
1035 | 1147 | ||
1148 | config PM_NOTIFIER_ERROR_INJECT | ||
1149 | tristate "PM notifier error injection module" | ||
1150 | depends on PM && NOTIFIER_ERROR_INJECTION | ||
1151 | default m if PM_DEBUG | ||
1152 | help | ||
1153 | This option provides the ability to inject artificial errors to | ||
1154 | PM notifier chain callbacks. It is controlled through debugfs | ||
1155 | interface /sys/kernel/debug/notifier-error-inject/pm | ||
1156 | |||
1157 | If the notifier call chain should be failed with some events | ||
1158 | notified, write the error code to "actions/<notifier event>/error". | ||
1159 | |||
1160 | Example: Inject PM suspend error (-12 = -ENOMEM) | ||
1161 | |||
1162 | # cd /sys/kernel/debug/notifier-error-inject/pm/ | ||
1163 | # echo -12 > actions/PM_SUSPEND_PREPARE/error | ||
1164 | # echo mem > /sys/power/state | ||
1165 | bash: echo: write error: Cannot allocate memory | ||
1166 | |||
1167 | To compile this code as a module, choose M here: the module will | ||
1168 | be called pm-notifier-error-inject. | ||
1169 | |||
1170 | If unsure, say N. | ||
1171 | |||
1172 | config MEMORY_NOTIFIER_ERROR_INJECT | ||
1173 | tristate "Memory hotplug notifier error injection module" | ||
1174 | depends on MEMORY_HOTPLUG_SPARSE && NOTIFIER_ERROR_INJECTION | ||
1175 | help | ||
1176 | This option provides the ability to inject artificial errors to | ||
1177 | memory hotplug notifier chain callbacks. It is controlled through | ||
1178 | debugfs interface under /sys/kernel/debug/notifier-error-inject/memory | ||
1179 | |||
1180 | If the notifier call chain should be failed with some events | ||
1181 | notified, write the error code to "actions/<notifier event>/error". | ||
1182 | |||
1183 | Example: Inject memory hotplug offline error (-12 == -ENOMEM) | ||
1184 | |||
1185 | # cd /sys/kernel/debug/notifier-error-inject/memory | ||
1186 | # echo -12 > actions/MEM_GOING_OFFLINE/error | ||
1187 | # echo offline > /sys/devices/system/memory/memoryXXX/state | ||
1188 | bash: echo: write error: Cannot allocate memory | ||
1189 | |||
1190 | To compile this code as a module, choose M here: the module will | ||
1191 | be called memory-notifier-error-inject. | ||
1192 | |||
1193 | If unsure, say N. | ||
1194 | |||
1195 | config PSERIES_RECONFIG_NOTIFIER_ERROR_INJECT | ||
1196 | tristate "pSeries reconfig notifier error injection module" | ||
1197 | depends on PPC_PSERIES && NOTIFIER_ERROR_INJECTION | ||
1198 | help | ||
1199 | This option provides the ability to inject artificial errors to | ||
1200 | pSeries reconfig notifier chain callbacks. It is controlled | ||
1201 | through debugfs interface under | ||
1202 | /sys/kernel/debug/notifier-error-inject/pSeries-reconfig/ | ||
1203 | |||
1204 | If the notifier call chain should be failed with some events | ||
1205 | notified, write the error code to "actions/<notifier event>/error". | ||
1206 | |||
1207 | To compile this code as a module, choose M here: the module will | ||
1208 | be called pSeries-reconfig-notifier-error-inject. | ||
1209 | |||
1210 | If unsure, say N. | ||
1211 | |||
1036 | config FAULT_INJECTION | 1212 | config FAULT_INJECTION |
1037 | bool "Fault-injection framework" | 1213 | bool "Fault-injection framework" |
1038 | depends on DEBUG_KERNEL | 1214 | depends on DEBUG_KERNEL |
@@ -1070,6 +1246,17 @@ config FAIL_IO_TIMEOUT | |||
1070 | Only works with drivers that use the generic timeout handling, | 1246 | Only works with drivers that use the generic timeout handling, |
1071 | for others it won't do anything. | 1247 | for others it won't do anything. |
1072 | 1248 | ||
1249 | config FAIL_MMC_REQUEST | ||
1250 | bool "Fault-injection capability for MMC IO" | ||
1251 | select DEBUG_FS | ||
1252 | depends on FAULT_INJECTION && MMC | ||
1253 | help | ||
1254 | Provide fault-injection capability for MMC IO. | ||
1255 | This will make the mmc core return data errors. This is | ||
1256 | useful to test the error handling in the mmc block device | ||
1257 | and to test how the mmc host driver handles retries from | ||
1258 | the block device. | ||
1259 | |||
1073 | config FAULT_INJECTION_DEBUG_FS | 1260 | config FAULT_INJECTION_DEBUG_FS |
1074 | bool "Debugfs entries for fault-injection capabilities" | 1261 | bool "Debugfs entries for fault-injection capabilities" |
1075 | depends on FAULT_INJECTION && SYSFS && DEBUG_FS | 1262 | depends on FAULT_INJECTION && SYSFS && DEBUG_FS |
@@ -1081,7 +1268,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER | |||
1081 | depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT | 1268 | depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT |
1082 | depends on !X86_64 | 1269 | depends on !X86_64 |
1083 | select STACKTRACE | 1270 | select STACKTRACE |
1084 | select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE | 1271 | select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND |
1085 | help | 1272 | help |
1086 | Provide stacktrace filter for fault-injection capabilities | 1273 | Provide stacktrace filter for fault-injection capabilities |
1087 | 1274 | ||
@@ -1091,7 +1278,7 @@ config LATENCYTOP | |||
1091 | depends on DEBUG_KERNEL | 1278 | depends on DEBUG_KERNEL |
1092 | depends on STACKTRACE_SUPPORT | 1279 | depends on STACKTRACE_SUPPORT |
1093 | depends on PROC_FS | 1280 | depends on PROC_FS |
1094 | select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE | 1281 | select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND |
1095 | select KALLSYMS | 1282 | select KALLSYMS |
1096 | select KALLSYMS_ALL | 1283 | select KALLSYMS_ALL |
1097 | select STACKTRACE | 1284 | select STACKTRACE |
@@ -1101,17 +1288,22 @@ config LATENCYTOP | |||
1101 | Enable this option if you want to use the LatencyTOP tool | 1288 | Enable this option if you want to use the LatencyTOP tool |
1102 | to find out which userspace is blocking on what kernel operations. | 1289 | to find out which userspace is blocking on what kernel operations. |
1103 | 1290 | ||
1104 | config SYSCTL_SYSCALL_CHECK | ||
1105 | bool "Sysctl checks" | ||
1106 | depends on SYSCTL | ||
1107 | ---help--- | ||
1108 | sys_sysctl uses binary paths that have been found challenging | ||
1109 | to properly maintain and use. This enables checks that help | ||
1110 | you to keep things correct. | ||
1111 | |||
1112 | source mm/Kconfig.debug | 1291 | source mm/Kconfig.debug |
1113 | source kernel/trace/Kconfig | 1292 | source kernel/trace/Kconfig |
1114 | 1293 | ||
1294 | config RBTREE_TEST | ||
1295 | tristate "Red-Black tree test" | ||
1296 | depends on m && DEBUG_KERNEL | ||
1297 | help | ||
1298 | A benchmark measuring the performance of the rbtree library. | ||
1299 | Also includes rbtree invariant checks. | ||
1300 | |||
1301 | config INTERVAL_TREE_TEST | ||
1302 | tristate "Interval tree test" | ||
1303 | depends on m && DEBUG_KERNEL | ||
1304 | help | ||
1305 | A benchmark measuring the performance of the interval tree library | ||
1306 | |||
1115 | config PROVIDE_OHCI1394_DMA_INIT | 1307 | config PROVIDE_OHCI1394_DMA_INIT |
1116 | bool "Remote debugging over FireWire early on boot" | 1308 | bool "Remote debugging over FireWire early on boot" |
1117 | depends on PCI && X86 | 1309 | depends on PCI && X86 |
@@ -1172,8 +1364,13 @@ config DYNAMIC_DEBUG | |||
1172 | otherwise be available at runtime. These messages can then be | 1364 | otherwise be available at runtime. These messages can then be |
1173 | enabled/disabled based on various levels of scope - per source file, | 1365 | enabled/disabled based on various levels of scope - per source file, |
1174 | function, module, format string, and line number. This mechanism | 1366 | function, module, format string, and line number. This mechanism |
1175 | implicitly enables all pr_debug() and dev_dbg() calls. The impact of | 1367 | implicitly compiles in all pr_debug() and dev_dbg() calls, which |
1176 | this compile option is a larger kernel text size of about 2%. | 1368 | enlarges the kernel text size by about 2%. |
1369 | |||
1370 | If a source file is compiled with DEBUG flag set, any | ||
1371 | pr_debug() calls in it are enabled by default, but can be | ||
1372 | disabled at runtime as below. Note that DEBUG flag is | ||
1373 | turned on by many CONFIG_*DEBUG* options. | ||
1177 | 1374 | ||
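As a concrete picture of what "compiles in" means here, a minimal hypothetical module with one pr_debug() call site (names invented for illustration). With this option enabled, each such site becomes one line in the control file shown further below and can be switched on with the 'p' flag; without it, pr_debug() compiles to nothing unless DEBUG is defined for the file.

#include <linux/module.h>
#include <linux/printk.h>

/* Hypothetical module used only to illustrate a dynamic-debug call site. */

static int __init dyndbg_demo_init(void)
{
        /* With CONFIG_DYNAMIC_DEBUG this line is registered in
         * <debugfs>/dynamic_debug/control and stays off until the 'p'
         * flag is applied (or the file is built with -DDEBUG). */
        pr_debug("dyndbg_demo: loaded\n");
        return 0;
}

static void __exit dyndbg_demo_exit(void)
{
        pr_debug("dyndbg_demo: unloading\n");
}

module_init(dyndbg_demo_init);
module_exit(dyndbg_demo_exit);
MODULE_LICENSE("GPL");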
1178 | Usage: | 1375 | Usage: |
1179 | 1376 | ||
@@ -1190,16 +1387,16 @@ config DYNAMIC_DEBUG | |||
1190 | lineno : line number of the debug statement | 1387 | lineno : line number of the debug statement |
1191 | module : module that contains the debug statement | 1388 | module : module that contains the debug statement |
1192 | function : function that contains the debug statement | 1389 | function : function that contains the debug statement |
1193 | flags : 'p' means the line is turned 'on' for printing | 1390 | flags : '=p' means the line is turned 'on' for printing |
1194 | format : the format used for the debug statement | 1391 | format : the format used for the debug statement |
1195 | 1392 | ||
1196 | From a live system: | 1393 | From a live system: |
1197 | 1394 | ||
1198 | nullarbor:~ # cat <debugfs>/dynamic_debug/control | 1395 | nullarbor:~ # cat <debugfs>/dynamic_debug/control |
1199 | # filename:lineno [module]function flags format | 1396 | # filename:lineno [module]function flags format |
1200 | fs/aio.c:222 [aio]__put_ioctx - "__put_ioctx:\040freeing\040%p\012" | 1397 | fs/aio.c:222 [aio]__put_ioctx =_ "__put_ioctx:\040freeing\040%p\012" |
1201 | fs/aio.c:248 [aio]ioctx_alloc - "ENOMEM:\040nr_events\040too\040high\012" | 1398 | fs/aio.c:248 [aio]ioctx_alloc =_ "ENOMEM:\040nr_events\040too\040high\012" |
1202 | fs/aio.c:1770 [aio]sys_io_cancel - "calling\040cancel\012" | 1399 | fs/aio.c:1770 [aio]sys_io_cancel =_ "calling\040cancel\012" |
1203 | 1400 | ||
1204 | Example usage: | 1401 | Example usage: |
1205 | 1402 | ||
diff --git a/lib/Makefile b/lib/Makefile
index d5d175c8a6ca..821a16229111 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -9,20 +9,20 @@ endif | |||
9 | 9 | ||
10 | lib-y := ctype.o string.o vsprintf.o cmdline.o \ | 10 | lib-y := ctype.o string.o vsprintf.o cmdline.o \ |
11 | rbtree.o radix-tree.o dump_stack.o timerqueue.o\ | 11 | rbtree.o radix-tree.o dump_stack.o timerqueue.o\ |
12 | idr.o int_sqrt.o extable.o prio_tree.o \ | 12 | idr.o int_sqrt.o extable.o \ |
13 | sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ | 13 | sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ |
14 | proportions.o prio_heap.o ratelimit.o show_mem.o \ | 14 | proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \ |
15 | is_single_threaded.o plist.o decompress.o find_next_bit.o | 15 | is_single_threaded.o plist.o decompress.o |
16 | 16 | ||
17 | lib-$(CONFIG_MMU) += ioremap.o | 17 | lib-$(CONFIG_MMU) += ioremap.o |
18 | lib-$(CONFIG_SMP) += cpumask.o | 18 | lib-$(CONFIG_SMP) += cpumask.o |
19 | 19 | ||
20 | lib-y += kobject.o kref.o klist.o | 20 | lib-y += kobject.o klist.o |
21 | 21 | ||
22 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 22 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
23 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ | 23 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
24 | string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ | 24 | string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ |
25 | bsearch.o find_last_bit.o | 25 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o |
26 | obj-y += kstrtox.o | 26 | obj-y += kstrtox.o |
27 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o | 27 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o |
28 | 28 | ||
@@ -33,6 +33,7 @@ endif | |||
33 | 33 | ||
34 | lib-$(CONFIG_HOTPLUG) += kobject_uevent.o | 34 | lib-$(CONFIG_HOTPLUG) += kobject_uevent.o |
35 | obj-$(CONFIG_GENERIC_IOMAP) += iomap.o | 35 | obj-$(CONFIG_GENERIC_IOMAP) += iomap.o |
36 | obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o | ||
36 | obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o | 37 | obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o |
37 | obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o | 38 | obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o |
38 | obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o | 39 | obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o |
@@ -89,7 +90,12 @@ obj-$(CONFIG_AUDIT_GENERIC) += audit.o | |||
89 | obj-$(CONFIG_SWIOTLB) += swiotlb.o | 90 | obj-$(CONFIG_SWIOTLB) += swiotlb.o |
90 | obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o | 91 | obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o |
91 | obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o | 92 | obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o |
93 | obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o | ||
92 | obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o | 94 | obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o |
95 | obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o | ||
96 | obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o | ||
97 | obj-$(CONFIG_PSERIES_RECONFIG_NOTIFIER_ERROR_INJECT) += \ | ||
98 | pSeries-reconfig-notifier-error-inject.o | ||
93 | 99 | ||
94 | lib-$(CONFIG_GENERIC_BUG) += bug.o | 100 | lib-$(CONFIG_GENERIC_BUG) += bug.o |
95 | 101 | ||
@@ -115,7 +121,31 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o | |||
115 | 121 | ||
116 | obj-$(CONFIG_CORDIC) += cordic.o | 122 | obj-$(CONFIG_CORDIC) += cordic.o |
117 | 123 | ||
118 | obj-$(CONFIG_LLIST) += llist.o | 124 | obj-$(CONFIG_DQL) += dynamic_queue_limits.o |
125 | |||
126 | obj-$(CONFIG_MPILIB) += mpi/ | ||
127 | obj-$(CONFIG_SIGNATURE) += digsig.o | ||
128 | |||
129 | obj-$(CONFIG_CLZ_TAB) += clz_tab.o | ||
130 | |||
131 | obj-$(CONFIG_DDR) += jedec_ddr_data.o | ||
132 | |||
133 | obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o | ||
134 | obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o | ||
135 | |||
136 | obj-$(CONFIG_STMP_DEVICE) += stmp_device.o | ||
137 | |||
138 | libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o | ||
139 | $(foreach file, $(libfdt_files), \ | ||
140 | $(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt)) | ||
141 | lib-$(CONFIG_LIBFDT) += $(libfdt_files) | ||
142 | |||
143 | obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o | ||
144 | obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o | ||
145 | |||
146 | interval_tree_test-objs := interval_tree_test_main.o interval_tree.o | ||
147 | |||
148 | obj-$(CONFIG_ASN1) += asn1_decoder.o | ||
119 | 149 | ||
120 | hostprogs-y := gen_crc32table | 150 | hostprogs-y := gen_crc32table |
121 | clean-files := crc32table.h | 151 | clean-files := crc32table.h |
@@ -127,3 +157,19 @@ quiet_cmd_crc32 = GEN $@ | |||
127 | 157 | ||
128 | $(obj)/crc32table.h: $(obj)/gen_crc32table | 158 | $(obj)/crc32table.h: $(obj)/gen_crc32table |
129 | $(call cmd,crc32) | 159 | $(call cmd,crc32) |
160 | |||
161 | # | ||
162 | # Build a fast OID lookup registry from include/linux/oid_registry.h | ||
163 | # | ||
164 | obj-$(CONFIG_OID_REGISTRY) += oid_registry.o | ||
165 | |||
166 | $(obj)/oid_registry.c: $(obj)/oid_registry_data.c | ||
167 | |||
168 | $(obj)/oid_registry_data.c: $(srctree)/include/linux/oid_registry.h \ | ||
169 | $(src)/build_OID_registry | ||
170 | $(call cmd,build_OID_registry) | ||
171 | |||
172 | quiet_cmd_build_OID_registry = GEN $@ | ||
173 | cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@ | ||
174 | |||
175 | clean-files += oid_registry_data.c | ||
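For context on what the registry generated above indexes: in BER/DER an object identifier's arcs are packed with the first two arcs in one octet and the remaining arcs base-128 encoded. A standalone sketch of that encoding (generic X.690 rule; the layout of the generated oid_registry_data.c is not shown here and is not assumed):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Encode dotted OID arcs into DER content octets (X.690 8.19):
 * first octet is 40 * arc0 + arc1, remaining arcs are base-128 with
 * the high bit set on every octet except the last. */
static size_t oid_encode(const uint32_t *arcs, size_t n, uint8_t *out)
{
        size_t len = 0;

        out[len++] = (uint8_t)(arcs[0] * 40 + arcs[1]);
        for (size_t i = 2; i < n; i++) {
                uint32_t v = arcs[i];
                uint8_t tmp[5];
                int k = 0;

                do {
                        tmp[k++] = v & 0x7f;
                        v >>= 7;
                } while (v);
                while (k > 1)
                        out[len++] = tmp[--k] | 0x80;
                out[len++] = tmp[0];
        }
        return len;
}

int main(void)
{
        /* 2.5.4.3 (commonName) encodes as 55 04 03 */
        const uint32_t cn[] = { 2, 5, 4, 3 };
        uint8_t buf[16];
        size_t len = oid_encode(cn, 4, buf);

        for (size_t i = 0; i < len; i++)
                printf("%02x ", buf[i]);
        printf("\n");
        return 0;
}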
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 4b1b083f219c..1e9a6cbc3689 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -6,7 +6,7 @@ | |||
6 | #include <linux/ctype.h> | 6 | #include <linux/ctype.h> |
7 | #include <linux/string.h> | 7 | #include <linux/string.h> |
8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
9 | #include <linux/module.h> | 9 | #include <linux/export.h> |
10 | 10 | ||
11 | static const char *skip_arg(const char *cp) | 11 | static const char *skip_arg(const char *cp) |
12 | { | 12 | { |
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
new file mode 100644
index 000000000000..de2c8b5a715b
--- /dev/null
+++ b/lib/asn1_decoder.c
@@ -0,0 +1,487 @@ | |||
1 | /* Decoder for ASN.1 BER/DER/CER encoded bytestream | ||
2 | * | ||
3 | * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/export.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/asn1_decoder.h> | ||
16 | #include <linux/asn1_ber_bytecode.h> | ||
17 | |||
18 | static const unsigned char asn1_op_lengths[ASN1_OP__NR] = { | ||
19 | /* OPC TAG JMP ACT */ | ||
20 | [ASN1_OP_MATCH] = 1 + 1, | ||
21 | [ASN1_OP_MATCH_OR_SKIP] = 1 + 1, | ||
22 | [ASN1_OP_MATCH_ACT] = 1 + 1 + 1, | ||
23 | [ASN1_OP_MATCH_ACT_OR_SKIP] = 1 + 1 + 1, | ||
24 | [ASN1_OP_MATCH_JUMP] = 1 + 1 + 1, | ||
25 | [ASN1_OP_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1, | ||
26 | [ASN1_OP_MATCH_ANY] = 1, | ||
27 | [ASN1_OP_MATCH_ANY_ACT] = 1 + 1, | ||
28 | [ASN1_OP_COND_MATCH_OR_SKIP] = 1 + 1, | ||
29 | [ASN1_OP_COND_MATCH_ACT_OR_SKIP] = 1 + 1 + 1, | ||
30 | [ASN1_OP_COND_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1, | ||
31 | [ASN1_OP_COND_MATCH_ANY] = 1, | ||
32 | [ASN1_OP_COND_MATCH_ANY_ACT] = 1 + 1, | ||
33 | [ASN1_OP_COND_FAIL] = 1, | ||
34 | [ASN1_OP_COMPLETE] = 1, | ||
35 | [ASN1_OP_ACT] = 1 + 1, | ||
36 | [ASN1_OP_RETURN] = 1, | ||
37 | [ASN1_OP_END_SEQ] = 1, | ||
38 | [ASN1_OP_END_SEQ_OF] = 1 + 1, | ||
39 | [ASN1_OP_END_SET] = 1, | ||
40 | [ASN1_OP_END_SET_OF] = 1 + 1, | ||
41 | [ASN1_OP_END_SEQ_ACT] = 1 + 1, | ||
42 | [ASN1_OP_END_SEQ_OF_ACT] = 1 + 1 + 1, | ||
43 | [ASN1_OP_END_SET_ACT] = 1 + 1, | ||
44 | [ASN1_OP_END_SET_OF_ACT] = 1 + 1 + 1, | ||
45 | }; | ||
46 | |||
47 | /* | ||
48 | * Find the length of an indefinite length object | ||
49 | * @data: The data buffer | ||
50 | * @datalen: The end of the innermost containing element in the buffer | ||
51 | * @_dp: The data parse cursor (updated before returning) | ||
52 | * @_len: Where to return the size of the element. | ||
53 | * @_errmsg: Where to return a pointer to an error message on error | ||
54 | */ | ||
55 | static int asn1_find_indefinite_length(const unsigned char *data, size_t datalen, | ||
56 | size_t *_dp, size_t *_len, | ||
57 | const char **_errmsg) | ||
58 | { | ||
59 | unsigned char tag, tmp; | ||
60 | size_t dp = *_dp, len, n; | ||
61 | int indef_level = 1; | ||
62 | |||
63 | next_tag: | ||
64 | if (unlikely(datalen - dp < 2)) { | ||
65 | if (datalen == dp) | ||
66 | goto missing_eoc; | ||
67 | goto data_overrun_error; | ||
68 | } | ||
69 | |||
70 | /* Extract a tag from the data */ | ||
71 | tag = data[dp++]; | ||
72 | if (tag == 0) { | ||
73 | /* It appears to be an EOC. */ | ||
74 | if (data[dp++] != 0) | ||
75 | goto invalid_eoc; | ||
76 | if (--indef_level <= 0) { | ||
77 | *_len = dp - *_dp; | ||
78 | *_dp = dp; | ||
79 | return 0; | ||
80 | } | ||
81 | goto next_tag; | ||
82 | } | ||
83 | |||
84 | if (unlikely((tag & 0x1f) == 0x1f)) { | ||
85 | do { | ||
86 | if (unlikely(datalen - dp < 2)) | ||
87 | goto data_overrun_error; | ||
88 | tmp = data[dp++]; | ||
89 | } while (tmp & 0x80); | ||
90 | } | ||
91 | |||
92 | /* Extract the length */ | ||
93 | len = data[dp++]; | ||
94 | if (len < 0x7f) { | ||
95 | dp += len; | ||
96 | goto next_tag; | ||
97 | } | ||
98 | |||
99 | if (unlikely(len == 0x80)) { | ||
100 | /* Indefinite length */ | ||
101 | if (unlikely((tag & ASN1_CONS_BIT) == ASN1_PRIM << 5)) | ||
102 | goto indefinite_len_primitive; | ||
103 | indef_level++; | ||
104 | goto next_tag; | ||
105 | } | ||
106 | |||
107 | n = len - 0x80; | ||
108 | if (unlikely(n > sizeof(size_t) - 1)) | ||
109 | goto length_too_long; | ||
110 | if (unlikely(n > datalen - dp)) | ||
111 | goto data_overrun_error; | ||
112 | for (len = 0; n > 0; n--) { | ||
113 | len <<= 8; | ||
114 | len |= data[dp++]; | ||
115 | } | ||
116 | dp += len; | ||
117 | goto next_tag; | ||
118 | |||
119 | length_too_long: | ||
120 | *_errmsg = "Unsupported length"; | ||
121 | goto error; | ||
122 | indefinite_len_primitive: | ||
123 | *_errmsg = "Indefinite len primitive not permitted"; | ||
124 | goto error; | ||
125 | invalid_eoc: | ||
126 | *_errmsg = "Invalid length EOC"; | ||
127 | goto error; | ||
128 | data_overrun_error: | ||
129 | *_errmsg = "Data overrun error"; | ||
130 | goto error; | ||
131 | missing_eoc: | ||
132 | *_errmsg = "Missing EOC in indefinite len cons"; | ||
133 | error: | ||
134 | *_dp = dp; | ||
135 | return -1; | ||
136 | } | ||
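The helper above walks nested TLVs to find the end-of-contents octets that close an indefinite-length constructed element. As a standalone illustration of the length-octet handling it relies on (short form, long form, and the 0x80 indefinite marker), a small sketch of the generic BER rules; this is not the API of this file:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Parse BER length octets at data[*dp]. Returns 0 and advances *dp on
 * success; *len is the content length, *indefinite is set for the 0x80
 * "indefinite length" marker (content then runs until an EOC, 00 00). */
static int ber_read_length(const uint8_t *data, size_t datalen, size_t *dp,
                           size_t *len, int *indefinite)
{
        if (*dp >= datalen)
                return -1;

        uint8_t first = data[(*dp)++];

        *indefinite = 0;
        if (first < 0x80) {             /* short form: length in one octet */
                *len = first;
                return 0;
        }
        if (first == 0x80) {            /* indefinite form */
                *indefinite = 1;
                *len = 0;
                return 0;
        }

        size_t n = first & 0x7f;        /* long form: n length octets follow */
        if (n > sizeof(size_t) - 1 || n > datalen - *dp)
                return -1;

        *len = 0;
        while (n--)
                *len = (*len << 8) | data[(*dp)++];
        return 0;
}

int main(void)
{
        /* 0x82 0x01 0x23 => long form, length 0x123 = 291 */
        const uint8_t buf[] = { 0x82, 0x01, 0x23 };
        size_t dp = 0, len;
        int indef;

        if (!ber_read_length(buf, sizeof(buf), &dp, &len, &indef))
                printf("len=%zu indefinite=%d\n", len, indef);
        return 0;
}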
137 | |||
138 | /** | ||
139 | * asn1_ber_decoder - Decoder BER/DER/CER ASN.1 according to pattern | ||
140 | * @decoder: The decoder definition (produced by asn1_compiler) | ||
141 | * @context: The caller's context (to be passed to the action functions) | ||
142 | * @data: The encoded data | ||
143 | * @datasize: The size of the encoded data | ||
144 | * | ||
145 | * Decode BER/DER/CER encoded ASN.1 data according to a bytecode pattern | ||
146 | * produced by asn1_compiler. Action functions are called on marked tags to | ||
147 | * allow the caller to retrieve significant data. | ||
148 | * | ||
149 | * LIMITATIONS: | ||
150 | * | ||
151 | * To keep down the amount of stack used by this function, the following limits | ||
152 | * have been imposed: | ||
153 | * | ||
154 | * (1) This won't handle datalen > 65535 without increasing the size of the | ||
155 | * cons stack elements and length_too_long checking. | ||
156 | * | ||
157 | * (2) The stack of constructed types is 10 deep. If the depth of non-leaf | ||
158 | * constructed types exceeds this, the decode will fail. | ||
159 | * | ||
160 | * (3) The SET type (not the SET OF type) isn't really supported as tracking | ||
161 | * what members of the set have been seen is a pain. | ||
162 | */ | ||
163 | int asn1_ber_decoder(const struct asn1_decoder *decoder, | ||
164 | void *context, | ||
165 | const unsigned char *data, | ||
166 | size_t datalen) | ||
167 | { | ||
168 | const unsigned char *machine = decoder->machine; | ||
169 | const asn1_action_t *actions = decoder->actions; | ||
170 | size_t machlen = decoder->machlen; | ||
171 | enum asn1_opcode op; | ||
172 | unsigned char tag = 0, csp = 0, jsp = 0, optag = 0, hdr = 0; | ||
173 | const char *errmsg; | ||
174 | size_t pc = 0, dp = 0, tdp = 0, len = 0; | ||
175 | int ret; | ||
176 | |||
177 | unsigned char flags = 0; | ||
178 | #define FLAG_INDEFINITE_LENGTH 0x01 | ||
179 | #define FLAG_MATCHED 0x02 | ||
180 | #define FLAG_CONS 0x20 /* Corresponds to CONS bit in the opcode tag | ||
181 | * - ie. whether or not we are going to parse | ||
182 | * a compound type. | ||
183 | */ | ||
184 | |||
185 | #define NR_CONS_STACK 10 | ||
186 | unsigned short cons_dp_stack[NR_CONS_STACK]; | ||
187 | unsigned short cons_datalen_stack[NR_CONS_STACK]; | ||
188 | unsigned char cons_hdrlen_stack[NR_CONS_STACK]; | ||
189 | #define NR_JUMP_STACK 10 | ||
190 | unsigned char jump_stack[NR_JUMP_STACK]; | ||
191 | |||
192 | if (datalen > 65535) | ||
193 | return -EMSGSIZE; | ||
194 | |||
195 | next_op: | ||
196 | pr_debug("next_op: pc=\e[32m%zu\e[m/%zu dp=\e[33m%zu\e[m/%zu C=%d J=%d\n", | ||
197 | pc, machlen, dp, datalen, csp, jsp); | ||
198 | if (unlikely(pc >= machlen)) | ||
199 | goto machine_overrun_error; | ||
200 | op = machine[pc]; | ||
201 | if (unlikely(pc + asn1_op_lengths[op] > machlen)) | ||
202 | goto machine_overrun_error; | ||
203 | |||
204 | /* If this command is meant to match a tag, then do that before | ||
205 | * evaluating the command. | ||
206 | */ | ||
207 | if (op <= ASN1_OP__MATCHES_TAG) { | ||
208 | unsigned char tmp; | ||
209 | |||
210 | /* Skip conditional matches if possible */ | ||
211 | if ((op & ASN1_OP_MATCH__COND && | ||
212 | flags & FLAG_MATCHED) || | ||
213 | dp == datalen) { | ||
214 | pc += asn1_op_lengths[op]; | ||
215 | goto next_op; | ||
216 | } | ||
217 | |||
218 | flags = 0; | ||
219 | hdr = 2; | ||
220 | |||
221 | /* Extract a tag from the data */ | ||
222 | if (unlikely(dp >= datalen - 1)) | ||
223 | goto data_overrun_error; | ||
224 | tag = data[dp++]; | ||
225 | if (unlikely((tag & 0x1f) == 0x1f)) | ||
226 | goto long_tag_not_supported; | ||
227 | |||
228 | if (op & ASN1_OP_MATCH__ANY) { | ||
229 | pr_debug("- any %02x\n", tag); | ||
230 | } else { | ||
231 | /* Extract the tag from the machine | ||
232 | * - Either CONS or PRIM are permitted in the data if | ||
233 | * CONS is not set in the op stream, otherwise CONS | ||
234 | * is mandatory. | ||
235 | */ | ||
236 | optag = machine[pc + 1]; | ||
237 | flags |= optag & FLAG_CONS; | ||
238 | |||
239 | /* Determine whether the tag matched */ | ||
240 | tmp = optag ^ tag; | ||
241 | tmp &= ~(optag & ASN1_CONS_BIT); | ||
242 | pr_debug("- match? %02x %02x %02x\n", tag, optag, tmp); | ||
243 | if (tmp != 0) { | ||
244 | /* All odd-numbered tags are MATCH_OR_SKIP. */ | ||
245 | if (op & ASN1_OP_MATCH__SKIP) { | ||
246 | pc += asn1_op_lengths[op]; | ||
247 | dp--; | ||
248 | goto next_op; | ||
249 | } | ||
250 | goto tag_mismatch; | ||
251 | } | ||
252 | } | ||
253 | flags |= FLAG_MATCHED; | ||
254 | |||
255 | len = data[dp++]; | ||
256 | if (len > 0x7f) { | ||
257 | if (unlikely(len == 0x80)) { | ||
258 | /* Indefinite length */ | ||
259 | if (unlikely(!(tag & ASN1_CONS_BIT))) | ||
260 | goto indefinite_len_primitive; | ||
261 | flags |= FLAG_INDEFINITE_LENGTH; | ||
262 | if (unlikely(2 > datalen - dp)) | ||
263 | goto data_overrun_error; | ||
264 | } else { | ||
265 | int n = len - 0x80; | ||
266 | if (unlikely(n > 2)) | ||
267 | goto length_too_long; | ||
268 | if (unlikely(dp >= datalen - n)) | ||
269 | goto data_overrun_error; | ||
270 | hdr += n; | ||
271 | for (len = 0; n > 0; n--) { | ||
272 | len <<= 8; | ||
273 | len |= data[dp++]; | ||
274 | } | ||
275 | if (unlikely(len > datalen - dp)) | ||
276 | goto data_overrun_error; | ||
277 | } | ||
278 | } | ||
279 | |||
280 | if (flags & FLAG_CONS) { | ||
281 | /* For expected compound forms, we stack the positions | ||
282 | * of the start and end of the data. | ||
283 | */ | ||
284 | if (unlikely(csp >= NR_CONS_STACK)) | ||
285 | goto cons_stack_overflow; | ||
286 | cons_dp_stack[csp] = dp; | ||
287 | cons_hdrlen_stack[csp] = hdr; | ||
288 | if (!(flags & FLAG_INDEFINITE_LENGTH)) { | ||
289 | cons_datalen_stack[csp] = datalen; | ||
290 | datalen = dp + len; | ||
291 | } else { | ||
292 | cons_datalen_stack[csp] = 0; | ||
293 | } | ||
294 | csp++; | ||
295 | } | ||
296 | |||
297 | pr_debug("- TAG: %02x %zu%s\n", | ||
298 | tag, len, flags & FLAG_CONS ? " CONS" : ""); | ||
299 | tdp = dp; | ||
300 | } | ||
301 | |||
302 | /* Decide how to handle the operation */ | ||
303 | switch (op) { | ||
304 | case ASN1_OP_MATCH_ANY_ACT: | ||
305 | case ASN1_OP_COND_MATCH_ANY_ACT: | ||
306 | ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len); | ||
307 | if (ret < 0) | ||
308 | return ret; | ||
309 | goto skip_data; | ||
310 | |||
311 | case ASN1_OP_MATCH_ACT: | ||
312 | case ASN1_OP_MATCH_ACT_OR_SKIP: | ||
313 | case ASN1_OP_COND_MATCH_ACT_OR_SKIP: | ||
314 | ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len); | ||
315 | if (ret < 0) | ||
316 | return ret; | ||
317 | goto skip_data; | ||
318 | |||
319 | case ASN1_OP_MATCH: | ||
320 | case ASN1_OP_MATCH_OR_SKIP: | ||
321 | case ASN1_OP_MATCH_ANY: | ||
322 | case ASN1_OP_COND_MATCH_OR_SKIP: | ||
323 | case ASN1_OP_COND_MATCH_ANY: | ||
324 | skip_data: | ||
325 | if (!(flags & FLAG_CONS)) { | ||
326 | if (flags & FLAG_INDEFINITE_LENGTH) { | ||
327 | ret = asn1_find_indefinite_length( | ||
328 | data, datalen, &dp, &len, &errmsg); | ||
329 | if (ret < 0) | ||
330 | goto error; | ||
331 | } else { | ||
332 | dp += len; | ||
333 | } | ||
334 | pr_debug("- LEAF: %zu\n", len); | ||
335 | } | ||
336 | pc += asn1_op_lengths[op]; | ||
337 | goto next_op; | ||
338 | |||
339 | case ASN1_OP_MATCH_JUMP: | ||
340 | case ASN1_OP_MATCH_JUMP_OR_SKIP: | ||
341 | case ASN1_OP_COND_MATCH_JUMP_OR_SKIP: | ||
342 | pr_debug("- MATCH_JUMP\n"); | ||
343 | if (unlikely(jsp == NR_JUMP_STACK)) | ||
344 | goto jump_stack_overflow; | ||
345 | jump_stack[jsp++] = pc + asn1_op_lengths[op]; | ||
346 | pc = machine[pc + 2]; | ||
347 | goto next_op; | ||
348 | |||
349 | case ASN1_OP_COND_FAIL: | ||
350 | if (unlikely(!(flags & FLAG_MATCHED))) | ||
351 | goto tag_mismatch; | ||
352 | pc += asn1_op_lengths[op]; | ||
353 | goto next_op; | ||
354 | |||
355 | case ASN1_OP_COMPLETE: | ||
356 | if (unlikely(jsp != 0 || csp != 0)) { | ||
357 | pr_err("ASN.1 decoder error: Stacks not empty at completion (%u, %u)\n", | ||
358 | jsp, csp); | ||
359 | return -EBADMSG; | ||
360 | } | ||
361 | return 0; | ||
362 | |||
363 | case ASN1_OP_END_SET: | ||
364 | case ASN1_OP_END_SET_ACT: | ||
365 | if (unlikely(!(flags & FLAG_MATCHED))) | ||
366 | goto tag_mismatch; | ||
367 | case ASN1_OP_END_SEQ: | ||
368 | case ASN1_OP_END_SET_OF: | ||
369 | case ASN1_OP_END_SEQ_OF: | ||
370 | case ASN1_OP_END_SEQ_ACT: | ||
371 | case ASN1_OP_END_SET_OF_ACT: | ||
372 | case ASN1_OP_END_SEQ_OF_ACT: | ||
373 | if (unlikely(csp <= 0)) | ||
374 | goto cons_stack_underflow; | ||
375 | csp--; | ||
376 | tdp = cons_dp_stack[csp]; | ||
377 | hdr = cons_hdrlen_stack[csp]; | ||
378 | len = datalen; | ||
379 | datalen = cons_datalen_stack[csp]; | ||
380 | pr_debug("- end cons t=%zu dp=%zu l=%zu/%zu\n", | ||
381 | tdp, dp, len, datalen); | ||
382 | if (datalen == 0) { | ||
383 | /* Indefinite length - check for the EOC. */ | ||
384 | datalen = len; | ||
385 | if (unlikely(datalen - dp < 2)) | ||
386 | goto data_overrun_error; | ||
387 | if (data[dp++] != 0) { | ||
388 | if (op & ASN1_OP_END__OF) { | ||
389 | dp--; | ||
390 | csp++; | ||
391 | pc = machine[pc + 1]; | ||
392 | pr_debug("- continue\n"); | ||
393 | goto next_op; | ||
394 | } | ||
395 | goto missing_eoc; | ||
396 | } | ||
397 | if (data[dp++] != 0) | ||
398 | goto invalid_eoc; | ||
399 | len = dp - tdp - 2; | ||
400 | } else { | ||
401 | if (dp < len && (op & ASN1_OP_END__OF)) { | ||
402 | datalen = len; | ||
403 | csp++; | ||
404 | pc = machine[pc + 1]; | ||
405 | pr_debug("- continue\n"); | ||
406 | goto next_op; | ||
407 | } | ||
408 | if (dp != len) | ||
409 | goto cons_length_error; | ||
410 | len -= tdp; | ||
411 | pr_debug("- cons len l=%zu d=%zu\n", len, dp - tdp); | ||
412 | } | ||
413 | |||
414 | if (op & ASN1_OP_END__ACT) { | ||
415 | unsigned char act; | ||
416 | if (op & ASN1_OP_END__OF) | ||
417 | act = machine[pc + 2]; | ||
418 | else | ||
419 | act = machine[pc + 1]; | ||
420 | ret = actions[act](context, hdr, 0, data + tdp, len); | ||
421 | } | ||
422 | pc += asn1_op_lengths[op]; | ||
423 | goto next_op; | ||
424 | |||
425 | case ASN1_OP_ACT: | ||
426 | ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len); | ||
427 | pc += asn1_op_lengths[op]; | ||
428 | goto next_op; | ||
429 | |||
430 | case ASN1_OP_RETURN: | ||
431 | if (unlikely(jsp <= 0)) | ||
432 | goto jump_stack_underflow; | ||
433 | pc = jump_stack[--jsp]; | ||
434 | goto next_op; | ||
435 | |||
436 | default: | ||
437 | break; | ||
438 | } | ||
439 | |||
440 | /* Shouldn't reach here */ | ||
441 | pr_err("ASN.1 decoder error: Found reserved opcode (%u)\n", op); | ||
442 | return -EBADMSG; | ||
443 | |||
444 | data_overrun_error: | ||
445 | errmsg = "Data overrun error"; | ||
446 | goto error; | ||
447 | machine_overrun_error: | ||
448 | errmsg = "Machine overrun error"; | ||
449 | goto error; | ||
450 | jump_stack_underflow: | ||
451 | errmsg = "Jump stack underflow"; | ||
452 | goto error; | ||
453 | jump_stack_overflow: | ||
454 | errmsg = "Jump stack overflow"; | ||
455 | goto error; | ||
456 | cons_stack_underflow: | ||
457 | errmsg = "Cons stack underflow"; | ||
458 | goto error; | ||
459 | cons_stack_overflow: | ||
460 | errmsg = "Cons stack overflow"; | ||
461 | goto error; | ||
462 | cons_length_error: | ||
463 | errmsg = "Cons length error"; | ||
464 | goto error; | ||
465 | missing_eoc: | ||
466 | errmsg = "Missing EOC in indefinite len cons"; | ||
467 | goto error; | ||
468 | invalid_eoc: | ||
469 | errmsg = "Invalid length EOC"; | ||
470 | goto error; | ||
471 | length_too_long: | ||
472 | errmsg = "Unsupported length"; | ||
473 | goto error; | ||
474 | indefinite_len_primitive: | ||
475 | errmsg = "Indefinite len primitive not permitted"; | ||
476 | goto error; | ||
477 | tag_mismatch: | ||
478 | errmsg = "Unexpected tag"; | ||
479 | goto error; | ||
480 | long_tag_not_supported: | ||
481 | errmsg = "Long tag not supported"; | ||
482 | error: | ||
483 | pr_debug("\nASN1: %s [m=%zu d=%zu ot=%02x t=%02x l=%zu]\n", | ||
484 | errmsg, pc, dp, optag, tag, len); | ||
485 | return -EBADMSG; | ||
486 | } | ||
487 | EXPORT_SYMBOL_GPL(asn1_ber_decoder); | ||
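The length handling in the decoder above follows BER: a first length octet of 0x00-0x7f is the content length itself, 0x80 announces an indefinite length (permitted only for constructed encodings and terminated by a 00 00 EOC pair), and 0x81/0x82 mean one or two further length octets follow; longer forms are rejected as "Unsupported length". A minimal standalone sketch of just that step, assuming the names ber_read_len/BER_INDEFINITE/BER_ERROR are illustrative and not part of the kernel API:

    #include <stddef.h>

    #define BER_INDEFINITE (-1L)
    #define BER_ERROR      (-2L)

    /* Return the content length, BER_INDEFINITE for the 0x80 form, or
     * BER_ERROR.  *dp is advanced past the length octets on success.
     * Illustrative sketch only, not the kernel's asn1_ber_decoder(). */
    static long ber_read_len(const unsigned char *data, size_t datalen, size_t *dp)
    {
            unsigned long len;
            unsigned int n;

            if (*dp >= datalen)
                    return BER_ERROR;
            len = data[(*dp)++];
            if (len <= 0x7f)
                    return len;                     /* short definite form */
            if (len == 0x80)
                    return BER_INDEFINITE;          /* ends with a 00 00 EOC */
            n = len - 0x80;                         /* long form: n length octets */
            if (n > 2 || *dp + n > datalen)         /* the decoder above caps n at 2 */
                    return BER_ERROR;
            for (len = 0; n > 0; n--)
                    len = (len << 8) | data[(*dp)++];
            return *dp + len <= datalen ? (long)len : BER_ERROR;
    }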
diff --git a/lib/atomic64.c b/lib/atomic64.c index e12ae0dd08a8..978537809d84 100644 --- a/lib/atomic64.c +++ b/lib/atomic64.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/cache.h> | 13 | #include <linux/cache.h> |
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/module.h> | 16 | #include <linux/export.h> |
17 | #include <linux/atomic.h> | 17 | #include <linux/atomic.h> |
18 | 18 | ||
19 | /* | 19 | /* |
@@ -29,11 +29,11 @@ | |||
29 | * Ensure each lock is in a separate cacheline. | 29 | * Ensure each lock is in a separate cacheline. |
30 | */ | 30 | */ |
31 | static union { | 31 | static union { |
32 | spinlock_t lock; | 32 | raw_spinlock_t lock; |
33 | char pad[L1_CACHE_BYTES]; | 33 | char pad[L1_CACHE_BYTES]; |
34 | } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp; | 34 | } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp; |
35 | 35 | ||
36 | static inline spinlock_t *lock_addr(const atomic64_t *v) | 36 | static inline raw_spinlock_t *lock_addr(const atomic64_t *v) |
37 | { | 37 | { |
38 | unsigned long addr = (unsigned long) v; | 38 | unsigned long addr = (unsigned long) v; |
39 | 39 | ||
@@ -45,12 +45,12 @@ static inline spinlock_t *lock_addr(const atomic64_t *v) | |||
45 | long long atomic64_read(const atomic64_t *v) | 45 | long long atomic64_read(const atomic64_t *v) |
46 | { | 46 | { |
47 | unsigned long flags; | 47 | unsigned long flags; |
48 | spinlock_t *lock = lock_addr(v); | 48 | raw_spinlock_t *lock = lock_addr(v); |
49 | long long val; | 49 | long long val; |
50 | 50 | ||
51 | spin_lock_irqsave(lock, flags); | 51 | raw_spin_lock_irqsave(lock, flags); |
52 | val = v->counter; | 52 | val = v->counter; |
53 | spin_unlock_irqrestore(lock, flags); | 53 | raw_spin_unlock_irqrestore(lock, flags); |
54 | return val; | 54 | return val; |
55 | } | 55 | } |
56 | EXPORT_SYMBOL(atomic64_read); | 56 | EXPORT_SYMBOL(atomic64_read); |
@@ -58,34 +58,34 @@ EXPORT_SYMBOL(atomic64_read); | |||
58 | void atomic64_set(atomic64_t *v, long long i) | 58 | void atomic64_set(atomic64_t *v, long long i) |
59 | { | 59 | { |
60 | unsigned long flags; | 60 | unsigned long flags; |
61 | spinlock_t *lock = lock_addr(v); | 61 | raw_spinlock_t *lock = lock_addr(v); |
62 | 62 | ||
63 | spin_lock_irqsave(lock, flags); | 63 | raw_spin_lock_irqsave(lock, flags); |
64 | v->counter = i; | 64 | v->counter = i; |
65 | spin_unlock_irqrestore(lock, flags); | 65 | raw_spin_unlock_irqrestore(lock, flags); |
66 | } | 66 | } |
67 | EXPORT_SYMBOL(atomic64_set); | 67 | EXPORT_SYMBOL(atomic64_set); |
68 | 68 | ||
69 | void atomic64_add(long long a, atomic64_t *v) | 69 | void atomic64_add(long long a, atomic64_t *v) |
70 | { | 70 | { |
71 | unsigned long flags; | 71 | unsigned long flags; |
72 | spinlock_t *lock = lock_addr(v); | 72 | raw_spinlock_t *lock = lock_addr(v); |
73 | 73 | ||
74 | spin_lock_irqsave(lock, flags); | 74 | raw_spin_lock_irqsave(lock, flags); |
75 | v->counter += a; | 75 | v->counter += a; |
76 | spin_unlock_irqrestore(lock, flags); | 76 | raw_spin_unlock_irqrestore(lock, flags); |
77 | } | 77 | } |
78 | EXPORT_SYMBOL(atomic64_add); | 78 | EXPORT_SYMBOL(atomic64_add); |
79 | 79 | ||
80 | long long atomic64_add_return(long long a, atomic64_t *v) | 80 | long long atomic64_add_return(long long a, atomic64_t *v) |
81 | { | 81 | { |
82 | unsigned long flags; | 82 | unsigned long flags; |
83 | spinlock_t *lock = lock_addr(v); | 83 | raw_spinlock_t *lock = lock_addr(v); |
84 | long long val; | 84 | long long val; |
85 | 85 | ||
86 | spin_lock_irqsave(lock, flags); | 86 | raw_spin_lock_irqsave(lock, flags); |
87 | val = v->counter += a; | 87 | val = v->counter += a; |
88 | spin_unlock_irqrestore(lock, flags); | 88 | raw_spin_unlock_irqrestore(lock, flags); |
89 | return val; | 89 | return val; |
90 | } | 90 | } |
91 | EXPORT_SYMBOL(atomic64_add_return); | 91 | EXPORT_SYMBOL(atomic64_add_return); |
@@ -93,23 +93,23 @@ EXPORT_SYMBOL(atomic64_add_return); | |||
93 | void atomic64_sub(long long a, atomic64_t *v) | 93 | void atomic64_sub(long long a, atomic64_t *v) |
94 | { | 94 | { |
95 | unsigned long flags; | 95 | unsigned long flags; |
96 | spinlock_t *lock = lock_addr(v); | 96 | raw_spinlock_t *lock = lock_addr(v); |
97 | 97 | ||
98 | spin_lock_irqsave(lock, flags); | 98 | raw_spin_lock_irqsave(lock, flags); |
99 | v->counter -= a; | 99 | v->counter -= a; |
100 | spin_unlock_irqrestore(lock, flags); | 100 | raw_spin_unlock_irqrestore(lock, flags); |
101 | } | 101 | } |
102 | EXPORT_SYMBOL(atomic64_sub); | 102 | EXPORT_SYMBOL(atomic64_sub); |
103 | 103 | ||
104 | long long atomic64_sub_return(long long a, atomic64_t *v) | 104 | long long atomic64_sub_return(long long a, atomic64_t *v) |
105 | { | 105 | { |
106 | unsigned long flags; | 106 | unsigned long flags; |
107 | spinlock_t *lock = lock_addr(v); | 107 | raw_spinlock_t *lock = lock_addr(v); |
108 | long long val; | 108 | long long val; |
109 | 109 | ||
110 | spin_lock_irqsave(lock, flags); | 110 | raw_spin_lock_irqsave(lock, flags); |
111 | val = v->counter -= a; | 111 | val = v->counter -= a; |
112 | spin_unlock_irqrestore(lock, flags); | 112 | raw_spin_unlock_irqrestore(lock, flags); |
113 | return val; | 113 | return val; |
114 | } | 114 | } |
115 | EXPORT_SYMBOL(atomic64_sub_return); | 115 | EXPORT_SYMBOL(atomic64_sub_return); |
@@ -117,14 +117,14 @@ EXPORT_SYMBOL(atomic64_sub_return); | |||
117 | long long atomic64_dec_if_positive(atomic64_t *v) | 117 | long long atomic64_dec_if_positive(atomic64_t *v) |
118 | { | 118 | { |
119 | unsigned long flags; | 119 | unsigned long flags; |
120 | spinlock_t *lock = lock_addr(v); | 120 | raw_spinlock_t *lock = lock_addr(v); |
121 | long long val; | 121 | long long val; |
122 | 122 | ||
123 | spin_lock_irqsave(lock, flags); | 123 | raw_spin_lock_irqsave(lock, flags); |
124 | val = v->counter - 1; | 124 | val = v->counter - 1; |
125 | if (val >= 0) | 125 | if (val >= 0) |
126 | v->counter = val; | 126 | v->counter = val; |
127 | spin_unlock_irqrestore(lock, flags); | 127 | raw_spin_unlock_irqrestore(lock, flags); |
128 | return val; | 128 | return val; |
129 | } | 129 | } |
130 | EXPORT_SYMBOL(atomic64_dec_if_positive); | 130 | EXPORT_SYMBOL(atomic64_dec_if_positive); |
@@ -132,14 +132,14 @@ EXPORT_SYMBOL(atomic64_dec_if_positive); | |||
132 | long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) | 132 | long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) |
133 | { | 133 | { |
134 | unsigned long flags; | 134 | unsigned long flags; |
135 | spinlock_t *lock = lock_addr(v); | 135 | raw_spinlock_t *lock = lock_addr(v); |
136 | long long val; | 136 | long long val; |
137 | 137 | ||
138 | spin_lock_irqsave(lock, flags); | 138 | raw_spin_lock_irqsave(lock, flags); |
139 | val = v->counter; | 139 | val = v->counter; |
140 | if (val == o) | 140 | if (val == o) |
141 | v->counter = n; | 141 | v->counter = n; |
142 | spin_unlock_irqrestore(lock, flags); | 142 | raw_spin_unlock_irqrestore(lock, flags); |
143 | return val; | 143 | return val; |
144 | } | 144 | } |
145 | EXPORT_SYMBOL(atomic64_cmpxchg); | 145 | EXPORT_SYMBOL(atomic64_cmpxchg); |
@@ -147,13 +147,13 @@ EXPORT_SYMBOL(atomic64_cmpxchg); | |||
147 | long long atomic64_xchg(atomic64_t *v, long long new) | 147 | long long atomic64_xchg(atomic64_t *v, long long new) |
148 | { | 148 | { |
149 | unsigned long flags; | 149 | unsigned long flags; |
150 | spinlock_t *lock = lock_addr(v); | 150 | raw_spinlock_t *lock = lock_addr(v); |
151 | long long val; | 151 | long long val; |
152 | 152 | ||
153 | spin_lock_irqsave(lock, flags); | 153 | raw_spin_lock_irqsave(lock, flags); |
154 | val = v->counter; | 154 | val = v->counter; |
155 | v->counter = new; | 155 | v->counter = new; |
156 | spin_unlock_irqrestore(lock, flags); | 156 | raw_spin_unlock_irqrestore(lock, flags); |
157 | return val; | 157 | return val; |
158 | } | 158 | } |
159 | EXPORT_SYMBOL(atomic64_xchg); | 159 | EXPORT_SYMBOL(atomic64_xchg); |
@@ -161,15 +161,15 @@ EXPORT_SYMBOL(atomic64_xchg); | |||
161 | int atomic64_add_unless(atomic64_t *v, long long a, long long u) | 161 | int atomic64_add_unless(atomic64_t *v, long long a, long long u) |
162 | { | 162 | { |
163 | unsigned long flags; | 163 | unsigned long flags; |
164 | spinlock_t *lock = lock_addr(v); | 164 | raw_spinlock_t *lock = lock_addr(v); |
165 | int ret = 0; | 165 | int ret = 0; |
166 | 166 | ||
167 | spin_lock_irqsave(lock, flags); | 167 | raw_spin_lock_irqsave(lock, flags); |
168 | if (v->counter != u) { | 168 | if (v->counter != u) { |
169 | v->counter += a; | 169 | v->counter += a; |
170 | ret = 1; | 170 | ret = 1; |
171 | } | 171 | } |
172 | spin_unlock_irqrestore(lock, flags); | 172 | raw_spin_unlock_irqrestore(lock, flags); |
173 | return ret; | 173 | return ret; |
174 | } | 174 | } |
175 | EXPORT_SYMBOL(atomic64_add_unless); | 175 | EXPORT_SYMBOL(atomic64_add_unless); |
@@ -179,7 +179,7 @@ static int init_atomic64_lock(void) | |||
179 | int i; | 179 | int i; |
180 | 180 | ||
181 | for (i = 0; i < NR_LOCKS; ++i) | 181 | for (i = 0; i < NR_LOCKS; ++i) |
182 | spin_lock_init(&atomic64_lock[i].lock); | 182 | raw_spin_lock_init(&atomic64_lock[i].lock); |
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
185 | 185 | ||
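On architectures without native 64-bit atomics, the implementation above serialises each operation with one lock taken from a small hashed pool, so unrelated atomic64_t counters rarely share a lock while the memory cost stays fixed. A userspace-flavoured sketch of that address hash; NR_LOCKS and the cache-line shift are illustrative values rather than the kernel's exact constants:

    #include <stdint.h>

    #define NR_LOCKS 16                     /* must be a power of two; illustrative */

    /* Map an atomic64_t address to a lock slot.  Discarding the low,
     * cache-line-offset bits first keeps counters that share a cache line
     * on the same lock; the xor folds in higher address bits cheaply. */
    static unsigned int lock_index(const void *v)
    {
            uintptr_t addr = (uintptr_t)v;

            addr >>= 6;                     /* assumed L1 cache-line shift */
            return (addr ^ (addr >> 8)) & (NR_LOCKS - 1);
    }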
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c index 0c33cde2a1e6..00bca223d1e1 100644 --- a/lib/atomic64_test.c +++ b/lib/atomic64_test.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | */ | 10 | */ |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/bug.h> | ||
12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
13 | #include <linux/atomic.h> | 14 | #include <linux/atomic.h> |
14 | 15 | ||
@@ -113,8 +114,7 @@ static __init int test_atomic64(void) | |||
113 | r += one; | 114 | r += one; |
114 | BUG_ON(v.counter != r); | 115 | BUG_ON(v.counter != r); |
115 | 116 | ||
116 | #if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \ | 117 | #ifdef CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
117 | defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) || defined(CONFIG_ARM) | ||
118 | INIT(onestwos); | 118 | INIT(onestwos); |
119 | BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1)); | 119 | BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1)); |
120 | r -= one; | 120 | r -= one; |
@@ -128,7 +128,7 @@ static __init int test_atomic64(void) | |||
128 | BUG_ON(atomic64_dec_if_positive(&v) != (-one - one)); | 128 | BUG_ON(atomic64_dec_if_positive(&v) != (-one - one)); |
129 | BUG_ON(v.counter != r); | 129 | BUG_ON(v.counter != r); |
130 | #else | 130 | #else |
131 | #warning Please implement atomic64_dec_if_positive for your architecture, and add it to the IF above | 131 | #warning Please implement atomic64_dec_if_positive for your architecture and select the above Kconfig symbol |
132 | #endif | 132 | #endif |
133 | 133 | ||
134 | INIT(onestwos); | 134 | INIT(onestwos); |
diff --git a/lib/average.c b/lib/average.c index 5576c2841496..99a67e662b3c 100644 --- a/lib/average.c +++ b/lib/average.c | |||
@@ -5,8 +5,9 @@ | |||
5 | * Version 2. See the file COPYING for more details. | 5 | * Version 2. See the file COPYING for more details. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/module.h> | 8 | #include <linux/export.h> |
9 | #include <linux/average.h> | 9 | #include <linux/average.h> |
10 | #include <linux/kernel.h> | ||
10 | #include <linux/bug.h> | 11 | #include <linux/bug.h> |
11 | #include <linux/log2.h> | 12 | #include <linux/log2.h> |
12 | 13 | ||
@@ -1,14 +1,14 @@ | |||
1 | #include <linux/bcd.h> | 1 | #include <linux/bcd.h> |
2 | #include <linux/module.h> | 2 | #include <linux/export.h> |
3 | 3 | ||
4 | unsigned bcd2bin(unsigned char val) | 4 | unsigned _bcd2bin(unsigned char val) |
5 | { | 5 | { |
6 | return (val & 0x0f) + (val >> 4) * 10; | 6 | return (val & 0x0f) + (val >> 4) * 10; |
7 | } | 7 | } |
8 | EXPORT_SYMBOL(bcd2bin); | 8 | EXPORT_SYMBOL(_bcd2bin); |
9 | 9 | ||
10 | unsigned char bin2bcd(unsigned val) | 10 | unsigned char _bin2bcd(unsigned val) |
11 | { | 11 | { |
12 | return ((val / 10) << 4) + val % 10; | 12 | return ((val / 10) << 4) + val % 10; |
13 | } | 13 | } |
14 | EXPORT_SYMBOL(bin2bcd); | 14 | EXPORT_SYMBOL(_bin2bcd); |
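The renamed _bcd2bin()/_bin2bcd() helpers still perform plain packed-BCD conversion (one decimal digit per nibble), the format most RTC chips use for their time registers. A small userspace demonstration of the same arithmetic; the register value is made up for illustration:

    #include <stdio.h>

    static unsigned bcd2bin(unsigned char val)
    {
            return (val & 0x0f) + (val >> 4) * 10;
    }

    static unsigned char bin2bcd(unsigned val)
    {
            return ((val / 10) << 4) + val % 10;
    }

    int main(void)
    {
            unsigned char rtc_seconds = 0x59;       /* hypothetical RTC register: BCD 59 */

            printf("%u\n", bcd2bin(rtc_seconds));   /* prints 59 */
            printf("%#x\n", bin2bcd(42));           /* prints 0x42 */
            return 0;
    }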
diff --git a/lib/bitmap.c b/lib/bitmap.c index 2f4412e4d071..06fdfa1aeba7 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c | |||
@@ -5,11 +5,13 @@ | |||
5 | * This source code is licensed under the GNU General Public License, | 5 | * This source code is licensed under the GNU General Public License, |
6 | * Version 2. See the file COPYING for more details. | 6 | * Version 2. See the file COPYING for more details. |
7 | */ | 7 | */ |
8 | #include <linux/module.h> | 8 | #include <linux/export.h> |
9 | #include <linux/thread_info.h> | ||
9 | #include <linux/ctype.h> | 10 | #include <linux/ctype.h> |
10 | #include <linux/errno.h> | 11 | #include <linux/errno.h> |
11 | #include <linux/bitmap.h> | 12 | #include <linux/bitmap.h> |
12 | #include <linux/bitops.h> | 13 | #include <linux/bitops.h> |
14 | #include <linux/bug.h> | ||
13 | #include <asm/uaccess.h> | 15 | #include <asm/uaccess.h> |
14 | 16 | ||
15 | /* | 17 | /* |
@@ -367,7 +369,8 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area); | |||
367 | * @nmaskbits: size of bitmap, in bits | 369 | * @nmaskbits: size of bitmap, in bits |
368 | * | 370 | * |
369 | * Exactly @nmaskbits bits are displayed. Hex digits are grouped into | 371 | * Exactly @nmaskbits bits are displayed. Hex digits are grouped into |
370 | * comma-separated sets of eight digits per set. | 372 | * comma-separated sets of eight digits per set. Returns the number of |
373 | * characters which were written to *buf, excluding the trailing \0. | ||
371 | */ | 374 | */ |
372 | int bitmap_scnprintf(char *buf, unsigned int buflen, | 375 | int bitmap_scnprintf(char *buf, unsigned int buflen, |
373 | const unsigned long *maskp, int nmaskbits) | 376 | const unsigned long *maskp, int nmaskbits) |
@@ -419,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen, | |||
419 | { | 422 | { |
420 | int c, old_c, totaldigits, ndigits, nchunks, nbits; | 423 | int c, old_c, totaldigits, ndigits, nchunks, nbits; |
421 | u32 chunk; | 424 | u32 chunk; |
422 | const char __user *ubuf = buf; | 425 | const char __user __force *ubuf = (const char __user __force *)buf; |
423 | 426 | ||
424 | bitmap_zero(maskp, nmaskbits); | 427 | bitmap_zero(maskp, nmaskbits); |
425 | 428 | ||
@@ -504,7 +507,9 @@ int bitmap_parse_user(const char __user *ubuf, | |||
504 | { | 507 | { |
505 | if (!access_ok(VERIFY_READ, ubuf, ulen)) | 508 | if (!access_ok(VERIFY_READ, ubuf, ulen)) |
506 | return -EFAULT; | 509 | return -EFAULT; |
507 | return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits); | 510 | return __bitmap_parse((const char __force *)ubuf, |
511 | ulen, 1, maskp, nmaskbits); | ||
512 | |||
508 | } | 513 | } |
509 | EXPORT_SYMBOL(bitmap_parse_user); | 514 | EXPORT_SYMBOL(bitmap_parse_user); |
510 | 515 | ||
@@ -513,8 +518,8 @@ EXPORT_SYMBOL(bitmap_parse_user); | |||
513 | * | 518 | * |
514 | * Helper routine for bitmap_scnlistprintf(). Write decimal number | 519 | * Helper routine for bitmap_scnlistprintf(). Write decimal number |
515 | * or range to buf, suppressing output past buf+buflen, with optional | 520 | * or range to buf, suppressing output past buf+buflen, with optional |
516 | * comma-prefix. Return len of what would be written to buf, if it | 521 | * comma-prefix. Return len of what was written to *buf, excluding the |
517 | * all fit. | 522 | * trailing \0. |
518 | */ | 523 | */ |
519 | static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len) | 524 | static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len) |
520 | { | 525 | { |
@@ -540,9 +545,8 @@ static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len) | |||
540 | * the range. Output format is compatible with the format | 545 | * the range. Output format is compatible with the format |
541 | * accepted as input by bitmap_parselist(). | 546 | * accepted as input by bitmap_parselist(). |
542 | * | 547 | * |
543 | * The return value is the number of characters which would be | 548 | * The return value is the number of characters which were written to *buf |
544 | * generated for the given input, excluding the trailing '\0', as | 549 | * excluding the trailing '\0', as per ISO C99's scnprintf. |
545 | * per ISO C99. | ||
546 | */ | 550 | */ |
547 | int bitmap_scnlistprintf(char *buf, unsigned int buflen, | 551 | int bitmap_scnlistprintf(char *buf, unsigned int buflen, |
548 | const unsigned long *maskp, int nmaskbits) | 552 | const unsigned long *maskp, int nmaskbits) |
@@ -594,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, | |||
594 | { | 598 | { |
595 | unsigned a, b; | 599 | unsigned a, b; |
596 | int c, old_c, totaldigits; | 600 | int c, old_c, totaldigits; |
597 | const char __user *ubuf = buf; | 601 | const char __user __force *ubuf = (const char __user __force *)buf; |
598 | int exp_digit, in_range; | 602 | int exp_digit, in_range; |
599 | 603 | ||
600 | totaldigits = c = 0; | 604 | totaldigits = c = 0; |
@@ -694,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf, | |||
694 | { | 698 | { |
695 | if (!access_ok(VERIFY_READ, ubuf, ulen)) | 699 | if (!access_ok(VERIFY_READ, ubuf, ulen)) |
696 | return -EFAULT; | 700 | return -EFAULT; |
697 | return __bitmap_parselist((const char *)ubuf, | 701 | return __bitmap_parselist((const char __force *)ubuf, |
698 | ulen, 1, maskp, nmaskbits); | 702 | ulen, 1, maskp, nmaskbits); |
699 | } | 703 | } |
700 | EXPORT_SYMBOL(bitmap_parselist_user); | 704 | EXPORT_SYMBOL(bitmap_parselist_user); |
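The helpers touched above deal with two textual forms of a bitmap: a hex form (comma-separated groups of eight hex digits) and a list form of decimal bit numbers and ranges such as "0-3,8". A userspace sketch that produces the list form from a single-word mask; it is illustrative only, since kernel bitmaps span multiple words and the kernel helpers return scnprintf-style counts:

    #include <stdio.h>

    /* Render the set bits of 'mask' in the list format accepted by
     * bitmap_parselist(), e.g. 0x010f with nbits=16 gives "0-3,8".
     * Assumes buf is large enough and nbits does not exceed the width of
     * unsigned long; returns the characters written, excluding '\0'. */
    static int mask_to_list(char *buf, unsigned long mask, int nbits)
    {
            int len = 0, bit = 0;

            while (bit < nbits) {
                    int start;

                    if (!(mask & (1UL << bit))) {
                            bit++;
                            continue;
                    }
                    start = bit;
                    while (bit < nbits && (mask & (1UL << bit)))
                            bit++;
                    len += sprintf(buf + len, "%s%d", len ? "," : "", start);
                    if (bit - 1 > start)
                            len += sprintf(buf + len, "-%d", bit - 1);
            }
            return len;
    }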
diff --git a/lib/bsearch.c b/lib/bsearch.c index 5b54758e2afb..e33c179089db 100644 --- a/lib/bsearch.c +++ b/lib/bsearch.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * published by the Free Software Foundation; version 2. | 9 | * published by the Free Software Foundation; version 2. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/export.h> |
13 | #include <linux/bsearch.h> | 13 | #include <linux/bsearch.h> |
14 | 14 | ||
15 | /* | 15 | /* |
diff --git a/lib/btree.c b/lib/btree.c index 2a34392bcecc..f9a484676cb6 100644 --- a/lib/btree.c +++ b/lib/btree.c | |||
@@ -319,8 +319,8 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, | |||
319 | 319 | ||
320 | if (head->height == 0) | 320 | if (head->height == 0) |
321 | return NULL; | 321 | return NULL; |
322 | retry: | ||
323 | longcpy(key, __key, geo->keylen); | 322 | longcpy(key, __key, geo->keylen); |
323 | retry: | ||
324 | dec_key(geo, key); | 324 | dec_key(geo, key); |
325 | 325 | ||
326 | node = head->node; | 326 | node = head->node; |
@@ -351,12 +351,13 @@ retry: | |||
351 | } | 351 | } |
352 | miss: | 352 | miss: |
353 | if (retry_key) { | 353 | if (retry_key) { |
354 | __key = retry_key; | 354 | longcpy(key, retry_key, geo->keylen); |
355 | retry_key = NULL; | 355 | retry_key = NULL; |
356 | goto retry; | 356 | goto retry; |
357 | } | 357 | } |
358 | return NULL; | 358 | return NULL; |
359 | } | 359 | } |
360 | EXPORT_SYMBOL_GPL(btree_get_prev); | ||
360 | 361 | ||
361 | static int getpos(struct btree_geo *geo, unsigned long *node, | 362 | static int getpos(struct btree_geo *geo, unsigned long *node, |
362 | unsigned long *key) | 363 | unsigned long *key) |
@@ -508,6 +509,7 @@ retry: | |||
508 | int btree_insert(struct btree_head *head, struct btree_geo *geo, | 509 | int btree_insert(struct btree_head *head, struct btree_geo *geo, |
509 | unsigned long *key, void *val, gfp_t gfp) | 510 | unsigned long *key, void *val, gfp_t gfp) |
510 | { | 511 | { |
512 | BUG_ON(!val); | ||
511 | return btree_insert_level(head, geo, key, val, 1, gfp); | 513 | return btree_insert_level(head, geo, key, val, 1, gfp); |
512 | } | 514 | } |
513 | EXPORT_SYMBOL_GPL(btree_insert); | 515 | EXPORT_SYMBOL_GPL(btree_insert); |
@@ -169,7 +169,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) | |||
169 | return BUG_TRAP_TYPE_WARN; | 169 | return BUG_TRAP_TYPE_WARN; |
170 | } | 170 | } |
171 | 171 | ||
172 | printk(KERN_EMERG "------------[ cut here ]------------\n"); | 172 | printk(KERN_DEFAULT "------------[ cut here ]------------\n"); |
173 | 173 | ||
174 | if (file) | 174 | if (file) |
175 | printk(KERN_CRIT "kernel BUG at %s:%u!\n", | 175 | printk(KERN_CRIT "kernel BUG at %s:%u!\n", |
diff --git a/lib/build_OID_registry b/lib/build_OID_registry new file mode 100755 index 000000000000..dfbdaab81bc8 --- /dev/null +++ b/lib/build_OID_registry | |||
@@ -0,0 +1,209 @@ | |||
1 | #!/usr/bin/perl -w | ||
2 | # | ||
3 | # Build a static ASN.1 Object Identifier (OID) registry | ||
4 | # | ||
5 | # Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. | ||
6 | # Written by David Howells (dhowells@redhat.com) | ||
7 | # | ||
8 | # This program is free software; you can redistribute it and/or | ||
9 | # modify it under the terms of the GNU General Public Licence | ||
10 | # as published by the Free Software Foundation; either version | ||
11 | # 2 of the Licence, or (at your option) any later version. | ||
12 | # | ||
13 | |||
14 | use strict; | ||
15 | |||
16 | my @names = (); | ||
17 | my @oids = (); | ||
18 | |||
19 | if ($#ARGV != 1) { | ||
20 | print STDERR "Format: ", $0, " <in-h-file> <out-c-file>\n"; | ||
21 | exit(2); | ||
22 | } | ||
23 | |||
24 | # | ||
25 | # Open the file to read from | ||
26 | # | ||
27 | open IN_FILE, "<$ARGV[0]" || die; | ||
28 | while (<IN_FILE>) { | ||
29 | chomp; | ||
30 | if (m!\s+OID_([a-zA-Z][a-zA-Z0-9_]+),\s+/[*]\s+([012][.0-9]*)\s+[*]/!) { | ||
31 | push @names, $1; | ||
32 | push @oids, $2; | ||
33 | } | ||
34 | } | ||
35 | close IN_FILE || die; | ||
36 | |||
37 | # | ||
38 | # Open the files to write into | ||
39 | # | ||
40 | open C_FILE, ">$ARGV[1]" or die; | ||
41 | print C_FILE "/*\n"; | ||
42 | print C_FILE " * Automatically generated by ", $0, ". Do not edit\n"; | ||
43 | print C_FILE " */\n"; | ||
44 | |||
45 | # | ||
46 | # Split the data up into separate lists and also determine the lengths of the | ||
47 | # encoded data arrays. | ||
48 | # | ||
49 | my @indices = (); | ||
50 | my @lengths = (); | ||
51 | my $total_length = 0; | ||
52 | |||
53 | print "Compiling ", $#names + 1, " OIDs\n"; | ||
54 | |||
55 | for (my $i = 0; $i <= $#names; $i++) { | ||
56 | my $name = $names[$i]; | ||
57 | my $oid = $oids[$i]; | ||
58 | |||
59 | my @components = split(/[.]/, $oid); | ||
60 | |||
61 | # Determine the encoded length of this OID | ||
62 | my $size = $#components; | ||
63 | for (my $loop = 2; $loop <= $#components; $loop++) { | ||
64 | my $c = $components[$loop]; | ||
65 | |||
66 | # We will base128 encode the number | ||
67 | my $tmp = ($c == 0) ? 0 : int(log($c)/log(2)); | ||
68 | $tmp = int($tmp / 7); | ||
69 | $size += $tmp; | ||
70 | } | ||
71 | push @lengths, $size; | ||
72 | push @indices, $total_length; | ||
73 | $total_length += $size; | ||
74 | } | ||
75 | |||
76 | # | ||
77 | # Emit the look-up-by-OID index table | ||
78 | # | ||
79 | print C_FILE "\n"; | ||
80 | if ($total_length <= 255) { | ||
81 | print C_FILE "static const unsigned char oid_index[OID__NR + 1] = {\n"; | ||
82 | } else { | ||
83 | print C_FILE "static const unsigned short oid_index[OID__NR + 1] = {\n"; | ||
84 | } | ||
85 | for (my $i = 0; $i <= $#names; $i++) { | ||
86 | print C_FILE "\t[OID_", $names[$i], "] = ", $indices[$i], ",\n" | ||
87 | } | ||
88 | print C_FILE "\t[OID__NR] = ", $total_length, "\n"; | ||
89 | print C_FILE "};\n"; | ||
90 | |||
91 | # | ||
92 | # Encode the OIDs | ||
93 | # | ||
94 | my @encoded_oids = (); | ||
95 | |||
96 | for (my $i = 0; $i <= $#names; $i++) { | ||
97 | my @octets = (); | ||
98 | |||
99 | my @components = split(/[.]/, $oids[$i]); | ||
100 | |||
101 | push @octets, $components[0] * 40 + $components[1]; | ||
102 | |||
103 | for (my $loop = 2; $loop <= $#components; $loop++) { | ||
104 | my $c = $components[$loop]; | ||
105 | |||
106 | # Base128 encode the number | ||
107 | my $tmp = ($c == 0) ? 0 : int(log($c)/log(2)); | ||
108 | $tmp = int($tmp / 7); | ||
109 | |||
110 | for (; $tmp > 0; $tmp--) { | ||
111 | push @octets, (($c >> $tmp * 7) & 0x7f) | 0x80; | ||
112 | } | ||
113 | push @octets, $c & 0x7f; | ||
114 | } | ||
115 | |||
116 | push @encoded_oids, \@octets; | ||
117 | } | ||
118 | |||
119 | # | ||
120 | # Create a hash value for each OID | ||
121 | # | ||
122 | my @hash_values = (); | ||
123 | for (my $i = 0; $i <= $#names; $i++) { | ||
124 | my @octets = @{$encoded_oids[$i]}; | ||
125 | |||
126 | my $hash = $#octets; | ||
127 | foreach (@octets) { | ||
128 | $hash += $_ * 33; | ||
129 | } | ||
130 | |||
131 | $hash = ($hash >> 24) ^ ($hash >> 16) ^ ($hash >> 8) ^ ($hash); | ||
132 | |||
133 | push @hash_values, $hash & 0xff; | ||
134 | } | ||
135 | |||
136 | # | ||
137 | # Emit the OID data | ||
138 | # | ||
139 | print C_FILE "\n"; | ||
140 | print C_FILE "static const unsigned char oid_data[", $total_length, "] = {\n"; | ||
141 | for (my $i = 0; $i <= $#names; $i++) { | ||
142 | my @octets = @{$encoded_oids[$i]}; | ||
143 | print C_FILE "\t"; | ||
144 | print C_FILE $_, ", " foreach (@octets); | ||
145 | print C_FILE "\t// ", $names[$i]; | ||
146 | print C_FILE "\n"; | ||
147 | } | ||
148 | print C_FILE "};\n"; | ||
149 | |||
150 | # | ||
151 | # Build the search index table (ordered by length then hash then content) | ||
152 | # | ||
153 | my @index_table = ( 0 .. $#names ); | ||
154 | |||
155 | @index_table = sort { | ||
156 | my @octets_a = @{$encoded_oids[$a]}; | ||
157 | my @octets_b = @{$encoded_oids[$b]}; | ||
158 | |||
159 | return $hash_values[$a] <=> $hash_values[$b] | ||
160 | if ($hash_values[$a] != $hash_values[$b]); | ||
161 | return $#octets_a <=> $#octets_b | ||
162 | if ($#octets_a != $#octets_b); | ||
163 | for (my $i = $#octets_a; $i >= 0; $i--) { | ||
164 | return $octets_a[$i] <=> $octets_b[$i] | ||
165 | if ($octets_a[$i] != $octets_b[$i]); | ||
166 | } | ||
167 | return 0; | ||
168 | |||
169 | } @index_table; | ||
170 | |||
171 | # | ||
172 | # Emit the search index and hash value table | ||
173 | # | ||
174 | print C_FILE "\n"; | ||
175 | print C_FILE "static const struct {\n"; | ||
176 | print C_FILE "\tunsigned char hash;\n"; | ||
177 | if ($#names <= 255) { | ||
178 | print C_FILE "\tenum OID oid : 8;\n"; | ||
179 | } else { | ||
180 | print C_FILE "\tenum OID oid : 16;\n"; | ||
181 | } | ||
182 | print C_FILE "} oid_search_table[OID__NR] = {\n"; | ||
183 | for (my $i = 0; $i <= $#names; $i++) { | ||
184 | my @octets = @{$encoded_oids[$index_table[$i]]}; | ||
185 | printf(C_FILE "\t[%3u] = { %3u, OID_%-35s }, // ", | ||
186 | $i, | ||
187 | $hash_values[$index_table[$i]], | ||
188 | $names[$index_table[$i]]); | ||
189 | printf C_FILE "%02x", $_ foreach (@octets); | ||
190 | print C_FILE "\n"; | ||
191 | } | ||
192 | print C_FILE "};\n"; | ||
193 | |||
194 | # | ||
195 | # Emit the OID debugging name table | ||
196 | # | ||
197 | #print C_FILE "\n"; | ||
198 | #print C_FILE "const char *const oid_name_table[OID__NR + 1] = {\n"; | ||
199 | # | ||
200 | #for (my $i = 0; $i <= $#names; $i++) { | ||
201 | # print C_FILE "\t\"", $names[$i], "\",\n" | ||
202 | #} | ||
203 | #print C_FILE "\t\"Unknown-OID\"\n"; | ||
204 | #print C_FILE "};\n"; | ||
205 | |||
206 | # | ||
207 | # Polish off | ||
208 | # | ||
209 | close C_FILE or die; | ||
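The script encodes each dotted OID exactly the way the lookup code expects: the first two components are packed into a single octet as 40*a+b, and every later component is emitted base-128, most significant group first, with the continuation bit (0x80) set on all but the last octet. A hedged C equivalent of that per-component step; the function name is illustrative, not kernel API:

    #include <stddef.h>

    /* Base-128 encode one OID component, continuation bit on all but the
     * last octet -- the same scheme the Perl loop above emits.  Returns
     * the octet count; ten octets cover any 64-bit component. */
    static size_t oid_encode_component(unsigned long c, unsigned char *out)
    {
            unsigned char tmp[10];
            size_t n = 0, i;

            do {
                    tmp[n++] = c & 0x7f;
                    c >>= 7;
            } while (c);

            for (i = 0; i < n; i++)
                    out[i] = tmp[n - 1 - i] | (i + 1 < n ? 0x80 : 0);
            return n;
    }

For example, "2.5.4.3" (commonName) starts with the octet 2*40 + 5 = 0x55, followed by the encodings of 4 and 3; a larger component such as 840 becomes the two octets 0x86 0x48.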
diff --git a/lib/check_signature.c b/lib/check_signature.c index fd6af199247b..6b49797980c4 100644 --- a/lib/check_signature.c +++ b/lib/check_signature.c | |||
@@ -1,5 +1,5 @@ | |||
1 | #include <linux/io.h> | 1 | #include <linux/io.h> |
2 | #include <linux/module.h> | 2 | #include <linux/export.h> |
3 | 3 | ||
4 | /** | 4 | /** |
5 | * check_signature - find BIOS signatures | 5 | * check_signature - find BIOS signatures |
diff --git a/lib/checksum.c b/lib/checksum.c index 8df2f91e6d98..12dceb27ff20 100644 --- a/lib/checksum.c +++ b/lib/checksum.c | |||
@@ -32,7 +32,7 @@ | |||
32 | /* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access | 32 | /* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access |
33 | kills, so most of the assembly has to go. */ | 33 | kills, so most of the assembly has to go. */ |
34 | 34 | ||
35 | #include <linux/module.h> | 35 | #include <linux/export.h> |
36 | #include <net/checksum.h> | 36 | #include <net/checksum.h> |
37 | 37 | ||
38 | #include <asm/byteorder.h> | 38 | #include <asm/byteorder.h> |
diff --git a/lib/clz_tab.c b/lib/clz_tab.c new file mode 100644 index 000000000000..7287b4a991a7 --- /dev/null +++ b/lib/clz_tab.c | |||
@@ -0,0 +1,18 @@ | |||
1 | const unsigned char __clz_tab[] = { | ||
2 | 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, | ||
3 | 5, 5, 5, 5, 5, 5, 5, 5, | ||
4 | 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, | ||
5 | 6, 6, 6, 6, 6, 6, 6, 6, | ||
6 | 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, | ||
7 | 7, 7, 7, 7, 7, 7, 7, 7, | ||
8 | 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, | ||
9 | 7, 7, 7, 7, 7, 7, 7, 7, | ||
10 | 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, | ||
11 | 8, 8, 8, 8, 8, 8, 8, 8, | ||
12 | 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, | ||
13 | 8, 8, 8, 8, 8, 8, 8, 8, | ||
14 | 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, | ||
15 | 8, 8, 8, 8, 8, 8, 8, 8, | ||
16 | 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, | ||
17 | 8, 8, 8, 8, 8, 8, 8, 8, | ||
18 | }; | ||
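__clz_tab[i] is the bit width of i (so __clz_tab[1] = 1 and __clz_tab[255] = 8), which lets a count-leading-zeros helper reduce a 32-bit word to one table lookup on its highest non-zero byte. A hedged sketch of that use, in the style of libgcc's helpers rather than any particular kernel caller:

    /* Count leading zeros of a non-zero 32-bit value using the byte-width
     * table above (declared extern here purely for illustration). */
    extern const unsigned char __clz_tab[];

    static int clz32(unsigned int x)
    {
            unsigned int bits;

            if (x >= 1u << 16)
                    bits = (x >= 1u << 24) ? 24 : 16;
            else
                    bits = (x >= 1u << 8) ? 8 : 0;
            return 32 - (bits + __clz_tab[x >> bits]);
    }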
diff --git a/lib/cmdline.c b/lib/cmdline.c index f5f3ad8b62ff..eb6791188cf5 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c | |||
@@ -12,7 +12,7 @@ | |||
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/module.h> | 15 | #include <linux/export.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/string.h> | 17 | #include <linux/string.h> |
18 | 18 | ||
diff --git a/lib/cordic.c b/lib/cordic.c index aa27a88d7e04..6cf477839ebd 100644 --- a/lib/cordic.c +++ b/lib/cordic.c | |||
@@ -96,6 +96,6 @@ struct cordic_iq cordic_calc_iq(s32 theta) | |||
96 | } | 96 | } |
97 | EXPORT_SYMBOL(cordic_calc_iq); | 97 | EXPORT_SYMBOL(cordic_calc_iq); |
98 | 98 | ||
99 | MODULE_DESCRIPTION("Cordic functions"); | 99 | MODULE_DESCRIPTION("CORDIC algorithm"); |
100 | MODULE_AUTHOR("Broadcom Corporation"); | 100 | MODULE_AUTHOR("Broadcom Corporation"); |
101 | MODULE_LICENSE("Dual BSD/GPL"); | 101 | MODULE_LICENSE("Dual BSD/GPL"); |
diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c index 4dc20321b0d5..707ca24f7b18 100644 --- a/lib/cpu-notifier-error-inject.c +++ b/lib/cpu-notifier-error-inject.c | |||
@@ -1,58 +1,45 @@ | |||
1 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
2 | #include <linux/cpu.h> | ||
3 | #include <linux/module.h> | 2 | #include <linux/module.h> |
4 | #include <linux/notifier.h> | 3 | #include <linux/cpu.h> |
5 | 4 | ||
6 | static int priority; | 5 | #include "notifier-error-inject.h" |
7 | static int cpu_up_prepare_error; | ||
8 | static int cpu_down_prepare_error; | ||
9 | 6 | ||
7 | static int priority; | ||
10 | module_param(priority, int, 0); | 8 | module_param(priority, int, 0); |
11 | MODULE_PARM_DESC(priority, "specify cpu notifier priority"); | 9 | MODULE_PARM_DESC(priority, "specify cpu notifier priority"); |
12 | 10 | ||
13 | module_param(cpu_up_prepare_error, int, 0644); | 11 | static struct notifier_err_inject cpu_notifier_err_inject = { |
14 | MODULE_PARM_DESC(cpu_up_prepare_error, | 12 | .actions = { |
15 | "specify error code to inject CPU_UP_PREPARE action"); | 13 | { NOTIFIER_ERR_INJECT_ACTION(CPU_UP_PREPARE) }, |
16 | 14 | { NOTIFIER_ERR_INJECT_ACTION(CPU_UP_PREPARE_FROZEN) }, | |
17 | module_param(cpu_down_prepare_error, int, 0644); | 15 | { NOTIFIER_ERR_INJECT_ACTION(CPU_DOWN_PREPARE) }, |
18 | MODULE_PARM_DESC(cpu_down_prepare_error, | 16 | { NOTIFIER_ERR_INJECT_ACTION(CPU_DOWN_PREPARE_FROZEN) }, |
19 | "specify error code to inject CPU_DOWN_PREPARE action"); | 17 | {} |
20 | |||
21 | static int err_inject_cpu_callback(struct notifier_block *nfb, | ||
22 | unsigned long action, void *hcpu) | ||
23 | { | ||
24 | int err = 0; | ||
25 | |||
26 | switch (action) { | ||
27 | case CPU_UP_PREPARE: | ||
28 | case CPU_UP_PREPARE_FROZEN: | ||
29 | err = cpu_up_prepare_error; | ||
30 | break; | ||
31 | case CPU_DOWN_PREPARE: | ||
32 | case CPU_DOWN_PREPARE_FROZEN: | ||
33 | err = cpu_down_prepare_error; | ||
34 | break; | ||
35 | } | 18 | } |
36 | if (err) | ||
37 | printk(KERN_INFO "Injecting error (%d) at cpu notifier\n", err); | ||
38 | |||
39 | return notifier_from_errno(err); | ||
40 | } | ||
41 | |||
42 | static struct notifier_block err_inject_cpu_notifier = { | ||
43 | .notifier_call = err_inject_cpu_callback, | ||
44 | }; | 19 | }; |
45 | 20 | ||
21 | static struct dentry *dir; | ||
22 | |||
46 | static int err_inject_init(void) | 23 | static int err_inject_init(void) |
47 | { | 24 | { |
48 | err_inject_cpu_notifier.priority = priority; | 25 | int err; |
26 | |||
27 | dir = notifier_err_inject_init("cpu", notifier_err_inject_dir, | ||
28 | &cpu_notifier_err_inject, priority); | ||
29 | if (IS_ERR(dir)) | ||
30 | return PTR_ERR(dir); | ||
31 | |||
32 | err = register_hotcpu_notifier(&cpu_notifier_err_inject.nb); | ||
33 | if (err) | ||
34 | debugfs_remove_recursive(dir); | ||
49 | 35 | ||
50 | return register_hotcpu_notifier(&err_inject_cpu_notifier); | 36 | return err; |
51 | } | 37 | } |
52 | 38 | ||
53 | static void err_inject_exit(void) | 39 | static void err_inject_exit(void) |
54 | { | 40 | { |
55 | unregister_hotcpu_notifier(&err_inject_cpu_notifier); | 41 | unregister_hotcpu_notifier(&cpu_notifier_err_inject.nb); |
42 | debugfs_remove_recursive(dir); | ||
56 | } | 43 | } |
57 | 44 | ||
58 | module_init(err_inject_init); | 45 | module_init(err_inject_init); |
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c index 987acfafeb83..145dec5267c9 100644 --- a/lib/cpu_rmap.c +++ b/lib/cpu_rmap.c | |||
@@ -11,7 +11,7 @@ | |||
11 | #ifdef CONFIG_GENERIC_HARDIRQS | 11 | #ifdef CONFIG_GENERIC_HARDIRQS |
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #endif | 13 | #endif |
14 | #include <linux/module.h> | 14 | #include <linux/export.h> |
15 | 15 | ||
16 | /* | 16 | /* |
17 | * These functions maintain a mapping from CPUs to some ordered set of | 17 | * These functions maintain a mapping from CPUs to some ordered set of |
diff --git a/lib/cpumask.c b/lib/cpumask.c index af3e5817de98..402a54ac35cb 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c | |||
@@ -2,7 +2,7 @@ | |||
2 | #include <linux/kernel.h> | 2 | #include <linux/kernel.h> |
3 | #include <linux/bitops.h> | 3 | #include <linux/bitops.h> |
4 | #include <linux/cpumask.h> | 4 | #include <linux/cpumask.h> |
5 | #include <linux/module.h> | 5 | #include <linux/export.h> |
6 | #include <linux/bootmem.h> | 6 | #include <linux/bootmem.h> |
7 | 7 | ||
8 | int __first_cpu(const cpumask_t *srcp) | 8 | int __first_cpu(const cpumask_t *srcp) |
@@ -26,18 +26,6 @@ int __next_cpu_nr(int n, const cpumask_t *srcp) | |||
26 | EXPORT_SYMBOL(__next_cpu_nr); | 26 | EXPORT_SYMBOL(__next_cpu_nr); |
27 | #endif | 27 | #endif |
28 | 28 | ||
29 | int __any_online_cpu(const cpumask_t *mask) | ||
30 | { | ||
31 | int cpu; | ||
32 | |||
33 | for_each_cpu(cpu, mask) { | ||
34 | if (cpu_online(cpu)) | ||
35 | break; | ||
36 | } | ||
37 | return cpu; | ||
38 | } | ||
39 | EXPORT_SYMBOL(__any_online_cpu); | ||
40 | |||
41 | /** | 29 | /** |
42 | * cpumask_next_and - get the next cpu in *src1p & *src2p | 30 | * cpumask_next_and - get the next cpu in *src1p & *src2p |
43 | * @n: the cpu prior to the place to search (ie. return will be > @n) | 31 | * @n: the cpu prior to the place to search (ie. return will be > @n) |
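The removed __any_online_cpu() simply returned the first CPU present in both the given mask and the online mask, which callers can obtain with cpumask_any_and(mask, cpu_online_mask) instead. A single-word userspace sketch of the same idea; real cpumasks are arbitrary-length bitmaps, so this is only illustrative:

    /* First bit set in both masks, or nbits if there is none -- the
     * convention the cpumask helpers use for "no cpu found".  nbits must
     * not exceed the width of unsigned long in this sketch. */
    static int first_common_bit(unsigned long a, unsigned long b, int nbits)
    {
            int bit;

            for (bit = 0; bit < nbits; bit++)
                    if ((a & b) & (1UL << bit))
                            return bit;
            return nbits;
    }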
diff --git a/lib/crc32.c b/lib/crc32.c index a6e633a48cea..072fbd8234d5 100644 --- a/lib/crc32.c +++ b/lib/crc32.c | |||
@@ -1,4 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin | ||
3 | * cleaned up code to current version of sparse and added the slicing-by-8 | ||
4 | * algorithm to the closely similar existing slicing-by-4 algorithm. | ||
5 | * | ||
2 | * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com> | 6 | * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com> |
3 | * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks! | 7 | * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks! |
4 | * Code was from the public domain, copyright abandoned. Code was | 8 | * Code was from the public domain, copyright abandoned. Code was |
@@ -20,51 +24,60 @@ | |||
20 | * Version 2. See the file COPYING for more details. | 24 | * Version 2. See the file COPYING for more details. |
21 | */ | 25 | */ |
22 | 26 | ||
27 | /* see: Documentation/crc32.txt for a description of algorithms */ | ||
28 | |||
23 | #include <linux/crc32.h> | 29 | #include <linux/crc32.h> |
24 | #include <linux/kernel.h> | ||
25 | #include <linux/module.h> | 30 | #include <linux/module.h> |
26 | #include <linux/compiler.h> | ||
27 | #include <linux/types.h> | 31 | #include <linux/types.h> |
28 | #include <linux/init.h> | ||
29 | #include <linux/atomic.h> | ||
30 | #include "crc32defs.h" | 32 | #include "crc32defs.h" |
31 | #if CRC_LE_BITS == 8 | 33 | |
32 | # define tole(x) __constant_cpu_to_le32(x) | 34 | #if CRC_LE_BITS > 8 |
35 | # define tole(x) ((__force u32) __constant_cpu_to_le32(x)) | ||
33 | #else | 36 | #else |
34 | # define tole(x) (x) | 37 | # define tole(x) (x) |
35 | #endif | 38 | #endif |
36 | 39 | ||
37 | #if CRC_BE_BITS == 8 | 40 | #if CRC_BE_BITS > 8 |
38 | # define tobe(x) __constant_cpu_to_be32(x) | 41 | # define tobe(x) ((__force u32) __constant_cpu_to_be32(x)) |
39 | #else | 42 | #else |
40 | # define tobe(x) (x) | 43 | # define tobe(x) (x) |
41 | #endif | 44 | #endif |
45 | |||
42 | #include "crc32table.h" | 46 | #include "crc32table.h" |
43 | 47 | ||
44 | MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>"); | 48 | MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>"); |
45 | MODULE_DESCRIPTION("Ethernet CRC32 calculations"); | 49 | MODULE_DESCRIPTION("Various CRC32 calculations"); |
46 | MODULE_LICENSE("GPL"); | 50 | MODULE_LICENSE("GPL"); |
47 | 51 | ||
48 | #if CRC_LE_BITS == 8 || CRC_BE_BITS == 8 | 52 | #if CRC_LE_BITS > 8 || CRC_BE_BITS > 8 |
49 | 53 | ||
54 | /* implements slicing-by-4 or slicing-by-8 algorithm */ | ||
50 | static inline u32 | 55 | static inline u32 |
51 | crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) | 56 | crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) |
52 | { | 57 | { |
53 | # ifdef __LITTLE_ENDIAN | 58 | # ifdef __LITTLE_ENDIAN |
54 | # define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8) | 59 | # define DO_CRC(x) crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8) |
55 | # define DO_CRC4 crc = tab[3][(crc) & 255] ^ \ | 60 | # define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \ |
56 | tab[2][(crc >> 8) & 255] ^ \ | 61 | t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255]) |
57 | tab[1][(crc >> 16) & 255] ^ \ | 62 | # define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \ |
58 | tab[0][(crc >> 24) & 255] | 63 | t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255]) |
59 | # else | 64 | # else |
60 | # define DO_CRC(x) crc = tab[0][((crc >> 24) ^ (x)) & 255] ^ (crc << 8) | 65 | # define DO_CRC(x) crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8) |
61 | # define DO_CRC4 crc = tab[0][(crc) & 255] ^ \ | 66 | # define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \ |
62 | tab[1][(crc >> 8) & 255] ^ \ | 67 | t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255]) |
63 | tab[2][(crc >> 16) & 255] ^ \ | 68 | # define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \ |
64 | tab[3][(crc >> 24) & 255] | 69 | t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255]) |
65 | # endif | 70 | # endif |
66 | const u32 *b; | 71 | const u32 *b; |
67 | size_t rem_len; | 72 | size_t rem_len; |
73 | # ifdef CONFIG_X86 | ||
74 | size_t i; | ||
75 | # endif | ||
76 | const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3]; | ||
77 | # if CRC_LE_BITS != 32 | ||
78 | const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7]; | ||
79 | # endif | ||
80 | u32 q; | ||
68 | 81 | ||
69 | /* Align it */ | 82 | /* Align it */ |
70 | if (unlikely((long)buf & 3 && len)) { | 83 | if (unlikely((long)buf & 3 && len)) { |
@@ -72,27 +85,51 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) | |||
72 | DO_CRC(*buf++); | 85 | DO_CRC(*buf++); |
73 | } while ((--len) && ((long)buf)&3); | 86 | } while ((--len) && ((long)buf)&3); |
74 | } | 87 | } |
88 | |||
89 | # if CRC_LE_BITS == 32 | ||
75 | rem_len = len & 3; | 90 | rem_len = len & 3; |
76 | /* load data 32 bits wide, xor data 32 bits wide. */ | ||
77 | len = len >> 2; | 91 | len = len >> 2; |
92 | # else | ||
93 | rem_len = len & 7; | ||
94 | len = len >> 3; | ||
95 | # endif | ||
96 | |||
78 | b = (const u32 *)buf; | 97 | b = (const u32 *)buf; |
98 | # ifdef CONFIG_X86 | ||
99 | --b; | ||
100 | for (i = 0; i < len; i++) { | ||
101 | # else | ||
79 | for (--b; len; --len) { | 102 | for (--b; len; --len) { |
80 | crc ^= *++b; /* use pre increment for speed */ | 103 | # endif |
81 | DO_CRC4; | 104 | q = crc ^ *++b; /* use pre increment for speed */ |
105 | # if CRC_LE_BITS == 32 | ||
106 | crc = DO_CRC4; | ||
107 | # else | ||
108 | crc = DO_CRC8; | ||
109 | q = *++b; | ||
110 | crc ^= DO_CRC4; | ||
111 | # endif | ||
82 | } | 112 | } |
83 | len = rem_len; | 113 | len = rem_len; |
84 | /* And the last few bytes */ | 114 | /* And the last few bytes */ |
85 | if (len) { | 115 | if (len) { |
86 | u8 *p = (u8 *)(b + 1) - 1; | 116 | u8 *p = (u8 *)(b + 1) - 1; |
117 | # ifdef CONFIG_X86 | ||
118 | for (i = 0; i < len; i++) | ||
119 | DO_CRC(*++p); /* use pre increment for speed */ | ||
120 | # else | ||
87 | do { | 121 | do { |
88 | DO_CRC(*++p); /* use pre increment for speed */ | 122 | DO_CRC(*++p); /* use pre increment for speed */ |
89 | } while (--len); | 123 | } while (--len); |
124 | # endif | ||
90 | } | 125 | } |
91 | return crc; | 126 | return crc; |
92 | #undef DO_CRC | 127 | #undef DO_CRC |
93 | #undef DO_CRC4 | 128 | #undef DO_CRC4 |
129 | #undef DO_CRC8 | ||
94 | } | 130 | } |
95 | #endif | 131 | #endif |
132 | |||
96 | /** | 133 | /** |
97 | * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 | 134 | * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 |
98 | * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for | 135 | * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for |
@@ -100,53 +137,68 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) | |||
100 | * @p: pointer to buffer over which CRC is run | 137 | * @p: pointer to buffer over which CRC is run |
101 | * @len: length of buffer @p | 138 | * @len: length of buffer @p |
102 | */ | 139 | */ |
103 | u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len); | 140 | static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p, |
104 | 141 | size_t len, const u32 (*tab)[256], | |
105 | #if CRC_LE_BITS == 1 | 142 | u32 polynomial) |
106 | /* | ||
107 | * In fact, the table-based code will work in this case, but it can be | ||
108 | * simplified by inlining the table in ?: form. | ||
109 | */ | ||
110 | |||
111 | u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) | ||
112 | { | 143 | { |
144 | #if CRC_LE_BITS == 1 | ||
113 | int i; | 145 | int i; |
114 | while (len--) { | 146 | while (len--) { |
115 | crc ^= *p++; | 147 | crc ^= *p++; |
116 | for (i = 0; i < 8; i++) | 148 | for (i = 0; i < 8; i++) |
117 | crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); | 149 | crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0); |
150 | } | ||
151 | # elif CRC_LE_BITS == 2 | ||
152 | while (len--) { | ||
153 | crc ^= *p++; | ||
154 | crc = (crc >> 2) ^ tab[0][crc & 3]; | ||
155 | crc = (crc >> 2) ^ tab[0][crc & 3]; | ||
156 | crc = (crc >> 2) ^ tab[0][crc & 3]; | ||
157 | crc = (crc >> 2) ^ tab[0][crc & 3]; | ||
118 | } | 158 | } |
119 | return crc; | ||
120 | } | ||
121 | #else /* Table-based approach */ | ||
122 | |||
123 | u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) | ||
124 | { | ||
125 | # if CRC_LE_BITS == 8 | ||
126 | const u32 (*tab)[] = crc32table_le; | ||
127 | |||
128 | crc = __cpu_to_le32(crc); | ||
129 | crc = crc32_body(crc, p, len, tab); | ||
130 | return __le32_to_cpu(crc); | ||
131 | # elif CRC_LE_BITS == 4 | 159 | # elif CRC_LE_BITS == 4 |
132 | while (len--) { | 160 | while (len--) { |
133 | crc ^= *p++; | 161 | crc ^= *p++; |
134 | crc = (crc >> 4) ^ crc32table_le[crc & 15]; | 162 | crc = (crc >> 4) ^ tab[0][crc & 15]; |
135 | crc = (crc >> 4) ^ crc32table_le[crc & 15]; | 163 | crc = (crc >> 4) ^ tab[0][crc & 15]; |
136 | } | 164 | } |
137 | return crc; | 165 | # elif CRC_LE_BITS == 8 |
138 | # elif CRC_LE_BITS == 2 | 166 | /* aka Sarwate algorithm */ |
139 | while (len--) { | 167 | while (len--) { |
140 | crc ^= *p++; | 168 | crc ^= *p++; |
141 | crc = (crc >> 2) ^ crc32table_le[crc & 3]; | 169 | crc = (crc >> 8) ^ tab[0][crc & 255]; |
142 | crc = (crc >> 2) ^ crc32table_le[crc & 3]; | ||
143 | crc = (crc >> 2) ^ crc32table_le[crc & 3]; | ||
144 | crc = (crc >> 2) ^ crc32table_le[crc & 3]; | ||
145 | } | 170 | } |
171 | # else | ||
172 | crc = (__force u32) __cpu_to_le32(crc); | ||
173 | crc = crc32_body(crc, p, len, tab); | ||
174 | crc = __le32_to_cpu((__force __le32)crc); | ||
175 | #endif | ||
146 | return crc; | 176 | return crc; |
147 | # endif | 177 | } |
178 | |||
179 | #if CRC_LE_BITS == 1 | ||
180 | u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) | ||
181 | { | ||
182 | return crc32_le_generic(crc, p, len, NULL, CRCPOLY_LE); | ||
183 | } | ||
184 | u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) | ||
185 | { | ||
186 | return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE); | ||
187 | } | ||
188 | #else | ||
189 | u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) | ||
190 | { | ||
191 | return crc32_le_generic(crc, p, len, | ||
192 | (const u32 (*)[256])crc32table_le, CRCPOLY_LE); | ||
193 | } | ||
194 | u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) | ||
195 | { | ||
196 | return crc32_le_generic(crc, p, len, | ||
197 | (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE); | ||
148 | } | 198 | } |
149 | #endif | 199 | #endif |
200 | EXPORT_SYMBOL(crc32_le); | ||
201 | EXPORT_SYMBOL(__crc32c_le); | ||
150 | 202 | ||
151 | /** | 203 | /** |
152 | * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 | 204 | * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 |
@@ -155,317 +207,914 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) | |||
155 | * @p: pointer to buffer over which CRC is run | 207 | * @p: pointer to buffer over which CRC is run |
156 | * @len: length of buffer @p | 208 | * @len: length of buffer @p |
157 | */ | 209 | */ |
158 | u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len); | 210 | static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p, |
159 | 211 | size_t len, const u32 (*tab)[256], | |
160 | #if CRC_BE_BITS == 1 | 212 | u32 polynomial) |
161 | /* | ||
162 | * In fact, the table-based code will work in this case, but it can be | ||
163 | * simplified by inlining the table in ?: form. | ||
164 | */ | ||
165 | |||
166 | u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) | ||
167 | { | 213 | { |
214 | #if CRC_BE_BITS == 1 | ||
168 | int i; | 215 | int i; |
169 | while (len--) { | 216 | while (len--) { |
170 | crc ^= *p++ << 24; | 217 | crc ^= *p++ << 24; |
171 | for (i = 0; i < 8; i++) | 218 | for (i = 0; i < 8; i++) |
172 | crc = | 219 | crc = |
173 | (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : | 220 | (crc << 1) ^ ((crc & 0x80000000) ? polynomial : |
174 | 0); | 221 | 0); |
175 | } | 222 | } |
176 | return crc; | 223 | # elif CRC_BE_BITS == 2 |
177 | } | 224 | while (len--) { |
178 | 225 | crc ^= *p++ << 24; | |
179 | #else /* Table-based approach */ | 226 | crc = (crc << 2) ^ tab[0][crc >> 30]; |
180 | u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) | 227 | crc = (crc << 2) ^ tab[0][crc >> 30]; |
181 | { | 228 | crc = (crc << 2) ^ tab[0][crc >> 30]; |
182 | # if CRC_BE_BITS == 8 | 229 | crc = (crc << 2) ^ tab[0][crc >> 30]; |
183 | const u32 (*tab)[] = crc32table_be; | 230 | } |
184 | |||
185 | crc = __cpu_to_be32(crc); | ||
186 | crc = crc32_body(crc, p, len, tab); | ||
187 | return __be32_to_cpu(crc); | ||
188 | # elif CRC_BE_BITS == 4 | 231 | # elif CRC_BE_BITS == 4 |
189 | while (len--) { | 232 | while (len--) { |
190 | crc ^= *p++ << 24; | 233 | crc ^= *p++ << 24; |
191 | crc = (crc << 4) ^ crc32table_be[crc >> 28]; | 234 | crc = (crc << 4) ^ tab[0][crc >> 28]; |
192 | crc = (crc << 4) ^ crc32table_be[crc >> 28]; | 235 | crc = (crc << 4) ^ tab[0][crc >> 28]; |
193 | } | 236 | } |
194 | return crc; | 237 | # elif CRC_BE_BITS == 8 |
195 | # elif CRC_BE_BITS == 2 | ||
196 | while (len--) { | 238 | while (len--) { |
197 | crc ^= *p++ << 24; | 239 | crc ^= *p++ << 24; |
198 | crc = (crc << 2) ^ crc32table_be[crc >> 30]; | 240 | crc = (crc << 8) ^ tab[0][crc >> 24]; |
199 | crc = (crc << 2) ^ crc32table_be[crc >> 30]; | ||
200 | crc = (crc << 2) ^ crc32table_be[crc >> 30]; | ||
201 | crc = (crc << 2) ^ crc32table_be[crc >> 30]; | ||
202 | } | 241 | } |
203 | return crc; | 242 | # else |
243 | crc = (__force u32) __cpu_to_be32(crc); | ||
244 | crc = crc32_body(crc, p, len, tab); | ||
245 | crc = __be32_to_cpu((__force __be32)crc); | ||
204 | # endif | 246 | # endif |
247 | return crc; | ||
205 | } | 248 | } |
206 | #endif | ||
207 | 249 | ||
208 | EXPORT_SYMBOL(crc32_le); | 250 | #if CRC_LE_BITS == 1 |
251 | u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) | ||
252 | { | ||
253 | return crc32_be_generic(crc, p, len, NULL, CRCPOLY_BE); | ||
254 | } | ||
255 | #else | ||
256 | u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) | ||
257 | { | ||
258 | return crc32_be_generic(crc, p, len, | ||
259 | (const u32 (*)[256])crc32table_be, CRCPOLY_BE); | ||
260 | } | ||
261 | #endif | ||
209 | EXPORT_SYMBOL(crc32_be); | 262 | EXPORT_SYMBOL(crc32_be); |
210 | 263 | ||
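For reference, the CRC_LE_BITS == 8 branch of crc32_le_generic() above is the classic one-table-lookup-per-byte (Sarwate) form. A self-contained userspace sketch of that case, generating the table at run time instead of including the pre-computed crc32table.h; the polynomial is the usual little-endian Ethernet one:

    #include <stdint.h>
    #include <stddef.h>

    #define CRCPOLY_LE 0xedb88320u          /* bit-reversed 0x04c11db7 */

    static uint32_t crc_table[256];

    static void crc32_table_init(void)
    {
            uint32_t crc;
            int i, j;

            for (i = 0; i < 256; i++) {
                    crc = i;
                    for (j = 0; j < 8; j++)
                            crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
                    crc_table[i] = crc;
            }
    }

    /* Same per-byte step as the tab[0] lookup in crc32_le_generic(). */
    static uint32_t crc32_le_sarwate(uint32_t crc, const unsigned char *p, size_t len)
    {
            while (len--)
                    crc = (crc >> 8) ^ crc_table[(crc ^ *p++) & 255];
            return crc;
    }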
211 | /* | 264 | #ifdef CONFIG_CRC32_SELFTEST |
212 | * A brief CRC tutorial. | ||
213 | * | ||
214 | * A CRC is a long-division remainder. You add the CRC to the message, | ||
215 | * and the whole thing (message+CRC) is a multiple of the given | ||
216 | * CRC polynomial. To check the CRC, you can either check that the | ||
217 | * CRC matches the recomputed value, *or* you can check that the | ||
218 | * remainder computed on the message+CRC is 0. This latter approach | ||
219 | * is used by a lot of hardware implementations, and is why so many | ||
220 | * protocols put the end-of-frame flag after the CRC. | ||
221 | * | ||
222 | * It's actually the same long division you learned in school, except that | ||
223 | * - We're working in binary, so the digits are only 0 and 1, and | ||
224 | * - When dividing polynomials, there are no carries. Rather than add and | ||
225 | * subtract, we just xor. Thus, we tend to get a bit sloppy about | ||
226 | * the difference between adding and subtracting. | ||
227 | * | ||
228 | * A 32-bit CRC polynomial is actually 33 bits long. But since it's | ||
229 | * 33 bits long, bit 32 is always going to be set, so usually the CRC | ||
230 | * is written in hex with the most significant bit omitted. (If you're | ||
231 | * familiar with the IEEE 754 floating-point format, it's the same idea.) | ||
232 | * | ||
233 | * Note that a CRC is computed over a string of *bits*, so you have | ||
234 | * to decide on the endianness of the bits within each byte. To get | ||
235 | * the best error-detecting properties, this should correspond to the | ||
236 | * order they're actually sent. For example, standard RS-232 serial is | ||
237 | * little-endian; the most significant bit (sometimes used for parity) | ||
238 | * is sent last. And when appending a CRC word to a message, you should | ||
239 | * do it in the right order, matching the endianness. | ||
240 | * | ||
241 | * Just like with ordinary division, the remainder is always smaller than | ||
242 | * the divisor (the CRC polynomial) you're dividing by. Each step of the | ||
243 | * division, you take one more digit (bit) of the dividend and append it | ||
244 | * to the current remainder. Then you figure out the appropriate multiple | ||
245 | * of the divisor to subtract to bring the remainder back into range. | ||
246 | * In binary, it's easy - it has to be either 0 or 1, and to make the | ||
247 | * XOR cancel, it's just a copy of bit 32 of the remainder. | ||
248 | * | ||
249 | * When computing a CRC, we don't care about the quotient, so we can | ||
250 | * throw the quotient bit away, but subtract the appropriate multiple of | ||
251 | * the polynomial from the remainder and we're back to where we started, | ||
252 | * ready to process the next bit. | ||
253 | * | ||
254 | * A big-endian CRC written this way would be coded like: | ||
255 | * for (i = 0; i < input_bits; i++) { | ||
256 | * multiple = remainder & 0x80000000 ? CRCPOLY : 0; | ||
257 | * remainder = (remainder << 1 | next_input_bit()) ^ multiple; | ||
258 | * } | ||
259 | * Notice how, to get at bit 32 of the shifted remainder, we look | ||
260 | * at bit 31 of the remainder *before* shifting it. | ||
261 | * | ||
262 | * But also notice how the next_input_bit() bits we're shifting into | ||
263 | * the remainder don't actually affect any decision-making until | ||
264 | * 32 bits later. Thus, the first 32 cycles of this are pretty boring. | ||
265 | * Also, to add the CRC to a message, we need a 32-bit-long hole for it at | ||
266 | * the end, so we have to add 32 extra cycles shifting in zeros at the | ||
267 | * end of every message. | ||
268 | * | ||
269 | * So the standard trick is to rearrange merging in the next_input_bit() | ||
270 | * until the moment it's needed. Then the first 32 cycles can be precomputed, | ||
271 | * and merging in the final 32 zero bits to make room for the CRC can be | ||
272 | * skipped entirely. | ||
273 | * This changes the code to: | ||
274 | * for (i = 0; i < input_bits; i++) { | ||
275 | * remainder ^= next_input_bit() << 31; | ||
276 | * multiple = (remainder & 0x80000000) ? CRCPOLY : 0; | ||
277 | * remainder = (remainder << 1) ^ multiple; | ||
278 | * } | ||
279 | * With this optimization, the little-endian code is simpler: | ||
280 | * for (i = 0; i < input_bits; i++) { | ||
281 | * remainder ^= next_input_bit(); | ||
282 | * multiple = (remainder & 1) ? CRCPOLY : 0; | ||
283 | * remainder = (remainder >> 1) ^ multiple; | ||
284 | * } | ||
285 | * | ||
286 | * Note that the other details of endianness have been hidden in CRCPOLY | ||
287 | * (which must be bit-reversed) and next_input_bit(). | ||
288 | * | ||
289 | * However, as long as next_input_bit is returning the bits in a sensible | ||
290 | * order, we can actually do the merging 8 or more bits at a time rather | ||
291 | * than one bit at a time: | ||
292 | * for (i = 0; i < input_bytes; i++) { | ||
293 | * remainder ^= next_input_byte() << 24; | ||
294 | * for (j = 0; j < 8; j++) { | ||
295 | * multiple = (remainder & 0x80000000) ? CRCPOLY : 0; | ||
296 | * remainder = (remainder << 1) ^ multiple; | ||
297 | * } | ||
298 | * } | ||
299 | * Or in little-endian: | ||
300 | * for (i = 0; i < input_bytes; i++) { | ||
301 | * remainder ^= next_input_byte(); | ||
302 | * for (j = 0; j < 8; j++) { | ||
303 | * multiple = (remainder & 1) ? CRCPOLY : 0; | ||
304 | * remainder = (remainder >> 1) ^ multiple; | ||
305 | * } | ||
306 | * } | ||
307 | * If the input is a multiple of 32 bits, you can even XOR in a 32-bit | ||
308 | * word at a time and increase the inner loop count to 32. | ||
309 | * | ||
310 | * You can also mix and match the two loop styles, for example doing the | ||
311 | * bulk of a message byte-at-a-time and adding bit-at-a-time processing | ||
312 | * for any fractional bytes at the end. | ||
313 | * | ||
314 | * The only remaining optimization is to the byte-at-a-time table method. | ||
315 | * Here, rather than just shifting one bit of the remainder to decide | ||
316 | * on the correct multiple to subtract, we can shift a byte at a time. | ||
317 | * This produces a 40-bit (rather than a 33-bit) intermediate remainder, | ||
318 | * but again the multiple of the polynomial to subtract depends only on | ||
319 | * the high bits, the high 8 bits in this case. | ||
320 | * | ||
321 | * The multiple we need in that case is the low 32 bits of a 40-bit | ||
322 | * value whose high 8 bits are given, and which is a multiple of the | ||
323 | * generator polynomial. This is simply the CRC-32 of the given | ||
324 | * one-byte message. | ||
325 | * | ||
326 | * Two more details: normally, appending zero bits to a message which | ||
327 | * is already a multiple of a polynomial produces a larger multiple of that | ||
328 | * polynomial. To enable a CRC to detect this condition, it's common to | ||
329 | * invert the CRC before appending it. This makes the remainder of the | ||
330 | * message+crc come out not as zero, but some fixed non-zero value. | ||
331 | * | ||
332 | * The same problem applies to zero bits prepended to the message, and | ||
333 | * a similar solution is used. Instead of starting with a remainder of | ||
334 | * 0, an initial remainder of all ones is used. As long as you start | ||
335 | * the same way on decoding, it doesn't make a difference. | ||
336 | */ | ||
337 | |||
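The tutorial above is easiest to follow with a small stand-alone sketch. The snippet below is an illustration only, not the kernel's implementation: it builds the 256-entry little-endian table exactly as the tutorial describes (each entry is the raw CRC of a one-byte message) and then runs the byte-at-a-time loop. The all-ones initial remainder and the final inversion are the usual CRC-32 convention discussed at the end of the tutorial; the kernel's crc32_le() leaves that conditioning to the caller.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define CRCPOLY_LE 0xedb88320u	/* bit-reversed CRC-32 polynomial, as above */

static uint32_t crc32_le_table[256];

/* Each table entry is the CRC of the one-byte message i, computed bit by
 * bit with the little-endian loop from the tutorial (shift right, xor in
 * the polynomial when the low bit is set). */
static void crc32_le_table_init(void)
{
	for (uint32_t i = 0; i < 256; i++) {
		uint32_t rem = i;
		for (int j = 0; j < 8; j++)
			rem = (rem >> 1) ^ ((rem & 1) ? CRCPOLY_LE : 0);
		crc32_le_table[i] = rem;
	}
}

/* Byte-at-a-time update: merge the next byte into the low bits of the
 * remainder, then subtract the table-selected multiple of the polynomial.
 * The ~ at entry and exit is the inversion convention that guards against
 * leading/trailing zero bytes. */
static uint32_t crc32_ieee(const unsigned char *p, size_t len)
{
	uint32_t rem = 0xffffffffu;

	while (len--)
		rem = (rem >> 8) ^ crc32_le_table[(rem ^ *p++) & 0xff];
	return ~rem;
}

int main(void)
{
	crc32_le_table_init();
	/* "123456789" is the conventional CRC check string; the standard
	 * CRC-32 result for it is 0xcbf43926. */
	printf("%08x\n", crc32_ieee((const unsigned char *)"123456789", 9));
	return 0;
}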
338 | #ifdef UNITTEST | ||
339 | 265 | ||
340 | #include <stdlib.h> | 266 | /* 4096 random bytes */ |
341 | #include <stdio.h> | 267 | static u8 __attribute__((__aligned__(8))) test_buf[] = |
268 | { | ||
269 | 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30, | ||
270 | 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4, | ||
271 | 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60, | ||
272 | 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c, | ||
273 | 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4, | ||
274 | 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a, | ||
275 | 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a, | ||
276 | 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4, | ||
277 | 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9, | ||
278 | 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4, | ||
279 | 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca, | ||
280 | 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61, | ||
281 | 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e, | ||
282 | 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a, | ||
283 | 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f, | ||
284 | 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd, | ||
285 | 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c, | ||
286 | 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88, | ||
287 | 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53, | ||
288 | 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f, | ||
289 | 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4, | ||
290 | 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74, | ||
291 | 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60, | ||
292 | 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09, | ||
293 | 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07, | ||
294 | 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1, | ||
295 | 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f, | ||
296 | 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2, | ||
297 | 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0, | ||
298 | 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95, | ||
299 | 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22, | ||
300 | 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93, | ||
301 | 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86, | ||
302 | 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d, | ||
303 | 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40, | ||
304 | 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b, | ||
305 | 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35, | ||
306 | 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40, | ||
307 | 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63, | ||
308 | 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b, | ||
309 | 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8, | ||
310 | 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72, | ||
311 | 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86, | ||
312 | 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff, | ||
313 | 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed, | ||
314 | 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c, | ||
315 | 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed, | ||
316 | 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30, | ||
317 | 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99, | ||
318 | 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4, | ||
319 | 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80, | ||
320 | 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37, | ||
321 | 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04, | ||
322 | 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e, | ||
323 | 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd, | ||
324 | 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c, | ||
325 | 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09, | ||
326 | 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb, | ||
327 | 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b, | ||
328 | 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53, | ||
329 | 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b, | ||
330 | 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f, | ||
331 | 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff, | ||
332 | 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40, | ||
333 | 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6, | ||
334 | 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb, | ||
335 | 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73, | ||
336 | 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f, | ||
337 | 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4, | ||
338 | 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66, | ||
339 | 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1, | ||
340 | 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80, | ||
341 | 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f, | ||
342 | 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5, | ||
343 | 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7, | ||
344 | 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce, | ||
345 | 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff, | ||
346 | 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48, | ||
347 | 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26, | ||
348 | 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72, | ||
349 | 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88, | ||
350 | 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9, | ||
351 | 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc, | ||
352 | 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8, | ||
353 | 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09, | ||
354 | 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8, | ||
355 | 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c, | ||
356 | 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48, | ||
357 | 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d, | ||
358 | 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f, | ||
359 | 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae, | ||
360 | 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97, | ||
361 | 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8, | ||
362 | 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75, | ||
363 | 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc, | ||
364 | 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27, | ||
365 | 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf, | ||
366 | 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7, | ||
367 | 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0, | ||
368 | 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8, | ||
369 | 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c, | ||
370 | 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44, | ||
371 | 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54, | ||
372 | 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38, | ||
373 | 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f, | ||
374 | 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b, | ||
375 | 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7, | ||
376 | 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef, | ||
377 | 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e, | ||
378 | 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c, | ||
379 | 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c, | ||
380 | 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0, | ||
381 | 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37, | ||
382 | 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf, | ||
383 | 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e, | ||
384 | 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4, | ||
385 | 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60, | ||
386 | 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe, | ||
387 | 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61, | ||
388 | 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3, | ||
389 | 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe, | ||
390 | 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40, | ||
391 | 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec, | ||
392 | 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f, | ||
393 | 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7, | ||
394 | 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79, | ||
395 | 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c, | ||
396 | 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f, | ||
397 | 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21, | ||
398 | 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9, | ||
399 | 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30, | ||
400 | 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b, | ||
401 | 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee, | ||
402 | 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6, | ||
403 | 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3, | ||
404 | 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09, | ||
405 | 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd, | ||
406 | 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f, | ||
407 | 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9, | ||
408 | 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc, | ||
409 | 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59, | ||
410 | 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60, | ||
411 | 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5, | ||
412 | 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1, | ||
413 | 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8, | ||
414 | 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9, | ||
415 | 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab, | ||
416 | 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80, | ||
417 | 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01, | ||
418 | 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e, | ||
419 | 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d, | ||
420 | 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35, | ||
421 | 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38, | ||
422 | 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a, | ||
423 | 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac, | ||
424 | 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca, | ||
425 | 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57, | ||
426 | 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed, | ||
427 | 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20, | ||
428 | 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef, | ||
429 | 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c, | ||
430 | 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a, | ||
431 | 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64, | ||
432 | 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4, | ||
433 | 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54, | ||
434 | 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16, | ||
435 | 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26, | ||
436 | 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc, | ||
437 | 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87, | ||
438 | 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60, | ||
439 | 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d, | ||
440 | 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54, | ||
441 | 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13, | ||
442 | 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59, | ||
443 | 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb, | ||
444 | 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f, | ||
445 | 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15, | ||
446 | 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78, | ||
447 | 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93, | ||
448 | 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e, | ||
449 | 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31, | ||
450 | 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1, | ||
451 | 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37, | ||
452 | 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15, | ||
453 | 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78, | ||
454 | 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f, | ||
455 | 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31, | ||
456 | 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f, | ||
457 | 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc, | ||
458 | 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9, | ||
459 | 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3, | ||
460 | 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe, | ||
461 | 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4, | ||
462 | 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24, | ||
463 | 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1, | ||
464 | 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85, | ||
465 | 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8, | ||
466 | 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09, | ||
467 | 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c, | ||
468 | 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46, | ||
469 | 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5, | ||
470 | 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39, | ||
471 | 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2, | ||
472 | 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc, | ||
473 | 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35, | ||
474 | 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde, | ||
475 | 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80, | ||
476 | 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15, | ||
477 | 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63, | ||
478 | 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58, | ||
479 | 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d, | ||
480 | 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf, | ||
481 | 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12, | ||
482 | 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c, | ||
483 | 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b, | ||
484 | 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1, | ||
485 | 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6, | ||
486 | 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73, | ||
487 | 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9, | ||
488 | 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e, | ||
489 | 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22, | ||
490 | 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb, | ||
491 | 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2, | ||
492 | 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c, | ||
493 | 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c, | ||
494 | 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93, | ||
495 | 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f, | ||
496 | 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38, | ||
497 | 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57, | ||
498 | 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03, | ||
499 | 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90, | ||
500 | 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8, | ||
501 | 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4, | ||
502 | 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36, | ||
503 | 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7, | ||
504 | 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47, | ||
505 | 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46, | ||
506 | 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73, | ||
507 | 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72, | ||
508 | 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23, | ||
509 | 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a, | ||
510 | 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58, | ||
511 | 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f, | ||
512 | 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96, | ||
513 | 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9, | ||
514 | 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b, | ||
515 | 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c, | ||
516 | 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef, | ||
517 | 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3, | ||
518 | 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4, | ||
519 | 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f, | ||
520 | 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17, | ||
521 | 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18, | ||
522 | 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8, | ||
523 | 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98, | ||
524 | 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42, | ||
525 | 0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97, | ||
526 | 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97, | ||
527 | 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1, | ||
528 | 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77, | ||
529 | 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb, | ||
530 | 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c, | ||
531 | 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb, | ||
532 | 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56, | ||
533 | 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04, | ||
534 | 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48, | ||
535 | 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe, | ||
536 | 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d, | ||
537 | 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97, | ||
538 | 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8, | ||
539 | 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f, | ||
540 | 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e, | ||
541 | 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca, | ||
542 | 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44, | ||
543 | 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f, | ||
544 | 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6, | ||
545 | 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63, | ||
546 | 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19, | ||
547 | 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58, | ||
548 | 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b, | ||
549 | 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28, | ||
550 | 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf, | ||
551 | 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6, | ||
552 | 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3, | ||
553 | 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe, | ||
554 | 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f, | ||
555 | 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf, | ||
556 | 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9, | ||
557 | 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e, | ||
558 | 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7, | ||
559 | 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70, | ||
560 | 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0, | ||
561 | 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d, | ||
562 | 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4, | ||
563 | 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5, | ||
564 | 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85, | ||
565 | 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc, | ||
566 | 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f, | ||
567 | 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56, | ||
568 | 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb, | ||
569 | 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b, | ||
570 | 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5, | ||
571 | 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03, | ||
572 | 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23, | ||
573 | 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03, | ||
574 | 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87, | ||
575 | 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4, | ||
576 | 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43, | ||
577 | 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11, | ||
578 | 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40, | ||
579 | 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59, | ||
580 | 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9, | ||
581 | 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30, | ||
582 | 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd, | ||
583 | 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45, | ||
584 | 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83, | ||
585 | 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b, | ||
586 | 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5, | ||
587 | 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3, | ||
588 | 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84, | ||
589 | 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8, | ||
590 | 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34, | ||
591 | 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b, | ||
592 | 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31, | ||
593 | 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b, | ||
594 | 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40, | ||
595 | 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b, | ||
596 | 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e, | ||
597 | 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38, | ||
598 | 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb, | ||
599 | 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2, | ||
600 | 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c, | ||
601 | 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1, | ||
602 | 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc, | ||
603 | 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec, | ||
604 | 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34, | ||
605 | 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95, | ||
606 | 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92, | ||
607 | 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f, | ||
608 | 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c, | ||
609 | 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b, | ||
610 | 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c, | ||
611 | 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5, | ||
612 | 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb, | ||
613 | 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4, | ||
614 | 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9, | ||
615 | 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4, | ||
616 | 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41, | ||
617 | 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a, | ||
618 | 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8, | ||
619 | 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06, | ||
620 | 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62, | ||
621 | 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47, | ||
622 | 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4, | ||
623 | 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00, | ||
624 | 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67, | ||
625 | 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81, | ||
626 | 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0, | ||
627 | 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10, | ||
628 | 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79, | ||
629 | 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19, | ||
630 | 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8, | ||
631 | 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1, | ||
632 | 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83, | ||
633 | 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86, | ||
634 | 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55, | ||
635 | 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66, | ||
636 | 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0, | ||
637 | 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49, | ||
638 | 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea, | ||
639 | 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24, | ||
640 | 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e, | ||
641 | 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88, | ||
642 | 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87, | ||
643 | 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34, | ||
644 | 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f, | ||
645 | 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a, | ||
646 | 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a, | ||
647 | 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93, | ||
648 | 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37, | ||
649 | 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38, | ||
650 | 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4, | ||
651 | 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48, | ||
652 | 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65, | ||
653 | 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09, | ||
654 | 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e, | ||
655 | 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5, | ||
656 | 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b, | ||
657 | 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4, | ||
658 | 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e, | ||
659 | 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d, | ||
660 | 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0, | ||
661 | 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5, | ||
662 | 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48, | ||
663 | 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e, | ||
664 | 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f, | ||
665 | 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a, | ||
666 | 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d, | ||
667 | 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14, | ||
668 | 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69, | ||
669 | 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53, | ||
670 | 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56, | ||
671 | 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48, | ||
672 | 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4, | ||
673 | 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26, | ||
674 | 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e, | ||
675 | 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40, | ||
676 | 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7, | ||
677 | 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62, | ||
678 | 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe, | ||
679 | 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf, | ||
680 | 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2, | ||
681 | 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d, | ||
682 | 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32, | ||
683 | 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa, | ||
684 | 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45, | ||
685 | 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04, | ||
686 | 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33, | ||
687 | 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad, | ||
688 | 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4, | ||
689 | 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c, | ||
690 | 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b, | ||
691 | 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36, | ||
692 | 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa, | ||
693 | 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9, | ||
694 | 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28, | ||
695 | 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b, | ||
696 | 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03, | ||
697 | 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d, | ||
698 | 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff, | ||
699 | 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39, | ||
700 | 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b, | ||
701 | 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2, | ||
702 | 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34, | ||
703 | 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe, | ||
704 | 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0, | ||
705 | 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27, | ||
706 | 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86, | ||
707 | 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90, | ||
708 | 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03, | ||
709 | 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb, | ||
710 | 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57, | ||
711 | 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9, | ||
712 | 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5, | ||
713 | 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16, | ||
714 | 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5, | ||
715 | 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a, | ||
716 | 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d, | ||
717 | 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0, | ||
718 | 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f, | ||
719 | 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48, | ||
720 | 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1, | ||
721 | 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09, | ||
722 | 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51, | ||
723 | 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b, | ||
724 | 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf, | ||
725 | 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe, | ||
726 | 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad, | ||
727 | 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e, | ||
728 | 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57, | ||
729 | 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f, | ||
730 | 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef, | ||
731 | 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8, | ||
732 | 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69, | ||
733 | 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d, | ||
734 | 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59, | ||
735 | 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9, | ||
736 | 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d, | ||
737 | 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea, | ||
738 | 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56, | ||
739 | 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4, | ||
740 | 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8, | ||
741 | 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78, | ||
742 | 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f, | ||
743 | 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4, | ||
744 | 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91, | ||
745 | 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f, | ||
746 | 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c, | ||
747 | 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57, | ||
748 | 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4, | ||
749 | 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23, | ||
750 | 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17, | ||
751 | 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66, | ||
752 | 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39, | ||
753 | 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36, | ||
754 | 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00, | ||
755 | 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7, | ||
756 | 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60, | ||
757 | 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c, | ||
758 | 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e, | ||
759 | 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7, | ||
760 | 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a, | ||
761 | 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d, | ||
762 | 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37, | ||
763 | 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82, | ||
764 | 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8, | ||
765 | 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e, | ||
766 | 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85, | ||
767 | 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98, | ||
768 | 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22, | ||
769 | 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7, | ||
770 | 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49, | ||
771 | 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33, | ||
772 | 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc, | ||
773 | 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8, | ||
774 | 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f, | ||
775 | 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3, | ||
776 | 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98, | ||
777 | 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c, | ||
778 | 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6, | ||
779 | 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc, | ||
780 | 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d, | ||
781 | }; | ||
342 | 782 | ||
343 | #if 0 /*Not used at present */ | 783 | /* 100 test cases */ |
344 | static void | 784 | static struct crc_test { |
345 | buf_dump(char const *prefix, unsigned char const *buf, size_t len) | 785 | u32 crc; /* random starting crc */ |
786 | u32 start; /* random 6 bit offset in buf */ | ||
787 | u32 length; /* random 11 bit length of test */ | ||
788 | u32 crc_le; /* expected crc32_le result */ | ||
789 | u32 crc_be; /* expected crc32_be result */ | ||
790 | u32 crc32c_le; /* expected crc32c_le result */ | ||
791 | } test[] = | ||
346 | { | 792 | { |
347 | fputs(prefix, stdout); | 793 | {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, |
348 | while (len--) | 794 | 0xf6e93d6c}, |
349 | printf(" %02x", *buf++); | 795 | {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, |
350 | putchar('\n'); | 796 | 0x0fe92aca}, |
797 | {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, | ||
798 | 0x52e1ebb8}, | ||
799 | {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, | ||
800 | 0x0798af9a}, | ||
801 | {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, | ||
802 | 0x18eb3152}, | ||
803 | {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, | ||
804 | 0xd00d08c7}, | ||
805 | {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, | ||
806 | 0x8ba966bc}, | ||
807 | {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, | ||
808 | 0x11d694a2}, | ||
809 | {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, | ||
810 | 0x6ab3208d}, | ||
811 | {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, | ||
812 | 0xba4603c5}, | ||
813 | {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, | ||
814 | 0xe6071c6f}, | ||
815 | {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, | ||
816 | 0x179ec30a}, | ||
817 | {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, | ||
818 | 0x0903beb8}, | ||
819 | {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, | ||
820 | 0x6a7cb4fa}, | ||
821 | {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, | ||
822 | 0xdb535801}, | ||
823 | {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, | ||
824 | 0x92bed597}, | ||
825 | {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, | ||
826 | 0x192a3f1b}, | ||
827 | {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, | ||
828 | 0xccbaec1a}, | ||
829 | {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, | ||
830 | 0x7eabae4d}, | ||
831 | {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, | ||
832 | 0x28c72982}, | ||
833 | {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, | ||
834 | 0xc3cd4d18}, | ||
835 | {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, | ||
836 | 0xbca8f0e7}, | ||
837 | {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, | ||
838 | 0x713f60b3}, | ||
839 | {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, | ||
840 | 0xebd08fd5}, | ||
841 | {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, | ||
842 | 0x64406c59}, | ||
843 | {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, | ||
844 | 0x7421890e}, | ||
845 | {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, | ||
846 | 0xe9347603}, | ||
847 | {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, | ||
848 | 0x1bef9060}, | ||
849 | {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, | ||
850 | 0x34720072}, | ||
851 | {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, | ||
852 | 0x48310f59}, | ||
853 | {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, | ||
854 | 0x783a4213}, | ||
855 | {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, | ||
856 | 0x9e8efd41}, | ||
857 | {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, | ||
858 | 0xfc3d34a5}, | ||
859 | {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, | ||
860 | 0x17a52ae2}, | ||
861 | {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, | ||
862 | 0x886d935a}, | ||
863 | {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, | ||
864 | 0xeaaeaeb2}, | ||
865 | {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, | ||
866 | 0x8e900a4b}, | ||
867 | {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, | ||
868 | 0xd74662b1}, | ||
869 | {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, | ||
870 | 0xd26752ba}, | ||
871 | {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, | ||
872 | 0x8b1fcd62}, | ||
873 | {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, | ||
874 | 0xf54342fe}, | ||
875 | {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, | ||
876 | 0x5b95b988}, | ||
877 | {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, | ||
878 | 0x2e1176be}, | ||
879 | {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, | ||
880 | 0x66120546}, | ||
881 | {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, | ||
882 | 0xf256a5cc}, | ||
883 | {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, | ||
884 | 0x4af1dd69}, | ||
885 | {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, | ||
886 | 0x56f0a04a}, | ||
887 | {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, | ||
888 | 0x74f6b6b2}, | ||
889 | {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, | ||
890 | 0x085951fd}, | ||
891 | {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, | ||
892 | 0xc65387eb}, | ||
893 | {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, | ||
894 | 0x1ca9257b}, | ||
895 | {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, | ||
896 | 0xfd196d76}, | ||
897 | {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, | ||
898 | 0x5ef88339}, | ||
899 | {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, | ||
900 | 0x2c3714d9}, | ||
901 | {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, | ||
902 | 0x58576548}, | ||
903 | {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, | ||
904 | 0xfd7c57de}, | ||
905 | {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, | ||
906 | 0xd5fedd59}, | ||
907 | {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, | ||
908 | 0x1cc3b17b}, | ||
909 | {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, | ||
910 | 0x270eed73}, | ||
911 | {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, | ||
912 | 0x91ecbb11}, | ||
913 | {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, | ||
914 | 0x05ed8d0c}, | ||
915 | {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, | ||
916 | 0x0b09ad5b}, | ||
917 | {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, | ||
918 | 0xf8d511fb}, | ||
919 | {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, | ||
920 | 0x5ad832cc}, | ||
921 | {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, | ||
922 | 0x1214d196}, | ||
923 | {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, | ||
924 | 0x5747218a}, | ||
925 | {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, | ||
926 | 0xde8f14de}, | ||
927 | {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, | ||
928 | 0x3563b7b9}, | ||
929 | {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, | ||
930 | 0x071475d0}, | ||
931 | {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, | ||
932 | 0x54c79d60}, | ||
933 | {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, | ||
934 | 0x4c53eee6}, | ||
935 | {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, | ||
936 | 0x10137a3c}, | ||
937 | {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, | ||
938 | 0xaa9d6c73}, | ||
939 | {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, | ||
940 | 0xb63d23e7}, | ||
941 | {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, | ||
942 | 0x7f53e9cf}, | ||
943 | {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, | ||
944 | 0x13c1cd83}, | ||
945 | {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, | ||
946 | 0x49ff5867}, | ||
947 | {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, | ||
948 | 0x8467f211}, | ||
949 | {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, | ||
950 | 0x3f9683b2}, | ||
951 | {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, | ||
952 | 0x76a3f874}, | ||
953 | {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, | ||
954 | 0x863b702f}, | ||
955 | {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, | ||
956 | 0xdc6c58ff}, | ||
957 | {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, | ||
958 | 0x0622cc95}, | ||
959 | {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, | ||
960 | 0xe85605cd}, | ||
961 | {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, | ||
962 | 0x31da5f06}, | ||
963 | {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, | ||
964 | 0xa1f2e784}, | ||
965 | {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, | ||
966 | 0xb07cc616}, | ||
967 | {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, | ||
968 | 0xbf943b6c}, | ||
969 | {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, | ||
970 | 0x2c01af1c}, | ||
971 | {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, | ||
972 | 0x0fe5f56d}, | ||
973 | {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, | ||
974 | 0xf8943b2d}, | ||
975 | {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, | ||
976 | 0xe4d89272}, | ||
977 | {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, | ||
978 | 0x7c2f6bbb}, | ||
979 | {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, | ||
980 | 0xabbf388b}, | ||
981 | {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, | ||
982 | 0x1dca1f4e}, | ||
983 | {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, | ||
984 | 0x5c170e23}, | ||
985 | {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, | ||
986 | 0xc0e9d672}, | ||
987 | {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, | ||
988 | 0xc18bdc86}, | ||
989 | {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, | ||
990 | 0xa874fcdd}, | ||
991 | {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, | ||
992 | 0x9dc0bb48}, | ||
993 | }; | ||
351 | 994 | ||
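The expected values in the table above can also be spot-checked from user space. What follows is only a hedged sketch of one way to do that, not something this patch provides: it assumes the 4096-byte test_buf[] array has been copied into a local header (crc32_test_buf.h is a name I made up for that), and it leans on zlib's crc32(), which wraps the same reflected CRC-32 update in the pre/post XOR with 0xffffffff that the kernel's crc32_le() leaves to the caller.

/* Hedged user-space cross-check of one crc_le vector against zlib.
 * crc32_test_buf.h is a hypothetical header holding the 4096-byte
 * test_buf[] array copied verbatim from the patch above. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <zlib.h>

#include "crc32_test_buf.h"

/* First entry of the test[] table above. */
static const struct {
	uint32_t crc, start, length, crc_le, crc_be, crc32c_le;
} vec0 = { 0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1,
	   0xf6e93d6c };

/* zlib's crc32() inverts both the seed and the result; the kernel's
 * crc32_le() does neither, so the two differ only by those two XORs. */
static uint32_t kernel_style_crc32_le(uint32_t seed,
				      const unsigned char *p, size_t len)
{
	return (uint32_t)crc32(~seed & 0xffffffffUL, p, len) ^ 0xffffffffu;
}

int main(void)
{
	uint32_t got = kernel_style_crc32_le(vec0.crc,
					     test_buf + vec0.start,
					     vec0.length);

	printf("crc_le: got %08x, expected %08x\n", got, vec0.crc_le);
	return got == vec0.crc_le ? 0 : 1;
}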
352 | } | 995 | #include <linux/time.h> |
353 | #endif | ||
354 | 996 | ||
355 | static void bytereverse(unsigned char *buf, size_t len) | 997 | static int __init crc32c_test(void) |
356 | { | 998 | { |
357 | while (len--) { | 999 | int i; |
358 | unsigned char x = bitrev8(*buf); | 1000 | int errors = 0; |
359 | *buf++ = x; | 1001 | int bytes = 0; |
1002 | struct timespec start, stop; | ||
1003 | u64 nsec; | ||
1004 | unsigned long flags; | ||
1005 | |||
1006 | /* keep static to prevent cache warming code from | ||
1007 | * getting eliminated by the compiler */ | ||
1008 | static u32 crc; | ||
1009 | |||
1010 | /* pre-warm the cache */ | ||
1011 | for (i = 0; i < 100; i++) { | ||
1012 | bytes += 2*test[i].length; | ||
1013 | |||
1014 | crc ^= __crc32c_le(test[i].crc, test_buf + | ||
1015 | test[i].start, test[i].length); | ||
360 | } | 1016 | } |
361 | } | ||
362 | 1017 | ||
363 | static void random_garbage(unsigned char *buf, size_t len) | 1018 | /* reduce OS noise */ |
364 | { | 1019 | local_irq_save(flags); |
365 | while (len--) | 1020 | local_irq_disable(); |
366 | *buf++ = (unsigned char) random(); | ||
367 | } | ||
368 | 1021 | ||
369 | #if 0 /* Not used at present */ | 1022 | getnstimeofday(&start); |
370 | static void store_le(u32 x, unsigned char *buf) | 1023 | for (i = 0; i < 100; i++) { |
371 | { | 1024 | if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf + |
372 | buf[0] = (unsigned char) x; | 1025 | test[i].start, test[i].length)) |
373 | buf[1] = (unsigned char) (x >> 8); | 1026 | errors++; |
374 | buf[2] = (unsigned char) (x >> 16); | 1027 | } |
375 | buf[3] = (unsigned char) (x >> 24); | 1028 | getnstimeofday(&stop); |
376 | } | ||
377 | #endif | ||
378 | 1029 | ||
379 | static void store_be(u32 x, unsigned char *buf) | 1030 | local_irq_restore(flags); |
380 | { | 1031 | local_irq_enable(); |
381 | buf[0] = (unsigned char) (x >> 24); | 1032 | |
382 | buf[1] = (unsigned char) (x >> 16); | 1033 | nsec = stop.tv_nsec - start.tv_nsec + |
383 | buf[2] = (unsigned char) (x >> 8); | 1034 | 1000000000 * (stop.tv_sec - start.tv_sec); |
384 | buf[3] = (unsigned char) x; | 1035 | |
1036 | pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS); | ||
1037 | |||
1038 | if (errors) | ||
1039 | pr_warn("crc32c: %d self tests failed\n", errors); | ||
1040 | else { | ||
1041 | pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n", | ||
1042 | bytes, nsec); | ||
1043 | } | ||
1044 | |||
1045 | return 0; | ||
385 | } | 1046 | } |
386 | 1047 | ||
387 | /* | 1048 | static int __init crc32_test(void) |
388 | * This checks that CRC(buf + CRC(buf)) = 0, and that | ||
389 | * CRC commutes with bit-reversal. This has the side effect | ||
390 | * of bytewise bit-reversing the input buffer, and returns | ||
391 | * the CRC of the reversed buffer. | ||
392 | */ | ||
393 | static u32 test_step(u32 init, unsigned char *buf, size_t len) | ||
394 | { | 1049 | { |
395 | u32 crc1, crc2; | 1050 | int i; |
396 | size_t i; | 1051 | int errors = 0; |
1052 | int bytes = 0; | ||
1053 | struct timespec start, stop; | ||
1054 | u64 nsec; | ||
1055 | unsigned long flags; | ||
1056 | |||
1057 | /* keep static to prevent cache warming code from | ||
1058 | * getting eliminated by the compiler */ | ||
1059 | static u32 crc; | ||
1060 | |||
1061 | /* pre-warm the cache */ | ||
1062 | for (i = 0; i < 100; i++) { | ||
1063 | bytes += 2*test[i].length; | ||
397 | 1064 | ||
398 | crc1 = crc32_be(init, buf, len); | 1065 | crc ^= crc32_le(test[i].crc, test_buf + |
399 | store_be(crc1, buf + len); | 1066 | test[i].start, test[i].length); |
400 | crc2 = crc32_be(init, buf, len + 4); | 1067 | |
401 | if (crc2) | 1068 | crc ^= crc32_be(test[i].crc, test_buf + |
402 | printf("\nCRC cancellation fail: 0x%08x should be 0\n", | 1069 | test[i].start, test[i].length); |
403 | crc2); | ||
404 | |||
405 | for (i = 0; i <= len + 4; i++) { | ||
406 | crc2 = crc32_be(init, buf, i); | ||
407 | crc2 = crc32_be(crc2, buf + i, len + 4 - i); | ||
408 | if (crc2) | ||
409 | printf("\nCRC split fail: 0x%08x\n", crc2); | ||
410 | } | 1070 | } |
411 | 1071 | ||
412 | /* Now swap it around for the other test */ | 1072 | /* reduce OS noise */ |
413 | 1073 | local_irq_save(flags); | |
414 | bytereverse(buf, len + 4); | 1074 | local_irq_disable(); |
415 | init = bitrev32(init); | 1075 | |
416 | crc2 = bitrev32(crc1); | 1076 | getnstimeofday(&start); |
417 | if (crc1 != bitrev32(crc2)) | 1077 | for (i = 0; i < 100; i++) { |
418 | printf("\nBit reversal fail: 0x%08x -> 0x%08x -> 0x%08x\n", | 1078 | if (test[i].crc_le != crc32_le(test[i].crc, test_buf + |
419 | crc1, crc2, bitrev32(crc2)); | 1079 | test[i].start, test[i].length)) |
420 | crc1 = crc32_le(init, buf, len); | 1080 | errors++; |
421 | if (crc1 != crc2) | 1081 | |
422 | printf("\nCRC endianness fail: 0x%08x != 0x%08x\n", crc1, | 1082 | if (test[i].crc_be != crc32_be(test[i].crc, test_buf + |
423 | crc2); | 1083 | test[i].start, test[i].length)) |
424 | crc2 = crc32_le(init, buf, len + 4); | 1084 | errors++; |
425 | if (crc2) | ||
426 | printf("\nCRC cancellation fail: 0x%08x should be 0\n", | ||
427 | crc2); | ||
428 | |||
429 | for (i = 0; i <= len + 4; i++) { | ||
430 | crc2 = crc32_le(init, buf, i); | ||
431 | crc2 = crc32_le(crc2, buf + i, len + 4 - i); | ||
432 | if (crc2) | ||
433 | printf("\nCRC split fail: 0x%08x\n", crc2); | ||
434 | } | 1085 | } |
1086 | getnstimeofday(&stop); | ||
435 | 1087 | ||
436 | return crc1; | 1088 | local_irq_restore(flags); |
437 | } | 1089 | local_irq_enable(); |
438 | 1090 | ||
439 | #define SIZE 64 | 1091 | nsec = stop.tv_nsec - start.tv_nsec + |
440 | #define INIT1 0 | 1092 | 1000000000 * (stop.tv_sec - start.tv_sec); |
441 | #define INIT2 0 | ||
442 | 1093 | ||
443 | int main(void) | 1094 | pr_info("crc32: CRC_LE_BITS = %d, CRC_BE_BITS = %d\n", |
444 | { | 1095 | CRC_LE_BITS, CRC_BE_BITS); |
445 | unsigned char buf1[SIZE + 4]; | 1096 | |
446 | unsigned char buf2[SIZE + 4]; | 1097 | if (errors) |
447 | unsigned char buf3[SIZE + 4]; | 1098 | pr_warn("crc32: %d self tests failed\n", errors); |
448 | int i, j; | 1099 | else { |
449 | u32 crc1, crc2, crc3; | 1100 | pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n", |
450 | 1101 | bytes, nsec); | |
451 | for (i = 0; i <= SIZE; i++) { | ||
452 | printf("\rTesting length %d...", i); | ||
453 | fflush(stdout); | ||
454 | random_garbage(buf1, i); | ||
455 | random_garbage(buf2, i); | ||
456 | for (j = 0; j < i; j++) | ||
457 | buf3[j] = buf1[j] ^ buf2[j]; | ||
458 | |||
459 | crc1 = test_step(INIT1, buf1, i); | ||
460 | crc2 = test_step(INIT2, buf2, i); | ||
461 | /* Now check that CRC(buf1 ^ buf2) = CRC(buf1) ^ CRC(buf2) */ | ||
462 | crc3 = test_step(INIT1 ^ INIT2, buf3, i); | ||
463 | if (crc3 != (crc1 ^ crc2)) | ||
464 | printf("CRC XOR fail: 0x%08x != 0x%08x ^ 0x%08x\n", | ||
465 | crc3, crc1, crc2); | ||
466 | } | 1102 | } |
467 | printf("\nAll test complete. No failures expected.\n"); | 1103 | |
468 | return 0; | 1104 | return 0; |
469 | } | 1105 | } |
470 | 1106 | ||
471 | #endif /* UNITTEST */ | 1107 | static int __init crc32test_init(void) |
1108 | { | ||
1109 | crc32_test(); | ||
1110 | crc32c_test(); | ||
1111 | return 0; | ||
1112 | } | ||
1113 | |||
1114 | static void __exit crc32_exit(void) | ||
1115 | { | ||
1116 | } | ||
1117 | |||
1118 | module_init(crc32test_init); | ||
1119 | module_exit(crc32_exit); | ||
1120 | #endif /* CONFIG_CRC32_SELFTEST */ | ||
diff --git a/lib/crc32defs.h b/lib/crc32defs.h index 9b6773d73749..64cba2c3c700 100644 --- a/lib/crc32defs.h +++ b/lib/crc32defs.h | |||
@@ -6,27 +6,67 @@ | |||
6 | #define CRCPOLY_LE 0xedb88320 | 6 | #define CRCPOLY_LE 0xedb88320 |
7 | #define CRCPOLY_BE 0x04c11db7 | 7 | #define CRCPOLY_BE 0x04c11db7 |
8 | 8 | ||
9 | /* How many bits at a time to use. Requires a table of 4<<CRC_xx_BITS bytes. */ | 9 | /* |
10 | /* For less performance-sensitive, use 4 */ | 10 | * This is the CRC32c polynomial, as outlined by Castagnoli. |
11 | #ifndef CRC_LE_BITS | 11 | * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+ |
12 | * x^8+x^6+x^0 | ||
13 | */ | ||
14 | #define CRC32C_POLY_LE 0x82F63B78 | ||
15 | |||
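CRC32C_POLY_LE above is the bit-reversed (lsbit-first) form of the Castagnoli polynomial spelled out in the comment. As a quick sanity sketch, not part of the patch: taking the normal msbit-first constant 0x1EDC6F41 (my own sum of the listed terms, with the x^32 bit omitted) and reversing its 32 bits reproduces 0x82F63B78.

#include <stdint.h>
#include <stdio.h>

/* Normal (msbit-first) form of the Castagnoli polynomial, x^32 omitted;
 * derived here from the term list in the comment above, as an assumption. */
#define CRC32C_POLY_BE 0x1EDC6F41u
/* Reflected (lsbit-first) form used for little-endian CRC32c, as defined above. */
#define CRC32C_POLY_LE 0x82F63B78u

/* Plain 32-bit bit reversal; in the kernel this would be bitrev32(). */
static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;

	for (int i = 0; i < 32; i++)
		r |= ((x >> i) & 1u) << (31 - i);
	return r;
}

int main(void)
{
	printf("%08x\n", bitrev32(CRC32C_POLY_BE));	/* expect 82f63b78 */
	return bitrev32(CRC32C_POLY_BE) == CRC32C_POLY_LE ? 0 : 1;
}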
16 | /* Try to choose an implementation variant via Kconfig */ | ||
17 | #ifdef CONFIG_CRC32_SLICEBY8 | ||
18 | # define CRC_LE_BITS 64 | ||
19 | # define CRC_BE_BITS 64 | ||
20 | #endif | ||
21 | #ifdef CONFIG_CRC32_SLICEBY4 | ||
22 | # define CRC_LE_BITS 32 | ||
23 | # define CRC_BE_BITS 32 | ||
24 | #endif | ||
25 | #ifdef CONFIG_CRC32_SARWATE | ||
12 | # define CRC_LE_BITS 8 | 26 | # define CRC_LE_BITS 8 |
27 | # define CRC_BE_BITS 8 | ||
28 | #endif | ||
29 | #ifdef CONFIG_CRC32_BIT | ||
30 | # define CRC_LE_BITS 1 | ||
31 | # define CRC_BE_BITS 1 | ||
32 | #endif | ||
33 | |||
34 | /* | ||
35 | * How many bits at a time to use. Valid values are 1, 2, 4, 8, 32 and 64. | ||
36 | * For less performance-sensitive, use 4 or 8 to save table size. | ||
37 | * For larger systems choose same as CPU architecture as default. | ||
38 | * This works well on X86_64, SPARC64 systems. This may require some | ||
39 | * elaboration after experiments with other architectures. | ||
40 | */ | ||
41 | #ifndef CRC_LE_BITS | ||
42 | # ifdef CONFIG_64BIT | ||
43 | # define CRC_LE_BITS 64 | ||
44 | # else | ||
45 | # define CRC_LE_BITS 32 | ||
46 | # endif | ||
13 | #endif | 47 | #endif |
14 | #ifndef CRC_BE_BITS | 48 | #ifndef CRC_BE_BITS |
15 | # define CRC_BE_BITS 8 | 49 | # ifdef CONFIG_64BIT |
50 | # define CRC_BE_BITS 64 | ||
51 | # else | ||
52 | # define CRC_BE_BITS 32 | ||
53 | # endif | ||
16 | #endif | 54 | #endif |
17 | 55 | ||
18 | /* | 56 | /* |
19 | * Little-endian CRC computation. Used with serial bit streams sent | 57 | * Little-endian CRC computation. Used with serial bit streams sent |
20 | * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC. | 58 | * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC. |
21 | */ | 59 | */ |
22 | #if CRC_LE_BITS > 8 || CRC_LE_BITS < 1 || CRC_LE_BITS & CRC_LE_BITS-1 | 60 | #if CRC_LE_BITS > 64 || CRC_LE_BITS < 1 || CRC_LE_BITS == 16 || \ |
23 | # error CRC_LE_BITS must be a power of 2 between 1 and 8 | 61 | CRC_LE_BITS & CRC_LE_BITS-1 |
62 | # error "CRC_LE_BITS must be one of {1, 2, 4, 8, 32, 64}" | ||
24 | #endif | 63 | #endif |
25 | 64 | ||
26 | /* | 65 | /* |
27 | * Big-endian CRC computation. Used with serial bit streams sent | 66 | * Big-endian CRC computation. Used with serial bit streams sent |
28 | * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC. | 67 | * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC. |
29 | */ | 68 | */ |
30 | #if CRC_BE_BITS > 8 || CRC_BE_BITS < 1 || CRC_BE_BITS & CRC_BE_BITS-1 | 69 | #if CRC_BE_BITS > 64 || CRC_BE_BITS < 1 || CRC_BE_BITS == 16 || \ |
31 | # error CRC_BE_BITS must be a power of 2 between 1 and 8 | 70 | CRC_BE_BITS & CRC_BE_BITS-1 |
71 | # error "CRC_BE_BITS must be one of {1, 2, 4, 8, 32, 64}" | ||
32 | #endif | 72 | #endif |
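
To make the CRC_LE_BITS values above concrete: the old comment's "table of 4<<CRC_xx_BITS bytes" works out to 1 KiB for the 8-bit Sarwate setting, and with eight such tables you get the 8 KiB of slice-by-8. Below is a minimal user-space sketch of the 8-bit variant over CRCPOLY_LE; it is not lib/crc32.c itself, and seeding plus final inversion are left to the caller, as with the kernel helper.

    /*
     * Stand-alone sketch of the CRC_LE_BITS == 8 (Sarwate) case: one
     * 256-entry table, i.e. 4 << 8 = 1 KiB, one byte per step.  Same
     * polynomial as CRCPOLY_LE above; not the kernel implementation.
     */
    #include <stddef.h>
    #include <stdint.h>

    #define CRCPOLY_LE 0xedb88320u

    static uint32_t crc32_le_table[256];

    static void crc32_le_build_table(void)
    {
        uint32_t crc;
        int i, j;

        for (i = 0; i < 256; i++) {
            crc = i;
            for (j = 0; j < 8; j++)
                crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
            crc32_le_table[i] = crc;
        }
    }

    static uint32_t crc32_le_sketch(uint32_t crc, const uint8_t *p, size_t len)
    {
        while (len--)
            crc = crc32_le_table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
        return crc;
    }
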
diff --git a/lib/ctype.c b/lib/ctype.c index 26baa620e95b..c646df91a2f7 100644 --- a/lib/ctype.c +++ b/lib/ctype.c | |||
@@ -5,7 +5,8 @@ | |||
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/ctype.h> | 7 | #include <linux/ctype.h> |
8 | #include <linux/module.h> | 8 | #include <linux/compiler.h> |
9 | #include <linux/export.h> | ||
9 | 10 | ||
10 | const unsigned char _ctype[] = { | 11 | const unsigned char _ctype[] = { |
11 | _C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ | 12 | _C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ |
diff --git a/lib/debug_locks.c b/lib/debug_locks.c index b1c177307677..f2fa60c59343 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c | |||
@@ -10,7 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | #include <linux/rwsem.h> | 11 | #include <linux/rwsem.h> |
12 | #include <linux/mutex.h> | 12 | #include <linux/mutex.h> |
13 | #include <linux/module.h> | 13 | #include <linux/export.h> |
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | #include <linux/debug_locks.h> | 15 | #include <linux/debug_locks.h> |
16 | 16 | ||
diff --git a/lib/debugobjects.c b/lib/debugobjects.c index a78b7c6e042c..d11808ca4bc4 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c | |||
@@ -79,30 +79,29 @@ static const char *obj_states[ODEBUG_STATE_MAX] = { | |||
79 | [ODEBUG_STATE_NOTAVAILABLE] = "not available", | 79 | [ODEBUG_STATE_NOTAVAILABLE] = "not available", |
80 | }; | 80 | }; |
81 | 81 | ||
82 | static int fill_pool(void) | 82 | static void fill_pool(void) |
83 | { | 83 | { |
84 | gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; | 84 | gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; |
85 | struct debug_obj *new; | 85 | struct debug_obj *new; |
86 | unsigned long flags; | 86 | unsigned long flags; |
87 | 87 | ||
88 | if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL)) | 88 | if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL)) |
89 | return obj_pool_free; | 89 | return; |
90 | 90 | ||
91 | if (unlikely(!obj_cache)) | 91 | if (unlikely(!obj_cache)) |
92 | return obj_pool_free; | 92 | return; |
93 | 93 | ||
94 | while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) { | 94 | while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) { |
95 | 95 | ||
96 | new = kmem_cache_zalloc(obj_cache, gfp); | 96 | new = kmem_cache_zalloc(obj_cache, gfp); |
97 | if (!new) | 97 | if (!new) |
98 | return obj_pool_free; | 98 | return; |
99 | 99 | ||
100 | raw_spin_lock_irqsave(&pool_lock, flags); | 100 | raw_spin_lock_irqsave(&pool_lock, flags); |
101 | hlist_add_head(&new->node, &obj_pool); | 101 | hlist_add_head(&new->node, &obj_pool); |
102 | obj_pool_free++; | 102 | obj_pool_free++; |
103 | raw_spin_unlock_irqrestore(&pool_lock, flags); | 103 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
104 | } | 104 | } |
105 | return obj_pool_free; | ||
106 | } | 105 | } |
107 | 106 | ||
108 | /* | 107 | /* |
@@ -268,12 +267,16 @@ static void debug_print_object(struct debug_obj *obj, char *msg) | |||
268 | * Try to repair the damage, so we have a better chance to get useful | 267 | * Try to repair the damage, so we have a better chance to get useful |
269 | * debug output. | 268 | * debug output. |
270 | */ | 269 | */ |
271 | static void | 270 | static int |
272 | debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state), | 271 | debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state), |
273 | void * addr, enum debug_obj_state state) | 272 | void * addr, enum debug_obj_state state) |
274 | { | 273 | { |
274 | int fixed = 0; | ||
275 | |||
275 | if (fixup) | 276 | if (fixup) |
276 | debug_objects_fixups += fixup(addr, state); | 277 | fixed = fixup(addr, state); |
278 | debug_objects_fixups += fixed; | ||
279 | return fixed; | ||
277 | } | 280 | } |
278 | 281 | ||
279 | static void debug_object_is_on_stack(void *addr, int onstack) | 282 | static void debug_object_is_on_stack(void *addr, int onstack) |
@@ -386,6 +389,9 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) | |||
386 | struct debug_bucket *db; | 389 | struct debug_bucket *db; |
387 | struct debug_obj *obj; | 390 | struct debug_obj *obj; |
388 | unsigned long flags; | 391 | unsigned long flags; |
392 | struct debug_obj o = { .object = addr, | ||
393 | .state = ODEBUG_STATE_NOTAVAILABLE, | ||
394 | .descr = descr }; | ||
389 | 395 | ||
390 | if (!debug_objects_enabled) | 396 | if (!debug_objects_enabled) |
391 | return; | 397 | return; |
@@ -425,8 +431,9 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) | |||
425 | * let the type specific code decide whether this is | 431 | * let the type specific code decide whether this is |
426 | * true or not. | 432 | * true or not. |
427 | */ | 433 | */ |
428 | debug_object_fixup(descr->fixup_activate, addr, | 434 | if (debug_object_fixup(descr->fixup_activate, addr, |
429 | ODEBUG_STATE_NOTAVAILABLE); | 435 | ODEBUG_STATE_NOTAVAILABLE)) |
436 | debug_print_object(&o, "activate"); | ||
430 | } | 437 | } |
431 | 438 | ||
432 | /** | 439 | /** |
@@ -563,6 +570,44 @@ out_unlock: | |||
563 | } | 570 | } |
564 | 571 | ||
565 | /** | 572 | /** |
573 | * debug_object_assert_init - debug checks when object should be init-ed | ||
574 | * @addr: address of the object | ||
575 | * @descr: pointer to an object specific debug description structure | ||
576 | */ | ||
577 | void debug_object_assert_init(void *addr, struct debug_obj_descr *descr) | ||
578 | { | ||
579 | struct debug_bucket *db; | ||
580 | struct debug_obj *obj; | ||
581 | unsigned long flags; | ||
582 | |||
583 | if (!debug_objects_enabled) | ||
584 | return; | ||
585 | |||
586 | db = get_bucket((unsigned long) addr); | ||
587 | |||
588 | raw_spin_lock_irqsave(&db->lock, flags); | ||
589 | |||
590 | obj = lookup_object(addr, db); | ||
591 | if (!obj) { | ||
592 | struct debug_obj o = { .object = addr, | ||
593 | .state = ODEBUG_STATE_NOTAVAILABLE, | ||
594 | .descr = descr }; | ||
595 | |||
596 | raw_spin_unlock_irqrestore(&db->lock, flags); | ||
597 | /* | ||
598 | * Maybe the object is static. Let the type specific | ||
599 | * code decide what to do. | ||
600 | */ | ||
601 | if (debug_object_fixup(descr->fixup_assert_init, addr, | ||
602 | ODEBUG_STATE_NOTAVAILABLE)) | ||
603 | debug_print_object(&o, "assert_init"); | ||
604 | return; | ||
605 | } | ||
606 | |||
607 | raw_spin_unlock_irqrestore(&db->lock, flags); | ||
608 | } | ||
609 | |||
610 | /** | ||
566 | * debug_object_active_state - debug checks object usage state machine | 611 | * debug_object_active_state - debug checks object usage state machine |
567 | * @addr: address of the object | 612 | * @addr: address of the object |
568 | * @descr: pointer to an object specific debug description structure | 613 | * @descr: pointer to an object specific debug description structure |
@@ -772,17 +817,9 @@ static int __init fixup_activate(void *addr, enum debug_obj_state state) | |||
772 | if (obj->static_init == 1) { | 817 | if (obj->static_init == 1) { |
773 | debug_object_init(obj, &descr_type_test); | 818 | debug_object_init(obj, &descr_type_test); |
774 | debug_object_activate(obj, &descr_type_test); | 819 | debug_object_activate(obj, &descr_type_test); |
775 | /* | 820 | return 0; |
776 | * Real code should return 0 here ! This is | ||
777 | * not a fixup of some bad behaviour. We | ||
778 | * merily call the debug_init function to keep | ||
779 | * track of the object. | ||
780 | */ | ||
781 | return 1; | ||
782 | } else { | ||
783 | /* Real code needs to emit a warning here */ | ||
784 | } | 821 | } |
785 | return 0; | 822 | return 1; |
786 | 823 | ||
787 | case ODEBUG_STATE_ACTIVE: | 824 | case ODEBUG_STATE_ACTIVE: |
788 | debug_object_deactivate(obj, &descr_type_test); | 825 | debug_object_deactivate(obj, &descr_type_test); |
@@ -921,7 +958,7 @@ static void __init debug_objects_selftest(void) | |||
921 | 958 | ||
922 | obj.static_init = 1; | 959 | obj.static_init = 1; |
923 | debug_object_activate(&obj, &descr_type_test); | 960 | debug_object_activate(&obj, &descr_type_test); |
924 | if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings)) | 961 | if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) |
925 | goto out; | 962 | goto out; |
926 | debug_object_init(&obj, &descr_type_test); | 963 | debug_object_init(&obj, &descr_type_test); |
927 | if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings)) | 964 | if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings)) |
@@ -1014,10 +1051,10 @@ static int __init debug_objects_replace_static_objects(void) | |||
1014 | cnt++; | 1051 | cnt++; |
1015 | } | 1052 | } |
1016 | } | 1053 | } |
1054 | local_irq_enable(); | ||
1017 | 1055 | ||
1018 | printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt, | 1056 | printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt, |
1019 | obj_pool_used); | 1057 | obj_pool_used); |
1020 | local_irq_enable(); | ||
1021 | return 0; | 1058 | return 0; |
1022 | free: | 1059 | free: |
1023 | hlist_for_each_entry_safe(obj, node, tmp, &objects, node) { | 1060 | hlist_for_each_entry_safe(obj, node, tmp, &objects, node) { |
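
With this change debug_object_fixup() reports whether the fixup callback actually repaired anything, so debug_object_activate() and the new debug_object_assert_init() print only when a real fixup happened, and the selftest's fixup_activate() returns 0 for the legitimate statically-initialized case. A hedged sketch of a user's fixup_activate callback under that convention; struct foo, its static_init flag and foo_debug_descr are hypothetical, not from the patch.

    #include <linux/types.h>
    #include <linux/debugobjects.h>

    struct foo {
        bool static_init;              /* hypothetical "was statically set up" flag */
        /* ... */
    };

    static struct debug_obj_descr foo_debug_descr; /* .fixup_activate = foo_fixup_activate */

    static int foo_fixup_activate(void *addr, enum debug_obj_state state)
    {
        struct foo *obj = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
            if (obj->static_init) {
                /* Legitimate static object: start tracking it, nothing to fix. */
                debug_object_init(obj, &foo_debug_descr);
                debug_object_activate(obj, &foo_debug_descr);
                return 0;
            }
            /* Activation of an uninitialized object: report it as fixed up. */
            return 1;
        default:
            return 0;
        }
    }
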
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c index b5257725daad..e26278576b31 100644 --- a/lib/dec_and_lock.c +++ b/lib/dec_and_lock.c | |||
@@ -1,4 +1,4 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/export.h> |
2 | #include <linux/spinlock.h> | 2 | #include <linux/spinlock.h> |
3 | #include <linux/atomic.h> | 3 | #include <linux/atomic.h> |
4 | 4 | ||
diff --git a/lib/decompress.c b/lib/decompress.c index 3d766b7f60ab..31a804277282 100644 --- a/lib/decompress.c +++ b/lib/decompress.c | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | #include <linux/init.h> | ||
17 | 18 | ||
18 | #ifndef CONFIG_DECOMPRESS_GZIP | 19 | #ifndef CONFIG_DECOMPRESS_GZIP |
19 | # define gunzip NULL | 20 | # define gunzip NULL |
@@ -31,11 +32,13 @@ | |||
31 | # define unlzo NULL | 32 | # define unlzo NULL |
32 | #endif | 33 | #endif |
33 | 34 | ||
34 | static const struct compress_format { | 35 | struct compress_format { |
35 | unsigned char magic[2]; | 36 | unsigned char magic[2]; |
36 | const char *name; | 37 | const char *name; |
37 | decompress_fn decompressor; | 38 | decompress_fn decompressor; |
38 | } compressed_formats[] = { | 39 | }; |
40 | |||
41 | static const struct compress_format compressed_formats[] __initdata = { | ||
39 | { {037, 0213}, "gzip", gunzip }, | 42 | { {037, 0213}, "gzip", gunzip }, |
40 | { {037, 0236}, "gzip", gunzip }, | 43 | { {037, 0236}, "gzip", gunzip }, |
41 | { {0x42, 0x5a}, "bzip2", bunzip2 }, | 44 | { {0x42, 0x5a}, "bzip2", bunzip2 }, |
@@ -45,7 +48,7 @@ static const struct compress_format { | |||
45 | { {0, 0}, NULL, NULL } | 48 | { {0, 0}, NULL, NULL } |
46 | }; | 49 | }; |
47 | 50 | ||
48 | decompress_fn decompress_method(const unsigned char *inbuf, int len, | 51 | decompress_fn __init decompress_method(const unsigned char *inbuf, int len, |
49 | const char **name) | 52 | const char **name) |
50 | { | 53 | { |
51 | const struct compress_format *cf; | 54 | const struct compress_format *cf; |
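
decompress_method() simply matches the first two bytes of the image against the magic values in compressed_formats[], which this patch lets the kernel discard after init (__initdata / __init). A stand-alone restatement of that dispatch, with only the gzip and bzip2 rows reproduced and NULL standing in for the decompressor callbacks.

    #include <stddef.h>

    typedef int (*decompress_sketch_fn)(const unsigned char *in, int len);

    struct format_sketch {
        unsigned char magic[2];
        const char *name;
        decompress_sketch_fn decompressor;
    };

    static const struct format_sketch formats[] = {
        { {037, 0213},  "gzip",  NULL },
        { {0x42, 0x5a}, "bzip2", NULL },
        { {0, 0}, NULL, NULL }             /* sentinel */
    };

    static decompress_sketch_fn find_decompressor(const unsigned char *inbuf,
                                                  int len, const char **name)
    {
        const struct format_sketch *cf;

        if (len < 2)
            return NULL;                   /* need both magic bytes */

        for (cf = formats; cf->name; cf++)
            if (cf->magic[0] == inbuf[0] && cf->magic[1] == inbuf[1])
                break;

        if (name)
            *name = cf->name;              /* NULL when nothing matched */
        return cf->decompressor;
    }
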
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c index a7b80c1d6a0d..31c5f7675fbf 100644 --- a/lib/decompress_bunzip2.c +++ b/lib/decompress_bunzip2.c | |||
@@ -1,4 +1,3 @@ | |||
1 | /* vi: set sw = 4 ts = 4: */ | ||
2 | /* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net). | 1 | /* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net). |
3 | 2 | ||
4 | Based on bzip2 decompression code by Julian R Seward (jseward@acm.org), | 3 | Based on bzip2 decompression code by Julian R Seward (jseward@acm.org), |
@@ -691,7 +690,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, | |||
691 | outbuf = malloc(BZIP2_IOBUF_SIZE); | 690 | outbuf = malloc(BZIP2_IOBUF_SIZE); |
692 | 691 | ||
693 | if (!outbuf) { | 692 | if (!outbuf) { |
694 | error("Could not allocate output bufer"); | 693 | error("Could not allocate output buffer"); |
695 | return RETVAL_OUT_OF_MEMORY; | 694 | return RETVAL_OUT_OF_MEMORY; |
696 | } | 695 | } |
697 | if (buf) | 696 | if (buf) |
@@ -699,7 +698,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, | |||
699 | else | 698 | else |
700 | inbuf = malloc(BZIP2_IOBUF_SIZE); | 699 | inbuf = malloc(BZIP2_IOBUF_SIZE); |
701 | if (!inbuf) { | 700 | if (!inbuf) { |
702 | error("Could not allocate input bufer"); | 701 | error("Could not allocate input buffer"); |
703 | i = RETVAL_OUT_OF_MEMORY; | 702 | i = RETVAL_OUT_OF_MEMORY; |
704 | goto exit_0; | 703 | goto exit_0; |
705 | } | 704 | } |
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c index 476c65af9709..32adb73a9038 100644 --- a/lib/decompress_unlzma.c +++ b/lib/decompress_unlzma.c | |||
@@ -562,7 +562,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, int in_len, | |||
562 | else | 562 | else |
563 | inbuf = malloc(LZMA_IOBUF_SIZE); | 563 | inbuf = malloc(LZMA_IOBUF_SIZE); |
564 | if (!inbuf) { | 564 | if (!inbuf) { |
565 | error("Could not allocate input bufer"); | 565 | error("Could not allocate input buffer"); |
566 | goto exit_0; | 566 | goto exit_0; |
567 | } | 567 | } |
568 | 568 | ||
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c index 5a7a2adf4c4c..4531294fa62f 100644 --- a/lib/decompress_unlzo.c +++ b/lib/decompress_unlzo.c | |||
@@ -279,7 +279,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len, | |||
279 | ret = 0; | 279 | ret = 0; |
280 | exit_2: | 280 | exit_2: |
281 | if (!input) | 281 | if (!input) |
282 | free(in_buf); | 282 | free(in_buf_save); |
283 | exit_1: | 283 | exit_1: |
284 | if (!output) | 284 | if (!output) |
285 | free(out_buf); | 285 | free(out_buf); |
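
The one-line unlzo fix above is the classic advanced-pointer bug: in_buf is walked forward while parsing, so passing it to free() no longer hands back the address malloc() returned; the saved copy must be freed instead. Reduced to its essence (illustrative, not the decompressor code):

    #include <stdlib.h>
    #include <string.h>

    static int checksum_copy(const unsigned char *src, size_t len)
    {
        unsigned char *buf, *buf_save;
        unsigned int sum = 0;

        buf = malloc(len);
        if (!buf)
            return -1;
        buf_save = buf;             /* remember what malloc() returned */

        memcpy(buf, src, len);
        while (len--)
            sum += *buf++;          /* parsing advances 'buf' */

        free(buf_save);             /* free(buf) here would corrupt the heap */
        return (int)sum;
    }
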
diff --git a/lib/devres.c b/lib/devres.c index 7c0e953a7486..80b9c76d436a 100644 --- a/lib/devres.c +++ b/lib/devres.c | |||
@@ -1,7 +1,7 @@ | |||
1 | #include <linux/pci.h> | 1 | #include <linux/pci.h> |
2 | #include <linux/io.h> | 2 | #include <linux/io.h> |
3 | #include <linux/gfp.h> | 3 | #include <linux/gfp.h> |
4 | #include <linux/module.h> | 4 | #include <linux/export.h> |
5 | 5 | ||
6 | void devm_ioremap_release(struct device *dev, void *res) | 6 | void devm_ioremap_release(struct device *dev, void *res) |
7 | { | 7 | { |
@@ -85,6 +85,57 @@ void devm_iounmap(struct device *dev, void __iomem *addr) | |||
85 | } | 85 | } |
86 | EXPORT_SYMBOL(devm_iounmap); | 86 | EXPORT_SYMBOL(devm_iounmap); |
87 | 87 | ||
88 | /** | ||
89 | * devm_request_and_ioremap() - Check, request region, and ioremap resource | ||
90 | * @dev: Generic device to handle the resource for | ||
91 | * @res: resource to be handled | ||
92 | * | ||
93 | * Takes all necessary steps to ioremap a mem resource. Uses managed device, so | ||
94 | * everything is undone on driver detach. Checks arguments, so you can feed | ||
95 | * it the result from e.g. platform_get_resource() directly. Returns the | ||
96 | * remapped pointer or NULL on error. Usage example: | ||
97 | * | ||
98 | * res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
99 | * base = devm_request_and_ioremap(&pdev->dev, res); | ||
100 | * if (!base) | ||
101 | * return -EADDRNOTAVAIL; | ||
102 | */ | ||
103 | void __iomem *devm_request_and_ioremap(struct device *dev, | ||
104 | struct resource *res) | ||
105 | { | ||
106 | resource_size_t size; | ||
107 | const char *name; | ||
108 | void __iomem *dest_ptr; | ||
109 | |||
110 | BUG_ON(!dev); | ||
111 | |||
112 | if (!res || resource_type(res) != IORESOURCE_MEM) { | ||
113 | dev_err(dev, "invalid resource\n"); | ||
114 | return NULL; | ||
115 | } | ||
116 | |||
117 | size = resource_size(res); | ||
118 | name = res->name ?: dev_name(dev); | ||
119 | |||
120 | if (!devm_request_mem_region(dev, res->start, size, name)) { | ||
121 | dev_err(dev, "can't request region for resource %pR\n", res); | ||
122 | return NULL; | ||
123 | } | ||
124 | |||
125 | if (res->flags & IORESOURCE_CACHEABLE) | ||
126 | dest_ptr = devm_ioremap(dev, res->start, size); | ||
127 | else | ||
128 | dest_ptr = devm_ioremap_nocache(dev, res->start, size); | ||
129 | |||
130 | if (!dest_ptr) { | ||
131 | dev_err(dev, "ioremap failed for resource %pR\n", res); | ||
132 | devm_release_mem_region(dev, res->start, size); | ||
133 | } | ||
134 | |||
135 | return dest_ptr; | ||
136 | } | ||
137 | EXPORT_SYMBOL(devm_request_and_ioremap); | ||
138 | |||
88 | #ifdef CONFIG_HAS_IOPORT | 139 | #ifdef CONFIG_HAS_IOPORT |
89 | /* | 140 | /* |
90 | * Generic iomap devres | 141 | * Generic iomap devres |
@@ -253,7 +304,7 @@ EXPORT_SYMBOL(pcim_iounmap); | |||
253 | * | 304 | * |
254 | * Request and iomap regions specified by @mask. | 305 | * Request and iomap regions specified by @mask. |
255 | */ | 306 | */ |
256 | int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name) | 307 | int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name) |
257 | { | 308 | { |
258 | void __iomem * const *iomap; | 309 | void __iomem * const *iomap; |
259 | int i, rc; | 310 | int i, rc; |
@@ -306,7 +357,7 @@ EXPORT_SYMBOL(pcim_iomap_regions); | |||
306 | * | 357 | * |
307 | * Request all PCI BARs and iomap regions specified by @mask. | 358 | * Request all PCI BARs and iomap regions specified by @mask. |
308 | */ | 359 | */ |
309 | int pcim_iomap_regions_request_all(struct pci_dev *pdev, u16 mask, | 360 | int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask, |
310 | const char *name) | 361 | const char *name) |
311 | { | 362 | { |
312 | int request_mask = ((1 << 6) - 1) & ~mask; | 363 | int request_mask = ((1 << 6) - 1) & ~mask; |
@@ -330,7 +381,7 @@ EXPORT_SYMBOL(pcim_iomap_regions_request_all); | |||
330 | * | 381 | * |
331 | * Unmap and release regions specified by @mask. | 382 | * Unmap and release regions specified by @mask. |
332 | */ | 383 | */ |
333 | void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask) | 384 | void pcim_iounmap_regions(struct pci_dev *pdev, int mask) |
334 | { | 385 | { |
335 | void __iomem * const *iomap; | 386 | void __iomem * const *iomap; |
336 | int i; | 387 | int i; |
@@ -348,5 +399,5 @@ void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask) | |||
348 | } | 399 | } |
349 | } | 400 | } |
350 | EXPORT_SYMBOL(pcim_iounmap_regions); | 401 | EXPORT_SYMBOL(pcim_iounmap_regions); |
351 | #endif | 402 | #endif /* CONFIG_PCI */ |
352 | #endif | 403 | #endif /* CONFIG_HAS_IOPORT */ |
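
Beyond the kerneldoc snippet above, a typical probe() ends up with nothing to undo by hand, since both the region request and the mapping are managed. A hedged sketch of such a platform driver probe; the foo_* names and the register offset are hypothetical.

    #include <linux/device.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_request_and_ioremap(&pdev->dev, res);
        if (!base)
            return -EADDRNOTAVAIL;

        /* Region and mapping are released automatically on driver detach. */
        writel(0x1, base + 0x00);   /* hypothetical enable register */
        return 0;
    }
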
diff --git a/lib/digsig.c b/lib/digsig.c new file mode 100644 index 000000000000..8c0e62975c88 --- /dev/null +++ b/lib/digsig.c | |||
@@ -0,0 +1,280 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Nokia Corporation | ||
3 | * Copyright (C) 2011 Intel Corporation | ||
4 | * | ||
5 | * Author: | ||
6 | * Dmitry Kasatkin <dmitry.kasatkin@nokia.com> | ||
7 | * <dmitry.kasatkin@intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation, version 2 of the License. | ||
12 | * | ||
13 | * File: sign.c | ||
14 | * implements signature (RSA) verification | ||
15 | * pkcs decoding is based on LibTomCrypt code | ||
16 | */ | ||
17 | |||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
19 | |||
20 | #include <linux/err.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/key.h> | ||
24 | #include <linux/crypto.h> | ||
25 | #include <crypto/hash.h> | ||
26 | #include <crypto/sha.h> | ||
27 | #include <keys/user-type.h> | ||
28 | #include <linux/mpi.h> | ||
29 | #include <linux/digsig.h> | ||
30 | |||
31 | static struct crypto_shash *shash; | ||
32 | |||
33 | static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg, | ||
34 | unsigned long msglen, | ||
35 | unsigned long modulus_bitlen, | ||
36 | unsigned char *out, | ||
37 | unsigned long *outlen) | ||
38 | { | ||
39 | unsigned long modulus_len, ps_len, i; | ||
40 | |||
41 | modulus_len = (modulus_bitlen >> 3) + (modulus_bitlen & 7 ? 1 : 0); | ||
42 | |||
43 | /* test message size */ | ||
44 | if ((msglen > modulus_len) || (modulus_len < 11)) | ||
45 | return -EINVAL; | ||
46 | |||
47 | /* separate encoded message */ | ||
48 | if ((msg[0] != 0x00) || (msg[1] != (unsigned char)1)) | ||
49 | return -EINVAL; | ||
50 | |||
51 | for (i = 2; i < modulus_len - 1; i++) | ||
52 | if (msg[i] != 0xFF) | ||
53 | break; | ||
54 | |||
55 | /* separator check */ | ||
56 | if (msg[i] != 0) | ||
57 | /* There was no octet with hexadecimal value 0x00 | ||
58 | to separate ps from m. */ | ||
59 | return -EINVAL; | ||
60 | |||
61 | ps_len = i - 2; | ||
62 | |||
63 | if (*outlen < (msglen - (2 + ps_len + 1))) { | ||
64 | *outlen = msglen - (2 + ps_len + 1); | ||
65 | return -EOVERFLOW; | ||
66 | } | ||
67 | |||
68 | *outlen = (msglen - (2 + ps_len + 1)); | ||
69 | memcpy(out, &msg[2 + ps_len + 1], *outlen); | ||
70 | |||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | /* | ||
75 | * RSA Signature verification with public key | ||
76 | */ | ||
77 | static int digsig_verify_rsa(struct key *key, | ||
78 | const char *sig, int siglen, | ||
79 | const char *h, int hlen) | ||
80 | { | ||
81 | int err = -EINVAL; | ||
82 | unsigned long len; | ||
83 | unsigned long mlen, mblen; | ||
84 | unsigned nret, l; | ||
85 | int head, i; | ||
86 | unsigned char *out1 = NULL, *out2 = NULL; | ||
87 | MPI in = NULL, res = NULL, pkey[2]; | ||
88 | uint8_t *p, *datap, *endp; | ||
89 | struct user_key_payload *ukp; | ||
90 | struct pubkey_hdr *pkh; | ||
91 | |||
92 | down_read(&key->sem); | ||
93 | ukp = key->payload.data; | ||
94 | |||
95 | if (ukp->datalen < sizeof(*pkh)) | ||
96 | goto err1; | ||
97 | |||
98 | pkh = (struct pubkey_hdr *)ukp->data; | ||
99 | |||
100 | if (pkh->version != 1) | ||
101 | goto err1; | ||
102 | |||
103 | if (pkh->algo != PUBKEY_ALGO_RSA) | ||
104 | goto err1; | ||
105 | |||
106 | if (pkh->nmpi != 2) | ||
107 | goto err1; | ||
108 | |||
109 | datap = pkh->mpi; | ||
110 | endp = ukp->data + ukp->datalen; | ||
111 | |||
112 | err = -ENOMEM; | ||
113 | |||
114 | for (i = 0; i < pkh->nmpi; i++) { | ||
115 | unsigned int remaining = endp - datap; | ||
116 | pkey[i] = mpi_read_from_buffer(datap, &remaining); | ||
117 | if (!pkey[i]) | ||
118 | goto err; | ||
119 | datap += remaining; | ||
120 | } | ||
121 | |||
122 | mblen = mpi_get_nbits(pkey[0]); | ||
123 | mlen = (mblen + 7)/8; | ||
124 | |||
125 | if (mlen == 0) | ||
126 | goto err; | ||
127 | |||
128 | out1 = kzalloc(mlen, GFP_KERNEL); | ||
129 | if (!out1) | ||
130 | goto err; | ||
131 | |||
132 | out2 = kzalloc(mlen, GFP_KERNEL); | ||
133 | if (!out2) | ||
134 | goto err; | ||
135 | |||
136 | nret = siglen; | ||
137 | in = mpi_read_from_buffer(sig, &nret); | ||
138 | if (!in) | ||
139 | goto err; | ||
140 | |||
141 | res = mpi_alloc(mpi_get_nlimbs(in) * 2); | ||
142 | if (!res) | ||
143 | goto err; | ||
144 | |||
145 | err = mpi_powm(res, in, pkey[1], pkey[0]); | ||
146 | if (err) | ||
147 | goto err; | ||
148 | |||
149 | if (mpi_get_nlimbs(res) * BYTES_PER_MPI_LIMB > mlen) { | ||
150 | err = -EINVAL; | ||
151 | goto err; | ||
152 | } | ||
153 | |||
154 | p = mpi_get_buffer(res, &l, NULL); | ||
155 | if (!p) { | ||
156 | err = -EINVAL; | ||
157 | goto err; | ||
158 | } | ||
159 | |||
160 | len = mlen; | ||
161 | head = len - l; | ||
162 | memset(out1, 0, head); | ||
163 | memcpy(out1 + head, p, l); | ||
164 | |||
165 | err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len); | ||
166 | if (err) | ||
167 | goto err; | ||
168 | |||
169 | if (len != hlen || memcmp(out2, h, hlen)) | ||
170 | err = -EINVAL; | ||
171 | |||
172 | err: | ||
173 | mpi_free(in); | ||
174 | mpi_free(res); | ||
175 | kfree(out1); | ||
176 | kfree(out2); | ||
177 | while (--i >= 0) | ||
178 | mpi_free(pkey[i]); | ||
179 | err1: | ||
180 | up_read(&key->sem); | ||
181 | |||
182 | return err; | ||
183 | } | ||
184 | |||
185 | /** | ||
186 | * digsig_verify() - digital signature verification with public key | ||
187 | * @keyring: keyring to search key in | ||
188 | * @sig: digital signature | ||
189 | * @siglen: length of the signature | ||
190 | * @data: data | ||
191 | * @datalen: length of the data | ||
192 | * @return: 0 on success, -EINVAL otherwise | ||
193 | * | ||
194 | * Verifies data integrity against digital signature. | ||
195 | * Currently only RSA is supported. | ||
196 | * Normally a hash of the content is used as the data for this function. | ||
197 | * | ||
198 | */ | ||
199 | int digsig_verify(struct key *keyring, const char *sig, int siglen, | ||
200 | const char *data, int datalen) | ||
201 | { | ||
202 | int err = -ENOMEM; | ||
203 | struct signature_hdr *sh = (struct signature_hdr *)sig; | ||
204 | struct shash_desc *desc = NULL; | ||
205 | unsigned char hash[SHA1_DIGEST_SIZE]; | ||
206 | struct key *key; | ||
207 | char name[20]; | ||
208 | |||
209 | if (siglen < sizeof(*sh) + 2) | ||
210 | return -EINVAL; | ||
211 | |||
212 | if (sh->algo != PUBKEY_ALGO_RSA) | ||
213 | return -ENOTSUPP; | ||
214 | |||
215 | sprintf(name, "%llX", __be64_to_cpup((uint64_t *)sh->keyid)); | ||
216 | |||
217 | if (keyring) { | ||
218 | /* search in specific keyring */ | ||
219 | key_ref_t kref; | ||
220 | kref = keyring_search(make_key_ref(keyring, 1UL), | ||
221 | &key_type_user, name); | ||
222 | if (IS_ERR(kref)) | ||
223 | key = ERR_PTR(PTR_ERR(kref)); | ||
224 | else | ||
225 | key = key_ref_to_ptr(kref); | ||
226 | } else { | ||
227 | key = request_key(&key_type_user, name, NULL); | ||
228 | } | ||
229 | if (IS_ERR(key)) { | ||
230 | pr_err("key not found, id: %s\n", name); | ||
231 | return PTR_ERR(key); | ||
232 | } | ||
233 | |||
234 | desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash), | ||
235 | GFP_KERNEL); | ||
236 | if (!desc) | ||
237 | goto err; | ||
238 | |||
239 | desc->tfm = shash; | ||
240 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
241 | |||
242 | crypto_shash_init(desc); | ||
243 | crypto_shash_update(desc, data, datalen); | ||
244 | crypto_shash_update(desc, sig, sizeof(*sh)); | ||
245 | crypto_shash_final(desc, hash); | ||
246 | |||
247 | kfree(desc); | ||
248 | |||
249 | /* pass signature mpis address */ | ||
250 | err = digsig_verify_rsa(key, sig + sizeof(*sh), siglen - sizeof(*sh), | ||
251 | hash, sizeof(hash)); | ||
252 | |||
253 | err: | ||
254 | key_put(key); | ||
255 | |||
256 | return err ? -EINVAL : 0; | ||
257 | } | ||
258 | EXPORT_SYMBOL_GPL(digsig_verify); | ||
259 | |||
260 | static int __init digsig_init(void) | ||
261 | { | ||
262 | shash = crypto_alloc_shash("sha1", 0, 0); | ||
263 | if (IS_ERR(shash)) { | ||
264 | pr_err("shash allocation failed\n"); | ||
265 | return PTR_ERR(shash); | ||
266 | } | ||
267 | |||
268 | return 0; | ||
269 | |||
270 | } | ||
271 | |||
272 | static void __exit digsig_cleanup(void) | ||
273 | { | ||
274 | crypto_free_shash(shash); | ||
275 | } | ||
276 | |||
277 | module_init(digsig_init); | ||
278 | module_exit(digsig_cleanup); | ||
279 | |||
280 | MODULE_LICENSE("GPL"); | ||
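
In the new file above, digsig_verify() SHA-1 hashes the data plus the signature header, digsig_verify_rsa() performs the modular exponentiation, and pkcs_1_v1_5_decode_emsa() then strips the 0x00 0x01 0xFF..0xFF 0x00 padding before the digest comparison. That last step, restated as a stand-alone helper with no MPI or key handling (illustrative only):

    #include <stddef.h>
    #include <string.h>

    /* Returns payload length, or -1 if the block is not 00 01 FF..FF 00 <payload>. */
    static long unpad_pkcs1_v15(const unsigned char *em, size_t emlen,
                                unsigned char *out, size_t outlen)
    {
        size_t i;

        if (emlen < 11 || em[0] != 0x00 || em[1] != 0x01)
            return -1;

        for (i = 2; i < emlen - 1; i++)     /* skip the 0xFF padding string */
            if (em[i] != 0xFF)
                break;

        if (em[i] != 0x00)                  /* missing separator octet */
            return -1;
        i++;                                /* first payload byte */

        if (emlen - i > outlen)
            return -1;
        memcpy(out, &em[i], emlen - i);
        return (long)(emlen - i);
    }
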
diff --git a/lib/div64.c b/lib/div64.c index 5b4919191778..a163b6caef73 100644 --- a/lib/div64.c +++ b/lib/div64.c | |||
@@ -16,7 +16,8 @@ | |||
16 | * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S. | 16 | * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/module.h> | 19 | #include <linux/export.h> |
20 | #include <linux/kernel.h> | ||
20 | #include <linux/math64.h> | 21 | #include <linux/math64.h> |
21 | 22 | ||
22 | /* Not needed on 64bit architectures */ | 23 | /* Not needed on 64bit architectures */ |
@@ -86,7 +87,7 @@ EXPORT_SYMBOL(div_s64_rem); | |||
86 | * by the book 'Hacker's Delight'. The original source and full proof | 87 | * by the book 'Hacker's Delight'. The original source and full proof |
87 | * can be found here and is available for use without restriction. | 88 | * can be found here and is available for use without restriction. |
88 | * | 89 | * |
89 | * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c' | 90 | * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt' |
90 | */ | 91 | */ |
91 | #ifndef div64_u64 | 92 | #ifndef div64_u64 |
92 | u64 div64_u64(u64 dividend, u64 divisor) | 93 | u64 div64_u64(u64 dividend, u64 divisor) |
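
The div64.c changes themselves are only an include and a URL fix, but the reason the file exists bears a reminder: 32-bit kernels cannot divide 64-bit operands with a plain '/', since that would emit calls to libgcc helpers the kernel does not link, so callers go through the math64.h wrappers. A hedged usage sketch (the rate computation itself is made up):

    #include <linux/math64.h>

    static u64 avg_bytes_per_sec(u64 bytes, u64 elapsed_ns)
    {
        if (!elapsed_ns)
            return 0;

        /* 64-by-64 division; div_u64() is the cheaper 64-by-32 variant. */
        /* (bytes * 1e9 may overflow u64 for huge counts; ignored here.) */
        return div64_u64(bytes * 1000000000ULL, elapsed_ns);
    }
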
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index db07bfd9298e..d84beb994f36 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
25 | #include <linux/debugfs.h> | 25 | #include <linux/debugfs.h> |
26 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
27 | #include <linux/export.h> | ||
27 | #include <linux/device.h> | 28 | #include <linux/device.h> |
28 | #include <linux/types.h> | 29 | #include <linux/types.h> |
29 | #include <linux/sched.h> | 30 | #include <linux/sched.h> |
@@ -62,6 +63,8 @@ struct dma_debug_entry { | |||
62 | #endif | 63 | #endif |
63 | }; | 64 | }; |
64 | 65 | ||
66 | typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *); | ||
67 | |||
65 | struct hash_bucket { | 68 | struct hash_bucket { |
66 | struct list_head list; | 69 | struct list_head list; |
67 | spinlock_t lock; | 70 | spinlock_t lock; |
@@ -75,7 +78,7 @@ static LIST_HEAD(free_entries); | |||
75 | static DEFINE_SPINLOCK(free_entries_lock); | 78 | static DEFINE_SPINLOCK(free_entries_lock); |
76 | 79 | ||
77 | /* Global disable flag - will be set in case of an error */ | 80 | /* Global disable flag - will be set in case of an error */ |
78 | static bool global_disable __read_mostly; | 81 | static u32 global_disable __read_mostly; |
79 | 82 | ||
80 | /* Global error count */ | 83 | /* Global error count */ |
81 | static u32 error_count; | 84 | static u32 error_count; |
@@ -117,11 +120,6 @@ static const char *type2name[4] = { "single", "page", | |||
117 | static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", | 120 | static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", |
118 | "DMA_FROM_DEVICE", "DMA_NONE" }; | 121 | "DMA_FROM_DEVICE", "DMA_NONE" }; |
119 | 122 | ||
120 | /* little merge helper - remove it after the merge window */ | ||
121 | #ifndef BUS_NOTIFY_UNBOUND_DRIVER | ||
122 | #define BUS_NOTIFY_UNBOUND_DRIVER 0x0005 | ||
123 | #endif | ||
124 | |||
125 | /* | 123 | /* |
126 | * The access to some variables in this macro is racy. We can't use atomic_t | 124 | * The access to some variables in this macro is racy. We can't use atomic_t |
127 | * here because all these variables are exported to debugfs. Some of them even | 125 | * here because all these variables are exported to debugfs. Some of them even |
@@ -167,7 +165,7 @@ static bool driver_filter(struct device *dev) | |||
167 | return false; | 165 | return false; |
168 | 166 | ||
169 | /* driver filter on but not yet initialized */ | 167 | /* driver filter on but not yet initialized */ |
170 | drv = get_driver(dev->driver); | 168 | drv = dev->driver; |
171 | if (!drv) | 169 | if (!drv) |
172 | return false; | 170 | return false; |
173 | 171 | ||
@@ -182,7 +180,6 @@ static bool driver_filter(struct device *dev) | |||
182 | } | 180 | } |
183 | 181 | ||
184 | read_unlock_irqrestore(&driver_name_lock, flags); | 182 | read_unlock_irqrestore(&driver_name_lock, flags); |
185 | put_driver(drv); | ||
186 | 183 | ||
187 | return ret; | 184 | return ret; |
188 | } | 185 | } |
@@ -240,18 +237,37 @@ static void put_hash_bucket(struct hash_bucket *bucket, | |||
240 | spin_unlock_irqrestore(&bucket->lock, __flags); | 237 | spin_unlock_irqrestore(&bucket->lock, __flags); |
241 | } | 238 | } |
242 | 239 | ||
240 | static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b) | ||
241 | { | ||
242 | return ((a->dev_addr == b->dev_addr) && | ||
243 | (a->dev == b->dev)) ? true : false; | ||
244 | } | ||
245 | |||
246 | static bool containing_match(struct dma_debug_entry *a, | ||
247 | struct dma_debug_entry *b) | ||
248 | { | ||
249 | if (a->dev != b->dev) | ||
250 | return false; | ||
251 | |||
252 | if ((b->dev_addr <= a->dev_addr) && | ||
253 | ((b->dev_addr + b->size) >= (a->dev_addr + a->size))) | ||
254 | return true; | ||
255 | |||
256 | return false; | ||
257 | } | ||
258 | |||
243 | /* | 259 | /* |
244 | * Search a given entry in the hash bucket list | 260 | * Search a given entry in the hash bucket list |
245 | */ | 261 | */ |
246 | static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, | 262 | static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, |
247 | struct dma_debug_entry *ref) | 263 | struct dma_debug_entry *ref, |
264 | match_fn match) | ||
248 | { | 265 | { |
249 | struct dma_debug_entry *entry, *ret = NULL; | 266 | struct dma_debug_entry *entry, *ret = NULL; |
250 | int matches = 0, match_lvl, last_lvl = 0; | 267 | int matches = 0, match_lvl, last_lvl = -1; |
251 | 268 | ||
252 | list_for_each_entry(entry, &bucket->list, list) { | 269 | list_for_each_entry(entry, &bucket->list, list) { |
253 | if ((entry->dev_addr != ref->dev_addr) || | 270 | if (!match(ref, entry)) |
254 | (entry->dev != ref->dev)) | ||
255 | continue; | 271 | continue; |
256 | 272 | ||
257 | /* | 273 | /* |
@@ -277,7 +293,7 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, | |||
277 | } else if (match_lvl > last_lvl) { | 293 | } else if (match_lvl > last_lvl) { |
278 | /* | 294 | /* |
279 | * We found an entry that fits better than the | 295 | * We found an entry that fits better than the |
280 | * previous one | 296 | * previous one or it is the 1st match. |
281 | */ | 297 | */ |
282 | last_lvl = match_lvl; | 298 | last_lvl = match_lvl; |
283 | ret = entry; | 299 | ret = entry; |
@@ -293,6 +309,39 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, | |||
293 | return ret; | 309 | return ret; |
294 | } | 310 | } |
295 | 311 | ||
312 | static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, | ||
313 | struct dma_debug_entry *ref) | ||
314 | { | ||
315 | return __hash_bucket_find(bucket, ref, exact_match); | ||
316 | } | ||
317 | |||
318 | static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket, | ||
319 | struct dma_debug_entry *ref, | ||
320 | unsigned long *flags) | ||
321 | { | ||
322 | |||
323 | unsigned int max_range = dma_get_max_seg_size(ref->dev); | ||
324 | struct dma_debug_entry *entry, index = *ref; | ||
325 | unsigned int range = 0; | ||
326 | |||
327 | while (range <= max_range) { | ||
328 | entry = __hash_bucket_find(*bucket, &index, containing_match); | ||
329 | |||
330 | if (entry) | ||
331 | return entry; | ||
332 | |||
333 | /* | ||
334 | * Nothing found, go back a hash bucket | ||
335 | */ | ||
336 | put_hash_bucket(*bucket, flags); | ||
337 | range += (1 << HASH_FN_SHIFT); | ||
338 | index.dev_addr -= (1 << HASH_FN_SHIFT); | ||
339 | *bucket = get_hash_bucket(&index, flags); | ||
340 | } | ||
341 | |||
342 | return NULL; | ||
343 | } | ||
344 | |||
296 | /* | 345 | /* |
297 | * Add an entry to a hash bucket | 346 | * Add an entry to a hash bucket |
298 | */ | 347 | */ |
@@ -376,7 +425,7 @@ static struct dma_debug_entry *__dma_entry_alloc(void) | |||
376 | */ | 425 | */ |
377 | static struct dma_debug_entry *dma_entry_alloc(void) | 426 | static struct dma_debug_entry *dma_entry_alloc(void) |
378 | { | 427 | { |
379 | struct dma_debug_entry *entry = NULL; | 428 | struct dma_debug_entry *entry; |
380 | unsigned long flags; | 429 | unsigned long flags; |
381 | 430 | ||
382 | spin_lock_irqsave(&free_entries_lock, flags); | 431 | spin_lock_irqsave(&free_entries_lock, flags); |
@@ -384,11 +433,14 @@ static struct dma_debug_entry *dma_entry_alloc(void) | |||
384 | if (list_empty(&free_entries)) { | 433 | if (list_empty(&free_entries)) { |
385 | pr_err("DMA-API: debugging out of memory - disabling\n"); | 434 | pr_err("DMA-API: debugging out of memory - disabling\n"); |
386 | global_disable = true; | 435 | global_disable = true; |
387 | goto out; | 436 | spin_unlock_irqrestore(&free_entries_lock, flags); |
437 | return NULL; | ||
388 | } | 438 | } |
389 | 439 | ||
390 | entry = __dma_entry_alloc(); | 440 | entry = __dma_entry_alloc(); |
391 | 441 | ||
442 | spin_unlock_irqrestore(&free_entries_lock, flags); | ||
443 | |||
392 | #ifdef CONFIG_STACKTRACE | 444 | #ifdef CONFIG_STACKTRACE |
393 | entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; | 445 | entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; |
394 | entry->stacktrace.entries = entry->st_entries; | 446 | entry->stacktrace.entries = entry->st_entries; |
@@ -396,9 +448,6 @@ static struct dma_debug_entry *dma_entry_alloc(void) | |||
396 | save_stack_trace(&entry->stacktrace); | 448 | save_stack_trace(&entry->stacktrace); |
397 | #endif | 449 | #endif |
398 | 450 | ||
399 | out: | ||
400 | spin_unlock_irqrestore(&free_entries_lock, flags); | ||
401 | |||
402 | return entry; | 451 | return entry; |
403 | } | 452 | } |
404 | 453 | ||
@@ -603,7 +652,7 @@ static int dma_debug_fs_init(void) | |||
603 | 652 | ||
604 | global_disable_dent = debugfs_create_bool("disabled", 0444, | 653 | global_disable_dent = debugfs_create_bool("disabled", 0444, |
605 | dma_debug_dent, | 654 | dma_debug_dent, |
606 | (u32 *)&global_disable); | 655 | &global_disable); |
607 | if (!global_disable_dent) | 656 | if (!global_disable_dent) |
608 | goto out_err; | 657 | goto out_err; |
609 | 658 | ||
@@ -802,7 +851,7 @@ static void check_unmap(struct dma_debug_entry *ref) | |||
802 | } | 851 | } |
803 | 852 | ||
804 | bucket = get_hash_bucket(ref, &flags); | 853 | bucket = get_hash_bucket(ref, &flags); |
805 | entry = hash_bucket_find(bucket, ref); | 854 | entry = bucket_find_exact(bucket, ref); |
806 | 855 | ||
807 | if (!entry) { | 856 | if (!entry) { |
808 | err_printk(ref->dev, NULL, "DMA-API: device driver tries " | 857 | err_printk(ref->dev, NULL, "DMA-API: device driver tries " |
@@ -902,7 +951,7 @@ static void check_sync(struct device *dev, | |||
902 | 951 | ||
903 | bucket = get_hash_bucket(ref, &flags); | 952 | bucket = get_hash_bucket(ref, &flags); |
904 | 953 | ||
905 | entry = hash_bucket_find(bucket, ref); | 954 | entry = bucket_find_contain(&bucket, ref, &flags); |
906 | 955 | ||
907 | if (!entry) { | 956 | if (!entry) { |
908 | err_printk(dev, NULL, "DMA-API: device driver tries " | 957 | err_printk(dev, NULL, "DMA-API: device driver tries " |
@@ -1060,7 +1109,7 @@ static int get_nr_mapped_entries(struct device *dev, | |||
1060 | int mapped_ents; | 1109 | int mapped_ents; |
1061 | 1110 | ||
1062 | bucket = get_hash_bucket(ref, &flags); | 1111 | bucket = get_hash_bucket(ref, &flags); |
1063 | entry = hash_bucket_find(bucket, ref); | 1112 | entry = bucket_find_exact(bucket, ref); |
1064 | mapped_ents = 0; | 1113 | mapped_ents = 0; |
1065 | 1114 | ||
1066 | if (entry) | 1115 | if (entry) |
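
check_sync() now looks up mappings with bucket_find_contain(), so a partial sync whose dev_addr falls inside a larger mapping is still recognized; containing_match() is the predicate, and the walk steps back one hash bucket (1 << HASH_FN_SHIFT of dev_addr) at a time, bounded by the device's max segment size. The predicate restated stand-alone with simplified types:

    #include <stdbool.h>
    #include <stdint.h>

    struct mapping_sketch {
        const void *dev;
        uint64_t dev_addr;
        uint64_t size;
    };

    /* Does the stored mapping 'entry' cover the range referenced by 'ref'? */
    static bool mapping_contains(const struct mapping_sketch *ref,
                                 const struct mapping_sketch *entry)
    {
        if (ref->dev != entry->dev)
            return false;

        return entry->dev_addr <= ref->dev_addr &&
               entry->dev_addr + entry->size >= ref->dev_addr + ref->size;
    }
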
diff --git a/lib/dump_stack.c b/lib/dump_stack.c index 53bff4c8452b..42f4f55c9458 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c | |||
@@ -4,7 +4,7 @@ | |||
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <linux/module.h> | 7 | #include <linux/export.h> |
8 | 8 | ||
9 | void dump_stack(void) | 9 | void dump_stack(void) |
10 | { | 10 | { |
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 75ca78f3a8c9..e7f7d993357a 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c | |||
@@ -10,11 +10,12 @@ | |||
10 | * Copyright (C) 2011 Bart Van Assche. All Rights Reserved. | 10 | * Copyright (C) 2011 Bart Van Assche. All Rights Reserved. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ | ||
14 | |||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | 16 | #include <linux/module.h> |
15 | #include <linux/moduleparam.h> | 17 | #include <linux/moduleparam.h> |
16 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
17 | #include <linux/version.h> | ||
18 | #include <linux/types.h> | 19 | #include <linux/types.h> |
19 | #include <linux/mutex.h> | 20 | #include <linux/mutex.h> |
20 | #include <linux/proc_fs.h> | 21 | #include <linux/proc_fs.h> |
@@ -30,6 +31,8 @@ | |||
30 | #include <linux/jump_label.h> | 31 | #include <linux/jump_label.h> |
31 | #include <linux/hardirq.h> | 32 | #include <linux/hardirq.h> |
32 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
34 | #include <linux/device.h> | ||
35 | #include <linux/netdevice.h> | ||
33 | 36 | ||
34 | extern struct _ddebug __start___verbose[]; | 37 | extern struct _ddebug __start___verbose[]; |
35 | extern struct _ddebug __stop___verbose[]; | 38 | extern struct _ddebug __stop___verbose[]; |
@@ -38,7 +41,6 @@ struct ddebug_table { | |||
38 | struct list_head link; | 41 | struct list_head link; |
39 | char *mod_name; | 42 | char *mod_name; |
40 | unsigned int num_ddebugs; | 43 | unsigned int num_ddebugs; |
41 | unsigned int num_enabled; | ||
42 | struct _ddebug *ddebugs; | 44 | struct _ddebug *ddebugs; |
43 | }; | 45 | }; |
44 | 46 | ||
@@ -58,6 +60,7 @@ struct ddebug_iter { | |||
58 | static DEFINE_MUTEX(ddebug_lock); | 60 | static DEFINE_MUTEX(ddebug_lock); |
59 | static LIST_HEAD(ddebug_tables); | 61 | static LIST_HEAD(ddebug_tables); |
60 | static int verbose = 0; | 62 | static int verbose = 0; |
63 | module_param(verbose, int, 0644); | ||
61 | 64 | ||
62 | /* Return the last part of a pathname */ | 65 | /* Return the last part of a pathname */ |
63 | static inline const char *basename(const char *path) | 66 | static inline const char *basename(const char *path) |
@@ -66,12 +69,24 @@ static inline const char *basename(const char *path) | |||
66 | return tail ? tail+1 : path; | 69 | return tail ? tail+1 : path; |
67 | } | 70 | } |
68 | 71 | ||
72 | /* Return the path relative to source root */ | ||
73 | static inline const char *trim_prefix(const char *path) | ||
74 | { | ||
75 | int skip = strlen(__FILE__) - strlen("lib/dynamic_debug.c"); | ||
76 | |||
77 | if (strncmp(path, __FILE__, skip)) | ||
78 | skip = 0; /* prefix mismatch, don't skip */ | ||
79 | |||
80 | return path + skip; | ||
81 | } | ||
82 | |||
69 | static struct { unsigned flag:8; char opt_char; } opt_array[] = { | 83 | static struct { unsigned flag:8; char opt_char; } opt_array[] = { |
70 | { _DPRINTK_FLAGS_PRINT, 'p' }, | 84 | { _DPRINTK_FLAGS_PRINT, 'p' }, |
71 | { _DPRINTK_FLAGS_INCL_MODNAME, 'm' }, | 85 | { _DPRINTK_FLAGS_INCL_MODNAME, 'm' }, |
72 | { _DPRINTK_FLAGS_INCL_FUNCNAME, 'f' }, | 86 | { _DPRINTK_FLAGS_INCL_FUNCNAME, 'f' }, |
73 | { _DPRINTK_FLAGS_INCL_LINENO, 'l' }, | 87 | { _DPRINTK_FLAGS_INCL_LINENO, 'l' }, |
74 | { _DPRINTK_FLAGS_INCL_TID, 't' }, | 88 | { _DPRINTK_FLAGS_INCL_TID, 't' }, |
89 | { _DPRINTK_FLAGS_NONE, '_' }, | ||
75 | }; | 90 | }; |
76 | 91 | ||
77 | /* format a string into buf[] which describes the _ddebug's flags */ | 92 | /* format a string into buf[] which describes the _ddebug's flags */ |
@@ -81,58 +96,76 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf, | |||
81 | char *p = buf; | 96 | char *p = buf; |
82 | int i; | 97 | int i; |
83 | 98 | ||
84 | BUG_ON(maxlen < 4); | 99 | BUG_ON(maxlen < 6); |
85 | for (i = 0; i < ARRAY_SIZE(opt_array); ++i) | 100 | for (i = 0; i < ARRAY_SIZE(opt_array); ++i) |
86 | if (dp->flags & opt_array[i].flag) | 101 | if (dp->flags & opt_array[i].flag) |
87 | *p++ = opt_array[i].opt_char; | 102 | *p++ = opt_array[i].opt_char; |
88 | if (p == buf) | 103 | if (p == buf) |
89 | *p++ = '-'; | 104 | *p++ = '_'; |
90 | *p = '\0'; | 105 | *p = '\0'; |
91 | 106 | ||
92 | return buf; | 107 | return buf; |
93 | } | 108 | } |
94 | 109 | ||
110 | #define vpr_info(fmt, ...) \ | ||
111 | if (verbose) do { pr_info(fmt, ##__VA_ARGS__); } while (0) | ||
112 | |||
113 | #define vpr_info_dq(q, msg) \ | ||
114 | do { \ | ||
115 | /* trim last char off format print */ \ | ||
116 | vpr_info("%s: func=\"%s\" file=\"%s\" " \ | ||
117 | "module=\"%s\" format=\"%.*s\" " \ | ||
118 | "lineno=%u-%u", \ | ||
119 | msg, \ | ||
120 | q->function ? q->function : "", \ | ||
121 | q->filename ? q->filename : "", \ | ||
122 | q->module ? q->module : "", \ | ||
123 | (int)(q->format ? strlen(q->format) - 1 : 0), \ | ||
124 | q->format ? q->format : "", \ | ||
125 | q->first_lineno, q->last_lineno); \ | ||
126 | } while (0) | ||
127 | |||
95 | /* | 128 | /* |
96 | * Search the tables for _ddebug's which match the given | 129 | * Search the tables for _ddebug's which match the given `query' and |
97 | * `query' and apply the `flags' and `mask' to them. Tells | 130 | * apply the `flags' and `mask' to them. Returns number of matching |
98 | * the user which ddebug's were changed, or whether none | 131 | * callsites, normally the same as number of changes. If verbose, |
99 | * were matched. | 132 | * logs the changes. Takes ddebug_lock. |
100 | */ | 133 | */ |
101 | static void ddebug_change(const struct ddebug_query *query, | 134 | static int ddebug_change(const struct ddebug_query *query, |
102 | unsigned int flags, unsigned int mask) | 135 | unsigned int flags, unsigned int mask) |
103 | { | 136 | { |
104 | int i; | 137 | int i; |
105 | struct ddebug_table *dt; | 138 | struct ddebug_table *dt; |
106 | unsigned int newflags; | 139 | unsigned int newflags; |
107 | unsigned int nfound = 0; | 140 | unsigned int nfound = 0; |
108 | char flagbuf[8]; | 141 | char flagbuf[10]; |
109 | 142 | ||
110 | /* search for matching ddebugs */ | 143 | /* search for matching ddebugs */ |
111 | mutex_lock(&ddebug_lock); | 144 | mutex_lock(&ddebug_lock); |
112 | list_for_each_entry(dt, &ddebug_tables, link) { | 145 | list_for_each_entry(dt, &ddebug_tables, link) { |
113 | 146 | ||
114 | /* match against the module name */ | 147 | /* match against the module name */ |
115 | if (query->module != NULL && | 148 | if (query->module && strcmp(query->module, dt->mod_name)) |
116 | strcmp(query->module, dt->mod_name)) | ||
117 | continue; | 149 | continue; |
118 | 150 | ||
119 | for (i = 0 ; i < dt->num_ddebugs ; i++) { | 151 | for (i = 0 ; i < dt->num_ddebugs ; i++) { |
120 | struct _ddebug *dp = &dt->ddebugs[i]; | 152 | struct _ddebug *dp = &dt->ddebugs[i]; |
121 | 153 | ||
122 | /* match against the source filename */ | 154 | /* match against the source filename */ |
123 | if (query->filename != NULL && | 155 | if (query->filename && |
124 | strcmp(query->filename, dp->filename) && | 156 | strcmp(query->filename, dp->filename) && |
125 | strcmp(query->filename, basename(dp->filename))) | 157 | strcmp(query->filename, basename(dp->filename)) && |
158 | strcmp(query->filename, trim_prefix(dp->filename))) | ||
126 | continue; | 159 | continue; |
127 | 160 | ||
128 | /* match against the function */ | 161 | /* match against the function */ |
129 | if (query->function != NULL && | 162 | if (query->function && |
130 | strcmp(query->function, dp->function)) | 163 | strcmp(query->function, dp->function)) |
131 | continue; | 164 | continue; |
132 | 165 | ||
133 | /* match against the format */ | 166 | /* match against the format */ |
134 | if (query->format != NULL && | 167 | if (query->format && |
135 | strstr(dp->format, query->format) == NULL) | 168 | !strstr(dp->format, query->format)) |
136 | continue; | 169 | continue; |
137 | 170 | ||
138 | /* match against the line number range */ | 171 | /* match against the line number range */ |
@@ -148,29 +181,20 @@ static void ddebug_change(const struct ddebug_query *query, | |||
148 | newflags = (dp->flags & mask) | flags; | 181 | newflags = (dp->flags & mask) | flags; |
149 | if (newflags == dp->flags) | 182 | if (newflags == dp->flags) |
150 | continue; | 183 | continue; |
151 | |||
152 | if (!newflags) | ||
153 | dt->num_enabled--; | ||
154 | else if (!dp->flags) | ||
155 | dt->num_enabled++; | ||
156 | dp->flags = newflags; | 184 | dp->flags = newflags; |
157 | if (newflags) | 185 | vpr_info("changed %s:%d [%s]%s =%s\n", |
158 | dp->enabled = 1; | 186 | trim_prefix(dp->filename), dp->lineno, |
159 | else | 187 | dt->mod_name, dp->function, |
160 | dp->enabled = 0; | 188 | ddebug_describe_flags(dp, flagbuf, |
161 | if (verbose) | 189 | sizeof(flagbuf))); |
162 | printk(KERN_INFO | ||
163 | "ddebug: changed %s:%d [%s]%s %s\n", | ||
164 | dp->filename, dp->lineno, | ||
165 | dt->mod_name, dp->function, | ||
166 | ddebug_describe_flags(dp, flagbuf, | ||
167 | sizeof(flagbuf))); | ||
168 | } | 190 | } |
169 | } | 191 | } |
170 | mutex_unlock(&ddebug_lock); | 192 | mutex_unlock(&ddebug_lock); |
171 | 193 | ||
172 | if (!nfound && verbose) | 194 | if (!nfound && verbose) |
173 | printk(KERN_INFO "ddebug: no matches for query\n"); | 195 | pr_info("no matches for query\n"); |
196 | |||
197 | return nfound; | ||
174 | } | 198 | } |
175 | 199 | ||
176 | /* | 200 | /* |
@@ -190,8 +214,10 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords) | |||
190 | buf = skip_spaces(buf); | 214 | buf = skip_spaces(buf); |
191 | if (!*buf) | 215 | if (!*buf) |
192 | break; /* oh, it was trailing whitespace */ | 216 | break; /* oh, it was trailing whitespace */ |
217 | if (*buf == '#') | ||
218 | break; /* token starts comment, skip rest of line */ | ||
193 | 219 | ||
194 | /* Run `end' over a word, either whitespace separated or quoted */ | 220 | /* find `end' of word, whitespace separated or quoted */ |
195 | if (*buf == '"' || *buf == '\'') { | 221 | if (*buf == '"' || *buf == '\'') { |
196 | int quote = *buf++; | 222 | int quote = *buf++; |
197 | for (end = buf ; *end && *end != quote ; end++) | 223 | for (end = buf ; *end && *end != quote ; end++) |
@@ -203,8 +229,8 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords) | |||
203 | ; | 229 | ; |
204 | BUG_ON(end == buf); | 230 | BUG_ON(end == buf); |
205 | } | 231 | } |
206 | /* Here `buf' is the start of the word, `end' is one past the end */ | ||
207 | 232 | ||
233 | /* `buf' is start of word, `end' is one past its end */ | ||
208 | if (nwords == maxwords) | 234 | if (nwords == maxwords) |
209 | return -EINVAL; /* ran out of words[] before bytes */ | 235 | return -EINVAL; /* ran out of words[] before bytes */ |
210 | if (*end) | 236 | if (*end) |
@@ -215,10 +241,10 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords) | |||
215 | 241 | ||
216 | if (verbose) { | 242 | if (verbose) { |
217 | int i; | 243 | int i; |
218 | printk(KERN_INFO "%s: split into words:", __func__); | 244 | pr_info("split into words:"); |
219 | for (i = 0 ; i < nwords ; i++) | 245 | for (i = 0 ; i < nwords ; i++) |
220 | printk(" \"%s\"", words[i]); | 246 | pr_cont(" \"%s\"", words[i]); |
221 | printk("\n"); | 247 | pr_cont("\n"); |
222 | } | 248 | } |
223 | 249 | ||
224 | return nwords; | 250 | return nwords; |
@@ -283,6 +309,19 @@ static char *unescape(char *str) | |||
283 | return str; | 309 | return str; |
284 | } | 310 | } |
285 | 311 | ||
312 | static int check_set(const char **dest, char *src, char *name) | ||
313 | { | ||
314 | int rc = 0; | ||
315 | |||
316 | if (*dest) { | ||
317 | rc = -EINVAL; | ||
318 | pr_err("match-spec:%s val:%s overridden by %s", | ||
319 | name, *dest, src); | ||
320 | } | ||
321 | *dest = src; | ||
322 | return rc; | ||
323 | } | ||
324 | |||
286 | /* | 325 | /* |
287 | * Parse words[] as a ddebug query specification, which is a series | 326 | * Parse words[] as a ddebug query specification, which is a series |
288 | * of (keyword, value) pairs chosen from these possibilities: | 327 | * of (keyword, value) pairs chosen from these possibilities: |
@@ -294,55 +333,64 @@ static char *unescape(char *str) | |||
294 | * format <escaped-string-to-find-in-format> | 333 | * format <escaped-string-to-find-in-format> |
295 | * line <lineno> | 334 | * line <lineno> |
296 | * line <first-lineno>-<last-lineno> // where either may be empty | 335 | * line <first-lineno>-<last-lineno> // where either may be empty |
336 | * | ||
337 | * Only 1 of each type is allowed. | ||
338 | * Returns 0 on success, <0 on error. | ||
297 | */ | 339 | */ |
298 | static int ddebug_parse_query(char *words[], int nwords, | 340 | static int ddebug_parse_query(char *words[], int nwords, |
299 | struct ddebug_query *query) | 341 | struct ddebug_query *query, const char *modname) |
300 | { | 342 | { |
301 | unsigned int i; | 343 | unsigned int i; |
344 | int rc; | ||
302 | 345 | ||
303 | /* check we have an even number of words */ | 346 | /* check we have an even number of words */ |
304 | if (nwords % 2 != 0) | 347 | if (nwords % 2 != 0) |
305 | return -EINVAL; | 348 | return -EINVAL; |
306 | memset(query, 0, sizeof(*query)); | 349 | memset(query, 0, sizeof(*query)); |
307 | 350 | ||
351 | if (modname) | ||
352 | /* support $modname.dyndbg=<multiple queries> */ | ||
353 | query->module = modname; | ||
354 | |||
308 | for (i = 0 ; i < nwords ; i += 2) { | 355 | for (i = 0 ; i < nwords ; i += 2) { |
309 | if (!strcmp(words[i], "func")) | 356 | if (!strcmp(words[i], "func")) |
310 | query->function = words[i+1]; | 357 | rc = check_set(&query->function, words[i+1], "func"); |
311 | else if (!strcmp(words[i], "file")) | 358 | else if (!strcmp(words[i], "file")) |
312 | query->filename = words[i+1]; | 359 | rc = check_set(&query->filename, words[i+1], "file"); |
313 | else if (!strcmp(words[i], "module")) | 360 | else if (!strcmp(words[i], "module")) |
314 | query->module = words[i+1]; | 361 | rc = check_set(&query->module, words[i+1], "module"); |
315 | else if (!strcmp(words[i], "format")) | 362 | else if (!strcmp(words[i], "format")) |
316 | query->format = unescape(words[i+1]); | 363 | rc = check_set(&query->format, unescape(words[i+1]), |
364 | "format"); | ||
317 | else if (!strcmp(words[i], "line")) { | 365 | else if (!strcmp(words[i], "line")) { |
318 | char *first = words[i+1]; | 366 | char *first = words[i+1]; |
319 | char *last = strchr(first, '-'); | 367 | char *last = strchr(first, '-'); |
368 | if (query->first_lineno || query->last_lineno) { | ||
369 | pr_err("match-spec:line given 2 times\n"); | ||
370 | return -EINVAL; | ||
371 | } | ||
320 | if (last) | 372 | if (last) |
321 | *last++ = '\0'; | 373 | *last++ = '\0'; |
322 | if (parse_lineno(first, &query->first_lineno) < 0) | 374 | if (parse_lineno(first, &query->first_lineno) < 0) |
323 | return -EINVAL; | 375 | return -EINVAL; |
324 | if (last != NULL) { | 376 | if (last) { |
325 | /* range <first>-<last> */ | 377 | /* range <first>-<last> */ |
326 | if (parse_lineno(last, &query->last_lineno) < 0) | 378 | if (parse_lineno(last, &query->last_lineno) |
379 | < query->first_lineno) { | ||
380 | pr_err("last-line < 1st-line\n"); | ||
327 | return -EINVAL; | 381 | return -EINVAL; |
382 | } | ||
328 | } else { | 383 | } else { |
329 | query->last_lineno = query->first_lineno; | 384 | query->last_lineno = query->first_lineno; |
330 | } | 385 | } |
331 | } else { | 386 | } else { |
332 | if (verbose) | 387 | pr_err("unknown keyword \"%s\"\n", words[i]); |
333 | printk(KERN_ERR "%s: unknown keyword \"%s\"\n", | ||
334 | __func__, words[i]); | ||
335 | return -EINVAL; | 388 | return -EINVAL; |
336 | } | 389 | } |
390 | if (rc) | ||
391 | return rc; | ||
337 | } | 392 | } |
338 | 393 | vpr_info_dq(query, "parsed"); | |
339 | if (verbose) | ||
340 | printk(KERN_INFO "%s: q->function=\"%s\" q->filename=\"%s\" " | ||
341 | "q->module=\"%s\" q->format=\"%s\" q->lineno=%u-%u\n", | ||
342 | __func__, query->function, query->filename, | ||
343 | query->module, query->format, query->first_lineno, | ||
344 | query->last_lineno); | ||
345 | |||
346 | return 0; | 394 | return 0; |
347 | } | 395 | } |
348 | 396 | ||
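
ddebug_parse_query() now routes every keyword through check_set(), so a match-spec such as "func a func b +p" is rejected instead of silently taking the last value, and a module name supplied via a $modname.dyndbg= parameter is pre-seeded into the query. The duplicate-keyword guard, restated as a stand-alone user-space sketch with no dynamic-debug internals (the "line" keyword is omitted for brevity):

    #include <stdio.h>
    #include <string.h>

    struct query_sketch {
        const char *function, *filename, *module, *format;
    };

    static int set_once(const char **dest, const char *val, const char *name)
    {
        if (*dest) {
            fprintf(stderr, "match-spec:%s val:%s overridden by %s\n",
                    name, *dest, val);
            return -1;
        }
        *dest = val;
        return 0;
    }

    static int parse_pairs(char *words[], int nwords, struct query_sketch *q)
    {
        int i, rc = 0;

        memset(q, 0, sizeof(*q));
        if (nwords % 2)                     /* keyword/value pairs only */
            return -1;

        for (i = 0; i < nwords; i += 2) {
            if (!strcmp(words[i], "func"))
                rc = set_once(&q->function, words[i + 1], "func");
            else if (!strcmp(words[i], "file"))
                rc = set_once(&q->filename, words[i + 1], "file");
            else if (!strcmp(words[i], "module"))
                rc = set_once(&q->module, words[i + 1], "module");
            else if (!strcmp(words[i], "format"))
                rc = set_once(&q->format, words[i + 1], "format");
            else
                return -1;                  /* unknown keyword */
            if (rc)
                return rc;
        }
        return 0;
    }
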
@@ -367,8 +415,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp, | |||
367 | default: | 415 | default: |
368 | return -EINVAL; | 416 | return -EINVAL; |
369 | } | 417 | } |
370 | if (verbose) | 418 | vpr_info("op='%c'\n", op); |
371 | printk(KERN_INFO "%s: op='%c'\n", __func__, op); | ||
372 | 419 | ||
373 | for ( ; *str ; ++str) { | 420 | for ( ; *str ; ++str) { |
374 | for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) { | 421 | for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) { |
@@ -380,10 +427,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp, | |||
380 | if (i < 0) | 427 | if (i < 0) |
381 | return -EINVAL; | 428 | return -EINVAL; |
382 | } | 429 | } |
383 | if (flags == 0) | 430 | vpr_info("flags=0x%x\n", flags); |
384 | return -EINVAL; | ||
385 | if (verbose) | ||
386 | printk(KERN_INFO "%s: flags=0x%x\n", __func__, flags); | ||
387 | 431 | ||
388 | /* calculate final *flagsp, *maskp according to mask and op */ | 432 | /* calculate final *flagsp, *maskp according to mask and op */ |
389 | switch (op) { | 433 | switch (op) { |
@@ -400,70 +444,216 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp, | |||
400 | *flagsp = 0; | 444 | *flagsp = 0; |
401 | break; | 445 | break; |
402 | } | 446 | } |
403 | if (verbose) | 447 | vpr_info("*flagsp=0x%x *maskp=0x%x\n", *flagsp, *maskp); |
404 | printk(KERN_INFO "%s: *flagsp=0x%x *maskp=0x%x\n", | ||
405 | __func__, *flagsp, *maskp); | ||
406 | return 0; | 448 | return 0; |
407 | } | 449 | } |
408 | 450 | ||
409 | static int ddebug_exec_query(char *query_string) | 451 | static int ddebug_exec_query(char *query_string, const char *modname) |
410 | { | 452 | { |
411 | unsigned int flags = 0, mask = 0; | 453 | unsigned int flags = 0, mask = 0; |
412 | struct ddebug_query query; | 454 | struct ddebug_query query; |
413 | #define MAXWORDS 9 | 455 | #define MAXWORDS 9 |
414 | int nwords; | 456 | int nwords, nfound; |
415 | char *words[MAXWORDS]; | 457 | char *words[MAXWORDS]; |
416 | 458 | ||
417 | nwords = ddebug_tokenize(query_string, words, MAXWORDS); | 459 | nwords = ddebug_tokenize(query_string, words, MAXWORDS); |
418 | if (nwords <= 0) | 460 | if (nwords <= 0) |
419 | return -EINVAL; | 461 | return -EINVAL; |
420 | if (ddebug_parse_query(words, nwords-1, &query)) | 462 | if (ddebug_parse_query(words, nwords-1, &query, modname)) |
421 | return -EINVAL; | 463 | return -EINVAL; |
422 | if (ddebug_parse_flags(words[nwords-1], &flags, &mask)) | 464 | if (ddebug_parse_flags(words[nwords-1], &flags, &mask)) |
423 | return -EINVAL; | 465 | return -EINVAL; |
424 | 466 | ||
425 | /* actually go and implement the change */ | 467 | /* actually go and implement the change */ |
426 | ddebug_change(&query, flags, mask); | 468 | nfound = ddebug_change(&query, flags, mask); |
469 | vpr_info_dq((&query), (nfound) ? "applied" : "no-match"); | ||
470 | |||
471 | return nfound; | ||
472 | } | ||
473 | |||
474 | /* handle multiple queries in query string, continue on error, return | ||
475 | last error or number of matching callsites. Module name is either | ||
476 | in param (for boot arg) or perhaps in query string. | ||
477 | */ | ||
478 | static int ddebug_exec_queries(char *query, const char *modname) | ||
479 | { | ||
480 | char *split; | ||
481 | int i, errs = 0, exitcode = 0, rc, nfound = 0; | ||
482 | |||
483 | for (i = 0; query; query = split) { | ||
484 | split = strpbrk(query, ";\n"); | ||
485 | if (split) | ||
486 | *split++ = '\0'; | ||
487 | |||
488 | query = skip_spaces(query); | ||
489 | if (!query || !*query || *query == '#') | ||
490 | continue; | ||
491 | |||
492 | vpr_info("query %d: \"%s\"\n", i, query); | ||
493 | |||
494 | rc = ddebug_exec_query(query, modname); | ||
495 | if (rc < 0) { | ||
496 | errs++; | ||
497 | exitcode = rc; | ||
498 | } else | ||
499 | nfound += rc; | ||
500 | i++; | ||
501 | } | ||
502 | vpr_info("processed %d queries, with %d matches, %d errs\n", | ||
503 | i, nfound, errs); | ||
504 | |||
505 | if (exitcode) | ||
506 | return exitcode; | ||
507 | return nfound; | ||
508 | } | ||
509 | |||
510 | #define PREFIX_SIZE 64 | ||
511 | |||
512 | static int remaining(int wrote) | ||
513 | { | ||
514 | if (PREFIX_SIZE - wrote > 0) | ||
515 | return PREFIX_SIZE - wrote; | ||
427 | return 0; | 516 | return 0; |
428 | } | 517 | } |
429 | 518 | ||
519 | static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf) | ||
520 | { | ||
521 | int pos_after_tid; | ||
522 | int pos = 0; | ||
523 | |||
524 | *buf = '\0'; | ||
525 | |||
526 | if (desc->flags & _DPRINTK_FLAGS_INCL_TID) { | ||
527 | if (in_interrupt()) | ||
528 | pos += snprintf(buf + pos, remaining(pos), "<intr> "); | ||
529 | else | ||
530 | pos += snprintf(buf + pos, remaining(pos), "[%d] ", | ||
531 | task_pid_vnr(current)); | ||
532 | } | ||
533 | pos_after_tid = pos; | ||
534 | if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME) | ||
535 | pos += snprintf(buf + pos, remaining(pos), "%s:", | ||
536 | desc->modname); | ||
537 | if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME) | ||
538 | pos += snprintf(buf + pos, remaining(pos), "%s:", | ||
539 | desc->function); | ||
540 | if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO) | ||
541 | pos += snprintf(buf + pos, remaining(pos), "%d:", | ||
542 | desc->lineno); | ||
543 | if (pos - pos_after_tid) | ||
544 | pos += snprintf(buf + pos, remaining(pos), " "); | ||
545 | if (pos >= PREFIX_SIZE) | ||
546 | buf[PREFIX_SIZE - 1] = '\0'; | ||
547 | |||
548 | return buf; | ||
549 | } | ||
550 | |||
430 | int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...) | 551 | int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...) |
431 | { | 552 | { |
432 | va_list args; | 553 | va_list args; |
433 | int res; | 554 | int res; |
555 | struct va_format vaf; | ||
556 | char buf[PREFIX_SIZE]; | ||
434 | 557 | ||
435 | BUG_ON(!descriptor); | 558 | BUG_ON(!descriptor); |
436 | BUG_ON(!fmt); | 559 | BUG_ON(!fmt); |
437 | 560 | ||
438 | va_start(args, fmt); | 561 | va_start(args, fmt); |
439 | res = printk(KERN_DEBUG); | 562 | |
440 | if (descriptor->flags & _DPRINTK_FLAGS_INCL_TID) { | 563 | vaf.fmt = fmt; |
441 | if (in_interrupt()) | 564 | vaf.va = &args; |
442 | res += printk(KERN_CONT "<intr> "); | 565 | |
443 | else | 566 | res = printk(KERN_DEBUG "%s%pV", |
444 | res += printk(KERN_CONT "[%d] ", task_pid_vnr(current)); | 567 | dynamic_emit_prefix(descriptor, buf), &vaf); |
445 | } | 568 | |
446 | if (descriptor->flags & _DPRINTK_FLAGS_INCL_MODNAME) | ||
447 | res += printk(KERN_CONT "%s:", descriptor->modname); | ||
448 | if (descriptor->flags & _DPRINTK_FLAGS_INCL_FUNCNAME) | ||
449 | res += printk(KERN_CONT "%s:", descriptor->function); | ||
450 | if (descriptor->flags & _DPRINTK_FLAGS_INCL_LINENO) | ||
451 | res += printk(KERN_CONT "%d ", descriptor->lineno); | ||
452 | res += vprintk(fmt, args); | ||
453 | va_end(args); | 569 | va_end(args); |
454 | 570 | ||
455 | return res; | 571 | return res; |
456 | } | 572 | } |
457 | EXPORT_SYMBOL(__dynamic_pr_debug); | 573 | EXPORT_SYMBOL(__dynamic_pr_debug); |
458 | 574 | ||
459 | static __initdata char ddebug_setup_string[1024]; | 575 | int __dynamic_dev_dbg(struct _ddebug *descriptor, |
576 | const struct device *dev, const char *fmt, ...) | ||
577 | { | ||
578 | struct va_format vaf; | ||
579 | va_list args; | ||
580 | int res; | ||
581 | |||
582 | BUG_ON(!descriptor); | ||
583 | BUG_ON(!fmt); | ||
584 | |||
585 | va_start(args, fmt); | ||
586 | |||
587 | vaf.fmt = fmt; | ||
588 | vaf.va = &args; | ||
589 | |||
590 | if (!dev) { | ||
591 | res = printk(KERN_DEBUG "(NULL device *): %pV", &vaf); | ||
592 | } else { | ||
593 | char buf[PREFIX_SIZE]; | ||
594 | |||
595 | res = dev_printk_emit(7, dev, "%s%s %s: %pV", | ||
596 | dynamic_emit_prefix(descriptor, buf), | ||
597 | dev_driver_string(dev), dev_name(dev), | ||
598 | &vaf); | ||
599 | } | ||
600 | |||
601 | va_end(args); | ||
602 | |||
603 | return res; | ||
604 | } | ||
605 | EXPORT_SYMBOL(__dynamic_dev_dbg); | ||
606 | |||
607 | #ifdef CONFIG_NET | ||
608 | |||
609 | int __dynamic_netdev_dbg(struct _ddebug *descriptor, | ||
610 | const struct net_device *dev, const char *fmt, ...) | ||
611 | { | ||
612 | struct va_format vaf; | ||
613 | va_list args; | ||
614 | int res; | ||
615 | |||
616 | BUG_ON(!descriptor); | ||
617 | BUG_ON(!fmt); | ||
618 | |||
619 | va_start(args, fmt); | ||
620 | |||
621 | vaf.fmt = fmt; | ||
622 | vaf.va = &args; | ||
623 | |||
624 | if (dev && dev->dev.parent) { | ||
625 | char buf[PREFIX_SIZE]; | ||
626 | |||
627 | res = dev_printk_emit(7, dev->dev.parent, | ||
628 | "%s%s %s %s: %pV", | ||
629 | dynamic_emit_prefix(descriptor, buf), | ||
630 | dev_driver_string(dev->dev.parent), | ||
631 | dev_name(dev->dev.parent), | ||
632 | netdev_name(dev), &vaf); | ||
633 | } else if (dev) { | ||
634 | res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf); | ||
635 | } else { | ||
636 | res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf); | ||
637 | } | ||
638 | |||
639 | va_end(args); | ||
640 | |||
641 | return res; | ||
642 | } | ||
643 | EXPORT_SYMBOL(__dynamic_netdev_dbg); | ||
644 | |||
645 | #endif | ||
646 | |||
647 | #define DDEBUG_STRING_SIZE 1024 | ||
648 | static __initdata char ddebug_setup_string[DDEBUG_STRING_SIZE]; | ||
649 | |||
460 | static __init int ddebug_setup_query(char *str) | 650 | static __init int ddebug_setup_query(char *str) |
461 | { | 651 | { |
462 | if (strlen(str) >= 1024) { | 652 | if (strlen(str) >= DDEBUG_STRING_SIZE) { |
463 | pr_warning("ddebug boot param string too large\n"); | 653 | pr_warn("ddebug boot param string too large\n"); |
464 | return 0; | 654 | return 0; |
465 | } | 655 | } |
466 | strcpy(ddebug_setup_string, str); | 656 | strlcpy(ddebug_setup_string, str, DDEBUG_STRING_SIZE); |
467 | return 1; | 657 | return 1; |
468 | } | 658 | } |
469 | 659 | ||
@@ -473,26 +663,32 @@ __setup("ddebug_query=", ddebug_setup_query); | |||
473 | * File_ops->write method for <debugfs>/dynamic_debug/control. Gathers the | 663 | * File_ops->write method for <debugfs>/dynamic_debug/control. Gathers the |
474 | * command text from userspace, parses and executes it. | 664 | * command text from userspace, parses and executes it. |
475 | */ | 665 | */ |
666 | #define USER_BUF_PAGE 4096 | ||
476 | static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf, | 667 | static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf, |
477 | size_t len, loff_t *offp) | 668 | size_t len, loff_t *offp) |
478 | { | 669 | { |
479 | char tmpbuf[256]; | 670 | char *tmpbuf; |
480 | int ret; | 671 | int ret; |
481 | 672 | ||
482 | if (len == 0) | 673 | if (len == 0) |
483 | return 0; | 674 | return 0; |
484 | /* we don't check *offp -- multiple writes() are allowed */ | 675 | if (len > USER_BUF_PAGE - 1) { |
485 | if (len > sizeof(tmpbuf)-1) | 676 | pr_warn("expected <%d bytes into control\n", USER_BUF_PAGE); |
486 | return -E2BIG; | 677 | return -E2BIG; |
487 | if (copy_from_user(tmpbuf, ubuf, len)) | 678 | } |
679 | tmpbuf = kmalloc(len + 1, GFP_KERNEL); | ||
680 | if (!tmpbuf) | ||
681 | return -ENOMEM; | ||
682 | if (copy_from_user(tmpbuf, ubuf, len)) { | ||
683 | kfree(tmpbuf); | ||
488 | return -EFAULT; | 684 | return -EFAULT; |
685 | } | ||
489 | tmpbuf[len] = '\0'; | 686 | tmpbuf[len] = '\0'; |
490 | if (verbose) | 687 | vpr_info("read %d bytes from userspace\n", (int)len); |
491 | printk(KERN_INFO "%s: read %d bytes from userspace\n", | ||
492 | __func__, (int)len); | ||
493 | 688 | ||
494 | ret = ddebug_exec_query(tmpbuf); | 689 | ret = ddebug_exec_queries(tmpbuf, NULL); |
495 | if (ret) | 690 | kfree(tmpbuf); |
691 | if (ret < 0) | ||
496 | return ret; | 692 | return ret; |
497 | 693 | ||
498 | *offp += len; | 694 | *offp += len; |
@@ -551,9 +747,7 @@ static void *ddebug_proc_start(struct seq_file *m, loff_t *pos) | |||
551 | struct _ddebug *dp; | 747 | struct _ddebug *dp; |
552 | int n = *pos; | 748 | int n = *pos; |
553 | 749 | ||
554 | if (verbose) | 750 | vpr_info("called m=%p *pos=%lld\n", m, (unsigned long long)*pos); |
555 | printk(KERN_INFO "%s: called m=%p *pos=%lld\n", | ||
556 | __func__, m, (unsigned long long)*pos); | ||
557 | 751 | ||
558 | mutex_lock(&ddebug_lock); | 752 | mutex_lock(&ddebug_lock); |
559 | 753 | ||
@@ -577,9 +771,8 @@ static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos) | |||
577 | struct ddebug_iter *iter = m->private; | 771 | struct ddebug_iter *iter = m->private; |
578 | struct _ddebug *dp; | 772 | struct _ddebug *dp; |
579 | 773 | ||
580 | if (verbose) | 774 | vpr_info("called m=%p p=%p *pos=%lld\n", |
581 | printk(KERN_INFO "%s: called m=%p p=%p *pos=%lld\n", | 775 | m, p, (unsigned long long)*pos); |
582 | __func__, m, p, (unsigned long long)*pos); | ||
583 | 776 | ||
584 | if (p == SEQ_START_TOKEN) | 777 | if (p == SEQ_START_TOKEN) |
585 | dp = ddebug_iter_first(iter); | 778 | dp = ddebug_iter_first(iter); |
@@ -599,11 +792,9 @@ static int ddebug_proc_show(struct seq_file *m, void *p) | |||
599 | { | 792 | { |
600 | struct ddebug_iter *iter = m->private; | 793 | struct ddebug_iter *iter = m->private; |
601 | struct _ddebug *dp = p; | 794 | struct _ddebug *dp = p; |
602 | char flagsbuf[8]; | 795 | char flagsbuf[10]; |
603 | 796 | ||
604 | if (verbose) | 797 | vpr_info("called m=%p p=%p\n", m, p); |
605 | printk(KERN_INFO "%s: called m=%p p=%p\n", | ||
606 | __func__, m, p); | ||
607 | 798 | ||
608 | if (p == SEQ_START_TOKEN) { | 799 | if (p == SEQ_START_TOKEN) { |
609 | seq_puts(m, | 800 | seq_puts(m, |
@@ -611,10 +802,10 @@ static int ddebug_proc_show(struct seq_file *m, void *p) | |||
611 | return 0; | 802 | return 0; |
612 | } | 803 | } |
613 | 804 | ||
614 | seq_printf(m, "%s:%u [%s]%s %s \"", | 805 | seq_printf(m, "%s:%u [%s]%s =%s \"", |
615 | dp->filename, dp->lineno, | 806 | trim_prefix(dp->filename), dp->lineno, |
616 | iter->table->mod_name, dp->function, | 807 | iter->table->mod_name, dp->function, |
617 | ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf))); | 808 | ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf))); |
618 | seq_escape(m, dp->format, "\t\r\n\""); | 809 | seq_escape(m, dp->format, "\t\r\n\""); |
619 | seq_puts(m, "\"\n"); | 810 | seq_puts(m, "\"\n"); |
620 | 811 | ||
@@ -627,9 +818,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p) | |||
627 | */ | 818 | */ |
628 | static void ddebug_proc_stop(struct seq_file *m, void *p) | 819 | static void ddebug_proc_stop(struct seq_file *m, void *p) |
629 | { | 820 | { |
630 | if (verbose) | 821 | vpr_info("called m=%p p=%p\n", m, p); |
631 | printk(KERN_INFO "%s: called m=%p p=%p\n", | ||
632 | __func__, m, p); | ||
633 | mutex_unlock(&ddebug_lock); | 822 | mutex_unlock(&ddebug_lock); |
634 | } | 823 | } |
635 | 824 | ||
@@ -641,18 +830,18 @@ static const struct seq_operations ddebug_proc_seqops = { | |||
641 | }; | 830 | }; |
642 | 831 | ||
643 | /* | 832 | /* |
644 | * File_ops->open method for <debugfs>/dynamic_debug/control. Does the seq_file | 833 | * File_ops->open method for <debugfs>/dynamic_debug/control. Does |
645 | * setup dance, and also creates an iterator to walk the _ddebugs. | 834 | * the seq_file setup dance, and also creates an iterator to walk the |
646 | * Note that we create a seq_file always, even for O_WRONLY files | 835 | * _ddebugs. Note that we create a seq_file always, even for O_WRONLY |
647 | * where it's not needed, as doing so simplifies the ->release method. | 836 | * files where it's not needed, as doing so simplifies the ->release |
837 | * method. | ||
648 | */ | 838 | */ |
649 | static int ddebug_proc_open(struct inode *inode, struct file *file) | 839 | static int ddebug_proc_open(struct inode *inode, struct file *file) |
650 | { | 840 | { |
651 | struct ddebug_iter *iter; | 841 | struct ddebug_iter *iter; |
652 | int err; | 842 | int err; |
653 | 843 | ||
654 | if (verbose) | 844 | vpr_info("called\n"); |
655 | printk(KERN_INFO "%s: called\n", __func__); | ||
656 | 845 | ||
657 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 846 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
658 | if (iter == NULL) | 847 | if (iter == NULL) |
@@ -696,20 +885,57 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n, | |||
696 | } | 885 | } |
697 | dt->mod_name = new_name; | 886 | dt->mod_name = new_name; |
698 | dt->num_ddebugs = n; | 887 | dt->num_ddebugs = n; |
699 | dt->num_enabled = 0; | ||
700 | dt->ddebugs = tab; | 888 | dt->ddebugs = tab; |
701 | 889 | ||
702 | mutex_lock(&ddebug_lock); | 890 | mutex_lock(&ddebug_lock); |
703 | list_add_tail(&dt->link, &ddebug_tables); | 891 | list_add_tail(&dt->link, &ddebug_tables); |
704 | mutex_unlock(&ddebug_lock); | 892 | mutex_unlock(&ddebug_lock); |
705 | 893 | ||
706 | if (verbose) | 894 | vpr_info("%u debug prints in module %s\n", n, dt->mod_name); |
707 | printk(KERN_INFO "%u debug prints in module %s\n", | ||
708 | n, dt->mod_name); | ||
709 | return 0; | 895 | return 0; |
710 | } | 896 | } |
711 | EXPORT_SYMBOL_GPL(ddebug_add_module); | 897 | EXPORT_SYMBOL_GPL(ddebug_add_module); |
712 | 898 | ||
899 | /* helper for ddebug_dyndbg_(boot|module)_param_cb */ | ||
900 | static int ddebug_dyndbg_param_cb(char *param, char *val, | ||
901 | const char *modname, int on_err) | ||
902 | { | ||
903 | char *sep; | ||
904 | |||
905 | sep = strchr(param, '.'); | ||
906 | if (sep) { | ||
907 | /* needed only for ddebug_dyndbg_boot_param_cb */ | ||
908 | *sep = '\0'; | ||
909 | modname = param; | ||
910 | param = sep + 1; | ||
911 | } | ||
912 | if (strcmp(param, "dyndbg")) | ||
913 | return on_err; /* determined by caller */ | ||
914 | |||
915 | ddebug_exec_queries((val ? val : "+p"), modname); | ||
916 | |||
917 | return 0; /* query failure shouldn't stop module load */ | ||
918 | } | ||
919 | |||
920 | /* handle both dyndbg and $module.dyndbg params at boot */ | ||
921 | static int ddebug_dyndbg_boot_param_cb(char *param, char *val, | ||
922 | const char *unused) | ||
923 | { | ||
924 | vpr_info("%s=\"%s\"\n", param, val); | ||
925 | return ddebug_dyndbg_param_cb(param, val, NULL, 0); | ||
926 | } | ||
927 | |||
928 | /* | ||
929 | * modprobe foo finds foo.params in boot-args, strips "foo.", and | ||
930 | * passes them to load_module(). This callback gets unknown params, | ||
931 | * processes dyndbg params, rejects others. | ||
932 | */ | ||
933 | int ddebug_dyndbg_module_param_cb(char *param, char *val, const char *module) | ||
934 | { | ||
935 | vpr_info("module: %s %s=\"%s\"\n", module, param, val); | ||
936 | return ddebug_dyndbg_param_cb(param, val, module, -ENOENT); | ||
937 | } | ||
938 | |||
713 | static void ddebug_table_free(struct ddebug_table *dt) | 939 | static void ddebug_table_free(struct ddebug_table *dt) |
714 | { | 940 | { |
715 | list_del_init(&dt->link); | 941 | list_del_init(&dt->link); |
@@ -726,9 +952,7 @@ int ddebug_remove_module(const char *mod_name) | |||
726 | struct ddebug_table *dt, *nextdt; | 952 | struct ddebug_table *dt, *nextdt; |
727 | int ret = -ENOENT; | 953 | int ret = -ENOENT; |
728 | 954 | ||
729 | if (verbose) | 955 | vpr_info("removing module \"%s\"\n", mod_name); |
730 | printk(KERN_INFO "%s: removing module \"%s\"\n", | ||
731 | __func__, mod_name); | ||
732 | 956 | ||
733 | mutex_lock(&ddebug_lock); | 957 | mutex_lock(&ddebug_lock); |
734 | list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) { | 958 | list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) { |
@@ -779,46 +1003,76 @@ static int __init dynamic_debug_init(void) | |||
779 | { | 1003 | { |
780 | struct _ddebug *iter, *iter_start; | 1004 | struct _ddebug *iter, *iter_start; |
781 | const char *modname = NULL; | 1005 | const char *modname = NULL; |
1006 | char *cmdline; | ||
782 | int ret = 0; | 1007 | int ret = 0; |
783 | int n = 0; | 1008 | int n = 0, entries = 0, modct = 0; |
784 | 1009 | int verbose_bytes = 0; | |
785 | if (__start___verbose != __stop___verbose) { | 1010 | |
786 | iter = __start___verbose; | 1011 | if (__start___verbose == __stop___verbose) { |
787 | modname = iter->modname; | 1012 | pr_warn("_ddebug table is empty in a " |
788 | iter_start = iter; | 1013 | "CONFIG_DYNAMIC_DEBUG build"); |
789 | for (; iter < __stop___verbose; iter++) { | 1014 | return 1; |
790 | if (strcmp(modname, iter->modname)) { | 1015 | } |
791 | ret = ddebug_add_module(iter_start, n, modname); | 1016 | iter = __start___verbose; |
792 | if (ret) | 1017 | modname = iter->modname; |
793 | goto out_free; | 1018 | iter_start = iter; |
794 | n = 0; | 1019 | for (; iter < __stop___verbose; iter++) { |
795 | modname = iter->modname; | 1020 | entries++; |
796 | iter_start = iter; | 1021 | verbose_bytes += strlen(iter->modname) + strlen(iter->function) |
797 | } | 1022 | + strlen(iter->filename) + strlen(iter->format); |
798 | n++; | 1023 | |
1024 | if (strcmp(modname, iter->modname)) { | ||
1025 | modct++; | ||
1026 | ret = ddebug_add_module(iter_start, n, modname); | ||
1027 | if (ret) | ||
1028 | goto out_err; | ||
1029 | n = 0; | ||
1030 | modname = iter->modname; | ||
1031 | iter_start = iter; | ||
799 | } | 1032 | } |
800 | ret = ddebug_add_module(iter_start, n, modname); | 1033 | n++; |
801 | } | 1034 | } |
1035 | ret = ddebug_add_module(iter_start, n, modname); | ||
1036 | if (ret) | ||
1037 | goto out_err; | ||
802 | 1038 | ||
803 | /* ddebug_query boot param got passed -> set it up */ | 1039 | ddebug_init_success = 1; |
1040 | vpr_info("%d modules, %d entries and %d bytes in ddebug tables," | ||
1041 | " %d bytes in (readonly) verbose section\n", | ||
1042 | modct, entries, (int)( modct * sizeof(struct ddebug_table)), | ||
1043 | verbose_bytes + (int)(__stop___verbose - __start___verbose)); | ||
1044 | |||
1045 | /* apply ddebug_query boot param, dont unload tables on err */ | ||
804 | if (ddebug_setup_string[0] != '\0') { | 1046 | if (ddebug_setup_string[0] != '\0') { |
805 | ret = ddebug_exec_query(ddebug_setup_string); | 1047 | pr_warn("ddebug_query param name is deprecated," |
806 | if (ret) | 1048 | " change it to dyndbg\n"); |
807 | pr_warning("Invalid ddebug boot param %s", | 1049 | ret = ddebug_exec_queries(ddebug_setup_string, NULL); |
808 | ddebug_setup_string); | 1050 | if (ret < 0) |
809 | else | 1051 | pr_warn("Invalid ddebug boot param %s", |
810 | pr_info("ddebug initialized with string %s", | ||
811 | ddebug_setup_string); | 1052 | ddebug_setup_string); |
1053 | else | ||
1054 | pr_info("%d changes by ddebug_query\n", ret); | ||
812 | } | 1055 | } |
1056 | /* now that ddebug tables are loaded, process all boot args | ||
1057 | * again to find and activate queries given in dyndbg params. | ||
1058 | * While this has already been done for known boot params, it | ||
1059 | * ignored the unknown ones (dyndbg in particular). Reusing | ||
1060 | * parse_args avoids ad-hoc parsing. This will also attempt | ||
1061 | * to activate queries for not-yet-loaded modules, which is | ||
1062 | * slightly noisy if verbose, but harmless. | ||
1063 | */ | ||
1064 | cmdline = kstrdup(saved_command_line, GFP_KERNEL); | ||
1065 | parse_args("dyndbg params", cmdline, NULL, | ||
1066 | 0, 0, 0, &ddebug_dyndbg_boot_param_cb); | ||
1067 | kfree(cmdline); | ||
1068 | return 0; | ||
813 | 1069 | ||
814 | out_free: | 1070 | out_err: |
815 | if (ret) | 1071 | ddebug_remove_all_tables(); |
816 | ddebug_remove_all_tables(); | ||
817 | else | ||
818 | ddebug_init_success = 1; | ||
819 | return 0; | 1072 | return 0; |
820 | } | 1073 | } |
821 | /* Allow early initialization for boot messages via boot param */ | 1074 | /* Allow early initialization for boot messages via boot param */ |
822 | arch_initcall(dynamic_debug_init); | 1075 | early_initcall(dynamic_debug_init); |
1076 | |||
823 | /* Debugfs setup must be done later */ | 1077 | /* Debugfs setup must be done later */ |
824 | module_init(dynamic_debug_init_debugfs); | 1078 | fs_initcall(dynamic_debug_init_debugfs); |
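The rewritten control-file write path above accepts several queries per write, separated by ';' or newlines, and returns the number of matched callsites. As a rough illustration, the userspace sketch below writes one such multi-query string; the module name "mymod" is a placeholder, and debugfs is assumed to be mounted at /sys/kernel/debug. The flags used are the documented ones (p = enable printing, f = include function name, l = include line number).

/* Hedged sketch: enable dynamic debug callsites via the control file.
 * "mymod" is a placeholder module name, not something from this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *ctrl = "/sys/kernel/debug/dynamic_debug/control";
	/* two queries in a single write, separated by ';' */
	const char *q = "module mymod +pf; module mymod line 100-200 +l\n";
	int fd = open(ctrl, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, q, strlen(q)) < 0)
		perror("write");
	close(fd);
	return 0;
}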
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c new file mode 100644 index 000000000000..0777c5a45fa0 --- /dev/null +++ b/lib/dynamic_queue_limits.c | |||
@@ -0,0 +1,138 @@ | |||
1 | /* | ||
2 | * Dynamic byte queue limits. See include/linux/dynamic_queue_limits.h | ||
3 | * | ||
4 | * Copyright (c) 2011, Tom Herbert <therbert@google.com> | ||
5 | */ | ||
6 | #include <linux/module.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/ctype.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/jiffies.h> | ||
11 | #include <linux/dynamic_queue_limits.h> | ||
12 | |||
13 | #define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0) | ||
14 | #define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0) | ||
15 | |||
16 | /* Records completed count and recalculates the queue limit */ | ||
17 | void dql_completed(struct dql *dql, unsigned int count) | ||
18 | { | ||
19 | unsigned int inprogress, prev_inprogress, limit; | ||
20 | unsigned int ovlimit, completed, num_queued; | ||
21 | bool all_prev_completed; | ||
22 | |||
23 | num_queued = ACCESS_ONCE(dql->num_queued); | ||
24 | |||
25 | /* Can't complete more than what's in queue */ | ||
26 | BUG_ON(count > num_queued - dql->num_completed); | ||
27 | |||
28 | completed = dql->num_completed + count; | ||
29 | limit = dql->limit; | ||
30 | ovlimit = POSDIFF(num_queued - dql->num_completed, limit); | ||
31 | inprogress = num_queued - completed; | ||
32 | prev_inprogress = dql->prev_num_queued - dql->num_completed; | ||
33 | all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued); | ||
34 | |||
35 | if ((ovlimit && !inprogress) || | ||
36 | (dql->prev_ovlimit && all_prev_completed)) { | ||
37 | /* | ||
38 | * Queue considered starved if: | ||
39 | * - The queue was over-limit in the last interval, | ||
40 | * and there is no more data in the queue. | ||
41 | * OR | ||
42 | * - The queue was over-limit in the previous interval and | ||
43 | * when enqueuing it was possible that all queued data | ||
44 | * had been consumed. This covers the case when queue | ||
45 | * may have become starved between completion processing | ||
46 | * running and next time enqueue was scheduled. | ||
47 | * | ||
48 | * When queue is starved increase the limit by the amount | ||
49 | * of bytes both sent and completed in the last interval, | ||
50 | * plus any previous over-limit. | ||
51 | */ | ||
52 | limit += POSDIFF(completed, dql->prev_num_queued) + | ||
53 | dql->prev_ovlimit; | ||
54 | dql->slack_start_time = jiffies; | ||
55 | dql->lowest_slack = UINT_MAX; | ||
56 | } else if (inprogress && prev_inprogress && !all_prev_completed) { | ||
57 | /* | ||
58 | * Queue was not starved, check if the limit can be decreased. | ||
59 | * A decrease is only considered if the queue has been busy in | ||
60 | * the whole interval (the check above). | ||
61 | * | ||
62 | * If there is slack, the amount of excess data queued above | ||
63 | * the amount needed to prevent starvation, the queue limit | ||
64 | * can be decreased. To avoid hysteresis we consider the | ||
65 | * minimum amount of slack found over several iterations of the | ||
66 | * completion routine. | ||
67 | */ | ||
68 | unsigned int slack, slack_last_objs; | ||
69 | |||
70 | /* | ||
71 | * Slack is the maximum of | ||
72 | * - The queue limit plus previous over-limit minus twice | ||
73 | * the number of objects completed. Note that two times | ||
74 | * number of completed bytes is a basis for an upper bound | ||
75 | * of the limit. | ||
76 | * - Portion of objects in the last queuing operation that | ||
77 | * was not part of non-zero previous over-limit. That is | ||
78 | * "round down" by non-overlimit portion of the last | ||
79 | * queueing operation. | ||
80 | */ | ||
81 | slack = POSDIFF(limit + dql->prev_ovlimit, | ||
82 | 2 * (completed - dql->num_completed)); | ||
83 | slack_last_objs = dql->prev_ovlimit ? | ||
84 | POSDIFF(dql->prev_last_obj_cnt, dql->prev_ovlimit) : 0; | ||
85 | |||
86 | slack = max(slack, slack_last_objs); | ||
87 | |||
88 | if (slack < dql->lowest_slack) | ||
89 | dql->lowest_slack = slack; | ||
90 | |||
91 | if (time_after(jiffies, | ||
92 | dql->slack_start_time + dql->slack_hold_time)) { | ||
93 | limit = POSDIFF(limit, dql->lowest_slack); | ||
94 | dql->slack_start_time = jiffies; | ||
95 | dql->lowest_slack = UINT_MAX; | ||
96 | } | ||
97 | } | ||
98 | |||
99 | /* Enforce bounds on limit */ | ||
100 | limit = clamp(limit, dql->min_limit, dql->max_limit); | ||
101 | |||
102 | if (limit != dql->limit) { | ||
103 | dql->limit = limit; | ||
104 | ovlimit = 0; | ||
105 | } | ||
106 | |||
107 | dql->adj_limit = limit + completed; | ||
108 | dql->prev_ovlimit = ovlimit; | ||
109 | dql->prev_last_obj_cnt = dql->last_obj_cnt; | ||
110 | dql->num_completed = completed; | ||
111 | dql->prev_num_queued = num_queued; | ||
112 | } | ||
113 | EXPORT_SYMBOL(dql_completed); | ||
114 | |||
115 | void dql_reset(struct dql *dql) | ||
116 | { | ||
117 | /* Reset all dynamic values */ | ||
118 | dql->limit = 0; | ||
119 | dql->num_queued = 0; | ||
120 | dql->num_completed = 0; | ||
121 | dql->last_obj_cnt = 0; | ||
122 | dql->prev_num_queued = 0; | ||
123 | dql->prev_last_obj_cnt = 0; | ||
124 | dql->prev_ovlimit = 0; | ||
125 | dql->lowest_slack = UINT_MAX; | ||
126 | dql->slack_start_time = jiffies; | ||
127 | } | ||
128 | EXPORT_SYMBOL(dql_reset); | ||
129 | |||
130 | int dql_init(struct dql *dql, unsigned hold_time) | ||
131 | { | ||
132 | dql->max_limit = DQL_MAX_LIMIT; | ||
133 | dql->min_limit = 0; | ||
134 | dql->slack_hold_time = hold_time; | ||
135 | dql_reset(dql); | ||
136 | return 0; | ||
137 | } | ||
138 | EXPORT_SYMBOL(dql_init); | ||
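dql_completed() above only recomputes the limit; the enqueue-side helpers are inline functions expected in include/linux/dynamic_queue_limits.h. The sketch below shows the intended driver pattern under that assumption; the ring structure, ndev argument and the stop/wake calls are hypothetical placeholders, only the dql_* calls come from this API.

/* Hedged sketch of the DQL usage pattern in a single-queue driver. */
#include <linux/dynamic_queue_limits.h>
#include <linux/netdevice.h>

struct my_ring {
	struct dql dql;
	/* ... descriptor ring, etc. ... */
};

static int my_ring_init(struct my_ring *ring)
{
	/* re-evaluate accumulated slack roughly once per second */
	return dql_init(&ring->dql, HZ);
}

/* transmit path: account queued bytes, stop when over the limit */
static void my_xmit_bytes(struct my_ring *ring, struct net_device *ndev,
			  unsigned int bytes)
{
	dql_queued(&ring->dql, bytes);
	if (dql_avail(&ring->dql) < 0)
		netif_stop_queue(ndev);
}

/* completion path: report completed bytes; the limit is adjusted here */
static void my_tx_complete(struct my_ring *ring, struct net_device *ndev,
			   unsigned int bytes)
{
	dql_completed(&ring->dql, bytes);
	if (dql_avail(&ring->dql) >= 0)
		netif_wake_queue(ndev);
}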
diff --git a/lib/fault-inject.c b/lib/fault-inject.c index f193b7796449..f7210ad6cffd 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c | |||
@@ -5,7 +5,7 @@ | |||
5 | #include <linux/stat.h> | 5 | #include <linux/stat.h> |
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | #include <linux/fs.h> | 7 | #include <linux/fs.h> |
8 | #include <linux/module.h> | 8 | #include <linux/export.h> |
9 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | #include <linux/stacktrace.h> | 10 | #include <linux/stacktrace.h> |
11 | #include <linux/fault-inject.h> | 11 | #include <linux/fault-inject.h> |
@@ -14,7 +14,7 @@ | |||
14 | * setup_fault_attr() is a helper function for various __setup handlers, so it | 14 | * setup_fault_attr() is a helper function for various __setup handlers, so it |
15 | * returns 0 on error, because that is what __setup handlers do. | 15 | * returns 0 on error, because that is what __setup handlers do. |
16 | */ | 16 | */ |
17 | int __init setup_fault_attr(struct fault_attr *attr, char *str) | 17 | int setup_fault_attr(struct fault_attr *attr, char *str) |
18 | { | 18 | { |
19 | unsigned long probability; | 19 | unsigned long probability; |
20 | unsigned long interval; | 20 | unsigned long interval; |
@@ -36,6 +36,7 @@ int __init setup_fault_attr(struct fault_attr *attr, char *str) | |||
36 | 36 | ||
37 | return 1; | 37 | return 1; |
38 | } | 38 | } |
39 | EXPORT_SYMBOL_GPL(setup_fault_attr); | ||
39 | 40 | ||
40 | static void fail_dump(struct fault_attr *attr) | 41 | static void fail_dump(struct fault_attr *attr) |
41 | { | 42 | { |
@@ -100,6 +101,10 @@ static inline bool fail_stacktrace(struct fault_attr *attr) | |||
100 | 101 | ||
101 | bool should_fail(struct fault_attr *attr, ssize_t size) | 102 | bool should_fail(struct fault_attr *attr, ssize_t size) |
102 | { | 103 | { |
104 | /* No need to check any other properties if the probability is 0 */ | ||
105 | if (attr->probability == 0) | ||
106 | return false; | ||
107 | |||
103 | if (attr->task_filter && !fail_task(attr, current)) | 108 | if (attr->task_filter && !fail_task(attr, current)) |
104 | return false; | 109 | return false; |
105 | 110 | ||
@@ -130,6 +135,7 @@ bool should_fail(struct fault_attr *attr, ssize_t size) | |||
130 | 135 | ||
131 | return true; | 136 | return true; |
132 | } | 137 | } |
138 | EXPORT_SYMBOL_GPL(should_fail); | ||
133 | 139 | ||
134 | #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS | 140 | #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS |
135 | 141 | ||
@@ -147,7 +153,7 @@ static int debugfs_ul_get(void *data, u64 *val) | |||
147 | 153 | ||
148 | DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n"); | 154 | DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n"); |
149 | 155 | ||
150 | static struct dentry *debugfs_create_ul(const char *name, mode_t mode, | 156 | static struct dentry *debugfs_create_ul(const char *name, umode_t mode, |
151 | struct dentry *parent, unsigned long *value) | 157 | struct dentry *parent, unsigned long *value) |
152 | { | 158 | { |
153 | return debugfs_create_file(name, mode, parent, value, &fops_ul); | 159 | return debugfs_create_file(name, mode, parent, value, &fops_ul); |
@@ -167,7 +173,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get, | |||
167 | debugfs_stacktrace_depth_set, "%llu\n"); | 173 | debugfs_stacktrace_depth_set, "%llu\n"); |
168 | 174 | ||
169 | static struct dentry *debugfs_create_stacktrace_depth( | 175 | static struct dentry *debugfs_create_stacktrace_depth( |
170 | const char *name, mode_t mode, | 176 | const char *name, umode_t mode, |
171 | struct dentry *parent, unsigned long *value) | 177 | struct dentry *parent, unsigned long *value) |
172 | { | 178 | { |
173 | return debugfs_create_file(name, mode, parent, value, | 179 | return debugfs_create_file(name, mode, parent, value, |
@@ -191,7 +197,7 @@ static int debugfs_atomic_t_get(void *data, u64 *val) | |||
191 | DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get, | 197 | DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get, |
192 | debugfs_atomic_t_set, "%lld\n"); | 198 | debugfs_atomic_t_set, "%lld\n"); |
193 | 199 | ||
194 | static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode, | 200 | static struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode, |
195 | struct dentry *parent, atomic_t *value) | 201 | struct dentry *parent, atomic_t *value) |
196 | { | 202 | { |
197 | return debugfs_create_file(name, mode, parent, value, &fops_atomic_t); | 203 | return debugfs_create_file(name, mode, parent, value, &fops_atomic_t); |
@@ -200,7 +206,7 @@ static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode, | |||
200 | struct dentry *fault_create_debugfs_attr(const char *name, | 206 | struct dentry *fault_create_debugfs_attr(const char *name, |
201 | struct dentry *parent, struct fault_attr *attr) | 207 | struct dentry *parent, struct fault_attr *attr) |
202 | { | 208 | { |
203 | mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; | 209 | umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; |
204 | struct dentry *dir; | 210 | struct dentry *dir; |
205 | 211 | ||
206 | dir = debugfs_create_dir(name, parent); | 212 | dir = debugfs_create_dir(name, parent); |
@@ -243,5 +249,6 @@ fail: | |||
243 | 249 | ||
244 | return ERR_PTR(-ENOMEM); | 250 | return ERR_PTR(-ENOMEM); |
245 | } | 251 | } |
252 | EXPORT_SYMBOL_GPL(fault_create_debugfs_attr); | ||
246 | 253 | ||
247 | #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ | 254 | #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ |
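With setup_fault_attr(), should_fail() and fault_create_debugfs_attr() now exported, a module can wire up its own injection point. A hedged sketch follows; the names fail_myop/myop and the module parameter are hypothetical, and DECLARE_FAULT_ATTR() plus the "<interval>,<probability>,<space>,<times>" string format are assumed from include/linux/fault-inject.h and its existing users.

/* Hedged sketch of a module-private fault injection point. */
#include <linux/err.h>
#include <linux/fault-inject.h>
#include <linux/module.h>

static DECLARE_FAULT_ATTR(fail_myop);

/* "<interval>,<probability>,<space>,<times>", e.g. "1,100,0,-1" */
static char *fail_myop_str;
module_param(fail_myop_str, charp, 0);

static int myop(size_t size)
{
	if (should_fail(&fail_myop, size))
		return -ENOMEM;		/* injected failure */
	/* ... real work ... */
	return 0;
}

static int __init myop_init(void)
{
	if (fail_myop_str)
		setup_fault_attr(&fail_myop, fail_myop_str);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	/* knobs appear under <debugfs>/fail_myop/ */
	if (IS_ERR(fault_create_debugfs_attr("fail_myop", NULL, &fail_myop)))
		pr_warn("fail_myop: debugfs setup failed\n");
#endif
	/* one call through the injection point; fails only if injected */
	return myop(4096);
}
module_init(myop_init);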
diff --git a/lib/fdt.c b/lib/fdt.c new file mode 100644 index 000000000000..97f20069fc37 --- /dev/null +++ b/lib/fdt.c | |||
@@ -0,0 +1,2 @@ | |||
1 | #include <linux/libfdt_env.h> | ||
2 | #include "../scripts/dtc/libfdt/fdt.c" | ||
diff --git a/lib/fdt_ro.c b/lib/fdt_ro.c new file mode 100644 index 000000000000..f73c04ea7be4 --- /dev/null +++ b/lib/fdt_ro.c | |||
@@ -0,0 +1,2 @@ | |||
1 | #include <linux/libfdt_env.h> | ||
2 | #include "../scripts/dtc/libfdt/fdt_ro.c" | ||
diff --git a/lib/fdt_rw.c b/lib/fdt_rw.c new file mode 100644 index 000000000000..0c1f0f4a4b13 --- /dev/null +++ b/lib/fdt_rw.c | |||
@@ -0,0 +1,2 @@ | |||
1 | #include <linux/libfdt_env.h> | ||
2 | #include "../scripts/dtc/libfdt/fdt_rw.c" | ||
diff --git a/lib/fdt_strerror.c b/lib/fdt_strerror.c new file mode 100644 index 000000000000..8713e3ff4707 --- /dev/null +++ b/lib/fdt_strerror.c | |||
@@ -0,0 +1,2 @@ | |||
1 | #include <linux/libfdt_env.h> | ||
2 | #include "../scripts/dtc/libfdt/fdt_strerror.c" | ||
diff --git a/lib/fdt_sw.c b/lib/fdt_sw.c new file mode 100644 index 000000000000..9ac7e50c76ce --- /dev/null +++ b/lib/fdt_sw.c | |||
@@ -0,0 +1,2 @@ | |||
1 | #include <linux/libfdt_env.h> | ||
2 | #include "../scripts/dtc/libfdt/fdt_sw.c" | ||
diff --git a/lib/fdt_wip.c b/lib/fdt_wip.c new file mode 100644 index 000000000000..45b3fc3d3ba1 --- /dev/null +++ b/lib/fdt_wip.c | |||
@@ -0,0 +1,2 @@ | |||
1 | #include <linux/libfdt_env.h> | ||
2 | #include "../scripts/dtc/libfdt/fdt_wip.c" | ||
diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c index d903959ad695..91ca09fbf6f9 100644 --- a/lib/find_last_bit.c +++ b/lib/find_last_bit.c | |||
@@ -11,7 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/bitops.h> | 13 | #include <linux/bitops.h> |
14 | #include <linux/module.h> | 14 | #include <linux/export.h> |
15 | #include <asm/types.h> | 15 | #include <asm/types.h> |
16 | #include <asm/byteorder.h> | 16 | #include <asm/byteorder.h> |
17 | 17 | ||
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index 4bd75a73ba00..0cbfc0b4398f 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c | |||
@@ -10,7 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/bitops.h> | 12 | #include <linux/bitops.h> |
13 | #include <linux/module.h> | 13 | #include <linux/export.h> |
14 | #include <asm/types.h> | 14 | #include <asm/types.h> |
15 | #include <asm/byteorder.h> | 15 | #include <asm/byteorder.h> |
16 | 16 | ||
diff --git a/lib/flex_array.c b/lib/flex_array.c index 9b8b89458c4c..6948a6692fc4 100644 --- a/lib/flex_array.c +++ b/lib/flex_array.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <linux/flex_array.h> | 23 | #include <linux/flex_array.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/stddef.h> | 25 | #include <linux/stddef.h> |
26 | #include <linux/module.h> | 26 | #include <linux/export.h> |
27 | #include <linux/reciprocal_div.h> | 27 | #include <linux/reciprocal_div.h> |
28 | 28 | ||
29 | struct flex_array_part { | 29 | struct flex_array_part { |
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c new file mode 100644 index 000000000000..ebf3bac460b0 --- /dev/null +++ b/lib/flex_proportions.c | |||
@@ -0,0 +1,272 @@ | |||
1 | /* | ||
2 | * Floating proportions with flexible aging period | ||
3 | * | ||
4 | * Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz> | ||
5 | * | ||
6 | * The goal of this code is: Given different types of event, measure proportion | ||
7 | * of each type of event over time. The proportions are measured with | ||
8 | * exponentially decaying history to give smooth transitions. A formula | ||
9 | * expressing proportion of event of type 'j' is: | ||
10 | * | ||
11 | * p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1}) | ||
12 | * | ||
13 | * Where x_{i,j} is j's number of events in i-th last time period and x_i is | ||
14 | * total number of events in i-th last time period. | ||
15 | * | ||
16 | * Note that p_{j}'s are normalised, i.e. | ||
17 | * | ||
18 | * \Sum_{j} p_{j} = 1, | ||
19 | * | ||
20 | * This formula can be straightforwardly computed by maintaining denominator | ||
21 | * (let's call it 'd') and for each event type its numerator (let's call it | ||
22 | * 'n_j'). When an event of type 'j' happens, we simply need to do: | ||
23 | * n_j++; d++; | ||
24 | * | ||
25 | * When a new period is declared, we could do: | ||
26 | * d /= 2 | ||
27 | * for each j | ||
28 | * n_j /= 2 | ||
29 | * | ||
30 | * To avoid iteration over all event types, we instead shift numerator of event | ||
31 | * j lazily when someone asks for a proportion of event j or when event j | ||
32 | * occurs. This can be trivially implemented by remembering last period in | ||
33 | * which something happened with proportion of type j. | ||
34 | */ | ||
35 | #include <linux/flex_proportions.h> | ||
36 | |||
37 | int fprop_global_init(struct fprop_global *p) | ||
38 | { | ||
39 | int err; | ||
40 | |||
41 | p->period = 0; | ||
42 | /* Use 1 to avoid dealing with periods with 0 events... */ | ||
43 | err = percpu_counter_init(&p->events, 1); | ||
44 | if (err) | ||
45 | return err; | ||
46 | seqcount_init(&p->sequence); | ||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | void fprop_global_destroy(struct fprop_global *p) | ||
51 | { | ||
52 | percpu_counter_destroy(&p->events); | ||
53 | } | ||
54 | |||
55 | /* | ||
56 | * Declare @periods new periods. It is up to the caller to make sure period | ||
57 | * transitions cannot happen in parallel. | ||
58 | * | ||
59 | * The function returns true if the proportions are still defined and false | ||
60 | * if aging zeroed out all events. This can be used to detect whether declaring | ||
61 | * further periods has any effect. | ||
62 | */ | ||
63 | bool fprop_new_period(struct fprop_global *p, int periods) | ||
64 | { | ||
65 | s64 events; | ||
66 | unsigned long flags; | ||
67 | |||
68 | local_irq_save(flags); | ||
69 | events = percpu_counter_sum(&p->events); | ||
70 | /* | ||
71 | * Don't do anything if there are no events. | ||
72 | */ | ||
73 | if (events <= 1) { | ||
74 | local_irq_restore(flags); | ||
75 | return false; | ||
76 | } | ||
77 | write_seqcount_begin(&p->sequence); | ||
78 | if (periods < 64) | ||
79 | events -= events >> periods; | ||
80 | /* Use addition to avoid losing events happening between sum and set */ | ||
81 | percpu_counter_add(&p->events, -events); | ||
82 | p->period += periods; | ||
83 | write_seqcount_end(&p->sequence); | ||
84 | local_irq_restore(flags); | ||
85 | |||
86 | return true; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * ---- SINGLE ---- | ||
91 | */ | ||
92 | |||
93 | int fprop_local_init_single(struct fprop_local_single *pl) | ||
94 | { | ||
95 | pl->events = 0; | ||
96 | pl->period = 0; | ||
97 | raw_spin_lock_init(&pl->lock); | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | void fprop_local_destroy_single(struct fprop_local_single *pl) | ||
102 | { | ||
103 | } | ||
104 | |||
105 | static void fprop_reflect_period_single(struct fprop_global *p, | ||
106 | struct fprop_local_single *pl) | ||
107 | { | ||
108 | unsigned int period = p->period; | ||
109 | unsigned long flags; | ||
110 | |||
111 | /* Fast path - period didn't change */ | ||
112 | if (pl->period == period) | ||
113 | return; | ||
114 | raw_spin_lock_irqsave(&pl->lock, flags); | ||
115 | /* Someone updated pl->period while we were spinning? */ | ||
116 | if (pl->period >= period) { | ||
117 | raw_spin_unlock_irqrestore(&pl->lock, flags); | ||
118 | return; | ||
119 | } | ||
120 | /* Aging zeroed our fraction? */ | ||
121 | if (period - pl->period < BITS_PER_LONG) | ||
122 | pl->events >>= period - pl->period; | ||
123 | else | ||
124 | pl->events = 0; | ||
125 | pl->period = period; | ||
126 | raw_spin_unlock_irqrestore(&pl->lock, flags); | ||
127 | } | ||
128 | |||
129 | /* Event of type pl happened */ | ||
130 | void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl) | ||
131 | { | ||
132 | fprop_reflect_period_single(p, pl); | ||
133 | pl->events++; | ||
134 | percpu_counter_add(&p->events, 1); | ||
135 | } | ||
136 | |||
137 | /* Return fraction of events of type pl */ | ||
138 | void fprop_fraction_single(struct fprop_global *p, | ||
139 | struct fprop_local_single *pl, | ||
140 | unsigned long *numerator, unsigned long *denominator) | ||
141 | { | ||
142 | unsigned int seq; | ||
143 | s64 num, den; | ||
144 | |||
145 | do { | ||
146 | seq = read_seqcount_begin(&p->sequence); | ||
147 | fprop_reflect_period_single(p, pl); | ||
148 | num = pl->events; | ||
149 | den = percpu_counter_read_positive(&p->events); | ||
150 | } while (read_seqcount_retry(&p->sequence, seq)); | ||
151 | |||
152 | /* | ||
153 | * Make fraction <= 1 and denominator > 0 even in presence of percpu | ||
154 | * counter errors | ||
155 | */ | ||
156 | if (den <= num) { | ||
157 | if (num) | ||
158 | den = num; | ||
159 | else | ||
160 | den = 1; | ||
161 | } | ||
162 | *denominator = den; | ||
163 | *numerator = num; | ||
164 | } | ||
165 | |||
166 | /* | ||
167 | * ---- PERCPU ---- | ||
168 | */ | ||
169 | #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids))) | ||
170 | |||
171 | int fprop_local_init_percpu(struct fprop_local_percpu *pl) | ||
172 | { | ||
173 | int err; | ||
174 | |||
175 | err = percpu_counter_init(&pl->events, 0); | ||
176 | if (err) | ||
177 | return err; | ||
178 | pl->period = 0; | ||
179 | raw_spin_lock_init(&pl->lock); | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | void fprop_local_destroy_percpu(struct fprop_local_percpu *pl) | ||
184 | { | ||
185 | percpu_counter_destroy(&pl->events); | ||
186 | } | ||
187 | |||
188 | static void fprop_reflect_period_percpu(struct fprop_global *p, | ||
189 | struct fprop_local_percpu *pl) | ||
190 | { | ||
191 | unsigned int period = p->period; | ||
192 | unsigned long flags; | ||
193 | |||
194 | /* Fast path - period didn't change */ | ||
195 | if (pl->period == period) | ||
196 | return; | ||
197 | raw_spin_lock_irqsave(&pl->lock, flags); | ||
198 | /* Someone updated pl->period while we were spinning? */ | ||
199 | if (pl->period >= period) { | ||
200 | raw_spin_unlock_irqrestore(&pl->lock, flags); | ||
201 | return; | ||
202 | } | ||
203 | /* Aging zeroed our fraction? */ | ||
204 | if (period - pl->period < BITS_PER_LONG) { | ||
205 | s64 val = percpu_counter_read(&pl->events); | ||
206 | |||
207 | if (val < (nr_cpu_ids * PROP_BATCH)) | ||
208 | val = percpu_counter_sum(&pl->events); | ||
209 | |||
210 | __percpu_counter_add(&pl->events, | ||
211 | -val + (val >> (period-pl->period)), PROP_BATCH); | ||
212 | } else | ||
213 | percpu_counter_set(&pl->events, 0); | ||
214 | pl->period = period; | ||
215 | raw_spin_unlock_irqrestore(&pl->lock, flags); | ||
216 | } | ||
217 | |||
218 | /* Event of type pl happened */ | ||
219 | void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl) | ||
220 | { | ||
221 | fprop_reflect_period_percpu(p, pl); | ||
222 | __percpu_counter_add(&pl->events, 1, PROP_BATCH); | ||
223 | percpu_counter_add(&p->events, 1); | ||
224 | } | ||
225 | |||
226 | void fprop_fraction_percpu(struct fprop_global *p, | ||
227 | struct fprop_local_percpu *pl, | ||
228 | unsigned long *numerator, unsigned long *denominator) | ||
229 | { | ||
230 | unsigned int seq; | ||
231 | s64 num, den; | ||
232 | |||
233 | do { | ||
234 | seq = read_seqcount_begin(&p->sequence); | ||
235 | fprop_reflect_period_percpu(p, pl); | ||
236 | num = percpu_counter_read_positive(&pl->events); | ||
237 | den = percpu_counter_read_positive(&p->events); | ||
238 | } while (read_seqcount_retry(&p->sequence, seq)); | ||
239 | |||
240 | /* | ||
241 | * Make fraction <= 1 and denominator > 0 even in presence of percpu | ||
242 | * counter errors | ||
243 | */ | ||
244 | if (den <= num) { | ||
245 | if (num) | ||
246 | den = num; | ||
247 | else | ||
248 | den = 1; | ||
249 | } | ||
250 | *denominator = den; | ||
251 | *numerator = num; | ||
252 | } | ||
253 | |||
254 | /* | ||
255 | * Like __fprop_inc_percpu() except that event is counted only if the given | ||
256 | * type has fraction smaller than @max_frac/FPROP_FRAC_BASE | ||
257 | */ | ||
258 | void __fprop_inc_percpu_max(struct fprop_global *p, | ||
259 | struct fprop_local_percpu *pl, int max_frac) | ||
260 | { | ||
261 | if (unlikely(max_frac < FPROP_FRAC_BASE)) { | ||
262 | unsigned long numerator, denominator; | ||
263 | |||
264 | fprop_fraction_percpu(p, pl, &numerator, &denominator); | ||
265 | if (numerator > | ||
266 | (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT) | ||
267 | return; | ||
268 | } else | ||
269 | fprop_reflect_period_percpu(p, pl); | ||
270 | __percpu_counter_add(&pl->events, 1, PROP_BATCH); | ||
271 | percpu_counter_add(&p->events, 1); | ||
272 | } | ||
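To make the aging scheme concrete, here is a hedged sketch of the single-type API using only the functions defined in this file; the event source, the periodic aging context and pr_info reporting are hypothetical, and the caller is assumed to serialize period transitions as the comment on fprop_new_period() requires.

/* Hedged sketch: track what fraction of all events one source produces. */
#include <linux/flex_proportions.h>
#include <linux/printk.h>

static struct fprop_global fg;
static struct fprop_local_single fl;

static int my_prop_setup(void)
{
	int err = fprop_global_init(&fg);

	if (err)
		return err;
	return fprop_local_init_single(&fl);
}

/* called whenever "our" event type happens */
static void my_event(void)
{
	__fprop_inc_single(&fg, &fl);
}

/* called from some periodic, serialized context, e.g. once per second */
static void my_age(void)
{
	/* halve the history; returns false once everything has aged to 0 */
	fprop_new_period(&fg, 1);
}

/* read back the current proportion as numerator/denominator */
static void my_report(void)
{
	unsigned long num, den;

	fprop_fraction_single(&fg, &fl, &num, &den);
	pr_info("proportion: %lu/%lu\n", num, den);
}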
diff --git a/lib/gcd.c b/lib/gcd.c --- a/lib/gcd.c +++ b/lib/gcd.c | |||
@@ -1,6 +1,6 @@ | |||
1 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
2 | #include <linux/gcd.h> | 2 | #include <linux/gcd.h> |
3 | #include <linux/module.h> | 3 | #include <linux/export.h> |
4 | 4 | ||
5 | /* Greatest common divisor */ | 5 | /* Greatest common divisor */ |
6 | unsigned long gcd(unsigned long a, unsigned long b) | 6 | unsigned long gcd(unsigned long a, unsigned long b) |
@@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b) | |||
9 | 9 | ||
10 | if (a < b) | 10 | if (a < b) |
11 | swap(a, b); | 11 | swap(a, b); |
12 | |||
13 | if (!b) | ||
14 | return a; | ||
12 | while ((r = a % b) != 0) { | 15 | while ((r = a % b) != 0) { |
13 | a = b; | 16 | a = b; |
14 | b = r; | 17 | b = r; |
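The added !b check matters because, after the swap, b is the smaller argument: if it is zero, the old loop would start with a modulo-by-zero. With the guard, gcd(a, 0) returns a and gcd(0, 0) returns 0. A standalone userspace copy of the fixed loop, for illustration only:

/* Hedged sketch: the fixed Euclid loop, outside the kernel. */
#include <assert.h>
#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	unsigned long r;

	if (a < b) {		/* ensure a >= b, like the kernel's swap() */
		r = a;
		a = b;
		b = r;
	}
	if (!b)			/* the new guard: gcd(a, 0) == a */
		return a;
	while ((r = a % b) != 0) {
		a = b;
		b = r;
	}
	return b;
}

int main(void)
{
	assert(gcd(12, 0) == 12);	/* divided by zero before the fix */
	assert(gcd(0, 0) == 0);
	assert(gcd(48, 36) == 12);
	printf("ok\n");
	return 0;
}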
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c index 85d0e412a04f..71fcfcd96410 100644 --- a/lib/gen_crc32table.c +++ b/lib/gen_crc32table.c | |||
@@ -1,14 +1,29 @@ | |||
1 | #include <stdio.h> | 1 | #include <stdio.h> |
2 | #include "../include/generated/autoconf.h" | ||
2 | #include "crc32defs.h" | 3 | #include "crc32defs.h" |
3 | #include <inttypes.h> | 4 | #include <inttypes.h> |
4 | 5 | ||
5 | #define ENTRIES_PER_LINE 4 | 6 | #define ENTRIES_PER_LINE 4 |
6 | 7 | ||
7 | #define LE_TABLE_SIZE (1 << CRC_LE_BITS) | 8 | #if CRC_LE_BITS > 8 |
8 | #define BE_TABLE_SIZE (1 << CRC_BE_BITS) | 9 | # define LE_TABLE_ROWS (CRC_LE_BITS/8) |
10 | # define LE_TABLE_SIZE 256 | ||
11 | #else | ||
12 | # define LE_TABLE_ROWS 1 | ||
13 | # define LE_TABLE_SIZE (1 << CRC_LE_BITS) | ||
14 | #endif | ||
9 | 15 | ||
10 | static uint32_t crc32table_le[4][LE_TABLE_SIZE]; | 16 | #if CRC_BE_BITS > 8 |
11 | static uint32_t crc32table_be[4][BE_TABLE_SIZE]; | 17 | # define BE_TABLE_ROWS (CRC_BE_BITS/8) |
18 | # define BE_TABLE_SIZE 256 | ||
19 | #else | ||
20 | # define BE_TABLE_ROWS 1 | ||
21 | # define BE_TABLE_SIZE (1 << CRC_BE_BITS) | ||
22 | #endif | ||
23 | |||
24 | static uint32_t crc32table_le[LE_TABLE_ROWS][256]; | ||
25 | static uint32_t crc32table_be[BE_TABLE_ROWS][256]; | ||
26 | static uint32_t crc32ctable_le[LE_TABLE_ROWS][256]; | ||
12 | 27 | ||
13 | /** | 28 | /** |
14 | * crc32init_le() - allocate and initialize LE table data | 29 | * crc32init_le() - allocate and initialize LE table data |
@@ -17,27 +32,38 @@ static uint32_t crc32table_be[4][BE_TABLE_SIZE]; | |||
17 | * fact that crctable[i^j] = crctable[i] ^ crctable[j]. | 32 | * fact that crctable[i^j] = crctable[i] ^ crctable[j]. |
18 | * | 33 | * |
19 | */ | 34 | */ |
20 | static void crc32init_le(void) | 35 | static void crc32init_le_generic(const uint32_t polynomial, |
36 | uint32_t (*tab)[256]) | ||
21 | { | 37 | { |
22 | unsigned i, j; | 38 | unsigned i, j; |
23 | uint32_t crc = 1; | 39 | uint32_t crc = 1; |
24 | 40 | ||
25 | crc32table_le[0][0] = 0; | 41 | tab[0][0] = 0; |
26 | 42 | ||
27 | for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) { | 43 | for (i = LE_TABLE_SIZE >> 1; i; i >>= 1) { |
28 | crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); | 44 | crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0); |
29 | for (j = 0; j < LE_TABLE_SIZE; j += 2 * i) | 45 | for (j = 0; j < LE_TABLE_SIZE; j += 2 * i) |
30 | crc32table_le[0][i + j] = crc ^ crc32table_le[0][j]; | 46 | tab[0][i + j] = crc ^ tab[0][j]; |
31 | } | 47 | } |
32 | for (i = 0; i < LE_TABLE_SIZE; i++) { | 48 | for (i = 0; i < LE_TABLE_SIZE; i++) { |
33 | crc = crc32table_le[0][i]; | 49 | crc = tab[0][i]; |
34 | for (j = 1; j < 4; j++) { | 50 | for (j = 1; j < LE_TABLE_ROWS; j++) { |
35 | crc = crc32table_le[0][crc & 0xff] ^ (crc >> 8); | 51 | crc = tab[0][crc & 0xff] ^ (crc >> 8); |
36 | crc32table_le[j][i] = crc; | 52 | tab[j][i] = crc; |
37 | } | 53 | } |
38 | } | 54 | } |
39 | } | 55 | } |
40 | 56 | ||
57 | static void crc32init_le(void) | ||
58 | { | ||
59 | crc32init_le_generic(CRCPOLY_LE, crc32table_le); | ||
60 | } | ||
61 | |||
62 | static void crc32cinit_le(void) | ||
63 | { | ||
64 | crc32init_le_generic(CRC32C_POLY_LE, crc32ctable_le); | ||
65 | } | ||
66 | |||
41 | /** | 67 | /** |
42 | * crc32init_be() - allocate and initialize BE table data | 68 | * crc32init_be() - allocate and initialize BE table data |
43 | */ | 69 | */ |
@@ -55,18 +81,18 @@ static void crc32init_be(void) | |||
55 | } | 81 | } |
56 | for (i = 0; i < BE_TABLE_SIZE; i++) { | 82 | for (i = 0; i < BE_TABLE_SIZE; i++) { |
57 | crc = crc32table_be[0][i]; | 83 | crc = crc32table_be[0][i]; |
58 | for (j = 1; j < 4; j++) { | 84 | for (j = 1; j < BE_TABLE_ROWS; j++) { |
59 | crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8); | 85 | crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8); |
60 | crc32table_be[j][i] = crc; | 86 | crc32table_be[j][i] = crc; |
61 | } | 87 | } |
62 | } | 88 | } |
63 | } | 89 | } |
64 | 90 | ||
65 | static void output_table(uint32_t table[4][256], int len, char *trans) | 91 | static void output_table(uint32_t (*table)[256], int rows, int len, char *trans) |
66 | { | 92 | { |
67 | int i, j; | 93 | int i, j; |
68 | 94 | ||
69 | for (j = 0 ; j < 4; j++) { | 95 | for (j = 0 ; j < rows; j++) { |
70 | printf("{"); | 96 | printf("{"); |
71 | for (i = 0; i < len - 1; i++) { | 97 | for (i = 0; i < len - 1; i++) { |
72 | if (i % ENTRIES_PER_LINE == 0) | 98 | if (i % ENTRIES_PER_LINE == 0) |
@@ -83,15 +109,30 @@ int main(int argc, char** argv) | |||
83 | 109 | ||
84 | if (CRC_LE_BITS > 1) { | 110 | if (CRC_LE_BITS > 1) { |
85 | crc32init_le(); | 111 | crc32init_le(); |
86 | printf("static const u32 crc32table_le[4][256] = {"); | 112 | printf("static u32 __cacheline_aligned " |
87 | output_table(crc32table_le, LE_TABLE_SIZE, "tole"); | 113 | "crc32table_le[%d][%d] = {", |
114 | LE_TABLE_ROWS, LE_TABLE_SIZE); | ||
115 | output_table(crc32table_le, LE_TABLE_ROWS, | ||
116 | LE_TABLE_SIZE, "tole"); | ||
88 | printf("};\n"); | 117 | printf("};\n"); |
89 | } | 118 | } |
90 | 119 | ||
91 | if (CRC_BE_BITS > 1) { | 120 | if (CRC_BE_BITS > 1) { |
92 | crc32init_be(); | 121 | crc32init_be(); |
93 | printf("static const u32 crc32table_be[4][256] = {"); | 122 | printf("static u32 __cacheline_aligned " |
94 | output_table(crc32table_be, BE_TABLE_SIZE, "tobe"); | 123 | "crc32table_be[%d][%d] = {", |
124 | BE_TABLE_ROWS, BE_TABLE_SIZE); | ||
125 | output_table(crc32table_be, LE_TABLE_ROWS, | ||
126 | BE_TABLE_SIZE, "tobe"); | ||
127 | printf("};\n"); | ||
128 | } | ||
129 | if (CRC_LE_BITS > 1) { | ||
130 | crc32cinit_le(); | ||
131 | printf("static u32 __cacheline_aligned " | ||
132 | "crc32ctable_le[%d][%d] = {", | ||
133 | LE_TABLE_ROWS, LE_TABLE_SIZE); | ||
134 | output_table(crc32ctable_le, LE_TABLE_ROWS, | ||
135 | LE_TABLE_SIZE, "tole"); | ||
95 | printf("};\n"); | 136 | printf("};\n"); |
96 | } | 137 | } |
97 | 138 | ||
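Row 0 of each generated table is the ordinary byte-at-a-time lookup table; the extra rows only exist so the slice-by-4/8 kernels can fold several bytes per step. Below is a minimal userspace sketch of the byte-at-a-time form, assuming CRCPOLY_LE is the usual reflected IEEE polynomial 0xedb88320; the standard CRC-32 check value 0xcbf43926 for "123456789" serves as a sanity test.

/* Hedged sketch: byte-at-a-time CRC-32 with a 256-entry table,
 * equivalent to using only row 0 of the generated crc32table_le. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t tab[256];

static void init_table(void)
{
	for (uint32_t i = 0; i < 256; i++) {
		uint32_t crc = i;

		for (int j = 0; j < 8; j++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
		tab[i] = crc;
	}
}

static uint32_t crc32_le(uint32_t crc, const unsigned char *p, size_t len)
{
	while (len--)
		crc = tab[(crc ^ *p++) & 0xff] ^ (crc >> 8);
	return crc;
}

int main(void)
{
	const char *s = "123456789";
	uint32_t crc;

	init_table();
	/* standard CRC-32: seed ~0, final xor ~0, expect 0xcbf43926 */
	crc = crc32_le(0xffffffffu, (const unsigned char *)s, strlen(s));
	crc ^= 0xffffffffu;
	printf("%08x\n", (unsigned)crc);	/* cbf43926 */
	return 0;
}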
diff --git a/lib/genalloc.c b/lib/genalloc.c index f352cc42f4f8..54920433705a 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c | |||
@@ -29,7 +29,7 @@ | |||
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
32 | #include <linux/module.h> | 32 | #include <linux/export.h> |
33 | #include <linux/bitmap.h> | 33 | #include <linux/bitmap.h> |
34 | #include <linux/rculist.h> | 34 | #include <linux/rculist.h> |
35 | #include <linux/interrupt.h> | 35 | #include <linux/interrupt.h> |
@@ -152,6 +152,8 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid) | |||
152 | spin_lock_init(&pool->lock); | 152 | spin_lock_init(&pool->lock); |
153 | INIT_LIST_HEAD(&pool->chunks); | 153 | INIT_LIST_HEAD(&pool->chunks); |
154 | pool->min_alloc_order = min_alloc_order; | 154 | pool->min_alloc_order = min_alloc_order; |
155 | pool->algo = gen_pool_first_fit; | ||
156 | pool->data = NULL; | ||
155 | } | 157 | } |
156 | return pool; | 158 | return pool; |
157 | } | 159 | } |
@@ -176,7 +178,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy | |||
176 | struct gen_pool_chunk *chunk; | 178 | struct gen_pool_chunk *chunk; |
177 | int nbits = size >> pool->min_alloc_order; | 179 | int nbits = size >> pool->min_alloc_order; |
178 | int nbytes = sizeof(struct gen_pool_chunk) + | 180 | int nbytes = sizeof(struct gen_pool_chunk) + |
179 | (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE; | 181 | BITS_TO_LONGS(nbits) * sizeof(long); |
180 | 182 | ||
181 | chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid); | 183 | chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid); |
182 | if (unlikely(chunk == NULL)) | 184 | if (unlikely(chunk == NULL)) |
@@ -255,8 +257,9 @@ EXPORT_SYMBOL(gen_pool_destroy); | |||
255 | * @size: number of bytes to allocate from the pool | 257 | * @size: number of bytes to allocate from the pool |
256 | * | 258 | * |
257 | * Allocate the requested number of bytes from the specified pool. | 259 | * Allocate the requested number of bytes from the specified pool. |
258 | * Uses a first-fit algorithm. Can not be used in NMI handler on | 260 | * Uses the pool allocation function (with first-fit algorithm by default). |
259 | * architectures without NMI-safe cmpxchg implementation. | 261 | * Can not be used in NMI handler on architectures without |
262 | * NMI-safe cmpxchg implementation. | ||
260 | */ | 263 | */ |
261 | unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) | 264 | unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) |
262 | { | 265 | { |
@@ -280,8 +283,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) | |||
280 | 283 | ||
281 | end_bit = (chunk->end_addr - chunk->start_addr) >> order; | 284 | end_bit = (chunk->end_addr - chunk->start_addr) >> order; |
282 | retry: | 285 | retry: |
283 | start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, | 286 | start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits, |
284 | start_bit, nbits, 0); | 287 | pool->data); |
285 | if (start_bit >= end_bit) | 288 | if (start_bit >= end_bit) |
286 | continue; | 289 | continue; |
287 | remain = bitmap_set_ll(chunk->bits, start_bit, nbits); | 290 | remain = bitmap_set_ll(chunk->bits, start_bit, nbits); |
@@ -400,3 +403,80 @@ size_t gen_pool_size(struct gen_pool *pool) | |||
400 | return size; | 403 | return size; |
401 | } | 404 | } |
402 | EXPORT_SYMBOL_GPL(gen_pool_size); | 405 | EXPORT_SYMBOL_GPL(gen_pool_size); |
406 | |||
407 | /** | ||
408 | * gen_pool_set_algo - set the allocation algorithm | ||
409 | * @pool: pool to change allocation algorithm | ||
410 | * @algo: custom algorithm function | ||
411 | * @data: additional data used by @algo | ||
412 | * | ||
413 | * Call @algo for each memory allocation in the pool. | ||
414 | * If @algo is NULL, use gen_pool_first_fit as the default | ||
415 | * memory allocation function. | ||
416 | */ | ||
417 | void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data) | ||
418 | { | ||
419 | rcu_read_lock(); | ||
420 | |||
421 | pool->algo = algo; | ||
422 | if (!pool->algo) | ||
423 | pool->algo = gen_pool_first_fit; | ||
424 | |||
425 | pool->data = data; | ||
426 | |||
427 | rcu_read_unlock(); | ||
428 | } | ||
429 | EXPORT_SYMBOL(gen_pool_set_algo); | ||
430 | |||
431 | /** | ||
432 | * gen_pool_first_fit - find the first available region | ||
433 | * of memory matching the size requirement (no alignment constraint) | ||
434 | * @map: The address to base the search on | ||
435 | * @size: The bitmap size in bits | ||
436 | * @start: The bitnumber to start searching at | ||
437 | * @nr: The number of zeroed bits we're looking for | ||
438 | * @data: additional data - unused | ||
439 | */ | ||
440 | unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, | ||
441 | unsigned long start, unsigned int nr, void *data) | ||
442 | { | ||
443 | return bitmap_find_next_zero_area(map, size, start, nr, 0); | ||
444 | } | ||
445 | EXPORT_SYMBOL(gen_pool_first_fit); | ||
446 | |||
447 | /** | ||
448 | * gen_pool_best_fit - find the best fitting region of memory | ||
449 | * matching the size requirement (no alignment constraint) | ||
450 | * @map: The address to base the search on | ||
451 | * @size: The bitmap size in bits | ||
452 | * @start: The bitnumber to start searching at | ||
453 | * @nr: The number of zeroed bits we're looking for | ||
454 | * @data: additional data - unused | ||
455 | * | ||
456 | * Iterate over the bitmap to find the smallest free region | ||
457 | * in which we can allocate the memory. | ||
458 | */ | ||
459 | unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, | ||
460 | unsigned long start, unsigned int nr, void *data) | ||
461 | { | ||
462 | unsigned long start_bit = size; | ||
463 | unsigned long len = size + 1; | ||
464 | unsigned long index; | ||
465 | |||
466 | index = bitmap_find_next_zero_area(map, size, start, nr, 0); | ||
467 | |||
468 | while (index < size) { | ||
469 | int next_bit = find_next_bit(map, size, index + nr); | ||
470 | if ((next_bit - index) < len) { | ||
471 | len = next_bit - index; | ||
472 | start_bit = index; | ||
473 | if (len == nr) | ||
474 | return start_bit; | ||
475 | } | ||
476 | index = bitmap_find_next_zero_area(map, size, | ||
477 | next_bit + 1, nr, 0); | ||
478 | } | ||
479 | |||
480 | return start_bit; | ||
481 | } | ||
482 | EXPORT_SYMBOL(gen_pool_best_fit); | ||
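The gen_pool_set_algo() hook added above lets a pool owner pick the allocator before handing out memory. A minimal sketch of the intended use; the SRAM names below are placeholders, not part of this patch:

	struct gen_pool *pool;
	unsigned long addr;

	pool = gen_pool_create(8, -1);		/* 2^8 = 256-byte allocation granule */
	if (!pool)
		return -ENOMEM;
	/* sram_virt and sram_size are placeholder names for this sketch */
	if (gen_pool_add(pool, sram_virt, sram_size, -1))
		return -ENOMEM;
	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);	/* default remains first-fit */
	addr = gen_pool_alloc(pool, 512);	/* now served by gen_pool_best_fit() */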
diff --git a/lib/halfmd4.c b/lib/halfmd4.c index e11db26f8ae5..66d0ee8b7776 100644 --- a/lib/halfmd4.c +++ b/lib/halfmd4.c | |||
@@ -1,5 +1,5 @@ | |||
1 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
2 | #include <linux/module.h> | 2 | #include <linux/export.h> |
3 | #include <linux/cryptohash.h> | 3 | #include <linux/cryptohash.h> |
4 | 4 | ||
5 | /* F, G and H are basic MD4 functions: selection, majority, parity */ | 5 | /* F, G and H are basic MD4 functions: selection, majority, parity */ |
diff --git a/lib/hexdump.c b/lib/hexdump.c index f5fe6ba7a3ab..6540d657dca4 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <linux/ctype.h> | 11 | #include <linux/ctype.h> |
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/module.h> | 13 | #include <linux/export.h> |
14 | 14 | ||
15 | const char hex_asc[] = "0123456789abcdef"; | 15 | const char hex_asc[] = "0123456789abcdef"; |
16 | EXPORT_SYMBOL(hex_asc); | 16 | EXPORT_SYMBOL(hex_asc); |
@@ -38,14 +38,21 @@ EXPORT_SYMBOL(hex_to_bin); | |||
38 | * @dst: binary result | 38 | * @dst: binary result |
39 | * @src: ascii hexadecimal string | 39 | * @src: ascii hexadecimal string |
40 | * @count: result length | 40 | * @count: result length |
41 | * | ||
42 | * Return 0 on success, -1 in case of bad input. | ||
41 | */ | 43 | */ |
42 | void hex2bin(u8 *dst, const char *src, size_t count) | 44 | int hex2bin(u8 *dst, const char *src, size_t count) |
43 | { | 45 | { |
44 | while (count--) { | 46 | while (count--) { |
45 | *dst = hex_to_bin(*src++) << 4; | 47 | int hi = hex_to_bin(*src++); |
46 | *dst += hex_to_bin(*src++); | 48 | int lo = hex_to_bin(*src++); |
47 | dst++; | 49 | |
50 | if ((hi < 0) || (lo < 0)) | ||
51 | return -1; | ||
52 | |||
53 | *dst++ = (hi << 4) | lo; | ||
48 | } | 54 | } |
55 | return 0; | ||
49 | } | 56 | } |
50 | EXPORT_SYMBOL(hex2bin); | 57 | EXPORT_SYMBOL(hex2bin); |
51 | 58 | ||
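Since hex2bin() now reports malformed input, callers are expected to check its return value. A short sketch of the new calling convention; the buffer names are illustrative only:

	u8 mac[6];

	/* hex2bin() returns 0 on success, -1 if hex_str contains a non-hex digit */
	if (hex2bin(mac, hex_str, sizeof(mac)) < 0)
		return -EINVAL;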
diff --git a/lib/hweight.c b/lib/hweight.c index 3c79d50814cf..b7d81ba143d1 100644 --- a/lib/hweight.c +++ b/lib/hweight.c | |||
@@ -1,4 +1,4 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/export.h> |
2 | #include <linux/bitops.h> | 2 | #include <linux/bitops.h> |
3 | #include <asm/types.h> | 3 | #include <asm/types.h> |
4 | 4 | ||
@@ -20,7 +20,7 @@ | |||
20 | * that id to this code and it returns your pointer. | 20 | * that id to this code and it returns your pointer. |
21 | 21 | ||
22 | * You can release ids at any time. When all ids are released, most of | 22 | * You can release ids at any time. When all ids are released, most of |
23 | * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we | 23 | * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we |
24 | * don't need to go to the memory "store" during an id allocate, just | 24 | * don't need to go to the memory "store" during an id allocate, just |
25 | * so you don't need to be too concerned about locking and conflicts | 25 | * so you don't need to be too concerned about locking and conflicts |
26 | * with the slab allocator. | 26 | * with the slab allocator. |
@@ -29,7 +29,7 @@ | |||
29 | #ifndef TEST // to test in user space... | 29 | #ifndef TEST // to test in user space... |
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | #include <linux/module.h> | 32 | #include <linux/export.h> |
33 | #endif | 33 | #endif |
34 | #include <linux/err.h> | 34 | #include <linux/err.h> |
35 | #include <linux/string.h> | 35 | #include <linux/string.h> |
@@ -122,7 +122,7 @@ static void idr_mark_full(struct idr_layer **pa, int id) | |||
122 | */ | 122 | */ |
123 | int idr_pre_get(struct idr *idp, gfp_t gfp_mask) | 123 | int idr_pre_get(struct idr *idp, gfp_t gfp_mask) |
124 | { | 124 | { |
125 | while (idp->id_free_cnt < IDR_FREE_MAX) { | 125 | while (idp->id_free_cnt < MAX_IDR_FREE) { |
126 | struct idr_layer *new; | 126 | struct idr_layer *new; |
127 | new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); | 127 | new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); |
128 | if (new == NULL) | 128 | if (new == NULL) |
@@ -179,7 +179,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
179 | sh = IDR_BITS*l; | 179 | sh = IDR_BITS*l; |
180 | id = ((id >> sh) ^ n ^ m) << sh; | 180 | id = ((id >> sh) ^ n ^ m) << sh; |
181 | } | 181 | } |
182 | if ((id >= MAX_ID_BIT) || (id < 0)) | 182 | if ((id >= MAX_IDR_BIT) || (id < 0)) |
183 | return IDR_NOMORE_SPACE; | 183 | return IDR_NOMORE_SPACE; |
184 | if (l == 0) | 184 | if (l == 0) |
185 | break; | 185 | break; |
@@ -223,7 +223,7 @@ build_up: | |||
223 | * Add a new layer to the top of the tree if the requested | 223 | * Add a new layer to the top of the tree if the requested |
224 | * id is larger than the currently allocated space. | 224 | * id is larger than the currently allocated space. |
225 | */ | 225 | */ |
226 | while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { | 226 | while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { |
227 | layers++; | 227 | layers++; |
228 | if (!p->count) { | 228 | if (!p->count) { |
229 | /* special case: if the tree is currently empty, | 229 | /* special case: if the tree is currently empty, |
@@ -265,7 +265,7 @@ build_up: | |||
265 | 265 | ||
266 | static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | 266 | static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) |
267 | { | 267 | { |
268 | struct idr_layer *pa[MAX_LEVEL]; | 268 | struct idr_layer *pa[MAX_IDR_LEVEL]; |
269 | int id; | 269 | int id; |
270 | 270 | ||
271 | id = idr_get_empty_slot(idp, starting_id, pa); | 271 | id = idr_get_empty_slot(idp, starting_id, pa); |
@@ -357,7 +357,7 @@ static void idr_remove_warning(int id) | |||
357 | static void sub_remove(struct idr *idp, int shift, int id) | 357 | static void sub_remove(struct idr *idp, int shift, int id) |
358 | { | 358 | { |
359 | struct idr_layer *p = idp->top; | 359 | struct idr_layer *p = idp->top; |
360 | struct idr_layer **pa[MAX_LEVEL]; | 360 | struct idr_layer **pa[MAX_IDR_LEVEL]; |
361 | struct idr_layer ***paa = &pa[0]; | 361 | struct idr_layer ***paa = &pa[0]; |
362 | struct idr_layer *to_free; | 362 | struct idr_layer *to_free; |
363 | int n; | 363 | int n; |
@@ -402,7 +402,7 @@ void idr_remove(struct idr *idp, int id) | |||
402 | struct idr_layer *to_free; | 402 | struct idr_layer *to_free; |
403 | 403 | ||
404 | /* Mask off upper bits we don't use for the search. */ | 404 | /* Mask off upper bits we don't use for the search. */ |
405 | id &= MAX_ID_MASK; | 405 | id &= MAX_IDR_MASK; |
406 | 406 | ||
407 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); | 407 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); |
408 | if (idp->top && idp->top->count == 1 && (idp->layers > 1) && | 408 | if (idp->top && idp->top->count == 1 && (idp->layers > 1) && |
@@ -420,7 +420,7 @@ void idr_remove(struct idr *idp, int id) | |||
420 | to_free->bitmap = to_free->count = 0; | 420 | to_free->bitmap = to_free->count = 0; |
421 | free_layer(to_free); | 421 | free_layer(to_free); |
422 | } | 422 | } |
423 | while (idp->id_free_cnt >= IDR_FREE_MAX) { | 423 | while (idp->id_free_cnt >= MAX_IDR_FREE) { |
424 | p = get_from_free_list(idp); | 424 | p = get_from_free_list(idp); |
425 | /* | 425 | /* |
426 | * Note: we don't call the rcu callback here, since the only | 426 | * Note: we don't call the rcu callback here, since the only |
@@ -451,7 +451,7 @@ void idr_remove_all(struct idr *idp) | |||
451 | int n, id, max; | 451 | int n, id, max; |
452 | int bt_mask; | 452 | int bt_mask; |
453 | struct idr_layer *p; | 453 | struct idr_layer *p; |
454 | struct idr_layer *pa[MAX_LEVEL]; | 454 | struct idr_layer *pa[MAX_IDR_LEVEL]; |
455 | struct idr_layer **paa = &pa[0]; | 455 | struct idr_layer **paa = &pa[0]; |
456 | 456 | ||
457 | n = idp->layers * IDR_BITS; | 457 | n = idp->layers * IDR_BITS; |
@@ -517,7 +517,7 @@ void *idr_find(struct idr *idp, int id) | |||
517 | n = (p->layer+1) * IDR_BITS; | 517 | n = (p->layer+1) * IDR_BITS; |
518 | 518 | ||
519 | /* Mask off upper bits we don't use for the search. */ | 519 | /* Mask off upper bits we don't use for the search. */ |
520 | id &= MAX_ID_MASK; | 520 | id &= MAX_IDR_MASK; |
521 | 521 | ||
522 | if (id >= (1 << n)) | 522 | if (id >= (1 << n)) |
523 | return NULL; | 523 | return NULL; |
@@ -555,7 +555,7 @@ int idr_for_each(struct idr *idp, | |||
555 | { | 555 | { |
556 | int n, id, max, error = 0; | 556 | int n, id, max, error = 0; |
557 | struct idr_layer *p; | 557 | struct idr_layer *p; |
558 | struct idr_layer *pa[MAX_LEVEL]; | 558 | struct idr_layer *pa[MAX_IDR_LEVEL]; |
559 | struct idr_layer **paa = &pa[0]; | 559 | struct idr_layer **paa = &pa[0]; |
560 | 560 | ||
561 | n = idp->layers * IDR_BITS; | 561 | n = idp->layers * IDR_BITS; |
@@ -595,21 +595,23 @@ EXPORT_SYMBOL(idr_for_each); | |||
595 | * Returns pointer to registered object with id, which is next number to | 595 | * Returns pointer to registered object with id, which is next number to |
596 | * given id. After being looked up, *@nextidp will be updated for the next | 596 | * given id. After being looked up, *@nextidp will be updated for the next |
597 | * iteration. | 597 | * iteration. |
598 | * | ||
599 | * This function can be called under rcu_read_lock(), given that the leaf | ||
600 | * pointers' lifetimes are correctly managed. | ||
598 | */ | 601 | */ |
599 | |||
600 | void *idr_get_next(struct idr *idp, int *nextidp) | 602 | void *idr_get_next(struct idr *idp, int *nextidp) |
601 | { | 603 | { |
602 | struct idr_layer *p, *pa[MAX_LEVEL]; | 604 | struct idr_layer *p, *pa[MAX_IDR_LEVEL]; |
603 | struct idr_layer **paa = &pa[0]; | 605 | struct idr_layer **paa = &pa[0]; |
604 | int id = *nextidp; | 606 | int id = *nextidp; |
605 | int n, max; | 607 | int n, max; |
606 | 608 | ||
607 | /* find first ent */ | 609 | /* find first ent */ |
608 | n = idp->layers * IDR_BITS; | ||
609 | max = 1 << n; | ||
610 | p = rcu_dereference_raw(idp->top); | 610 | p = rcu_dereference_raw(idp->top); |
611 | if (!p) | 611 | if (!p) |
612 | return NULL; | 612 | return NULL; |
613 | n = (p->layer + 1) * IDR_BITS; | ||
614 | max = 1 << n; | ||
613 | 615 | ||
614 | while (id < max) { | 616 | while (id < max) { |
615 | while (n > 0 && p) { | 617 | while (n > 0 && p) { |
@@ -657,7 +659,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id) | |||
657 | 659 | ||
658 | n = (p->layer+1) * IDR_BITS; | 660 | n = (p->layer+1) * IDR_BITS; |
659 | 661 | ||
660 | id &= MAX_ID_MASK; | 662 | id &= MAX_IDR_MASK; |
661 | 663 | ||
662 | if (id >= (1 << n)) | 664 | if (id >= (1 << n)) |
663 | return ERR_PTR(-EINVAL); | 665 | return ERR_PTR(-EINVAL); |
@@ -767,8 +769,8 @@ EXPORT_SYMBOL(ida_pre_get); | |||
767 | * @starting_id: id to start search at | 769 | * @starting_id: id to start search at |
768 | * @p_id: pointer to the allocated handle | 770 | * @p_id: pointer to the allocated handle |
769 | * | 771 | * |
770 | * Allocate new ID above or equal to @ida. It should be called with | 772 | * Allocate new ID above or equal to @starting_id. It should be called |
771 | * any required locks. | 773 | * with any required locks. |
772 | * | 774 | * |
773 | * If memory is required, it will return %-EAGAIN, you should unlock | 775 | * If memory is required, it will return %-EAGAIN, you should unlock |
774 | * and go back to the ida_pre_get() call. If the ida is full, it will | 776 | * and go back to the ida_pre_get() call. If the ida is full, it will |
@@ -778,7 +780,7 @@ EXPORT_SYMBOL(ida_pre_get); | |||
778 | */ | 780 | */ |
779 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | 781 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) |
780 | { | 782 | { |
781 | struct idr_layer *pa[MAX_LEVEL]; | 783 | struct idr_layer *pa[MAX_IDR_LEVEL]; |
782 | struct ida_bitmap *bitmap; | 784 | struct ida_bitmap *bitmap; |
783 | unsigned long flags; | 785 | unsigned long flags; |
784 | int idr_id = starting_id / IDA_BITMAP_BITS; | 786 | int idr_id = starting_id / IDA_BITMAP_BITS; |
@@ -791,7 +793,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |||
791 | if (t < 0) | 793 | if (t < 0) |
792 | return _idr_rc_to_errno(t); | 794 | return _idr_rc_to_errno(t); |
793 | 795 | ||
794 | if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) | 796 | if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) |
795 | return -ENOSPC; | 797 | return -ENOSPC; |
796 | 798 | ||
797 | if (t != idr_id) | 799 | if (t != idr_id) |
@@ -825,7 +827,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |||
825 | } | 827 | } |
826 | 828 | ||
827 | id = idr_id * IDA_BITMAP_BITS + t; | 829 | id = idr_id * IDA_BITMAP_BITS + t; |
828 | if (id >= MAX_ID_BIT) | 830 | if (id >= MAX_IDR_BIT) |
829 | return -ENOSPC; | 831 | return -ENOSPC; |
830 | 832 | ||
831 | __set_bit(t, bitmap->bitmap); | 833 | __set_bit(t, bitmap->bitmap); |
@@ -860,7 +862,7 @@ EXPORT_SYMBOL(ida_get_new_above); | |||
860 | * and go back to the idr_pre_get() call. If the idr is full, it will | 862 | * and go back to the idr_pre_get() call. If the idr is full, it will |
861 | * return %-ENOSPC. | 863 | * return %-ENOSPC. |
862 | * | 864 | * |
863 | * @id returns a value in the range %0 ... %0x7fffffff. | 865 | * @p_id returns a value in the range %0 ... %0x7fffffff. |
864 | */ | 866 | */ |
865 | int ida_get_new(struct ida *ida, int *p_id) | 867 | int ida_get_new(struct ida *ida, int *p_id) |
866 | { | 868 | { |
@@ -944,6 +946,7 @@ int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, | |||
944 | { | 946 | { |
945 | int ret, id; | 947 | int ret, id; |
946 | unsigned int max; | 948 | unsigned int max; |
949 | unsigned long flags; | ||
947 | 950 | ||
948 | BUG_ON((int)start < 0); | 951 | BUG_ON((int)start < 0); |
949 | BUG_ON((int)end < 0); | 952 | BUG_ON((int)end < 0); |
@@ -959,7 +962,7 @@ again: | |||
959 | if (!ida_pre_get(ida, gfp_mask)) | 962 | if (!ida_pre_get(ida, gfp_mask)) |
960 | return -ENOMEM; | 963 | return -ENOMEM; |
961 | 964 | ||
962 | spin_lock(&simple_ida_lock); | 965 | spin_lock_irqsave(&simple_ida_lock, flags); |
963 | ret = ida_get_new_above(ida, start, &id); | 966 | ret = ida_get_new_above(ida, start, &id); |
964 | if (!ret) { | 967 | if (!ret) { |
965 | if (id > max) { | 968 | if (id > max) { |
@@ -969,7 +972,7 @@ again: | |||
969 | ret = id; | 972 | ret = id; |
970 | } | 973 | } |
971 | } | 974 | } |
972 | spin_unlock(&simple_ida_lock); | 975 | spin_unlock_irqrestore(&simple_ida_lock, flags); |
973 | 976 | ||
974 | if (unlikely(ret == -EAGAIN)) | 977 | if (unlikely(ret == -EAGAIN)) |
975 | goto again; | 978 | goto again; |
@@ -985,10 +988,12 @@ EXPORT_SYMBOL(ida_simple_get); | |||
985 | */ | 988 | */ |
986 | void ida_simple_remove(struct ida *ida, unsigned int id) | 989 | void ida_simple_remove(struct ida *ida, unsigned int id) |
987 | { | 990 | { |
991 | unsigned long flags; | ||
992 | |||
988 | BUG_ON((int)id < 0); | 993 | BUG_ON((int)id < 0); |
989 | spin_lock(&simple_ida_lock); | 994 | spin_lock_irqsave(&simple_ida_lock, flags); |
990 | ida_remove(ida, id); | 995 | ida_remove(ida, id); |
991 | spin_unlock(&simple_ida_lock); | 996 | spin_unlock_irqrestore(&simple_ida_lock, flags); |
992 | } | 997 | } |
993 | EXPORT_SYMBOL(ida_simple_remove); | 998 | EXPORT_SYMBOL(ida_simple_remove); |
994 | 999 | ||
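With ida_simple_get()/ida_simple_remove() now taking simple_ida_lock with IRQs disabled, an id can be released from interrupt context without deadlocking against an allocation in progress. Typical usage is unchanged; a sketch with a placeholder ida name:

	static DEFINE_IDA(example_ida);		/* "example_ida" is a placeholder */
	int id;

	id = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);	/* end == 0: no upper limit */
	if (id < 0)
		return id;
	/* ... use id ... */
	ida_simple_remove(&example_ida, id);	/* may now also run from IRQ context */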
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c index fd355a99327c..fc2eeb7cb2ea 100644 --- a/lib/int_sqrt.c +++ b/lib/int_sqrt.c | |||
@@ -1,6 +1,6 @@ | |||
1 | 1 | ||
2 | #include <linux/kernel.h> | 2 | #include <linux/kernel.h> |
3 | #include <linux/module.h> | 3 | #include <linux/export.h> |
4 | 4 | ||
5 | /** | 5 | /** |
6 | * int_sqrt - rough approximation to sqrt | 6 | * int_sqrt - rough approximation to sqrt |
diff --git a/lib/interval_tree.c b/lib/interval_tree.c new file mode 100644 index 000000000000..e6eb406f2d65 --- /dev/null +++ b/lib/interval_tree.c | |||
@@ -0,0 +1,10 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/interval_tree.h> | ||
3 | #include <linux/interval_tree_generic.h> | ||
4 | |||
5 | #define START(node) ((node)->start) | ||
6 | #define LAST(node) ((node)->last) | ||
7 | |||
8 | INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, | ||
9 | unsigned long, __subtree_last, | ||
10 | START, LAST,, interval_tree) | ||
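The INTERVAL_TREE_DEFINE() instantiation above generates interval_tree_insert/remove and the iteration helpers exercised by the test module that follows. A minimal stand-alone sketch with arbitrary interval values:

	struct rb_root root = RB_ROOT;
	struct interval_tree_node node = { .start = 10, .last = 19 };
	struct interval_tree_node *it;

	interval_tree_insert(&node, &root);
	it = interval_tree_iter_first(&root, 15, 30);	/* overlaps [10,19], so non-NULL */
	if (it)
		interval_tree_remove(it, &root);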
diff --git a/lib/interval_tree_test_main.c b/lib/interval_tree_test_main.c new file mode 100644 index 000000000000..b25903987f7a --- /dev/null +++ b/lib/interval_tree_test_main.c | |||
@@ -0,0 +1,105 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/interval_tree.h> | ||
3 | #include <linux/random.h> | ||
4 | #include <asm/timex.h> | ||
5 | |||
6 | #define NODES 100 | ||
7 | #define PERF_LOOPS 100000 | ||
8 | #define SEARCHES 100 | ||
9 | #define SEARCH_LOOPS 10000 | ||
10 | |||
11 | static struct rb_root root = RB_ROOT; | ||
12 | static struct interval_tree_node nodes[NODES]; | ||
13 | static u32 queries[SEARCHES]; | ||
14 | |||
15 | static struct rnd_state rnd; | ||
16 | |||
17 | static inline unsigned long | ||
18 | search(unsigned long query, struct rb_root *root) | ||
19 | { | ||
20 | struct interval_tree_node *node; | ||
21 | unsigned long results = 0; | ||
22 | |||
23 | for (node = interval_tree_iter_first(root, query, query); node; | ||
24 | node = interval_tree_iter_next(node, query, query)) | ||
25 | results++; | ||
26 | return results; | ||
27 | } | ||
28 | |||
29 | static void init(void) | ||
30 | { | ||
31 | int i; | ||
32 | for (i = 0; i < NODES; i++) { | ||
33 | u32 a = prandom32(&rnd), b = prandom32(&rnd); | ||
34 | if (a <= b) { | ||
35 | nodes[i].start = a; | ||
36 | nodes[i].last = b; | ||
37 | } else { | ||
38 | nodes[i].start = b; | ||
39 | nodes[i].last = a; | ||
40 | } | ||
41 | } | ||
42 | for (i = 0; i < SEARCHES; i++) | ||
43 | queries[i] = prandom32(&rnd); | ||
44 | } | ||
45 | |||
46 | static int interval_tree_test_init(void) | ||
47 | { | ||
48 | int i, j; | ||
49 | unsigned long results; | ||
50 | cycles_t time1, time2, time; | ||
51 | |||
52 | printk(KERN_ALERT "interval tree insert/remove"); | ||
53 | |||
54 | prandom32_seed(&rnd, 3141592653589793238ULL); | ||
55 | init(); | ||
56 | |||
57 | time1 = get_cycles(); | ||
58 | |||
59 | for (i = 0; i < PERF_LOOPS; i++) { | ||
60 | for (j = 0; j < NODES; j++) | ||
61 | interval_tree_insert(nodes + j, &root); | ||
62 | for (j = 0; j < NODES; j++) | ||
63 | interval_tree_remove(nodes + j, &root); | ||
64 | } | ||
65 | |||
66 | time2 = get_cycles(); | ||
67 | time = time2 - time1; | ||
68 | |||
69 | time = div_u64(time, PERF_LOOPS); | ||
70 | printk(" -> %llu cycles\n", (unsigned long long)time); | ||
71 | |||
72 | printk(KERN_ALERT "interval tree search"); | ||
73 | |||
74 | for (j = 0; j < NODES; j++) | ||
75 | interval_tree_insert(nodes + j, &root); | ||
76 | |||
77 | time1 = get_cycles(); | ||
78 | |||
79 | results = 0; | ||
80 | for (i = 0; i < SEARCH_LOOPS; i++) | ||
81 | for (j = 0; j < SEARCHES; j++) | ||
82 | results += search(queries[j], &root); | ||
83 | |||
84 | time2 = get_cycles(); | ||
85 | time = time2 - time1; | ||
86 | |||
87 | time = div_u64(time, SEARCH_LOOPS); | ||
88 | results = div_u64(results, SEARCH_LOOPS); | ||
89 | printk(" -> %llu cycles (%lu results)\n", | ||
90 | (unsigned long long)time, results); | ||
91 | |||
92 | return -EAGAIN; /* Failing here will directly unload the module */ | ||
93 | } | ||
94 | |||
95 | static void interval_tree_test_exit(void) | ||
96 | { | ||
97 | printk(KERN_ALERT "test exit\n"); | ||
98 | } | ||
99 | |||
100 | module_init(interval_tree_test_init) | ||
101 | module_exit(interval_tree_test_exit) | ||
102 | |||
103 | MODULE_LICENSE("GPL"); | ||
104 | MODULE_AUTHOR("Michel Lespinasse"); | ||
105 | MODULE_DESCRIPTION("Interval Tree test"); | ||
diff --git a/lib/iomap.c b/lib/iomap.c index 5dbcb4b2d864..2c08f36862eb 100644 --- a/lib/iomap.c +++ b/lib/iomap.c | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <linux/pci.h> | 6 | #include <linux/pci.h> |
7 | #include <linux/io.h> | 7 | #include <linux/io.h> |
8 | 8 | ||
9 | #include <linux/module.h> | 9 | #include <linux/export.h> |
10 | 10 | ||
11 | /* | 11 | /* |
12 | * Read/write from/to an (offsettable) iomem cookie. It might be a PIO | 12 | * Read/write from/to an (offsettable) iomem cookie. It might be a PIO |
@@ -242,45 +242,11 @@ EXPORT_SYMBOL(ioport_unmap); | |||
242 | #endif /* CONFIG_HAS_IOPORT */ | 242 | #endif /* CONFIG_HAS_IOPORT */ |
243 | 243 | ||
244 | #ifdef CONFIG_PCI | 244 | #ifdef CONFIG_PCI |
245 | /** | 245 | /* Hide the details if this is a MMIO or PIO address space and just do what |
246 | * pci_iomap - create a virtual mapping cookie for a PCI BAR | 246 | * you expect in the correct way. */ |
247 | * @dev: PCI device that owns the BAR | ||
248 | * @bar: BAR number | ||
249 | * @maxlen: length of the memory to map | ||
250 | * | ||
251 | * Using this function you will get a __iomem address to your device BAR. | ||
252 | * You can access it using ioread*() and iowrite*(). These functions hide | ||
253 | * the details if this is a MMIO or PIO address space and will just do what | ||
254 | * you expect from them in the correct way. | ||
255 | * | ||
256 | * @maxlen specifies the maximum length to map. If you want to get access to | ||
257 | * the complete BAR without checking for its length first, pass %0 here. | ||
258 | * */ | ||
259 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | ||
260 | { | ||
261 | resource_size_t start = pci_resource_start(dev, bar); | ||
262 | resource_size_t len = pci_resource_len(dev, bar); | ||
263 | unsigned long flags = pci_resource_flags(dev, bar); | ||
264 | |||
265 | if (!len || !start) | ||
266 | return NULL; | ||
267 | if (maxlen && len > maxlen) | ||
268 | len = maxlen; | ||
269 | if (flags & IORESOURCE_IO) | ||
270 | return ioport_map(start, len); | ||
271 | if (flags & IORESOURCE_MEM) { | ||
272 | if (flags & IORESOURCE_CACHEABLE) | ||
273 | return ioremap(start, len); | ||
274 | return ioremap_nocache(start, len); | ||
275 | } | ||
276 | /* What? */ | ||
277 | return NULL; | ||
278 | } | ||
279 | |||
280 | void pci_iounmap(struct pci_dev *dev, void __iomem * addr) | 247 | void pci_iounmap(struct pci_dev *dev, void __iomem * addr) |
281 | { | 248 | { |
282 | IO_COND(addr, /* nothing */, iounmap(addr)); | 249 | IO_COND(addr, /* nothing */, iounmap(addr)); |
283 | } | 250 | } |
284 | EXPORT_SYMBOL(pci_iomap); | ||
285 | EXPORT_SYMBOL(pci_iounmap); | 251 | EXPORT_SYMBOL(pci_iounmap); |
286 | #endif /* CONFIG_PCI */ | 252 | #endif /* CONFIG_PCI */ |
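Only pci_iounmap() remains in this file. For reference, the kernel-doc removed above described the usual calling pattern, roughly as follows; the BAR number and register offset are illustrative:

	void __iomem *regs = pci_iomap(pdev, 0, 0);	/* maxlen == 0: map the whole BAR */

	if (!regs)
		return -ENOMEM;
	iowrite32(0x1, regs + 0x10);			/* hypothetical register at offset 0x10 */
	pci_iounmap(pdev, regs);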
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c index 864fc5ea398c..4527e751b5e0 100644 --- a/lib/iomap_copy.c +++ b/lib/iomap_copy.c | |||
@@ -15,7 +15,7 @@ | |||
15 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. | 15 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/module.h> | 18 | #include <linux/export.h> |
19 | #include <linux/io.h> | 19 | #include <linux/io.h> |
20 | 20 | ||
21 | /** | 21 | /** |
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c index da053313ee5c..c27e269210c4 100644 --- a/lib/iommu-helper.c +++ b/lib/iommu-helper.c | |||
@@ -2,8 +2,9 @@ | |||
2 | * IOMMU helper functions for the free area management | 2 | * IOMMU helper functions for the free area management |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <linux/module.h> | 5 | #include <linux/export.h> |
6 | #include <linux/bitmap.h> | 6 | #include <linux/bitmap.h> |
7 | #include <linux/bug.h> | ||
7 | 8 | ||
8 | int iommu_is_span_boundary(unsigned int index, unsigned int nr, | 9 | int iommu_is_span_boundary(unsigned int index, unsigned int nr, |
9 | unsigned long shift, | 10 | unsigned long shift, |
diff --git a/lib/ioremap.c b/lib/ioremap.c index da4e2ad74b68..0c9216c48762 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c | |||
@@ -9,7 +9,7 @@ | |||
9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/io.h> | 11 | #include <linux/io.h> |
12 | #include <linux/module.h> | 12 | #include <linux/export.h> |
13 | #include <asm/cacheflush.h> | 13 | #include <asm/cacheflush.h> |
14 | #include <asm/pgtable.h> | 14 | #include <asm/pgtable.h> |
15 | 15 | ||
diff --git a/lib/irq_regs.c b/lib/irq_regs.c index 753880a5440c..9c0a1d70fbe8 100644 --- a/lib/irq_regs.c +++ b/lib/irq_regs.c | |||
@@ -8,7 +8,8 @@ | |||
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | #include <linux/module.h> | 11 | #include <linux/export.h> |
12 | #include <linux/percpu.h> | ||
12 | #include <asm/irq_regs.h> | 13 | #include <asm/irq_regs.h> |
13 | 14 | ||
14 | #ifndef ARCH_HAS_OWN_IRQ_REGS | 15 | #ifndef ARCH_HAS_OWN_IRQ_REGS |
diff --git a/lib/jedec_ddr_data.c b/lib/jedec_ddr_data.c new file mode 100644 index 000000000000..6d2cbf1d567f --- /dev/null +++ b/lib/jedec_ddr_data.c | |||
@@ -0,0 +1,135 @@ | |||
1 | /* | ||
2 | * DDR addressing details and AC timing parameters from JEDEC specs | ||
3 | * | ||
4 | * Copyright (C) 2012 Texas Instruments, Inc. | ||
5 | * | ||
6 | * Aneesh V <aneesh@ti.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <memory/jedec_ddr.h> | ||
14 | #include <linux/module.h> | ||
15 | |||
16 | /* LPDDR2 addressing details from JESD209-2 section 2.4 */ | ||
17 | const struct lpddr2_addressing | ||
18 | lpddr2_jedec_addressing_table[NUM_DDR_ADDR_TABLE_ENTRIES] = { | ||
19 | {B4, T_REFI_15_6, T_RFC_90}, /* 64M */ | ||
20 | {B4, T_REFI_15_6, T_RFC_90}, /* 128M */ | ||
21 | {B4, T_REFI_7_8, T_RFC_90}, /* 256M */ | ||
22 | {B4, T_REFI_7_8, T_RFC_90}, /* 512M */ | ||
23 | {B8, T_REFI_7_8, T_RFC_130}, /* 1GS4 */ | ||
24 | {B8, T_REFI_3_9, T_RFC_130}, /* 2GS4 */ | ||
25 | {B8, T_REFI_3_9, T_RFC_130}, /* 4G */ | ||
26 | {B8, T_REFI_3_9, T_RFC_210}, /* 8G */ | ||
27 | {B4, T_REFI_7_8, T_RFC_130}, /* 1GS2 */ | ||
28 | {B4, T_REFI_3_9, T_RFC_130}, /* 2GS2 */ | ||
29 | }; | ||
30 | EXPORT_SYMBOL_GPL(lpddr2_jedec_addressing_table); | ||
31 | |||
32 | /* LPDDR2 AC timing parameters from JESD209-2 section 12 */ | ||
33 | const struct lpddr2_timings | ||
34 | lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES] = { | ||
35 | /* Speed bin 400(200 MHz) */ | ||
36 | [0] = { | ||
37 | .max_freq = 200000000, | ||
38 | .min_freq = 10000000, | ||
39 | .tRPab = 21000, | ||
40 | .tRCD = 18000, | ||
41 | .tWR = 15000, | ||
42 | .tRAS_min = 42000, | ||
43 | .tRRD = 10000, | ||
44 | .tWTR = 10000, | ||
45 | .tXP = 7500, | ||
46 | .tRTP = 7500, | ||
47 | .tCKESR = 15000, | ||
48 | .tDQSCK_max = 5500, | ||
49 | .tFAW = 50000, | ||
50 | .tZQCS = 90000, | ||
51 | .tZQCL = 360000, | ||
52 | .tZQinit = 1000000, | ||
53 | .tRAS_max_ns = 70000, | ||
54 | .tDQSCK_max_derated = 6000, | ||
55 | }, | ||
56 | /* Speed bin 533(266 MHz) */ | ||
57 | [1] = { | ||
58 | .max_freq = 266666666, | ||
59 | .min_freq = 10000000, | ||
60 | .tRPab = 21000, | ||
61 | .tRCD = 18000, | ||
62 | .tWR = 15000, | ||
63 | .tRAS_min = 42000, | ||
64 | .tRRD = 10000, | ||
65 | .tWTR = 7500, | ||
66 | .tXP = 7500, | ||
67 | .tRTP = 7500, | ||
68 | .tCKESR = 15000, | ||
69 | .tDQSCK_max = 5500, | ||
70 | .tFAW = 50000, | ||
71 | .tZQCS = 90000, | ||
72 | .tZQCL = 360000, | ||
73 | .tZQinit = 1000000, | ||
74 | .tRAS_max_ns = 70000, | ||
75 | .tDQSCK_max_derated = 6000, | ||
76 | }, | ||
77 | /* Speed bin 800(400 MHz) */ | ||
78 | [2] = { | ||
79 | .max_freq = 400000000, | ||
80 | .min_freq = 10000000, | ||
81 | .tRPab = 21000, | ||
82 | .tRCD = 18000, | ||
83 | .tWR = 15000, | ||
84 | .tRAS_min = 42000, | ||
85 | .tRRD = 10000, | ||
86 | .tWTR = 7500, | ||
87 | .tXP = 7500, | ||
88 | .tRTP = 7500, | ||
89 | .tCKESR = 15000, | ||
90 | .tDQSCK_max = 5500, | ||
91 | .tFAW = 50000, | ||
92 | .tZQCS = 90000, | ||
93 | .tZQCL = 360000, | ||
94 | .tZQinit = 1000000, | ||
95 | .tRAS_max_ns = 70000, | ||
96 | .tDQSCK_max_derated = 6000, | ||
97 | }, | ||
98 | /* Speed bin 1066(533 MHz) */ | ||
99 | [3] = { | ||
100 | .max_freq = 533333333, | ||
101 | .min_freq = 10000000, | ||
102 | .tRPab = 21000, | ||
103 | .tRCD = 18000, | ||
104 | .tWR = 15000, | ||
105 | .tRAS_min = 42000, | ||
106 | .tRRD = 10000, | ||
107 | .tWTR = 7500, | ||
108 | .tXP = 7500, | ||
109 | .tRTP = 7500, | ||
110 | .tCKESR = 15000, | ||
111 | .tDQSCK_max = 5500, | ||
112 | .tFAW = 50000, | ||
113 | .tZQCS = 90000, | ||
114 | .tZQCL = 360000, | ||
115 | .tZQinit = 1000000, | ||
116 | .tRAS_max_ns = 70000, | ||
117 | .tDQSCK_max_derated = 5620, | ||
118 | }, | ||
119 | }; | ||
120 | EXPORT_SYMBOL_GPL(lpddr2_jedec_timings); | ||
121 | |||
122 | const struct lpddr2_min_tck lpddr2_jedec_min_tck = { | ||
123 | .tRPab = 3, | ||
124 | .tRCD = 3, | ||
125 | .tWR = 3, | ||
126 | .tRASmin = 3, | ||
127 | .tRRD = 2, | ||
128 | .tWTR = 2, | ||
129 | .tXP = 2, | ||
130 | .tRTP = 2, | ||
131 | .tCKE = 3, | ||
132 | .tCKESR = 3, | ||
133 | .tFAW = 8 | ||
134 | }; | ||
135 | EXPORT_SYMBOL_GPL(lpddr2_jedec_min_tck); | ||
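A memory-controller driver would typically index these exported tables by speed bin and combine the time-based values with the minimum-cycle counts. A hedged sketch against the values above; tck_ps (the clock period) is a placeholder:

	/* Speed bin 800 (400 MHz) is entry [2] of lpddr2_jedec_timings above */
	const struct lpddr2_timings *t = &lpddr2_jedec_timings[2];
	u32 trcd = max_t(u32, t->tRCD, lpddr2_jedec_min_tck.tRCD * tck_ps);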
diff --git a/lib/kasprintf.c b/lib/kasprintf.c index 9c4233b23783..32f12150fc4f 100644 --- a/lib/kasprintf.c +++ b/lib/kasprintf.c | |||
@@ -5,7 +5,7 @@ | |||
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <stdarg.h> | 7 | #include <stdarg.h> |
8 | #include <linux/module.h> | 8 | #include <linux/export.h> |
9 | #include <linux/slab.h> | 9 | #include <linux/slab.h> |
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
@@ -21,7 +21,7 @@ char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap) | |||
21 | len = vsnprintf(NULL, 0, fmt, aq); | 21 | len = vsnprintf(NULL, 0, fmt, aq); |
22 | va_end(aq); | 22 | va_end(aq); |
23 | 23 | ||
24 | p = kmalloc(len+1, gfp); | 24 | p = kmalloc_track_caller(len+1, gfp); |
25 | if (!p) | 25 | if (!p) |
26 | return NULL; | 26 | return NULL; |
27 | 27 | ||
diff --git a/lib/klist.c b/lib/klist.c index 573d6068a42e..0874e41609a6 100644 --- a/lib/klist.c +++ b/lib/klist.c | |||
@@ -35,7 +35,7 @@ | |||
35 | */ | 35 | */ |
36 | 36 | ||
37 | #include <linux/klist.h> | 37 | #include <linux/klist.h> |
38 | #include <linux/module.h> | 38 | #include <linux/export.h> |
39 | #include <linux/sched.h> | 39 | #include <linux/sched.h> |
40 | 40 | ||
41 | /* | 41 | /* |
diff --git a/lib/kobject.c b/lib/kobject.c index 640bd98a4c8a..e07ee1fcd6f1 100644 --- a/lib/kobject.c +++ b/lib/kobject.c | |||
@@ -14,7 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/kobject.h> | 15 | #include <linux/kobject.h> |
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | #include <linux/module.h> | 17 | #include <linux/export.h> |
18 | #include <linux/stat.h> | 18 | #include <linux/stat.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | 20 | ||
@@ -47,13 +47,11 @@ static int populate_dir(struct kobject *kobj) | |||
47 | static int create_dir(struct kobject *kobj) | 47 | static int create_dir(struct kobject *kobj) |
48 | { | 48 | { |
49 | int error = 0; | 49 | int error = 0; |
50 | if (kobject_name(kobj)) { | 50 | error = sysfs_create_dir(kobj); |
51 | error = sysfs_create_dir(kobj); | 51 | if (!error) { |
52 | if (!error) { | 52 | error = populate_dir(kobj); |
53 | error = populate_dir(kobj); | 53 | if (error) |
54 | if (error) | 54 | sysfs_remove_dir(kobj); |
55 | sysfs_remove_dir(kobj); | ||
56 | } | ||
57 | } | 55 | } |
58 | return error; | 56 | return error; |
59 | } | 57 | } |
@@ -192,14 +190,14 @@ static int kobject_add_internal(struct kobject *kobj) | |||
192 | 190 | ||
193 | /* be noisy on error issues */ | 191 | /* be noisy on error issues */ |
194 | if (error == -EEXIST) | 192 | if (error == -EEXIST) |
195 | printk(KERN_ERR "%s failed for %s with " | 193 | WARN(1, "%s failed for %s with " |
196 | "-EEXIST, don't try to register things with " | 194 | "-EEXIST, don't try to register things with " |
197 | "the same name in the same directory.\n", | 195 | "the same name in the same directory.\n", |
198 | __func__, kobject_name(kobj)); | 196 | __func__, kobject_name(kobj)); |
199 | else | 197 | else |
200 | printk(KERN_ERR "%s failed for %s (%d)\n", | 198 | WARN(1, "%s failed for %s (error: %d parent: %s)\n", |
201 | __func__, kobject_name(kobj), error); | 199 | __func__, kobject_name(kobj), error, |
202 | dump_stack(); | 200 | parent ? kobject_name(parent) : "'none'"); |
203 | } else | 201 | } else |
204 | kobj->state_in_sysfs = 1; | 202 | kobj->state_in_sysfs = 1; |
205 | 203 | ||
@@ -634,7 +632,7 @@ struct kobject *kobject_create(void) | |||
634 | /** | 632 | /** |
635 | * kobject_create_and_add - create a struct kobject dynamically and register it with sysfs | 633 | * kobject_create_and_add - create a struct kobject dynamically and register it with sysfs |
636 | * | 634 | * |
637 | * @name: the name for the kset | 635 | * @name: the name for the kobject |
638 | * @parent: the parent kobject of this kobject, if any. | 636 | * @parent: the parent kobject of this kobject, if any. |
639 | * | 637 | * |
640 | * This function creates a kobject structure dynamically and registers it | 638 | * This function creates a kobject structure dynamically and registers it |
@@ -746,43 +744,11 @@ void kset_unregister(struct kset *k) | |||
746 | */ | 744 | */ |
747 | struct kobject *kset_find_obj(struct kset *kset, const char *name) | 745 | struct kobject *kset_find_obj(struct kset *kset, const char *name) |
748 | { | 746 | { |
749 | return kset_find_obj_hinted(kset, name, NULL); | ||
750 | } | ||
751 | |||
752 | /** | ||
753 | * kset_find_obj_hinted - search for object in kset given a predecessor hint. | ||
754 | * @kset: kset we're looking in. | ||
755 | * @name: object's name. | ||
756 | * @hint: hint to possible object's predecessor. | ||
757 | * | ||
758 | * Check the hint's next object and if it is a match return it directly, | ||
759 | * otherwise, fall back to the behavior of kset_find_obj(). Either way | ||
760 | * a reference for the returned object is held and the reference on the | ||
761 | * hinted object is released. | ||
762 | */ | ||
763 | struct kobject *kset_find_obj_hinted(struct kset *kset, const char *name, | ||
764 | struct kobject *hint) | ||
765 | { | ||
766 | struct kobject *k; | 747 | struct kobject *k; |
767 | struct kobject *ret = NULL; | 748 | struct kobject *ret = NULL; |
768 | 749 | ||
769 | spin_lock(&kset->list_lock); | 750 | spin_lock(&kset->list_lock); |
770 | 751 | ||
771 | if (!hint) | ||
772 | goto slow_search; | ||
773 | |||
774 | /* end of list detection */ | ||
775 | if (hint->entry.next == kset->list.next) | ||
776 | goto slow_search; | ||
777 | |||
778 | k = container_of(hint->entry.next, struct kobject, entry); | ||
779 | if (!kobject_name(k) || strcmp(kobject_name(k), name)) | ||
780 | goto slow_search; | ||
781 | |||
782 | ret = kobject_get(k); | ||
783 | goto unlock_exit; | ||
784 | |||
785 | slow_search: | ||
786 | list_for_each_entry(k, &kset->list, entry) { | 752 | list_for_each_entry(k, &kset->list, entry) { |
787 | if (kobject_name(k) && !strcmp(kobject_name(k), name)) { | 753 | if (kobject_name(k) && !strcmp(kobject_name(k), name)) { |
788 | ret = kobject_get(k); | 754 | ret = kobject_get(k); |
@@ -790,12 +756,7 @@ slow_search: | |||
790 | } | 756 | } |
791 | } | 757 | } |
792 | 758 | ||
793 | unlock_exit: | ||
794 | spin_unlock(&kset->list_lock); | 759 | spin_unlock(&kset->list_lock); |
795 | |||
796 | if (hint) | ||
797 | kobject_put(hint); | ||
798 | |||
799 | return ret; | 760 | return ret; |
800 | } | 761 | } |
801 | 762 | ||
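With the hinted variant removed, kset_find_obj() is the only lookup path; callers that passed a hint simply drop that argument (and no longer hand over a reference on the hint). The remaining pattern, with an illustrative name:

	struct kobject *k = kset_find_obj(kset, "foo");

	if (k) {
		/* kset_find_obj() returned k with an extra reference held */
		kobject_put(k);
	}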
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 70af0a7f97c0..52e5abbc41db 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
@@ -17,7 +17,8 @@ | |||
17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
18 | #include <linux/string.h> | 18 | #include <linux/string.h> |
19 | #include <linux/kobject.h> | 19 | #include <linux/kobject.h> |
20 | #include <linux/module.h> | 20 | #include <linux/export.h> |
21 | #include <linux/kmod.h> | ||
21 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
22 | #include <linux/user_namespace.h> | 23 | #include <linux/user_namespace.h> |
23 | #include <linux/socket.h> | 24 | #include <linux/socket.h> |
@@ -29,16 +30,17 @@ | |||
29 | 30 | ||
30 | u64 uevent_seqnum; | 31 | u64 uevent_seqnum; |
31 | char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; | 32 | char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; |
32 | static DEFINE_SPINLOCK(sequence_lock); | ||
33 | #ifdef CONFIG_NET | 33 | #ifdef CONFIG_NET |
34 | struct uevent_sock { | 34 | struct uevent_sock { |
35 | struct list_head list; | 35 | struct list_head list; |
36 | struct sock *sk; | 36 | struct sock *sk; |
37 | }; | 37 | }; |
38 | static LIST_HEAD(uevent_sock_list); | 38 | static LIST_HEAD(uevent_sock_list); |
39 | static DEFINE_MUTEX(uevent_sock_mutex); | ||
40 | #endif | 39 | #endif |
41 | 40 | ||
41 | /* This lock protects uevent_seqnum and uevent_sock_list */ | ||
42 | static DEFINE_MUTEX(uevent_sock_mutex); | ||
43 | |||
42 | /* the strings here must match the enum in include/linux/kobject.h */ | 44 | /* the strings here must match the enum in include/linux/kobject.h */ |
43 | static const char *kobject_actions[] = { | 45 | static const char *kobject_actions[] = { |
44 | [KOBJ_ADD] = "add", | 46 | [KOBJ_ADD] = "add", |
@@ -136,7 +138,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
136 | struct kobject *top_kobj; | 138 | struct kobject *top_kobj; |
137 | struct kset *kset; | 139 | struct kset *kset; |
138 | const struct kset_uevent_ops *uevent_ops; | 140 | const struct kset_uevent_ops *uevent_ops; |
139 | u64 seq; | ||
140 | int i = 0; | 141 | int i = 0; |
141 | int retval = 0; | 142 | int retval = 0; |
142 | #ifdef CONFIG_NET | 143 | #ifdef CONFIG_NET |
@@ -243,22 +244,24 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
243 | else if (action == KOBJ_REMOVE) | 244 | else if (action == KOBJ_REMOVE) |
244 | kobj->state_remove_uevent_sent = 1; | 245 | kobj->state_remove_uevent_sent = 1; |
245 | 246 | ||
247 | mutex_lock(&uevent_sock_mutex); | ||
246 | /* we will send an event, so request a new sequence number */ | 248 | /* we will send an event, so request a new sequence number */ |
247 | spin_lock(&sequence_lock); | 249 | retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum); |
248 | seq = ++uevent_seqnum; | 250 | if (retval) { |
249 | spin_unlock(&sequence_lock); | 251 | mutex_unlock(&uevent_sock_mutex); |
250 | retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq); | ||
251 | if (retval) | ||
252 | goto exit; | 252 | goto exit; |
253 | } | ||
253 | 254 | ||
254 | #if defined(CONFIG_NET) | 255 | #if defined(CONFIG_NET) |
255 | /* send netlink message */ | 256 | /* send netlink message */ |
256 | mutex_lock(&uevent_sock_mutex); | ||
257 | list_for_each_entry(ue_sk, &uevent_sock_list, list) { | 257 | list_for_each_entry(ue_sk, &uevent_sock_list, list) { |
258 | struct sock *uevent_sock = ue_sk->sk; | 258 | struct sock *uevent_sock = ue_sk->sk; |
259 | struct sk_buff *skb; | 259 | struct sk_buff *skb; |
260 | size_t len; | 260 | size_t len; |
261 | 261 | ||
262 | if (!netlink_has_listeners(uevent_sock, 1)) | ||
263 | continue; | ||
264 | |||
262 | /* allocate message with the maximum possible size */ | 265 | /* allocate message with the maximum possible size */ |
263 | len = strlen(action_string) + strlen(devpath) + 2; | 266 | len = strlen(action_string) + strlen(devpath) + 2; |
264 | skb = alloc_skb(len + env->buflen, GFP_KERNEL); | 267 | skb = alloc_skb(len + env->buflen, GFP_KERNEL); |
@@ -282,13 +285,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
282 | kobj_bcast_filter, | 285 | kobj_bcast_filter, |
283 | kobj); | 286 | kobj); |
284 | /* ENOBUFS should be handled in userspace */ | 287 | /* ENOBUFS should be handled in userspace */ |
285 | if (retval == -ENOBUFS) | 288 | if (retval == -ENOBUFS || retval == -ESRCH) |
286 | retval = 0; | 289 | retval = 0; |
287 | } else | 290 | } else |
288 | retval = -ENOMEM; | 291 | retval = -ENOMEM; |
289 | } | 292 | } |
290 | mutex_unlock(&uevent_sock_mutex); | ||
291 | #endif | 293 | #endif |
294 | mutex_unlock(&uevent_sock_mutex); | ||
292 | 295 | ||
293 | /* call uevent_helper, usually only enabled during early boot */ | 296 | /* call uevent_helper, usually only enabled during early boot */ |
294 | if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { | 297 | if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { |
@@ -370,13 +373,16 @@ EXPORT_SYMBOL_GPL(add_uevent_var); | |||
370 | static int uevent_net_init(struct net *net) | 373 | static int uevent_net_init(struct net *net) |
371 | { | 374 | { |
372 | struct uevent_sock *ue_sk; | 375 | struct uevent_sock *ue_sk; |
376 | struct netlink_kernel_cfg cfg = { | ||
377 | .groups = 1, | ||
378 | .flags = NL_CFG_F_NONROOT_RECV, | ||
379 | }; | ||
373 | 380 | ||
374 | ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL); | 381 | ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL); |
375 | if (!ue_sk) | 382 | if (!ue_sk) |
376 | return -ENOMEM; | 383 | return -ENOMEM; |
377 | 384 | ||
378 | ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, | 385 | ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg); |
379 | 1, NULL, NULL, THIS_MODULE); | ||
380 | if (!ue_sk->sk) { | 386 | if (!ue_sk->sk) { |
381 | printk(KERN_ERR | 387 | printk(KERN_ERR |
382 | "kobject_uevent: unable to create netlink socket!\n"); | 388 | "kobject_uevent: unable to create netlink socket!\n"); |
@@ -416,7 +422,6 @@ static struct pernet_operations uevent_net_ops = { | |||
416 | 422 | ||
417 | static int __init kobject_uevent_init(void) | 423 | static int __init kobject_uevent_init(void) |
418 | { | 424 | { |
419 | netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV); | ||
420 | return register_pernet_subsys(&uevent_net_ops); | 425 | return register_pernet_subsys(&uevent_net_ops); |
421 | } | 426 | } |
422 | 427 | ||
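The uevent socket now passes its parameters through struct netlink_kernel_cfg, as shown above, and the positional form is gone. Other in-kernel callers convert the same way; a rough sketch where MY_PROTO and sk are placeholders:

	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups = 1,		/* was the third positional argument */
		.input  = NULL,		/* no receive callback for a broadcast-only socket */
	};

	/* was: netlink_kernel_create(net, MY_PROTO, 1, NULL, NULL, THIS_MODULE) */
	sk = netlink_kernel_create(net, MY_PROTO, &cfg);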
diff --git a/lib/kref.c b/lib/kref.c deleted file mode 100644 index 3efb882b11db..000000000000 --- a/lib/kref.c +++ /dev/null | |||
@@ -1,97 +0,0 @@ | |||
1 | /* | ||
2 | * kref.c - library routines for handling generic reference counted objects | ||
3 | * | ||
4 | * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> | ||
5 | * Copyright (C) 2004 IBM Corp. | ||
6 | * | ||
7 | * based on lib/kobject.c which was: | ||
8 | * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org> | ||
9 | * | ||
10 | * This file is released under the GPLv2. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kref.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/slab.h> | ||
17 | |||
18 | /** | ||
19 | * kref_init - initialize object. | ||
20 | * @kref: object in question. | ||
21 | */ | ||
22 | void kref_init(struct kref *kref) | ||
23 | { | ||
24 | atomic_set(&kref->refcount, 1); | ||
25 | smp_mb(); | ||
26 | } | ||
27 | |||
28 | /** | ||
29 | * kref_get - increment refcount for object. | ||
30 | * @kref: object. | ||
31 | */ | ||
32 | void kref_get(struct kref *kref) | ||
33 | { | ||
34 | WARN_ON(!atomic_read(&kref->refcount)); | ||
35 | atomic_inc(&kref->refcount); | ||
36 | smp_mb__after_atomic_inc(); | ||
37 | } | ||
38 | |||
39 | /** | ||
40 | * kref_put - decrement refcount for object. | ||
41 | * @kref: object. | ||
42 | * @release: pointer to the function that will clean up the object when the | ||
43 | * last reference to the object is released. | ||
44 | * This pointer is required, and it is not acceptable to pass kfree | ||
45 | * in as this function. | ||
46 | * | ||
47 | * Decrement the refcount, and if 0, call release(). | ||
48 | * Return 1 if the object was removed, otherwise return 0. Beware, if this | ||
49 | * function returns 0, you still can not count on the kref from remaining in | ||
50 | * memory. Only use the return value if you want to see if the kref is now | ||
51 | * gone, not present. | ||
52 | */ | ||
53 | int kref_put(struct kref *kref, void (*release)(struct kref *kref)) | ||
54 | { | ||
55 | WARN_ON(release == NULL); | ||
56 | WARN_ON(release == (void (*)(struct kref *))kfree); | ||
57 | |||
58 | if (atomic_dec_and_test(&kref->refcount)) { | ||
59 | release(kref); | ||
60 | return 1; | ||
61 | } | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | |||
66 | /** | ||
67 | * kref_sub - subtract a number of refcounts for object. | ||
68 | * @kref: object. | ||
69 | * @count: Number of recounts to subtract. | ||
70 | * @release: pointer to the function that will clean up the object when the | ||
71 | * last reference to the object is released. | ||
72 | * This pointer is required, and it is not acceptable to pass kfree | ||
73 | * in as this function. | ||
74 | * | ||
75 | * Subtract @count from the refcount, and if 0, call release(). | ||
76 | * Return 1 if the object was removed, otherwise return 0. Beware, if this | ||
77 | * function returns 0, you still can not count on the kref from remaining in | ||
78 | * memory. Only use the return value if you want to see if the kref is now | ||
79 | * gone, not present. | ||
80 | */ | ||
81 | int kref_sub(struct kref *kref, unsigned int count, | ||
82 | void (*release)(struct kref *kref)) | ||
83 | { | ||
84 | WARN_ON(release == NULL); | ||
85 | WARN_ON(release == (void (*)(struct kref *))kfree); | ||
86 | |||
87 | if (atomic_sub_and_test((int) count, &kref->refcount)) { | ||
88 | release(kref); | ||
89 | return 1; | ||
90 | } | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | EXPORT_SYMBOL(kref_init); | ||
95 | EXPORT_SYMBOL(kref_get); | ||
96 | EXPORT_SYMBOL(kref_put); | ||
97 | EXPORT_SYMBOL(kref_sub); | ||
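lib/kref.c goes away because the out-of-line copies are no longer needed (the helpers presumably live on as static inlines in <linux/kref.h>); the API documented in the removed file is unchanged. A minimal lifecycle sketch with a hypothetical struct foo:

	struct foo {
		struct kref ref;
	};

	static void foo_release(struct kref *kref)
	{
		kfree(container_of(kref, struct foo, ref));
	}

	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	kref_init(&f->ref);			/* refcount starts at 1 */
	kref_get(&f->ref);			/* take an additional reference */
	kref_put(&f->ref, foo_release);		/* foo_release() runs when the count hits zero */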
diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 5e066759f551..c3615eab0cc3 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c | |||
@@ -15,29 +15,44 @@ | |||
15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/math64.h> | 17 | #include <linux/math64.h> |
18 | #include <linux/module.h> | 18 | #include <linux/export.h> |
19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
21 | #include "kstrtox.h" | ||
21 | 22 | ||
22 | static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) | 23 | const char *_parse_integer_fixup_radix(const char *s, unsigned int *base) |
23 | { | 24 | { |
24 | unsigned long long acc; | 25 | if (*base == 0) { |
25 | int ok; | ||
26 | |||
27 | if (base == 0) { | ||
28 | if (s[0] == '0') { | 26 | if (s[0] == '0') { |
29 | if (_tolower(s[1]) == 'x' && isxdigit(s[2])) | 27 | if (_tolower(s[1]) == 'x' && isxdigit(s[2])) |
30 | base = 16; | 28 | *base = 16; |
31 | else | 29 | else |
32 | base = 8; | 30 | *base = 8; |
33 | } else | 31 | } else |
34 | base = 10; | 32 | *base = 10; |
35 | } | 33 | } |
36 | if (base == 16 && s[0] == '0' && _tolower(s[1]) == 'x') | 34 | if (*base == 16 && s[0] == '0' && _tolower(s[1]) == 'x') |
37 | s += 2; | 35 | s += 2; |
36 | return s; | ||
37 | } | ||
38 | 38 | ||
39 | acc = 0; | 39 | /* |
40 | ok = 0; | 40 | * Convert non-negative integer string representation in explicitly given radix |
41 | * to an integer. | ||
42 | * Return the number of characters consumed, possibly OR-ed with the overflow bit. | ||
43 | * If overflow occurs, result integer (incorrect) is still returned. | ||
44 | * | ||
45 | * Don't you dare use this function. | ||
46 | */ | ||
47 | unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p) | ||
48 | { | ||
49 | unsigned long long res; | ||
50 | unsigned int rv; | ||
51 | int overflow; | ||
52 | |||
53 | res = 0; | ||
54 | rv = 0; | ||
55 | overflow = 0; | ||
41 | while (*s) { | 56 | while (*s) { |
42 | unsigned int val; | 57 | unsigned int val; |
43 | 58 | ||
@@ -45,23 +60,47 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) | |||
45 | val = *s - '0'; | 60 | val = *s - '0'; |
46 | else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f') | 61 | else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f') |
47 | val = _tolower(*s) - 'a' + 10; | 62 | val = _tolower(*s) - 'a' + 10; |
48 | else if (*s == '\n' && *(s + 1) == '\0') | ||
49 | break; | ||
50 | else | 63 | else |
51 | return -EINVAL; | 64 | break; |
52 | 65 | ||
53 | if (val >= base) | 66 | if (val >= base) |
54 | return -EINVAL; | 67 | break; |
55 | if (acc > div_u64(ULLONG_MAX - val, base)) | 68 | /* |
56 | return -ERANGE; | 69 | * Check for overflow only if we are within range of |
57 | acc = acc * base + val; | 70 | * it in the max base we support (16) |
58 | ok = 1; | 71 | */ |
59 | 72 | if (unlikely(res & (~0ull << 60))) { | |
73 | if (res > div_u64(ULLONG_MAX - val, base)) | ||
74 | overflow = 1; | ||
75 | } | ||
76 | res = res * base + val; | ||
77 | rv++; | ||
60 | s++; | 78 | s++; |
61 | } | 79 | } |
62 | if (!ok) | 80 | *p = res; |
81 | if (overflow) | ||
82 | rv |= KSTRTOX_OVERFLOW; | ||
83 | return rv; | ||
84 | } | ||
85 | |||
86 | static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) | ||
87 | { | ||
88 | unsigned long long _res; | ||
89 | unsigned int rv; | ||
90 | |||
91 | s = _parse_integer_fixup_radix(s, &base); | ||
92 | rv = _parse_integer(s, base, &_res); | ||
93 | if (rv & KSTRTOX_OVERFLOW) | ||
94 | return -ERANGE; | ||
95 | rv &= ~KSTRTOX_OVERFLOW; | ||
96 | if (rv == 0) | ||
97 | return -EINVAL; | ||
98 | s += rv; | ||
99 | if (*s == '\n') | ||
100 | s++; | ||
101 | if (*s) | ||
63 | return -EINVAL; | 102 | return -EINVAL; |
64 | *res = acc; | 103 | *res = _res; |
65 | return 0; | 104 | return 0; |
66 | } | 105 | } |
67 | 106 | ||
diff --git a/lib/kstrtox.h b/lib/kstrtox.h new file mode 100644 index 000000000000..f13eeeaf441d --- /dev/null +++ b/lib/kstrtox.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _LIB_KSTRTOX_H | ||
2 | #define _LIB_KSTRTOX_H | ||
3 | |||
4 | #define KSTRTOX_OVERFLOW (1U << 31) | ||
5 | const char *_parse_integer_fixup_radix(const char *s, unsigned int *base); | ||
6 | unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *res); | ||
7 | |||
8 | #endif | ||
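_parse_integer_fixup_radix() and _parse_integer() are internal helpers (hence the private lib/kstrtox.h); normal callers keep using the kstrto*() family, which now routes through them. A typical call, with a hypothetical buffer name:

	unsigned long val;
	int err;

	err = kstrtoul(buf, 0, &val);	/* base 0: auto-detect 0x/0 prefix, as fixed up above */
	if (err)
		return err;		/* -EINVAL on junk, -ERANGE on overflow, as in _kstrtoull() */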
@@ -1,6 +1,6 @@ | |||
1 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
2 | #include <linux/gcd.h> | 2 | #include <linux/gcd.h> |
3 | #include <linux/module.h> | 3 | #include <linux/export.h> |
4 | #include <linux/lcm.h> | 4 | #include <linux/lcm.h> |
5 | 5 | ||
6 | /* Lowest common multiple */ | 6 | /* Lowest common multiple */ |
diff --git a/lib/list_debug.c b/lib/list_debug.c index b8029a5583ff..c24c2f7e296f 100644 --- a/lib/list_debug.c +++ b/lib/list_debug.c | |||
@@ -6,8 +6,11 @@ | |||
6 | * DEBUG_LIST. | 6 | * DEBUG_LIST. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/module.h> | 9 | #include <linux/export.h> |
10 | #include <linux/list.h> | 10 | #include <linux/list.h> |
11 | #include <linux/bug.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/rculist.h> | ||
11 | 14 | ||
12 | /* | 15 | /* |
13 | * Insert a new entry between two known consecutive entries. | 16 | * Insert a new entry between two known consecutive entries. |
@@ -28,6 +31,9 @@ void __list_add(struct list_head *new, | |||
28 | "list_add corruption. prev->next should be " | 31 | "list_add corruption. prev->next should be " |
29 | "next (%p), but was %p. (prev=%p).\n", | 32 | "next (%p), but was %p. (prev=%p).\n", |
30 | next, prev->next, prev); | 33 | next, prev->next, prev); |
34 | WARN(new == prev || new == next, | ||
35 | "list_add double add: new=%p, prev=%p, next=%p.\n", | ||
36 | new, prev, next); | ||
31 | next->prev = new; | 37 | next->prev = new; |
32 | new->next = next; | 38 | new->next = next; |
33 | new->prev = prev; | 39 | new->prev = prev; |
@@ -73,3 +79,22 @@ void list_del(struct list_head *entry) | |||
73 | entry->prev = LIST_POISON2; | 79 | entry->prev = LIST_POISON2; |
74 | } | 80 | } |
75 | EXPORT_SYMBOL(list_del); | 81 | EXPORT_SYMBOL(list_del); |
82 | |||
83 | /* | ||
84 | * RCU variants. | ||
85 | */ | ||
86 | void __list_add_rcu(struct list_head *new, | ||
87 | struct list_head *prev, struct list_head *next) | ||
88 | { | ||
89 | WARN(next->prev != prev, | ||
90 | "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n", | ||
91 | prev, next->prev, next); | ||
92 | WARN(prev->next != next, | ||
93 | "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n", | ||
94 | next, prev->next, prev); | ||
95 | new->next = next; | ||
96 | new->prev = prev; | ||
97 | rcu_assign_pointer(list_next_rcu(prev), new); | ||
98 | next->prev = new; | ||
99 | } | ||
100 | EXPORT_SYMBOL(__list_add_rcu); | ||
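The new __list_add_rcu() checks fire on the writer side of the usual RCU list pattern; callers need no changes. For reference, the pattern being sanity-checked, with placeholder lock, list, and helper names:

	spin_lock(&mylist_lock);
	list_add_rcu(&item->node, &mylist);	/* writer side: ends up in __list_add_rcu() above */
	spin_unlock(&mylist_lock);

	rcu_read_lock();
	list_for_each_entry_rcu(pos, &mylist, node)
		do_something(pos);		/* do_something() is hypothetical */
	rcu_read_unlock();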
diff --git a/lib/llist.c b/lib/llist.c index da445724fa1f..4a15115e90f8 100644 --- a/lib/llist.c +++ b/lib/llist.c | |||
@@ -3,8 +3,8 @@ | |||
3 | * | 3 | * |
4 | * The basic atomic operation of this list is cmpxchg on long. On | 4 | * The basic atomic operation of this list is cmpxchg on long. On |
5 | * architectures that don't have NMI-safe cmpxchg implementation, the | 5 | * architectures that don't have NMI-safe cmpxchg implementation, the |
6 | * list can NOT be used in NMI handler. So code uses the list in NMI | 6 | * list can NOT be used in NMI handlers. So code that uses the list in |
7 | * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. | 7 | * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. |
8 | * | 8 | * |
9 | * Copyright 2010,2011 Intel Corp. | 9 | * Copyright 2010,2011 Intel Corp. |
10 | * Author: Huang Ying <ying.huang@intel.com> | 10 | * Author: Huang Ying <ying.huang@intel.com> |
@@ -23,55 +23,34 @@ | |||
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
24 | */ | 24 | */ |
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/module.h> | 26 | #include <linux/export.h> |
27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
28 | #include <linux/llist.h> | 28 | #include <linux/llist.h> |
29 | 29 | ||
30 | #include <asm/system.h> | ||
31 | |||
32 | /** | ||
33 | * llist_add - add a new entry | ||
34 | * @new: new entry to be added | ||
35 | * @head: the head for your lock-less list | ||
36 | */ | ||
37 | void llist_add(struct llist_node *new, struct llist_head *head) | ||
38 | { | ||
39 | struct llist_node *entry, *old_entry; | ||
40 | |||
41 | #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG | ||
42 | BUG_ON(in_nmi()); | ||
43 | #endif | ||
44 | |||
45 | entry = head->first; | ||
46 | do { | ||
47 | old_entry = entry; | ||
48 | new->next = entry; | ||
49 | cpu_relax(); | ||
50 | } while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry); | ||
51 | } | ||
52 | EXPORT_SYMBOL_GPL(llist_add); | ||
53 | 30 | ||
54 | /** | 31 | /** |
55 | * llist_add_batch - add several linked entries in batch | 32 | * llist_add_batch - add several linked entries in batch |
56 | * @new_first: first entry in batch to be added | 33 | * @new_first: first entry in batch to be added |
57 | * @new_last: last entry in batch to be added | 34 | * @new_last: last entry in batch to be added |
58 | * @head: the head for your lock-less list | 35 | * @head: the head for your lock-less list |
36 | * | ||
37 | * Return whether list is empty before adding. | ||
59 | */ | 38 | */ |
60 | void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, | 39 | bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, |
61 | struct llist_head *head) | 40 | struct llist_head *head) |
62 | { | 41 | { |
63 | struct llist_node *entry, *old_entry; | 42 | struct llist_node *entry, *old_entry; |
64 | 43 | ||
65 | #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG | ||
66 | BUG_ON(in_nmi()); | ||
67 | #endif | ||
68 | |||
69 | entry = head->first; | 44 | entry = head->first; |
70 | do { | 45 | for (;;) { |
71 | old_entry = entry; | 46 | old_entry = entry; |
72 | new_last->next = entry; | 47 | new_last->next = entry; |
73 | cpu_relax(); | 48 | entry = cmpxchg(&head->first, old_entry, new_first); |
74 | } while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry); | 49 | if (entry == old_entry) |
50 | break; | ||
51 | } | ||
52 | |||
53 | return old_entry == NULL; | ||
75 | } | 54 | } |
76 | EXPORT_SYMBOL_GPL(llist_add_batch); | 55 | EXPORT_SYMBOL_GPL(llist_add_batch); |
77 | 56 | ||
@@ -93,37 +72,17 @@ struct llist_node *llist_del_first(struct llist_head *head) | |||
93 | { | 72 | { |
94 | struct llist_node *entry, *old_entry, *next; | 73 | struct llist_node *entry, *old_entry, *next; |
95 | 74 | ||
96 | #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG | ||
97 | BUG_ON(in_nmi()); | ||
98 | #endif | ||
99 | |||
100 | entry = head->first; | 75 | entry = head->first; |
101 | do { | 76 | for (;;) { |
102 | if (entry == NULL) | 77 | if (entry == NULL) |
103 | return NULL; | 78 | return NULL; |
104 | old_entry = entry; | 79 | old_entry = entry; |
105 | next = entry->next; | 80 | next = entry->next; |
106 | cpu_relax(); | 81 | entry = cmpxchg(&head->first, old_entry, next); |
107 | } while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry); | 82 | if (entry == old_entry) |
83 | break; | ||
84 | } | ||
108 | 85 | ||
109 | return entry; | 86 | return entry; |
110 | } | 87 | } |
111 | EXPORT_SYMBOL_GPL(llist_del_first); | 88 | EXPORT_SYMBOL_GPL(llist_del_first); |
112 | |||
113 | /** | ||
114 | * llist_del_all - delete all entries from lock-less list | ||
115 | * @head: the head of lock-less list to delete all entries | ||
116 | * | ||
117 | * If list is empty, return NULL, otherwise, delete all entries and | ||
118 | * return the pointer to the first entry. The order of entries | ||
119 | * deleted is from the newest to the oldest added one. | ||
120 | */ | ||
121 | struct llist_node *llist_del_all(struct llist_head *head) | ||
122 | { | ||
123 | #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG | ||
124 | BUG_ON(in_nmi()); | ||
125 | #endif | ||
126 | |||
127 | return xchg(&head->first, NULL); | ||
128 | } | ||
129 | EXPORT_SYMBOL_GPL(llist_del_all); | ||
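llist_add() and llist_del_all() move out of llist.c (they become inlines in <linux/llist.h> in the same series), the in_nmi() BUG_ON()s are dropped (the NMI restriction is now enforced only by the comment above), and llist_add_batch() reports whether the list was empty before the insertion. That boolean is the point of the interface change: a producer only needs to wake the consumer on the empty-to-non-empty transition. A minimal sketch of that idiom (struct item, inbox and drain_work are illustrative; the work item is assumed to be initialised elsewhere):

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct item {
        struct llist_node node;
};

static LLIST_HEAD(inbox);
static struct work_struct drain_work;           /* assumed initialised elsewhere */

/* Producer (e.g. from IRQ context): kick the worker only on empty -> non-empty. */
static void submit_item(struct item *it)
{
        if (llist_add(&it->node, &inbox))
                schedule_work(&drain_work);
}

/* Consumer: grab everything in one xchg(); entries come back newest first. */
static void drain(struct work_struct *work)
{
        struct llist_node *n = llist_del_all(&inbox);

        while (n) {
                struct item *it = llist_entry(n, struct item, node);

                n = n->next;
                kfree(it);
        }
}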
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index 507a22fab738..7aae0f2a5e0a 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/mutex.h> | 14 | #include <linux/mutex.h> |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/module.h> | ||
18 | #include <linux/lockdep.h> | 17 | #include <linux/lockdep.h> |
19 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
20 | #include <linux/kallsyms.h> | 19 | #include <linux/kallsyms.h> |
diff --git a/lib/md5.c b/lib/md5.c --- a/lib/md5.c +++ b/lib/md5.c | |||
@@ -1,5 +1,5 @@ | |||
1 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
2 | #include <linux/module.h> | 2 | #include <linux/export.h> |
3 | #include <linux/cryptohash.h> | 3 | #include <linux/cryptohash.h> |
4 | 4 | ||
5 | #define F1(x, y, z) (z ^ (x & (y ^ z))) | 5 | #define F1(x, y, z) (z ^ (x & (y ^ z))) |
diff --git a/lib/memory-notifier-error-inject.c b/lib/memory-notifier-error-inject.c new file mode 100644 index 000000000000..e6239bf0b0df --- /dev/null +++ b/lib/memory-notifier-error-inject.c | |||
@@ -0,0 +1,48 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/memory.h> | ||
4 | |||
5 | #include "notifier-error-inject.h" | ||
6 | |||
7 | static int priority; | ||
8 | module_param(priority, int, 0); | ||
9 | MODULE_PARM_DESC(priority, "specify memory notifier priority"); | ||
10 | |||
11 | static struct notifier_err_inject memory_notifier_err_inject = { | ||
12 | .actions = { | ||
13 | { NOTIFIER_ERR_INJECT_ACTION(MEM_GOING_ONLINE) }, | ||
14 | { NOTIFIER_ERR_INJECT_ACTION(MEM_GOING_OFFLINE) }, | ||
15 | {} | ||
16 | } | ||
17 | }; | ||
18 | |||
19 | static struct dentry *dir; | ||
20 | |||
21 | static int err_inject_init(void) | ||
22 | { | ||
23 | int err; | ||
24 | |||
25 | dir = notifier_err_inject_init("memory", notifier_err_inject_dir, | ||
26 | &memory_notifier_err_inject, priority); | ||
27 | if (IS_ERR(dir)) | ||
28 | return PTR_ERR(dir); | ||
29 | |||
30 | err = register_memory_notifier(&memory_notifier_err_inject.nb); | ||
31 | if (err) | ||
32 | debugfs_remove_recursive(dir); | ||
33 | |||
34 | return err; | ||
35 | } | ||
36 | |||
37 | static void err_inject_exit(void) | ||
38 | { | ||
39 | unregister_memory_notifier(&memory_notifier_err_inject.nb); | ||
40 | debugfs_remove_recursive(dir); | ||
41 | } | ||
42 | |||
43 | module_init(err_inject_init); | ||
44 | module_exit(err_inject_exit); | ||
45 | |||
46 | MODULE_DESCRIPTION("memory notifier error injection module"); | ||
47 | MODULE_LICENSE("GPL"); | ||
48 | MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>"); | ||
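This new module wires the shared notifier-error-inject framework into the memory hotplug notifier chain, so a test setup can make MEM_GOING_ONLINE or MEM_GOING_OFFLINE fail on demand through debugfs. Stripped of the debugfs plumbing, the mechanism is just a notifier callback that answers the chosen action with notifier_from_errno(); a hand-rolled sketch of the same idea (veto_offline/veto_nb are illustrative names):

#include <linux/memory.h>
#include <linux/notifier.h>

/* Veto every attempt to offline memory by failing MEM_GOING_OFFLINE. */
static int veto_offline(struct notifier_block *nb, unsigned long action, void *data)
{
        if (action == MEM_GOING_OFFLINE)
                return notifier_from_errno(-EBUSY);
        return NOTIFY_OK;
}

static struct notifier_block veto_nb = {
        .notifier_call = veto_offline,
};

/* register_memory_notifier(&veto_nb) would hook this into the chain. */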
diff --git a/lib/memweight.c b/lib/memweight.c new file mode 100644 index 000000000000..e35fc8771893 --- /dev/null +++ b/lib/memweight.c | |||
@@ -0,0 +1,38 @@ | |||
1 | #include <linux/export.h> | ||
2 | #include <linux/bug.h> | ||
3 | #include <linux/bitmap.h> | ||
4 | |||
5 | /** | ||
6 | * memweight - count the total number of bits set in memory area | ||
7 | * @ptr: pointer to the start of the area | ||
8 | * @bytes: the size of the area | ||
9 | */ | ||
10 | size_t memweight(const void *ptr, size_t bytes) | ||
11 | { | ||
12 | size_t ret = 0; | ||
13 | size_t longs; | ||
14 | const unsigned char *bitmap = ptr; | ||
15 | |||
16 | for (; bytes > 0 && ((unsigned long)bitmap) % sizeof(long); | ||
17 | bytes--, bitmap++) | ||
18 | ret += hweight8(*bitmap); | ||
19 | |||
20 | longs = bytes / sizeof(long); | ||
21 | if (longs) { | ||
22 | BUG_ON(longs >= INT_MAX / BITS_PER_LONG); | ||
23 | ret += bitmap_weight((unsigned long *)bitmap, | ||
24 | longs * BITS_PER_LONG); | ||
25 | bytes -= longs * sizeof(long); | ||
26 | bitmap += longs * sizeof(long); | ||
27 | } | ||
28 | /* | ||
29 | * The reason that this last loop is distinct from the preceding | ||
30 | * bitmap_weight() call is to compute 1-bits in the last region smaller | ||
31 | * than sizeof(long) properly on big-endian systems. | ||
32 | */ | ||
33 | for (; bytes > 0; bytes--, bitmap++) | ||
34 | ret += hweight8(*bitmap); | ||
35 | |||
36 | return ret; | ||
37 | } | ||
38 | EXPORT_SYMBOL(memweight); | ||
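memweight() handles the unaligned head and tail one byte at a time via hweight8() and hands the aligned middle to bitmap_weight(), which is what keeps the count correct on both little- and big-endian machines. A small usage sketch (the declaration is assumed to live in <linux/string.h>, where the same series adds it):

#include <linux/bug.h>
#include <linux/string.h>       /* declares memweight() */

static void memweight_example(void)
{
        unsigned char buf[5] = { 0xff, 0x0f, 0x00, 0x01, 0x80 };

        /* 8 + 4 + 0 + 1 + 1 = 14 bits are set in this 5-byte buffer */
        WARN_ON(memweight(buf, sizeof(buf)) != 14);
}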
diff --git a/lib/mpi/Makefile b/lib/mpi/Makefile new file mode 100644 index 000000000000..019a68c90144 --- /dev/null +++ b/lib/mpi/Makefile | |||
@@ -0,0 +1,22 @@ | |||
1 | # | ||
2 | # MPI multiprecision maths library (from gpg) | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_MPILIB) = mpi.o | ||
6 | |||
7 | mpi-y = \ | ||
8 | generic_mpih-lshift.o \ | ||
9 | generic_mpih-mul1.o \ | ||
10 | generic_mpih-mul2.o \ | ||
11 | generic_mpih-mul3.o \ | ||
12 | generic_mpih-rshift.o \ | ||
13 | generic_mpih-sub1.o \ | ||
14 | generic_mpih-add1.o \ | ||
15 | mpicoder.o \ | ||
16 | mpi-bit.o \ | ||
17 | mpi-cmp.o \ | ||
18 | mpih-cmp.o \ | ||
19 | mpih-div.o \ | ||
20 | mpih-mul.o \ | ||
21 | mpi-pow.o \ | ||
22 | mpiutil.o | ||
diff --git a/lib/mpi/generic_mpih-add1.c b/lib/mpi/generic_mpih-add1.c new file mode 100644 index 000000000000..c94c7dd344b3 --- /dev/null +++ b/lib/mpi/generic_mpih-add1.c | |||
@@ -0,0 +1,61 @@ | |||
1 | /* mpihelp-add_1.c - MPI helper functions | ||
2 | * Copyright (C) 1994, 1996, 1997, 1998, | ||
3 | * 2000 Free Software Foundation, Inc. | ||
4 | * | ||
5 | * This file is part of GnuPG. | ||
6 | * | ||
7 | * GnuPG is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * GnuPG is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
20 | * | ||
21 | * Note: This code is heavily based on the GNU MP Library. | ||
22 | * Actually it's the same code with only minor changes in the | ||
23 | * way the data is stored; this is to support the abstraction | ||
24 | * of an optional secure memory allocation which may be used | ||
25 | * to avoid revealing of sensitive data due to paging etc. | ||
26 | * The GNU MP Library itself is published under the LGPL; | ||
27 | * however I decided to publish this code under the plain GPL. | ||
28 | */ | ||
29 | |||
30 | #include "mpi-internal.h" | ||
31 | #include "longlong.h" | ||
32 | |||
33 | mpi_limb_t | ||
34 | mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, | ||
35 | mpi_ptr_t s2_ptr, mpi_size_t size) | ||
36 | { | ||
37 | mpi_limb_t x, y, cy; | ||
38 | mpi_size_t j; | ||
39 | |||
40 | /* The loop counter and index J goes from -SIZE to -1. This way | ||
41 | the loop becomes faster. */ | ||
42 | j = -size; | ||
43 | |||
44 | /* Offset the base pointers to compensate for the negative indices. */ | ||
45 | s1_ptr -= j; | ||
46 | s2_ptr -= j; | ||
47 | res_ptr -= j; | ||
48 | |||
49 | cy = 0; | ||
50 | do { | ||
51 | y = s2_ptr[j]; | ||
52 | x = s1_ptr[j]; | ||
53 | y += cy; /* add previous carry to one addend */ | ||
54 | cy = y < cy; /* get out carry from that addition */ | ||
55 | y += x; /* add other addend */ | ||
56 | cy += y < x; /* get out carry from that add, combine */ | ||
57 | res_ptr[j] = y; | ||
58 | } while (++j); | ||
59 | |||
60 | return cy; | ||
61 | } | ||
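mpihelp_add_n() walks the limbs from least to most significant and detects carries with the classic unsigned-overflow test (a sum smaller than one of its operands means the addition wrapped), so it needs no CPU carry flag. The trick is easy to check in isolation; a standalone sketch with fixed 64-bit limbs, outside the MPI type machinery:

#include <stdint.h>
#include <stdio.h>

/* Add two n-limb numbers stored little-endian (limb 0 = least significant). */
static uint64_t limb_add_n(uint64_t *res, const uint64_t *a, const uint64_t *b, int n)
{
        uint64_t cy = 0;

        for (int i = 0; i < n; i++) {
                uint64_t y = b[i] + cy;         /* add incoming carry */
                uint64_t c = y < cy;            /* carry out of that addition */
                y += a[i];                      /* add the other addend */
                c += y < a[i];                  /* carry out of the second addition */
                res[i] = y;
                cy = c;
        }
        return cy;                              /* final carry out of the top limb */
}

int main(void)
{
        uint64_t a[2] = { ~0ULL, 0 }, b[2] = { 1, 0 }, r[2];

        /* 0x0000...FFFF + 1 = 0x0001...0000: the carry ripples into limb 1 */
        uint64_t cy = limb_add_n(r, a, b, 2);
        printf("%llu %llx %llx\n", (unsigned long long)cy,
               (unsigned long long)r[1], (unsigned long long)r[0]);     /* 0 1 0 */
        return 0;
}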
diff --git a/lib/mpi/generic_mpih-lshift.c b/lib/mpi/generic_mpih-lshift.c new file mode 100644 index 000000000000..86318927231a --- /dev/null +++ b/lib/mpi/generic_mpih-lshift.c | |||
@@ -0,0 +1,63 @@ | |||
1 | /* mpihelp-lshift.c - MPI helper functions | ||
2 | * Copyright (C) 1994, 1996, 1998, 2001 Free Software Foundation, Inc. | ||
3 | * | ||
4 | * This file is part of GnuPG. | ||
5 | * | ||
6 | * GnuPG is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * GnuPG is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
19 | * | ||
20 | * Note: This code is heavily based on the GNU MP Library. | ||
21 | * Actually it's the same code with only minor changes in the | ||
22 | * way the data is stored; this is to support the abstraction | ||
23 | * of an optional secure memory allocation which may be used | ||
24 | * to avoid revealing of sensitive data due to paging etc. | ||
25 | * The GNU MP Library itself is published under the LGPL; | ||
26 | * however I decided to publish this code under the plain GPL. | ||
27 | */ | ||
28 | |||
29 | #include "mpi-internal.h" | ||
30 | |||
31 | /* Shift U (pointed to by UP and USIZE digits long) CNT bits to the left | ||
32 | * and store the USIZE least significant digits of the result at WP. | ||
33 | * Return the bits shifted out from the most significant digit. | ||
34 | * | ||
35 | * Argument constraints: | ||
36 | * 1. 0 < CNT < BITS_PER_MP_LIMB | ||
37 | * 2. If the result is to be written over the input, WP must be >= UP. | ||
38 | */ | ||
39 | |||
40 | mpi_limb_t | ||
41 | mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned int cnt) | ||
42 | { | ||
43 | mpi_limb_t high_limb, low_limb; | ||
44 | unsigned sh_1, sh_2; | ||
45 | mpi_size_t i; | ||
46 | mpi_limb_t retval; | ||
47 | |||
48 | sh_1 = cnt; | ||
49 | wp += 1; | ||
50 | sh_2 = BITS_PER_MPI_LIMB - sh_1; | ||
51 | i = usize - 1; | ||
52 | low_limb = up[i]; | ||
53 | retval = low_limb >> sh_2; | ||
54 | high_limb = low_limb; | ||
55 | while (--i >= 0) { | ||
56 | low_limb = up[i]; | ||
57 | wp[i] = (high_limb << sh_1) | (low_limb >> sh_2); | ||
58 | high_limb = low_limb; | ||
59 | } | ||
60 | wp[i] = high_limb << sh_1; | ||
61 | |||
62 | return retval; | ||
63 | } | ||
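mpihelp_lshift() requires 0 < cnt < BITS_PER_MPI_LIMB, walks from the most significant limb downwards (which is why an in-place shift needs wp >= up), and returns the bits pushed out of the top limb. A two-limb check with 64-bit limbs:

#include <assert.h>
#include <stdint.h>

/* Two-limb left shift by cnt (0 < cnt < 64), mirroring the walk above. */
static uint64_t lshift2(uint64_t w[2], const uint64_t u[2], unsigned cnt)
{
        uint64_t spill = u[1] >> (64 - cnt);    /* bits pushed out of the top limb */

        w[1] = (u[1] << cnt) | (u[0] >> (64 - cnt));
        w[0] = u[0] << cnt;
        return spill;
}

int main(void)
{
        uint64_t u[2] = { 0x8000000000000001ULL, 0x1ULL }, w[2];

        assert(lshift2(w, u, 4) == 0);
        assert(w[1] == 0x18 && w[0] == 0x10);   /* 0x1_8000...0001 << 4 */
        return 0;
}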
diff --git a/lib/mpi/generic_mpih-mul1.c b/lib/mpi/generic_mpih-mul1.c new file mode 100644 index 000000000000..1668dfd9092c --- /dev/null +++ b/lib/mpi/generic_mpih-mul1.c | |||
@@ -0,0 +1,57 @@ | |||
1 | /* mpihelp-mul_1.c - MPI helper functions | ||
2 | * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc. | ||
3 | * | ||
4 | * This file is part of GnuPG. | ||
5 | * | ||
6 | * GnuPG is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * GnuPG is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
19 | * | ||
20 | * Note: This code is heavily based on the GNU MP Library. | ||
21 | * Actually it's the same code with only minor changes in the | ||
22 | * way the data is stored; this is to support the abstraction | ||
23 | * of an optional secure memory allocation which may be used | ||
24 | * to avoid revealing of sensitive data due to paging etc. | ||
25 | * The GNU MP Library itself is published under the LGPL; | ||
26 | * however I decided to publish this code under the plain GPL. | ||
27 | */ | ||
28 | |||
29 | #include "mpi-internal.h" | ||
30 | #include "longlong.h" | ||
31 | |||
32 | mpi_limb_t | ||
33 | mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, | ||
34 | mpi_limb_t s2_limb) | ||
35 | { | ||
36 | mpi_limb_t cy_limb; | ||
37 | mpi_size_t j; | ||
38 | mpi_limb_t prod_high, prod_low; | ||
39 | |||
40 | /* The loop counter and index J goes from -S1_SIZE to -1. This way | ||
41 | * the loop becomes faster. */ | ||
42 | j = -s1_size; | ||
43 | |||
44 | /* Offset the base pointers to compensate for the negative indices. */ | ||
45 | s1_ptr -= j; | ||
46 | res_ptr -= j; | ||
47 | |||
48 | cy_limb = 0; | ||
49 | do { | ||
50 | umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb); | ||
51 | prod_low += cy_limb; | ||
52 | cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high; | ||
53 | res_ptr[j] = prod_low; | ||
54 | } while (++j); | ||
55 | |||
56 | return cy_limb; | ||
57 | } | ||
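mpihelp_mul_1() multiplies an n-limb number by a single limb: umul_ppmm() from longlong.h yields the double-width product of each limb pair, and the running carry is folded into the low half. On compilers with a 128-bit integer type the same computation can be written directly; a sketch assuming GCC/Clang's unsigned __int128:

#include <stdint.h>

/* res = a * m, where a is n limbs little-endian; returns the carry-out limb. */
static uint64_t limb_mul_1(uint64_t *res, const uint64_t *a, int n, uint64_t m)
{
        uint64_t cy = 0;

        for (int i = 0; i < n; i++) {
                unsigned __int128 p = (unsigned __int128)a[i] * m + cy;

                res[i] = (uint64_t)p;           /* low half, like prod_low */
                cy = (uint64_t)(p >> 64);       /* high half + carry, like cy_limb */
        }
        return cy;
}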
diff --git a/lib/mpi/generic_mpih-mul2.c b/lib/mpi/generic_mpih-mul2.c new file mode 100644 index 000000000000..8a7b29ee1740 --- /dev/null +++ b/lib/mpi/generic_mpih-mul2.c | |||
@@ -0,0 +1,60 @@ | |||
1 | /* mpihelp-mul_2.c - MPI helper functions | ||
2 | * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc. | ||
3 | * | ||
4 | * This file is part of GnuPG. | ||
5 | * | ||
6 | * GnuPG is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * GnuPG is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
19 | * | ||
20 | * Note: This code is heavily based on the GNU MP Library. | ||
21 | * Actually it's the same code with only minor changes in the | ||
22 | * way the data is stored; this is to support the abstraction | ||
23 | * of an optional secure memory allocation which may be used | ||
24 | * to avoid revealing of sensitive data due to paging etc. | ||
25 | * The GNU MP Library itself is published under the LGPL; | ||
26 | * however I decided to publish this code under the plain GPL. | ||
27 | */ | ||
28 | |||
29 | #include "mpi-internal.h" | ||
30 | #include "longlong.h" | ||
31 | |||
32 | mpi_limb_t | ||
33 | mpihelp_addmul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, | ||
34 | mpi_size_t s1_size, mpi_limb_t s2_limb) | ||
35 | { | ||
36 | mpi_limb_t cy_limb; | ||
37 | mpi_size_t j; | ||
38 | mpi_limb_t prod_high, prod_low; | ||
39 | mpi_limb_t x; | ||
40 | |||
41 | /* The loop counter and index J goes from -SIZE to -1. This way | ||
42 | * the loop becomes faster. */ | ||
43 | j = -s1_size; | ||
44 | res_ptr -= j; | ||
45 | s1_ptr -= j; | ||
46 | |||
47 | cy_limb = 0; | ||
48 | do { | ||
49 | umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb); | ||
50 | |||
51 | prod_low += cy_limb; | ||
52 | cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high; | ||
53 | |||
54 | x = res_ptr[j]; | ||
55 | prod_low = x + prod_low; | ||
56 | cy_limb += prod_low < x ? 1 : 0; | ||
57 | res_ptr[j] = prod_low; | ||
58 | } while (++j); | ||
59 | return cy_limb; | ||
60 | } | ||
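mpihelp_addmul_1() is the multiply-accumulate variant, res += a * limb, which is the inner step of the schoolbook multiplication used by mpih-mul.c elsewhere in this library. Continuing the 128-bit sketch:

#include <stdint.h>

/* res += a * m for an n-limb a; returns the limb carried out of the top. */
static uint64_t limb_addmul_1(uint64_t *res, const uint64_t *a, int n, uint64_t m)
{
        uint64_t cy = 0;

        for (int i = 0; i < n; i++) {
                unsigned __int128 p = (unsigned __int128)a[i] * m + cy + res[i];

                res[i] = (uint64_t)p;
                cy = (uint64_t)(p >> 64);
        }
        return cy;
}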
diff --git a/lib/mpi/generic_mpih-mul3.c b/lib/mpi/generic_mpih-mul3.c new file mode 100644 index 000000000000..f96df327be63 --- /dev/null +++ b/lib/mpi/generic_mpih-mul3.c | |||
@@ -0,0 +1,61 @@ | |||
1 | /* mpihelp-mul_3.c - MPI helper functions | ||
2 | * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc. | ||
3 | * | ||
4 | * This file is part of GnuPG. | ||
5 | * | ||
6 | * GnuPG is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * GnuPG is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
19 | * | ||
20 | * Note: This code is heavily based on the GNU MP Library. | ||
21 | * Actually it's the same code with only minor changes in the | ||
22 | * way the data is stored; this is to support the abstraction | ||
23 | * of an optional secure memory allocation which may be used | ||
24 | * to avoid revealing of sensitive data due to paging etc. | ||
25 | * The GNU MP Library itself is published under the LGPL; | ||
26 | * however I decided to publish this code under the plain GPL. | ||
27 | */ | ||
28 | |||
29 | #include "mpi-internal.h" | ||
30 | #include "longlong.h" | ||
31 | |||
32 | mpi_limb_t | ||
33 | mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, | ||
34 | mpi_size_t s1_size, mpi_limb_t s2_limb) | ||
35 | { | ||
36 | mpi_limb_t cy_limb; | ||
37 | mpi_size_t j; | ||
38 | mpi_limb_t prod_high, prod_low; | ||
39 | mpi_limb_t x; | ||
40 | |||
41 | /* The loop counter and index J goes from -SIZE to -1. This way | ||
42 | * the loop becomes faster. */ | ||
43 | j = -s1_size; | ||
44 | res_ptr -= j; | ||
45 | s1_ptr -= j; | ||
46 | |||
47 | cy_limb = 0; | ||
48 | do { | ||
49 | umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb); | ||
50 | |||
51 | prod_low += cy_limb; | ||
52 | cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high; | ||
53 | |||
54 | x = res_ptr[j]; | ||
55 | prod_low = x - prod_low; | ||
56 | cy_limb += prod_low > x ? 1 : 0; | ||
57 | res_ptr[j] = prod_low; | ||
58 | } while (++j); | ||
59 | |||
60 | return cy_limb; | ||
61 | } | ||
diff --git a/lib/mpi/generic_mpih-rshift.c b/lib/mpi/generic_mpih-rshift.c new file mode 100644 index 000000000000..ffa328818ca6 --- /dev/null +++ b/lib/mpi/generic_mpih-rshift.c | |||
@@ -0,0 +1,63 @@ | |||
1 | /* mpih-rshift.c - MPI helper functions | ||
2 | * Copyright (C) 1994, 1996, 1998, 1999, | ||
3 | * 2000, 2001 Free Software Foundation, Inc. | ||
4 | * | ||
5 | * This file is part of GNUPG | ||
6 | * | ||
7 | * GNUPG is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * GNUPG is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
20 | * | ||
21 | * Note: This code is heavily based on the GNU MP Library. | ||
22 | * Actually it's the same code with only minor changes in the | ||
23 | * way the data is stored; this is to support the abstraction | ||
24 | * of an optional secure memory allocation which may be used | ||
25 | * to avoid revealing of sensitive data due to paging etc. | ||
26 | * The GNU MP Library itself is published under the LGPL; | ||
27 | * however I decided to publish this code under the plain GPL. | ||
28 | */ | ||
29 | |||
30 | #include "mpi-internal.h" | ||
31 | |||
32 | /* Shift U (pointed to by UP and USIZE limbs long) CNT bits to the right | ||
33 | * and store the USIZE least significant limbs of the result at WP. | ||
34 | * The bits shifted out to the right are returned. | ||
35 | * | ||
36 | * Argument constraints: | ||
37 | * 1. 0 < CNT < BITS_PER_MP_LIMB | ||
38 | * 2. If the result is to be written over the input, WP must be <= UP. | ||
39 | */ | ||
40 | |||
41 | mpi_limb_t | ||
42 | mpihelp_rshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned cnt) | ||
43 | { | ||
44 | mpi_limb_t high_limb, low_limb; | ||
45 | unsigned sh_1, sh_2; | ||
46 | mpi_size_t i; | ||
47 | mpi_limb_t retval; | ||
48 | |||
49 | sh_1 = cnt; | ||
50 | wp -= 1; | ||
51 | sh_2 = BITS_PER_MPI_LIMB - sh_1; | ||
52 | high_limb = up[0]; | ||
53 | retval = high_limb << sh_2; | ||
54 | low_limb = high_limb; | ||
55 | for (i = 1; i < usize; i++) { | ||
56 | high_limb = up[i]; | ||
57 | wp[i] = (low_limb >> sh_1) | (high_limb << sh_2); | ||
58 | low_limb = high_limb; | ||
59 | } | ||
60 | wp[i] = low_limb >> sh_1; | ||
61 | |||
62 | return retval; | ||
63 | } | ||
diff --git a/lib/mpi/generic_mpih-sub1.c b/lib/mpi/generic_mpih-sub1.c new file mode 100644 index 000000000000..5d98ab7d6853 --- /dev/null +++ b/lib/mpi/generic_mpih-sub1.c | |||
@@ -0,0 +1,60 @@ | |||
1 | /* mpihelp-add_2.c - MPI helper functions | ||
2 | * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc. | ||
3 | * | ||
4 | * This file is part of GnuPG. | ||
5 | * | ||
6 | * GnuPG is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * GnuPG is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
19 | * | ||
20 | * Note: This code is heavily based on the GNU MP Library. | ||
21 | * Actually it's the same code with only minor changes in the | ||
22 | * way the data is stored; this is to support the abstraction | ||
23 | * of an optional secure memory allocation which may be used | ||
24 | * to avoid revealing of sensitive data due to paging etc. | ||
25 | * The GNU MP Library itself is published under the LGPL; | ||
26 | * however I decided to publish this code under the plain GPL. | ||
27 | */ | ||
28 | |||
29 | #include "mpi-internal.h" | ||
30 | #include "longlong.h" | ||
31 | |||
32 | mpi_limb_t | ||
33 | mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, | ||
34 | mpi_ptr_t s2_ptr, mpi_size_t size) | ||
35 | { | ||
36 | mpi_limb_t x, y, cy; | ||
37 | mpi_size_t j; | ||
38 | |||
39 | /* The loop counter and index J goes from -SIZE to -1. This way | ||
40 | the loop becomes faster. */ | ||
41 | j = -size; | ||
42 | |||
43 | /* Offset the base pointers to compensate for the negative indices. */ | ||
44 | s1_ptr -= j; | ||
45 | s2_ptr -= j; | ||
46 | res_ptr -= j; | ||
47 | |||
48 | cy = 0; | ||
49 | do { | ||
50 | y = s2_ptr[j]; | ||
51 | x = s1_ptr[j]; | ||
52 | y += cy; /* add previous carry to subtrahend */ | ||
53 | cy = y < cy; /* get out carry from that addition */ | ||
54 | y = x - y; /* main subtract */ | ||
55 | cy += y > x; /* get out carry from the subtract, combine */ | ||
56 | res_ptr[j] = y; | ||
57 | } while (++j); | ||
58 | |||
59 | return cy; | ||
60 | } | ||
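mpihelp_sub_n() mirrors the addition helper, detecting borrows with the complementary test (a difference larger than the minuend means the subtraction wrapped). The same standalone 64-bit sketch:

#include <stdint.h>

/* res = a - b over n limbs, little-endian; returns the final borrow (0 or 1). */
static uint64_t limb_sub_n(uint64_t *res, const uint64_t *a, const uint64_t *b, int n)
{
        uint64_t cy = 0;

        for (int i = 0; i < n; i++) {
                uint64_t y = b[i] + cy;         /* fold the borrow into the subtrahend */
                uint64_t c = y < cy;            /* that addition itself may wrap */
                uint64_t d = a[i] - y;          /* main subtract */

                c += d > a[i];                  /* borrow out of the subtract */
                res[i] = d;
                cy = c;
        }
        return cy;
}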
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h new file mode 100644 index 000000000000..678ce4f1e124 --- /dev/null +++ b/lib/mpi/longlong.h | |||
@@ -0,0 +1,1366 @@ | |||
1 | /* longlong.h -- definitions for mixed size 32/64 bit arithmetic. | ||
2 | * Note: I added some stuff for use with gnupg | ||
3 | * | ||
4 | * Copyright (C) 1991, 1992, 1993, 1994, 1996, 1998, | ||
5 | * 2000, 2001, 2002, 2003 Free Software Foundation, Inc. | ||
6 | * | ||
7 | * This file is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU Library General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or (at your | ||
10 | * option) any later version. | ||
11 | * | ||
12 | * This file is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | ||
14 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public | ||
15 | * License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU Library General Public License | ||
18 | * along with this file; see the file COPYING.LIB. If not, write to | ||
19 | * the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
20 | * MA 02111-1307, USA. */ | ||
21 | |||
22 | #include <asm-generic/bitops/count_zeros.h> | ||
23 | |||
24 | /* You have to define the following before including this file: | ||
25 | * | ||
26 | * UWtype -- An unsigned type, default type for operations (typically a "word") | ||
27 | * UHWtype -- An unsigned type, at least half the size of UWtype. | ||
28 | * UDWtype -- An unsigned type, at least twice as large a UWtype | ||
29 | * W_TYPE_SIZE -- size in bits of UWtype | ||
30 | * | ||
31 | * SItype, USItype -- Signed and unsigned 32 bit types. | ||
32 | * DItype, UDItype -- Signed and unsigned 64 bit types. | ||
33 | * | ||
34 | * On a 32 bit machine UWtype should typically be USItype; | ||
35 | * on a 64 bit machine, UWtype should typically be UDItype. | ||
36 | */ | ||
37 | |||
38 | #define __BITS4 (W_TYPE_SIZE / 4) | ||
39 | #define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2)) | ||
40 | #define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1)) | ||
41 | #define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2)) | ||
42 | |||
43 | /* This is used to make sure no undesirable sharing between different libraries | ||
44 | that use this file takes place. */ | ||
45 | #ifndef __MPN | ||
46 | #define __MPN(x) __##x | ||
47 | #endif | ||
48 | |||
49 | /* Define auxiliary asm macros. | ||
50 | * | ||
51 | * 1) umul_ppmm(high_prod, low_prod, multipler, multiplicand) multiplies two | ||
52 | * UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype | ||
53 | * word product in HIGH_PROD and LOW_PROD. | ||
54 | * | ||
55 | * 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a | ||
56 | * UDWtype product. This is just a variant of umul_ppmm. | ||
57 | |||
58 | * 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator, | ||
59 | * denominator) divides a UDWtype, composed by the UWtype integers | ||
60 | * HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient | ||
61 | * in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less | ||
62 | * than DENOMINATOR for correct operation. If, in addition, the most | ||
63 | * significant bit of DENOMINATOR must be 1, then the pre-processor symbol | ||
64 | * UDIV_NEEDS_NORMALIZATION is defined to 1. | ||
65 | * 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator, | ||
66 | * denominator). Like udiv_qrnnd but the numbers are signed. The quotient | ||
67 | * is rounded towards 0. | ||
68 | * | ||
69 | * 5) count_leading_zeros(count, x) counts the number of zero-bits from the | ||
70 | * msb to the first non-zero bit in the UWtype X. This is the number of | ||
71 | * steps X needs to be shifted left to set the msb. Undefined for X == 0, | ||
72 | * unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value. | ||
73 | * | ||
74 | * 6) count_trailing_zeros(count, x) like count_leading_zeros, but counts | ||
75 | * from the least significant end. | ||
76 | * | ||
77 | * 7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1, | ||
78 | * high_addend_2, low_addend_2) adds two UWtype integers, composed by | ||
79 | * HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2 | ||
80 | * respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow | ||
81 | * (i.e. carry out) is not stored anywhere, and is lost. | ||
82 | * | ||
83 | * 8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend, | ||
84 | * high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers, | ||
85 | * composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and | ||
86 | * LOW_SUBTRAHEND_2 respectively. The result is placed in HIGH_DIFFERENCE | ||
87 | * and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere, | ||
88 | * and is lost. | ||
89 | * | ||
90 | * If any of these macros are left undefined for a particular CPU, | ||
91 | * C macros are used. */ | ||
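/*
 * For orientation: when none of the per-CPU inline-asm definitions below
 * apply, these operations fall back to portable C (the real generic
 * fallbacks appear further down in this header). A minimal sketch of what
 * such a fallback looks like for add_ssaaaa(), using only the
 * unsigned-overflow carry test:
 */
#if 0   /* illustration only, not part of the original header */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
do { \
        UWtype __x = (al) + (bl); \
        (sh) = (ah) + (bh) + (__x < (al)); \
        (sl) = __x; \
} while (0)
#endif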
92 | |||
93 | /* The CPUs come in alphabetical order below. | ||
94 | * | ||
95 | * Please add support for more CPUs here, or improve the current support | ||
96 | * for the CPUs below! */ | ||
97 | |||
98 | #if defined(__GNUC__) && !defined(NO_ASM) | ||
99 | |||
100 | /* We sometimes need to clobber "cc" with gcc2, but that would not be | ||
101 | understood by gcc1. Use cpp to avoid major code duplication. */ | ||
102 | #if __GNUC__ < 2 | ||
103 | #define __CLOBBER_CC | ||
104 | #define __AND_CLOBBER_CC | ||
105 | #else /* __GNUC__ >= 2 */ | ||
106 | #define __CLOBBER_CC : "cc" | ||
107 | #define __AND_CLOBBER_CC , "cc" | ||
108 | #endif /* __GNUC__ < 2 */ | ||
109 | |||
110 | /*************************************** | ||
111 | ************** A29K ***************** | ||
112 | ***************************************/ | ||
113 | #if (defined(__a29k__) || defined(_AM29K)) && W_TYPE_SIZE == 32 | ||
114 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
115 | __asm__ ("add %1,%4,%5\n" \ | ||
116 | "addc %0,%2,%3" \ | ||
117 | : "=r" ((USItype)(sh)), \ | ||
118 | "=&r" ((USItype)(sl)) \ | ||
119 | : "%r" ((USItype)(ah)), \ | ||
120 | "rI" ((USItype)(bh)), \ | ||
121 | "%r" ((USItype)(al)), \ | ||
122 | "rI" ((USItype)(bl))) | ||
123 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
124 | __asm__ ("sub %1,%4,%5\n" \ | ||
125 | "subc %0,%2,%3" \ | ||
126 | : "=r" ((USItype)(sh)), \ | ||
127 | "=&r" ((USItype)(sl)) \ | ||
128 | : "r" ((USItype)(ah)), \ | ||
129 | "rI" ((USItype)(bh)), \ | ||
130 | "r" ((USItype)(al)), \ | ||
131 | "rI" ((USItype)(bl))) | ||
132 | #define umul_ppmm(xh, xl, m0, m1) \ | ||
133 | do { \ | ||
134 | USItype __m0 = (m0), __m1 = (m1); \ | ||
135 | __asm__ ("multiplu %0,%1,%2" \ | ||
136 | : "=r" ((USItype)(xl)) \ | ||
137 | : "r" (__m0), \ | ||
138 | "r" (__m1)); \ | ||
139 | __asm__ ("multmu %0,%1,%2" \ | ||
140 | : "=r" ((USItype)(xh)) \ | ||
141 | : "r" (__m0), \ | ||
142 | "r" (__m1)); \ | ||
143 | } while (0) | ||
144 | #define udiv_qrnnd(q, r, n1, n0, d) \ | ||
145 | __asm__ ("dividu %0,%3,%4" \ | ||
146 | : "=r" ((USItype)(q)), \ | ||
147 | "=q" ((USItype)(r)) \ | ||
148 | : "1" ((USItype)(n1)), \ | ||
149 | "r" ((USItype)(n0)), \ | ||
150 | "r" ((USItype)(d))) | ||
151 | #endif /* __a29k__ */ | ||
152 | |||
153 | #if defined(__alpha) && W_TYPE_SIZE == 64 | ||
154 | #define umul_ppmm(ph, pl, m0, m1) \ | ||
155 | do { \ | ||
156 | UDItype __m0 = (m0), __m1 = (m1); \ | ||
157 | __asm__ ("umulh %r1,%2,%0" \ | ||
158 | : "=r" ((UDItype) ph) \ | ||
159 | : "%rJ" (__m0), \ | ||
160 | "rI" (__m1)); \ | ||
161 | (pl) = __m0 * __m1; \ | ||
162 | } while (0) | ||
163 | #define UMUL_TIME 46 | ||
164 | #ifndef LONGLONG_STANDALONE | ||
165 | #define udiv_qrnnd(q, r, n1, n0, d) \ | ||
166 | do { UDItype __r; \ | ||
167 | (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \ | ||
168 | (r) = __r; \ | ||
169 | } while (0) | ||
170 | extern UDItype __udiv_qrnnd(); | ||
171 | #define UDIV_TIME 220 | ||
172 | #endif /* LONGLONG_STANDALONE */ | ||
173 | #endif /* __alpha */ | ||
174 | |||
175 | /*************************************** | ||
176 | ************** ARM ****************** | ||
177 | ***************************************/ | ||
178 | #if defined(__arm__) && W_TYPE_SIZE == 32 | ||
179 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
180 | __asm__ ("adds %1, %4, %5\n" \ | ||
181 | "adc %0, %2, %3" \ | ||
182 | : "=r" ((USItype)(sh)), \ | ||
183 | "=&r" ((USItype)(sl)) \ | ||
184 | : "%r" ((USItype)(ah)), \ | ||
185 | "rI" ((USItype)(bh)), \ | ||
186 | "%r" ((USItype)(al)), \ | ||
187 | "rI" ((USItype)(bl))) | ||
188 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
189 | __asm__ ("subs %1, %4, %5\n" \ | ||
190 | "sbc %0, %2, %3" \ | ||
191 | : "=r" ((USItype)(sh)), \ | ||
192 | "=&r" ((USItype)(sl)) \ | ||
193 | : "r" ((USItype)(ah)), \ | ||
194 | "rI" ((USItype)(bh)), \ | ||
195 | "r" ((USItype)(al)), \ | ||
196 | "rI" ((USItype)(bl))) | ||
197 | #if defined __ARM_ARCH_2__ || defined __ARM_ARCH_3__ | ||
198 | #define umul_ppmm(xh, xl, a, b) \ | ||
199 | __asm__ ("%@ Inlined umul_ppmm\n" \ | ||
200 | "mov %|r0, %2, lsr #16 @ AAAA\n" \ | ||
201 | "mov %|r2, %3, lsr #16 @ BBBB\n" \ | ||
202 | "bic %|r1, %2, %|r0, lsl #16 @ aaaa\n" \ | ||
203 | "bic %0, %3, %|r2, lsl #16 @ bbbb\n" \ | ||
204 | "mul %1, %|r1, %|r2 @ aaaa * BBBB\n" \ | ||
205 | "mul %|r2, %|r0, %|r2 @ AAAA * BBBB\n" \ | ||
206 | "mul %|r1, %0, %|r1 @ aaaa * bbbb\n" \ | ||
207 | "mul %0, %|r0, %0 @ AAAA * bbbb\n" \ | ||
208 | "adds %|r0, %1, %0 @ central sum\n" \ | ||
209 | "addcs %|r2, %|r2, #65536\n" \ | ||
210 | "adds %1, %|r1, %|r0, lsl #16\n" \ | ||
211 | "adc %0, %|r2, %|r0, lsr #16" \ | ||
212 | : "=&r" ((USItype)(xh)), \ | ||
213 | "=r" ((USItype)(xl)) \ | ||
214 | : "r" ((USItype)(a)), \ | ||
215 | "r" ((USItype)(b)) \ | ||
216 | : "r0", "r1", "r2") | ||
217 | #else | ||
218 | #define umul_ppmm(xh, xl, a, b) \ | ||
219 | __asm__ ("%@ Inlined umul_ppmm\n" \ | ||
220 | "umull %r1, %r0, %r2, %r3" \ | ||
221 | : "=&r" ((USItype)(xh)), \ | ||
222 | "=r" ((USItype)(xl)) \ | ||
223 | : "r" ((USItype)(a)), \ | ||
224 | "r" ((USItype)(b)) \ | ||
225 | : "r0", "r1") | ||
226 | #endif | ||
227 | #define UMUL_TIME 20 | ||
228 | #define UDIV_TIME 100 | ||
229 | #endif /* __arm__ */ | ||
230 | |||
231 | /*************************************** | ||
232 | ************** CLIPPER ************** | ||
233 | ***************************************/ | ||
234 | #if defined(__clipper__) && W_TYPE_SIZE == 32 | ||
235 | #define umul_ppmm(w1, w0, u, v) \ | ||
236 | ({union {UDItype __ll; \ | ||
237 | struct {USItype __l, __h; } __i; \ | ||
238 | } __xx; \ | ||
239 | __asm__ ("mulwux %2,%0" \ | ||
240 | : "=r" (__xx.__ll) \ | ||
241 | : "%0" ((USItype)(u)), \ | ||
242 | "r" ((USItype)(v))); \ | ||
243 | (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; }) | ||
244 | #define smul_ppmm(w1, w0, u, v) \ | ||
245 | ({union {DItype __ll; \ | ||
246 | struct {SItype __l, __h; } __i; \ | ||
247 | } __xx; \ | ||
248 | __asm__ ("mulwx %2,%0" \ | ||
249 | : "=r" (__xx.__ll) \ | ||
250 | : "%0" ((SItype)(u)), \ | ||
251 | "r" ((SItype)(v))); \ | ||
252 | (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; }) | ||
253 | #define __umulsidi3(u, v) \ | ||
254 | ({UDItype __w; \ | ||
255 | __asm__ ("mulwux %2,%0" \ | ||
256 | : "=r" (__w) \ | ||
257 | : "%0" ((USItype)(u)), \ | ||
258 | "r" ((USItype)(v))); \ | ||
259 | __w; }) | ||
260 | #endif /* __clipper__ */ | ||
261 | |||
262 | /*************************************** | ||
263 | ************** GMICRO *************** | ||
264 | ***************************************/ | ||
265 | #if defined(__gmicro__) && W_TYPE_SIZE == 32 | ||
266 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
267 | __asm__ ("add.w %5,%1\n" \ | ||
268 | "addx %3,%0" \ | ||
269 | : "=g" ((USItype)(sh)), \ | ||
270 | "=&g" ((USItype)(sl)) \ | ||
271 | : "%0" ((USItype)(ah)), \ | ||
272 | "g" ((USItype)(bh)), \ | ||
273 | "%1" ((USItype)(al)), \ | ||
274 | "g" ((USItype)(bl))) | ||
275 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
276 | __asm__ ("sub.w %5,%1\n" \ | ||
277 | "subx %3,%0" \ | ||
278 | : "=g" ((USItype)(sh)), \ | ||
279 | "=&g" ((USItype)(sl)) \ | ||
280 | : "0" ((USItype)(ah)), \ | ||
281 | "g" ((USItype)(bh)), \ | ||
282 | "1" ((USItype)(al)), \ | ||
283 | "g" ((USItype)(bl))) | ||
284 | #define umul_ppmm(ph, pl, m0, m1) \ | ||
285 | __asm__ ("mulx %3,%0,%1" \ | ||
286 | : "=g" ((USItype)(ph)), \ | ||
287 | "=r" ((USItype)(pl)) \ | ||
288 | : "%0" ((USItype)(m0)), \ | ||
289 | "g" ((USItype)(m1))) | ||
290 | #define udiv_qrnnd(q, r, nh, nl, d) \ | ||
291 | __asm__ ("divx %4,%0,%1" \ | ||
292 | : "=g" ((USItype)(q)), \ | ||
293 | "=r" ((USItype)(r)) \ | ||
294 | : "1" ((USItype)(nh)), \ | ||
295 | "0" ((USItype)(nl)), \ | ||
296 | "g" ((USItype)(d))) | ||
297 | #endif | ||
298 | |||
299 | /*************************************** | ||
300 | ************** HPPA ***************** | ||
301 | ***************************************/ | ||
302 | #if defined(__hppa) && W_TYPE_SIZE == 32 | ||
303 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
304 | __asm__ ("add %4,%5,%1\n" \ | ||
305 | "addc %2,%3,%0" \ | ||
306 | : "=r" ((USItype)(sh)), \ | ||
307 | "=&r" ((USItype)(sl)) \ | ||
308 | : "%rM" ((USItype)(ah)), \ | ||
309 | "rM" ((USItype)(bh)), \ | ||
310 | "%rM" ((USItype)(al)), \ | ||
311 | "rM" ((USItype)(bl))) | ||
312 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
313 | __asm__ ("sub %4,%5,%1\n" \ | ||
314 | "subb %2,%3,%0" \ | ||
315 | : "=r" ((USItype)(sh)), \ | ||
316 | "=&r" ((USItype)(sl)) \ | ||
317 | : "rM" ((USItype)(ah)), \ | ||
318 | "rM" ((USItype)(bh)), \ | ||
319 | "rM" ((USItype)(al)), \ | ||
320 | "rM" ((USItype)(bl))) | ||
321 | #if defined(_PA_RISC1_1) | ||
322 | #define umul_ppmm(wh, wl, u, v) \ | ||
323 | do { \ | ||
324 | union {UDItype __ll; \ | ||
325 | struct {USItype __h, __l; } __i; \ | ||
326 | } __xx; \ | ||
327 | __asm__ ("xmpyu %1,%2,%0" \ | ||
328 | : "=*f" (__xx.__ll) \ | ||
329 | : "*f" ((USItype)(u)), \ | ||
330 | "*f" ((USItype)(v))); \ | ||
331 | (wh) = __xx.__i.__h; \ | ||
332 | (wl) = __xx.__i.__l; \ | ||
333 | } while (0) | ||
334 | #define UMUL_TIME 8 | ||
335 | #define UDIV_TIME 60 | ||
336 | #else | ||
337 | #define UMUL_TIME 40 | ||
338 | #define UDIV_TIME 80 | ||
339 | #endif | ||
340 | #ifndef LONGLONG_STANDALONE | ||
341 | #define udiv_qrnnd(q, r, n1, n0, d) \ | ||
342 | do { USItype __r; \ | ||
343 | (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \ | ||
344 | (r) = __r; \ | ||
345 | } while (0) | ||
346 | extern USItype __udiv_qrnnd(); | ||
347 | #endif /* LONGLONG_STANDALONE */ | ||
348 | #endif /* hppa */ | ||
349 | |||
350 | /*************************************** | ||
351 | ************** I370 ***************** | ||
352 | ***************************************/ | ||
353 | #if (defined(__i370__) || defined(__mvs__)) && W_TYPE_SIZE == 32 | ||
354 | #define umul_ppmm(xh, xl, m0, m1) \ | ||
355 | do { \ | ||
356 | union {UDItype __ll; \ | ||
357 | struct {USItype __h, __l; } __i; \ | ||
358 | } __xx; \ | ||
359 | USItype __m0 = (m0), __m1 = (m1); \ | ||
360 | __asm__ ("mr %0,%3" \ | ||
361 | : "=r" (__xx.__i.__h), \ | ||
362 | "=r" (__xx.__i.__l) \ | ||
363 | : "%1" (__m0), \ | ||
364 | "r" (__m1)); \ | ||
365 | (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \ | ||
366 | (xh) += ((((SItype) __m0 >> 31) & __m1) \ | ||
367 | + (((SItype) __m1 >> 31) & __m0)); \ | ||
368 | } while (0) | ||
369 | #define smul_ppmm(xh, xl, m0, m1) \ | ||
370 | do { \ | ||
371 | union {DItype __ll; \ | ||
372 | struct {USItype __h, __l; } __i; \ | ||
373 | } __xx; \ | ||
374 | __asm__ ("mr %0,%3" \ | ||
375 | : "=r" (__xx.__i.__h), \ | ||
376 | "=r" (__xx.__i.__l) \ | ||
377 | : "%1" (m0), \ | ||
378 | "r" (m1)); \ | ||
379 | (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \ | ||
380 | } while (0) | ||
381 | #define sdiv_qrnnd(q, r, n1, n0, d) \ | ||
382 | do { \ | ||
383 | union {DItype __ll; \ | ||
384 | struct {USItype __h, __l; } __i; \ | ||
385 | } __xx; \ | ||
386 | __xx.__i.__h = n1; __xx.__i.__l = n0; \ | ||
387 | __asm__ ("dr %0,%2" \ | ||
388 | : "=r" (__xx.__ll) \ | ||
389 | : "0" (__xx.__ll), "r" (d)); \ | ||
390 | (q) = __xx.__i.__l; (r) = __xx.__i.__h; \ | ||
391 | } while (0) | ||
392 | #endif | ||
393 | |||
394 | /*************************************** | ||
395 | ************** I386 ***************** | ||
396 | ***************************************/ | ||
397 | #undef __i386__ | ||
398 | #if (defined(__i386__) || defined(__i486__)) && W_TYPE_SIZE == 32 | ||
399 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
400 | __asm__ ("addl %5,%1\n" \ | ||
401 | "adcl %3,%0" \ | ||
402 | : "=r" ((USItype)(sh)), \ | ||
403 | "=&r" ((USItype)(sl)) \ | ||
404 | : "%0" ((USItype)(ah)), \ | ||
405 | "g" ((USItype)(bh)), \ | ||
406 | "%1" ((USItype)(al)), \ | ||
407 | "g" ((USItype)(bl))) | ||
408 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
409 | __asm__ ("subl %5,%1\n" \ | ||
410 | "sbbl %3,%0" \ | ||
411 | : "=r" ((USItype)(sh)), \ | ||
412 | "=&r" ((USItype)(sl)) \ | ||
413 | : "0" ((USItype)(ah)), \ | ||
414 | "g" ((USItype)(bh)), \ | ||
415 | "1" ((USItype)(al)), \ | ||
416 | "g" ((USItype)(bl))) | ||
417 | #define umul_ppmm(w1, w0, u, v) \ | ||
418 | __asm__ ("mull %3" \ | ||
419 | : "=a" ((USItype)(w0)), \ | ||
420 | "=d" ((USItype)(w1)) \ | ||
421 | : "%0" ((USItype)(u)), \ | ||
422 | "rm" ((USItype)(v))) | ||
423 | #define udiv_qrnnd(q, r, n1, n0, d) \ | ||
424 | __asm__ ("divl %4" \ | ||
425 | : "=a" ((USItype)(q)), \ | ||
426 | "=d" ((USItype)(r)) \ | ||
427 | : "0" ((USItype)(n0)), \ | ||
428 | "1" ((USItype)(n1)), \ | ||
429 | "rm" ((USItype)(d))) | ||
430 | #ifndef UMUL_TIME | ||
431 | #define UMUL_TIME 40 | ||
432 | #endif | ||
433 | #ifndef UDIV_TIME | ||
434 | #define UDIV_TIME 40 | ||
435 | #endif | ||
436 | #endif /* 80x86 */ | ||
437 | |||
438 | /*************************************** | ||
439 | ************** I860 ***************** | ||
440 | ***************************************/ | ||
441 | #if defined(__i860__) && W_TYPE_SIZE == 32 | ||
442 | #define rshift_rhlc(r, h, l, c) \ | ||
443 | __asm__ ("shr %3,r0,r0\n" \ | ||
444 | "shrd %1,%2,%0" \ | ||
445 | "=r" (r) : "r" (h), "r" (l), "rn" (c)) | ||
446 | #endif /* i860 */ | ||
447 | |||
448 | /*************************************** | ||
449 | ************** I960 ***************** | ||
450 | ***************************************/ | ||
451 | #if defined(__i960__) && W_TYPE_SIZE == 32 | ||
452 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
453 | __asm__ ("cmpo 1,0\n" \ | ||
454 | "addc %5,%4,%1\n" \ | ||
455 | "addc %3,%2,%0" \ | ||
456 | : "=r" ((USItype)(sh)), \ | ||
457 | "=&r" ((USItype)(sl)) \ | ||
458 | : "%dI" ((USItype)(ah)), \ | ||
459 | "dI" ((USItype)(bh)), \ | ||
460 | "%dI" ((USItype)(al)), \ | ||
461 | "dI" ((USItype)(bl))) | ||
462 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
463 | __asm__ ("cmpo 0,0\n" \ | ||
464 | "subc %5,%4,%1\n" \ | ||
465 | "subc %3,%2,%0" \ | ||
466 | : "=r" ((USItype)(sh)), \ | ||
467 | "=&r" ((USItype)(sl)) \ | ||
468 | : "dI" ((USItype)(ah)), \ | ||
469 | "dI" ((USItype)(bh)), \ | ||
470 | "dI" ((USItype)(al)), \ | ||
471 | "dI" ((USItype)(bl))) | ||
472 | #define umul_ppmm(w1, w0, u, v) \ | ||
473 | ({union {UDItype __ll; \ | ||
474 | struct {USItype __l, __h; } __i; \ | ||
475 | } __xx; \ | ||
476 | __asm__ ("emul %2,%1,%0" \ | ||
477 | : "=d" (__xx.__ll) \ | ||
478 | : "%dI" ((USItype)(u)), \ | ||
479 | "dI" ((USItype)(v))); \ | ||
480 | (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; }) | ||
481 | #define __umulsidi3(u, v) \ | ||
482 | ({UDItype __w; \ | ||
483 | __asm__ ("emul %2,%1,%0" \ | ||
484 | : "=d" (__w) \ | ||
485 | : "%dI" ((USItype)(u)), \ | ||
486 | "dI" ((USItype)(v))); \ | ||
487 | __w; }) | ||
488 | #define udiv_qrnnd(q, r, nh, nl, d) \ | ||
489 | do { \ | ||
490 | union {UDItype __ll; \ | ||
491 | struct {USItype __l, __h; } __i; \ | ||
492 | } __nn; \ | ||
493 | __nn.__i.__h = (nh); __nn.__i.__l = (nl); \ | ||
494 | __asm__ ("ediv %d,%n,%0" \ | ||
495 | : "=d" (__rq.__ll) \ | ||
496 | : "dI" (__nn.__ll), \ | ||
497 | "dI" ((USItype)(d))); \ | ||
498 | (r) = __rq.__i.__l; (q) = __rq.__i.__h; \ | ||
499 | } while (0) | ||
500 | #if defined(__i960mx) /* what is the proper symbol to test??? */ | ||
501 | #define rshift_rhlc(r, h, l, c) \ | ||
502 | do { \ | ||
503 | union {UDItype __ll; \ | ||
504 | struct {USItype __l, __h; } __i; \ | ||
505 | } __nn; \ | ||
506 | __nn.__i.__h = (h); __nn.__i.__l = (l); \ | ||
507 | __asm__ ("shre %2,%1,%0" \ | ||
508 | : "=d" (r) : "dI" (__nn.__ll), "dI" (c)); \ | ||
509 | } | ||
510 | #endif /* i960mx */ | ||
511 | #endif /* i960 */ | ||
512 | |||
513 | /*************************************** | ||
514 | ************** 68000 **************** | ||
515 | ***************************************/ | ||
516 | #if (defined(__mc68000__) || defined(__mc68020__) || defined(__NeXT__) || defined(mc68020)) && W_TYPE_SIZE == 32 | ||
517 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
518 | __asm__ ("add%.l %5,%1\n" \ | ||
519 | "addx%.l %3,%0" \ | ||
520 | : "=d" ((USItype)(sh)), \ | ||
521 | "=&d" ((USItype)(sl)) \ | ||
522 | : "%0" ((USItype)(ah)), \ | ||
523 | "d" ((USItype)(bh)), \ | ||
524 | "%1" ((USItype)(al)), \ | ||
525 | "g" ((USItype)(bl))) | ||
526 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
527 | __asm__ ("sub%.l %5,%1\n" \ | ||
528 | "subx%.l %3,%0" \ | ||
529 | : "=d" ((USItype)(sh)), \ | ||
530 | "=&d" ((USItype)(sl)) \ | ||
531 | : "0" ((USItype)(ah)), \ | ||
532 | "d" ((USItype)(bh)), \ | ||
533 | "1" ((USItype)(al)), \ | ||
534 | "g" ((USItype)(bl))) | ||
535 | #if (defined(__mc68020__) || defined(__NeXT__) || defined(mc68020)) | ||
536 | #define umul_ppmm(w1, w0, u, v) \ | ||
537 | __asm__ ("mulu%.l %3,%1:%0" \ | ||
538 | : "=d" ((USItype)(w0)), \ | ||
539 | "=d" ((USItype)(w1)) \ | ||
540 | : "%0" ((USItype)(u)), \ | ||
541 | "dmi" ((USItype)(v))) | ||
542 | #define UMUL_TIME 45 | ||
543 | #define udiv_qrnnd(q, r, n1, n0, d) \ | ||
544 | __asm__ ("divu%.l %4,%1:%0" \ | ||
545 | : "=d" ((USItype)(q)), \ | ||
546 | "=d" ((USItype)(r)) \ | ||
547 | : "0" ((USItype)(n0)), \ | ||
548 | "1" ((USItype)(n1)), \ | ||
549 | "dmi" ((USItype)(d))) | ||
550 | #define UDIV_TIME 90 | ||
551 | #define sdiv_qrnnd(q, r, n1, n0, d) \ | ||
552 | __asm__ ("divs%.l %4,%1:%0" \ | ||
553 | : "=d" ((USItype)(q)), \ | ||
554 | "=d" ((USItype)(r)) \ | ||
555 | : "0" ((USItype)(n0)), \ | ||
556 | "1" ((USItype)(n1)), \ | ||
557 | "dmi" ((USItype)(d))) | ||
558 | #else /* not mc68020 */ | ||
559 | #define umul_ppmm(xh, xl, a, b) \ | ||
560 | do { USItype __umul_tmp1, __umul_tmp2; \ | ||
561 | __asm__ ("| Inlined umul_ppmm\n" \ | ||
562 | "move%.l %5,%3\n" \ | ||
563 | "move%.l %2,%0\n" \ | ||
564 | "move%.w %3,%1\n" \ | ||
565 | "swap %3\n" \ | ||
566 | "swap %0\n" \ | ||
567 | "mulu %2,%1\n" \ | ||
568 | "mulu %3,%0\n" \ | ||
569 | "mulu %2,%3\n" \ | ||
570 | "swap %2\n" \ | ||
571 | "mulu %5,%2\n" \ | ||
572 | "add%.l %3,%2\n" \ | ||
573 | "jcc 1f\n" \ | ||
574 | "add%.l %#0x10000,%0\n" \ | ||
575 | "1: move%.l %2,%3\n" \ | ||
576 | "clr%.w %2\n" \ | ||
577 | "swap %2\n" \ | ||
578 | "swap %3\n" \ | ||
579 | "clr%.w %3\n" \ | ||
580 | "add%.l %3,%1\n" \ | ||
581 | "addx%.l %2,%0\n" \ | ||
582 | "| End inlined umul_ppmm" \ | ||
583 | : "=&d" ((USItype)(xh)), "=&d" ((USItype)(xl)), \ | ||
584 | "=d" (__umul_tmp1), "=&d" (__umul_tmp2) \ | ||
585 | : "%2" ((USItype)(a)), "d" ((USItype)(b))); \ | ||
586 | } while (0) | ||
587 | #define UMUL_TIME 100 | ||
588 | #define UDIV_TIME 400 | ||
589 | #endif /* not mc68020 */ | ||
590 | #endif /* mc68000 */ | ||
591 | |||
592 | /*************************************** | ||
593 | ************** 88000 **************** | ||
594 | ***************************************/ | ||
595 | #if defined(__m88000__) && W_TYPE_SIZE == 32 | ||
596 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
597 | __asm__ ("addu.co %1,%r4,%r5\n" \ | ||
598 | "addu.ci %0,%r2,%r3" \ | ||
599 | : "=r" ((USItype)(sh)), \ | ||
600 | "=&r" ((USItype)(sl)) \ | ||
601 | : "%rJ" ((USItype)(ah)), \ | ||
602 | "rJ" ((USItype)(bh)), \ | ||
603 | "%rJ" ((USItype)(al)), \ | ||
604 | "rJ" ((USItype)(bl))) | ||
605 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
606 | __asm__ ("subu.co %1,%r4,%r5\n" \ | ||
607 | "subu.ci %0,%r2,%r3" \ | ||
608 | : "=r" ((USItype)(sh)), \ | ||
609 | "=&r" ((USItype)(sl)) \ | ||
610 | : "rJ" ((USItype)(ah)), \ | ||
611 | "rJ" ((USItype)(bh)), \ | ||
612 | "rJ" ((USItype)(al)), \ | ||
613 | "rJ" ((USItype)(bl))) | ||
614 | #if defined(__m88110__) | ||
615 | #define umul_ppmm(wh, wl, u, v) \ | ||
616 | do { \ | ||
617 | union {UDItype __ll; \ | ||
618 | struct {USItype __h, __l; } __i; \ | ||
619 | } __x; \ | ||
620 | __asm__ ("mulu.d %0,%1,%2" : "=r" (__x.__ll) : "r" (u), "r" (v)); \ | ||
621 | (wh) = __x.__i.__h; \ | ||
622 | (wl) = __x.__i.__l; \ | ||
623 | } while (0) | ||
624 | #define udiv_qrnnd(q, r, n1, n0, d) \ | ||
625 | ({union {UDItype __ll; \ | ||
626 | struct {USItype __h, __l; } __i; \ | ||
627 | } __x, __q; \ | ||
628 | __x.__i.__h = (n1); __x.__i.__l = (n0); \ | ||
629 | __asm__ ("divu.d %0,%1,%2" \ | ||
630 | : "=r" (__q.__ll) : "r" (__x.__ll), "r" (d)); \ | ||
631 | (r) = (n0) - __q.__l * (d); (q) = __q.__l; }) | ||
632 | #define UMUL_TIME 5 | ||
633 | #define UDIV_TIME 25 | ||
634 | #else | ||
635 | #define UMUL_TIME 17 | ||
636 | #define UDIV_TIME 150 | ||
637 | #endif /* __m88110__ */ | ||
638 | #endif /* __m88000__ */ | ||
639 | |||
640 | /*************************************** | ||
641 | ************** MIPS ***************** | ||
642 | ***************************************/ | ||
643 | #if defined(__mips__) && W_TYPE_SIZE == 32 | ||
644 | #if __GNUC__ > 2 || __GNUC_MINOR__ >= 7 | ||
645 | #define umul_ppmm(w1, w0, u, v) \ | ||
646 | __asm__ ("multu %2,%3" \ | ||
647 | : "=l" ((USItype)(w0)), \ | ||
648 | "=h" ((USItype)(w1)) \ | ||
649 | : "d" ((USItype)(u)), \ | ||
650 | "d" ((USItype)(v))) | ||
651 | #else | ||
652 | #define umul_ppmm(w1, w0, u, v) \ | ||
653 | __asm__ ("multu %2,%3\n" \ | ||
654 | "mflo %0\n" \ | ||
655 | "mfhi %1" \ | ||
656 | : "=d" ((USItype)(w0)), \ | ||
657 | "=d" ((USItype)(w1)) \ | ||
658 | : "d" ((USItype)(u)), \ | ||
659 | "d" ((USItype)(v))) | ||
660 | #endif | ||
661 | #define UMUL_TIME 10 | ||
662 | #define UDIV_TIME 100 | ||
663 | #endif /* __mips__ */ | ||
664 | |||
665 | /*************************************** | ||
666 | ************** MIPS/64 ************** | ||
667 | ***************************************/ | ||
668 | #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 | ||
669 | #if __GNUC__ > 2 || __GNUC_MINOR__ >= 7 | ||
670 | #define umul_ppmm(w1, w0, u, v) \ | ||
671 | __asm__ ("dmultu %2,%3" \ | ||
672 | : "=l" ((UDItype)(w0)), \ | ||
673 | "=h" ((UDItype)(w1)) \ | ||
674 | : "d" ((UDItype)(u)), \ | ||
675 | "d" ((UDItype)(v))) | ||
676 | #else | ||
677 | #define umul_ppmm(w1, w0, u, v) \ | ||
678 | __asm__ ("dmultu %2,%3\n" \ | ||
679 | "mflo %0\n" \ | ||
680 | "mfhi %1" \ | ||
681 | : "=d" ((UDItype)(w0)), \ | ||
682 | "=d" ((UDItype)(w1)) \ | ||
683 | : "d" ((UDItype)(u)), \ | ||
684 | "d" ((UDItype)(v))) | ||
685 | #endif | ||
686 | #define UMUL_TIME 20 | ||
687 | #define UDIV_TIME 140 | ||
688 | #endif /* __mips__ */ | ||
689 | |||
690 | /*************************************** | ||
691 | ************** 32000 **************** | ||
692 | ***************************************/ | ||
693 | #if defined(__ns32000__) && W_TYPE_SIZE == 32 | ||
694 | #define umul_ppmm(w1, w0, u, v) \ | ||
695 | ({union {UDItype __ll; \ | ||
696 | struct {USItype __l, __h; } __i; \ | ||
697 | } __xx; \ | ||
698 | __asm__ ("meid %2,%0" \ | ||
699 | : "=g" (__xx.__ll) \ | ||
700 | : "%0" ((USItype)(u)), \ | ||
701 | "g" ((USItype)(v))); \ | ||
702 | (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; }) | ||
703 | #define __umulsidi3(u, v) \ | ||
704 | ({UDItype __w; \ | ||
705 | __asm__ ("meid %2,%0" \ | ||
706 | : "=g" (__w) \ | ||
707 | : "%0" ((USItype)(u)), \ | ||
708 | "g" ((USItype)(v))); \ | ||
709 | __w; }) | ||
710 | #define udiv_qrnnd(q, r, n1, n0, d) \ | ||
711 | ({union {UDItype __ll; \ | ||
712 | struct {USItype __l, __h; } __i; \ | ||
713 | } __xx; \ | ||
714 | __xx.__i.__h = (n1); __xx.__i.__l = (n0); \ | ||
715 | __asm__ ("deid %2,%0" \ | ||
716 | : "=g" (__xx.__ll) \ | ||
717 | : "0" (__xx.__ll), \ | ||
718 | "g" ((USItype)(d))); \ | ||
719 | (r) = __xx.__i.__l; (q) = __xx.__i.__h; }) | ||
720 | #endif /* __ns32000__ */ | ||
721 | |||
722 | /*************************************** | ||
723 | ************** PPC ****************** | ||
724 | ***************************************/ | ||
725 | #if (defined(_ARCH_PPC) || defined(_IBMR2)) && W_TYPE_SIZE == 32 | ||
726 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
727 | do { \ | ||
728 | if (__builtin_constant_p(bh) && (bh) == 0) \ | ||
729 | __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \ | ||
730 | : "=r" ((USItype)(sh)), \ | ||
731 | "=&r" ((USItype)(sl)) \ | ||
732 | : "%r" ((USItype)(ah)), \ | ||
733 | "%r" ((USItype)(al)), \ | ||
734 | "rI" ((USItype)(bl))); \ | ||
735 | else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \ | ||
736 | __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \ | ||
737 | : "=r" ((USItype)(sh)), \ | ||
738 | "=&r" ((USItype)(sl)) \ | ||
739 | : "%r" ((USItype)(ah)), \ | ||
740 | "%r" ((USItype)(al)), \ | ||
741 | "rI" ((USItype)(bl))); \ | ||
742 | else \ | ||
743 | __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \ | ||
744 | : "=r" ((USItype)(sh)), \ | ||
745 | "=&r" ((USItype)(sl)) \ | ||
746 | : "%r" ((USItype)(ah)), \ | ||
747 | "r" ((USItype)(bh)), \ | ||
748 | "%r" ((USItype)(al)), \ | ||
749 | "rI" ((USItype)(bl))); \ | ||
750 | } while (0) | ||
751 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
752 | do { \ | ||
753 | if (__builtin_constant_p(ah) && (ah) == 0) \ | ||
754 | __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \ | ||
755 | : "=r" ((USItype)(sh)), \ | ||
756 | "=&r" ((USItype)(sl)) \ | ||
757 | : "r" ((USItype)(bh)), \ | ||
758 | "rI" ((USItype)(al)), \ | ||
759 | "r" ((USItype)(bl))); \ | ||
760 | else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \ | ||
761 | __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \ | ||
762 | : "=r" ((USItype)(sh)), \ | ||
763 | "=&r" ((USItype)(sl)) \ | ||
764 | : "r" ((USItype)(bh)), \ | ||
765 | "rI" ((USItype)(al)), \ | ||
766 | "r" ((USItype)(bl))); \ | ||
767 | else if (__builtin_constant_p(bh) && (bh) == 0) \ | ||
768 | __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \ | ||
769 | : "=r" ((USItype)(sh)), \ | ||
770 | "=&r" ((USItype)(sl)) \ | ||
771 | : "r" ((USItype)(ah)), \ | ||
772 | "rI" ((USItype)(al)), \ | ||
773 | "r" ((USItype)(bl))); \ | ||
774 | else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \ | ||
775 | __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \ | ||
776 | : "=r" ((USItype)(sh)), \ | ||
777 | "=&r" ((USItype)(sl)) \ | ||
778 | : "r" ((USItype)(ah)), \ | ||
779 | "rI" ((USItype)(al)), \ | ||
780 | "r" ((USItype)(bl))); \ | ||
781 | else \ | ||
782 | __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \ | ||
783 | : "=r" ((USItype)(sh)), \ | ||
784 | "=&r" ((USItype)(sl)) \ | ||
785 | : "r" ((USItype)(ah)), \ | ||
786 | "r" ((USItype)(bh)), \ | ||
787 | "rI" ((USItype)(al)), \ | ||
788 | "r" ((USItype)(bl))); \ | ||
789 | } while (0) | ||
790 | #if defined(_ARCH_PPC) | ||
791 | #define umul_ppmm(ph, pl, m0, m1) \ | ||
792 | do { \ | ||
793 | USItype __m0 = (m0), __m1 = (m1); \ | ||
794 | __asm__ ("mulhwu %0,%1,%2" \ | ||
795 | : "=r" ((USItype) ph) \ | ||
796 | : "%r" (__m0), \ | ||
797 | "r" (__m1)); \ | ||
798 | (pl) = __m0 * __m1; \ | ||
799 | } while (0) | ||
800 | #define UMUL_TIME 15 | ||
801 | #define smul_ppmm(ph, pl, m0, m1) \ | ||
802 | do { \ | ||
803 | SItype __m0 = (m0), __m1 = (m1); \ | ||
804 | __asm__ ("mulhw %0,%1,%2" \ | ||
805 | : "=r" ((SItype) ph) \ | ||
806 | : "%r" (__m0), \ | ||
807 | "r" (__m1)); \ | ||
808 | (pl) = __m0 * __m1; \ | ||
809 | } while (0) | ||
810 | #define SMUL_TIME 14 | ||
811 | #define UDIV_TIME 120 | ||
812 | #else | ||
813 | #define umul_ppmm(xh, xl, m0, m1) \ | ||
814 | do { \ | ||
815 | USItype __m0 = (m0), __m1 = (m1); \ | ||
816 | __asm__ ("mul %0,%2,%3" \ | ||
817 | : "=r" ((USItype)(xh)), \ | ||
818 | "=q" ((USItype)(xl)) \ | ||
819 | : "r" (__m0), \ | ||
820 | "r" (__m1)); \ | ||
821 | (xh) += ((((SItype) __m0 >> 31) & __m1) \ | ||
822 | + (((SItype) __m1 >> 31) & __m0)); \ | ||
823 | } while (0) | ||
824 | #define UMUL_TIME 8 | ||
825 | #define smul_ppmm(xh, xl, m0, m1) \ | ||
826 | __asm__ ("mul %0,%2,%3" \ | ||
827 | : "=r" ((SItype)(xh)), \ | ||
828 | "=q" ((SItype)(xl)) \ | ||
829 | : "r" (m0), \ | ||
830 | "r" (m1)) | ||
831 | #define SMUL_TIME 4 | ||
832 | #define sdiv_qrnnd(q, r, nh, nl, d) \ | ||
833 | __asm__ ("div %0,%2,%4" \ | ||
834 | : "=r" ((SItype)(q)), "=q" ((SItype)(r)) \ | ||
835 | : "r" ((SItype)(nh)), "1" ((SItype)(nl)), "r" ((SItype)(d))) | ||
836 | #define UDIV_TIME 100 | ||
837 | #endif | ||
838 | #endif /* Power architecture variants. */ | ||
839 | |||
840 | /*************************************** | ||
841 | ************** PYR ****************** | ||
842 | ***************************************/ | ||
843 | #if defined(__pyr__) && W_TYPE_SIZE == 32 | ||
844 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
845 | __asm__ ("addw %5,%1\n" \ | ||
846 | "addwc %3,%0" \ | ||
847 | : "=r" ((USItype)(sh)), \ | ||
848 | "=&r" ((USItype)(sl)) \ | ||
849 | : "%0" ((USItype)(ah)), \ | ||
850 | "g" ((USItype)(bh)), \ | ||
851 | "%1" ((USItype)(al)), \ | ||
852 | "g" ((USItype)(bl))) | ||
853 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
854 | __asm__ ("subw %5,%1\n" \ | ||
855 | "subwb %3,%0" \ | ||
856 | : "=r" ((USItype)(sh)), \ | ||
857 | "=&r" ((USItype)(sl)) \ | ||
858 | : "0" ((USItype)(ah)), \ | ||
859 | "g" ((USItype)(bh)), \ | ||
860 | "1" ((USItype)(al)), \ | ||
861 | "g" ((USItype)(bl))) | ||
862 | /* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP. */ | ||
863 | #define umul_ppmm(w1, w0, u, v) \ | ||
864 | ({union {UDItype __ll; \ | ||
865 | struct {USItype __h, __l; } __i; \ | ||
866 | } __xx; \ | ||
867 | __asm__ ("movw %1,%R0\n" \ | ||
868 | "uemul %2,%0" \ | ||
869 | : "=&r" (__xx.__ll) \ | ||
870 | : "g" ((USItype) (u)), \ | ||
871 | "g" ((USItype)(v))); \ | ||
872 | (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; }) | ||
873 | #endif /* __pyr__ */ | ||
874 | |||
875 | /*************************************** | ||
876 | ************** RT/ROMP ************** | ||
877 | ***************************************/ | ||
878 | #if defined(__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32 | ||
879 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
880 | __asm__ ("a %1,%5\n" \ | ||
881 | "ae %0,%3" \ | ||
882 | : "=r" ((USItype)(sh)), \ | ||
883 | "=&r" ((USItype)(sl)) \ | ||
884 | : "%0" ((USItype)(ah)), \ | ||
885 | "r" ((USItype)(bh)), \ | ||
886 | "%1" ((USItype)(al)), \ | ||
887 | "r" ((USItype)(bl))) | ||
888 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
889 | __asm__ ("s %1,%5\n" \ | ||
890 | "se %0,%3" \ | ||
891 | : "=r" ((USItype)(sh)), \ | ||
892 | "=&r" ((USItype)(sl)) \ | ||
893 | : "0" ((USItype)(ah)), \ | ||
894 | "r" ((USItype)(bh)), \ | ||
895 | "1" ((USItype)(al)), \ | ||
896 | "r" ((USItype)(bl))) | ||
897 | #define umul_ppmm(ph, pl, m0, m1) \ | ||
898 | do { \ | ||
899 | USItype __m0 = (m0), __m1 = (m1); \ | ||
900 | __asm__ ( \ | ||
901 | "s r2,r2\n" \ | ||
902 | "mts r10,%2\n" \ | ||
903 | "m r2,%3\n" \ | ||
904 | "m r2,%3\n" \ | ||
905 | "m r2,%3\n" \ | ||
906 | "m r2,%3\n" \ | ||
907 | "m r2,%3\n" \ | ||
908 | "m r2,%3\n" \ | ||
909 | "m r2,%3\n" \ | ||
910 | "m r2,%3\n" \ | ||
911 | "m r2,%3\n" \ | ||
912 | "m r2,%3\n" \ | ||
913 | "m r2,%3\n" \ | ||
914 | "m r2,%3\n" \ | ||
915 | "m r2,%3\n" \ | ||
916 | "m r2,%3\n" \ | ||
917 | "m r2,%3\n" \ | ||
918 | "m r2,%3\n" \ | ||
919 | "cas %0,r2,r0\n" \ | ||
920 | "mfs r10,%1" \ | ||
921 | : "=r" ((USItype)(ph)), \ | ||
922 | "=r" ((USItype)(pl)) \ | ||
923 | : "%r" (__m0), \ | ||
924 | "r" (__m1) \ | ||
925 | : "r2"); \ | ||
926 | (ph) += ((((SItype) __m0 >> 31) & __m1) \ | ||
927 | + (((SItype) __m1 >> 31) & __m0)); \ | ||
928 | } while (0) | ||
929 | #define UMUL_TIME 20 | ||
930 | #define UDIV_TIME 200 | ||
931 | #endif /* RT/ROMP */ | ||
932 | |||
933 | /*************************************** | ||
934 | ************** SH2 ****************** | ||
935 | ***************************************/ | ||
936 | #if (defined(__sh2__) || defined(__sh3__) || defined(__SH4__)) \ | ||
937 | && W_TYPE_SIZE == 32 | ||
938 | #define umul_ppmm(w1, w0, u, v) \ | ||
939 | __asm__ ( \ | ||
940 | "dmulu.l %2,%3\n" \ | ||
941 | "sts macl,%1\n" \ | ||
942 | "sts mach,%0" \ | ||
943 | : "=r" ((USItype)(w1)), \ | ||
944 | "=r" ((USItype)(w0)) \ | ||
945 | : "r" ((USItype)(u)), \ | ||
946 | "r" ((USItype)(v)) \ | ||
947 | : "macl", "mach") | ||
948 | #define UMUL_TIME 5 | ||
949 | #endif | ||
950 | |||
951 | /*************************************** | ||
952 | ************** SPARC **************** | ||
953 | ***************************************/ | ||
954 | #if defined(__sparc__) && W_TYPE_SIZE == 32 | ||
955 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
956 | __asm__ ("addcc %r4,%5,%1\n" \ | ||
957 | "addx %r2,%3,%0" \ | ||
958 | : "=r" ((USItype)(sh)), \ | ||
959 | "=&r" ((USItype)(sl)) \ | ||
960 | : "%rJ" ((USItype)(ah)), \ | ||
961 | "rI" ((USItype)(bh)), \ | ||
962 | "%rJ" ((USItype)(al)), \ | ||
963 | "rI" ((USItype)(bl)) \ | ||
964 | __CLOBBER_CC) | ||
965 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
966 | __asm__ ("subcc %r4,%5,%1\n" \ | ||
967 | "subx %r2,%3,%0" \ | ||
968 | : "=r" ((USItype)(sh)), \ | ||
969 | "=&r" ((USItype)(sl)) \ | ||
970 | : "rJ" ((USItype)(ah)), \ | ||
971 | "rI" ((USItype)(bh)), \ | ||
972 | "rJ" ((USItype)(al)), \ | ||
973 | "rI" ((USItype)(bl)) \ | ||
974 | __CLOBBER_CC) | ||
975 | #if defined(__sparc_v8__) | ||
976 | /* Don't match the immediate range because (1) it is not often useful and | ||
977 | (2) the 'I' constraint treats the range as a 13 bit signed interval, | ||
978 | while we want to match a 13 bit interval, sign extended to 32 bits, | ||
979 | but INTERPRETED AS UNSIGNED. */ | ||
980 | #define umul_ppmm(w1, w0, u, v) \ | ||
981 | __asm__ ("umul %2,%3,%1;rd %%y,%0" \ | ||
982 | : "=r" ((USItype)(w1)), \ | ||
983 | "=r" ((USItype)(w0)) \ | ||
984 | : "r" ((USItype)(u)), \ | ||
985 | "r" ((USItype)(v))) | ||
986 | #define UMUL_TIME 5 | ||
987 | #ifndef SUPERSPARC /* SuperSPARC's udiv only handles 53 bit dividends */ | ||
988 | #define udiv_qrnnd(q, r, n1, n0, d) \ | ||
989 | do { \ | ||
990 | USItype __q; \ | ||
991 | __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0" \ | ||
992 | : "=r" ((USItype)(__q)) \ | ||
993 | : "r" ((USItype)(n1)), \ | ||
994 | "r" ((USItype)(n0)), \ | ||
995 | "r" ((USItype)(d))); \ | ||
996 | (r) = (n0) - __q * (d); \ | ||
997 | (q) = __q; \ | ||
998 | } while (0) | ||
999 | #define UDIV_TIME 25 | ||
1000 | #endif /* SUPERSPARC */ | ||
1001 | #else /* ! __sparc_v8__ */ | ||
1002 | #if defined(__sparclite__) | ||
1003 | /* This has hardware multiply but not divide. It also has two additional | ||
1004 | instructions scan (ffs from high bit) and divscc. */ | ||
1005 | #define umul_ppmm(w1, w0, u, v) \ | ||
1006 | __asm__ ("umul %2,%3,%1;rd %%y,%0" \ | ||
1007 | : "=r" ((USItype)(w1)), \ | ||
1008 | "=r" ((USItype)(w0)) \ | ||
1009 | : "r" ((USItype)(u)), \ | ||
1010 | "r" ((USItype)(v))) | ||
1011 | #define UMUL_TIME 5 | ||
1012 | #define udiv_qrnnd(q, r, n1, n0, d) \ | ||
1013 | __asm__ ("! Inlined udiv_qrnnd\n" \ | ||
1014 | "wr %%g0,%2,%%y ! Not a delayed write for sparclite\n" \ | ||
1015 | "tst %%g0\n" \ | ||
1016 | "divscc %3,%4,%%g1\n" \ | ||
1017 | "divscc %%g1,%4,%%g1\n" \ | ||
1018 | "divscc %%g1,%4,%%g1\n" \ | ||
1019 | "divscc %%g1,%4,%%g1\n" \ | ||
1020 | "divscc %%g1,%4,%%g1\n" \ | ||
1021 | "divscc %%g1,%4,%%g1\n" \ | ||
1022 | "divscc %%g1,%4,%%g1\n" \ | ||
1023 | "divscc %%g1,%4,%%g1\n" \ | ||
1024 | "divscc %%g1,%4,%%g1\n" \ | ||
1025 | "divscc %%g1,%4,%%g1\n" \ | ||
1026 | "divscc %%g1,%4,%%g1\n" \ | ||
1027 | "divscc %%g1,%4,%%g1\n" \ | ||
1028 | "divscc %%g1,%4,%%g1\n" \ | ||
1029 | "divscc %%g1,%4,%%g1\n" \ | ||
1030 | "divscc %%g1,%4,%%g1\n" \ | ||
1031 | "divscc %%g1,%4,%%g1\n" \ | ||
1032 | "divscc %%g1,%4,%%g1\n" \ | ||
1033 | "divscc %%g1,%4,%%g1\n" \ | ||
1034 | "divscc %%g1,%4,%%g1\n" \ | ||
1035 | "divscc %%g1,%4,%%g1\n" \ | ||
1036 | "divscc %%g1,%4,%%g1\n" \ | ||
1037 | "divscc %%g1,%4,%%g1\n" \ | ||
1038 | "divscc %%g1,%4,%%g1\n" \ | ||
1039 | "divscc %%g1,%4,%%g1\n" \ | ||
1040 | "divscc %%g1,%4,%%g1\n" \ | ||
1041 | "divscc %%g1,%4,%%g1\n" \ | ||
1042 | "divscc %%g1,%4,%%g1\n" \ | ||
1043 | "divscc %%g1,%4,%%g1\n" \ | ||
1044 | "divscc %%g1,%4,%%g1\n" \ | ||
1045 | "divscc %%g1,%4,%%g1\n" \ | ||
1046 | "divscc %%g1,%4,%%g1\n" \ | ||
1047 | "divscc %%g1,%4,%0\n" \ | ||
1048 | "rd %%y,%1\n" \ | ||
1049 | "bl,a 1f\n" \ | ||
1050 | "add %1,%4,%1\n" \ | ||
1051 | "1: ! End of inline udiv_qrnnd" \ | ||
1052 | : "=r" ((USItype)(q)), \ | ||
1053 | "=r" ((USItype)(r)) \ | ||
1054 | : "r" ((USItype)(n1)), \ | ||
1055 | "r" ((USItype)(n0)), \ | ||
1056 | "rI" ((USItype)(d)) \ | ||
1057 | : "%g1" __AND_CLOBBER_CC) | ||
1058 | #define UDIV_TIME 37 | ||
1059 | #endif /* __sparclite__ */ | ||
1060 | #endif /* __sparc_v8__ */ | ||
1061 | /* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd. */ | ||
1062 | #ifndef umul_ppmm | ||
1063 | #define umul_ppmm(w1, w0, u, v) \ | ||
1064 | __asm__ ("! Inlined umul_ppmm\n" \ | ||
1065 | "wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr\n" \ | ||
1066 | "sra %3,31,%%g2 ! Don't move this insn\n" \ | ||
1067 | "and %2,%%g2,%%g2 ! Don't move this insn\n" \ | ||
1068 | "andcc %%g0,0,%%g1 ! Don't move this insn\n" \ | ||
1069 | "mulscc %%g1,%3,%%g1\n" \ | ||
1070 | "mulscc %%g1,%3,%%g1\n" \ | ||
1071 | "mulscc %%g1,%3,%%g1\n" \ | ||
1072 | "mulscc %%g1,%3,%%g1\n" \ | ||
1073 | "mulscc %%g1,%3,%%g1\n" \ | ||
1074 | "mulscc %%g1,%3,%%g1\n" \ | ||
1075 | "mulscc %%g1,%3,%%g1\n" \ | ||
1076 | "mulscc %%g1,%3,%%g1\n" \ | ||
1077 | "mulscc %%g1,%3,%%g1\n" \ | ||
1078 | "mulscc %%g1,%3,%%g1\n" \ | ||
1079 | "mulscc %%g1,%3,%%g1\n" \ | ||
1080 | "mulscc %%g1,%3,%%g1\n" \ | ||
1081 | "mulscc %%g1,%3,%%g1\n" \ | ||
1082 | "mulscc %%g1,%3,%%g1\n" \ | ||
1083 | "mulscc %%g1,%3,%%g1\n" \ | ||
1084 | "mulscc %%g1,%3,%%g1\n" \ | ||
1085 | "mulscc %%g1,%3,%%g1\n" \ | ||
1086 | "mulscc %%g1,%3,%%g1\n" \ | ||
1087 | "mulscc %%g1,%3,%%g1\n" \ | ||
1088 | "mulscc %%g1,%3,%%g1\n" \ | ||
1089 | "mulscc %%g1,%3,%%g1\n" \ | ||
1090 | "mulscc %%g1,%3,%%g1\n" \ | ||
1091 | "mulscc %%g1,%3,%%g1\n" \ | ||
1092 | "mulscc %%g1,%3,%%g1\n" \ | ||
1093 | "mulscc %%g1,%3,%%g1\n" \ | ||
1094 | "mulscc %%g1,%3,%%g1\n" \ | ||
1095 | "mulscc %%g1,%3,%%g1\n" \ | ||
1096 | "mulscc %%g1,%3,%%g1\n" \ | ||
1097 | "mulscc %%g1,%3,%%g1\n" \ | ||
1098 | "mulscc %%g1,%3,%%g1\n" \ | ||
1099 | "mulscc %%g1,%3,%%g1\n" \ | ||
1100 | "mulscc %%g1,%3,%%g1\n" \ | ||
1101 | "mulscc %%g1,0,%%g1\n" \ | ||
1102 | "add %%g1,%%g2,%0\n" \ | ||
1103 | "rd %%y,%1" \ | ||
1104 | : "=r" ((USItype)(w1)), \ | ||
1105 | "=r" ((USItype)(w0)) \ | ||
1106 | : "%rI" ((USItype)(u)), \ | ||
1107 | "r" ((USItype)(v)) \ | ||
1108 | : "%g1", "%g2" __AND_CLOBBER_CC) | ||
1109 | #define UMUL_TIME 39 /* 39 instructions */ | ||
1110 | /* It's quite necessary to add this much assembler for the sparc. | ||
1111 | The default udiv_qrnnd (in C) is more than 10 times slower! */ | ||
1112 | #define udiv_qrnnd(q, r, n1, n0, d) \ | ||
1113 | __asm__ ("! Inlined udiv_qrnnd\n\t" \ | ||
1114 | "mov 32,%%g1\n\t" \ | ||
1115 | "subcc %1,%2,%%g0\n\t" \ | ||
1116 | "1: bcs 5f\n\t" \ | ||
1117 | "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n\t" \ | ||
1118 | "sub %1,%2,%1 ! this kills msb of n\n\t" \ | ||
1119 | "addx %1,%1,%1 ! so this can't give carry\n\t" \ | ||
1120 | "subcc %%g1,1,%%g1\n\t" \ | ||
1121 | "2: bne 1b\n\t" \ | ||
1122 | "subcc %1,%2,%%g0\n\t" \ | ||
1123 | "bcs 3f\n\t" \ | ||
1124 | "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n\t" \ | ||
1125 | "b 3f\n\t" \ | ||
1126 | "sub %1,%2,%1 ! this kills msb of n\n\t" \ | ||
1127 | "4: sub %1,%2,%1\n\t" \ | ||
1128 | "5: addxcc %1,%1,%1\n\t" \ | ||
1129 | "bcc 2b\n\t" \ | ||
1130 | "subcc %%g1,1,%%g1\n\t" \ | ||
1131 | "! Got carry from n. Subtract next step to cancel this carry.\n\t" \ | ||
1132 | "bne 4b\n\t" \ | ||
1133 | "addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb\n\t" \ | ||
1134 | "sub %1,%2,%1\n\t" \ | ||
1135 | "3: xnor %0,0,%0\n\t" \ | ||
1136 | "! End of inline udiv_qrnnd\n" \ | ||
1137 | : "=&r" ((USItype)(q)), \ | ||
1138 | "=&r" ((USItype)(r)) \ | ||
1139 | : "r" ((USItype)(d)), \ | ||
1140 | "1" ((USItype)(n1)), \ | ||
1141 | "0" ((USItype)(n0)) : "%g1", "cc") | ||
1142 | #define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */ | ||
1143 | #endif | ||
1144 | #endif /* __sparc__ */ | ||
1145 | |||
1146 | /*************************************** | ||
1147 | ************** VAX ****************** | ||
1148 | ***************************************/ | ||
1149 | #if defined(__vax__) && W_TYPE_SIZE == 32 | ||
1150 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
1151 | __asm__ ("addl2 %5,%1\n" \ | ||
1152 | "adwc %3,%0" \ | ||
1153 | : "=g" ((USItype)(sh)), \ | ||
1154 | "=&g" ((USItype)(sl)) \ | ||
1155 | : "%0" ((USItype)(ah)), \ | ||
1156 | "g" ((USItype)(bh)), \ | ||
1157 | "%1" ((USItype)(al)), \ | ||
1158 | "g" ((USItype)(bl))) | ||
1159 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
1160 | __asm__ ("subl2 %5,%1\n" \ | ||
1161 | "sbwc %3,%0" \ | ||
1162 | : "=g" ((USItype)(sh)), \ | ||
1163 | "=&g" ((USItype)(sl)) \ | ||
1164 | : "0" ((USItype)(ah)), \ | ||
1165 | "g" ((USItype)(bh)), \ | ||
1166 | "1" ((USItype)(al)), \ | ||
1167 | "g" ((USItype)(bl))) | ||
1168 | #define umul_ppmm(xh, xl, m0, m1) \ | ||
1169 | do { \ | ||
1170 | union {UDItype __ll; \ | ||
1171 | struct {USItype __l, __h; } __i; \ | ||
1172 | } __xx; \ | ||
1173 | USItype __m0 = (m0), __m1 = (m1); \ | ||
1174 | __asm__ ("emul %1,%2,$0,%0" \ | ||
1175 | : "=g" (__xx.__ll) \ | ||
1176 | : "g" (__m0), \ | ||
1177 | "g" (__m1)); \ | ||
1178 | (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \ | ||
1179 | (xh) += ((((SItype) __m0 >> 31) & __m1) \ | ||
1180 | + (((SItype) __m1 >> 31) & __m0)); \ | ||
1181 | } while (0) | ||
1182 | #define sdiv_qrnnd(q, r, n1, n0, d) \ | ||
1183 | do { \ | ||
1184 | union {DItype __ll; \ | ||
1185 | struct {SItype __l, __h; } __i; \ | ||
1186 | } __xx; \ | ||
1187 | __xx.__i.__h = n1; __xx.__i.__l = n0; \ | ||
1188 | __asm__ ("ediv %3,%2,%0,%1" \ | ||
1189 | : "=g" (q), "=g" (r) \ | ||
1190 | : "g" (__xx.__ll), "g" (d)); \ | ||
1191 | } while (0) | ||
1192 | #endif /* __vax__ */ | ||
1193 | |||
1194 | /*************************************** | ||
1195 | ************** Z8000 **************** | ||
1196 | ***************************************/ | ||
1197 | #if defined(__z8000__) && W_TYPE_SIZE == 16 | ||
1198 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
1199 | __asm__ ("add %H1,%H5\n\tadc %H0,%H3" \ | ||
1200 | : "=r" ((unsigned int)(sh)), \ | ||
1201 | "=&r" ((unsigned int)(sl)) \ | ||
1202 | : "%0" ((unsigned int)(ah)), \ | ||
1203 | "r" ((unsigned int)(bh)), \ | ||
1204 | "%1" ((unsigned int)(al)), \ | ||
1205 | "rQR" ((unsigned int)(bl))) | ||
1206 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
1207 | __asm__ ("sub %H1,%H5\n\tsbc %H0,%H3" \ | ||
1208 | : "=r" ((unsigned int)(sh)), \ | ||
1209 | "=&r" ((unsigned int)(sl)) \ | ||
1210 | : "0" ((unsigned int)(ah)), \ | ||
1211 | "r" ((unsigned int)(bh)), \ | ||
1212 | "1" ((unsigned int)(al)), \ | ||
1213 | "rQR" ((unsigned int)(bl))) | ||
1214 | #define umul_ppmm(xh, xl, m0, m1) \ | ||
1215 | do { \ | ||
1216 | union {long int __ll; \ | ||
1217 | struct {unsigned int __h, __l; } __i; \ | ||
1218 | } __xx; \ | ||
1219 | unsigned int __m0 = (m0), __m1 = (m1); \ | ||
1220 | __asm__ ("mult %S0,%H3" \ | ||
1221 | : "=r" (__xx.__i.__h), \ | ||
1222 | "=r" (__xx.__i.__l) \ | ||
1223 | : "%1" (__m0), \ | ||
1224 | "rQR" (__m1)); \ | ||
1225 | (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \ | ||
1226 | (xh) += ((((signed int) __m0 >> 15) & __m1) \ | ||
1227 | + (((signed int) __m1 >> 15) & __m0)); \ | ||
1228 | } while (0) | ||
1229 | #endif /* __z8000__ */ | ||
1230 | |||
1231 | #endif /* __GNUC__ */ | ||
1232 | |||
1233 | /*************************************** | ||
1234 | *********** Generic Versions ******** | ||
1235 | ***************************************/ | ||
1236 | #if !defined(umul_ppmm) && defined(__umulsidi3) | ||
1237 | #define umul_ppmm(ph, pl, m0, m1) \ | ||
1238 | { \ | ||
1239 | UDWtype __ll = __umulsidi3(m0, m1); \ | ||
1240 | ph = (UWtype) (__ll >> W_TYPE_SIZE); \ | ||
1241 | pl = (UWtype) __ll; \ | ||
1242 | } | ||
1243 | #endif | ||
1244 | |||
1245 | #if !defined(__umulsidi3) | ||
1246 | #define __umulsidi3(u, v) \ | ||
1247 | ({UWtype __hi, __lo; \ | ||
1248 | umul_ppmm(__hi, __lo, u, v); \ | ||
1249 | ((UDWtype) __hi << W_TYPE_SIZE) | __lo; }) | ||
1250 | #endif | ||
1251 | |||
1252 | /* If this machine has no inline assembler, use C macros. */ | ||
1253 | |||
1254 | #if !defined(add_ssaaaa) | ||
1255 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ | ||
1256 | do { \ | ||
1257 | UWtype __x; \ | ||
1258 | __x = (al) + (bl); \ | ||
1259 | (sh) = (ah) + (bh) + (__x < (al)); \ | ||
1260 | (sl) = __x; \ | ||
1261 | } while (0) | ||
1262 | #endif | ||
1263 | |||
1264 | #if !defined(sub_ddmmss) | ||
1265 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ | ||
1266 | do { \ | ||
1267 | UWtype __x; \ | ||
1268 | __x = (al) - (bl); \ | ||
1269 | (sh) = (ah) - (bh) - (__x > (al)); \ | ||
1270 | (sl) = __x; \ | ||
1271 | } while (0) | ||
1272 | #endif | ||
1273 | |||
1274 | #if !defined(umul_ppmm) | ||
1275 | #define umul_ppmm(w1, w0, u, v) \ | ||
1276 | do { \ | ||
1277 | UWtype __x0, __x1, __x2, __x3; \ | ||
1278 | UHWtype __ul, __vl, __uh, __vh; \ | ||
1279 | UWtype __u = (u), __v = (v); \ | ||
1280 | \ | ||
1281 | __ul = __ll_lowpart(__u); \ | ||
1282 | __uh = __ll_highpart(__u); \ | ||
1283 | __vl = __ll_lowpart(__v); \ | ||
1284 | __vh = __ll_highpart(__v); \ | ||
1285 | \ | ||
1286 | __x0 = (UWtype) __ul * __vl; \ | ||
1287 | __x1 = (UWtype) __ul * __vh; \ | ||
1288 | __x2 = (UWtype) __uh * __vl; \ | ||
1289 | __x3 = (UWtype) __uh * __vh; \ | ||
1290 | \ | ||
1291 | __x1 += __ll_highpart(__x0);/* this can't give carry */ \ | ||
1292 | __x1 += __x2; /* but this indeed can */ \ | ||
1293 | if (__x1 < __x2) /* did we get it? */ \ | ||
1294 | __x3 += __ll_B; /* yes, add it in the proper pos. */ \ | ||
1295 | \ | ||
1296 | (w1) = __x3 + __ll_highpart(__x1); \ | ||
1297 | (w0) = (__ll_lowpart(__x1) << W_TYPE_SIZE/2) + __ll_lowpart(__x0); \ | ||
1298 | } while (0) | ||
1299 | #endif | ||
1300 | |||
1301 | #if !defined(umul_ppmm) | ||
1302 | #define smul_ppmm(w1, w0, u, v) \ | ||
1303 | do { \ | ||
1304 | UWtype __w1; \ | ||
1305 | UWtype __m0 = (u), __m1 = (v); \ | ||
1306 | umul_ppmm(__w1, w0, __m0, __m1); \ | ||
1307 | (w1) = __w1 - (-(__m0 >> (W_TYPE_SIZE - 1)) & __m1) \ | ||
1308 | - (-(__m1 >> (W_TYPE_SIZE - 1)) & __m0); \ | ||
1309 | } while (0) | ||
1310 | #endif | ||
1311 | |||
1312 | /* Define this unconditionally, so it can be used for debugging. */ | ||
1313 | #define __udiv_qrnnd_c(q, r, n1, n0, d) \ | ||
1314 | do { \ | ||
1315 | UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \ | ||
1316 | __d1 = __ll_highpart(d); \ | ||
1317 | __d0 = __ll_lowpart(d); \ | ||
1318 | \ | ||
1319 | __r1 = (n1) % __d1; \ | ||
1320 | __q1 = (n1) / __d1; \ | ||
1321 | __m = (UWtype) __q1 * __d0; \ | ||
1322 | __r1 = __r1 * __ll_B | __ll_highpart(n0); \ | ||
1323 | if (__r1 < __m) { \ | ||
1324 | __q1--, __r1 += (d); \ | ||
1325 | if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */ \ | ||
1326 | if (__r1 < __m) \ | ||
1327 | __q1--, __r1 += (d); \ | ||
1328 | } \ | ||
1329 | __r1 -= __m; \ | ||
1330 | \ | ||
1331 | __r0 = __r1 % __d1; \ | ||
1332 | __q0 = __r1 / __d1; \ | ||
1333 | __m = (UWtype) __q0 * __d0; \ | ||
1334 | __r0 = __r0 * __ll_B | __ll_lowpart(n0); \ | ||
1335 | if (__r0 < __m) { \ | ||
1336 | __q0--, __r0 += (d); \ | ||
1337 | if (__r0 >= (d)) \ | ||
1338 | if (__r0 < __m) \ | ||
1339 | __q0--, __r0 += (d); \ | ||
1340 | } \ | ||
1341 | __r0 -= __m; \ | ||
1342 | \ | ||
1343 | (q) = (UWtype) __q1 * __ll_B | __q0; \ | ||
1344 | (r) = __r0; \ | ||
1345 | } while (0) | ||
1346 | |||
1347 | /* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through | ||
1348 | __udiv_w_sdiv (defined in libgcc or elsewhere). */ | ||
1349 | #if !defined(udiv_qrnnd) && defined(sdiv_qrnnd) | ||
1350 | #define udiv_qrnnd(q, r, nh, nl, d) \ | ||
1351 | do { \ | ||
1352 | UWtype __r; \ | ||
1353 | (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d); \ | ||
1354 | (r) = __r; \ | ||
1355 | } while (0) | ||
1356 | #endif | ||
1357 | |||
1358 | /* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */ | ||
1359 | #if !defined(udiv_qrnnd) | ||
1360 | #define UDIV_NEEDS_NORMALIZATION 1 | ||
1361 | #define udiv_qrnnd __udiv_qrnnd_c | ||
1362 | #endif | ||
1363 | |||
1364 | #ifndef UDIV_NEEDS_NORMALIZATION | ||
1365 | #define UDIV_NEEDS_NORMALIZATION 0 | ||
1366 | #endif | ||
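
The generic C fallback for umul_ppmm above builds the double-width product from four half-word partial products, folding the carries by hand. Below is a minimal standalone sketch of that decomposition, assuming 32-bit words split into 16-bit halves; the function name and test values are illustrative and not part of the kernel source.

#include <assert.h>
#include <stdint.h>

/* Illustrative only: a 32-bit "limb" split into 16-bit halves, mirroring
 * the structure of the generic umul_ppmm() fallback. */
static void umul_ppmm_sketch(uint32_t *w1, uint32_t *w0, uint32_t u, uint32_t v)
{
	uint32_t ul = u & 0xffff, uh = u >> 16;
	uint32_t vl = v & 0xffff, vh = v >> 16;

	uint32_t x0 = ul * vl;		/* low  * low  */
	uint32_t x1 = ul * vh;		/* low  * high */
	uint32_t x2 = uh * vl;		/* high * low  */
	uint32_t x3 = uh * vh;		/* high * high */

	x1 += x0 >> 16;			/* this cannot carry */
	x1 += x2;			/* but this can */
	if (x1 < x2)			/* carry into the high product */
		x3 += 1u << 16;

	*w1 = x3 + (x1 >> 16);
	*w0 = (x1 << 16) + (x0 & 0xffff);
}

int main(void)
{
	uint32_t hi, lo;

	umul_ppmm_sketch(&hi, &lo, 0xdeadbeef, 0xcafebabe);
	assert(((uint64_t)hi << 32 | lo) == (uint64_t)0xdeadbeef * 0xcafebabe);
	return 0;
}

The same half-word splitting underlies __udiv_qrnnd_c, which divides a two-limb numerator one half-limb digit at a time.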
diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c new file mode 100644 index 000000000000..503537e08436 --- /dev/null +++ b/lib/mpi/mpi-bit.c | |||
@@ -0,0 +1,56 @@ | |||
1 | /* mpi-bit.c - MPI bit level functions | ||
2 | * Copyright (C) 1998, 1999 Free Software Foundation, Inc. | ||
3 | * | ||
4 | * This file is part of GnuPG. | ||
5 | * | ||
6 | * GnuPG is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * GnuPG is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
19 | */ | ||
20 | |||
21 | #include "mpi-internal.h" | ||
22 | #include "longlong.h" | ||
23 | |||
24 | #define A_LIMB_1 ((mpi_limb_t) 1) | ||
25 | |||
26 | /**************** | ||
27 | * Sometimes we have MSL (most significant limbs) which are 0; | ||
28 | * this is for some reasons not good, so this function removes them. | ||
29 | */ | ||
30 | void mpi_normalize(MPI a) | ||
31 | { | ||
32 | for (; a->nlimbs && !a->d[a->nlimbs - 1]; a->nlimbs--) | ||
33 | ; | ||
34 | } | ||
35 | |||
36 | /**************** | ||
37 | * Return the number of bits in A. | ||
38 | */ | ||
39 | unsigned mpi_get_nbits(MPI a) | ||
40 | { | ||
41 | unsigned n; | ||
42 | |||
43 | mpi_normalize(a); | ||
44 | |||
45 | if (a->nlimbs) { | ||
46 | mpi_limb_t alimb = a->d[a->nlimbs - 1]; | ||
47 | if (alimb) | ||
48 | n = count_leading_zeros(alimb); | ||
49 | else | ||
50 | n = BITS_PER_MPI_LIMB; | ||
51 | n = BITS_PER_MPI_LIMB - n + (a->nlimbs - 1) * BITS_PER_MPI_LIMB; | ||
52 | } else | ||
53 | n = 0; | ||
54 | return n; | ||
55 | } | ||
56 | EXPORT_SYMBOL_GPL(mpi_get_nbits); | ||
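
mpi_get_nbits() above works limb-wise: after the zero leading limbs are stripped, the bit count is the full width of every lower limb plus the width of the top limb, i.e. BITS_PER_MPI_LIMB minus its leading zeros. A hedged sketch of the same computation on a plain array of 32-bit limbs, with a hand-rolled leading-zero count standing in for count_leading_zeros() (all names here are illustrative):

#include <assert.h>
#include <stdint.h>

#define LIMB_BITS 32

/* Illustrative stand-in for count_leading_zeros() on a non-zero limb. */
static unsigned clz32(uint32_t x)
{
	unsigned n = 0;

	while (!(x & 0x80000000u)) {
		x <<= 1;
		n++;
	}
	return n;
}

/* Same idea as mpi_get_nbits(): skip zero high limbs, then count the
 * significant bits of the top limb. */
static unsigned nbits(const uint32_t *d, unsigned nlimbs)
{
	while (nlimbs && !d[nlimbs - 1])	/* like mpi_normalize() */
		nlimbs--;
	if (!nlimbs)
		return 0;
	return nlimbs * LIMB_BITS - clz32(d[nlimbs - 1]);
}

int main(void)
{
	/* 2^33 - 1, stored least significant limb first, with a zero top limb */
	uint32_t v[3] = { 0xffffffff, 0x1, 0x0 };

	assert(nbits(v, 3) == 33);
	return 0;
}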
diff --git a/lib/mpi/mpi-cmp.c b/lib/mpi/mpi-cmp.c new file mode 100644 index 000000000000..1871e7b61ca0 --- /dev/null +++ b/lib/mpi/mpi-cmp.c | |||
@@ -0,0 +1,70 @@ | |||
1 | /* mpi-cmp.c - MPI functions | ||
2 | * Copyright (C) 1998, 1999 Free Software Foundation, Inc. | ||
3 | * | ||
4 | * This file is part of GnuPG. | ||
5 | * | ||
6 | * GnuPG is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * GnuPG is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
19 | */ | ||
20 | |||
21 | #include "mpi-internal.h" | ||
22 | |||
23 | int mpi_cmp_ui(MPI u, unsigned long v) | ||
24 | { | ||
25 | mpi_limb_t limb = v; | ||
26 | |||
27 | mpi_normalize(u); | ||
28 | if (!u->nlimbs && !limb) | ||
29 | return 0; | ||
30 | if (u->sign) | ||
31 | return -1; | ||
32 | if (u->nlimbs > 1) | ||
33 | return 1; | ||
34 | |||
35 | if (u->d[0] == limb) | ||
36 | return 0; | ||
37 | else if (u->d[0] > limb) | ||
38 | return 1; | ||
39 | else | ||
40 | return -1; | ||
41 | } | ||
42 | EXPORT_SYMBOL_GPL(mpi_cmp_ui); | ||
43 | |||
44 | int mpi_cmp(MPI u, MPI v) | ||
45 | { | ||
46 | mpi_size_t usize, vsize; | ||
47 | int cmp; | ||
48 | |||
49 | mpi_normalize(u); | ||
50 | mpi_normalize(v); | ||
51 | usize = u->nlimbs; | ||
52 | vsize = v->nlimbs; | ||
53 | if (!u->sign && v->sign) | ||
54 | return 1; | ||
55 | if (u->sign && !v->sign) | ||
56 | return -1; | ||
57 | if (usize != vsize && !u->sign && !v->sign) | ||
58 | return usize - vsize; | ||
59 | if (usize != vsize && u->sign && v->sign) | ||
60 | return vsize - usize; | ||
61 | if (!usize) | ||
62 | return 0; | ||
63 | cmp = mpihelp_cmp(u->d, v->d, usize); | ||
64 | if (!cmp) | ||
65 | return 0; | ||
66 | if ((cmp < 0 ? 1 : 0) == (u->sign ? 1 : 0)) | ||
67 | return 1; | ||
68 | return -1; | ||
69 | } | ||
70 | EXPORT_SYMBOL_GPL(mpi_cmp); | ||
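
mpi_cmp() above orders values by sign first, then by limb count, and only then by magnitude, inverting the magnitude result when both operands are negative. The same ordering on a toy sign-magnitude type, as an illustrative sketch only (this is not the MPI representation):

#include <assert.h>

/* Toy sign-magnitude value: sign == 1 means negative. */
struct sm { int sign; unsigned long mag; };

static int sm_cmp(struct sm u, struct sm v)
{
	int cmp;

	if (!u.sign && v.sign)			/* positive > negative */
		return 1;
	if (u.sign && !v.sign)
		return -1;
	if (u.mag == v.mag)
		return 0;
	cmp = u.mag > v.mag ? 1 : -1;		/* magnitude comparison */
	return u.sign ? -cmp : cmp;		/* both negative: flip */
}

int main(void)
{
	struct sm a = { 1, 5 }, b = { 1, 3 };	/* -5 and -3 */

	assert(sm_cmp(a, b) < 0);		/* -5 < -3 */
	assert(sm_cmp(b, a) > 0);
	return 0;
}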
diff --git a/lib/mpi/mpi-inline.h b/lib/mpi/mpi-inline.h new file mode 100644 index 000000000000..e2b39852b30a --- /dev/null +++ b/lib/mpi/mpi-inline.h | |||
@@ -0,0 +1,122 @@ | |||
1 | /* mpi-inline.h - Internal to the Multi Precision Integers | ||
2 | * Copyright (C) 1994, 1996, 1998, 1999 Free Software Foundation, Inc. | ||
3 | * | ||
4 | * This file is part of GnuPG. | ||
5 | * | ||
6 | * GnuPG is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * GnuPG is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
19 | * | ||
20 | * Note: This code is heavily based on the GNU MP Library. | ||
21 | * Actually it's the same code with only minor changes in the | ||
22 | * way the data is stored; this is to support the abstraction | ||
23 | * of an optional secure memory allocation which may be used | ||
24 | * to avoid revealing of sensitive data due to paging etc. | ||
25 | * The GNU MP Library itself is published under the LGPL; | ||
26 | * however I decided to publish this code under the plain GPL. | ||
27 | */ | ||
28 | |||
29 | #ifndef G10_MPI_INLINE_H | ||
30 | #define G10_MPI_INLINE_H | ||
31 | |||
32 | #ifndef G10_MPI_INLINE_DECL | ||
33 | #define G10_MPI_INLINE_DECL extern inline | ||
34 | #endif | ||
35 | |||
36 | G10_MPI_INLINE_DECL mpi_limb_t | ||
37 | mpihelp_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, | ||
38 | mpi_size_t s1_size, mpi_limb_t s2_limb) | ||
39 | { | ||
40 | mpi_limb_t x; | ||
41 | |||
42 | x = *s1_ptr++; | ||
43 | s2_limb += x; | ||
44 | *res_ptr++ = s2_limb; | ||
45 | if (s2_limb < x) { /* sum is less than the left operand: handle carry */ | ||
46 | while (--s1_size) { | ||
47 | x = *s1_ptr++ + 1; /* add carry */ | ||
48 | *res_ptr++ = x; /* and store */ | ||
49 | if (x) /* not 0 (no overflow): we can stop */ | ||
50 | goto leave; | ||
51 | } | ||
52 | return 1; /* return carry (size of s1 too small) */ | ||
53 | } | ||
54 | |||
55 | leave: | ||
56 | if (res_ptr != s1_ptr) { /* not the same variable */ | ||
57 | mpi_size_t i; /* copy the rest */ | ||
58 | for (i = 0; i < s1_size - 1; i++) | ||
59 | res_ptr[i] = s1_ptr[i]; | ||
60 | } | ||
61 | return 0; /* no carry */ | ||
62 | } | ||
63 | |||
64 | G10_MPI_INLINE_DECL mpi_limb_t | ||
65 | mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, | ||
66 | mpi_ptr_t s2_ptr, mpi_size_t s2_size) | ||
67 | { | ||
68 | mpi_limb_t cy = 0; | ||
69 | |||
70 | if (s2_size) | ||
71 | cy = mpihelp_add_n(res_ptr, s1_ptr, s2_ptr, s2_size); | ||
72 | |||
73 | if (s1_size - s2_size) | ||
74 | cy = mpihelp_add_1(res_ptr + s2_size, s1_ptr + s2_size, | ||
75 | s1_size - s2_size, cy); | ||
76 | return cy; | ||
77 | } | ||
78 | |||
79 | G10_MPI_INLINE_DECL mpi_limb_t | ||
80 | mpihelp_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, | ||
81 | mpi_size_t s1_size, mpi_limb_t s2_limb) | ||
82 | { | ||
83 | mpi_limb_t x; | ||
84 | |||
85 | x = *s1_ptr++; | ||
86 | s2_limb = x - s2_limb; | ||
87 | *res_ptr++ = s2_limb; | ||
88 | if (s2_limb > x) { | ||
89 | while (--s1_size) { | ||
90 | x = *s1_ptr++; | ||
91 | *res_ptr++ = x - 1; | ||
92 | if (x) | ||
93 | goto leave; | ||
94 | } | ||
95 | return 1; | ||
96 | } | ||
97 | |||
98 | leave: | ||
99 | if (res_ptr != s1_ptr) { | ||
100 | mpi_size_t i; | ||
101 | for (i = 0; i < s1_size - 1; i++) | ||
102 | res_ptr[i] = s1_ptr[i]; | ||
103 | } | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | G10_MPI_INLINE_DECL mpi_limb_t | ||
108 | mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, | ||
109 | mpi_ptr_t s2_ptr, mpi_size_t s2_size) | ||
110 | { | ||
111 | mpi_limb_t cy = 0; | ||
112 | |||
113 | if (s2_size) | ||
114 | cy = mpihelp_sub_n(res_ptr, s1_ptr, s2_ptr, s2_size); | ||
115 | |||
116 | if (s1_size - s2_size) | ||
117 | cy = mpihelp_sub_1(res_ptr + s2_size, s1_ptr + s2_size, | ||
118 | s1_size - s2_size, cy); | ||
119 | return cy; | ||
120 | } | ||
121 | |||
122 | #endif /*G10_MPI_INLINE_H */ | ||
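
mpihelp_add_1() above adds one limb at the low end and ripples the carry upward, stopping at the first limb that does not wrap; the final carry is handed back to the caller. A sketch of that carry propagation with 32-bit limbs (the names are made up for illustration and the loop is written iteratively rather than with the early-exit goto):

#include <assert.h>
#include <stdint.h>

/* Ripple-carry add of a single limb into a little-endian limb array,
 * mirroring mpihelp_add_1(): returns the carry out of the top limb. */
static uint32_t add_1(uint32_t *res, const uint32_t *s, unsigned n, uint32_t limb)
{
	unsigned i;
	uint32_t x = s[0] + limb;
	uint32_t carry = x < s[0];		/* unsigned overflow check */

	res[0] = x;
	for (i = 1; i < n; i++) {
		x = s[i] + carry;
		carry = carry && x == 0;	/* only 0xffffffff + 1 wraps */
		res[i] = x;
	}
	return carry;
}

int main(void)
{
	uint32_t a[2] = { 0xffffffff, 0xffffffff };	/* 2^64 - 1 */
	uint32_t r[2];

	assert(add_1(r, a, 2, 1) == 1);		/* carries out of both limbs */
	assert(r[0] == 0 && r[1] == 0);
	return 0;
}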
diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h new file mode 100644 index 000000000000..77adcf6bc257 --- /dev/null +++ b/lib/mpi/mpi-internal.h | |||
@@ -0,0 +1,261 @@ | |||
1 | /* mpi-internal.h - Internal to the Multi Precision Integers | ||
2 | * Copyright (C) 1994, 1996 Free Software Foundation, Inc. | ||
3 | * Copyright (C) 1998, 2000 Free Software Foundation, Inc. | ||
4 | * | ||
5 | * This file is part of GnuPG. | ||
6 | * | ||
7 | * GnuPG is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * GnuPG is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
20 | * | ||
21 | * Note: This code is heavily based on the GNU MP Library. | ||
22 | * Actually it's the same code with only minor changes in the | ||
23 | * way the data is stored; this is to support the abstraction | ||
24 | * of an optional secure memory allocation which may be used | ||
25 | * to avoid revealing of sensitive data due to paging etc. | ||
26 | * The GNU MP Library itself is published under the LGPL; | ||
27 | * however I decided to publish this code under the plain GPL. | ||
28 | */ | ||
29 | |||
30 | #ifndef G10_MPI_INTERNAL_H | ||
31 | #define G10_MPI_INTERNAL_H | ||
32 | |||
33 | #include <linux/module.h> | ||
34 | #include <linux/kernel.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/string.h> | ||
37 | #include <linux/mpi.h> | ||
38 | #include <linux/errno.h> | ||
39 | |||
40 | #define log_debug printk | ||
41 | #define log_bug printk | ||
42 | |||
43 | #define assert(x) \ | ||
44 | do { \ | ||
45 | if (!(x)) \ | ||
46 | log_bug("failed assertion\n"); \ | ||
47 | } while (0) | ||
48 | |||
49 | /* If KARATSUBA_THRESHOLD is not already defined, define it to a | ||
50 | * value which is good on most machines. */ | ||
51 | |||
52 | /* tested 4, 16, 32 and 64, where 16 gave the best performance when | ||
53 | * checking a 768 and a 1024 bit ElGamal signature. | ||
54 | * (wk 22.12.97) */ | ||
55 | #ifndef KARATSUBA_THRESHOLD | ||
56 | #define KARATSUBA_THRESHOLD 16 | ||
57 | #endif | ||
58 | |||
59 | /* The code can't handle KARATSUBA_THRESHOLD smaller than 2. */ | ||
60 | #if KARATSUBA_THRESHOLD < 2 | ||
61 | #undef KARATSUBA_THRESHOLD | ||
62 | #define KARATSUBA_THRESHOLD 2 | ||
63 | #endif | ||
64 | |||
65 | typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */ | ||
66 | typedef int mpi_size_t; /* (must be a signed type) */ | ||
67 | |||
68 | #define ABS(x) ((x) >= 0 ? (x) : -(x)) | ||
69 | #define MIN(l, o) ((l) < (o) ? (l) : (o)) | ||
70 | #define MAX(h, i) ((h) > (i) ? (h) : (i)) | ||
71 | |||
72 | static inline int RESIZE_IF_NEEDED(MPI a, unsigned b) | ||
73 | { | ||
74 | if (a->alloced < b) | ||
75 | return mpi_resize(a, b); | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | /* Copy N limbs from S to D. */ | ||
80 | #define MPN_COPY(d, s, n) \ | ||
81 | do { \ | ||
82 | mpi_size_t _i; \ | ||
83 | for (_i = 0; _i < (n); _i++) \ | ||
84 | (d)[_i] = (s)[_i]; \ | ||
85 | } while (0) | ||
86 | |||
87 | #define MPN_COPY_INCR(d, s, n) \ | ||
88 | do { \ | ||
89 | mpi_size_t _i; \ | ||
90 | for (_i = 0; _i < (n); _i++) \ | ||
91 | (d)[_i] = (s)[_i]; \ | ||
92 | } while (0) | ||
93 | |||
94 | #define MPN_COPY_DECR(d, s, n) \ | ||
95 | do { \ | ||
96 | mpi_size_t _i; \ | ||
97 | for (_i = (n)-1; _i >= 0; _i--) \ | ||
98 | (d)[_i] = (s)[_i]; \ | ||
99 | } while (0) | ||
100 | |||
101 | /* Zero N limbs at D */ | ||
102 | #define MPN_ZERO(d, n) \ | ||
103 | do { \ | ||
104 | int _i; \ | ||
105 | for (_i = 0; _i < (n); _i++) \ | ||
106 | (d)[_i] = 0; \ | ||
107 | } while (0) | ||
108 | |||
109 | #define MPN_NORMALIZE(d, n) \ | ||
110 | do { \ | ||
111 | while ((n) > 0) { \ | ||
112 | if ((d)[(n)-1]) \ | ||
113 | break; \ | ||
114 | (n)--; \ | ||
115 | } \ | ||
116 | } while (0) | ||
117 | |||
118 | #define MPN_NORMALIZE_NOT_ZERO(d, n) \ | ||
119 | do { \ | ||
120 | for (;;) { \ | ||
121 | if ((d)[(n)-1]) \ | ||
122 | break; \ | ||
123 | (n)--; \ | ||
124 | } \ | ||
125 | } while (0) | ||
126 | |||
127 | #define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \ | ||
128 | do { \ | ||
129 | if ((size) < KARATSUBA_THRESHOLD) \ | ||
130 | mul_n_basecase(prodp, up, vp, size); \ | ||
131 | else \ | ||
132 | mul_n(prodp, up, vp, size, tspace); \ | ||
133 | } while (0) | ||
134 | |||
135 | /* Divide the two-limb number in (NH,,NL) by D, with DI being the largest | ||
136 | * limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB). | ||
137 | * If this would yield overflow, DI should be the largest possible number | ||
138 | * (i.e., only ones). For correct operation, the most significant bit of D | ||
139 | * has to be set. Put the quotient in Q and the remainder in R. | ||
140 | */ | ||
141 | #define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \ | ||
142 | do { \ | ||
143 | mpi_limb_t _q, _ql, _r; \ | ||
144 | mpi_limb_t _xh, _xl; \ | ||
145 | umul_ppmm(_q, _ql, (nh), (di)); \ | ||
146 | _q += (nh); /* DI is 2**BITS_PER_MPI_LIMB too small */ \ | ||
147 | umul_ppmm(_xh, _xl, _q, (d)); \ | ||
148 | sub_ddmmss(_xh, _r, (nh), (nl), _xh, _xl); \ | ||
149 | if (_xh) { \ | ||
150 | sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \ | ||
151 | _q++; \ | ||
152 | if (_xh) { \ | ||
153 | sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \ | ||
154 | _q++; \ | ||
155 | } \ | ||
156 | } \ | ||
157 | if (_r >= (d)) { \ | ||
158 | _r -= (d); \ | ||
159 | _q++; \ | ||
160 | } \ | ||
161 | (r) = _r; \ | ||
162 | (q) = _q; \ | ||
163 | } while (0) | ||
164 | |||
165 | /*-- mpiutil.c --*/ | ||
166 | mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs); | ||
167 | void mpi_free_limb_space(mpi_ptr_t a); | ||
168 | void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs); | ||
169 | |||
170 | /*-- mpi-bit.c --*/ | ||
171 | void mpi_rshift_limbs(MPI a, unsigned int count); | ||
172 | int mpi_lshift_limbs(MPI a, unsigned int count); | ||
173 | |||
174 | /*-- mpihelp-add.c --*/ | ||
175 | mpi_limb_t mpihelp_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, | ||
176 | mpi_size_t s1_size, mpi_limb_t s2_limb); | ||
177 | mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, | ||
178 | mpi_ptr_t s2_ptr, mpi_size_t size); | ||
179 | mpi_limb_t mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, | ||
180 | mpi_ptr_t s2_ptr, mpi_size_t s2_size); | ||
181 | |||
182 | /*-- mpihelp-sub.c --*/ | ||
183 | mpi_limb_t mpihelp_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, | ||
184 | mpi_size_t s1_size, mpi_limb_t s2_limb); | ||
185 | mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, | ||
186 | mpi_ptr_t s2_ptr, mpi_size_t size); | ||
187 | mpi_limb_t mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, | ||
188 | mpi_ptr_t s2_ptr, mpi_size_t s2_size); | ||
189 | |||
190 | /*-- mpihelp-cmp.c --*/ | ||
191 | int mpihelp_cmp(mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size); | ||
192 | |||
193 | /*-- mpihelp-mul.c --*/ | ||
194 | |||
195 | struct karatsuba_ctx { | ||
196 | struct karatsuba_ctx *next; | ||
197 | mpi_ptr_t tspace; | ||
198 | mpi_size_t tspace_size; | ||
199 | mpi_ptr_t tp; | ||
200 | mpi_size_t tp_size; | ||
201 | }; | ||
202 | |||
203 | void mpihelp_release_karatsuba_ctx(struct karatsuba_ctx *ctx); | ||
204 | |||
205 | mpi_limb_t mpihelp_addmul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, | ||
206 | mpi_size_t s1_size, mpi_limb_t s2_limb); | ||
207 | mpi_limb_t mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, | ||
208 | mpi_size_t s1_size, mpi_limb_t s2_limb); | ||
209 | int mpihelp_mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size); | ||
210 | int mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize, | ||
211 | mpi_ptr_t vp, mpi_size_t vsize, mpi_limb_t *_result); | ||
212 | void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size); | ||
213 | void mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, | ||
214 | mpi_ptr_t tspace); | ||
215 | |||
216 | int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp, | ||
217 | mpi_ptr_t up, mpi_size_t usize, | ||
218 | mpi_ptr_t vp, mpi_size_t vsize, | ||
219 | struct karatsuba_ctx *ctx); | ||
220 | |||
221 | /*-- mpihelp-mul_1.c (or xxx/cpu/ *.S) --*/ | ||
222 | mpi_limb_t mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, | ||
223 | mpi_size_t s1_size, mpi_limb_t s2_limb); | ||
224 | |||
225 | /*-- mpihelp-div.c --*/ | ||
226 | mpi_limb_t mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size, | ||
227 | mpi_limb_t divisor_limb); | ||
228 | mpi_limb_t mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs, | ||
229 | mpi_ptr_t np, mpi_size_t nsize, | ||
230 | mpi_ptr_t dp, mpi_size_t dsize); | ||
231 | mpi_limb_t mpihelp_divmod_1(mpi_ptr_t quot_ptr, | ||
232 | mpi_ptr_t dividend_ptr, mpi_size_t dividend_size, | ||
233 | mpi_limb_t divisor_limb); | ||
234 | |||
235 | /*-- mpihelp-shift.c --*/ | ||
236 | mpi_limb_t mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, | ||
237 | unsigned cnt); | ||
238 | mpi_limb_t mpihelp_rshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, | ||
239 | unsigned cnt); | ||
240 | |||
241 | /* Define stuff for longlong.h. */ | ||
242 | #define W_TYPE_SIZE BITS_PER_MPI_LIMB | ||
243 | typedef mpi_limb_t UWtype; | ||
244 | typedef unsigned int UHWtype; | ||
245 | #if defined(__GNUC__) | ||
246 | typedef unsigned int UQItype __attribute__ ((mode(QI))); | ||
247 | typedef int SItype __attribute__ ((mode(SI))); | ||
248 | typedef unsigned int USItype __attribute__ ((mode(SI))); | ||
249 | typedef int DItype __attribute__ ((mode(DI))); | ||
250 | typedef unsigned int UDItype __attribute__ ((mode(DI))); | ||
251 | #else | ||
252 | typedef unsigned char UQItype; | ||
253 | typedef long SItype; | ||
254 | typedef unsigned long USItype; | ||
255 | #endif | ||
256 | |||
257 | #ifdef __GNUC__ | ||
258 | #include "mpi-inline.h" | ||
259 | #endif | ||
260 | |||
261 | #endif /*G10_MPI_INTERNAL_H */ | ||
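
KARATSUBA_THRESHOLD above decides when the multiply helpers switch from the quadratic base case to Karatsuba multiplication, which trades four half-size multiplications for three plus a few additions. The identity itself, checked on a single 32-bit word split into 16-bit halves (purely illustrative; this is not the kernel's limb-level mpihelp_mul_karatsuba_case()):

#include <assert.h>
#include <stdint.h>

/* Karatsuba on one 32-bit word: with u = uh*2^16 + ul and v = vh*2^16 + vl,
 * u*v = uh*vh*2^32 + ((uh+ul)*(vh+vl) - uh*vh - ul*vl)*2^16 + ul*vl,
 * i.e. three multiplications instead of four. */
static uint64_t karatsuba32(uint32_t u, uint32_t v)
{
	uint64_t uh = u >> 16, ul = u & 0xffff;
	uint64_t vh = v >> 16, vl = v & 0xffff;

	uint64_t hh  = uh * vh;				/* high halves */
	uint64_t ll  = ul * vl;				/* low halves  */
	uint64_t mid = (uh + ul) * (vh + vl) - hh - ll;	/* third multiply */

	return (hh << 32) + (mid << 16) + ll;
}

int main(void)
{
	uint32_t u = 0x12345678, v = 0x9abcdef0;

	assert(karatsuba32(u, v) == (uint64_t)u * v);
	return 0;
}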
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c new file mode 100644 index 000000000000..5464c8744ea9 --- /dev/null +++ b/lib/mpi/mpi-pow.c | |||
@@ -0,0 +1,323 @@ | |||
1 | /* mpi-pow.c - MPI functions | ||
2 | * Copyright (C) 1994, 1996, 1998, 2000 Free Software Foundation, Inc. | ||
3 | * | ||
4 | * This file is part of GnuPG. | ||
5 | * | ||
6 | * GnuPG is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * GnuPG is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
19 | * | ||
20 | * Note: This code is heavily based on the GNU MP Library. | ||
21 | * Actually it's the same code with only minor changes in the | ||
22 | * way the data is stored; this is to support the abstraction | ||
23 | * of an optional secure memory allocation which may be used | ||
24 | * to avoid revealing of sensitive data due to paging etc. | ||
25 | * The GNU MP Library itself is published under the LGPL; | ||
26 | * however I decided to publish this code under the plain GPL. | ||
27 | */ | ||
28 | |||
29 | #include <linux/string.h> | ||
30 | #include "mpi-internal.h" | ||
31 | #include "longlong.h" | ||
32 | |||
33 | /**************** | ||
34 | * RES = BASE ^ EXP mod MOD | ||
35 | */ | ||
36 | int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) | ||
37 | { | ||
38 | mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL; | ||
39 | mpi_ptr_t xp_marker = NULL; | ||
40 | mpi_ptr_t tspace = NULL; | ||
41 | mpi_ptr_t rp, ep, mp, bp; | ||
42 | mpi_size_t esize, msize, bsize, rsize; | ||
43 | int esign, msign, bsign, rsign; | ||
44 | mpi_size_t size; | ||
45 | int mod_shift_cnt; | ||
46 | int negative_result; | ||
47 | int assign_rp = 0; | ||
48 | mpi_size_t tsize = 0; /* to avoid compiler warning */ | ||
49 | /* fixme: we should check that the warning is void */ | ||
50 | int rc = -ENOMEM; | ||
51 | |||
52 | esize = exp->nlimbs; | ||
53 | msize = mod->nlimbs; | ||
54 | size = 2 * msize; | ||
55 | esign = exp->sign; | ||
56 | msign = mod->sign; | ||
57 | |||
58 | rp = res->d; | ||
59 | ep = exp->d; | ||
60 | |||
61 | if (!msize) | ||
62 | return -EINVAL; | ||
63 | |||
64 | if (!esize) { | ||
65 | /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 | ||
66 | * depending on whether MOD equals 1. */ | ||
67 | rp[0] = 1; | ||
68 | res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; | ||
69 | res->sign = 0; | ||
70 | goto leave; | ||
71 | } | ||
72 | |||
73 | /* Normalize MOD (i.e. make its most significant bit set) as required by | ||
74 | * mpn_divrem. This will make the intermediate values in the calculation | ||
75 | * slightly larger, but the correct result is obtained after a final | ||
76 | * reduction using the original MOD value. */ | ||
77 | mp = mp_marker = mpi_alloc_limb_space(msize); | ||
78 | if (!mp) | ||
79 | goto enomem; | ||
80 | mod_shift_cnt = count_leading_zeros(mod->d[msize - 1]); | ||
81 | if (mod_shift_cnt) | ||
82 | mpihelp_lshift(mp, mod->d, msize, mod_shift_cnt); | ||
83 | else | ||
84 | MPN_COPY(mp, mod->d, msize); | ||
85 | |||
86 | bsize = base->nlimbs; | ||
87 | bsign = base->sign; | ||
88 | if (bsize > msize) { /* The base is larger than the modulus. Reduce it. */ | ||
89 | /* Allocate (BSIZE + 1) with space for remainder and quotient. | ||
90 | * (The quotient is (bsize - msize + 1) limbs.) */ | ||
91 | bp = bp_marker = mpi_alloc_limb_space(bsize + 1); | ||
92 | if (!bp) | ||
93 | goto enomem; | ||
94 | MPN_COPY(bp, base->d, bsize); | ||
95 | /* We don't care about the quotient, store it above the remainder, | ||
96 | * at BP + MSIZE. */ | ||
97 | mpihelp_divrem(bp + msize, 0, bp, bsize, mp, msize); | ||
98 | bsize = msize; | ||
99 | /* Canonicalize the base, since we are going to multiply with it | ||
100 | * quite a few times. */ | ||
101 | MPN_NORMALIZE(bp, bsize); | ||
102 | } else | ||
103 | bp = base->d; | ||
104 | |||
105 | if (!bsize) { | ||
106 | res->nlimbs = 0; | ||
107 | res->sign = 0; | ||
108 | goto leave; | ||
109 | } | ||
110 | |||
111 | if (res->alloced < size) { | ||
112 | /* We have to allocate more space for RES. If any of the input | ||
113 | * parameters are identical to RES, defer deallocation of the old | ||
114 | * space. */ | ||
115 | if (rp == ep || rp == mp || rp == bp) { | ||
116 | rp = mpi_alloc_limb_space(size); | ||
117 | if (!rp) | ||
118 | goto enomem; | ||
119 | assign_rp = 1; | ||
120 | } else { | ||
121 | if (mpi_resize(res, size) < 0) | ||
122 | goto enomem; | ||
123 | rp = res->d; | ||
124 | } | ||
125 | } else { /* Make BASE, EXP and MOD not overlap with RES. */ | ||
126 | if (rp == bp) { | ||
127 | /* RES and BASE are identical. Allocate temp. space for BASE. */ | ||
128 | BUG_ON(bp_marker); | ||
129 | bp = bp_marker = mpi_alloc_limb_space(bsize); | ||
130 | if (!bp) | ||
131 | goto enomem; | ||
132 | MPN_COPY(bp, rp, bsize); | ||
133 | } | ||
134 | if (rp == ep) { | ||
135 | /* RES and EXP are identical. Allocate temp. space for EXP. */ | ||
136 | ep = ep_marker = mpi_alloc_limb_space(esize); | ||
137 | if (!ep) | ||
138 | goto enomem; | ||
139 | MPN_COPY(ep, rp, esize); | ||
140 | } | ||
141 | if (rp == mp) { | ||
142 | /* RES and MOD are identical. Allocate temporary space for MOD. */ | ||
143 | BUG_ON(mp_marker); | ||
144 | mp = mp_marker = mpi_alloc_limb_space(msize); | ||
145 | if (!mp) | ||
146 | goto enomem; | ||
147 | MPN_COPY(mp, rp, msize); | ||
148 | } | ||
149 | } | ||
150 | |||
151 | MPN_COPY(rp, bp, bsize); | ||
152 | rsize = bsize; | ||
153 | rsign = bsign; | ||
154 | |||
155 | { | ||
156 | mpi_size_t i; | ||
157 | mpi_ptr_t xp; | ||
158 | int c; | ||
159 | mpi_limb_t e; | ||
160 | mpi_limb_t carry_limb; | ||
161 | struct karatsuba_ctx karactx; | ||
162 | |||
163 | xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1)); | ||
164 | if (!xp) | ||
165 | goto enomem; | ||
166 | |||
167 | memset(&karactx, 0, sizeof karactx); | ||
168 | negative_result = (ep[0] & 1) && base->sign; | ||
169 | |||
170 | i = esize - 1; | ||
171 | e = ep[i]; | ||
172 | c = count_leading_zeros(e); | ||
173 | e = (e << c) << 1; /* shift the exp bits to the left, lose msb */ | ||
174 | c = BITS_PER_MPI_LIMB - 1 - c; | ||
175 | |||
176 | /* Main loop. | ||
177 | * | ||
178 | * Make the result be pointed to alternately by XP and RP. This | ||
179 | * helps us avoid block copying, which would otherwise be necessary | ||
180 | * with the overlap restrictions of mpihelp_divmod. With 50% probability | ||
181 | * the result after this loop will be in the area originally pointed | ||
182 | * by RP (==RES->d), and with 50% probability in the area originally | ||
183 | * pointed to by XP. | ||
184 | */ | ||
185 | |||
186 | for (;;) { | ||
187 | while (c) { | ||
188 | mpi_ptr_t tp; | ||
189 | mpi_size_t xsize; | ||
190 | |||
191 | /*if (mpihelp_mul_n(xp, rp, rp, rsize) < 0) goto enomem */ | ||
192 | if (rsize < KARATSUBA_THRESHOLD) | ||
193 | mpih_sqr_n_basecase(xp, rp, rsize); | ||
194 | else { | ||
195 | if (!tspace) { | ||
196 | tsize = 2 * rsize; | ||
197 | tspace = | ||
198 | mpi_alloc_limb_space(tsize); | ||
199 | if (!tspace) | ||
200 | goto enomem; | ||
201 | } else if (tsize < (2 * rsize)) { | ||
202 | mpi_free_limb_space(tspace); | ||
203 | tsize = 2 * rsize; | ||
204 | tspace = | ||
205 | mpi_alloc_limb_space(tsize); | ||
206 | if (!tspace) | ||
207 | goto enomem; | ||
208 | } | ||
209 | mpih_sqr_n(xp, rp, rsize, tspace); | ||
210 | } | ||
211 | |||
212 | xsize = 2 * rsize; | ||
213 | if (xsize > msize) { | ||
214 | mpihelp_divrem(xp + msize, 0, xp, xsize, | ||
215 | mp, msize); | ||
216 | xsize = msize; | ||
217 | } | ||
218 | |||
219 | tp = rp; | ||
220 | rp = xp; | ||
221 | xp = tp; | ||
222 | rsize = xsize; | ||
223 | |||
224 | if ((mpi_limb_signed_t) e < 0) { | ||
225 | /*mpihelp_mul( xp, rp, rsize, bp, bsize ); */ | ||
226 | if (bsize < KARATSUBA_THRESHOLD) { | ||
227 | mpi_limb_t tmp; | ||
228 | if (mpihelp_mul | ||
229 | (xp, rp, rsize, bp, bsize, | ||
230 | &tmp) < 0) | ||
231 | goto enomem; | ||
232 | } else { | ||
233 | if (mpihelp_mul_karatsuba_case | ||
234 | (xp, rp, rsize, bp, bsize, | ||
235 | &karactx) < 0) | ||
236 | goto enomem; | ||
237 | } | ||
238 | |||
239 | xsize = rsize + bsize; | ||
240 | if (xsize > msize) { | ||
241 | mpihelp_divrem(xp + msize, 0, | ||
242 | xp, xsize, mp, | ||
243 | msize); | ||
244 | xsize = msize; | ||
245 | } | ||
246 | |||
247 | tp = rp; | ||
248 | rp = xp; | ||
249 | xp = tp; | ||
250 | rsize = xsize; | ||
251 | } | ||
252 | e <<= 1; | ||
253 | c--; | ||
254 | } | ||
255 | |||
256 | i--; | ||
257 | if (i < 0) | ||
258 | break; | ||
259 | e = ep[i]; | ||
260 | c = BITS_PER_MPI_LIMB; | ||
261 | } | ||
262 | |||
263 | /* We shifted MOD, the modulo reduction argument, left MOD_SHIFT_CNT | ||
264 | * steps. Adjust the result by reducing it with the original MOD. | ||
265 | * | ||
266 | * Also make sure the result is put in RES->d (where it already | ||
267 | * might be, see above). | ||
268 | */ | ||
269 | if (mod_shift_cnt) { | ||
270 | carry_limb = | ||
271 | mpihelp_lshift(res->d, rp, rsize, mod_shift_cnt); | ||
272 | rp = res->d; | ||
273 | if (carry_limb) { | ||
274 | rp[rsize] = carry_limb; | ||
275 | rsize++; | ||
276 | } | ||
277 | } else { | ||
278 | MPN_COPY(res->d, rp, rsize); | ||
279 | rp = res->d; | ||
280 | } | ||
281 | |||
282 | if (rsize >= msize) { | ||
283 | mpihelp_divrem(rp + msize, 0, rp, rsize, mp, msize); | ||
284 | rsize = msize; | ||
285 | } | ||
286 | |||
287 | /* Remove any leading zero words from the result. */ | ||
288 | if (mod_shift_cnt) | ||
289 | mpihelp_rshift(rp, rp, rsize, mod_shift_cnt); | ||
290 | MPN_NORMALIZE(rp, rsize); | ||
291 | |||
292 | mpihelp_release_karatsuba_ctx(&karactx); | ||
293 | } | ||
294 | |||
295 | if (negative_result && rsize) { | ||
296 | if (mod_shift_cnt) | ||
297 | mpihelp_rshift(mp, mp, msize, mod_shift_cnt); | ||
298 | mpihelp_sub(rp, mp, msize, rp, rsize); | ||
299 | rsize = msize; | ||
300 | rsign = msign; | ||
301 | MPN_NORMALIZE(rp, rsize); | ||
302 | } | ||
303 | res->nlimbs = rsize; | ||
304 | res->sign = rsign; | ||
305 | |||
306 | leave: | ||
307 | rc = 0; | ||
308 | enomem: | ||
309 | if (assign_rp) | ||
310 | mpi_assign_limb_space(res, rp, size); | ||
311 | if (mp_marker) | ||
312 | mpi_free_limb_space(mp_marker); | ||
313 | if (bp_marker) | ||
314 | mpi_free_limb_space(bp_marker); | ||
315 | if (ep_marker) | ||
316 | mpi_free_limb_space(ep_marker); | ||
317 | if (xp_marker) | ||
318 | mpi_free_limb_space(xp_marker); | ||
319 | if (tspace) | ||
320 | mpi_free_limb_space(tspace); | ||
321 | return rc; | ||
322 | } | ||
323 | EXPORT_SYMBOL_GPL(mpi_powm); | ||
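
The main loop of mpi_powm() above is left-to-right binary exponentiation: scan the exponent from its most significant bit, square the running result for every bit, multiply in the (reduced) base whenever the bit is set, and reduce modulo MOD after each step. The same control flow on machine words, as a hedged sketch that leans on GCC's unsigned __int128 for the exact reduction (the function name is illustrative, not part of the MPI API):

#include <assert.h>
#include <stdint.h>

/* Left-to-right square-and-multiply, mirroring the bit scan in mpi_powm():
 * high bit first, square always, multiply only when the bit is set. */
static uint64_t powm64(uint64_t base, uint64_t exp, uint64_t mod)
{
	uint64_t r = 1;
	int i;

	base %= mod;
	for (i = 63; i >= 0; i--) {
		r = (uint64_t)(((unsigned __int128)r * r) % mod);	/* square */
		if (exp & (1ull << i))					/* bit set? */
			r = (uint64_t)(((unsigned __int128)r * base) % mod);
	}
	return r;
}

int main(void)
{
	/* Fermat's little theorem: 2^(p-1) mod p == 1 for prime p = 10^9 + 7. */
	assert(powm64(2, 1000000006, 1000000007) == 1);
	return 0;
}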
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c new file mode 100644 index 000000000000..3962b7f7fe3f --- /dev/null +++ b/lib/mpi/mpicoder.c | |||
@@ -0,0 +1,260 @@ | |||
1 | /* mpicoder.c - Coder for the external representation of MPIs | ||
2 | * Copyright (C) 1998, 1999 Free Software Foundation, Inc. | ||
3 | * | ||
4 | * This file is part of GnuPG. | ||
5 | * | ||
6 | * GnuPG is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * GnuPG is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
19 | */ | ||
20 | |||
21 | #include <linux/bitops.h> | ||
22 | #include <asm-generic/bitops/count_zeros.h> | ||
23 | #include "mpi-internal.h" | ||
24 | |||
25 | #define MAX_EXTERN_MPI_BITS 16384 | ||
26 | |||
27 | /** | ||
28 | * mpi_read_raw_data - Read a raw byte stream as a positive integer | ||
29 | * @xbuffer: The data to read | ||
30 | * @nbytes: The amount of data to read | ||
31 | */ | ||
32 | MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes) | ||
33 | { | ||
34 | const uint8_t *buffer = xbuffer; | ||
35 | int i, j; | ||
36 | unsigned nbits, nlimbs; | ||
37 | mpi_limb_t a; | ||
38 | MPI val = NULL; | ||
39 | |||
40 | while (nbytes > 0 && buffer[0] == 0) { | ||
41 | buffer++; | ||
42 | nbytes--; | ||
43 | } | ||
44 | |||
45 | nbits = nbytes * 8; | ||
46 | if (nbits > MAX_EXTERN_MPI_BITS) { | ||
47 | pr_info("MPI: mpi too large (%u bits)\n", nbits); | ||
48 | return NULL; | ||
49 | } | ||
50 | if (nbytes > 0) | ||
51 | nbits -= count_leading_zeros(buffer[0]) - (BITS_PER_LONG - 8); | ||
52 | else | ||
53 | nbits = 0; | ||
54 | |||
55 | nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB; | ||
56 | val = mpi_alloc(nlimbs); | ||
57 | if (!val) | ||
58 | return NULL; | ||
59 | val->nbits = nbits; | ||
60 | val->sign = 0; | ||
61 | val->nlimbs = nlimbs; | ||
62 | |||
63 | if (nbytes > 0) { | ||
64 | i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB; | ||
65 | i %= BYTES_PER_MPI_LIMB; | ||
66 | for (j = nlimbs; j > 0; j--) { | ||
67 | a = 0; | ||
68 | for (; i < BYTES_PER_MPI_LIMB; i++) { | ||
69 | a <<= 8; | ||
70 | a |= *buffer++; | ||
71 | } | ||
72 | i = 0; | ||
73 | val->d[j - 1] = a; | ||
74 | } | ||
75 | } | ||
76 | return val; | ||
77 | } | ||
78 | EXPORT_SYMBOL_GPL(mpi_read_raw_data); | ||
79 | |||
80 | MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread) | ||
81 | { | ||
82 | const uint8_t *buffer = xbuffer; | ||
83 | int i, j; | ||
84 | unsigned nbits, nbytes, nlimbs, nread = 0; | ||
85 | mpi_limb_t a; | ||
86 | MPI val = NULL; | ||
87 | |||
88 | if (*ret_nread < 2) | ||
89 | goto leave; | ||
90 | nbits = buffer[0] << 8 | buffer[1]; | ||
91 | |||
92 | if (nbits > MAX_EXTERN_MPI_BITS) { | ||
93 | pr_info("MPI: mpi too large (%u bits)\n", nbits); | ||
94 | goto leave; | ||
95 | } | ||
96 | buffer += 2; | ||
97 | nread = 2; | ||
98 | |||
99 | nbytes = (nbits + 7) / 8; | ||
100 | nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB; | ||
101 | val = mpi_alloc(nlimbs); | ||
102 | if (!val) | ||
103 | return NULL; | ||
104 | i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB; | ||
105 | i %= BYTES_PER_MPI_LIMB; | ||
106 | val->nbits = nbits; | ||
107 | j = val->nlimbs = nlimbs; | ||
108 | val->sign = 0; | ||
109 | for (; j > 0; j--) { | ||
110 | a = 0; | ||
111 | for (; i < BYTES_PER_MPI_LIMB; i++) { | ||
112 | if (++nread > *ret_nread) { | ||
113 | printk | ||
114 | ("MPI: mpi larger than buffer nread=%d ret_nread=%d\n", | ||
115 | nread, *ret_nread); | ||
116 | goto leave; | ||
117 | } | ||
118 | a <<= 8; | ||
119 | a |= *buffer++; | ||
120 | } | ||
121 | i = 0; | ||
122 | val->d[j - 1] = a; | ||
123 | } | ||
124 | |||
125 | leave: | ||
126 | *ret_nread = nread; | ||
127 | return val; | ||
128 | } | ||
129 | EXPORT_SYMBOL_GPL(mpi_read_from_buffer); | ||
130 | |||
131 | /**************** | ||
132 | * Return an allocated buffer with the MPI (msb first). | ||
133 | * NBYTES receives the length of this buffer. The caller must free the | ||
134 | * returned buffer. (This function returns a buffer with NBYTES set to | ||
135 | * zero if the value of A is zero.) If SIGN is not NULL, it will be set | ||
136 | * to the sign of A. | ||
137 | */ | ||
138 | void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) | ||
139 | { | ||
140 | uint8_t *p, *buffer; | ||
141 | mpi_limb_t alimb; | ||
142 | int i; | ||
143 | unsigned int n; | ||
144 | |||
145 | if (sign) | ||
146 | *sign = a->sign; | ||
147 | *nbytes = n = a->nlimbs * BYTES_PER_MPI_LIMB; | ||
148 | if (!n) | ||
149 | n++; /* avoid zero length allocation */ | ||
150 | p = buffer = kmalloc(n, GFP_KERNEL); | ||
151 | if (!p) | ||
152 | return NULL; | ||
153 | |||
154 | for (i = a->nlimbs - 1; i >= 0; i--) { | ||
155 | alimb = a->d[i]; | ||
156 | #if BYTES_PER_MPI_LIMB == 4 | ||
157 | *p++ = alimb >> 24; | ||
158 | *p++ = alimb >> 16; | ||
159 | *p++ = alimb >> 8; | ||
160 | *p++ = alimb; | ||
161 | #elif BYTES_PER_MPI_LIMB == 8 | ||
162 | *p++ = alimb >> 56; | ||
163 | *p++ = alimb >> 48; | ||
164 | *p++ = alimb >> 40; | ||
165 | *p++ = alimb >> 32; | ||
166 | *p++ = alimb >> 24; | ||
167 | *p++ = alimb >> 16; | ||
168 | *p++ = alimb >> 8; | ||
169 | *p++ = alimb; | ||
170 | #else | ||
171 | #error please implement for this limb size. | ||
172 | #endif | ||
173 | } | ||
174 | |||
175 | /* This is sub-optimal but we have to move the data down in place | ||
176 | * because the caller must be able to kfree() the returned buffer */ | ||
177 | for (p = buffer; !*p && *nbytes; p++, --*nbytes) | ||
178 | ; | ||
179 | if (p != buffer) | ||
180 | memmove(buffer, p, *nbytes); | ||
181 | |||
182 | return buffer; | ||
183 | } | ||
184 | EXPORT_SYMBOL_GPL(mpi_get_buffer); | ||
185 | |||
186 | /**************** | ||
187 | * Use BUFFER to update MPI. | ||
188 | */ | ||
189 | int mpi_set_buffer(MPI a, const void *xbuffer, unsigned nbytes, int sign) | ||
190 | { | ||
191 | const uint8_t *buffer = xbuffer, *p; | ||
192 | mpi_limb_t alimb; | ||
193 | int nlimbs; | ||
194 | int i; | ||
195 | |||
196 | nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB; | ||
197 | if (RESIZE_IF_NEEDED(a, nlimbs) < 0) | ||
198 | return -ENOMEM; | ||
199 | a->sign = sign; | ||
200 | |||
201 | for (i = 0, p = buffer + nbytes - 1; p >= buffer + BYTES_PER_MPI_LIMB;) { | ||
202 | #if BYTES_PER_MPI_LIMB == 4 | ||
203 | alimb = (mpi_limb_t) *p--; | ||
204 | alimb |= (mpi_limb_t) *p-- << 8; | ||
205 | alimb |= (mpi_limb_t) *p-- << 16; | ||
206 | alimb |= (mpi_limb_t) *p-- << 24; | ||
207 | #elif BYTES_PER_MPI_LIMB == 8 | ||
208 | alimb = (mpi_limb_t) *p--; | ||
209 | alimb |= (mpi_limb_t) *p-- << 8; | ||
210 | alimb |= (mpi_limb_t) *p-- << 16; | ||
211 | alimb |= (mpi_limb_t) *p-- << 24; | ||
212 | alimb |= (mpi_limb_t) *p-- << 32; | ||
213 | alimb |= (mpi_limb_t) *p-- << 40; | ||
214 | alimb |= (mpi_limb_t) *p-- << 48; | ||
215 | alimb |= (mpi_limb_t) *p-- << 56; | ||
216 | #else | ||
217 | #error please implement for this limb size. | ||
218 | #endif | ||
219 | a->d[i++] = alimb; | ||
220 | } | ||
221 | if (p >= buffer) { | ||
222 | #if BYTES_PER_MPI_LIMB == 4 | ||
223 | alimb = *p--; | ||
224 | if (p >= buffer) | ||
225 | alimb |= (mpi_limb_t) *p-- << 8; | ||
226 | if (p >= buffer) | ||
227 | alimb |= (mpi_limb_t) *p-- << 16; | ||
228 | if (p >= buffer) | ||
229 | alimb |= (mpi_limb_t) *p-- << 24; | ||
230 | #elif BYTES_PER_MPI_LIMB == 8 | ||
231 | alimb = (mpi_limb_t) *p--; | ||
232 | if (p >= buffer) | ||
233 | alimb |= (mpi_limb_t) *p-- << 8; | ||
234 | if (p >= buffer) | ||
235 | alimb |= (mpi_limb_t) *p-- << 16; | ||
236 | if (p >= buffer) | ||
237 | alimb |= (mpi_limb_t) *p-- << 24; | ||
238 | if (p >= buffer) | ||
239 | alimb |= (mpi_limb_t) *p-- << 32; | ||
240 | if (p >= buffer) | ||
241 | alimb |= (mpi_limb_t) *p-- << 40; | ||
242 | if (p >= buffer) | ||
243 | alimb |= (mpi_limb_t) *p-- << 48; | ||
244 | if (p >= buffer) | ||
245 | alimb |= (mpi_limb_t) *p-- << 56; | ||
246 | #else | ||
247 | #error please implement for this limb size. | ||
248 | #endif | ||
249 | a->d[i++] = alimb; | ||
250 | } | ||
251 | a->nlimbs = i; | ||
252 | |||
253 | if (i != nlimbs) { | ||
254 | pr_emerg("MPI: mpi_set_buffer: Assertion failed (%d != %d)\n", i, | ||
255 | nlimbs); | ||
256 | BUG(); | ||
257 | } | ||
258 | return 0; | ||
259 | } | ||
260 | EXPORT_SYMBOL_GPL(mpi_set_buffer); | ||
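mpi_read_raw_data() strips leading zero octets and then packs the remaining
big-endian byte string into limbs, least significant limb first;
mpi_get_buffer() is the inverse. The standalone sketch below mirrors only the
packing step, assuming a 64-bit limb so it can be compiled and run in
userspace; pack_limbs() is an illustrative name, not a kernel function.

	#include <stdint.h>
	#include <stdio.h>

	/* Pack a big-endian byte string into 64-bit "limbs", least
	 * significant limb first, the same layout mpi_read_raw_data()
	 * builds above. */
	static size_t pack_limbs(const uint8_t *buf, size_t nbytes,
				 uint64_t *limbs)
	{
		size_t nlimbs = (nbytes + 7) / 8;
		size_t i = (8 - nbytes % 8) % 8;  /* pad bytes in the top limb */
		size_t j;

		for (j = nlimbs; j > 0; j--) {
			uint64_t a = 0;

			for (; i < 8; i++) {
				a <<= 8;
				a |= *buf++;
			}
			i = 0;
			limbs[j - 1] = a;
		}
		return nlimbs;
	}

	int main(void)
	{
		const uint8_t be[] = { 0x01, 0x02, 0x03 };
		uint64_t limbs[1];

		pack_limbs(be, sizeof(be), limbs);
		printf("limb0 = %#llx\n", (unsigned long long)limbs[0]);
		return 0;	/* prints limb0 = 0x10203 */
	}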
diff --git a/lib/mpi/mpih-cmp.c b/lib/mpi/mpih-cmp.c new file mode 100644 index 000000000000..b2fd39677f1b --- /dev/null +++ b/lib/mpi/mpih-cmp.c | |||
@@ -0,0 +1,56 @@ | |||
1 | /* mpihelp-cmp.c - MPI helper functions | ||
2 | * Copyright (C) 1994, 1996 Free Software Foundation, Inc. | ||
3 | * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc. | ||
4 | * | ||
5 | * This file is part of GnuPG. | ||
6 | * | ||
7 | * GnuPG is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * GnuPG is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
20 | * | ||
21 | * Note: This code is heavily based on the GNU MP Library. | ||
22 | * Actually it's the same code with only minor changes in the | ||
23 | * way the data is stored; this is to support the abstraction | ||
24 | * of an optional secure memory allocation which may be used | ||
25 | * to avoid revealing of sensitive data due to paging etc. | ||
26 | * The GNU MP Library itself is published under the LGPL; | ||
27 | * however I decided to publish this code under the plain GPL. | ||
28 | */ | ||
29 | |||
30 | #include "mpi-internal.h" | ||
31 | |||
32 | /**************** | ||
33 | * Compare OP1_PTR/OP1_SIZE with OP2_PTR/OP2_SIZE. | ||
34 | * There are no restrictions on the relative sizes of | ||
35 | * the two arguments. | ||
36 | * Return 1 if OP1 > OP2, 0 if they are equal, and -1 if OP1 < OP2. | ||
37 | */ | ||
38 | int mpihelp_cmp(mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size) | ||
39 | { | ||
40 | mpi_size_t i; | ||
41 | mpi_limb_t op1_word, op2_word; | ||
42 | |||
43 | for (i = size - 1; i >= 0; i--) { | ||
44 | op1_word = op1_ptr[i]; | ||
45 | op2_word = op2_ptr[i]; | ||
46 | if (op1_word != op2_word) | ||
47 | goto diff; | ||
48 | } | ||
49 | return 0; | ||
50 | |||
51 | diff: | ||
52 | /* This can *not* be simplified to | ||
53 | * op1_word - op2_word | ||
54 | * since that expression might give signed overflow. */ | ||
55 | return (op1_word > op2_word) ? 1 : -1; | ||
56 | } | ||
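mpihelp_cmp() scans from the most significant limb downward and, at the first
difference, derives the sign from a comparison rather than a subtraction. The
userspace check below, with uint64_t standing in for mpi_limb_t and limb_cmp()
as an illustrative name, shows why returning op1_word - op2_word narrowed to
int would be wrong.

	#include <stdint.h>
	#include <stdio.h>

	/* Returning (int)(a - b) is broken for full-width limbs: the
	 * difference of two 64-bit words does not fit in an int, and the
	 * narrowing can yield 0 or the wrong sign. Compare instead, as
	 * mpihelp_cmp() does. */
	static int limb_cmp(uint64_t a, uint64_t b)
	{
		return (a > b) ? 1 : (a < b) ? -1 : 0;
	}

	int main(void)
	{
		uint64_t a = 0x100000000ULL, b = 0;

		/* On common ABIs the difference narrows to 0. */
		printf("broken: %d, correct: %d\n", (int)(a - b), limb_cmp(a, b));
		return 0;
	}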
diff --git a/lib/mpi/mpih-div.c b/lib/mpi/mpih-div.c new file mode 100644 index 000000000000..c57d1d46295e --- /dev/null +++ b/lib/mpi/mpih-div.c | |||
@@ -0,0 +1,236 @@ | |||
1 | /* mpihelp-div.c - MPI helper functions | ||
2 | * Copyright (C) 1994, 1996 Free Software Foundation, Inc. | ||
3 | * Copyright (C) 1998, 1999 Free Software Foundation, Inc. | ||
4 | * | ||
5 | * This file is part of GnuPG. | ||
6 | * | ||
7 | * GnuPG is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * GnuPG is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
20 | * | ||
21 | * Note: This code is heavily based on the GNU MP Library. | ||
22 | * Actually it's the same code with only minor changes in the | ||
23 | * way the data is stored; this is to support the abstraction | ||
24 | * of an optional secure memory allocation which may be used | ||
25 | * to avoid revealing of sensitive data due to paging etc. | ||
26 | * The GNU MP Library itself is published under the LGPL; | ||
27 | * however I decided to publish this code under the plain GPL. | ||
28 | */ | ||
29 | |||
30 | #include "mpi-internal.h" | ||
31 | #include "longlong.h" | ||
32 | |||
33 | #ifndef UMUL_TIME | ||
34 | #define UMUL_TIME 1 | ||
35 | #endif | ||
36 | #ifndef UDIV_TIME | ||
37 | #define UDIV_TIME UMUL_TIME | ||
38 | #endif | ||
39 | |||
40 | /* Divide num (NP/NSIZE) by den (DP/DSIZE) and write | ||
41 | * the NSIZE-DSIZE least significant quotient limbs at QP | ||
42 | * and the DSIZE long remainder at NP. If QEXTRA_LIMBS is | ||
43 | * non-zero, generate that many fraction bits and append them after the | ||
44 | * other quotient limbs. | ||
45 | * Return the most significant limb of the quotient, this is always 0 or 1. | ||
46 | * | ||
47 | * Preconditions: | ||
48 | * 0. NSIZE >= DSIZE. | ||
49 | * 1. The most significant bit of the divisor must be set. | ||
50 | * 2. QP must either not overlap with the input operands at all, or | ||
51 | * QP + DSIZE >= NP must hold true. (This means that it's | ||
52 | * possible to put the quotient in the high part of NUM, right after the | ||
53 | * remainder in NUM.) | ||
54 | * 3. NSIZE >= DSIZE, even if QEXTRA_LIMBS is non-zero. | ||
55 | */ | ||
56 | |||
57 | mpi_limb_t | ||
58 | mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs, | ||
59 | mpi_ptr_t np, mpi_size_t nsize, mpi_ptr_t dp, mpi_size_t dsize) | ||
60 | { | ||
61 | mpi_limb_t most_significant_q_limb = 0; | ||
62 | |||
63 | switch (dsize) { | ||
64 | case 0: | ||
65 | /* We are asked to divide by zero, so go ahead and do it! (To make | ||
66 | the compiler not remove this statement, return the value.) */ | ||
67 | /* | ||
68 | * existing clients of this function have been modified | ||
69 | * not to call it with dsize == 0, so this should not happen | ||
70 | */ | ||
71 | return 1 / dsize; | ||
72 | |||
73 | case 1: | ||
74 | { | ||
75 | mpi_size_t i; | ||
76 | mpi_limb_t n1; | ||
77 | mpi_limb_t d; | ||
78 | |||
79 | d = dp[0]; | ||
80 | n1 = np[nsize - 1]; | ||
81 | |||
82 | if (n1 >= d) { | ||
83 | n1 -= d; | ||
84 | most_significant_q_limb = 1; | ||
85 | } | ||
86 | |||
87 | qp += qextra_limbs; | ||
88 | for (i = nsize - 2; i >= 0; i--) | ||
89 | udiv_qrnnd(qp[i], n1, n1, np[i], d); | ||
90 | qp -= qextra_limbs; | ||
91 | |||
92 | for (i = qextra_limbs - 1; i >= 0; i--) | ||
93 | udiv_qrnnd(qp[i], n1, n1, 0, d); | ||
94 | |||
95 | np[0] = n1; | ||
96 | } | ||
97 | break; | ||
98 | |||
99 | case 2: | ||
100 | { | ||
101 | mpi_size_t i; | ||
102 | mpi_limb_t n1, n0, n2; | ||
103 | mpi_limb_t d1, d0; | ||
104 | |||
105 | np += nsize - 2; | ||
106 | d1 = dp[1]; | ||
107 | d0 = dp[0]; | ||
108 | n1 = np[1]; | ||
109 | n0 = np[0]; | ||
110 | |||
111 | if (n1 >= d1 && (n1 > d1 || n0 >= d0)) { | ||
112 | sub_ddmmss(n1, n0, n1, n0, d1, d0); | ||
113 | most_significant_q_limb = 1; | ||
114 | } | ||
115 | |||
116 | for (i = qextra_limbs + nsize - 2 - 1; i >= 0; i--) { | ||
117 | mpi_limb_t q; | ||
118 | mpi_limb_t r; | ||
119 | |||
120 | if (i >= qextra_limbs) | ||
121 | np--; | ||
122 | else | ||
123 | np[0] = 0; | ||
124 | |||
125 | if (n1 == d1) { | ||
126 | /* Q should be either 111..111 or 111..110. Need special | ||
127 | * treatment of this rare case as normal division would | ||
128 | * give overflow. */ | ||
129 | q = ~(mpi_limb_t) 0; | ||
130 | |||
131 | r = n0 + d1; | ||
132 | if (r < d1) { /* Carry in the addition? */ | ||
133 | add_ssaaaa(n1, n0, r - d0, | ||
134 | np[0], 0, d0); | ||
135 | qp[i] = q; | ||
136 | continue; | ||
137 | } | ||
138 | n1 = d0 - (d0 != 0 ? 1 : 0); | ||
139 | n0 = -d0; | ||
140 | } else { | ||
141 | udiv_qrnnd(q, r, n1, n0, d1); | ||
142 | umul_ppmm(n1, n0, d0, q); | ||
143 | } | ||
144 | |||
145 | n2 = np[0]; | ||
146 | q_test: | ||
147 | if (n1 > r || (n1 == r && n0 > n2)) { | ||
148 | /* The estimated Q was too large. */ | ||
149 | q--; | ||
150 | sub_ddmmss(n1, n0, n1, n0, 0, d0); | ||
151 | r += d1; | ||
152 | if (r >= d1) /* If not carry, test Q again. */ | ||
153 | goto q_test; | ||
154 | } | ||
155 | |||
156 | qp[i] = q; | ||
157 | sub_ddmmss(n1, n0, r, n2, n1, n0); | ||
158 | } | ||
159 | np[1] = n1; | ||
160 | np[0] = n0; | ||
161 | } | ||
162 | break; | ||
163 | |||
164 | default: | ||
165 | { | ||
166 | mpi_size_t i; | ||
167 | mpi_limb_t dX, d1, n0; | ||
168 | |||
169 | np += nsize - dsize; | ||
170 | dX = dp[dsize - 1]; | ||
171 | d1 = dp[dsize - 2]; | ||
172 | n0 = np[dsize - 1]; | ||
173 | |||
174 | if (n0 >= dX) { | ||
175 | if (n0 > dX | ||
176 | || mpihelp_cmp(np, dp, dsize - 1) >= 0) { | ||
177 | mpihelp_sub_n(np, np, dp, dsize); | ||
178 | n0 = np[dsize - 1]; | ||
179 | most_significant_q_limb = 1; | ||
180 | } | ||
181 | } | ||
182 | |||
183 | for (i = qextra_limbs + nsize - dsize - 1; i >= 0; i--) { | ||
184 | mpi_limb_t q; | ||
185 | mpi_limb_t n1, n2; | ||
186 | mpi_limb_t cy_limb; | ||
187 | |||
188 | if (i >= qextra_limbs) { | ||
189 | np--; | ||
190 | n2 = np[dsize]; | ||
191 | } else { | ||
192 | n2 = np[dsize - 1]; | ||
193 | MPN_COPY_DECR(np + 1, np, dsize - 1); | ||
194 | np[0] = 0; | ||
195 | } | ||
196 | |||
197 | if (n0 == dX) { | ||
198 | /* This might over-estimate q, but it's probably not worth | ||
199 | * the extra code here to find out. */ | ||
200 | q = ~(mpi_limb_t) 0; | ||
201 | } else { | ||
202 | mpi_limb_t r; | ||
203 | |||
204 | udiv_qrnnd(q, r, n0, np[dsize - 1], dX); | ||
205 | umul_ppmm(n1, n0, d1, q); | ||
206 | |||
207 | while (n1 > r | ||
208 | || (n1 == r | ||
209 | && n0 > np[dsize - 2])) { | ||
210 | q--; | ||
211 | r += dX; | ||
212 | if (r < dX) /* I.e. "carry in previous addition?" */ | ||
213 | break; | ||
214 | n1 -= n0 < d1; | ||
215 | n0 -= d1; | ||
216 | } | ||
217 | } | ||
218 | |||
219 | /* Possible optimization: We already have (q * n0) and (1 * n1) | ||
220 | * after the calculation of q. Taking advantage of that, we | ||
221 | * could make this loop make two iterations less. */ | ||
222 | cy_limb = mpihelp_submul_1(np, dp, dsize, q); | ||
223 | |||
224 | if (n2 != cy_limb) { | ||
225 | mpihelp_add_n(np, np, dp, dsize); | ||
226 | q--; | ||
227 | } | ||
228 | |||
229 | qp[i] = q; | ||
230 | n0 = np[dsize - 1]; | ||
231 | } | ||
232 | } | ||
233 | } | ||
234 | |||
235 | return most_significant_q_limb; | ||
236 | } | ||
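The dsize == 1 branch of mpihelp_divrem() is plain schoolbook long division:
each step feeds the running remainder plus the next limb into udiv_qrnnd().
The sketch below replays that idea in portable C with an assumed 32-bit limb
and a 64-bit intermediate; divrem_1() is an illustrative name.

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Divide a multi-limb number (least significant limb first, as in
	 * the MPI code) by a single limb d. The quotient overwrites n and
	 * the remainder is returned, mirroring the udiv_qrnnd() loop. */
	static uint32_t divrem_1(uint32_t *n, int nsize, uint32_t d)
	{
		uint64_t r = 0;
		int i;

		for (i = nsize - 1; i >= 0; i--) {
			uint64_t cur = (r << 32) | n[i];

			n[i] = (uint32_t)(cur / d);
			r = cur % d;
		}
		return (uint32_t)r;
	}

	int main(void)
	{
		/* 2^32 divided by 3: quotient 0x55555555, remainder 1 */
		uint32_t n[2] = { 0x00000000, 0x00000001 };
		uint32_t rem = divrem_1(n, 2, 3);

		printf("q = 0x%08" PRIx32 "%08" PRIx32 ", rem = %" PRIu32 "\n",
		       n[1], n[0], rem);
		return 0;
	}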
diff --git a/lib/mpi/mpih-mul.c b/lib/mpi/mpih-mul.c new file mode 100644 index 000000000000..7c841719fdfb --- /dev/null +++ b/lib/mpi/mpih-mul.c | |||
@@ -0,0 +1,497 @@ | |||
1 | /* mpihelp-mul.c - MPI helper functions | ||
2 | * Copyright (C) 1994, 1996, 1998, 1999, | ||
3 | * 2000 Free Software Foundation, Inc. | ||
4 | * | ||
5 | * This file is part of GnuPG. | ||
6 | * | ||
7 | * GnuPG is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * GnuPG is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
20 | * | ||
21 | * Note: This code is heavily based on the GNU MP Library. | ||
22 | * Actually it's the same code with only minor changes in the | ||
23 | * way the data is stored; this is to support the abstraction | ||
24 | * of an optional secure memory allocation which may be used | ||
25 | * to avoid revealing of sensitive data due to paging etc. | ||
26 | * The GNU MP Library itself is published under the LGPL; | ||
27 | * however I decided to publish this code under the plain GPL. | ||
28 | */ | ||
29 | |||
30 | #include <linux/string.h> | ||
31 | #include "mpi-internal.h" | ||
32 | #include "longlong.h" | ||
33 | |||
34 | #define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \ | ||
35 | do { \ | ||
36 | if ((size) < KARATSUBA_THRESHOLD) \ | ||
37 | mul_n_basecase(prodp, up, vp, size); \ | ||
38 | else \ | ||
39 | mul_n(prodp, up, vp, size, tspace); \ | ||
40 | } while (0); | ||
41 | |||
42 | #define MPN_SQR_N_RECURSE(prodp, up, size, tspace) \ | ||
43 | do { \ | ||
44 | if ((size) < KARATSUBA_THRESHOLD) \ | ||
45 | mpih_sqr_n_basecase(prodp, up, size); \ | ||
46 | else \ | ||
47 | mpih_sqr_n(prodp, up, size, tspace); \ | ||
48 | } while (0); | ||
49 | |||
50 | /* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP), | ||
51 | * both with SIZE limbs, and store the result at PRODP. 2 * SIZE limbs are | ||
52 | * always stored. Return the most significant limb. | ||
53 | * | ||
54 | * Argument constraints: | ||
55 | * 1. PRODP != UP and PRODP != VP, i.e. the destination | ||
56 | * must be distinct from the multiplier and the multiplicand. | ||
57 | * | ||
58 | * | ||
59 | * Handle simple cases with traditional multiplication. | ||
60 | * | ||
61 | * This is the most critical code of multiplication. All multiplies rely | ||
62 | * on this, both small and huge. Small ones arrive here immediately. Huge | ||
63 | * ones arrive here as this is the base case for Karatsuba's recursive | ||
64 | * algorithm below. | ||
65 | */ | ||
66 | |||
67 | static mpi_limb_t | ||
68 | mul_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size) | ||
69 | { | ||
70 | mpi_size_t i; | ||
71 | mpi_limb_t cy; | ||
72 | mpi_limb_t v_limb; | ||
73 | |||
74 | /* Multiply by the first limb in V separately, as the result can be | ||
75 | * stored (not added) to PROD. We also avoid a loop for zeroing. */ | ||
76 | v_limb = vp[0]; | ||
77 | if (v_limb <= 1) { | ||
78 | if (v_limb == 1) | ||
79 | MPN_COPY(prodp, up, size); | ||
80 | else | ||
81 | MPN_ZERO(prodp, size); | ||
82 | cy = 0; | ||
83 | } else | ||
84 | cy = mpihelp_mul_1(prodp, up, size, v_limb); | ||
85 | |||
86 | prodp[size] = cy; | ||
87 | prodp++; | ||
88 | |||
89 | /* For each iteration in the outer loop, multiply one limb from | ||
90 | * U with one limb from V, and add it to PROD. */ | ||
91 | for (i = 1; i < size; i++) { | ||
92 | v_limb = vp[i]; | ||
93 | if (v_limb <= 1) { | ||
94 | cy = 0; | ||
95 | if (v_limb == 1) | ||
96 | cy = mpihelp_add_n(prodp, prodp, up, size); | ||
97 | } else | ||
98 | cy = mpihelp_addmul_1(prodp, up, size, v_limb); | ||
99 | |||
100 | prodp[size] = cy; | ||
101 | prodp++; | ||
102 | } | ||
103 | |||
104 | return cy; | ||
105 | } | ||
106 | |||
107 | static void | ||
108 | mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, | ||
109 | mpi_size_t size, mpi_ptr_t tspace) | ||
110 | { | ||
111 | if (size & 1) { | ||
112 | /* The size is odd, and the code below doesn't handle that. | ||
113 | * Multiply the least significant (size - 1) limbs with a recursive | ||
114 | * call, and handle the most significant limb of S1 and S2 | ||
115 | * separately. | ||
116 | * A slightly faster way to do this would be to make the Karatsuba | ||
117 | * code below behave as if the size were even, and let it check for | ||
118 | * odd size in the end. I.e., in essence move this code to the end. | ||
119 | * Doing so would save us a recursive call, and potentially make the | ||
120 | * stack grow a lot less. | ||
121 | */ | ||
122 | mpi_size_t esize = size - 1; /* even size */ | ||
123 | mpi_limb_t cy_limb; | ||
124 | |||
125 | MPN_MUL_N_RECURSE(prodp, up, vp, esize, tspace); | ||
126 | cy_limb = mpihelp_addmul_1(prodp + esize, up, esize, vp[esize]); | ||
127 | prodp[esize + esize] = cy_limb; | ||
128 | cy_limb = mpihelp_addmul_1(prodp + esize, vp, size, up[esize]); | ||
129 | prodp[esize + size] = cy_limb; | ||
130 | } else { | ||
131 | /* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm. | ||
132 | * | ||
133 | * Split U in two pieces, U1 and U0, such that | ||
134 | * U = U0 + U1*(B**n), | ||
135 | * and V in V1 and V0, such that | ||
136 | * V = V0 + V1*(B**n). | ||
137 | * | ||
138 | * UV is then computed recursively using the identity | ||
139 | * | ||
140 | * 2n n n n | ||
141 | * UV = (B + B )U V + B (U -U )(V -V ) + (B + 1)U V | ||
142 | * 1 1 1 0 0 1 0 0 | ||
143 | * | ||
144 | * Where B = 2**BITS_PER_MP_LIMB. | ||
145 | */ | ||
146 | mpi_size_t hsize = size >> 1; | ||
147 | mpi_limb_t cy; | ||
148 | int negflg; | ||
149 | |||
150 | /* Product H. ________________ ________________ | ||
151 | * |_____U1 x V1____||____U0 x V0_____| | ||
152 | * Put result in upper part of PROD and pass low part of TSPACE | ||
153 | * as new TSPACE. | ||
154 | */ | ||
155 | MPN_MUL_N_RECURSE(prodp + size, up + hsize, vp + hsize, hsize, | ||
156 | tspace); | ||
157 | |||
158 | /* Product M. ________________ | ||
159 | * |_(U1-U0)(V0-V1)_| | ||
160 | */ | ||
161 | if (mpihelp_cmp(up + hsize, up, hsize) >= 0) { | ||
162 | mpihelp_sub_n(prodp, up + hsize, up, hsize); | ||
163 | negflg = 0; | ||
164 | } else { | ||
165 | mpihelp_sub_n(prodp, up, up + hsize, hsize); | ||
166 | negflg = 1; | ||
167 | } | ||
168 | if (mpihelp_cmp(vp + hsize, vp, hsize) >= 0) { | ||
169 | mpihelp_sub_n(prodp + hsize, vp + hsize, vp, hsize); | ||
170 | negflg ^= 1; | ||
171 | } else { | ||
172 | mpihelp_sub_n(prodp + hsize, vp, vp + hsize, hsize); | ||
173 | /* No change of NEGFLG. */ | ||
174 | } | ||
175 | /* Read temporary operands from low part of PROD. | ||
176 | * Put result in low part of TSPACE using upper part of TSPACE | ||
177 | * as new TSPACE. | ||
178 | */ | ||
179 | MPN_MUL_N_RECURSE(tspace, prodp, prodp + hsize, hsize, | ||
180 | tspace + size); | ||
181 | |||
182 | /* Add/copy product H. */ | ||
183 | MPN_COPY(prodp + hsize, prodp + size, hsize); | ||
184 | cy = mpihelp_add_n(prodp + size, prodp + size, | ||
185 | prodp + size + hsize, hsize); | ||
186 | |||
187 | /* Add product M (if NEGFLG M is a negative number) */ | ||
188 | if (negflg) | ||
189 | cy -= | ||
190 | mpihelp_sub_n(prodp + hsize, prodp + hsize, tspace, | ||
191 | size); | ||
192 | else | ||
193 | cy += | ||
194 | mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, | ||
195 | size); | ||
196 | |||
197 | /* Product L. ________________ ________________ | ||
198 | * |________________||____U0 x V0_____| | ||
199 | * Read temporary operands from low part of PROD. | ||
200 | * Put result in low part of TSPACE using upper part of TSPACE | ||
201 | * as new TSPACE. | ||
202 | */ | ||
203 | MPN_MUL_N_RECURSE(tspace, up, vp, hsize, tspace + size); | ||
204 | |||
205 | /* Add/copy Product L (twice) */ | ||
206 | |||
207 | cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size); | ||
208 | if (cy) | ||
209 | mpihelp_add_1(prodp + hsize + size, | ||
210 | prodp + hsize + size, hsize, cy); | ||
211 | |||
212 | MPN_COPY(prodp, tspace, hsize); | ||
213 | cy = mpihelp_add_n(prodp + hsize, prodp + hsize, tspace + hsize, | ||
214 | hsize); | ||
215 | if (cy) | ||
216 | mpihelp_add_1(prodp + size, prodp + size, size, 1); | ||
217 | } | ||
218 | } | ||
219 | |||
220 | void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size) | ||
221 | { | ||
222 | mpi_size_t i; | ||
223 | mpi_limb_t cy_limb; | ||
224 | mpi_limb_t v_limb; | ||
225 | |||
226 | /* Multiply by the first limb in V separately, as the result can be | ||
227 | * stored (not added) to PROD. We also avoid a loop for zeroing. */ | ||
228 | v_limb = up[0]; | ||
229 | if (v_limb <= 1) { | ||
230 | if (v_limb == 1) | ||
231 | MPN_COPY(prodp, up, size); | ||
232 | else | ||
233 | MPN_ZERO(prodp, size); | ||
234 | cy_limb = 0; | ||
235 | } else | ||
236 | cy_limb = mpihelp_mul_1(prodp, up, size, v_limb); | ||
237 | |||
238 | prodp[size] = cy_limb; | ||
239 | prodp++; | ||
240 | |||
241 | /* For each iteration in the outer loop, multiply one limb from | ||
242 | * U with one limb from V, and add it to PROD. */ | ||
243 | for (i = 1; i < size; i++) { | ||
244 | v_limb = up[i]; | ||
245 | if (v_limb <= 1) { | ||
246 | cy_limb = 0; | ||
247 | if (v_limb == 1) | ||
248 | cy_limb = mpihelp_add_n(prodp, prodp, up, size); | ||
249 | } else | ||
250 | cy_limb = mpihelp_addmul_1(prodp, up, size, v_limb); | ||
251 | |||
252 | prodp[size] = cy_limb; | ||
253 | prodp++; | ||
254 | } | ||
255 | } | ||
256 | |||
257 | void | ||
258 | mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace) | ||
259 | { | ||
260 | if (size & 1) { | ||
261 | /* The size is odd, and the code below doesn't handle that. | ||
262 | * Multiply the least significant (size - 1) limbs with a recursive | ||
263 | * call, and handle the most significant limb of S1 and S2 | ||
264 | * separately. | ||
265 | * A slightly faster way to do this would be to make the Karatsuba | ||
266 | * code below behave as if the size were even, and let it check for | ||
267 | * odd size in the end. I.e., in essence move this code to the end. | ||
268 | * Doing so would save us a recursive call, and potentially make the | ||
269 | * stack grow a lot less. | ||
270 | */ | ||
271 | mpi_size_t esize = size - 1; /* even size */ | ||
272 | mpi_limb_t cy_limb; | ||
273 | |||
274 | MPN_SQR_N_RECURSE(prodp, up, esize, tspace); | ||
275 | cy_limb = mpihelp_addmul_1(prodp + esize, up, esize, up[esize]); | ||
276 | prodp[esize + esize] = cy_limb; | ||
277 | cy_limb = mpihelp_addmul_1(prodp + esize, up, size, up[esize]); | ||
278 | |||
279 | prodp[esize + size] = cy_limb; | ||
280 | } else { | ||
281 | mpi_size_t hsize = size >> 1; | ||
282 | mpi_limb_t cy; | ||
283 | |||
284 | /* Product H. ________________ ________________ | ||
285 | * |_____U1 x U1____||____U0 x U0_____| | ||
286 | * Put result in upper part of PROD and pass low part of TSPACE | ||
287 | * as new TSPACE. | ||
288 | */ | ||
289 | MPN_SQR_N_RECURSE(prodp + size, up + hsize, hsize, tspace); | ||
290 | |||
291 | /* Product M. ________________ | ||
292 | * |_(U1-U0)(U0-U1)_| | ||
293 | */ | ||
294 | if (mpihelp_cmp(up + hsize, up, hsize) >= 0) | ||
295 | mpihelp_sub_n(prodp, up + hsize, up, hsize); | ||
296 | else | ||
297 | mpihelp_sub_n(prodp, up, up + hsize, hsize); | ||
298 | |||
299 | /* Read temporary operands from low part of PROD. | ||
300 | * Put result in low part of TSPACE using upper part of TSPACE | ||
301 | * as new TSPACE. */ | ||
302 | MPN_SQR_N_RECURSE(tspace, prodp, hsize, tspace + size); | ||
303 | |||
304 | /* Add/copy product H */ | ||
305 | MPN_COPY(prodp + hsize, prodp + size, hsize); | ||
306 | cy = mpihelp_add_n(prodp + size, prodp + size, | ||
307 | prodp + size + hsize, hsize); | ||
308 | |||
309 | /* Add product M (if NEGFLG M is a negative number). */ | ||
310 | cy -= mpihelp_sub_n(prodp + hsize, prodp + hsize, tspace, size); | ||
311 | |||
312 | /* Product L. ________________ ________________ | ||
313 | * |________________||____U0 x U0_____| | ||
314 | * Read temporary operands from low part of PROD. | ||
315 | * Put result in low part of TSPACE using upper part of TSPACE | ||
316 | * as new TSPACE. */ | ||
317 | MPN_SQR_N_RECURSE(tspace, up, hsize, tspace + size); | ||
318 | |||
319 | /* Add/copy Product L (twice). */ | ||
320 | cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size); | ||
321 | if (cy) | ||
322 | mpihelp_add_1(prodp + hsize + size, | ||
323 | prodp + hsize + size, hsize, cy); | ||
324 | |||
325 | MPN_COPY(prodp, tspace, hsize); | ||
326 | cy = mpihelp_add_n(prodp + hsize, prodp + hsize, tspace + hsize, | ||
327 | hsize); | ||
328 | if (cy) | ||
329 | mpihelp_add_1(prodp + size, prodp + size, size, 1); | ||
330 | } | ||
331 | } | ||
332 | |||
333 | int | ||
334 | mpihelp_mul_karatsuba_case(mpi_ptr_t prodp, | ||
335 | mpi_ptr_t up, mpi_size_t usize, | ||
336 | mpi_ptr_t vp, mpi_size_t vsize, | ||
337 | struct karatsuba_ctx *ctx) | ||
338 | { | ||
339 | mpi_limb_t cy; | ||
340 | |||
341 | if (!ctx->tspace || ctx->tspace_size < vsize) { | ||
342 | if (ctx->tspace) | ||
343 | mpi_free_limb_space(ctx->tspace); | ||
344 | ctx->tspace = mpi_alloc_limb_space(2 * vsize); | ||
345 | if (!ctx->tspace) | ||
346 | return -ENOMEM; | ||
347 | ctx->tspace_size = vsize; | ||
348 | } | ||
349 | |||
350 | MPN_MUL_N_RECURSE(prodp, up, vp, vsize, ctx->tspace); | ||
351 | |||
352 | prodp += vsize; | ||
353 | up += vsize; | ||
354 | usize -= vsize; | ||
355 | if (usize >= vsize) { | ||
356 | if (!ctx->tp || ctx->tp_size < vsize) { | ||
357 | if (ctx->tp) | ||
358 | mpi_free_limb_space(ctx->tp); | ||
359 | ctx->tp = mpi_alloc_limb_space(2 * vsize); | ||
360 | if (!ctx->tp) { | ||
361 | if (ctx->tspace) | ||
362 | mpi_free_limb_space(ctx->tspace); | ||
363 | ctx->tspace = NULL; | ||
364 | return -ENOMEM; | ||
365 | } | ||
366 | ctx->tp_size = vsize; | ||
367 | } | ||
368 | |||
369 | do { | ||
370 | MPN_MUL_N_RECURSE(ctx->tp, up, vp, vsize, ctx->tspace); | ||
371 | cy = mpihelp_add_n(prodp, prodp, ctx->tp, vsize); | ||
372 | mpihelp_add_1(prodp + vsize, ctx->tp + vsize, vsize, | ||
373 | cy); | ||
374 | prodp += vsize; | ||
375 | up += vsize; | ||
376 | usize -= vsize; | ||
377 | } while (usize >= vsize); | ||
378 | } | ||
379 | |||
380 | if (usize) { | ||
381 | if (usize < KARATSUBA_THRESHOLD) { | ||
382 | mpi_limb_t tmp; | ||
383 | if (mpihelp_mul(ctx->tspace, vp, vsize, up, usize, &tmp) | ||
384 | < 0) | ||
385 | return -ENOMEM; | ||
386 | } else { | ||
387 | if (!ctx->next) { | ||
388 | ctx->next = kzalloc(sizeof *ctx, GFP_KERNEL); | ||
389 | if (!ctx->next) | ||
390 | return -ENOMEM; | ||
391 | } | ||
392 | if (mpihelp_mul_karatsuba_case(ctx->tspace, | ||
393 | vp, vsize, | ||
394 | up, usize, | ||
395 | ctx->next) < 0) | ||
396 | return -ENOMEM; | ||
397 | } | ||
398 | |||
399 | cy = mpihelp_add_n(prodp, prodp, ctx->tspace, vsize); | ||
400 | mpihelp_add_1(prodp + vsize, ctx->tspace + vsize, usize, cy); | ||
401 | } | ||
402 | |||
403 | return 0; | ||
404 | } | ||
405 | |||
406 | void mpihelp_release_karatsuba_ctx(struct karatsuba_ctx *ctx) | ||
407 | { | ||
408 | struct karatsuba_ctx *ctx2; | ||
409 | |||
410 | if (ctx->tp) | ||
411 | mpi_free_limb_space(ctx->tp); | ||
412 | if (ctx->tspace) | ||
413 | mpi_free_limb_space(ctx->tspace); | ||
414 | for (ctx = ctx->next; ctx; ctx = ctx2) { | ||
415 | ctx2 = ctx->next; | ||
416 | if (ctx->tp) | ||
417 | mpi_free_limb_space(ctx->tp); | ||
418 | if (ctx->tspace) | ||
419 | mpi_free_limb_space(ctx->tspace); | ||
420 | kfree(ctx); | ||
421 | } | ||
422 | } | ||
423 | |||
424 | /* Multiply the natural numbers u (pointed to by UP, with USIZE limbs) | ||
425 | * and v (pointed to by VP, with VSIZE limbs), and store the result at | ||
426 | * PRODP. USIZE + VSIZE limbs are always stored; the most significant | ||
427 | * of them may be zero even if the inputs are normalized. Return the | ||
428 | * most significant limb of the result. | ||
429 | * | ||
430 | * NOTE: The space pointed to by PRODP is overwritten before U and V | ||
431 | * are fully read, so overlapping them with PRODP is an error. | ||
432 | * | ||
433 | * Argument constraints: | ||
434 | * 1. USIZE >= VSIZE. | ||
435 | * 2. PRODP != UP and PRODP != VP, i.e. the destination | ||
436 | * must be distinct from the multiplier and the multiplicand. | ||
437 | */ | ||
438 | |||
439 | int | ||
440 | mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize, | ||
441 | mpi_ptr_t vp, mpi_size_t vsize, mpi_limb_t *_result) | ||
442 | { | ||
443 | mpi_ptr_t prod_endp = prodp + usize + vsize - 1; | ||
444 | mpi_limb_t cy; | ||
445 | struct karatsuba_ctx ctx; | ||
446 | |||
447 | if (vsize < KARATSUBA_THRESHOLD) { | ||
448 | mpi_size_t i; | ||
449 | mpi_limb_t v_limb; | ||
450 | |||
451 | if (!vsize) { | ||
452 | *_result = 0; | ||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | /* Multiply by the first limb in V separately, as the result can be | ||
457 | * stored (not added) to PROD. We also avoid a loop for zeroing. */ | ||
458 | v_limb = vp[0]; | ||
459 | if (v_limb <= 1) { | ||
460 | if (v_limb == 1) | ||
461 | MPN_COPY(prodp, up, usize); | ||
462 | else | ||
463 | MPN_ZERO(prodp, usize); | ||
464 | cy = 0; | ||
465 | } else | ||
466 | cy = mpihelp_mul_1(prodp, up, usize, v_limb); | ||
467 | |||
468 | prodp[usize] = cy; | ||
469 | prodp++; | ||
470 | |||
471 | /* For each iteration in the outer loop, multiply one limb from | ||
472 | * U with one limb from V, and add it to PROD. */ | ||
473 | for (i = 1; i < vsize; i++) { | ||
474 | v_limb = vp[i]; | ||
475 | if (v_limb <= 1) { | ||
476 | cy = 0; | ||
477 | if (v_limb == 1) | ||
478 | cy = mpihelp_add_n(prodp, prodp, up, | ||
479 | usize); | ||
480 | } else | ||
481 | cy = mpihelp_addmul_1(prodp, up, usize, v_limb); | ||
482 | |||
483 | prodp[usize] = cy; | ||
484 | prodp++; | ||
485 | } | ||
486 | |||
487 | *_result = cy; | ||
488 | return 0; | ||
489 | } | ||
490 | |||
491 | memset(&ctx, 0, sizeof ctx); | ||
492 | if (mpihelp_mul_karatsuba_case(prodp, up, usize, vp, vsize, &ctx) < 0) | ||
493 | return -ENOMEM; | ||
494 | mpihelp_release_karatsuba_ctx(&ctx); | ||
495 | *_result = *prod_endp; | ||
496 | return 0; | ||
497 | } | ||
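mul_n() switches to Karatsuba's scheme once the operands pass
KARATSUBA_THRESHOLD: splitting U and V in half at some base B, the product
needs only the three half-size multiplications H = U1*V1, L = U0*V0 and
M = (U1-U0)(V0-V1). The snippet below is a numeric sanity check of that
identity on ordinary integers; the operand values and the split base
B = 2^16 are arbitrary choices for illustration.

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Split base B = 2^16, so U = U1*B + U0 and V = V1*B + V0. */
		const int64_t B = 1LL << 16;
		int64_t U = 0x12345678, V = 0x9abcdef0;
		int64_t U1 = U / B, U0 = U % B, V1 = V / B, V0 = V % B;

		/* Three half-size multiplications instead of four. */
		int64_t H = U1 * V1;			/* high product       */
		int64_t L = U0 * V0;			/* low product        */
		int64_t M = (U1 - U0) * (V0 - V1);	/* middle, may be < 0 */

		int64_t karatsuba = (B * B + B) * H + B * M + (B + 1) * L;

		printf("U*V       = %" PRId64 "\n", U * V);
		printf("karatsuba = %" PRId64 "\n", karatsuba);
		return 0;	/* both lines print the same value */
	}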
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c new file mode 100644 index 000000000000..657979f71bef --- /dev/null +++ b/lib/mpi/mpiutil.c | |||
@@ -0,0 +1,123 @@ | |||
1 | /* mpiutil.c - Utility functions for MPI | ||
2 | * Copyright (C) 1998, 1999 Free Software Foundation, Inc. | ||
3 | * | ||
4 | * This file is part of GnuPG. | ||
5 | * | ||
6 | * GnuPG is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * GnuPG is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA | ||
19 | */ | ||
20 | |||
21 | #include "mpi-internal.h" | ||
22 | |||
23 | /**************** | ||
24 | * Note: It was a bad idea to use the number of limbs to allocate | ||
25 | * because on an alpha the limbs are large but we normally need | ||
26 | * integers of n bits - so we should change this to bits (or bytes). | ||
27 | * | ||
28 | * But mpi_alloc is used in a lot of places :-) | ||
29 | */ | ||
30 | MPI mpi_alloc(unsigned nlimbs) | ||
31 | { | ||
32 | MPI a; | ||
33 | |||
34 | a = kmalloc(sizeof *a, GFP_KERNEL); | ||
35 | if (!a) | ||
36 | return a; | ||
37 | |||
38 | if (nlimbs) { | ||
39 | a->d = mpi_alloc_limb_space(nlimbs); | ||
40 | if (!a->d) { | ||
41 | kfree(a); | ||
42 | return NULL; | ||
43 | } | ||
44 | } else { | ||
45 | a->d = NULL; | ||
46 | } | ||
47 | |||
48 | a->alloced = nlimbs; | ||
49 | a->nlimbs = 0; | ||
50 | a->sign = 0; | ||
51 | a->flags = 0; | ||
52 | a->nbits = 0; | ||
53 | return a; | ||
54 | } | ||
55 | EXPORT_SYMBOL_GPL(mpi_alloc); | ||
56 | |||
57 | mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs) | ||
58 | { | ||
59 | size_t len = nlimbs * sizeof(mpi_limb_t); | ||
60 | |||
61 | if (!len) | ||
62 | return NULL; | ||
63 | |||
64 | return kmalloc(len, GFP_KERNEL); | ||
65 | } | ||
66 | |||
67 | void mpi_free_limb_space(mpi_ptr_t a) | ||
68 | { | ||
69 | if (!a) | ||
70 | return; | ||
71 | |||
72 | kfree(a); | ||
73 | } | ||
74 | |||
75 | void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs) | ||
76 | { | ||
77 | mpi_free_limb_space(a->d); | ||
78 | a->d = ap; | ||
79 | a->alloced = nlimbs; | ||
80 | } | ||
81 | |||
82 | /**************** | ||
83 | * Resize the limb array of A to NLIMBS. The new space is cleared | ||
84 | * (set to 0) only when A had no limb array before. | ||
85 | */ | ||
86 | int mpi_resize(MPI a, unsigned nlimbs) | ||
87 | { | ||
88 | void *p; | ||
89 | |||
90 | if (nlimbs <= a->alloced) | ||
91 | return 0; /* no need to do it */ | ||
92 | |||
93 | if (a->d) { | ||
94 | p = kmalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL); | ||
95 | if (!p) | ||
96 | return -ENOMEM; | ||
97 | memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t)); | ||
98 | kfree(a->d); | ||
99 | a->d = p; | ||
100 | } else { | ||
101 | a->d = kzalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL); | ||
102 | if (!a->d) | ||
103 | return -ENOMEM; | ||
104 | } | ||
105 | a->alloced = nlimbs; | ||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | void mpi_free(MPI a) | ||
110 | { | ||
111 | if (!a) | ||
112 | return; | ||
113 | |||
114 | if (a->flags & 4) | ||
115 | kfree(a->d); | ||
116 | else | ||
117 | mpi_free_limb_space(a->d); | ||
118 | |||
119 | if (a->flags & ~7) | ||
120 | pr_info("invalid flag value in mpi\n"); | ||
121 | kfree(a); | ||
122 | } | ||
123 | EXPORT_SYMBOL_GPL(mpi_free); | ||
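Taken together, the helpers give MPIs a simple lifecycle: read an octet
string in, export it again, free both sides. The function below is a
hypothetical caller sketched from the APIs in this file and in mpicoder.c;
mpi_roundtrip() is not part of the patch.

	#include <linux/kernel.h>
	#include <linux/mpi.h>
	#include <linux/slab.h>

	static int mpi_roundtrip(const void *blob, size_t len)
	{
		unsigned int nbytes;
		void *out;
		MPI m;

		m = mpi_read_raw_data(blob, len);	/* allocates limbs */
		if (!m)
			return -ENOMEM;

		out = mpi_get_buffer(m, &nbytes, NULL);	/* kmalloc'd, MSB first */
		if (!out) {
			mpi_free(m);
			return -ENOMEM;
		}

		/* ... use out/nbytes ... */

		kfree(out);
		mpi_free(m);
		return 0;
	}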
diff --git a/lib/nlattr.c b/lib/nlattr.c index ac09f2226dc7..18eca7809b08 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> | 5 | * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/module.h> | 8 | #include <linux/export.h> |
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/jiffies.h> | 11 | #include <linux/jiffies.h> |
@@ -20,7 +20,12 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = { | |||
20 | [NLA_U16] = sizeof(u16), | 20 | [NLA_U16] = sizeof(u16), |
21 | [NLA_U32] = sizeof(u32), | 21 | [NLA_U32] = sizeof(u32), |
22 | [NLA_U64] = sizeof(u64), | 22 | [NLA_U64] = sizeof(u64), |
23 | [NLA_MSECS] = sizeof(u64), | ||
23 | [NLA_NESTED] = NLA_HDRLEN, | 24 | [NLA_NESTED] = NLA_HDRLEN, |
25 | [NLA_S8] = sizeof(s8), | ||
26 | [NLA_S16] = sizeof(s16), | ||
27 | [NLA_S32] = sizeof(s32), | ||
28 | [NLA_S64] = sizeof(s64), | ||
24 | }; | 29 | }; |
25 | 30 | ||
26 | static int validate_nla(const struct nlattr *nla, int maxtype, | 31 | static int validate_nla(const struct nlattr *nla, int maxtype, |
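With minimum lengths registered for NLA_MSECS and NLA_S8..NLA_S64,
nla_parse() can validate signed attributes declared in a policy just like the
unsigned ones. The sketch below assumes the matching nla_get_s32() accessor
from the same netlink series; the attribute names are invented.

	#include <linux/kernel.h>
	#include <net/netlink.h>

	enum { MY_ATTR_UNSPEC, MY_ATTR_TEMP, __MY_ATTR_MAX };
	#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

	/* Signed attribute, length-checked against nla_attr_minlen[]. */
	static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
		[MY_ATTR_TEMP] = { .type = NLA_S32 },
	};

	static int my_parse(struct nlattr *head, int len)
	{
		struct nlattr *tb[MY_ATTR_MAX + 1];
		int err;

		err = nla_parse(tb, MY_ATTR_MAX, head, len, my_policy);
		if (err)
			return err;
		if (tb[MY_ATTR_TEMP])
			pr_info("temp=%d\n", nla_get_s32(tb[MY_ATTR_TEMP]));
		return 0;
	}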
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c new file mode 100644 index 000000000000..44b92cb6224f --- /dev/null +++ b/lib/notifier-error-inject.c | |||
@@ -0,0 +1,112 @@ | |||
1 | #include <linux/module.h> | ||
2 | |||
3 | #include "notifier-error-inject.h" | ||
4 | |||
5 | static int debugfs_errno_set(void *data, u64 val) | ||
6 | { | ||
7 | *(int *)data = clamp_t(int, val, -MAX_ERRNO, 0); | ||
8 | return 0; | ||
9 | } | ||
10 | |||
11 | static int debugfs_errno_get(void *data, u64 *val) | ||
12 | { | ||
13 | *val = *(int *)data; | ||
14 | return 0; | ||
15 | } | ||
16 | |||
17 | DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set, | ||
18 | "%lld\n"); | ||
19 | |||
20 | static struct dentry *debugfs_create_errno(const char *name, mode_t mode, | ||
21 | struct dentry *parent, int *value) | ||
22 | { | ||
23 | return debugfs_create_file(name, mode, parent, value, &fops_errno); | ||
24 | } | ||
25 | |||
26 | static int notifier_err_inject_callback(struct notifier_block *nb, | ||
27 | unsigned long val, void *p) | ||
28 | { | ||
29 | int err = 0; | ||
30 | struct notifier_err_inject *err_inject = | ||
31 | container_of(nb, struct notifier_err_inject, nb); | ||
32 | struct notifier_err_inject_action *action; | ||
33 | |||
34 | for (action = err_inject->actions; action->name; action++) { | ||
35 | if (action->val == val) { | ||
36 | err = action->error; | ||
37 | break; | ||
38 | } | ||
39 | } | ||
40 | if (err) | ||
41 | pr_info("Injecting error (%d) to %s\n", err, action->name); | ||
42 | |||
43 | return notifier_from_errno(err); | ||
44 | } | ||
45 | |||
46 | struct dentry *notifier_err_inject_dir; | ||
47 | EXPORT_SYMBOL_GPL(notifier_err_inject_dir); | ||
48 | |||
49 | struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent, | ||
50 | struct notifier_err_inject *err_inject, int priority) | ||
51 | { | ||
52 | struct notifier_err_inject_action *action; | ||
53 | mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; | ||
54 | struct dentry *dir; | ||
55 | struct dentry *actions_dir; | ||
56 | |||
57 | err_inject->nb.notifier_call = notifier_err_inject_callback; | ||
58 | err_inject->nb.priority = priority; | ||
59 | |||
60 | dir = debugfs_create_dir(name, parent); | ||
61 | if (!dir) | ||
62 | return ERR_PTR(-ENOMEM); | ||
63 | |||
64 | actions_dir = debugfs_create_dir("actions", dir); | ||
65 | if (!actions_dir) | ||
66 | goto fail; | ||
67 | |||
68 | for (action = err_inject->actions; action->name; action++) { | ||
69 | struct dentry *action_dir; | ||
70 | |||
71 | action_dir = debugfs_create_dir(action->name, actions_dir); | ||
72 | if (!action_dir) | ||
73 | goto fail; | ||
74 | |||
75 | /* | ||
76 | * Create debugfs r/w file containing action->error. If | ||
77 | * the notifier call chain is called with action->val, it will | ||
78 | * fail with this error code. | ||
79 | */ | ||
80 | if (!debugfs_create_errno("error", mode, action_dir, | ||
81 | &action->error)) | ||
82 | goto fail; | ||
83 | } | ||
84 | return dir; | ||
85 | fail: | ||
86 | debugfs_remove_recursive(dir); | ||
87 | return ERR_PTR(-ENOMEM); | ||
88 | } | ||
89 | EXPORT_SYMBOL_GPL(notifier_err_inject_init); | ||
90 | |||
91 | static int __init err_inject_init(void) | ||
92 | { | ||
93 | notifier_err_inject_dir = | ||
94 | debugfs_create_dir("notifier-error-inject", NULL); | ||
95 | |||
96 | if (!notifier_err_inject_dir) | ||
97 | return -ENOMEM; | ||
98 | |||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | static void __exit err_inject_exit(void) | ||
103 | { | ||
104 | debugfs_remove_recursive(notifier_err_inject_dir); | ||
105 | } | ||
106 | |||
107 | module_init(err_inject_init); | ||
108 | module_exit(err_inject_exit); | ||
109 | |||
110 | MODULE_DESCRIPTION("Notifier error injection module"); | ||
111 | MODULE_LICENSE("GPL"); | ||
112 | MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>"); | ||
diff --git a/lib/notifier-error-inject.h b/lib/notifier-error-inject.h new file mode 100644 index 000000000000..99b3b6fc470b --- /dev/null +++ b/lib/notifier-error-inject.h | |||
@@ -0,0 +1,24 @@ | |||
1 | #include <linux/atomic.h> | ||
2 | #include <linux/debugfs.h> | ||
3 | #include <linux/notifier.h> | ||
4 | |||
5 | struct notifier_err_inject_action { | ||
6 | unsigned long val; | ||
7 | int error; | ||
8 | const char *name; | ||
9 | }; | ||
10 | |||
11 | #define NOTIFIER_ERR_INJECT_ACTION(action) \ | ||
12 | .name = #action, .val = (action), | ||
13 | |||
14 | struct notifier_err_inject { | ||
15 | struct notifier_block nb; | ||
16 | struct notifier_err_inject_action actions[]; | ||
17 | /* The last slot must be terminated with a zero sentinel */ | ||
18 | }; | ||
19 | |||
20 | extern struct dentry *notifier_err_inject_dir; | ||
21 | |||
22 | extern struct dentry *notifier_err_inject_init(const char *name, | ||
23 | struct dentry *parent, struct notifier_err_inject *err_inject, | ||
24 | int priority); | ||
diff --git a/lib/oid_registry.c b/lib/oid_registry.c new file mode 100644 index 000000000000..d8de11f45908 --- /dev/null +++ b/lib/oid_registry.c | |||
@@ -0,0 +1,170 @@ | |||
1 | /* ASN.1 Object identifier (OID) registry | ||
2 | * | ||
3 | * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/export.h> | ||
13 | #include <linux/oid_registry.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/bug.h> | ||
17 | #include "oid_registry_data.c" | ||
18 | |||
19 | /** | ||
20 | * look_up_OID - Find an OID registration for the specified data | ||
21 | * @data: Binary representation of the OID | ||
22 | * @datasize: Size of the binary representation | ||
23 | */ | ||
24 | enum OID look_up_OID(const void *data, size_t datasize) | ||
25 | { | ||
26 | const unsigned char *octets = data; | ||
27 | enum OID oid; | ||
28 | unsigned char xhash; | ||
29 | unsigned i, j, k, hash; | ||
30 | size_t len; | ||
31 | |||
32 | /* Hash the OID data */ | ||
33 | hash = datasize - 1; | ||
34 | |||
35 | for (i = 0; i < datasize; i++) | ||
36 | hash += octets[i] * 33; | ||
37 | hash = (hash >> 24) ^ (hash >> 16) ^ (hash >> 8) ^ hash; | ||
38 | hash &= 0xff; | ||
39 | |||
40 | /* Binary search the OID registry. OIDs are stored in ascending order | ||
41 | * of hash value then ascending order of size and then in ascending | ||
42 | * order of reverse value. | ||
43 | */ | ||
44 | i = 0; | ||
45 | k = OID__NR; | ||
46 | while (i < k) { | ||
47 | j = (i + k) / 2; | ||
48 | |||
49 | xhash = oid_search_table[j].hash; | ||
50 | if (xhash > hash) { | ||
51 | k = j; | ||
52 | continue; | ||
53 | } | ||
54 | if (xhash < hash) { | ||
55 | i = j + 1; | ||
56 | continue; | ||
57 | } | ||
58 | |||
59 | oid = oid_search_table[j].oid; | ||
60 | len = oid_index[oid + 1] - oid_index[oid]; | ||
61 | if (len > datasize) { | ||
62 | k = j; | ||
63 | continue; | ||
64 | } | ||
65 | if (len < datasize) { | ||
66 | i = j + 1; | ||
67 | continue; | ||
68 | } | ||
69 | |||
70 | /* Variation is most likely to be at the tail end of the | ||
71 | * OID, so do the comparison in reverse. | ||
72 | */ | ||
73 | while (len > 0) { | ||
74 | unsigned char a = oid_data[oid_index[oid] + --len]; | ||
75 | unsigned char b = octets[len]; | ||
76 | if (a > b) { | ||
77 | k = j; | ||
78 | goto next; | ||
79 | } | ||
80 | if (a < b) { | ||
81 | i = j + 1; | ||
82 | goto next; | ||
83 | } | ||
84 | } | ||
85 | return oid; | ||
86 | next: | ||
87 | ; | ||
88 | } | ||
89 | |||
90 | return OID__NR; | ||
91 | } | ||
92 | EXPORT_SYMBOL_GPL(look_up_OID); | ||
93 | |||
94 | /* | ||
95 | * sprint_oid - Print an Object Identifier into a buffer | ||
96 | * @data: The encoded OID to print | ||
97 | * @datasize: The size of the encoded OID | ||
98 | * @buffer: The buffer to render into | ||
99 | * @bufsize: The size of the buffer | ||
100 | * | ||
101 | * The OID is rendered into the buffer in "a.b.c.d" format and the number of | ||
102 | * bytes is returned. -EBADMSG is returned if the data could not be interpreted | ||
103 | * and -ENOBUFS if the buffer was too small. | ||
104 | */ | ||
105 | int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize) | ||
106 | { | ||
107 | const unsigned char *v = data, *end = v + datasize; | ||
108 | unsigned long num; | ||
109 | unsigned char n; | ||
110 | size_t ret; | ||
111 | int count; | ||
112 | |||
113 | if (v >= end) | ||
114 | return -EBADMSG; | ||
115 | |||
116 | n = *v++; | ||
117 | ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40); | ||
118 | buffer += count; | ||
119 | bufsize -= count; | ||
120 | if (bufsize == 0) | ||
121 | return -ENOBUFS; | ||
122 | |||
123 | while (v < end) { | ||
124 | num = 0; | ||
125 | n = *v++; | ||
126 | if (!(n & 0x80)) { | ||
127 | num = n; | ||
128 | } else { | ||
129 | num = n & 0x7f; | ||
130 | do { | ||
131 | if (v >= end) | ||
132 | return -EBADMSG; | ||
133 | n = *v++; | ||
134 | num <<= 7; | ||
135 | num |= n & 0x7f; | ||
136 | } while (n & 0x80); | ||
137 | } | ||
138 | ret += count = snprintf(buffer, bufsize, ".%lu", num); | ||
139 | buffer += count; | ||
140 | bufsize -= count; | ||
141 | if (bufsize == 0) | ||
142 | return -ENOBUFS; | ||
143 | } | ||
144 | |||
145 | return ret; | ||
146 | } | ||
147 | EXPORT_SYMBOL_GPL(sprint_oid); | ||
148 | |||
149 | /** | ||
150 | * sprint_OID - Print an Object Identifier into a buffer | ||
151 | * @oid: The OID to print | ||
152 | * @buffer: The buffer to render into | ||
153 | * @bufsize: The size of the buffer | ||
154 | * | ||
155 | * The OID is rendered into the buffer in "a.b.c.d" format and the number of | ||
156 | * bytes is returned. | ||
157 | */ | ||
158 | int sprint_OID(enum OID oid, char *buffer, size_t bufsize) | ||
159 | { | ||
160 | int ret; | ||
161 | |||
162 | BUG_ON(oid >= OID__NR); | ||
163 | |||
164 | ret = sprint_oid(oid_data + oid_index[oid], | ||
165 | oid_index[oid + 1] - oid_index[oid], | ||
166 | buffer, bufsize); | ||
167 | BUG_ON(ret == -EBADMSG); | ||
168 | return ret; | ||
169 | } | ||
170 | EXPORT_SYMBOL_GPL(sprint_OID); | ||
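sprint_oid() walks the BER/DER encoding of an OID: the first octet packs the
first two arcs as 40 * arc1 + arc2, and each later arc is a base-128 sequence
whose bytes use bit 7 as a continuation flag. The standalone decoder below
applies the same rules to one assumed example, the DER bytes of OID 2.5.4.3
(X.500 commonName).

	#include <stddef.h>
	#include <stdio.h>

	int main(void)
	{
		/* DER payload of OID 2.5.4.3: 0x55 = 40*2 + 5, then 4, then 3 */
		const unsigned char der[] = { 0x55, 0x04, 0x03 };
		size_t i;

		printf("%u.%u", der[0] / 40, der[0] % 40);
		for (i = 1; i < sizeof(der); ) {
			unsigned long num = 0;

			/* base-128 digits; bit 7 set means more digits follow */
			do {
				num = (num << 7) | (der[i] & 0x7f);
			} while (der[i++] & 0x80);
			printf(".%lu", num);
		}
		printf("\n");	/* prints 2.5.4.3 */
		return 0;
	}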
diff --git a/lib/pSeries-reconfig-notifier-error-inject.c b/lib/pSeries-reconfig-notifier-error-inject.c new file mode 100644 index 000000000000..7f7c98dcd5c4 --- /dev/null +++ b/lib/pSeries-reconfig-notifier-error-inject.c | |||
@@ -0,0 +1,51 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/module.h> | ||
3 | |||
4 | #include <asm/pSeries_reconfig.h> | ||
5 | |||
6 | #include "notifier-error-inject.h" | ||
7 | |||
8 | static int priority; | ||
9 | module_param(priority, int, 0); | ||
10 | MODULE_PARM_DESC(priority, "specify pSeries reconfig notifier priority"); | ||
11 | |||
12 | static struct notifier_err_inject reconfig_err_inject = { | ||
13 | .actions = { | ||
14 | { NOTIFIER_ERR_INJECT_ACTION(PSERIES_RECONFIG_ADD) }, | ||
15 | { NOTIFIER_ERR_INJECT_ACTION(PSERIES_RECONFIG_REMOVE) }, | ||
16 | { NOTIFIER_ERR_INJECT_ACTION(PSERIES_DRCONF_MEM_ADD) }, | ||
17 | { NOTIFIER_ERR_INJECT_ACTION(PSERIES_DRCONF_MEM_REMOVE) }, | ||
18 | {} | ||
19 | } | ||
20 | }; | ||
21 | |||
22 | static struct dentry *dir; | ||
23 | |||
24 | static int err_inject_init(void) | ||
25 | { | ||
26 | int err; | ||
27 | |||
28 | dir = notifier_err_inject_init("pSeries-reconfig", | ||
29 | notifier_err_inject_dir, &reconfig_err_inject, priority); | ||
30 | if (IS_ERR(dir)) | ||
31 | return PTR_ERR(dir); | ||
32 | |||
33 | err = pSeries_reconfig_notifier_register(&reconfig_err_inject.nb); | ||
34 | if (err) | ||
35 | debugfs_remove_recursive(dir); | ||
36 | |||
37 | return err; | ||
38 | } | ||
39 | |||
40 | static void err_inject_exit(void) | ||
41 | { | ||
42 | pSeries_reconfig_notifier_unregister(&reconfig_err_inject.nb); | ||
43 | debugfs_remove_recursive(dir); | ||
44 | } | ||
45 | |||
46 | module_init(err_inject_init); | ||
47 | module_exit(err_inject_exit); | ||
48 | |||
49 | MODULE_DESCRIPTION("pSeries reconfig notifier error injection module"); | ||
50 | MODULE_LICENSE("GPL"); | ||
51 | MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>"); | ||
diff --git a/lib/parser.c b/lib/parser.c index dcbaaef6cf11..52cfa69f73df 100644 --- a/lib/parser.c +++ b/lib/parser.c | |||
@@ -6,7 +6,8 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/ctype.h> | 8 | #include <linux/ctype.h> |
9 | #include <linux/module.h> | 9 | #include <linux/types.h> |
10 | #include <linux/export.h> | ||
10 | #include <linux/parser.h> | 11 | #include <linux/parser.h> |
11 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
12 | #include <linux/string.h> | 13 | #include <linux/string.h> |
@@ -121,13 +122,14 @@ int match_token(char *s, const match_table_t table, substring_t args[]) | |||
121 | * | 122 | * |
122 | * Description: Given a &substring_t and a base, attempts to parse the substring | 123 | * Description: Given a &substring_t and a base, attempts to parse the substring |
123 | * as a number in that base. On success, sets @result to the integer represented | 124 | * as a number in that base. On success, sets @result to the integer represented |
124 | * by the string and returns 0. Returns either -ENOMEM or -EINVAL on failure. | 125 | * by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure. |
125 | */ | 126 | */ |
126 | static int match_number(substring_t *s, int *result, int base) | 127 | static int match_number(substring_t *s, int *result, int base) |
127 | { | 128 | { |
128 | char *endp; | 129 | char *endp; |
129 | char *buf; | 130 | char *buf; |
130 | int ret; | 131 | int ret; |
132 | long val; | ||
131 | size_t len = s->to - s->from; | 133 | size_t len = s->to - s->from; |
132 | 134 | ||
133 | buf = kmalloc(len + 1, GFP_KERNEL); | 135 | buf = kmalloc(len + 1, GFP_KERNEL); |
@@ -135,10 +137,15 @@ static int match_number(substring_t *s, int *result, int base) | |||
135 | return -ENOMEM; | 137 | return -ENOMEM; |
136 | memcpy(buf, s->from, len); | 138 | memcpy(buf, s->from, len); |
137 | buf[len] = '\0'; | 139 | buf[len] = '\0'; |
138 | *result = simple_strtol(buf, &endp, base); | 140 | |
139 | ret = 0; | 141 | ret = 0; |
142 | val = simple_strtol(buf, &endp, base); | ||
140 | if (endp == buf) | 143 | if (endp == buf) |
141 | ret = -EINVAL; | 144 | ret = -EINVAL; |
145 | else if (val < (long)INT_MIN || val > (long)INT_MAX) | ||
146 | ret = -ERANGE; | ||
147 | else | ||
148 | *result = (int) val; | ||
142 | kfree(buf); | 149 | kfree(buf); |
143 | return ret; | 150 | return ret; |
144 | } | 151 | } |
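
The -ERANGE case added above is only visible to callers that reach match_number() through the public parser helpers. A minimal sketch of such a caller, assuming the usual match_token()/match_int() interface from <linux/parser.h>; the option table and option name are invented for illustration:

    #include <linux/kernel.h>
    #include <linux/parser.h>

    enum { Opt_size, Opt_err };

    static const match_table_t demo_tokens = {
            { Opt_size, "size=%d" },        /* %d is handled by match_int() */
            { Opt_err, NULL }
    };

    /* Parse a "size=<n>" option; errors from match_number() propagate up. */
    static int demo_parse_size(char *opt, int *size)
    {
            substring_t args[MAX_OPT_ARGS];

            if (match_token(opt, demo_tokens, args) != Opt_size)
                    return -EINVAL;

            /*
             * match_int() calls match_number(); with the change above,
             * "size=9999999999999" now fails with -ERANGE instead of
             * silently truncating to an int.
             */
            return match_int(&args[0], size);
    }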
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c new file mode 100644 index 000000000000..0d83ea8a9605 --- /dev/null +++ b/lib/pci_iomap.c | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * Implement the default iomap interfaces | ||
3 | * | ||
4 | * (C) Copyright 2004 Linus Torvalds | ||
5 | */ | ||
6 | #include <linux/pci.h> | ||
7 | #include <linux/io.h> | ||
8 | |||
9 | #include <linux/export.h> | ||
10 | |||
11 | #ifdef CONFIG_PCI | ||
12 | /** | ||
13 | * pci_iomap - create a virtual mapping cookie for a PCI BAR | ||
14 | * @dev: PCI device that owns the BAR | ||
15 | * @bar: BAR number | ||
16 | * @maxlen: length of the memory to map | ||
17 | * | ||
18 | * Using this function you will get a __iomem address to your device BAR. | ||
19 | * You can access it using ioread*() and iowrite*(). These functions hide | ||
20 | * the details of whether this is an MMIO or PIO address space and will do | ||
21 | * what you expect from them in the correct way. | ||
22 | * | ||
23 | * @maxlen specifies the maximum length to map. If you want to get access to | ||
24 | * the complete BAR without checking for its length first, pass %0 here. | ||
25 | */ | ||
26 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | ||
27 | { | ||
28 | resource_size_t start = pci_resource_start(dev, bar); | ||
29 | resource_size_t len = pci_resource_len(dev, bar); | ||
30 | unsigned long flags = pci_resource_flags(dev, bar); | ||
31 | |||
32 | if (!len || !start) | ||
33 | return NULL; | ||
34 | if (maxlen && len > maxlen) | ||
35 | len = maxlen; | ||
36 | if (flags & IORESOURCE_IO) | ||
37 | return __pci_ioport_map(dev, start, len); | ||
38 | if (flags & IORESOURCE_MEM) { | ||
39 | if (flags & IORESOURCE_CACHEABLE) | ||
40 | return ioremap(start, len); | ||
41 | return ioremap_nocache(start, len); | ||
42 | } | ||
43 | /* What? */ | ||
44 | return NULL; | ||
45 | } | ||
46 | |||
47 | EXPORT_SYMBOL(pci_iomap); | ||
48 | #endif /* CONFIG_PCI */ | ||
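
As the kernel-doc above says, the cookie returned by pci_iomap() hides whether BAR space is MMIO or PIO, so drivers use it only through ioread*()/iowrite*(). A hedged probe-path sketch; the BAR number, register offset and names are hypothetical:

    #include <linux/pci.h>
    #include <linux/io.h>

    #define DEMO_STATUS_REG 0x10    /* hypothetical register offset */

    static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            void __iomem *regs;
            u32 status;
            int err;

            err = pci_enable_device(pdev);
            if (err)
                    return err;

            /* Map all of BAR 0 (maxlen == 0 means "whole BAR"). */
            regs = pci_iomap(pdev, 0, 0);
            if (!regs) {
                    pci_disable_device(pdev);
                    return -ENOMEM;
            }

            /* Works the same for MMIO and PIO BARs. */
            status = ioread32(regs + DEMO_STATUS_REG);
            dev_info(&pdev->dev, "status register: %#x\n", status);

            pci_iounmap(pdev, regs);
            pci_disable_device(pdev);
            return 0;
    }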
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 28f2c33c6b53..ba6085d9c741 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c | |||
@@ -10,8 +10,10 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/debugobjects.h> | 11 | #include <linux/debugobjects.h> |
12 | 12 | ||
13 | #ifdef CONFIG_HOTPLUG_CPU | ||
13 | static LIST_HEAD(percpu_counters); | 14 | static LIST_HEAD(percpu_counters); |
14 | static DEFINE_MUTEX(percpu_counters_lock); | 15 | static DEFINE_SPINLOCK(percpu_counters_lock); |
16 | #endif | ||
15 | 17 | ||
16 | #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER | 18 | #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER |
17 | 19 | ||
@@ -59,13 +61,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount) | |||
59 | { | 61 | { |
60 | int cpu; | 62 | int cpu; |
61 | 63 | ||
62 | spin_lock(&fbc->lock); | 64 | raw_spin_lock(&fbc->lock); |
63 | for_each_possible_cpu(cpu) { | 65 | for_each_possible_cpu(cpu) { |
64 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); | 66 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
65 | *pcount = 0; | 67 | *pcount = 0; |
66 | } | 68 | } |
67 | fbc->count = amount; | 69 | fbc->count = amount; |
68 | spin_unlock(&fbc->lock); | 70 | raw_spin_unlock(&fbc->lock); |
69 | } | 71 | } |
70 | EXPORT_SYMBOL(percpu_counter_set); | 72 | EXPORT_SYMBOL(percpu_counter_set); |
71 | 73 | ||
@@ -76,10 +78,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) | |||
76 | preempt_disable(); | 78 | preempt_disable(); |
77 | count = __this_cpu_read(*fbc->counters) + amount; | 79 | count = __this_cpu_read(*fbc->counters) + amount; |
78 | if (count >= batch || count <= -batch) { | 80 | if (count >= batch || count <= -batch) { |
79 | spin_lock(&fbc->lock); | 81 | raw_spin_lock(&fbc->lock); |
80 | fbc->count += count; | 82 | fbc->count += count; |
81 | __this_cpu_write(*fbc->counters, 0); | 83 | __this_cpu_write(*fbc->counters, 0); |
82 | spin_unlock(&fbc->lock); | 84 | raw_spin_unlock(&fbc->lock); |
83 | } else { | 85 | } else { |
84 | __this_cpu_write(*fbc->counters, count); | 86 | __this_cpu_write(*fbc->counters, count); |
85 | } | 87 | } |
@@ -96,13 +98,13 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc) | |||
96 | s64 ret; | 98 | s64 ret; |
97 | int cpu; | 99 | int cpu; |
98 | 100 | ||
99 | spin_lock(&fbc->lock); | 101 | raw_spin_lock(&fbc->lock); |
100 | ret = fbc->count; | 102 | ret = fbc->count; |
101 | for_each_online_cpu(cpu) { | 103 | for_each_online_cpu(cpu) { |
102 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); | 104 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
103 | ret += *pcount; | 105 | ret += *pcount; |
104 | } | 106 | } |
105 | spin_unlock(&fbc->lock); | 107 | raw_spin_unlock(&fbc->lock); |
106 | return ret; | 108 | return ret; |
107 | } | 109 | } |
108 | EXPORT_SYMBOL(__percpu_counter_sum); | 110 | EXPORT_SYMBOL(__percpu_counter_sum); |
@@ -110,7 +112,7 @@ EXPORT_SYMBOL(__percpu_counter_sum); | |||
110 | int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, | 112 | int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, |
111 | struct lock_class_key *key) | 113 | struct lock_class_key *key) |
112 | { | 114 | { |
113 | spin_lock_init(&fbc->lock); | 115 | raw_spin_lock_init(&fbc->lock); |
114 | lockdep_set_class(&fbc->lock, key); | 116 | lockdep_set_class(&fbc->lock, key); |
115 | fbc->count = amount; | 117 | fbc->count = amount; |
116 | fbc->counters = alloc_percpu(s32); | 118 | fbc->counters = alloc_percpu(s32); |
@@ -121,9 +123,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, | |||
121 | 123 | ||
122 | #ifdef CONFIG_HOTPLUG_CPU | 124 | #ifdef CONFIG_HOTPLUG_CPU |
123 | INIT_LIST_HEAD(&fbc->list); | 125 | INIT_LIST_HEAD(&fbc->list); |
124 | mutex_lock(&percpu_counters_lock); | 126 | spin_lock(&percpu_counters_lock); |
125 | list_add(&fbc->list, &percpu_counters); | 127 | list_add(&fbc->list, &percpu_counters); |
126 | mutex_unlock(&percpu_counters_lock); | 128 | spin_unlock(&percpu_counters_lock); |
127 | #endif | 129 | #endif |
128 | return 0; | 130 | return 0; |
129 | } | 131 | } |
@@ -137,9 +139,9 @@ void percpu_counter_destroy(struct percpu_counter *fbc) | |||
137 | debug_percpu_counter_deactivate(fbc); | 139 | debug_percpu_counter_deactivate(fbc); |
138 | 140 | ||
139 | #ifdef CONFIG_HOTPLUG_CPU | 141 | #ifdef CONFIG_HOTPLUG_CPU |
140 | mutex_lock(&percpu_counters_lock); | 142 | spin_lock(&percpu_counters_lock); |
141 | list_del(&fbc->list); | 143 | list_del(&fbc->list); |
142 | mutex_unlock(&percpu_counters_lock); | 144 | spin_unlock(&percpu_counters_lock); |
143 | #endif | 145 | #endif |
144 | free_percpu(fbc->counters); | 146 | free_percpu(fbc->counters); |
145 | fbc->counters = NULL; | 147 | fbc->counters = NULL; |
@@ -168,18 +170,18 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb, | |||
168 | return NOTIFY_OK; | 170 | return NOTIFY_OK; |
169 | 171 | ||
170 | cpu = (unsigned long)hcpu; | 172 | cpu = (unsigned long)hcpu; |
171 | mutex_lock(&percpu_counters_lock); | 173 | spin_lock(&percpu_counters_lock); |
172 | list_for_each_entry(fbc, &percpu_counters, list) { | 174 | list_for_each_entry(fbc, &percpu_counters, list) { |
173 | s32 *pcount; | 175 | s32 *pcount; |
174 | unsigned long flags; | 176 | unsigned long flags; |
175 | 177 | ||
176 | spin_lock_irqsave(&fbc->lock, flags); | 178 | raw_spin_lock_irqsave(&fbc->lock, flags); |
177 | pcount = per_cpu_ptr(fbc->counters, cpu); | 179 | pcount = per_cpu_ptr(fbc->counters, cpu); |
178 | fbc->count += *pcount; | 180 | fbc->count += *pcount; |
179 | *pcount = 0; | 181 | *pcount = 0; |
180 | spin_unlock_irqrestore(&fbc->lock, flags); | 182 | raw_spin_unlock_irqrestore(&fbc->lock, flags); |
181 | } | 183 | } |
182 | mutex_unlock(&percpu_counters_lock); | 184 | spin_unlock(&percpu_counters_lock); |
183 | #endif | 185 | #endif |
184 | return NOTIFY_OK; | 186 | return NOTIFY_OK; |
185 | } | 187 | } |
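
The point of the batch test in __percpu_counter_add() above is that fbc->lock (now a raw spinlock) is taken only when a CPU's local delta reaches the batch size, so the common increment path stays lock-free. A short usage sketch under the usual API assumptions (percpu_counter_add()/percpu_counter_sum() are the wrappers around the __-prefixed functions shown in this hunk):

    #include <linux/percpu_counter.h>

    static struct percpu_counter nr_widgets;

    static int widgets_init(void)
    {
            /* Joins the hotplug list under percpu_counters_lock
             * (now a plain spinlock) when CONFIG_HOTPLUG_CPU is set. */
            return percpu_counter_init(&nr_widgets, 0);
    }

    static void widget_created(void)
    {
            /* Usually touches only this CPU's s32; the raw spinlock is
             * taken only once the local delta reaches the batch size. */
            percpu_counter_add(&nr_widgets, 1);
    }

    static s64 widgets_total(void)
    {
            /* Folds every per-CPU delta under fbc->lock for an exact sum. */
            return percpu_counter_sum(&nr_widgets);
    }

    static void widgets_exit(void)
    {
            percpu_counter_destroy(&nr_widgets);
    }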
diff --git a/lib/plist.c b/lib/plist.c index a0a4da489c22..1ebc95f7a46f 100644 --- a/lib/plist.c +++ b/lib/plist.c | |||
@@ -23,6 +23,7 @@ | |||
23 | * information. | 23 | * information. |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/bug.h> | ||
26 | #include <linux/plist.h> | 27 | #include <linux/plist.h> |
27 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
28 | 29 | ||
@@ -174,7 +175,7 @@ static int __init plist_test(void) | |||
174 | int nr_expect = 0, i, loop; | 175 | int nr_expect = 0, i, loop; |
175 | unsigned int r = local_clock(); | 176 | unsigned int r = local_clock(); |
176 | 177 | ||
177 | printk(KERN_INFO "start plist test\n"); | 178 | pr_debug("start plist test\n"); |
178 | plist_head_init(&test_head); | 179 | plist_head_init(&test_head); |
179 | for (i = 0; i < ARRAY_SIZE(test_node); i++) | 180 | for (i = 0; i < ARRAY_SIZE(test_node); i++) |
180 | plist_node_init(test_node + i, 0); | 181 | plist_node_init(test_node + i, 0); |
@@ -202,7 +203,7 @@ static int __init plist_test(void) | |||
202 | plist_test_check(nr_expect); | 203 | plist_test_check(nr_expect); |
203 | } | 204 | } |
204 | 205 | ||
205 | printk(KERN_INFO "end plist test\n"); | 206 | pr_debug("end plist test\n"); |
206 | return 0; | 207 | return 0; |
207 | } | 208 | } |
208 | 209 | ||
diff --git a/lib/pm-notifier-error-inject.c b/lib/pm-notifier-error-inject.c new file mode 100644 index 000000000000..c094b2dedc23 --- /dev/null +++ b/lib/pm-notifier-error-inject.c | |||
@@ -0,0 +1,49 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/suspend.h> | ||
4 | |||
5 | #include "notifier-error-inject.h" | ||
6 | |||
7 | static int priority; | ||
8 | module_param(priority, int, 0); | ||
9 | MODULE_PARM_DESC(priority, "specify PM notifier priority"); | ||
10 | |||
11 | static struct notifier_err_inject pm_notifier_err_inject = { | ||
12 | .actions = { | ||
13 | { NOTIFIER_ERR_INJECT_ACTION(PM_HIBERNATION_PREPARE) }, | ||
14 | { NOTIFIER_ERR_INJECT_ACTION(PM_SUSPEND_PREPARE) }, | ||
15 | { NOTIFIER_ERR_INJECT_ACTION(PM_RESTORE_PREPARE) }, | ||
16 | {} | ||
17 | } | ||
18 | }; | ||
19 | |||
20 | static struct dentry *dir; | ||
21 | |||
22 | static int err_inject_init(void) | ||
23 | { | ||
24 | int err; | ||
25 | |||
26 | dir = notifier_err_inject_init("pm", notifier_err_inject_dir, | ||
27 | &pm_notifier_err_inject, priority); | ||
28 | if (IS_ERR(dir)) | ||
29 | return PTR_ERR(dir); | ||
30 | |||
31 | err = register_pm_notifier(&pm_notifier_err_inject.nb); | ||
32 | if (err) | ||
33 | debugfs_remove_recursive(dir); | ||
34 | |||
35 | return err; | ||
36 | } | ||
37 | |||
38 | static void err_inject_exit(void) | ||
39 | { | ||
40 | unregister_pm_notifier(&pm_notifier_err_inject.nb); | ||
41 | debugfs_remove_recursive(dir); | ||
42 | } | ||
43 | |||
44 | module_init(err_inject_init); | ||
45 | module_exit(err_inject_exit); | ||
46 | |||
47 | MODULE_DESCRIPTION("PM notifier error injection module"); | ||
48 | MODULE_LICENSE("GPL"); | ||
49 | MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>"); | ||
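
The module above injects failures into the PM notifier chain; what it exercises are ordinary register_pm_notifier() users. A hedged sketch of such a consumer (the driver, its "busy" condition and all names are hypothetical): an error injected for PM_SUSPEND_PREPARE aborts the suspend just as this callback's own -EBUSY would.

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/suspend.h>

    static bool demo_busy;  /* hypothetical "cannot suspend now" state */

    static int demo_pm_callback(struct notifier_block *nb,
                                unsigned long action, void *data)
    {
            if (action == PM_SUSPEND_PREPARE && demo_busy)
                    return notifier_from_errno(-EBUSY);     /* veto suspend */

            return NOTIFY_OK;
    }

    static struct notifier_block demo_pm_nb = {
            .notifier_call = demo_pm_callback,
    };

    static int __init demo_init(void)
    {
            return register_pm_notifier(&demo_pm_nb);
    }

    static void __exit demo_exit(void)
    {
            unregister_pm_notifier(&demo_pm_nb);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");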
diff --git a/lib/prio_tree.c b/lib/prio_tree.c deleted file mode 100644 index ccfd850b0dec..000000000000 --- a/lib/prio_tree.c +++ /dev/null | |||
@@ -1,484 +0,0 @@ | |||
1 | /* | ||
2 | * lib/prio_tree.c - priority search tree | ||
3 | * | ||
4 | * Copyright (C) 2004, Rajesh Venkatasubramanian <vrajesh@umich.edu> | ||
5 | * | ||
6 | * This file is released under the GPL v2. | ||
7 | * | ||
8 | * Based on the radix priority search tree proposed by Edward M. McCreight | ||
9 | * SIAM Journal of Computing, vol. 14, no.2, pages 257-276, May 1985 | ||
10 | * | ||
11 | * 02Feb2004 Initial version | ||
12 | */ | ||
13 | |||
14 | #include <linux/init.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/prio_tree.h> | ||
17 | |||
18 | /* | ||
19 | * A clever mix of heap and radix trees forms a radix priority search tree (PST) | ||
20 | * which is useful for storing intervals, e.g., we can consider a vma as a closed | ||
21 | * interval of file pages [offset_begin, offset_end], and store all vmas that | ||
22 | * map a file in a PST. Then, using the PST, we can answer a stabbing query, | ||
23 | * i.e., selecting a set of stored intervals (vmas) that overlap with (map) a | ||
24 | * given input interval X (a set of consecutive file pages), in "O(log n + m)" | ||
25 | * time where 'log n' is the height of the PST, and 'm' is the number of stored | ||
26 | * intervals (vmas) that overlap (map) with the input interval X (the set of | ||
27 | * consecutive file pages). | ||
28 | * | ||
29 | * In our implementation, we store closed intervals of the form [radix_index, | ||
30 | * heap_index]. We assume that always radix_index <= heap_index. McCreight's PST | ||
31 | * is designed for storing intervals with unique radix indices, i.e., each | ||
32 | * interval has a different radix_index. However, this limitation can be easily | ||
33 | * overcome by using the size, i.e., heap_index - radix_index, as part of the | ||
34 | * index, so we index the tree using [(radix_index,size), heap_index]. | ||
35 | * | ||
36 | * When the above-mentioned indexing scheme is used, theoretically, in a 32 bit | ||
37 | * machine, the maximum height of a PST can be 64. We can use a balanced version | ||
38 | * of the priority search tree to optimize the tree height, but the balanced | ||
39 | * tree proposed by McCreight is too complex and memory-hungry for our purpose. | ||
40 | */ | ||
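
A quick illustration of the interval view described above (the page numbers are invented): a vma mapping file pages 10..13 is stored as the closed interval [10, 13], and the stabbing query reduces to the usual closed-interval intersection test, the same predicate the overlap() helper further down implements.

    /* Illustration only: does the stored interval [radix_index, heap_index]
     * intersect the query interval [qr, qh]? */
    static inline int demo_intervals_overlap(unsigned long radix_index,
                                             unsigned long heap_index,
                                             unsigned long qr, unsigned long qh)
    {
            return qh >= radix_index && qr <= heap_index;
    }

    /* A vma covering pages [10, 13] overlaps a query for pages [12, 20]
     * (20 >= 10 && 12 <= 13), but not one for pages [14, 20]
     * (14 <= 13 is false). */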
41 | |||
42 | /* | ||
43 | * The following macros are used for implementing prio_tree for i_mmap | ||
44 | */ | ||
45 | |||
46 | #define RADIX_INDEX(vma) ((vma)->vm_pgoff) | ||
47 | #define VMA_SIZE(vma) (((vma)->vm_end - (vma)->vm_start) >> PAGE_SHIFT) | ||
48 | /* avoid overflow */ | ||
49 | #define HEAP_INDEX(vma) ((vma)->vm_pgoff + (VMA_SIZE(vma) - 1)) | ||
50 | |||
51 | |||
52 | static void get_index(const struct prio_tree_root *root, | ||
53 | const struct prio_tree_node *node, | ||
54 | unsigned long *radix, unsigned long *heap) | ||
55 | { | ||
56 | if (root->raw) { | ||
57 | struct vm_area_struct *vma = prio_tree_entry( | ||
58 | node, struct vm_area_struct, shared.prio_tree_node); | ||
59 | |||
60 | *radix = RADIX_INDEX(vma); | ||
61 | *heap = HEAP_INDEX(vma); | ||
62 | } | ||
63 | else { | ||
64 | *radix = node->start; | ||
65 | *heap = node->last; | ||
66 | } | ||
67 | } | ||
68 | |||
69 | static unsigned long index_bits_to_maxindex[BITS_PER_LONG]; | ||
70 | |||
71 | void __init prio_tree_init(void) | ||
72 | { | ||
73 | unsigned int i; | ||
74 | |||
75 | for (i = 0; i < ARRAY_SIZE(index_bits_to_maxindex) - 1; i++) | ||
76 | index_bits_to_maxindex[i] = (1UL << (i + 1)) - 1; | ||
77 | index_bits_to_maxindex[ARRAY_SIZE(index_bits_to_maxindex) - 1] = ~0UL; | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * Maximum heap_index that can be stored in a PST with index_bits bits | ||
82 | */ | ||
83 | static inline unsigned long prio_tree_maxindex(unsigned int bits) | ||
84 | { | ||
85 | return index_bits_to_maxindex[bits - 1]; | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * Extend a priority search tree so that it can store a node with heap_index | ||
90 | * max_heap_index. In the worst case, this algorithm takes O((log n)^2). | ||
91 | * However, this function is used rarely and the common case performance is | ||
92 | * not bad. | ||
93 | */ | ||
94 | static struct prio_tree_node *prio_tree_expand(struct prio_tree_root *root, | ||
95 | struct prio_tree_node *node, unsigned long max_heap_index) | ||
96 | { | ||
97 | struct prio_tree_node *first = NULL, *prev, *last = NULL; | ||
98 | |||
99 | if (max_heap_index > prio_tree_maxindex(root->index_bits)) | ||
100 | root->index_bits++; | ||
101 | |||
102 | while (max_heap_index > prio_tree_maxindex(root->index_bits)) { | ||
103 | root->index_bits++; | ||
104 | |||
105 | if (prio_tree_empty(root)) | ||
106 | continue; | ||
107 | |||
108 | if (first == NULL) { | ||
109 | first = root->prio_tree_node; | ||
110 | prio_tree_remove(root, root->prio_tree_node); | ||
111 | INIT_PRIO_TREE_NODE(first); | ||
112 | last = first; | ||
113 | } else { | ||
114 | prev = last; | ||
115 | last = root->prio_tree_node; | ||
116 | prio_tree_remove(root, root->prio_tree_node); | ||
117 | INIT_PRIO_TREE_NODE(last); | ||
118 | prev->left = last; | ||
119 | last->parent = prev; | ||
120 | } | ||
121 | } | ||
122 | |||
123 | INIT_PRIO_TREE_NODE(node); | ||
124 | |||
125 | if (first) { | ||
126 | node->left = first; | ||
127 | first->parent = node; | ||
128 | } else | ||
129 | last = node; | ||
130 | |||
131 | if (!prio_tree_empty(root)) { | ||
132 | last->left = root->prio_tree_node; | ||
133 | last->left->parent = last; | ||
134 | } | ||
135 | |||
136 | root->prio_tree_node = node; | ||
137 | return node; | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * Replace a prio_tree_node with a new node and return the old node | ||
142 | */ | ||
143 | struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root, | ||
144 | struct prio_tree_node *old, struct prio_tree_node *node) | ||
145 | { | ||
146 | INIT_PRIO_TREE_NODE(node); | ||
147 | |||
148 | if (prio_tree_root(old)) { | ||
149 | BUG_ON(root->prio_tree_node != old); | ||
150 | /* | ||
151 | * We can reduce root->index_bits here. However, it is complex | ||
152 | * and does not help much to improve performance (IMO). | ||
153 | */ | ||
154 | node->parent = node; | ||
155 | root->prio_tree_node = node; | ||
156 | } else { | ||
157 | node->parent = old->parent; | ||
158 | if (old->parent->left == old) | ||
159 | old->parent->left = node; | ||
160 | else | ||
161 | old->parent->right = node; | ||
162 | } | ||
163 | |||
164 | if (!prio_tree_left_empty(old)) { | ||
165 | node->left = old->left; | ||
166 | old->left->parent = node; | ||
167 | } | ||
168 | |||
169 | if (!prio_tree_right_empty(old)) { | ||
170 | node->right = old->right; | ||
171 | old->right->parent = node; | ||
172 | } | ||
173 | |||
174 | return old; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * Insert a prio_tree_node @node into a radix priority search tree @root. The | ||
179 | * algorithm typically takes O(log n) time where 'log n' is the number of bits | ||
180 | * required to represent the maximum heap_index. In the worst case, the algo | ||
181 | * can take O((log n)^2) - check prio_tree_expand. | ||
182 | * | ||
183 | * If a prior node with same radix_index and heap_index is already found in | ||
184 | * the tree, then returns the address of the prior node. Otherwise, inserts | ||
185 | * @node into the tree and returns @node. | ||
186 | */ | ||
187 | struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root, | ||
188 | struct prio_tree_node *node) | ||
189 | { | ||
190 | struct prio_tree_node *cur, *res = node; | ||
191 | unsigned long radix_index, heap_index; | ||
192 | unsigned long r_index, h_index, index, mask; | ||
193 | int size_flag = 0; | ||
194 | |||
195 | get_index(root, node, &radix_index, &heap_index); | ||
196 | |||
197 | if (prio_tree_empty(root) || | ||
198 | heap_index > prio_tree_maxindex(root->index_bits)) | ||
199 | return prio_tree_expand(root, node, heap_index); | ||
200 | |||
201 | cur = root->prio_tree_node; | ||
202 | mask = 1UL << (root->index_bits - 1); | ||
203 | |||
204 | while (mask) { | ||
205 | get_index(root, cur, &r_index, &h_index); | ||
206 | |||
207 | if (r_index == radix_index && h_index == heap_index) | ||
208 | return cur; | ||
209 | |||
210 | if (h_index < heap_index || | ||
211 | (h_index == heap_index && r_index > radix_index)) { | ||
212 | struct prio_tree_node *tmp = node; | ||
213 | node = prio_tree_replace(root, cur, node); | ||
214 | cur = tmp; | ||
215 | /* swap indices */ | ||
216 | index = r_index; | ||
217 | r_index = radix_index; | ||
218 | radix_index = index; | ||
219 | index = h_index; | ||
220 | h_index = heap_index; | ||
221 | heap_index = index; | ||
222 | } | ||
223 | |||
224 | if (size_flag) | ||
225 | index = heap_index - radix_index; | ||
226 | else | ||
227 | index = radix_index; | ||
228 | |||
229 | if (index & mask) { | ||
230 | if (prio_tree_right_empty(cur)) { | ||
231 | INIT_PRIO_TREE_NODE(node); | ||
232 | cur->right = node; | ||
233 | node->parent = cur; | ||
234 | return res; | ||
235 | } else | ||
236 | cur = cur->right; | ||
237 | } else { | ||
238 | if (prio_tree_left_empty(cur)) { | ||
239 | INIT_PRIO_TREE_NODE(node); | ||
240 | cur->left = node; | ||
241 | node->parent = cur; | ||
242 | return res; | ||
243 | } else | ||
244 | cur = cur->left; | ||
245 | } | ||
246 | |||
247 | mask >>= 1; | ||
248 | |||
249 | if (!mask) { | ||
250 | mask = 1UL << (BITS_PER_LONG - 1); | ||
251 | size_flag = 1; | ||
252 | } | ||
253 | } | ||
254 | /* Should not reach here */ | ||
255 | BUG(); | ||
256 | return NULL; | ||
257 | } | ||
258 | |||
259 | /* | ||
260 | * Remove a prio_tree_node @node from a radix priority search tree @root. The | ||
261 | * algorithm takes O(log n) time where 'log n' is the number of bits required | ||
262 | * to represent the maximum heap_index. | ||
263 | */ | ||
264 | void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node) | ||
265 | { | ||
266 | struct prio_tree_node *cur; | ||
267 | unsigned long r_index, h_index_right, h_index_left; | ||
268 | |||
269 | cur = node; | ||
270 | |||
271 | while (!prio_tree_left_empty(cur) || !prio_tree_right_empty(cur)) { | ||
272 | if (!prio_tree_left_empty(cur)) | ||
273 | get_index(root, cur->left, &r_index, &h_index_left); | ||
274 | else { | ||
275 | cur = cur->right; | ||
276 | continue; | ||
277 | } | ||
278 | |||
279 | if (!prio_tree_right_empty(cur)) | ||
280 | get_index(root, cur->right, &r_index, &h_index_right); | ||
281 | else { | ||
282 | cur = cur->left; | ||
283 | continue; | ||
284 | } | ||
285 | |||
286 | /* both h_index_left and h_index_right cannot be 0 */ | ||
287 | if (h_index_left >= h_index_right) | ||
288 | cur = cur->left; | ||
289 | else | ||
290 | cur = cur->right; | ||
291 | } | ||
292 | |||
293 | if (prio_tree_root(cur)) { | ||
294 | BUG_ON(root->prio_tree_node != cur); | ||
295 | __INIT_PRIO_TREE_ROOT(root, root->raw); | ||
296 | return; | ||
297 | } | ||
298 | |||
299 | if (cur->parent->right == cur) | ||
300 | cur->parent->right = cur->parent; | ||
301 | else | ||
302 | cur->parent->left = cur->parent; | ||
303 | |||
304 | while (cur != node) | ||
305 | cur = prio_tree_replace(root, cur->parent, cur); | ||
306 | } | ||
307 | |||
308 | /* | ||
309 | * Following functions help to enumerate all prio_tree_nodes in the tree that | ||
310 | * overlap with the input interval X [radix_index, heap_index]. The enumeration | ||
311 | * takes O(log n + m) time where 'log n' is the height of the tree (which is | ||
312 | * proportional to # of bits required to represent the maximum heap_index) and | ||
313 | * 'm' is the number of prio_tree_nodes that overlap the interval X. | ||
314 | */ | ||
315 | |||
316 | static struct prio_tree_node *prio_tree_left(struct prio_tree_iter *iter, | ||
317 | unsigned long *r_index, unsigned long *h_index) | ||
318 | { | ||
319 | if (prio_tree_left_empty(iter->cur)) | ||
320 | return NULL; | ||
321 | |||
322 | get_index(iter->root, iter->cur->left, r_index, h_index); | ||
323 | |||
324 | if (iter->r_index <= *h_index) { | ||
325 | iter->cur = iter->cur->left; | ||
326 | iter->mask >>= 1; | ||
327 | if (iter->mask) { | ||
328 | if (iter->size_level) | ||
329 | iter->size_level++; | ||
330 | } else { | ||
331 | if (iter->size_level) { | ||
332 | BUG_ON(!prio_tree_left_empty(iter->cur)); | ||
333 | BUG_ON(!prio_tree_right_empty(iter->cur)); | ||
334 | iter->size_level++; | ||
335 | iter->mask = ULONG_MAX; | ||
336 | } else { | ||
337 | iter->size_level = 1; | ||
338 | iter->mask = 1UL << (BITS_PER_LONG - 1); | ||
339 | } | ||
340 | } | ||
341 | return iter->cur; | ||
342 | } | ||
343 | |||
344 | return NULL; | ||
345 | } | ||
346 | |||
347 | static struct prio_tree_node *prio_tree_right(struct prio_tree_iter *iter, | ||
348 | unsigned long *r_index, unsigned long *h_index) | ||
349 | { | ||
350 | unsigned long value; | ||
351 | |||
352 | if (prio_tree_right_empty(iter->cur)) | ||
353 | return NULL; | ||
354 | |||
355 | if (iter->size_level) | ||
356 | value = iter->value; | ||
357 | else | ||
358 | value = iter->value | iter->mask; | ||
359 | |||
360 | if (iter->h_index < value) | ||
361 | return NULL; | ||
362 | |||
363 | get_index(iter->root, iter->cur->right, r_index, h_index); | ||
364 | |||
365 | if (iter->r_index <= *h_index) { | ||
366 | iter->cur = iter->cur->right; | ||
367 | iter->mask >>= 1; | ||
368 | iter->value = value; | ||
369 | if (iter->mask) { | ||
370 | if (iter->size_level) | ||
371 | iter->size_level++; | ||
372 | } else { | ||
373 | if (iter->size_level) { | ||
374 | BUG_ON(!prio_tree_left_empty(iter->cur)); | ||
375 | BUG_ON(!prio_tree_right_empty(iter->cur)); | ||
376 | iter->size_level++; | ||
377 | iter->mask = ULONG_MAX; | ||
378 | } else { | ||
379 | iter->size_level = 1; | ||
380 | iter->mask = 1UL << (BITS_PER_LONG - 1); | ||
381 | } | ||
382 | } | ||
383 | return iter->cur; | ||
384 | } | ||
385 | |||
386 | return NULL; | ||
387 | } | ||
388 | |||
389 | static struct prio_tree_node *prio_tree_parent(struct prio_tree_iter *iter) | ||
390 | { | ||
391 | iter->cur = iter->cur->parent; | ||
392 | if (iter->mask == ULONG_MAX) | ||
393 | iter->mask = 1UL; | ||
394 | else if (iter->size_level == 1) | ||
395 | iter->mask = 1UL; | ||
396 | else | ||
397 | iter->mask <<= 1; | ||
398 | if (iter->size_level) | ||
399 | iter->size_level--; | ||
400 | if (!iter->size_level && (iter->value & iter->mask)) | ||
401 | iter->value ^= iter->mask; | ||
402 | return iter->cur; | ||
403 | } | ||
404 | |||
405 | static inline int overlap(struct prio_tree_iter *iter, | ||
406 | unsigned long r_index, unsigned long h_index) | ||
407 | { | ||
408 | return iter->h_index >= r_index && iter->r_index <= h_index; | ||
409 | } | ||
410 | |||
411 | /* | ||
412 | * prio_tree_first: | ||
413 | * | ||
414 | * Get the first prio_tree_node that overlaps with the interval [radix_index, | ||
415 | * heap_index]. Note that always radix_index <= heap_index. We do a pre-order | ||
416 | * traversal of the tree. | ||
417 | */ | ||
418 | static struct prio_tree_node *prio_tree_first(struct prio_tree_iter *iter) | ||
419 | { | ||
420 | struct prio_tree_root *root; | ||
421 | unsigned long r_index, h_index; | ||
422 | |||
423 | INIT_PRIO_TREE_ITER(iter); | ||
424 | |||
425 | root = iter->root; | ||
426 | if (prio_tree_empty(root)) | ||
427 | return NULL; | ||
428 | |||
429 | get_index(root, root->prio_tree_node, &r_index, &h_index); | ||
430 | |||
431 | if (iter->r_index > h_index) | ||
432 | return NULL; | ||
433 | |||
434 | iter->mask = 1UL << (root->index_bits - 1); | ||
435 | iter->cur = root->prio_tree_node; | ||
436 | |||
437 | while (1) { | ||
438 | if (overlap(iter, r_index, h_index)) | ||
439 | return iter->cur; | ||
440 | |||
441 | if (prio_tree_left(iter, &r_index, &h_index)) | ||
442 | continue; | ||
443 | |||
444 | if (prio_tree_right(iter, &r_index, &h_index)) | ||
445 | continue; | ||
446 | |||
447 | break; | ||
448 | } | ||
449 | return NULL; | ||
450 | } | ||
451 | |||
452 | /* | ||
453 | * prio_tree_next: | ||
454 | * | ||
455 | * Get the next prio_tree_node that overlaps with the input interval in iter | ||
456 | */ | ||
457 | struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter) | ||
458 | { | ||
459 | unsigned long r_index, h_index; | ||
460 | |||
461 | if (iter->cur == NULL) | ||
462 | return prio_tree_first(iter); | ||
463 | |||
464 | repeat: | ||
465 | while (prio_tree_left(iter, &r_index, &h_index)) | ||
466 | if (overlap(iter, r_index, h_index)) | ||
467 | return iter->cur; | ||
468 | |||
469 | while (!prio_tree_right(iter, &r_index, &h_index)) { | ||
470 | while (!prio_tree_root(iter->cur) && | ||
471 | iter->cur->parent->right == iter->cur) | ||
472 | prio_tree_parent(iter); | ||
473 | |||
474 | if (prio_tree_root(iter->cur)) | ||
475 | return NULL; | ||
476 | |||
477 | prio_tree_parent(iter); | ||
478 | } | ||
479 | |||
480 | if (overlap(iter, r_index, h_index)) | ||
481 | return iter->cur; | ||
482 | |||
483 | goto repeat; | ||
484 | } | ||
diff --git a/lib/proportions.c b/lib/proportions.c index d50746a79de2..05df84801b56 100644 --- a/lib/proportions.c +++ b/lib/proportions.c | |||
@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift) | |||
190 | 190 | ||
191 | int prop_local_init_percpu(struct prop_local_percpu *pl) | 191 | int prop_local_init_percpu(struct prop_local_percpu *pl) |
192 | { | 192 | { |
193 | spin_lock_init(&pl->lock); | 193 | raw_spin_lock_init(&pl->lock); |
194 | pl->shift = 0; | 194 | pl->shift = 0; |
195 | pl->period = 0; | 195 | pl->period = 0; |
196 | return percpu_counter_init(&pl->events, 0); | 196 | return percpu_counter_init(&pl->events, 0); |
@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl) | |||
226 | if (pl->period == global_period) | 226 | if (pl->period == global_period) |
227 | return; | 227 | return; |
228 | 228 | ||
229 | spin_lock_irqsave(&pl->lock, flags); | 229 | raw_spin_lock_irqsave(&pl->lock, flags); |
230 | prop_adjust_shift(&pl->shift, &pl->period, pg->shift); | 230 | prop_adjust_shift(&pl->shift, &pl->period, pg->shift); |
231 | 231 | ||
232 | /* | 232 | /* |
@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl) | |||
247 | percpu_counter_set(&pl->events, 0); | 247 | percpu_counter_set(&pl->events, 0); |
248 | 248 | ||
249 | pl->period = global_period; | 249 | pl->period = global_period; |
250 | spin_unlock_irqrestore(&pl->lock, flags); | 250 | raw_spin_unlock_irqrestore(&pl->lock, flags); |
251 | } | 251 | } |
252 | 252 | ||
253 | /* | 253 | /* |
@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd, | |||
324 | 324 | ||
325 | int prop_local_init_single(struct prop_local_single *pl) | 325 | int prop_local_init_single(struct prop_local_single *pl) |
326 | { | 326 | { |
327 | spin_lock_init(&pl->lock); | 327 | raw_spin_lock_init(&pl->lock); |
328 | pl->shift = 0; | 328 | pl->shift = 0; |
329 | pl->period = 0; | 329 | pl->period = 0; |
330 | pl->events = 0; | 330 | pl->events = 0; |
@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl) | |||
356 | if (pl->period == global_period) | 356 | if (pl->period == global_period) |
357 | return; | 357 | return; |
358 | 358 | ||
359 | spin_lock_irqsave(&pl->lock, flags); | 359 | raw_spin_lock_irqsave(&pl->lock, flags); |
360 | prop_adjust_shift(&pl->shift, &pl->period, pg->shift); | 360 | prop_adjust_shift(&pl->shift, &pl->period, pg->shift); |
361 | /* | 361 | /* |
362 | * For each missed period, we half the local counter. | 362 | * For each missed period, we half the local counter. |
@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl) | |||
367 | else | 367 | else |
368 | pl->events = 0; | 368 | pl->events = 0; |
369 | pl->period = global_period; | 369 | pl->period = global_period; |
370 | spin_unlock_irqrestore(&pl->lock, flags); | 370 | raw_spin_unlock_irqrestore(&pl->lock, flags); |
371 | } | 371 | } |
372 | 372 | ||
373 | /* | 373 | /* |
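
The raw-spinlock conversion above leaves the decay rule from prop_norm_single()'s comment untouched: for each missed period the local event count is halved, falling straight to zero once the shift would exceed the word size (the "else pl->events = 0" branch visible in this hunk). A tiny sketch of that rule in isolation; the helper name is invented:

    #include <linux/kernel.h>

    /* Decay an event count by the "halve per missed period" rule. */
    static unsigned long demo_decay_events(unsigned long events,
                                           unsigned long missed_periods)
    {
            if (missed_periods < BITS_PER_LONG)
                    return events >> missed_periods;
            return 0;
    }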
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index a2f9da59c197..e7964296fd50 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * Portions Copyright (C) 2001 Christoph Hellwig | 3 | * Portions Copyright (C) 2001 Christoph Hellwig |
4 | * Copyright (C) 2005 SGI, Christoph Lameter | 4 | * Copyright (C) 2005 SGI, Christoph Lameter |
5 | * Copyright (C) 2006 Nick Piggin | 5 | * Copyright (C) 2006 Nick Piggin |
6 | * Copyright (C) 2012 Konstantin Khlebnikov | ||
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License as | 9 | * modify it under the terms of the GNU General Public License as |
@@ -22,7 +23,7 @@ | |||
22 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
23 | #include <linux/init.h> | 24 | #include <linux/init.h> |
24 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
25 | #include <linux/module.h> | 26 | #include <linux/export.h> |
26 | #include <linux/radix-tree.h> | 27 | #include <linux/radix-tree.h> |
27 | #include <linux/percpu.h> | 28 | #include <linux/percpu.h> |
28 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
@@ -48,16 +49,14 @@ | |||
48 | struct radix_tree_node { | 49 | struct radix_tree_node { |
49 | unsigned int height; /* Height from the bottom */ | 50 | unsigned int height; /* Height from the bottom */ |
50 | unsigned int count; | 51 | unsigned int count; |
51 | struct rcu_head rcu_head; | 52 | union { |
53 | struct radix_tree_node *parent; /* Used when ascending tree */ | ||
54 | struct rcu_head rcu_head; /* Used when freeing node */ | ||
55 | }; | ||
52 | void __rcu *slots[RADIX_TREE_MAP_SIZE]; | 56 | void __rcu *slots[RADIX_TREE_MAP_SIZE]; |
53 | unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; | 57 | unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; |
54 | }; | 58 | }; |
55 | 59 | ||
56 | struct radix_tree_path { | ||
57 | struct radix_tree_node *node; | ||
58 | int offset; | ||
59 | }; | ||
60 | |||
61 | #define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) | 60 | #define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) |
62 | #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ | 61 | #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ |
63 | RADIX_TREE_MAP_SHIFT)) | 62 | RADIX_TREE_MAP_SHIFT)) |
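
Folding the parent pointer into a union with the rcu_head works because the two are never needed at the same time: the parent link is used while a node is live and being ascended, the rcu_head only once the node has been queued for freeing. The back-pointer is what lets the later hunks drop the on-stack radix_tree_path[] array and walk upward like this (condensed from the tag-propagation loops below):

    /* Ascend from a leaf toward the root using node->parent. */
    while (node) {
            index >>= RADIX_TREE_MAP_SHIFT;
            offset = index & RADIX_TREE_MAP_MASK;
            /* ... set or clear a tag at (node, offset) ... */
            node = node->parent;
    }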
@@ -74,11 +73,24 @@ static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1] __read_mostly; | |||
74 | static struct kmem_cache *radix_tree_node_cachep; | 73 | static struct kmem_cache *radix_tree_node_cachep; |
75 | 74 | ||
76 | /* | 75 | /* |
76 | * The radix tree is variable-height, so an insert operation not only has | ||
77 | * to build the branch to its corresponding item, it also has to build the | ||
78 | * branch to existing items if the size has to be increased (by | ||
79 | * radix_tree_extend). | ||
80 | * | ||
81 | * The worst case is a zero height tree with just a single item at index 0, | ||
82 | * and then inserting an item at index ULONG_MAX. This requires 2 new branches | ||
83 | * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared. | ||
84 | * Hence: | ||
85 | */ | ||
86 | #define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1) | ||
87 | |||
88 | /* | ||
77 | * Per-cpu pool of preloaded nodes | 89 | * Per-cpu pool of preloaded nodes |
78 | */ | 90 | */ |
79 | struct radix_tree_preload { | 91 | struct radix_tree_preload { |
80 | int nr; | 92 | int nr; |
81 | struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH]; | 93 | struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE]; |
82 | }; | 94 | }; |
83 | static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; | 95 | static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; |
84 | 96 | ||
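
The comment above is a worst-case node count: two complete root-to-leaf branches sharing only the root. Plugging in typical values (assuming RADIX_TREE_MAP_SHIFT is 6 and BITS_PER_LONG is 64, which depends on configuration and is not shown in this hunk):

    RADIX_TREE_INDEX_BITS   = BITS_PER_LONG               = 64
    RADIX_TREE_MAX_PATH     = DIV_ROUND_UP(64, 6)         = 11
    RADIX_TREE_PRELOAD_SIZE = RADIX_TREE_MAX_PATH * 2 - 1 = 21

so the per-CPU preload pool keeps at most 21 spare nodes to guarantee that a single insert, including a radix_tree_extend(), cannot run out.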
@@ -148,6 +160,43 @@ static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) | |||
148 | } | 160 | } |
149 | return 0; | 161 | return 0; |
150 | } | 162 | } |
163 | |||
164 | /** | ||
165 | * radix_tree_find_next_bit - find the next set bit in a memory region | ||
166 | * | ||
167 | * @addr: The address to base the search on | ||
168 | * @size: The bitmap size in bits | ||
169 | * @offset: The bitnumber to start searching at | ||
170 | * | ||
171 | * Unrollable variant of find_next_bit() for constant size arrays. | ||
172 | * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero. | ||
173 | * Returns next bit offset, or size if nothing found. | ||
174 | */ | ||
175 | static __always_inline unsigned long | ||
176 | radix_tree_find_next_bit(const unsigned long *addr, | ||
177 | unsigned long size, unsigned long offset) | ||
178 | { | ||
179 | if (!__builtin_constant_p(size)) | ||
180 | return find_next_bit(addr, size, offset); | ||
181 | |||
182 | if (offset < size) { | ||
183 | unsigned long tmp; | ||
184 | |||
185 | addr += offset / BITS_PER_LONG; | ||
186 | tmp = *addr >> (offset % BITS_PER_LONG); | ||
187 | if (tmp) | ||
188 | return __ffs(tmp) + offset; | ||
189 | offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1); | ||
190 | while (offset < size) { | ||
191 | tmp = *++addr; | ||
192 | if (tmp) | ||
193 | return __ffs(tmp) + offset; | ||
194 | offset += BITS_PER_LONG; | ||
195 | } | ||
196 | } | ||
197 | return size; | ||
198 | } | ||
199 | |||
151 | /* | 200 | /* |
152 | * This assumes that the caller has performed appropriate preallocation, and | 201 | * This assumes that the caller has performed appropriate preallocation, and |
153 | * that the caller has pinned this thread of control to the current CPU. | 202 | * that the caller has pinned this thread of control to the current CPU. |
@@ -256,6 +305,7 @@ static inline unsigned long radix_tree_maxindex(unsigned int height) | |||
256 | static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) | 305 | static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) |
257 | { | 306 | { |
258 | struct radix_tree_node *node; | 307 | struct radix_tree_node *node; |
308 | struct radix_tree_node *slot; | ||
259 | unsigned int height; | 309 | unsigned int height; |
260 | int tag; | 310 | int tag; |
261 | 311 | ||
@@ -274,18 +324,23 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) | |||
274 | if (!(node = radix_tree_node_alloc(root))) | 324 | if (!(node = radix_tree_node_alloc(root))) |
275 | return -ENOMEM; | 325 | return -ENOMEM; |
276 | 326 | ||
277 | /* Increase the height. */ | ||
278 | node->slots[0] = indirect_to_ptr(root->rnode); | ||
279 | |||
280 | /* Propagate the aggregated tag info into the new root */ | 327 | /* Propagate the aggregated tag info into the new root */ |
281 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { | 328 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { |
282 | if (root_tag_get(root, tag)) | 329 | if (root_tag_get(root, tag)) |
283 | tag_set(node, tag, 0); | 330 | tag_set(node, tag, 0); |
284 | } | 331 | } |
285 | 332 | ||
333 | /* Increase the height. */ | ||
286 | newheight = root->height+1; | 334 | newheight = root->height+1; |
287 | node->height = newheight; | 335 | node->height = newheight; |
288 | node->count = 1; | 336 | node->count = 1; |
337 | node->parent = NULL; | ||
338 | slot = root->rnode; | ||
339 | if (newheight > 1) { | ||
340 | slot = indirect_to_ptr(slot); | ||
341 | slot->parent = node; | ||
342 | } | ||
343 | node->slots[0] = slot; | ||
289 | node = ptr_to_indirect(node); | 344 | node = ptr_to_indirect(node); |
290 | rcu_assign_pointer(root->rnode, node); | 345 | rcu_assign_pointer(root->rnode, node); |
291 | root->height = newheight; | 346 | root->height = newheight; |
@@ -331,6 +386,7 @@ int radix_tree_insert(struct radix_tree_root *root, | |||
331 | if (!(slot = radix_tree_node_alloc(root))) | 386 | if (!(slot = radix_tree_node_alloc(root))) |
332 | return -ENOMEM; | 387 | return -ENOMEM; |
333 | slot->height = height; | 388 | slot->height = height; |
389 | slot->parent = node; | ||
334 | if (node) { | 390 | if (node) { |
335 | rcu_assign_pointer(node->slots[offset], slot); | 391 | rcu_assign_pointer(node->slots[offset], slot); |
336 | node->count++; | 392 | node->count++; |
@@ -504,47 +560,41 @@ EXPORT_SYMBOL(radix_tree_tag_set); | |||
504 | void *radix_tree_tag_clear(struct radix_tree_root *root, | 560 | void *radix_tree_tag_clear(struct radix_tree_root *root, |
505 | unsigned long index, unsigned int tag) | 561 | unsigned long index, unsigned int tag) |
506 | { | 562 | { |
507 | /* | 563 | struct radix_tree_node *node = NULL; |
508 | * The radix tree path needs to be one longer than the maximum path | ||
509 | * since the "list" is null terminated. | ||
510 | */ | ||
511 | struct radix_tree_path path[RADIX_TREE_MAX_PATH + 1], *pathp = path; | ||
512 | struct radix_tree_node *slot = NULL; | 564 | struct radix_tree_node *slot = NULL; |
513 | unsigned int height, shift; | 565 | unsigned int height, shift; |
566 | int uninitialized_var(offset); | ||
514 | 567 | ||
515 | height = root->height; | 568 | height = root->height; |
516 | if (index > radix_tree_maxindex(height)) | 569 | if (index > radix_tree_maxindex(height)) |
517 | goto out; | 570 | goto out; |
518 | 571 | ||
519 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | 572 | shift = height * RADIX_TREE_MAP_SHIFT; |
520 | pathp->node = NULL; | ||
521 | slot = indirect_to_ptr(root->rnode); | 573 | slot = indirect_to_ptr(root->rnode); |
522 | 574 | ||
523 | while (height > 0) { | 575 | while (shift) { |
524 | int offset; | ||
525 | |||
526 | if (slot == NULL) | 576 | if (slot == NULL) |
527 | goto out; | 577 | goto out; |
528 | 578 | ||
579 | shift -= RADIX_TREE_MAP_SHIFT; | ||
529 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | 580 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; |
530 | pathp[1].offset = offset; | 581 | node = slot; |
531 | pathp[1].node = slot; | ||
532 | slot = slot->slots[offset]; | 582 | slot = slot->slots[offset]; |
533 | pathp++; | ||
534 | shift -= RADIX_TREE_MAP_SHIFT; | ||
535 | height--; | ||
536 | } | 583 | } |
537 | 584 | ||
538 | if (slot == NULL) | 585 | if (slot == NULL) |
539 | goto out; | 586 | goto out; |
540 | 587 | ||
541 | while (pathp->node) { | 588 | while (node) { |
542 | if (!tag_get(pathp->node, tag, pathp->offset)) | 589 | if (!tag_get(node, tag, offset)) |
543 | goto out; | 590 | goto out; |
544 | tag_clear(pathp->node, tag, pathp->offset); | 591 | tag_clear(node, tag, offset); |
545 | if (any_tag_set(pathp->node, tag)) | 592 | if (any_tag_set(node, tag)) |
546 | goto out; | 593 | goto out; |
547 | pathp--; | 594 | |
595 | index >>= RADIX_TREE_MAP_SHIFT; | ||
596 | offset = index & RADIX_TREE_MAP_MASK; | ||
597 | node = node->parent; | ||
548 | } | 598 | } |
549 | 599 | ||
550 | /* clear the root's tag bit */ | 600 | /* clear the root's tag bit */ |
@@ -576,7 +626,6 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
576 | { | 626 | { |
577 | unsigned int height, shift; | 627 | unsigned int height, shift; |
578 | struct radix_tree_node *node; | 628 | struct radix_tree_node *node; |
579 | int saw_unset_tag = 0; | ||
580 | 629 | ||
581 | /* check the root's tag bit */ | 630 | /* check the root's tag bit */ |
582 | if (!root_tag_get(root, tag)) | 631 | if (!root_tag_get(root, tag)) |
@@ -603,15 +652,10 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
603 | return 0; | 652 | return 0; |
604 | 653 | ||
605 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | 654 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; |
606 | |||
607 | /* | ||
608 | * This is just a debug check. Later, we can bale as soon as | ||
609 | * we see an unset tag. | ||
610 | */ | ||
611 | if (!tag_get(node, tag, offset)) | 655 | if (!tag_get(node, tag, offset)) |
612 | saw_unset_tag = 1; | 656 | return 0; |
613 | if (height == 1) | 657 | if (height == 1) |
614 | return !!tag_get(node, tag, offset); | 658 | return 1; |
615 | node = rcu_dereference_raw(node->slots[offset]); | 659 | node = rcu_dereference_raw(node->slots[offset]); |
616 | shift -= RADIX_TREE_MAP_SHIFT; | 660 | shift -= RADIX_TREE_MAP_SHIFT; |
617 | height--; | 661 | height--; |
@@ -620,6 +664,122 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
620 | EXPORT_SYMBOL(radix_tree_tag_get); | 664 | EXPORT_SYMBOL(radix_tree_tag_get); |
621 | 665 | ||
622 | /** | 666 | /** |
667 | * radix_tree_next_chunk - find next chunk of slots for iteration | ||
668 | * | ||
669 | * @root: radix tree root | ||
670 | * @iter: iterator state | ||
671 | * @flags: RADIX_TREE_ITER_* flags and tag index | ||
672 | * Returns: pointer to chunk first slot, or NULL if iteration is over | ||
673 | */ | ||
674 | void **radix_tree_next_chunk(struct radix_tree_root *root, | ||
675 | struct radix_tree_iter *iter, unsigned flags) | ||
676 | { | ||
677 | unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK; | ||
678 | struct radix_tree_node *rnode, *node; | ||
679 | unsigned long index, offset; | ||
680 | |||
681 | if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag)) | ||
682 | return NULL; | ||
683 | |||
684 | /* | ||
685 | * Catch next_index overflow after ~0UL. iter->index never overflows | ||
686 | * during iteration; it can be zero only at the beginning. | ||
687 | * And we cannot overflow iter->next_index in a single step, | ||
688 | * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. | ||
689 | * | ||
690 | * This condition is also used by radix_tree_next_slot() to stop | ||
691 | * contiguous iteration, and forbids switching to the next chunk. | ||
692 | */ | ||
693 | index = iter->next_index; | ||
694 | if (!index && iter->index) | ||
695 | return NULL; | ||
696 | |||
697 | rnode = rcu_dereference_raw(root->rnode); | ||
698 | if (radix_tree_is_indirect_ptr(rnode)) { | ||
699 | rnode = indirect_to_ptr(rnode); | ||
700 | } else if (rnode && !index) { | ||
701 | /* Single-slot tree */ | ||
702 | iter->index = 0; | ||
703 | iter->next_index = 1; | ||
704 | iter->tags = 1; | ||
705 | return (void **)&root->rnode; | ||
706 | } else | ||
707 | return NULL; | ||
708 | |||
709 | restart: | ||
710 | shift = (rnode->height - 1) * RADIX_TREE_MAP_SHIFT; | ||
711 | offset = index >> shift; | ||
712 | |||
713 | /* Index outside of the tree */ | ||
714 | if (offset >= RADIX_TREE_MAP_SIZE) | ||
715 | return NULL; | ||
716 | |||
717 | node = rnode; | ||
718 | while (1) { | ||
719 | if ((flags & RADIX_TREE_ITER_TAGGED) ? | ||
720 | !test_bit(offset, node->tags[tag]) : | ||
721 | !node->slots[offset]) { | ||
722 | /* Hole detected */ | ||
723 | if (flags & RADIX_TREE_ITER_CONTIG) | ||
724 | return NULL; | ||
725 | |||
726 | if (flags & RADIX_TREE_ITER_TAGGED) | ||
727 | offset = radix_tree_find_next_bit( | ||
728 | node->tags[tag], | ||
729 | RADIX_TREE_MAP_SIZE, | ||
730 | offset + 1); | ||
731 | else | ||
732 | while (++offset < RADIX_TREE_MAP_SIZE) { | ||
733 | if (node->slots[offset]) | ||
734 | break; | ||
735 | } | ||
736 | index &= ~((RADIX_TREE_MAP_SIZE << shift) - 1); | ||
737 | index += offset << shift; | ||
738 | /* Overflow after ~0UL */ | ||
739 | if (!index) | ||
740 | return NULL; | ||
741 | if (offset == RADIX_TREE_MAP_SIZE) | ||
742 | goto restart; | ||
743 | } | ||
744 | |||
745 | /* This is a leaf node */ | ||
746 | if (!shift) | ||
747 | break; | ||
748 | |||
749 | node = rcu_dereference_raw(node->slots[offset]); | ||
750 | if (node == NULL) | ||
751 | goto restart; | ||
752 | shift -= RADIX_TREE_MAP_SHIFT; | ||
753 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | ||
754 | } | ||
755 | |||
756 | /* Update the iterator state */ | ||
757 | iter->index = index; | ||
758 | iter->next_index = (index | RADIX_TREE_MAP_MASK) + 1; | ||
759 | |||
760 | /* Construct iter->tags bit-mask from node->tags[tag] array */ | ||
761 | if (flags & RADIX_TREE_ITER_TAGGED) { | ||
762 | unsigned tag_long, tag_bit; | ||
763 | |||
764 | tag_long = offset / BITS_PER_LONG; | ||
765 | tag_bit = offset % BITS_PER_LONG; | ||
766 | iter->tags = node->tags[tag][tag_long] >> tag_bit; | ||
767 | /* This never happens if RADIX_TREE_TAG_LONGS == 1 */ | ||
768 | if (tag_long < RADIX_TREE_TAG_LONGS - 1) { | ||
769 | /* Pick tags from next element */ | ||
770 | if (tag_bit) | ||
771 | iter->tags |= node->tags[tag][tag_long + 1] << | ||
772 | (BITS_PER_LONG - tag_bit); | ||
773 | /* Clip chunk size, here only BITS_PER_LONG tags */ | ||
774 | iter->next_index = index + BITS_PER_LONG; | ||
775 | } | ||
776 | } | ||
777 | |||
778 | return node->slots + offset; | ||
779 | } | ||
780 | EXPORT_SYMBOL(radix_tree_next_chunk); | ||
781 | |||
782 | /** | ||
623 | * radix_tree_range_tag_if_tagged - for each item in given range set given | 783 | * radix_tree_range_tag_if_tagged - for each item in given range set given |
624 | * tag if item has another tag set | 784 | * tag if item has another tag set |
625 | * @root: radix tree root | 785 | * @root: radix tree root |
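
radix_tree_next_chunk() above is the engine behind the radix_tree_for_each_slot() iterator that the rewritten gang lookups further down switch to. A minimal caller-side sketch (the tree and what its slots hold are hypothetical; as before, lockless walkers need RCU protection):

    #include <linux/radix-tree.h>
    #include <linux/rcupdate.h>

    /* Count every present item starting at index 0. */
    static unsigned long demo_count_items(struct radix_tree_root *root)
    {
            struct radix_tree_iter iter;
            void **slot;
            unsigned long nr = 0;

            rcu_read_lock();
            radix_tree_for_each_slot(slot, root, &iter, 0) {
                    if (!radix_tree_deref_slot(slot))
                            continue;       /* raced with removal */
                    nr++;                   /* iter.index is this item's index */
            }
            rcu_read_unlock();

            return nr;
    }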
@@ -652,8 +812,7 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | |||
652 | unsigned int iftag, unsigned int settag) | 812 | unsigned int iftag, unsigned int settag) |
653 | { | 813 | { |
654 | unsigned int height = root->height; | 814 | unsigned int height = root->height; |
655 | struct radix_tree_path path[height]; | 815 | struct radix_tree_node *node = NULL; |
656 | struct radix_tree_path *pathp = path; | ||
657 | struct radix_tree_node *slot; | 816 | struct radix_tree_node *slot; |
658 | unsigned int shift; | 817 | unsigned int shift; |
659 | unsigned long tagged = 0; | 818 | unsigned long tagged = 0; |
@@ -677,14 +836,8 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | |||
677 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | 836 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; |
678 | slot = indirect_to_ptr(root->rnode); | 837 | slot = indirect_to_ptr(root->rnode); |
679 | 838 | ||
680 | /* | ||
681 | * we fill the path from (root->height - 2) to 0, leaving the index at | ||
682 | * (root->height - 1) as a terminator. Zero the node in the terminator | ||
683 | * so that we can use this to end walk loops back up the path. | ||
684 | */ | ||
685 | path[height - 1].node = NULL; | ||
686 | |||
687 | for (;;) { | 839 | for (;;) { |
840 | unsigned long upindex; | ||
688 | int offset; | 841 | int offset; |
689 | 842 | ||
690 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | 843 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; |
@@ -692,12 +845,10 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | |||
692 | goto next; | 845 | goto next; |
693 | if (!tag_get(slot, iftag, offset)) | 846 | if (!tag_get(slot, iftag, offset)) |
694 | goto next; | 847 | goto next; |
695 | if (height > 1) { | 848 | if (shift) { |
696 | /* Go down one level */ | 849 | /* Go down one level */ |
697 | height--; | ||
698 | shift -= RADIX_TREE_MAP_SHIFT; | 850 | shift -= RADIX_TREE_MAP_SHIFT; |
699 | path[height - 1].node = slot; | 851 | node = slot; |
700 | path[height - 1].offset = offset; | ||
701 | slot = slot->slots[offset]; | 852 | slot = slot->slots[offset]; |
702 | continue; | 853 | continue; |
703 | } | 854 | } |
@@ -707,15 +858,27 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | |||
707 | tag_set(slot, settag, offset); | 858 | tag_set(slot, settag, offset); |
708 | 859 | ||
709 | /* walk back up the path tagging interior nodes */ | 860 | /* walk back up the path tagging interior nodes */ |
710 | pathp = &path[0]; | 861 | upindex = index; |
711 | while (pathp->node) { | 862 | while (node) { |
863 | upindex >>= RADIX_TREE_MAP_SHIFT; | ||
864 | offset = upindex & RADIX_TREE_MAP_MASK; | ||
865 | |||
712 | /* stop if we find a node with the tag already set */ | 866 | /* stop if we find a node with the tag already set */ |
713 | if (tag_get(pathp->node, settag, pathp->offset)) | 867 | if (tag_get(node, settag, offset)) |
714 | break; | 868 | break; |
715 | tag_set(pathp->node, settag, pathp->offset); | 869 | tag_set(node, settag, offset); |
716 | pathp++; | 870 | node = node->parent; |
717 | } | 871 | } |
718 | 872 | ||
873 | /* | ||
874 | * Small optimization: now clear that node pointer. | ||
875 | * Since all of this slot's ancestors now have the tag set | ||
876 | * from setting it above, we have no further need to walk | ||
877 | * back up the tree setting tags, until we update slot to | ||
878 | * point to another radix_tree_node. | ||
879 | */ | ||
880 | node = NULL; | ||
881 | |||
719 | next: | 882 | next: |
720 | /* Go to next item at level determined by 'shift' */ | 883 | /* Go to next item at level determined by 'shift' */ |
721 | index = ((index >> shift) + 1) << shift; | 884 | index = ((index >> shift) + 1) << shift; |
@@ -730,8 +893,7 @@ next: | |||
730 | * last_index is guaranteed to be in the tree, what | 893 | * last_index is guaranteed to be in the tree, what |
731 | * we do below cannot wander astray. | 894 | * we do below cannot wander astray. |
732 | */ | 895 | */ |
733 | slot = path[height - 1].node; | 896 | slot = slot->parent; |
734 | height++; | ||
735 | shift += RADIX_TREE_MAP_SHIFT; | 897 | shift += RADIX_TREE_MAP_SHIFT; |
736 | } | 898 | } |
737 | } | 899 | } |
@@ -822,57 +984,6 @@ unsigned long radix_tree_prev_hole(struct radix_tree_root *root, | |||
822 | } | 984 | } |
823 | EXPORT_SYMBOL(radix_tree_prev_hole); | 985 | EXPORT_SYMBOL(radix_tree_prev_hole); |
824 | 986 | ||
825 | static unsigned int | ||
826 | __lookup(struct radix_tree_node *slot, void ***results, unsigned long *indices, | ||
827 | unsigned long index, unsigned int max_items, unsigned long *next_index) | ||
828 | { | ||
829 | unsigned int nr_found = 0; | ||
830 | unsigned int shift, height; | ||
831 | unsigned long i; | ||
832 | |||
833 | height = slot->height; | ||
834 | if (height == 0) | ||
835 | goto out; | ||
836 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; | ||
837 | |||
838 | for ( ; height > 1; height--) { | ||
839 | i = (index >> shift) & RADIX_TREE_MAP_MASK; | ||
840 | for (;;) { | ||
841 | if (slot->slots[i] != NULL) | ||
842 | break; | ||
843 | index &= ~((1UL << shift) - 1); | ||
844 | index += 1UL << shift; | ||
845 | if (index == 0) | ||
846 | goto out; /* 32-bit wraparound */ | ||
847 | i++; | ||
848 | if (i == RADIX_TREE_MAP_SIZE) | ||
849 | goto out; | ||
850 | } | ||
851 | |||
852 | shift -= RADIX_TREE_MAP_SHIFT; | ||
853 | slot = rcu_dereference_raw(slot->slots[i]); | ||
854 | if (slot == NULL) | ||
855 | goto out; | ||
856 | } | ||
857 | |||
858 | /* Bottom level: grab some items */ | ||
859 | for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { | ||
860 | if (slot->slots[i]) { | ||
861 | results[nr_found] = &(slot->slots[i]); | ||
862 | if (indices) | ||
863 | indices[nr_found] = index; | ||
864 | if (++nr_found == max_items) { | ||
865 | index++; | ||
866 | goto out; | ||
867 | } | ||
868 | } | ||
869 | index++; | ||
870 | } | ||
871 | out: | ||
872 | *next_index = index; | ||
873 | return nr_found; | ||
874 | } | ||
875 | |||
876 | /** | 987 | /** |
877 | * radix_tree_gang_lookup - perform multiple lookup on a radix tree | 988 | * radix_tree_gang_lookup - perform multiple lookup on a radix tree |
878 | * @root: radix tree root | 989 | * @root: radix tree root |
@@ -896,48 +1007,19 @@ unsigned int | |||
896 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | 1007 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, |
897 | unsigned long first_index, unsigned int max_items) | 1008 | unsigned long first_index, unsigned int max_items) |
898 | { | 1009 | { |
899 | unsigned long max_index; | 1010 | struct radix_tree_iter iter; |
900 | struct radix_tree_node *node; | 1011 | void **slot; |
901 | unsigned long cur_index = first_index; | 1012 | unsigned int ret = 0; |
902 | unsigned int ret; | ||
903 | 1013 | ||
904 | node = rcu_dereference_raw(root->rnode); | 1014 | if (unlikely(!max_items)) |
905 | if (!node) | ||
906 | return 0; | 1015 | return 0; |
907 | 1016 | ||
908 | if (!radix_tree_is_indirect_ptr(node)) { | 1017 | radix_tree_for_each_slot(slot, root, &iter, first_index) { |
909 | if (first_index > 0) | 1018 | results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot)); |
910 | return 0; | 1019 | if (!results[ret]) |
911 | results[0] = node; | 1020 | continue; |
912 | return 1; | 1021 | if (++ret == max_items) |
913 | } | ||
914 | node = indirect_to_ptr(node); | ||
915 | |||
916 | max_index = radix_tree_maxindex(node->height); | ||
917 | |||
918 | ret = 0; | ||
919 | while (ret < max_items) { | ||
920 | unsigned int nr_found, slots_found, i; | ||
921 | unsigned long next_index; /* Index of next search */ | ||
922 | |||
923 | if (cur_index > max_index) | ||
924 | break; | ||
925 | slots_found = __lookup(node, (void ***)results + ret, NULL, | ||
926 | cur_index, max_items - ret, &next_index); | ||
927 | nr_found = 0; | ||
928 | for (i = 0; i < slots_found; i++) { | ||
929 | struct radix_tree_node *slot; | ||
930 | slot = *(((void ***)results)[ret + i]); | ||
931 | if (!slot) | ||
932 | continue; | ||
933 | results[ret + nr_found] = | ||
934 | indirect_to_ptr(rcu_dereference_raw(slot)); | ||
935 | nr_found++; | ||
936 | } | ||
937 | ret += nr_found; | ||
938 | if (next_index == 0) | ||
939 | break; | 1022 | break; |
940 | cur_index = next_index; | ||
941 | } | 1023 | } |
942 | 1024 | ||
943 | return ret; | 1025 | return ret; |
@@ -967,112 +1049,25 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, | |||
967 | void ***results, unsigned long *indices, | 1049 | void ***results, unsigned long *indices, |
968 | unsigned long first_index, unsigned int max_items) | 1050 | unsigned long first_index, unsigned int max_items) |
969 | { | 1051 | { |
970 | unsigned long max_index; | 1052 | struct radix_tree_iter iter; |
971 | struct radix_tree_node *node; | 1053 | void **slot; |
972 | unsigned long cur_index = first_index; | 1054 | unsigned int ret = 0; |
973 | unsigned int ret; | ||
974 | 1055 | ||
975 | node = rcu_dereference_raw(root->rnode); | 1056 | if (unlikely(!max_items)) |
976 | if (!node) | ||
977 | return 0; | 1057 | return 0; |
978 | 1058 | ||
979 | if (!radix_tree_is_indirect_ptr(node)) { | 1059 | radix_tree_for_each_slot(slot, root, &iter, first_index) { |
980 | if (first_index > 0) | 1060 | results[ret] = slot; |
981 | return 0; | ||
982 | results[0] = (void **)&root->rnode; | ||
983 | if (indices) | 1061 | if (indices) |
984 | indices[0] = 0; | 1062 | indices[ret] = iter.index; |
985 | return 1; | 1063 | if (++ret == max_items) |
986 | } | ||
987 | node = indirect_to_ptr(node); | ||
988 | |||
989 | max_index = radix_tree_maxindex(node->height); | ||
990 | |||
991 | ret = 0; | ||
992 | while (ret < max_items) { | ||
993 | unsigned int slots_found; | ||
994 | unsigned long next_index; /* Index of next search */ | ||
995 | |||
996 | if (cur_index > max_index) | ||
997 | break; | ||
998 | slots_found = __lookup(node, results + ret, | ||
999 | indices ? indices + ret : NULL, | ||
1000 | cur_index, max_items - ret, &next_index); | ||
1001 | ret += slots_found; | ||
1002 | if (next_index == 0) | ||
1003 | break; | 1064 | break; |
1004 | cur_index = next_index; | ||
1005 | } | 1065 | } |
1006 | 1066 | ||
1007 | return ret; | 1067 | return ret; |
1008 | } | 1068 | } |
1009 | EXPORT_SYMBOL(radix_tree_gang_lookup_slot); | 1069 | EXPORT_SYMBOL(radix_tree_gang_lookup_slot); |
1010 | 1070 | ||
1011 | /* | ||
1012 | * FIXME: the two tag_get()s here should use find_next_bit() instead of | ||
1013 | * open-coding the search. | ||
1014 | */ | ||
1015 | static unsigned int | ||
1016 | __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index, | ||
1017 | unsigned int max_items, unsigned long *next_index, unsigned int tag) | ||
1018 | { | ||
1019 | unsigned int nr_found = 0; | ||
1020 | unsigned int shift, height; | ||
1021 | |||
1022 | height = slot->height; | ||
1023 | if (height == 0) | ||
1024 | goto out; | ||
1025 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; | ||
1026 | |||
1027 | while (height > 0) { | ||
1028 | unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK ; | ||
1029 | |||
1030 | for (;;) { | ||
1031 | if (tag_get(slot, tag, i)) | ||
1032 | break; | ||
1033 | index &= ~((1UL << shift) - 1); | ||
1034 | index += 1UL << shift; | ||
1035 | if (index == 0) | ||
1036 | goto out; /* 32-bit wraparound */ | ||
1037 | i++; | ||
1038 | if (i == RADIX_TREE_MAP_SIZE) | ||
1039 | goto out; | ||
1040 | } | ||
1041 | height--; | ||
1042 | if (height == 0) { /* Bottom level: grab some items */ | ||
1043 | unsigned long j = index & RADIX_TREE_MAP_MASK; | ||
1044 | |||
1045 | for ( ; j < RADIX_TREE_MAP_SIZE; j++) { | ||
1046 | index++; | ||
1047 | if (!tag_get(slot, tag, j)) | ||
1048 | continue; | ||
1049 | /* | ||
1050 | * Even though the tag was found set, we need to | ||
1051 | * recheck that we have a non-NULL node, because | ||
1052 | * if this lookup is lockless, it may have been | ||
1053 | * subsequently deleted. | ||
1054 | * | ||
1055 | * Similar care must be taken in any place that | ||
1056 | * lookup ->slots[x] without a lock (ie. can't | ||
1057 | * rely on its value remaining the same). | ||
1058 | */ | ||
1059 | if (slot->slots[j]) { | ||
1060 | results[nr_found++] = &(slot->slots[j]); | ||
1061 | if (nr_found == max_items) | ||
1062 | goto out; | ||
1063 | } | ||
1064 | } | ||
1065 | } | ||
1066 | shift -= RADIX_TREE_MAP_SHIFT; | ||
1067 | slot = rcu_dereference_raw(slot->slots[i]); | ||
1068 | if (slot == NULL) | ||
1069 | break; | ||
1070 | } | ||
1071 | out: | ||
1072 | *next_index = index; | ||
1073 | return nr_found; | ||
1074 | } | ||
1075 | |||
1076 | /** | 1071 | /** |
1077 | * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree | 1072 | * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree |
1078 | * based on a tag | 1073 | * based on a tag |
@@ -1091,52 +1086,19 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
1091 | unsigned long first_index, unsigned int max_items, | 1086 | unsigned long first_index, unsigned int max_items, |
1092 | unsigned int tag) | 1087 | unsigned int tag) |
1093 | { | 1088 | { |
1094 | struct radix_tree_node *node; | 1089 | struct radix_tree_iter iter; |
1095 | unsigned long max_index; | 1090 | void **slot; |
1096 | unsigned long cur_index = first_index; | 1091 | unsigned int ret = 0; |
1097 | unsigned int ret; | ||
1098 | 1092 | ||
1099 | /* check the root's tag bit */ | 1093 | if (unlikely(!max_items)) |
1100 | if (!root_tag_get(root, tag)) | ||
1101 | return 0; | 1094 | return 0; |
1102 | 1095 | ||
1103 | node = rcu_dereference_raw(root->rnode); | 1096 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
1104 | if (!node) | 1097 | results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot)); |
1105 | return 0; | 1098 | if (!results[ret]) |
1106 | 1099 | continue; | |
1107 | if (!radix_tree_is_indirect_ptr(node)) { | 1100 | if (++ret == max_items) |
1108 | if (first_index > 0) | ||
1109 | return 0; | ||
1110 | results[0] = node; | ||
1111 | return 1; | ||
1112 | } | ||
1113 | node = indirect_to_ptr(node); | ||
1114 | |||
1115 | max_index = radix_tree_maxindex(node->height); | ||
1116 | |||
1117 | ret = 0; | ||
1118 | while (ret < max_items) { | ||
1119 | unsigned int nr_found, slots_found, i; | ||
1120 | unsigned long next_index; /* Index of next search */ | ||
1121 | |||
1122 | if (cur_index > max_index) | ||
1123 | break; | ||
1124 | slots_found = __lookup_tag(node, (void ***)results + ret, | ||
1125 | cur_index, max_items - ret, &next_index, tag); | ||
1126 | nr_found = 0; | ||
1127 | for (i = 0; i < slots_found; i++) { | ||
1128 | struct radix_tree_node *slot; | ||
1129 | slot = *(((void ***)results)[ret + i]); | ||
1130 | if (!slot) | ||
1131 | continue; | ||
1132 | results[ret + nr_found] = | ||
1133 | indirect_to_ptr(rcu_dereference_raw(slot)); | ||
1134 | nr_found++; | ||
1135 | } | ||
1136 | ret += nr_found; | ||
1137 | if (next_index == 0) | ||
1138 | break; | 1101 | break; |
1139 | cur_index = next_index; | ||
1140 | } | 1102 | } |
1141 | 1103 | ||
1142 | return ret; | 1104 | return ret; |
@@ -1161,42 +1123,17 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | |||
1161 | unsigned long first_index, unsigned int max_items, | 1123 | unsigned long first_index, unsigned int max_items, |
1162 | unsigned int tag) | 1124 | unsigned int tag) |
1163 | { | 1125 | { |
1164 | struct radix_tree_node *node; | 1126 | struct radix_tree_iter iter; |
1165 | unsigned long max_index; | 1127 | void **slot; |
1166 | unsigned long cur_index = first_index; | 1128 | unsigned int ret = 0; |
1167 | unsigned int ret; | ||
1168 | 1129 | ||
1169 | /* check the root's tag bit */ | 1130 | if (unlikely(!max_items)) |
1170 | if (!root_tag_get(root, tag)) | ||
1171 | return 0; | 1131 | return 0; |
1172 | 1132 | ||
1173 | node = rcu_dereference_raw(root->rnode); | 1133 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
1174 | if (!node) | 1134 | results[ret] = slot; |
1175 | return 0; | 1135 | if (++ret == max_items) |
1176 | |||
1177 | if (!radix_tree_is_indirect_ptr(node)) { | ||
1178 | if (first_index > 0) | ||
1179 | return 0; | ||
1180 | results[0] = (void **)&root->rnode; | ||
1181 | return 1; | ||
1182 | } | ||
1183 | node = indirect_to_ptr(node); | ||
1184 | |||
1185 | max_index = radix_tree_maxindex(node->height); | ||
1186 | |||
1187 | ret = 0; | ||
1188 | while (ret < max_items) { | ||
1189 | unsigned int slots_found; | ||
1190 | unsigned long next_index; /* Index of next search */ | ||
1191 | |||
1192 | if (cur_index > max_index) | ||
1193 | break; | ||
1194 | slots_found = __lookup_tag(node, results + ret, | ||
1195 | cur_index, max_items - ret, &next_index, tag); | ||
1196 | ret += slots_found; | ||
1197 | if (next_index == 0) | ||
1198 | break; | 1136 | break; |
1199 | cur_index = next_index; | ||
1200 | } | 1137 | } |
1201 | 1138 | ||
1202 | return ret; | 1139 | return ret; |
@@ -1305,7 +1242,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) | |||
1305 | /* try to shrink tree height */ | 1242 | /* try to shrink tree height */ |
1306 | while (root->height > 0) { | 1243 | while (root->height > 0) { |
1307 | struct radix_tree_node *to_free = root->rnode; | 1244 | struct radix_tree_node *to_free = root->rnode; |
1308 | void *newptr; | 1245 | struct radix_tree_node *slot; |
1309 | 1246 | ||
1310 | BUG_ON(!radix_tree_is_indirect_ptr(to_free)); | 1247 | BUG_ON(!radix_tree_is_indirect_ptr(to_free)); |
1311 | to_free = indirect_to_ptr(to_free); | 1248 | to_free = indirect_to_ptr(to_free); |
@@ -1326,10 +1263,12 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) | |||
1326 | * (to_free->slots[0]), it will be safe to dereference the new | 1263 | * (to_free->slots[0]), it will be safe to dereference the new |
1327 | * one (root->rnode) as far as dependent read barriers go. | 1264 | * one (root->rnode) as far as dependent read barriers go. |
1328 | */ | 1265 | */ |
1329 | newptr = to_free->slots[0]; | 1266 | slot = to_free->slots[0]; |
1330 | if (root->height > 1) | 1267 | if (root->height > 1) { |
1331 | newptr = ptr_to_indirect(newptr); | 1268 | slot->parent = NULL; |
1332 | root->rnode = newptr; | 1269 | slot = ptr_to_indirect(slot); |
1270 | } | ||
1271 | root->rnode = slot; | ||
1333 | root->height--; | 1272 | root->height--; |
1334 | 1273 | ||
1335 | /* | 1274 | /* |
@@ -1369,16 +1308,12 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) | |||
1369 | */ | 1308 | */ |
1370 | void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | 1309 | void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) |
1371 | { | 1310 | { |
1372 | /* | 1311 | struct radix_tree_node *node = NULL; |
1373 | * The radix tree path needs to be one longer than the maximum path | ||
1374 | * since the "list" is null terminated. | ||
1375 | */ | ||
1376 | struct radix_tree_path path[RADIX_TREE_MAX_PATH + 1], *pathp = path; | ||
1377 | struct radix_tree_node *slot = NULL; | 1312 | struct radix_tree_node *slot = NULL; |
1378 | struct radix_tree_node *to_free; | 1313 | struct radix_tree_node *to_free; |
1379 | unsigned int height, shift; | 1314 | unsigned int height, shift; |
1380 | int tag; | 1315 | int tag; |
1381 | int offset; | 1316 | int uninitialized_var(offset); |
1382 | 1317 | ||
1383 | height = root->height; | 1318 | height = root->height; |
1384 | if (index > radix_tree_maxindex(height)) | 1319 | if (index > radix_tree_maxindex(height)) |
@@ -1391,39 +1326,35 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | |||
1391 | goto out; | 1326 | goto out; |
1392 | } | 1327 | } |
1393 | slot = indirect_to_ptr(slot); | 1328 | slot = indirect_to_ptr(slot); |
1394 | 1329 | shift = height * RADIX_TREE_MAP_SHIFT; | |
1395 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | ||
1396 | pathp->node = NULL; | ||
1397 | 1330 | ||
1398 | do { | 1331 | do { |
1399 | if (slot == NULL) | 1332 | if (slot == NULL) |
1400 | goto out; | 1333 | goto out; |
1401 | 1334 | ||
1402 | pathp++; | 1335 | shift -= RADIX_TREE_MAP_SHIFT; |
1403 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; | 1336 | offset = (index >> shift) & RADIX_TREE_MAP_MASK; |
1404 | pathp->offset = offset; | 1337 | node = slot; |
1405 | pathp->node = slot; | ||
1406 | slot = slot->slots[offset]; | 1338 | slot = slot->slots[offset]; |
1407 | shift -= RADIX_TREE_MAP_SHIFT; | 1339 | } while (shift); |
1408 | height--; | ||
1409 | } while (height > 0); | ||
1410 | 1340 | ||
1411 | if (slot == NULL) | 1341 | if (slot == NULL) |
1412 | goto out; | 1342 | goto out; |
1413 | 1343 | ||
1414 | /* | 1344 | /* |
1415 | * Clear all tags associated with the just-deleted item | 1345 | * Clear all tags associated with the item to be deleted. |
1346 | * Doing it this way is inefficient, but tags are seldom set anyway. | ||
1416 | */ | 1347 | */ |
1417 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { | 1348 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { |
1418 | if (tag_get(pathp->node, tag, pathp->offset)) | 1349 | if (tag_get(node, tag, offset)) |
1419 | radix_tree_tag_clear(root, index, tag); | 1350 | radix_tree_tag_clear(root, index, tag); |
1420 | } | 1351 | } |
1421 | 1352 | ||
1422 | to_free = NULL; | 1353 | to_free = NULL; |
1423 | /* Now free the nodes we do not need anymore */ | 1354 | /* Now free the nodes we do not need anymore */ |
1424 | while (pathp->node) { | 1355 | while (node) { |
1425 | pathp->node->slots[pathp->offset] = NULL; | 1356 | node->slots[offset] = NULL; |
1426 | pathp->node->count--; | 1357 | node->count--; |
1427 | /* | 1358 | /* |
1428 | * Queue the node for deferred freeing after the | 1359 | * Queue the node for deferred freeing after the |
1429 | * last reference to it disappears (set NULL, above). | 1360 | * last reference to it disappears (set NULL, above). |
@@ -1431,17 +1362,20 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) | |||
1431 | if (to_free) | 1362 | if (to_free) |
1432 | radix_tree_node_free(to_free); | 1363 | radix_tree_node_free(to_free); |
1433 | 1364 | ||
1434 | if (pathp->node->count) { | 1365 | if (node->count) { |
1435 | if (pathp->node == indirect_to_ptr(root->rnode)) | 1366 | if (node == indirect_to_ptr(root->rnode)) |
1436 | radix_tree_shrink(root); | 1367 | radix_tree_shrink(root); |
1437 | goto out; | 1368 | goto out; |
1438 | } | 1369 | } |
1439 | 1370 | ||
1440 | /* Node with zero slots in use so free it */ | 1371 | /* Node with zero slots in use so free it */ |
1441 | to_free = pathp->node; | 1372 | to_free = node; |
1442 | pathp--; | ||
1443 | 1373 | ||
1374 | index >>= RADIX_TREE_MAP_SHIFT; | ||
1375 | offset = index & RADIX_TREE_MAP_MASK; | ||
1376 | node = node->parent; | ||
1444 | } | 1377 | } |
1378 | |||
1445 | root_tag_clear_all(root); | 1379 | root_tag_clear_all(root); |
1446 | root->height = 0; | 1380 | root->height = 0; |
1447 | root->rnode = NULL; | 1381 | root->rnode = NULL; |
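The gang-lookup rewrites above all funnel through the new radix_tree_iter walk (radix_tree_for_each_slot / radix_tree_for_each_tagged) while keeping the old calling convention. A minimal caller-side sketch of that convention, using only the exported API; the tree and item names here are hypothetical and not part of this patch:

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/printk.h>

/* Gather up to 16 items starting at index 0; lockless readers rely on RCU. */
static void dump_some_items(struct radix_tree_root *tree)
{
	void *items[16];
	unsigned int i, nr;

	rcu_read_lock();
	nr = radix_tree_gang_lookup(tree, items, 0, 16);
	for (i = 0; i < nr; i++)
		pr_info("item %u: %p\n", i, items[i]);
	rcu_read_unlock();
}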
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile index 8a38102770f3..de06dfe165b8 100644 --- a/lib/raid6/Makefile +++ b/lib/raid6/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | obj-$(CONFIG_RAID6_PQ) += raid6_pq.o | 1 | obj-$(CONFIG_RAID6_PQ) += raid6_pq.o |
2 | 2 | ||
3 | raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \ | 3 | raid6_pq-y += algos.o recov.o recov_ssse3.o tables.o int1.o int2.o int4.o \ |
4 | int8.o int16.o int32.o altivec1.o altivec2.o altivec4.o \ | 4 | int8.o int16.o int32.o altivec1.o altivec2.o altivec4.o \ |
5 | altivec8.o mmx.o sse1.o sse2.o | 5 | altivec8.o mmx.o sse1.o sse2.o |
6 | hostprogs-y += mktables | 6 | hostprogs-y += mktables |
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c index b595f560bee7..589f5f50ad2e 100644 --- a/lib/raid6/algos.c +++ b/lib/raid6/algos.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <sys/mman.h> | 21 | #include <sys/mman.h> |
22 | #include <stdio.h> | 22 | #include <stdio.h> |
23 | #else | 23 | #else |
24 | #include <linux/module.h> | ||
24 | #include <linux/gfp.h> | 25 | #include <linux/gfp.h> |
25 | #if !RAID6_USE_EMPTY_ZERO_PAGE | 26 | #if !RAID6_USE_EMPTY_ZERO_PAGE |
26 | /* In .bss so it's zeroed */ | 27 | /* In .bss so it's zeroed */ |
@@ -33,10 +34,6 @@ struct raid6_calls raid6_call; | |||
33 | EXPORT_SYMBOL_GPL(raid6_call); | 34 | EXPORT_SYMBOL_GPL(raid6_call); |
34 | 35 | ||
35 | const struct raid6_calls * const raid6_algos[] = { | 36 | const struct raid6_calls * const raid6_algos[] = { |
36 | &raid6_intx1, | ||
37 | &raid6_intx2, | ||
38 | &raid6_intx4, | ||
39 | &raid6_intx8, | ||
40 | #if defined(__ia64__) | 37 | #if defined(__ia64__) |
41 | &raid6_intx16, | 38 | &raid6_intx16, |
42 | &raid6_intx32, | 39 | &raid6_intx32, |
@@ -60,6 +57,24 @@ const struct raid6_calls * const raid6_algos[] = { | |||
60 | &raid6_altivec4, | 57 | &raid6_altivec4, |
61 | &raid6_altivec8, | 58 | &raid6_altivec8, |
62 | #endif | 59 | #endif |
60 | &raid6_intx1, | ||
61 | &raid6_intx2, | ||
62 | &raid6_intx4, | ||
63 | &raid6_intx8, | ||
64 | NULL | ||
65 | }; | ||
66 | |||
67 | void (*raid6_2data_recov)(int, size_t, int, int, void **); | ||
68 | EXPORT_SYMBOL_GPL(raid6_2data_recov); | ||
69 | |||
70 | void (*raid6_datap_recov)(int, size_t, int, void **); | ||
71 | EXPORT_SYMBOL_GPL(raid6_datap_recov); | ||
72 | |||
73 | const struct raid6_recov_calls *const raid6_recov_algos[] = { | ||
74 | #if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__) | ||
75 | &raid6_recov_ssse3, | ||
76 | #endif | ||
77 | &raid6_recov_intx1, | ||
63 | NULL | 78 | NULL |
64 | }; | 79 | }; |
65 | 80 | ||
@@ -71,59 +86,55 @@ const struct raid6_calls * const raid6_algos[] = { | |||
71 | #define time_before(x, y) ((x) < (y)) | 86 | #define time_before(x, y) ((x) < (y)) |
72 | #endif | 87 | #endif |
73 | 88 | ||
74 | /* Try to pick the best algorithm */ | 89 | static inline const struct raid6_recov_calls *raid6_choose_recov(void) |
75 | /* This code uses the gfmul table as convenient data set to abuse */ | ||
76 | |||
77 | int __init raid6_select_algo(void) | ||
78 | { | 90 | { |
79 | const struct raid6_calls * const * algo; | 91 | const struct raid6_recov_calls *const *algo; |
80 | const struct raid6_calls * best; | 92 | const struct raid6_recov_calls *best; |
81 | char *syndromes; | ||
82 | void *dptrs[(65536/PAGE_SIZE)+2]; | ||
83 | int i, disks; | ||
84 | unsigned long perf, bestperf; | ||
85 | int bestprefer; | ||
86 | unsigned long j0, j1; | ||
87 | 93 | ||
88 | disks = (65536/PAGE_SIZE)+2; | 94 | for (best = NULL, algo = raid6_recov_algos; *algo; algo++) |
89 | for ( i = 0 ; i < disks-2 ; i++ ) { | 95 | if (!best || (*algo)->priority > best->priority) |
90 | dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i; | 96 | if (!(*algo)->valid || (*algo)->valid()) |
91 | } | 97 | best = *algo; |
92 | 98 | ||
93 | /* Normal code - use a 2-page allocation to avoid D$ conflict */ | 99 | if (best) { |
94 | syndromes = (void *) __get_free_pages(GFP_KERNEL, 1); | 100 | raid6_2data_recov = best->data2; |
101 | raid6_datap_recov = best->datap; | ||
95 | 102 | ||
96 | if ( !syndromes ) { | 103 | printk("raid6: using %s recovery algorithm\n", best->name); |
97 | printk("raid6: Yikes! No memory available.\n"); | 104 | } else |
98 | return -ENOMEM; | 105 | printk("raid6: Yikes! No recovery algorithm found!\n"); |
99 | } | ||
100 | 106 | ||
101 | dptrs[disks-2] = syndromes; | 107 | return best; |
102 | dptrs[disks-1] = syndromes + PAGE_SIZE; | 108 | } |
109 | |||
110 | static inline const struct raid6_calls *raid6_choose_gen( | ||
111 | void *(*const dptrs)[(65536/PAGE_SIZE)+2], const int disks) | ||
112 | { | ||
113 | unsigned long perf, bestperf, j0, j1; | ||
114 | const struct raid6_calls *const *algo; | ||
115 | const struct raid6_calls *best; | ||
103 | 116 | ||
104 | bestperf = 0; bestprefer = 0; best = NULL; | 117 | for (bestperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) { |
118 | if (!best || (*algo)->prefer >= best->prefer) { | ||
119 | if ((*algo)->valid && !(*algo)->valid()) | ||
120 | continue; | ||
105 | 121 | ||
106 | for ( algo = raid6_algos ; *algo ; algo++ ) { | ||
107 | if ( !(*algo)->valid || (*algo)->valid() ) { | ||
108 | perf = 0; | 122 | perf = 0; |
109 | 123 | ||
110 | preempt_disable(); | 124 | preempt_disable(); |
111 | j0 = jiffies; | 125 | j0 = jiffies; |
112 | while ( (j1 = jiffies) == j0 ) | 126 | while ((j1 = jiffies) == j0) |
113 | cpu_relax(); | 127 | cpu_relax(); |
114 | while (time_before(jiffies, | 128 | while (time_before(jiffies, |
115 | j1 + (1<<RAID6_TIME_JIFFIES_LG2))) { | 129 | j1 + (1<<RAID6_TIME_JIFFIES_LG2))) { |
116 | (*algo)->gen_syndrome(disks, PAGE_SIZE, dptrs); | 130 | (*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs); |
117 | perf++; | 131 | perf++; |
118 | } | 132 | } |
119 | preempt_enable(); | 133 | preempt_enable(); |
120 | 134 | ||
121 | if ( (*algo)->prefer > bestprefer || | 135 | if (perf > bestperf) { |
122 | ((*algo)->prefer == bestprefer && | ||
123 | perf > bestperf) ) { | ||
124 | best = *algo; | ||
125 | bestprefer = best->prefer; | ||
126 | bestperf = perf; | 136 | bestperf = perf; |
137 | best = *algo; | ||
127 | } | 138 | } |
128 | printk("raid6: %-8s %5ld MB/s\n", (*algo)->name, | 139 | printk("raid6: %-8s %5ld MB/s\n", (*algo)->name, |
129 | (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2)); | 140 | (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2)); |
@@ -138,9 +149,46 @@ int __init raid6_select_algo(void) | |||
138 | } else | 149 | } else |
139 | printk("raid6: Yikes! No algorithm found!\n"); | 150 | printk("raid6: Yikes! No algorithm found!\n"); |
140 | 151 | ||
152 | return best; | ||
153 | } | ||
154 | |||
155 | |||
156 | /* Try to pick the best algorithm */ | ||
157 | /* This code uses the gfmul table as convenient data set to abuse */ | ||
158 | |||
159 | int __init raid6_select_algo(void) | ||
160 | { | ||
161 | const int disks = (65536/PAGE_SIZE)+2; | ||
162 | |||
163 | const struct raid6_calls *gen_best; | ||
164 | const struct raid6_recov_calls *rec_best; | ||
165 | char *syndromes; | ||
166 | void *dptrs[(65536/PAGE_SIZE)+2]; | ||
167 | int i; | ||
168 | |||
169 | for (i = 0; i < disks-2; i++) | ||
170 | dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i; | ||
171 | |||
172 | /* Normal code - use a 2-page allocation to avoid D$ conflict */ | ||
173 | syndromes = (void *) __get_free_pages(GFP_KERNEL, 1); | ||
174 | |||
175 | if (!syndromes) { | ||
176 | printk("raid6: Yikes! No memory available.\n"); | ||
177 | return -ENOMEM; | ||
178 | } | ||
179 | |||
180 | dptrs[disks-2] = syndromes; | ||
181 | dptrs[disks-1] = syndromes + PAGE_SIZE; | ||
182 | |||
183 | /* select raid gen_syndrome function */ | ||
184 | gen_best = raid6_choose_gen(&dptrs, disks); | ||
185 | |||
186 | /* select raid recover functions */ | ||
187 | rec_best = raid6_choose_recov(); | ||
188 | |||
141 | free_pages((unsigned long)syndromes, 1); | 189 | free_pages((unsigned long)syndromes, 1); |
142 | 190 | ||
143 | return best ? 0 : -EINVAL; | 191 | return gen_best && rec_best ? 0 : -EINVAL; |
144 | } | 192 | } |
145 | 193 | ||
146 | static void raid6_exit(void) | 194 | static void raid6_exit(void) |
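raid6_select_algo() is now split into two pickers: raid6_choose_gen() still benchmarks each gen_syndrome() implementation over a jiffies window, while raid6_choose_recov() simply takes the highest-priority recovery implementation whose ->valid() hook passes. A stand-alone sketch of that priority scan, using stand-in types rather than the kernel's struct raid6_recov_calls:

struct recov_ops {
	int (*valid)(void);	/* NULL means always usable */
	int priority;		/* higher wins, e.g. SIMD over generic */
	const char *name;
};

static const struct recov_ops *pick_best(const struct recov_ops *const *tbl)
{
	const struct recov_ops *const *p;
	const struct recov_ops *best = NULL;

	for (p = tbl; *p; p++)
		if (!best || (*p)->priority > best->priority)
			if (!(*p)->valid || (*p)->valid())
				best = *p;
	return best;
}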
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc index 2654d5c854be..b71012b756f4 100644 --- a/lib/raid6/altivec.uc +++ b/lib/raid6/altivec.uc | |||
@@ -28,8 +28,8 @@ | |||
28 | 28 | ||
29 | #include <altivec.h> | 29 | #include <altivec.h> |
30 | #ifdef __KERNEL__ | 30 | #ifdef __KERNEL__ |
31 | # include <asm/system.h> | ||
32 | # include <asm/cputable.h> | 31 | # include <asm/cputable.h> |
32 | # include <asm/switch_to.h> | ||
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | /* | 35 | /* |
diff --git a/lib/raid6/int.uc b/lib/raid6/int.uc index d1e276a14fab..5b50f8dfc5d2 100644 --- a/lib/raid6/int.uc +++ b/lib/raid6/int.uc | |||
@@ -11,7 +11,7 @@ | |||
11 | * ----------------------------------------------------------------------- */ | 11 | * ----------------------------------------------------------------------- */ |
12 | 12 | ||
13 | /* | 13 | /* |
14 | * raid6int$#.c | 14 | * int$#.c |
15 | * | 15 | * |
16 | * $#-way unrolled portable integer math RAID-6 instruction set | 16 | * $#-way unrolled portable integer math RAID-6 instruction set |
17 | * | 17 | * |
diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c index 3b1500843bba..39787db588b0 100644 --- a/lib/raid6/mktables.c +++ b/lib/raid6/mktables.c | |||
@@ -60,6 +60,7 @@ int main(int argc, char *argv[]) | |||
60 | uint8_t exptbl[256], invtbl[256]; | 60 | uint8_t exptbl[256], invtbl[256]; |
61 | 61 | ||
62 | printf("#include <linux/raid/pq.h>\n"); | 62 | printf("#include <linux/raid/pq.h>\n"); |
63 | printf("#include <linux/export.h>\n"); | ||
63 | 64 | ||
64 | /* Compute multiplication table */ | 65 | /* Compute multiplication table */ |
65 | printf("\nconst u8 __attribute__((aligned(256)))\n" | 66 | printf("\nconst u8 __attribute__((aligned(256)))\n" |
@@ -80,6 +81,31 @@ int main(int argc, char *argv[]) | |||
80 | printf("EXPORT_SYMBOL(raid6_gfmul);\n"); | 81 | printf("EXPORT_SYMBOL(raid6_gfmul);\n"); |
81 | printf("#endif\n"); | 82 | printf("#endif\n"); |
82 | 83 | ||
84 | /* Compute vector multiplication table */ | ||
85 | printf("\nconst u8 __attribute__((aligned(256)))\n" | ||
86 | "raid6_vgfmul[256][32] =\n" | ||
87 | "{\n"); | ||
88 | for (i = 0; i < 256; i++) { | ||
89 | printf("\t{\n"); | ||
90 | for (j = 0; j < 16; j += 8) { | ||
91 | printf("\t\t"); | ||
92 | for (k = 0; k < 8; k++) | ||
93 | printf("0x%02x,%c", gfmul(i, j + k), | ||
94 | (k == 7) ? '\n' : ' '); | ||
95 | } | ||
96 | for (j = 0; j < 16; j += 8) { | ||
97 | printf("\t\t"); | ||
98 | for (k = 0; k < 8; k++) | ||
99 | printf("0x%02x,%c", gfmul(i, (j + k) << 4), | ||
100 | (k == 7) ? '\n' : ' '); | ||
101 | } | ||
102 | printf("\t},\n"); | ||
103 | } | ||
104 | printf("};\n"); | ||
105 | printf("#ifdef __KERNEL__\n"); | ||
106 | printf("EXPORT_SYMBOL(raid6_vgfmul);\n"); | ||
107 | printf("#endif\n"); | ||
108 | |||
83 | /* Compute power-of-2 table (exponent) */ | 109 | /* Compute power-of-2 table (exponent) */ |
84 | v = 1; | 110 | v = 1; |
85 | printf("\nconst u8 __attribute__((aligned(256)))\n" | 111 | printf("\nconst u8 __attribute__((aligned(256)))\n" |
diff --git a/lib/raid6/recov.c b/lib/raid6/recov.c index 8590d19cf522..a95bccb8497d 100644 --- a/lib/raid6/recov.c +++ b/lib/raid6/recov.c | |||
@@ -18,11 +18,12 @@ | |||
18 | * the syndrome.) | 18 | * the syndrome.) |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/export.h> | ||
21 | #include <linux/raid/pq.h> | 22 | #include <linux/raid/pq.h> |
22 | 23 | ||
23 | /* Recover two failed data blocks. */ | 24 | /* Recover two failed data blocks. */ |
24 | void raid6_2data_recov(int disks, size_t bytes, int faila, int failb, | 25 | static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, |
25 | void **ptrs) | 26 | int failb, void **ptrs) |
26 | { | 27 | { |
27 | u8 *p, *q, *dp, *dq; | 28 | u8 *p, *q, *dp, *dq; |
28 | u8 px, qx, db; | 29 | u8 px, qx, db; |
@@ -63,10 +64,10 @@ void raid6_2data_recov(int disks, size_t bytes, int faila, int failb, | |||
63 | p++; q++; | 64 | p++; q++; |
64 | } | 65 | } |
65 | } | 66 | } |
66 | EXPORT_SYMBOL_GPL(raid6_2data_recov); | ||
67 | 67 | ||
68 | /* Recover failure of one data block plus the P block */ | 68 | /* Recover failure of one data block plus the P block */ |
69 | void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs) | 69 | static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, |
70 | void **ptrs) | ||
70 | { | 71 | { |
71 | u8 *p, *q, *dq; | 72 | u8 *p, *q, *dq; |
72 | const u8 *qmul; /* Q multiplier table */ | 73 | const u8 *qmul; /* Q multiplier table */ |
@@ -95,7 +96,15 @@ void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs) | |||
95 | q++; dq++; | 96 | q++; dq++; |
96 | } | 97 | } |
97 | } | 98 | } |
98 | EXPORT_SYMBOL_GPL(raid6_datap_recov); | 99 | |
100 | |||
101 | const struct raid6_recov_calls raid6_recov_intx1 = { | ||
102 | .data2 = raid6_2data_recov_intx1, | ||
103 | .datap = raid6_datap_recov_intx1, | ||
104 | .valid = NULL, | ||
105 | .name = "intx1", | ||
106 | .priority = 0, | ||
107 | }; | ||
99 | 108 | ||
100 | #ifndef __KERNEL__ | 109 | #ifndef __KERNEL__ |
101 | /* Testing only */ | 110 | /* Testing only */ |
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c new file mode 100644 index 000000000000..ecb710c0b4d9 --- /dev/null +++ b/lib/raid6/recov_ssse3.c | |||
@@ -0,0 +1,336 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Intel Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; version 2 | ||
7 | * of the License. | ||
8 | */ | ||
9 | |||
10 | #if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__) | ||
11 | |||
12 | #include <linux/raid/pq.h> | ||
13 | #include "x86.h" | ||
14 | |||
15 | static int raid6_has_ssse3(void) | ||
16 | { | ||
17 | return boot_cpu_has(X86_FEATURE_XMM) && | ||
18 | boot_cpu_has(X86_FEATURE_XMM2) && | ||
19 | boot_cpu_has(X86_FEATURE_SSSE3); | ||
20 | } | ||
21 | |||
22 | static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, | ||
23 | int failb, void **ptrs) | ||
24 | { | ||
25 | u8 *p, *q, *dp, *dq; | ||
26 | const u8 *pbmul; /* P multiplier table for B data */ | ||
27 | const u8 *qmul; /* Q multiplier table (for both) */ | ||
28 | static const u8 __aligned(16) x0f[16] = { | ||
29 | 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, | ||
30 | 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f}; | ||
31 | |||
32 | p = (u8 *)ptrs[disks-2]; | ||
33 | q = (u8 *)ptrs[disks-1]; | ||
34 | |||
35 | /* Compute syndrome with zero for the missing data pages. | ||
36 | Use the dead data pages as temporary storage for | ||
37 | delta p and delta q */ | ||
38 | dp = (u8 *)ptrs[faila]; | ||
39 | ptrs[faila] = (void *)raid6_empty_zero_page; | ||
40 | ptrs[disks-2] = dp; | ||
41 | dq = (u8 *)ptrs[failb]; | ||
42 | ptrs[failb] = (void *)raid6_empty_zero_page; | ||
43 | ptrs[disks-1] = dq; | ||
44 | |||
45 | raid6_call.gen_syndrome(disks, bytes, ptrs); | ||
46 | |||
47 | /* Restore pointer table */ | ||
48 | ptrs[faila] = dp; | ||
49 | ptrs[failb] = dq; | ||
50 | ptrs[disks-2] = p; | ||
51 | ptrs[disks-1] = q; | ||
52 | |||
53 | /* Now, pick the proper data tables */ | ||
54 | pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]]; | ||
55 | qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^ | ||
56 | raid6_gfexp[failb]]]; | ||
57 | |||
58 | kernel_fpu_begin(); | ||
59 | |||
60 | asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0])); | ||
61 | |||
62 | #ifdef CONFIG_X86_64 | ||
63 | asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0])); | ||
64 | asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0])); | ||
65 | asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16])); | ||
66 | #endif | ||
67 | |||
68 | /* Now do it... */ | ||
69 | while (bytes) { | ||
70 | #ifdef CONFIG_X86_64 | ||
71 | /* xmm6, xmm14, xmm15 */ | ||
72 | |||
73 | asm volatile("movdqa %0,%%xmm1" : : "m" (q[0])); | ||
74 | asm volatile("movdqa %0,%%xmm9" : : "m" (q[16])); | ||
75 | asm volatile("movdqa %0,%%xmm0" : : "m" (p[0])); | ||
76 | asm volatile("movdqa %0,%%xmm8" : : "m" (p[16])); | ||
77 | asm volatile("pxor %0,%%xmm1" : : "m" (dq[0])); | ||
78 | asm volatile("pxor %0,%%xmm9" : : "m" (dq[16])); | ||
79 | asm volatile("pxor %0,%%xmm0" : : "m" (dp[0])); | ||
80 | asm volatile("pxor %0,%%xmm8" : : "m" (dp[16])); | ||
81 | |||
82 | /* xmm0/8 = px */ | ||
83 | |||
84 | asm volatile("movdqa %xmm6,%xmm4"); | ||
85 | asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16])); | ||
86 | asm volatile("movdqa %xmm6,%xmm12"); | ||
87 | asm volatile("movdqa %xmm5,%xmm13"); | ||
88 | asm volatile("movdqa %xmm1,%xmm3"); | ||
89 | asm volatile("movdqa %xmm9,%xmm11"); | ||
90 | asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */ | ||
91 | asm volatile("movdqa %xmm8,%xmm10"); | ||
92 | asm volatile("psraw $4,%xmm1"); | ||
93 | asm volatile("psraw $4,%xmm9"); | ||
94 | asm volatile("pand %xmm7,%xmm3"); | ||
95 | asm volatile("pand %xmm7,%xmm11"); | ||
96 | asm volatile("pand %xmm7,%xmm1"); | ||
97 | asm volatile("pand %xmm7,%xmm9"); | ||
98 | asm volatile("pshufb %xmm3,%xmm4"); | ||
99 | asm volatile("pshufb %xmm11,%xmm12"); | ||
100 | asm volatile("pshufb %xmm1,%xmm5"); | ||
101 | asm volatile("pshufb %xmm9,%xmm13"); | ||
102 | asm volatile("pxor %xmm4,%xmm5"); | ||
103 | asm volatile("pxor %xmm12,%xmm13"); | ||
104 | |||
105 | /* xmm5/13 = qx */ | ||
106 | |||
107 | asm volatile("movdqa %xmm14,%xmm4"); | ||
108 | asm volatile("movdqa %xmm15,%xmm1"); | ||
109 | asm volatile("movdqa %xmm14,%xmm12"); | ||
110 | asm volatile("movdqa %xmm15,%xmm9"); | ||
111 | asm volatile("movdqa %xmm2,%xmm3"); | ||
112 | asm volatile("movdqa %xmm10,%xmm11"); | ||
113 | asm volatile("psraw $4,%xmm2"); | ||
114 | asm volatile("psraw $4,%xmm10"); | ||
115 | asm volatile("pand %xmm7,%xmm3"); | ||
116 | asm volatile("pand %xmm7,%xmm11"); | ||
117 | asm volatile("pand %xmm7,%xmm2"); | ||
118 | asm volatile("pand %xmm7,%xmm10"); | ||
119 | asm volatile("pshufb %xmm3,%xmm4"); | ||
120 | asm volatile("pshufb %xmm11,%xmm12"); | ||
121 | asm volatile("pshufb %xmm2,%xmm1"); | ||
122 | asm volatile("pshufb %xmm10,%xmm9"); | ||
123 | asm volatile("pxor %xmm4,%xmm1"); | ||
124 | asm volatile("pxor %xmm12,%xmm9"); | ||
125 | |||
126 | /* xmm1/9 = pbmul[px] */ | ||
127 | asm volatile("pxor %xmm5,%xmm1"); | ||
128 | asm volatile("pxor %xmm13,%xmm9"); | ||
129 | /* xmm1/9 = db = DQ */ | ||
130 | asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0])); | ||
131 | asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16])); | ||
132 | |||
133 | asm volatile("pxor %xmm1,%xmm0"); | ||
134 | asm volatile("pxor %xmm9,%xmm8"); | ||
135 | asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0])); | ||
136 | asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16])); | ||
137 | |||
138 | bytes -= 32; | ||
139 | p += 32; | ||
140 | q += 32; | ||
141 | dp += 32; | ||
142 | dq += 32; | ||
143 | #else | ||
144 | asm volatile("movdqa %0,%%xmm1" : : "m" (*q)); | ||
145 | asm volatile("movdqa %0,%%xmm0" : : "m" (*p)); | ||
146 | asm volatile("pxor %0,%%xmm1" : : "m" (*dq)); | ||
147 | asm volatile("pxor %0,%%xmm0" : : "m" (*dp)); | ||
148 | |||
149 | /* 1 = dq ^ q | ||
150 | * 0 = dp ^ p | ||
151 | */ | ||
152 | asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0])); | ||
153 | asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16])); | ||
154 | |||
155 | asm volatile("movdqa %xmm1,%xmm3"); | ||
156 | asm volatile("psraw $4,%xmm1"); | ||
157 | asm volatile("pand %xmm7,%xmm3"); | ||
158 | asm volatile("pand %xmm7,%xmm1"); | ||
159 | asm volatile("pshufb %xmm3,%xmm4"); | ||
160 | asm volatile("pshufb %xmm1,%xmm5"); | ||
161 | asm volatile("pxor %xmm4,%xmm5"); | ||
162 | |||
163 | asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */ | ||
164 | |||
165 | /* xmm5 = qx */ | ||
166 | |||
167 | asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0])); | ||
168 | asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16])); | ||
169 | asm volatile("movdqa %xmm2,%xmm3"); | ||
170 | asm volatile("psraw $4,%xmm2"); | ||
171 | asm volatile("pand %xmm7,%xmm3"); | ||
172 | asm volatile("pand %xmm7,%xmm2"); | ||
173 | asm volatile("pshufb %xmm3,%xmm4"); | ||
174 | asm volatile("pshufb %xmm2,%xmm1"); | ||
175 | asm volatile("pxor %xmm4,%xmm1"); | ||
176 | |||
177 | /* xmm1 = pbmul[px] */ | ||
178 | asm volatile("pxor %xmm5,%xmm1"); | ||
179 | /* xmm1 = db = DQ */ | ||
180 | asm volatile("movdqa %%xmm1,%0" : "=m" (*dq)); | ||
181 | |||
182 | asm volatile("pxor %xmm1,%xmm0"); | ||
183 | asm volatile("movdqa %%xmm0,%0" : "=m" (*dp)); | ||
184 | |||
185 | bytes -= 16; | ||
186 | p += 16; | ||
187 | q += 16; | ||
188 | dp += 16; | ||
189 | dq += 16; | ||
190 | #endif | ||
191 | } | ||
192 | |||
193 | kernel_fpu_end(); | ||
194 | } | ||
195 | |||
196 | |||
197 | static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, | ||
198 | void **ptrs) | ||
199 | { | ||
200 | u8 *p, *q, *dq; | ||
201 | const u8 *qmul; /* Q multiplier table */ | ||
202 | static const u8 __aligned(16) x0f[16] = { | ||
203 | 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, | ||
204 | 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f}; | ||
205 | |||
206 | p = (u8 *)ptrs[disks-2]; | ||
207 | q = (u8 *)ptrs[disks-1]; | ||
208 | |||
209 | /* Compute syndrome with zero for the missing data page. | ||
210 | Use the dead data page as temporary storage for delta q */ | ||
211 | dq = (u8 *)ptrs[faila]; | ||
212 | ptrs[faila] = (void *)raid6_empty_zero_page; | ||
213 | ptrs[disks-1] = dq; | ||
214 | |||
215 | raid6_call.gen_syndrome(disks, bytes, ptrs); | ||
216 | |||
217 | /* Restore pointer table */ | ||
218 | ptrs[faila] = dq; | ||
219 | ptrs[disks-1] = q; | ||
220 | |||
221 | /* Now, pick the proper data tables */ | ||
222 | qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]]; | ||
223 | |||
224 | kernel_fpu_begin(); | ||
225 | |||
226 | asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0])); | ||
227 | |||
228 | while (bytes) { | ||
229 | #ifdef CONFIG_X86_64 | ||
230 | asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0])); | ||
231 | asm volatile("movdqa %0, %%xmm4" : : "m" (dq[16])); | ||
232 | asm volatile("pxor %0, %%xmm3" : : "m" (q[0])); | ||
233 | asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0])); | ||
234 | |||
235 | /* xmm3 = q[0] ^ dq[0] */ | ||
236 | |||
237 | asm volatile("pxor %0, %%xmm4" : : "m" (q[16])); | ||
238 | asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16])); | ||
239 | |||
240 | /* xmm4 = q[16] ^ dq[16] */ | ||
241 | |||
242 | asm volatile("movdqa %xmm3, %xmm6"); | ||
243 | asm volatile("movdqa %xmm4, %xmm8"); | ||
244 | |||
245 | /* xmm4 = xmm8 = q[16] ^ dq[16] */ | ||
246 | |||
247 | asm volatile("psraw $4, %xmm3"); | ||
248 | asm volatile("pand %xmm7, %xmm6"); | ||
249 | asm volatile("pand %xmm7, %xmm3"); | ||
250 | asm volatile("pshufb %xmm6, %xmm0"); | ||
251 | asm volatile("pshufb %xmm3, %xmm1"); | ||
252 | asm volatile("movdqa %0, %%xmm10" : : "m" (qmul[0])); | ||
253 | asm volatile("pxor %xmm0, %xmm1"); | ||
254 | asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16])); | ||
255 | |||
256 | /* xmm1 = qmul[q[0] ^ dq[0]] */ | ||
257 | |||
258 | asm volatile("psraw $4, %xmm4"); | ||
259 | asm volatile("pand %xmm7, %xmm8"); | ||
260 | asm volatile("pand %xmm7, %xmm4"); | ||
261 | asm volatile("pshufb %xmm8, %xmm10"); | ||
262 | asm volatile("pshufb %xmm4, %xmm11"); | ||
263 | asm volatile("movdqa %0, %%xmm2" : : "m" (p[0])); | ||
264 | asm volatile("pxor %xmm10, %xmm11"); | ||
265 | asm volatile("movdqa %0, %%xmm12" : : "m" (p[16])); | ||
266 | |||
267 | /* xmm11 = qmul[q[16] ^ dq[16]] */ | ||
268 | |||
269 | asm volatile("pxor %xmm1, %xmm2"); | ||
270 | |||
271 | /* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */ | ||
272 | |||
273 | asm volatile("pxor %xmm11, %xmm12"); | ||
274 | |||
275 | /* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */ | ||
276 | |||
277 | asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0])); | ||
278 | asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16])); | ||
279 | |||
280 | asm volatile("movdqa %%xmm2, %0" : "=m" (p[0])); | ||
281 | asm volatile("movdqa %%xmm12, %0" : "=m" (p[16])); | ||
282 | |||
283 | bytes -= 32; | ||
284 | p += 32; | ||
285 | q += 32; | ||
286 | dq += 32; | ||
287 | |||
288 | #else | ||
289 | asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0])); | ||
290 | asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0])); | ||
291 | asm volatile("pxor %0, %%xmm3" : : "m" (q[0])); | ||
292 | asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16])); | ||
293 | |||
294 | /* xmm3 = *q ^ *dq */ | ||
295 | |||
296 | asm volatile("movdqa %xmm3, %xmm6"); | ||
297 | asm volatile("movdqa %0, %%xmm2" : : "m" (p[0])); | ||
298 | asm volatile("psraw $4, %xmm3"); | ||
299 | asm volatile("pand %xmm7, %xmm6"); | ||
300 | asm volatile("pand %xmm7, %xmm3"); | ||
301 | asm volatile("pshufb %xmm6, %xmm0"); | ||
302 | asm volatile("pshufb %xmm3, %xmm1"); | ||
303 | asm volatile("pxor %xmm0, %xmm1"); | ||
304 | |||
305 | /* xmm1 = qmul[*q ^ *dq] */ | ||
306 | |||
307 | asm volatile("pxor %xmm1, %xmm2"); | ||
308 | |||
309 | /* xmm2 = *p ^ qmul[*q ^ *dq] */ | ||
310 | |||
311 | asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0])); | ||
312 | asm volatile("movdqa %%xmm2, %0" : "=m" (p[0])); | ||
313 | |||
314 | bytes -= 16; | ||
315 | p += 16; | ||
316 | q += 16; | ||
317 | dq += 16; | ||
318 | #endif | ||
319 | } | ||
320 | |||
321 | kernel_fpu_end(); | ||
322 | } | ||
323 | |||
324 | const struct raid6_recov_calls raid6_recov_ssse3 = { | ||
325 | .data2 = raid6_2data_recov_ssse3, | ||
326 | .datap = raid6_datap_recov_ssse3, | ||
327 | .valid = raid6_has_ssse3, | ||
328 | #ifdef CONFIG_X86_64 | ||
329 | .name = "ssse3x2", | ||
330 | #else | ||
331 | .name = "ssse3x1", | ||
332 | #endif | ||
333 | .priority = 1, | ||
334 | }; | ||
335 | |||
336 | #endif | ||
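Per byte, the vectorised datap path above computes the same thing as raid6_datap_recov_intx1() in recov.c: rebuild the missing data byte from the Q delta, then repair P with it. A scalar restatement of what each 16-byte SSSE3 block does; the 256-entry qmul table stands in for the split nibble tables the vector code actually uses:

#include <stddef.h>
#include <stdint.h>

static void datap_recov_scalar(size_t bytes, uint8_t *p, uint8_t *q,
			       uint8_t *dq, const uint8_t *qmul)
{
	while (bytes--) {
		*dq = qmul[*q ^ *dq];	/* recovered data byte */
		*p ^= *dq;		/* fold it back into P */
		p++; q++; dq++;
	}
}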
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index aa651697b6dc..c76151d94764 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile | |||
@@ -23,7 +23,7 @@ RANLIB = ranlib | |||
23 | all: raid6.a raid6test | 23 | all: raid6.a raid6test |
24 | 24 | ||
25 | raid6.a: int1.o int2.o int4.o int8.o int16.o int32.o mmx.o sse1.o sse2.o \ | 25 | raid6.a: int1.o int2.o int4.o int8.o int16.o int32.o mmx.o sse1.o sse2.o \ |
26 | altivec1.o altivec2.o altivec4.o altivec8.o recov.o algos.o \ | 26 | altivec1.o altivec2.o altivec4.o altivec8.o recov.o recov_ssse3.o algos.o \ |
27 | tables.o | 27 | tables.o |
28 | rm -f $@ | 28 | rm -f $@ |
29 | $(AR) cq $@ $^ | 29 | $(AR) cq $@ $^ |
diff --git a/lib/raid6/test/test.c b/lib/raid6/test/test.c index 7a930318b17d..5a485b7a7d3c 100644 --- a/lib/raid6/test/test.c +++ b/lib/raid6/test/test.c | |||
@@ -90,25 +90,35 @@ static int test_disks(int i, int j) | |||
90 | int main(int argc, char *argv[]) | 90 | int main(int argc, char *argv[]) |
91 | { | 91 | { |
92 | const struct raid6_calls *const *algo; | 92 | const struct raid6_calls *const *algo; |
93 | const struct raid6_recov_calls *const *ra; | ||
93 | int i, j; | 94 | int i, j; |
94 | int err = 0; | 95 | int err = 0; |
95 | 96 | ||
96 | makedata(); | 97 | makedata(); |
97 | 98 | ||
98 | for (algo = raid6_algos; *algo; algo++) { | 99 | for (ra = raid6_recov_algos; *ra; ra++) { |
99 | if (!(*algo)->valid || (*algo)->valid()) { | 100 | if ((*ra)->valid && !(*ra)->valid()) |
100 | raid6_call = **algo; | 101 | continue; |
102 | raid6_2data_recov = (*ra)->data2; | ||
103 | raid6_datap_recov = (*ra)->datap; | ||
101 | 104 | ||
102 | /* Nuke syndromes */ | 105 | printf("using recovery %s\n", (*ra)->name); |
103 | memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE); | ||
104 | 106 | ||
105 | /* Generate assumed good syndrome */ | 107 | for (algo = raid6_algos; *algo; algo++) { |
106 | raid6_call.gen_syndrome(NDISKS, PAGE_SIZE, | 108 | if (!(*algo)->valid || (*algo)->valid()) { |
107 | (void **)&dataptrs); | 109 | raid6_call = **algo; |
108 | 110 | ||
109 | for (i = 0; i < NDISKS-1; i++) | 111 | /* Nuke syndromes */ |
110 | for (j = i+1; j < NDISKS; j++) | 112 | memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE); |
111 | err += test_disks(i, j); | 113 | |
114 | /* Generate assumed good syndrome */ | ||
115 | raid6_call.gen_syndrome(NDISKS, PAGE_SIZE, | ||
116 | (void **)&dataptrs); | ||
117 | |||
118 | for (i = 0; i < NDISKS-1; i++) | ||
119 | for (j = i+1; j < NDISKS; j++) | ||
120 | err += test_disks(i, j); | ||
121 | } | ||
112 | } | 122 | } |
113 | printf("\n"); | 123 | printf("\n"); |
114 | } | 124 | } |
diff --git a/lib/raid6/x86.h b/lib/raid6/x86.h index cb2a8c91c886..d55d63232c55 100644 --- a/lib/raid6/x86.h +++ b/lib/raid6/x86.h | |||
@@ -35,24 +35,29 @@ static inline void kernel_fpu_end(void) | |||
35 | { | 35 | { |
36 | } | 36 | } |
37 | 37 | ||
38 | #define __aligned(x) __attribute__((aligned(x))) | ||
39 | |||
38 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ | 40 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ |
39 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions | 41 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions |
40 | * (fast save and restore) */ | 42 | * (fast save and restore) */ |
41 | #define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ | 43 | #define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ |
42 | #define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ | 44 | #define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ |
45 | #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ | ||
46 | #define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */ | ||
47 | #define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */ | ||
43 | #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ | 48 | #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ |
44 | 49 | ||
45 | /* Should work well enough on modern CPUs for testing */ | 50 | /* Should work well enough on modern CPUs for testing */ |
46 | static inline int boot_cpu_has(int flag) | 51 | static inline int boot_cpu_has(int flag) |
47 | { | 52 | { |
48 | u32 eax = (flag >> 5) ? 0x80000001 : 1; | 53 | u32 eax = (flag & 0x20) ? 0x80000001 : 1; |
49 | u32 edx; | 54 | u32 ecx, edx; |
50 | 55 | ||
51 | asm volatile("cpuid" | 56 | asm volatile("cpuid" |
52 | : "+a" (eax), "=d" (edx) | 57 | : "+a" (eax), "=d" (edx), "=c" (ecx) |
53 | : : "ecx", "ebx"); | 58 | : : "ebx"); |
54 | 59 | ||
55 | return (edx >> (flag & 31)) & 1; | 60 | return ((flag & 0x80 ? ecx : edx) >> (flag & 31)) & 1; |
56 | } | 61 | } |
57 | 62 | ||
58 | #endif /* ndef __KERNEL__ */ | 63 | #endif /* ndef __KERNEL__ */ |
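The reworked boot_cpu_has() keeps the kernel's (word * 32 + bit) flag encoding: an odd word number selects the extended CPUID leaf, and bit 2 of the word number selects ECX over EDX, which is what routes the new SSSE3/AVX flags to CPUID.1:ECX. A small user-space demo of just those decode rules (hypothetical helper, not part of the header):

#include <stdio.h>

static void explain_flag(const char *name, int flag)
{
	unsigned int leaf = (flag & 0x20) ? 0x80000001u : 1u;
	const char *reg = (flag & 0x80) ? "ecx" : "edx";

	printf("%-18s -> CPUID leaf 0x%x, %s bit %d\n",
	       name, leaf, reg, flag & 31);
}

int main(void)
{
	explain_flag("X86_FEATURE_XMM2", 0*32 + 26);
	explain_flag("X86_FEATURE_MMXEXT", 1*32 + 22);
	explain_flag("X86_FEATURE_SSSE3", 4*32 + 9);
	return 0;
}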
diff --git a/lib/random32.c b/lib/random32.c index fc3545a32771..938bde5876ac 100644 --- a/lib/random32.c +++ b/lib/random32.c | |||
@@ -35,7 +35,7 @@ | |||
35 | 35 | ||
36 | #include <linux/types.h> | 36 | #include <linux/types.h> |
37 | #include <linux/percpu.h> | 37 | #include <linux/percpu.h> |
38 | #include <linux/module.h> | 38 | #include <linux/export.h> |
39 | #include <linux/jiffies.h> | 39 | #include <linux/jiffies.h> |
40 | #include <linux/random.h> | 40 | #include <linux/random.h> |
41 | 41 | ||
diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 027a03f4c56d..40e03ea2a967 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/ratelimit.h> | 12 | #include <linux/ratelimit.h> |
13 | #include <linux/jiffies.h> | 13 | #include <linux/jiffies.h> |
14 | #include <linux/module.h> | 14 | #include <linux/export.h> |
15 | 15 | ||
16 | /* | 16 | /* |
17 | * __ratelimit - rate limiting | 17 | * __ratelimit - rate limiting |
@@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) | |||
39 | * in addition to the one that will be printed by | 39 | * in addition to the one that will be printed by |
40 | * the entity that is holding the lock already: | 40 | * the entity that is holding the lock already: |
41 | */ | 41 | */ |
42 | if (!spin_trylock_irqsave(&rs->lock, flags)) | 42 | if (!raw_spin_trylock_irqsave(&rs->lock, flags)) |
43 | return 0; | 43 | return 0; |
44 | 44 | ||
45 | if (!rs->begin) | 45 | if (!rs->begin) |
@@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) | |||
60 | rs->missed++; | 60 | rs->missed++; |
61 | ret = 0; | 61 | ret = 0; |
62 | } | 62 | } |
63 | spin_unlock_irqrestore(&rs->lock, flags); | 63 | raw_spin_unlock_irqrestore(&rs->lock, flags); |
64 | 64 | ||
65 | return ret; | 65 | return ret; |
66 | } | 66 | } |
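Switching ___ratelimit() to the raw_spin_* variants keeps the fast trylock path and presumably keeps it usable in contexts where sleeping spinlocks are not allowed (e.g. printk paths on RT). Typical caller-side usage is unchanged; a minimal sketch, with a hypothetical state name:

#include <linux/ratelimit.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);	/* at most 10 hits per 5s */

static void report_error(int err)
{
	if (__ratelimit(&my_rs))
		pr_warn("something failed: %d\n", err);
}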
diff --git a/lib/rational.c b/lib/rational.c index 3ed247b80662..f0aa21c2a762 100644 --- a/lib/rational.c +++ b/lib/rational.c | |||
@@ -1,13 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * rational fractions | 2 | * rational fractions |
3 | * | 3 | * |
4 | * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <os@emlix.com> | 4 | * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <oskar@scara.com> |
5 | * | 5 | * |
6 | * helper functions when coping with rational numbers | 6 | * helper functions when coping with rational numbers |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/rational.h> | 9 | #include <linux/rational.h> |
10 | #include <linux/module.h> | 10 | #include <linux/compiler.h> |
11 | #include <linux/export.h> | ||
11 | 12 | ||
12 | /* | 13 | /* |
13 | * calculate best rational approximation for a given fraction | 14 | * calculate best rational approximation for a given fraction |
diff --git a/lib/rbtree.c b/lib/rbtree.c index a16be19a1305..4f56a11d67fa 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c | |||
@@ -2,7 +2,8 @@ | |||
2 | Red Black Trees | 2 | Red Black Trees |
3 | (C) 1999 Andrea Arcangeli <andrea@suse.de> | 3 | (C) 1999 Andrea Arcangeli <andrea@suse.de> |
4 | (C) 2002 David Woodhouse <dwmw2@infradead.org> | 4 | (C) 2002 David Woodhouse <dwmw2@infradead.org> |
5 | 5 | (C) 2012 Michel Lespinasse <walken@google.com> | |
6 | |||
6 | This program is free software; you can redistribute it and/or modify | 7 | This program is free software; you can redistribute it and/or modify |
7 | it under the terms of the GNU General Public License as published by | 8 | it under the terms of the GNU General Public License as published by |
8 | the Free Software Foundation; either version 2 of the License, or | 9 | the Free Software Foundation; either version 2 of the License, or |
@@ -20,339 +21,382 @@ | |||
20 | linux/lib/rbtree.c | 21 | linux/lib/rbtree.c |
21 | */ | 22 | */ |
22 | 23 | ||
23 | #include <linux/rbtree.h> | 24 | #include <linux/rbtree_augmented.h> |
24 | #include <linux/module.h> | 25 | #include <linux/export.h> |
25 | |||
26 | static void __rb_rotate_left(struct rb_node *node, struct rb_root *root) | ||
27 | { | ||
28 | struct rb_node *right = node->rb_right; | ||
29 | struct rb_node *parent = rb_parent(node); | ||
30 | |||
31 | if ((node->rb_right = right->rb_left)) | ||
32 | rb_set_parent(right->rb_left, node); | ||
33 | right->rb_left = node; | ||
34 | 26 | ||
35 | rb_set_parent(right, parent); | 27 | /* |
28 | * red-black tree properties: http://en.wikipedia.org/wiki/Rbtree | ||
29 | * | ||
30 | * 1) A node is either red or black | ||
31 | * 2) The root is black | ||
32 | * 3) All leaves (NULL) are black | ||
33 | * 4) Both children of every red node are black | ||
34 | * 5) Every simple path from root to leaves contains the same number | ||
35 | * of black nodes. | ||
36 | * | ||
37 | * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two | ||
38 | * consecutive red nodes in a path and every red node is therefore followed by | ||
39 | * a black. So if B is the number of black nodes on every simple path (as per | ||
40 | * 5), then the longest possible path due to 4 is 2B. | ||
41 | * | ||
42 | * We shall indicate color with case, where black nodes are uppercase and red | ||
43 | * nodes will be lowercase. Unknown color nodes shall be drawn as red within | ||
44 | * parentheses and have some accompanying text comment. | ||
45 | */ | ||
36 | 46 | ||
37 | if (parent) | 47 | static inline void rb_set_black(struct rb_node *rb) |
38 | { | 48 | { |
39 | if (node == parent->rb_left) | 49 | rb->__rb_parent_color |= RB_BLACK; |
40 | parent->rb_left = right; | ||
41 | else | ||
42 | parent->rb_right = right; | ||
43 | } | ||
44 | else | ||
45 | root->rb_node = right; | ||
46 | rb_set_parent(node, right); | ||
47 | } | 50 | } |
48 | 51 | ||
49 | static void __rb_rotate_right(struct rb_node *node, struct rb_root *root) | 52 | static inline struct rb_node *rb_red_parent(struct rb_node *red) |
50 | { | 53 | { |
51 | struct rb_node *left = node->rb_left; | 54 | return (struct rb_node *)red->__rb_parent_color; |
52 | struct rb_node *parent = rb_parent(node); | 55 | } |
53 | |||
54 | if ((node->rb_left = left->rb_right)) | ||
55 | rb_set_parent(left->rb_right, node); | ||
56 | left->rb_right = node; | ||
57 | |||
58 | rb_set_parent(left, parent); | ||
59 | 56 | ||
60 | if (parent) | 57 | /* |
61 | { | 58 | * Helper function for rotations: |
62 | if (node == parent->rb_right) | 59 | * - old's parent and color get assigned to new |
63 | parent->rb_right = left; | 60 | * - old gets assigned new as a parent and 'color' as a color. |
64 | else | 61 | */ |
65 | parent->rb_left = left; | 62 | static inline void |
66 | } | 63 | __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new, |
67 | else | 64 | struct rb_root *root, int color) |
68 | root->rb_node = left; | 65 | { |
69 | rb_set_parent(node, left); | 66 | struct rb_node *parent = rb_parent(old); |
67 | new->__rb_parent_color = old->__rb_parent_color; | ||
68 | rb_set_parent_color(old, new, color); | ||
69 | __rb_change_child(old, new, parent, root); | ||
70 | } | 70 | } |
71 | 71 | ||
72 | void rb_insert_color(struct rb_node *node, struct rb_root *root) | 72 | static __always_inline void |
73 | __rb_insert(struct rb_node *node, struct rb_root *root, | ||
74 | void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) | ||
73 | { | 75 | { |
74 | struct rb_node *parent, *gparent; | 76 | struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; |
75 | 77 | ||
76 | while ((parent = rb_parent(node)) && rb_is_red(parent)) | 78 | while (true) { |
77 | { | 79 | /* |
78 | gparent = rb_parent(parent); | 80 | * Loop invariant: node is red |
79 | 81 | * | |
80 | if (parent == gparent->rb_left) | 82 | * If there is a black parent, we are done. |
81 | { | 83 | * Otherwise, take some corrective action as we don't |
82 | { | 84 | * want a red root or two consecutive red nodes. |
83 | register struct rb_node *uncle = gparent->rb_right; | 85 | */ |
84 | if (uncle && rb_is_red(uncle)) | 86 | if (!parent) { |
85 | { | 87 | rb_set_parent_color(node, NULL, RB_BLACK); |
86 | rb_set_black(uncle); | 88 | break; |
87 | rb_set_black(parent); | 89 | } else if (rb_is_black(parent)) |
88 | rb_set_red(gparent); | 90 | break; |
89 | node = gparent; | 91 | |
90 | continue; | 92 | gparent = rb_red_parent(parent); |
91 | } | 93 | |
94 | tmp = gparent->rb_right; | ||
95 | if (parent != tmp) { /* parent == gparent->rb_left */ | ||
96 | if (tmp && rb_is_red(tmp)) { | ||
97 | /* | ||
98 | * Case 1 - color flips | ||
99 | * | ||
100 | * G g | ||
101 | * / \ / \ | ||
102 | * p u --> P U | ||
103 | * / / | ||
104 | * n N | ||
105 | * | ||
106 | * However, since g's parent might be red, and | ||
107 | * 4) does not allow this, we need to recurse | ||
108 | * at g. | ||
109 | */ | ||
110 | rb_set_parent_color(tmp, gparent, RB_BLACK); | ||
111 | rb_set_parent_color(parent, gparent, RB_BLACK); | ||
112 | node = gparent; | ||
113 | parent = rb_parent(node); | ||
114 | rb_set_parent_color(node, parent, RB_RED); | ||
115 | continue; | ||
92 | } | 116 | } |
93 | 117 | ||
94 | if (parent->rb_right == node) | 118 | tmp = parent->rb_right; |
95 | { | 119 | if (node == tmp) { |
96 | register struct rb_node *tmp; | 120 | /* |
97 | __rb_rotate_left(parent, root); | 121 | * Case 2 - left rotate at parent |
98 | tmp = parent; | 122 | * |
123 | * G G | ||
124 | * / \ / \ | ||
125 | * p U --> n U | ||
126 | * \ / | ||
127 | * n p | ||
128 | * | ||
129 | * This still leaves us in violation of 4); the | ||
130 | * continuation into Case 3 will fix that. | ||
131 | */ | ||
132 | parent->rb_right = tmp = node->rb_left; | ||
133 | node->rb_left = parent; | ||
134 | if (tmp) | ||
135 | rb_set_parent_color(tmp, parent, | ||
136 | RB_BLACK); | ||
137 | rb_set_parent_color(parent, node, RB_RED); | ||
138 | augment_rotate(parent, node); | ||
99 | parent = node; | 139 | parent = node; |
100 | node = tmp; | 140 | tmp = node->rb_right; |
101 | } | 141 | } |
102 | 142 | ||
103 | rb_set_black(parent); | 143 | /* |
104 | rb_set_red(gparent); | 144 | * Case 3 - right rotate at gparent |
105 | __rb_rotate_right(gparent, root); | 145 | * |
146 | * G P | ||
147 | * / \ / \ | ||
148 | * p U --> n g | ||
149 | * / \ | ||
150 | * n U | ||
151 | */ | ||
152 | gparent->rb_left = tmp; /* == parent->rb_right */ | ||
153 | parent->rb_right = gparent; | ||
154 | if (tmp) | ||
155 | rb_set_parent_color(tmp, gparent, RB_BLACK); | ||
156 | __rb_rotate_set_parents(gparent, parent, root, RB_RED); | ||
157 | augment_rotate(gparent, parent); | ||
158 | break; | ||
106 | } else { | 159 | } else { |
107 | { | 160 | tmp = gparent->rb_left; |
108 | register struct rb_node *uncle = gparent->rb_left; | 161 | if (tmp && rb_is_red(tmp)) { |
109 | if (uncle && rb_is_red(uncle)) | 162 | /* Case 1 - color flips */ |
110 | { | 163 | rb_set_parent_color(tmp, gparent, RB_BLACK); |
111 | rb_set_black(uncle); | 164 | rb_set_parent_color(parent, gparent, RB_BLACK); |
112 | rb_set_black(parent); | 165 | node = gparent; |
113 | rb_set_red(gparent); | 166 | parent = rb_parent(node); |
114 | node = gparent; | 167 | rb_set_parent_color(node, parent, RB_RED); |
115 | continue; | 168 | continue; |
116 | } | ||
117 | } | 169 | } |
118 | 170 | ||
119 | if (parent->rb_left == node) | 171 | tmp = parent->rb_left; |
120 | { | 172 | if (node == tmp) { |
121 | register struct rb_node *tmp; | 173 | /* Case 2 - right rotate at parent */ |
122 | __rb_rotate_right(parent, root); | 174 | parent->rb_left = tmp = node->rb_right; |
123 | tmp = parent; | 175 | node->rb_right = parent; |
176 | if (tmp) | ||
177 | rb_set_parent_color(tmp, parent, | ||
178 | RB_BLACK); | ||
179 | rb_set_parent_color(parent, node, RB_RED); | ||
180 | augment_rotate(parent, node); | ||
124 | parent = node; | 181 | parent = node; |
125 | node = tmp; | 182 | tmp = node->rb_left; |
126 | } | 183 | } |
127 | 184 | ||
128 | rb_set_black(parent); | 185 | /* Case 3 - left rotate at gparent */ |
129 | rb_set_red(gparent); | 186 | gparent->rb_right = tmp; /* == parent->rb_left */ |
130 | __rb_rotate_left(gparent, root); | 187 | parent->rb_left = gparent; |
188 | if (tmp) | ||
189 | rb_set_parent_color(tmp, gparent, RB_BLACK); | ||
190 | __rb_rotate_set_parents(gparent, parent, root, RB_RED); | ||
191 | augment_rotate(gparent, parent); | ||
192 | break; | ||
131 | } | 193 | } |
132 | } | 194 | } |
133 | |||
134 | rb_set_black(root->rb_node); | ||
135 | } | 195 | } |
136 | EXPORT_SYMBOL(rb_insert_color); | ||
137 | 196 | ||
138 | static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, | 197 | __always_inline void |
139 | struct rb_root *root) | 198 | __rb_erase_color(struct rb_node *parent, struct rb_root *root, |
199 | void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) | ||
140 | { | 200 | { |
141 | struct rb_node *other; | 201 | struct rb_node *node = NULL, *sibling, *tmp1, *tmp2; |
142 | 202 | ||
143 | while ((!node || rb_is_black(node)) && node != root->rb_node) | 203 | while (true) { |
144 | { | 204 | /* |
145 | if (parent->rb_left == node) | 205 | * Loop invariants: |
146 | { | 206 | * - node is black (or NULL on first iteration) |
147 | other = parent->rb_right; | 207 | * - node is not the root (parent is not NULL) |
148 | if (rb_is_red(other)) | 208 | * - All leaf paths going through parent and node have a |
149 | { | 209 | * black node count that is 1 lower than other leaf paths. |
150 | rb_set_black(other); | 210 | */ |
151 | rb_set_red(parent); | 211 | sibling = parent->rb_right; |
152 | __rb_rotate_left(parent, root); | 212 | if (node != sibling) { /* node == parent->rb_left */ |
153 | other = parent->rb_right; | 213 | if (rb_is_red(sibling)) { |
214 | /* | ||
215 | * Case 1 - left rotate at parent | ||
216 | * | ||
217 | * P S | ||
218 | * / \ / \ | ||
219 | * N s --> p Sr | ||
220 | * / \ / \ | ||
221 | * Sl Sr N Sl | ||
222 | */ | ||
223 | parent->rb_right = tmp1 = sibling->rb_left; | ||
224 | sibling->rb_left = parent; | ||
225 | rb_set_parent_color(tmp1, parent, RB_BLACK); | ||
226 | __rb_rotate_set_parents(parent, sibling, root, | ||
227 | RB_RED); | ||
228 | augment_rotate(parent, sibling); | ||
229 | sibling = tmp1; | ||
154 | } | 230 | } |
155 | if ((!other->rb_left || rb_is_black(other->rb_left)) && | 231 | tmp1 = sibling->rb_right; |
156 | (!other->rb_right || rb_is_black(other->rb_right))) | 232 | if (!tmp1 || rb_is_black(tmp1)) { |
157 | { | 233 | tmp2 = sibling->rb_left; |
158 | rb_set_red(other); | 234 | if (!tmp2 || rb_is_black(tmp2)) { |
159 | node = parent; | 235 | /* |
160 | parent = rb_parent(node); | 236 | * Case 2 - sibling color flip |
161 | } | 237 | * (p could be either color here) |
162 | else | 238 | * |
163 | { | 239 | * (p) (p) |
164 | if (!other->rb_right || rb_is_black(other->rb_right)) | 240 | * / \ / \ |
165 | { | 241 | * N S --> N s |
166 | rb_set_black(other->rb_left); | 242 | * / \ / \ |
167 | rb_set_red(other); | 243 | * Sl Sr Sl Sr |
168 | __rb_rotate_right(other, root); | 244 | * |
169 | other = parent->rb_right; | 245 | * This leaves us violating 5) which |
246 | * can be fixed by flipping p to black | ||
247 | * if it was red, or by recursing at p. | ||
248 | * p is red when coming from Case 1. | ||
249 | */ | ||
250 | rb_set_parent_color(sibling, parent, | ||
251 | RB_RED); | ||
252 | if (rb_is_red(parent)) | ||
253 | rb_set_black(parent); | ||
254 | else { | ||
255 | node = parent; | ||
256 | parent = rb_parent(node); | ||
257 | if (parent) | ||
258 | continue; | ||
259 | } | ||
260 | break; | ||
170 | } | 261 | } |
171 | rb_set_color(other, rb_color(parent)); | 262 | /* |
172 | rb_set_black(parent); | 263 | * Case 3 - right rotate at sibling |
173 | rb_set_black(other->rb_right); | 264 | * (p could be either color here) |
174 | __rb_rotate_left(parent, root); | 265 | * |
175 | node = root->rb_node; | 266 | * (p) (p) |
176 | break; | 267 | * / \ / \ |
177 | } | 268 | * N S --> N Sl |
178 | } | 269 | * / \ \ |
179 | else | 270 | * sl Sr s |
180 | { | 271 | * \ |
181 | other = parent->rb_left; | 272 | * Sr |
182 | if (rb_is_red(other)) | 273 | */ |
183 | { | 274 | sibling->rb_left = tmp1 = tmp2->rb_right; |
184 | rb_set_black(other); | 275 | tmp2->rb_right = sibling; |
185 | rb_set_red(parent); | 276 | parent->rb_right = tmp2; |
186 | __rb_rotate_right(parent, root); | 277 | if (tmp1) |
187 | other = parent->rb_left; | 278 | rb_set_parent_color(tmp1, sibling, |
279 | RB_BLACK); | ||
280 | augment_rotate(sibling, tmp2); | ||
281 | tmp1 = sibling; | ||
282 | sibling = tmp2; | ||
188 | } | 283 | } |
189 | if ((!other->rb_left || rb_is_black(other->rb_left)) && | 284 | /* |
190 | (!other->rb_right || rb_is_black(other->rb_right))) | 285 | * Case 4 - left rotate at parent + color flips |
191 | { | 286 | * (p and sl could be either color here. |
192 | rb_set_red(other); | 287 | * After rotation, p becomes black, s acquires |
193 | node = parent; | 288 | * p's color, and sl keeps its color) |
194 | parent = rb_parent(node); | 289 | * |
290 | * (p) (s) | ||
291 | * / \ / \ | ||
292 | * N S --> P Sr | ||
293 | * / \ / \ | ||
294 | * (sl) sr N (sl) | ||
295 | */ | ||
296 | parent->rb_right = tmp2 = sibling->rb_left; | ||
297 | sibling->rb_left = parent; | ||
298 | rb_set_parent_color(tmp1, sibling, RB_BLACK); | ||
299 | if (tmp2) | ||
300 | rb_set_parent(tmp2, parent); | ||
301 | __rb_rotate_set_parents(parent, sibling, root, | ||
302 | RB_BLACK); | ||
303 | augment_rotate(parent, sibling); | ||
304 | break; | ||
305 | } else { | ||
306 | sibling = parent->rb_left; | ||
307 | if (rb_is_red(sibling)) { | ||
308 | /* Case 1 - right rotate at parent */ | ||
309 | parent->rb_left = tmp1 = sibling->rb_right; | ||
310 | sibling->rb_right = parent; | ||
311 | rb_set_parent_color(tmp1, parent, RB_BLACK); | ||
312 | __rb_rotate_set_parents(parent, sibling, root, | ||
313 | RB_RED); | ||
314 | augment_rotate(parent, sibling); | ||
315 | sibling = tmp1; | ||
195 | } | 316 | } |
196 | else | 317 | tmp1 = sibling->rb_left; |
197 | { | 318 | if (!tmp1 || rb_is_black(tmp1)) { |
198 | if (!other->rb_left || rb_is_black(other->rb_left)) | 319 | tmp2 = sibling->rb_right; |
199 | { | 320 | if (!tmp2 || rb_is_black(tmp2)) { |
200 | rb_set_black(other->rb_right); | 321 | /* Case 2 - sibling color flip */ |
201 | rb_set_red(other); | 322 | rb_set_parent_color(sibling, parent, |
202 | __rb_rotate_left(other, root); | 323 | RB_RED); |
203 | other = parent->rb_left; | 324 | if (rb_is_red(parent)) |
325 | rb_set_black(parent); | ||
326 | else { | ||
327 | node = parent; | ||
328 | parent = rb_parent(node); | ||
329 | if (parent) | ||
330 | continue; | ||
331 | } | ||
332 | break; | ||
204 | } | 333 | } |
205 | rb_set_color(other, rb_color(parent)); | 334 | /* Case 3 - right rotate at sibling */ |
206 | rb_set_black(parent); | 335 | sibling->rb_right = tmp1 = tmp2->rb_left; |
207 | rb_set_black(other->rb_left); | 336 | tmp2->rb_left = sibling; |
208 | __rb_rotate_right(parent, root); | 337 | parent->rb_left = tmp2; |
209 | node = root->rb_node; | 338 | if (tmp1) |
210 | break; | 339 | rb_set_parent_color(tmp1, sibling, |
340 | RB_BLACK); | ||
341 | augment_rotate(sibling, tmp2); | ||
342 | tmp1 = sibling; | ||
343 | sibling = tmp2; | ||
211 | } | 344 | } |
345 | /* Case 4 - left rotate at parent + color flips */ | ||
346 | parent->rb_left = tmp2 = sibling->rb_right; | ||
347 | sibling->rb_right = parent; | ||
348 | rb_set_parent_color(tmp1, sibling, RB_BLACK); | ||
349 | if (tmp2) | ||
350 | rb_set_parent(tmp2, parent); | ||
351 | __rb_rotate_set_parents(parent, sibling, root, | ||
352 | RB_BLACK); | ||
353 | augment_rotate(parent, sibling); | ||
354 | break; | ||
212 | } | 355 | } |
213 | } | 356 | } |
214 | if (node) | ||
215 | rb_set_black(node); | ||
216 | } | 357 | } |
358 | EXPORT_SYMBOL(__rb_erase_color); | ||
217 | 359 | ||
218 | void rb_erase(struct rb_node *node, struct rb_root *root) | 360 | /* |
219 | { | 361 | * Non-augmented rbtree manipulation functions. |
220 | struct rb_node *child, *parent; | 362 | * |
221 | int color; | 363 | * We use dummy augmented callbacks here, and have the compiler optimize them |
222 | 364 | * out of the rb_insert_color() and rb_erase() function definitions. | |
223 | if (!node->rb_left) | 365 | */ |
224 | child = node->rb_right; | ||
225 | else if (!node->rb_right) | ||
226 | child = node->rb_left; | ||
227 | else | ||
228 | { | ||
229 | struct rb_node *old = node, *left; | ||
230 | |||
231 | node = node->rb_right; | ||
232 | while ((left = node->rb_left) != NULL) | ||
233 | node = left; | ||
234 | |||
235 | if (rb_parent(old)) { | ||
236 | if (rb_parent(old)->rb_left == old) | ||
237 | rb_parent(old)->rb_left = node; | ||
238 | else | ||
239 | rb_parent(old)->rb_right = node; | ||
240 | } else | ||
241 | root->rb_node = node; | ||
242 | |||
243 | child = node->rb_right; | ||
244 | parent = rb_parent(node); | ||
245 | color = rb_color(node); | ||
246 | |||
247 | if (parent == old) { | ||
248 | parent = node; | ||
249 | } else { | ||
250 | if (child) | ||
251 | rb_set_parent(child, parent); | ||
252 | parent->rb_left = child; | ||
253 | |||
254 | node->rb_right = old->rb_right; | ||
255 | rb_set_parent(old->rb_right, node); | ||
256 | } | ||
257 | |||
258 | node->rb_parent_color = old->rb_parent_color; | ||
259 | node->rb_left = old->rb_left; | ||
260 | rb_set_parent(old->rb_left, node); | ||
261 | 366 | ||
262 | goto color; | 367 | static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {} |
263 | } | 368 | static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {} |
369 | static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {} | ||
264 | 370 | ||
265 | parent = rb_parent(node); | 371 | static const struct rb_augment_callbacks dummy_callbacks = { |
266 | color = rb_color(node); | 372 | dummy_propagate, dummy_copy, dummy_rotate |
267 | 373 | }; | |
268 | if (child) | ||
269 | rb_set_parent(child, parent); | ||
270 | if (parent) | ||
271 | { | ||
272 | if (parent->rb_left == node) | ||
273 | parent->rb_left = child; | ||
274 | else | ||
275 | parent->rb_right = child; | ||
276 | } | ||
277 | else | ||
278 | root->rb_node = child; | ||
279 | 374 | ||
280 | color: | 375 | void rb_insert_color(struct rb_node *node, struct rb_root *root) |
281 | if (color == RB_BLACK) | ||
282 | __rb_erase_color(child, parent, root); | ||
283 | } | ||
284 | EXPORT_SYMBOL(rb_erase); | ||
285 | |||
286 | static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data) | ||
287 | { | 376 | { |
288 | struct rb_node *parent; | 377 | __rb_insert(node, root, dummy_rotate); |
289 | |||
290 | up: | ||
291 | func(node, data); | ||
292 | parent = rb_parent(node); | ||
293 | if (!parent) | ||
294 | return; | ||
295 | |||
296 | if (node == parent->rb_left && parent->rb_right) | ||
297 | func(parent->rb_right, data); | ||
298 | else if (parent->rb_left) | ||
299 | func(parent->rb_left, data); | ||
300 | |||
301 | node = parent; | ||
302 | goto up; | ||
303 | } | 378 | } |
379 | EXPORT_SYMBOL(rb_insert_color); | ||
304 | 380 | ||
305 | /* | 381 | void rb_erase(struct rb_node *node, struct rb_root *root) |
306 | * after inserting @node into the tree, update the tree to account for | ||
307 | * both the new entry and any damage done by rebalance | ||
308 | */ | ||
309 | void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data) | ||
310 | { | 382 | { |
311 | if (node->rb_left) | 383 | rb_erase_augmented(node, root, &dummy_callbacks); |
312 | node = node->rb_left; | ||
313 | else if (node->rb_right) | ||
314 | node = node->rb_right; | ||
315 | |||
316 | rb_augment_path(node, func, data); | ||
317 | } | 384 | } |
318 | EXPORT_SYMBOL(rb_augment_insert); | 385 | EXPORT_SYMBOL(rb_erase); |
319 | 386 | ||
320 | /* | 387 | /* |
321 | * before removing the node, find the deepest node on the rebalance path | 388 | * Augmented rbtree manipulation functions. |
322 | * that will still be there after @node gets removed | 389 | * |
390 | * This instantiates the same __always_inline functions as in the non-augmented | ||
391 | * case, but this time with user-defined callbacks. | ||
323 | */ | 392 | */ |
324 | struct rb_node *rb_augment_erase_begin(struct rb_node *node) | ||
325 | { | ||
326 | struct rb_node *deepest; | ||
327 | |||
328 | if (!node->rb_right && !node->rb_left) | ||
329 | deepest = rb_parent(node); | ||
330 | else if (!node->rb_right) | ||
331 | deepest = node->rb_left; | ||
332 | else if (!node->rb_left) | ||
333 | deepest = node->rb_right; | ||
334 | else { | ||
335 | deepest = rb_next(node); | ||
336 | if (deepest->rb_right) | ||
337 | deepest = deepest->rb_right; | ||
338 | else if (rb_parent(deepest) != node) | ||
339 | deepest = rb_parent(deepest); | ||
340 | } | ||
341 | |||
342 | return deepest; | ||
343 | } | ||
344 | EXPORT_SYMBOL(rb_augment_erase_begin); | ||
345 | 393 | ||
346 | /* | 394 | void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, |
347 | * after removal, update the tree to account for the removed entry | 395 | void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) |
348 | * and any rebalance damage. | ||
349 | */ | ||
350 | void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data) | ||
351 | { | 396 | { |
352 | if (node) | 397 | __rb_insert(node, root, augment_rotate); |
353 | rb_augment_path(node, func, data); | ||
354 | } | 398 | } |
355 | EXPORT_SYMBOL(rb_augment_erase_end); | 399 | EXPORT_SYMBOL(__rb_insert_augmented); |
356 | 400 | ||
357 | /* | 401 | /* |
358 | * This function returns the first node (in sort order) of the tree. | 402 | * This function returns the first node (in sort order) of the tree. |
@@ -387,11 +431,13 @@ struct rb_node *rb_next(const struct rb_node *node) | |||
387 | { | 431 | { |
388 | struct rb_node *parent; | 432 | struct rb_node *parent; |
389 | 433 | ||
390 | if (rb_parent(node) == node) | 434 | if (RB_EMPTY_NODE(node)) |
391 | return NULL; | 435 | return NULL; |
392 | 436 | ||
393 | /* If we have a right-hand child, go down and then left as far | 437 | /* |
394 | as we can. */ | 438 | * If we have a right-hand child, go down and then left as far |
439 | * as we can. | ||
440 | */ | ||
395 | if (node->rb_right) { | 441 | if (node->rb_right) { |
396 | node = node->rb_right; | 442 | node = node->rb_right; |
397 | while (node->rb_left) | 443 | while (node->rb_left) |
@@ -399,12 +445,13 @@ struct rb_node *rb_next(const struct rb_node *node) | |||
399 | return (struct rb_node *)node; | 445 | return (struct rb_node *)node; |
400 | } | 446 | } |
401 | 447 | ||
402 | /* No right-hand children. Everything down and left is | 448 | /* |
403 | smaller than us, so any 'next' node must be in the general | 449 | * No right-hand children. Everything down and left is smaller than us, |
404 | direction of our parent. Go up the tree; any time the | 450 | * so any 'next' node must be in the general direction of our parent. |
405 | ancestor is a right-hand child of its parent, keep going | 451 | * Go up the tree; any time the ancestor is a right-hand child of its |
406 | up. First time it's a left-hand child of its parent, said | 452 | * parent, keep going up. First time it's a left-hand child of its |
407 | parent is our 'next' node. */ | 453 | * parent, said parent is our 'next' node. |
454 | */ | ||
408 | while ((parent = rb_parent(node)) && node == parent->rb_right) | 455 | while ((parent = rb_parent(node)) && node == parent->rb_right) |
409 | node = parent; | 456 | node = parent; |
410 | 457 | ||
@@ -416,11 +463,13 @@ struct rb_node *rb_prev(const struct rb_node *node) | |||
416 | { | 463 | { |
417 | struct rb_node *parent; | 464 | struct rb_node *parent; |
418 | 465 | ||
419 | if (rb_parent(node) == node) | 466 | if (RB_EMPTY_NODE(node)) |
420 | return NULL; | 467 | return NULL; |
421 | 468 | ||
422 | /* If we have a left-hand child, go down and then right as far | 469 | /* |
423 | as we can. */ | 470 | * If we have a left-hand child, go down and then right as far |
471 | * as we can. | ||
472 | */ | ||
424 | if (node->rb_left) { | 473 | if (node->rb_left) { |
425 | node = node->rb_left; | 474 | node = node->rb_left; |
426 | while (node->rb_right) | 475 | while (node->rb_right) |
@@ -428,8 +477,10 @@ struct rb_node *rb_prev(const struct rb_node *node) | |||
428 | return (struct rb_node *)node; | 477 | return (struct rb_node *)node; |
429 | } | 478 | } |
430 | 479 | ||
431 | /* No left-hand children. Go up till we find an ancestor which | 480 | /* |
432 | is a right-hand child of its parent */ | 481 | * No left-hand children. Go up till we find an ancestor which |
482 | * is a right-hand child of its parent. | ||
483 | */ | ||
433 | while ((parent = rb_parent(node)) && node == parent->rb_left) | 484 | while ((parent = rb_parent(node)) && node == parent->rb_left) |
434 | node = parent; | 485 | node = parent; |
435 | 486 | ||
@@ -443,14 +494,7 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new, | |||
443 | struct rb_node *parent = rb_parent(victim); | 494 | struct rb_node *parent = rb_parent(victim); |
444 | 495 | ||
445 | /* Set the surrounding nodes to point to the replacement */ | 496 | /* Set the surrounding nodes to point to the replacement */ |
446 | if (parent) { | 497 | __rb_change_child(victim, new, parent, root); |
447 | if (victim == parent->rb_left) | ||
448 | parent->rb_left = new; | ||
449 | else | ||
450 | parent->rb_right = new; | ||
451 | } else { | ||
452 | root->rb_node = new; | ||
453 | } | ||
454 | if (victim->rb_left) | 498 | if (victim->rb_left) |
455 | rb_set_parent(victim->rb_left, new); | 499 | rb_set_parent(victim->rb_left, new); |
456 | if (victim->rb_right) | 500 | if (victim->rb_right) |
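The rewritten rb_insert_color()/rb_erase() keep the old calling convention, so existing callers do not change. For readers new to the interface, a minimal caller might look like the sketch below; struct my_node, my_insert and the key field are illustrative names, not part of this patch.

	#include <linux/rbtree.h>

	struct my_node {
		struct rb_node rb;
		unsigned long key;
	};

	/* Insert @new into @root, keeping the tree ordered by key. */
	static void my_insert(struct rb_root *root, struct my_node *new)
	{
		struct rb_node **link = &root->rb_node, *parent = NULL;

		while (*link) {
			struct my_node *cur = rb_entry(*link, struct my_node, rb);

			parent = *link;
			if (new->key < cur->key)
				link = &parent->rb_left;
			else
				link = &parent->rb_right;
		}

		/* Link the node in red, then let the core rebalance the tree. */
		rb_link_node(&new->rb, parent, link);
		rb_insert_color(&new->rb, root);
	}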
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c new file mode 100644 index 000000000000..268b23951fec --- /dev/null +++ b/lib/rbtree_test.c | |||
@@ -0,0 +1,234 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/rbtree_augmented.h> | ||
3 | #include <linux/random.h> | ||
4 | #include <asm/timex.h> | ||
5 | |||
6 | #define NODES 100 | ||
7 | #define PERF_LOOPS 100000 | ||
8 | #define CHECK_LOOPS 100 | ||
9 | |||
10 | struct test_node { | ||
11 | struct rb_node rb; | ||
12 | u32 key; | ||
13 | |||
14 | /* following fields used for testing augmented rbtree functionality */ | ||
15 | u32 val; | ||
16 | u32 augmented; | ||
17 | }; | ||
18 | |||
19 | static struct rb_root root = RB_ROOT; | ||
20 | static struct test_node nodes[NODES]; | ||
21 | |||
22 | static struct rnd_state rnd; | ||
23 | |||
24 | static void insert(struct test_node *node, struct rb_root *root) | ||
25 | { | ||
26 | struct rb_node **new = &root->rb_node, *parent = NULL; | ||
27 | u32 key = node->key; | ||
28 | |||
29 | while (*new) { | ||
30 | parent = *new; | ||
31 | if (key < rb_entry(parent, struct test_node, rb)->key) | ||
32 | new = &parent->rb_left; | ||
33 | else | ||
34 | new = &parent->rb_right; | ||
35 | } | ||
36 | |||
37 | rb_link_node(&node->rb, parent, new); | ||
38 | rb_insert_color(&node->rb, root); | ||
39 | } | ||
40 | |||
41 | static inline void erase(struct test_node *node, struct rb_root *root) | ||
42 | { | ||
43 | rb_erase(&node->rb, root); | ||
44 | } | ||
45 | |||
46 | static inline u32 augment_recompute(struct test_node *node) | ||
47 | { | ||
48 | u32 max = node->val, child_augmented; | ||
49 | if (node->rb.rb_left) { | ||
50 | child_augmented = rb_entry(node->rb.rb_left, struct test_node, | ||
51 | rb)->augmented; | ||
52 | if (max < child_augmented) | ||
53 | max = child_augmented; | ||
54 | } | ||
55 | if (node->rb.rb_right) { | ||
56 | child_augmented = rb_entry(node->rb.rb_right, struct test_node, | ||
57 | rb)->augmented; | ||
58 | if (max < child_augmented) | ||
59 | max = child_augmented; | ||
60 | } | ||
61 | return max; | ||
62 | } | ||
63 | |||
64 | RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb, | ||
65 | u32, augmented, augment_recompute) | ||
66 | |||
67 | static void insert_augmented(struct test_node *node, struct rb_root *root) | ||
68 | { | ||
69 | struct rb_node **new = &root->rb_node, *rb_parent = NULL; | ||
70 | u32 key = node->key; | ||
71 | u32 val = node->val; | ||
72 | struct test_node *parent; | ||
73 | |||
74 | while (*new) { | ||
75 | rb_parent = *new; | ||
76 | parent = rb_entry(rb_parent, struct test_node, rb); | ||
77 | if (parent->augmented < val) | ||
78 | parent->augmented = val; | ||
79 | if (key < parent->key) | ||
80 | new = &parent->rb.rb_left; | ||
81 | else | ||
82 | new = &parent->rb.rb_right; | ||
83 | } | ||
84 | |||
85 | node->augmented = val; | ||
86 | rb_link_node(&node->rb, rb_parent, new); | ||
87 | rb_insert_augmented(&node->rb, root, &augment_callbacks); | ||
88 | } | ||
89 | |||
90 | static void erase_augmented(struct test_node *node, struct rb_root *root) | ||
91 | { | ||
92 | rb_erase_augmented(&node->rb, root, &augment_callbacks); | ||
93 | } | ||
94 | |||
95 | static void init(void) | ||
96 | { | ||
97 | int i; | ||
98 | for (i = 0; i < NODES; i++) { | ||
99 | nodes[i].key = prandom32(&rnd); | ||
100 | nodes[i].val = prandom32(&rnd); | ||
101 | } | ||
102 | } | ||
103 | |||
104 | static bool is_red(struct rb_node *rb) | ||
105 | { | ||
106 | return !(rb->__rb_parent_color & 1); | ||
107 | } | ||
108 | |||
109 | static int black_path_count(struct rb_node *rb) | ||
110 | { | ||
111 | int count; | ||
112 | for (count = 0; rb; rb = rb_parent(rb)) | ||
113 | count += !is_red(rb); | ||
114 | return count; | ||
115 | } | ||
116 | |||
117 | static void check(int nr_nodes) | ||
118 | { | ||
119 | struct rb_node *rb; | ||
120 | int count = 0; | ||
121 | int blacks; | ||
122 | u32 prev_key = 0; | ||
123 | |||
124 | for (rb = rb_first(&root); rb; rb = rb_next(rb)) { | ||
125 | struct test_node *node = rb_entry(rb, struct test_node, rb); | ||
126 | WARN_ON_ONCE(node->key < prev_key); | ||
127 | WARN_ON_ONCE(is_red(rb) && | ||
128 | (!rb_parent(rb) || is_red(rb_parent(rb)))); | ||
129 | if (!count) | ||
130 | blacks = black_path_count(rb); | ||
131 | else | ||
132 | WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) && | ||
133 | blacks != black_path_count(rb)); | ||
134 | prev_key = node->key; | ||
135 | count++; | ||
136 | } | ||
137 | WARN_ON_ONCE(count != nr_nodes); | ||
138 | } | ||
139 | |||
140 | static void check_augmented(int nr_nodes) | ||
141 | { | ||
142 | struct rb_node *rb; | ||
143 | |||
144 | check(nr_nodes); | ||
145 | for (rb = rb_first(&root); rb; rb = rb_next(rb)) { | ||
146 | struct test_node *node = rb_entry(rb, struct test_node, rb); | ||
147 | WARN_ON_ONCE(node->augmented != augment_recompute(node)); | ||
148 | } | ||
149 | } | ||
150 | |||
151 | static int rbtree_test_init(void) | ||
152 | { | ||
153 | int i, j; | ||
154 | cycles_t time1, time2, time; | ||
155 | |||
156 | printk(KERN_ALERT "rbtree testing"); | ||
157 | |||
158 | prandom32_seed(&rnd, 3141592653589793238ULL); | ||
159 | init(); | ||
160 | |||
161 | time1 = get_cycles(); | ||
162 | |||
163 | for (i = 0; i < PERF_LOOPS; i++) { | ||
164 | for (j = 0; j < NODES; j++) | ||
165 | insert(nodes + j, &root); | ||
166 | for (j = 0; j < NODES; j++) | ||
167 | erase(nodes + j, &root); | ||
168 | } | ||
169 | |||
170 | time2 = get_cycles(); | ||
171 | time = time2 - time1; | ||
172 | |||
173 | time = div_u64(time, PERF_LOOPS); | ||
174 | printk(" -> %llu cycles\n", (unsigned long long)time); | ||
175 | |||
176 | for (i = 0; i < CHECK_LOOPS; i++) { | ||
177 | init(); | ||
178 | for (j = 0; j < NODES; j++) { | ||
179 | check(j); | ||
180 | insert(nodes + j, &root); | ||
181 | } | ||
182 | for (j = 0; j < NODES; j++) { | ||
183 | check(NODES - j); | ||
184 | erase(nodes + j, &root); | ||
185 | } | ||
186 | check(0); | ||
187 | } | ||
188 | |||
189 | printk(KERN_ALERT "augmented rbtree testing"); | ||
190 | |||
191 | init(); | ||
192 | |||
193 | time1 = get_cycles(); | ||
194 | |||
195 | for (i = 0; i < PERF_LOOPS; i++) { | ||
196 | for (j = 0; j < NODES; j++) | ||
197 | insert_augmented(nodes + j, &root); | ||
198 | for (j = 0; j < NODES; j++) | ||
199 | erase_augmented(nodes + j, &root); | ||
200 | } | ||
201 | |||
202 | time2 = get_cycles(); | ||
203 | time = time2 - time1; | ||
204 | |||
205 | time = div_u64(time, PERF_LOOPS); | ||
206 | printk(" -> %llu cycles\n", (unsigned long long)time); | ||
207 | |||
208 | for (i = 0; i < CHECK_LOOPS; i++) { | ||
209 | init(); | ||
210 | for (j = 0; j < NODES; j++) { | ||
211 | check_augmented(j); | ||
212 | insert_augmented(nodes + j, &root); | ||
213 | } | ||
214 | for (j = 0; j < NODES; j++) { | ||
215 | check_augmented(NODES - j); | ||
216 | erase_augmented(nodes + j, &root); | ||
217 | } | ||
218 | check_augmented(0); | ||
219 | } | ||
220 | |||
221 | return -EAGAIN; /* Failure will directly unload the module */ | ||
222 | } | ||
223 | |||
224 | static void rbtree_test_exit(void) | ||
225 | { | ||
226 | printk(KERN_ALERT "test exit\n"); | ||
227 | } | ||
228 | |||
229 | module_init(rbtree_test_init) | ||
230 | module_exit(rbtree_test_exit) | ||
231 | |||
232 | MODULE_LICENSE("GPL"); | ||
233 | MODULE_AUTHOR("Michel Lespinasse"); | ||
234 | MODULE_DESCRIPTION("Red Black Tree test"); | ||
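The check() walk above depends on rb_first()/rb_next() visiting keys in sorted order. When a tree is torn down while being walked, the successor has to be fetched before rb_erase() rewires the tree. A hedged sketch using the test_node layout from the test module above (my_drain is an illustrative name):

	#include <linux/rbtree.h>

	/* Remove every node from @root with an in-order walk. */
	static void my_drain(struct rb_root *root)
	{
		struct rb_node *rb = rb_first(root);

		while (rb) {
			/* Fetch the successor before rb_erase() rewires the tree. */
			struct rb_node *next = rb_next(rb);
			struct test_node *node = rb_entry(rb, struct test_node, rb);

			rb_erase(&node->rb, root);
			/* node is now unlinked and may be freed or reinserted */
			rb = next;
		}
	}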
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c index 6a3bd48fa2a0..75510e94f7d0 100644 --- a/lib/reciprocal_div.c +++ b/lib/reciprocal_div.c | |||
@@ -1,5 +1,6 @@ | |||
1 | #include <asm/div64.h> | 1 | #include <asm/div64.h> |
2 | #include <linux/reciprocal_div.h> | 2 | #include <linux/reciprocal_div.h> |
3 | #include <linux/export.h> | ||
3 | 4 | ||
4 | u32 reciprocal_value(u32 k) | 5 | u32 reciprocal_value(u32 k) |
5 | { | 6 | { |
@@ -7,3 +8,4 @@ u32 reciprocal_value(u32 k) | |||
7 | do_div(val, k); | 8 | do_div(val, k); |
8 | return (u32)val; | 9 | return (u32)val; |
9 | } | 10 | } |
11 | EXPORT_SYMBOL(reciprocal_value); | ||
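reciprocal_value() precomputes a fixed-point inverse of a constant divisor so hot paths can replace a hardware divide with a multiply and shift; exporting it lets modules use the helper as well. A hedged sketch of a consumer, assuming the reciprocal_divide() helper declared alongside it in <linux/reciprocal_div.h> (bucket_of and its parameters are illustrative, and the quotient is only approximately hash/nbuckets):

	#include <linux/reciprocal_div.h>

	/*
	 * Spread @hash over @nbuckets without a hardware divide.  The caller
	 * computes @nbuckets_recip once with reciprocal_value(nbuckets) and
	 * reuses it for every lookup.
	 */
	static u32 bucket_of(u32 hash, u32 nbuckets, u32 nbuckets_recip)
	{
		u32 q = reciprocal_divide(hash, nbuckets_recip); /* ~hash / nbuckets */

		return hash - q * nbuckets;	/* remainder, i.e. hash % nbuckets */
	}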
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index ffc9fc7f3b05..7e0d6a58fc83 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c | |||
@@ -7,7 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | #include <linux/rwsem.h> | 8 | #include <linux/rwsem.h> |
9 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
10 | #include <linux/module.h> | 10 | #include <linux/export.h> |
11 | 11 | ||
12 | struct rwsem_waiter { | 12 | struct rwsem_waiter { |
13 | struct list_head list; | 13 | struct list_head list; |
@@ -22,9 +22,9 @@ int rwsem_is_locked(struct rw_semaphore *sem) | |||
22 | int ret = 1; | 22 | int ret = 1; |
23 | unsigned long flags; | 23 | unsigned long flags; |
24 | 24 | ||
25 | if (spin_trylock_irqsave(&sem->wait_lock, flags)) { | 25 | if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { |
26 | ret = (sem->activity != 0); | 26 | ret = (sem->activity != 0); |
27 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 27 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
28 | } | 28 | } |
29 | return ret; | 29 | return ret; |
30 | } | 30 | } |
@@ -44,7 +44,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name, | |||
44 | lockdep_init_map(&sem->dep_map, name, key, 0); | 44 | lockdep_init_map(&sem->dep_map, name, key, 0); |
45 | #endif | 45 | #endif |
46 | sem->activity = 0; | 46 | sem->activity = 0; |
47 | spin_lock_init(&sem->wait_lock); | 47 | raw_spin_lock_init(&sem->wait_lock); |
48 | INIT_LIST_HEAD(&sem->wait_list); | 48 | INIT_LIST_HEAD(&sem->wait_list); |
49 | } | 49 | } |
50 | EXPORT_SYMBOL(__init_rwsem); | 50 | EXPORT_SYMBOL(__init_rwsem); |
@@ -145,12 +145,12 @@ void __sched __down_read(struct rw_semaphore *sem) | |||
145 | struct task_struct *tsk; | 145 | struct task_struct *tsk; |
146 | unsigned long flags; | 146 | unsigned long flags; |
147 | 147 | ||
148 | spin_lock_irqsave(&sem->wait_lock, flags); | 148 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
149 | 149 | ||
150 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { | 150 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { |
151 | /* granted */ | 151 | /* granted */ |
152 | sem->activity++; | 152 | sem->activity++; |
153 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 153 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
154 | goto out; | 154 | goto out; |
155 | } | 155 | } |
156 | 156 | ||
@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem) | |||
165 | list_add_tail(&waiter.list, &sem->wait_list); | 165 | list_add_tail(&waiter.list, &sem->wait_list); |
166 | 166 | ||
167 | /* we don't need to touch the semaphore struct anymore */ | 167 | /* we don't need to touch the semaphore struct anymore */ |
168 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 168 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
169 | 169 | ||
170 | /* wait to be given the lock */ | 170 | /* wait to be given the lock */ |
171 | for (;;) { | 171 | for (;;) { |
@@ -189,7 +189,7 @@ int __down_read_trylock(struct rw_semaphore *sem) | |||
189 | int ret = 0; | 189 | int ret = 0; |
190 | 190 | ||
191 | 191 | ||
192 | spin_lock_irqsave(&sem->wait_lock, flags); | 192 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
193 | 193 | ||
194 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { | 194 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { |
195 | /* granted */ | 195 | /* granted */ |
@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaphore *sem) | |||
197 | ret = 1; | 197 | ret = 1; |
198 | } | 198 | } |
199 | 199 | ||
200 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 200 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
201 | 201 | ||
202 | return ret; | 202 | return ret; |
203 | } | 203 | } |
@@ -212,12 +212,12 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
212 | struct task_struct *tsk; | 212 | struct task_struct *tsk; |
213 | unsigned long flags; | 213 | unsigned long flags; |
214 | 214 | ||
215 | spin_lock_irqsave(&sem->wait_lock, flags); | 215 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
216 | 216 | ||
217 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { | 217 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { |
218 | /* granted */ | 218 | /* granted */ |
219 | sem->activity = -1; | 219 | sem->activity = -1; |
220 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 220 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
221 | goto out; | 221 | goto out; |
222 | } | 222 | } |
223 | 223 | ||
@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
232 | list_add_tail(&waiter.list, &sem->wait_list); | 232 | list_add_tail(&waiter.list, &sem->wait_list); |
233 | 233 | ||
234 | /* we don't need to touch the semaphore struct anymore */ | 234 | /* we don't need to touch the semaphore struct anymore */ |
235 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 235 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
236 | 236 | ||
237 | /* wait to be given the lock */ | 237 | /* wait to be given the lock */ |
238 | for (;;) { | 238 | for (;;) { |
@@ -260,7 +260,7 @@ int __down_write_trylock(struct rw_semaphore *sem) | |||
260 | unsigned long flags; | 260 | unsigned long flags; |
261 | int ret = 0; | 261 | int ret = 0; |
262 | 262 | ||
263 | spin_lock_irqsave(&sem->wait_lock, flags); | 263 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
264 | 264 | ||
265 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { | 265 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { |
266 | /* granted */ | 266 | /* granted */ |
@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semaphore *sem) | |||
268 | ret = 1; | 268 | ret = 1; |
269 | } | 269 | } |
270 | 270 | ||
271 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 271 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
272 | 272 | ||
273 | return ret; | 273 | return ret; |
274 | } | 274 | } |
@@ -280,12 +280,12 @@ void __up_read(struct rw_semaphore *sem) | |||
280 | { | 280 | { |
281 | unsigned long flags; | 281 | unsigned long flags; |
282 | 282 | ||
283 | spin_lock_irqsave(&sem->wait_lock, flags); | 283 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
284 | 284 | ||
285 | if (--sem->activity == 0 && !list_empty(&sem->wait_list)) | 285 | if (--sem->activity == 0 && !list_empty(&sem->wait_list)) |
286 | sem = __rwsem_wake_one_writer(sem); | 286 | sem = __rwsem_wake_one_writer(sem); |
287 | 287 | ||
288 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 288 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
289 | } | 289 | } |
290 | 290 | ||
291 | /* | 291 | /* |
@@ -295,13 +295,13 @@ void __up_write(struct rw_semaphore *sem) | |||
295 | { | 295 | { |
296 | unsigned long flags; | 296 | unsigned long flags; |
297 | 297 | ||
298 | spin_lock_irqsave(&sem->wait_lock, flags); | 298 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
299 | 299 | ||
300 | sem->activity = 0; | 300 | sem->activity = 0; |
301 | if (!list_empty(&sem->wait_list)) | 301 | if (!list_empty(&sem->wait_list)) |
302 | sem = __rwsem_do_wake(sem, 1); | 302 | sem = __rwsem_do_wake(sem, 1); |
303 | 303 | ||
304 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 304 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
305 | } | 305 | } |
306 | 306 | ||
307 | /* | 307 | /* |
@@ -312,12 +312,12 @@ void __downgrade_write(struct rw_semaphore *sem) | |||
312 | { | 312 | { |
313 | unsigned long flags; | 313 | unsigned long flags; |
314 | 314 | ||
315 | spin_lock_irqsave(&sem->wait_lock, flags); | 315 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
316 | 316 | ||
317 | sem->activity = 1; | 317 | sem->activity = 1; |
318 | if (!list_empty(&sem->wait_list)) | 318 | if (!list_empty(&sem->wait_list)) |
319 | sem = __rwsem_do_wake(sem, 0); | 319 | sem = __rwsem_do_wake(sem, 0); |
320 | 320 | ||
321 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 321 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
322 | } | 322 | } |
323 | 323 | ||
diff --git a/lib/rwsem.c b/lib/rwsem.c index aa7c3052261f..8337e1b9bb8d 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <linux/rwsem.h> | 6 | #include <linux/rwsem.h> |
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include <linux/module.h> | 9 | #include <linux/export.h> |
10 | 10 | ||
11 | /* | 11 | /* |
12 | * Initialize an rwsem: | 12 | * Initialize an rwsem: |
@@ -22,7 +22,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name, | |||
22 | lockdep_init_map(&sem->dep_map, name, key, 0); | 22 | lockdep_init_map(&sem->dep_map, name, key, 0); |
23 | #endif | 23 | #endif |
24 | sem->count = RWSEM_UNLOCKED_VALUE; | 24 | sem->count = RWSEM_UNLOCKED_VALUE; |
25 | spin_lock_init(&sem->wait_lock); | 25 | raw_spin_lock_init(&sem->wait_lock); |
26 | INIT_LIST_HEAD(&sem->wait_list); | 26 | INIT_LIST_HEAD(&sem->wait_list); |
27 | } | 27 | } |
28 | 28 | ||
@@ -180,7 +180,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem, | |||
180 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | 180 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); |
181 | 181 | ||
182 | /* set up my own style of waitqueue */ | 182 | /* set up my own style of waitqueue */ |
183 | spin_lock_irq(&sem->wait_lock); | 183 | raw_spin_lock_irq(&sem->wait_lock); |
184 | waiter.task = tsk; | 184 | waiter.task = tsk; |
185 | waiter.flags = flags; | 185 | waiter.flags = flags; |
186 | get_task_struct(tsk); | 186 | get_task_struct(tsk); |
@@ -204,7 +204,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem, | |||
204 | adjustment == -RWSEM_ACTIVE_WRITE_BIAS) | 204 | adjustment == -RWSEM_ACTIVE_WRITE_BIAS) |
205 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); | 205 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); |
206 | 206 | ||
207 | spin_unlock_irq(&sem->wait_lock); | 207 | raw_spin_unlock_irq(&sem->wait_lock); |
208 | 208 | ||
209 | /* wait to be given the lock */ | 209 | /* wait to be given the lock */ |
210 | for (;;) { | 210 | for (;;) { |
@@ -245,13 +245,13 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) | |||
245 | { | 245 | { |
246 | unsigned long flags; | 246 | unsigned long flags; |
247 | 247 | ||
248 | spin_lock_irqsave(&sem->wait_lock, flags); | 248 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
249 | 249 | ||
250 | /* do nothing if list empty */ | 250 | /* do nothing if list empty */ |
251 | if (!list_empty(&sem->wait_list)) | 251 | if (!list_empty(&sem->wait_list)) |
252 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); | 252 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); |
253 | 253 | ||
254 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 254 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
255 | 255 | ||
256 | return sem; | 256 | return sem; |
257 | } | 257 | } |
@@ -265,13 +265,13 @@ struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) | |||
265 | { | 265 | { |
266 | unsigned long flags; | 266 | unsigned long flags; |
267 | 267 | ||
268 | spin_lock_irqsave(&sem->wait_lock, flags); | 268 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
269 | 269 | ||
270 | /* do nothing if list empty */ | 270 | /* do nothing if list empty */ |
271 | if (!list_empty(&sem->wait_list)) | 271 | if (!list_empty(&sem->wait_list)) |
272 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); | 272 | sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); |
273 | 273 | ||
274 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 274 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
275 | 275 | ||
276 | return sem; | 276 | return sem; |
277 | } | 277 | } |
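Both rwsem implementations switch their wait_lock to the raw_spinlock_t API; the usual motivation is PREEMPT_RT, where regular spinlocks can sleep while the raw_ variants stay true busy-wait locks. The calling pattern is otherwise unchanged, as in this sketch (my_wait_lock and the placeholder comment are illustrative):

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(my_wait_lock);

	/* Same shape as the rwsem paths above: IRQ-safe raw spinlock section. */
	static void my_touch_wait_list(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&my_wait_lock, flags);
		/* ... manipulate the wait list here ... */
		raw_spin_unlock_irqrestore(&my_wait_lock, flags);
	}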
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 4ceb05d772ae..3675452b23ca 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * This source code is licensed under the GNU General Public License, | 6 | * This source code is licensed under the GNU General Public License, |
7 | * Version 2. See the file COPYING for more details. | 7 | * Version 2. See the file COPYING for more details. |
8 | */ | 8 | */ |
9 | #include <linux/module.h> | 9 | #include <linux/export.h> |
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <linux/scatterlist.h> | 11 | #include <linux/scatterlist.h> |
12 | #include <linux/highmem.h> | 12 | #include <linux/highmem.h> |
@@ -39,6 +39,25 @@ struct scatterlist *sg_next(struct scatterlist *sg) | |||
39 | EXPORT_SYMBOL(sg_next); | 39 | EXPORT_SYMBOL(sg_next); |
40 | 40 | ||
41 | /** | 41 | /** |
42 | * sg_nents - return total count of entries in scatterlist | ||
43 | * @sg: The scatterlist | ||
44 | * | ||
45 | * Description: | ||
46 | * Allows to know how many entries are in sg, taking into account | ||
47 | * chaining as well | ||
48 | * | ||
49 | **/ | ||
50 | int sg_nents(struct scatterlist *sg) | ||
51 | { | ||
52 | int nents; | ||
53 | for (nents = 0; sg; sg = sg_next(sg)) | ||
54 | nents++; | ||
55 | return nents; | ||
56 | } | ||
57 | EXPORT_SYMBOL(sg_nents); | ||
58 | |||
59 | |||
60 | /** | ||
42 | * sg_last - return the last scatterlist entry in a list | 61 | * sg_last - return the last scatterlist entry in a list |
43 | * @sgl: First entry in the scatterlist | 62 | * @sgl: First entry in the scatterlist |
44 | * @nents: Number of entries in the scatterlist | 63 | * @nents: Number of entries in the scatterlist |
@@ -279,14 +298,6 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, | |||
279 | if (!left) | 298 | if (!left) |
280 | sg_mark_end(&sg[sg_size - 1]); | 299 | sg_mark_end(&sg[sg_size - 1]); |
281 | 300 | ||
282 | /* | ||
283 | * only really needed for mempool backed sg allocations (like | ||
284 | * SCSI), a possible improvement here would be to pass the | ||
285 | * table pointer into the allocator and let that clear these | ||
286 | * flags | ||
287 | */ | ||
288 | gfp_mask &= ~__GFP_WAIT; | ||
289 | gfp_mask |= __GFP_HIGH; | ||
290 | prv = sg; | 301 | prv = sg; |
291 | } while (left); | 302 | } while (left); |
292 | 303 | ||
@@ -319,6 +330,70 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) | |||
319 | EXPORT_SYMBOL(sg_alloc_table); | 330 | EXPORT_SYMBOL(sg_alloc_table); |
320 | 331 | ||
321 | /** | 332 | /** |
333 | * sg_alloc_table_from_pages - Allocate and initialize an sg table from | ||
334 | * an array of pages | ||
335 | * @sgt: The sg table header to use | ||
336 | * @pages: Pointer to an array of page pointers | ||
337 | * @n_pages: Number of pages in the pages array | ||
338 | * @offset: Offset from start of the first page to the start of a buffer | ||
339 | * @size: Number of valid bytes in the buffer (after offset) | ||
340 | * @gfp_mask: GFP allocation mask | ||
341 | * | ||
342 | * Description: | ||
343 | * Allocate and initialize an sg table from a list of pages. Contiguous | ||
344 | * ranges of the pages are squashed into a single scatterlist node. A user | ||
345 | * may provide an offset at the start and a size of valid data in a buffer | ||
346 | * specified by the page array. The returned sg table is released by | ||
347 | * sg_free_table. | ||
348 | * | ||
349 | * Returns: | ||
350 | * 0 on success, negative error on failure | ||
351 | */ | ||
352 | int sg_alloc_table_from_pages(struct sg_table *sgt, | ||
353 | struct page **pages, unsigned int n_pages, | ||
354 | unsigned long offset, unsigned long size, | ||
355 | gfp_t gfp_mask) | ||
356 | { | ||
357 | unsigned int chunks; | ||
358 | unsigned int i; | ||
359 | unsigned int cur_page; | ||
360 | int ret; | ||
361 | struct scatterlist *s; | ||
362 | |||
363 | /* compute number of contiguous chunks */ | ||
364 | chunks = 1; | ||
365 | for (i = 1; i < n_pages; ++i) | ||
366 | if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) | ||
367 | ++chunks; | ||
368 | |||
369 | ret = sg_alloc_table(sgt, chunks, gfp_mask); | ||
370 | if (unlikely(ret)) | ||
371 | return ret; | ||
372 | |||
373 | /* merging chunks and putting them into the scatterlist */ | ||
374 | cur_page = 0; | ||
375 | for_each_sg(sgt->sgl, s, sgt->orig_nents, i) { | ||
376 | unsigned long chunk_size; | ||
377 | unsigned int j; | ||
378 | |||
379 | /* look for the end of the current chunk */ | ||
380 | for (j = cur_page + 1; j < n_pages; ++j) | ||
381 | if (page_to_pfn(pages[j]) != | ||
382 | page_to_pfn(pages[j - 1]) + 1) | ||
383 | break; | ||
384 | |||
385 | chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset; | ||
386 | sg_set_page(s, pages[cur_page], min(size, chunk_size), offset); | ||
387 | size -= chunk_size; | ||
388 | offset = 0; | ||
389 | cur_page = j; | ||
390 | } | ||
391 | |||
392 | return 0; | ||
393 | } | ||
394 | EXPORT_SYMBOL(sg_alloc_table_from_pages); | ||
395 | |||
396 | /** | ||
322 | * sg_miter_start - start mapping iteration over a sg list | 397 | * sg_miter_start - start mapping iteration over a sg list |
323 | * @miter: sg mapping iter to be started | 398 | * @miter: sg mapping iter to be started |
324 | * @sgl: sg list to iterate over | 399 | * @sgl: sg list to iterate over |
@@ -348,14 +423,13 @@ EXPORT_SYMBOL(sg_miter_start); | |||
348 | * @miter: sg mapping iter to proceed | 423 | * @miter: sg mapping iter to proceed |
349 | * | 424 | * |
350 | * Description: | 425 | * Description: |
351 | * Proceeds @miter@ to the next mapping. @miter@ should have been | 426 | * Proceeds @miter to the next mapping. @miter should have been started |
352 | * started using sg_miter_start(). On successful return, | 427 | * using sg_miter_start(). On successful return, @miter->page, |
353 | * @miter@->page, @miter@->addr and @miter@->length point to the | 428 | * @miter->addr and @miter->length point to the current mapping. |
354 | * current mapping. | ||
355 | * | 429 | * |
356 | * Context: | 430 | * Context: |
357 | * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till | 431 | * Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled |
358 | * @miter@ is stopped. May sleep if !SG_MITER_ATOMIC. | 432 | * till @miter is stopped. May sleep if !SG_MITER_ATOMIC. |
359 | * | 433 | * |
360 | * Returns: | 434 | * Returns: |
361 | * true if @miter contains the next mapping. false if end of sg | 435 | * true if @miter contains the next mapping. false if end of sg |
@@ -390,7 +464,7 @@ bool sg_miter_next(struct sg_mapping_iter *miter) | |||
390 | miter->consumed = miter->length; | 464 | miter->consumed = miter->length; |
391 | 465 | ||
392 | if (miter->__flags & SG_MITER_ATOMIC) | 466 | if (miter->__flags & SG_MITER_ATOMIC) |
393 | miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off; | 467 | miter->addr = kmap_atomic(miter->page) + off; |
394 | else | 468 | else |
395 | miter->addr = kmap(miter->page) + off; | 469 | miter->addr = kmap(miter->page) + off; |
396 | 470 | ||
@@ -409,7 +483,8 @@ EXPORT_SYMBOL(sg_miter_next); | |||
409 | * resources (kmap) need to be released during iteration. | 483 | * resources (kmap) need to be released during iteration. |
410 | * | 484 | * |
411 | * Context: | 485 | * Context: |
412 | * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise. | 486 | * Preemption disabled if the SG_MITER_ATOMIC is set. Don't care |
487 | * otherwise. | ||
413 | */ | 488 | */ |
414 | void sg_miter_stop(struct sg_mapping_iter *miter) | 489 | void sg_miter_stop(struct sg_mapping_iter *miter) |
415 | { | 490 | { |
@@ -423,8 +498,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter) | |||
423 | flush_kernel_dcache_page(miter->page); | 498 | flush_kernel_dcache_page(miter->page); |
424 | 499 | ||
425 | if (miter->__flags & SG_MITER_ATOMIC) { | 500 | if (miter->__flags & SG_MITER_ATOMIC) { |
426 | WARN_ON(!irqs_disabled()); | 501 | WARN_ON_ONCE(preemptible()); |
427 | kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ); | 502 | kunmap_atomic(miter->addr); |
428 | } else | 503 | } else |
429 | kunmap(miter->page); | 504 | kunmap(miter->page); |
430 | 505 | ||
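The new sg_alloc_table_from_pages() collapses physically contiguous pages into single scatterlist entries. A hedged caller sketch, assuming the pages are already pinned (my_build_sgt and its logging are illustrative):

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>

	/*
	 * Build an sg table over @n_pages pages; the buffer starts @offset
	 * bytes into the first page and is @size bytes long.
	 */
	static int my_build_sgt(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned long offset,
				unsigned long size)
	{
		int ret;

		ret = sg_alloc_table_from_pages(sgt, pages, n_pages, offset,
						size, GFP_KERNEL);
		if (ret)
			return ret;

		pr_debug("%lu bytes mapped into %u sg entries\n", size, sgt->nents);
		return 0;	/* release later with sg_free_table(sgt) */
	}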
diff --git a/lib/sha1.c b/lib/sha1.c index f33271dd00cb..1df191e04a24 100644 --- a/lib/sha1.c +++ b/lib/sha1.c | |||
@@ -6,8 +6,9 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/module.h> | 9 | #include <linux/export.h> |
10 | #include <linux/bitops.h> | 10 | #include <linux/bitops.h> |
11 | #include <linux/cryptohash.h> | ||
11 | #include <asm/unaligned.h> | 12 | #include <asm/unaligned.h> |
12 | 13 | ||
13 | /* | 14 | /* |
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 4689cb073da4..4c0d0e51d49e 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * DEBUG_PREEMPT variant of smp_processor_id(). | 4 | * DEBUG_PREEMPT variant of smp_processor_id(). |
5 | */ | 5 | */ |
6 | #include <linux/module.h> | 6 | #include <linux/export.h> |
7 | #include <linux/kallsyms.h> | 7 | #include <linux/kallsyms.h> |
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | 9 | ||
@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor_id(void) | |||
22 | * Kernel threads bound to a single CPU can safely use | 22 | * Kernel threads bound to a single CPU can safely use |
23 | * smp_processor_id(): | 23 | * smp_processor_id(): |
24 | */ | 24 | */ |
25 | if (cpumask_equal(¤t->cpus_allowed, cpumask_of(this_cpu))) | 25 | if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu))) |
26 | goto out; | 26 | goto out; |
27 | 27 | ||
28 | /* | 28 | /* |
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 4755b98b6dfb..0374a596cffa 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c | |||
@@ -11,7 +11,7 @@ | |||
11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
12 | #include <linux/debug_locks.h> | 12 | #include <linux/debug_locks.h> |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/module.h> | 14 | #include <linux/export.h> |
15 | 15 | ||
16 | void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, | 16 | void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, |
17 | struct lock_class_key *key) | 17 | struct lock_class_key *key) |
@@ -49,19 +49,16 @@ void __rwlock_init(rwlock_t *lock, const char *name, | |||
49 | 49 | ||
50 | EXPORT_SYMBOL(__rwlock_init); | 50 | EXPORT_SYMBOL(__rwlock_init); |
51 | 51 | ||
52 | static void spin_bug(raw_spinlock_t *lock, const char *msg) | 52 | static void spin_dump(raw_spinlock_t *lock, const char *msg) |
53 | { | 53 | { |
54 | struct task_struct *owner = NULL; | 54 | struct task_struct *owner = NULL; |
55 | 55 | ||
56 | if (!debug_locks_off()) | ||
57 | return; | ||
58 | |||
59 | if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) | 56 | if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) |
60 | owner = lock->owner; | 57 | owner = lock->owner; |
61 | printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", | 58 | printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", |
62 | msg, raw_smp_processor_id(), | 59 | msg, raw_smp_processor_id(), |
63 | current->comm, task_pid_nr(current)); | 60 | current->comm, task_pid_nr(current)); |
64 | printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, " | 61 | printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, " |
65 | ".owner_cpu: %d\n", | 62 | ".owner_cpu: %d\n", |
66 | lock, lock->magic, | 63 | lock, lock->magic, |
67 | owner ? owner->comm : "<none>", | 64 | owner ? owner->comm : "<none>", |
@@ -70,6 +67,14 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg) | |||
70 | dump_stack(); | 67 | dump_stack(); |
71 | } | 68 | } |
72 | 69 | ||
70 | static void spin_bug(raw_spinlock_t *lock, const char *msg) | ||
71 | { | ||
72 | if (!debug_locks_off()) | ||
73 | return; | ||
74 | |||
75 | spin_dump(lock, msg); | ||
76 | } | ||
77 | |||
73 | #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) | 78 | #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) |
74 | 79 | ||
75 | static inline void | 80 | static inline void |
@@ -102,27 +107,27 @@ static void __spin_lock_debug(raw_spinlock_t *lock) | |||
102 | { | 107 | { |
103 | u64 i; | 108 | u64 i; |
104 | u64 loops = loops_per_jiffy * HZ; | 109 | u64 loops = loops_per_jiffy * HZ; |
105 | int print_once = 1; | ||
106 | 110 | ||
107 | for (;;) { | 111 | for (i = 0; i < loops; i++) { |
108 | for (i = 0; i < loops; i++) { | 112 | if (arch_spin_trylock(&lock->raw_lock)) |
109 | if (arch_spin_trylock(&lock->raw_lock)) | 113 | return; |
110 | return; | 114 | __delay(1); |
111 | __delay(1); | 115 | } |
112 | } | 116 | /* lockup suspected: */ |
113 | /* lockup suspected: */ | 117 | spin_dump(lock, "lockup suspected"); |
114 | if (print_once) { | ||
115 | print_once = 0; | ||
116 | printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, " | ||
117 | "%s/%d, %p\n", | ||
118 | raw_smp_processor_id(), current->comm, | ||
119 | task_pid_nr(current), lock); | ||
120 | dump_stack(); | ||
121 | #ifdef CONFIG_SMP | 118 | #ifdef CONFIG_SMP |
122 | trigger_all_cpu_backtrace(); | 119 | trigger_all_cpu_backtrace(); |
123 | #endif | 120 | #endif |
124 | } | 121 | |
125 | } | 122 | /* |
123 | * The trylock above was causing a livelock. Give the lower level arch | ||
124 | * specific lock code a chance to acquire the lock. We have already | ||
125 | * printed a warning/backtrace at this point. The non-debug arch | ||
126 | * specific code might actually succeed in acquiring the lock. If it is | ||
127 | * not successful, the end-result is the same - there is no forward | ||
128 | * progress. | ||
129 | */ | ||
130 | arch_spin_lock(&lock->raw_lock); | ||
126 | } | 131 | } |
127 | 132 | ||
128 | void do_raw_spin_lock(raw_spinlock_t *lock) | 133 | void do_raw_spin_lock(raw_spinlock_t *lock) |
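Reassembled from the interleaved hunks above, the new __spin_lock_debug() is one bounded trylock loop, a single diagnostic dump, and then an unconditional arch_spin_lock() so that a false positive (or a livelock caused by the trylock itself) still makes forward progress. This is only a reading aid; the hunks above are authoritative.

	static void __spin_lock_debug(raw_spinlock_t *lock)
	{
		u64 i;
		u64 loops = loops_per_jiffy * HZ;

		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;			/* acquired while polling */
			__delay(1);
		}
		/* lockup suspected: report once, then take the lock for real */
		spin_dump(lock, "lockup suspected");
	#ifdef CONFIG_SMP
		trigger_all_cpu_backtrace();
	#endif
		arch_spin_lock(&lock->raw_lock);
	}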
diff --git a/lib/stmp_device.c b/lib/stmp_device.c new file mode 100644 index 000000000000..8ac9bcc4289a --- /dev/null +++ b/lib/stmp_device.c | |||
@@ -0,0 +1,80 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1999 ARM Limited | ||
3 | * Copyright (C) 2000 Deep Blue Solutions Ltd | ||
4 | * Copyright 2006-2007,2010 Freescale Semiconductor, Inc. All Rights Reserved. | ||
5 | * Copyright 2008 Juergen Beisert, kernel@pengutronix.de | ||
6 | * Copyright 2009 Ilya Yanok, Emcraft Systems Ltd, yanok@emcraft.com | ||
7 | * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | */ | ||
14 | |||
15 | #include <linux/io.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/stmp_device.h> | ||
20 | |||
21 | #define STMP_MODULE_CLKGATE (1 << 30) | ||
22 | #define STMP_MODULE_SFTRST (1 << 31) | ||
23 | |||
24 | /* | ||
25 | * Clear the bit and poll it cleared. This is usually called with | ||
26 | * a reset address and mask being either SFTRST(bit 31) or CLKGATE | ||
27 | * (bit 30). | ||
28 | */ | ||
29 | static int stmp_clear_poll_bit(void __iomem *addr, u32 mask) | ||
30 | { | ||
31 | int timeout = 0x400; | ||
32 | |||
33 | writel(mask, addr + STMP_OFFSET_REG_CLR); | ||
34 | udelay(1); | ||
35 | while ((readl(addr) & mask) && --timeout) | ||
36 | /* nothing */; | ||
37 | |||
38 | return !timeout; | ||
39 | } | ||
40 | |||
41 | int stmp_reset_block(void __iomem *reset_addr) | ||
42 | { | ||
43 | int ret; | ||
44 | int timeout = 0x400; | ||
45 | |||
46 | /* clear and poll SFTRST */ | ||
47 | ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST); | ||
48 | if (unlikely(ret)) | ||
49 | goto error; | ||
50 | |||
51 | /* clear CLKGATE */ | ||
52 | writel(STMP_MODULE_CLKGATE, reset_addr + STMP_OFFSET_REG_CLR); | ||
53 | |||
54 | /* set SFTRST to reset the block */ | ||
55 | writel(STMP_MODULE_SFTRST, reset_addr + STMP_OFFSET_REG_SET); | ||
56 | udelay(1); | ||
57 | |||
58 | /* poll CLKGATE becoming set */ | ||
59 | while ((!(readl(reset_addr) & STMP_MODULE_CLKGATE)) && --timeout) | ||
60 | /* nothing */; | ||
61 | if (unlikely(!timeout)) | ||
62 | goto error; | ||
63 | |||
64 | /* clear and poll SFTRST */ | ||
65 | ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST); | ||
66 | if (unlikely(ret)) | ||
67 | goto error; | ||
68 | |||
69 | /* clear and poll CLKGATE */ | ||
70 | ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_CLKGATE); | ||
71 | if (unlikely(ret)) | ||
72 | goto error; | ||
73 | |||
74 | return 0; | ||
75 | |||
76 | error: | ||
77 | pr_err("%s(%p): module reset timeout\n", __func__, reset_addr); | ||
78 | return -ETIMEDOUT; | ||
79 | } | ||
80 | EXPORT_SYMBOL(stmp_reset_block); | ||
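stmp_reset_block() soft-resets an STMP-style register block and leaves it with SFTRST and CLKGATE cleared, i.e. out of reset with its clock running. A hypothetical driver fragment might use it like this (my_block_init and the probe context are assumptions, not part of the patch):

	#include <linux/io.h>
	#include <linux/stmp_device.h>

	/* Hypothetical probe fragment: bring an STMP-style block out of reset. */
	static int my_block_init(void __iomem *base)
	{
		int ret;

		ret = stmp_reset_block(base);	/* soft-reset, then ungate the clock */
		if (ret)
			return ret;		/* -ETIMEDOUT if the block never reacts */

		/* the block is now reset with CLKGATE cleared; program it here */
		return 0;
	}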
diff --git a/lib/string.c b/lib/string.c index 01fad9b203e1..e5878de4f101 100644 --- a/lib/string.c +++ b/lib/string.c | |||
@@ -22,7 +22,10 @@ | |||
22 | #include <linux/types.h> | 22 | #include <linux/types.h> |
23 | #include <linux/string.h> | 23 | #include <linux/string.h> |
24 | #include <linux/ctype.h> | 24 | #include <linux/ctype.h> |
25 | #include <linux/module.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/export.h> | ||
27 | #include <linux/bug.h> | ||
28 | #include <linux/errno.h> | ||
26 | 29 | ||
27 | #ifndef __HAVE_ARCH_STRNICMP | 30 | #ifndef __HAVE_ARCH_STRNICMP |
28 | /** | 31 | /** |
@@ -360,7 +363,6 @@ char *strim(char *s) | |||
360 | size_t size; | 363 | size_t size; |
361 | char *end; | 364 | char *end; |
362 | 365 | ||
363 | s = skip_spaces(s); | ||
364 | size = strlen(s); | 366 | size = strlen(s); |
365 | if (!size) | 367 | if (!size) |
366 | return s; | 368 | return s; |
@@ -370,7 +372,7 @@ char *strim(char *s) | |||
370 | end--; | 372 | end--; |
371 | *(end + 1) = '\0'; | 373 | *(end + 1) = '\0'; |
372 | 374 | ||
373 | return s; | 375 | return skip_spaces(s); |
374 | } | 376 | } |
375 | EXPORT_SYMBOL(strim); | 377 | EXPORT_SYMBOL(strim); |
376 | 378 | ||
@@ -756,3 +758,69 @@ void *memchr(const void *s, int c, size_t n) | |||
756 | } | 758 | } |
757 | EXPORT_SYMBOL(memchr); | 759 | EXPORT_SYMBOL(memchr); |
758 | #endif | 760 | #endif |
761 | |||
762 | static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes) | ||
763 | { | ||
764 | while (bytes) { | ||
765 | if (*start != value) | ||
766 | return (void *)start; | ||
767 | start++; | ||
768 | bytes--; | ||
769 | } | ||
770 | return NULL; | ||
771 | } | ||
772 | |||
773 | /** | ||
774 | * memchr_inv - Find a character other than @c in an area of memory. | ||
775 | * @start: The memory area | ||
776 | * @c: Find a character other than c | ||
777 | * @bytes: The size of the area. | ||
778 | * | ||
779 | * returns the address of the first character other than @c, or %NULL | ||
780 | * if the whole buffer contains just @c. | ||
781 | */ | ||
782 | void *memchr_inv(const void *start, int c, size_t bytes) | ||
783 | { | ||
784 | u8 value = c; | ||
785 | u64 value64; | ||
786 | unsigned int words, prefix; | ||
787 | |||
788 | if (bytes <= 16) | ||
789 | return check_bytes8(start, value, bytes); | ||
790 | |||
791 | value64 = value; | ||
792 | #if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 | ||
793 | value64 *= 0x0101010101010101; | ||
794 | #elif defined(ARCH_HAS_FAST_MULTIPLIER) | ||
795 | value64 *= 0x01010101; | ||
796 | value64 |= value64 << 32; | ||
797 | #else | ||
798 | value64 |= value64 << 8; | ||
799 | value64 |= value64 << 16; | ||
800 | value64 |= value64 << 32; | ||
801 | #endif | ||
802 | |||
803 | prefix = (unsigned long)start % 8; | ||
804 | if (prefix) { | ||
805 | u8 *r; | ||
806 | |||
807 | prefix = 8 - prefix; | ||
808 | r = check_bytes8(start, value, prefix); | ||
809 | if (r) | ||
810 | return r; | ||
811 | start += prefix; | ||
812 | bytes -= prefix; | ||
813 | } | ||
814 | |||
815 | words = bytes / 8; | ||
816 | |||
817 | while (words) { | ||
818 | if (*(u64 *)start != value64) | ||
819 | return check_bytes8(start, value, 8); | ||
820 | start += 8; | ||
821 | words--; | ||
822 | } | ||
823 | |||
824 | return check_bytes8(start, value, bytes % 8); | ||
825 | } | ||
826 | EXPORT_SYMBOL(memchr_inv); | ||
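memchr_inv() is the natural tool for "is this buffer entirely X?" checks, for example verifying that reserved fields are zero-filled before accepting user input. A minimal sketch; the structure and field names are made up for illustration.

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>

struct example_ioctl_arg {
	u32 flags;
	u8  reserved[12];	/* must be zero for forward compatibility */
};

static int example_check_reserved(const struct example_ioctl_arg *arg)
{
	/* memchr_inv() returns NULL only if every byte equals 0. */
	if (memchr_inv(arg->reserved, 0, sizeof(arg->reserved)))
		return -EINVAL;
	return 0;
}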
diff --git a/lib/string_helpers.c b/lib/string_helpers.c index ab431d4cc970..1cffc223bff5 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c | |||
@@ -5,7 +5,7 @@ | |||
5 | */ | 5 | */ |
6 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
7 | #include <linux/math64.h> | 7 | #include <linux/math64.h> |
8 | #include <linux/module.h> | 8 | #include <linux/export.h> |
9 | #include <linux/string_helpers.h> | 9 | #include <linux/string_helpers.h> |
10 | 10 | ||
11 | /** | 11 | /** |
@@ -23,15 +23,15 @@ | |||
23 | int string_get_size(u64 size, const enum string_size_units units, | 23 | int string_get_size(u64 size, const enum string_size_units units, |
24 | char *buf, int len) | 24 | char *buf, int len) |
25 | { | 25 | { |
26 | const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB", | 26 | static const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB", |
27 | "EB", "ZB", "YB", NULL}; | 27 | "EB", "ZB", "YB", NULL}; |
28 | const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB", | 28 | static const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB", |
29 | "EiB", "ZiB", "YiB", NULL }; | 29 | "EiB", "ZiB", "YiB", NULL }; |
30 | const char **units_str[] = { | 30 | static const char **units_str[] = { |
31 | [STRING_UNITS_10] = units_10, | 31 | [STRING_UNITS_10] = units_10, |
32 | [STRING_UNITS_2] = units_2, | 32 | [STRING_UNITS_2] = units_2, |
33 | }; | 33 | }; |
34 | const unsigned int divisor[] = { | 34 | static const unsigned int divisor[] = { |
35 | [STRING_UNITS_10] = 1000, | 35 | [STRING_UNITS_10] = 1000, |
36 | [STRING_UNITS_2] = 1024, | 36 | [STRING_UNITS_2] = 1024, |
37 | }; | 37 | }; |
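The tables above are only ever read, so marking them static const moves them out of the function's stack frame and into read-only data; callers are unaffected. For reference, a small usage sketch of the helper (buffer size and message text are arbitrary):

#include <linux/kernel.h>
#include <linux/string_helpers.h>

static void example_report_capacity(u64 bytes)
{
	char buf[16];

	/* Decimal units (kB, MB, ...) */
	string_get_size(bytes, STRING_UNITS_10, buf, sizeof(buf));
	pr_info("capacity: %s\n", buf);

	/* Binary units (KiB, MiB, ...) */
	string_get_size(bytes, STRING_UNITS_2, buf, sizeof(buf));
	pr_info("capacity: %s\n", buf);
}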
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c new file mode 100644 index 000000000000..bb2b201d6ad0 --- /dev/null +++ b/lib/strncpy_from_user.c | |||
@@ -0,0 +1,113 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/uaccess.h> | ||
3 | #include <linux/kernel.h> | ||
4 | #include <linux/errno.h> | ||
5 | |||
6 | #include <asm/byteorder.h> | ||
7 | #include <asm/word-at-a-time.h> | ||
8 | |||
9 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
10 | #define IS_UNALIGNED(src, dst) 0 | ||
11 | #else | ||
12 | #define IS_UNALIGNED(src, dst) \ | ||
13 | (((long) dst | (long) src) & (sizeof(long) - 1)) | ||
14 | #endif | ||
15 | |||
16 | /* | ||
17 | * Do a strncpy, return length of string without final '\0'. | ||
18 | * 'count' is the user-supplied count (return 'count' if we | ||
19 | * hit it), 'max' is the address space maximum (and we return | ||
20 | * -EFAULT if we hit it). | ||
21 | */ | ||
22 | static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max) | ||
23 | { | ||
24 | const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; | ||
25 | long res = 0; | ||
26 | |||
27 | /* | ||
28 | * Truncate 'max' to the user-specified limit, so that | ||
29 | * we only have one limit we need to check in the loop | ||
30 | */ | ||
31 | if (max > count) | ||
32 | max = count; | ||
33 | |||
34 | if (IS_UNALIGNED(src, dst)) | ||
35 | goto byte_at_a_time; | ||
36 | |||
37 | while (max >= sizeof(unsigned long)) { | ||
38 | unsigned long c, data; | ||
39 | |||
40 | /* Fall back to byte-at-a-time if we get a page fault */ | ||
41 | if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) | ||
42 | break; | ||
43 | *(unsigned long *)(dst+res) = c; | ||
44 | if (has_zero(c, &data, &constants)) { | ||
45 | data = prep_zero_mask(c, data, &constants); | ||
46 | data = create_zero_mask(data); | ||
47 | return res + find_zero(data); | ||
48 | } | ||
49 | res += sizeof(unsigned long); | ||
50 | max -= sizeof(unsigned long); | ||
51 | } | ||
52 | |||
53 | byte_at_a_time: | ||
54 | while (max) { | ||
55 | char c; | ||
56 | |||
57 | if (unlikely(__get_user(c,src+res))) | ||
58 | return -EFAULT; | ||
59 | dst[res] = c; | ||
60 | if (!c) | ||
61 | return res; | ||
62 | res++; | ||
63 | max--; | ||
64 | } | ||
65 | |||
66 | /* | ||
67 | * Uhhuh. We hit 'max'. But was that the user-specified maximum | ||
68 | * too? If so, that's ok - we got as much as the user asked for. | ||
69 | */ | ||
70 | if (res >= count) | ||
71 | return res; | ||
72 | |||
73 | /* | ||
74 | * Nope: we hit the address space limit, and we still had more | ||
75 | * characters the caller would have wanted. That's an EFAULT. | ||
76 | */ | ||
77 | return -EFAULT; | ||
78 | } | ||
79 | |||
80 | /** | ||
81 | * strncpy_from_user: - Copy a NUL terminated string from userspace. | ||
82 | * @dst: Destination address, in kernel space. This buffer must be at | ||
83 | * least @count bytes long. | ||
84 | * @src: Source address, in user space. | ||
85 | * @count: Maximum number of bytes to copy, including the trailing NUL. | ||
86 | * | ||
87 | * Copies a NUL-terminated string from userspace to kernel space. | ||
88 | * | ||
89 | * On success, returns the length of the string (not including the trailing | ||
90 | * NUL). | ||
91 | * | ||
92 | * If access to userspace fails, returns -EFAULT (some data may have been | ||
93 | * copied). | ||
94 | * | ||
95 | * If @count is smaller than the length of the string, copies @count bytes | ||
96 | * and returns @count. | ||
97 | */ | ||
98 | long strncpy_from_user(char *dst, const char __user *src, long count) | ||
99 | { | ||
100 | unsigned long max_addr, src_addr; | ||
101 | |||
102 | if (unlikely(count <= 0)) | ||
103 | return 0; | ||
104 | |||
105 | max_addr = user_addr_max(); | ||
106 | src_addr = (unsigned long)src; | ||
107 | if (likely(src_addr < max_addr)) { | ||
108 | unsigned long max = max_addr - src_addr; | ||
109 | return do_strncpy_from_user(dst, src, count, max); | ||
110 | } | ||
111 | return -EFAULT; | ||
112 | } | ||
113 | EXPORT_SYMBOL(strncpy_from_user); | ||
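The fast path above leans on the word-at-a-time helpers (has_zero(), prep_zero_mask(), create_zero_mask(), find_zero() from <asm/word-at-a-time.h>) to spot a NUL anywhere inside a full long without inspecting individual bytes. The bit trick commonly used to implement the zero test on little-endian machines can be demonstrated in ordinary user-space C; this is a sketch of the underlying idea, not the kernel helpers themselves.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONES  0x0101010101010101ULL
#define HIGHS 0x8080808080808080ULL

/* Nonzero iff some byte of x is 0x00: subtracting 1 from a zero byte
 * borrows into its high bit, and "& ~x" masks out bytes >= 0x80. */
static uint64_t has_zero_byte(uint64_t x)
{
	return (x - ONES) & ~x & HIGHS;
}

int main(void)
{
	const char s[8] = "abc";	/* NUL at offset 3, rest zero padded */
	uint64_t w;

	memcpy(&w, s, sizeof(w));
	printf("zero-byte mask = %#llx\n",
	       (unsigned long long)has_zero_byte(w));
	return 0;
}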
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c new file mode 100644 index 000000000000..a28df5206d95 --- /dev/null +++ b/lib/strnlen_user.c | |||
@@ -0,0 +1,138 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/export.h> | ||
3 | #include <linux/uaccess.h> | ||
4 | |||
5 | #include <asm/word-at-a-time.h> | ||
6 | |||
7 | /* Set bits in the first 'n' bytes when loaded from memory */ | ||
8 | #ifdef __LITTLE_ENDIAN | ||
9 | # define aligned_byte_mask(n) ((1ul << 8*(n))-1) | ||
10 | #else | ||
11 | # define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n))) | ||
12 | #endif | ||
13 | |||
14 | /* | ||
15 | * Do a strnlen, return length of string *with* final '\0'. | ||
16 | * 'count' is the user-supplied count, while 'max' is the | ||
17 | * address space maximum. | ||
18 | * | ||
19 | * Return 0 for exceptions (which includes hitting the address | ||
20 | * space maximum), or 'count+1' if hitting the user-supplied | ||
21 | * maximum count. | ||
22 | * | ||
23 | * NOTE! We can sometimes overshoot the user-supplied maximum | ||
24 | * if it fits in an aligned 'long'. The caller needs to check | ||
25 | * the return value against "> max". | ||
26 | */ | ||
27 | static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max) | ||
28 | { | ||
29 | const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; | ||
30 | long align, res = 0; | ||
31 | unsigned long c; | ||
32 | |||
33 | /* | ||
34 | * Truncate 'max' to the user-specified limit, so that | ||
35 | * we only have one limit we need to check in the loop | ||
36 | */ | ||
37 | if (max > count) | ||
38 | max = count; | ||
39 | |||
40 | /* | ||
41 | * Do everything aligned. But that means that we | ||
42 | * need to also expand the maximum.. | ||
43 | */ | ||
44 | align = (sizeof(long) - 1) & (unsigned long)src; | ||
45 | src -= align; | ||
46 | max += align; | ||
47 | |||
48 | if (unlikely(__get_user(c,(unsigned long __user *)src))) | ||
49 | return 0; | ||
50 | c |= aligned_byte_mask(align); | ||
51 | |||
52 | for (;;) { | ||
53 | unsigned long data; | ||
54 | if (has_zero(c, &data, &constants)) { | ||
55 | data = prep_zero_mask(c, data, &constants); | ||
56 | data = create_zero_mask(data); | ||
57 | return res + find_zero(data) + 1 - align; | ||
58 | } | ||
59 | res += sizeof(unsigned long); | ||
60 | if (unlikely(max < sizeof(unsigned long))) | ||
61 | break; | ||
62 | max -= sizeof(unsigned long); | ||
63 | if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) | ||
64 | return 0; | ||
65 | } | ||
66 | res -= align; | ||
67 | |||
68 | /* | ||
69 | * Uhhuh. We hit 'max'. But was that the user-specified maximum | ||
70 | * too? If so, return the marker for "too long". | ||
71 | */ | ||
72 | if (res >= count) | ||
73 | return count+1; | ||
74 | |||
75 | /* | ||
76 | * Nope: we hit the address space limit, and we still had more | ||
77 | * characters the caller would have wanted. That's 0. | ||
78 | */ | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | /** | ||
83 | * strnlen_user: - Get the size of a user string INCLUDING final NUL. | ||
84 | * @str: The string to measure. | ||
85 | * @count: Maximum count (including NUL character) | ||
86 | * | ||
87 | * Context: User context only. This function may sleep. | ||
88 | * | ||
89 | * Get the size of a NUL-terminated string in user space. | ||
90 | * | ||
91 | * Returns the size of the string INCLUDING the terminating NUL. | ||
92 | * If the string is too long, returns 'count+1'. | ||
93 | * On exception (or invalid count), returns 0. | ||
94 | */ | ||
95 | long strnlen_user(const char __user *str, long count) | ||
96 | { | ||
97 | unsigned long max_addr, src_addr; | ||
98 | |||
99 | if (unlikely(count <= 0)) | ||
100 | return 0; | ||
101 | |||
102 | max_addr = user_addr_max(); | ||
103 | src_addr = (unsigned long)str; | ||
104 | if (likely(src_addr < max_addr)) { | ||
105 | unsigned long max = max_addr - src_addr; | ||
106 | return do_strnlen_user(str, count, max); | ||
107 | } | ||
108 | return 0; | ||
109 | } | ||
110 | EXPORT_SYMBOL(strnlen_user); | ||
111 | |||
112 | /** | ||
113 | * strlen_user: - Get the size of a user string INCLUDING final NUL. | ||
114 | * @str: The string to measure. | ||
115 | * | ||
116 | * Context: User context only. This function may sleep. | ||
117 | * | ||
118 | * Get the size of a NUL-terminated string in user space. | ||
119 | * | ||
120 | * Returns the size of the string INCLUDING the terminating NUL. | ||
121 | * On exception, returns 0. | ||
122 | * | ||
123 | * If there is a limit on the length of a valid string, you may wish to | ||
124 | * consider using strnlen_user() instead. | ||
125 | */ | ||
126 | long strlen_user(const char __user *str) | ||
127 | { | ||
128 | unsigned long max_addr, src_addr; | ||
129 | |||
130 | max_addr = user_addr_max(); | ||
131 | src_addr = (unsigned long)str; | ||
132 | if (likely(src_addr < max_addr)) { | ||
133 | unsigned long max = max_addr - src_addr; | ||
134 | return do_strnlen_user(str, ~0ul, max); | ||
135 | } | ||
136 | return 0; | ||
137 | } | ||
138 | EXPORT_SYMBOL(strlen_user); | ||
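The alignment handling is the subtle part of do_strnlen_user(): the first aligned word read may contain up to sizeof(long)-1 bytes that precede the string, and a stray NUL among them must not terminate the scan early. aligned_byte_mask(align) forces exactly those leading bytes to all-ones before the zero search, and the "- align" terms undo the offset afterwards. A user-space sketch of the little-endian case, just evaluating the mask for a misaligned pointer:

#include <stdio.h>

/* Little-endian variant of the mask used above: all-ones in the
 * first 'n' bytes of a word as it is loaded from memory. */
#define aligned_byte_mask_le(n) ((1ul << 8 * (n)) - 1)

int main(void)
{
	unsigned long align = 3;	/* string starts 3 bytes into the word */

	/* Bytes 0..2 of the first word become 0xff, so a zero byte in
	 * front of the string cannot be mistaken for its terminator. */
	printf("mask for align=%lu: %#lx\n", align,
	       aligned_byte_mask_le(align));
	return 0;
}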
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 99093b396145..f114bf6a8e13 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/cache.h> | 20 | #include <linux/cache.h> |
21 | #include <linux/dma-mapping.h> | 21 | #include <linux/dma-mapping.h> |
22 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
23 | #include <linux/module.h> | 23 | #include <linux/export.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
25 | #include <linux/string.h> | 25 | #include <linux/string.h> |
26 | #include <linux/swiotlb.h> | 26 | #include <linux/swiotlb.h> |
@@ -110,11 +110,11 @@ setup_io_tlb_npages(char *str) | |||
110 | __setup("swiotlb=", setup_io_tlb_npages); | 110 | __setup("swiotlb=", setup_io_tlb_npages); |
111 | /* make io_tlb_overflow tunable too? */ | 111 | /* make io_tlb_overflow tunable too? */ |
112 | 112 | ||
113 | unsigned long swioltb_nr_tbl(void) | 113 | unsigned long swiotlb_nr_tbl(void) |
114 | { | 114 | { |
115 | return io_tlb_nslabs; | 115 | return io_tlb_nslabs; |
116 | } | 116 | } |
117 | 117 | EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); | |
118 | /* Note that this doesn't work with highmem page */ | 118 | /* Note that this doesn't work with highmem page */ |
119 | static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, | 119 | static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, |
120 | volatile void *address) | 120 | volatile void *address) |
@@ -130,11 +130,9 @@ void swiotlb_print_info(void) | |||
130 | pstart = virt_to_phys(io_tlb_start); | 130 | pstart = virt_to_phys(io_tlb_start); |
131 | pend = virt_to_phys(io_tlb_end); | 131 | pend = virt_to_phys(io_tlb_end); |
132 | 132 | ||
133 | printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n", | 133 | printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n", |
134 | bytes >> 20, io_tlb_start, io_tlb_end); | 134 | (unsigned long long)pstart, (unsigned long long)pend - 1, |
135 | printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n", | 135 | bytes >> 20, io_tlb_start, io_tlb_end - 1); |
136 | (unsigned long long)pstart, | ||
137 | (unsigned long long)pend); | ||
138 | } | 136 | } |
139 | 137 | ||
140 | void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) | 138 | void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) |
@@ -172,7 +170,7 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) | |||
172 | * Statically reserve bounce buffer space and initialize bounce buffer data | 170 | * Statically reserve bounce buffer space and initialize bounce buffer data |
173 | * structures for the software IO TLB used to implement the DMA API. | 171 | * structures for the software IO TLB used to implement the DMA API. |
174 | */ | 172 | */ |
175 | void __init | 173 | static void __init |
176 | swiotlb_init_with_default_size(size_t default_size, int verbose) | 174 | swiotlb_init_with_default_size(size_t default_size, int verbose) |
177 | { | 175 | { |
178 | unsigned long bytes; | 176 | unsigned long bytes; |
@@ -208,8 +206,9 @@ swiotlb_init(int verbose) | |||
208 | int | 206 | int |
209 | swiotlb_late_init_with_default_size(size_t default_size) | 207 | swiotlb_late_init_with_default_size(size_t default_size) |
210 | { | 208 | { |
211 | unsigned long i, bytes, req_nslabs = io_tlb_nslabs; | 209 | unsigned long bytes, req_nslabs = io_tlb_nslabs; |
212 | unsigned int order; | 210 | unsigned int order; |
211 | int rc = 0; | ||
213 | 212 | ||
214 | if (!io_tlb_nslabs) { | 213 | if (!io_tlb_nslabs) { |
215 | io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); | 214 | io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); |
@@ -231,16 +230,32 @@ swiotlb_late_init_with_default_size(size_t default_size) | |||
231 | order--; | 230 | order--; |
232 | } | 231 | } |
233 | 232 | ||
234 | if (!io_tlb_start) | 233 | if (!io_tlb_start) { |
235 | goto cleanup1; | 234 | io_tlb_nslabs = req_nslabs; |
236 | 235 | return -ENOMEM; | |
236 | } | ||
237 | if (order != get_order(bytes)) { | 237 | if (order != get_order(bytes)) { |
238 | printk(KERN_WARNING "Warning: only able to allocate %ld MB " | 238 | printk(KERN_WARNING "Warning: only able to allocate %ld MB " |
239 | "for software IO TLB\n", (PAGE_SIZE << order) >> 20); | 239 | "for software IO TLB\n", (PAGE_SIZE << order) >> 20); |
240 | io_tlb_nslabs = SLABS_PER_PAGE << order; | 240 | io_tlb_nslabs = SLABS_PER_PAGE << order; |
241 | bytes = io_tlb_nslabs << IO_TLB_SHIFT; | ||
242 | } | 241 | } |
242 | rc = swiotlb_late_init_with_tbl(io_tlb_start, io_tlb_nslabs); | ||
243 | if (rc) | ||
244 | free_pages((unsigned long)io_tlb_start, order); | ||
245 | return rc; | ||
246 | } | ||
247 | |||
248 | int | ||
249 | swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) | ||
250 | { | ||
251 | unsigned long i, bytes; | ||
252 | |||
253 | bytes = nslabs << IO_TLB_SHIFT; | ||
254 | |||
255 | io_tlb_nslabs = nslabs; | ||
256 | io_tlb_start = tlb; | ||
243 | io_tlb_end = io_tlb_start + bytes; | 257 | io_tlb_end = io_tlb_start + bytes; |
258 | |||
244 | memset(io_tlb_start, 0, bytes); | 259 | memset(io_tlb_start, 0, bytes); |
245 | 260 | ||
246 | /* | 261 | /* |
@@ -290,10 +305,8 @@ cleanup3: | |||
290 | io_tlb_list = NULL; | 305 | io_tlb_list = NULL; |
291 | cleanup2: | 306 | cleanup2: |
292 | io_tlb_end = NULL; | 307 | io_tlb_end = NULL; |
293 | free_pages((unsigned long)io_tlb_start, order); | ||
294 | io_tlb_start = NULL; | 308 | io_tlb_start = NULL; |
295 | cleanup1: | 309 | io_tlb_nslabs = 0; |
296 | io_tlb_nslabs = req_nslabs; | ||
297 | return -ENOMEM; | 310 | return -ENOMEM; |
298 | } | 311 | } |
299 | 312 | ||
@@ -321,6 +334,7 @@ void __init swiotlb_free(void) | |||
321 | free_bootmem_late(__pa(io_tlb_start), | 334 | free_bootmem_late(__pa(io_tlb_start), |
322 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); | 335 | PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); |
323 | } | 336 | } |
337 | io_tlb_nslabs = 0; | ||
324 | } | 338 | } |
325 | 339 | ||
326 | static int is_swiotlb_buffer(phys_addr_t paddr) | 340 | static int is_swiotlb_buffer(phys_addr_t paddr) |
@@ -348,13 +362,12 @@ void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, | |||
348 | sz = min_t(size_t, PAGE_SIZE - offset, size); | 362 | sz = min_t(size_t, PAGE_SIZE - offset, size); |
349 | 363 | ||
350 | local_irq_save(flags); | 364 | local_irq_save(flags); |
351 | buffer = kmap_atomic(pfn_to_page(pfn), | 365 | buffer = kmap_atomic(pfn_to_page(pfn)); |
352 | KM_BOUNCE_READ); | ||
353 | if (dir == DMA_TO_DEVICE) | 366 | if (dir == DMA_TO_DEVICE) |
354 | memcpy(dma_addr, buffer + offset, sz); | 367 | memcpy(dma_addr, buffer + offset, sz); |
355 | else | 368 | else |
356 | memcpy(buffer + offset, dma_addr, sz); | 369 | memcpy(buffer + offset, dma_addr, sz); |
357 | kunmap_atomic(buffer, KM_BOUNCE_READ); | 370 | kunmap_atomic(buffer); |
358 | local_irq_restore(flags); | 371 | local_irq_restore(flags); |
359 | 372 | ||
360 | size -= sz; | 373 | size -= sz; |
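The late-init refactoring above lets a caller that obtains its bounce buffer by other means hand it straight to swiotlb_late_init_with_tbl() instead of going through the page allocator path. A hedged sketch of such a caller; the sizes are arbitrary and a real user would have its own platform-specific, DMA-able allocation.

#include <linux/swiotlb.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define EXAMPLE_SLABS	(2UL << 10)	/* 2048 slabs of 2 KiB -> 4 MiB */

static int example_setup_bounce_buffer(void)
{
	unsigned long bytes = EXAMPLE_SLABS << IO_TLB_SHIFT;
	char *tlb;
	int rc;

	/* Stand-in allocation; must be physically contiguous. */
	tlb = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
				       get_order(bytes));
	if (!tlb)
		return -ENOMEM;

	/* Register the buffer and build the slab bookkeeping for it;
	 * on failure the caller still owns (and frees) the pages. */
	rc = swiotlb_late_init_with_tbl(tlb, EXAMPLE_SLABS);
	if (rc)
		free_pages((unsigned long)tlb, get_order(bytes));
	return rc;
}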
diff --git a/lib/syscall.c b/lib/syscall.c index a4f7067f72fa..58710eefeac8 100644 --- a/lib/syscall.c +++ b/lib/syscall.c | |||
@@ -1,6 +1,6 @@ | |||
1 | #include <linux/ptrace.h> | 1 | #include <linux/ptrace.h> |
2 | #include <linux/sched.h> | 2 | #include <linux/sched.h> |
3 | #include <linux/module.h> | 3 | #include <linux/export.h> |
4 | #include <asm/syscall.h> | 4 | #include <asm/syscall.h> |
5 | 5 | ||
6 | static int collect_syscall(struct task_struct *target, long *callno, | 6 | static int collect_syscall(struct task_struct *target, long *callno, |
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c index d55769d63cb8..bea3f3fa3f02 100644 --- a/lib/test-kstrtox.c +++ b/lib/test-kstrtox.c | |||
@@ -11,7 +11,7 @@ struct test_fail { | |||
11 | }; | 11 | }; |
12 | 12 | ||
13 | #define DEFINE_TEST_FAIL(test) \ | 13 | #define DEFINE_TEST_FAIL(test) \ |
14 | const struct test_fail test[] __initdata | 14 | const struct test_fail test[] __initconst |
15 | 15 | ||
16 | #define DECLARE_TEST_OK(type, test_type) \ | 16 | #define DECLARE_TEST_OK(type, test_type) \ |
17 | test_type { \ | 17 | test_type { \ |
@@ -21,7 +21,7 @@ struct test_fail { | |||
21 | } | 21 | } |
22 | 22 | ||
23 | #define DEFINE_TEST_OK(type, test) \ | 23 | #define DEFINE_TEST_OK(type, test) \ |
24 | const type test[] __initdata | 24 | const type test[] __initconst |
25 | 25 | ||
26 | #define TEST_FAIL(fn, type, fmt, test) \ | 26 | #define TEST_FAIL(fn, type, fmt, test) \ |
27 | { \ | 27 | { \ |
diff --git a/lib/timerqueue.c b/lib/timerqueue.c index 191176a43e9a..a382e4a32609 100644 --- a/lib/timerqueue.c +++ b/lib/timerqueue.c | |||
@@ -22,9 +22,10 @@ | |||
22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/bug.h> | ||
25 | #include <linux/timerqueue.h> | 26 | #include <linux/timerqueue.h> |
26 | #include <linux/rbtree.h> | 27 | #include <linux/rbtree.h> |
27 | #include <linux/module.h> | 28 | #include <linux/export.h> |
28 | 29 | ||
29 | /** | 30 | /** |
30 | * timerqueue_add - Adds timer to timerqueue. | 31 | * timerqueue_add - Adds timer to timerqueue. |
diff --git a/lib/uuid.c b/lib/uuid.c index 8fadd7cef46c..52a6fe6387de 100644 --- a/lib/uuid.c +++ b/lib/uuid.c | |||
@@ -19,7 +19,7 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/module.h> | 22 | #include <linux/export.h> |
23 | #include <linux/uuid.h> | 23 | #include <linux/uuid.h> |
24 | #include <linux/random.h> | 24 | #include <linux/random.h> |
25 | 25 | ||
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index d7222a9c8267..39c99fea7c03 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -17,7 +17,7 @@ | |||
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <stdarg.h> | 19 | #include <stdarg.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> /* for KSYM_SYMBOL_LEN */ |
21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
22 | #include <linux/string.h> | 22 | #include <linux/string.h> |
23 | #include <linux/ctype.h> | 23 | #include <linux/ctype.h> |
@@ -31,17 +31,7 @@ | |||
31 | #include <asm/div64.h> | 31 | #include <asm/div64.h> |
32 | #include <asm/sections.h> /* for dereference_function_descriptor() */ | 32 | #include <asm/sections.h> /* for dereference_function_descriptor() */ |
33 | 33 | ||
34 | static unsigned int simple_guess_base(const char *cp) | 34 | #include "kstrtox.h" |
35 | { | ||
36 | if (cp[0] == '0') { | ||
37 | if (_tolower(cp[1]) == 'x' && isxdigit(cp[2])) | ||
38 | return 16; | ||
39 | else | ||
40 | return 8; | ||
41 | } else { | ||
42 | return 10; | ||
43 | } | ||
44 | } | ||
45 | 35 | ||
46 | /** | 36 | /** |
47 | * simple_strtoull - convert a string to an unsigned long long | 37 | * simple_strtoull - convert a string to an unsigned long long |
@@ -51,23 +41,14 @@ static unsigned int simple_guess_base(const char *cp) | |||
51 | */ | 41 | */ |
52 | unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) | 42 | unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) |
53 | { | 43 | { |
54 | unsigned long long result = 0; | 44 | unsigned long long result; |
45 | unsigned int rv; | ||
55 | 46 | ||
56 | if (!base) | 47 | cp = _parse_integer_fixup_radix(cp, &base); |
57 | base = simple_guess_base(cp); | 48 | rv = _parse_integer(cp, base, &result); |
49 | /* FIXME */ | ||
50 | cp += (rv & ~KSTRTOX_OVERFLOW); | ||
58 | 51 | ||
59 | if (base == 16 && cp[0] == '0' && _tolower(cp[1]) == 'x') | ||
60 | cp += 2; | ||
61 | |||
62 | while (isxdigit(*cp)) { | ||
63 | unsigned int value; | ||
64 | |||
65 | value = isdigit(*cp) ? *cp - '0' : _tolower(*cp) - 'a' + 10; | ||
66 | if (value >= base) | ||
67 | break; | ||
68 | result = result * base + value; | ||
69 | cp++; | ||
70 | } | ||
71 | if (endp) | 52 | if (endp) |
72 | *endp = (char *)cp; | 53 | *endp = (char *)cp; |
73 | 54 | ||
@@ -131,104 +112,220 @@ int skip_atoi(const char **s) | |||
131 | /* Decimal conversion is by far the most typical, and is used | 112 | /* Decimal conversion is by far the most typical, and is used |
132 | * for /proc and /sys data. This directly impacts e.g. top performance | 113 | * for /proc and /sys data. This directly impacts e.g. top performance |
133 | * with many processes running. We optimize it for speed | 114 | * with many processes running. We optimize it for speed |
134 | * using code from | 115 | * using ideas described at <http://www.cs.uiowa.edu/~jones/bcd/divide.html> |
135 | * http://www.cs.uiowa.edu/~jones/bcd/decimal.html | 116 | * (with permission from the author, Douglas W. Jones). |
136 | * (with permission from the author, Douglas W. Jones). */ | 117 | */ |
137 | 118 | ||
138 | /* Formats correctly any integer in [0,99999]. | 119 | #if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64 |
139 | * Outputs from one to five digits depending on input. | 120 | /* Formats correctly any integer in [0, 999999999] */ |
140 | * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */ | ||
141 | static noinline_for_stack | 121 | static noinline_for_stack |
142 | char *put_dec_trunc(char *buf, unsigned q) | 122 | char *put_dec_full9(char *buf, unsigned q) |
143 | { | 123 | { |
144 | unsigned d3, d2, d1, d0; | 124 | unsigned r; |
145 | d1 = (q>>4) & 0xf; | ||
146 | d2 = (q>>8) & 0xf; | ||
147 | d3 = (q>>12); | ||
148 | |||
149 | d0 = 6*(d3 + d2 + d1) + (q & 0xf); | ||
150 | q = (d0 * 0xcd) >> 11; | ||
151 | d0 = d0 - 10*q; | ||
152 | *buf++ = d0 + '0'; /* least significant digit */ | ||
153 | d1 = q + 9*d3 + 5*d2 + d1; | ||
154 | if (d1 != 0) { | ||
155 | q = (d1 * 0xcd) >> 11; | ||
156 | d1 = d1 - 10*q; | ||
157 | *buf++ = d1 + '0'; /* next digit */ | ||
158 | |||
159 | d2 = q + 2*d2; | ||
160 | if ((d2 != 0) || (d3 != 0)) { | ||
161 | q = (d2 * 0xd) >> 7; | ||
162 | d2 = d2 - 10*q; | ||
163 | *buf++ = d2 + '0'; /* next digit */ | ||
164 | |||
165 | d3 = q + 4*d3; | ||
166 | if (d3 != 0) { | ||
167 | q = (d3 * 0xcd) >> 11; | ||
168 | d3 = d3 - 10*q; | ||
169 | *buf++ = d3 + '0'; /* next digit */ | ||
170 | if (q != 0) | ||
171 | *buf++ = q + '0'; /* most sign. digit */ | ||
172 | } | ||
173 | } | ||
174 | } | ||
175 | 125 | ||
126 | /* | ||
127 | * Possible ways to approx. divide by 10 | ||
128 | * (x * 0x1999999a) >> 32 x < 1073741829 (multiply must be 64-bit) | ||
129 | * (x * 0xcccd) >> 19 x < 81920 (x < 262149 when 64-bit mul) | ||
130 | * (x * 0x6667) >> 18 x < 43699 | ||
131 | * (x * 0x3334) >> 17 x < 16389 | ||
132 | * (x * 0x199a) >> 16 x < 16389 | ||
133 | * (x * 0x0ccd) >> 15 x < 16389 | ||
134 | * (x * 0x0667) >> 14 x < 2739 | ||
135 | * (x * 0x0334) >> 13 x < 1029 | ||
136 | * (x * 0x019a) >> 12 x < 1029 | ||
137 | * (x * 0x00cd) >> 11 x < 1029 shorter code than * 0x67 (on i386) | ||
138 | * (x * 0x0067) >> 10 x < 179 | ||
139 | * (x * 0x0034) >> 9 x < 69 same | ||
140 | * (x * 0x001a) >> 8 x < 69 same | ||
141 | * (x * 0x000d) >> 7 x < 69 same, shortest code (on i386) | ||
142 | * (x * 0x0007) >> 6 x < 19 | ||
143 | * See <http://www.cs.uiowa.edu/~jones/bcd/divide.html> | ||
144 | */ | ||
145 | r = (q * (uint64_t)0x1999999a) >> 32; | ||
146 | *buf++ = (q - 10 * r) + '0'; /* 1 */ | ||
147 | q = (r * (uint64_t)0x1999999a) >> 32; | ||
148 | *buf++ = (r - 10 * q) + '0'; /* 2 */ | ||
149 | r = (q * (uint64_t)0x1999999a) >> 32; | ||
150 | *buf++ = (q - 10 * r) + '0'; /* 3 */ | ||
151 | q = (r * (uint64_t)0x1999999a) >> 32; | ||
152 | *buf++ = (r - 10 * q) + '0'; /* 4 */ | ||
153 | r = (q * (uint64_t)0x1999999a) >> 32; | ||
154 | *buf++ = (q - 10 * r) + '0'; /* 5 */ | ||
155 | /* Now value is under 10000, can avoid 64-bit multiply */ | ||
156 | q = (r * 0x199a) >> 16; | ||
157 | *buf++ = (r - 10 * q) + '0'; /* 6 */ | ||
158 | r = (q * 0xcd) >> 11; | ||
159 | *buf++ = (q - 10 * r) + '0'; /* 7 */ | ||
160 | q = (r * 0xcd) >> 11; | ||
161 | *buf++ = (r - 10 * q) + '0'; /* 8 */ | ||
162 | *buf++ = q + '0'; /* 9 */ | ||
176 | return buf; | 163 | return buf; |
177 | } | 164 | } |
178 | /* Same with if's removed. Always emits five digits */ | 165 | #endif |
166 | |||
167 | /* Similar to above but do not pad with zeros. | ||
168 | * Code can be easily arranged to print 9 digits too, but our callers | ||
169 | * always call put_dec_full9() instead when the number has 9 decimal digits. | ||
170 | */ | ||
179 | static noinline_for_stack | 171 | static noinline_for_stack |
180 | char *put_dec_full(char *buf, unsigned q) | 172 | char *put_dec_trunc8(char *buf, unsigned r) |
181 | { | 173 | { |
182 | /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ | 174 | unsigned q; |
183 | /* but anyway, gcc produces better code with full-sized ints */ | ||
184 | unsigned d3, d2, d1, d0; | ||
185 | d1 = (q>>4) & 0xf; | ||
186 | d2 = (q>>8) & 0xf; | ||
187 | d3 = (q>>12); | ||
188 | 175 | ||
189 | /* | 176 | /* Copy of previous function's body with added early returns */ |
190 | * Possible ways to approx. divide by 10 | 177 | while (r >= 10000) { |
191 | * gcc -O2 replaces multiply with shifts and adds | 178 | q = r + '0'; |
192 | * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) | 179 | r = (r * (uint64_t)0x1999999a) >> 32; |
193 | * (x * 0x67) >> 10: 1100111 | 180 | *buf++ = q - 10*r; |
194 | * (x * 0x34) >> 9: 110100 - same | 181 | } |
195 | * (x * 0x1a) >> 8: 11010 - same | ||
196 | * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) | ||
197 | */ | ||
198 | d0 = 6*(d3 + d2 + d1) + (q & 0xf); | ||
199 | q = (d0 * 0xcd) >> 11; | ||
200 | d0 = d0 - 10*q; | ||
201 | *buf++ = d0 + '0'; | ||
202 | d1 = q + 9*d3 + 5*d2 + d1; | ||
203 | q = (d1 * 0xcd) >> 11; | ||
204 | d1 = d1 - 10*q; | ||
205 | *buf++ = d1 + '0'; | ||
206 | |||
207 | d2 = q + 2*d2; | ||
208 | q = (d2 * 0xd) >> 7; | ||
209 | d2 = d2 - 10*q; | ||
210 | *buf++ = d2 + '0'; | ||
211 | |||
212 | d3 = q + 4*d3; | ||
213 | q = (d3 * 0xcd) >> 11; /* - shorter code */ | ||
214 | /* q = (d3 * 0x67) >> 10; - would also work */ | ||
215 | d3 = d3 - 10*q; | ||
216 | *buf++ = d3 + '0'; | ||
217 | *buf++ = q + '0'; | ||
218 | 182 | ||
183 | q = (r * 0x199a) >> 16; /* r <= 9999 */ | ||
184 | *buf++ = (r - 10 * q) + '0'; | ||
185 | if (q == 0) | ||
186 | return buf; | ||
187 | r = (q * 0xcd) >> 11; /* q <= 999 */ | ||
188 | *buf++ = (q - 10 * r) + '0'; | ||
189 | if (r == 0) | ||
190 | return buf; | ||
191 | q = (r * 0xcd) >> 11; /* r <= 99 */ | ||
192 | *buf++ = (r - 10 * q) + '0'; | ||
193 | if (q == 0) | ||
194 | return buf; | ||
195 | *buf++ = q + '0'; /* q <= 9 */ | ||
219 | return buf; | 196 | return buf; |
220 | } | 197 | } |
221 | /* No inlining helps gcc to use registers better */ | 198 | |
199 | /* There are two algorithms to print larger numbers. | ||
200 | * One is generic: divide by 1000000000 and repeatedly print | ||
201 | * groups of (up to) 9 digits. It's conceptually simple, | ||
202 | * but requires a (unsigned long long) / 1000000000 division. | ||
203 | * | ||
204 | * Second algorithm splits 64-bit unsigned long long into 16-bit chunks, | ||
205 | * manipulates them cleverly and generates groups of 4 decimal digits. | ||
206 | * It so happens that it does NOT require long long division. | ||
207 | * | ||
208 | * If long is > 32 bits, division of 64-bit values is relatively easy, | ||
209 | * and we will use the first algorithm. | ||
210 | * If long long is > 64 bits (strange architecture with VERY large long long), | ||
211 | * second algorithm can't be used, and we again use the first one. | ||
212 | * | ||
213 | * Else (if long is 32 bits and long long is 64 bits) we use second one. | ||
214 | */ | ||
215 | |||
216 | #if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64 | ||
217 | |||
218 | /* First algorithm: generic */ | ||
219 | |||
220 | static | ||
221 | char *put_dec(char *buf, unsigned long long n) | ||
222 | { | ||
223 | if (n >= 100*1000*1000) { | ||
224 | while (n >= 1000*1000*1000) | ||
225 | buf = put_dec_full9(buf, do_div(n, 1000*1000*1000)); | ||
226 | if (n >= 100*1000*1000) | ||
227 | return put_dec_full9(buf, n); | ||
228 | } | ||
229 | return put_dec_trunc8(buf, n); | ||
230 | } | ||
231 | |||
232 | #else | ||
233 | |||
234 | /* Second algorithm: valid only for 64-bit long longs */ | ||
235 | |||
236 | /* See comment in put_dec_full9 for choice of constants */ | ||
222 | static noinline_for_stack | 237 | static noinline_for_stack |
223 | char *put_dec(char *buf, unsigned long long num) | 238 | void put_dec_full4(char *buf, unsigned q) |
239 | { | ||
240 | unsigned r; | ||
241 | r = (q * 0xccd) >> 15; | ||
242 | buf[0] = (q - 10 * r) + '0'; | ||
243 | q = (r * 0xcd) >> 11; | ||
244 | buf[1] = (r - 10 * q) + '0'; | ||
245 | r = (q * 0xcd) >> 11; | ||
246 | buf[2] = (q - 10 * r) + '0'; | ||
247 | buf[3] = r + '0'; | ||
248 | } | ||
249 | |||
250 | /* | ||
251 | * Call put_dec_full4 on x % 10000, return x / 10000. | ||
252 | * The approximation x/10000 == (x * 0x346DC5D7) >> 43 | ||
253 | * holds for all x < 1,128,869,999. The largest value this | ||
254 | * helper will ever be asked to convert is 1,125,520,955. | ||
255 | * (d1 in the put_dec code, assuming n is all-ones). | ||
256 | */ | ||
257 | static | ||
258 | unsigned put_dec_helper4(char *buf, unsigned x) | ||
259 | { | ||
260 | uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43; | ||
261 | |||
262 | put_dec_full4(buf, x - q * 10000); | ||
263 | return q; | ||
264 | } | ||
265 | |||
266 | /* Based on code by Douglas W. Jones found at | ||
267 | * <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour> | ||
268 | * (with permission from the author). | ||
269 | * Performs no 64-bit division and hence should be fast on 32-bit machines. | ||
270 | */ | ||
271 | static | ||
272 | char *put_dec(char *buf, unsigned long long n) | ||
273 | { | ||
274 | uint32_t d3, d2, d1, q, h; | ||
275 | |||
276 | if (n < 100*1000*1000) | ||
277 | return put_dec_trunc8(buf, n); | ||
278 | |||
279 | d1 = ((uint32_t)n >> 16); /* implicit "& 0xffff" */ | ||
280 | h = (n >> 32); | ||
281 | d2 = (h ) & 0xffff; | ||
282 | d3 = (h >> 16); /* implicit "& 0xffff" */ | ||
283 | |||
284 | q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff); | ||
285 | q = put_dec_helper4(buf, q); | ||
286 | |||
287 | q += 7671 * d3 + 9496 * d2 + 6 * d1; | ||
288 | q = put_dec_helper4(buf+4, q); | ||
289 | |||
290 | q += 4749 * d3 + 42 * d2; | ||
291 | q = put_dec_helper4(buf+8, q); | ||
292 | |||
293 | q += 281 * d3; | ||
294 | buf += 12; | ||
295 | if (q) | ||
296 | buf = put_dec_trunc8(buf, q); | ||
297 | else while (buf[-1] == '0') | ||
298 | --buf; | ||
299 | |||
300 | return buf; | ||
301 | } | ||
302 | |||
303 | #endif | ||
304 | |||
305 | /* | ||
306 | * Convert passed number to decimal string. | ||
307 | * Returns the length of string. On buffer overflow, returns 0. | ||
308 | * | ||
309 | * If speed is not important, use snprintf(). It's easy to read the code. | ||
310 | */ | ||
311 | int num_to_str(char *buf, int size, unsigned long long num) | ||
224 | { | 312 | { |
225 | while (1) { | 313 | char tmp[sizeof(num) * 3]; |
226 | unsigned rem; | 314 | int idx, len; |
227 | if (num < 100000) | 315 | |
228 | return put_dec_trunc(buf, num); | 316 | /* put_dec() may work incorrectly for num = 0 (generate "", not "0") */ |
229 | rem = do_div(num, 100000); | 317 | if (num <= 9) { |
230 | buf = put_dec_full(buf, rem); | 318 | tmp[0] = '0' + num; |
319 | len = 1; | ||
320 | } else { | ||
321 | len = put_dec(tmp, num) - tmp; | ||
231 | } | 322 | } |
323 | |||
324 | if (len > size) | ||
325 | return 0; | ||
326 | for (idx = 0; idx < len; ++idx) | ||
327 | buf[idx] = tmp[len - idx - 1]; | ||
328 | return len; | ||
232 | } | 329 | } |
233 | 330 | ||
234 | #define ZEROPAD 1 /* pad with zero */ | 331 | #define ZEROPAD 1 /* pad with zero */ |
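The constants in the comment table above are reciprocal multiplications: a divide by 10 becomes a multiply and a shift that is exact over the stated input range. The three forms the new code actually uses can be checked with a stand-alone user-space program; the loop bounds below stay inside the ranges quoted in the comment.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t x;

	/* (x * 0x1999999a) >> 32 == x / 10, stated for x < 1073741829;
	 * the range is sampled rather than swept exhaustively. */
	for (x = 0; x < 1073741829u; x += 1000003) {
		if ((uint32_t)(((uint64_t)x * 0x1999999a) >> 32) != x / 10) {
			printf("64-bit form fails at %u\n", x);
			return 1;
		}
	}

	/* (x * 0x199a) >> 16 == x / 10 once the value is below 10000 */
	for (x = 0; x < 10000; x++) {
		if (((x * 0x199a) >> 16) != x / 10) {
			printf("0x199a form fails at %u\n", x);
			return 1;
		}
	}

	/* (x * 0xcd) >> 11 == x / 10 for the remaining small values */
	for (x = 0; x < 1000; x++) {
		if (((x * 0xcd) >> 11) != x / 10) {
			printf("0xcd form fails at %u\n", x);
			return 1;
		}
	}

	printf("reciprocal divisions are exact over the sampled ranges\n");
	return 0;
}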
@@ -283,6 +380,7 @@ char *number(char *buf, char *end, unsigned long long num, | |||
283 | char locase; | 380 | char locase; |
284 | int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10); | 381 | int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10); |
285 | int i; | 382 | int i; |
383 | bool is_zero = num == 0LL; | ||
286 | 384 | ||
287 | /* locase = 0 or 0x20. ORing digits or letters with 'locase' | 385 | /* locase = 0 or 0x20. ORing digits or letters with 'locase' |
288 | * produces same digits or (maybe lowercased) letters */ | 386 | * produces same digits or (maybe lowercased) letters */ |
@@ -304,15 +402,16 @@ char *number(char *buf, char *end, unsigned long long num, | |||
304 | } | 402 | } |
305 | } | 403 | } |
306 | if (need_pfx) { | 404 | if (need_pfx) { |
307 | spec.field_width--; | ||
308 | if (spec.base == 16) | 405 | if (spec.base == 16) |
406 | spec.field_width -= 2; | ||
407 | else if (!is_zero) | ||
309 | spec.field_width--; | 408 | spec.field_width--; |
310 | } | 409 | } |
311 | 410 | ||
312 | /* generate full string in tmp[], in reverse order */ | 411 | /* generate full string in tmp[], in reverse order */ |
313 | i = 0; | 412 | i = 0; |
314 | if (num == 0) | 413 | if (num < spec.base) |
315 | tmp[i++] = '0'; | 414 | tmp[i++] = digits[num] | locase; |
316 | /* Generic code, for any base: | 415 | /* Generic code, for any base: |
317 | else do { | 416 | else do { |
318 | tmp[i++] = (digits[do_div(num,base)] | locase); | 417 | tmp[i++] = (digits[do_div(num,base)] | locase); |
@@ -352,9 +451,11 @@ char *number(char *buf, char *end, unsigned long long num, | |||
352 | } | 451 | } |
353 | /* "0x" / "0" prefix */ | 452 | /* "0x" / "0" prefix */ |
354 | if (need_pfx) { | 453 | if (need_pfx) { |
355 | if (buf < end) | 454 | if (spec.base == 16 || !is_zero) { |
356 | *buf = '0'; | 455 | if (buf < end) |
357 | ++buf; | 456 | *buf = '0'; |
457 | ++buf; | ||
458 | } | ||
358 | if (spec.base == 16) { | 459 | if (spec.base == 16) { |
359 | if (buf < end) | 460 | if (buf < end) |
360 | *buf = ('X' | locase); | 461 | *buf = ('X' | locase); |
@@ -435,7 +536,7 @@ char *symbol_string(char *buf, char *end, void *ptr, | |||
435 | else if (ext != 'f' && ext != 's') | 536 | else if (ext != 'f' && ext != 's') |
436 | sprint_symbol(sym, value); | 537 | sprint_symbol(sym, value); |
437 | else | 538 | else |
438 | kallsyms_lookup(value, NULL, NULL, NULL, sym); | 539 | sprint_symbol_no_offset(sym, value); |
439 | 540 | ||
440 | return string(buf, end, sym, spec); | 541 | return string(buf, end, sym, spec); |
441 | #else | 542 | #else |
@@ -551,6 +652,50 @@ char *resource_string(char *buf, char *end, struct resource *res, | |||
551 | } | 652 | } |
552 | 653 | ||
553 | static noinline_for_stack | 654 | static noinline_for_stack |
655 | char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec, | ||
656 | const char *fmt) | ||
657 | { | ||
658 | int i, len = 1; /* if we pass '%ph[CDN]', field width remains a | ||
659 | negative value, fall back to the default */ | ||
660 | char separator; | ||
661 | |||
662 | if (spec.field_width == 0) | ||
663 | /* nothing to print */ | ||
664 | return buf; | ||
665 | |||
666 | if (ZERO_OR_NULL_PTR(addr)) | ||
667 | /* NULL pointer */ | ||
668 | return string(buf, end, NULL, spec); | ||
669 | |||
670 | switch (fmt[1]) { | ||
671 | case 'C': | ||
672 | separator = ':'; | ||
673 | break; | ||
674 | case 'D': | ||
675 | separator = '-'; | ||
676 | break; | ||
677 | case 'N': | ||
678 | separator = 0; | ||
679 | break; | ||
680 | default: | ||
681 | separator = ' '; | ||
682 | break; | ||
683 | } | ||
684 | |||
685 | if (spec.field_width > 0) | ||
686 | len = min_t(int, spec.field_width, 64); | ||
687 | |||
688 | for (i = 0; i < len && buf < end - 1; i++) { | ||
689 | buf = hex_byte_pack(buf, addr[i]); | ||
690 | |||
691 | if (buf < end && separator && i != len - 1) | ||
692 | *buf++ = separator; | ||
693 | } | ||
694 | |||
695 | return buf; | ||
696 | } | ||
697 | |||
698 | static noinline_for_stack | ||
554 | char *mac_address_string(char *buf, char *end, u8 *addr, | 699 | char *mac_address_string(char *buf, char *end, u8 *addr, |
555 | struct printf_spec spec, const char *fmt) | 700 | struct printf_spec spec, const char *fmt) |
556 | { | 701 | { |
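The new %ph specifier takes its byte count from the field width, so the usual pattern is a '*' width with an explicit int length argument. A short usage sketch; the function name and buffer contents are arbitrary.

#include <linux/kernel.h>
#include <linux/types.h>

static void example_dump_small_buf(const u8 *buf, int len)
{
	/* Space-, colon-, dash-separated and contiguous forms; anything
	 * longer than 64 bytes should use print_hex_dump() instead. */
	pr_debug("raw:   %*ph\n",  len, buf);
	pr_debug("colon: %*phC\n", len, buf);
	pr_debug("dash:  %*phD\n", len, buf);
	pr_debug("plain: %*phN\n", len, buf);
}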
@@ -558,15 +703,28 @@ char *mac_address_string(char *buf, char *end, u8 *addr, | |||
558 | char *p = mac_addr; | 703 | char *p = mac_addr; |
559 | int i; | 704 | int i; |
560 | char separator; | 705 | char separator; |
706 | bool reversed = false; | ||
561 | 707 | ||
562 | if (fmt[1] == 'F') { /* FDDI canonical format */ | 708 | switch (fmt[1]) { |
709 | case 'F': | ||
563 | separator = '-'; | 710 | separator = '-'; |
564 | } else { | 711 | break; |
712 | |||
713 | case 'R': | ||
714 | reversed = true; | ||
715 | /* fall through */ | ||
716 | |||
717 | default: | ||
565 | separator = ':'; | 718 | separator = ':'; |
719 | break; | ||
566 | } | 720 | } |
567 | 721 | ||
568 | for (i = 0; i < 6; i++) { | 722 | for (i = 0; i < 6; i++) { |
569 | p = pack_hex_byte(p, addr[i]); | 723 | if (reversed) |
724 | p = hex_byte_pack(p, addr[5 - i]); | ||
725 | else | ||
726 | p = hex_byte_pack(p, addr[i]); | ||
727 | |||
570 | if (fmt[0] == 'M' && i != 5) | 728 | if (fmt[0] == 'M' && i != 5) |
571 | *p++ = separator; | 729 | *p++ = separator; |
572 | } | 730 | } |
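The reversed form exists because Bluetooth device addresses are stored with the least significant byte first, while %pM prints the bytes in the order they sit in memory, as network MAC addresses expect. A usage sketch (the argument names are illustrative):

#include <linux/kernel.h>
#include <linux/types.h>

static void example_print_addresses(const u8 mac[6], const u8 bdaddr[6])
{
	pr_info("ethernet:  %pM\n",  mac);	/* 00:01:02:03:04:05 */
	pr_info("bluetooth: %pMR\n", bdaddr);	/* bytes in reverse order */
	pr_info("fddi:      %pMF\n", mac);	/* dash separated */
}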
@@ -606,7 +764,7 @@ char *ip4_string(char *p, const u8 *addr, const char *fmt) | |||
606 | } | 764 | } |
607 | for (i = 0; i < 4; i++) { | 765 | for (i = 0; i < 4; i++) { |
608 | char temp[3]; /* hold each IP quad in reverse order */ | 766 | char temp[3]; /* hold each IP quad in reverse order */ |
609 | int digits = put_dec_trunc(temp, addr[index]) - temp; | 767 | int digits = put_dec_trunc8(temp, addr[index]) - temp; |
610 | if (leading_zeros) { | 768 | if (leading_zeros) { |
611 | if (digits < 3) | 769 | if (digits < 3) |
612 | *p++ = '0'; | 770 | *p++ = '0'; |
@@ -686,13 +844,13 @@ char *ip6_compressed_string(char *p, const char *addr) | |||
686 | lo = word & 0xff; | 844 | lo = word & 0xff; |
687 | if (hi) { | 845 | if (hi) { |
688 | if (hi > 0x0f) | 846 | if (hi > 0x0f) |
689 | p = pack_hex_byte(p, hi); | 847 | p = hex_byte_pack(p, hi); |
690 | else | 848 | else |
691 | *p++ = hex_asc_lo(hi); | 849 | *p++ = hex_asc_lo(hi); |
692 | p = pack_hex_byte(p, lo); | 850 | p = hex_byte_pack(p, lo); |
693 | } | 851 | } |
694 | else if (lo > 0x0f) | 852 | else if (lo > 0x0f) |
695 | p = pack_hex_byte(p, lo); | 853 | p = hex_byte_pack(p, lo); |
696 | else | 854 | else |
697 | *p++ = hex_asc_lo(lo); | 855 | *p++ = hex_asc_lo(lo); |
698 | needcolon = true; | 856 | needcolon = true; |
@@ -714,8 +872,8 @@ char *ip6_string(char *p, const char *addr, const char *fmt) | |||
714 | int i; | 872 | int i; |
715 | 873 | ||
716 | for (i = 0; i < 8; i++) { | 874 | for (i = 0; i < 8; i++) { |
717 | p = pack_hex_byte(p, *addr++); | 875 | p = hex_byte_pack(p, *addr++); |
718 | p = pack_hex_byte(p, *addr++); | 876 | p = hex_byte_pack(p, *addr++); |
719 | if (fmt[0] == 'I' && i != 7) | 877 | if (fmt[0] == 'I' && i != 7) |
720 | *p++ = ':'; | 878 | *p++ = ':'; |
721 | } | 879 | } |
@@ -773,7 +931,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr, | |||
773 | } | 931 | } |
774 | 932 | ||
775 | for (i = 0; i < 16; i++) { | 933 | for (i = 0; i < 16; i++) { |
776 | p = pack_hex_byte(p, addr[index[i]]); | 934 | p = hex_byte_pack(p, addr[index[i]]); |
777 | switch (i) { | 935 | switch (i) { |
778 | case 3: | 936 | case 3: |
779 | case 5: | 937 | case 5: |
@@ -796,6 +954,18 @@ char *uuid_string(char *buf, char *end, const u8 *addr, | |||
796 | return string(buf, end, uuid, spec); | 954 | return string(buf, end, uuid, spec); |
797 | } | 955 | } |
798 | 956 | ||
957 | static | ||
958 | char *netdev_feature_string(char *buf, char *end, const u8 *addr, | ||
959 | struct printf_spec spec) | ||
960 | { | ||
961 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | ||
962 | if (spec.field_width == -1) | ||
963 | spec.field_width = 2 + 2 * sizeof(netdev_features_t); | ||
964 | spec.base = 16; | ||
965 | |||
966 | return number(buf, end, *(const netdev_features_t *)addr, spec); | ||
967 | } | ||
968 | |||
799 | int kptr_restrict __read_mostly; | 969 | int kptr_restrict __read_mostly; |
800 | 970 | ||
801 | /* | 971 | /* |
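netdev_features_t is passed to %pNF by reference, like the other %p extensions, and is printed as a zero-padded hex mask sized to the type. A sketch of typical usage, assuming a struct net_device whose features field has that type (as it does in the tree this patch targets):

#include <linux/kernel.h>
#include <linux/netdevice.h>

static void example_print_features(const struct net_device *dev)
{
	/* Passed by reference; the width defaults to the full mask. */
	pr_info("%s: features = %pNF\n", dev->name, &dev->features);
}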
@@ -817,6 +987,7 @@ int kptr_restrict __read_mostly; | |||
817 | * - 'm' For a 6-byte MAC address, it prints the hex address without colons | 987 | * - 'm' For a 6-byte MAC address, it prints the hex address without colons |
818 | * - 'MF' For a 6-byte MAC FDDI address, it prints the address | 988 | * - 'MF' For a 6-byte MAC FDDI address, it prints the address |
819 | * with a dash-separated hex notation | 989 | * with a dash-separated hex notation |
990 | * - '[mM]R' For a 6-byte MAC address, Reverse order (Bluetooth) | ||
820 | * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way | 991 | * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way |
821 | * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) | 992 | * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) |
822 | * IPv6 uses colon separated network-order 16 bit hex with leading 0's | 993 | * IPv6 uses colon separated network-order 16 bit hex with leading 0's |
@@ -843,6 +1014,14 @@ int kptr_restrict __read_mostly; | |||
843 | * Do not use this feature without some mechanism to verify the | 1014 | * Do not use this feature without some mechanism to verify the |
844 | * correctness of the format string and va_list arguments. | 1015 | * correctness of the format string and va_list arguments. |
845 | * - 'K' For a kernel pointer that should be hidden from unprivileged users | 1016 | * - 'K' For a kernel pointer that should be hidden from unprivileged users |
1017 | * - 'NF' For a netdev_features_t | ||
1018 | * - 'h[CDN]' For a variable-length buffer, it prints it as a hex string with | ||
1019 | * a certain separator (' ' by default): | ||
1020 | * C colon | ||
1021 | * D dash | ||
1022 | * N no separator | ||
1023 | * The maximum supported length is 64 bytes of the input. Consider | ||
1024 | * using print_hex_dump() for larger input. | ||
846 | * | 1025 | * |
847 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 | 1026 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 |
848 | * function pointers are really function descriptors, which contain a | 1027 | * function pointers are really function descriptors, which contain a |
@@ -852,13 +1031,15 @@ static noinline_for_stack | |||
852 | char *pointer(const char *fmt, char *buf, char *end, void *ptr, | 1031 | char *pointer(const char *fmt, char *buf, char *end, void *ptr, |
853 | struct printf_spec spec) | 1032 | struct printf_spec spec) |
854 | { | 1033 | { |
1034 | int default_width = 2 * sizeof(void *) + (spec.flags & SPECIAL ? 2 : 0); | ||
1035 | |||
855 | if (!ptr && *fmt != 'K') { | 1036 | if (!ptr && *fmt != 'K') { |
856 | /* | 1037 | /* |
857 | * Print (null) with the same width as a pointer so it makes | 1038 | * Print (null) with the same width as a pointer so it makes |
858 | * tabular output look nice. | 1039 | * tabular output look nice. |
859 | */ | 1040 | */ |
860 | if (spec.field_width == -1) | 1041 | if (spec.field_width == -1) |
861 | spec.field_width = 2 * sizeof(void *); | 1042 | spec.field_width = default_width; |
862 | return string(buf, end, "(null)", spec); | 1043 | return string(buf, end, "(null)", spec); |
863 | } | 1044 | } |
864 | 1045 | ||
@@ -874,9 +1055,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
874 | case 'R': | 1055 | case 'R': |
875 | case 'r': | 1056 | case 'r': |
876 | return resource_string(buf, end, ptr, spec, fmt); | 1057 | return resource_string(buf, end, ptr, spec, fmt); |
1058 | case 'h': | ||
1059 | return hex_string(buf, end, ptr, spec, fmt); | ||
877 | case 'M': /* Colon separated: 00:01:02:03:04:05 */ | 1060 | case 'M': /* Colon separated: 00:01:02:03:04:05 */ |
878 | case 'm': /* Contiguous: 000102030405 */ | 1061 | case 'm': /* Contiguous: 000102030405 */ |
879 | /* [mM]F (FDDI, bit reversed) */ | 1062 | /* [mM]F (FDDI) */ |
1063 | /* [mM]R (Reverse order; Bluetooth) */ | ||
880 | return mac_address_string(buf, end, ptr, spec, fmt); | 1064 | return mac_address_string(buf, end, ptr, spec, fmt); |
881 | case 'I': /* Formatted IP supported | 1065 | case 'I': /* Formatted IP supported |
882 | * 4: 1.2.3.4 | 1066 | * 4: 1.2.3.4 |
@@ -897,17 +1081,24 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
897 | case 'U': | 1081 | case 'U': |
898 | return uuid_string(buf, end, ptr, spec, fmt); | 1082 | return uuid_string(buf, end, ptr, spec, fmt); |
899 | case 'V': | 1083 | case 'V': |
900 | return buf + vsnprintf(buf, end > buf ? end - buf : 0, | 1084 | { |
901 | ((struct va_format *)ptr)->fmt, | 1085 | va_list va; |
902 | *(((struct va_format *)ptr)->va)); | 1086 | |
1087 | va_copy(va, *((struct va_format *)ptr)->va); | ||
1088 | buf += vsnprintf(buf, end > buf ? end - buf : 0, | ||
1089 | ((struct va_format *)ptr)->fmt, va); | ||
1090 | va_end(va); | ||
1091 | return buf; | ||
1092 | } | ||
903 | case 'K': | 1093 | case 'K': |
904 | /* | 1094 | /* |
905 | * %pK cannot be used in IRQ context because its test | 1095 | * %pK cannot be used in IRQ context because its test |
906 | * for CAP_SYSLOG would be meaningless. | 1096 | * for CAP_SYSLOG would be meaningless. |
907 | */ | 1097 | */ |
908 | if (in_irq() || in_serving_softirq() || in_nmi()) { | 1098 | if (kptr_restrict && (in_irq() || in_serving_softirq() || |
1099 | in_nmi())) { | ||
909 | if (spec.field_width == -1) | 1100 | if (spec.field_width == -1) |
910 | spec.field_width = 2 * sizeof(void *); | 1101 | spec.field_width = default_width; |
911 | return string(buf, end, "pK-error", spec); | 1102 | return string(buf, end, "pK-error", spec); |
912 | } | 1103 | } |
913 | if (!((kptr_restrict == 0) || | 1104 | if (!((kptr_restrict == 0) || |
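%pV wraps a (format, va_list) pair so one printk-style helper can forward its arguments to another. The va_copy() added above matters because the same va_format may be expanded more than once; consuming the caller's va_list in place would corrupt it on the second pass. A rough sketch of a forwarding helper (the wrapper itself is hypothetical):

#include <linux/kernel.h>

/* Hypothetical wrapper that prefixes messages with a name. */
static __printf(2, 3)
void example_named_warn(const char *name, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* vsnprintf() now va_copy()s before walking the arguments,
	 * so the same vaf can safely be formatted more than once. */
	printk(KERN_WARNING "%s: %pV", name, &vaf);
	va_end(args);
}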
@@ -915,10 +1106,16 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
915 | has_capability_noaudit(current, CAP_SYSLOG)))) | 1106 | has_capability_noaudit(current, CAP_SYSLOG)))) |
916 | ptr = NULL; | 1107 | ptr = NULL; |
917 | break; | 1108 | break; |
1109 | case 'N': | ||
1110 | switch (fmt[1]) { | ||
1111 | case 'F': | ||
1112 | return netdev_feature_string(buf, end, ptr, spec); | ||
1113 | } | ||
1114 | break; | ||
918 | } | 1115 | } |
919 | spec.flags |= SMALL; | 1116 | spec.flags |= SMALL; |
920 | if (spec.field_width == -1) { | 1117 | if (spec.field_width == -1) { |
921 | spec.field_width = 2 * sizeof(void *); | 1118 | spec.field_width = default_width; |
922 | spec.flags |= ZEROPAD; | 1119 | spec.flags |= ZEROPAD; |
923 | } | 1120 | } |
924 | spec.base = 16; | 1121 | spec.base = 16; |
@@ -1141,7 +1338,10 @@ qualifier: | |||
1141 | * %pR output the address range in a struct resource with decoded flags | 1338 | * %pR output the address range in a struct resource with decoded flags |
1142 | * %pr output the address range in a struct resource with raw flags | 1339 | * %pr output the address range in a struct resource with raw flags |
1143 | * %pM output a 6-byte MAC address with colons | 1340 | * %pM output a 6-byte MAC address with colons |
1341 | * %pMR output a 6-byte MAC address with colons in reversed order | ||
1342 | * %pMF output a 6-byte MAC address with dashes | ||
1144 | * %pm output a 6-byte MAC address without colons | 1343 | * %pm output a 6-byte MAC address without colons |
1344 | * %pmR output a 6-byte MAC address without colons in reversed order | ||
1145 | * %pI4 print an IPv4 address without leading zeros | 1345 | * %pI4 print an IPv4 address without leading zeros |
1146 | * %pi4 print an IPv4 address with leading zeros | 1346 | * %pi4 print an IPv4 address with leading zeros |
1147 | * %pI6 print an IPv6 address with colons | 1347 | * %pI6 print an IPv6 address with colons |
@@ -1149,8 +1349,12 @@ qualifier: | |||
1149 | * %pI6c print an IPv6 address as specified by RFC 5952 | 1349 | * %pI6c print an IPv6 address as specified by RFC 5952 |
1150 | * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper | 1350 | * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper |
1151 | * case. | 1351 | * case. |
1352 | * %*ph[CDN] a variable-length hex string with a separator (supports up to 64 | ||
1353 | * bytes of the input) | ||
1152 | * %n is ignored | 1354 | * %n is ignored |
1153 | * | 1355 | * |
1356 | * ** Please update Documentation/printk-formats.txt when making changes ** | ||
1357 | * | ||
1154 | * The return value is the number of characters which would | 1358 | * The return value is the number of characters which would |
1155 | * be generated for the given input, excluding the trailing | 1359 | * be generated for the given input, excluding the trailing |
1156 | * '\0', as per ISO C99. If you want to have the exact | 1360 | * '\0', as per ISO C99. If you want to have the exact |
@@ -1813,7 +2017,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args) | |||
1813 | s16 field_width; | 2017 | s16 field_width; |
1814 | bool is_sign; | 2018 | bool is_sign; |
1815 | 2019 | ||
1816 | while (*fmt && *str) { | 2020 | while (*fmt) { |
1817 | /* skip any white space in format */ | 2021 | /* skip any white space in format */ |
1818 | /* white space in format matchs any amount of | 2022 | /* white space in format matchs any amount of |
1819 | * white space, including none, in the input. | 2023 | * white space, including none, in the input. |
@@ -1838,6 +2042,8 @@ int vsscanf(const char *buf, const char *fmt, va_list args) | |||
1838 | * advance both strings to next white space | 2042 | * advance both strings to next white space |
1839 | */ | 2043 | */ |
1840 | if (*fmt == '*') { | 2044 | if (*fmt == '*') { |
2045 | if (!*str) | ||
2046 | break; | ||
1841 | while (!isspace(*fmt) && *fmt != '%' && *fmt) | 2047 | while (!isspace(*fmt) && *fmt != '%' && *fmt) |
1842 | fmt++; | 2048 | fmt++; |
1843 | while (!isspace(*str) && *str) | 2049 | while (!isspace(*str) && *str) |
@@ -1866,7 +2072,17 @@ int vsscanf(const char *buf, const char *fmt, va_list args) | |||
1866 | } | 2072 | } |
1867 | } | 2073 | } |
1868 | 2074 | ||
1869 | if (!*fmt || !*str) | 2075 | if (!*fmt) |
2076 | break; | ||
2077 | |||
2078 | if (*fmt == 'n') { | ||
2079 | /* return number of characters read so far */ | ||
2080 | *va_arg(args, int *) = str - buf; | ||
2081 | ++fmt; | ||
2082 | continue; | ||
2083 | } | ||
2084 | |||
2085 | if (!*str) | ||
1870 | break; | 2086 | break; |
1871 | 2087 | ||
1872 | base = 10; | 2088 | base = 10; |
@@ -1899,13 +2115,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args) | |||
1899 | num++; | 2115 | num++; |
1900 | } | 2116 | } |
1901 | continue; | 2117 | continue; |
1902 | case 'n': | ||
1903 | /* return number of characters read so far */ | ||
1904 | { | ||
1905 | int *i = (int *)va_arg(args, int*); | ||
1906 | *i = str - buf; | ||
1907 | } | ||
1908 | continue; | ||
1909 | case 'o': | 2118 | case 'o': |
1910 | base = 8; | 2119 | base = 8; |
1911 | break; | 2120 | break; |
@@ -2006,16 +2215,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args) | |||
2006 | str = next; | 2215 | str = next; |
2007 | } | 2216 | } |
2008 | 2217 | ||
2009 | /* | ||
2010 | * Now we've come all the way through so either the input string or the | ||
2011 | * format ended. In the former case, there can be a %n at the current | ||
2012 | * position in the format that needs to be filled. | ||
2013 | */ | ||
2014 | if (*fmt == '%' && *(fmt + 1) == 'n') { | ||
2015 | int *p = (int *)va_arg(args, int *); | ||
2016 | *p = str - buf; | ||
2017 | } | ||
2018 | |||
2019 | return num; | 2218 | return num; |
2020 | } | 2219 | } |
2021 | EXPORT_SYMBOL(vsscanf); | 2220 | EXPORT_SYMBOL(vsscanf); |
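With the rework above, %n is handled in the main loop, so it is honoured even when it follows the last converted field and the input has already been consumed, rather than only through the removed special case at the end of the function. A usage sketch, counting how many characters a parsed prefix consumed (format and values are arbitrary):

#include <linux/kernel.h>

static void example_parse(const char *s)
{
	unsigned int major, minor;
	int consumed = 0;

	/* %n stores the number of characters read so far; it is not a
	 * conversion, so the return value is still 2 on success. */
	if (sscanf(s, "%u.%u%n", &major, &minor, &consumed) == 2)
		pr_info("parsed %u.%u, %d chars used, rest: \"%s\"\n",
			major, minor, consumed, s + consumed);
}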
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c index e51e2558ca9d..a768e6d28bbb 100644 --- a/lib/xz/xz_dec_bcj.c +++ b/lib/xz/xz_dec_bcj.c | |||
@@ -441,8 +441,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, | |||
441 | * next filter in the chain. Apply the BCJ filter on the new data | 441 | * next filter in the chain. Apply the BCJ filter on the new data |
442 | * in the output buffer. If everything cannot be filtered, copy it | 442 | * in the output buffer. If everything cannot be filtered, copy it |
443 | * to temp and rewind the output buffer position accordingly. | 443 | * to temp and rewind the output buffer position accordingly. |
444 | * | ||
445 | * This needs to be always run when temp.size == 0 to handle a special | ||
446 | * case where the output buffer is full and the next filter has no | ||
447 | * more output coming but hasn't returned XZ_STREAM_END yet. | ||
444 | */ | 448 | */ |
445 | if (s->temp.size < b->out_size - b->out_pos) { | 449 | if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) { |
446 | out_start = b->out_pos; | 450 | out_start = b->out_pos; |
447 | memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); | 451 | memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); |
448 | b->out_pos += s->temp.size; | 452 | b->out_pos += s->temp.size; |
@@ -465,16 +469,25 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, | |||
465 | s->temp.size = b->out_pos - out_start; | 469 | s->temp.size = b->out_pos - out_start; |
466 | b->out_pos -= s->temp.size; | 470 | b->out_pos -= s->temp.size; |
467 | memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); | 471 | memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); |
472 | |||
473 | /* | ||
474 | * If there wasn't enough input to the next filter to fill | ||
475 | * the output buffer with unfiltered data, there's no point | ||
476 | * in decoding more data into temp. | ||
477 | */ | ||
478 | if (b->out_pos + s->temp.size < b->out_size) | ||
479 | return XZ_OK; | ||
468 | } | 480 | } |
469 | 481 | ||
470 | /* | 482 | /* |
471 | * If we have unfiltered data in temp, try to fill by decoding more | 483 | * We have unfiltered data in temp. If the output buffer isn't full |
472 | * data from the next filter. Apply the BCJ filter on temp. Then we | 484 | * yet, try to fill the temp buffer by decoding more data from the |
473 | * hopefully can fill the actual output buffer by copying filtered | 485 | * next filter. Apply the BCJ filter on temp. Then we hopefully can |
474 | * data from temp. A mix of filtered and unfiltered data may be left | 486 | * fill the actual output buffer by copying filtered data from temp. |
475 | * in temp; it will be taken care on the next call to this function. | 487 | * A mix of filtered and unfiltered data may be left in temp; it will |
488 | * be taken care of on the next call to this function. | ||
476 | */ | 489 | */ |
477 | if (s->temp.size > 0) { | 490 | if (b->out_pos < b->out_size) { |
478 | /* Make b->out{,_pos,_size} temporarily point to s->temp. */ | 491 | /* Make b->out{,_pos,_size} temporarily point to s->temp. */ |
479 | s->out = b->out; | 492 | s->out = b->out; |
480 | s->out_pos = b->out_pos; | 493 | s->out_pos = b->out_pos; |