Diffstat (limited to 'lib')
36 files changed, 1989 insertions, 635 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 277fbfb233b9..496d16e1fa2c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
| @@ -117,6 +117,10 @@ config DECOMPRESS_BZIP2 | |||
| 117 | config DECOMPRESS_LZMA | 117 | config DECOMPRESS_LZMA |
| 118 | tristate | 118 | tristate |
| 119 | 119 | ||
| 120 | config DECOMPRESS_LZO | ||
| 121 | select LZO_DECOMPRESS | ||
| 122 | tristate | ||
| 123 | |||
| 120 | # | 124 | # |
| 121 | # Generic allocator support is selected if needed | 125 | # Generic allocator support is selected if needed |
| 122 | # | 126 | # |
| @@ -156,6 +160,9 @@ config TEXTSEARCH_BM | |||
| 156 | config TEXTSEARCH_FSM | 160 | config TEXTSEARCH_FSM |
| 157 | tristate | 161 | tristate |
| 158 | 162 | ||
| 163 | config LIST_SORT | ||
| 164 | boolean | ||
| 165 | |||
| 159 | config BTREE | 166 | config BTREE |
| 160 | boolean | 167 | boolean |
| 161 | 168 | ||
| @@ -203,4 +210,7 @@ config NLATTR | |||
| 203 | config GENERIC_ATOMIC64 | 210 | config GENERIC_ATOMIC64 |
| 204 | bool | 211 | bool |
| 205 | 212 | ||
| 213 | config LRU_CACHE | ||
| 214 | tristate | ||
| 215 | |||
| 206 | endmenu | 216 | endmenu |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 234ceb10861f..b520ec1f33c5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
| @@ -105,7 +105,7 @@ config DEBUG_SECTION_MISMATCH | |||
| 105 | bool "Enable full Section mismatch analysis" | 105 | bool "Enable full Section mismatch analysis" |
| 106 | depends on UNDEFINED | 106 | depends on UNDEFINED |
| 107 | # This option is on purpose disabled for now. | 107 | # This option is on purpose disabled for now. |
| 108 | # It will be enabled when we are down to a resonable number | 108 | # It will be enabled when we are down to a reasonable number |
| 109 | # of section mismatch warnings (< 10 for an allyesconfig build) | 109 | # of section mismatch warnings (< 10 for an allyesconfig build) |
| 110 | help | 110 | help |
| 111 | The section mismatch analysis checks if there are illegal | 111 | The section mismatch analysis checks if there are illegal |
| @@ -298,6 +298,14 @@ config DEBUG_OBJECTS_TIMERS | |||
| 298 | timer routines to track the life time of timer objects and | 298 | timer routines to track the life time of timer objects and |
| 299 | validate the timer operations. | 299 | validate the timer operations. |
| 300 | 300 | ||
| 301 | config DEBUG_OBJECTS_WORK | ||
| 302 | bool "Debug work objects" | ||
| 303 | depends on DEBUG_OBJECTS | ||
| 304 | help | ||
| 305 | If you say Y here, additional code will be inserted into the | ||
| 306 | work queue routines to track the life time of work objects and | ||
| 307 | validate the work operations. | ||
| 308 | |||
| 301 | config DEBUG_OBJECTS_ENABLE_DEFAULT | 309 | config DEBUG_OBJECTS_ENABLE_DEFAULT |
| 302 | int "debug_objects bootup default value (0-1)" | 310 | int "debug_objects bootup default value (0-1)" |
| 303 | range 0 1 | 311 | range 0 1 |
| @@ -347,11 +355,12 @@ config SLUB_STATS | |||
| 347 | config DEBUG_KMEMLEAK | 355 | config DEBUG_KMEMLEAK |
| 348 | bool "Kernel memory leak detector" | 356 | bool "Kernel memory leak detector" |
| 349 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ | 357 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ |
| 350 | (X86 || ARM || PPC || S390) | 358 | (X86 || ARM || PPC || S390 || SUPERH) |
| 351 | 359 | ||
| 352 | select DEBUG_FS if SYSFS | 360 | select DEBUG_FS if SYSFS |
| 353 | select STACKTRACE if STACKTRACE_SUPPORT | 361 | select STACKTRACE if STACKTRACE_SUPPORT |
| 354 | select KALLSYMS | 362 | select KALLSYMS |
| 363 | select CRC32 | ||
| 355 | help | 364 | help |
| 356 | Say Y here if you want to enable the memory leak | 365 | Say Y here if you want to enable the memory leak |
| 357 | detector. The memory allocation/freeing is traced in a way | 366 | detector. The memory allocation/freeing is traced in a way |
| @@ -490,6 +499,18 @@ config PROVE_LOCKING | |||
| 490 | 499 | ||
| 491 | For more details, see Documentation/lockdep-design.txt. | 500 | For more details, see Documentation/lockdep-design.txt. |
| 492 | 501 | ||
| 502 | config PROVE_RCU | ||
| 503 | bool "RCU debugging: prove RCU correctness" | ||
| 504 | depends on PROVE_LOCKING | ||
| 505 | default n | ||
| 506 | help | ||
| 507 | This feature enables lockdep extensions that check for correct | ||
| 508 | use of RCU APIs. This is currently under development. Say Y | ||
| 509 | if you want to debug RCU usage or help work on the PROVE_RCU | ||
| 510 | feature. | ||
| 511 | |||
| 512 | Say N if you are unsure. | ||
| 513 | |||
| 493 | config LOCKDEP | 514 | config LOCKDEP |
| 494 | bool | 515 | bool |
| 495 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | 516 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT |
| @@ -567,7 +588,7 @@ config DEBUG_BUGVERBOSE | |||
| 567 | depends on BUG | 588 | depends on BUG |
| 568 | depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ | 589 | depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ |
| 569 | FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 | 590 | FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 |
| 570 | default !EMBEDDED | 591 | default y |
| 571 | help | 592 | help |
| 572 | Say Y here to make BUG() panics output the file name and line number | 593 | Say Y here to make BUG() panics output the file name and line number |
| 573 | of the BUG call as well as the EIP and oops trace. This aids | 594 | of the BUG call as well as the EIP and oops trace. This aids |
| @@ -750,16 +771,28 @@ config RCU_TORTURE_TEST_RUNNABLE | |||
| 750 | config RCU_CPU_STALL_DETECTOR | 771 | config RCU_CPU_STALL_DETECTOR |
| 751 | bool "Check for stalled CPUs delaying RCU grace periods" | 772 | bool "Check for stalled CPUs delaying RCU grace periods" |
| 752 | depends on TREE_RCU || TREE_PREEMPT_RCU | 773 | depends on TREE_RCU || TREE_PREEMPT_RCU |
| 753 | default n | 774 | default y |
| 754 | help | 775 | help |
| 755 | This option causes RCU to printk information on which | 776 | This option causes RCU to printk information on which |
| 756 | CPUs are delaying the current grace period, but only when | 777 | CPUs are delaying the current grace period, but only when |
| 757 | the grace period extends for excessive time periods. | 778 | the grace period extends for excessive time periods. |
| 758 | 779 | ||
| 759 | Say Y if you want RCU to perform such checks. | 780 | Say N if you want to disable such checks. |
| 781 | |||
| 782 | Say Y if you are unsure. | ||
| 783 | |||
| 784 | config RCU_CPU_STALL_VERBOSE | ||
| 785 | bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" | ||
| 786 | depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU | ||
| 787 | default n | ||
| 788 | help | ||
| 789 | This option causes RCU to printk detailed per-task information | ||
| 790 | for any tasks that are stalling the current RCU grace period. | ||
| 760 | 791 | ||
| 761 | Say N if you are unsure. | 792 | Say N if you are unsure. |
| 762 | 793 | ||
| 794 | Say Y if you want to enable such checks. | ||
| 795 | |||
| 763 | config KPROBES_SANITY_TEST | 796 | config KPROBES_SANITY_TEST |
| 764 | bool "Kprobes sanity tests" | 797 | bool "Kprobes sanity tests" |
| 765 | depends on DEBUG_KERNEL | 798 | depends on DEBUG_KERNEL |
| @@ -831,8 +864,7 @@ config DEBUG_FORCE_WEAK_PER_CPU | |||
| 831 | 864 | ||
| 832 | config LKDTM | 865 | config LKDTM |
| 833 | tristate "Linux Kernel Dump Test Tool Module" | 866 | tristate "Linux Kernel Dump Test Tool Module" |
| 834 | depends on DEBUG_KERNEL | 867 | depends on DEBUG_FS |
| 835 | depends on KPROBES | ||
| 836 | depends on BLOCK | 868 | depends on BLOCK |
| 837 | default n | 869 | default n |
| 838 | help | 870 | help |
| @@ -843,7 +875,7 @@ config LKDTM | |||
| 843 | called lkdtm. | 875 | called lkdtm. |
| 844 | 876 | ||
| 845 | Documentation on how to use the module can be found in | 877 | Documentation on how to use the module can be found in |
| 846 | drivers/misc/lkdtm.c | 878 | Documentation/fault-injection/provoke-crashes.txt |
| 847 | 879 | ||
| 848 | config FAULT_INJECTION | 880 | config FAULT_INJECTION |
| 849 | bool "Fault-injection framework" | 881 | bool "Fault-injection framework" |
| @@ -912,7 +944,7 @@ config LATENCYTOP | |||
| 912 | 944 | ||
| 913 | config SYSCTL_SYSCALL_CHECK | 945 | config SYSCTL_SYSCALL_CHECK |
| 914 | bool "Sysctl checks" | 946 | bool "Sysctl checks" |
| 915 | depends on SYSCTL_SYSCALL | 947 | depends on SYSCTL |
| 916 | ---help--- | 948 | ---help--- |
| 917 | sys_sysctl uses binary paths that have been found challenging | 949 | sys_sysctl uses binary paths that have been found challenging |
| 918 | to properly maintain and use. This enables checks that help | 950 | to properly maintain and use. This enables checks that help |
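As an illustration of what the new PROVE_RCU option is meant to catch, here is a hedged sketch of correct RCU reader usage built only on the long-standing rcu_read_lock()/rcu_dereference() API; struct cfg and the global pointer are invented for the example, and this code is not part of the patch. With PROVE_RCU enabled, lockdep can complain when an rcu_dereference() like the one below is reached outside any recognized protection, for instance outside the read-side critical section:

#include <linux/rcupdate.h>

struct cfg {
	int value;
};

static struct cfg *global_cfg;	/* updated elsewhere with rcu_assign_pointer() */

static int read_cfg_value(void)
{
	struct cfg *c;
	int v = -1;

	rcu_read_lock();			/* enter RCU read-side critical section */
	c = rcu_dereference(global_cfg);	/* the access PROVE_RCU can police */
	if (c)
		v = c->value;
	rcu_read_unlock();

	return v;
}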
diff --git a/lib/Makefile b/lib/Makefile
index cff82612e98b..59e46a014bc6 100644
--- a/lib/Makefile
+++ b/lib/Makefile
| @@ -40,6 +40,7 @@ lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o | |||
| 40 | lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o | 40 | lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o |
| 41 | obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o | 41 | obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o |
| 42 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o | 42 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o |
| 43 | obj-$(CONFIG_LIST_SORT) += list_sort.o | ||
| 43 | obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o | 44 | obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o |
| 44 | obj-$(CONFIG_BTREE) += btree.o | 45 | obj-$(CONFIG_BTREE) += btree.o |
| 45 | obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o | 46 | obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o |
| @@ -70,6 +71,7 @@ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ | |||
| 70 | lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o | 71 | lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o |
| 71 | lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o | 72 | lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o |
| 72 | lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o | 73 | lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o |
| 74 | lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o | ||
| 73 | 75 | ||
| 74 | obj-$(CONFIG_TEXTSEARCH) += textsearch.o | 76 | obj-$(CONFIG_TEXTSEARCH) += textsearch.o |
| 75 | obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o | 77 | obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o |
| @@ -92,6 +94,8 @@ obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o | |||
| 92 | 94 | ||
| 93 | obj-$(CONFIG_NLATTR) += nlattr.o | 95 | obj-$(CONFIG_NLATTR) += nlattr.o |
| 94 | 96 | ||
| 97 | obj-$(CONFIG_LRU_CACHE) += lru_cache.o | ||
| 98 | |||
| 95 | obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o | 99 | obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o |
| 96 | 100 | ||
| 97 | obj-$(CONFIG_GENERIC_CSUM) += checksum.o | 101 | obj-$(CONFIG_GENERIC_CSUM) += checksum.o |
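The new list_sort.o object wired in above provides a merge sort over struct list_head lists. A hedged caller-side sketch, not part of the patch, assuming the list_sort(priv, head, cmp) interface exported by lib/list_sort.c; struct item and its key field are invented for the example:

#include <linux/list.h>
#include <linux/list_sort.h>

struct item {
	struct list_head node;
	int key;
};

/* Comparison callback: negative/zero/positive like memcmp; @priv is opaque. */
static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct item *ia = list_entry(a, struct item, node);
	struct item *ib = list_entry(b, struct item, node);

	return ia->key - ib->key;
}

static void sort_items(struct list_head *items)
{
	list_sort(NULL, items, item_cmp);	/* stable merge sort in place */
}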
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 5205a8dae5bc..4b1b083f219c 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
| @@ -4,17 +4,10 @@ | |||
| 4 | 4 | ||
| 5 | #include <linux/kernel.h> | 5 | #include <linux/kernel.h> |
| 6 | #include <linux/ctype.h> | 6 | #include <linux/ctype.h> |
| 7 | #include <linux/string.h> | ||
| 7 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
| 8 | #include <linux/module.h> | 9 | #include <linux/module.h> |
| 9 | 10 | ||
| 10 | static const char *skip_sep(const char *cp) | ||
| 11 | { | ||
| 12 | while (*cp && isspace(*cp)) | ||
| 13 | cp++; | ||
| 14 | |||
| 15 | return cp; | ||
| 16 | } | ||
| 17 | |||
| 18 | static const char *skip_arg(const char *cp) | 11 | static const char *skip_arg(const char *cp) |
| 19 | { | 12 | { |
| 20 | while (*cp && !isspace(*cp)) | 13 | while (*cp && !isspace(*cp)) |
| @@ -28,7 +21,7 @@ static int count_argc(const char *str) | |||
| 28 | int count = 0; | 21 | int count = 0; |
| 29 | 22 | ||
| 30 | while (*str) { | 23 | while (*str) { |
| 31 | str = skip_sep(str); | 24 | str = skip_spaces(str); |
| 32 | if (*str) { | 25 | if (*str) { |
| 33 | count++; | 26 | count++; |
| 34 | str = skip_arg(str); | 27 | str = skip_arg(str); |
| @@ -82,7 +75,7 @@ char **argv_split(gfp_t gfp, const char *str, int *argcp) | |||
| 82 | argvp = argv; | 75 | argvp = argv; |
| 83 | 76 | ||
| 84 | while (*str) { | 77 | while (*str) { |
| 85 | str = skip_sep(str); | 78 | str = skip_spaces(str); |
| 86 | 79 | ||
| 87 | if (*str) { | 80 | if (*str) { |
| 88 | const char *p = str; | 81 | const char *p = str; |
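For context, a caller-side sketch of the argv_split() interface this file implements, using the argv_split()/argv_free() pair declared in <linux/string.h>; the command string and pr_debug() reporting are only illustrative and not part of the patch:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int split_example(void)
{
	char **argv;
	int argc, i;

	/* Leading, trailing and repeated whitespace is skipped. */
	argv = argv_split(GFP_KERNEL, "  mount -o ro /dev/sda1 /mnt ", &argc);
	if (!argv)
		return -ENOMEM;

	for (i = 0; i < argc; i++)
		pr_debug("argv[%d] = %s\n", i, argv[i]);

	argv_free(argv);
	return 0;
}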
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 702565821c99..ffb78c916ccd 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
| @@ -271,6 +271,87 @@ int __bitmap_weight(const unsigned long *bitmap, int bits) | |||
| 271 | } | 271 | } |
| 272 | EXPORT_SYMBOL(__bitmap_weight); | 272 | EXPORT_SYMBOL(__bitmap_weight); |
| 273 | 273 | ||
| 274 | #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) | ||
| 275 | |||
| 276 | void bitmap_set(unsigned long *map, int start, int nr) | ||
| 277 | { | ||
| 278 | unsigned long *p = map + BIT_WORD(start); | ||
| 279 | const int size = start + nr; | ||
| 280 | int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); | ||
| 281 | unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); | ||
| 282 | |||
| 283 | while (nr - bits_to_set >= 0) { | ||
| 284 | *p |= mask_to_set; | ||
| 285 | nr -= bits_to_set; | ||
| 286 | bits_to_set = BITS_PER_LONG; | ||
| 287 | mask_to_set = ~0UL; | ||
| 288 | p++; | ||
| 289 | } | ||
| 290 | if (nr) { | ||
| 291 | mask_to_set &= BITMAP_LAST_WORD_MASK(size); | ||
| 292 | *p |= mask_to_set; | ||
| 293 | } | ||
| 294 | } | ||
| 295 | EXPORT_SYMBOL(bitmap_set); | ||
| 296 | |||
| 297 | void bitmap_clear(unsigned long *map, int start, int nr) | ||
| 298 | { | ||
| 299 | unsigned long *p = map + BIT_WORD(start); | ||
| 300 | const int size = start + nr; | ||
| 301 | int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); | ||
| 302 | unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); | ||
| 303 | |||
| 304 | while (nr - bits_to_clear >= 0) { | ||
| 305 | *p &= ~mask_to_clear; | ||
| 306 | nr -= bits_to_clear; | ||
| 307 | bits_to_clear = BITS_PER_LONG; | ||
| 308 | mask_to_clear = ~0UL; | ||
| 309 | p++; | ||
| 310 | } | ||
| 311 | if (nr) { | ||
| 312 | mask_to_clear &= BITMAP_LAST_WORD_MASK(size); | ||
| 313 | *p &= ~mask_to_clear; | ||
| 314 | } | ||
| 315 | } | ||
| 316 | EXPORT_SYMBOL(bitmap_clear); | ||
| 317 | |||
| 318 | /* | ||
| 319 | * bitmap_find_next_zero_area - find a contiguous aligned zero area | ||
| 320 | * @map: The address to base the search on | ||
| 321 | * @size: The bitmap size in bits | ||
| 322 | * @start: The bitnumber to start searching at | ||
| 323 | * @nr: The number of zeroed bits we're looking for | ||
| 324 | * @align_mask: Alignment mask for zero area | ||
| 325 | * | ||
| 326 | * The @align_mask should be one less than a power of 2; the effect is that | ||
| 327 | * the bit offset of all zero areas this function finds is multiples of that | ||
| 328 | * power of 2. A @align_mask of 0 means no alignment is required. | ||
| 329 | */ | ||
| 330 | unsigned long bitmap_find_next_zero_area(unsigned long *map, | ||
| 331 | unsigned long size, | ||
| 332 | unsigned long start, | ||
| 333 | unsigned int nr, | ||
| 334 | unsigned long align_mask) | ||
| 335 | { | ||
| 336 | unsigned long index, end, i; | ||
| 337 | again: | ||
| 338 | index = find_next_zero_bit(map, size, start); | ||
| 339 | |||
| 340 | /* Align allocation */ | ||
| 341 | index = __ALIGN_MASK(index, align_mask); | ||
| 342 | |||
| 343 | end = index + nr; | ||
| 344 | if (end > size) | ||
| 345 | return end; | ||
| 346 | i = find_next_bit(map, end, index); | ||
| 347 | if (i < end) { | ||
| 348 | start = i + 1; | ||
| 349 | goto again; | ||
| 350 | } | ||
| 351 | return index; | ||
| 352 | } | ||
| 353 | EXPORT_SYMBOL(bitmap_find_next_zero_area); | ||
| 354 | |||
| 274 | /* | 355 | /* |
| 275 | * Bitmap printing & parsing functions: first version by Bill Irwin, | 356 | * Bitmap printing & parsing functions: first version by Bill Irwin, |
| 276 | * second version by Paul Jackson, third by Joe Korty. | 357 | * second version by Paul Jackson, third by Joe Korty. |
| @@ -406,7 +487,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen, | |||
| 406 | EXPORT_SYMBOL(__bitmap_parse); | 487 | EXPORT_SYMBOL(__bitmap_parse); |
| 407 | 488 | ||
| 408 | /** | 489 | /** |
| 409 | * bitmap_parse_user() | 490 | * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap |
| 410 | * | 491 | * |
| 411 | * @ubuf: pointer to user buffer containing string. | 492 | * @ubuf: pointer to user buffer containing string. |
| 412 | * @ulen: buffer size in bytes. If string is smaller than this | 493 | * @ulen: buffer size in bytes. If string is smaller than this |
| @@ -538,7 +619,7 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) | |||
| 538 | EXPORT_SYMBOL(bitmap_parselist); | 619 | EXPORT_SYMBOL(bitmap_parselist); |
| 539 | 620 | ||
| 540 | /** | 621 | /** |
| 541 | * bitmap_pos_to_ord(buf, pos, bits) | 622 | * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap |
| 542 | * @buf: pointer to a bitmap | 623 | * @buf: pointer to a bitmap |
| 543 | * @pos: a bit position in @buf (0 <= @pos < @bits) | 624 | * @pos: a bit position in @buf (0 <= @pos < @bits) |
| 544 | * @bits: number of valid bit positions in @buf | 625 | * @bits: number of valid bit positions in @buf |
| @@ -574,7 +655,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits) | |||
| 574 | } | 655 | } |
| 575 | 656 | ||
| 576 | /** | 657 | /** |
| 577 | * bitmap_ord_to_pos(buf, ord, bits) | 658 | * bitmap_ord_to_pos - find position of n-th set bit in bitmap |
| 578 | * @buf: pointer to bitmap | 659 | * @buf: pointer to bitmap |
| 579 | * @ord: ordinal bit position (n-th set bit, n >= 0) | 660 | * @ord: ordinal bit position (n-th set bit, n >= 0) |
| 580 | * @bits: number of valid bit positions in @buf | 661 | * @bits: number of valid bit positions in @buf |
| @@ -652,10 +733,9 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src, | |||
| 652 | bitmap_zero(dst, bits); | 733 | bitmap_zero(dst, bits); |
| 653 | 734 | ||
| 654 | w = bitmap_weight(new, bits); | 735 | w = bitmap_weight(new, bits); |
| 655 | for (oldbit = find_first_bit(src, bits); | 736 | for_each_set_bit(oldbit, src, bits) { |
| 656 | oldbit < bits; | ||
| 657 | oldbit = find_next_bit(src, bits, oldbit + 1)) { | ||
| 658 | int n = bitmap_pos_to_ord(old, oldbit, bits); | 737 | int n = bitmap_pos_to_ord(old, oldbit, bits); |
| 738 | |||
| 659 | if (n < 0 || w == 0) | 739 | if (n < 0 || w == 0) |
| 660 | set_bit(oldbit, dst); /* identity map */ | 740 | set_bit(oldbit, dst); /* identity map */ |
| 661 | else | 741 | else |
| @@ -822,9 +902,7 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig, | |||
| 822 | */ | 902 | */ |
| 823 | 903 | ||
| 824 | m = 0; | 904 | m = 0; |
| 825 | for (n = find_first_bit(relmap, bits); | 905 | for_each_set_bit(n, relmap, bits) { |
| 826 | n < bits; | ||
| 827 | n = find_next_bit(relmap, bits, n + 1)) { | ||
| 828 | /* m == bitmap_pos_to_ord(relmap, n, bits) */ | 906 | /* m == bitmap_pos_to_ord(relmap, n, bits) */ |
| 829 | if (test_bit(m, orig)) | 907 | if (test_bit(m, orig)) |
| 830 | set_bit(n, dst); | 908 | set_bit(n, dst); |
| @@ -853,9 +931,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig, | |||
| 853 | return; | 931 | return; |
| 854 | bitmap_zero(dst, bits); | 932 | bitmap_zero(dst, bits); |
| 855 | 933 | ||
| 856 | for (oldbit = find_first_bit(orig, bits); | 934 | for_each_set_bit(oldbit, orig, bits) |
| 857 | oldbit < bits; | ||
| 858 | oldbit = find_next_bit(orig, bits, oldbit + 1)) | ||
| 859 | set_bit(oldbit % sz, dst); | 935 | set_bit(oldbit % sz, dst); |
| 860 | } | 936 | } |
| 861 | EXPORT_SYMBOL(bitmap_fold); | 937 | EXPORT_SYMBOL(bitmap_fold); |
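The three helpers added above (bitmap_set, bitmap_clear, bitmap_find_next_zero_area) are designed to be combined into a find-then-mark allocation pattern. The following sketch is not part of the patch; the pool name, size and locking policy are assumptions:

#include <linux/bitmap.h>
#include <linux/errno.h>

#define POOL_BITS 1024

static DECLARE_BITMAP(pool, POOL_BITS);

/*
 * Illustrative allocator: find @nr clear bits whose start offset is
 * aligned per @align_mask (one less than a power of two), then mark
 * them busy. Real callers serialize this with their own lock.
 */
static int pool_alloc(unsigned int nr, unsigned long align_mask)
{
	unsigned long start;

	start = bitmap_find_next_zero_area(pool, POOL_BITS, 0, nr, align_mask);
	if (start >= POOL_BITS)
		return -ENOSPC;	/* no suitably aligned free range */

	bitmap_set(pool, start, nr);
	return start;
}

static void pool_free(unsigned int start, unsigned int nr)
{
	bitmap_clear(pool, start, nr);
}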
diff --git a/lib/checksum.c b/lib/checksum.c
index b2e2fd468461..097508732f34 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
| @@ -37,7 +37,8 @@ | |||
| 37 | 37 | ||
| 38 | #include <asm/byteorder.h> | 38 | #include <asm/byteorder.h> |
| 39 | 39 | ||
| 40 | static inline unsigned short from32to16(unsigned long x) | 40 | #ifndef do_csum |
| 41 | static inline unsigned short from32to16(unsigned int x) | ||
| 41 | { | 42 | { |
| 42 | /* add up 16-bit and 16-bit for 16+c bit */ | 43 | /* add up 16-bit and 16-bit for 16+c bit */ |
| 43 | x = (x & 0xffff) + (x >> 16); | 44 | x = (x & 0xffff) + (x >> 16); |
| @@ -49,16 +50,16 @@ static inline unsigned short from32to16(unsigned long x) | |||
| 49 | static unsigned int do_csum(const unsigned char *buff, int len) | 50 | static unsigned int do_csum(const unsigned char *buff, int len) |
| 50 | { | 51 | { |
| 51 | int odd, count; | 52 | int odd, count; |
| 52 | unsigned long result = 0; | 53 | unsigned int result = 0; |
| 53 | 54 | ||
| 54 | if (len <= 0) | 55 | if (len <= 0) |
| 55 | goto out; | 56 | goto out; |
| 56 | odd = 1 & (unsigned long) buff; | 57 | odd = 1 & (unsigned long) buff; |
| 57 | if (odd) { | 58 | if (odd) { |
| 58 | #ifdef __LITTLE_ENDIAN | 59 | #ifdef __LITTLE_ENDIAN |
| 59 | result = *buff; | ||
| 60 | #else | ||
| 61 | result += (*buff << 8); | 60 | result += (*buff << 8); |
| 61 | #else | ||
| 62 | result = *buff; | ||
| 62 | #endif | 63 | #endif |
| 63 | len--; | 64 | len--; |
| 64 | buff++; | 65 | buff++; |
| @@ -73,9 +74,9 @@ static unsigned int do_csum(const unsigned char *buff, int len) | |||
| 73 | } | 74 | } |
| 74 | count >>= 1; /* nr of 32-bit words.. */ | 75 | count >>= 1; /* nr of 32-bit words.. */ |
| 75 | if (count) { | 76 | if (count) { |
| 76 | unsigned long carry = 0; | 77 | unsigned int carry = 0; |
| 77 | do { | 78 | do { |
| 78 | unsigned long w = *(unsigned int *) buff; | 79 | unsigned int w = *(unsigned int *) buff; |
| 79 | count--; | 80 | count--; |
| 80 | buff += 4; | 81 | buff += 4; |
| 81 | result += carry; | 82 | result += carry; |
| @@ -102,6 +103,7 @@ static unsigned int do_csum(const unsigned char *buff, int len) | |||
| 102 | out: | 103 | out: |
| 103 | return result; | 104 | return result; |
| 104 | } | 105 | } |
| 106 | #endif | ||
| 105 | 107 | ||
| 106 | /* | 108 | /* |
| 107 | * This is a version of ip_compute_csum() optimized for IP headers, | 109 | * This is a version of ip_compute_csum() optimized for IP headers, |
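The new #ifndef do_csum guard lets an architecture supply its own inner loop while still reusing the generic wrappers in this file. A hedged sketch of what such an override could look like; the header location and the stub body are assumptions, not taken from any real architecture:

/* arch/<arch>/include/asm/checksum.h (illustrative) */

static inline unsigned int do_csum(const unsigned char *buff, int len)
{
	unsigned int sum = 0;

	/*
	 * Architecture-tuned code (often assembly) would sum @len bytes
	 * at @buff into a folded 16-bit one's-complement result here.
	 * Left as a stub in this sketch.
	 */
	return sum;
}
#define do_csum do_csum	/* tells lib/checksum.c to skip its generic version */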
diff --git a/lib/crc32.c b/lib/crc32.c
index 49d1c9e3ce38..0f45fbff34cb 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
| @@ -30,11 +30,15 @@ | |||
| 30 | #include <asm/atomic.h> | 30 | #include <asm/atomic.h> |
| 31 | #include "crc32defs.h" | 31 | #include "crc32defs.h" |
| 32 | #if CRC_LE_BITS == 8 | 32 | #if CRC_LE_BITS == 8 |
| 33 | #define tole(x) __constant_cpu_to_le32(x) | 33 | # define tole(x) __constant_cpu_to_le32(x) |
| 34 | #define tobe(x) __constant_cpu_to_be32(x) | ||
| 35 | #else | 34 | #else |
| 36 | #define tole(x) (x) | 35 | # define tole(x) (x) |
| 37 | #define tobe(x) (x) | 36 | #endif |
| 37 | |||
| 38 | #if CRC_BE_BITS == 8 | ||
| 39 | # define tobe(x) __constant_cpu_to_be32(x) | ||
| 40 | #else | ||
| 41 | # define tobe(x) (x) | ||
| 38 | #endif | 42 | #endif |
| 39 | #include "crc32table.h" | 43 | #include "crc32table.h" |
| 40 | 44 | ||
| @@ -42,6 +46,48 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>"); | |||
| 42 | MODULE_DESCRIPTION("Ethernet CRC32 calculations"); | 46 | MODULE_DESCRIPTION("Ethernet CRC32 calculations"); |
| 43 | MODULE_LICENSE("GPL"); | 47 | MODULE_LICENSE("GPL"); |
| 44 | 48 | ||
| 49 | #if CRC_LE_BITS == 8 || CRC_BE_BITS == 8 | ||
| 50 | |||
| 51 | static inline u32 | ||
| 52 | crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab) | ||
| 53 | { | ||
| 54 | # ifdef __LITTLE_ENDIAN | ||
| 55 | # define DO_CRC(x) crc = tab[(crc ^ (x)) & 255 ] ^ (crc >> 8) | ||
| 56 | # else | ||
| 57 | # define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8) | ||
| 58 | # endif | ||
| 59 | const u32 *b; | ||
| 60 | size_t rem_len; | ||
| 61 | |||
| 62 | /* Align it */ | ||
| 63 | if (unlikely((long)buf & 3 && len)) { | ||
| 64 | do { | ||
| 65 | DO_CRC(*buf++); | ||
| 66 | } while ((--len) && ((long)buf)&3); | ||
| 67 | } | ||
| 68 | rem_len = len & 3; | ||
| 69 | /* load data 32 bits wide, xor data 32 bits wide. */ | ||
| 70 | len = len >> 2; | ||
| 71 | b = (const u32 *)buf; | ||
| 72 | for (--b; len; --len) { | ||
| 73 | crc ^= *++b; /* use pre increment for speed */ | ||
| 74 | DO_CRC(0); | ||
| 75 | DO_CRC(0); | ||
| 76 | DO_CRC(0); | ||
| 77 | DO_CRC(0); | ||
| 78 | } | ||
| 79 | len = rem_len; | ||
| 80 | /* And the last few bytes */ | ||
| 81 | if (len) { | ||
| 82 | u8 *p = (u8 *)(b + 1) - 1; | ||
| 83 | do { | ||
| 84 | DO_CRC(*++p); /* use pre increment for speed */ | ||
| 85 | } while (--len); | ||
| 86 | } | ||
| 87 | return crc; | ||
| 88 | #undef DO_CRC | ||
| 89 | } | ||
| 90 | #endif | ||
| 45 | /** | 91 | /** |
| 46 | * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 | 92 | * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 |
| 47 | * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for | 93 | * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for |
| @@ -72,52 +118,11 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) | |||
| 72 | u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) | 118 | u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) |
| 73 | { | 119 | { |
| 74 | # if CRC_LE_BITS == 8 | 120 | # if CRC_LE_BITS == 8 |
| 75 | const u32 *b =(u32 *)p; | ||
| 76 | const u32 *tab = crc32table_le; | 121 | const u32 *tab = crc32table_le; |
| 77 | 122 | ||
| 78 | # ifdef __LITTLE_ENDIAN | ||
| 79 | # define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8) | ||
| 80 | # else | ||
| 81 | # define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8) | ||
| 82 | # endif | ||
| 83 | |||
| 84 | crc = __cpu_to_le32(crc); | 123 | crc = __cpu_to_le32(crc); |
| 85 | /* Align it */ | 124 | crc = crc32_body(crc, p, len, tab); |
| 86 | if(unlikely(((long)b)&3 && len)){ | ||
| 87 | do { | ||
| 88 | u8 *p = (u8 *)b; | ||
| 89 | DO_CRC(*p++); | ||
| 90 | b = (void *)p; | ||
| 91 | } while ((--len) && ((long)b)&3 ); | ||
| 92 | } | ||
| 93 | if(likely(len >= 4)){ | ||
| 94 | /* load data 32 bits wide, xor data 32 bits wide. */ | ||
| 95 | size_t save_len = len & 3; | ||
| 96 | len = len >> 2; | ||
| 97 | --b; /* use pre increment below(*++b) for speed */ | ||
| 98 | do { | ||
| 99 | crc ^= *++b; | ||
| 100 | DO_CRC(0); | ||
| 101 | DO_CRC(0); | ||
| 102 | DO_CRC(0); | ||
| 103 | DO_CRC(0); | ||
| 104 | } while (--len); | ||
| 105 | b++; /* point to next byte(s) */ | ||
| 106 | len = save_len; | ||
| 107 | } | ||
| 108 | /* And the last few bytes */ | ||
| 109 | if(len){ | ||
| 110 | do { | ||
| 111 | u8 *p = (u8 *)b; | ||
| 112 | DO_CRC(*p++); | ||
| 113 | b = (void *)p; | ||
| 114 | } while (--len); | ||
| 115 | } | ||
| 116 | |||
| 117 | return __le32_to_cpu(crc); | 125 | return __le32_to_cpu(crc); |
| 118 | #undef ENDIAN_SHIFT | ||
| 119 | #undef DO_CRC | ||
| 120 | |||
| 121 | # elif CRC_LE_BITS == 4 | 126 | # elif CRC_LE_BITS == 4 |
| 122 | while (len--) { | 127 | while (len--) { |
| 123 | crc ^= *p++; | 128 | crc ^= *p++; |
| @@ -170,51 +175,11 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) | |||
| 170 | u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) | 175 | u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) |
| 171 | { | 176 | { |
| 172 | # if CRC_BE_BITS == 8 | 177 | # if CRC_BE_BITS == 8 |
| 173 | const u32 *b =(u32 *)p; | ||
| 174 | const u32 *tab = crc32table_be; | 178 | const u32 *tab = crc32table_be; |
| 175 | 179 | ||
| 176 | # ifdef __LITTLE_ENDIAN | ||
| 177 | # define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8) | ||
| 178 | # else | ||
| 179 | # define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8) | ||
| 180 | # endif | ||
| 181 | |||
| 182 | crc = __cpu_to_be32(crc); | 180 | crc = __cpu_to_be32(crc); |
| 183 | /* Align it */ | 181 | crc = crc32_body(crc, p, len, tab); |
| 184 | if(unlikely(((long)b)&3 && len)){ | ||
| 185 | do { | ||
| 186 | u8 *p = (u8 *)b; | ||
| 187 | DO_CRC(*p++); | ||
| 188 | b = (u32 *)p; | ||
| 189 | } while ((--len) && ((long)b)&3 ); | ||
| 190 | } | ||
| 191 | if(likely(len >= 4)){ | ||
| 192 | /* load data 32 bits wide, xor data 32 bits wide. */ | ||
| 193 | size_t save_len = len & 3; | ||
| 194 | len = len >> 2; | ||
| 195 | --b; /* use pre increment below(*++b) for speed */ | ||
| 196 | do { | ||
| 197 | crc ^= *++b; | ||
| 198 | DO_CRC(0); | ||
| 199 | DO_CRC(0); | ||
| 200 | DO_CRC(0); | ||
| 201 | DO_CRC(0); | ||
| 202 | } while (--len); | ||
| 203 | b++; /* point to next byte(s) */ | ||
| 204 | len = save_len; | ||
| 205 | } | ||
| 206 | /* And the last few bytes */ | ||
| 207 | if(len){ | ||
| 208 | do { | ||
| 209 | u8 *p = (u8 *)b; | ||
| 210 | DO_CRC(*p++); | ||
| 211 | b = (void *)p; | ||
| 212 | } while (--len); | ||
| 213 | } | ||
| 214 | return __be32_to_cpu(crc); | 182 | return __be32_to_cpu(crc); |
| 215 | #undef ENDIAN_SHIFT | ||
| 216 | #undef DO_CRC | ||
| 217 | |||
| 218 | # elif CRC_BE_BITS == 4 | 183 | # elif CRC_BE_BITS == 4 |
| 219 | while (len--) { | 184 | while (len--) { |
| 220 | crc ^= *p++ << 24; | 185 | crc ^= *p++ << 24; |
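The shared crc32_body() helper factored out above still performs the classic one-byte-at-a-time table lookup in its DO_CRC() step, wrapped in a 32-bit-wide main loop. A minimal sketch of that per-byte update for the little-endian table, not part of the patch; the table itself (crc32table_le) is generated elsewhere and is assumed here:

#include <linux/types.h>

/* One DO_CRC() step for the little-endian table: consume one byte. */
static inline u32 crc32_le_update_byte(u32 crc, u8 byte, const u32 *tab)
{
	return tab[(crc ^ byte) & 255] ^ (crc >> 8);
}

/* Byte-at-a-time variant of the loop crc32_body() optimizes. */
static u32 crc32_le_bytes(u32 crc, const u8 *p, size_t len, const u32 *tab)
{
	while (len--)
		crc = crc32_le_update_byte(crc, *p++, tab);
	return crc;
}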
diff --git a/lib/ctype.c b/lib/ctype.c
index d02ace14a322..26baa620e95b 100644
--- a/lib/ctype.c
+++ b/lib/ctype.c
| @@ -7,30 +7,30 @@ | |||
| 7 | #include <linux/ctype.h> | 7 | #include <linux/ctype.h> |
| 8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
| 9 | 9 | ||
| 10 | unsigned char _ctype[] = { | 10 | const unsigned char _ctype[] = { |
| 11 | _C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ | 11 | _C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ |
| 12 | _C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ | 12 | _C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ |
| 13 | _C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ | 13 | _C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ |
| 14 | _C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ | 14 | _C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ |
| 15 | _S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ | 15 | _S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ |
| 16 | _P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ | 16 | _P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ |
| 17 | _D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ | 17 | _D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ |
| 18 | _D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ | 18 | _D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ |
| 19 | _P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ | 19 | _P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ |
| 20 | _U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ | 20 | _U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ |
| 21 | _U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ | 21 | _U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ |
| 22 | _U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ | 22 | _U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ |
| 23 | _P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ | 23 | _P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ |
| 24 | _L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ | 24 | _L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ |
| 25 | _L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ | 25 | _L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ |
| 26 | _L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ | 26 | _L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ |
| 27 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ | 27 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ |
| 28 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ | 28 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ |
| 29 | _S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ | 29 | _S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ |
| 30 | _P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ | 30 | _P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ |
| 31 | _U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ | 31 | _U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ |
| 32 | _U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ | 32 | _U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ |
| 33 | _L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ | 33 | _L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ |
| 34 | _L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ | 34 | _L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ |
| 35 | 35 | ||
| 36 | EXPORT_SYMBOL(_ctype); | 36 | EXPORT_SYMBOL(_ctype); |
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index bc3b11731b9c..5bf0020b9248 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
| @@ -23,6 +23,7 @@ | |||
| 23 | * shut up after that. | 23 | * shut up after that. |
| 24 | */ | 24 | */ |
| 25 | int debug_locks = 1; | 25 | int debug_locks = 1; |
| 26 | EXPORT_SYMBOL_GPL(debug_locks); | ||
| 26 | 27 | ||
| 27 | /* | 28 | /* |
| 28 | * The locking-testsuite uses <debug_locks_silent> to get a | 29 | * The locking-testsuite uses <debug_locks_silent> to get a |
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index eae56fddfa3b..a9a8996d286a 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
| @@ -26,14 +26,14 @@ | |||
| 26 | 26 | ||
| 27 | struct debug_bucket { | 27 | struct debug_bucket { |
| 28 | struct hlist_head list; | 28 | struct hlist_head list; |
| 29 | spinlock_t lock; | 29 | raw_spinlock_t lock; |
| 30 | }; | 30 | }; |
| 31 | 31 | ||
| 32 | static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; | 32 | static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; |
| 33 | 33 | ||
| 34 | static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; | 34 | static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; |
| 35 | 35 | ||
| 36 | static DEFINE_SPINLOCK(pool_lock); | 36 | static DEFINE_RAW_SPINLOCK(pool_lock); |
| 37 | 37 | ||
| 38 | static HLIST_HEAD(obj_pool); | 38 | static HLIST_HEAD(obj_pool); |
| 39 | 39 | ||
| @@ -96,10 +96,10 @@ static int fill_pool(void) | |||
| 96 | if (!new) | 96 | if (!new) |
| 97 | return obj_pool_free; | 97 | return obj_pool_free; |
| 98 | 98 | ||
| 99 | spin_lock_irqsave(&pool_lock, flags); | 99 | raw_spin_lock_irqsave(&pool_lock, flags); |
| 100 | hlist_add_head(&new->node, &obj_pool); | 100 | hlist_add_head(&new->node, &obj_pool); |
| 101 | obj_pool_free++; | 101 | obj_pool_free++; |
| 102 | spin_unlock_irqrestore(&pool_lock, flags); | 102 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
| 103 | } | 103 | } |
| 104 | return obj_pool_free; | 104 | return obj_pool_free; |
| 105 | } | 105 | } |
| @@ -133,7 +133,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) | |||
| 133 | { | 133 | { |
| 134 | struct debug_obj *obj = NULL; | 134 | struct debug_obj *obj = NULL; |
| 135 | 135 | ||
| 136 | spin_lock(&pool_lock); | 136 | raw_spin_lock(&pool_lock); |
| 137 | if (obj_pool.first) { | 137 | if (obj_pool.first) { |
| 138 | obj = hlist_entry(obj_pool.first, typeof(*obj), node); | 138 | obj = hlist_entry(obj_pool.first, typeof(*obj), node); |
| 139 | 139 | ||
| @@ -152,7 +152,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) | |||
| 152 | if (obj_pool_free < obj_pool_min_free) | 152 | if (obj_pool_free < obj_pool_min_free) |
| 153 | obj_pool_min_free = obj_pool_free; | 153 | obj_pool_min_free = obj_pool_free; |
| 154 | } | 154 | } |
| 155 | spin_unlock(&pool_lock); | 155 | raw_spin_unlock(&pool_lock); |
| 156 | 156 | ||
| 157 | return obj; | 157 | return obj; |
| 158 | } | 158 | } |
| @@ -165,7 +165,7 @@ static void free_obj_work(struct work_struct *work) | |||
| 165 | struct debug_obj *obj; | 165 | struct debug_obj *obj; |
| 166 | unsigned long flags; | 166 | unsigned long flags; |
| 167 | 167 | ||
| 168 | spin_lock_irqsave(&pool_lock, flags); | 168 | raw_spin_lock_irqsave(&pool_lock, flags); |
| 169 | while (obj_pool_free > ODEBUG_POOL_SIZE) { | 169 | while (obj_pool_free > ODEBUG_POOL_SIZE) { |
| 170 | obj = hlist_entry(obj_pool.first, typeof(*obj), node); | 170 | obj = hlist_entry(obj_pool.first, typeof(*obj), node); |
| 171 | hlist_del(&obj->node); | 171 | hlist_del(&obj->node); |
| @@ -174,11 +174,11 @@ static void free_obj_work(struct work_struct *work) | |||
| 174 | * We release pool_lock across kmem_cache_free() to | 174 | * We release pool_lock across kmem_cache_free() to |
| 175 | * avoid contention on pool_lock. | 175 | * avoid contention on pool_lock. |
| 176 | */ | 176 | */ |
| 177 | spin_unlock_irqrestore(&pool_lock, flags); | 177 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
| 178 | kmem_cache_free(obj_cache, obj); | 178 | kmem_cache_free(obj_cache, obj); |
| 179 | spin_lock_irqsave(&pool_lock, flags); | 179 | raw_spin_lock_irqsave(&pool_lock, flags); |
| 180 | } | 180 | } |
| 181 | spin_unlock_irqrestore(&pool_lock, flags); | 181 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
| 182 | } | 182 | } |
| 183 | 183 | ||
| 184 | /* | 184 | /* |
| @@ -190,7 +190,7 @@ static void free_object(struct debug_obj *obj) | |||
| 190 | unsigned long flags; | 190 | unsigned long flags; |
| 191 | int sched = 0; | 191 | int sched = 0; |
| 192 | 192 | ||
| 193 | spin_lock_irqsave(&pool_lock, flags); | 193 | raw_spin_lock_irqsave(&pool_lock, flags); |
| 194 | /* | 194 | /* |
| 195 | * schedule work when the pool is filled and the cache is | 195 | * schedule work when the pool is filled and the cache is |
| 196 | * initialized: | 196 | * initialized: |
| @@ -200,7 +200,7 @@ static void free_object(struct debug_obj *obj) | |||
| 200 | hlist_add_head(&obj->node, &obj_pool); | 200 | hlist_add_head(&obj->node, &obj_pool); |
| 201 | obj_pool_free++; | 201 | obj_pool_free++; |
| 202 | obj_pool_used--; | 202 | obj_pool_used--; |
| 203 | spin_unlock_irqrestore(&pool_lock, flags); | 203 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
| 204 | if (sched) | 204 | if (sched) |
| 205 | schedule_work(&debug_obj_work); | 205 | schedule_work(&debug_obj_work); |
| 206 | } | 206 | } |
| @@ -221,9 +221,9 @@ static void debug_objects_oom(void) | |||
| 221 | printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); | 221 | printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); |
| 222 | 222 | ||
| 223 | for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { | 223 | for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { |
| 224 | spin_lock_irqsave(&db->lock, flags); | 224 | raw_spin_lock_irqsave(&db->lock, flags); |
| 225 | hlist_move_list(&db->list, &freelist); | 225 | hlist_move_list(&db->list, &freelist); |
| 226 | spin_unlock_irqrestore(&db->lock, flags); | 226 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 227 | 227 | ||
| 228 | /* Now free them */ | 228 | /* Now free them */ |
| 229 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { | 229 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { |
| @@ -303,14 +303,14 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) | |||
| 303 | 303 | ||
| 304 | db = get_bucket((unsigned long) addr); | 304 | db = get_bucket((unsigned long) addr); |
| 305 | 305 | ||
| 306 | spin_lock_irqsave(&db->lock, flags); | 306 | raw_spin_lock_irqsave(&db->lock, flags); |
| 307 | 307 | ||
| 308 | obj = lookup_object(addr, db); | 308 | obj = lookup_object(addr, db); |
| 309 | if (!obj) { | 309 | if (!obj) { |
| 310 | obj = alloc_object(addr, db, descr); | 310 | obj = alloc_object(addr, db, descr); |
| 311 | if (!obj) { | 311 | if (!obj) { |
| 312 | debug_objects_enabled = 0; | 312 | debug_objects_enabled = 0; |
| 313 | spin_unlock_irqrestore(&db->lock, flags); | 313 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 314 | debug_objects_oom(); | 314 | debug_objects_oom(); |
| 315 | return; | 315 | return; |
| 316 | } | 316 | } |
| @@ -327,7 +327,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) | |||
| 327 | case ODEBUG_STATE_ACTIVE: | 327 | case ODEBUG_STATE_ACTIVE: |
| 328 | debug_print_object(obj, "init"); | 328 | debug_print_object(obj, "init"); |
| 329 | state = obj->state; | 329 | state = obj->state; |
| 330 | spin_unlock_irqrestore(&db->lock, flags); | 330 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 331 | debug_object_fixup(descr->fixup_init, addr, state); | 331 | debug_object_fixup(descr->fixup_init, addr, state); |
| 332 | return; | 332 | return; |
| 333 | 333 | ||
| @@ -338,7 +338,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) | |||
| 338 | break; | 338 | break; |
| 339 | } | 339 | } |
| 340 | 340 | ||
| 341 | spin_unlock_irqrestore(&db->lock, flags); | 341 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 342 | } | 342 | } |
| 343 | 343 | ||
| 344 | /** | 344 | /** |
| @@ -385,7 +385,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) | |||
| 385 | 385 | ||
| 386 | db = get_bucket((unsigned long) addr); | 386 | db = get_bucket((unsigned long) addr); |
| 387 | 387 | ||
| 388 | spin_lock_irqsave(&db->lock, flags); | 388 | raw_spin_lock_irqsave(&db->lock, flags); |
| 389 | 389 | ||
| 390 | obj = lookup_object(addr, db); | 390 | obj = lookup_object(addr, db); |
| 391 | if (obj) { | 391 | if (obj) { |
| @@ -398,7 +398,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) | |||
| 398 | case ODEBUG_STATE_ACTIVE: | 398 | case ODEBUG_STATE_ACTIVE: |
| 399 | debug_print_object(obj, "activate"); | 399 | debug_print_object(obj, "activate"); |
| 400 | state = obj->state; | 400 | state = obj->state; |
| 401 | spin_unlock_irqrestore(&db->lock, flags); | 401 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 402 | debug_object_fixup(descr->fixup_activate, addr, state); | 402 | debug_object_fixup(descr->fixup_activate, addr, state); |
| 403 | return; | 403 | return; |
| 404 | 404 | ||
| @@ -408,11 +408,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) | |||
| 408 | default: | 408 | default: |
| 409 | break; | 409 | break; |
| 410 | } | 410 | } |
| 411 | spin_unlock_irqrestore(&db->lock, flags); | 411 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 412 | return; | 412 | return; |
| 413 | } | 413 | } |
| 414 | 414 | ||
| 415 | spin_unlock_irqrestore(&db->lock, flags); | 415 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 416 | /* | 416 | /* |
| 417 | * This happens when a static object is activated. We | 417 | * This happens when a static object is activated. We |
| 418 | * let the type specific code decide whether this is | 418 | * let the type specific code decide whether this is |
| @@ -438,7 +438,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) | |||
| 438 | 438 | ||
| 439 | db = get_bucket((unsigned long) addr); | 439 | db = get_bucket((unsigned long) addr); |
| 440 | 440 | ||
| 441 | spin_lock_irqsave(&db->lock, flags); | 441 | raw_spin_lock_irqsave(&db->lock, flags); |
| 442 | 442 | ||
| 443 | obj = lookup_object(addr, db); | 443 | obj = lookup_object(addr, db); |
| 444 | if (obj) { | 444 | if (obj) { |
| @@ -463,7 +463,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) | |||
| 463 | debug_print_object(&o, "deactivate"); | 463 | debug_print_object(&o, "deactivate"); |
| 464 | } | 464 | } |
| 465 | 465 | ||
| 466 | spin_unlock_irqrestore(&db->lock, flags); | 466 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 467 | } | 467 | } |
| 468 | 468 | ||
| 469 | /** | 469 | /** |
| @@ -483,7 +483,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr) | |||
| 483 | 483 | ||
| 484 | db = get_bucket((unsigned long) addr); | 484 | db = get_bucket((unsigned long) addr); |
| 485 | 485 | ||
| 486 | spin_lock_irqsave(&db->lock, flags); | 486 | raw_spin_lock_irqsave(&db->lock, flags); |
| 487 | 487 | ||
| 488 | obj = lookup_object(addr, db); | 488 | obj = lookup_object(addr, db); |
| 489 | if (!obj) | 489 | if (!obj) |
| @@ -498,7 +498,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr) | |||
| 498 | case ODEBUG_STATE_ACTIVE: | 498 | case ODEBUG_STATE_ACTIVE: |
| 499 | debug_print_object(obj, "destroy"); | 499 | debug_print_object(obj, "destroy"); |
| 500 | state = obj->state; | 500 | state = obj->state; |
| 501 | spin_unlock_irqrestore(&db->lock, flags); | 501 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 502 | debug_object_fixup(descr->fixup_destroy, addr, state); | 502 | debug_object_fixup(descr->fixup_destroy, addr, state); |
| 503 | return; | 503 | return; |
| 504 | 504 | ||
| @@ -509,7 +509,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr) | |||
| 509 | break; | 509 | break; |
| 510 | } | 510 | } |
| 511 | out_unlock: | 511 | out_unlock: |
| 512 | spin_unlock_irqrestore(&db->lock, flags); | 512 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 513 | } | 513 | } |
| 514 | 514 | ||
| 515 | /** | 515 | /** |
| @@ -529,7 +529,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr) | |||
| 529 | 529 | ||
| 530 | db = get_bucket((unsigned long) addr); | 530 | db = get_bucket((unsigned long) addr); |
| 531 | 531 | ||
| 532 | spin_lock_irqsave(&db->lock, flags); | 532 | raw_spin_lock_irqsave(&db->lock, flags); |
| 533 | 533 | ||
| 534 | obj = lookup_object(addr, db); | 534 | obj = lookup_object(addr, db); |
| 535 | if (!obj) | 535 | if (!obj) |
| @@ -539,17 +539,17 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr) | |||
| 539 | case ODEBUG_STATE_ACTIVE: | 539 | case ODEBUG_STATE_ACTIVE: |
| 540 | debug_print_object(obj, "free"); | 540 | debug_print_object(obj, "free"); |
| 541 | state = obj->state; | 541 | state = obj->state; |
| 542 | spin_unlock_irqrestore(&db->lock, flags); | 542 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 543 | debug_object_fixup(descr->fixup_free, addr, state); | 543 | debug_object_fixup(descr->fixup_free, addr, state); |
| 544 | return; | 544 | return; |
| 545 | default: | 545 | default: |
| 546 | hlist_del(&obj->node); | 546 | hlist_del(&obj->node); |
| 547 | spin_unlock_irqrestore(&db->lock, flags); | 547 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 548 | free_object(obj); | 548 | free_object(obj); |
| 549 | return; | 549 | return; |
| 550 | } | 550 | } |
| 551 | out_unlock: | 551 | out_unlock: |
| 552 | spin_unlock_irqrestore(&db->lock, flags); | 552 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 553 | } | 553 | } |
| 554 | 554 | ||
| 555 | #ifdef CONFIG_DEBUG_OBJECTS_FREE | 555 | #ifdef CONFIG_DEBUG_OBJECTS_FREE |
| @@ -575,7 +575,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size) | |||
| 575 | 575 | ||
| 576 | repeat: | 576 | repeat: |
| 577 | cnt = 0; | 577 | cnt = 0; |
| 578 | spin_lock_irqsave(&db->lock, flags); | 578 | raw_spin_lock_irqsave(&db->lock, flags); |
| 579 | hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { | 579 | hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { |
| 580 | cnt++; | 580 | cnt++; |
| 581 | oaddr = (unsigned long) obj->object; | 581 | oaddr = (unsigned long) obj->object; |
| @@ -587,7 +587,7 @@ repeat: | |||
| 587 | debug_print_object(obj, "free"); | 587 | debug_print_object(obj, "free"); |
| 588 | descr = obj->descr; | 588 | descr = obj->descr; |
| 589 | state = obj->state; | 589 | state = obj->state; |
| 590 | spin_unlock_irqrestore(&db->lock, flags); | 590 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 591 | debug_object_fixup(descr->fixup_free, | 591 | debug_object_fixup(descr->fixup_free, |
| 592 | (void *) oaddr, state); | 592 | (void *) oaddr, state); |
| 593 | goto repeat; | 593 | goto repeat; |
| @@ -597,7 +597,7 @@ repeat: | |||
| 597 | break; | 597 | break; |
| 598 | } | 598 | } |
| 599 | } | 599 | } |
| 600 | spin_unlock_irqrestore(&db->lock, flags); | 600 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 601 | 601 | ||
| 602 | /* Now free them */ | 602 | /* Now free them */ |
| 603 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { | 603 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { |
| @@ -783,7 +783,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) | |||
| 783 | 783 | ||
| 784 | db = get_bucket((unsigned long) addr); | 784 | db = get_bucket((unsigned long) addr); |
| 785 | 785 | ||
| 786 | spin_lock_irqsave(&db->lock, flags); | 786 | raw_spin_lock_irqsave(&db->lock, flags); |
| 787 | 787 | ||
| 788 | obj = lookup_object(addr, db); | 788 | obj = lookup_object(addr, db); |
| 789 | if (!obj && state != ODEBUG_STATE_NONE) { | 789 | if (!obj && state != ODEBUG_STATE_NONE) { |
| @@ -807,7 +807,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) | |||
| 807 | } | 807 | } |
| 808 | res = 0; | 808 | res = 0; |
| 809 | out: | 809 | out: |
| 810 | spin_unlock_irqrestore(&db->lock, flags); | 810 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 811 | if (res) | 811 | if (res) |
| 812 | debug_objects_enabled = 0; | 812 | debug_objects_enabled = 0; |
| 813 | return res; | 813 | return res; |
| @@ -907,7 +907,7 @@ void __init debug_objects_early_init(void) | |||
| 907 | int i; | 907 | int i; |
| 908 | 908 | ||
| 909 | for (i = 0; i < ODEBUG_HASH_SIZE; i++) | 909 | for (i = 0; i < ODEBUG_HASH_SIZE; i++) |
| 910 | spin_lock_init(&obj_hash[i].lock); | 910 | raw_spin_lock_init(&obj_hash[i].lock); |
| 911 | 911 | ||
| 912 | for (i = 0; i < ODEBUG_POOL_SIZE; i++) | 912 | for (i = 0; i < ODEBUG_POOL_SIZE; i++) |
| 913 | hlist_add_head(&obj_static_pool[i].node, &obj_pool); | 913 | hlist_add_head(&obj_static_pool[i].node, &obj_pool); |
diff --git a/lib/decompress.c b/lib/decompress.c
index d2842f571674..a7606815541f 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <linux/decompress/bunzip2.h> | 9 | #include <linux/decompress/bunzip2.h> |
| 10 | #include <linux/decompress/unlzma.h> | 10 | #include <linux/decompress/unlzma.h> |
| 11 | #include <linux/decompress/inflate.h> | 11 | #include <linux/decompress/inflate.h> |
| 12 | #include <linux/decompress/unlzo.h> | ||
| 12 | 13 | ||
| 13 | #include <linux/types.h> | 14 | #include <linux/types.h> |
| 14 | #include <linux/string.h> | 15 | #include <linux/string.h> |
| @@ -22,6 +23,9 @@ | |||
| 22 | #ifndef CONFIG_DECOMPRESS_LZMA | 23 | #ifndef CONFIG_DECOMPRESS_LZMA |
| 23 | # define unlzma NULL | 24 | # define unlzma NULL |
| 24 | #endif | 25 | #endif |
| 26 | #ifndef CONFIG_DECOMPRESS_LZO | ||
| 27 | # define unlzo NULL | ||
| 28 | #endif | ||
| 25 | 29 | ||
| 26 | static const struct compress_format { | 30 | static const struct compress_format { |
| 27 | unsigned char magic[2]; | 31 | unsigned char magic[2]; |
| @@ -32,6 +36,7 @@ static const struct compress_format { | |||
| 32 | { {037, 0236}, "gzip", gunzip }, | 36 | { {037, 0236}, "gzip", gunzip }, |
| 33 | { {0x42, 0x5a}, "bzip2", bunzip2 }, | 37 | { {0x42, 0x5a}, "bzip2", bunzip2 }, |
| 34 | { {0x5d, 0x00}, "lzma", unlzma }, | 38 | { {0x5d, 0x00}, "lzma", unlzma }, |
| 39 | { {0x89, 0x4c}, "lzo", unlzo }, | ||
| 35 | { {0, 0}, NULL, NULL } | 40 | { {0, 0}, NULL, NULL } |
| 36 | }; | 41 | }; |
| 37 | 42 | ||
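For illustration, a self-contained sketch of the magic-byte dispatch idea the table above serves: match the first two bytes of the compressed image against each entry and hand back the matching decompressor. The struct and function names below are invented for the sketch and the decompressor pointers are left NULL; only the magic values come from the hunk above:

#include <stddef.h>

typedef int (*decompress_fn)(unsigned char *buf, int len);

struct magic_entry {
	unsigned char magic[2];
	const char *name;
	decompress_fn decompress;
};

/* Entries mirroring the hunk above; real code stores the decompressors. */
static const struct magic_entry formats[] = {
	{ {037, 0236}, "gzip",  NULL },
	{ {0x42, 0x5a}, "bzip2", NULL },
	{ {0x5d, 0x00}, "lzma",  NULL },
	{ {0x89, 0x4c}, "lzo",   NULL },
	{ {0, 0}, NULL, NULL }
};

static decompress_fn find_decompressor(const unsigned char *img, int len,
					const char **name)
{
	const struct magic_entry *m;

	if (len < 2)
		return NULL;	/* need both magic bytes */

	for (m = formats; m->name; m++) {
		if (img[0] == m->magic[0] && img[1] == m->magic[1]) {
			if (name)
				*name = m->name;
			return m->decompress;
		}
	}
	return NULL;
}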
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 600f473a5610..a4e971dee102 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
| @@ -299,7 +299,7 @@ static int INIT get_next_block(struct bunzip_data *bd) | |||
| 299 | again when using them (during symbol decoding).*/ | 299 | again when using them (during symbol decoding).*/ |
| 300 | base = hufGroup->base-1; | 300 | base = hufGroup->base-1; |
| 301 | limit = hufGroup->limit-1; | 301 | limit = hufGroup->limit-1; |
| 302 | /* Calculate permute[]. Concurently, initialize | 302 | /* Calculate permute[]. Concurrently, initialize |
| 303 | * temp[] and limit[]. */ | 303 | * temp[] and limit[]. */ |
| 304 | pp = 0; | 304 | pp = 0; |
| 305 | for (i = minLen; i <= maxLen; i++) { | 305 | for (i = minLen; i <= maxLen; i++) { |
| @@ -637,6 +637,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len, | |||
| 637 | 637 | ||
| 638 | /* Allocate bunzip_data. Most fields initialize to zero. */ | 638 | /* Allocate bunzip_data. Most fields initialize to zero. */ |
| 639 | bd = *bdp = malloc(i); | 639 | bd = *bdp = malloc(i); |
| 640 | if (!bd) | ||
| 641 | return RETVAL_OUT_OF_MEMORY; | ||
| 640 | memset(bd, 0, sizeof(struct bunzip_data)); | 642 | memset(bd, 0, sizeof(struct bunzip_data)); |
| 641 | /* Setup input buffer */ | 643 | /* Setup input buffer */ |
| 642 | bd->inbuf = inbuf; | 644 | bd->inbuf = inbuf; |
| @@ -664,6 +666,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len, | |||
| 664 | bd->dbufSize = 100000*(i-BZh0); | 666 | bd->dbufSize = 100000*(i-BZh0); |
| 665 | 667 | ||
| 666 | bd->dbuf = large_malloc(bd->dbufSize * sizeof(int)); | 668 | bd->dbuf = large_malloc(bd->dbufSize * sizeof(int)); |
| 669 | if (!bd->dbuf) | ||
| 670 | return RETVAL_OUT_OF_MEMORY; | ||
| 667 | return RETVAL_OK; | 671 | return RETVAL_OK; |
| 668 | } | 672 | } |
| 669 | 673 | ||
| @@ -686,7 +690,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, | |||
| 686 | 690 | ||
| 687 | if (!outbuf) { | 691 | if (!outbuf) { |
| 688 | error("Could not allocate output bufer"); | 692 | error("Could not allocate output bufer"); |
| 689 | return -1; | 693 | return RETVAL_OUT_OF_MEMORY; |
| 690 | } | 694 | } |
| 691 | if (buf) | 695 | if (buf) |
| 692 | inbuf = buf; | 696 | inbuf = buf; |
| @@ -694,6 +698,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, | |||
| 694 | inbuf = malloc(BZIP2_IOBUF_SIZE); | 698 | inbuf = malloc(BZIP2_IOBUF_SIZE); |
| 695 | if (!inbuf) { | 699 | if (!inbuf) { |
| 696 | error("Could not allocate input bufer"); | 700 | error("Could not allocate input bufer"); |
| 701 | i = RETVAL_OUT_OF_MEMORY; | ||
| 697 | goto exit_0; | 702 | goto exit_0; |
| 698 | } | 703 | } |
| 699 | i = start_bunzip(&bd, inbuf, len, fill); | 704 | i = start_bunzip(&bd, inbuf, len, fill); |
| @@ -720,11 +725,14 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, | |||
| 720 | } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) { | 725 | } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) { |
| 721 | error("Compressed file ends unexpectedly"); | 726 | error("Compressed file ends unexpectedly"); |
| 722 | } | 727 | } |
| 728 | if (!bd) | ||
| 729 | goto exit_1; | ||
| 723 | if (bd->dbuf) | 730 | if (bd->dbuf) |
| 724 | large_free(bd->dbuf); | 731 | large_free(bd->dbuf); |
| 725 | if (pos) | 732 | if (pos) |
| 726 | *pos = bd->inbufPos; | 733 | *pos = bd->inbufPos; |
| 727 | free(bd); | 734 | free(bd); |
| 735 | exit_1: | ||
| 728 | if (!buf) | 736 | if (!buf) |
| 729 | free(inbuf); | 737 | free(inbuf); |
| 730 | exit_0: | 738 | exit_0: |
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
new file mode 100644
index 000000000000..db521f45626e
--- /dev/null
+++ b/lib/decompress_unlzo.c
| @@ -0,0 +1,209 @@ | |||
| 1 | /* | ||
| 2 | * LZO decompressor for the Linux kernel. Code borrowed from the lzo | ||
| 3 | * implementation by Markus Franz Xaver Johannes Oberhumer. | ||
| 4 | * | ||
| 5 | * Linux kernel adaptation: | ||
| 6 | * Copyright (C) 2009 | ||
| 7 | * Albin Tonnerre, Free Electrons <albin.tonnerre@free-electrons.com> | ||
| 8 | * | ||
| 9 | * Original code: | ||
| 10 | * Copyright (C) 1996-2005 Markus Franz Xaver Johannes Oberhumer | ||
| 11 | * All Rights Reserved. | ||
| 12 | * | ||
| 13 | * lzop and the LZO library are free software; you can redistribute them | ||
| 14 | * and/or modify them under the terms of the GNU General Public License as | ||
| 15 | * published by the Free Software Foundation; either version 2 of | ||
| 16 | * the License, or (at your option) any later version. | ||
| 17 | * | ||
| 18 | * This program is distributed in the hope that it will be useful, | ||
| 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 21 | * GNU General Public License for more details. | ||
| 22 | * | ||
| 23 | * You should have received a copy of the GNU General Public License | ||
| 24 | * along with this program; see the file COPYING. | ||
| 25 | * If not, write to the Free Software Foundation, Inc., | ||
| 26 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 27 | * | ||
| 28 | * Markus F.X.J. Oberhumer | ||
| 29 | * <markus@oberhumer.com> | ||
| 30 | * http://www.oberhumer.com/opensource/lzop/ | ||
| 31 | */ | ||
| 32 | |||
| 33 | #ifdef STATIC | ||
| 34 | #include "lzo/lzo1x_decompress.c" | ||
| 35 | #else | ||
| 36 | #include <linux/slab.h> | ||
| 37 | #include <linux/decompress/unlzo.h> | ||
| 38 | #endif | ||
| 39 | |||
| 40 | #include <linux/types.h> | ||
| 41 | #include <linux/lzo.h> | ||
| 42 | #include <linux/decompress/mm.h> | ||
| 43 | |||
| 44 | #include <linux/compiler.h> | ||
| 45 | #include <asm/unaligned.h> | ||
| 46 | |||
| 47 | static const unsigned char lzop_magic[] = { | ||
| 48 | 0x89, 0x4c, 0x5a, 0x4f, 0x00, 0x0d, 0x0a, 0x1a, 0x0a }; | ||
| 49 | |||
| 50 | #define LZO_BLOCK_SIZE (256*1024l) | ||
| 51 | #define HEADER_HAS_FILTER 0x00000800L | ||
| 52 | |||
| 53 | STATIC inline int INIT parse_header(u8 *input, u8 *skip) | ||
| 54 | { | ||
| 55 | int l; | ||
| 56 | u8 *parse = input; | ||
| 57 | u8 level = 0; | ||
| 58 | u16 version; | ||
| 59 | |||
| 60 | /* read magic: 9 first bytes */ | ||
| 61 | for (l = 0; l < 9; l++) { | ||
| 62 | if (*parse++ != lzop_magic[l]) | ||
| 63 | return 0; | ||
| 64 | } | ||
| 65 | /* get version (2bytes), skip library version (2), | ||
| 66 | * 'need to be extracted' version (2) and | ||
| 67 | * method (1) */ | ||
| 68 | version = get_unaligned_be16(parse); | ||
| 69 | parse += 7; | ||
| 70 | if (version >= 0x0940) | ||
| 71 | level = *parse++; | ||
| 72 | if (get_unaligned_be32(parse) & HEADER_HAS_FILTER) | ||
| 73 | parse += 8; /* flags + filter info */ | ||
| 74 | else | ||
| 75 | parse += 4; /* flags */ | ||
| 76 | |||
| 77 | /* skip mode and mtime_low */ | ||
| 78 | parse += 8; | ||
| 79 | if (version >= 0x0940) | ||
| 80 | parse += 4; /* skip mtime_high */ | ||
| 81 | |||
| 82 | l = *parse++; | ||
| 83 | /* don't care about the file name, and skip checksum */ | ||
| 84 | parse += l + 4; | ||
| 85 | |||
| 86 | *skip = parse - input; | ||
| 87 | return 1; | ||
| 88 | } | ||
| 89 | |||
| 90 | STATIC inline int INIT unlzo(u8 *input, int in_len, | ||
| 91 | int (*fill) (void *, unsigned int), | ||
| 92 | int (*flush) (void *, unsigned int), | ||
| 93 | u8 *output, int *posp, | ||
| 94 | void (*error_fn) (char *x)) | ||
| 95 | { | ||
| 96 | u8 skip = 0, r = 0; | ||
| 97 | u32 src_len, dst_len; | ||
| 98 | size_t tmp; | ||
| 99 | u8 *in_buf, *in_buf_save, *out_buf; | ||
| 100 | int obytes_processed = 0; | ||
| 101 | |||
| 102 | set_error_fn(error_fn); | ||
| 103 | |||
| 104 | if (output) { | ||
| 105 | out_buf = output; | ||
| 106 | } else if (!flush) { | ||
| 107 | error("NULL output pointer and no flush function provided"); | ||
| 108 | goto exit; | ||
| 109 | } else { | ||
| 110 | out_buf = malloc(LZO_BLOCK_SIZE); | ||
| 111 | if (!out_buf) { | ||
| 112 | error("Could not allocate output buffer"); | ||
| 113 | goto exit; | ||
| 114 | } | ||
| 115 | } | ||
| 116 | |||
| 117 | if (input && fill) { | ||
| 118 | error("Both input pointer and fill function provided, don't know what to do"); | ||
| 119 | goto exit_1; | ||
| 120 | } else if (input) { | ||
| 121 | in_buf = input; | ||
| 122 | } else if (!fill || !posp) { | ||
| 123 | error("NULL input pointer and missing position pointer or fill function"); | ||
| 124 | goto exit_1; | ||
| 125 | } else { | ||
| 126 | in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE)); | ||
| 127 | if (!in_buf) { | ||
| 128 | error("Could not allocate input buffer"); | ||
| 129 | goto exit_1; | ||
| 130 | } | ||
| 131 | } | ||
| 132 | in_buf_save = in_buf; | ||
| 133 | |||
| 134 | if (posp) | ||
| 135 | *posp = 0; | ||
| 136 | |||
| 137 | if (fill) | ||
| 138 | fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE)); | ||
| 139 | |||
| 140 | if (!parse_header(input, &skip)) { | ||
| 141 | error("invalid header"); | ||
| 142 | goto exit_2; | ||
| 143 | } | ||
| 144 | in_buf += skip; | ||
| 145 | |||
| 146 | if (posp) | ||
| 147 | *posp = skip; | ||
| 148 | |||
| 149 | for (;;) { | ||
| 150 | /* read uncompressed block size */ | ||
| 151 | dst_len = get_unaligned_be32(in_buf); | ||
| 152 | in_buf += 4; | ||
| 153 | |||
| 154 | /* exit if last block */ | ||
| 155 | if (dst_len == 0) { | ||
| 156 | if (posp) | ||
| 157 | *posp += 4; | ||
| 158 | break; | ||
| 159 | } | ||
| 160 | |||
| 161 | if (dst_len > LZO_BLOCK_SIZE) { | ||
| 162 | error("dest len longer than block size"); | ||
| 163 | goto exit_2; | ||
| 164 | } | ||
| 165 | |||
| 166 | /* read compressed block size, and skip block checksum info */ | ||
| 167 | src_len = get_unaligned_be32(in_buf); | ||
| 168 | in_buf += 8; | ||
| 169 | |||
| 170 | if (src_len <= 0 || src_len > dst_len) { | ||
| 171 | error("file corrupted"); | ||
| 172 | goto exit_2; | ||
| 173 | } | ||
| 174 | |||
| 175 | /* decompress */ | ||
| 176 | tmp = dst_len; | ||
| 177 | r = lzo1x_decompress_safe((u8 *) in_buf, src_len, | ||
| 178 | out_buf, &tmp); | ||
| 179 | |||
| 180 | if (r != LZO_E_OK || dst_len != tmp) { | ||
| 181 | error("Compressed data violation"); | ||
| 182 | goto exit_2; | ||
| 183 | } | ||
| 184 | |||
| 185 | obytes_processed += dst_len; | ||
| 186 | if (flush) | ||
| 187 | flush(out_buf, dst_len); | ||
| 188 | if (output) | ||
| 189 | out_buf += dst_len; | ||
| 190 | if (posp) | ||
| 191 | *posp += src_len + 12; | ||
| 192 | if (fill) { | ||
| 193 | in_buf = in_buf_save; | ||
| 194 | fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE)); | ||
| 195 | } else | ||
| 196 | in_buf += src_len; | ||
| 197 | } | ||
| 198 | |||
| 199 | exit_2: | ||
| 200 | if (!input) | ||
| 201 | free(in_buf); | ||
| 202 | exit_1: | ||
| 203 | if (!output) | ||
| 204 | free(out_buf); | ||
| 205 | exit: | ||
| 206 | return obytes_processed; | ||
| 207 | } | ||
| 208 | |||
| 209 | #define decompress unlzo | ||
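
decompress_unlzo.c walks the lzop container: a 9-byte magic, a version-dependent header skipped by parse_header(), then a sequence of blocks each prefixed by a 4-byte big-endian uncompressed length and a 4-byte compressed length (plus a per-block checksum word the code steps over), terminated by a zero uncompressed length. Below is a userspace sketch of that block loop with the actual lzo1x_decompress_safe() call stubbed out; the framing follows the code above, and the sample buffer in main() is fabricated purely to exercise the loop.

#include <stdint.h>
#include <stdio.h>

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* Walk the block stream that follows the lzop file header:
 * [dst_len][src_len][checksum][src_len bytes of data] ... [dst_len == 0] */
static int walk_blocks(const uint8_t *p, size_t len, size_t block_size)
{
	size_t off = 0;

	for (;;) {
		uint32_t dst_len, src_len;

		if (off + 4 > len)
			return -1;
		dst_len = get_be32(p + off);
		off += 4;
		if (dst_len == 0)		/* end-of-stream marker */
			return 0;
		if (dst_len > block_size)	/* larger than the advertised maximum */
			return -1;
		if (off + 8 > len)
			return -1;
		src_len = get_be32(p + off);
		off += 8;			/* compressed length + block checksum */
		if (src_len == 0 || src_len > dst_len || off + src_len > len)
			return -1;

		/* here the kernel calls lzo1x_decompress_safe(p + off, src_len, ...) */
		printf("block: %u -> %u bytes\n", (unsigned)src_len, (unsigned)dst_len);
		off += src_len;
	}
}

int main(void)
{
	/* one fake block followed by the zero-length terminator */
	uint8_t buf[] = { 0,0,0,4, 0,0,0,4, 0,0,0,0, 'd','a','t','a', 0,0,0,0 };

	return walk_blocks(buf, sizeof(buf), 256 * 1024) ? 1 : 0;
}
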
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index ce6b7eabf674..ba8b67039d13 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
| @@ -259,7 +259,7 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, | |||
| 259 | * times. Without a hardware IOMMU this results in the | 259 | * times. Without a hardware IOMMU this results in the |
| 260 | * same device addresses being put into the dma-debug | 260 | * same device addresses being put into the dma-debug |
| 261 | * hash multiple times too. This can result in false | 261 | * hash multiple times too. This can result in false |
| 262 | * positives being reported. Therfore we implement a | 262 | * positives being reported. Therefore we implement a |
| 263 | * best-fit algorithm here which returns the entry from | 263 | * best-fit algorithm here which returns the entry from |
| 264 | * the hash which fits best to the reference value | 264 | * the hash which fits best to the reference value |
| 265 | * instead of the first-fit. | 265 | * instead of the first-fit. |
| @@ -587,7 +587,7 @@ out_unlock: | |||
| 587 | return count; | 587 | return count; |
| 588 | } | 588 | } |
| 589 | 589 | ||
| 590 | const struct file_operations filter_fops = { | 590 | static const struct file_operations filter_fops = { |
| 591 | .read = filter_read, | 591 | .read = filter_read, |
| 592 | .write = filter_write, | 592 | .write = filter_write, |
| 593 | }; | 593 | }; |
| @@ -670,12 +670,13 @@ static int device_dma_allocations(struct device *dev) | |||
| 670 | return count; | 670 | return count; |
| 671 | } | 671 | } |
| 672 | 672 | ||
| 673 | static int dma_debug_device_change(struct notifier_block *nb, | 673 | static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) |
| 674 | unsigned long action, void *data) | ||
| 675 | { | 674 | { |
| 676 | struct device *dev = data; | 675 | struct device *dev = data; |
| 677 | int count; | 676 | int count; |
| 678 | 677 | ||
| 678 | if (global_disable) | ||
| 679 | return 0; | ||
| 679 | 680 | ||
| 680 | switch (action) { | 681 | switch (action) { |
| 681 | case BUS_NOTIFY_UNBOUND_DRIVER: | 682 | case BUS_NOTIFY_UNBOUND_DRIVER: |
| @@ -697,6 +698,9 @@ void dma_debug_add_bus(struct bus_type *bus) | |||
| 697 | { | 698 | { |
| 698 | struct notifier_block *nb; | 699 | struct notifier_block *nb; |
| 699 | 700 | ||
| 701 | if (global_disable) | ||
| 702 | return; | ||
| 703 | |||
| 700 | nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); | 704 | nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); |
| 701 | if (nb == NULL) { | 705 | if (nb == NULL) { |
| 702 | pr_err("dma_debug_add_bus: out of memory\n"); | 706 | pr_err("dma_debug_add_bus: out of memory\n"); |
| @@ -909,6 +913,9 @@ static void check_sync(struct device *dev, | |||
| 909 | ref->size); | 913 | ref->size); |
| 910 | } | 914 | } |
| 911 | 915 | ||
| 916 | if (entry->direction == DMA_BIDIRECTIONAL) | ||
| 917 | goto out; | ||
| 918 | |||
| 912 | if (ref->direction != entry->direction) { | 919 | if (ref->direction != entry->direction) { |
| 913 | err_printk(dev, entry, "DMA-API: device driver syncs " | 920 | err_printk(dev, entry, "DMA-API: device driver syncs " |
| 914 | "DMA memory with different direction " | 921 | "DMA memory with different direction " |
| @@ -919,9 +926,6 @@ static void check_sync(struct device *dev, | |||
| 919 | dir2name[ref->direction]); | 926 | dir2name[ref->direction]); |
| 920 | } | 927 | } |
| 921 | 928 | ||
| 922 | if (entry->direction == DMA_BIDIRECTIONAL) | ||
| 923 | goto out; | ||
| 924 | |||
| 925 | if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && | 929 | if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && |
| 926 | !(ref->direction == DMA_TO_DEVICE)) | 930 | !(ref->direction == DMA_TO_DEVICE)) |
| 927 | err_printk(dev, entry, "DMA-API: device driver syncs " | 931 | err_printk(dev, entry, "DMA-API: device driver syncs " |
| @@ -944,7 +948,6 @@ static void check_sync(struct device *dev, | |||
| 944 | 948 | ||
| 945 | out: | 949 | out: |
| 946 | put_hash_bucket(bucket, &flags); | 950 | put_hash_bucket(bucket, &flags); |
| 947 | |||
| 948 | } | 951 | } |
| 949 | 952 | ||
| 950 | void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, | 953 | void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, |
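
Two behavioural changes are visible in the dma-debug hunks above: the bus-notifier paths bail out early when dma-debug is globally disabled, and check_sync() now accepts a DMA_BIDIRECTIONAL mapping before the strict direction comparison runs, instead of warning first and only then skipping. A compact userspace sketch of that reordered check follows; the enum and function names are local stand-ins, not the kernel API.

#include <stdio.h>

enum dma_dir { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Sketch of the sync-direction validation after the reorder: a mapping
 * created bidirectional may legitimately be synced either way, so it is
 * accepted before the equality test that would otherwise warn. */
static int check_sync_dir(enum dma_dir mapped, enum dma_dir synced)
{
	if (mapped == DMA_BIDIRECTIONAL)
		return 0;			/* always fine, no warning */
	if (mapped != synced) {
		fprintf(stderr, "sync direction %d != mapping direction %d\n",
			synced, mapped);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* before the reorder this combination produced a spurious warning */
	return check_sync_dir(DMA_BIDIRECTIONAL, DMA_FROM_DEVICE) ? 1 : 0;
}
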
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index e22c148e4b7f..f93502915988 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/list.h> | 21 | #include <linux/list.h> |
| 22 | #include <linux/sysctl.h> | 22 | #include <linux/sysctl.h> |
| 23 | #include <linux/ctype.h> | 23 | #include <linux/ctype.h> |
| 24 | #include <linux/string.h> | ||
| 24 | #include <linux/uaccess.h> | 25 | #include <linux/uaccess.h> |
| 25 | #include <linux/dynamic_debug.h> | 26 | #include <linux/dynamic_debug.h> |
| 26 | #include <linux/debugfs.h> | 27 | #include <linux/debugfs.h> |
| @@ -209,8 +210,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords) | |||
| 209 | char *end; | 210 | char *end; |
| 210 | 211 | ||
| 211 | /* Skip leading whitespace */ | 212 | /* Skip leading whitespace */ |
| 212 | while (*buf && isspace(*buf)) | 213 | buf = skip_spaces(buf); |
| 213 | buf++; | ||
| 214 | if (!*buf) | 214 | if (!*buf) |
| 215 | break; /* oh, it was trailing whitespace */ | 215 | break; /* oh, it was trailing whitespace */ |
| 216 | 216 | ||
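
The tokenizer above now calls skip_spaces() from <linux/string.h> instead of an open-coded isspace() loop. For reference, a userspace stand-in for that helper and its use at the top of a tokenizer pass looks like the sketch below; the sample query string is invented.

#include <ctype.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's skip_spaces(): advance past leading
 * whitespace and return a pointer to the first non-space character. */
static char *skip_spaces(const char *str)
{
	while (isspace((unsigned char)*str))
		++str;
	return (char *)str;
}

int main(void)
{
	char *buf = "   func some_function +p";

	buf = skip_spaces(buf);
	if (!*buf)
		return 0;	/* only trailing whitespace was left */
	printf("first word starts at: \"%s\"\n", buf);
	return 0;
}
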
diff --git a/lib/genalloc.c b/lib/genalloc.c index eed2bdb865e7..e67f97495dd5 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 14 | #include <linux/bitmap.h> | ||
| 14 | #include <linux/genalloc.h> | 15 | #include <linux/genalloc.h> |
| 15 | 16 | ||
| 16 | 17 | ||
| @@ -114,7 +115,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) | |||
| 114 | struct gen_pool_chunk *chunk; | 115 | struct gen_pool_chunk *chunk; |
| 115 | unsigned long addr, flags; | 116 | unsigned long addr, flags; |
| 116 | int order = pool->min_alloc_order; | 117 | int order = pool->min_alloc_order; |
| 117 | int nbits, bit, start_bit, end_bit; | 118 | int nbits, start_bit, end_bit; |
| 118 | 119 | ||
| 119 | if (size == 0) | 120 | if (size == 0) |
| 120 | return 0; | 121 | return 0; |
| @@ -129,29 +130,19 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) | |||
| 129 | end_bit -= nbits + 1; | 130 | end_bit -= nbits + 1; |
| 130 | 131 | ||
| 131 | spin_lock_irqsave(&chunk->lock, flags); | 132 | spin_lock_irqsave(&chunk->lock, flags); |
| 132 | bit = -1; | 133 | start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0, |
| 133 | while (bit + 1 < end_bit) { | 134 | nbits, 0); |
| 134 | bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1); | 135 | if (start_bit >= end_bit) { |
| 135 | if (bit >= end_bit) | ||
| 136 | break; | ||
| 137 | |||
| 138 | start_bit = bit; | ||
| 139 | if (nbits > 1) { | ||
| 140 | bit = find_next_bit(chunk->bits, bit + nbits, | ||
| 141 | bit + 1); | ||
| 142 | if (bit - start_bit < nbits) | ||
| 143 | continue; | ||
| 144 | } | ||
| 145 | |||
| 146 | addr = chunk->start_addr + | ||
| 147 | ((unsigned long)start_bit << order); | ||
| 148 | while (nbits--) | ||
| 149 | __set_bit(start_bit++, chunk->bits); | ||
| 150 | spin_unlock_irqrestore(&chunk->lock, flags); | 136 | spin_unlock_irqrestore(&chunk->lock, flags); |
| 151 | read_unlock(&pool->lock); | 137 | continue; |
| 152 | return addr; | ||
| 153 | } | 138 | } |
| 139 | |||
| 140 | addr = chunk->start_addr + ((unsigned long)start_bit << order); | ||
| 141 | |||
| 142 | bitmap_set(chunk->bits, start_bit, nbits); | ||
| 154 | spin_unlock_irqrestore(&chunk->lock, flags); | 143 | spin_unlock_irqrestore(&chunk->lock, flags); |
| 144 | read_unlock(&pool->lock); | ||
| 145 | return addr; | ||
| 155 | } | 146 | } |
| 156 | read_unlock(&pool->lock); | 147 | read_unlock(&pool->lock); |
| 157 | return 0; | 148 | return 0; |
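
gen_pool_alloc() now delegates the search for a run of free bits to bitmap_find_next_zero_area() and marks the winning run with bitmap_set(), replacing the hand-rolled find/verify/set loop. The sketch below shows the same two-step pattern over a single word in userspace; find_zero_area() is a simplified stand-in for the generic helper (no alignment mask) and the bit layout in main() is arbitrary.

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static int test_bit(const unsigned long *map, unsigned n)
{
	return (map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1UL;
}

static void set_bit(unsigned long *map, unsigned n)
{
	map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

/* Simplified stand-in for bitmap_find_next_zero_area(): first index of a
 * run of nr clear bits below size, or size when nothing fits. */
static unsigned find_zero_area(const unsigned long *map, unsigned size, unsigned nr)
{
	unsigned start, i;

	for (start = 0; start + nr <= size; start++) {
		for (i = 0; i < nr; i++)
			if (test_bit(map, start + i))
				break;
		if (i == nr)
			return start;
	}
	return size;
}

int main(void)
{
	unsigned long map[1] = { 0x0000000fUL };	/* bits 0..3 already used */
	unsigned nr = 3, size = 32, i;
	unsigned start = find_zero_area(map, size, nr);

	if (start >= size)
		return 1;			/* chunk exhausted, try the next one */
	for (i = 0; i < nr; i++)		/* bitmap_set() equivalent */
		set_bit(map, start + i);
	printf("allocated bits %u..%u\n", start, start + nr - 1);
	return 0;
}
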
diff --git a/lib/hweight.c b/lib/hweight.c index 389424ecb129..63ee4eb1228d 100644 --- a/lib/hweight.c +++ b/lib/hweight.c | |||
| @@ -11,11 +11,18 @@ | |||
| 11 | 11 | ||
| 12 | unsigned int hweight32(unsigned int w) | 12 | unsigned int hweight32(unsigned int w) |
| 13 | { | 13 | { |
| 14 | #ifdef ARCH_HAS_FAST_MULTIPLIER | ||
| 15 | w -= (w >> 1) & 0x55555555; | ||
| 16 | w = (w & 0x33333333) + ((w >> 2) & 0x33333333); | ||
| 17 | w = (w + (w >> 4)) & 0x0f0f0f0f; | ||
| 18 | return (w * 0x01010101) >> 24; | ||
| 19 | #else | ||
| 14 | unsigned int res = w - ((w >> 1) & 0x55555555); | 20 | unsigned int res = w - ((w >> 1) & 0x55555555); |
| 15 | res = (res & 0x33333333) + ((res >> 2) & 0x33333333); | 21 | res = (res & 0x33333333) + ((res >> 2) & 0x33333333); |
| 16 | res = (res + (res >> 4)) & 0x0F0F0F0F; | 22 | res = (res + (res >> 4)) & 0x0F0F0F0F; |
| 17 | res = res + (res >> 8); | 23 | res = res + (res >> 8); |
| 18 | return (res + (res >> 16)) & 0x000000FF; | 24 | return (res + (res >> 16)) & 0x000000FF; |
| 25 | #endif | ||
| 19 | } | 26 | } |
| 20 | EXPORT_SYMBOL(hweight32); | 27 | EXPORT_SYMBOL(hweight32); |
| 21 | 28 | ||
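
When the architecture advertises a fast multiplier, the hweight32() patch above uses the classic SWAR popcount that folds the per-byte counts into the top byte with a single 0x01010101 multiply, rather than the shift-and-add reduction of the #else branch. The standalone check below implements both variants and verifies they agree on a few hand-picked values.

#include <stdio.h>
#include <stdint.h>

/* Multiply-based variant (the ARCH_HAS_FAST_MULTIPLIER path above). */
static unsigned int hweight32_mul(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;			/* 2-bit counts */
	w  = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* 4-bit counts */
	w  = (w + (w >> 4)) & 0x0f0f0f0f;		/* per-byte counts */
	return (w * 0x01010101) >> 24;			/* sum bytes into the top byte */
}

/* Shift-and-add variant (the #else branch above). */
static unsigned int hweight32_shift(uint32_t w)
{
	uint32_t res = w - ((w >> 1) & 0x55555555);

	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	res = (res + (res >> 4)) & 0x0F0F0F0F;
	res = res + (res >> 8);
	return (res + (res >> 16)) & 0x000000FF;
}

int main(void)
{
	uint32_t samples[] = { 0, 1, 0xff, 0x80000000u, 0xdeadbeefu, 0xffffffffu };
	unsigned i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		if (hweight32_mul(samples[i]) != hweight32_shift(samples[i]))
			return 1;
	printf("both hweight32 variants agree\n");
	return 0;
}
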
| @@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
| 156 | id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; | 156 | id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; |
| 157 | 157 | ||
| 158 | /* if already at the top layer, we need to grow */ | 158 | /* if already at the top layer, we need to grow */ |
| 159 | if (!(p = pa[l])) { | 159 | if (id >= 1 << (idp->layers * IDR_BITS)) { |
| 160 | *starting_id = id; | 160 | *starting_id = id; |
| 161 | return IDR_NEED_TO_GROW; | 161 | return IDR_NEED_TO_GROW; |
| 162 | } | 162 | } |
| 163 | p = pa[l]; | ||
| 164 | BUG_ON(!p); | ||
| 163 | 165 | ||
| 164 | /* If we need to go up one layer, continue the | 166 | /* If we need to go up one layer, continue the |
| 165 | * loop; otherwise, restart from the top. | 167 | * loop; otherwise, restart from the top. |
| @@ -281,7 +283,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | |||
| 281 | /** | 283 | /** |
| 282 | * idr_get_new_above - allocate new idr entry above or equal to a start id | 284 | * idr_get_new_above - allocate new idr entry above or equal to a start id |
| 283 | * @idp: idr handle | 285 | * @idp: idr handle |
| 284 | * @ptr: pointer you want associated with the ide | 286 | * @ptr: pointer you want associated with the id |
| 285 | * @start_id: id to start search at | 287 | * @start_id: id to start search at |
| 286 | * @id: pointer to the allocated handle | 288 | * @id: pointer to the allocated handle |
| 287 | * | 289 | * |
| @@ -313,7 +315,7 @@ EXPORT_SYMBOL(idr_get_new_above); | |||
| 313 | /** | 315 | /** |
| 314 | * idr_get_new - allocate new idr entry | 316 | * idr_get_new - allocate new idr entry |
| 315 | * @idp: idr handle | 317 | * @idp: idr handle |
| 316 | * @ptr: pointer you want associated with the ide | 318 | * @ptr: pointer you want associated with the id |
| 317 | * @id: pointer to the allocated handle | 319 | * @id: pointer to the allocated handle |
| 318 | * | 320 | * |
| 319 | * This is the allocate id function. It should be called with any | 321 | * This is the allocate id function. It should be called with any |
| @@ -502,7 +504,7 @@ void *idr_find(struct idr *idp, int id) | |||
| 502 | int n; | 504 | int n; |
| 503 | struct idr_layer *p; | 505 | struct idr_layer *p; |
| 504 | 506 | ||
| 505 | p = rcu_dereference(idp->top); | 507 | p = rcu_dereference_raw(idp->top); |
| 506 | if (!p) | 508 | if (!p) |
| 507 | return NULL; | 509 | return NULL; |
| 508 | n = (p->layer+1) * IDR_BITS; | 510 | n = (p->layer+1) * IDR_BITS; |
| @@ -517,7 +519,7 @@ void *idr_find(struct idr *idp, int id) | |||
| 517 | while (n > 0 && p) { | 519 | while (n > 0 && p) { |
| 518 | n -= IDR_BITS; | 520 | n -= IDR_BITS; |
| 519 | BUG_ON(n != p->layer*IDR_BITS); | 521 | BUG_ON(n != p->layer*IDR_BITS); |
| 520 | p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); | 522 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
| 521 | } | 523 | } |
| 522 | return((void *)p); | 524 | return((void *)p); |
| 523 | } | 525 | } |
| @@ -550,7 +552,7 @@ int idr_for_each(struct idr *idp, | |||
| 550 | struct idr_layer **paa = &pa[0]; | 552 | struct idr_layer **paa = &pa[0]; |
| 551 | 553 | ||
| 552 | n = idp->layers * IDR_BITS; | 554 | n = idp->layers * IDR_BITS; |
| 553 | p = rcu_dereference(idp->top); | 555 | p = rcu_dereference_raw(idp->top); |
| 554 | max = 1 << n; | 556 | max = 1 << n; |
| 555 | 557 | ||
| 556 | id = 0; | 558 | id = 0; |
| @@ -558,7 +560,7 @@ int idr_for_each(struct idr *idp, | |||
| 558 | while (n > 0 && p) { | 560 | while (n > 0 && p) { |
| 559 | n -= IDR_BITS; | 561 | n -= IDR_BITS; |
| 560 | *paa++ = p; | 562 | *paa++ = p; |
| 561 | p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); | 563 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
| 562 | } | 564 | } |
| 563 | 565 | ||
| 564 | if (p) { | 566 | if (p) { |
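
In the idr hunks above, sub_alloc() replaces the implicit "pa[l] is NULL so we ran off the top" test with an explicit comparison against the largest id the current tree depth can hold, and then asserts that the layer pointer exists; the lookup paths switch to rcu_dereference_raw(), the variant without the lockdep checking that plain rcu_dereference() acquired. The tiny sketch below only illustrates the grow bound; IDR_BITS is architecture-dependent in the kernel, and the value 5 here is an assumption chosen purely for the printout.

#include <stdio.h>

/* Illustrative only: 5 is one possible IDR_BITS value, used to show the
 * bound that the new "id >= 1 << (layers * IDR_BITS)" test enforces. */
#define IDR_BITS 5

int main(void)
{
	int layers;

	for (layers = 1; layers <= 4; layers++) {
		unsigned long max_exclusive = 1UL << (layers * IDR_BITS);

		printf("layers=%d: ids 0..%lu fit, id >= %lu forces a grow\n",
		       layers, max_exclusive - 1, max_exclusive);
	}
	return 0;
}
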
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c index 75dbda03f4fb..c0251f4ad08b 100644 --- a/lib/iommu-helper.c +++ b/lib/iommu-helper.c | |||
| @@ -3,41 +3,7 @@ | |||
| 3 | */ | 3 | */ |
| 4 | 4 | ||
| 5 | #include <linux/module.h> | 5 | #include <linux/module.h> |
| 6 | #include <linux/bitops.h> | 6 | #include <linux/bitmap.h> |
| 7 | |||
| 8 | static unsigned long find_next_zero_area(unsigned long *map, | ||
| 9 | unsigned long size, | ||
| 10 | unsigned long start, | ||
| 11 | unsigned int nr, | ||
| 12 | unsigned long align_mask) | ||
| 13 | { | ||
| 14 | unsigned long index, end, i; | ||
| 15 | again: | ||
| 16 | index = find_next_zero_bit(map, size, start); | ||
| 17 | |||
| 18 | /* Align allocation */ | ||
| 19 | index = (index + align_mask) & ~align_mask; | ||
| 20 | |||
| 21 | end = index + nr; | ||
| 22 | if (end >= size) | ||
| 23 | return -1; | ||
| 24 | for (i = index; i < end; i++) { | ||
| 25 | if (test_bit(i, map)) { | ||
| 26 | start = i+1; | ||
| 27 | goto again; | ||
| 28 | } | ||
| 29 | } | ||
| 30 | return index; | ||
| 31 | } | ||
| 32 | |||
| 33 | void iommu_area_reserve(unsigned long *map, unsigned long i, int len) | ||
| 34 | { | ||
| 35 | unsigned long end = i + len; | ||
| 36 | while (i < end) { | ||
| 37 | __set_bit(i, map); | ||
| 38 | i++; | ||
| 39 | } | ||
| 40 | } | ||
| 41 | 7 | ||
| 42 | int iommu_is_span_boundary(unsigned int index, unsigned int nr, | 8 | int iommu_is_span_boundary(unsigned int index, unsigned int nr, |
| 43 | unsigned long shift, | 9 | unsigned long shift, |
| @@ -55,31 +21,24 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, | |||
| 55 | unsigned long align_mask) | 21 | unsigned long align_mask) |
| 56 | { | 22 | { |
| 57 | unsigned long index; | 23 | unsigned long index; |
| 24 | |||
| 25 | /* We don't want the last of the limit */ | ||
| 26 | size -= 1; | ||
| 58 | again: | 27 | again: |
| 59 | index = find_next_zero_area(map, size, start, nr, align_mask); | 28 | index = bitmap_find_next_zero_area(map, size, start, nr, align_mask); |
| 60 | if (index != -1) { | 29 | if (index < size) { |
| 61 | if (iommu_is_span_boundary(index, nr, shift, boundary_size)) { | 30 | if (iommu_is_span_boundary(index, nr, shift, boundary_size)) { |
| 62 | /* we could do more effectively */ | 31 | /* we could do more effectively */ |
| 63 | start = index + 1; | 32 | start = index + 1; |
| 64 | goto again; | 33 | goto again; |
| 65 | } | 34 | } |
| 66 | iommu_area_reserve(map, index, nr); | 35 | bitmap_set(map, index, nr); |
| 36 | return index; | ||
| 67 | } | 37 | } |
| 68 | return index; | 38 | return -1; |
| 69 | } | 39 | } |
| 70 | EXPORT_SYMBOL(iommu_area_alloc); | 40 | EXPORT_SYMBOL(iommu_area_alloc); |
| 71 | 41 | ||
| 72 | void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr) | ||
| 73 | { | ||
| 74 | unsigned long end = start + nr; | ||
| 75 | |||
| 76 | while (start < end) { | ||
| 77 | __clear_bit(start, map); | ||
| 78 | start++; | ||
| 79 | } | ||
| 80 | } | ||
| 81 | EXPORT_SYMBOL(iommu_area_free); | ||
| 82 | |||
| 83 | unsigned long iommu_num_pages(unsigned long addr, unsigned long len, | 42 | unsigned long iommu_num_pages(unsigned long addr, unsigned long len, |
| 84 | unsigned long io_page_size) | 43 | unsigned long io_page_size) |
| 85 | { | 44 | { |
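
iommu-helper drops its private find_next_zero_area()/iommu_area_reserve() pair and rebuilds iommu_area_alloc() on the generic bitmap helpers, with the "size -= 1" adjustment and an "index < size" success test replacing the old -1 sentinel. The userspace sketch below reproduces the aligned-search, boundary-retry, then-reserve flow over one 64-bit word; find_zero_area() is a simplified stand-in for bitmap_find_next_zero_area() and the 16-entry span rule in crosses_boundary() is a made-up placeholder for iommu_is_span_boundary().

#include <stdio.h>

/* Aligned run of nr clear bits at or after start; returns size if none. */
static unsigned find_zero_area(unsigned long map, unsigned size, unsigned start,
			       unsigned nr, unsigned align_mask)
{
	for (start = (start + align_mask) & ~align_mask;
	     start + nr <= size;
	     start = (start + 1 + align_mask) & ~align_mask) {
		unsigned long run_mask = ((1UL << nr) - 1) << start;

		if ((map & run_mask) == 0)
			return start;
	}
	return size;
}

/* Placeholder boundary rule: pretend a mapping must not cross a 16-entry span. */
static int crosses_boundary(unsigned index, unsigned nr)
{
	return (index / 16) != ((index + nr - 1) / 16);
}

static long area_alloc(unsigned long *map, unsigned size, unsigned start,
		       unsigned nr, unsigned align_mask)
{
	unsigned index;
again:
	index = find_zero_area(*map, size, start, nr, align_mask);
	if (index < size) {
		if (crosses_boundary(index, nr)) {
			start = index + 1;	/* retry past the offending run */
			goto again;
		}
		*map |= ((1UL << nr) - 1) << index;	/* bitmap_set() equivalent */
		return index;
	}
	return -1;
}

int main(void)
{
	unsigned long map = 0xffffUL;			/* bits 0..15 in use */
	long idx = area_alloc(&map, 64, 0, 4, 3);	/* 4 bits, 4-bit aligned */

	printf("allocated at index %ld\n", idx);
	return idx < 0;
}
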
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c index 39f1029e3525..b135d04aa48a 100644 --- a/lib/kernel_lock.c +++ b/lib/kernel_lock.c | |||
| @@ -5,10 +5,13 @@ | |||
| 5 | * relegated to obsolescence, but used by various less | 5 | * relegated to obsolescence, but used by various less |
| 6 | * important (or lazy) subsystems. | 6 | * important (or lazy) subsystems. |
| 7 | */ | 7 | */ |
| 8 | #include <linux/smp_lock.h> | ||
| 9 | #include <linux/module.h> | 8 | #include <linux/module.h> |
| 10 | #include <linux/kallsyms.h> | 9 | #include <linux/kallsyms.h> |
| 11 | #include <linux/semaphore.h> | 10 | #include <linux/semaphore.h> |
| 11 | #include <linux/smp_lock.h> | ||
| 12 | |||
| 13 | #define CREATE_TRACE_POINTS | ||
| 14 | #include <trace/events/bkl.h> | ||
| 12 | 15 | ||
| 13 | /* | 16 | /* |
| 14 | * The 'big kernel lock' | 17 | * The 'big kernel lock' |
| @@ -20,7 +23,7 @@ | |||
| 20 | * | 23 | * |
| 21 | * Don't use in new code. | 24 | * Don't use in new code. |
| 22 | */ | 25 | */ |
| 23 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); | 26 | static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag); |
| 24 | 27 | ||
| 25 | 28 | ||
| 26 | /* | 29 | /* |
| @@ -33,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); | |||
| 33 | * If it successfully gets the lock, it should increment | 36 | * If it successfully gets the lock, it should increment |
| 34 | * the preemption count like any spinlock does. | 37 | * the preemption count like any spinlock does. |
| 35 | * | 38 | * |
| 36 | * (This works on UP too - _raw_spin_trylock will never | 39 | * (This works on UP too - do_raw_spin_trylock will never |
| 37 | * return false in that case) | 40 | * return false in that case) |
| 38 | */ | 41 | */ |
| 39 | int __lockfunc __reacquire_kernel_lock(void) | 42 | int __lockfunc __reacquire_kernel_lock(void) |
| 40 | { | 43 | { |
| 41 | while (!_raw_spin_trylock(&kernel_flag)) { | 44 | while (!do_raw_spin_trylock(&kernel_flag)) { |
| 42 | if (need_resched()) | 45 | if (need_resched()) |
| 43 | return -EAGAIN; | 46 | return -EAGAIN; |
| 44 | cpu_relax(); | 47 | cpu_relax(); |
| @@ -49,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void) | |||
| 49 | 52 | ||
| 50 | void __lockfunc __release_kernel_lock(void) | 53 | void __lockfunc __release_kernel_lock(void) |
| 51 | { | 54 | { |
| 52 | _raw_spin_unlock(&kernel_flag); | 55 | do_raw_spin_unlock(&kernel_flag); |
| 53 | preempt_enable_no_resched(); | 56 | preempt_enable_no_resched(); |
| 54 | } | 57 | } |
| 55 | 58 | ||
| 56 | /* | 59 | /* |
| 57 | * These are the BKL spinlocks - we try to be polite about preemption. | 60 | * These are the BKL spinlocks - we try to be polite about preemption. |
| 58 | * If SMP is not on (ie UP preemption), this all goes away because the | 61 | * If SMP is not on (ie UP preemption), this all goes away because the |
| 59 | * _raw_spin_trylock() will always succeed. | 62 | * do_raw_spin_trylock() will always succeed. |
| 60 | */ | 63 | */ |
| 61 | #ifdef CONFIG_PREEMPT | 64 | #ifdef CONFIG_PREEMPT |
| 62 | static inline void __lock_kernel(void) | 65 | static inline void __lock_kernel(void) |
| 63 | { | 66 | { |
| 64 | preempt_disable(); | 67 | preempt_disable(); |
| 65 | if (unlikely(!_raw_spin_trylock(&kernel_flag))) { | 68 | if (unlikely(!do_raw_spin_trylock(&kernel_flag))) { |
| 66 | /* | 69 | /* |
| 67 | * If preemption was disabled even before this | 70 | * If preemption was disabled even before this |
| 68 | * was called, there's nothing we can be polite | 71 | * was called, there's nothing we can be polite |
| 69 | * about - just spin. | 72 | * about - just spin. |
| 70 | */ | 73 | */ |
| 71 | if (preempt_count() > 1) { | 74 | if (preempt_count() > 1) { |
| 72 | _raw_spin_lock(&kernel_flag); | 75 | do_raw_spin_lock(&kernel_flag); |
| 73 | return; | 76 | return; |
| 74 | } | 77 | } |
| 75 | 78 | ||
| @@ -79,10 +82,10 @@ static inline void __lock_kernel(void) | |||
| 79 | */ | 82 | */ |
| 80 | do { | 83 | do { |
| 81 | preempt_enable(); | 84 | preempt_enable(); |
| 82 | while (spin_is_locked(&kernel_flag)) | 85 | while (raw_spin_is_locked(&kernel_flag)) |
| 83 | cpu_relax(); | 86 | cpu_relax(); |
| 84 | preempt_disable(); | 87 | preempt_disable(); |
| 85 | } while (!_raw_spin_trylock(&kernel_flag)); | 88 | } while (!do_raw_spin_trylock(&kernel_flag)); |
| 86 | } | 89 | } |
| 87 | } | 90 | } |
| 88 | 91 | ||
| @@ -93,7 +96,7 @@ static inline void __lock_kernel(void) | |||
| 93 | */ | 96 | */ |
| 94 | static inline void __lock_kernel(void) | 97 | static inline void __lock_kernel(void) |
| 95 | { | 98 | { |
| 96 | _raw_spin_lock(&kernel_flag); | 99 | do_raw_spin_lock(&kernel_flag); |
| 97 | } | 100 | } |
| 98 | #endif | 101 | #endif |
| 99 | 102 | ||
| @@ -103,7 +106,7 @@ static inline void __unlock_kernel(void) | |||
| 103 | * the BKL is not covered by lockdep, so we open-code the | 106 | * the BKL is not covered by lockdep, so we open-code the |
| 104 | * unlocking sequence (and thus avoid the dep-chain ops): | 107 | * unlocking sequence (and thus avoid the dep-chain ops): |
| 105 | */ | 108 | */ |
| 106 | _raw_spin_unlock(&kernel_flag); | 109 | do_raw_spin_unlock(&kernel_flag); |
| 107 | preempt_enable(); | 110 | preempt_enable(); |
| 108 | } | 111 | } |
| 109 | 112 | ||
| @@ -113,21 +116,28 @@ static inline void __unlock_kernel(void) | |||
| 113 | * This cannot happen asynchronously, so we only need to | 116 | * This cannot happen asynchronously, so we only need to |
| 114 | * worry about other CPU's. | 117 | * worry about other CPU's. |
| 115 | */ | 118 | */ |
| 116 | void __lockfunc lock_kernel(void) | 119 | void __lockfunc _lock_kernel(const char *func, const char *file, int line) |
| 117 | { | 120 | { |
| 118 | int depth = current->lock_depth+1; | 121 | int depth = current->lock_depth + 1; |
| 119 | if (likely(!depth)) | 122 | |
| 123 | trace_lock_kernel(func, file, line); | ||
| 124 | |||
| 125 | if (likely(!depth)) { | ||
| 126 | might_sleep(); | ||
| 120 | __lock_kernel(); | 127 | __lock_kernel(); |
| 128 | } | ||
| 121 | current->lock_depth = depth; | 129 | current->lock_depth = depth; |
| 122 | } | 130 | } |
| 123 | 131 | ||
| 124 | void __lockfunc unlock_kernel(void) | 132 | void __lockfunc _unlock_kernel(const char *func, const char *file, int line) |
| 125 | { | 133 | { |
| 126 | BUG_ON(current->lock_depth < 0); | 134 | BUG_ON(current->lock_depth < 0); |
| 127 | if (likely(--current->lock_depth < 0)) | 135 | if (likely(--current->lock_depth < 0)) |
| 128 | __unlock_kernel(); | 136 | __unlock_kernel(); |
| 137 | |||
| 138 | trace_unlock_kernel(func, file, line); | ||
| 129 | } | 139 | } |
| 130 | 140 | ||
| 131 | EXPORT_SYMBOL(lock_kernel); | 141 | EXPORT_SYMBOL(_lock_kernel); |
| 132 | EXPORT_SYMBOL(unlock_kernel); | 142 | EXPORT_SYMBOL(_unlock_kernel); |
| 133 | 143 | ||
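
Beyond switching the BKL to the raw_spinlock type and its do_raw_spin_*() helpers, the kernel_lock changes rename lock_kernel()/unlock_kernel() to _lock_kernel()/_unlock_kernel() taking the caller's function, file and line so the new bkl trace events can record who took the lock, and add a might_sleep() on the outermost acquisition. The per-task recursion accounting itself is unchanged; the userspace sketch below models only that depth counter, with current_lock_depth standing in for current->lock_depth and printf standing in for the tracepoints.

#include <assert.h>
#include <stdio.h>

/* Only the outermost lock/unlock touches the real lock; nested calls just
 * adjust the depth, exactly as the lock_depth bookkeeping above does. */
static int current_lock_depth = -1;	/* -1 means "not held by this task" */

static void real_lock(void)   { printf("  -> acquire spinlock\n"); }
static void real_unlock(void) { printf("  <- release spinlock\n"); }

static void lock_kernel_sketch(const char *func)
{
	int depth = current_lock_depth + 1;

	printf("lock_kernel from %s (depth %d)\n", func, depth);
	if (depth == 0)			/* outermost acquisition only */
		real_lock();
	current_lock_depth = depth;
}

static void unlock_kernel_sketch(const char *func)
{
	assert(current_lock_depth >= 0);
	if (--current_lock_depth < 0)	/* dropped the last reference */
		real_unlock();
	printf("unlock_kernel from %s\n", func);
}

int main(void)
{
	lock_kernel_sketch("outer");
	lock_kernel_sketch("inner");	/* nested: no second spinlock operation */
	unlock_kernel_sketch("inner");
	unlock_kernel_sketch("outer");
	return 0;
}
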
diff --git a/lib/list_sort.c b/lib/list_sort.c new file mode 100644 index 000000000000..4b5cb794c38b --- /dev/null +++ b/lib/list_sort.c | |||
| @@ -0,0 +1,217 @@ | |||
| 1 | #include <linux/kernel.h> | ||
| 2 | #include <linux/module.h> | ||
| 3 | #include <linux/list_sort.h> | ||
| 4 | #include <linux/slab.h> | ||
| 5 | #include <linux/list.h> | ||
| 6 | |||
| 7 | #define MAX_LIST_LENGTH_BITS 20 | ||
| 8 | |||
| 9 | /* | ||
| 10 | * Returns a list organized in an intermediate format suited | ||
| 11 | * to chaining of merge() calls: null-terminated, no reserved or | ||
| 12 | * sentinel head node, "prev" links not maintained. | ||
| 13 | */ | ||
| 14 | static struct list_head *merge(void *priv, | ||
| 15 | int (*cmp)(void *priv, struct list_head *a, | ||
| 16 | struct list_head *b), | ||
| 17 | struct list_head *a, struct list_head *b) | ||
| 18 | { | ||
| 19 | struct list_head head, *tail = &head; | ||
| 20 | |||
| 21 | while (a && b) { | ||
| 22 | /* if equal, take 'a' -- important for sort stability */ | ||
| 23 | if ((*cmp)(priv, a, b) <= 0) { | ||
| 24 | tail->next = a; | ||
| 25 | a = a->next; | ||
| 26 | } else { | ||
| 27 | tail->next = b; | ||
| 28 | b = b->next; | ||
| 29 | } | ||
| 30 | tail = tail->next; | ||
| 31 | } | ||
| 32 | tail->next = a?:b; | ||
| 33 | return head.next; | ||
| 34 | } | ||
| 35 | |||
| 36 | /* | ||
| 37 | * Combine final list merge with restoration of standard doubly-linked | ||
| 38 | * list structure. This approach duplicates code from merge(), but | ||
| 39 | * runs faster than the tidier alternatives of either a separate final | ||
| 40 | * prev-link restoration pass, or maintaining the prev links | ||
| 41 | * throughout. | ||
| 42 | */ | ||
| 43 | static void merge_and_restore_back_links(void *priv, | ||
| 44 | int (*cmp)(void *priv, struct list_head *a, | ||
| 45 | struct list_head *b), | ||
| 46 | struct list_head *head, | ||
| 47 | struct list_head *a, struct list_head *b) | ||
| 48 | { | ||
| 49 | struct list_head *tail = head; | ||
| 50 | |||
| 51 | while (a && b) { | ||
| 52 | /* if equal, take 'a' -- important for sort stability */ | ||
| 53 | if ((*cmp)(priv, a, b) <= 0) { | ||
| 54 | tail->next = a; | ||
| 55 | a->prev = tail; | ||
| 56 | a = a->next; | ||
| 57 | } else { | ||
| 58 | tail->next = b; | ||
| 59 | b->prev = tail; | ||
| 60 | b = b->next; | ||
| 61 | } | ||
| 62 | tail = tail->next; | ||
| 63 | } | ||
| 64 | tail->next = a ? : b; | ||
| 65 | |||
| 66 | do { | ||
| 67 | /* | ||
| 68 | * In worst cases this loop may run many iterations. | ||
| 69 | * Continue callbacks to the client even though no | ||
| 70 | * element comparison is needed, so the client's cmp() | ||
| 71 | * routine can invoke cond_resched() periodically. | ||
| 72 | */ | ||
| 73 | (*cmp)(priv, tail, tail); | ||
| 74 | |||
| 75 | tail->next->prev = tail; | ||
| 76 | tail = tail->next; | ||
| 77 | } while (tail->next); | ||
| 78 | |||
| 79 | tail->next = head; | ||
| 80 | head->prev = tail; | ||
| 81 | } | ||
| 82 | |||
| 83 | /** | ||
| 84 | * list_sort - sort a list | ||
| 85 | * @priv: private data, opaque to list_sort(), passed to @cmp | ||
| 86 | * @head: the list to sort | ||
| 87 | * @cmp: the elements comparison function | ||
| 88 | * | ||
| 89 | * This function implements "merge sort", which has O(nlog(n)) | ||
| 90 | * complexity. | ||
| 91 | * | ||
| 92 | * The comparison function @cmp must return a negative value if @a | ||
| 93 | * should sort before @b, and a positive value if @a should sort after | ||
| 94 | * @b. If @a and @b are equivalent, and their original relative | ||
| 95 | * ordering is to be preserved, @cmp must return 0. | ||
| 96 | */ | ||
| 97 | void list_sort(void *priv, struct list_head *head, | ||
| 98 | int (*cmp)(void *priv, struct list_head *a, | ||
| 99 | struct list_head *b)) | ||
| 100 | { | ||
| 101 | struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists | ||
| 102 | -- last slot is a sentinel */ | ||
| 103 | int lev; /* index into part[] */ | ||
| 104 | int max_lev = 0; | ||
| 105 | struct list_head *list; | ||
| 106 | |||
| 107 | if (list_empty(head)) | ||
| 108 | return; | ||
| 109 | |||
| 110 | memset(part, 0, sizeof(part)); | ||
| 111 | |||
| 112 | head->prev->next = NULL; | ||
| 113 | list = head->next; | ||
| 114 | |||
| 115 | while (list) { | ||
| 116 | struct list_head *cur = list; | ||
| 117 | list = list->next; | ||
| 118 | cur->next = NULL; | ||
| 119 | |||
| 120 | for (lev = 0; part[lev]; lev++) { | ||
| 121 | cur = merge(priv, cmp, part[lev], cur); | ||
| 122 | part[lev] = NULL; | ||
| 123 | } | ||
| 124 | if (lev > max_lev) { | ||
| 125 | if (unlikely(lev >= ARRAY_SIZE(part)-1)) { | ||
| 126 | printk_once(KERN_DEBUG "list passed to" | ||
| 127 | " list_sort() too long for" | ||
| 128 | " efficiency\n"); | ||
| 129 | lev--; | ||
| 130 | } | ||
| 131 | max_lev = lev; | ||
| 132 | } | ||
| 133 | part[lev] = cur; | ||
| 134 | } | ||
| 135 | |||
| 136 | for (lev = 0; lev < max_lev; lev++) | ||
| 137 | if (part[lev]) | ||
| 138 | list = merge(priv, cmp, part[lev], list); | ||
| 139 | |||
| 140 | merge_and_restore_back_links(priv, cmp, head, part[max_lev], list); | ||
| 141 | } | ||
| 142 | EXPORT_SYMBOL(list_sort); | ||
| 143 | |||
| 144 | #ifdef DEBUG_LIST_SORT | ||
| 145 | struct debug_el { | ||
| 146 | struct list_head l_h; | ||
| 147 | int value; | ||
| 148 | unsigned serial; | ||
| 149 | }; | ||
| 150 | |||
| 151 | static int cmp(void *priv, struct list_head *a, struct list_head *b) | ||
| 152 | { | ||
| 153 | return container_of(a, struct debug_el, l_h)->value | ||
| 154 | - container_of(b, struct debug_el, l_h)->value; | ||
| 155 | } | ||
| 156 | |||
| 157 | /* | ||
| 158 | * The pattern of set bits in the list length determines which cases | ||
| 159 | * are hit in list_sort(). | ||
| 160 | */ | ||
| 161 | #define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */ | ||
| 162 | |||
| 163 | static int __init list_sort_test(void) | ||
| 164 | { | ||
| 165 | int i, r = 1, count; | ||
| 166 | struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL); | ||
| 167 | struct list_head *cur; | ||
| 168 | |||
| 169 | printk(KERN_WARNING "testing list_sort()\n"); | ||
| 170 | |||
| 171 | cur = head; | ||
| 172 | for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) { | ||
| 173 | struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL); | ||
| 174 | BUG_ON(!el); | ||
| 175 | /* force some equivalencies */ | ||
| 176 | el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3); | ||
| 177 | el->serial = i; | ||
| 178 | |||
| 179 | el->l_h.prev = cur; | ||
| 180 | cur->next = &el->l_h; | ||
| 181 | cur = cur->next; | ||
| 182 | } | ||
| 183 | head->prev = cur; | ||
| 184 | |||
| 185 | list_sort(NULL, head, cmp); | ||
| 186 | |||
| 187 | count = 1; | ||
| 188 | for (cur = head->next; cur->next != head; cur = cur->next) { | ||
| 189 | struct debug_el *el = container_of(cur, struct debug_el, l_h); | ||
| 190 | int cmp_result = cmp(NULL, cur, cur->next); | ||
| 191 | if (cur->next->prev != cur) { | ||
| 192 | printk(KERN_EMERG "list_sort() returned " | ||
| 193 | "a corrupted list!\n"); | ||
| 194 | return 1; | ||
| 195 | } else if (cmp_result > 0) { | ||
| 196 | printk(KERN_EMERG "list_sort() failed to sort!\n"); | ||
| 197 | return 1; | ||
| 198 | } else if (cmp_result == 0 && | ||
| 199 | el->serial >= container_of(cur->next, | ||
| 200 | struct debug_el, l_h)->serial) { | ||
| 201 | printk(KERN_EMERG "list_sort() failed to preserve order" | ||
| 202 | " of equivalent elements!\n"); | ||
| 203 | return 1; | ||
| 204 | } | ||
| 205 | kfree(cur->prev); | ||
| 206 | count++; | ||
| 207 | } | ||
| 208 | kfree(cur); | ||
| 209 | if (count != LIST_SORT_TEST_LENGTH) { | ||
| 210 | printk(KERN_EMERG "list_sort() returned list of" | ||
| 211 | "different length!\n"); | ||
| 212 | return 1; | ||
| 213 | } | ||
| 214 | return 0; | ||
| 215 | } | ||
| 216 | module_init(list_sort_test); | ||
| 217 | #endif | ||
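
list_sort() above is a bottom-up merge sort over list_head chains: elements are peeled off one at a time and merged into an array of pending sorted sublists whose slot index encodes their length (slot k holds roughly 2^k elements), then everything is merged back and the prev links are restored in a final pass, keeping the sort stable. A short sketch of how a caller might use it follows; it is modeled only on the signature introduced above, struct foo and its weight field are hypothetical, and it builds only inside a kernel tree.

/* Hypothetical caller: sort a list of "struct foo" by ->weight. */
#include <linux/list.h>
#include <linux/list_sort.h>

struct foo {
	struct list_head node;
	int weight;
};

static int foo_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	const struct foo *fa = list_entry(a, struct foo, node);
	const struct foo *fb = list_entry(b, struct foo, node);

	/* negative: a sorts first, positive: b sorts first, 0: keep order */
	return fa->weight - fb->weight;
}

static void sort_foos(struct list_head *foo_list)
{
	list_sort(NULL, foo_list, foo_cmp);
}
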
| @@ -205,9 +205,8 @@ long lmb_add(u64 base, u64 size) | |||
| 205 | 205 | ||
| 206 | } | 206 | } |
| 207 | 207 | ||
| 208 | long lmb_remove(u64 base, u64 size) | 208 | static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size) |
| 209 | { | 209 | { |
| 210 | struct lmb_region *rgn = &(lmb.memory); | ||
| 211 | u64 rgnbegin, rgnend; | 210 | u64 rgnbegin, rgnend; |
| 212 | u64 end = base + size; | 211 | u64 end = base + size; |
| 213 | int i; | 212 | int i; |
| @@ -254,6 +253,16 @@ long lmb_remove(u64 base, u64 size) | |||
| 254 | return lmb_add_region(rgn, end, rgnend - end); | 253 | return lmb_add_region(rgn, end, rgnend - end); |
| 255 | } | 254 | } |
| 256 | 255 | ||
| 256 | long lmb_remove(u64 base, u64 size) | ||
| 257 | { | ||
| 258 | return __lmb_remove(&lmb.memory, base, size); | ||
| 259 | } | ||
| 260 | |||
| 261 | long __init lmb_free(u64 base, u64 size) | ||
| 262 | { | ||
| 263 | return __lmb_remove(&lmb.reserved, base, size); | ||
| 264 | } | ||
| 265 | |||
| 257 | long __init lmb_reserve(u64 base, u64 size) | 266 | long __init lmb_reserve(u64 base, u64 size) |
| 258 | { | 267 | { |
| 259 | struct lmb_region *_rgn = &lmb.reserved; | 268 | struct lmb_region *_rgn = &lmb.reserved; |
| @@ -263,7 +272,7 @@ long __init lmb_reserve(u64 base, u64 size) | |||
| 263 | return lmb_add_region(_rgn, base, size); | 272 | return lmb_add_region(_rgn, base, size); |
| 264 | } | 273 | } |
| 265 | 274 | ||
| 266 | long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size) | 275 | long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size) |
| 267 | { | 276 | { |
| 268 | unsigned long i; | 277 | unsigned long i; |
| 269 | 278 | ||
| @@ -493,6 +502,11 @@ int __init lmb_is_reserved(u64 addr) | |||
| 493 | return 0; | 502 | return 0; |
| 494 | } | 503 | } |
| 495 | 504 | ||
| 505 | int lmb_is_region_reserved(u64 base, u64 size) | ||
| 506 | { | ||
| 507 | return lmb_overlaps_region(&lmb.reserved, base, size); | ||
| 508 | } | ||
| 509 | |||
| 496 | /* | 510 | /* |
| 497 | * Given a <base, len>, find which memory regions belong to this range. | 511 | * Given a <base, len>, find which memory regions belong to this range. |
| 498 | * Adjust the request and return a contiguous chunk. | 512 | * Adjust the request and return a contiguous chunk. |
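
The lmb changes factor the range carve-out logic out of lmb_remove() into __lmb_remove() so the new lmb_free() can run the same operation on the reserved-region list instead of the memory list, and add lmb_is_region_reserved() as a thin wrapper around lmb_overlaps_region(). The interesting part of that carve-out is that removing a range from an existing region may delete it, trim either end, or split it in two; the userspace sketch below shows those four cases over a toy fixed-size array of ranges (no coalescing, hypothetical names, not the kernel structures).

#include <stdio.h>
#include <stdint.h>

struct region { uint64_t base, size; };

static struct region rgn[8];
static int nr;

static void add(uint64_t base, uint64_t size)
{
	rgn[nr].base = base;
	rgn[nr].size = size;
	nr++;
}

/* Remove [base, base+size) from whichever region it overlaps. */
static int remove_range(uint64_t base, uint64_t size)
{
	uint64_t end = base + size;
	int i;

	for (i = 0; i < nr; i++) {
		uint64_t rb = rgn[i].base, re = rgn[i].base + rgn[i].size;

		if (base <= rb && end >= re) {		/* covers the whole region */
			rgn[i] = rgn[--nr];
			return 0;
		}
		if (base <= rb && end > rb) {		/* trim the front */
			rgn[i].base = end;
			rgn[i].size = re - end;
			return 0;
		}
		if (base < re && end >= re) {		/* trim the back */
			rgn[i].size = base - rb;
			return 0;
		}
		if (base > rb && end < re) {		/* punch a hole: split in two */
			rgn[i].size = base - rb;
			add(end, re - end);
			return 0;
		}
	}
	return -1;					/* nothing overlapped */
}

int main(void)
{
	int i;

	add(0x1000, 0x4000);			/* one "reserved" region */
	remove_range(0x2000, 0x1000);		/* free the middle: splits it */
	for (i = 0; i < nr; i++)
		printf("region %d: 0x%llx + 0x%llx\n", i,
		       (unsigned long long)rgn[i].base,
		       (unsigned long long)rgn[i].size);
	return 0;
}
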
diff --git a/lib/lru_cache.c b/lib/lru_cache.c new file mode 100644 index 000000000000..270de9d31b8c --- /dev/null +++ b/lib/lru_cache.c | |||
| @@ -0,0 +1,560 @@ | |||
| 1 | /* | ||
| 2 | lru_cache.c | ||
| 3 | |||
| 4 | This file is part of DRBD by Philipp Reisner and Lars Ellenberg. | ||
| 5 | |||
| 6 | Copyright (C) 2003-2008, LINBIT Information Technologies GmbH. | ||
| 7 | Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>. | ||
| 8 | Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. | ||
| 9 | |||
| 10 | drbd is free software; you can redistribute it and/or modify | ||
| 11 | it under the terms of the GNU General Public License as published by | ||
| 12 | the Free Software Foundation; either version 2, or (at your option) | ||
| 13 | any later version. | ||
| 14 | |||
| 15 | drbd is distributed in the hope that it will be useful, | ||
| 16 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | GNU General Public License for more details. | ||
| 19 | |||
| 20 | You should have received a copy of the GNU General Public License | ||
| 21 | along with drbd; see the file COPYING. If not, write to | ||
| 22 | the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. | ||
| 23 | |||
| 24 | */ | ||
| 25 | |||
| 26 | #include <linux/module.h> | ||
| 27 | #include <linux/bitops.h> | ||
| 28 | #include <linux/slab.h> | ||
| 29 | #include <linux/string.h> /* for memset */ | ||
| 30 | #include <linux/seq_file.h> /* for seq_printf */ | ||
| 31 | #include <linux/lru_cache.h> | ||
| 32 | |||
| 33 | MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " | ||
| 34 | "Lars Ellenberg <lars@linbit.com>"); | ||
| 35 | MODULE_DESCRIPTION("lru_cache - Track sets of hot objects"); | ||
| 36 | MODULE_LICENSE("GPL"); | ||
| 37 | |||
| 38 | /* this is developers aid only. | ||
| 39 | * it catches concurrent access (lack of locking on the users part) */ | ||
| 40 | #define PARANOIA_ENTRY() do { \ | ||
| 41 | BUG_ON(!lc); \ | ||
| 42 | BUG_ON(!lc->nr_elements); \ | ||
| 43 | BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags)); \ | ||
| 44 | } while (0) | ||
| 45 | |||
| 46 | #define RETURN(x...) do { \ | ||
| 47 | clear_bit(__LC_PARANOIA, &lc->flags); \ | ||
| 48 | smp_mb__after_clear_bit(); return x ; } while (0) | ||
| 49 | |||
| 50 | /* BUG() if e is not one of the elements tracked by lc */ | ||
| 51 | #define PARANOIA_LC_ELEMENT(lc, e) do { \ | ||
| 52 | struct lru_cache *lc_ = (lc); \ | ||
| 53 | struct lc_element *e_ = (e); \ | ||
| 54 | unsigned i = e_->lc_index; \ | ||
| 55 | BUG_ON(i >= lc_->nr_elements); \ | ||
| 56 | BUG_ON(lc_->lc_element[i] != e_); } while (0) | ||
| 57 | |||
| 58 | /** | ||
| 59 | * lc_create - prepares to track objects in an active set | ||
| 60 | * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details | ||
| 61 | * @e_count: number of elements allowed to be active simultaneously | ||
| 62 | * @e_size: size of the tracked objects | ||
| 63 | * @e_off: offset to the &struct lc_element member in a tracked object | ||
| 64 | * | ||
| 65 | * Returns a pointer to a newly initialized struct lru_cache on success, | ||
| 66 | * or NULL on (allocation) failure. | ||
| 67 | */ | ||
| 68 | struct lru_cache *lc_create(const char *name, struct kmem_cache *cache, | ||
| 69 | unsigned e_count, size_t e_size, size_t e_off) | ||
| 70 | { | ||
| 71 | struct hlist_head *slot = NULL; | ||
| 72 | struct lc_element **element = NULL; | ||
| 73 | struct lru_cache *lc; | ||
| 74 | struct lc_element *e; | ||
| 75 | unsigned cache_obj_size = kmem_cache_size(cache); | ||
| 76 | unsigned i; | ||
| 77 | |||
| 78 | WARN_ON(cache_obj_size < e_size); | ||
| 79 | if (cache_obj_size < e_size) | ||
| 80 | return NULL; | ||
| 81 | |||
| 82 | /* e_count too big; would probably fail the allocation below anyways. | ||
| 83 | * for typical use cases, e_count should be few thousand at most. */ | ||
| 84 | if (e_count > LC_MAX_ACTIVE) | ||
| 85 | return NULL; | ||
| 86 | |||
| 87 | slot = kzalloc(e_count * sizeof(struct hlist_head*), GFP_KERNEL); | ||
| 88 | if (!slot) | ||
| 89 | goto out_fail; | ||
| 90 | element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL); | ||
| 91 | if (!element) | ||
| 92 | goto out_fail; | ||
| 93 | |||
| 94 | lc = kzalloc(sizeof(*lc), GFP_KERNEL); | ||
| 95 | if (!lc) | ||
| 96 | goto out_fail; | ||
| 97 | |||
| 98 | INIT_LIST_HEAD(&lc->in_use); | ||
| 99 | INIT_LIST_HEAD(&lc->lru); | ||
| 100 | INIT_LIST_HEAD(&lc->free); | ||
| 101 | |||
| 102 | lc->name = name; | ||
| 103 | lc->element_size = e_size; | ||
| 104 | lc->element_off = e_off; | ||
| 105 | lc->nr_elements = e_count; | ||
| 106 | lc->new_number = LC_FREE; | ||
| 107 | lc->lc_cache = cache; | ||
| 108 | lc->lc_element = element; | ||
| 109 | lc->lc_slot = slot; | ||
| 110 | |||
| 111 | /* preallocate all objects */ | ||
| 112 | for (i = 0; i < e_count; i++) { | ||
| 113 | void *p = kmem_cache_alloc(cache, GFP_KERNEL); | ||
| 114 | if (!p) | ||
| 115 | break; | ||
| 116 | memset(p, 0, lc->element_size); | ||
| 117 | e = p + e_off; | ||
| 118 | e->lc_index = i; | ||
| 119 | e->lc_number = LC_FREE; | ||
| 120 | list_add(&e->list, &lc->free); | ||
| 121 | element[i] = e; | ||
| 122 | } | ||
| 123 | if (i == e_count) | ||
| 124 | return lc; | ||
| 125 | |||
| 126 | /* else: could not allocate all elements, give up */ | ||
| 127 | for (i--; i; i--) { | ||
| 128 | void *p = element[i]; | ||
| 129 | kmem_cache_free(cache, p - e_off); | ||
| 130 | } | ||
| 131 | kfree(lc); | ||
| 132 | out_fail: | ||
| 133 | kfree(element); | ||
| 134 | kfree(slot); | ||
| 135 | return NULL; | ||
| 136 | } | ||
| 137 | |||
| 138 | void lc_free_by_index(struct lru_cache *lc, unsigned i) | ||
| 139 | { | ||
| 140 | void *p = lc->lc_element[i]; | ||
| 141 | WARN_ON(!p); | ||
| 142 | if (p) { | ||
| 143 | p -= lc->element_off; | ||
| 144 | kmem_cache_free(lc->lc_cache, p); | ||
| 145 | } | ||
| 146 | } | ||
| 147 | |||
| 148 | /** | ||
| 149 | * lc_destroy - frees memory allocated by lc_create() | ||
| 150 | * @lc: the lru cache to destroy | ||
| 151 | */ | ||
| 152 | void lc_destroy(struct lru_cache *lc) | ||
| 153 | { | ||
| 154 | unsigned i; | ||
| 155 | if (!lc) | ||
| 156 | return; | ||
| 157 | for (i = 0; i < lc->nr_elements; i++) | ||
| 158 | lc_free_by_index(lc, i); | ||
| 159 | kfree(lc->lc_element); | ||
| 160 | kfree(lc->lc_slot); | ||
| 161 | kfree(lc); | ||
| 162 | } | ||
| 163 | |||
| 164 | /** | ||
| 165 | * lc_reset - does a full reset for @lc and the hash table slots. | ||
| 166 | * @lc: the lru cache to operate on | ||
| 167 | * | ||
| 168 | * It is roughly the equivalent of re-allocating a fresh lru_cache object, | ||
| 169 | * basically a short cut to lc_destroy(lc); lc = lc_create(...); | ||
| 170 | */ | ||
| 171 | void lc_reset(struct lru_cache *lc) | ||
| 172 | { | ||
| 173 | unsigned i; | ||
| 174 | |||
| 175 | INIT_LIST_HEAD(&lc->in_use); | ||
| 176 | INIT_LIST_HEAD(&lc->lru); | ||
| 177 | INIT_LIST_HEAD(&lc->free); | ||
| 178 | lc->used = 0; | ||
| 179 | lc->hits = 0; | ||
| 180 | lc->misses = 0; | ||
| 181 | lc->starving = 0; | ||
| 182 | lc->dirty = 0; | ||
| 183 | lc->changed = 0; | ||
| 184 | lc->flags = 0; | ||
| 185 | lc->changing_element = NULL; | ||
| 186 | lc->new_number = LC_FREE; | ||
| 187 | memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements); | ||
| 188 | |||
| 189 | for (i = 0; i < lc->nr_elements; i++) { | ||
| 190 | struct lc_element *e = lc->lc_element[i]; | ||
| 191 | void *p = e; | ||
| 192 | p -= lc->element_off; | ||
| 193 | memset(p, 0, lc->element_size); | ||
| 194 | /* re-init it */ | ||
| 195 | e->lc_index = i; | ||
| 196 | e->lc_number = LC_FREE; | ||
| 197 | list_add(&e->list, &lc->free); | ||
| 198 | } | ||
| 199 | } | ||
| 200 | |||
| 201 | /** | ||
| 202 | * lc_seq_printf_stats - print stats about @lc into @seq | ||
| 203 | * @seq: the seq_file to print into | ||
| 204 | * @lc: the lru cache to print statistics of | ||
| 205 | */ | ||
| 206 | size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc) | ||
| 207 | { | ||
| 208 | /* NOTE: | ||
| 209 | * total calls to lc_get are | ||
| 210 | * (starving + hits + misses) | ||
| 211 | * misses include "dirty" count (update from an other thread in | ||
| 212 | * progress) and "changed", when this in fact lead to an successful | ||
| 213 | * update of the cache. | ||
| 214 | */ | ||
| 215 | return seq_printf(seq, "\t%s: used:%u/%u " | ||
| 216 | "hits:%lu misses:%lu starving:%lu dirty:%lu changed:%lu\n", | ||
| 217 | lc->name, lc->used, lc->nr_elements, | ||
| 218 | lc->hits, lc->misses, lc->starving, lc->dirty, lc->changed); | ||
| 219 | } | ||
| 220 | |||
| 221 | static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr) | ||
| 222 | { | ||
| 223 | return lc->lc_slot + (enr % lc->nr_elements); | ||
| 224 | } | ||
| 225 | |||
| 226 | |||
| 227 | /** | ||
| 228 | * lc_find - find element by label, if present in the hash table | ||
| 229 | * @lc: The lru_cache object | ||
| 230 | * @enr: element number | ||
| 231 | * | ||
| 232 | * Returns the pointer to an element, if the element with the requested | ||
| 233 | * "label" or element number is present in the hash table, | ||
| 234 | * or NULL if not found. Does not change the refcnt. | ||
| 235 | */ | ||
| 236 | struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr) | ||
| 237 | { | ||
| 238 | struct hlist_node *n; | ||
| 239 | struct lc_element *e; | ||
| 240 | |||
| 241 | BUG_ON(!lc); | ||
| 242 | BUG_ON(!lc->nr_elements); | ||
| 243 | hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) { | ||
| 244 | if (e->lc_number == enr) | ||
| 245 | return e; | ||
| 246 | } | ||
| 247 | return NULL; | ||
| 248 | } | ||
| 249 | |||
| 250 | /* returned element will be "recycled" immediately */ | ||
| 251 | static struct lc_element *lc_evict(struct lru_cache *lc) | ||
| 252 | { | ||
| 253 | struct list_head *n; | ||
| 254 | struct lc_element *e; | ||
| 255 | |||
| 256 | if (list_empty(&lc->lru)) | ||
| 257 | return NULL; | ||
| 258 | |||
| 259 | n = lc->lru.prev; | ||
| 260 | e = list_entry(n, struct lc_element, list); | ||
| 261 | |||
| 262 | PARANOIA_LC_ELEMENT(lc, e); | ||
| 263 | |||
| 264 | list_del(&e->list); | ||
| 265 | hlist_del(&e->colision); | ||
| 266 | return e; | ||
| 267 | } | ||
| 268 | |||
| 269 | /** | ||
| 270 | * lc_del - removes an element from the cache | ||
| 271 | * @lc: The lru_cache object | ||
| 272 | * @e: The element to remove | ||
| 273 | * | ||
| 274 | * @e must be unused (refcnt == 0). Moves @e from "lru" to "free" list, | ||
| 275 | * sets @e->enr to %LC_FREE. | ||
| 276 | */ | ||
| 277 | void lc_del(struct lru_cache *lc, struct lc_element *e) | ||
| 278 | { | ||
| 279 | PARANOIA_ENTRY(); | ||
| 280 | PARANOIA_LC_ELEMENT(lc, e); | ||
| 281 | BUG_ON(e->refcnt); | ||
| 282 | |||
| 283 | e->lc_number = LC_FREE; | ||
| 284 | hlist_del_init(&e->colision); | ||
| 285 | list_move(&e->list, &lc->free); | ||
| 286 | RETURN(); | ||
| 287 | } | ||
| 288 | |||
| 289 | static struct lc_element *lc_get_unused_element(struct lru_cache *lc) | ||
| 290 | { | ||
| 291 | struct list_head *n; | ||
| 292 | |||
| 293 | if (list_empty(&lc->free)) | ||
| 294 | return lc_evict(lc); | ||
| 295 | |||
| 296 | n = lc->free.next; | ||
| 297 | list_del(n); | ||
| 298 | return list_entry(n, struct lc_element, list); | ||
| 299 | } | ||
| 300 | |||
| 301 | static int lc_unused_element_available(struct lru_cache *lc) | ||
| 302 | { | ||
| 303 | if (!list_empty(&lc->free)) | ||
| 304 | return 1; /* something on the free list */ | ||
| 305 | if (!list_empty(&lc->lru)) | ||
| 306 | return 1; /* something to evict */ | ||
| 307 | |||
| 308 | return 0; | ||
| 309 | } | ||
| 310 | |||
| 311 | |||
| 312 | /** | ||
| 313 | * lc_get - get element by label, maybe change the active set | ||
| 314 | * @lc: the lru cache to operate on | ||
| 315 | * @enr: the label to look up | ||
| 316 | * | ||
| 317 | * Finds an element in the cache, increases its usage count, | ||
| 318 | * "touches" and returns it. | ||
| 319 | * | ||
| 320 | * In case the requested number is not present, it needs to be added to the | ||
| 321 | * cache. Therefore it is possible that an other element becomes evicted from | ||
| 322 | * the cache. In either case, the user is notified so he is able to e.g. keep | ||
| 323 | * a persistent log of the cache changes, and therefore the objects in use. | ||
| 324 | * | ||
| 325 | * Return values: | ||
| 326 | * NULL | ||
| 327 | * The cache was marked %LC_STARVING, | ||
| 328 | * or the requested label was not in the active set | ||
| 329 | * and a changing transaction is still pending (@lc was marked %LC_DIRTY). | ||
| 330 | * Or no unused or free element could be recycled (@lc will be marked as | ||
| 331 | * %LC_STARVING, blocking further lc_get() operations). | ||
| 332 | * | ||
| 333 | * pointer to the element with the REQUESTED element number. | ||
| 334 | * In this case, it can be used right away | ||
| 335 | * | ||
| 336 | * pointer to an UNUSED element with some different element number, | ||
| 337 | * where that different number may also be %LC_FREE. | ||
| 338 | * | ||
| 339 | * In this case, the cache is marked %LC_DIRTY (blocking further changes), | ||
| 340 | * and the returned element pointer is removed from the lru list and | ||
| 341 | * hash collision chains. The user now should do whatever housekeeping | ||
| 342 | * is necessary. | ||
| 343 | * Then he must call lc_changed(lc,element_pointer), to finish | ||
| 344 | * the change. | ||
| 345 | * | ||
| 346 | * NOTE: The user needs to check the lc_number on EACH use, so he recognizes | ||
| 347 | * any cache set change. | ||
| 348 | */ | ||
| 349 | struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr) | ||
| 350 | { | ||
| 351 | struct lc_element *e; | ||
| 352 | |||
| 353 | PARANOIA_ENTRY(); | ||
| 354 | if (lc->flags & LC_STARVING) { | ||
| 355 | ++lc->starving; | ||
| 356 | RETURN(NULL); | ||
| 357 | } | ||
| 358 | |||
| 359 | e = lc_find(lc, enr); | ||
| 360 | if (e) { | ||
| 361 | ++lc->hits; | ||
| 362 | if (e->refcnt++ == 0) | ||
| 363 | lc->used++; | ||
| 364 | list_move(&e->list, &lc->in_use); /* Not evictable... */ | ||
| 365 | RETURN(e); | ||
| 366 | } | ||
| 367 | |||
| 368 | ++lc->misses; | ||
| 369 | |||
| 370 | /* In case there is nothing available and we can not kick out | ||
| 371 | * the LRU element, we have to wait ... | ||
| 372 | */ | ||
| 373 | if (!lc_unused_element_available(lc)) { | ||
| 374 | __set_bit(__LC_STARVING, &lc->flags); | ||
| 375 | RETURN(NULL); | ||
| 376 | } | ||
| 377 | |||
| 378 | /* it was not present in the active set. | ||
| 379 | * we are going to recycle an unused (or even "free") element. | ||
| 380 | * user may need to commit a transaction to record that change. | ||
| 381 | * we serialize on flags & TF_DIRTY */ | ||
| 382 | if (test_and_set_bit(__LC_DIRTY, &lc->flags)) { | ||
| 383 | ++lc->dirty; | ||
| 384 | RETURN(NULL); | ||
| 385 | } | ||
| 386 | |||
| 387 | e = lc_get_unused_element(lc); | ||
| 388 | BUG_ON(!e); | ||
| 389 | |||
| 390 | clear_bit(__LC_STARVING, &lc->flags); | ||
| 391 | BUG_ON(++e->refcnt != 1); | ||
| 392 | lc->used++; | ||
| 393 | |||
| 394 | lc->changing_element = e; | ||
| 395 | lc->new_number = enr; | ||
| 396 | |||
| 397 | RETURN(e); | ||
| 398 | } | ||
| 399 | |||
| 400 | /* similar to lc_get, | ||
| 401 | * but only gets a new reference on an existing element. | ||
| 402 | * you either get the requested element, or NULL. | ||
| 403 | * will be consolidated into one function. | ||
| 404 | */ | ||
| 405 | struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr) | ||
| 406 | { | ||
| 407 | struct lc_element *e; | ||
| 408 | |||
| 409 | PARANOIA_ENTRY(); | ||
| 410 | if (lc->flags & LC_STARVING) { | ||
| 411 | ++lc->starving; | ||
| 412 | RETURN(NULL); | ||
| 413 | } | ||
| 414 | |||
| 415 | e = lc_find(lc, enr); | ||
| 416 | if (e) { | ||
| 417 | ++lc->hits; | ||
| 418 | if (e->refcnt++ == 0) | ||
| 419 | lc->used++; | ||
| 420 | list_move(&e->list, &lc->in_use); /* Not evictable... */ | ||
| 421 | } | ||
| 422 | RETURN(e); | ||
| 423 | } | ||
| 424 | |||
| 425 | /** | ||
| 426 | * lc_changed - tell @lc that the change has been recorded | ||
| 427 | * @lc: the lru cache to operate on | ||
| 428 | * @e: the element pending label change | ||
| 429 | */ | ||
| 430 | void lc_changed(struct lru_cache *lc, struct lc_element *e) | ||
| 431 | { | ||
| 432 | PARANOIA_ENTRY(); | ||
| 433 | BUG_ON(e != lc->changing_element); | ||
| 434 | PARANOIA_LC_ELEMENT(lc, e); | ||
| 435 | ++lc->changed; | ||
| 436 | e->lc_number = lc->new_number; | ||
| 437 | list_add(&e->list, &lc->in_use); | ||
| 438 | hlist_add_head(&e->colision, lc_hash_slot(lc, lc->new_number)); | ||
| 439 | lc->changing_element = NULL; | ||
| 440 | lc->new_number = LC_FREE; | ||
| 441 | clear_bit(__LC_DIRTY, &lc->flags); | ||
| 442 | smp_mb__after_clear_bit(); | ||
| 443 | RETURN(); | ||
| 444 | } | ||
| 445 | |||
| 446 | |||
| 447 | /** | ||
| 448 | * lc_put - give up refcnt of @e | ||
| 449 | * @lc: the lru cache to operate on | ||
| 450 | * @e: the element to put | ||
| 451 | * | ||
| 452 | * If refcnt reaches zero, the element is moved to the lru list, | ||
| 453 | * and a %LC_STARVING (if set) is cleared. | ||
| 454 | * Returns the new (post-decrement) refcnt. | ||
| 455 | */ | ||
| 456 | unsigned int lc_put(struct lru_cache *lc, struct lc_element *e) | ||
| 457 | { | ||
| 458 | PARANOIA_ENTRY(); | ||
| 459 | PARANOIA_LC_ELEMENT(lc, e); | ||
| 460 | BUG_ON(e->refcnt == 0); | ||
| 461 | BUG_ON(e == lc->changing_element); | ||
| 462 | if (--e->refcnt == 0) { | ||
| 463 | /* move it to the front of LRU. */ | ||
| 464 | list_move(&e->list, &lc->lru); | ||
| 465 | lc->used--; | ||
| 466 | clear_bit(__LC_STARVING, &lc->flags); | ||
| 467 | smp_mb__after_clear_bit(); | ||
| 468 | } | ||
| 469 | RETURN(e->refcnt); | ||
| 470 | } | ||
| 471 | |||
| 472 | /** | ||
| 473 | * lc_element_by_index | ||
| 474 | * @lc: the lru cache to operate on | ||
| 475 | * @i: the index of the element to return | ||
| 476 | */ | ||
| 477 | struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i) | ||
| 478 | { | ||
| 479 | BUG_ON(i >= lc->nr_elements); | ||
| 480 | BUG_ON(lc->lc_element[i] == NULL); | ||
| 481 | BUG_ON(lc->lc_element[i]->lc_index != i); | ||
| 482 | return lc->lc_element[i]; | ||
| 483 | } | ||
| 484 | |||
| 485 | /** | ||
| 486 | * lc_index_of | ||
| 487 | * @lc: the lru cache to operate on | ||
| 488 | * @e: the element to query for its index position in lc->element | ||
| 489 | */ | ||
| 490 | unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e) | ||
| 491 | { | ||
| 492 | PARANOIA_LC_ELEMENT(lc, e); | ||
| 493 | return e->lc_index; | ||
| 494 | } | ||
| 495 | |||
| 496 | /** | ||
| 497 | * lc_set - associate index with label | ||
| 498 | * @lc: the lru cache to operate on | ||
| 499 | * @enr: the label to set | ||
| 500 | * @index: the element index to associate label with. | ||
| 501 | * | ||
| 502 | * Used to initialize the active set to some previously recorded state. | ||
| 503 | */ | ||
| 504 | void lc_set(struct lru_cache *lc, unsigned int enr, int index) | ||
| 505 | { | ||
| 506 | struct lc_element *e; | ||
| 507 | |||
| 508 | if (index < 0 || index >= lc->nr_elements) | ||
| 509 | return; | ||
| 510 | |||
| 511 | e = lc_element_by_index(lc, index); | ||
| 512 | e->lc_number = enr; | ||
| 513 | |||
| 514 | hlist_del_init(&e->colision); | ||
| 515 | hlist_add_head(&e->colision, lc_hash_slot(lc, enr)); | ||
| 516 | list_move(&e->list, e->refcnt ? &lc->in_use : &lc->lru); | ||
| 517 | } | ||
| 518 | |||
| 519 | /** | ||
| 520 | * lc_dump - Dump a complete LRU cache to seq in textual form. | ||
| 521 | * @lc: the lru cache to operate on | ||
| 522 | * @seq: the &struct seq_file pointer to seq_printf into | ||
| 523 | * @utext: user supplied "heading" or other info | ||
| 524 | * @detail: function pointer the user may provide to dump further details | ||
| 525 | * of the object the lc_element is embedded in. | ||
| 526 | */ | ||
| 527 | void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext, | ||
| 528 | void (*detail) (struct seq_file *, struct lc_element *)) | ||
| 529 | { | ||
| 530 | unsigned int nr_elements = lc->nr_elements; | ||
| 531 | struct lc_element *e; | ||
| 532 | int i; | ||
| 533 | |||
| 534 | seq_printf(seq, "\tnn: lc_number refcnt %s\n ", utext); | ||
| 535 | for (i = 0; i < nr_elements; i++) { | ||
| 536 | e = lc_element_by_index(lc, i); | ||
| 537 | if (e->lc_number == LC_FREE) { | ||
| 538 | seq_printf(seq, "\t%2d: FREE\n", i); | ||
| 539 | } else { | ||
| 540 | seq_printf(seq, "\t%2d: %4u %4u ", i, | ||
| 541 | e->lc_number, e->refcnt); | ||
| 542 | detail(seq, e); | ||
| 543 | } | ||
| 544 | } | ||
| 545 | } | ||
| 546 | |||
| 547 | EXPORT_SYMBOL(lc_create); | ||
| 548 | EXPORT_SYMBOL(lc_reset); | ||
| 549 | EXPORT_SYMBOL(lc_destroy); | ||
| 550 | EXPORT_SYMBOL(lc_set); | ||
| 551 | EXPORT_SYMBOL(lc_del); | ||
| 552 | EXPORT_SYMBOL(lc_try_get); | ||
| 553 | EXPORT_SYMBOL(lc_find); | ||
| 554 | EXPORT_SYMBOL(lc_get); | ||
| 555 | EXPORT_SYMBOL(lc_put); | ||
| 556 | EXPORT_SYMBOL(lc_changed); | ||
| 557 | EXPORT_SYMBOL(lc_element_by_index); | ||
| 558 | EXPORT_SYMBOL(lc_index_of); | ||
| 559 | EXPORT_SYMBOL(lc_seq_printf_stats); | ||
| 560 | EXPORT_SYMBOL(lc_seq_dump_details); | ||
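
Taken together, lc_get()/lc_changed()/lc_put() are meant to be driven by the caller in a fixed sequence. A minimal sketch of that sequence, assuming the caller already created the cache with lc_create(), supplies lc and enr itself, and serializes all of these calls with its own lock:

	struct lc_element *e;

	e = lc_get(lc, enr);		/* ask for label enr in the active set */
	if (e == NULL)
		return -EBUSY;		/* cache is starving or a change is pending */
	if (e->lc_number != enr) {
		/* an unused element was recycled for us: record the label
		 * change (e.g. write a transaction), then acknowledge it */
		lc_changed(lc, e);
	}
	/* ... use the object the lc_element is embedded in ... */
	lc_put(lc, e);			/* drop the reference again */
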
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c index 5dc6b29c1575..f2fd09850223 100644 --- a/lib/lzo/lzo1x_decompress.c +++ b/lib/lzo/lzo1x_decompress.c | |||
| @@ -11,11 +11,13 @@ | |||
| 11 | * Richard Purdie <rpurdie@openedhand.com> | 11 | * Richard Purdie <rpurdie@openedhand.com> |
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #ifndef STATIC | ||
| 14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 15 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
| 16 | #include <linux/lzo.h> | 17 | #endif |
| 17 | #include <asm/byteorder.h> | 18 | |
| 18 | #include <asm/unaligned.h> | 19 | #include <asm/unaligned.h> |
| 20 | #include <linux/lzo.h> | ||
| 19 | #include "lzodefs.h" | 21 | #include "lzodefs.h" |
| 20 | 22 | ||
| 21 | #define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x)) | 23 | #define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x)) |
| @@ -244,9 +246,10 @@ lookbehind_overrun: | |||
| 244 | *out_len = op - out; | 246 | *out_len = op - out; |
| 245 | return LZO_E_LOOKBEHIND_OVERRUN; | 247 | return LZO_E_LOOKBEHIND_OVERRUN; |
| 246 | } | 248 | } |
| 247 | 249 | #ifndef STATIC | |
| 248 | EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); | 250 | EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); |
| 249 | 251 | ||
| 250 | MODULE_LICENSE("GPL"); | 252 | MODULE_LICENSE("GPL"); |
| 251 | MODULE_DESCRIPTION("LZO1X Decompressor"); | 253 | MODULE_DESCRIPTION("LZO1X Decompressor"); |
| 252 | 254 | ||
| 255 | #endif | ||
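
The new STATIC guard exists so that this same file can be textually included by a pre-boot decompressor wrapper instead of being built with module glue. Roughly, as an illustration of how such a wrapper (the one this series adds for DECOMPRESS_LZO) pulls it in:

	#ifdef STATIC
	/* pre-boot environment: no module.h, no EXPORT_SYMBOL */
	#include "lzo/lzo1x_decompress.c"
	#else
	#include <linux/decompress/unlzo.h>
	#endif
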
diff --git a/lib/parser.c b/lib/parser.c index b00d02059a5f..fb34977246bb 100644 --- a/lib/parser.c +++ b/lib/parser.c | |||
| @@ -56,13 +56,16 @@ static int match_one(char *s, const char *p, substring_t args[]) | |||
| 56 | 56 | ||
| 57 | args[argc].from = s; | 57 | args[argc].from = s; |
| 58 | switch (*p++) { | 58 | switch (*p++) { |
| 59 | case 's': | 59 | case 's': { |
| 60 | if (strlen(s) == 0) | 60 | size_t str_len = strlen(s); |
| 61 | |||
| 62 | if (str_len == 0) | ||
| 61 | return 0; | 63 | return 0; |
| 62 | else if (len == -1 || len > strlen(s)) | 64 | if (len == -1 || len > str_len) |
| 63 | len = strlen(s); | 65 | len = str_len; |
| 64 | args[argc].to = s + len; | 66 | args[argc].to = s + len; |
| 65 | break; | 67 | break; |
| 68 | } | ||
| 66 | case 'd': | 69 | case 'd': |
| 67 | simple_strtol(s, &args[argc].to, 0); | 70 | simple_strtol(s, &args[argc].to, 0); |
| 68 | goto num; | 71 | goto num; |
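
match_one() is normally reached through match_token() when parsing mount-style options; a small illustration of the %s case the hunk above touches, with made-up identifiers:

	enum { Opt_name, Opt_err };
	static const match_table_t tokens = {
		{ Opt_name, "name=%s" },
		{ Opt_err,  NULL }
	};

	substring_t args[MAX_OPT_ARGS];
	char opt[] = "name=foo";
	char name[32];

	if (match_token(opt, tokens, args) == Opt_name)
		match_strlcpy(name, &args[0], sizeof(name));
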
diff --git a/lib/plist.c b/lib/plist.c index d6c64a824e1d..1471988d9190 100644 --- a/lib/plist.c +++ b/lib/plist.c | |||
| @@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top) | |||
| 54 | 54 | ||
| 55 | static void plist_check_head(struct plist_head *head) | 55 | static void plist_check_head(struct plist_head *head) |
| 56 | { | 56 | { |
| 57 | WARN_ON(!head->lock); | 57 | WARN_ON(!head->rawlock && !head->spinlock); |
| 58 | if (head->lock) | 58 | if (head->rawlock) |
| 59 | WARN_ON_SMP(!spin_is_locked(head->lock)); | 59 | WARN_ON_SMP(!raw_spin_is_locked(head->rawlock)); |
| 60 | if (head->spinlock) | ||
| 61 | WARN_ON_SMP(!spin_is_locked(head->spinlock)); | ||
| 60 | plist_check_list(&head->prio_list); | 62 | plist_check_list(&head->prio_list); |
| 61 | plist_check_list(&head->node_list); | 63 | plist_check_list(&head->node_list); |
| 62 | } | 64 | } |
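
With the head now carrying either a raw or an ordinary spinlock pointer, initialisation picks the matching variant so the debug checks above know which lock to test. A sketch, assuming the companion header change provides a _raw initialiser:

	static DEFINE_RAW_SPINLOCK(pi_lock);
	static DEFINE_SPINLOCK(normal_lock);
	struct plist_head a, b;

	plist_head_init_raw(&a, &pi_lock);	/* checked via head->rawlock */
	plist_head_init(&b, &normal_lock);	/* checked via head->spinlock */
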
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 23abbd93cae1..6b9670d6bbf9 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -200,6 +200,9 @@ radix_tree_node_free(struct radix_tree_node *node) | |||
| 200 | * ensure that the addition of a single element in the tree cannot fail. On | 200 | * ensure that the addition of a single element in the tree cannot fail. On |
| 201 | * success, return zero, with preemption disabled. On error, return -ENOMEM | 201 | * success, return zero, with preemption disabled. On error, return -ENOMEM |
| 202 | * with preemption not disabled. | 202 | * with preemption not disabled. |
| 203 | * | ||
| 204 | * To make use of this facility, the radix tree must be initialised without | ||
| 205 | * __GFP_WAIT being passed to INIT_RADIX_TREE(). | ||
| 203 | */ | 206 | */ |
| 204 | int radix_tree_preload(gfp_t gfp_mask) | 207 | int radix_tree_preload(gfp_t gfp_mask) |
| 205 | { | 208 | { |
| @@ -361,7 +364,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, | |||
| 361 | unsigned int height, shift; | 364 | unsigned int height, shift; |
| 362 | struct radix_tree_node *node, **slot; | 365 | struct radix_tree_node *node, **slot; |
| 363 | 366 | ||
| 364 | node = rcu_dereference(root->rnode); | 367 | node = rcu_dereference_raw(root->rnode); |
| 365 | if (node == NULL) | 368 | if (node == NULL) |
| 366 | return NULL; | 369 | return NULL; |
| 367 | 370 | ||
| @@ -381,7 +384,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, | |||
| 381 | do { | 384 | do { |
| 382 | slot = (struct radix_tree_node **) | 385 | slot = (struct radix_tree_node **) |
| 383 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); | 386 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); |
| 384 | node = rcu_dereference(*slot); | 387 | node = rcu_dereference_raw(*slot); |
| 385 | if (node == NULL) | 388 | if (node == NULL) |
| 386 | return NULL; | 389 | return NULL; |
| 387 | 390 | ||
| @@ -543,7 +546,6 @@ out: | |||
| 543 | } | 546 | } |
| 544 | EXPORT_SYMBOL(radix_tree_tag_clear); | 547 | EXPORT_SYMBOL(radix_tree_tag_clear); |
| 545 | 548 | ||
| 546 | #ifndef __KERNEL__ /* Only the test harness uses this at present */ | ||
| 547 | /** | 549 | /** |
| 548 | * radix_tree_tag_get - get a tag on a radix tree node | 550 | * radix_tree_tag_get - get a tag on a radix tree node |
| 549 | * @root: radix tree root | 551 | * @root: radix tree root |
| @@ -566,7 +568,7 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
| 566 | if (!root_tag_get(root, tag)) | 568 | if (!root_tag_get(root, tag)) |
| 567 | return 0; | 569 | return 0; |
| 568 | 570 | ||
| 569 | node = rcu_dereference(root->rnode); | 571 | node = rcu_dereference_raw(root->rnode); |
| 570 | if (node == NULL) | 572 | if (node == NULL) |
| 571 | return 0; | 573 | return 0; |
| 572 | 574 | ||
| @@ -600,13 +602,12 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
| 600 | BUG_ON(ret && saw_unset_tag); | 602 | BUG_ON(ret && saw_unset_tag); |
| 601 | return !!ret; | 603 | return !!ret; |
| 602 | } | 604 | } |
| 603 | node = rcu_dereference(node->slots[offset]); | 605 | node = rcu_dereference_raw(node->slots[offset]); |
| 604 | shift -= RADIX_TREE_MAP_SHIFT; | 606 | shift -= RADIX_TREE_MAP_SHIFT; |
| 605 | height--; | 607 | height--; |
| 606 | } | 608 | } |
| 607 | } | 609 | } |
| 608 | EXPORT_SYMBOL(radix_tree_tag_get); | 610 | EXPORT_SYMBOL(radix_tree_tag_get); |
| 609 | #endif | ||
| 610 | 611 | ||
| 611 | /** | 612 | /** |
| 612 | * radix_tree_next_hole - find the next hole (not-present entry) | 613 | * radix_tree_next_hole - find the next hole (not-present entry) |
| @@ -710,7 +711,7 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, | |||
| 710 | } | 711 | } |
| 711 | 712 | ||
| 712 | shift -= RADIX_TREE_MAP_SHIFT; | 713 | shift -= RADIX_TREE_MAP_SHIFT; |
| 713 | slot = rcu_dereference(slot->slots[i]); | 714 | slot = rcu_dereference_raw(slot->slots[i]); |
| 714 | if (slot == NULL) | 715 | if (slot == NULL) |
| 715 | goto out; | 716 | goto out; |
| 716 | } | 717 | } |
| @@ -757,7 +758,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
| 757 | unsigned long cur_index = first_index; | 758 | unsigned long cur_index = first_index; |
| 758 | unsigned int ret; | 759 | unsigned int ret; |
| 759 | 760 | ||
| 760 | node = rcu_dereference(root->rnode); | 761 | node = rcu_dereference_raw(root->rnode); |
| 761 | if (!node) | 762 | if (!node) |
| 762 | return 0; | 763 | return 0; |
| 763 | 764 | ||
| @@ -786,7 +787,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
| 786 | slot = *(((void ***)results)[ret + i]); | 787 | slot = *(((void ***)results)[ret + i]); |
| 787 | if (!slot) | 788 | if (!slot) |
| 788 | continue; | 789 | continue; |
| 789 | results[ret + nr_found] = rcu_dereference(slot); | 790 | results[ret + nr_found] = rcu_dereference_raw(slot); |
| 790 | nr_found++; | 791 | nr_found++; |
| 791 | } | 792 | } |
| 792 | ret += nr_found; | 793 | ret += nr_found; |
| @@ -825,7 +826,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | |||
| 825 | unsigned long cur_index = first_index; | 826 | unsigned long cur_index = first_index; |
| 826 | unsigned int ret; | 827 | unsigned int ret; |
| 827 | 828 | ||
| 828 | node = rcu_dereference(root->rnode); | 829 | node = rcu_dereference_raw(root->rnode); |
| 829 | if (!node) | 830 | if (!node) |
| 830 | return 0; | 831 | return 0; |
| 831 | 832 | ||
| @@ -914,7 +915,7 @@ __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index, | |||
| 914 | } | 915 | } |
| 915 | } | 916 | } |
| 916 | shift -= RADIX_TREE_MAP_SHIFT; | 917 | shift -= RADIX_TREE_MAP_SHIFT; |
| 917 | slot = rcu_dereference(slot->slots[i]); | 918 | slot = rcu_dereference_raw(slot->slots[i]); |
| 918 | if (slot == NULL) | 919 | if (slot == NULL) |
| 919 | break; | 920 | break; |
| 920 | } | 921 | } |
| @@ -950,7 +951,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
| 950 | if (!root_tag_get(root, tag)) | 951 | if (!root_tag_get(root, tag)) |
| 951 | return 0; | 952 | return 0; |
| 952 | 953 | ||
| 953 | node = rcu_dereference(root->rnode); | 954 | node = rcu_dereference_raw(root->rnode); |
| 954 | if (!node) | 955 | if (!node) |
| 955 | return 0; | 956 | return 0; |
| 956 | 957 | ||
| @@ -979,7 +980,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
| 979 | slot = *(((void ***)results)[ret + i]); | 980 | slot = *(((void ***)results)[ret + i]); |
| 980 | if (!slot) | 981 | if (!slot) |
| 981 | continue; | 982 | continue; |
| 982 | results[ret + nr_found] = rcu_dereference(slot); | 983 | results[ret + nr_found] = rcu_dereference_raw(slot); |
| 983 | nr_found++; | 984 | nr_found++; |
| 984 | } | 985 | } |
| 985 | ret += nr_found; | 986 | ret += nr_found; |
| @@ -1019,7 +1020,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | |||
| 1019 | if (!root_tag_get(root, tag)) | 1020 | if (!root_tag_get(root, tag)) |
| 1020 | return 0; | 1021 | return 0; |
| 1021 | 1022 | ||
| 1022 | node = rcu_dereference(root->rnode); | 1023 | node = rcu_dereference_raw(root->rnode); |
| 1023 | if (!node) | 1024 | if (!node) |
| 1024 | return 0; | 1025 | return 0; |
| 1025 | 1026 | ||
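
The preload pattern the new comment refers to looks roughly like this (a sketch; the tree is assumed to have been set up with INIT_RADIX_TREE(&tree, GFP_ATOMIC) or another mask without __GFP_WAIT, and tree_lock/index/item are the caller's):

	if (radix_tree_preload(GFP_KERNEL))
		return -ENOMEM;			/* insertion cannot be guaranteed */
	spin_lock(&tree_lock);
	err = radix_tree_insert(&tree, index, item);
	spin_unlock(&tree_lock);
	radix_tree_preload_end();		/* re-enables preemption */
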
diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 26187edcc7ea..09f5ce1810dc 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c | |||
| @@ -7,15 +7,12 @@ | |||
| 7 | * parameter. Now every user can use their own standalone ratelimit_state. | 7 | * parameter. Now every user can use their own standalone ratelimit_state. |
| 8 | * | 8 | * |
| 9 | * This file is released under the GPLv2. | 9 | * This file is released under the GPLv2. |
| 10 | * | ||
| 11 | */ | 10 | */ |
| 12 | 11 | ||
| 13 | #include <linux/kernel.h> | 12 | #include <linux/ratelimit.h> |
| 14 | #include <linux/jiffies.h> | 13 | #include <linux/jiffies.h> |
| 15 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 16 | 15 | ||
| 17 | static DEFINE_SPINLOCK(ratelimit_lock); | ||
| 18 | |||
| 19 | /* | 16 | /* |
| 20 | * __ratelimit - rate limiting | 17 | * __ratelimit - rate limiting |
| 21 | * @rs: ratelimit_state data | 18 | * @rs: ratelimit_state data |
| @@ -23,35 +20,43 @@ static DEFINE_SPINLOCK(ratelimit_lock); | |||
| 23 | * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks | 20 | * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks |
| 24 | * in every @rs->ratelimit_jiffies | 21 | * in every @rs->ratelimit_jiffies |
| 25 | */ | 22 | */ |
| 26 | int __ratelimit(struct ratelimit_state *rs) | 23 | int ___ratelimit(struct ratelimit_state *rs, const char *func) |
| 27 | { | 24 | { |
| 28 | unsigned long flags; | 25 | unsigned long flags; |
| 26 | int ret; | ||
| 29 | 27 | ||
| 30 | if (!rs->interval) | 28 | if (!rs->interval) |
| 31 | return 1; | 29 | return 1; |
| 32 | 30 | ||
| 33 | spin_lock_irqsave(&ratelimit_lock, flags); | 31 | /* |
| 32 | * If we contend on this state's lock then almost | ||
| 33 | * by definition we are too busy to print a message, | ||
| 34 | * in addition to the one that will be printed by | ||
| 35 | * the entity that is holding the lock already: | ||
| 36 | */ | ||
| 37 | if (!spin_trylock_irqsave(&rs->lock, flags)) | ||
| 38 | return 1; | ||
| 39 | |||
| 34 | if (!rs->begin) | 40 | if (!rs->begin) |
| 35 | rs->begin = jiffies; | 41 | rs->begin = jiffies; |
| 36 | 42 | ||
| 37 | if (time_is_before_jiffies(rs->begin + rs->interval)) { | 43 | if (time_is_before_jiffies(rs->begin + rs->interval)) { |
| 38 | if (rs->missed) | 44 | if (rs->missed) |
| 39 | printk(KERN_WARNING "%s: %d callbacks suppressed\n", | 45 | printk(KERN_WARNING "%s: %d callbacks suppressed\n", |
| 40 | __func__, rs->missed); | 46 | func, rs->missed); |
| 41 | rs->begin = 0; | 47 | rs->begin = 0; |
| 42 | rs->printed = 0; | 48 | rs->printed = 0; |
| 43 | rs->missed = 0; | 49 | rs->missed = 0; |
| 44 | } | 50 | } |
| 45 | if (rs->burst && rs->burst > rs->printed) | 51 | if (rs->burst && rs->burst > rs->printed) { |
| 46 | goto print; | 52 | rs->printed++; |
| 47 | 53 | ret = 1; | |
| 48 | rs->missed++; | 54 | } else { |
| 49 | spin_unlock_irqrestore(&ratelimit_lock, flags); | 55 | rs->missed++; |
| 50 | return 0; | 56 | ret = 0; |
| 57 | } | ||
| 58 | spin_unlock_irqrestore(&rs->lock, flags); | ||
| 51 | 59 | ||
| 52 | print: | 60 | return ret; |
| 53 | rs->printed++; | ||
| 54 | spin_unlock_irqrestore(&ratelimit_lock, flags); | ||
| 55 | return 1; | ||
| 56 | } | 61 | } |
| 57 | EXPORT_SYMBOL(__ratelimit); | 62 | EXPORT_SYMBOL(___ratelimit); |
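
Callers keep using the old name: __ratelimit() is assumed to become a wrapper in the matching header change that passes __func__ through to ___ratelimit(), so a per-site limit still reads like this:

	static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10);	/* at most 10 messages per 5s */

	if (__ratelimit(&rs))
		printk(KERN_WARNING "something worth logging\n");
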
diff --git a/lib/rational.c b/lib/rational.c index b3c099b5478e..3ed247b80662 100644 --- a/lib/rational.c +++ b/lib/rational.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/rational.h> | 9 | #include <linux/rational.h> |
| 10 | #include <linux/module.h> | ||
| 10 | 11 | ||
| 11 | /* | 12 | /* |
| 12 | * calculate best rational approximation for a given fraction | 13 | * calculate best rational approximation for a given fraction |
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index 9df3ca56db11..ccf95bff7984 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c | |||
| @@ -17,6 +17,19 @@ struct rwsem_waiter { | |||
| 17 | #define RWSEM_WAITING_FOR_WRITE 0x00000002 | 17 | #define RWSEM_WAITING_FOR_WRITE 0x00000002 |
| 18 | }; | 18 | }; |
| 19 | 19 | ||
| 20 | int rwsem_is_locked(struct rw_semaphore *sem) | ||
| 21 | { | ||
| 22 | int ret = 1; | ||
| 23 | unsigned long flags; | ||
| 24 | |||
| 25 | if (spin_trylock_irqsave(&sem->wait_lock, flags)) { | ||
| 26 | ret = (sem->activity != 0); | ||
| 27 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 28 | } | ||
| 29 | return ret; | ||
| 30 | } | ||
| 31 | EXPORT_SYMBOL(rwsem_is_locked); | ||
| 32 | |||
| 20 | /* | 33 | /* |
| 21 | * initialise the semaphore | 34 | * initialise the semaphore |
| 22 | */ | 35 | */ |
| @@ -34,6 +47,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name, | |||
| 34 | spin_lock_init(&sem->wait_lock); | 47 | spin_lock_init(&sem->wait_lock); |
| 35 | INIT_LIST_HEAD(&sem->wait_list); | 48 | INIT_LIST_HEAD(&sem->wait_list); |
| 36 | } | 49 | } |
| 50 | EXPORT_SYMBOL(__init_rwsem); | ||
| 37 | 51 | ||
| 38 | /* | 52 | /* |
| 39 | * handle the lock release when processes blocked on it that can now run | 53 | * handle the lock release when processes blocked on it that can now run |
| @@ -305,12 +319,3 @@ void __downgrade_write(struct rw_semaphore *sem) | |||
| 305 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 319 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 306 | } | 320 | } |
| 307 | 321 | ||
| 308 | EXPORT_SYMBOL(__init_rwsem); | ||
| 309 | EXPORT_SYMBOL(__down_read); | ||
| 310 | EXPORT_SYMBOL(__down_read_trylock); | ||
| 311 | EXPORT_SYMBOL(__down_write_nested); | ||
| 312 | EXPORT_SYMBOL(__down_write); | ||
| 313 | EXPORT_SYMBOL(__down_write_trylock); | ||
| 314 | EXPORT_SYMBOL(__up_read); | ||
| 315 | EXPORT_SYMBOL(__up_write); | ||
| 316 | EXPORT_SYMBOL(__downgrade_write); | ||
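
Exporting rwsem_is_locked() for the spinlock-based implementation mainly serves debug assertions in generic code; an illustrative (best-effort, since the trylock above can miss a transient holder) use:

	WARN_ON(!rwsem_is_locked(&sb->s_umount));	/* field name only as an example */
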
diff --git a/lib/show_mem.c b/lib/show_mem.c index 238e72a18ce1..fdc77c82f922 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c | |||
| @@ -15,7 +15,7 @@ void show_mem(void) | |||
| 15 | unsigned long total = 0, reserved = 0, shared = 0, | 15 | unsigned long total = 0, reserved = 0, shared = 0, |
| 16 | nonshared = 0, highmem = 0; | 16 | nonshared = 0, highmem = 0; |
| 17 | 17 | ||
| 18 | printk(KERN_INFO "Mem-Info:\n"); | 18 | printk("Mem-Info:\n"); |
| 19 | show_free_areas(); | 19 | show_free_areas(); |
| 20 | 20 | ||
| 21 | for_each_online_pgdat(pgdat) { | 21 | for_each_online_pgdat(pgdat) { |
| @@ -49,15 +49,15 @@ void show_mem(void) | |||
| 49 | pgdat_resize_unlock(pgdat, &flags); | 49 | pgdat_resize_unlock(pgdat, &flags); |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | printk(KERN_INFO "%lu pages RAM\n", total); | 52 | printk("%lu pages RAM\n", total); |
| 53 | #ifdef CONFIG_HIGHMEM | 53 | #ifdef CONFIG_HIGHMEM |
| 54 | printk(KERN_INFO "%lu pages HighMem\n", highmem); | 54 | printk("%lu pages HighMem\n", highmem); |
| 55 | #endif | 55 | #endif |
| 56 | printk(KERN_INFO "%lu pages reserved\n", reserved); | 56 | printk("%lu pages reserved\n", reserved); |
| 57 | printk(KERN_INFO "%lu pages shared\n", shared); | 57 | printk("%lu pages shared\n", shared); |
| 58 | printk(KERN_INFO "%lu pages non-shared\n", nonshared); | 58 | printk("%lu pages non-shared\n", nonshared); |
| 59 | #ifdef CONFIG_QUICKLIST | 59 | #ifdef CONFIG_QUICKLIST |
| 60 | printk(KERN_INFO "%lu pages in pagetable cache\n", | 60 | printk("%lu pages in pagetable cache\n", |
| 61 | quicklist_total_size()); | 61 | quicklist_total_size()); |
| 62 | #endif | 62 | #endif |
| 63 | } | 63 | } |
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 9c4b0256490b..4755b98b6dfb 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c | |||
| @@ -13,8 +13,8 @@ | |||
| 13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
| 14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 15 | 15 | ||
| 16 | void __spin_lock_init(spinlock_t *lock, const char *name, | 16 | void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, |
| 17 | struct lock_class_key *key) | 17 | struct lock_class_key *key) |
| 18 | { | 18 | { |
| 19 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 19 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 20 | /* | 20 | /* |
| @@ -23,13 +23,13 @@ void __spin_lock_init(spinlock_t *lock, const char *name, | |||
| 23 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); | 23 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); |
| 24 | lockdep_init_map(&lock->dep_map, name, key, 0); | 24 | lockdep_init_map(&lock->dep_map, name, key, 0); |
| 25 | #endif | 25 | #endif |
| 26 | lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 26 | lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
| 27 | lock->magic = SPINLOCK_MAGIC; | 27 | lock->magic = SPINLOCK_MAGIC; |
| 28 | lock->owner = SPINLOCK_OWNER_INIT; | 28 | lock->owner = SPINLOCK_OWNER_INIT; |
| 29 | lock->owner_cpu = -1; | 29 | lock->owner_cpu = -1; |
| 30 | } | 30 | } |
| 31 | 31 | ||
| 32 | EXPORT_SYMBOL(__spin_lock_init); | 32 | EXPORT_SYMBOL(__raw_spin_lock_init); |
| 33 | 33 | ||
| 34 | void __rwlock_init(rwlock_t *lock, const char *name, | 34 | void __rwlock_init(rwlock_t *lock, const char *name, |
| 35 | struct lock_class_key *key) | 35 | struct lock_class_key *key) |
| @@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, | |||
| 41 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); | 41 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); |
| 42 | lockdep_init_map(&lock->dep_map, name, key, 0); | 42 | lockdep_init_map(&lock->dep_map, name, key, 0); |
| 43 | #endif | 43 | #endif |
| 44 | lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; | 44 | lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED; |
| 45 | lock->magic = RWLOCK_MAGIC; | 45 | lock->magic = RWLOCK_MAGIC; |
| 46 | lock->owner = SPINLOCK_OWNER_INIT; | 46 | lock->owner = SPINLOCK_OWNER_INIT; |
| 47 | lock->owner_cpu = -1; | 47 | lock->owner_cpu = -1; |
| @@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, | |||
| 49 | 49 | ||
| 50 | EXPORT_SYMBOL(__rwlock_init); | 50 | EXPORT_SYMBOL(__rwlock_init); |
| 51 | 51 | ||
| 52 | static void spin_bug(spinlock_t *lock, const char *msg) | 52 | static void spin_bug(raw_spinlock_t *lock, const char *msg) |
| 53 | { | 53 | { |
| 54 | struct task_struct *owner = NULL; | 54 | struct task_struct *owner = NULL; |
| 55 | 55 | ||
| @@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg) | |||
| 73 | #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) | 73 | #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) |
| 74 | 74 | ||
| 75 | static inline void | 75 | static inline void |
| 76 | debug_spin_lock_before(spinlock_t *lock) | 76 | debug_spin_lock_before(raw_spinlock_t *lock) |
| 77 | { | 77 | { |
| 78 | SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); | 78 | SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); |
| 79 | SPIN_BUG_ON(lock->owner == current, lock, "recursion"); | 79 | SPIN_BUG_ON(lock->owner == current, lock, "recursion"); |
| @@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock) | |||
| 81 | lock, "cpu recursion"); | 81 | lock, "cpu recursion"); |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | static inline void debug_spin_lock_after(spinlock_t *lock) | 84 | static inline void debug_spin_lock_after(raw_spinlock_t *lock) |
| 85 | { | 85 | { |
| 86 | lock->owner_cpu = raw_smp_processor_id(); | 86 | lock->owner_cpu = raw_smp_processor_id(); |
| 87 | lock->owner = current; | 87 | lock->owner = current; |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | static inline void debug_spin_unlock(spinlock_t *lock) | 90 | static inline void debug_spin_unlock(raw_spinlock_t *lock) |
| 91 | { | 91 | { |
| 92 | SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); | 92 | SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); |
| 93 | SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked"); | 93 | SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked"); |
| 94 | SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); | 94 | SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); |
| 95 | SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), | 95 | SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), |
| 96 | lock, "wrong CPU"); | 96 | lock, "wrong CPU"); |
| @@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock) | |||
| 98 | lock->owner_cpu = -1; | 98 | lock->owner_cpu = -1; |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | static void __spin_lock_debug(spinlock_t *lock) | 101 | static void __spin_lock_debug(raw_spinlock_t *lock) |
| 102 | { | 102 | { |
| 103 | u64 i; | 103 | u64 i; |
| 104 | u64 loops = loops_per_jiffy * HZ; | 104 | u64 loops = loops_per_jiffy * HZ; |
| @@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock) | |||
| 106 | 106 | ||
| 107 | for (;;) { | 107 | for (;;) { |
| 108 | for (i = 0; i < loops; i++) { | 108 | for (i = 0; i < loops; i++) { |
| 109 | if (__raw_spin_trylock(&lock->raw_lock)) | 109 | if (arch_spin_trylock(&lock->raw_lock)) |
| 110 | return; | 110 | return; |
| 111 | __delay(1); | 111 | __delay(1); |
| 112 | } | 112 | } |
| @@ -125,17 +125,17 @@ static void __spin_lock_debug(spinlock_t *lock) | |||
| 125 | } | 125 | } |
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | void _raw_spin_lock(spinlock_t *lock) | 128 | void do_raw_spin_lock(raw_spinlock_t *lock) |
| 129 | { | 129 | { |
| 130 | debug_spin_lock_before(lock); | 130 | debug_spin_lock_before(lock); |
| 131 | if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) | 131 | if (unlikely(!arch_spin_trylock(&lock->raw_lock))) |
| 132 | __spin_lock_debug(lock); | 132 | __spin_lock_debug(lock); |
| 133 | debug_spin_lock_after(lock); | 133 | debug_spin_lock_after(lock); |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | int _raw_spin_trylock(spinlock_t *lock) | 136 | int do_raw_spin_trylock(raw_spinlock_t *lock) |
| 137 | { | 137 | { |
| 138 | int ret = __raw_spin_trylock(&lock->raw_lock); | 138 | int ret = arch_spin_trylock(&lock->raw_lock); |
| 139 | 139 | ||
| 140 | if (ret) | 140 | if (ret) |
| 141 | debug_spin_lock_after(lock); | 141 | debug_spin_lock_after(lock); |
| @@ -148,10 +148,10 @@ int _raw_spin_trylock(spinlock_t *lock) | |||
| 148 | return ret; | 148 | return ret; |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | void _raw_spin_unlock(spinlock_t *lock) | 151 | void do_raw_spin_unlock(raw_spinlock_t *lock) |
| 152 | { | 152 | { |
| 153 | debug_spin_unlock(lock); | 153 | debug_spin_unlock(lock); |
| 154 | __raw_spin_unlock(&lock->raw_lock); | 154 | arch_spin_unlock(&lock->raw_lock); |
| 155 | } | 155 | } |
| 156 | 156 | ||
| 157 | static void rwlock_bug(rwlock_t *lock, const char *msg) | 157 | static void rwlock_bug(rwlock_t *lock, const char *msg) |
| @@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock) | |||
| 176 | 176 | ||
| 177 | for (;;) { | 177 | for (;;) { |
| 178 | for (i = 0; i < loops; i++) { | 178 | for (i = 0; i < loops; i++) { |
| 179 | if (__raw_read_trylock(&lock->raw_lock)) | 179 | if (arch_read_trylock(&lock->raw_lock)) |
| 180 | return; | 180 | return; |
| 181 | __delay(1); | 181 | __delay(1); |
| 182 | } | 182 | } |
| @@ -193,15 +193,15 @@ static void __read_lock_debug(rwlock_t *lock) | |||
| 193 | } | 193 | } |
| 194 | #endif | 194 | #endif |
| 195 | 195 | ||
| 196 | void _raw_read_lock(rwlock_t *lock) | 196 | void do_raw_read_lock(rwlock_t *lock) |
| 197 | { | 197 | { |
| 198 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | 198 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); |
| 199 | __raw_read_lock(&lock->raw_lock); | 199 | arch_read_lock(&lock->raw_lock); |
| 200 | } | 200 | } |
| 201 | 201 | ||
| 202 | int _raw_read_trylock(rwlock_t *lock) | 202 | int do_raw_read_trylock(rwlock_t *lock) |
| 203 | { | 203 | { |
| 204 | int ret = __raw_read_trylock(&lock->raw_lock); | 204 | int ret = arch_read_trylock(&lock->raw_lock); |
| 205 | 205 | ||
| 206 | #ifndef CONFIG_SMP | 206 | #ifndef CONFIG_SMP |
| 207 | /* | 207 | /* |
| @@ -212,10 +212,10 @@ int _raw_read_trylock(rwlock_t *lock) | |||
| 212 | return ret; | 212 | return ret; |
| 213 | } | 213 | } |
| 214 | 214 | ||
| 215 | void _raw_read_unlock(rwlock_t *lock) | 215 | void do_raw_read_unlock(rwlock_t *lock) |
| 216 | { | 216 | { |
| 217 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | 217 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); |
| 218 | __raw_read_unlock(&lock->raw_lock); | 218 | arch_read_unlock(&lock->raw_lock); |
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | static inline void debug_write_lock_before(rwlock_t *lock) | 221 | static inline void debug_write_lock_before(rwlock_t *lock) |
| @@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock) | |||
| 251 | 251 | ||
| 252 | for (;;) { | 252 | for (;;) { |
| 253 | for (i = 0; i < loops; i++) { | 253 | for (i = 0; i < loops; i++) { |
| 254 | if (__raw_write_trylock(&lock->raw_lock)) | 254 | if (arch_write_trylock(&lock->raw_lock)) |
| 255 | return; | 255 | return; |
| 256 | __delay(1); | 256 | __delay(1); |
| 257 | } | 257 | } |
| @@ -268,16 +268,16 @@ static void __write_lock_debug(rwlock_t *lock) | |||
| 268 | } | 268 | } |
| 269 | #endif | 269 | #endif |
| 270 | 270 | ||
| 271 | void _raw_write_lock(rwlock_t *lock) | 271 | void do_raw_write_lock(rwlock_t *lock) |
| 272 | { | 272 | { |
| 273 | debug_write_lock_before(lock); | 273 | debug_write_lock_before(lock); |
| 274 | __raw_write_lock(&lock->raw_lock); | 274 | arch_write_lock(&lock->raw_lock); |
| 275 | debug_write_lock_after(lock); | 275 | debug_write_lock_after(lock); |
| 276 | } | 276 | } |
| 277 | 277 | ||
| 278 | int _raw_write_trylock(rwlock_t *lock) | 278 | int do_raw_write_trylock(rwlock_t *lock) |
| 279 | { | 279 | { |
| 280 | int ret = __raw_write_trylock(&lock->raw_lock); | 280 | int ret = arch_write_trylock(&lock->raw_lock); |
| 281 | 281 | ||
| 282 | if (ret) | 282 | if (ret) |
| 283 | debug_write_lock_after(lock); | 283 | debug_write_lock_after(lock); |
| @@ -290,8 +290,8 @@ int _raw_write_trylock(rwlock_t *lock) | |||
| 290 | return ret; | 290 | return ret; |
| 291 | } | 291 | } |
| 292 | 292 | ||
| 293 | void _raw_write_unlock(rwlock_t *lock) | 293 | void do_raw_write_unlock(rwlock_t *lock) |
| 294 | { | 294 | { |
| 295 | debug_write_unlock(lock); | 295 | debug_write_unlock(lock); |
| 296 | __raw_write_unlock(&lock->raw_lock); | 296 | arch_write_unlock(&lock->raw_lock); |
| 297 | } | 297 | } |
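
The renames above follow the new three-level naming scheme: spinlock_t/spin_lock() on top, raw_spinlock_t/raw_spin_lock() beneath it (always a true spinning lock, which is what matters for preempt-rt), and arch_spinlock_t/arch_spin_lock() as the architecture implementation, with do_raw_spin_lock() as the debug-aware glue built in this file. A short sketch of the middle layer from a caller's point of view:

	static DEFINE_RAW_SPINLOCK(hw_lock);
	unsigned long flags;

	raw_spin_lock_irqsave(&hw_lock, flags);
	/* ... touch the hardware ... */
	raw_spin_unlock_irqrestore(&hw_lock, flags);
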
diff --git a/lib/string.c b/lib/string.c index e96421ab9a9a..f71bead1be3e 100644 --- a/lib/string.c +++ b/lib/string.c | |||
| @@ -36,25 +36,21 @@ int strnicmp(const char *s1, const char *s2, size_t len) | |||
| 36 | /* Yes, Virginia, it had better be unsigned */ | 36 | /* Yes, Virginia, it had better be unsigned */ |
| 37 | unsigned char c1, c2; | 37 | unsigned char c1, c2; |
| 38 | 38 | ||
| 39 | c1 = c2 = 0; | 39 | if (!len) |
| 40 | if (len) { | 40 | return 0; |
| 41 | do { | 41 | |
| 42 | c1 = *s1; | 42 | do { |
| 43 | c2 = *s2; | 43 | c1 = *s1++; |
| 44 | s1++; | 44 | c2 = *s2++; |
| 45 | s2++; | 45 | if (!c1 || !c2) |
| 46 | if (!c1) | 46 | break; |
| 47 | break; | 47 | if (c1 == c2) |
| 48 | if (!c2) | 48 | continue; |
| 49 | break; | 49 | c1 = tolower(c1); |
| 50 | if (c1 == c2) | 50 | c2 = tolower(c2); |
| 51 | continue; | 51 | if (c1 != c2) |
| 52 | c1 = tolower(c1); | 52 | break; |
| 53 | c2 = tolower(c2); | 53 | } while (--len); |
| 54 | if (c1 != c2) | ||
| 55 | break; | ||
| 56 | } while (--len); | ||
| 57 | } | ||
| 58 | return (int)c1 - (int)c2; | 54 | return (int)c1 - (int)c2; |
| 59 | } | 55 | } |
| 60 | EXPORT_SYMBOL(strnicmp); | 56 | EXPORT_SYMBOL(strnicmp); |
| @@ -338,20 +334,34 @@ EXPORT_SYMBOL(strnchr); | |||
| 338 | #endif | 334 | #endif |
| 339 | 335 | ||
| 340 | /** | 336 | /** |
| 341 | * strstrip - Removes leading and trailing whitespace from @s. | 337 | * skip_spaces - Removes leading whitespace from @str. |
| 338 | * @str: The string to be stripped. | ||
| 339 | * | ||
| 340 | * Returns a pointer to the first non-whitespace character in @str. | ||
| 341 | */ | ||
| 342 | char *skip_spaces(const char *str) | ||
| 343 | { | ||
| 344 | while (isspace(*str)) | ||
| 345 | ++str; | ||
| 346 | return (char *)str; | ||
| 347 | } | ||
| 348 | EXPORT_SYMBOL(skip_spaces); | ||
| 349 | |||
| 350 | /** | ||
| 351 | * strim - Removes leading and trailing whitespace from @s. | ||
| 342 | * @s: The string to be stripped. | 352 | * @s: The string to be stripped. |
| 343 | * | 353 | * |
| 344 | * Note that the first trailing whitespace is replaced with a %NUL-terminator | 354 | * Note that the first trailing whitespace is replaced with a %NUL-terminator |
| 345 | * in the given string @s. Returns a pointer to the first non-whitespace | 355 | * in the given string @s. Returns a pointer to the first non-whitespace |
| 346 | * character in @s. | 356 | * character in @s. |
| 347 | */ | 357 | */ |
| 348 | char *strstrip(char *s) | 358 | char *strim(char *s) |
| 349 | { | 359 | { |
| 350 | size_t size; | 360 | size_t size; |
| 351 | char *end; | 361 | char *end; |
| 352 | 362 | ||
| 363 | s = skip_spaces(s); | ||
| 353 | size = strlen(s); | 364 | size = strlen(s); |
| 354 | |||
| 355 | if (!size) | 365 | if (!size) |
| 356 | return s; | 366 | return s; |
| 357 | 367 | ||
| @@ -360,12 +370,9 @@ char *strstrip(char *s) | |||
| 360 | end--; | 370 | end--; |
| 361 | *(end + 1) = '\0'; | 371 | *(end + 1) = '\0'; |
| 362 | 372 | ||
| 363 | while (*s && isspace(*s)) | ||
| 364 | s++; | ||
| 365 | |||
| 366 | return s; | 373 | return s; |
| 367 | } | 374 | } |
| 368 | EXPORT_SYMBOL(strstrip); | 375 | EXPORT_SYMBOL(strim); |
| 369 | 376 | ||
| 370 | #ifndef __HAVE_ARCH_STRLEN | 377 | #ifndef __HAVE_ARCH_STRLEN |
| 371 | /** | 378 | /** |
| @@ -656,7 +663,7 @@ EXPORT_SYMBOL(memscan); | |||
| 656 | */ | 663 | */ |
| 657 | char *strstr(const char *s1, const char *s2) | 664 | char *strstr(const char *s1, const char *s2) |
| 658 | { | 665 | { |
| 659 | int l1, l2; | 666 | size_t l1, l2; |
| 660 | 667 | ||
| 661 | l2 = strlen(s2); | 668 | l2 = strlen(s2); |
| 662 | if (!l2) | 669 | if (!l2) |
| @@ -673,6 +680,31 @@ char *strstr(const char *s1, const char *s2) | |||
| 673 | EXPORT_SYMBOL(strstr); | 680 | EXPORT_SYMBOL(strstr); |
| 674 | #endif | 681 | #endif |
| 675 | 682 | ||
| 683 | #ifndef __HAVE_ARCH_STRNSTR | ||
| 684 | /** | ||
| 685 | * strnstr - Find the first substring in a length-limited string | ||
| 686 | * @s1: The string to be searched | ||
| 687 | * @s2: The string to search for | ||
| 688 | * @len: the maximum number of characters to search | ||
| 689 | */ | ||
| 690 | char *strnstr(const char *s1, const char *s2, size_t len) | ||
| 691 | { | ||
| 692 | size_t l2; | ||
| 693 | |||
| 694 | l2 = strlen(s2); | ||
| 695 | if (!l2) | ||
| 696 | return (char *)s1; | ||
| 697 | while (len >= l2) { | ||
| 698 | len--; | ||
| 699 | if (!memcmp(s1, s2, l2)) | ||
| 700 | return (char *)s1; | ||
| 701 | s1++; | ||
| 702 | } | ||
| 703 | return NULL; | ||
| 704 | } | ||
| 705 | EXPORT_SYMBOL(strnstr); | ||
| 706 | #endif | ||
| 707 | |||
| 676 | #ifndef __HAVE_ARCH_MEMCHR | 708 | #ifndef __HAVE_ARCH_MEMCHR |
| 677 | /** | 709 | /** |
| 678 | * memchr - Find a character in an area of memory. | 710 | * memchr - Find a character in an area of memory. |
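
A quick sketch of the new string helpers (existing strstrip() callers are expected to be redirected to strim() by a matching header change, not shown here):

	char buf[] = "  hello world  \n";
	char *s;

	s = skip_spaces(buf);			/* points at "hello world  \n" */
	s = strim(buf);				/* "hello world", trailing blanks cut */
	if (strnstr("abcdef", "cd", 4))		/* "cd" found within the first 4 chars */
		pr_debug("found\n");
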
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index ac25cd28e807..437eedb5a53b 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
| @@ -97,6 +97,8 @@ static phys_addr_t *io_tlb_orig_addr; | |||
| 97 | */ | 97 | */ |
| 98 | static DEFINE_SPINLOCK(io_tlb_lock); | 98 | static DEFINE_SPINLOCK(io_tlb_lock); |
| 99 | 99 | ||
| 100 | static int late_alloc; | ||
| 101 | |||
| 100 | static int __init | 102 | static int __init |
| 101 | setup_io_tlb_npages(char *str) | 103 | setup_io_tlb_npages(char *str) |
| 102 | { | 104 | { |
| @@ -109,6 +111,7 @@ setup_io_tlb_npages(char *str) | |||
| 109 | ++str; | 111 | ++str; |
| 110 | if (!strcmp(str, "force")) | 112 | if (!strcmp(str, "force")) |
| 111 | swiotlb_force = 1; | 113 | swiotlb_force = 1; |
| 114 | |||
| 112 | return 1; | 115 | return 1; |
| 113 | } | 116 | } |
| 114 | __setup("swiotlb=", setup_io_tlb_npages); | 117 | __setup("swiotlb=", setup_io_tlb_npages); |
| @@ -121,8 +124,9 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, | |||
| 121 | return phys_to_dma(hwdev, virt_to_phys(address)); | 124 | return phys_to_dma(hwdev, virt_to_phys(address)); |
| 122 | } | 125 | } |
| 123 | 126 | ||
| 124 | static void swiotlb_print_info(unsigned long bytes) | 127 | void swiotlb_print_info(void) |
| 125 | { | 128 | { |
| 129 | unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT; | ||
| 126 | phys_addr_t pstart, pend; | 130 | phys_addr_t pstart, pend; |
| 127 | 131 | ||
| 128 | pstart = virt_to_phys(io_tlb_start); | 132 | pstart = virt_to_phys(io_tlb_start); |
| @@ -140,7 +144,7 @@ static void swiotlb_print_info(unsigned long bytes) | |||
| 140 | * structures for the software IO TLB used to implement the DMA API. | 144 | * structures for the software IO TLB used to implement the DMA API. |
| 141 | */ | 145 | */ |
| 142 | void __init | 146 | void __init |
| 143 | swiotlb_init_with_default_size(size_t default_size) | 147 | swiotlb_init_with_default_size(size_t default_size, int verbose) |
| 144 | { | 148 | { |
| 145 | unsigned long i, bytes; | 149 | unsigned long i, bytes; |
| 146 | 150 | ||
| @@ -176,14 +180,14 @@ swiotlb_init_with_default_size(size_t default_size) | |||
| 176 | io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); | 180 | io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); |
| 177 | if (!io_tlb_overflow_buffer) | 181 | if (!io_tlb_overflow_buffer) |
| 178 | panic("Cannot allocate SWIOTLB overflow buffer!\n"); | 182 | panic("Cannot allocate SWIOTLB overflow buffer!\n"); |
| 179 | 183 | if (verbose) | |
| 180 | swiotlb_print_info(bytes); | 184 | swiotlb_print_info(); |
| 181 | } | 185 | } |
| 182 | 186 | ||
| 183 | void __init | 187 | void __init |
| 184 | swiotlb_init(void) | 188 | swiotlb_init(int verbose) |
| 185 | { | 189 | { |
| 186 | swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ | 190 | swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */ |
| 187 | } | 191 | } |
| 188 | 192 | ||
| 189 | /* | 193 | /* |
| @@ -260,7 +264,9 @@ swiotlb_late_init_with_default_size(size_t default_size) | |||
| 260 | if (!io_tlb_overflow_buffer) | 264 | if (!io_tlb_overflow_buffer) |
| 261 | goto cleanup4; | 265 | goto cleanup4; |
| 262 | 266 | ||
| 263 | swiotlb_print_info(bytes); | 267 | swiotlb_print_info(); |
| 268 | |||
| 269 | late_alloc = 1; | ||
| 264 | 270 | ||
| 265 | return 0; | 271 | return 0; |
| 266 | 272 | ||
| @@ -281,6 +287,32 @@ cleanup1: | |||
| 281 | return -ENOMEM; | 287 | return -ENOMEM; |
| 282 | } | 288 | } |
| 283 | 289 | ||
| 290 | void __init swiotlb_free(void) | ||
| 291 | { | ||
| 292 | if (!io_tlb_overflow_buffer) | ||
| 293 | return; | ||
| 294 | |||
| 295 | if (late_alloc) { | ||
| 296 | free_pages((unsigned long)io_tlb_overflow_buffer, | ||
| 297 | get_order(io_tlb_overflow)); | ||
| 298 | free_pages((unsigned long)io_tlb_orig_addr, | ||
| 299 | get_order(io_tlb_nslabs * sizeof(phys_addr_t))); | ||
| 300 | free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * | ||
| 301 | sizeof(int))); | ||
| 302 | free_pages((unsigned long)io_tlb_start, | ||
| 303 | get_order(io_tlb_nslabs << IO_TLB_SHIFT)); | ||
| 304 | } else { | ||
| 305 | free_bootmem_late(__pa(io_tlb_overflow_buffer), | ||
| 306 | io_tlb_overflow); | ||
| 307 | free_bootmem_late(__pa(io_tlb_orig_addr), | ||
| 308 | io_tlb_nslabs * sizeof(phys_addr_t)); | ||
| 309 | free_bootmem_late(__pa(io_tlb_list), | ||
| 310 | io_tlb_nslabs * sizeof(int)); | ||
| 311 | free_bootmem_late(__pa(io_tlb_start), | ||
| 312 | io_tlb_nslabs << IO_TLB_SHIFT); | ||
| 313 | } | ||
| 314 | } | ||
| 315 | |||
| 284 | static int is_swiotlb_buffer(phys_addr_t paddr) | 316 | static int is_swiotlb_buffer(phys_addr_t paddr) |
| 285 | { | 317 | { |
| 286 | return paddr >= virt_to_phys(io_tlb_start) && | 318 | return paddr >= virt_to_phys(io_tlb_start) && |
| @@ -453,7 +485,7 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) | |||
| 453 | 485 | ||
| 454 | /* | 486 | /* |
| 455 | * Return the buffer to the free list by setting the corresponding | 487 | * Return the buffer to the free list by setting the corresponding |
| 456 | * entries to indicate the number of contigous entries available. | 488 | * entries to indicate the number of contiguous entries available. |
| 457 | * While returning the entries to the free list, we merge the entries | 489 | * While returning the entries to the free list, we merge the entries |
| 458 | * with slots below and above the pool being returned. | 490 | * with slots below and above the pool being returned. |
| 459 | */ | 491 | */ |
| @@ -517,7 +549,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
| 517 | dma_mask = hwdev->coherent_dma_mask; | 549 | dma_mask = hwdev->coherent_dma_mask; |
| 518 | 550 | ||
| 519 | ret = (void *)__get_free_pages(flags, order); | 551 | ret = (void *)__get_free_pages(flags, order); |
| 520 | if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) { | 552 | if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) { |
| 521 | /* | 553 | /* |
| 522 | * The allocated memory isn't reachable by the device. | 554 | * The allocated memory isn't reachable by the device. |
| 523 | */ | 555 | */ |
| @@ -539,7 +571,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
| 539 | dev_addr = swiotlb_virt_to_bus(hwdev, ret); | 571 | dev_addr = swiotlb_virt_to_bus(hwdev, ret); |
| 540 | 572 | ||
| 541 | /* Confirm address can be DMA'd by device */ | 573 | /* Confirm address can be DMA'd by device */ |
| 542 | if (dev_addr + size > dma_mask) { | 574 | if (dev_addr + size - 1 > dma_mask) { |
| 543 | printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", | 575 | printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", |
| 544 | (unsigned long long)dma_mask, | 576 | (unsigned long long)dma_mask, |
| 545 | (unsigned long long)dev_addr); | 577 | (unsigned long long)dev_addr); |
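
The interface change lets architecture code decide when, and whether, to report and keep the bounce buffer; an illustrative sequence (the IOMMU condition is a placeholder for the arch's own decision):

	swiotlb_init(0);			/* set up early, but print nothing yet */

	if (hw_iommu_usable)			/* placeholder condition */
		swiotlb_free();			/* hand the bootmem back */
	else
		swiotlb_print_info();		/* now report the aperture (64MB by default) */
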
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 33bed5e67a21..af4aaa6c36f3 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | * Wirzenius wrote this portably, Torvalds fucked it up :-) | 9 | * Wirzenius wrote this portably, Torvalds fucked it up :-) |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | /* | 12 | /* |
| 13 | * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com> | 13 | * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com> |
| 14 | * - changed to provide snprintf and vsnprintf functions | 14 | * - changed to provide snprintf and vsnprintf functions |
| 15 | * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de> | 15 | * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de> |
| @@ -47,14 +47,14 @@ static unsigned int simple_guess_base(const char *cp) | |||
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | /** | 49 | /** |
| 50 | * simple_strtoul - convert a string to an unsigned long | 50 | * simple_strtoull - convert a string to an unsigned long long |
| 51 | * @cp: The start of the string | 51 | * @cp: The start of the string |
| 52 | * @endp: A pointer to the end of the parsed string will be placed here | 52 | * @endp: A pointer to the end of the parsed string will be placed here |
| 53 | * @base: The number base to use | 53 | * @base: The number base to use |
| 54 | */ | 54 | */ |
| 55 | unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) | 55 | unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) |
| 56 | { | 56 | { |
| 57 | unsigned long result = 0; | 57 | unsigned long long result = 0; |
| 58 | 58 | ||
| 59 | if (!base) | 59 | if (!base) |
| 60 | base = simple_guess_base(cp); | 60 | base = simple_guess_base(cp); |
| @@ -71,58 +71,39 @@ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) | |||
| 71 | result = result * base + value; | 71 | result = result * base + value; |
| 72 | cp++; | 72 | cp++; |
| 73 | } | 73 | } |
| 74 | |||
| 75 | if (endp) | 74 | if (endp) |
| 76 | *endp = (char *)cp; | 75 | *endp = (char *)cp; |
| 76 | |||
| 77 | return result; | 77 | return result; |
| 78 | } | 78 | } |
| 79 | EXPORT_SYMBOL(simple_strtoul); | 79 | EXPORT_SYMBOL(simple_strtoull); |
| 80 | 80 | ||
| 81 | /** | 81 | /** |
| 82 | * simple_strtol - convert a string to a signed long | 82 | * simple_strtoul - convert a string to an unsigned long |
| 83 | * @cp: The start of the string | 83 | * @cp: The start of the string |
| 84 | * @endp: A pointer to the end of the parsed string will be placed here | 84 | * @endp: A pointer to the end of the parsed string will be placed here |
| 85 | * @base: The number base to use | 85 | * @base: The number base to use |
| 86 | */ | 86 | */ |
| 87 | long simple_strtol(const char *cp, char **endp, unsigned int base) | 87 | unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) |
| 88 | { | 88 | { |
| 89 | if(*cp == '-') | 89 | return simple_strtoull(cp, endp, base); |
| 90 | return -simple_strtoul(cp + 1, endp, base); | ||
| 91 | return simple_strtoul(cp, endp, base); | ||
| 92 | } | 90 | } |
| 93 | EXPORT_SYMBOL(simple_strtol); | 91 | EXPORT_SYMBOL(simple_strtoul); |
| 94 | 92 | ||
| 95 | /** | 93 | /** |
| 96 | * simple_strtoull - convert a string to an unsigned long long | 94 | * simple_strtol - convert a string to a signed long |
| 97 | * @cp: The start of the string | 95 | * @cp: The start of the string |
| 98 | * @endp: A pointer to the end of the parsed string will be placed here | 96 | * @endp: A pointer to the end of the parsed string will be placed here |
| 99 | * @base: The number base to use | 97 | * @base: The number base to use |
| 100 | */ | 98 | */ |
| 101 | unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) | 99 | long simple_strtol(const char *cp, char **endp, unsigned int base) |
| 102 | { | 100 | { |
| 103 | unsigned long long result = 0; | 101 | if (*cp == '-') |
| 104 | 102 | return -simple_strtoul(cp + 1, endp, base); | |
| 105 | if (!base) | ||
| 106 | base = simple_guess_base(cp); | ||
| 107 | |||
| 108 | if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x') | ||
| 109 | cp += 2; | ||
| 110 | |||
| 111 | while (isxdigit(*cp)) { | ||
| 112 | unsigned int value; | ||
| 113 | |||
| 114 | value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10; | ||
| 115 | if (value >= base) | ||
| 116 | break; | ||
| 117 | result = result * base + value; | ||
| 118 | cp++; | ||
| 119 | } | ||
| 120 | 103 | ||
| 121 | if (endp) | 104 | return simple_strtoul(cp, endp, base); |
| 122 | *endp = (char *)cp; | ||
| 123 | return result; | ||
| 124 | } | 105 | } |
| 125 | EXPORT_SYMBOL(simple_strtoull); | 106 | EXPORT_SYMBOL(simple_strtol); |
| 126 | 107 | ||
| 127 | /** | 108 | /** |
| 128 | * simple_strtoll - convert a string to a signed long long | 109 | * simple_strtoll - convert a string to a signed long long |
| @@ -132,8 +113,9 @@ EXPORT_SYMBOL(simple_strtoull); | |||
| 132 | */ | 113 | */ |
| 133 | long long simple_strtoll(const char *cp, char **endp, unsigned int base) | 114 | long long simple_strtoll(const char *cp, char **endp, unsigned int base) |
| 134 | { | 115 | { |
| 135 | if(*cp=='-') | 116 | if (*cp == '-') |
| 136 | return -simple_strtoull(cp + 1, endp, base); | 117 | return -simple_strtoull(cp + 1, endp, base); |
| 118 | |||
| 137 | return simple_strtoull(cp, endp, base); | 119 | return simple_strtoull(cp, endp, base); |
| 138 | } | 120 | } |
| 139 | 121 | ||
| @@ -173,6 +155,7 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res) | |||
| 173 | val = simple_strtoul(cp, &tail, base); | 155 | val = simple_strtoul(cp, &tail, base); |
| 174 | if (tail == cp) | 156 | if (tail == cp) |
| 175 | return -EINVAL; | 157 | return -EINVAL; |
| 158 | |||
| 176 | if ((*tail == '\0') || | 159 | if ((*tail == '\0') || |
| 177 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { | 160 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { |
| 178 | *res = val; | 161 | *res = val; |
| @@ -285,10 +268,11 @@ EXPORT_SYMBOL(strict_strtoll); | |||
| 285 | 268 | ||
| 286 | static int skip_atoi(const char **s) | 269 | static int skip_atoi(const char **s) |
| 287 | { | 270 | { |
| 288 | int i=0; | 271 | int i = 0; |
| 289 | 272 | ||
| 290 | while (isdigit(**s)) | 273 | while (isdigit(**s)) |
| 291 | i = i*10 + *((*s)++) - '0'; | 274 | i = i*10 + *((*s)++) - '0'; |
| 275 | |||
| 292 | return i; | 276 | return i; |
| 293 | } | 277 | } |
| 294 | 278 | ||
| @@ -302,7 +286,7 @@ static int skip_atoi(const char **s) | |||
| 302 | /* Formats correctly any integer in [0,99999]. | 286 | /* Formats correctly any integer in [0,99999]. |
| 303 | * Outputs from one to five digits depending on input. | 287 | * Outputs from one to five digits depending on input. |
| 304 | * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */ | 288 | * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */ |
| 305 | static char* put_dec_trunc(char *buf, unsigned q) | 289 | static char *put_dec_trunc(char *buf, unsigned q) |
| 306 | { | 290 | { |
| 307 | unsigned d3, d2, d1, d0; | 291 | unsigned d3, d2, d1, d0; |
| 308 | d1 = (q>>4) & 0xf; | 292 | d1 = (q>>4) & 0xf; |
| @@ -331,14 +315,15 @@ static char* put_dec_trunc(char *buf, unsigned q) | |||
| 331 | d3 = d3 - 10*q; | 315 | d3 = d3 - 10*q; |
| 332 | *buf++ = d3 + '0'; /* next digit */ | 316 | *buf++ = d3 + '0'; /* next digit */ |
| 333 | if (q != 0) | 317 | if (q != 0) |
| 334 | *buf++ = q + '0'; /* most sign. digit */ | 318 | *buf++ = q + '0'; /* most sign. digit */ |
| 335 | } | 319 | } |
| 336 | } | 320 | } |
| 337 | } | 321 | } |
| 322 | |||
| 338 | return buf; | 323 | return buf; |
| 339 | } | 324 | } |
| 340 | /* Same with if's removed. Always emits five digits */ | 325 | /* Same with if's removed. Always emits five digits */ |
| 341 | static char* put_dec_full(char *buf, unsigned q) | 326 | static char *put_dec_full(char *buf, unsigned q) |
| 342 | { | 327 | { |
| 343 | /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ | 328 | /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ |
| 344 | /* but anyway, gcc produces better code with full-sized ints */ | 329 | /* but anyway, gcc produces better code with full-sized ints */ |
| @@ -347,14 +332,15 @@ static char* put_dec_full(char *buf, unsigned q) | |||
| 347 | d2 = (q>>8) & 0xf; | 332 | d2 = (q>>8) & 0xf; |
| 348 | d3 = (q>>12); | 333 | d3 = (q>>12); |
| 349 | 334 | ||
| 350 | /* Possible ways to approx. divide by 10 */ | 335 | /* |
| 351 | /* gcc -O2 replaces multiply with shifts and adds */ | 336 | * Possible ways to approx. divide by 10 |
| 352 | // (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) | 337 | * gcc -O2 replaces multiply with shifts and adds |
| 353 | // (x * 0x67) >> 10: 1100111 | 338 | * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) |
| 354 | // (x * 0x34) >> 9: 110100 - same | 339 | * (x * 0x67) >> 10: 1100111 |
| 355 | // (x * 0x1a) >> 8: 11010 - same | 340 | * (x * 0x34) >> 9: 110100 - same |
| 356 | // (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) | 341 | * (x * 0x1a) >> 8: 11010 - same |
| 357 | 342 | * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) | |
| 343 | */ | ||
| 358 | d0 = 6*(d3 + d2 + d1) + (q & 0xf); | 344 | d0 = 6*(d3 + d2 + d1) + (q & 0xf); |
| 359 | q = (d0 * 0xcd) >> 11; | 345 | q = (d0 * 0xcd) >> 11; |
| 360 | d0 = d0 - 10*q; | 346 | d0 = d0 - 10*q; |
| @@ -375,10 +361,11 @@ static char* put_dec_full(char *buf, unsigned q) | |||
| 375 | d3 = d3 - 10*q; | 361 | d3 = d3 - 10*q; |
| 376 | *buf++ = d3 + '0'; | 362 | *buf++ = d3 + '0'; |
| 377 | *buf++ = q + '0'; | 363 | *buf++ = q + '0'; |
| 364 | |||
| 378 | return buf; | 365 | return buf; |
| 379 | } | 366 | } |
| 380 | /* No inlining helps gcc to use registers better */ | 367 | /* No inlining helps gcc to use registers better */ |
| 381 | static noinline char* put_dec(char *buf, unsigned long long num) | 368 | static noinline char *put_dec(char *buf, unsigned long long num) |
| 382 | { | 369 | { |
| 383 | while (1) { | 370 | while (1) { |
| 384 | unsigned rem; | 371 | unsigned rem; |
| @@ -448,9 +435,9 @@ static char *number(char *buf, char *end, unsigned long long num, | |||
| 448 | spec.flags &= ~ZEROPAD; | 435 | spec.flags &= ~ZEROPAD; |
| 449 | sign = 0; | 436 | sign = 0; |
| 450 | if (spec.flags & SIGN) { | 437 | if (spec.flags & SIGN) { |
| 451 | if ((signed long long) num < 0) { | 438 | if ((signed long long)num < 0) { |
| 452 | sign = '-'; | 439 | sign = '-'; |
| 453 | num = - (signed long long) num; | 440 | num = -(signed long long)num; |
| 454 | spec.field_width--; | 441 | spec.field_width--; |
| 455 | } else if (spec.flags & PLUS) { | 442 | } else if (spec.flags & PLUS) { |
| 456 | sign = '+'; | 443 | sign = '+'; |
| @@ -478,7 +465,9 @@ static char *number(char *buf, char *end, unsigned long long num, | |||
| 478 | else if (spec.base != 10) { /* 8 or 16 */ | 465 | else if (spec.base != 10) { /* 8 or 16 */ |
| 479 | int mask = spec.base - 1; | 466 | int mask = spec.base - 1; |
| 480 | int shift = 3; | 467 | int shift = 3; |
| 481 | if (spec.base == 16) shift = 4; | 468 | |
| 469 | if (spec.base == 16) | ||
| 470 | shift = 4; | ||
| 482 | do { | 471 | do { |
| 483 | tmp[i++] = (digits[((unsigned char)num) & mask] | locase); | 472 | tmp[i++] = (digits[((unsigned char)num) & mask] | locase); |
| 484 | num >>= shift; | 473 | num >>= shift; |
| @@ -493,7 +482,7 @@ static char *number(char *buf, char *end, unsigned long long num, | |||
| 493 | /* leading space padding */ | 482 | /* leading space padding */ |
| 494 | spec.field_width -= spec.precision; | 483 | spec.field_width -= spec.precision; |
| 495 | if (!(spec.flags & (ZEROPAD+LEFT))) { | 484 | if (!(spec.flags & (ZEROPAD+LEFT))) { |
| 496 | while(--spec.field_width >= 0) { | 485 | while (--spec.field_width >= 0) { |
| 497 | if (buf < end) | 486 | if (buf < end) |
| 498 | *buf = ' '; | 487 | *buf = ' '; |
| 499 | ++buf; | 488 | ++buf; |
| @@ -543,15 +532,16 @@ static char *number(char *buf, char *end, unsigned long long num, | |||
| 543 | *buf = ' '; | 532 | *buf = ' '; |
| 544 | ++buf; | 533 | ++buf; |
| 545 | } | 534 | } |
| 535 | |||
| 546 | return buf; | 536 | return buf; |
| 547 | } | 537 | } |
| 548 | 538 | ||
| 549 | static char *string(char *buf, char *end, char *s, struct printf_spec spec) | 539 | static char *string(char *buf, char *end, const char *s, struct printf_spec spec) |
| 550 | { | 540 | { |
| 551 | int len, i; | 541 | int len, i; |
| 552 | 542 | ||
| 553 | if ((unsigned long)s < PAGE_SIZE) | 543 | if ((unsigned long)s < PAGE_SIZE) |
| 554 | s = "<NULL>"; | 544 | s = "(null)"; |
| 555 | 545 | ||
| 556 | len = strnlen(s, spec.precision); | 546 | len = strnlen(s, spec.precision); |
| 557 | 547 | ||
| @@ -572,6 +562,7 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec) | |||
| 572 | *buf = ' '; | 562 | *buf = ' '; |
| 573 | ++buf; | 563 | ++buf; |
| 574 | } | 564 | } |
| 565 | |||
| 575 | return buf; | 566 | return buf; |
| 576 | } | 567 | } |
| 577 | 568 | ||
| @@ -585,47 +576,101 @@ static char *symbol_string(char *buf, char *end, void *ptr, | |||
| 585 | sprint_symbol(sym, value); | 576 | sprint_symbol(sym, value); |
| 586 | else | 577 | else |
| 587 | kallsyms_lookup(value, NULL, NULL, NULL, sym); | 578 | kallsyms_lookup(value, NULL, NULL, NULL, sym); |
| 579 | |||
| 588 | return string(buf, end, sym, spec); | 580 | return string(buf, end, sym, spec); |
| 589 | #else | 581 | #else |
| 590 | spec.field_width = 2*sizeof(void *); | 582 | spec.field_width = 2 * sizeof(void *); |
| 591 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | 583 | spec.flags |= SPECIAL | SMALL | ZEROPAD; |
| 592 | spec.base = 16; | 584 | spec.base = 16; |
| 585 | |||
| 593 | return number(buf, end, value, spec); | 586 | return number(buf, end, value, spec); |
| 594 | #endif | 587 | #endif |
| 595 | } | 588 | } |
| 596 | 589 | ||
| 597 | static char *resource_string(char *buf, char *end, struct resource *res, | 590 | static char *resource_string(char *buf, char *end, struct resource *res, |
| 598 | struct printf_spec spec) | 591 | struct printf_spec spec, const char *fmt) |
| 599 | { | 592 | { |
| 600 | #ifndef IO_RSRC_PRINTK_SIZE | 593 | #ifndef IO_RSRC_PRINTK_SIZE |
| 601 | #define IO_RSRC_PRINTK_SIZE 4 | 594 | #define IO_RSRC_PRINTK_SIZE 6 |
| 602 | #endif | 595 | #endif |
| 603 | 596 | ||
| 604 | #ifndef MEM_RSRC_PRINTK_SIZE | 597 | #ifndef MEM_RSRC_PRINTK_SIZE |
| 605 | #define MEM_RSRC_PRINTK_SIZE 8 | 598 | #define MEM_RSRC_PRINTK_SIZE 10 |
| 606 | #endif | 599 | #endif |
| 607 | struct printf_spec num_spec = { | 600 | struct printf_spec hex_spec = { |
| 608 | .base = 16, | 601 | .base = 16, |
| 609 | .precision = -1, | 602 | .precision = -1, |
| 610 | .flags = SPECIAL | SMALL | ZEROPAD, | 603 | .flags = SPECIAL | SMALL | ZEROPAD, |
| 611 | }; | 604 | }; |
| 612 | /* room for the actual numbers, the two "0x", -, [, ] and the final zero */ | 605 | struct printf_spec dec_spec = { |
| 613 | char sym[4*sizeof(resource_size_t) + 8]; | 606 | .base = 10, |
| 607 | .precision = -1, | ||
| 608 | .flags = 0, | ||
| 609 | }; | ||
| 610 | struct printf_spec str_spec = { | ||
| 611 | .field_width = -1, | ||
| 612 | .precision = 10, | ||
| 613 | .flags = LEFT, | ||
| 614 | }; | ||
| 615 | struct printf_spec flag_spec = { | ||
| 616 | .base = 16, | ||
| 617 | .precision = -1, | ||
| 618 | .flags = SPECIAL | SMALL, | ||
| 619 | }; | ||
| 620 | |||
| 621 | /* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8) | ||
| 622 | * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */ | ||
| 623 | #define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4) | ||
| 624 | #define FLAG_BUF_SIZE (2 * sizeof(res->flags)) | ||
| 625 | #define DECODED_BUF_SIZE sizeof("[mem - 64bit pref disabled]") | ||
| 626 | #define RAW_BUF_SIZE sizeof("[mem - flags 0x]") | ||
| 627 | char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE, | ||
| 628 | 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)]; | ||
| 629 | |||
| 614 | char *p = sym, *pend = sym + sizeof(sym); | 630 | char *p = sym, *pend = sym + sizeof(sym); |
| 615 | int size = -1; | 631 | int size = -1, addr = 0; |
| 632 | int decode = (fmt[0] == 'R') ? 1 : 0; | ||
| 616 | 633 | ||
| 617 | if (res->flags & IORESOURCE_IO) | 634 | if (res->flags & IORESOURCE_IO) { |
| 618 | size = IO_RSRC_PRINTK_SIZE; | 635 | size = IO_RSRC_PRINTK_SIZE; |
| 619 | else if (res->flags & IORESOURCE_MEM) | 636 | addr = 1; |
| 637 | } else if (res->flags & IORESOURCE_MEM) { | ||
| 620 | size = MEM_RSRC_PRINTK_SIZE; | 638 | size = MEM_RSRC_PRINTK_SIZE; |
| 639 | addr = 1; | ||
| 640 | } | ||
| 621 | 641 | ||
| 622 | *p++ = '['; | 642 | *p++ = '['; |
| 623 | num_spec.field_width = size; | 643 | if (res->flags & IORESOURCE_IO) |
| 624 | p = number(p, pend, res->start, num_spec); | 644 | p = string(p, pend, "io ", str_spec); |
| 625 | *p++ = '-'; | 645 | else if (res->flags & IORESOURCE_MEM) |
| 626 | p = number(p, pend, res->end, num_spec); | 646 | p = string(p, pend, "mem ", str_spec); |
| 647 | else if (res->flags & IORESOURCE_IRQ) | ||
| 648 | p = string(p, pend, "irq ", str_spec); | ||
| 649 | else if (res->flags & IORESOURCE_DMA) | ||
| 650 | p = string(p, pend, "dma ", str_spec); | ||
| 651 | else { | ||
| 652 | p = string(p, pend, "??? ", str_spec); | ||
| 653 | decode = 0; | ||
| 654 | } | ||
| 655 | hex_spec.field_width = size; | ||
| 656 | p = number(p, pend, res->start, addr ? hex_spec : dec_spec); | ||
| 657 | if (res->start != res->end) { | ||
| 658 | *p++ = '-'; | ||
| 659 | p = number(p, pend, res->end, addr ? hex_spec : dec_spec); | ||
| 660 | } | ||
| 661 | if (decode) { | ||
| 662 | if (res->flags & IORESOURCE_MEM_64) | ||
| 663 | p = string(p, pend, " 64bit", str_spec); | ||
| 664 | if (res->flags & IORESOURCE_PREFETCH) | ||
| 665 | p = string(p, pend, " pref", str_spec); | ||
| 666 | if (res->flags & IORESOURCE_DISABLED) | ||
| 667 | p = string(p, pend, " disabled", str_spec); | ||
| 668 | } else { | ||
| 669 | p = string(p, pend, " flags ", str_spec); | ||
| 670 | p = number(p, pend, res->flags, flag_spec); | ||
| 671 | } | ||
| 627 | *p++ = ']'; | 672 | *p++ = ']'; |
| 628 | *p = 0; | 673 | *p = '\0'; |
| 629 | 674 | ||
| 630 | return string(buf, end, sym, spec); | 675 | return string(buf, end, sym, spec); |
| 631 | } | 676 | } |
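(Usage sketch, not taken from the patch: the decoded %pR and raw %pr forms added above would render a memory resource roughly as follows; the addresses and the struct initializer are illustrative only.)

	struct resource r = {
		.start = 0xfe000000,
		.end   = 0xfe01ffff,
		.flags = IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH,
	};

	printk(KERN_INFO "BAR %pR\n", &r);  /* e.g. "BAR [mem 0xfe000000-0xfe01ffff 64bit pref]"  */
	printk(KERN_INFO "BAR %pr\n", &r);  /* e.g. "BAR [mem 0xfe000000-0xfe01ffff flags 0x...]" */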
| @@ -636,24 +681,55 @@ static char *mac_address_string(char *buf, char *end, u8 *addr, | |||
| 636 | char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; | 681 | char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; |
| 637 | char *p = mac_addr; | 682 | char *p = mac_addr; |
| 638 | int i; | 683 | int i; |
| 684 | char separator; | ||
| 685 | |||
| 686 | if (fmt[1] == 'F') { /* FDDI canonical format */ | ||
| 687 | separator = '-'; | ||
| 688 | } else { | ||
| 689 | separator = ':'; | ||
| 690 | } | ||
| 639 | 691 | ||
| 640 | for (i = 0; i < 6; i++) { | 692 | for (i = 0; i < 6; i++) { |
| 641 | p = pack_hex_byte(p, addr[i]); | 693 | p = pack_hex_byte(p, addr[i]); |
| 642 | if (fmt[0] == 'M' && i != 5) | 694 | if (fmt[0] == 'M' && i != 5) |
| 643 | *p++ = ':'; | 695 | *p++ = separator; |
| 644 | } | 696 | } |
| 645 | *p = '\0'; | 697 | *p = '\0'; |
| 646 | 698 | ||
| 647 | return string(buf, end, mac_addr, spec); | 699 | return string(buf, end, mac_addr, spec); |
| 648 | } | 700 | } |
| 649 | 701 | ||
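(Illustrative call, not in the patch, showing the three MAC forms handled here; the address bytes are arbitrary.)

	u8 mac[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };

	printk("%pM\n",  mac);	/* 00:01:02:03:04:05 (colon separated)      */
	printk("%pm\n",  mac);	/* 000102030405      (contiguous)           */
	printk("%pMF\n", mac);	/* 00-01-02-03-04-05 (FDDI, dash separated) */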
| 650 | static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) | 702 | static char *ip4_string(char *p, const u8 *addr, const char *fmt) |
| 651 | { | 703 | { |
| 652 | int i; | 704 | int i; |
| 653 | 705 | bool leading_zeros = (fmt[0] == 'i'); | |
| 706 | int index; | ||
| 707 | int step; | ||
| 708 | |||
| 709 | switch (fmt[2]) { | ||
| 710 | case 'h': | ||
| 711 | #ifdef __BIG_ENDIAN | ||
| 712 | index = 0; | ||
| 713 | step = 1; | ||
| 714 | #else | ||
| 715 | index = 3; | ||
| 716 | step = -1; | ||
| 717 | #endif | ||
| 718 | break; | ||
| 719 | case 'l': | ||
| 720 | index = 3; | ||
| 721 | step = -1; | ||
| 722 | break; | ||
| 723 | case 'n': | ||
| 724 | case 'b': | ||
| 725 | default: | ||
| 726 | index = 0; | ||
| 727 | step = 1; | ||
| 728 | break; | ||
| 729 | } | ||
| 654 | for (i = 0; i < 4; i++) { | 730 | for (i = 0; i < 4; i++) { |
| 655 | char temp[3]; /* hold each IP quad in reverse order */ | 731 | char temp[3]; /* hold each IP quad in reverse order */ |
| 656 | int digits = put_dec_trunc(temp, addr[i]) - temp; | 732 | int digits = put_dec_trunc(temp, addr[index]) - temp; |
| 657 | if (leading_zeros) { | 733 | if (leading_zeros) { |
| 658 | if (digits < 3) | 734 | if (digits < 3) |
| 659 | *p++ = '0'; | 735 | *p++ = '0'; |
| @@ -665,23 +741,21 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) | |||
| 665 | *p++ = temp[digits]; | 741 | *p++ = temp[digits]; |
| 666 | if (i < 3) | 742 | if (i < 3) |
| 667 | *p++ = '.'; | 743 | *p++ = '.'; |
| 744 | index += step; | ||
| 668 | } | 745 | } |
| 669 | |||
| 670 | *p = '\0'; | 746 | *p = '\0'; |
| 747 | |||
| 671 | return p; | 748 | return p; |
| 672 | } | 749 | } |
| 673 | 750 | ||
| 674 | static char *ip6_compressed_string(char *p, const char *addr) | 751 | static char *ip6_compressed_string(char *p, const char *addr) |
| 675 | { | 752 | { |
| 676 | int i; | 753 | int i, j, range; |
| 677 | int j; | ||
| 678 | int range; | ||
| 679 | unsigned char zerolength[8]; | 754 | unsigned char zerolength[8]; |
| 680 | int longest = 1; | 755 | int longest = 1; |
| 681 | int colonpos = -1; | 756 | int colonpos = -1; |
| 682 | u16 word; | 757 | u16 word; |
| 683 | u8 hi; | 758 | u8 hi, lo; |
| 684 | u8 lo; | ||
| 685 | bool needcolon = false; | 759 | bool needcolon = false; |
| 686 | bool useIPv4; | 760 | bool useIPv4; |
| 687 | struct in6_addr in6; | 761 | struct in6_addr in6; |
| @@ -735,8 +809,9 @@ static char *ip6_compressed_string(char *p, const char *addr) | |||
| 735 | p = pack_hex_byte(p, hi); | 809 | p = pack_hex_byte(p, hi); |
| 736 | else | 810 | else |
| 737 | *p++ = hex_asc_lo(hi); | 811 | *p++ = hex_asc_lo(hi); |
| 812 | p = pack_hex_byte(p, lo); | ||
| 738 | } | 813 | } |
| 739 | if (hi || lo > 0x0f) | 814 | else if (lo > 0x0f) |
| 740 | p = pack_hex_byte(p, lo); | 815 | p = pack_hex_byte(p, lo); |
| 741 | else | 816 | else |
| 742 | *p++ = hex_asc_lo(lo); | 817 | *p++ = hex_asc_lo(lo); |
| @@ -746,24 +821,25 @@ static char *ip6_compressed_string(char *p, const char *addr) | |||
| 746 | if (useIPv4) { | 821 | if (useIPv4) { |
| 747 | if (needcolon) | 822 | if (needcolon) |
| 748 | *p++ = ':'; | 823 | *p++ = ':'; |
| 749 | p = ip4_string(p, &in6.s6_addr[12], false); | 824 | p = ip4_string(p, &in6.s6_addr[12], "I4"); |
| 750 | } | 825 | } |
| 751 | |||
| 752 | *p = '\0'; | 826 | *p = '\0'; |
| 827 | |||
| 753 | return p; | 828 | return p; |
| 754 | } | 829 | } |
| 755 | 830 | ||
| 756 | static char *ip6_string(char *p, const char *addr, const char *fmt) | 831 | static char *ip6_string(char *p, const char *addr, const char *fmt) |
| 757 | { | 832 | { |
| 758 | int i; | 833 | int i; |
| 834 | |||
| 759 | for (i = 0; i < 8; i++) { | 835 | for (i = 0; i < 8; i++) { |
| 760 | p = pack_hex_byte(p, *addr++); | 836 | p = pack_hex_byte(p, *addr++); |
| 761 | p = pack_hex_byte(p, *addr++); | 837 | p = pack_hex_byte(p, *addr++); |
| 762 | if (fmt[0] == 'I' && i != 7) | 838 | if (fmt[0] == 'I' && i != 7) |
| 763 | *p++ = ':'; | 839 | *p++ = ':'; |
| 764 | } | 840 | } |
| 765 | |||
| 766 | *p = '\0'; | 841 | *p = '\0'; |
| 842 | |||
| 767 | return p; | 843 | return p; |
| 768 | } | 844 | } |
| 769 | 845 | ||
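(For context, an illustrative rendering of the three IPv6 forms produced by the code above, using the loopback address ::1.)

	struct in6_addr a = IN6ADDR_LOOPBACK_INIT;

	printk("%pI6\n",  &a);	/* 0000:0000:0000:0000:0000:0000:0000:0001 */
	printk("%pi6\n",  &a);	/* 00000000000000000000000000000001        */
	printk("%pI6c\n", &a);	/* ::1 (compressed form)                   */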
| @@ -785,11 +861,57 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr, | |||
| 785 | { | 861 | { |
| 786 | char ip4_addr[sizeof("255.255.255.255")]; | 862 | char ip4_addr[sizeof("255.255.255.255")]; |
| 787 | 863 | ||
| 788 | ip4_string(ip4_addr, addr, fmt[0] == 'i'); | 864 | ip4_string(ip4_addr, addr, fmt); |
| 789 | 865 | ||
| 790 | return string(buf, end, ip4_addr, spec); | 866 | return string(buf, end, ip4_addr, spec); |
| 791 | } | 867 | } |
| 792 | 868 | ||
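(Illustrative only: the new endianness suffixes accepted by ip4_string() change the order in which the four quads are taken, e.g. for an address stored in network byte order.)

	u8 addr[4] = { 192, 168, 0, 1 };

	printk("%pI4\n",  addr);	/* 192.168.0.1 - network/big-endian order (default) */
	printk("%pI4l\n", addr);	/* 1.0.168.192 - bytes taken in little-endian order */
	/* %pI4h follows the host CPU: like %pI4 on big-endian, like %pI4l on little-endian */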
| 869 | static char *uuid_string(char *buf, char *end, const u8 *addr, | ||
| 870 | struct printf_spec spec, const char *fmt) | ||
| 871 | { | ||
| 872 | char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")]; | ||
| 873 | char *p = uuid; | ||
| 874 | int i; | ||
| 875 | static const u8 be[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; | ||
| 876 | static const u8 le[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15}; | ||
| 877 | const u8 *index = be; | ||
| 878 | bool uc = false; | ||
| 879 | |||
| 880 | switch (*(++fmt)) { | ||
| 881 | case 'L': | ||
| 882 | uc = true; /* fall-through */ | ||
| 883 | case 'l': | ||
| 884 | index = le; | ||
| 885 | break; | ||
| 886 | case 'B': | ||
| 887 | uc = true; | ||
| 888 | break; | ||
| 889 | } | ||
| 890 | |||
| 891 | for (i = 0; i < 16; i++) { | ||
| 892 | p = pack_hex_byte(p, addr[index[i]]); | ||
| 893 | switch (i) { | ||
| 894 | case 3: | ||
| 895 | case 5: | ||
| 896 | case 7: | ||
| 897 | case 9: | ||
| 898 | *p++ = '-'; | ||
| 899 | break; | ||
| 900 | } | ||
| 901 | } | ||
| 902 | |||
| 903 | *p = 0; | ||
| 904 | |||
| 905 | if (uc) { | ||
| 906 | p = uuid; | ||
| 907 | do { | ||
| 908 | *p = toupper(*p); | ||
| 909 | } while (*(++p)); | ||
| 910 | } | ||
| 911 | |||
| 912 | return string(buf, end, uuid, spec); | ||
| 913 | } | ||
| 914 | |||
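(A sketch, not from the patch, of how the new %pU variants order the 16 raw bytes; the byte values are illustrative.)

	u8 uuid[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
			0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };

	printk("%pUb\n", uuid);	/* 00010203-0405-0607-0809-0a0b0c0d0e0f (big endian, lower case)    */
	printk("%pUl\n", uuid);	/* 03020100-0504-0706-0809-0a0b0c0d0e0f (little endian, lower case) */
	printk("%pUB\n", uuid);	/* as %pUb but in upper case                                        */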
| 793 | /* | 915 | /* |
| 794 | * Show a '%p' thing. A kernel extension is that the '%p' is followed | 916 | * Show a '%p' thing. A kernel extension is that the '%p' is followed |
| 795 | * by an extra set of alphanumeric characters that are extended format | 917 | * by an extra set of alphanumeric characters that are extended format |
| @@ -801,19 +923,34 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr, | |||
| 801 | * - 'f' For simple symbolic function names without offset | 923 | * - 'f' For simple symbolic function names without offset |
| 802 | * - 'S' For symbolic direct pointers with offset | 924 | * - 'S' For symbolic direct pointers with offset |
| 803 | * - 's' For symbolic direct pointers without offset | 925 | * - 's' For symbolic direct pointers without offset |
| 804 | * - 'R' For a struct resource pointer, it prints the range of | 926 | * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] |
| 805 | * addresses (not the name nor the flags) | 927 | * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201] |
| 806 | * - 'M' For a 6-byte MAC address, it prints the address in the | 928 | * - 'M' For a 6-byte MAC address, it prints the address in the |
| 807 | * usual colon-separated hex notation | 929 | * usual colon-separated hex notation |
| 808 | * - 'm' For a 6-byte MAC address, it prints the hex address without colons | 930 | * - 'm' For a 6-byte MAC address, it prints the hex address without colons |
| 931 | * - 'MF' For a 6-byte MAC FDDI address, it prints the address | ||
| 932 | * with a dash-separated hex notation | ||
| 809 | * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way | 933 | * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way |
| 810 | * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) | 934 | * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) |
| 811 | * IPv6 uses colon separated network-order 16 bit hex with leading 0's | 935 | * IPv6 uses colon separated network-order 16 bit hex with leading 0's |
| 812 | * - 'i' [46] for 'raw' IPv4/IPv6 addresses | 936 | * - 'i' [46] for 'raw' IPv4/IPv6 addresses |
| 813 | * IPv6 omits the colons (01020304...0f) | 937 | * IPv6 omits the colons (01020304...0f) |
| 814 | * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) | 938 | * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) |
| 939 | * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order | ||
| 815 | * - 'I6c' for IPv6 addresses printed as specified by | 940 | * - 'I6c' for IPv6 addresses printed as specified by |
| 816 | * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt | 941 | * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 |
| 942 | * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form | ||
| 943 | * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" | ||
| 944 | * Options for %pU are: | ||
| 945 | * b big endian lower case hex (default) | ||
| 946 | * B big endian UPPER case hex | ||
| 947 | * l little endian lower case hex | ||
| 948 | * L little endian UPPER case hex | ||
| 949 | * big endian output byte order is: | ||
| 950 | * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15] | ||
| 951 | * little endian output byte order is: | ||
| 952 | * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15] | ||
| 953 | * | ||
| 817 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 | 954 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 |
| 818 | * function pointers are really function descriptors, which contain a | 955 | * function pointers are really function descriptors, which contain a |
| 819 | * pointer to the real address. | 956 | * pointer to the real address. |
| @@ -828,14 +965,16 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
| 828 | case 'F': | 965 | case 'F': |
| 829 | case 'f': | 966 | case 'f': |
| 830 | ptr = dereference_function_descriptor(ptr); | 967 | ptr = dereference_function_descriptor(ptr); |
| 831 | case 's': | ||
| 832 | /* Fallthrough */ | 968 | /* Fallthrough */ |
| 833 | case 'S': | 969 | case 'S': |
| 970 | case 's': | ||
| 834 | return symbol_string(buf, end, ptr, spec, *fmt); | 971 | return symbol_string(buf, end, ptr, spec, *fmt); |
| 835 | case 'R': | 972 | case 'R': |
| 836 | return resource_string(buf, end, ptr, spec); | 973 | case 'r': |
| 974 | return resource_string(buf, end, ptr, spec, fmt); | ||
| 837 | case 'M': /* Colon separated: 00:01:02:03:04:05 */ | 975 | case 'M': /* Colon separated: 00:01:02:03:04:05 */ |
| 838 | case 'm': /* Contiguous: 000102030405 */ | 976 | case 'm': /* Contiguous: 000102030405 */ |
| 977 | /* [mM]F (FDDI, bit reversed) */ | ||
| 839 | return mac_address_string(buf, end, ptr, spec, fmt); | 978 | return mac_address_string(buf, end, ptr, spec, fmt); |
| 840 | case 'I': /* Formatted IP supported | 979 | case 'I': /* Formatted IP supported |
| 841 | * 4: 1.2.3.4 | 980 | * 4: 1.2.3.4 |
| @@ -853,6 +992,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
| 853 | return ip4_addr_string(buf, end, ptr, spec, fmt); | 992 | return ip4_addr_string(buf, end, ptr, spec, fmt); |
| 854 | } | 993 | } |
| 855 | break; | 994 | break; |
| 995 | case 'U': | ||
| 996 | return uuid_string(buf, end, ptr, spec, fmt); | ||
| 856 | } | 997 | } |
| 857 | spec.flags |= SMALL; | 998 | spec.flags |= SMALL; |
| 858 | if (spec.field_width == -1) { | 999 | if (spec.field_width == -1) { |
| @@ -970,8 +1111,8 @@ precision: | |||
| 970 | qualifier: | 1111 | qualifier: |
| 971 | /* get the conversion qualifier */ | 1112 | /* get the conversion qualifier */ |
| 972 | spec->qualifier = -1; | 1113 | spec->qualifier = -1; |
| 973 | if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || | 1114 | if (*fmt == 'h' || TOLOWER(*fmt) == 'l' || |
| 974 | *fmt == 'Z' || *fmt == 'z' || *fmt == 't') { | 1115 | TOLOWER(*fmt) == 'z' || *fmt == 't') { |
| 975 | spec->qualifier = *fmt++; | 1116 | spec->qualifier = *fmt++; |
| 976 | if (unlikely(spec->qualifier == *fmt)) { | 1117 | if (unlikely(spec->qualifier == *fmt)) { |
| 977 | if (spec->qualifier == 'l') { | 1118 | if (spec->qualifier == 'l') { |
| @@ -1038,7 +1179,7 @@ qualifier: | |||
| 1038 | spec->type = FORMAT_TYPE_LONG; | 1179 | spec->type = FORMAT_TYPE_LONG; |
| 1039 | else | 1180 | else |
| 1040 | spec->type = FORMAT_TYPE_ULONG; | 1181 | spec->type = FORMAT_TYPE_ULONG; |
| 1041 | } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') { | 1182 | } else if (TOLOWER(spec->qualifier) == 'z') { |
| 1042 | spec->type = FORMAT_TYPE_SIZE_T; | 1183 | spec->type = FORMAT_TYPE_SIZE_T; |
| 1043 | } else if (spec->qualifier == 't') { | 1184 | } else if (spec->qualifier == 't') { |
| 1044 | spec->type = FORMAT_TYPE_PTRDIFF; | 1185 | spec->type = FORMAT_TYPE_PTRDIFF; |
| @@ -1074,7 +1215,18 @@ qualifier: | |||
| 1074 | * %ps output the name of a text symbol without offset | 1215 | * %ps output the name of a text symbol without offset |
| 1075 | * %pF output the name of a function pointer with its offset | 1216 | * %pF output the name of a function pointer with its offset |
| 1076 | * %pf output the name of a function pointer without its offset | 1217 | * %pf output the name of a function pointer without its offset |
| 1077 | * %pR output the address range in a struct resource | 1218 | * %pR output the address range in a struct resource with decoded flags |
| 1219 | * %pr output the address range in a struct resource with raw flags | ||
| 1220 | * %pM output a 6-byte MAC address with colons | ||
| 1221 | * %pm output a 6-byte MAC address without colons | ||
| 1222 | * %pI4 print an IPv4 address without leading zeros | ||
| 1223 | * %pi4 print an IPv4 address with leading zeros | ||
| 1224 | * %pI6 print an IPv6 address with colons | ||
| 1225 | * %pi6 print an IPv6 address without colons | ||
| 1226 | * %pI6c print an IPv6 address as specified by | ||
| 1227 | * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 | ||
| 1228 | * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper | ||
| 1229 | * case. | ||
| 1078 | * %n is ignored | 1230 | * %n is ignored |
| 1079 | * | 1231 | * |
| 1080 | * The return value is the number of characters which would | 1232 | * The return value is the number of characters which would |
| @@ -1091,8 +1243,7 @@ qualifier: | |||
| 1091 | int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | 1243 | int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) |
| 1092 | { | 1244 | { |
| 1093 | unsigned long long num; | 1245 | unsigned long long num; |
| 1094 | char *str, *end, c; | 1246 | char *str, *end; |
| 1095 | int read; | ||
| 1096 | struct printf_spec spec = {0}; | 1247 | struct printf_spec spec = {0}; |
| 1097 | 1248 | ||
| 1098 | /* Reject out-of-range values early. Large positive sizes are | 1249 | /* Reject out-of-range values early. Large positive sizes are |
| @@ -1111,8 +1262,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1111 | 1262 | ||
| 1112 | while (*fmt) { | 1263 | while (*fmt) { |
| 1113 | const char *old_fmt = fmt; | 1264 | const char *old_fmt = fmt; |
| 1114 | 1265 | int read = format_decode(fmt, &spec); | |
| 1115 | read = format_decode(fmt, &spec); | ||
| 1116 | 1266 | ||
| 1117 | fmt += read; | 1267 | fmt += read; |
| 1118 | 1268 | ||
| @@ -1136,7 +1286,9 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1136 | spec.precision = va_arg(args, int); | 1286 | spec.precision = va_arg(args, int); |
| 1137 | break; | 1287 | break; |
| 1138 | 1288 | ||
| 1139 | case FORMAT_TYPE_CHAR: | 1289 | case FORMAT_TYPE_CHAR: { |
| 1290 | char c; | ||
| 1291 | |||
| 1140 | if (!(spec.flags & LEFT)) { | 1292 | if (!(spec.flags & LEFT)) { |
| 1141 | while (--spec.field_width > 0) { | 1293 | while (--spec.field_width > 0) { |
| 1142 | if (str < end) | 1294 | if (str < end) |
| @@ -1155,6 +1307,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1155 | ++str; | 1307 | ++str; |
| 1156 | } | 1308 | } |
| 1157 | break; | 1309 | break; |
| 1310 | } | ||
| 1158 | 1311 | ||
| 1159 | case FORMAT_TYPE_STR: | 1312 | case FORMAT_TYPE_STR: |
| 1160 | str = string(str, end, va_arg(args, char *), spec); | 1313 | str = string(str, end, va_arg(args, char *), spec); |
| @@ -1185,8 +1338,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1185 | if (qualifier == 'l') { | 1338 | if (qualifier == 'l') { |
| 1186 | long *ip = va_arg(args, long *); | 1339 | long *ip = va_arg(args, long *); |
| 1187 | *ip = (str - buf); | 1340 | *ip = (str - buf); |
| 1188 | } else if (qualifier == 'Z' || | 1341 | } else if (TOLOWER(qualifier) == 'z') { |
| 1189 | qualifier == 'z') { | ||
| 1190 | size_t *ip = va_arg(args, size_t *); | 1342 | size_t *ip = va_arg(args, size_t *); |
| 1191 | *ip = (str - buf); | 1343 | *ip = (str - buf); |
| 1192 | } else { | 1344 | } else { |
| @@ -1269,7 +1421,8 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1269 | { | 1421 | { |
| 1270 | int i; | 1422 | int i; |
| 1271 | 1423 | ||
| 1272 | i=vsnprintf(buf,size,fmt,args); | 1424 | i = vsnprintf(buf, size, fmt, args); |
| 1425 | |||
| 1273 | return (i >= size) ? (size - 1) : i; | 1426 | return (i >= size) ? (size - 1) : i; |
| 1274 | } | 1427 | } |
| 1275 | EXPORT_SYMBOL(vscnprintf); | 1428 | EXPORT_SYMBOL(vscnprintf); |
| @@ -1288,14 +1441,15 @@ EXPORT_SYMBOL(vscnprintf); | |||
| 1288 | * | 1441 | * |
| 1289 | * See the vsnprintf() documentation for format string extensions over C99. | 1442 | * See the vsnprintf() documentation for format string extensions over C99. |
| 1290 | */ | 1443 | */ |
| 1291 | int snprintf(char * buf, size_t size, const char *fmt, ...) | 1444 | int snprintf(char *buf, size_t size, const char *fmt, ...) |
| 1292 | { | 1445 | { |
| 1293 | va_list args; | 1446 | va_list args; |
| 1294 | int i; | 1447 | int i; |
| 1295 | 1448 | ||
| 1296 | va_start(args, fmt); | 1449 | va_start(args, fmt); |
| 1297 | i=vsnprintf(buf,size,fmt,args); | 1450 | i = vsnprintf(buf, size, fmt, args); |
| 1298 | va_end(args); | 1451 | va_end(args); |
| 1452 | |||
| 1299 | return i; | 1453 | return i; |
| 1300 | } | 1454 | } |
| 1301 | EXPORT_SYMBOL(snprintf); | 1455 | EXPORT_SYMBOL(snprintf); |
| @@ -1311,7 +1465,7 @@ EXPORT_SYMBOL(snprintf); | |||
| 1311 | * the trailing '\0'. If @size is <= 0 the function returns 0. | 1465 | * the trailing '\0'. If @size is <= 0 the function returns 0. |
| 1312 | */ | 1466 | */ |
| 1313 | 1467 | ||
| 1314 | int scnprintf(char * buf, size_t size, const char *fmt, ...) | 1468 | int scnprintf(char *buf, size_t size, const char *fmt, ...) |
| 1315 | { | 1469 | { |
| 1316 | va_list args; | 1470 | va_list args; |
| 1317 | int i; | 1471 | int i; |
| @@ -1319,6 +1473,7 @@ int scnprintf(char * buf, size_t size, const char *fmt, ...) | |||
| 1319 | va_start(args, fmt); | 1473 | va_start(args, fmt); |
| 1320 | i = vsnprintf(buf, size, fmt, args); | 1474 | i = vsnprintf(buf, size, fmt, args); |
| 1321 | va_end(args); | 1475 | va_end(args); |
| 1476 | |||
| 1322 | return (i >= size) ? (size - 1) : i; | 1477 | return (i >= size) ? (size - 1) : i; |
| 1323 | } | 1478 | } |
| 1324 | EXPORT_SYMBOL(scnprintf); | 1479 | EXPORT_SYMBOL(scnprintf); |
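(A small usage note, not part of the patch: the return-value difference between snprintf() and scnprintf() that the code above implements, with an illustrative undersized buffer.)

	char buf[8];
	int n;

	n = snprintf(buf, sizeof(buf), "%s", "0123456789");
	/* n == 10: the length that would have been written; buf holds "0123456" */

	n = scnprintf(buf, sizeof(buf), "%s", "0123456789");
	/* n == 7: the length actually stored in buf, excluding the trailing '\0' */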
| @@ -1356,14 +1511,15 @@ EXPORT_SYMBOL(vsprintf); | |||
| 1356 | * | 1511 | * |
| 1357 | * See the vsnprintf() documentation for format string extensions over C99. | 1512 | * See the vsnprintf() documentation for format string extensions over C99. |
| 1358 | */ | 1513 | */ |
| 1359 | int sprintf(char * buf, const char *fmt, ...) | 1514 | int sprintf(char *buf, const char *fmt, ...) |
| 1360 | { | 1515 | { |
| 1361 | va_list args; | 1516 | va_list args; |
| 1362 | int i; | 1517 | int i; |
| 1363 | 1518 | ||
| 1364 | va_start(args, fmt); | 1519 | va_start(args, fmt); |
| 1365 | i=vsnprintf(buf, INT_MAX, fmt, args); | 1520 | i = vsnprintf(buf, INT_MAX, fmt, args); |
| 1366 | va_end(args); | 1521 | va_end(args); |
| 1522 | |||
| 1367 | return i; | 1523 | return i; |
| 1368 | } | 1524 | } |
| 1369 | EXPORT_SYMBOL(sprintf); | 1525 | EXPORT_SYMBOL(sprintf); |
| @@ -1396,7 +1552,6 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args) | |||
| 1396 | { | 1552 | { |
| 1397 | struct printf_spec spec = {0}; | 1553 | struct printf_spec spec = {0}; |
| 1398 | char *str, *end; | 1554 | char *str, *end; |
| 1399 | int read; | ||
| 1400 | 1555 | ||
| 1401 | str = (char *)bin_buf; | 1556 | str = (char *)bin_buf; |
| 1402 | end = (char *)(bin_buf + size); | 1557 | end = (char *)(bin_buf + size); |
| @@ -1421,14 +1576,15 @@ do { \ | |||
| 1421 | str += sizeof(type); \ | 1576 | str += sizeof(type); \ |
| 1422 | } while (0) | 1577 | } while (0) |
| 1423 | 1578 | ||
| 1424 | |||
| 1425 | while (*fmt) { | 1579 | while (*fmt) { |
| 1426 | read = format_decode(fmt, &spec); | 1580 | int read = format_decode(fmt, &spec); |
| 1427 | 1581 | ||
| 1428 | fmt += read; | 1582 | fmt += read; |
| 1429 | 1583 | ||
| 1430 | switch (spec.type) { | 1584 | switch (spec.type) { |
| 1431 | case FORMAT_TYPE_NONE: | 1585 | case FORMAT_TYPE_NONE: |
| 1586 | case FORMAT_TYPE_INVALID: | ||
| 1587 | case FORMAT_TYPE_PERCENT_CHAR: | ||
| 1432 | break; | 1588 | break; |
| 1433 | 1589 | ||
| 1434 | case FORMAT_TYPE_WIDTH: | 1590 | case FORMAT_TYPE_WIDTH: |
| @@ -1443,13 +1599,14 @@ do { \ | |||
| 1443 | case FORMAT_TYPE_STR: { | 1599 | case FORMAT_TYPE_STR: { |
| 1444 | const char *save_str = va_arg(args, char *); | 1600 | const char *save_str = va_arg(args, char *); |
| 1445 | size_t len; | 1601 | size_t len; |
| 1602 | |||
| 1446 | if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE | 1603 | if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE |
| 1447 | || (unsigned long)save_str < PAGE_SIZE) | 1604 | || (unsigned long)save_str < PAGE_SIZE) |
| 1448 | save_str = "<NULL>"; | 1605 | save_str = "(null)"; |
| 1449 | len = strlen(save_str); | 1606 | len = strlen(save_str) + 1; |
| 1450 | if (str + len + 1 < end) | 1607 | if (str + len < end) |
| 1451 | memcpy(str, save_str, len + 1); | 1608 | memcpy(str, save_str, len); |
| 1452 | str += len + 1; | 1609 | str += len; |
| 1453 | break; | 1610 | break; |
| 1454 | } | 1611 | } |
| 1455 | 1612 | ||
| @@ -1460,19 +1617,13 @@ do { \ | |||
| 1460 | fmt++; | 1617 | fmt++; |
| 1461 | break; | 1618 | break; |
| 1462 | 1619 | ||
| 1463 | case FORMAT_TYPE_PERCENT_CHAR: | ||
| 1464 | break; | ||
| 1465 | |||
| 1466 | case FORMAT_TYPE_INVALID: | ||
| 1467 | break; | ||
| 1468 | |||
| 1469 | case FORMAT_TYPE_NRCHARS: { | 1620 | case FORMAT_TYPE_NRCHARS: { |
| 1470 | /* skip %n 's argument */ | 1621 | /* skip %n 's argument */ |
| 1471 | int qualifier = spec.qualifier; | 1622 | int qualifier = spec.qualifier; |
| 1472 | void *skip_arg; | 1623 | void *skip_arg; |
| 1473 | if (qualifier == 'l') | 1624 | if (qualifier == 'l') |
| 1474 | skip_arg = va_arg(args, long *); | 1625 | skip_arg = va_arg(args, long *); |
| 1475 | else if (qualifier == 'Z' || qualifier == 'z') | 1626 | else if (TOLOWER(qualifier) == 'z') |
| 1476 | skip_arg = va_arg(args, size_t *); | 1627 | skip_arg = va_arg(args, size_t *); |
| 1477 | else | 1628 | else |
| 1478 | skip_arg = va_arg(args, int *); | 1629 | skip_arg = va_arg(args, int *); |
| @@ -1508,8 +1659,8 @@ do { \ | |||
| 1508 | } | 1659 | } |
| 1509 | } | 1660 | } |
| 1510 | } | 1661 | } |
| 1511 | return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf; | ||
| 1512 | 1662 | ||
| 1663 | return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf; | ||
| 1513 | #undef save_arg | 1664 | #undef save_arg |
| 1514 | } | 1665 | } |
| 1515 | EXPORT_SYMBOL_GPL(vbin_printf); | 1666 | EXPORT_SYMBOL_GPL(vbin_printf); |
| @@ -1538,11 +1689,9 @@ EXPORT_SYMBOL_GPL(vbin_printf); | |||
| 1538 | */ | 1689 | */ |
| 1539 | int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | 1690 | int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) |
| 1540 | { | 1691 | { |
| 1541 | unsigned long long num; | ||
| 1542 | char *str, *end, c; | ||
| 1543 | const char *args = (const char *)bin_buf; | ||
| 1544 | |||
| 1545 | struct printf_spec spec = {0}; | 1692 | struct printf_spec spec = {0}; |
| 1693 | char *str, *end; | ||
| 1694 | const char *args = (const char *)bin_buf; | ||
| 1546 | 1695 | ||
| 1547 | if (WARN_ON_ONCE((int) size < 0)) | 1696 | if (WARN_ON_ONCE((int) size < 0)) |
| 1548 | return 0; | 1697 | return 0; |
| @@ -1572,10 +1721,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 1572 | } | 1721 | } |
| 1573 | 1722 | ||
| 1574 | while (*fmt) { | 1723 | while (*fmt) { |
| 1575 | int read; | ||
| 1576 | const char *old_fmt = fmt; | 1724 | const char *old_fmt = fmt; |
| 1577 | 1725 | int read = format_decode(fmt, &spec); | |
| 1578 | read = format_decode(fmt, &spec); | ||
| 1579 | 1726 | ||
| 1580 | fmt += read; | 1727 | fmt += read; |
| 1581 | 1728 | ||
| @@ -1599,7 +1746,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 1599 | spec.precision = get_arg(int); | 1746 | spec.precision = get_arg(int); |
| 1600 | break; | 1747 | break; |
| 1601 | 1748 | ||
| 1602 | case FORMAT_TYPE_CHAR: | 1749 | case FORMAT_TYPE_CHAR: { |
| 1750 | char c; | ||
| 1751 | |||
| 1603 | if (!(spec.flags & LEFT)) { | 1752 | if (!(spec.flags & LEFT)) { |
| 1604 | while (--spec.field_width > 0) { | 1753 | while (--spec.field_width > 0) { |
| 1605 | if (str < end) | 1754 | if (str < end) |
| @@ -1617,11 +1766,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 1617 | ++str; | 1766 | ++str; |
| 1618 | } | 1767 | } |
| 1619 | break; | 1768 | break; |
| 1769 | } | ||
| 1620 | 1770 | ||
| 1621 | case FORMAT_TYPE_STR: { | 1771 | case FORMAT_TYPE_STR: { |
| 1622 | const char *str_arg = args; | 1772 | const char *str_arg = args; |
| 1623 | size_t len = strlen(str_arg); | 1773 | args += strlen(str_arg) + 1; |
| 1624 | args += len + 1; | ||
| 1625 | str = string(str, end, (char *)str_arg, spec); | 1774 | str = string(str, end, (char *)str_arg, spec); |
| 1626 | break; | 1775 | break; |
| 1627 | } | 1776 | } |
| @@ -1633,11 +1782,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 1633 | break; | 1782 | break; |
| 1634 | 1783 | ||
| 1635 | case FORMAT_TYPE_PERCENT_CHAR: | 1784 | case FORMAT_TYPE_PERCENT_CHAR: |
| 1636 | if (str < end) | ||
| 1637 | *str = '%'; | ||
| 1638 | ++str; | ||
| 1639 | break; | ||
| 1640 | |||
| 1641 | case FORMAT_TYPE_INVALID: | 1785 | case FORMAT_TYPE_INVALID: |
| 1642 | if (str < end) | 1786 | if (str < end) |
| 1643 | *str = '%'; | 1787 | *str = '%'; |
| @@ -1648,15 +1792,15 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 1648 | /* skip */ | 1792 | /* skip */ |
| 1649 | break; | 1793 | break; |
| 1650 | 1794 | ||
| 1651 | default: | 1795 | default: { |
| 1796 | unsigned long long num; | ||
| 1797 | |||
| 1652 | switch (spec.type) { | 1798 | switch (spec.type) { |
| 1653 | 1799 | ||
| 1654 | case FORMAT_TYPE_LONG_LONG: | 1800 | case FORMAT_TYPE_LONG_LONG: |
| 1655 | num = get_arg(long long); | 1801 | num = get_arg(long long); |
| 1656 | break; | 1802 | break; |
| 1657 | case FORMAT_TYPE_ULONG: | 1803 | case FORMAT_TYPE_ULONG: |
| 1658 | num = get_arg(unsigned long); | ||
| 1659 | break; | ||
| 1660 | case FORMAT_TYPE_LONG: | 1804 | case FORMAT_TYPE_LONG: |
| 1661 | num = get_arg(unsigned long); | 1805 | num = get_arg(unsigned long); |
| 1662 | break; | 1806 | break; |
| @@ -1686,8 +1830,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 1686 | } | 1830 | } |
| 1687 | 1831 | ||
| 1688 | str = number(str, end, num, spec); | 1832 | str = number(str, end, num, spec); |
| 1689 | } | 1833 | } /* default: */ |
| 1690 | } | 1834 | } /* switch(spec.type) */ |
| 1835 | } /* while(*fmt) */ | ||
| 1691 | 1836 | ||
| 1692 | if (size > 0) { | 1837 | if (size > 0) { |
| 1693 | if (str < end) | 1838 | if (str < end) |
| @@ -1721,6 +1866,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) | |||
| 1721 | va_start(args, fmt); | 1866 | va_start(args, fmt); |
| 1722 | ret = vbin_printf(bin_buf, size, fmt, args); | 1867 | ret = vbin_printf(bin_buf, size, fmt, args); |
| 1723 | va_end(args); | 1868 | va_end(args); |
| 1869 | |||
| 1724 | return ret; | 1870 | return ret; |
| 1725 | } | 1871 | } |
| 1726 | EXPORT_SYMBOL_GPL(bprintf); | 1872 | EXPORT_SYMBOL_GPL(bprintf); |
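(Illustrative pairing of the two halves, not taken from the patch: bprintf()/vbin_printf() packs the arguments into a u32 buffer, and bstr_printf() renders them later with the same format string. The buffer sizes and the printed values are made up.)

	u32 bin[64];
	char out[128];

	bprintf(bin, ARRAY_SIZE(bin), "comm %s pid %d", current->comm, current->pid);
	/* ... later, possibly from a different context ... */
	bstr_printf(out, sizeof(out), "comm %s pid %d", bin);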
| @@ -1733,27 +1879,23 @@ EXPORT_SYMBOL_GPL(bprintf); | |||
| 1733 | * @fmt: format of buffer | 1879 | * @fmt: format of buffer |
| 1734 | * @args: arguments | 1880 | * @args: arguments |
| 1735 | */ | 1881 | */ |
| 1736 | int vsscanf(const char * buf, const char * fmt, va_list args) | 1882 | int vsscanf(const char *buf, const char *fmt, va_list args) |
| 1737 | { | 1883 | { |
| 1738 | const char *str = buf; | 1884 | const char *str = buf; |
| 1739 | char *next; | 1885 | char *next; |
| 1740 | char digit; | 1886 | char digit; |
| 1741 | int num = 0; | 1887 | int num = 0; |
| 1742 | int qualifier; | 1888 | int qualifier, base, field_width; |
| 1743 | int base; | 1889 | bool is_sign; |
| 1744 | int field_width; | ||
| 1745 | int is_sign = 0; | ||
| 1746 | 1890 | ||
| 1747 | while(*fmt && *str) { | 1891 | while (*fmt && *str) { |
| 1748 | /* skip any white space in format */ | 1892 | /* skip any white space in format */ |
| 1749 | /* white space in format matchs any amount of | 1893 | /* white space in format matchs any amount of |
| 1750 | * white space, including none, in the input. | 1894 | * white space, including none, in the input. |
| 1751 | */ | 1895 | */ |
| 1752 | if (isspace(*fmt)) { | 1896 | if (isspace(*fmt)) { |
| 1753 | while (isspace(*fmt)) | 1897 | fmt = skip_spaces(++fmt); |
| 1754 | ++fmt; | 1898 | str = skip_spaces(str); |
| 1755 | while (isspace(*str)) | ||
| 1756 | ++str; | ||
| 1757 | } | 1899 | } |
| 1758 | 1900 | ||
| 1759 | /* anything that is not a conversion must match exactly */ | 1901 | /* anything that is not a conversion must match exactly */ |
| @@ -1766,7 +1908,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
| 1766 | if (!*fmt) | 1908 | if (!*fmt) |
| 1767 | break; | 1909 | break; |
| 1768 | ++fmt; | 1910 | ++fmt; |
| 1769 | 1911 | ||
| 1770 | /* skip this conversion. | 1912 | /* skip this conversion. |
| 1771 | * advance both strings to next white space | 1913 | * advance both strings to next white space |
| 1772 | */ | 1914 | */ |
| @@ -1785,8 +1927,8 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
| 1785 | 1927 | ||
| 1786 | /* get conversion qualifier */ | 1928 | /* get conversion qualifier */ |
| 1787 | qualifier = -1; | 1929 | qualifier = -1; |
| 1788 | if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || | 1930 | if (*fmt == 'h' || TOLOWER(*fmt) == 'l' || |
| 1789 | *fmt == 'Z' || *fmt == 'z') { | 1931 | TOLOWER(*fmt) == 'z') { |
| 1790 | qualifier = *fmt++; | 1932 | qualifier = *fmt++; |
| 1791 | if (unlikely(qualifier == *fmt)) { | 1933 | if (unlikely(qualifier == *fmt)) { |
| 1792 | if (qualifier == 'h') { | 1934 | if (qualifier == 'h') { |
| @@ -1798,16 +1940,17 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
| 1798 | } | 1940 | } |
| 1799 | } | 1941 | } |
| 1800 | } | 1942 | } |
| 1801 | base = 10; | ||
| 1802 | is_sign = 0; | ||
| 1803 | 1943 | ||
| 1804 | if (!*fmt || !*str) | 1944 | if (!*fmt || !*str) |
| 1805 | break; | 1945 | break; |
| 1806 | 1946 | ||
| 1807 | switch(*fmt++) { | 1947 | base = 10; |
| 1948 | is_sign = 0; | ||
| 1949 | |||
| 1950 | switch (*fmt++) { | ||
| 1808 | case 'c': | 1951 | case 'c': |
| 1809 | { | 1952 | { |
| 1810 | char *s = (char *) va_arg(args,char*); | 1953 | char *s = (char *)va_arg(args, char*); |
| 1811 | if (field_width == -1) | 1954 | if (field_width == -1) |
| 1812 | field_width = 1; | 1955 | field_width = 1; |
| 1813 | do { | 1956 | do { |
| @@ -1818,17 +1961,15 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
| 1818 | continue; | 1961 | continue; |
| 1819 | case 's': | 1962 | case 's': |
| 1820 | { | 1963 | { |
| 1821 | char *s = (char *) va_arg(args, char *); | 1964 | char *s = (char *)va_arg(args, char *); |
| 1822 | if(field_width == -1) | 1965 | if (field_width == -1) |
| 1823 | field_width = INT_MAX; | 1966 | field_width = INT_MAX; |
| 1824 | /* first, skip leading white space in buffer */ | 1967 | /* first, skip leading white space in buffer */ |
| 1825 | while (isspace(*str)) | 1968 | str = skip_spaces(str); |
| 1826 | str++; | ||
| 1827 | 1969 | ||
| 1828 | /* now copy until next white space */ | 1970 | /* now copy until next white space */ |
| 1829 | while (*str && !isspace(*str) && field_width--) { | 1971 | while (*str && !isspace(*str) && field_width--) |
| 1830 | *s++ = *str++; | 1972 | *s++ = *str++; |
| 1831 | } | ||
| 1832 | *s = '\0'; | 1973 | *s = '\0'; |
| 1833 | num++; | 1974 | num++; |
| 1834 | } | 1975 | } |
| @@ -1836,7 +1977,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
| 1836 | case 'n': | 1977 | case 'n': |
| 1837 | /* return number of characters read so far */ | 1978 | /* return number of characters read so far */ |
| 1838 | { | 1979 | { |
| 1839 | int *i = (int *)va_arg(args,int*); | 1980 | int *i = (int *)va_arg(args, int*); |
| 1840 | *i = str - buf; | 1981 | *i = str - buf; |
| 1841 | } | 1982 | } |
| 1842 | continue; | 1983 | continue; |
| @@ -1848,14 +1989,14 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
| 1848 | base = 16; | 1989 | base = 16; |
| 1849 | break; | 1990 | break; |
| 1850 | case 'i': | 1991 | case 'i': |
| 1851 | base = 0; | 1992 | base = 0; |
| 1852 | case 'd': | 1993 | case 'd': |
| 1853 | is_sign = 1; | 1994 | is_sign = 1; |
| 1854 | case 'u': | 1995 | case 'u': |
| 1855 | break; | 1996 | break; |
| 1856 | case '%': | 1997 | case '%': |
| 1857 | /* looking for '%' in str */ | 1998 | /* looking for '%' in str */ |
| 1858 | if (*str++ != '%') | 1999 | if (*str++ != '%') |
| 1859 | return num; | 2000 | return num; |
| 1860 | continue; | 2001 | continue; |
| 1861 | default: | 2002 | default: |
| @@ -1866,71 +2007,70 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
| 1866 | /* have some sort of integer conversion. | 2007 | /* have some sort of integer conversion. |
| 1867 | * first, skip white space in buffer. | 2008 | * first, skip white space in buffer. |
| 1868 | */ | 2009 | */ |
| 1869 | while (isspace(*str)) | 2010 | str = skip_spaces(str); |
| 1870 | str++; | ||
| 1871 | 2011 | ||
| 1872 | digit = *str; | 2012 | digit = *str; |
| 1873 | if (is_sign && digit == '-') | 2013 | if (is_sign && digit == '-') |
| 1874 | digit = *(str + 1); | 2014 | digit = *(str + 1); |
| 1875 | 2015 | ||
| 1876 | if (!digit | 2016 | if (!digit |
| 1877 | || (base == 16 && !isxdigit(digit)) | 2017 | || (base == 16 && !isxdigit(digit)) |
| 1878 | || (base == 10 && !isdigit(digit)) | 2018 | || (base == 10 && !isdigit(digit)) |
| 1879 | || (base == 8 && (!isdigit(digit) || digit > '7')) | 2019 | || (base == 8 && (!isdigit(digit) || digit > '7')) |
| 1880 | || (base == 0 && !isdigit(digit))) | 2020 | || (base == 0 && !isdigit(digit))) |
| 1881 | break; | 2021 | break; |
| 1882 | 2022 | ||
| 1883 | switch(qualifier) { | 2023 | switch (qualifier) { |
| 1884 | case 'H': /* that's 'hh' in format */ | 2024 | case 'H': /* that's 'hh' in format */ |
| 1885 | if (is_sign) { | 2025 | if (is_sign) { |
| 1886 | signed char *s = (signed char *) va_arg(args,signed char *); | 2026 | signed char *s = (signed char *)va_arg(args, signed char *); |
| 1887 | *s = (signed char) simple_strtol(str,&next,base); | 2027 | *s = (signed char)simple_strtol(str, &next, base); |
| 1888 | } else { | 2028 | } else { |
| 1889 | unsigned char *s = (unsigned char *) va_arg(args, unsigned char *); | 2029 | unsigned char *s = (unsigned char *)va_arg(args, unsigned char *); |
| 1890 | *s = (unsigned char) simple_strtoul(str, &next, base); | 2030 | *s = (unsigned char)simple_strtoul(str, &next, base); |
| 1891 | } | 2031 | } |
| 1892 | break; | 2032 | break; |
| 1893 | case 'h': | 2033 | case 'h': |
| 1894 | if (is_sign) { | 2034 | if (is_sign) { |
| 1895 | short *s = (short *) va_arg(args,short *); | 2035 | short *s = (short *)va_arg(args, short *); |
| 1896 | *s = (short) simple_strtol(str,&next,base); | 2036 | *s = (short)simple_strtol(str, &next, base); |
| 1897 | } else { | 2037 | } else { |
| 1898 | unsigned short *s = (unsigned short *) va_arg(args, unsigned short *); | 2038 | unsigned short *s = (unsigned short *)va_arg(args, unsigned short *); |
| 1899 | *s = (unsigned short) simple_strtoul(str, &next, base); | 2039 | *s = (unsigned short)simple_strtoul(str, &next, base); |
| 1900 | } | 2040 | } |
| 1901 | break; | 2041 | break; |
| 1902 | case 'l': | 2042 | case 'l': |
| 1903 | if (is_sign) { | 2043 | if (is_sign) { |
| 1904 | long *l = (long *) va_arg(args,long *); | 2044 | long *l = (long *)va_arg(args, long *); |
| 1905 | *l = simple_strtol(str,&next,base); | 2045 | *l = simple_strtol(str, &next, base); |
| 1906 | } else { | 2046 | } else { |
| 1907 | unsigned long *l = (unsigned long*) va_arg(args,unsigned long*); | 2047 | unsigned long *l = (unsigned long *)va_arg(args, unsigned long *); |
| 1908 | *l = simple_strtoul(str,&next,base); | 2048 | *l = simple_strtoul(str, &next, base); |
| 1909 | } | 2049 | } |
| 1910 | break; | 2050 | break; |
| 1911 | case 'L': | 2051 | case 'L': |
| 1912 | if (is_sign) { | 2052 | if (is_sign) { |
| 1913 | long long *l = (long long*) va_arg(args,long long *); | 2053 | long long *l = (long long *)va_arg(args, long long *); |
| 1914 | *l = simple_strtoll(str,&next,base); | 2054 | *l = simple_strtoll(str, &next, base); |
| 1915 | } else { | 2055 | } else { |
| 1916 | unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*); | 2056 | unsigned long long *l = (unsigned long long *)va_arg(args, unsigned long long *); |
| 1917 | *l = simple_strtoull(str,&next,base); | 2057 | *l = simple_strtoull(str, &next, base); |
| 1918 | } | 2058 | } |
| 1919 | break; | 2059 | break; |
| 1920 | case 'Z': | 2060 | case 'Z': |
| 1921 | case 'z': | 2061 | case 'z': |
| 1922 | { | 2062 | { |
| 1923 | size_t *s = (size_t*) va_arg(args,size_t*); | 2063 | size_t *s = (size_t *)va_arg(args, size_t *); |
| 1924 | *s = (size_t) simple_strtoul(str,&next,base); | 2064 | *s = (size_t)simple_strtoul(str, &next, base); |
| 1925 | } | 2065 | } |
| 1926 | break; | 2066 | break; |
| 1927 | default: | 2067 | default: |
| 1928 | if (is_sign) { | 2068 | if (is_sign) { |
| 1929 | int *i = (int *) va_arg(args, int*); | 2069 | int *i = (int *)va_arg(args, int *); |
| 1930 | *i = (int) simple_strtol(str,&next,base); | 2070 | *i = (int)simple_strtol(str, &next, base); |
| 1931 | } else { | 2071 | } else { |
| 1932 | unsigned int *i = (unsigned int*) va_arg(args, unsigned int*); | 2072 | unsigned int *i = (unsigned int *)va_arg(args, unsigned int*); |
| 1933 | *i = (unsigned int) simple_strtoul(str,&next,base); | 2073 | *i = (unsigned int)simple_strtoul(str, &next, base); |
| 1934 | } | 2074 | } |
| 1935 | break; | 2075 | break; |
| 1936 | } | 2076 | } |
| @@ -1961,14 +2101,15 @@ EXPORT_SYMBOL(vsscanf); | |||
| 1961 | * @fmt: formatting of buffer | 2101 | * @fmt: formatting of buffer |
| 1962 | * @...: resulting arguments | 2102 | * @...: resulting arguments |
| 1963 | */ | 2103 | */ |
| 1964 | int sscanf(const char * buf, const char * fmt, ...) | 2104 | int sscanf(const char *buf, const char *fmt, ...) |
| 1965 | { | 2105 | { |
| 1966 | va_list args; | 2106 | va_list args; |
| 1967 | int i; | 2107 | int i; |
| 1968 | 2108 | ||
| 1969 | va_start(args,fmt); | 2109 | va_start(args, fmt); |
| 1970 | i = vsscanf(buf,fmt,args); | 2110 | i = vsscanf(buf, fmt, args); |
| 1971 | va_end(args); | 2111 | va_end(args); |
| 2112 | |||
| 1972 | return i; | 2113 | return i; |
| 1973 | } | 2114 | } |
| 1974 | EXPORT_SYMBOL(sscanf); | 2115 | EXPORT_SYMBOL(sscanf); |
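(A short usage sketch, not from the patch, of the scanner after the cleanups above; `buf` is an assumed input string and the expected contents are illustrative.)

	int maj, min;
	char name[16];

	/* leading and embedded white space is skipped, so "  eth0   12 7" parses cleanly */
	if (sscanf(buf, "%15s %d %d", name, &maj, &min) != 3)
		return -EINVAL;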
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c index 8550b0c05d00..215447c55261 100644 --- a/lib/zlib_inflate/inffast.c +++ b/lib/zlib_inflate/inffast.c | |||
| @@ -8,6 +8,21 @@ | |||
| 8 | #include "inflate.h" | 8 | #include "inflate.h" |
| 9 | #include "inffast.h" | 9 | #include "inffast.h" |
| 10 | 10 | ||
| 11 | /* Only do the unaligned "Faster" variant when | ||
| 12 | * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set | ||
| 13 | * | ||
| 14 | * On powerpc, it won't be as we don't include autoconf.h | ||
| 15 | * automatically for the boot wrapper, which is intended as | ||
| 16 | * we run in an environment where we may not be able to deal | ||
| 17 | * with (even rare) alignment faults. In addition, we do not | ||
| 18 | * define __KERNEL__ for arch/powerpc/boot unlike x86 | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
| 22 | #include <asm/unaligned.h> | ||
| 23 | #include <asm/byteorder.h> | ||
| 24 | #endif | ||
| 25 | |||
| 11 | #ifndef ASMINF | 26 | #ifndef ASMINF |
| 12 | 27 | ||
| 13 | /* Allow machine dependent optimization for post-increment or pre-increment. | 28 | /* Allow machine dependent optimization for post-increment or pre-increment. |
| @@ -24,9 +39,11 @@ | |||
| 24 | #ifdef POSTINC | 39 | #ifdef POSTINC |
| 25 | # define OFF 0 | 40 | # define OFF 0 |
| 26 | # define PUP(a) *(a)++ | 41 | # define PUP(a) *(a)++ |
| 42 | # define UP_UNALIGNED(a) get_unaligned((a)++) | ||
| 27 | #else | 43 | #else |
| 28 | # define OFF 1 | 44 | # define OFF 1 |
| 29 | # define PUP(a) *++(a) | 45 | # define PUP(a) *++(a) |
| 46 | # define UP_UNALIGNED(a) get_unaligned(++(a)) | ||
| 30 | #endif | 47 | #endif |
| 31 | 48 | ||
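(An illustration, not part of the patch, of why the unaligned fast path in the copy hunk further down needs a special case for short distances: an LZ77 match with dist == 1 simply repeats the previous output byte, so a 16-bit copy must first double that byte into a pattern.)

	/* Conceptually (values illustrative):
	 *
	 *   output so far: ... 'a'                       match: dist = 1, len = 6
	 *   pat16        : 'a''a'                        built from the last output byte
	 *   result       : ... 'a''a''a''a''a''a''a'
	 *
	 * For dist == 2 the last two output bytes already form the 16-bit pattern,
	 * and for dist > 2 the halfwords can be fetched with get_unaligned() directly.
	 */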
| 32 | /* | 49 | /* |
| @@ -239,18 +256,62 @@ void inflate_fast(z_streamp strm, unsigned start) | |||
| 239 | } | 256 | } |
| 240 | } | 257 | } |
| 241 | else { | 258 | else { |
| 259 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
| 260 | unsigned short *sout; | ||
| 261 | unsigned long loops; | ||
| 262 | |||
| 263 | from = out - dist; /* copy direct from output */ | ||
| 264 | /* minimum length is three */ | ||
| 265 | /* Align out addr */ | ||
| 266 | if (!((long)(out - 1 + OFF) & 1)) { | ||
| 267 | PUP(out) = PUP(from); | ||
| 268 | len--; | ||
| 269 | } | ||
| 270 | sout = (unsigned short *)(out - OFF); | ||
| 271 | if (dist > 2) { | ||
| 272 | unsigned short *sfrom; | ||
| 273 | |||
| 274 | sfrom = (unsigned short *)(from - OFF); | ||
| 275 | loops = len >> 1; | ||
| 276 | do | ||
| 277 | PUP(sout) = UP_UNALIGNED(sfrom); | ||
| 278 | while (--loops); | ||
| 279 | out = (unsigned char *)sout + OFF; | ||
| 280 | from = (unsigned char *)sfrom + OFF; | ||
| 281 | } else { /* dist == 1 or dist == 2 */ | ||
| 282 | unsigned short pat16; | ||
| 283 | |||
| 284 | pat16 = *(sout-2+2*OFF); | ||
| 285 | if (dist == 1) | ||
| 286 | #if defined(__BIG_ENDIAN) | ||
| 287 | pat16 = (pat16 & 0xff) | ((pat16 & 0xff) << 8); | ||
| 288 | #elif defined(__LITTLE_ENDIAN) | ||
| 289 | pat16 = (pat16 & 0xff00) | ((pat16 & 0xff00) >> 8); | ||
| 290 | #else | ||
| 291 | #error __BIG_ENDIAN nor __LITTLE_ENDIAN is defined | ||
| 292 | #endif | ||
| 293 | loops = len >> 1; | ||
| 294 | do | ||
| 295 | PUP(sout) = pat16; | ||
| 296 | while (--loops); | ||
| 297 | out = (unsigned char *)sout + OFF; | ||
| 298 | } | ||
| 299 | if (len & 1) | ||
| 300 | PUP(out) = PUP(from); | ||
| 301 | #else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ | ||
| 242 | from = out - dist; /* copy direct from output */ | 302 | from = out - dist; /* copy direct from output */ |
| 243 | do { /* minimum length is three */ | 303 | do { /* minimum length is three */ |
| 244 | PUP(out) = PUP(from); | 304 | PUP(out) = PUP(from); |
| 245 | PUP(out) = PUP(from); | 305 | PUP(out) = PUP(from); |
| 246 | PUP(out) = PUP(from); | 306 | PUP(out) = PUP(from); |
| 247 | len -= 3; | 307 | len -= 3; |
| 248 | } while (len > 2); | 308 | } while (len > 2); |
| 249 | if (len) { | 309 | if (len) { |
| 250 | PUP(out) = PUP(from); | 310 | PUP(out) = PUP(from); |
| 251 | if (len > 1) | 311 | if (len > 1) |
| 252 | PUP(out) = PUP(from); | 312 | PUP(out) = PUP(from); |
| 253 | } | 313 | } |
| 314 | #endif /* !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ | ||
| 254 | } | 315 | } |
| 255 | } | 316 | } |
| 256 | else if ((op & 64) == 0) { /* 2nd level distance code */ | 317 | else if ((op & 64) == 0) { /* 2nd level distance code */ |
