diff options
Diffstat (limited to 'lib')
49 files changed, 2885 insertions, 677 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index bb1326d3839c..170d8ca901d8 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
| @@ -117,6 +117,10 @@ config DECOMPRESS_BZIP2 | |||
| 117 | config DECOMPRESS_LZMA | 117 | config DECOMPRESS_LZMA |
| 118 | tristate | 118 | tristate |
| 119 | 119 | ||
| 120 | config DECOMPRESS_LZO | ||
| 121 | select LZO_DECOMPRESS | ||
| 122 | tristate | ||
| 123 | |||
| 120 | # | 124 | # |
| 121 | # Generic allocator support is selected if needed | 125 | # Generic allocator support is selected if needed |
| 122 | # | 126 | # |
| @@ -156,6 +160,9 @@ config TEXTSEARCH_BM | |||
| 156 | config TEXTSEARCH_FSM | 160 | config TEXTSEARCH_FSM |
| 157 | tristate | 161 | tristate |
| 158 | 162 | ||
| 163 | config BTREE | ||
| 164 | boolean | ||
| 165 | |||
| 159 | config HAS_IOMEM | 166 | config HAS_IOMEM |
| 160 | boolean | 167 | boolean |
| 161 | depends on !NO_IOMEM | 168 | depends on !NO_IOMEM |
| @@ -200,4 +207,7 @@ config NLATTR | |||
| 200 | config GENERIC_ATOMIC64 | 207 | config GENERIC_ATOMIC64 |
| 201 | bool | 208 | bool |
| 202 | 209 | ||
| 210 | config LRU_CACHE | ||
| 211 | tristate | ||
| 212 | |||
| 203 | endmenu | 213 | endmenu |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 234ceb10861f..935248bdbc47 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -103,9 +103,10 @@ config HEADERS_CHECK | |||
| 103 | 103 | ||
| 104 | config DEBUG_SECTION_MISMATCH | 104 | config DEBUG_SECTION_MISMATCH |
| 105 | bool "Enable full Section mismatch analysis" | 105 | bool "Enable full Section mismatch analysis" |
| 106 | depends on UNDEFINED | 106 | depends on UNDEFINED || (BLACKFIN) |
| 107 | default y | ||
| 107 | # This option is on purpose disabled for now. | 108 | # This option is on purpose disabled for now. |
| 108 | # It will be enabled when we are down to a resonable number | 109 | # It will be enabled when we are down to a reasonable number |
| 109 | # of section mismatch warnings (< 10 for an allyesconfig build) | 110 | # of section mismatch warnings (< 10 for an allyesconfig build) |
| 110 | help | 111 | help |
| 111 | The section mismatch analysis checks if there are illegal | 112 | The section mismatch analysis checks if there are illegal |
| @@ -298,6 +299,14 @@ config DEBUG_OBJECTS_TIMERS | |||
| 298 | timer routines to track the life time of timer objects and | 299 | timer routines to track the life time of timer objects and |
| 299 | validate the timer operations. | 300 | validate the timer operations. |
| 300 | 301 | ||
| 302 | config DEBUG_OBJECTS_WORK | ||
| 303 | bool "Debug work objects" | ||
| 304 | depends on DEBUG_OBJECTS | ||
| 305 | help | ||
| 306 | If you say Y here, additional code will be inserted into the | ||
| 307 | work queue routines to track the life time of work objects and | ||
| 308 | validate the work operations. | ||
| 309 | |||
| 301 | config DEBUG_OBJECTS_ENABLE_DEFAULT | 310 | config DEBUG_OBJECTS_ENABLE_DEFAULT |
| 302 | int "debug_objects bootup default value (0-1)" | 311 | int "debug_objects bootup default value (0-1)" |
| 303 | range 0 1 | 312 | range 0 1 |
| @@ -347,11 +356,12 @@ config SLUB_STATS | |||
| 347 | config DEBUG_KMEMLEAK | 356 | config DEBUG_KMEMLEAK |
| 348 | bool "Kernel memory leak detector" | 357 | bool "Kernel memory leak detector" |
| 349 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ | 358 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ |
| 350 | (X86 || ARM || PPC || S390) | 359 | (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE) |
| 351 | 360 | ||
| 352 | select DEBUG_FS if SYSFS | 361 | select DEBUG_FS if SYSFS |
| 353 | select STACKTRACE if STACKTRACE_SUPPORT | 362 | select STACKTRACE if STACKTRACE_SUPPORT |
| 354 | select KALLSYMS | 363 | select KALLSYMS |
| 364 | select CRC32 | ||
| 355 | help | 365 | help |
| 356 | Say Y here if you want to enable the memory leak | 366 | Say Y here if you want to enable the memory leak |
| 357 | detector. The memory allocation/freeing is traced in a way | 367 | detector. The memory allocation/freeing is traced in a way |
| @@ -490,6 +500,18 @@ config PROVE_LOCKING | |||
| 490 | 500 | ||
| 491 | For more details, see Documentation/lockdep-design.txt. | 501 | For more details, see Documentation/lockdep-design.txt. |
| 492 | 502 | ||
| 503 | config PROVE_RCU | ||
| 504 | bool "RCU debugging: prove RCU correctness" | ||
| 505 | depends on PROVE_LOCKING | ||
| 506 | default n | ||
| 507 | help | ||
| 508 | This feature enables lockdep extensions that check for correct | ||
| 509 | use of RCU APIs. This is currently under development. Say Y | ||
| 510 | if you want to debug RCU usage or help work on the PROVE_RCU | ||
| 511 | feature. | ||
| 512 | |||
| 513 | Say N if you are unsure. | ||
| 514 | |||
| 493 | config LOCKDEP | 515 | config LOCKDEP |
| 494 | bool | 516 | bool |
| 495 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | 517 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT |
| @@ -511,6 +533,14 @@ config LOCK_STAT | |||
| 511 | 533 | ||
| 512 | For more details, see Documentation/lockstat.txt | 534 | For more details, see Documentation/lockstat.txt |
| 513 | 535 | ||
| 536 | This also enables lock events required by "perf lock", | ||
| 537 | subcommand of perf. | ||
| 538 | If you want to use "perf lock", you also need to turn on | ||
| 539 | CONFIG_EVENT_TRACING. | ||
| 540 | |||
| 541 | CONFIG_LOCK_STAT defines "contended" and "acquired" lock events. | ||
| 542 | (CONFIG_LOCKDEP defines "acquire" and "release" events.) | ||
| 543 | |||
| 514 | config DEBUG_LOCKDEP | 544 | config DEBUG_LOCKDEP |
| 515 | bool "Lock dependency engine debugging" | 545 | bool "Lock dependency engine debugging" |
| 516 | depends on DEBUG_KERNEL && LOCKDEP | 546 | depends on DEBUG_KERNEL && LOCKDEP |
| @@ -567,7 +597,7 @@ config DEBUG_BUGVERBOSE | |||
| 567 | depends on BUG | 597 | depends on BUG |
| 568 | depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ | 598 | depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ |
| 569 | FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 | 599 | FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 |
| 570 | default !EMBEDDED | 600 | default y |
| 571 | help | 601 | help |
| 572 | Say Y here to make BUG() panics output the file name and line number | 602 | Say Y here to make BUG() panics output the file name and line number |
| 573 | of the BUG call as well as the EIP and oops trace. This aids | 603 | of the BUG call as well as the EIP and oops trace. This aids |
| @@ -750,16 +780,28 @@ config RCU_TORTURE_TEST_RUNNABLE | |||
| 750 | config RCU_CPU_STALL_DETECTOR | 780 | config RCU_CPU_STALL_DETECTOR |
| 751 | bool "Check for stalled CPUs delaying RCU grace periods" | 781 | bool "Check for stalled CPUs delaying RCU grace periods" |
| 752 | depends on TREE_RCU || TREE_PREEMPT_RCU | 782 | depends on TREE_RCU || TREE_PREEMPT_RCU |
| 753 | default n | 783 | default y |
| 754 | help | 784 | help |
| 755 | This option causes RCU to printk information on which | 785 | This option causes RCU to printk information on which |
| 756 | CPUs are delaying the current grace period, but only when | 786 | CPUs are delaying the current grace period, but only when |
| 757 | the grace period extends for excessive time periods. | 787 | the grace period extends for excessive time periods. |
| 758 | 788 | ||
| 759 | Say Y if you want RCU to perform such checks. | 789 | Say N if you want to disable such checks. |
| 790 | |||
| 791 | Say Y if you are unsure. | ||
| 792 | |||
| 793 | config RCU_CPU_STALL_VERBOSE | ||
| 794 | bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" | ||
| 795 | depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU | ||
| 796 | default n | ||
| 797 | help | ||
| 798 | This option causes RCU to printk detailed per-task information | ||
| 799 | for any tasks that are stalling the current RCU grace period. | ||
| 760 | 800 | ||
| 761 | Say N if you are unsure. | 801 | Say N if you are unsure. |
| 762 | 802 | ||
| 803 | Say Y if you want to enable such checks. | ||
| 804 | |||
| 763 | config KPROBES_SANITY_TEST | 805 | config KPROBES_SANITY_TEST |
| 764 | bool "Kprobes sanity tests" | 806 | bool "Kprobes sanity tests" |
| 765 | depends on DEBUG_KERNEL | 807 | depends on DEBUG_KERNEL |
| @@ -831,8 +873,7 @@ config DEBUG_FORCE_WEAK_PER_CPU | |||
| 831 | 873 | ||
| 832 | config LKDTM | 874 | config LKDTM |
| 833 | tristate "Linux Kernel Dump Test Tool Module" | 875 | tristate "Linux Kernel Dump Test Tool Module" |
| 834 | depends on DEBUG_KERNEL | 876 | depends on DEBUG_FS |
| 835 | depends on KPROBES | ||
| 836 | depends on BLOCK | 877 | depends on BLOCK |
| 837 | default n | 878 | default n |
| 838 | help | 879 | help |
| @@ -843,7 +884,7 @@ config LKDTM | |||
| 843 | called lkdtm. | 884 | called lkdtm. |
| 844 | 885 | ||
| 845 | Documentation on how to use the module can be found in | 886 | Documentation on how to use the module can be found in |
| 846 | drivers/misc/lkdtm.c | 887 | Documentation/fault-injection/provoke-crashes.txt |
| 847 | 888 | ||
| 848 | config FAULT_INJECTION | 889 | config FAULT_INJECTION |
| 849 | bool "Fault-injection framework" | 890 | bool "Fault-injection framework" |
| @@ -912,7 +953,7 @@ config LATENCYTOP | |||
| 912 | 953 | ||
| 913 | config SYSCTL_SYSCALL_CHECK | 954 | config SYSCTL_SYSCALL_CHECK |
| 914 | bool "Sysctl checks" | 955 | bool "Sysctl checks" |
| 915 | depends on SYSCTL_SYSCALL | 956 | depends on SYSCTL |
| 916 | ---help--- | 957 | ---help--- |
| 917 | sys_sysctl uses binary paths that have been found challenging | 958 | sys_sysctl uses binary paths that have been found challenging |
| 918 | to properly maintain and use. This enables checks that help | 959 | to properly maintain and use. This enables checks that help |
diff --git a/lib/Makefile b/lib/Makefile index 2e78277eff9d..0d4015205c64 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
| @@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o | |||
| 21 | 21 | ||
| 22 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 22 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
| 23 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ | 23 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
| 24 | string_helpers.o gcd.o | 24 | string_helpers.o gcd.o lcm.o list_sort.o |
| 25 | 25 | ||
| 26 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | 26 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) |
| 27 | CFLAGS_kobject.o += -DDEBUG | 27 | CFLAGS_kobject.o += -DDEBUG |
| @@ -41,6 +41,7 @@ lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o | |||
| 41 | obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o | 41 | obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o |
| 42 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o | 42 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o |
| 43 | obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o | 43 | obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o |
| 44 | obj-$(CONFIG_BTREE) += btree.o | ||
| 44 | obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o | 45 | obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o |
| 45 | obj-$(CONFIG_DEBUG_LIST) += list_debug.o | 46 | obj-$(CONFIG_DEBUG_LIST) += list_debug.o |
| 46 | obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o | 47 | obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o |
| @@ -69,6 +70,7 @@ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ | |||
| 69 | lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o | 70 | lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o |
| 70 | lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o | 71 | lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o |
| 71 | lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o | 72 | lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o |
| 73 | lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o | ||
| 72 | 74 | ||
| 73 | obj-$(CONFIG_TEXTSEARCH) += textsearch.o | 75 | obj-$(CONFIG_TEXTSEARCH) += textsearch.o |
| 74 | obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o | 76 | obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o |
| @@ -91,6 +93,8 @@ obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o | |||
| 91 | 93 | ||
| 92 | obj-$(CONFIG_NLATTR) += nlattr.o | 94 | obj-$(CONFIG_NLATTR) += nlattr.o |
| 93 | 95 | ||
| 96 | obj-$(CONFIG_LRU_CACHE) += lru_cache.o | ||
| 97 | |||
| 94 | obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o | 98 | obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o |
| 95 | 99 | ||
| 96 | obj-$(CONFIG_GENERIC_CSUM) += checksum.o | 100 | obj-$(CONFIG_GENERIC_CSUM) += checksum.o |
diff --git a/lib/argv_split.c b/lib/argv_split.c index 5205a8dae5bc..4b1b083f219c 100644 --- a/lib/argv_split.c +++ b/lib/argv_split.c | |||
| @@ -4,17 +4,10 @@ | |||
| 4 | 4 | ||
| 5 | #include <linux/kernel.h> | 5 | #include <linux/kernel.h> |
| 6 | #include <linux/ctype.h> | 6 | #include <linux/ctype.h> |
| 7 | #include <linux/string.h> | ||
| 7 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
| 8 | #include <linux/module.h> | 9 | #include <linux/module.h> |
| 9 | 10 | ||
| 10 | static const char *skip_sep(const char *cp) | ||
| 11 | { | ||
| 12 | while (*cp && isspace(*cp)) | ||
| 13 | cp++; | ||
| 14 | |||
| 15 | return cp; | ||
| 16 | } | ||
| 17 | |||
| 18 | static const char *skip_arg(const char *cp) | 11 | static const char *skip_arg(const char *cp) |
| 19 | { | 12 | { |
| 20 | while (*cp && !isspace(*cp)) | 13 | while (*cp && !isspace(*cp)) |
| @@ -28,7 +21,7 @@ static int count_argc(const char *str) | |||
| 28 | int count = 0; | 21 | int count = 0; |
| 29 | 22 | ||
| 30 | while (*str) { | 23 | while (*str) { |
| 31 | str = skip_sep(str); | 24 | str = skip_spaces(str); |
| 32 | if (*str) { | 25 | if (*str) { |
| 33 | count++; | 26 | count++; |
| 34 | str = skip_arg(str); | 27 | str = skip_arg(str); |
| @@ -82,7 +75,7 @@ char **argv_split(gfp_t gfp, const char *str, int *argcp) | |||
| 82 | argvp = argv; | 75 | argvp = argv; |
| 83 | 76 | ||
| 84 | while (*str) { | 77 | while (*str) { |
| 85 | str = skip_sep(str); | 78 | str = skip_spaces(str); |
| 86 | 79 | ||
| 87 | if (*str) { | 80 | if (*str) { |
| 88 | const char *p = str; | 81 | const char *p = str; |
diff --git a/lib/bitmap.c b/lib/bitmap.c index 702565821c99..ffb78c916ccd 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c | |||
| @@ -271,6 +271,87 @@ int __bitmap_weight(const unsigned long *bitmap, int bits) | |||
| 271 | } | 271 | } |
| 272 | EXPORT_SYMBOL(__bitmap_weight); | 272 | EXPORT_SYMBOL(__bitmap_weight); |
| 273 | 273 | ||
| 274 | #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) | ||
| 275 | |||
| 276 | void bitmap_set(unsigned long *map, int start, int nr) | ||
| 277 | { | ||
| 278 | unsigned long *p = map + BIT_WORD(start); | ||
| 279 | const int size = start + nr; | ||
| 280 | int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); | ||
| 281 | unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); | ||
| 282 | |||
| 283 | while (nr - bits_to_set >= 0) { | ||
| 284 | *p |= mask_to_set; | ||
| 285 | nr -= bits_to_set; | ||
| 286 | bits_to_set = BITS_PER_LONG; | ||
| 287 | mask_to_set = ~0UL; | ||
| 288 | p++; | ||
| 289 | } | ||
| 290 | if (nr) { | ||
| 291 | mask_to_set &= BITMAP_LAST_WORD_MASK(size); | ||
| 292 | *p |= mask_to_set; | ||
| 293 | } | ||
| 294 | } | ||
| 295 | EXPORT_SYMBOL(bitmap_set); | ||
| 296 | |||
| 297 | void bitmap_clear(unsigned long *map, int start, int nr) | ||
| 298 | { | ||
| 299 | unsigned long *p = map + BIT_WORD(start); | ||
| 300 | const int size = start + nr; | ||
| 301 | int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); | ||
| 302 | unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); | ||
| 303 | |||
| 304 | while (nr - bits_to_clear >= 0) { | ||
| 305 | *p &= ~mask_to_clear; | ||
| 306 | nr -= bits_to_clear; | ||
| 307 | bits_to_clear = BITS_PER_LONG; | ||
| 308 | mask_to_clear = ~0UL; | ||
| 309 | p++; | ||
| 310 | } | ||
| 311 | if (nr) { | ||
| 312 | mask_to_clear &= BITMAP_LAST_WORD_MASK(size); | ||
| 313 | *p &= ~mask_to_clear; | ||
| 314 | } | ||
| 315 | } | ||
| 316 | EXPORT_SYMBOL(bitmap_clear); | ||
| 317 | |||
| 318 | /* | ||
| 319 | * bitmap_find_next_zero_area - find a contiguous aligned zero area | ||
| 320 | * @map: The address to base the search on | ||
| 321 | * @size: The bitmap size in bits | ||
| 322 | * @start: The bitnumber to start searching at | ||
| 323 | * @nr: The number of zeroed bits we're looking for | ||
| 324 | * @align_mask: Alignment mask for zero area | ||
| 325 | * | ||
| 326 | * The @align_mask should be one less than a power of 2; the effect is that | ||
| 327 | * the bit offset of all zero areas this function finds is multiples of that | ||
| 328 | * power of 2. A @align_mask of 0 means no alignment is required. | ||
| 329 | */ | ||
| 330 | unsigned long bitmap_find_next_zero_area(unsigned long *map, | ||
| 331 | unsigned long size, | ||
| 332 | unsigned long start, | ||
| 333 | unsigned int nr, | ||
| 334 | unsigned long align_mask) | ||
| 335 | { | ||
| 336 | unsigned long index, end, i; | ||
| 337 | again: | ||
| 338 | index = find_next_zero_bit(map, size, start); | ||
| 339 | |||
| 340 | /* Align allocation */ | ||
| 341 | index = __ALIGN_MASK(index, align_mask); | ||
| 342 | |||
| 343 | end = index + nr; | ||
| 344 | if (end > size) | ||
| 345 | return end; | ||
| 346 | i = find_next_bit(map, end, index); | ||
| 347 | if (i < end) { | ||
| 348 | start = i + 1; | ||
| 349 | goto again; | ||
| 350 | } | ||
| 351 | return index; | ||
| 352 | } | ||
| 353 | EXPORT_SYMBOL(bitmap_find_next_zero_area); | ||
| 354 | |||
| 274 | /* | 355 | /* |
| 275 | * Bitmap printing & parsing functions: first version by Bill Irwin, | 356 | * Bitmap printing & parsing functions: first version by Bill Irwin, |
| 276 | * second version by Paul Jackson, third by Joe Korty. | 357 | * second version by Paul Jackson, third by Joe Korty. |
| @@ -406,7 +487,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen, | |||
| 406 | EXPORT_SYMBOL(__bitmap_parse); | 487 | EXPORT_SYMBOL(__bitmap_parse); |
| 407 | 488 | ||
| 408 | /** | 489 | /** |
| 409 | * bitmap_parse_user() | 490 | * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap |
| 410 | * | 491 | * |
| 411 | * @ubuf: pointer to user buffer containing string. | 492 | * @ubuf: pointer to user buffer containing string. |
| 412 | * @ulen: buffer size in bytes. If string is smaller than this | 493 | * @ulen: buffer size in bytes. If string is smaller than this |
| @@ -538,7 +619,7 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) | |||
| 538 | EXPORT_SYMBOL(bitmap_parselist); | 619 | EXPORT_SYMBOL(bitmap_parselist); |
| 539 | 620 | ||
| 540 | /** | 621 | /** |
| 541 | * bitmap_pos_to_ord(buf, pos, bits) | 622 | * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap |
| 542 | * @buf: pointer to a bitmap | 623 | * @buf: pointer to a bitmap |
| 543 | * @pos: a bit position in @buf (0 <= @pos < @bits) | 624 | * @pos: a bit position in @buf (0 <= @pos < @bits) |
| 544 | * @bits: number of valid bit positions in @buf | 625 | * @bits: number of valid bit positions in @buf |
| @@ -574,7 +655,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits) | |||
| 574 | } | 655 | } |
| 575 | 656 | ||
| 576 | /** | 657 | /** |
| 577 | * bitmap_ord_to_pos(buf, ord, bits) | 658 | * bitmap_ord_to_pos - find position of n-th set bit in bitmap |
| 578 | * @buf: pointer to bitmap | 659 | * @buf: pointer to bitmap |
| 579 | * @ord: ordinal bit position (n-th set bit, n >= 0) | 660 | * @ord: ordinal bit position (n-th set bit, n >= 0) |
| 580 | * @bits: number of valid bit positions in @buf | 661 | * @bits: number of valid bit positions in @buf |
| @@ -652,10 +733,9 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src, | |||
| 652 | bitmap_zero(dst, bits); | 733 | bitmap_zero(dst, bits); |
| 653 | 734 | ||
| 654 | w = bitmap_weight(new, bits); | 735 | w = bitmap_weight(new, bits); |
| 655 | for (oldbit = find_first_bit(src, bits); | 736 | for_each_set_bit(oldbit, src, bits) { |
| 656 | oldbit < bits; | ||
| 657 | oldbit = find_next_bit(src, bits, oldbit + 1)) { | ||
| 658 | int n = bitmap_pos_to_ord(old, oldbit, bits); | 737 | int n = bitmap_pos_to_ord(old, oldbit, bits); |
| 738 | |||
| 659 | if (n < 0 || w == 0) | 739 | if (n < 0 || w == 0) |
| 660 | set_bit(oldbit, dst); /* identity map */ | 740 | set_bit(oldbit, dst); /* identity map */ |
| 661 | else | 741 | else |
| @@ -822,9 +902,7 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig, | |||
| 822 | */ | 902 | */ |
| 823 | 903 | ||
| 824 | m = 0; | 904 | m = 0; |
| 825 | for (n = find_first_bit(relmap, bits); | 905 | for_each_set_bit(n, relmap, bits) { |
| 826 | n < bits; | ||
| 827 | n = find_next_bit(relmap, bits, n + 1)) { | ||
| 828 | /* m == bitmap_pos_to_ord(relmap, n, bits) */ | 906 | /* m == bitmap_pos_to_ord(relmap, n, bits) */ |
| 829 | if (test_bit(m, orig)) | 907 | if (test_bit(m, orig)) |
| 830 | set_bit(n, dst); | 908 | set_bit(n, dst); |
| @@ -853,9 +931,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig, | |||
| 853 | return; | 931 | return; |
| 854 | bitmap_zero(dst, bits); | 932 | bitmap_zero(dst, bits); |
| 855 | 933 | ||
| 856 | for (oldbit = find_first_bit(orig, bits); | 934 | for_each_set_bit(oldbit, orig, bits) |
| 857 | oldbit < bits; | ||
| 858 | oldbit = find_next_bit(orig, bits, oldbit + 1)) | ||
| 859 | set_bit(oldbit % sz, dst); | 935 | set_bit(oldbit % sz, dst); |
| 860 | } | 936 | } |
| 861 | EXPORT_SYMBOL(bitmap_fold); | 937 | EXPORT_SYMBOL(bitmap_fold); |
diff --git a/lib/btree.c b/lib/btree.c new file mode 100644 index 000000000000..c9c6f0351526 --- /dev/null +++ b/lib/btree.c | |||
| @@ -0,0 +1,798 @@ | |||
| 1 | /* | ||
| 2 | * lib/btree.c - Simple In-memory B+Tree | ||
| 3 | * | ||
| 4 | * As should be obvious for Linux kernel code, license is GPLv2 | ||
| 5 | * | ||
| 6 | * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org> | ||
| 7 | * Bits and pieces stolen from Peter Zijlstra's code, which is | ||
| 8 | * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com> | ||
| 9 | * GPLv2 | ||
| 10 | * | ||
| 11 | * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch | ||
| 12 | * | ||
| 13 | * A relatively simple B+Tree implementation. I have written it as a learning | ||
| 14 | * excercise to understand how B+Trees work. Turned out to be useful as well. | ||
| 15 | * | ||
| 16 | * B+Trees can be used similar to Linux radix trees (which don't have anything | ||
| 17 | * in common with textbook radix trees, beware). Prerequisite for them working | ||
| 18 | * well is that access to a random tree node is much faster than a large number | ||
| 19 | * of operations within each node. | ||
| 20 | * | ||
| 21 | * Disks have fulfilled the prerequisite for a long time. More recently DRAM | ||
| 22 | * has gained similar properties, as memory access times, when measured in cpu | ||
| 23 | * cycles, have increased. Cacheline sizes have increased as well, which also | ||
| 24 | * helps B+Trees. | ||
| 25 | * | ||
| 26 | * Compared to radix trees, B+Trees are more efficient when dealing with a | ||
| 27 | * sparsely populated address space. Between 25% and 50% of the memory is | ||
| 28 | * occupied with valid pointers. When densely populated, radix trees contain | ||
| 29 | * ~98% pointers - hard to beat. Very sparse radix trees contain only ~2% | ||
| 30 | * pointers. | ||
| 31 | * | ||
| 32 | * This particular implementation stores pointers identified by a long value. | ||
| 33 | * Storing NULL pointers is illegal, lookup will return NULL when no entry | ||
| 34 | * was found. | ||
| 35 | * | ||
| 36 | * A tricks was used that is not commonly found in textbooks. The lowest | ||
| 37 | * values are to the right, not to the left. All used slots within a node | ||
| 38 | * are on the left, all unused slots contain NUL values. Most operations | ||
| 39 | * simply loop once over all slots and terminate on the first NUL. | ||
| 40 | */ | ||
| 41 | |||
| 42 | #include <linux/btree.h> | ||
| 43 | #include <linux/cache.h> | ||
| 44 | #include <linux/kernel.h> | ||
| 45 | #include <linux/slab.h> | ||
| 46 | #include <linux/module.h> | ||
| 47 | |||
| 48 | #define MAX(a, b) ((a) > (b) ? (a) : (b)) | ||
| 49 | #define NODESIZE MAX(L1_CACHE_BYTES, 128) | ||
| 50 | |||
| 51 | struct btree_geo { | ||
| 52 | int keylen; | ||
| 53 | int no_pairs; | ||
| 54 | int no_longs; | ||
| 55 | }; | ||
| 56 | |||
| 57 | struct btree_geo btree_geo32 = { | ||
| 58 | .keylen = 1, | ||
| 59 | .no_pairs = NODESIZE / sizeof(long) / 2, | ||
| 60 | .no_longs = NODESIZE / sizeof(long) / 2, | ||
| 61 | }; | ||
| 62 | EXPORT_SYMBOL_GPL(btree_geo32); | ||
| 63 | |||
| 64 | #define LONG_PER_U64 (64 / BITS_PER_LONG) | ||
| 65 | struct btree_geo btree_geo64 = { | ||
| 66 | .keylen = LONG_PER_U64, | ||
| 67 | .no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64), | ||
| 68 | .no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)), | ||
| 69 | }; | ||
| 70 | EXPORT_SYMBOL_GPL(btree_geo64); | ||
| 71 | |||
| 72 | struct btree_geo btree_geo128 = { | ||
| 73 | .keylen = 2 * LONG_PER_U64, | ||
| 74 | .no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64), | ||
| 75 | .no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)), | ||
| 76 | }; | ||
| 77 | EXPORT_SYMBOL_GPL(btree_geo128); | ||
| 78 | |||
| 79 | static struct kmem_cache *btree_cachep; | ||
| 80 | |||
| 81 | void *btree_alloc(gfp_t gfp_mask, void *pool_data) | ||
| 82 | { | ||
| 83 | return kmem_cache_alloc(btree_cachep, gfp_mask); | ||
| 84 | } | ||
| 85 | EXPORT_SYMBOL_GPL(btree_alloc); | ||
| 86 | |||
| 87 | void btree_free(void *element, void *pool_data) | ||
| 88 | { | ||
| 89 | kmem_cache_free(btree_cachep, element); | ||
| 90 | } | ||
| 91 | EXPORT_SYMBOL_GPL(btree_free); | ||
| 92 | |||
| 93 | static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp) | ||
| 94 | { | ||
| 95 | unsigned long *node; | ||
| 96 | |||
| 97 | node = mempool_alloc(head->mempool, gfp); | ||
| 98 | if (likely(node)) | ||
| 99 | memset(node, 0, NODESIZE); | ||
| 100 | return node; | ||
| 101 | } | ||
| 102 | |||
| 103 | static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n) | ||
| 104 | { | ||
| 105 | size_t i; | ||
| 106 | |||
| 107 | for (i = 0; i < n; i++) { | ||
| 108 | if (l1[i] < l2[i]) | ||
| 109 | return -1; | ||
| 110 | if (l1[i] > l2[i]) | ||
| 111 | return 1; | ||
| 112 | } | ||
| 113 | return 0; | ||
| 114 | } | ||
| 115 | |||
| 116 | static unsigned long *longcpy(unsigned long *dest, const unsigned long *src, | ||
| 117 | size_t n) | ||
| 118 | { | ||
| 119 | size_t i; | ||
| 120 | |||
| 121 | for (i = 0; i < n; i++) | ||
| 122 | dest[i] = src[i]; | ||
| 123 | return dest; | ||
| 124 | } | ||
| 125 | |||
| 126 | static unsigned long *longset(unsigned long *s, unsigned long c, size_t n) | ||
| 127 | { | ||
| 128 | size_t i; | ||
| 129 | |||
| 130 | for (i = 0; i < n; i++) | ||
| 131 | s[i] = c; | ||
| 132 | return s; | ||
| 133 | } | ||
| 134 | |||
| 135 | static void dec_key(struct btree_geo *geo, unsigned long *key) | ||
| 136 | { | ||
| 137 | unsigned long val; | ||
| 138 | int i; | ||
| 139 | |||
| 140 | for (i = geo->keylen - 1; i >= 0; i--) { | ||
| 141 | val = key[i]; | ||
| 142 | key[i] = val - 1; | ||
| 143 | if (val) | ||
| 144 | break; | ||
| 145 | } | ||
| 146 | } | ||
| 147 | |||
| 148 | static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n) | ||
| 149 | { | ||
| 150 | return &node[n * geo->keylen]; | ||
| 151 | } | ||
| 152 | |||
| 153 | static void *bval(struct btree_geo *geo, unsigned long *node, int n) | ||
| 154 | { | ||
| 155 | return (void *)node[geo->no_longs + n]; | ||
| 156 | } | ||
| 157 | |||
| 158 | static void setkey(struct btree_geo *geo, unsigned long *node, int n, | ||
| 159 | unsigned long *key) | ||
| 160 | { | ||
| 161 | longcpy(bkey(geo, node, n), key, geo->keylen); | ||
| 162 | } | ||
| 163 | |||
| 164 | static void setval(struct btree_geo *geo, unsigned long *node, int n, | ||
| 165 | void *val) | ||
| 166 | { | ||
| 167 | node[geo->no_longs + n] = (unsigned long) val; | ||
| 168 | } | ||
| 169 | |||
| 170 | static void clearpair(struct btree_geo *geo, unsigned long *node, int n) | ||
| 171 | { | ||
| 172 | longset(bkey(geo, node, n), 0, geo->keylen); | ||
| 173 | node[geo->no_longs + n] = 0; | ||
| 174 | } | ||
| 175 | |||
| 176 | static inline void __btree_init(struct btree_head *head) | ||
| 177 | { | ||
| 178 | head->node = NULL; | ||
| 179 | head->height = 0; | ||
| 180 | } | ||
| 181 | |||
| 182 | void btree_init_mempool(struct btree_head *head, mempool_t *mempool) | ||
| 183 | { | ||
| 184 | __btree_init(head); | ||
| 185 | head->mempool = mempool; | ||
| 186 | } | ||
| 187 | EXPORT_SYMBOL_GPL(btree_init_mempool); | ||
| 188 | |||
| 189 | int btree_init(struct btree_head *head) | ||
| 190 | { | ||
| 191 | __btree_init(head); | ||
| 192 | head->mempool = mempool_create(0, btree_alloc, btree_free, NULL); | ||
| 193 | if (!head->mempool) | ||
| 194 | return -ENOMEM; | ||
| 195 | return 0; | ||
| 196 | } | ||
| 197 | EXPORT_SYMBOL_GPL(btree_init); | ||
| 198 | |||
| 199 | void btree_destroy(struct btree_head *head) | ||
| 200 | { | ||
| 201 | mempool_destroy(head->mempool); | ||
| 202 | head->mempool = NULL; | ||
| 203 | } | ||
| 204 | EXPORT_SYMBOL_GPL(btree_destroy); | ||
| 205 | |||
| 206 | void *btree_last(struct btree_head *head, struct btree_geo *geo, | ||
| 207 | unsigned long *key) | ||
| 208 | { | ||
| 209 | int height = head->height; | ||
| 210 | unsigned long *node = head->node; | ||
| 211 | |||
| 212 | if (height == 0) | ||
| 213 | return NULL; | ||
| 214 | |||
| 215 | for ( ; height > 1; height--) | ||
| 216 | node = bval(geo, node, 0); | ||
| 217 | |||
| 218 | longcpy(key, bkey(geo, node, 0), geo->keylen); | ||
| 219 | return bval(geo, node, 0); | ||
| 220 | } | ||
| 221 | EXPORT_SYMBOL_GPL(btree_last); | ||
| 222 | |||
| 223 | static int keycmp(struct btree_geo *geo, unsigned long *node, int pos, | ||
| 224 | unsigned long *key) | ||
| 225 | { | ||
| 226 | return longcmp(bkey(geo, node, pos), key, geo->keylen); | ||
| 227 | } | ||
| 228 | |||
| 229 | static int keyzero(struct btree_geo *geo, unsigned long *key) | ||
| 230 | { | ||
| 231 | int i; | ||
| 232 | |||
| 233 | for (i = 0; i < geo->keylen; i++) | ||
| 234 | if (key[i]) | ||
| 235 | return 0; | ||
| 236 | |||
| 237 | return 1; | ||
| 238 | } | ||
| 239 | |||
| 240 | void *btree_lookup(struct btree_head *head, struct btree_geo *geo, | ||
| 241 | unsigned long *key) | ||
| 242 | { | ||
| 243 | int i, height = head->height; | ||
| 244 | unsigned long *node = head->node; | ||
| 245 | |||
| 246 | if (height == 0) | ||
| 247 | return NULL; | ||
| 248 | |||
| 249 | for ( ; height > 1; height--) { | ||
| 250 | for (i = 0; i < geo->no_pairs; i++) | ||
| 251 | if (keycmp(geo, node, i, key) <= 0) | ||
| 252 | break; | ||
| 253 | if (i == geo->no_pairs) | ||
| 254 | return NULL; | ||
| 255 | node = bval(geo, node, i); | ||
| 256 | if (!node) | ||
| 257 | return NULL; | ||
| 258 | } | ||
| 259 | |||
| 260 | if (!node) | ||
| 261 | return NULL; | ||
| 262 | |||
| 263 | for (i = 0; i < geo->no_pairs; i++) | ||
| 264 | if (keycmp(geo, node, i, key) == 0) | ||
| 265 | return bval(geo, node, i); | ||
| 266 | return NULL; | ||
| 267 | } | ||
| 268 | EXPORT_SYMBOL_GPL(btree_lookup); | ||
| 269 | |||
| 270 | int btree_update(struct btree_head *head, struct btree_geo *geo, | ||
| 271 | unsigned long *key, void *val) | ||
| 272 | { | ||
| 273 | int i, height = head->height; | ||
| 274 | unsigned long *node = head->node; | ||
| 275 | |||
| 276 | if (height == 0) | ||
| 277 | return -ENOENT; | ||
| 278 | |||
| 279 | for ( ; height > 1; height--) { | ||
| 280 | for (i = 0; i < geo->no_pairs; i++) | ||
| 281 | if (keycmp(geo, node, i, key) <= 0) | ||
| 282 | break; | ||
| 283 | if (i == geo->no_pairs) | ||
| 284 | return -ENOENT; | ||
| 285 | node = bval(geo, node, i); | ||
| 286 | if (!node) | ||
| 287 | return -ENOENT; | ||
| 288 | } | ||
| 289 | |||
| 290 | if (!node) | ||
| 291 | return -ENOENT; | ||
| 292 | |||
| 293 | for (i = 0; i < geo->no_pairs; i++) | ||
| 294 | if (keycmp(geo, node, i, key) == 0) { | ||
| 295 | setval(geo, node, i, val); | ||
| 296 | return 0; | ||
| 297 | } | ||
| 298 | return -ENOENT; | ||
| 299 | } | ||
| 300 | EXPORT_SYMBOL_GPL(btree_update); | ||
| 301 | |||
| 302 | /* | ||
| 303 | * Usually this function is quite similar to normal lookup. But the key of | ||
| 304 | * a parent node may be smaller than the smallest key of all its siblings. | ||
| 305 | * In such a case we cannot just return NULL, as we have only proven that no | ||
| 306 | * key smaller than __key, but larger than this parent key exists. | ||
| 307 | * So we set __key to the parent key and retry. We have to use the smallest | ||
| 308 | * such parent key, which is the last parent key we encountered. | ||
| 309 | */ | ||
| 310 | void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, | ||
| 311 | unsigned long *__key) | ||
| 312 | { | ||
| 313 | int i, height; | ||
| 314 | unsigned long *node, *oldnode; | ||
| 315 | unsigned long *retry_key = NULL, key[geo->keylen]; | ||
| 316 | |||
| 317 | if (keyzero(geo, __key)) | ||
| 318 | return NULL; | ||
| 319 | |||
| 320 | if (head->height == 0) | ||
| 321 | return NULL; | ||
| 322 | retry: | ||
| 323 | longcpy(key, __key, geo->keylen); | ||
| 324 | dec_key(geo, key); | ||
| 325 | |||
| 326 | node = head->node; | ||
| 327 | for (height = head->height ; height > 1; height--) { | ||
| 328 | for (i = 0; i < geo->no_pairs; i++) | ||
| 329 | if (keycmp(geo, node, i, key) <= 0) | ||
| 330 | break; | ||
| 331 | if (i == geo->no_pairs) | ||
| 332 | goto miss; | ||
| 333 | oldnode = node; | ||
| 334 | node = bval(geo, node, i); | ||
| 335 | if (!node) | ||
| 336 | goto miss; | ||
| 337 | retry_key = bkey(geo, oldnode, i); | ||
| 338 | } | ||
| 339 | |||
| 340 | if (!node) | ||
| 341 | goto miss; | ||
| 342 | |||
| 343 | for (i = 0; i < geo->no_pairs; i++) { | ||
| 344 | if (keycmp(geo, node, i, key) <= 0) { | ||
| 345 | if (bval(geo, node, i)) { | ||
| 346 | longcpy(__key, bkey(geo, node, i), geo->keylen); | ||
| 347 | return bval(geo, node, i); | ||
| 348 | } else | ||
| 349 | goto miss; | ||
| 350 | } | ||
| 351 | } | ||
| 352 | miss: | ||
| 353 | if (retry_key) { | ||
| 354 | __key = retry_key; | ||
| 355 | retry_key = NULL; | ||
| 356 | goto retry; | ||
| 357 | } | ||
| 358 | return NULL; | ||
| 359 | } | ||
| 360 | |||
| 361 | static int getpos(struct btree_geo *geo, unsigned long *node, | ||
| 362 | unsigned long *key) | ||
| 363 | { | ||
| 364 | int i; | ||
| 365 | |||
| 366 | for (i = 0; i < geo->no_pairs; i++) { | ||
| 367 | if (keycmp(geo, node, i, key) <= 0) | ||
| 368 | break; | ||
| 369 | } | ||
| 370 | return i; | ||
| 371 | } | ||
| 372 | |||
| 373 | static int getfill(struct btree_geo *geo, unsigned long *node, int start) | ||
| 374 | { | ||
| 375 | int i; | ||
| 376 | |||
| 377 | for (i = start; i < geo->no_pairs; i++) | ||
| 378 | if (!bval(geo, node, i)) | ||
| 379 | break; | ||
| 380 | return i; | ||
| 381 | } | ||
| 382 | |||
| 383 | /* | ||
| 384 | * locate the correct leaf node in the btree | ||
| 385 | */ | ||
| 386 | static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo, | ||
| 387 | unsigned long *key, int level) | ||
| 388 | { | ||
| 389 | unsigned long *node = head->node; | ||
| 390 | int i, height; | ||
| 391 | |||
| 392 | for (height = head->height; height > level; height--) { | ||
| 393 | for (i = 0; i < geo->no_pairs; i++) | ||
| 394 | if (keycmp(geo, node, i, key) <= 0) | ||
| 395 | break; | ||
| 396 | |||
| 397 | if ((i == geo->no_pairs) || !bval(geo, node, i)) { | ||
| 398 | /* right-most key is too large, update it */ | ||
| 399 | /* FIXME: If the right-most key on higher levels is | ||
| 400 | * always zero, this wouldn't be necessary. */ | ||
| 401 | i--; | ||
| 402 | setkey(geo, node, i, key); | ||
| 403 | } | ||
| 404 | BUG_ON(i < 0); | ||
| 405 | node = bval(geo, node, i); | ||
| 406 | } | ||
| 407 | BUG_ON(!node); | ||
| 408 | return node; | ||
| 409 | } | ||
| 410 | |||
| 411 | static int btree_grow(struct btree_head *head, struct btree_geo *geo, | ||
| 412 | gfp_t gfp) | ||
| 413 | { | ||
| 414 | unsigned long *node; | ||
| 415 | int fill; | ||
| 416 | |||
| 417 | node = btree_node_alloc(head, gfp); | ||
| 418 | if (!node) | ||
| 419 | return -ENOMEM; | ||
| 420 | if (head->node) { | ||
| 421 | fill = getfill(geo, head->node, 0); | ||
| 422 | setkey(geo, node, 0, bkey(geo, head->node, fill - 1)); | ||
| 423 | setval(geo, node, 0, head->node); | ||
| 424 | } | ||
| 425 | head->node = node; | ||
| 426 | head->height++; | ||
| 427 | return 0; | ||
| 428 | } | ||
| 429 | |||
| 430 | static void btree_shrink(struct btree_head *head, struct btree_geo *geo) | ||
| 431 | { | ||
| 432 | unsigned long *node; | ||
| 433 | int fill; | ||
| 434 | |||
| 435 | if (head->height <= 1) | ||
| 436 | return; | ||
| 437 | |||
| 438 | node = head->node; | ||
| 439 | fill = getfill(geo, node, 0); | ||
| 440 | BUG_ON(fill > 1); | ||
| 441 | head->node = bval(geo, node, 0); | ||
| 442 | head->height--; | ||
| 443 | mempool_free(node, head->mempool); | ||
| 444 | } | ||
| 445 | |||
| 446 | static int btree_insert_level(struct btree_head *head, struct btree_geo *geo, | ||
| 447 | unsigned long *key, void *val, int level, | ||
| 448 | gfp_t gfp) | ||
| 449 | { | ||
| 450 | unsigned long *node; | ||
| 451 | int i, pos, fill, err; | ||
| 452 | |||
| 453 | BUG_ON(!val); | ||
| 454 | if (head->height < level) { | ||
| 455 | err = btree_grow(head, geo, gfp); | ||
| 456 | if (err) | ||
| 457 | return err; | ||
| 458 | } | ||
| 459 | |||
| 460 | retry: | ||
| 461 | node = find_level(head, geo, key, level); | ||
| 462 | pos = getpos(geo, node, key); | ||
| 463 | fill = getfill(geo, node, pos); | ||
| 464 | /* two identical keys are not allowed */ | ||
| 465 | BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0); | ||
| 466 | |||
| 467 | if (fill == geo->no_pairs) { | ||
| 468 | /* need to split node */ | ||
| 469 | unsigned long *new; | ||
| 470 | |||
| 471 | new = btree_node_alloc(head, gfp); | ||
| 472 | if (!new) | ||
| 473 | return -ENOMEM; | ||
| 474 | err = btree_insert_level(head, geo, | ||
| 475 | bkey(geo, node, fill / 2 - 1), | ||
| 476 | new, level + 1, gfp); | ||
| 477 | if (err) { | ||
| 478 | mempool_free(new, head->mempool); | ||
| 479 | return err; | ||
| 480 | } | ||
| 481 | for (i = 0; i < fill / 2; i++) { | ||
| 482 | setkey(geo, new, i, bkey(geo, node, i)); | ||
| 483 | setval(geo, new, i, bval(geo, node, i)); | ||
| 484 | setkey(geo, node, i, bkey(geo, node, i + fill / 2)); | ||
| 485 | setval(geo, node, i, bval(geo, node, i + fill / 2)); | ||
| 486 | clearpair(geo, node, i + fill / 2); | ||
| 487 | } | ||
| 488 | if (fill & 1) { | ||
| 489 | setkey(geo, node, i, bkey(geo, node, fill - 1)); | ||
| 490 | setval(geo, node, i, bval(geo, node, fill - 1)); | ||
| 491 | clearpair(geo, node, fill - 1); | ||
| 492 | } | ||
| 493 | goto retry; | ||
| 494 | } | ||
| 495 | BUG_ON(fill >= geo->no_pairs); | ||
| 496 | |||
| 497 | /* shift and insert */ | ||
| 498 | for (i = fill; i > pos; i--) { | ||
| 499 | setkey(geo, node, i, bkey(geo, node, i - 1)); | ||
| 500 | setval(geo, node, i, bval(geo, node, i - 1)); | ||
| 501 | } | ||
| 502 | setkey(geo, node, pos, key); | ||
| 503 | setval(geo, node, pos, val); | ||
| 504 | |||
| 505 | return 0; | ||
| 506 | } | ||
| 507 | |||
| 508 | int btree_insert(struct btree_head *head, struct btree_geo *geo, | ||
| 509 | unsigned long *key, void *val, gfp_t gfp) | ||
| 510 | { | ||
| 511 | return btree_insert_level(head, geo, key, val, 1, gfp); | ||
| 512 | } | ||
| 513 | EXPORT_SYMBOL_GPL(btree_insert); | ||
| 514 | |||
| 515 | static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, | ||
| 516 | unsigned long *key, int level); | ||
| 517 | static void merge(struct btree_head *head, struct btree_geo *geo, int level, | ||
| 518 | unsigned long *left, int lfill, | ||
| 519 | unsigned long *right, int rfill, | ||
| 520 | unsigned long *parent, int lpos) | ||
| 521 | { | ||
| 522 | int i; | ||
| 523 | |||
| 524 | for (i = 0; i < rfill; i++) { | ||
| 525 | /* Move all keys to the left */ | ||
| 526 | setkey(geo, left, lfill + i, bkey(geo, right, i)); | ||
| 527 | setval(geo, left, lfill + i, bval(geo, right, i)); | ||
| 528 | } | ||
| 529 | /* Exchange left and right child in parent */ | ||
| 530 | setval(geo, parent, lpos, right); | ||
| 531 | setval(geo, parent, lpos + 1, left); | ||
| 532 | /* Remove left (formerly right) child from parent */ | ||
| 533 | btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1); | ||
| 534 | mempool_free(right, head->mempool); | ||
| 535 | } | ||
| 536 | |||
| 537 | static void rebalance(struct btree_head *head, struct btree_geo *geo, | ||
| 538 | unsigned long *key, int level, unsigned long *child, int fill) | ||
| 539 | { | ||
| 540 | unsigned long *parent, *left = NULL, *right = NULL; | ||
| 541 | int i, no_left, no_right; | ||
| 542 | |||
| 543 | if (fill == 0) { | ||
| 544 | /* Because we don't steal entries from a neigbour, this case | ||
| 545 | * can happen. Parent node contains a single child, this | ||
| 546 | * node, so merging with a sibling never happens. | ||
| 547 | */ | ||
| 548 | btree_remove_level(head, geo, key, level + 1); | ||
| 549 | mempool_free(child, head->mempool); | ||
| 550 | return; | ||
| 551 | } | ||
| 552 | |||
| 553 | parent = find_level(head, geo, key, level + 1); | ||
| 554 | i = getpos(geo, parent, key); | ||
| 555 | BUG_ON(bval(geo, parent, i) != child); | ||
| 556 | |||
| 557 | if (i > 0) { | ||
| 558 | left = bval(geo, parent, i - 1); | ||
| 559 | no_left = getfill(geo, left, 0); | ||
| 560 | if (fill + no_left <= geo->no_pairs) { | ||
| 561 | merge(head, geo, level, | ||
| 562 | left, no_left, | ||
| 563 | child, fill, | ||
| 564 | parent, i - 1); | ||
| 565 | return; | ||
| 566 | } | ||
| 567 | } | ||
| 568 | if (i + 1 < getfill(geo, parent, i)) { | ||
| 569 | right = bval(geo, parent, i + 1); | ||
| 570 | no_right = getfill(geo, right, 0); | ||
| 571 | if (fill + no_right <= geo->no_pairs) { | ||
| 572 | merge(head, geo, level, | ||
| 573 | child, fill, | ||
| 574 | right, no_right, | ||
| 575 | parent, i); | ||
| 576 | return; | ||
| 577 | } | ||
| 578 | } | ||
| 579 | /* | ||
| 580 | * We could also try to steal one entry from the left or right | ||
| 581 | * neighbor. By not doing so we changed the invariant from | ||
| 582 | * "all nodes are at least half full" to "no two neighboring | ||
| 583 | * nodes can be merged". Which means that the average fill of | ||
| 584 | * all nodes is still half or better. | ||
| 585 | */ | ||
| 586 | } | ||
| 587 | |||
| 588 | static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, | ||
| 589 | unsigned long *key, int level) | ||
| 590 | { | ||
| 591 | unsigned long *node; | ||
| 592 | int i, pos, fill; | ||
| 593 | void *ret; | ||
| 594 | |||
| 595 | if (level > head->height) { | ||
| 596 | /* we recursed all the way up */ | ||
| 597 | head->height = 0; | ||
| 598 | head->node = NULL; | ||
| 599 | return NULL; | ||
| 600 | } | ||
| 601 | |||
| 602 | node = find_level(head, geo, key, level); | ||
| 603 | pos = getpos(geo, node, key); | ||
| 604 | fill = getfill(geo, node, pos); | ||
| 605 | if ((level == 1) && (keycmp(geo, node, pos, key) != 0)) | ||
| 606 | return NULL; | ||
| 607 | ret = bval(geo, node, pos); | ||
| 608 | |||
| 609 | /* remove and shift */ | ||
| 610 | for (i = pos; i < fill - 1; i++) { | ||
| 611 | setkey(geo, node, i, bkey(geo, node, i + 1)); | ||
| 612 | setval(geo, node, i, bval(geo, node, i + 1)); | ||
| 613 | } | ||
| 614 | clearpair(geo, node, fill - 1); | ||
| 615 | |||
| 616 | if (fill - 1 < geo->no_pairs / 2) { | ||
| 617 | if (level < head->height) | ||
| 618 | rebalance(head, geo, key, level, node, fill - 1); | ||
| 619 | else if (fill - 1 == 1) | ||
| 620 | btree_shrink(head, geo); | ||
| 621 | } | ||
| 622 | |||
| 623 | return ret; | ||
| 624 | } | ||
| 625 | |||
| 626 | void *btree_remove(struct btree_head *head, struct btree_geo *geo, | ||
| 627 | unsigned long *key) | ||
| 628 | { | ||
| 629 | if (head->height == 0) | ||
| 630 | return NULL; | ||
| 631 | |||
| 632 | return btree_remove_level(head, geo, key, 1); | ||
| 633 | } | ||
| 634 | EXPORT_SYMBOL_GPL(btree_remove); | ||
| 635 | |||
| 636 | int btree_merge(struct btree_head *target, struct btree_head *victim, | ||
| 637 | struct btree_geo *geo, gfp_t gfp) | ||
| 638 | { | ||
| 639 | unsigned long key[geo->keylen]; | ||
| 640 | unsigned long dup[geo->keylen]; | ||
| 641 | void *val; | ||
| 642 | int err; | ||
| 643 | |||
| 644 | BUG_ON(target == victim); | ||
| 645 | |||
| 646 | if (!(target->node)) { | ||
| 647 | /* target is empty, just copy fields over */ | ||
| 648 | target->node = victim->node; | ||
| 649 | target->height = victim->height; | ||
| 650 | __btree_init(victim); | ||
| 651 | return 0; | ||
| 652 | } | ||
| 653 | |||
| 654 | /* TODO: This needs some optimizations. Currently we do three tree | ||
| 655 | * walks to remove a single object from the victim. | ||
| 656 | */ | ||
| 657 | for (;;) { | ||
| 658 | if (!btree_last(victim, geo, key)) | ||
| 659 | break; | ||
| 660 | val = btree_lookup(victim, geo, key); | ||
| 661 | err = btree_insert(target, geo, key, val, gfp); | ||
| 662 | if (err) | ||
| 663 | return err; | ||
| 664 | /* We must make a copy of the key, as the original will get | ||
| 665 | * mangled inside btree_remove. */ | ||
| 666 | longcpy(dup, key, geo->keylen); | ||
| 667 | btree_remove(victim, geo, dup); | ||
| 668 | } | ||
| 669 | return 0; | ||
| 670 | } | ||
| 671 | EXPORT_SYMBOL_GPL(btree_merge); | ||
| 672 | |||
| 673 | static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo, | ||
| 674 | unsigned long *node, unsigned long opaque, | ||
| 675 | void (*func)(void *elem, unsigned long opaque, | ||
| 676 | unsigned long *key, size_t index, | ||
| 677 | void *func2), | ||
| 678 | void *func2, int reap, int height, size_t count) | ||
| 679 | { | ||
| 680 | int i; | ||
| 681 | unsigned long *child; | ||
| 682 | |||
| 683 | for (i = 0; i < geo->no_pairs; i++) { | ||
| 684 | child = bval(geo, node, i); | ||
| 685 | if (!child) | ||
| 686 | break; | ||
| 687 | if (height > 1) | ||
| 688 | count = __btree_for_each(head, geo, child, opaque, | ||
| 689 | func, func2, reap, height - 1, count); | ||
| 690 | else | ||
| 691 | func(child, opaque, bkey(geo, node, i), count++, | ||
| 692 | func2); | ||
| 693 | } | ||
| 694 | if (reap) | ||
| 695 | mempool_free(node, head->mempool); | ||
| 696 | return count; | ||
| 697 | } | ||
| 698 | |||
| 699 | static void empty(void *elem, unsigned long opaque, unsigned long *key, | ||
| 700 | size_t index, void *func2) | ||
| 701 | { | ||
| 702 | } | ||
| 703 | |||
| 704 | void visitorl(void *elem, unsigned long opaque, unsigned long *key, | ||
| 705 | size_t index, void *__func) | ||
| 706 | { | ||
| 707 | visitorl_t func = __func; | ||
| 708 | |||
| 709 | func(elem, opaque, *key, index); | ||
| 710 | } | ||
| 711 | EXPORT_SYMBOL_GPL(visitorl); | ||
| 712 | |||
| 713 | void visitor32(void *elem, unsigned long opaque, unsigned long *__key, | ||
| 714 | size_t index, void *__func) | ||
| 715 | { | ||
| 716 | visitor32_t func = __func; | ||
| 717 | u32 *key = (void *)__key; | ||
| 718 | |||
| 719 | func(elem, opaque, *key, index); | ||
| 720 | } | ||
| 721 | EXPORT_SYMBOL_GPL(visitor32); | ||
| 722 | |||
| 723 | void visitor64(void *elem, unsigned long opaque, unsigned long *__key, | ||
| 724 | size_t index, void *__func) | ||
| 725 | { | ||
| 726 | visitor64_t func = __func; | ||
| 727 | u64 *key = (void *)__key; | ||
| 728 | |||
| 729 | func(elem, opaque, *key, index); | ||
| 730 | } | ||
| 731 | EXPORT_SYMBOL_GPL(visitor64); | ||
| 732 | |||
| 733 | void visitor128(void *elem, unsigned long opaque, unsigned long *__key, | ||
| 734 | size_t index, void *__func) | ||
| 735 | { | ||
| 736 | visitor128_t func = __func; | ||
| 737 | u64 *key = (void *)__key; | ||
| 738 | |||
| 739 | func(elem, opaque, key[0], key[1], index); | ||
| 740 | } | ||
| 741 | EXPORT_SYMBOL_GPL(visitor128); | ||
| 742 | |||
| 743 | size_t btree_visitor(struct btree_head *head, struct btree_geo *geo, | ||
| 744 | unsigned long opaque, | ||
| 745 | void (*func)(void *elem, unsigned long opaque, | ||
| 746 | unsigned long *key, | ||
| 747 | size_t index, void *func2), | ||
| 748 | void *func2) | ||
| 749 | { | ||
| 750 | size_t count = 0; | ||
| 751 | |||
| 752 | if (!func2) | ||
| 753 | func = empty; | ||
| 754 | if (head->node) | ||
| 755 | count = __btree_for_each(head, geo, head->node, opaque, func, | ||
| 756 | func2, 0, head->height, 0); | ||
| 757 | return count; | ||
| 758 | } | ||
| 759 | EXPORT_SYMBOL_GPL(btree_visitor); | ||
| 760 | |||
| 761 | size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo, | ||
| 762 | unsigned long opaque, | ||
| 763 | void (*func)(void *elem, unsigned long opaque, | ||
| 764 | unsigned long *key, | ||
| 765 | size_t index, void *func2), | ||
| 766 | void *func2) | ||
| 767 | { | ||
| 768 | size_t count = 0; | ||
| 769 | |||
| 770 | if (!func2) | ||
| 771 | func = empty; | ||
| 772 | if (head->node) | ||
| 773 | count = __btree_for_each(head, geo, head->node, opaque, func, | ||
| 774 | func2, 1, head->height, 0); | ||
| 775 | __btree_init(head); | ||
| 776 | return count; | ||
| 777 | } | ||
| 778 | EXPORT_SYMBOL_GPL(btree_grim_visitor); | ||
| 779 | |||
| 780 | static int __init btree_module_init(void) | ||
| 781 | { | ||
| 782 | btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0, | ||
| 783 | SLAB_HWCACHE_ALIGN, NULL); | ||
| 784 | return 0; | ||
| 785 | } | ||
| 786 | |||
| 787 | static void __exit btree_module_exit(void) | ||
| 788 | { | ||
| 789 | kmem_cache_destroy(btree_cachep); | ||
| 790 | } | ||
| 791 | |||
| 792 | /* If core code starts using btree, initialization should happen even earlier */ | ||
| 793 | module_init(btree_module_init); | ||
| 794 | module_exit(btree_module_exit); | ||
| 795 | |||
| 796 | MODULE_AUTHOR("Joern Engel <joern@logfs.org>"); | ||
| 797 | MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); | ||
| 798 | MODULE_LICENSE("GPL"); | ||
diff --git a/lib/checksum.c b/lib/checksum.c index b2e2fd468461..097508732f34 100644 --- a/lib/checksum.c +++ b/lib/checksum.c | |||
| @@ -37,7 +37,8 @@ | |||
| 37 | 37 | ||
| 38 | #include <asm/byteorder.h> | 38 | #include <asm/byteorder.h> |
| 39 | 39 | ||
| 40 | static inline unsigned short from32to16(unsigned long x) | 40 | #ifndef do_csum |
| 41 | static inline unsigned short from32to16(unsigned int x) | ||
| 41 | { | 42 | { |
| 42 | /* add up 16-bit and 16-bit for 16+c bit */ | 43 | /* add up 16-bit and 16-bit for 16+c bit */ |
| 43 | x = (x & 0xffff) + (x >> 16); | 44 | x = (x & 0xffff) + (x >> 16); |
| @@ -49,16 +50,16 @@ static inline unsigned short from32to16(unsigned long x) | |||
| 49 | static unsigned int do_csum(const unsigned char *buff, int len) | 50 | static unsigned int do_csum(const unsigned char *buff, int len) |
| 50 | { | 51 | { |
| 51 | int odd, count; | 52 | int odd, count; |
| 52 | unsigned long result = 0; | 53 | unsigned int result = 0; |
| 53 | 54 | ||
| 54 | if (len <= 0) | 55 | if (len <= 0) |
| 55 | goto out; | 56 | goto out; |
| 56 | odd = 1 & (unsigned long) buff; | 57 | odd = 1 & (unsigned long) buff; |
| 57 | if (odd) { | 58 | if (odd) { |
| 58 | #ifdef __LITTLE_ENDIAN | 59 | #ifdef __LITTLE_ENDIAN |
| 59 | result = *buff; | ||
| 60 | #else | ||
| 61 | result += (*buff << 8); | 60 | result += (*buff << 8); |
| 61 | #else | ||
| 62 | result = *buff; | ||
| 62 | #endif | 63 | #endif |
| 63 | len--; | 64 | len--; |
| 64 | buff++; | 65 | buff++; |
| @@ -73,9 +74,9 @@ static unsigned int do_csum(const unsigned char *buff, int len) | |||
| 73 | } | 74 | } |
| 74 | count >>= 1; /* nr of 32-bit words.. */ | 75 | count >>= 1; /* nr of 32-bit words.. */ |
| 75 | if (count) { | 76 | if (count) { |
| 76 | unsigned long carry = 0; | 77 | unsigned int carry = 0; |
| 77 | do { | 78 | do { |
| 78 | unsigned long w = *(unsigned int *) buff; | 79 | unsigned int w = *(unsigned int *) buff; |
| 79 | count--; | 80 | count--; |
| 80 | buff += 4; | 81 | buff += 4; |
| 81 | result += carry; | 82 | result += carry; |
| @@ -102,6 +103,7 @@ static unsigned int do_csum(const unsigned char *buff, int len) | |||
| 102 | out: | 103 | out: |
| 103 | return result; | 104 | return result; |
| 104 | } | 105 | } |
| 106 | #endif | ||
| 105 | 107 | ||
| 106 | /* | 108 | /* |
| 107 | * This is a version of ip_compute_csum() optimized for IP headers, | 109 | * This is a version of ip_compute_csum() optimized for IP headers, |
diff --git a/lib/cpumask.c b/lib/cpumask.c index 7bb4142a502f..05d6aca7fc19 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | #include <linux/slab.h> | ||
| 1 | #include <linux/kernel.h> | 2 | #include <linux/kernel.h> |
| 2 | #include <linux/bitops.h> | 3 | #include <linux/bitops.h> |
| 3 | #include <linux/cpumask.h> | 4 | #include <linux/cpumask.h> |
diff --git a/lib/crc32.c b/lib/crc32.c index 49d1c9e3ce38..bc5b936e9142 100644 --- a/lib/crc32.c +++ b/lib/crc32.c | |||
| @@ -25,16 +25,19 @@ | |||
| 25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
| 26 | #include <linux/compiler.h> | 26 | #include <linux/compiler.h> |
| 27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
| 28 | #include <linux/slab.h> | ||
| 29 | #include <linux/init.h> | 28 | #include <linux/init.h> |
| 30 | #include <asm/atomic.h> | 29 | #include <asm/atomic.h> |
| 31 | #include "crc32defs.h" | 30 | #include "crc32defs.h" |
| 32 | #if CRC_LE_BITS == 8 | 31 | #if CRC_LE_BITS == 8 |
| 33 | #define tole(x) __constant_cpu_to_le32(x) | 32 | # define tole(x) __constant_cpu_to_le32(x) |
| 34 | #define tobe(x) __constant_cpu_to_be32(x) | ||
| 35 | #else | 33 | #else |
| 36 | #define tole(x) (x) | 34 | # define tole(x) (x) |
| 37 | #define tobe(x) (x) | 35 | #endif |
| 36 | |||
| 37 | #if CRC_BE_BITS == 8 | ||
| 38 | # define tobe(x) __constant_cpu_to_be32(x) | ||
| 39 | #else | ||
| 40 | # define tobe(x) (x) | ||
| 38 | #endif | 41 | #endif |
| 39 | #include "crc32table.h" | 42 | #include "crc32table.h" |
| 40 | 43 | ||
| @@ -42,6 +45,48 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>"); | |||
| 42 | MODULE_DESCRIPTION("Ethernet CRC32 calculations"); | 45 | MODULE_DESCRIPTION("Ethernet CRC32 calculations"); |
| 43 | MODULE_LICENSE("GPL"); | 46 | MODULE_LICENSE("GPL"); |
| 44 | 47 | ||
| 48 | #if CRC_LE_BITS == 8 || CRC_BE_BITS == 8 | ||
| 49 | |||
| 50 | static inline u32 | ||
| 51 | crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab) | ||
| 52 | { | ||
| 53 | # ifdef __LITTLE_ENDIAN | ||
| 54 | # define DO_CRC(x) crc = tab[(crc ^ (x)) & 255 ] ^ (crc >> 8) | ||
| 55 | # else | ||
| 56 | # define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8) | ||
| 57 | # endif | ||
| 58 | const u32 *b; | ||
| 59 | size_t rem_len; | ||
| 60 | |||
| 61 | /* Align it */ | ||
| 62 | if (unlikely((long)buf & 3 && len)) { | ||
| 63 | do { | ||
| 64 | DO_CRC(*buf++); | ||
| 65 | } while ((--len) && ((long)buf)&3); | ||
| 66 | } | ||
| 67 | rem_len = len & 3; | ||
| 68 | /* load data 32 bits wide, xor data 32 bits wide. */ | ||
| 69 | len = len >> 2; | ||
| 70 | b = (const u32 *)buf; | ||
| 71 | for (--b; len; --len) { | ||
| 72 | crc ^= *++b; /* use pre increment for speed */ | ||
| 73 | DO_CRC(0); | ||
| 74 | DO_CRC(0); | ||
| 75 | DO_CRC(0); | ||
| 76 | DO_CRC(0); | ||
| 77 | } | ||
| 78 | len = rem_len; | ||
| 79 | /* And the last few bytes */ | ||
| 80 | if (len) { | ||
| 81 | u8 *p = (u8 *)(b + 1) - 1; | ||
| 82 | do { | ||
| 83 | DO_CRC(*++p); /* use pre increment for speed */ | ||
| 84 | } while (--len); | ||
| 85 | } | ||
| 86 | return crc; | ||
| 87 | #undef DO_CRC | ||
| 88 | } | ||
| 89 | #endif | ||
| 45 | /** | 90 | /** |
| 46 | * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 | 91 | * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 |
| 47 | * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for | 92 | * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for |
| @@ -72,52 +117,11 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) | |||
| 72 | u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) | 117 | u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) |
| 73 | { | 118 | { |
| 74 | # if CRC_LE_BITS == 8 | 119 | # if CRC_LE_BITS == 8 |
| 75 | const u32 *b =(u32 *)p; | ||
| 76 | const u32 *tab = crc32table_le; | 120 | const u32 *tab = crc32table_le; |
| 77 | 121 | ||
| 78 | # ifdef __LITTLE_ENDIAN | ||
| 79 | # define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8) | ||
| 80 | # else | ||
| 81 | # define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8) | ||
| 82 | # endif | ||
| 83 | |||
| 84 | crc = __cpu_to_le32(crc); | 122 | crc = __cpu_to_le32(crc); |
| 85 | /* Align it */ | 123 | crc = crc32_body(crc, p, len, tab); |
| 86 | if(unlikely(((long)b)&3 && len)){ | ||
| 87 | do { | ||
| 88 | u8 *p = (u8 *)b; | ||
| 89 | DO_CRC(*p++); | ||
| 90 | b = (void *)p; | ||
| 91 | } while ((--len) && ((long)b)&3 ); | ||
| 92 | } | ||
| 93 | if(likely(len >= 4)){ | ||
| 94 | /* load data 32 bits wide, xor data 32 bits wide. */ | ||
| 95 | size_t save_len = len & 3; | ||
| 96 | len = len >> 2; | ||
| 97 | --b; /* use pre increment below(*++b) for speed */ | ||
| 98 | do { | ||
| 99 | crc ^= *++b; | ||
| 100 | DO_CRC(0); | ||
| 101 | DO_CRC(0); | ||
| 102 | DO_CRC(0); | ||
| 103 | DO_CRC(0); | ||
| 104 | } while (--len); | ||
| 105 | b++; /* point to next byte(s) */ | ||
| 106 | len = save_len; | ||
| 107 | } | ||
| 108 | /* And the last few bytes */ | ||
| 109 | if(len){ | ||
| 110 | do { | ||
| 111 | u8 *p = (u8 *)b; | ||
| 112 | DO_CRC(*p++); | ||
| 113 | b = (void *)p; | ||
| 114 | } while (--len); | ||
| 115 | } | ||
| 116 | |||
| 117 | return __le32_to_cpu(crc); | 124 | return __le32_to_cpu(crc); |
| 118 | #undef ENDIAN_SHIFT | ||
| 119 | #undef DO_CRC | ||
| 120 | |||
| 121 | # elif CRC_LE_BITS == 4 | 125 | # elif CRC_LE_BITS == 4 |
| 122 | while (len--) { | 126 | while (len--) { |
| 123 | crc ^= *p++; | 127 | crc ^= *p++; |
| @@ -170,51 +174,11 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) | |||
| 170 | u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) | 174 | u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) |
| 171 | { | 175 | { |
| 172 | # if CRC_BE_BITS == 8 | 176 | # if CRC_BE_BITS == 8 |
| 173 | const u32 *b =(u32 *)p; | ||
| 174 | const u32 *tab = crc32table_be; | 177 | const u32 *tab = crc32table_be; |
| 175 | 178 | ||
| 176 | # ifdef __LITTLE_ENDIAN | ||
| 177 | # define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8) | ||
| 178 | # else | ||
| 179 | # define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8) | ||
| 180 | # endif | ||
| 181 | |||
| 182 | crc = __cpu_to_be32(crc); | 179 | crc = __cpu_to_be32(crc); |
| 183 | /* Align it */ | 180 | crc = crc32_body(crc, p, len, tab); |
| 184 | if(unlikely(((long)b)&3 && len)){ | ||
| 185 | do { | ||
| 186 | u8 *p = (u8 *)b; | ||
| 187 | DO_CRC(*p++); | ||
| 188 | b = (u32 *)p; | ||
| 189 | } while ((--len) && ((long)b)&3 ); | ||
| 190 | } | ||
| 191 | if(likely(len >= 4)){ | ||
| 192 | /* load data 32 bits wide, xor data 32 bits wide. */ | ||
| 193 | size_t save_len = len & 3; | ||
| 194 | len = len >> 2; | ||
| 195 | --b; /* use pre increment below(*++b) for speed */ | ||
| 196 | do { | ||
| 197 | crc ^= *++b; | ||
| 198 | DO_CRC(0); | ||
| 199 | DO_CRC(0); | ||
| 200 | DO_CRC(0); | ||
| 201 | DO_CRC(0); | ||
| 202 | } while (--len); | ||
| 203 | b++; /* point to next byte(s) */ | ||
| 204 | len = save_len; | ||
| 205 | } | ||
| 206 | /* And the last few bytes */ | ||
| 207 | if(len){ | ||
| 208 | do { | ||
| 209 | u8 *p = (u8 *)b; | ||
| 210 | DO_CRC(*p++); | ||
| 211 | b = (void *)p; | ||
| 212 | } while (--len); | ||
| 213 | } | ||
| 214 | return __be32_to_cpu(crc); | 181 | return __be32_to_cpu(crc); |
| 215 | #undef ENDIAN_SHIFT | ||
| 216 | #undef DO_CRC | ||
| 217 | |||
| 218 | # elif CRC_BE_BITS == 4 | 182 | # elif CRC_BE_BITS == 4 |
| 219 | while (len--) { | 183 | while (len--) { |
| 220 | crc ^= *p++ << 24; | 184 | crc ^= *p++ << 24; |
diff --git a/lib/ctype.c b/lib/ctype.c index d02ace14a322..26baa620e95b 100644 --- a/lib/ctype.c +++ b/lib/ctype.c | |||
| @@ -7,30 +7,30 @@ | |||
| 7 | #include <linux/ctype.h> | 7 | #include <linux/ctype.h> |
| 8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
| 9 | 9 | ||
| 10 | unsigned char _ctype[] = { | 10 | const unsigned char _ctype[] = { |
| 11 | _C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ | 11 | _C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ |
| 12 | _C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ | 12 | _C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ |
| 13 | _C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ | 13 | _C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ |
| 14 | _C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ | 14 | _C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ |
| 15 | _S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ | 15 | _S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ |
| 16 | _P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ | 16 | _P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ |
| 17 | _D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ | 17 | _D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ |
| 18 | _D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ | 18 | _D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ |
| 19 | _P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ | 19 | _P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ |
| 20 | _U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ | 20 | _U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ |
| 21 | _U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ | 21 | _U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ |
| 22 | _U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ | 22 | _U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ |
| 23 | _P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ | 23 | _P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ |
| 24 | _L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ | 24 | _L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ |
| 25 | _L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ | 25 | _L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ |
| 26 | _L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ | 26 | _L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ |
| 27 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ | 27 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ |
| 28 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ | 28 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ |
| 29 | _S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ | 29 | _S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ |
| 30 | _P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ | 30 | _P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ |
| 31 | _U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ | 31 | _U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ |
| 32 | _U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ | 32 | _U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ |
| 33 | _L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ | 33 | _L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ |
| 34 | _L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ | 34 | _L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ |
| 35 | 35 | ||
| 36 | EXPORT_SYMBOL(_ctype); | 36 | EXPORT_SYMBOL(_ctype); |
diff --git a/lib/debug_locks.c b/lib/debug_locks.c index bc3b11731b9c..5bf0020b9248 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | * shut up after that. | 23 | * shut up after that. |
| 24 | */ | 24 | */ |
| 25 | int debug_locks = 1; | 25 | int debug_locks = 1; |
| 26 | EXPORT_SYMBOL_GPL(debug_locks); | ||
| 26 | 27 | ||
| 27 | /* | 28 | /* |
| 28 | * The locking-testsuite uses <debug_locks_silent> to get a | 29 | * The locking-testsuite uses <debug_locks_silent> to get a |
diff --git a/lib/debugobjects.c b/lib/debugobjects.c index eae56fddfa3b..b862b30369ff 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
| 13 | #include <linux/seq_file.h> | 13 | #include <linux/seq_file.h> |
| 14 | #include <linux/debugfs.h> | 14 | #include <linux/debugfs.h> |
| 15 | #include <linux/slab.h> | ||
| 15 | #include <linux/hash.h> | 16 | #include <linux/hash.h> |
| 16 | 17 | ||
| 17 | #define ODEBUG_HASH_BITS 14 | 18 | #define ODEBUG_HASH_BITS 14 |
| @@ -26,14 +27,14 @@ | |||
| 26 | 27 | ||
| 27 | struct debug_bucket { | 28 | struct debug_bucket { |
| 28 | struct hlist_head list; | 29 | struct hlist_head list; |
| 29 | spinlock_t lock; | 30 | raw_spinlock_t lock; |
| 30 | }; | 31 | }; |
| 31 | 32 | ||
| 32 | static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; | 33 | static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; |
| 33 | 34 | ||
| 34 | static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; | 35 | static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; |
| 35 | 36 | ||
| 36 | static DEFINE_SPINLOCK(pool_lock); | 37 | static DEFINE_RAW_SPINLOCK(pool_lock); |
| 37 | 38 | ||
| 38 | static HLIST_HEAD(obj_pool); | 39 | static HLIST_HEAD(obj_pool); |
| 39 | 40 | ||
| @@ -96,10 +97,10 @@ static int fill_pool(void) | |||
| 96 | if (!new) | 97 | if (!new) |
| 97 | return obj_pool_free; | 98 | return obj_pool_free; |
| 98 | 99 | ||
| 99 | spin_lock_irqsave(&pool_lock, flags); | 100 | raw_spin_lock_irqsave(&pool_lock, flags); |
| 100 | hlist_add_head(&new->node, &obj_pool); | 101 | hlist_add_head(&new->node, &obj_pool); |
| 101 | obj_pool_free++; | 102 | obj_pool_free++; |
| 102 | spin_unlock_irqrestore(&pool_lock, flags); | 103 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
| 103 | } | 104 | } |
| 104 | return obj_pool_free; | 105 | return obj_pool_free; |
| 105 | } | 106 | } |
| @@ -133,7 +134,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) | |||
| 133 | { | 134 | { |
| 134 | struct debug_obj *obj = NULL; | 135 | struct debug_obj *obj = NULL; |
| 135 | 136 | ||
| 136 | spin_lock(&pool_lock); | 137 | raw_spin_lock(&pool_lock); |
| 137 | if (obj_pool.first) { | 138 | if (obj_pool.first) { |
| 138 | obj = hlist_entry(obj_pool.first, typeof(*obj), node); | 139 | obj = hlist_entry(obj_pool.first, typeof(*obj), node); |
| 139 | 140 | ||
| @@ -152,7 +153,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) | |||
| 152 | if (obj_pool_free < obj_pool_min_free) | 153 | if (obj_pool_free < obj_pool_min_free) |
| 153 | obj_pool_min_free = obj_pool_free; | 154 | obj_pool_min_free = obj_pool_free; |
| 154 | } | 155 | } |
| 155 | spin_unlock(&pool_lock); | 156 | raw_spin_unlock(&pool_lock); |
| 156 | 157 | ||
| 157 | return obj; | 158 | return obj; |
| 158 | } | 159 | } |
| @@ -165,7 +166,7 @@ static void free_obj_work(struct work_struct *work) | |||
| 165 | struct debug_obj *obj; | 166 | struct debug_obj *obj; |
| 166 | unsigned long flags; | 167 | unsigned long flags; |
| 167 | 168 | ||
| 168 | spin_lock_irqsave(&pool_lock, flags); | 169 | raw_spin_lock_irqsave(&pool_lock, flags); |
| 169 | while (obj_pool_free > ODEBUG_POOL_SIZE) { | 170 | while (obj_pool_free > ODEBUG_POOL_SIZE) { |
| 170 | obj = hlist_entry(obj_pool.first, typeof(*obj), node); | 171 | obj = hlist_entry(obj_pool.first, typeof(*obj), node); |
| 171 | hlist_del(&obj->node); | 172 | hlist_del(&obj->node); |
| @@ -174,11 +175,11 @@ static void free_obj_work(struct work_struct *work) | |||
| 174 | * We release pool_lock across kmem_cache_free() to | 175 | * We release pool_lock across kmem_cache_free() to |
| 175 | * avoid contention on pool_lock. | 176 | * avoid contention on pool_lock. |
| 176 | */ | 177 | */ |
| 177 | spin_unlock_irqrestore(&pool_lock, flags); | 178 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
| 178 | kmem_cache_free(obj_cache, obj); | 179 | kmem_cache_free(obj_cache, obj); |
| 179 | spin_lock_irqsave(&pool_lock, flags); | 180 | raw_spin_lock_irqsave(&pool_lock, flags); |
| 180 | } | 181 | } |
| 181 | spin_unlock_irqrestore(&pool_lock, flags); | 182 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
| 182 | } | 183 | } |
| 183 | 184 | ||
| 184 | /* | 185 | /* |
| @@ -190,7 +191,7 @@ static void free_object(struct debug_obj *obj) | |||
| 190 | unsigned long flags; | 191 | unsigned long flags; |
| 191 | int sched = 0; | 192 | int sched = 0; |
| 192 | 193 | ||
| 193 | spin_lock_irqsave(&pool_lock, flags); | 194 | raw_spin_lock_irqsave(&pool_lock, flags); |
| 194 | /* | 195 | /* |
| 195 | * schedule work when the pool is filled and the cache is | 196 | * schedule work when the pool is filled and the cache is |
| 196 | * initialized: | 197 | * initialized: |
| @@ -200,7 +201,7 @@ static void free_object(struct debug_obj *obj) | |||
| 200 | hlist_add_head(&obj->node, &obj_pool); | 201 | hlist_add_head(&obj->node, &obj_pool); |
| 201 | obj_pool_free++; | 202 | obj_pool_free++; |
| 202 | obj_pool_used--; | 203 | obj_pool_used--; |
| 203 | spin_unlock_irqrestore(&pool_lock, flags); | 204 | raw_spin_unlock_irqrestore(&pool_lock, flags); |
| 204 | if (sched) | 205 | if (sched) |
| 205 | schedule_work(&debug_obj_work); | 206 | schedule_work(&debug_obj_work); |
| 206 | } | 207 | } |
| @@ -221,9 +222,9 @@ static void debug_objects_oom(void) | |||
| 221 | printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); | 222 | printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); |
| 222 | 223 | ||
| 223 | for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { | 224 | for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { |
| 224 | spin_lock_irqsave(&db->lock, flags); | 225 | raw_spin_lock_irqsave(&db->lock, flags); |
| 225 | hlist_move_list(&db->list, &freelist); | 226 | hlist_move_list(&db->list, &freelist); |
| 226 | spin_unlock_irqrestore(&db->lock, flags); | 227 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 227 | 228 | ||
| 228 | /* Now free them */ | 229 | /* Now free them */ |
| 229 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { | 230 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { |
| @@ -303,14 +304,14 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) | |||
| 303 | 304 | ||
| 304 | db = get_bucket((unsigned long) addr); | 305 | db = get_bucket((unsigned long) addr); |
| 305 | 306 | ||
| 306 | spin_lock_irqsave(&db->lock, flags); | 307 | raw_spin_lock_irqsave(&db->lock, flags); |
| 307 | 308 | ||
| 308 | obj = lookup_object(addr, db); | 309 | obj = lookup_object(addr, db); |
| 309 | if (!obj) { | 310 | if (!obj) { |
| 310 | obj = alloc_object(addr, db, descr); | 311 | obj = alloc_object(addr, db, descr); |
| 311 | if (!obj) { | 312 | if (!obj) { |
| 312 | debug_objects_enabled = 0; | 313 | debug_objects_enabled = 0; |
| 313 | spin_unlock_irqrestore(&db->lock, flags); | 314 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 314 | debug_objects_oom(); | 315 | debug_objects_oom(); |
| 315 | return; | 316 | return; |
| 316 | } | 317 | } |
| @@ -327,7 +328,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) | |||
| 327 | case ODEBUG_STATE_ACTIVE: | 328 | case ODEBUG_STATE_ACTIVE: |
| 328 | debug_print_object(obj, "init"); | 329 | debug_print_object(obj, "init"); |
| 329 | state = obj->state; | 330 | state = obj->state; |
| 330 | spin_unlock_irqrestore(&db->lock, flags); | 331 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 331 | debug_object_fixup(descr->fixup_init, addr, state); | 332 | debug_object_fixup(descr->fixup_init, addr, state); |
| 332 | return; | 333 | return; |
| 333 | 334 | ||
| @@ -338,7 +339,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) | |||
| 338 | break; | 339 | break; |
| 339 | } | 340 | } |
| 340 | 341 | ||
| 341 | spin_unlock_irqrestore(&db->lock, flags); | 342 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 342 | } | 343 | } |
| 343 | 344 | ||
| 344 | /** | 345 | /** |
| @@ -385,7 +386,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) | |||
| 385 | 386 | ||
| 386 | db = get_bucket((unsigned long) addr); | 387 | db = get_bucket((unsigned long) addr); |
| 387 | 388 | ||
| 388 | spin_lock_irqsave(&db->lock, flags); | 389 | raw_spin_lock_irqsave(&db->lock, flags); |
| 389 | 390 | ||
| 390 | obj = lookup_object(addr, db); | 391 | obj = lookup_object(addr, db); |
| 391 | if (obj) { | 392 | if (obj) { |
| @@ -398,7 +399,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) | |||
| 398 | case ODEBUG_STATE_ACTIVE: | 399 | case ODEBUG_STATE_ACTIVE: |
| 399 | debug_print_object(obj, "activate"); | 400 | debug_print_object(obj, "activate"); |
| 400 | state = obj->state; | 401 | state = obj->state; |
| 401 | spin_unlock_irqrestore(&db->lock, flags); | 402 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 402 | debug_object_fixup(descr->fixup_activate, addr, state); | 403 | debug_object_fixup(descr->fixup_activate, addr, state); |
| 403 | return; | 404 | return; |
| 404 | 405 | ||
| @@ -408,11 +409,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr) | |||
| 408 | default: | 409 | default: |
| 409 | break; | 410 | break; |
| 410 | } | 411 | } |
| 411 | spin_unlock_irqrestore(&db->lock, flags); | 412 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 412 | return; | 413 | return; |
| 413 | } | 414 | } |
| 414 | 415 | ||
| 415 | spin_unlock_irqrestore(&db->lock, flags); | 416 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 416 | /* | 417 | /* |
| 417 | * This happens when a static object is activated. We | 418 | * This happens when a static object is activated. We |
| 418 | * let the type specific code decide whether this is | 419 | * let the type specific code decide whether this is |
| @@ -438,7 +439,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) | |||
| 438 | 439 | ||
| 439 | db = get_bucket((unsigned long) addr); | 440 | db = get_bucket((unsigned long) addr); |
| 440 | 441 | ||
| 441 | spin_lock_irqsave(&db->lock, flags); | 442 | raw_spin_lock_irqsave(&db->lock, flags); |
| 442 | 443 | ||
| 443 | obj = lookup_object(addr, db); | 444 | obj = lookup_object(addr, db); |
| 444 | if (obj) { | 445 | if (obj) { |
| @@ -463,7 +464,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) | |||
| 463 | debug_print_object(&o, "deactivate"); | 464 | debug_print_object(&o, "deactivate"); |
| 464 | } | 465 | } |
| 465 | 466 | ||
| 466 | spin_unlock_irqrestore(&db->lock, flags); | 467 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 467 | } | 468 | } |
| 468 | 469 | ||
| 469 | /** | 470 | /** |
| @@ -483,7 +484,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr) | |||
| 483 | 484 | ||
| 484 | db = get_bucket((unsigned long) addr); | 485 | db = get_bucket((unsigned long) addr); |
| 485 | 486 | ||
| 486 | spin_lock_irqsave(&db->lock, flags); | 487 | raw_spin_lock_irqsave(&db->lock, flags); |
| 487 | 488 | ||
| 488 | obj = lookup_object(addr, db); | 489 | obj = lookup_object(addr, db); |
| 489 | if (!obj) | 490 | if (!obj) |
| @@ -498,7 +499,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr) | |||
| 498 | case ODEBUG_STATE_ACTIVE: | 499 | case ODEBUG_STATE_ACTIVE: |
| 499 | debug_print_object(obj, "destroy"); | 500 | debug_print_object(obj, "destroy"); |
| 500 | state = obj->state; | 501 | state = obj->state; |
| 501 | spin_unlock_irqrestore(&db->lock, flags); | 502 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 502 | debug_object_fixup(descr->fixup_destroy, addr, state); | 503 | debug_object_fixup(descr->fixup_destroy, addr, state); |
| 503 | return; | 504 | return; |
| 504 | 505 | ||
| @@ -509,7 +510,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr) | |||
| 509 | break; | 510 | break; |
| 510 | } | 511 | } |
| 511 | out_unlock: | 512 | out_unlock: |
| 512 | spin_unlock_irqrestore(&db->lock, flags); | 513 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 513 | } | 514 | } |
| 514 | 515 | ||
| 515 | /** | 516 | /** |
| @@ -529,7 +530,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr) | |||
| 529 | 530 | ||
| 530 | db = get_bucket((unsigned long) addr); | 531 | db = get_bucket((unsigned long) addr); |
| 531 | 532 | ||
| 532 | spin_lock_irqsave(&db->lock, flags); | 533 | raw_spin_lock_irqsave(&db->lock, flags); |
| 533 | 534 | ||
| 534 | obj = lookup_object(addr, db); | 535 | obj = lookup_object(addr, db); |
| 535 | if (!obj) | 536 | if (!obj) |
| @@ -539,17 +540,17 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr) | |||
| 539 | case ODEBUG_STATE_ACTIVE: | 540 | case ODEBUG_STATE_ACTIVE: |
| 540 | debug_print_object(obj, "free"); | 541 | debug_print_object(obj, "free"); |
| 541 | state = obj->state; | 542 | state = obj->state; |
| 542 | spin_unlock_irqrestore(&db->lock, flags); | 543 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 543 | debug_object_fixup(descr->fixup_free, addr, state); | 544 | debug_object_fixup(descr->fixup_free, addr, state); |
| 544 | return; | 545 | return; |
| 545 | default: | 546 | default: |
| 546 | hlist_del(&obj->node); | 547 | hlist_del(&obj->node); |
| 547 | spin_unlock_irqrestore(&db->lock, flags); | 548 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 548 | free_object(obj); | 549 | free_object(obj); |
| 549 | return; | 550 | return; |
| 550 | } | 551 | } |
| 551 | out_unlock: | 552 | out_unlock: |
| 552 | spin_unlock_irqrestore(&db->lock, flags); | 553 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 553 | } | 554 | } |
| 554 | 555 | ||
| 555 | #ifdef CONFIG_DEBUG_OBJECTS_FREE | 556 | #ifdef CONFIG_DEBUG_OBJECTS_FREE |
| @@ -575,7 +576,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size) | |||
| 575 | 576 | ||
| 576 | repeat: | 577 | repeat: |
| 577 | cnt = 0; | 578 | cnt = 0; |
| 578 | spin_lock_irqsave(&db->lock, flags); | 579 | raw_spin_lock_irqsave(&db->lock, flags); |
| 579 | hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { | 580 | hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { |
| 580 | cnt++; | 581 | cnt++; |
| 581 | oaddr = (unsigned long) obj->object; | 582 | oaddr = (unsigned long) obj->object; |
| @@ -587,7 +588,7 @@ repeat: | |||
| 587 | debug_print_object(obj, "free"); | 588 | debug_print_object(obj, "free"); |
| 588 | descr = obj->descr; | 589 | descr = obj->descr; |
| 589 | state = obj->state; | 590 | state = obj->state; |
| 590 | spin_unlock_irqrestore(&db->lock, flags); | 591 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 591 | debug_object_fixup(descr->fixup_free, | 592 | debug_object_fixup(descr->fixup_free, |
| 592 | (void *) oaddr, state); | 593 | (void *) oaddr, state); |
| 593 | goto repeat; | 594 | goto repeat; |
| @@ -597,7 +598,7 @@ repeat: | |||
| 597 | break; | 598 | break; |
| 598 | } | 599 | } |
| 599 | } | 600 | } |
| 600 | spin_unlock_irqrestore(&db->lock, flags); | 601 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 601 | 602 | ||
| 602 | /* Now free them */ | 603 | /* Now free them */ |
| 603 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { | 604 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { |
| @@ -783,7 +784,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) | |||
| 783 | 784 | ||
| 784 | db = get_bucket((unsigned long) addr); | 785 | db = get_bucket((unsigned long) addr); |
| 785 | 786 | ||
| 786 | spin_lock_irqsave(&db->lock, flags); | 787 | raw_spin_lock_irqsave(&db->lock, flags); |
| 787 | 788 | ||
| 788 | obj = lookup_object(addr, db); | 789 | obj = lookup_object(addr, db); |
| 789 | if (!obj && state != ODEBUG_STATE_NONE) { | 790 | if (!obj && state != ODEBUG_STATE_NONE) { |
| @@ -807,7 +808,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) | |||
| 807 | } | 808 | } |
| 808 | res = 0; | 809 | res = 0; |
| 809 | out: | 810 | out: |
| 810 | spin_unlock_irqrestore(&db->lock, flags); | 811 | raw_spin_unlock_irqrestore(&db->lock, flags); |
| 811 | if (res) | 812 | if (res) |
| 812 | debug_objects_enabled = 0; | 813 | debug_objects_enabled = 0; |
| 813 | return res; | 814 | return res; |
| @@ -907,7 +908,7 @@ void __init debug_objects_early_init(void) | |||
| 907 | int i; | 908 | int i; |
| 908 | 909 | ||
| 909 | for (i = 0; i < ODEBUG_HASH_SIZE; i++) | 910 | for (i = 0; i < ODEBUG_HASH_SIZE; i++) |
| 910 | spin_lock_init(&obj_hash[i].lock); | 911 | raw_spin_lock_init(&obj_hash[i].lock); |
| 911 | 912 | ||
| 912 | for (i = 0; i < ODEBUG_POOL_SIZE; i++) | 913 | for (i = 0; i < ODEBUG_POOL_SIZE; i++) |
| 913 | hlist_add_head(&obj_static_pool[i].node, &obj_pool); | 914 | hlist_add_head(&obj_static_pool[i].node, &obj_pool); |
diff --git a/lib/decompress.c b/lib/decompress.c index d2842f571674..a7606815541f 100644 --- a/lib/decompress.c +++ b/lib/decompress.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <linux/decompress/bunzip2.h> | 9 | #include <linux/decompress/bunzip2.h> |
| 10 | #include <linux/decompress/unlzma.h> | 10 | #include <linux/decompress/unlzma.h> |
| 11 | #include <linux/decompress/inflate.h> | 11 | #include <linux/decompress/inflate.h> |
| 12 | #include <linux/decompress/unlzo.h> | ||
| 12 | 13 | ||
| 13 | #include <linux/types.h> | 14 | #include <linux/types.h> |
| 14 | #include <linux/string.h> | 15 | #include <linux/string.h> |
| @@ -22,6 +23,9 @@ | |||
| 22 | #ifndef CONFIG_DECOMPRESS_LZMA | 23 | #ifndef CONFIG_DECOMPRESS_LZMA |
| 23 | # define unlzma NULL | 24 | # define unlzma NULL |
| 24 | #endif | 25 | #endif |
| 26 | #ifndef CONFIG_DECOMPRESS_LZO | ||
| 27 | # define unlzo NULL | ||
| 28 | #endif | ||
| 25 | 29 | ||
| 26 | static const struct compress_format { | 30 | static const struct compress_format { |
| 27 | unsigned char magic[2]; | 31 | unsigned char magic[2]; |
| @@ -32,6 +36,7 @@ static const struct compress_format { | |||
| 32 | { {037, 0236}, "gzip", gunzip }, | 36 | { {037, 0236}, "gzip", gunzip }, |
| 33 | { {0x42, 0x5a}, "bzip2", bunzip2 }, | 37 | { {0x42, 0x5a}, "bzip2", bunzip2 }, |
| 34 | { {0x5d, 0x00}, "lzma", unlzma }, | 38 | { {0x5d, 0x00}, "lzma", unlzma }, |
| 39 | { {0x89, 0x4c}, "lzo", unlzo }, | ||
| 35 | { {0, 0}, NULL, NULL } | 40 | { {0, 0}, NULL, NULL } |
| 36 | }; | 41 | }; |
| 37 | 42 | ||
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c index 600f473a5610..a4e971dee102 100644 --- a/lib/decompress_bunzip2.c +++ b/lib/decompress_bunzip2.c | |||
| @@ -299,7 +299,7 @@ static int INIT get_next_block(struct bunzip_data *bd) | |||
| 299 | again when using them (during symbol decoding).*/ | 299 | again when using them (during symbol decoding).*/ |
| 300 | base = hufGroup->base-1; | 300 | base = hufGroup->base-1; |
| 301 | limit = hufGroup->limit-1; | 301 | limit = hufGroup->limit-1; |
| 302 | /* Calculate permute[]. Concurently, initialize | 302 | /* Calculate permute[]. Concurrently, initialize |
| 303 | * temp[] and limit[]. */ | 303 | * temp[] and limit[]. */ |
| 304 | pp = 0; | 304 | pp = 0; |
| 305 | for (i = minLen; i <= maxLen; i++) { | 305 | for (i = minLen; i <= maxLen; i++) { |
| @@ -637,6 +637,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len, | |||
| 637 | 637 | ||
| 638 | /* Allocate bunzip_data. Most fields initialize to zero. */ | 638 | /* Allocate bunzip_data. Most fields initialize to zero. */ |
| 639 | bd = *bdp = malloc(i); | 639 | bd = *bdp = malloc(i); |
| 640 | if (!bd) | ||
| 641 | return RETVAL_OUT_OF_MEMORY; | ||
| 640 | memset(bd, 0, sizeof(struct bunzip_data)); | 642 | memset(bd, 0, sizeof(struct bunzip_data)); |
| 641 | /* Setup input buffer */ | 643 | /* Setup input buffer */ |
| 642 | bd->inbuf = inbuf; | 644 | bd->inbuf = inbuf; |
| @@ -664,6 +666,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len, | |||
| 664 | bd->dbufSize = 100000*(i-BZh0); | 666 | bd->dbufSize = 100000*(i-BZh0); |
| 665 | 667 | ||
| 666 | bd->dbuf = large_malloc(bd->dbufSize * sizeof(int)); | 668 | bd->dbuf = large_malloc(bd->dbufSize * sizeof(int)); |
| 669 | if (!bd->dbuf) | ||
| 670 | return RETVAL_OUT_OF_MEMORY; | ||
| 667 | return RETVAL_OK; | 671 | return RETVAL_OK; |
| 668 | } | 672 | } |
| 669 | 673 | ||
| @@ -686,7 +690,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, | |||
| 686 | 690 | ||
| 687 | if (!outbuf) { | 691 | if (!outbuf) { |
| 688 | error("Could not allocate output bufer"); | 692 | error("Could not allocate output bufer"); |
| 689 | return -1; | 693 | return RETVAL_OUT_OF_MEMORY; |
| 690 | } | 694 | } |
| 691 | if (buf) | 695 | if (buf) |
| 692 | inbuf = buf; | 696 | inbuf = buf; |
| @@ -694,6 +698,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, | |||
| 694 | inbuf = malloc(BZIP2_IOBUF_SIZE); | 698 | inbuf = malloc(BZIP2_IOBUF_SIZE); |
| 695 | if (!inbuf) { | 699 | if (!inbuf) { |
| 696 | error("Could not allocate input bufer"); | 700 | error("Could not allocate input bufer"); |
| 701 | i = RETVAL_OUT_OF_MEMORY; | ||
| 697 | goto exit_0; | 702 | goto exit_0; |
| 698 | } | 703 | } |
| 699 | i = start_bunzip(&bd, inbuf, len, fill); | 704 | i = start_bunzip(&bd, inbuf, len, fill); |
| @@ -720,11 +725,14 @@ STATIC int INIT bunzip2(unsigned char *buf, int len, | |||
| 720 | } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) { | 725 | } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) { |
| 721 | error("Compressed file ends unexpectedly"); | 726 | error("Compressed file ends unexpectedly"); |
| 722 | } | 727 | } |
| 728 | if (!bd) | ||
| 729 | goto exit_1; | ||
| 723 | if (bd->dbuf) | 730 | if (bd->dbuf) |
| 724 | large_free(bd->dbuf); | 731 | large_free(bd->dbuf); |
| 725 | if (pos) | 732 | if (pos) |
| 726 | *pos = bd->inbufPos; | 733 | *pos = bd->inbufPos; |
| 727 | free(bd); | 734 | free(bd); |
| 735 | exit_1: | ||
| 728 | if (!buf) | 736 | if (!buf) |
| 729 | free(inbuf); | 737 | free(inbuf); |
| 730 | exit_0: | 738 | exit_0: |
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c new file mode 100644 index 000000000000..bcb3a4bd68ff --- /dev/null +++ b/lib/decompress_unlzo.c | |||
| @@ -0,0 +1,217 @@ | |||
| 1 | /* | ||
| 2 | * LZO decompressor for the Linux kernel. Code borrowed from the lzo | ||
| 3 | * implementation by Markus Franz Xaver Johannes Oberhumer. | ||
| 4 | * | ||
| 5 | * Linux kernel adaptation: | ||
| 6 | * Copyright (C) 2009 | ||
| 7 | * Albin Tonnerre, Free Electrons <albin.tonnerre@free-electrons.com> | ||
| 8 | * | ||
| 9 | * Original code: | ||
| 10 | * Copyright (C) 1996-2005 Markus Franz Xaver Johannes Oberhumer | ||
| 11 | * All Rights Reserved. | ||
| 12 | * | ||
| 13 | * lzop and the LZO library are free software; you can redistribute them | ||
| 14 | * and/or modify them under the terms of the GNU General Public License as | ||
| 15 | * published by the Free Software Foundation; either version 2 of | ||
| 16 | * the License, or (at your option) any later version. | ||
| 17 | * | ||
| 18 | * This program is distributed in the hope that it will be useful, | ||
| 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 21 | * GNU General Public License for more details. | ||
| 22 | * | ||
| 23 | * You should have received a copy of the GNU General Public License | ||
| 24 | * along with this program; see the file COPYING. | ||
| 25 | * If not, write to the Free Software Foundation, Inc., | ||
| 26 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 27 | * | ||
| 28 | * Markus F.X.J. Oberhumer | ||
| 29 | * <markus@oberhumer.com> | ||
| 30 | * http://www.oberhumer.com/opensource/lzop/ | ||
| 31 | */ | ||
| 32 | |||
| 33 | #ifdef STATIC | ||
| 34 | #include "lzo/lzo1x_decompress.c" | ||
| 35 | #else | ||
| 36 | #include <linux/slab.h> | ||
| 37 | #include <linux/decompress/unlzo.h> | ||
| 38 | #endif | ||
| 39 | |||
| 40 | #include <linux/types.h> | ||
| 41 | #include <linux/lzo.h> | ||
| 42 | #include <linux/decompress/mm.h> | ||
| 43 | |||
| 44 | #include <linux/compiler.h> | ||
| 45 | #include <asm/unaligned.h> | ||
| 46 | |||
| 47 | static const unsigned char lzop_magic[] = { | ||
| 48 | 0x89, 0x4c, 0x5a, 0x4f, 0x00, 0x0d, 0x0a, 0x1a, 0x0a }; | ||
| 49 | |||
| 50 | #define LZO_BLOCK_SIZE (256*1024l) | ||
| 51 | #define HEADER_HAS_FILTER 0x00000800L | ||
| 52 | |||
| 53 | STATIC inline int INIT parse_header(u8 *input, u8 *skip) | ||
| 54 | { | ||
| 55 | int l; | ||
| 56 | u8 *parse = input; | ||
| 57 | u8 level = 0; | ||
| 58 | u16 version; | ||
| 59 | |||
| 60 | /* read magic: 9 first bits */ | ||
| 61 | for (l = 0; l < 9; l++) { | ||
| 62 | if (*parse++ != lzop_magic[l]) | ||
| 63 | return 0; | ||
| 64 | } | ||
| 65 | /* get version (2bytes), skip library version (2), | ||
| 66 | * 'need to be extracted' version (2) and | ||
| 67 | * method (1) */ | ||
| 68 | version = get_unaligned_be16(parse); | ||
| 69 | parse += 7; | ||
| 70 | if (version >= 0x0940) | ||
| 71 | level = *parse++; | ||
| 72 | if (get_unaligned_be32(parse) & HEADER_HAS_FILTER) | ||
| 73 | parse += 8; /* flags + filter info */ | ||
| 74 | else | ||
| 75 | parse += 4; /* flags */ | ||
| 76 | |||
| 77 | /* skip mode and mtime_low */ | ||
| 78 | parse += 8; | ||
| 79 | if (version >= 0x0940) | ||
| 80 | parse += 4; /* skip mtime_high */ | ||
| 81 | |||
| 82 | l = *parse++; | ||
| 83 | /* don't care about the file name, and skip checksum */ | ||
| 84 | parse += l + 4; | ||
| 85 | |||
| 86 | *skip = parse - input; | ||
| 87 | return 1; | ||
| 88 | } | ||
| 89 | |||
| 90 | STATIC inline int INIT unlzo(u8 *input, int in_len, | ||
| 91 | int (*fill) (void *, unsigned int), | ||
| 92 | int (*flush) (void *, unsigned int), | ||
| 93 | u8 *output, int *posp, | ||
| 94 | void (*error_fn) (char *x)) | ||
| 95 | { | ||
| 96 | u8 skip = 0, r = 0; | ||
| 97 | u32 src_len, dst_len; | ||
| 98 | size_t tmp; | ||
| 99 | u8 *in_buf, *in_buf_save, *out_buf; | ||
| 100 | int ret = -1; | ||
| 101 | |||
| 102 | set_error_fn(error_fn); | ||
| 103 | |||
| 104 | if (output) { | ||
| 105 | out_buf = output; | ||
| 106 | } else if (!flush) { | ||
| 107 | error("NULL output pointer and no flush function provided"); | ||
| 108 | goto exit; | ||
| 109 | } else { | ||
| 110 | out_buf = malloc(LZO_BLOCK_SIZE); | ||
| 111 | if (!out_buf) { | ||
| 112 | error("Could not allocate output buffer"); | ||
| 113 | goto exit; | ||
| 114 | } | ||
| 115 | } | ||
| 116 | |||
| 117 | if (input && fill) { | ||
| 118 | error("Both input pointer and fill function provided, don't know what to do"); | ||
| 119 | goto exit_1; | ||
| 120 | } else if (input) { | ||
| 121 | in_buf = input; | ||
| 122 | } else if (!fill || !posp) { | ||
| 123 | error("NULL input pointer and missing position pointer or fill function"); | ||
| 124 | goto exit_1; | ||
| 125 | } else { | ||
| 126 | in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE)); | ||
| 127 | if (!in_buf) { | ||
| 128 | error("Could not allocate input buffer"); | ||
| 129 | goto exit_1; | ||
| 130 | } | ||
| 131 | } | ||
| 132 | in_buf_save = in_buf; | ||
| 133 | |||
| 134 | if (posp) | ||
| 135 | *posp = 0; | ||
| 136 | |||
| 137 | if (fill) | ||
| 138 | fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE)); | ||
| 139 | |||
| 140 | if (!parse_header(input, &skip)) { | ||
| 141 | error("invalid header"); | ||
| 142 | goto exit_2; | ||
| 143 | } | ||
| 144 | in_buf += skip; | ||
| 145 | |||
| 146 | if (posp) | ||
| 147 | *posp = skip; | ||
| 148 | |||
| 149 | for (;;) { | ||
| 150 | /* read uncompressed block size */ | ||
| 151 | dst_len = get_unaligned_be32(in_buf); | ||
| 152 | in_buf += 4; | ||
| 153 | |||
| 154 | /* exit if last block */ | ||
| 155 | if (dst_len == 0) { | ||
| 156 | if (posp) | ||
| 157 | *posp += 4; | ||
| 158 | break; | ||
| 159 | } | ||
| 160 | |||
| 161 | if (dst_len > LZO_BLOCK_SIZE) { | ||
| 162 | error("dest len longer than block size"); | ||
| 163 | goto exit_2; | ||
| 164 | } | ||
| 165 | |||
| 166 | /* read compressed block size, and skip block checksum info */ | ||
| 167 | src_len = get_unaligned_be32(in_buf); | ||
| 168 | in_buf += 8; | ||
| 169 | |||
| 170 | if (src_len <= 0 || src_len > dst_len) { | ||
| 171 | error("file corrupted"); | ||
| 172 | goto exit_2; | ||
| 173 | } | ||
| 174 | |||
| 175 | /* decompress */ | ||
| 176 | tmp = dst_len; | ||
| 177 | |||
| 178 | /* When the input data is not compressed at all, | ||
| 179 | * lzo1x_decompress_safe will fail, so call memcpy() | ||
| 180 | * instead */ | ||
| 181 | if (unlikely(dst_len == src_len)) | ||
| 182 | memcpy(out_buf, in_buf, src_len); | ||
| 183 | else { | ||
| 184 | r = lzo1x_decompress_safe((u8 *) in_buf, src_len, | ||
| 185 | out_buf, &tmp); | ||
| 186 | |||
| 187 | if (r != LZO_E_OK || dst_len != tmp) { | ||
| 188 | error("Compressed data violation"); | ||
| 189 | goto exit_2; | ||
| 190 | } | ||
| 191 | } | ||
| 192 | |||
| 193 | if (flush) | ||
| 194 | flush(out_buf, dst_len); | ||
| 195 | if (output) | ||
| 196 | out_buf += dst_len; | ||
| 197 | if (posp) | ||
| 198 | *posp += src_len + 12; | ||
| 199 | if (fill) { | ||
| 200 | in_buf = in_buf_save; | ||
| 201 | fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE)); | ||
| 202 | } else | ||
| 203 | in_buf += src_len; | ||
| 204 | } | ||
| 205 | |||
| 206 | ret = 0; | ||
| 207 | exit_2: | ||
| 208 | if (!input) | ||
| 209 | free(in_buf); | ||
| 210 | exit_1: | ||
| 211 | if (!output) | ||
| 212 | free(out_buf); | ||
| 213 | exit: | ||
| 214 | return ret; | ||
| 215 | } | ||
| 216 | |||
| 217 | #define decompress unlzo | ||
diff --git a/lib/devres.c b/lib/devres.c index 72c8909006da..49368608f988 100644 --- a/lib/devres.c +++ b/lib/devres.c | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | #include <linux/pci.h> | 1 | #include <linux/pci.h> |
| 2 | #include <linux/io.h> | 2 | #include <linux/io.h> |
| 3 | #include <linux/gfp.h> | ||
| 3 | #include <linux/module.h> | 4 | #include <linux/module.h> |
| 4 | 5 | ||
| 5 | void devm_ioremap_release(struct device *dev, void *res) | 6 | void devm_ioremap_release(struct device *dev, void *res) |
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index ce6b7eabf674..01e64270e246 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
| @@ -259,7 +259,7 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, | |||
| 259 | * times. Without a hardware IOMMU this results in the | 259 | * times. Without a hardware IOMMU this results in the |
| 260 | * same device addresses being put into the dma-debug | 260 | * same device addresses being put into the dma-debug |
| 261 | * hash multiple times too. This can result in false | 261 | * hash multiple times too. This can result in false |
| 262 | * positives being reported. Therfore we implement a | 262 | * positives being reported. Therefore we implement a |
| 263 | * best-fit algorithm here which returns the entry from | 263 | * best-fit algorithm here which returns the entry from |
| 264 | * the hash which fits best to the reference value | 264 | * the hash which fits best to the reference value |
| 265 | * instead of the first-fit. | 265 | * instead of the first-fit. |
| @@ -570,7 +570,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf, | |||
| 570 | * Now parse out the first token and use it as the name for the | 570 | * Now parse out the first token and use it as the name for the |
| 571 | * driver to filter for. | 571 | * driver to filter for. |
| 572 | */ | 572 | */ |
| 573 | for (i = 0; i < NAME_MAX_LEN; ++i) { | 573 | for (i = 0; i < NAME_MAX_LEN - 1; ++i) { |
| 574 | current_driver_name[i] = buf[i]; | 574 | current_driver_name[i] = buf[i]; |
| 575 | if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) | 575 | if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) |
| 576 | break; | 576 | break; |
| @@ -587,7 +587,7 @@ out_unlock: | |||
| 587 | return count; | 587 | return count; |
| 588 | } | 588 | } |
| 589 | 589 | ||
| 590 | const struct file_operations filter_fops = { | 590 | static const struct file_operations filter_fops = { |
| 591 | .read = filter_read, | 591 | .read = filter_read, |
| 592 | .write = filter_write, | 592 | .write = filter_write, |
| 593 | }; | 593 | }; |
| @@ -670,12 +670,13 @@ static int device_dma_allocations(struct device *dev) | |||
| 670 | return count; | 670 | return count; |
| 671 | } | 671 | } |
| 672 | 672 | ||
| 673 | static int dma_debug_device_change(struct notifier_block *nb, | 673 | static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) |
| 674 | unsigned long action, void *data) | ||
| 675 | { | 674 | { |
| 676 | struct device *dev = data; | 675 | struct device *dev = data; |
| 677 | int count; | 676 | int count; |
| 678 | 677 | ||
| 678 | if (global_disable) | ||
| 679 | return 0; | ||
| 679 | 680 | ||
| 680 | switch (action) { | 681 | switch (action) { |
| 681 | case BUS_NOTIFY_UNBOUND_DRIVER: | 682 | case BUS_NOTIFY_UNBOUND_DRIVER: |
| @@ -697,6 +698,9 @@ void dma_debug_add_bus(struct bus_type *bus) | |||
| 697 | { | 698 | { |
| 698 | struct notifier_block *nb; | 699 | struct notifier_block *nb; |
| 699 | 700 | ||
| 701 | if (global_disable) | ||
| 702 | return; | ||
| 703 | |||
| 700 | nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); | 704 | nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); |
| 701 | if (nb == NULL) { | 705 | if (nb == NULL) { |
| 702 | pr_err("dma_debug_add_bus: out of memory\n"); | 706 | pr_err("dma_debug_add_bus: out of memory\n"); |
| @@ -909,6 +913,9 @@ static void check_sync(struct device *dev, | |||
| 909 | ref->size); | 913 | ref->size); |
| 910 | } | 914 | } |
| 911 | 915 | ||
| 916 | if (entry->direction == DMA_BIDIRECTIONAL) | ||
| 917 | goto out; | ||
| 918 | |||
| 912 | if (ref->direction != entry->direction) { | 919 | if (ref->direction != entry->direction) { |
| 913 | err_printk(dev, entry, "DMA-API: device driver syncs " | 920 | err_printk(dev, entry, "DMA-API: device driver syncs " |
| 914 | "DMA memory with different direction " | 921 | "DMA memory with different direction " |
| @@ -919,9 +926,6 @@ static void check_sync(struct device *dev, | |||
| 919 | dir2name[ref->direction]); | 926 | dir2name[ref->direction]); |
| 920 | } | 927 | } |
| 921 | 928 | ||
| 922 | if (entry->direction == DMA_BIDIRECTIONAL) | ||
| 923 | goto out; | ||
| 924 | |||
| 925 | if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && | 929 | if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && |
| 926 | !(ref->direction == DMA_TO_DEVICE)) | 930 | !(ref->direction == DMA_TO_DEVICE)) |
| 927 | err_printk(dev, entry, "DMA-API: device driver syncs " | 931 | err_printk(dev, entry, "DMA-API: device driver syncs " |
| @@ -944,7 +948,6 @@ static void check_sync(struct device *dev, | |||
| 944 | 948 | ||
| 945 | out: | 949 | out: |
| 946 | put_hash_bucket(bucket, &flags); | 950 | put_hash_bucket(bucket, &flags); |
| 947 | |||
| 948 | } | 951 | } |
| 949 | 952 | ||
| 950 | void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, | 953 | void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, |
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index e22c148e4b7f..d6b8b9b1abfe 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c | |||
| @@ -21,9 +21,11 @@ | |||
| 21 | #include <linux/list.h> | 21 | #include <linux/list.h> |
| 22 | #include <linux/sysctl.h> | 22 | #include <linux/sysctl.h> |
| 23 | #include <linux/ctype.h> | 23 | #include <linux/ctype.h> |
| 24 | #include <linux/string.h> | ||
| 24 | #include <linux/uaccess.h> | 25 | #include <linux/uaccess.h> |
| 25 | #include <linux/dynamic_debug.h> | 26 | #include <linux/dynamic_debug.h> |
| 26 | #include <linux/debugfs.h> | 27 | #include <linux/debugfs.h> |
| 28 | #include <linux/slab.h> | ||
| 27 | 29 | ||
| 28 | extern struct _ddebug __start___verbose[]; | 30 | extern struct _ddebug __start___verbose[]; |
| 29 | extern struct _ddebug __stop___verbose[]; | 31 | extern struct _ddebug __stop___verbose[]; |
| @@ -209,8 +211,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords) | |||
| 209 | char *end; | 211 | char *end; |
| 210 | 212 | ||
| 211 | /* Skip leading whitespace */ | 213 | /* Skip leading whitespace */ |
| 212 | while (*buf && isspace(*buf)) | 214 | buf = skip_spaces(buf); |
| 213 | buf++; | ||
| 214 | if (!*buf) | 215 | if (!*buf) |
| 215 | break; /* oh, it was trailing whitespace */ | 216 | break; /* oh, it was trailing whitespace */ |
| 216 | 217 | ||
diff --git a/lib/flex_array.c b/lib/flex_array.c index 66eef2e4483e..41b1804fa728 100644 --- a/lib/flex_array.c +++ b/lib/flex_array.c | |||
| @@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total, | |||
| 99 | ret->element_size = element_size; | 99 | ret->element_size = element_size; |
| 100 | ret->total_nr_elements = total; | 100 | ret->total_nr_elements = total; |
| 101 | if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) | 101 | if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) |
| 102 | memset(ret->parts[0], FLEX_ARRAY_FREE, | 102 | memset(&ret->parts[0], FLEX_ARRAY_FREE, |
| 103 | FLEX_ARRAY_BASE_BYTES_LEFT); | 103 | FLEX_ARRAY_BASE_BYTES_LEFT); |
| 104 | return ret; | 104 | return ret; |
| 105 | } | 105 | } |
diff --git a/lib/genalloc.c b/lib/genalloc.c index eed2bdb865e7..736c3b06398e 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c | |||
| @@ -10,7 +10,9 @@ | |||
| 10 | * Version 2. See the file COPYING for more details. | 10 | * Version 2. See the file COPYING for more details. |
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #include <linux/slab.h> | ||
| 13 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 15 | #include <linux/bitmap.h> | ||
| 14 | #include <linux/genalloc.h> | 16 | #include <linux/genalloc.h> |
| 15 | 17 | ||
| 16 | 18 | ||
| @@ -114,7 +116,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) | |||
| 114 | struct gen_pool_chunk *chunk; | 116 | struct gen_pool_chunk *chunk; |
| 115 | unsigned long addr, flags; | 117 | unsigned long addr, flags; |
| 116 | int order = pool->min_alloc_order; | 118 | int order = pool->min_alloc_order; |
| 117 | int nbits, bit, start_bit, end_bit; | 119 | int nbits, start_bit, end_bit; |
| 118 | 120 | ||
| 119 | if (size == 0) | 121 | if (size == 0) |
| 120 | return 0; | 122 | return 0; |
| @@ -129,29 +131,19 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) | |||
| 129 | end_bit -= nbits + 1; | 131 | end_bit -= nbits + 1; |
| 130 | 132 | ||
| 131 | spin_lock_irqsave(&chunk->lock, flags); | 133 | spin_lock_irqsave(&chunk->lock, flags); |
| 132 | bit = -1; | 134 | start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0, |
| 133 | while (bit + 1 < end_bit) { | 135 | nbits, 0); |
| 134 | bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1); | 136 | if (start_bit >= end_bit) { |
| 135 | if (bit >= end_bit) | ||
| 136 | break; | ||
| 137 | |||
| 138 | start_bit = bit; | ||
| 139 | if (nbits > 1) { | ||
| 140 | bit = find_next_bit(chunk->bits, bit + nbits, | ||
| 141 | bit + 1); | ||
| 142 | if (bit - start_bit < nbits) | ||
| 143 | continue; | ||
| 144 | } | ||
| 145 | |||
| 146 | addr = chunk->start_addr + | ||
| 147 | ((unsigned long)start_bit << order); | ||
| 148 | while (nbits--) | ||
| 149 | __set_bit(start_bit++, chunk->bits); | ||
| 150 | spin_unlock_irqrestore(&chunk->lock, flags); | 137 | spin_unlock_irqrestore(&chunk->lock, flags); |
| 151 | read_unlock(&pool->lock); | 138 | continue; |
| 152 | return addr; | ||
| 153 | } | 139 | } |
| 140 | |||
| 141 | addr = chunk->start_addr + ((unsigned long)start_bit << order); | ||
| 142 | |||
| 143 | bitmap_set(chunk->bits, start_bit, nbits); | ||
| 154 | spin_unlock_irqrestore(&chunk->lock, flags); | 144 | spin_unlock_irqrestore(&chunk->lock, flags); |
| 145 | read_unlock(&pool->lock); | ||
| 146 | return addr; | ||
| 155 | } | 147 | } |
| 156 | read_unlock(&pool->lock); | 148 | read_unlock(&pool->lock); |
| 157 | return 0; | 149 | return 0; |
diff --git a/lib/hweight.c b/lib/hweight.c index 389424ecb129..63ee4eb1228d 100644 --- a/lib/hweight.c +++ b/lib/hweight.c | |||
| @@ -11,11 +11,18 @@ | |||
| 11 | 11 | ||
| 12 | unsigned int hweight32(unsigned int w) | 12 | unsigned int hweight32(unsigned int w) |
| 13 | { | 13 | { |
| 14 | #ifdef ARCH_HAS_FAST_MULTIPLIER | ||
| 15 | w -= (w >> 1) & 0x55555555; | ||
| 16 | w = (w & 0x33333333) + ((w >> 2) & 0x33333333); | ||
| 17 | w = (w + (w >> 4)) & 0x0f0f0f0f; | ||
| 18 | return (w * 0x01010101) >> 24; | ||
| 19 | #else | ||
| 14 | unsigned int res = w - ((w >> 1) & 0x55555555); | 20 | unsigned int res = w - ((w >> 1) & 0x55555555); |
| 15 | res = (res & 0x33333333) + ((res >> 2) & 0x33333333); | 21 | res = (res & 0x33333333) + ((res >> 2) & 0x33333333); |
| 16 | res = (res + (res >> 4)) & 0x0F0F0F0F; | 22 | res = (res + (res >> 4)) & 0x0F0F0F0F; |
| 17 | res = res + (res >> 8); | 23 | res = res + (res >> 8); |
| 18 | return (res + (res >> 16)) & 0x000000FF; | 24 | return (res + (res >> 16)) & 0x000000FF; |
| 25 | #endif | ||
| 19 | } | 26 | } |
| 20 | EXPORT_SYMBOL(hweight32); | 27 | EXPORT_SYMBOL(hweight32); |
| 21 | 28 | ||
| @@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
| 156 | id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; | 156 | id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; |
| 157 | 157 | ||
| 158 | /* if already at the top layer, we need to grow */ | 158 | /* if already at the top layer, we need to grow */ |
| 159 | if (!(p = pa[l])) { | 159 | if (id >= 1 << (idp->layers * IDR_BITS)) { |
| 160 | *starting_id = id; | 160 | *starting_id = id; |
| 161 | return IDR_NEED_TO_GROW; | 161 | return IDR_NEED_TO_GROW; |
| 162 | } | 162 | } |
| 163 | p = pa[l]; | ||
| 164 | BUG_ON(!p); | ||
| 163 | 165 | ||
| 164 | /* If we need to go up one layer, continue the | 166 | /* If we need to go up one layer, continue the |
| 165 | * loop; otherwise, restart from the top. | 167 | * loop; otherwise, restart from the top. |
| @@ -281,7 +283,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | |||
| 281 | /** | 283 | /** |
| 282 | * idr_get_new_above - allocate new idr entry above or equal to a start id | 284 | * idr_get_new_above - allocate new idr entry above or equal to a start id |
| 283 | * @idp: idr handle | 285 | * @idp: idr handle |
| 284 | * @ptr: pointer you want associated with the ide | 286 | * @ptr: pointer you want associated with the id |
| 285 | * @start_id: id to start search at | 287 | * @start_id: id to start search at |
| 286 | * @id: pointer to the allocated handle | 288 | * @id: pointer to the allocated handle |
| 287 | * | 289 | * |
| @@ -313,7 +315,7 @@ EXPORT_SYMBOL(idr_get_new_above); | |||
| 313 | /** | 315 | /** |
| 314 | * idr_get_new - allocate new idr entry | 316 | * idr_get_new - allocate new idr entry |
| 315 | * @idp: idr handle | 317 | * @idp: idr handle |
| 316 | * @ptr: pointer you want associated with the ide | 318 | * @ptr: pointer you want associated with the id |
| 317 | * @id: pointer to the allocated handle | 319 | * @id: pointer to the allocated handle |
| 318 | * | 320 | * |
| 319 | * This is the allocate id function. It should be called with any | 321 | * This is the allocate id function. It should be called with any |
| @@ -502,7 +504,7 @@ void *idr_find(struct idr *idp, int id) | |||
| 502 | int n; | 504 | int n; |
| 503 | struct idr_layer *p; | 505 | struct idr_layer *p; |
| 504 | 506 | ||
| 505 | p = rcu_dereference(idp->top); | 507 | p = rcu_dereference_raw(idp->top); |
| 506 | if (!p) | 508 | if (!p) |
| 507 | return NULL; | 509 | return NULL; |
| 508 | n = (p->layer+1) * IDR_BITS; | 510 | n = (p->layer+1) * IDR_BITS; |
| @@ -517,7 +519,7 @@ void *idr_find(struct idr *idp, int id) | |||
| 517 | while (n > 0 && p) { | 519 | while (n > 0 && p) { |
| 518 | n -= IDR_BITS; | 520 | n -= IDR_BITS; |
| 519 | BUG_ON(n != p->layer*IDR_BITS); | 521 | BUG_ON(n != p->layer*IDR_BITS); |
| 520 | p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); | 522 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
| 521 | } | 523 | } |
| 522 | return((void *)p); | 524 | return((void *)p); |
| 523 | } | 525 | } |
| @@ -550,7 +552,7 @@ int idr_for_each(struct idr *idp, | |||
| 550 | struct idr_layer **paa = &pa[0]; | 552 | struct idr_layer **paa = &pa[0]; |
| 551 | 553 | ||
| 552 | n = idp->layers * IDR_BITS; | 554 | n = idp->layers * IDR_BITS; |
| 553 | p = rcu_dereference(idp->top); | 555 | p = rcu_dereference_raw(idp->top); |
| 554 | max = 1 << n; | 556 | max = 1 << n; |
| 555 | 557 | ||
| 556 | id = 0; | 558 | id = 0; |
| @@ -558,7 +560,7 @@ int idr_for_each(struct idr *idp, | |||
| 558 | while (n > 0 && p) { | 560 | while (n > 0 && p) { |
| 559 | n -= IDR_BITS; | 561 | n -= IDR_BITS; |
| 560 | *paa++ = p; | 562 | *paa++ = p; |
| 561 | p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); | 563 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
| 562 | } | 564 | } |
| 563 | 565 | ||
| 564 | if (p) { | 566 | if (p) { |
diff --git a/lib/inflate.c b/lib/inflate.c index d10255973a9f..677b738c2204 100644 --- a/lib/inflate.c +++ b/lib/inflate.c | |||
| @@ -103,6 +103,7 @@ | |||
| 103 | the two sets of lengths. | 103 | the two sets of lengths. |
| 104 | */ | 104 | */ |
| 105 | #include <linux/compiler.h> | 105 | #include <linux/compiler.h> |
| 106 | #include <linux/slab.h> | ||
| 106 | 107 | ||
| 107 | #ifdef RCSID | 108 | #ifdef RCSID |
| 108 | static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #"; | 109 | static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #"; |
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c index 75dbda03f4fb..c0251f4ad08b 100644 --- a/lib/iommu-helper.c +++ b/lib/iommu-helper.c | |||
| @@ -3,41 +3,7 @@ | |||
| 3 | */ | 3 | */ |
| 4 | 4 | ||
| 5 | #include <linux/module.h> | 5 | #include <linux/module.h> |
| 6 | #include <linux/bitops.h> | 6 | #include <linux/bitmap.h> |
| 7 | |||
| 8 | static unsigned long find_next_zero_area(unsigned long *map, | ||
| 9 | unsigned long size, | ||
| 10 | unsigned long start, | ||
| 11 | unsigned int nr, | ||
| 12 | unsigned long align_mask) | ||
| 13 | { | ||
| 14 | unsigned long index, end, i; | ||
| 15 | again: | ||
| 16 | index = find_next_zero_bit(map, size, start); | ||
| 17 | |||
| 18 | /* Align allocation */ | ||
| 19 | index = (index + align_mask) & ~align_mask; | ||
| 20 | |||
| 21 | end = index + nr; | ||
| 22 | if (end >= size) | ||
| 23 | return -1; | ||
| 24 | for (i = index; i < end; i++) { | ||
| 25 | if (test_bit(i, map)) { | ||
| 26 | start = i+1; | ||
| 27 | goto again; | ||
| 28 | } | ||
| 29 | } | ||
| 30 | return index; | ||
| 31 | } | ||
| 32 | |||
| 33 | void iommu_area_reserve(unsigned long *map, unsigned long i, int len) | ||
| 34 | { | ||
| 35 | unsigned long end = i + len; | ||
| 36 | while (i < end) { | ||
| 37 | __set_bit(i, map); | ||
| 38 | i++; | ||
| 39 | } | ||
| 40 | } | ||
| 41 | 7 | ||
| 42 | int iommu_is_span_boundary(unsigned int index, unsigned int nr, | 8 | int iommu_is_span_boundary(unsigned int index, unsigned int nr, |
| 43 | unsigned long shift, | 9 | unsigned long shift, |
| @@ -55,31 +21,24 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, | |||
| 55 | unsigned long align_mask) | 21 | unsigned long align_mask) |
| 56 | { | 22 | { |
| 57 | unsigned long index; | 23 | unsigned long index; |
| 24 | |||
| 25 | /* We don't want the last of the limit */ | ||
| 26 | size -= 1; | ||
| 58 | again: | 27 | again: |
| 59 | index = find_next_zero_area(map, size, start, nr, align_mask); | 28 | index = bitmap_find_next_zero_area(map, size, start, nr, align_mask); |
| 60 | if (index != -1) { | 29 | if (index < size) { |
| 61 | if (iommu_is_span_boundary(index, nr, shift, boundary_size)) { | 30 | if (iommu_is_span_boundary(index, nr, shift, boundary_size)) { |
| 62 | /* we could do more effectively */ | 31 | /* we could do more effectively */ |
| 63 | start = index + 1; | 32 | start = index + 1; |
| 64 | goto again; | 33 | goto again; |
| 65 | } | 34 | } |
| 66 | iommu_area_reserve(map, index, nr); | 35 | bitmap_set(map, index, nr); |
| 36 | return index; | ||
| 67 | } | 37 | } |
| 68 | return index; | 38 | return -1; |
| 69 | } | 39 | } |
| 70 | EXPORT_SYMBOL(iommu_area_alloc); | 40 | EXPORT_SYMBOL(iommu_area_alloc); |
| 71 | 41 | ||
| 72 | void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr) | ||
| 73 | { | ||
| 74 | unsigned long end = start + nr; | ||
| 75 | |||
| 76 | while (start < end) { | ||
| 77 | __clear_bit(start, map); | ||
| 78 | start++; | ||
| 79 | } | ||
| 80 | } | ||
| 81 | EXPORT_SYMBOL(iommu_area_free); | ||
| 82 | |||
| 83 | unsigned long iommu_num_pages(unsigned long addr, unsigned long len, | 42 | unsigned long iommu_num_pages(unsigned long addr, unsigned long len, |
| 84 | unsigned long io_page_size) | 43 | unsigned long io_page_size) |
| 85 | { | 44 | { |
diff --git a/lib/kasprintf.c b/lib/kasprintf.c index c5ff1fd10030..9c4233b23783 100644 --- a/lib/kasprintf.c +++ b/lib/kasprintf.c | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | 6 | ||
| 7 | #include <stdarg.h> | 7 | #include <stdarg.h> |
| 8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
| 9 | #include <linux/slab.h> | ||
| 9 | #include <linux/types.h> | 10 | #include <linux/types.h> |
| 10 | #include <linux/string.h> | 11 | #include <linux/string.h> |
| 11 | 12 | ||
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c index 39f1029e3525..b135d04aa48a 100644 --- a/lib/kernel_lock.c +++ b/lib/kernel_lock.c | |||
| @@ -5,10 +5,13 @@ | |||
| 5 | * relegated to obsolescence, but used by various less | 5 | * relegated to obsolescence, but used by various less |
| 6 | * important (or lazy) subsystems. | 6 | * important (or lazy) subsystems. |
| 7 | */ | 7 | */ |
| 8 | #include <linux/smp_lock.h> | ||
| 9 | #include <linux/module.h> | 8 | #include <linux/module.h> |
| 10 | #include <linux/kallsyms.h> | 9 | #include <linux/kallsyms.h> |
| 11 | #include <linux/semaphore.h> | 10 | #include <linux/semaphore.h> |
| 11 | #include <linux/smp_lock.h> | ||
| 12 | |||
| 13 | #define CREATE_TRACE_POINTS | ||
| 14 | #include <trace/events/bkl.h> | ||
| 12 | 15 | ||
| 13 | /* | 16 | /* |
| 14 | * The 'big kernel lock' | 17 | * The 'big kernel lock' |
| @@ -20,7 +23,7 @@ | |||
| 20 | * | 23 | * |
| 21 | * Don't use in new code. | 24 | * Don't use in new code. |
| 22 | */ | 25 | */ |
| 23 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); | 26 | static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag); |
| 24 | 27 | ||
| 25 | 28 | ||
| 26 | /* | 29 | /* |
| @@ -33,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); | |||
| 33 | * If it successfully gets the lock, it should increment | 36 | * If it successfully gets the lock, it should increment |
| 34 | * the preemption count like any spinlock does. | 37 | * the preemption count like any spinlock does. |
| 35 | * | 38 | * |
| 36 | * (This works on UP too - _raw_spin_trylock will never | 39 | * (This works on UP too - do_raw_spin_trylock will never |
| 37 | * return false in that case) | 40 | * return false in that case) |
| 38 | */ | 41 | */ |
| 39 | int __lockfunc __reacquire_kernel_lock(void) | 42 | int __lockfunc __reacquire_kernel_lock(void) |
| 40 | { | 43 | { |
| 41 | while (!_raw_spin_trylock(&kernel_flag)) { | 44 | while (!do_raw_spin_trylock(&kernel_flag)) { |
| 42 | if (need_resched()) | 45 | if (need_resched()) |
| 43 | return -EAGAIN; | 46 | return -EAGAIN; |
| 44 | cpu_relax(); | 47 | cpu_relax(); |
| @@ -49,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void) | |||
| 49 | 52 | ||
| 50 | void __lockfunc __release_kernel_lock(void) | 53 | void __lockfunc __release_kernel_lock(void) |
| 51 | { | 54 | { |
| 52 | _raw_spin_unlock(&kernel_flag); | 55 | do_raw_spin_unlock(&kernel_flag); |
| 53 | preempt_enable_no_resched(); | 56 | preempt_enable_no_resched(); |
| 54 | } | 57 | } |
| 55 | 58 | ||
| 56 | /* | 59 | /* |
| 57 | * These are the BKL spinlocks - we try to be polite about preemption. | 60 | * These are the BKL spinlocks - we try to be polite about preemption. |
| 58 | * If SMP is not on (ie UP preemption), this all goes away because the | 61 | * If SMP is not on (ie UP preemption), this all goes away because the |
| 59 | * _raw_spin_trylock() will always succeed. | 62 | * do_raw_spin_trylock() will always succeed. |
| 60 | */ | 63 | */ |
| 61 | #ifdef CONFIG_PREEMPT | 64 | #ifdef CONFIG_PREEMPT |
| 62 | static inline void __lock_kernel(void) | 65 | static inline void __lock_kernel(void) |
| 63 | { | 66 | { |
| 64 | preempt_disable(); | 67 | preempt_disable(); |
| 65 | if (unlikely(!_raw_spin_trylock(&kernel_flag))) { | 68 | if (unlikely(!do_raw_spin_trylock(&kernel_flag))) { |
| 66 | /* | 69 | /* |
| 67 | * If preemption was disabled even before this | 70 | * If preemption was disabled even before this |
| 68 | * was called, there's nothing we can be polite | 71 | * was called, there's nothing we can be polite |
| 69 | * about - just spin. | 72 | * about - just spin. |
| 70 | */ | 73 | */ |
| 71 | if (preempt_count() > 1) { | 74 | if (preempt_count() > 1) { |
| 72 | _raw_spin_lock(&kernel_flag); | 75 | do_raw_spin_lock(&kernel_flag); |
| 73 | return; | 76 | return; |
| 74 | } | 77 | } |
| 75 | 78 | ||
| @@ -79,10 +82,10 @@ static inline void __lock_kernel(void) | |||
| 79 | */ | 82 | */ |
| 80 | do { | 83 | do { |
| 81 | preempt_enable(); | 84 | preempt_enable(); |
| 82 | while (spin_is_locked(&kernel_flag)) | 85 | while (raw_spin_is_locked(&kernel_flag)) |
| 83 | cpu_relax(); | 86 | cpu_relax(); |
| 84 | preempt_disable(); | 87 | preempt_disable(); |
| 85 | } while (!_raw_spin_trylock(&kernel_flag)); | 88 | } while (!do_raw_spin_trylock(&kernel_flag)); |
| 86 | } | 89 | } |
| 87 | } | 90 | } |
| 88 | 91 | ||
| @@ -93,7 +96,7 @@ static inline void __lock_kernel(void) | |||
| 93 | */ | 96 | */ |
| 94 | static inline void __lock_kernel(void) | 97 | static inline void __lock_kernel(void) |
| 95 | { | 98 | { |
| 96 | _raw_spin_lock(&kernel_flag); | 99 | do_raw_spin_lock(&kernel_flag); |
| 97 | } | 100 | } |
| 98 | #endif | 101 | #endif |
| 99 | 102 | ||
| @@ -103,7 +106,7 @@ static inline void __unlock_kernel(void) | |||
| 103 | * the BKL is not covered by lockdep, so we open-code the | 106 | * the BKL is not covered by lockdep, so we open-code the |
| 104 | * unlocking sequence (and thus avoid the dep-chain ops): | 107 | * unlocking sequence (and thus avoid the dep-chain ops): |
| 105 | */ | 108 | */ |
| 106 | _raw_spin_unlock(&kernel_flag); | 109 | do_raw_spin_unlock(&kernel_flag); |
| 107 | preempt_enable(); | 110 | preempt_enable(); |
| 108 | } | 111 | } |
| 109 | 112 | ||
| @@ -113,21 +116,28 @@ static inline void __unlock_kernel(void) | |||
| 113 | * This cannot happen asynchronously, so we only need to | 116 | * This cannot happen asynchronously, so we only need to |
| 114 | * worry about other CPU's. | 117 | * worry about other CPU's. |
| 115 | */ | 118 | */ |
| 116 | void __lockfunc lock_kernel(void) | 119 | void __lockfunc _lock_kernel(const char *func, const char *file, int line) |
| 117 | { | 120 | { |
| 118 | int depth = current->lock_depth+1; | 121 | int depth = current->lock_depth + 1; |
| 119 | if (likely(!depth)) | 122 | |
| 123 | trace_lock_kernel(func, file, line); | ||
| 124 | |||
| 125 | if (likely(!depth)) { | ||
| 126 | might_sleep(); | ||
| 120 | __lock_kernel(); | 127 | __lock_kernel(); |
| 128 | } | ||
| 121 | current->lock_depth = depth; | 129 | current->lock_depth = depth; |
| 122 | } | 130 | } |
| 123 | 131 | ||
| 124 | void __lockfunc unlock_kernel(void) | 132 | void __lockfunc _unlock_kernel(const char *func, const char *file, int line) |
| 125 | { | 133 | { |
| 126 | BUG_ON(current->lock_depth < 0); | 134 | BUG_ON(current->lock_depth < 0); |
| 127 | if (likely(--current->lock_depth < 0)) | 135 | if (likely(--current->lock_depth < 0)) |
| 128 | __unlock_kernel(); | 136 | __unlock_kernel(); |
| 137 | |||
| 138 | trace_unlock_kernel(func, file, line); | ||
| 129 | } | 139 | } |
| 130 | 140 | ||
| 131 | EXPORT_SYMBOL(lock_kernel); | 141 | EXPORT_SYMBOL(_lock_kernel); |
| 132 | EXPORT_SYMBOL(unlock_kernel); | 142 | EXPORT_SYMBOL(_unlock_kernel); |
| 133 | 143 | ||
diff --git a/lib/kobject.c b/lib/kobject.c index b512b746d2af..8115eb1bbf4d 100644 --- a/lib/kobject.c +++ b/lib/kobject.c | |||
| @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr, | |||
| 700 | return ret; | 700 | return ret; |
| 701 | } | 701 | } |
| 702 | 702 | ||
| 703 | struct sysfs_ops kobj_sysfs_ops = { | 703 | const struct sysfs_ops kobj_sysfs_ops = { |
| 704 | .show = kobj_attr_show, | 704 | .show = kobj_attr_show, |
| 705 | .store = kobj_attr_store, | 705 | .store = kobj_attr_store, |
| 706 | }; | 706 | }; |
| @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = { | |||
| 789 | * If the kset was not able to be created, NULL will be returned. | 789 | * If the kset was not able to be created, NULL will be returned. |
| 790 | */ | 790 | */ |
| 791 | static struct kset *kset_create(const char *name, | 791 | static struct kset *kset_create(const char *name, |
| 792 | struct kset_uevent_ops *uevent_ops, | 792 | const struct kset_uevent_ops *uevent_ops, |
| 793 | struct kobject *parent_kobj) | 793 | struct kobject *parent_kobj) |
| 794 | { | 794 | { |
| 795 | struct kset *kset; | 795 | struct kset *kset; |
| @@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name, | |||
| 832 | * If the kset was not able to be created, NULL will be returned. | 832 | * If the kset was not able to be created, NULL will be returned. |
| 833 | */ | 833 | */ |
| 834 | struct kset *kset_create_and_add(const char *name, | 834 | struct kset *kset_create_and_add(const char *name, |
| 835 | struct kset_uevent_ops *uevent_ops, | 835 | const struct kset_uevent_ops *uevent_ops, |
| 836 | struct kobject *parent_kobj) | 836 | struct kobject *parent_kobj) |
| 837 | { | 837 | { |
| 838 | struct kset *kset; | 838 | struct kset *kset; |
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 920a3ca6e259..7b48d44ced6e 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/string.h> | 18 | #include <linux/string.h> |
| 19 | #include <linux/kobject.h> | 19 | #include <linux/kobject.h> |
| 20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
| 21 | #include <linux/slab.h> | ||
| 21 | 22 | ||
| 22 | #include <linux/socket.h> | 23 | #include <linux/socket.h> |
| 23 | #include <linux/skbuff.h> | 24 | #include <linux/skbuff.h> |
| @@ -95,7 +96,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
| 95 | const char *subsystem; | 96 | const char *subsystem; |
| 96 | struct kobject *top_kobj; | 97 | struct kobject *top_kobj; |
| 97 | struct kset *kset; | 98 | struct kset *kset; |
| 98 | struct kset_uevent_ops *uevent_ops; | 99 | const struct kset_uevent_ops *uevent_ops; |
| 99 | u64 seq; | 100 | u64 seq; |
| 100 | int i = 0; | 101 | int i = 0; |
| 101 | int retval = 0; | 102 | int retval = 0; |
diff --git a/lib/kref.c b/lib/kref.c index 9ecd6e865610..6d19f690380b 100644 --- a/lib/kref.c +++ b/lib/kref.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | 13 | ||
| 14 | #include <linux/kref.h> | 14 | #include <linux/kref.h> |
| 15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 16 | #include <linux/slab.h> | ||
| 16 | 17 | ||
| 17 | /** | 18 | /** |
| 18 | * kref_set - initialize object and set refcount to requested number. | 19 | * kref_set - initialize object and set refcount to requested number. |
diff --git a/lib/lcm.c b/lib/lcm.c new file mode 100644 index 000000000000..157cd88a6ffc --- /dev/null +++ b/lib/lcm.c | |||
| @@ -0,0 +1,15 @@ | |||
| 1 | #include <linux/kernel.h> | ||
| 2 | #include <linux/gcd.h> | ||
| 3 | #include <linux/module.h> | ||
| 4 | |||
| 5 | /* Lowest common multiple */ | ||
| 6 | unsigned long lcm(unsigned long a, unsigned long b) | ||
| 7 | { | ||
| 8 | if (a && b) | ||
| 9 | return (a * b) / gcd(a, b); | ||
| 10 | else if (b) | ||
| 11 | return b; | ||
| 12 | |||
| 13 | return a; | ||
| 14 | } | ||
| 15 | EXPORT_SYMBOL_GPL(lcm); | ||
diff --git a/lib/list_sort.c b/lib/list_sort.c new file mode 100644 index 000000000000..4b5cb794c38b --- /dev/null +++ b/lib/list_sort.c | |||
| @@ -0,0 +1,217 @@ | |||
| 1 | #include <linux/kernel.h> | ||
| 2 | #include <linux/module.h> | ||
| 3 | #include <linux/list_sort.h> | ||
| 4 | #include <linux/slab.h> | ||
| 5 | #include <linux/list.h> | ||
| 6 | |||
| 7 | #define MAX_LIST_LENGTH_BITS 20 | ||
| 8 | |||
| 9 | /* | ||
| 10 | * Returns a list organized in an intermediate format suited | ||
| 11 | * to chaining of merge() calls: null-terminated, no reserved or | ||
| 12 | * sentinel head node, "prev" links not maintained. | ||
| 13 | */ | ||
| 14 | static struct list_head *merge(void *priv, | ||
| 15 | int (*cmp)(void *priv, struct list_head *a, | ||
| 16 | struct list_head *b), | ||
| 17 | struct list_head *a, struct list_head *b) | ||
| 18 | { | ||
| 19 | struct list_head head, *tail = &head; | ||
| 20 | |||
| 21 | while (a && b) { | ||
| 22 | /* if equal, take 'a' -- important for sort stability */ | ||
| 23 | if ((*cmp)(priv, a, b) <= 0) { | ||
| 24 | tail->next = a; | ||
| 25 | a = a->next; | ||
| 26 | } else { | ||
| 27 | tail->next = b; | ||
| 28 | b = b->next; | ||
| 29 | } | ||
| 30 | tail = tail->next; | ||
| 31 | } | ||
| 32 | tail->next = a?:b; | ||
| 33 | return head.next; | ||
| 34 | } | ||
| 35 | |||
| 36 | /* | ||
| 37 | * Combine final list merge with restoration of standard doubly-linked | ||
| 38 | * list structure. This approach duplicates code from merge(), but | ||
| 39 | * runs faster than the tidier alternatives of either a separate final | ||
| 40 | * prev-link restoration pass, or maintaining the prev links | ||
| 41 | * throughout. | ||
| 42 | */ | ||
| 43 | static void merge_and_restore_back_links(void *priv, | ||
| 44 | int (*cmp)(void *priv, struct list_head *a, | ||
| 45 | struct list_head *b), | ||
| 46 | struct list_head *head, | ||
| 47 | struct list_head *a, struct list_head *b) | ||
| 48 | { | ||
| 49 | struct list_head *tail = head; | ||
| 50 | |||
| 51 | while (a && b) { | ||
| 52 | /* if equal, take 'a' -- important for sort stability */ | ||
| 53 | if ((*cmp)(priv, a, b) <= 0) { | ||
| 54 | tail->next = a; | ||
| 55 | a->prev = tail; | ||
| 56 | a = a->next; | ||
| 57 | } else { | ||
| 58 | tail->next = b; | ||
| 59 | b->prev = tail; | ||
| 60 | b = b->next; | ||
| 61 | } | ||
| 62 | tail = tail->next; | ||
| 63 | } | ||
| 64 | tail->next = a ? : b; | ||
| 65 | |||
| 66 | do { | ||
| 67 | /* | ||
| 68 | * In worst cases this loop may run many iterations. | ||
| 69 | * Continue callbacks to the client even though no | ||
| 70 | * element comparison is needed, so the client's cmp() | ||
| 71 | * routine can invoke cond_resched() periodically. | ||
| 72 | */ | ||
| 73 | (*cmp)(priv, tail, tail); | ||
| 74 | |||
| 75 | tail->next->prev = tail; | ||
| 76 | tail = tail->next; | ||
| 77 | } while (tail->next); | ||
| 78 | |||
| 79 | tail->next = head; | ||
| 80 | head->prev = tail; | ||
| 81 | } | ||
| 82 | |||
/**
 * list_sort - sort a list
 * @priv: private data, opaque to list_sort(), passed to @cmp
 * @head: the list to sort
 * @cmp: the elements comparison function
 *
 * This function implements "merge sort", which has O(nlog(n))
 * complexity.
 *
 * The comparison function @cmp must return a negative value if @a
 * should sort before @b, and a positive value if @a should sort after
 * @b. If @a and @b are equivalent, and their original relative
 * ordering is to be preserved, @cmp must return 0.
 */
void list_sort(void *priv, struct list_head *head,
		int (*cmp)(void *priv, struct list_head *a,
			struct list_head *b))
{
	struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
							-- last slot is a sentinel */
	int lev;  /* index into part[] */
	int max_lev = 0;
	struct list_head *list;

	if (list_empty(head))
		return;

	memset(part, 0, sizeof(part));

	/* break the ring: work on a NULL-terminated singly linked chain */
	head->prev->next = NULL;
	list = head->next;

	while (list) {
		/* detach the first element as a sorted run of length 1 */
		struct list_head *cur = list;
		list = list->next;
		cur->next = NULL;

		/*
		 * Cascade-merge into the pending runs: part[lev] holds a
		 * sorted run of 2^lev elements (or NULL), so this works
		 * like carry propagation in binary addition.
		 */
		for (lev = 0; part[lev]; lev++) {
			cur = merge(priv, cmp, part[lev], cur);
			part[lev] = NULL;
		}
		if (lev > max_lev) {
			/* clamp at the last real slot, keep the sentinel NULL */
			if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
				printk_once(KERN_DEBUG "list passed to"
					" list_sort() too long for"
					" efficiency\n");
				lev--;
			}
			max_lev = lev;
		}
		part[lev] = cur;
	}

	/* fold all pending runs below the top-most one into @list */
	for (lev = 0; lev < max_lev; lev++)
		if (part[lev])
			list = merge(priv, cmp, part[lev], list);

	/* final merge also restores the ->prev links and the ring */
	merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
}
EXPORT_SYMBOL(list_sort);
| 143 | |||
| 144 | #ifdef DEBUG_LIST_SORT | ||
/* element type for the self-test list: sort key plus insertion order */
struct debug_el {
	struct list_head l_h;
	int value;		/* sort key; deliberately non-unique */
	unsigned serial;	/* insertion order, used to verify stability */
};
| 150 | |||
| 151 | static int cmp(void *priv, struct list_head *a, struct list_head *b) | ||
| 152 | { | ||
| 153 | return container_of(a, struct debug_el, l_h)->value | ||
| 154 | - container_of(b, struct debug_el, l_h)->value; | ||
| 155 | } | ||
| 156 | |||
| 157 | /* | ||
| 158 | * The pattern of set bits in the list length determines which cases | ||
| 159 | * are hit in list_sort(). | ||
| 160 | */ | ||
| 161 | #define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */ | ||
| 162 | |||
| 163 | static int __init list_sort_test(void) | ||
| 164 | { | ||
| 165 | int i, r = 1, count; | ||
| 166 | struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL); | ||
| 167 | struct list_head *cur; | ||
| 168 | |||
| 169 | printk(KERN_WARNING "testing list_sort()\n"); | ||
| 170 | |||
| 171 | cur = head; | ||
| 172 | for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) { | ||
| 173 | struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL); | ||
| 174 | BUG_ON(!el); | ||
| 175 | /* force some equivalencies */ | ||
| 176 | el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3); | ||
| 177 | el->serial = i; | ||
| 178 | |||
| 179 | el->l_h.prev = cur; | ||
| 180 | cur->next = &el->l_h; | ||
| 181 | cur = cur->next; | ||
| 182 | } | ||
| 183 | head->prev = cur; | ||
| 184 | |||
| 185 | list_sort(NULL, head, cmp); | ||
| 186 | |||
| 187 | count = 1; | ||
| 188 | for (cur = head->next; cur->next != head; cur = cur->next) { | ||
| 189 | struct debug_el *el = container_of(cur, struct debug_el, l_h); | ||
| 190 | int cmp_result = cmp(NULL, cur, cur->next); | ||
| 191 | if (cur->next->prev != cur) { | ||
| 192 | printk(KERN_EMERG "list_sort() returned " | ||
| 193 | "a corrupted list!\n"); | ||
| 194 | return 1; | ||
| 195 | } else if (cmp_result > 0) { | ||
| 196 | printk(KERN_EMERG "list_sort() failed to sort!\n"); | ||
| 197 | return 1; | ||
| 198 | } else if (cmp_result == 0 && | ||
| 199 | el->serial >= container_of(cur->next, | ||
| 200 | struct debug_el, l_h)->serial) { | ||
| 201 | printk(KERN_EMERG "list_sort() failed to preserve order" | ||
| 202 | " of equivalent elements!\n"); | ||
| 203 | return 1; | ||
| 204 | } | ||
| 205 | kfree(cur->prev); | ||
| 206 | count++; | ||
| 207 | } | ||
| 208 | kfree(cur); | ||
| 209 | if (count != LIST_SORT_TEST_LENGTH) { | ||
| 210 | printk(KERN_EMERG "list_sort() returned list of" | ||
| 211 | "different length!\n"); | ||
| 212 | return 1; | ||
| 213 | } | ||
| 214 | return 0; | ||
| 215 | } | ||
| 216 | module_init(list_sort_test); | ||
| 217 | #endif | ||
| @@ -205,9 +205,8 @@ long lmb_add(u64 base, u64 size) | |||
| 205 | 205 | ||
| 206 | } | 206 | } |
| 207 | 207 | ||
| 208 | long lmb_remove(u64 base, u64 size) | 208 | static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size) |
| 209 | { | 209 | { |
| 210 | struct lmb_region *rgn = &(lmb.memory); | ||
| 211 | u64 rgnbegin, rgnend; | 210 | u64 rgnbegin, rgnend; |
| 212 | u64 end = base + size; | 211 | u64 end = base + size; |
| 213 | int i; | 212 | int i; |
| @@ -254,6 +253,16 @@ long lmb_remove(u64 base, u64 size) | |||
| 254 | return lmb_add_region(rgn, end, rgnend - end); | 253 | return lmb_add_region(rgn, end, rgnend - end); |
| 255 | } | 254 | } |
| 256 | 255 | ||
/* Remove [base, base+size) from the list of available memory regions. */
long lmb_remove(u64 base, u64 size)
{
	return __lmb_remove(&lmb.memory, base, size);
}
| 260 | |||
/* Release a previously reserved range [base, base+size) from the
 * reserved-region list (the inverse of lmb_reserve()). */
long __init lmb_free(u64 base, u64 size)
{
	return __lmb_remove(&lmb.reserved, base, size);
}
| 265 | |||
| 257 | long __init lmb_reserve(u64 base, u64 size) | 266 | long __init lmb_reserve(u64 base, u64 size) |
| 258 | { | 267 | { |
| 259 | struct lmb_region *_rgn = &lmb.reserved; | 268 | struct lmb_region *_rgn = &lmb.reserved; |
| @@ -263,7 +272,7 @@ long __init lmb_reserve(u64 base, u64 size) | |||
| 263 | return lmb_add_region(_rgn, base, size); | 272 | return lmb_add_region(_rgn, base, size); |
| 264 | } | 273 | } |
| 265 | 274 | ||
| 266 | long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size) | 275 | long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size) |
| 267 | { | 276 | { |
| 268 | unsigned long i; | 277 | unsigned long i; |
| 269 | 278 | ||
| @@ -493,6 +502,11 @@ int __init lmb_is_reserved(u64 addr) | |||
| 493 | return 0; | 502 | return 0; |
| 494 | } | 503 | } |
| 495 | 504 | ||
/* Non-zero if [base, base+size) overlaps any reserved region. */
int lmb_is_region_reserved(u64 base, u64 size)
{
	return lmb_overlaps_region(&lmb.reserved, base, size);
}
| 509 | |||
| 496 | /* | 510 | /* |
| 497 | * Given a <base, len>, find which memory regions belong to this range. | 511 | * Given a <base, len>, find which memory regions belong to this range. |
| 498 | * Adjust the request and return a contiguous chunk. | 512 | * Adjust the request and return a contiguous chunk. |
diff --git a/lib/lru_cache.c b/lib/lru_cache.c new file mode 100644 index 000000000000..270de9d31b8c --- /dev/null +++ b/lib/lru_cache.c | |||
| @@ -0,0 +1,560 @@ | |||
| 1 | /* | ||
| 2 | lru_cache.c | ||
| 3 | |||
| 4 | This file is part of DRBD by Philipp Reisner and Lars Ellenberg. | ||
| 5 | |||
| 6 | Copyright (C) 2003-2008, LINBIT Information Technologies GmbH. | ||
| 7 | Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>. | ||
| 8 | Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. | ||
| 9 | |||
| 10 | drbd is free software; you can redistribute it and/or modify | ||
| 11 | it under the terms of the GNU General Public License as published by | ||
| 12 | the Free Software Foundation; either version 2, or (at your option) | ||
| 13 | any later version. | ||
| 14 | |||
| 15 | drbd is distributed in the hope that it will be useful, | ||
| 16 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | GNU General Public License for more details. | ||
| 19 | |||
| 20 | You should have received a copy of the GNU General Public License | ||
| 21 | along with drbd; see the file COPYING. If not, write to | ||
| 22 | the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. | ||
| 23 | |||
| 24 | */ | ||
| 25 | |||
| 26 | #include <linux/module.h> | ||
| 27 | #include <linux/bitops.h> | ||
| 28 | #include <linux/slab.h> | ||
| 29 | #include <linux/string.h> /* for memset */ | ||
| 30 | #include <linux/seq_file.h> /* for seq_printf */ | ||
| 31 | #include <linux/lru_cache.h> | ||
| 32 | |||
| 33 | MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " | ||
| 34 | "Lars Ellenberg <lars@linbit.com>"); | ||
| 35 | MODULE_DESCRIPTION("lru_cache - Track sets of hot objects"); | ||
| 36 | MODULE_LICENSE("GPL"); | ||
| 37 | |||
/* this is developers aid only.
 * it catches concurrent access (lack of locking on the users part) */
#define PARANOIA_ENTRY() do {		\
	BUG_ON(!lc);			\
	BUG_ON(!lc->nr_elements);	\
	BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags)); \
} while (0)

/* pairs with PARANOIA_ENTRY(): drop the "in critical section" bit
 * again, then return x (if given) from the enclosing function */
#define RETURN(x...)     do { \
	clear_bit(__LC_PARANOIA, &lc->flags); \
	smp_mb__after_clear_bit(); return x ; } while (0)

/* BUG() if e is not one of the elements tracked by lc */
#define PARANOIA_LC_ELEMENT(lc, e) do {	\
	struct lru_cache *lc_ = (lc);	\
	struct lc_element *e_ = (e);	\
	unsigned i = e_->lc_index;	\
	BUG_ON(i >= lc_->nr_elements);	\
	BUG_ON(lc_->lc_element[i] != e_); } while (0)
| 57 | |||
| 58 | /** | ||
| 59 | * lc_create - prepares to track objects in an active set | ||
| 60 | * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details | ||
| 61 | * @e_count: number of elements allowed to be active simultaneously | ||
| 62 | * @e_size: size of the tracked objects | ||
| 63 | * @e_off: offset to the &struct lc_element member in a tracked object | ||
| 64 | * | ||
| 65 | * Returns a pointer to a newly initialized struct lru_cache on success, | ||
| 66 | * or NULL on (allocation) failure. | ||
| 67 | */ | ||
| 68 | struct lru_cache *lc_create(const char *name, struct kmem_cache *cache, | ||
| 69 | unsigned e_count, size_t e_size, size_t e_off) | ||
| 70 | { | ||
| 71 | struct hlist_head *slot = NULL; | ||
| 72 | struct lc_element **element = NULL; | ||
| 73 | struct lru_cache *lc; | ||
| 74 | struct lc_element *e; | ||
| 75 | unsigned cache_obj_size = kmem_cache_size(cache); | ||
| 76 | unsigned i; | ||
| 77 | |||
| 78 | WARN_ON(cache_obj_size < e_size); | ||
| 79 | if (cache_obj_size < e_size) | ||
| 80 | return NULL; | ||
| 81 | |||
| 82 | /* e_count too big; would probably fail the allocation below anyways. | ||
| 83 | * for typical use cases, e_count should be few thousand at most. */ | ||
| 84 | if (e_count > LC_MAX_ACTIVE) | ||
| 85 | return NULL; | ||
| 86 | |||
| 87 | slot = kzalloc(e_count * sizeof(struct hlist_head*), GFP_KERNEL); | ||
| 88 | if (!slot) | ||
| 89 | goto out_fail; | ||
| 90 | element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL); | ||
| 91 | if (!element) | ||
| 92 | goto out_fail; | ||
| 93 | |||
| 94 | lc = kzalloc(sizeof(*lc), GFP_KERNEL); | ||
| 95 | if (!lc) | ||
| 96 | goto out_fail; | ||
| 97 | |||
| 98 | INIT_LIST_HEAD(&lc->in_use); | ||
| 99 | INIT_LIST_HEAD(&lc->lru); | ||
| 100 | INIT_LIST_HEAD(&lc->free); | ||
| 101 | |||
| 102 | lc->name = name; | ||
| 103 | lc->element_size = e_size; | ||
| 104 | lc->element_off = e_off; | ||
| 105 | lc->nr_elements = e_count; | ||
| 106 | lc->new_number = LC_FREE; | ||
| 107 | lc->lc_cache = cache; | ||
| 108 | lc->lc_element = element; | ||
| 109 | lc->lc_slot = slot; | ||
| 110 | |||
| 111 | /* preallocate all objects */ | ||
| 112 | for (i = 0; i < e_count; i++) { | ||
| 113 | void *p = kmem_cache_alloc(cache, GFP_KERNEL); | ||
| 114 | if (!p) | ||
| 115 | break; | ||
| 116 | memset(p, 0, lc->element_size); | ||
| 117 | e = p + e_off; | ||
| 118 | e->lc_index = i; | ||
| 119 | e->lc_number = LC_FREE; | ||
| 120 | list_add(&e->list, &lc->free); | ||
| 121 | element[i] = e; | ||
| 122 | } | ||
| 123 | if (i == e_count) | ||
| 124 | return lc; | ||
| 125 | |||
| 126 | /* else: could not allocate all elements, give up */ | ||
| 127 | for (i--; i; i--) { | ||
| 128 | void *p = element[i]; | ||
| 129 | kmem_cache_free(cache, p - e_off); | ||
| 130 | } | ||
| 131 | kfree(lc); | ||
| 132 | out_fail: | ||
| 133 | kfree(element); | ||
| 134 | kfree(slot); | ||
| 135 | return NULL; | ||
| 136 | } | ||
| 137 | |||
| 138 | void lc_free_by_index(struct lru_cache *lc, unsigned i) | ||
| 139 | { | ||
| 140 | void *p = lc->lc_element[i]; | ||
| 141 | WARN_ON(!p); | ||
| 142 | if (p) { | ||
| 143 | p -= lc->element_off; | ||
| 144 | kmem_cache_free(lc->lc_cache, p); | ||
| 145 | } | ||
| 146 | } | ||
| 147 | |||
| 148 | /** | ||
| 149 | * lc_destroy - frees memory allocated by lc_create() | ||
| 150 | * @lc: the lru cache to destroy | ||
| 151 | */ | ||
| 152 | void lc_destroy(struct lru_cache *lc) | ||
| 153 | { | ||
| 154 | unsigned i; | ||
| 155 | if (!lc) | ||
| 156 | return; | ||
| 157 | for (i = 0; i < lc->nr_elements; i++) | ||
| 158 | lc_free_by_index(lc, i); | ||
| 159 | kfree(lc->lc_element); | ||
| 160 | kfree(lc->lc_slot); | ||
| 161 | kfree(lc); | ||
| 162 | } | ||
| 163 | |||
| 164 | /** | ||
| 165 | * lc_reset - does a full reset for @lc and the hash table slots. | ||
| 166 | * @lc: the lru cache to operate on | ||
| 167 | * | ||
| 168 | * It is roughly the equivalent of re-allocating a fresh lru_cache object, | ||
| 169 | * basically a short cut to lc_destroy(lc); lc = lc_create(...); | ||
| 170 | */ | ||
| 171 | void lc_reset(struct lru_cache *lc) | ||
| 172 | { | ||
| 173 | unsigned i; | ||
| 174 | |||
| 175 | INIT_LIST_HEAD(&lc->in_use); | ||
| 176 | INIT_LIST_HEAD(&lc->lru); | ||
| 177 | INIT_LIST_HEAD(&lc->free); | ||
| 178 | lc->used = 0; | ||
| 179 | lc->hits = 0; | ||
| 180 | lc->misses = 0; | ||
| 181 | lc->starving = 0; | ||
| 182 | lc->dirty = 0; | ||
| 183 | lc->changed = 0; | ||
| 184 | lc->flags = 0; | ||
| 185 | lc->changing_element = NULL; | ||
| 186 | lc->new_number = LC_FREE; | ||
| 187 | memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements); | ||
| 188 | |||
| 189 | for (i = 0; i < lc->nr_elements; i++) { | ||
| 190 | struct lc_element *e = lc->lc_element[i]; | ||
| 191 | void *p = e; | ||
| 192 | p -= lc->element_off; | ||
| 193 | memset(p, 0, lc->element_size); | ||
| 194 | /* re-init it */ | ||
| 195 | e->lc_index = i; | ||
| 196 | e->lc_number = LC_FREE; | ||
| 197 | list_add(&e->list, &lc->free); | ||
| 198 | } | ||
| 199 | } | ||
| 200 | |||
/**
 * lc_seq_printf_stats - print stats about @lc into @seq
 * @seq: the seq_file to print into
 * @lc: the lru cache to print statistics of
 *
 * Returns the seq_printf() result.
 */
size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
{
	/* NOTE:
	 * total calls to lc_get are
	 * (starving + hits + misses)
	 * misses include "dirty" count (update from an other thread in
	 * progress) and "changed", when this in fact lead to an successful
	 * update of the cache.
	 */
	return seq_printf(seq, "\t%s: used:%u/%u "
		"hits:%lu misses:%lu starving:%lu dirty:%lu changed:%lu\n",
		lc->name, lc->used, lc->nr_elements,
		lc->hits, lc->misses, lc->starving, lc->dirty, lc->changed);
}
| 220 | |||
| 221 | static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr) | ||
| 222 | { | ||
| 223 | return lc->lc_slot + (enr % lc->nr_elements); | ||
| 224 | } | ||
| 225 | |||
| 226 | |||
| 227 | /** | ||
| 228 | * lc_find - find element by label, if present in the hash table | ||
| 229 | * @lc: The lru_cache object | ||
| 230 | * @enr: element number | ||
| 231 | * | ||
| 232 | * Returns the pointer to an element, if the element with the requested | ||
| 233 | * "label" or element number is present in the hash table, | ||
| 234 | * or NULL if not found. Does not change the refcnt. | ||
| 235 | */ | ||
| 236 | struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr) | ||
| 237 | { | ||
| 238 | struct hlist_node *n; | ||
| 239 | struct lc_element *e; | ||
| 240 | |||
| 241 | BUG_ON(!lc); | ||
| 242 | BUG_ON(!lc->nr_elements); | ||
| 243 | hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) { | ||
| 244 | if (e->lc_number == enr) | ||
| 245 | return e; | ||
| 246 | } | ||
| 247 | return NULL; | ||
| 248 | } | ||
| 249 | |||
| 250 | /* returned element will be "recycled" immediately */ | ||
| 251 | static struct lc_element *lc_evict(struct lru_cache *lc) | ||
| 252 | { | ||
| 253 | struct list_head *n; | ||
| 254 | struct lc_element *e; | ||
| 255 | |||
| 256 | if (list_empty(&lc->lru)) | ||
| 257 | return NULL; | ||
| 258 | |||
| 259 | n = lc->lru.prev; | ||
| 260 | e = list_entry(n, struct lc_element, list); | ||
| 261 | |||
| 262 | PARANOIA_LC_ELEMENT(lc, e); | ||
| 263 | |||
| 264 | list_del(&e->list); | ||
| 265 | hlist_del(&e->colision); | ||
| 266 | return e; | ||
| 267 | } | ||
| 268 | |||
/**
 * lc_del - removes an element from the cache
 * @lc: The lru_cache object
 * @e: The element to remove
 *
 * @e must be unused (refcnt == 0). Moves @e from "lru" to "free" list,
 * sets @e->enr to %LC_FREE.
 */
void lc_del(struct lru_cache *lc, struct lc_element *e)
{
	PARANOIA_ENTRY();
	PARANOIA_LC_ELEMENT(lc, e);
	BUG_ON(e->refcnt);	/* must no longer be referenced */

	e->lc_number = LC_FREE;
	hlist_del_init(&e->colision);	/* unhash */
	list_move(&e->list, &lc->free);
	RETURN();
}
| 288 | |||
| 289 | static struct lc_element *lc_get_unused_element(struct lru_cache *lc) | ||
| 290 | { | ||
| 291 | struct list_head *n; | ||
| 292 | |||
| 293 | if (list_empty(&lc->free)) | ||
| 294 | return lc_evict(lc); | ||
| 295 | |||
| 296 | n = lc->free.next; | ||
| 297 | list_del(n); | ||
| 298 | return list_entry(n, struct lc_element, list); | ||
| 299 | } | ||
| 300 | |||
| 301 | static int lc_unused_element_available(struct lru_cache *lc) | ||
| 302 | { | ||
| 303 | if (!list_empty(&lc->free)) | ||
| 304 | return 1; /* something on the free list */ | ||
| 305 | if (!list_empty(&lc->lru)) | ||
| 306 | return 1; /* something to evict */ | ||
| 307 | |||
| 308 | return 0; | ||
| 309 | } | ||
| 310 | |||
| 311 | |||
/**
 * lc_get - get element by label, maybe change the active set
 * @lc: the lru cache to operate on
 * @enr: the label to look up
 *
 * Finds an element in the cache, increases its usage count,
 * "touches" and returns it.
 *
 * In case the requested number is not present, it needs to be added to the
 * cache. Therefore it is possible that an other element becomes evicted from
 * the cache. In either case, the user is notified so he is able to e.g. keep
 * a persistent log of the cache changes, and therefore the objects in use.
 *
 * Return values:
 *  NULL
 *     The cache was marked %LC_STARVING,
 *     or the requested label was not in the active set
 *     and a changing transaction is still pending (@lc was marked %LC_DIRTY).
 *     Or no unused or free element could be recycled (@lc will be marked as
 *     %LC_STARVING, blocking further lc_get() operations).
 *
 *  pointer to the element with the REQUESTED element number.
 *     In this case, it can be used right away
 *
 *  pointer to an UNUSED element with some different element number,
 *          where that different number may also be %LC_FREE.
 *
 *          In this case, the cache is marked %LC_DIRTY (blocking further changes),
 *          and the returned element pointer is removed from the lru list and
 *          hash collision chains.  The user now should do whatever housekeeping
 *          is necessary.
 *          Then he must call lc_changed(lc,element_pointer), to finish
 *          the change.
 *
 * NOTE: The user needs to check the lc_number on EACH use, so he recognizes
 *       any cache set change.
 */
struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
{
	struct lc_element *e;

	PARANOIA_ENTRY();
	if (lc->flags & LC_STARVING) {
		++lc->starving;
		RETURN(NULL);
	}

	e = lc_find(lc, enr);
	if (e) {
		/* cache hit: just take a reference and make it unevictable */
		++lc->hits;
		if (e->refcnt++ == 0)
			lc->used++;
		list_move(&e->list, &lc->in_use); /* Not evictable... */
		RETURN(e);
	}

	++lc->misses;

	/* In case there is nothing available and we can not kick out
	 * the LRU element, we have to wait ...
	 */
	if (!lc_unused_element_available(lc)) {
		__set_bit(__LC_STARVING, &lc->flags);
		RETURN(NULL);
	}

	/* it was not present in the active set.
	 * we are going to recycle an unused (or even "free") element.
	 * user may need to commit a transaction to record that change.
	 * we serialize on flags & TF_DIRTY */
	if (test_and_set_bit(__LC_DIRTY, &lc->flags)) {
		++lc->dirty;
		RETURN(NULL);
	}

	/* cannot be NULL: lc_unused_element_available() was true above */
	e = lc_get_unused_element(lc);
	BUG_ON(!e);

	clear_bit(__LC_STARVING, &lc->flags);
	BUG_ON(++e->refcnt != 1);
	lc->used++;

	/* remember the pending label change until lc_changed() commits it */
	lc->changing_element = e;
	lc->new_number = enr;

	RETURN(e);
}
| 399 | |||
| 400 | /* similar to lc_get, | ||
| 401 | * but only gets a new reference on an existing element. | ||
| 402 | * you either get the requested element, or NULL. | ||
| 403 | * will be consolidated into one function. | ||
| 404 | */ | ||
| 405 | struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr) | ||
| 406 | { | ||
| 407 | struct lc_element *e; | ||
| 408 | |||
| 409 | PARANOIA_ENTRY(); | ||
| 410 | if (lc->flags & LC_STARVING) { | ||
| 411 | ++lc->starving; | ||
| 412 | RETURN(NULL); | ||
| 413 | } | ||
| 414 | |||
| 415 | e = lc_find(lc, enr); | ||
| 416 | if (e) { | ||
| 417 | ++lc->hits; | ||
| 418 | if (e->refcnt++ == 0) | ||
| 419 | lc->used++; | ||
| 420 | list_move(&e->list, &lc->in_use); /* Not evictable... */ | ||
| 421 | } | ||
| 422 | RETURN(e); | ||
| 423 | } | ||
| 424 | |||
/**
 * lc_changed - tell @lc that the change has been recorded
 * @lc: the lru cache to operate on
 * @e: the element pending label change
 *
 * Commits the transaction started by an lc_get() that returned an
 * unused element: assigns the new label, re-hashes @e, and finally
 * clears %LC_DIRTY so further set changes may proceed.
 */
void lc_changed(struct lru_cache *lc, struct lc_element *e)
{
	PARANOIA_ENTRY();
	BUG_ON(e != lc->changing_element);
	PARANOIA_LC_ELEMENT(lc, e);
	++lc->changed;
	e->lc_number = lc->new_number;
	list_add(&e->list, &lc->in_use);
	hlist_add_head(&e->colision, lc_hash_slot(lc, lc->new_number));
	lc->changing_element = NULL;
	lc->new_number = LC_FREE;
	/* drop the dirty bit only after all updates above are done */
	clear_bit(__LC_DIRTY, &lc->flags);
	smp_mb__after_clear_bit();
	RETURN();
}
| 445 | |||
| 446 | |||
/**
 * lc_put - give up refcnt of @e
 * @lc: the lru cache to operate on
 * @e: the element to put
 *
 * If refcnt reaches zero, the element is moved to the lru list,
 * and a %LC_STARVING (if set) is cleared.
 * Returns the new (post-decrement) refcnt.
 */
unsigned int lc_put(struct lru_cache *lc, struct lc_element *e)
{
	PARANOIA_ENTRY();
	PARANOIA_LC_ELEMENT(lc, e);
	BUG_ON(e->refcnt == 0);
	BUG_ON(e == lc->changing_element);
	if (--e->refcnt == 0) {
		/* move it to the front of LRU. */
		list_move(&e->list, &lc->lru);
		lc->used--;
		/* element became evictable: a starving lc_get() may proceed */
		clear_bit(__LC_STARVING, &lc->flags);
		smp_mb__after_clear_bit();
	}
	RETURN(e->refcnt);
}
| 471 | |||
| 472 | /** | ||
| 473 | * lc_element_by_index | ||
| 474 | * @lc: the lru cache to operate on | ||
| 475 | * @i: the index of the element to return | ||
| 476 | */ | ||
| 477 | struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i) | ||
| 478 | { | ||
| 479 | BUG_ON(i >= lc->nr_elements); | ||
| 480 | BUG_ON(lc->lc_element[i] == NULL); | ||
| 481 | BUG_ON(lc->lc_element[i]->lc_index != i); | ||
| 482 | return lc->lc_element[i]; | ||
| 483 | } | ||
| 484 | |||
/**
 * lc_index_of
 * @lc: the lru cache to operate on
 * @e: the element to query for its index position in lc->element
 *
 * The inverse of lc_element_by_index().
 */
unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e)
{
	PARANOIA_LC_ELEMENT(lc, e);
	return e->lc_index;
}
| 495 | |||
/**
 * lc_set - associate index with label
 * @lc: the lru cache to operate on
 * @enr: the label to set
 * @index: the element index to associate label with.
 *
 * Used to initialize the active set to some previously recorded state.
 * Out-of-range indices are silently ignored.
 */
void lc_set(struct lru_cache *lc, unsigned int enr, int index)
{
	struct lc_element *e;

	if (index < 0 || index >= lc->nr_elements)
		return;

	e = lc_element_by_index(lc, index);
	e->lc_number = enr;

	/* re-hash under the new label ... */
	hlist_del_init(&e->colision);
	hlist_add_head(&e->colision, lc_hash_slot(lc, enr));
	/* ... and queue on in_use or lru, depending on the refcnt */
	list_move(&e->list, e->refcnt ? &lc->in_use : &lc->lru);
}
| 518 | |||
/**
 * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form.
 * @seq: the &struct seq_file pointer to seq_printf into
 * @lc: the lru cache to operate on
 * @utext: user supplied "heading" or other info
 * @detail: function pointer the user may provide to dump further details
 * of the object the lc_element is embedded in.
 */
void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
	     void (*detail) (struct seq_file *, struct lc_element *))
{
	unsigned int nr_elements = lc->nr_elements;
	struct lc_element *e;
	int i;

	seq_printf(seq, "\tnn: lc_number refcnt %s\n ", utext);
	for (i = 0; i < nr_elements; i++) {
		e = lc_element_by_index(lc, i);
		if (e->lc_number == LC_FREE) {
			seq_printf(seq, "\t%2d: FREE\n", i);
		} else {
			seq_printf(seq, "\t%2d: %4u %4u ", i,
				   e->lc_number, e->refcnt);
			detail(seq, e);
		}
	}
}
| 546 | |||
/* public interface of the lru_cache tracking facility */
EXPORT_SYMBOL(lc_create);
EXPORT_SYMBOL(lc_reset);
EXPORT_SYMBOL(lc_destroy);
EXPORT_SYMBOL(lc_set);
EXPORT_SYMBOL(lc_del);
EXPORT_SYMBOL(lc_try_get);
EXPORT_SYMBOL(lc_find);
EXPORT_SYMBOL(lc_get);
EXPORT_SYMBOL(lc_put);
EXPORT_SYMBOL(lc_changed);
EXPORT_SYMBOL(lc_element_by_index);
EXPORT_SYMBOL(lc_index_of);
EXPORT_SYMBOL(lc_seq_printf_stats);
EXPORT_SYMBOL(lc_seq_dump_details);
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c index 5dc6b29c1575..f2fd09850223 100644 --- a/lib/lzo/lzo1x_decompress.c +++ b/lib/lzo/lzo1x_decompress.c | |||
| @@ -11,11 +11,13 @@ | |||
| 11 | * Richard Purdie <rpurdie@openedhand.com> | 11 | * Richard Purdie <rpurdie@openedhand.com> |
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #ifndef STATIC | ||
| 14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 15 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
| 16 | #include <linux/lzo.h> | 17 | #endif |
| 17 | #include <asm/byteorder.h> | 18 | |
| 18 | #include <asm/unaligned.h> | 19 | #include <asm/unaligned.h> |
| 20 | #include <linux/lzo.h> | ||
| 19 | #include "lzodefs.h" | 21 | #include "lzodefs.h" |
| 20 | 22 | ||
| 21 | #define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x)) | 23 | #define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x)) |
| @@ -244,9 +246,10 @@ lookbehind_overrun: | |||
| 244 | *out_len = op - out; | 246 | *out_len = op - out; |
| 245 | return LZO_E_LOOKBEHIND_OVERRUN; | 247 | return LZO_E_LOOKBEHIND_OVERRUN; |
| 246 | } | 248 | } |
| 247 | 249 | #ifndef STATIC | |
| 248 | EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); | 250 | EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); |
| 249 | 251 | ||
| 250 | MODULE_LICENSE("GPL"); | 252 | MODULE_LICENSE("GPL"); |
| 251 | MODULE_DESCRIPTION("LZO1X Decompressor"); | 253 | MODULE_DESCRIPTION("LZO1X Decompressor"); |
| 252 | 254 | ||
| 255 | #endif | ||
diff --git a/lib/parser.c b/lib/parser.c index b00d02059a5f..fb34977246bb 100644 --- a/lib/parser.c +++ b/lib/parser.c | |||
| @@ -56,13 +56,16 @@ static int match_one(char *s, const char *p, substring_t args[]) | |||
| 56 | 56 | ||
| 57 | args[argc].from = s; | 57 | args[argc].from = s; |
| 58 | switch (*p++) { | 58 | switch (*p++) { |
| 59 | case 's': | 59 | case 's': { |
| 60 | if (strlen(s) == 0) | 60 | size_t str_len = strlen(s); |
| 61 | |||
| 62 | if (str_len == 0) | ||
| 61 | return 0; | 63 | return 0; |
| 62 | else if (len == -1 || len > strlen(s)) | 64 | if (len == -1 || len > str_len) |
| 63 | len = strlen(s); | 65 | len = str_len; |
| 64 | args[argc].to = s + len; | 66 | args[argc].to = s + len; |
| 65 | break; | 67 | break; |
| 68 | } | ||
| 66 | case 'd': | 69 | case 'd': |
| 67 | simple_strtol(s, &args[argc].to, 0); | 70 | simple_strtol(s, &args[argc].to, 0); |
| 68 | goto num; | 71 | goto num; |
diff --git a/lib/plist.c b/lib/plist.c index d6c64a824e1d..1471988d9190 100644 --- a/lib/plist.c +++ b/lib/plist.c | |||
| @@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top) | |||
| 54 | 54 | ||
| 55 | static void plist_check_head(struct plist_head *head) | 55 | static void plist_check_head(struct plist_head *head) |
| 56 | { | 56 | { |
| 57 | WARN_ON(!head->lock); | 57 | WARN_ON(!head->rawlock && !head->spinlock); |
| 58 | if (head->lock) | 58 | if (head->rawlock) |
| 59 | WARN_ON_SMP(!spin_is_locked(head->lock)); | 59 | WARN_ON_SMP(!raw_spin_is_locked(head->rawlock)); |
| 60 | if (head->spinlock) | ||
| 61 | WARN_ON_SMP(!spin_is_locked(head->spinlock)); | ||
| 60 | plist_check_list(&head->prio_list); | 62 | plist_check_list(&head->prio_list); |
| 61 | plist_check_list(&head->node_list); | 63 | plist_check_list(&head->node_list); |
| 62 | } | 64 | } |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 92cdd9936e3d..2a087e0f9863 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -28,7 +28,6 @@ | |||
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | #include <linux/notifier.h> | 29 | #include <linux/notifier.h> |
| 30 | #include <linux/cpu.h> | 30 | #include <linux/cpu.h> |
| 31 | #include <linux/gfp.h> | ||
| 32 | #include <linux/string.h> | 31 | #include <linux/string.h> |
| 33 | #include <linux/bitops.h> | 32 | #include <linux/bitops.h> |
| 34 | #include <linux/rcupdate.h> | 33 | #include <linux/rcupdate.h> |
| @@ -364,7 +363,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, | |||
| 364 | unsigned int height, shift; | 363 | unsigned int height, shift; |
| 365 | struct radix_tree_node *node, **slot; | 364 | struct radix_tree_node *node, **slot; |
| 366 | 365 | ||
| 367 | node = rcu_dereference(root->rnode); | 366 | node = rcu_dereference_raw(root->rnode); |
| 368 | if (node == NULL) | 367 | if (node == NULL) |
| 369 | return NULL; | 368 | return NULL; |
| 370 | 369 | ||
| @@ -384,7 +383,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, | |||
| 384 | do { | 383 | do { |
| 385 | slot = (struct radix_tree_node **) | 384 | slot = (struct radix_tree_node **) |
| 386 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); | 385 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); |
| 387 | node = rcu_dereference(*slot); | 386 | node = rcu_dereference_raw(*slot); |
| 388 | if (node == NULL) | 387 | if (node == NULL) |
| 389 | return NULL; | 388 | return NULL; |
| 390 | 389 | ||
| @@ -556,6 +555,10 @@ EXPORT_SYMBOL(radix_tree_tag_clear); | |||
| 556 | * | 555 | * |
| 557 | * 0: tag not present or not set | 556 | * 0: tag not present or not set |
| 558 | * 1: tag set | 557 | * 1: tag set |
| 558 | * | ||
| 559 | * Note that the return value of this function may not be relied on, even if | ||
| 560 | * the RCU lock is held, unless tag modification and node deletion are excluded | ||
| 561 | * from concurrency. | ||
| 559 | */ | 562 | */ |
| 560 | int radix_tree_tag_get(struct radix_tree_root *root, | 563 | int radix_tree_tag_get(struct radix_tree_root *root, |
| 561 | unsigned long index, unsigned int tag) | 564 | unsigned long index, unsigned int tag) |
| @@ -568,7 +571,7 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
| 568 | if (!root_tag_get(root, tag)) | 571 | if (!root_tag_get(root, tag)) |
| 569 | return 0; | 572 | return 0; |
| 570 | 573 | ||
| 571 | node = rcu_dereference(root->rnode); | 574 | node = rcu_dereference_raw(root->rnode); |
| 572 | if (node == NULL) | 575 | if (node == NULL) |
| 573 | return 0; | 576 | return 0; |
| 574 | 577 | ||
| @@ -596,13 +599,9 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
| 596 | */ | 599 | */ |
| 597 | if (!tag_get(node, tag, offset)) | 600 | if (!tag_get(node, tag, offset)) |
| 598 | saw_unset_tag = 1; | 601 | saw_unset_tag = 1; |
| 599 | if (height == 1) { | 602 | if (height == 1) |
| 600 | int ret = tag_get(node, tag, offset); | 603 | return !!tag_get(node, tag, offset); |
| 601 | 604 | node = rcu_dereference_raw(node->slots[offset]); | |
| 602 | BUG_ON(ret && saw_unset_tag); | ||
| 603 | return !!ret; | ||
| 604 | } | ||
| 605 | node = rcu_dereference(node->slots[offset]); | ||
| 606 | shift -= RADIX_TREE_MAP_SHIFT; | 605 | shift -= RADIX_TREE_MAP_SHIFT; |
| 607 | height--; | 606 | height--; |
| 608 | } | 607 | } |
| @@ -711,7 +710,7 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, | |||
| 711 | } | 710 | } |
| 712 | 711 | ||
| 713 | shift -= RADIX_TREE_MAP_SHIFT; | 712 | shift -= RADIX_TREE_MAP_SHIFT; |
| 714 | slot = rcu_dereference(slot->slots[i]); | 713 | slot = rcu_dereference_raw(slot->slots[i]); |
| 715 | if (slot == NULL) | 714 | if (slot == NULL) |
| 716 | goto out; | 715 | goto out; |
| 717 | } | 716 | } |
| @@ -758,7 +757,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
| 758 | unsigned long cur_index = first_index; | 757 | unsigned long cur_index = first_index; |
| 759 | unsigned int ret; | 758 | unsigned int ret; |
| 760 | 759 | ||
| 761 | node = rcu_dereference(root->rnode); | 760 | node = rcu_dereference_raw(root->rnode); |
| 762 | if (!node) | 761 | if (!node) |
| 763 | return 0; | 762 | return 0; |
| 764 | 763 | ||
| @@ -787,7 +786,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
| 787 | slot = *(((void ***)results)[ret + i]); | 786 | slot = *(((void ***)results)[ret + i]); |
| 788 | if (!slot) | 787 | if (!slot) |
| 789 | continue; | 788 | continue; |
| 790 | results[ret + nr_found] = rcu_dereference(slot); | 789 | results[ret + nr_found] = rcu_dereference_raw(slot); |
| 791 | nr_found++; | 790 | nr_found++; |
| 792 | } | 791 | } |
| 793 | ret += nr_found; | 792 | ret += nr_found; |
| @@ -826,7 +825,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | |||
| 826 | unsigned long cur_index = first_index; | 825 | unsigned long cur_index = first_index; |
| 827 | unsigned int ret; | 826 | unsigned int ret; |
| 828 | 827 | ||
| 829 | node = rcu_dereference(root->rnode); | 828 | node = rcu_dereference_raw(root->rnode); |
| 830 | if (!node) | 829 | if (!node) |
| 831 | return 0; | 830 | return 0; |
| 832 | 831 | ||
| @@ -915,7 +914,7 @@ __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index, | |||
| 915 | } | 914 | } |
| 916 | } | 915 | } |
| 917 | shift -= RADIX_TREE_MAP_SHIFT; | 916 | shift -= RADIX_TREE_MAP_SHIFT; |
| 918 | slot = rcu_dereference(slot->slots[i]); | 917 | slot = rcu_dereference_raw(slot->slots[i]); |
| 919 | if (slot == NULL) | 918 | if (slot == NULL) |
| 920 | break; | 919 | break; |
| 921 | } | 920 | } |
| @@ -951,7 +950,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
| 951 | if (!root_tag_get(root, tag)) | 950 | if (!root_tag_get(root, tag)) |
| 952 | return 0; | 951 | return 0; |
| 953 | 952 | ||
| 954 | node = rcu_dereference(root->rnode); | 953 | node = rcu_dereference_raw(root->rnode); |
| 955 | if (!node) | 954 | if (!node) |
| 956 | return 0; | 955 | return 0; |
| 957 | 956 | ||
| @@ -980,7 +979,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
| 980 | slot = *(((void ***)results)[ret + i]); | 979 | slot = *(((void ***)results)[ret + i]); |
| 981 | if (!slot) | 980 | if (!slot) |
| 982 | continue; | 981 | continue; |
| 983 | results[ret + nr_found] = rcu_dereference(slot); | 982 | results[ret + nr_found] = rcu_dereference_raw(slot); |
| 984 | nr_found++; | 983 | nr_found++; |
| 985 | } | 984 | } |
| 986 | ret += nr_found; | 985 | ret += nr_found; |
| @@ -1020,7 +1019,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | |||
| 1020 | if (!root_tag_get(root, tag)) | 1019 | if (!root_tag_get(root, tag)) |
| 1021 | return 0; | 1020 | return 0; |
| 1022 | 1021 | ||
| 1023 | node = rcu_dereference(root->rnode); | 1022 | node = rcu_dereference_raw(root->rnode); |
| 1024 | if (!node) | 1023 | if (!node) |
| 1025 | return 0; | 1024 | return 0; |
| 1026 | 1025 | ||
diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 26187edcc7ea..027a03f4c56d 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c | |||
| @@ -7,51 +7,61 @@ | |||
| 7 | * parameter. Now every user can use their own standalone ratelimit_state. | 7 | * parameter. Now every user can use their own standalone ratelimit_state. |
| 8 | * | 8 | * |
| 9 | * This file is released under the GPLv2. | 9 | * This file is released under the GPLv2. |
| 10 | * | ||
| 11 | */ | 10 | */ |
| 12 | 11 | ||
| 13 | #include <linux/kernel.h> | 12 | #include <linux/ratelimit.h> |
| 14 | #include <linux/jiffies.h> | 13 | #include <linux/jiffies.h> |
| 15 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 16 | 15 | ||
| 17 | static DEFINE_SPINLOCK(ratelimit_lock); | ||
| 18 | |||
| 19 | /* | 16 | /* |
| 20 | * __ratelimit - rate limiting | 17 | * __ratelimit - rate limiting |
| 21 | * @rs: ratelimit_state data | 18 | * @rs: ratelimit_state data |
| 19 | * @func: name of calling function | ||
| 20 | * | ||
| 21 | * This enforces a rate limit: not more than @rs->burst callbacks | ||
| 22 | * in every @rs->interval | ||
| 22 | * | 23 | * |
| 23 | * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks | 24 | * RETURNS: |
| 24 | * in every @rs->ratelimit_jiffies | 25 | * 0 means callbacks will be suppressed. |
| 26 | * 1 means go ahead and do it. | ||
| 25 | */ | 27 | */ |
| 26 | int __ratelimit(struct ratelimit_state *rs) | 28 | int ___ratelimit(struct ratelimit_state *rs, const char *func) |
| 27 | { | 29 | { |
| 28 | unsigned long flags; | 30 | unsigned long flags; |
| 31 | int ret; | ||
| 29 | 32 | ||
| 30 | if (!rs->interval) | 33 | if (!rs->interval) |
| 31 | return 1; | 34 | return 1; |
| 32 | 35 | ||
| 33 | spin_lock_irqsave(&ratelimit_lock, flags); | 36 | /* |
| 37 | * If we contend on this state's lock then almost | ||
| 38 | * by definition we are too busy to print a message, | ||
| 39 | * in addition to the one that will be printed by | ||
| 40 | * the entity that is holding the lock already: | ||
| 41 | */ | ||
| 42 | if (!spin_trylock_irqsave(&rs->lock, flags)) | ||
| 43 | return 0; | ||
| 44 | |||
| 34 | if (!rs->begin) | 45 | if (!rs->begin) |
| 35 | rs->begin = jiffies; | 46 | rs->begin = jiffies; |
| 36 | 47 | ||
| 37 | if (time_is_before_jiffies(rs->begin + rs->interval)) { | 48 | if (time_is_before_jiffies(rs->begin + rs->interval)) { |
| 38 | if (rs->missed) | 49 | if (rs->missed) |
| 39 | printk(KERN_WARNING "%s: %d callbacks suppressed\n", | 50 | printk(KERN_WARNING "%s: %d callbacks suppressed\n", |
| 40 | __func__, rs->missed); | 51 | func, rs->missed); |
| 41 | rs->begin = 0; | 52 | rs->begin = 0; |
| 42 | rs->printed = 0; | 53 | rs->printed = 0; |
| 43 | rs->missed = 0; | 54 | rs->missed = 0; |
| 44 | } | 55 | } |
| 45 | if (rs->burst && rs->burst > rs->printed) | 56 | if (rs->burst && rs->burst > rs->printed) { |
| 46 | goto print; | 57 | rs->printed++; |
| 47 | 58 | ret = 1; | |
| 48 | rs->missed++; | 59 | } else { |
| 49 | spin_unlock_irqrestore(&ratelimit_lock, flags); | 60 | rs->missed++; |
| 50 | return 0; | 61 | ret = 0; |
| 62 | } | ||
| 63 | spin_unlock_irqrestore(&rs->lock, flags); | ||
| 51 | 64 | ||
| 52 | print: | 65 | return ret; |
| 53 | rs->printed++; | ||
| 54 | spin_unlock_irqrestore(&ratelimit_lock, flags); | ||
| 55 | return 1; | ||
| 56 | } | 66 | } |
| 57 | EXPORT_SYMBOL(__ratelimit); | 67 | EXPORT_SYMBOL(___ratelimit); |
diff --git a/lib/rational.c b/lib/rational.c index b3c099b5478e..3ed247b80662 100644 --- a/lib/rational.c +++ b/lib/rational.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/rational.h> | 9 | #include <linux/rational.h> |
| 10 | #include <linux/module.h> | ||
| 10 | 11 | ||
| 11 | /* | 12 | /* |
| 12 | * calculate best rational approximation for a given fraction | 13 | * calculate best rational approximation for a given fraction |
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c index 9df3ca56db11..ffc9fc7f3b05 100644 --- a/lib/rwsem-spinlock.c +++ b/lib/rwsem-spinlock.c | |||
| @@ -17,6 +17,19 @@ struct rwsem_waiter { | |||
| 17 | #define RWSEM_WAITING_FOR_WRITE 0x00000002 | 17 | #define RWSEM_WAITING_FOR_WRITE 0x00000002 |
| 18 | }; | 18 | }; |
| 19 | 19 | ||
| 20 | int rwsem_is_locked(struct rw_semaphore *sem) | ||
| 21 | { | ||
| 22 | int ret = 1; | ||
| 23 | unsigned long flags; | ||
| 24 | |||
| 25 | if (spin_trylock_irqsave(&sem->wait_lock, flags)) { | ||
| 26 | ret = (sem->activity != 0); | ||
| 27 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 28 | } | ||
| 29 | return ret; | ||
| 30 | } | ||
| 31 | EXPORT_SYMBOL(rwsem_is_locked); | ||
| 32 | |||
| 20 | /* | 33 | /* |
| 21 | * initialise the semaphore | 34 | * initialise the semaphore |
| 22 | */ | 35 | */ |
| @@ -34,6 +47,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name, | |||
| 34 | spin_lock_init(&sem->wait_lock); | 47 | spin_lock_init(&sem->wait_lock); |
| 35 | INIT_LIST_HEAD(&sem->wait_list); | 48 | INIT_LIST_HEAD(&sem->wait_list); |
| 36 | } | 49 | } |
| 50 | EXPORT_SYMBOL(__init_rwsem); | ||
| 37 | 51 | ||
| 38 | /* | 52 | /* |
| 39 | * handle the lock release when processes blocked on it that can now run | 53 | * handle the lock release when processes blocked on it that can now run |
| @@ -129,13 +143,14 @@ void __sched __down_read(struct rw_semaphore *sem) | |||
| 129 | { | 143 | { |
| 130 | struct rwsem_waiter waiter; | 144 | struct rwsem_waiter waiter; |
| 131 | struct task_struct *tsk; | 145 | struct task_struct *tsk; |
| 146 | unsigned long flags; | ||
| 132 | 147 | ||
| 133 | spin_lock_irq(&sem->wait_lock); | 148 | spin_lock_irqsave(&sem->wait_lock, flags); |
| 134 | 149 | ||
| 135 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { | 150 | if (sem->activity >= 0 && list_empty(&sem->wait_list)) { |
| 136 | /* granted */ | 151 | /* granted */ |
| 137 | sem->activity++; | 152 | sem->activity++; |
| 138 | spin_unlock_irq(&sem->wait_lock); | 153 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 139 | goto out; | 154 | goto out; |
| 140 | } | 155 | } |
| 141 | 156 | ||
| @@ -150,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem) | |||
| 150 | list_add_tail(&waiter.list, &sem->wait_list); | 165 | list_add_tail(&waiter.list, &sem->wait_list); |
| 151 | 166 | ||
| 152 | /* we don't need to touch the semaphore struct anymore */ | 167 | /* we don't need to touch the semaphore struct anymore */ |
| 153 | spin_unlock_irq(&sem->wait_lock); | 168 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 154 | 169 | ||
| 155 | /* wait to be given the lock */ | 170 | /* wait to be given the lock */ |
| 156 | for (;;) { | 171 | for (;;) { |
| @@ -195,13 +210,14 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
| 195 | { | 210 | { |
| 196 | struct rwsem_waiter waiter; | 211 | struct rwsem_waiter waiter; |
| 197 | struct task_struct *tsk; | 212 | struct task_struct *tsk; |
| 213 | unsigned long flags; | ||
| 198 | 214 | ||
| 199 | spin_lock_irq(&sem->wait_lock); | 215 | spin_lock_irqsave(&sem->wait_lock, flags); |
| 200 | 216 | ||
| 201 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { | 217 | if (sem->activity == 0 && list_empty(&sem->wait_list)) { |
| 202 | /* granted */ | 218 | /* granted */ |
| 203 | sem->activity = -1; | 219 | sem->activity = -1; |
| 204 | spin_unlock_irq(&sem->wait_lock); | 220 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 205 | goto out; | 221 | goto out; |
| 206 | } | 222 | } |
| 207 | 223 | ||
| @@ -216,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
| 216 | list_add_tail(&waiter.list, &sem->wait_list); | 232 | list_add_tail(&waiter.list, &sem->wait_list); |
| 217 | 233 | ||
| 218 | /* we don't need to touch the semaphore struct anymore */ | 234 | /* we don't need to touch the semaphore struct anymore */ |
| 219 | spin_unlock_irq(&sem->wait_lock); | 235 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 220 | 236 | ||
| 221 | /* wait to be given the lock */ | 237 | /* wait to be given the lock */ |
| 222 | for (;;) { | 238 | for (;;) { |
| @@ -305,12 +321,3 @@ void __downgrade_write(struct rw_semaphore *sem) | |||
| 305 | spin_unlock_irqrestore(&sem->wait_lock, flags); | 321 | spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 306 | } | 322 | } |
| 307 | 323 | ||
| 308 | EXPORT_SYMBOL(__init_rwsem); | ||
| 309 | EXPORT_SYMBOL(__down_read); | ||
| 310 | EXPORT_SYMBOL(__down_read_trylock); | ||
| 311 | EXPORT_SYMBOL(__down_write_nested); | ||
| 312 | EXPORT_SYMBOL(__down_write); | ||
| 313 | EXPORT_SYMBOL(__down_write_trylock); | ||
| 314 | EXPORT_SYMBOL(__up_read); | ||
| 315 | EXPORT_SYMBOL(__up_write); | ||
| 316 | EXPORT_SYMBOL(__downgrade_write); | ||
diff --git a/lib/rwsem.c b/lib/rwsem.c index 3e3365e5665e..ceba8e28807a 100644 --- a/lib/rwsem.c +++ b/lib/rwsem.c | |||
| @@ -136,9 +136,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading) | |||
| 136 | out: | 136 | out: |
| 137 | return sem; | 137 | return sem; |
| 138 | 138 | ||
| 139 | /* undo the change to count, but check for a transition 1->0 */ | 139 | /* undo the change to the active count, but check for a transition |
| 140 | * 1->0 */ | ||
| 140 | undo: | 141 | undo: |
| 141 | if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0) | 142 | if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK) |
| 142 | goto out; | 143 | goto out; |
| 143 | goto try_again; | 144 | goto try_again; |
| 144 | } | 145 | } |
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 0d475d8167bf..9afa25b52a83 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | * Version 2. See the file COPYING for more details. | 7 | * Version 2. See the file COPYING for more details. |
| 8 | */ | 8 | */ |
| 9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
| 10 | #include <linux/slab.h> | ||
| 10 | #include <linux/scatterlist.h> | 11 | #include <linux/scatterlist.h> |
| 11 | #include <linux/highmem.h> | 12 | #include <linux/highmem.h> |
| 12 | 13 | ||
diff --git a/lib/show_mem.c b/lib/show_mem.c index 238e72a18ce1..fdc77c82f922 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c | |||
| @@ -15,7 +15,7 @@ void show_mem(void) | |||
| 15 | unsigned long total = 0, reserved = 0, shared = 0, | 15 | unsigned long total = 0, reserved = 0, shared = 0, |
| 16 | nonshared = 0, highmem = 0; | 16 | nonshared = 0, highmem = 0; |
| 17 | 17 | ||
| 18 | printk(KERN_INFO "Mem-Info:\n"); | 18 | printk("Mem-Info:\n"); |
| 19 | show_free_areas(); | 19 | show_free_areas(); |
| 20 | 20 | ||
| 21 | for_each_online_pgdat(pgdat) { | 21 | for_each_online_pgdat(pgdat) { |
| @@ -49,15 +49,15 @@ void show_mem(void) | |||
| 49 | pgdat_resize_unlock(pgdat, &flags); | 49 | pgdat_resize_unlock(pgdat, &flags); |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | printk(KERN_INFO "%lu pages RAM\n", total); | 52 | printk("%lu pages RAM\n", total); |
| 53 | #ifdef CONFIG_HIGHMEM | 53 | #ifdef CONFIG_HIGHMEM |
| 54 | printk(KERN_INFO "%lu pages HighMem\n", highmem); | 54 | printk("%lu pages HighMem\n", highmem); |
| 55 | #endif | 55 | #endif |
| 56 | printk(KERN_INFO "%lu pages reserved\n", reserved); | 56 | printk("%lu pages reserved\n", reserved); |
| 57 | printk(KERN_INFO "%lu pages shared\n", shared); | 57 | printk("%lu pages shared\n", shared); |
| 58 | printk(KERN_INFO "%lu pages non-shared\n", nonshared); | 58 | printk("%lu pages non-shared\n", nonshared); |
| 59 | #ifdef CONFIG_QUICKLIST | 59 | #ifdef CONFIG_QUICKLIST |
| 60 | printk(KERN_INFO "%lu pages in pagetable cache\n", | 60 | printk("%lu pages in pagetable cache\n", |
| 61 | quicklist_total_size()); | 61 | quicklist_total_size()); |
| 62 | #endif | 62 | #endif |
| 63 | } | 63 | } |
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 9c4b0256490b..4755b98b6dfb 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c | |||
| @@ -13,8 +13,8 @@ | |||
| 13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
| 14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 15 | 15 | ||
| 16 | void __spin_lock_init(spinlock_t *lock, const char *name, | 16 | void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, |
| 17 | struct lock_class_key *key) | 17 | struct lock_class_key *key) |
| 18 | { | 18 | { |
| 19 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 19 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 20 | /* | 20 | /* |
| @@ -23,13 +23,13 @@ void __spin_lock_init(spinlock_t *lock, const char *name, | |||
| 23 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); | 23 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); |
| 24 | lockdep_init_map(&lock->dep_map, name, key, 0); | 24 | lockdep_init_map(&lock->dep_map, name, key, 0); |
| 25 | #endif | 25 | #endif |
| 26 | lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 26 | lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
| 27 | lock->magic = SPINLOCK_MAGIC; | 27 | lock->magic = SPINLOCK_MAGIC; |
| 28 | lock->owner = SPINLOCK_OWNER_INIT; | 28 | lock->owner = SPINLOCK_OWNER_INIT; |
| 29 | lock->owner_cpu = -1; | 29 | lock->owner_cpu = -1; |
| 30 | } | 30 | } |
| 31 | 31 | ||
| 32 | EXPORT_SYMBOL(__spin_lock_init); | 32 | EXPORT_SYMBOL(__raw_spin_lock_init); |
| 33 | 33 | ||
| 34 | void __rwlock_init(rwlock_t *lock, const char *name, | 34 | void __rwlock_init(rwlock_t *lock, const char *name, |
| 35 | struct lock_class_key *key) | 35 | struct lock_class_key *key) |
| @@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, | |||
| 41 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); | 41 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); |
| 42 | lockdep_init_map(&lock->dep_map, name, key, 0); | 42 | lockdep_init_map(&lock->dep_map, name, key, 0); |
| 43 | #endif | 43 | #endif |
| 44 | lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; | 44 | lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED; |
| 45 | lock->magic = RWLOCK_MAGIC; | 45 | lock->magic = RWLOCK_MAGIC; |
| 46 | lock->owner = SPINLOCK_OWNER_INIT; | 46 | lock->owner = SPINLOCK_OWNER_INIT; |
| 47 | lock->owner_cpu = -1; | 47 | lock->owner_cpu = -1; |
| @@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, | |||
| 49 | 49 | ||
| 50 | EXPORT_SYMBOL(__rwlock_init); | 50 | EXPORT_SYMBOL(__rwlock_init); |
| 51 | 51 | ||
| 52 | static void spin_bug(spinlock_t *lock, const char *msg) | 52 | static void spin_bug(raw_spinlock_t *lock, const char *msg) |
| 53 | { | 53 | { |
| 54 | struct task_struct *owner = NULL; | 54 | struct task_struct *owner = NULL; |
| 55 | 55 | ||
| @@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg) | |||
| 73 | #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) | 73 | #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) |
| 74 | 74 | ||
| 75 | static inline void | 75 | static inline void |
| 76 | debug_spin_lock_before(spinlock_t *lock) | 76 | debug_spin_lock_before(raw_spinlock_t *lock) |
| 77 | { | 77 | { |
| 78 | SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); | 78 | SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); |
| 79 | SPIN_BUG_ON(lock->owner == current, lock, "recursion"); | 79 | SPIN_BUG_ON(lock->owner == current, lock, "recursion"); |
| @@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock) | |||
| 81 | lock, "cpu recursion"); | 81 | lock, "cpu recursion"); |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | static inline void debug_spin_lock_after(spinlock_t *lock) | 84 | static inline void debug_spin_lock_after(raw_spinlock_t *lock) |
| 85 | { | 85 | { |
| 86 | lock->owner_cpu = raw_smp_processor_id(); | 86 | lock->owner_cpu = raw_smp_processor_id(); |
| 87 | lock->owner = current; | 87 | lock->owner = current; |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | static inline void debug_spin_unlock(spinlock_t *lock) | 90 | static inline void debug_spin_unlock(raw_spinlock_t *lock) |
| 91 | { | 91 | { |
| 92 | SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); | 92 | SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); |
| 93 | SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked"); | 93 | SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked"); |
| 94 | SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); | 94 | SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); |
| 95 | SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), | 95 | SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), |
| 96 | lock, "wrong CPU"); | 96 | lock, "wrong CPU"); |
| @@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock) | |||
| 98 | lock->owner_cpu = -1; | 98 | lock->owner_cpu = -1; |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | static void __spin_lock_debug(spinlock_t *lock) | 101 | static void __spin_lock_debug(raw_spinlock_t *lock) |
| 102 | { | 102 | { |
| 103 | u64 i; | 103 | u64 i; |
| 104 | u64 loops = loops_per_jiffy * HZ; | 104 | u64 loops = loops_per_jiffy * HZ; |
| @@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock) | |||
| 106 | 106 | ||
| 107 | for (;;) { | 107 | for (;;) { |
| 108 | for (i = 0; i < loops; i++) { | 108 | for (i = 0; i < loops; i++) { |
| 109 | if (__raw_spin_trylock(&lock->raw_lock)) | 109 | if (arch_spin_trylock(&lock->raw_lock)) |
| 110 | return; | 110 | return; |
| 111 | __delay(1); | 111 | __delay(1); |
| 112 | } | 112 | } |
| @@ -125,17 +125,17 @@ static void __spin_lock_debug(spinlock_t *lock) | |||
| 125 | } | 125 | } |
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | void _raw_spin_lock(spinlock_t *lock) | 128 | void do_raw_spin_lock(raw_spinlock_t *lock) |
| 129 | { | 129 | { |
| 130 | debug_spin_lock_before(lock); | 130 | debug_spin_lock_before(lock); |
| 131 | if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) | 131 | if (unlikely(!arch_spin_trylock(&lock->raw_lock))) |
| 132 | __spin_lock_debug(lock); | 132 | __spin_lock_debug(lock); |
| 133 | debug_spin_lock_after(lock); | 133 | debug_spin_lock_after(lock); |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | int _raw_spin_trylock(spinlock_t *lock) | 136 | int do_raw_spin_trylock(raw_spinlock_t *lock) |
| 137 | { | 137 | { |
| 138 | int ret = __raw_spin_trylock(&lock->raw_lock); | 138 | int ret = arch_spin_trylock(&lock->raw_lock); |
| 139 | 139 | ||
| 140 | if (ret) | 140 | if (ret) |
| 141 | debug_spin_lock_after(lock); | 141 | debug_spin_lock_after(lock); |
| @@ -148,10 +148,10 @@ int _raw_spin_trylock(spinlock_t *lock) | |||
| 148 | return ret; | 148 | return ret; |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | void _raw_spin_unlock(spinlock_t *lock) | 151 | void do_raw_spin_unlock(raw_spinlock_t *lock) |
| 152 | { | 152 | { |
| 153 | debug_spin_unlock(lock); | 153 | debug_spin_unlock(lock); |
| 154 | __raw_spin_unlock(&lock->raw_lock); | 154 | arch_spin_unlock(&lock->raw_lock); |
| 155 | } | 155 | } |
| 156 | 156 | ||
| 157 | static void rwlock_bug(rwlock_t *lock, const char *msg) | 157 | static void rwlock_bug(rwlock_t *lock, const char *msg) |
| @@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock) | |||
| 176 | 176 | ||
| 177 | for (;;) { | 177 | for (;;) { |
| 178 | for (i = 0; i < loops; i++) { | 178 | for (i = 0; i < loops; i++) { |
| 179 | if (__raw_read_trylock(&lock->raw_lock)) | 179 | if (arch_read_trylock(&lock->raw_lock)) |
| 180 | return; | 180 | return; |
| 181 | __delay(1); | 181 | __delay(1); |
| 182 | } | 182 | } |
| @@ -193,15 +193,15 @@ static void __read_lock_debug(rwlock_t *lock) | |||
| 193 | } | 193 | } |
| 194 | #endif | 194 | #endif |
| 195 | 195 | ||
| 196 | void _raw_read_lock(rwlock_t *lock) | 196 | void do_raw_read_lock(rwlock_t *lock) |
| 197 | { | 197 | { |
| 198 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | 198 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); |
| 199 | __raw_read_lock(&lock->raw_lock); | 199 | arch_read_lock(&lock->raw_lock); |
| 200 | } | 200 | } |
| 201 | 201 | ||
| 202 | int _raw_read_trylock(rwlock_t *lock) | 202 | int do_raw_read_trylock(rwlock_t *lock) |
| 203 | { | 203 | { |
| 204 | int ret = __raw_read_trylock(&lock->raw_lock); | 204 | int ret = arch_read_trylock(&lock->raw_lock); |
| 205 | 205 | ||
| 206 | #ifndef CONFIG_SMP | 206 | #ifndef CONFIG_SMP |
| 207 | /* | 207 | /* |
| @@ -212,10 +212,10 @@ int _raw_read_trylock(rwlock_t *lock) | |||
| 212 | return ret; | 212 | return ret; |
| 213 | } | 213 | } |
| 214 | 214 | ||
| 215 | void _raw_read_unlock(rwlock_t *lock) | 215 | void do_raw_read_unlock(rwlock_t *lock) |
| 216 | { | 216 | { |
| 217 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | 217 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); |
| 218 | __raw_read_unlock(&lock->raw_lock); | 218 | arch_read_unlock(&lock->raw_lock); |
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | static inline void debug_write_lock_before(rwlock_t *lock) | 221 | static inline void debug_write_lock_before(rwlock_t *lock) |
| @@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock) | |||
| 251 | 251 | ||
| 252 | for (;;) { | 252 | for (;;) { |
| 253 | for (i = 0; i < loops; i++) { | 253 | for (i = 0; i < loops; i++) { |
| 254 | if (__raw_write_trylock(&lock->raw_lock)) | 254 | if (arch_write_trylock(&lock->raw_lock)) |
| 255 | return; | 255 | return; |
| 256 | __delay(1); | 256 | __delay(1); |
| 257 | } | 257 | } |
| @@ -268,16 +268,16 @@ static void __write_lock_debug(rwlock_t *lock) | |||
| 268 | } | 268 | } |
| 269 | #endif | 269 | #endif |
| 270 | 270 | ||
| 271 | void _raw_write_lock(rwlock_t *lock) | 271 | void do_raw_write_lock(rwlock_t *lock) |
| 272 | { | 272 | { |
| 273 | debug_write_lock_before(lock); | 273 | debug_write_lock_before(lock); |
| 274 | __raw_write_lock(&lock->raw_lock); | 274 | arch_write_lock(&lock->raw_lock); |
| 275 | debug_write_lock_after(lock); | 275 | debug_write_lock_after(lock); |
| 276 | } | 276 | } |
| 277 | 277 | ||
| 278 | int _raw_write_trylock(rwlock_t *lock) | 278 | int do_raw_write_trylock(rwlock_t *lock) |
| 279 | { | 279 | { |
| 280 | int ret = __raw_write_trylock(&lock->raw_lock); | 280 | int ret = arch_write_trylock(&lock->raw_lock); |
| 281 | 281 | ||
| 282 | if (ret) | 282 | if (ret) |
| 283 | debug_write_lock_after(lock); | 283 | debug_write_lock_after(lock); |
| @@ -290,8 +290,8 @@ int _raw_write_trylock(rwlock_t *lock) | |||
| 290 | return ret; | 290 | return ret; |
| 291 | } | 291 | } |
| 292 | 292 | ||
| 293 | void _raw_write_unlock(rwlock_t *lock) | 293 | void do_raw_write_unlock(rwlock_t *lock) |
| 294 | { | 294 | { |
| 295 | debug_write_unlock(lock); | 295 | debug_write_unlock(lock); |
| 296 | __raw_write_unlock(&lock->raw_lock); | 296 | arch_write_unlock(&lock->raw_lock); |
| 297 | } | 297 | } |
diff --git a/lib/string.c b/lib/string.c index e96421ab9a9a..f71bead1be3e 100644 --- a/lib/string.c +++ b/lib/string.c | |||
| @@ -36,25 +36,21 @@ int strnicmp(const char *s1, const char *s2, size_t len) | |||
| 36 | /* Yes, Virginia, it had better be unsigned */ | 36 | /* Yes, Virginia, it had better be unsigned */ |
| 37 | unsigned char c1, c2; | 37 | unsigned char c1, c2; |
| 38 | 38 | ||
| 39 | c1 = c2 = 0; | 39 | if (!len) |
| 40 | if (len) { | 40 | return 0; |
| 41 | do { | 41 | |
| 42 | c1 = *s1; | 42 | do { |
| 43 | c2 = *s2; | 43 | c1 = *s1++; |
| 44 | s1++; | 44 | c2 = *s2++; |
| 45 | s2++; | 45 | if (!c1 || !c2) |
| 46 | if (!c1) | 46 | break; |
| 47 | break; | 47 | if (c1 == c2) |
| 48 | if (!c2) | 48 | continue; |
| 49 | break; | 49 | c1 = tolower(c1); |
| 50 | if (c1 == c2) | 50 | c2 = tolower(c2); |
| 51 | continue; | 51 | if (c1 != c2) |
| 52 | c1 = tolower(c1); | 52 | break; |
| 53 | c2 = tolower(c2); | 53 | } while (--len); |
| 54 | if (c1 != c2) | ||
| 55 | break; | ||
| 56 | } while (--len); | ||
| 57 | } | ||
| 58 | return (int)c1 - (int)c2; | 54 | return (int)c1 - (int)c2; |
| 59 | } | 55 | } |
| 60 | EXPORT_SYMBOL(strnicmp); | 56 | EXPORT_SYMBOL(strnicmp); |
| @@ -338,20 +334,34 @@ EXPORT_SYMBOL(strnchr); | |||
| 338 | #endif | 334 | #endif |
| 339 | 335 | ||
| 340 | /** | 336 | /** |
| 341 | * strstrip - Removes leading and trailing whitespace from @s. | 337 | * skip_spaces - Removes leading whitespace from @str. |
| 338 | * @str: The string to be stripped. | ||
| 339 | * | ||
| 340 | * Returns a pointer to the first non-whitespace character in @str. | ||
| 341 | */ | ||
| 342 | char *skip_spaces(const char *str) | ||
| 343 | { | ||
| 344 | while (isspace(*str)) | ||
| 345 | ++str; | ||
| 346 | return (char *)str; | ||
| 347 | } | ||
| 348 | EXPORT_SYMBOL(skip_spaces); | ||
| 349 | |||
| 350 | /** | ||
| 351 | * strim - Removes leading and trailing whitespace from @s. | ||
| 342 | * @s: The string to be stripped. | 352 | * @s: The string to be stripped. |
| 343 | * | 353 | * |
| 344 | * Note that the first trailing whitespace is replaced with a %NUL-terminator | 354 | * Note that the first trailing whitespace is replaced with a %NUL-terminator |
| 345 | * in the given string @s. Returns a pointer to the first non-whitespace | 355 | * in the given string @s. Returns a pointer to the first non-whitespace |
| 346 | * character in @s. | 356 | * character in @s. |
| 347 | */ | 357 | */ |
| 348 | char *strstrip(char *s) | 358 | char *strim(char *s) |
| 349 | { | 359 | { |
| 350 | size_t size; | 360 | size_t size; |
| 351 | char *end; | 361 | char *end; |
| 352 | 362 | ||
| 363 | s = skip_spaces(s); | ||
| 353 | size = strlen(s); | 364 | size = strlen(s); |
| 354 | |||
| 355 | if (!size) | 365 | if (!size) |
| 356 | return s; | 366 | return s; |
| 357 | 367 | ||
| @@ -360,12 +370,9 @@ char *strstrip(char *s) | |||
| 360 | end--; | 370 | end--; |
| 361 | *(end + 1) = '\0'; | 371 | *(end + 1) = '\0'; |
| 362 | 372 | ||
| 363 | while (*s && isspace(*s)) | ||
| 364 | s++; | ||
| 365 | |||
| 366 | return s; | 373 | return s; |
| 367 | } | 374 | } |
| 368 | EXPORT_SYMBOL(strstrip); | 375 | EXPORT_SYMBOL(strim); |
| 369 | 376 | ||
| 370 | #ifndef __HAVE_ARCH_STRLEN | 377 | #ifndef __HAVE_ARCH_STRLEN |
| 371 | /** | 378 | /** |
| @@ -656,7 +663,7 @@ EXPORT_SYMBOL(memscan); | |||
| 656 | */ | 663 | */ |
| 657 | char *strstr(const char *s1, const char *s2) | 664 | char *strstr(const char *s1, const char *s2) |
| 658 | { | 665 | { |
| 659 | int l1, l2; | 666 | size_t l1, l2; |
| 660 | 667 | ||
| 661 | l2 = strlen(s2); | 668 | l2 = strlen(s2); |
| 662 | if (!l2) | 669 | if (!l2) |
| @@ -673,6 +680,31 @@ char *strstr(const char *s1, const char *s2) | |||
| 673 | EXPORT_SYMBOL(strstr); | 680 | EXPORT_SYMBOL(strstr); |
| 674 | #endif | 681 | #endif |
| 675 | 682 | ||
| 683 | #ifndef __HAVE_ARCH_STRNSTR | ||
| 684 | /** | ||
| 685 | * strnstr - Find the first substring in a length-limited string | ||
| 686 | * @s1: The string to be searched | ||
| 687 | * @s2: The string to search for | ||
| 688 | * @len: the maximum number of characters to search | ||
| 689 | */ | ||
| 690 | char *strnstr(const char *s1, const char *s2, size_t len) | ||
| 691 | { | ||
| 692 | size_t l2; | ||
| 693 | |||
| 694 | l2 = strlen(s2); | ||
| 695 | if (!l2) | ||
| 696 | return (char *)s1; | ||
| 697 | while (len >= l2) { | ||
| 698 | len--; | ||
| 699 | if (!memcmp(s1, s2, l2)) | ||
| 700 | return (char *)s1; | ||
| 701 | s1++; | ||
| 702 | } | ||
| 703 | return NULL; | ||
| 704 | } | ||
| 705 | EXPORT_SYMBOL(strnstr); | ||
| 706 | #endif | ||
| 707 | |||
| 676 | #ifndef __HAVE_ARCH_MEMCHR | 708 | #ifndef __HAVE_ARCH_MEMCHR |
| 677 | /** | 709 | /** |
| 678 | * memchr - Find a character in an area of memory. | 710 | * memchr - Find a character in an area of memory. |
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index ac25cd28e807..5fddf720da73 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
| 29 | #include <linux/ctype.h> | 29 | #include <linux/ctype.h> |
| 30 | #include <linux/highmem.h> | 30 | #include <linux/highmem.h> |
| 31 | #include <linux/gfp.h> | ||
| 31 | 32 | ||
| 32 | #include <asm/io.h> | 33 | #include <asm/io.h> |
| 33 | #include <asm/dma.h> | 34 | #include <asm/dma.h> |
| @@ -97,6 +98,8 @@ static phys_addr_t *io_tlb_orig_addr; | |||
| 97 | */ | 98 | */ |
| 98 | static DEFINE_SPINLOCK(io_tlb_lock); | 99 | static DEFINE_SPINLOCK(io_tlb_lock); |
| 99 | 100 | ||
| 101 | static int late_alloc; | ||
| 102 | |||
| 100 | static int __init | 103 | static int __init |
| 101 | setup_io_tlb_npages(char *str) | 104 | setup_io_tlb_npages(char *str) |
| 102 | { | 105 | { |
| @@ -109,6 +112,7 @@ setup_io_tlb_npages(char *str) | |||
| 109 | ++str; | 112 | ++str; |
| 110 | if (!strcmp(str, "force")) | 113 | if (!strcmp(str, "force")) |
| 111 | swiotlb_force = 1; | 114 | swiotlb_force = 1; |
| 115 | |||
| 112 | return 1; | 116 | return 1; |
| 113 | } | 117 | } |
| 114 | __setup("swiotlb=", setup_io_tlb_npages); | 118 | __setup("swiotlb=", setup_io_tlb_npages); |
| @@ -121,8 +125,9 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, | |||
| 121 | return phys_to_dma(hwdev, virt_to_phys(address)); | 125 | return phys_to_dma(hwdev, virt_to_phys(address)); |
| 122 | } | 126 | } |
| 123 | 127 | ||
| 124 | static void swiotlb_print_info(unsigned long bytes) | 128 | void swiotlb_print_info(void) |
| 125 | { | 129 | { |
| 130 | unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT; | ||
| 126 | phys_addr_t pstart, pend; | 131 | phys_addr_t pstart, pend; |
| 127 | 132 | ||
| 128 | pstart = virt_to_phys(io_tlb_start); | 133 | pstart = virt_to_phys(io_tlb_start); |
| @@ -140,7 +145,7 @@ static void swiotlb_print_info(unsigned long bytes) | |||
| 140 | * structures for the software IO TLB used to implement the DMA API. | 145 | * structures for the software IO TLB used to implement the DMA API. |
| 141 | */ | 146 | */ |
| 142 | void __init | 147 | void __init |
| 143 | swiotlb_init_with_default_size(size_t default_size) | 148 | swiotlb_init_with_default_size(size_t default_size, int verbose) |
| 144 | { | 149 | { |
| 145 | unsigned long i, bytes; | 150 | unsigned long i, bytes; |
| 146 | 151 | ||
| @@ -176,14 +181,14 @@ swiotlb_init_with_default_size(size_t default_size) | |||
| 176 | io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); | 181 | io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); |
| 177 | if (!io_tlb_overflow_buffer) | 182 | if (!io_tlb_overflow_buffer) |
| 178 | panic("Cannot allocate SWIOTLB overflow buffer!\n"); | 183 | panic("Cannot allocate SWIOTLB overflow buffer!\n"); |
| 179 | 184 | if (verbose) | |
| 180 | swiotlb_print_info(bytes); | 185 | swiotlb_print_info(); |
| 181 | } | 186 | } |
| 182 | 187 | ||
| 183 | void __init | 188 | void __init |
| 184 | swiotlb_init(void) | 189 | swiotlb_init(int verbose) |
| 185 | { | 190 | { |
| 186 | swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ | 191 | swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */ |
| 187 | } | 192 | } |
| 188 | 193 | ||
| 189 | /* | 194 | /* |
| @@ -260,7 +265,9 @@ swiotlb_late_init_with_default_size(size_t default_size) | |||
| 260 | if (!io_tlb_overflow_buffer) | 265 | if (!io_tlb_overflow_buffer) |
| 261 | goto cleanup4; | 266 | goto cleanup4; |
| 262 | 267 | ||
| 263 | swiotlb_print_info(bytes); | 268 | swiotlb_print_info(); |
| 269 | |||
| 270 | late_alloc = 1; | ||
| 264 | 271 | ||
| 265 | return 0; | 272 | return 0; |
| 266 | 273 | ||
| @@ -281,6 +288,32 @@ cleanup1: | |||
| 281 | return -ENOMEM; | 288 | return -ENOMEM; |
| 282 | } | 289 | } |
| 283 | 290 | ||
| 291 | void __init swiotlb_free(void) | ||
| 292 | { | ||
| 293 | if (!io_tlb_overflow_buffer) | ||
| 294 | return; | ||
| 295 | |||
| 296 | if (late_alloc) { | ||
| 297 | free_pages((unsigned long)io_tlb_overflow_buffer, | ||
| 298 | get_order(io_tlb_overflow)); | ||
| 299 | free_pages((unsigned long)io_tlb_orig_addr, | ||
| 300 | get_order(io_tlb_nslabs * sizeof(phys_addr_t))); | ||
| 301 | free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * | ||
| 302 | sizeof(int))); | ||
| 303 | free_pages((unsigned long)io_tlb_start, | ||
| 304 | get_order(io_tlb_nslabs << IO_TLB_SHIFT)); | ||
| 305 | } else { | ||
| 306 | free_bootmem_late(__pa(io_tlb_overflow_buffer), | ||
| 307 | io_tlb_overflow); | ||
| 308 | free_bootmem_late(__pa(io_tlb_orig_addr), | ||
| 309 | io_tlb_nslabs * sizeof(phys_addr_t)); | ||
| 310 | free_bootmem_late(__pa(io_tlb_list), | ||
| 311 | io_tlb_nslabs * sizeof(int)); | ||
| 312 | free_bootmem_late(__pa(io_tlb_start), | ||
| 313 | io_tlb_nslabs << IO_TLB_SHIFT); | ||
| 314 | } | ||
| 315 | } | ||
| 316 | |||
| 284 | static int is_swiotlb_buffer(phys_addr_t paddr) | 317 | static int is_swiotlb_buffer(phys_addr_t paddr) |
| 285 | { | 318 | { |
| 286 | return paddr >= virt_to_phys(io_tlb_start) && | 319 | return paddr >= virt_to_phys(io_tlb_start) && |
| @@ -453,7 +486,7 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) | |||
| 453 | 486 | ||
| 454 | /* | 487 | /* |
| 455 | * Return the buffer to the free list by setting the corresponding | 488 | * Return the buffer to the free list by setting the corresponding |
| 456 | * entries to indicate the number of contigous entries available. | 489 | * entries to indicate the number of contiguous entries available. |
| 457 | * While returning the entries to the free list, we merge the entries | 490 | * While returning the entries to the free list, we merge the entries |
| 458 | * with slots below and above the pool being returned. | 491 | * with slots below and above the pool being returned. |
| 459 | */ | 492 | */ |
| @@ -517,7 +550,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
| 517 | dma_mask = hwdev->coherent_dma_mask; | 550 | dma_mask = hwdev->coherent_dma_mask; |
| 518 | 551 | ||
| 519 | ret = (void *)__get_free_pages(flags, order); | 552 | ret = (void *)__get_free_pages(flags, order); |
| 520 | if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) { | 553 | if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) { |
| 521 | /* | 554 | /* |
| 522 | * The allocated memory isn't reachable by the device. | 555 | * The allocated memory isn't reachable by the device. |
| 523 | */ | 556 | */ |
| @@ -539,7 +572,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
| 539 | dev_addr = swiotlb_virt_to_bus(hwdev, ret); | 572 | dev_addr = swiotlb_virt_to_bus(hwdev, ret); |
| 540 | 573 | ||
| 541 | /* Confirm address can be DMA'd by device */ | 574 | /* Confirm address can be DMA'd by device */ |
| 542 | if (dev_addr + size > dma_mask) { | 575 | if (dev_addr + size - 1 > dma_mask) { |
| 543 | printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", | 576 | printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", |
| 544 | (unsigned long long)dma_mask, | 577 | (unsigned long long)dma_mask, |
| 545 | (unsigned long long)dev_addr); | 578 | (unsigned long long)dev_addr); |
diff --git a/lib/textsearch.c b/lib/textsearch.c index 9fbcb44c554f..d608331b3e47 100644 --- a/lib/textsearch.c +++ b/lib/textsearch.c | |||
| @@ -103,6 +103,7 @@ | |||
| 103 | #include <linux/rcupdate.h> | 103 | #include <linux/rcupdate.h> |
| 104 | #include <linux/err.h> | 104 | #include <linux/err.h> |
| 105 | #include <linux/textsearch.h> | 105 | #include <linux/textsearch.h> |
| 106 | #include <linux/slab.h> | ||
| 106 | 107 | ||
| 107 | static LIST_HEAD(ts_ops); | 108 | static LIST_HEAD(ts_ops); |
| 108 | static DEFINE_SPINLOCK(ts_mod_lock); | 109 | static DEFINE_SPINLOCK(ts_mod_lock); |
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 33bed5e67a21..46d34b0b74a8 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | * Wirzenius wrote this portably, Torvalds fucked it up :-) | 9 | * Wirzenius wrote this portably, Torvalds fucked it up :-) |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | /* | 12 | /* |
| 13 | * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com> | 13 | * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com> |
| 14 | * - changed to provide snprintf and vsnprintf functions | 14 | * - changed to provide snprintf and vsnprintf functions |
| 15 | * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de> | 15 | * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de> |
| @@ -47,14 +47,14 @@ static unsigned int simple_guess_base(const char *cp) | |||
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | /** | 49 | /** |
| 50 | * simple_strtoul - convert a string to an unsigned long | 50 | * simple_strtoull - convert a string to an unsigned long long |
| 51 | * @cp: The start of the string | 51 | * @cp: The start of the string |
| 52 | * @endp: A pointer to the end of the parsed string will be placed here | 52 | * @endp: A pointer to the end of the parsed string will be placed here |
| 53 | * @base: The number base to use | 53 | * @base: The number base to use |
| 54 | */ | 54 | */ |
| 55 | unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) | 55 | unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) |
| 56 | { | 56 | { |
| 57 | unsigned long result = 0; | 57 | unsigned long long result = 0; |
| 58 | 58 | ||
| 59 | if (!base) | 59 | if (!base) |
| 60 | base = simple_guess_base(cp); | 60 | base = simple_guess_base(cp); |
| @@ -71,58 +71,39 @@ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) | |||
| 71 | result = result * base + value; | 71 | result = result * base + value; |
| 72 | cp++; | 72 | cp++; |
| 73 | } | 73 | } |
| 74 | |||
| 75 | if (endp) | 74 | if (endp) |
| 76 | *endp = (char *)cp; | 75 | *endp = (char *)cp; |
| 76 | |||
| 77 | return result; | 77 | return result; |
| 78 | } | 78 | } |
| 79 | EXPORT_SYMBOL(simple_strtoul); | 79 | EXPORT_SYMBOL(simple_strtoull); |
| 80 | 80 | ||
| 81 | /** | 81 | /** |
| 82 | * simple_strtol - convert a string to a signed long | 82 | * simple_strtoul - convert a string to an unsigned long |
| 83 | * @cp: The start of the string | 83 | * @cp: The start of the string |
| 84 | * @endp: A pointer to the end of the parsed string will be placed here | 84 | * @endp: A pointer to the end of the parsed string will be placed here |
| 85 | * @base: The number base to use | 85 | * @base: The number base to use |
| 86 | */ | 86 | */ |
| 87 | long simple_strtol(const char *cp, char **endp, unsigned int base) | 87 | unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) |
| 88 | { | 88 | { |
| 89 | if(*cp == '-') | 89 | return simple_strtoull(cp, endp, base); |
| 90 | return -simple_strtoul(cp + 1, endp, base); | ||
| 91 | return simple_strtoul(cp, endp, base); | ||
| 92 | } | 90 | } |
| 93 | EXPORT_SYMBOL(simple_strtol); | 91 | EXPORT_SYMBOL(simple_strtoul); |
| 94 | 92 | ||
| 95 | /** | 93 | /** |
| 96 | * simple_strtoull - convert a string to an unsigned long long | 94 | * simple_strtol - convert a string to a signed long |
| 97 | * @cp: The start of the string | 95 | * @cp: The start of the string |
| 98 | * @endp: A pointer to the end of the parsed string will be placed here | 96 | * @endp: A pointer to the end of the parsed string will be placed here |
| 99 | * @base: The number base to use | 97 | * @base: The number base to use |
| 100 | */ | 98 | */ |
| 101 | unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) | 99 | long simple_strtol(const char *cp, char **endp, unsigned int base) |
| 102 | { | 100 | { |
| 103 | unsigned long long result = 0; | 101 | if (*cp == '-') |
| 104 | 102 | return -simple_strtoul(cp + 1, endp, base); | |
| 105 | if (!base) | ||
| 106 | base = simple_guess_base(cp); | ||
| 107 | |||
| 108 | if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x') | ||
| 109 | cp += 2; | ||
| 110 | |||
| 111 | while (isxdigit(*cp)) { | ||
| 112 | unsigned int value; | ||
| 113 | |||
| 114 | value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10; | ||
| 115 | if (value >= base) | ||
| 116 | break; | ||
| 117 | result = result * base + value; | ||
| 118 | cp++; | ||
| 119 | } | ||
| 120 | 103 | ||
| 121 | if (endp) | 104 | return simple_strtoul(cp, endp, base); |
| 122 | *endp = (char *)cp; | ||
| 123 | return result; | ||
| 124 | } | 105 | } |
| 125 | EXPORT_SYMBOL(simple_strtoull); | 106 | EXPORT_SYMBOL(simple_strtol); |
| 126 | 107 | ||
| 127 | /** | 108 | /** |
| 128 | * simple_strtoll - convert a string to a signed long long | 109 | * simple_strtoll - convert a string to a signed long long |
| @@ -132,10 +113,12 @@ EXPORT_SYMBOL(simple_strtoull); | |||
| 132 | */ | 113 | */ |
| 133 | long long simple_strtoll(const char *cp, char **endp, unsigned int base) | 114 | long long simple_strtoll(const char *cp, char **endp, unsigned int base) |
| 134 | { | 115 | { |
| 135 | if(*cp=='-') | 116 | if (*cp == '-') |
| 136 | return -simple_strtoull(cp + 1, endp, base); | 117 | return -simple_strtoull(cp + 1, endp, base); |
| 118 | |||
| 137 | return simple_strtoull(cp, endp, base); | 119 | return simple_strtoull(cp, endp, base); |
| 138 | } | 120 | } |
| 121 | EXPORT_SYMBOL(simple_strtoll); | ||
| 139 | 122 | ||
| 140 | /** | 123 | /** |
| 141 | * strict_strtoul - convert a string to an unsigned long strictly | 124 | * strict_strtoul - convert a string to an unsigned long strictly |
| @@ -173,6 +156,7 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res) | |||
| 173 | val = simple_strtoul(cp, &tail, base); | 156 | val = simple_strtoul(cp, &tail, base); |
| 174 | if (tail == cp) | 157 | if (tail == cp) |
| 175 | return -EINVAL; | 158 | return -EINVAL; |
| 159 | |||
| 176 | if ((*tail == '\0') || | 160 | if ((*tail == '\0') || |
| 177 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { | 161 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { |
| 178 | *res = val; | 162 | *res = val; |
| @@ -285,10 +269,11 @@ EXPORT_SYMBOL(strict_strtoll); | |||
| 285 | 269 | ||
| 286 | static int skip_atoi(const char **s) | 270 | static int skip_atoi(const char **s) |
| 287 | { | 271 | { |
| 288 | int i=0; | 272 | int i = 0; |
| 289 | 273 | ||
| 290 | while (isdigit(**s)) | 274 | while (isdigit(**s)) |
| 291 | i = i*10 + *((*s)++) - '0'; | 275 | i = i*10 + *((*s)++) - '0'; |
| 276 | |||
| 292 | return i; | 277 | return i; |
| 293 | } | 278 | } |
| 294 | 279 | ||
| @@ -302,7 +287,7 @@ static int skip_atoi(const char **s) | |||
| 302 | /* Formats correctly any integer in [0,99999]. | 287 | /* Formats correctly any integer in [0,99999]. |
| 303 | * Outputs from one to five digits depending on input. | 288 | * Outputs from one to five digits depending on input. |
| 304 | * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */ | 289 | * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */ |
| 305 | static char* put_dec_trunc(char *buf, unsigned q) | 290 | static char *put_dec_trunc(char *buf, unsigned q) |
| 306 | { | 291 | { |
| 307 | unsigned d3, d2, d1, d0; | 292 | unsigned d3, d2, d1, d0; |
| 308 | d1 = (q>>4) & 0xf; | 293 | d1 = (q>>4) & 0xf; |
| @@ -331,14 +316,15 @@ static char* put_dec_trunc(char *buf, unsigned q) | |||
| 331 | d3 = d3 - 10*q; | 316 | d3 = d3 - 10*q; |
| 332 | *buf++ = d3 + '0'; /* next digit */ | 317 | *buf++ = d3 + '0'; /* next digit */ |
| 333 | if (q != 0) | 318 | if (q != 0) |
| 334 | *buf++ = q + '0'; /* most sign. digit */ | 319 | *buf++ = q + '0'; /* most sign. digit */ |
| 335 | } | 320 | } |
| 336 | } | 321 | } |
| 337 | } | 322 | } |
| 323 | |||
| 338 | return buf; | 324 | return buf; |
| 339 | } | 325 | } |
| 340 | /* Same with if's removed. Always emits five digits */ | 326 | /* Same with if's removed. Always emits five digits */ |
| 341 | static char* put_dec_full(char *buf, unsigned q) | 327 | static char *put_dec_full(char *buf, unsigned q) |
| 342 | { | 328 | { |
| 343 | /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ | 329 | /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ |
| 344 | /* but anyway, gcc produces better code with full-sized ints */ | 330 | /* but anyway, gcc produces better code with full-sized ints */ |
| @@ -347,14 +333,15 @@ static char* put_dec_full(char *buf, unsigned q) | |||
| 347 | d2 = (q>>8) & 0xf; | 333 | d2 = (q>>8) & 0xf; |
| 348 | d3 = (q>>12); | 334 | d3 = (q>>12); |
| 349 | 335 | ||
| 350 | /* Possible ways to approx. divide by 10 */ | 336 | /* |
| 351 | /* gcc -O2 replaces multiply with shifts and adds */ | 337 | * Possible ways to approx. divide by 10 |
| 352 | // (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) | 338 | * gcc -O2 replaces multiply with shifts and adds |
| 353 | // (x * 0x67) >> 10: 1100111 | 339 | * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) |
| 354 | // (x * 0x34) >> 9: 110100 - same | 340 | * (x * 0x67) >> 10: 1100111 |
| 355 | // (x * 0x1a) >> 8: 11010 - same | 341 | * (x * 0x34) >> 9: 110100 - same |
| 356 | // (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) | 342 | * (x * 0x1a) >> 8: 11010 - same |
| 357 | 343 | * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) | |
| 344 | */ | ||
| 358 | d0 = 6*(d3 + d2 + d1) + (q & 0xf); | 345 | d0 = 6*(d3 + d2 + d1) + (q & 0xf); |
| 359 | q = (d0 * 0xcd) >> 11; | 346 | q = (d0 * 0xcd) >> 11; |
| 360 | d0 = d0 - 10*q; | 347 | d0 = d0 - 10*q; |
| @@ -375,10 +362,11 @@ static char* put_dec_full(char *buf, unsigned q) | |||
| 375 | d3 = d3 - 10*q; | 362 | d3 = d3 - 10*q; |
| 376 | *buf++ = d3 + '0'; | 363 | *buf++ = d3 + '0'; |
| 377 | *buf++ = q + '0'; | 364 | *buf++ = q + '0'; |
| 365 | |||
| 378 | return buf; | 366 | return buf; |
| 379 | } | 367 | } |
| 380 | /* No inlining helps gcc to use registers better */ | 368 | /* No inlining helps gcc to use registers better */ |
| 381 | static noinline char* put_dec(char *buf, unsigned long long num) | 369 | static noinline char *put_dec(char *buf, unsigned long long num) |
| 382 | { | 370 | { |
| 383 | while (1) { | 371 | while (1) { |
| 384 | unsigned rem; | 372 | unsigned rem; |
| @@ -394,8 +382,8 @@ static noinline char* put_dec(char *buf, unsigned long long num) | |||
| 394 | #define PLUS 4 /* show plus */ | 382 | #define PLUS 4 /* show plus */ |
| 395 | #define SPACE 8 /* space if plus */ | 383 | #define SPACE 8 /* space if plus */ |
| 396 | #define LEFT 16 /* left justified */ | 384 | #define LEFT 16 /* left justified */ |
| 397 | #define SMALL 32 /* Must be 32 == 0x20 */ | 385 | #define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */ |
| 398 | #define SPECIAL 64 /* 0x */ | 386 | #define SPECIAL 64 /* prefix hex with "0x", octal with "0" */ |
| 399 | 387 | ||
| 400 | enum format_type { | 388 | enum format_type { |
| 401 | FORMAT_TYPE_NONE, /* Just a string part */ | 389 | FORMAT_TYPE_NONE, /* Just a string part */ |
| @@ -421,12 +409,12 @@ enum format_type { | |||
| 421 | }; | 409 | }; |
| 422 | 410 | ||
| 423 | struct printf_spec { | 411 | struct printf_spec { |
| 424 | enum format_type type; | 412 | u8 type; /* format_type enum */ |
| 425 | int flags; /* flags to number() */ | 413 | u8 flags; /* flags to number() */ |
| 426 | int field_width; /* width of output field */ | 414 | u8 base; /* number base, 8, 10 or 16 only */ |
| 427 | int base; | 415 | u8 qualifier; /* number qualifier, one of 'hHlLtzZ' */ |
| 428 | int precision; /* # of digits/chars */ | 416 | s16 field_width; /* width of output field */ |
| 429 | int qualifier; | 417 | s16 precision; /* # of digits/chars */ |
| 430 | }; | 418 | }; |
| 431 | 419 | ||
| 432 | static char *number(char *buf, char *end, unsigned long long num, | 420 | static char *number(char *buf, char *end, unsigned long long num, |
| @@ -448,9 +436,9 @@ static char *number(char *buf, char *end, unsigned long long num, | |||
| 448 | spec.flags &= ~ZEROPAD; | 436 | spec.flags &= ~ZEROPAD; |
| 449 | sign = 0; | 437 | sign = 0; |
| 450 | if (spec.flags & SIGN) { | 438 | if (spec.flags & SIGN) { |
| 451 | if ((signed long long) num < 0) { | 439 | if ((signed long long)num < 0) { |
| 452 | sign = '-'; | 440 | sign = '-'; |
| 453 | num = - (signed long long) num; | 441 | num = -(signed long long)num; |
| 454 | spec.field_width--; | 442 | spec.field_width--; |
| 455 | } else if (spec.flags & PLUS) { | 443 | } else if (spec.flags & PLUS) { |
| 456 | sign = '+'; | 444 | sign = '+'; |
| @@ -478,7 +466,9 @@ static char *number(char *buf, char *end, unsigned long long num, | |||
| 478 | else if (spec.base != 10) { /* 8 or 16 */ | 466 | else if (spec.base != 10) { /* 8 or 16 */ |
| 479 | int mask = spec.base - 1; | 467 | int mask = spec.base - 1; |
| 480 | int shift = 3; | 468 | int shift = 3; |
| 481 | if (spec.base == 16) shift = 4; | 469 | |
| 470 | if (spec.base == 16) | ||
| 471 | shift = 4; | ||
| 482 | do { | 472 | do { |
| 483 | tmp[i++] = (digits[((unsigned char)num) & mask] | locase); | 473 | tmp[i++] = (digits[((unsigned char)num) & mask] | locase); |
| 484 | num >>= shift; | 474 | num >>= shift; |
| @@ -493,7 +483,7 @@ static char *number(char *buf, char *end, unsigned long long num, | |||
| 493 | /* leading space padding */ | 483 | /* leading space padding */ |
| 494 | spec.field_width -= spec.precision; | 484 | spec.field_width -= spec.precision; |
| 495 | if (!(spec.flags & (ZEROPAD+LEFT))) { | 485 | if (!(spec.flags & (ZEROPAD+LEFT))) { |
| 496 | while(--spec.field_width >= 0) { | 486 | while (--spec.field_width >= 0) { |
| 497 | if (buf < end) | 487 | if (buf < end) |
| 498 | *buf = ' '; | 488 | *buf = ' '; |
| 499 | ++buf; | 489 | ++buf; |
| @@ -543,15 +533,16 @@ static char *number(char *buf, char *end, unsigned long long num, | |||
| 543 | *buf = ' '; | 533 | *buf = ' '; |
| 544 | ++buf; | 534 | ++buf; |
| 545 | } | 535 | } |
| 536 | |||
| 546 | return buf; | 537 | return buf; |
| 547 | } | 538 | } |
| 548 | 539 | ||
| 549 | static char *string(char *buf, char *end, char *s, struct printf_spec spec) | 540 | static char *string(char *buf, char *end, const char *s, struct printf_spec spec) |
| 550 | { | 541 | { |
| 551 | int len, i; | 542 | int len, i; |
| 552 | 543 | ||
| 553 | if ((unsigned long)s < PAGE_SIZE) | 544 | if ((unsigned long)s < PAGE_SIZE) |
| 554 | s = "<NULL>"; | 545 | s = "(null)"; |
| 555 | 546 | ||
| 556 | len = strnlen(s, spec.precision); | 547 | len = strnlen(s, spec.precision); |
| 557 | 548 | ||
| @@ -572,6 +563,7 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec) | |||
| 572 | *buf = ' '; | 563 | *buf = ' '; |
| 573 | ++buf; | 564 | ++buf; |
| 574 | } | 565 | } |
| 566 | |||
| 575 | return buf; | 567 | return buf; |
| 576 | } | 568 | } |
| 577 | 569 | ||
| @@ -585,47 +577,115 @@ static char *symbol_string(char *buf, char *end, void *ptr, | |||
| 585 | sprint_symbol(sym, value); | 577 | sprint_symbol(sym, value); |
| 586 | else | 578 | else |
| 587 | kallsyms_lookup(value, NULL, NULL, NULL, sym); | 579 | kallsyms_lookup(value, NULL, NULL, NULL, sym); |
| 580 | |||
| 588 | return string(buf, end, sym, spec); | 581 | return string(buf, end, sym, spec); |
| 589 | #else | 582 | #else |
| 590 | spec.field_width = 2*sizeof(void *); | 583 | spec.field_width = 2 * sizeof(void *); |
| 591 | spec.flags |= SPECIAL | SMALL | ZEROPAD; | 584 | spec.flags |= SPECIAL | SMALL | ZEROPAD; |
| 592 | spec.base = 16; | 585 | spec.base = 16; |
| 586 | |||
| 593 | return number(buf, end, value, spec); | 587 | return number(buf, end, value, spec); |
| 594 | #endif | 588 | #endif |
| 595 | } | 589 | } |
| 596 | 590 | ||
| 597 | static char *resource_string(char *buf, char *end, struct resource *res, | 591 | static char *resource_string(char *buf, char *end, struct resource *res, |
| 598 | struct printf_spec spec) | 592 | struct printf_spec spec, const char *fmt) |
| 599 | { | 593 | { |
| 600 | #ifndef IO_RSRC_PRINTK_SIZE | 594 | #ifndef IO_RSRC_PRINTK_SIZE |
| 601 | #define IO_RSRC_PRINTK_SIZE 4 | 595 | #define IO_RSRC_PRINTK_SIZE 6 |
| 602 | #endif | 596 | #endif |
| 603 | 597 | ||
| 604 | #ifndef MEM_RSRC_PRINTK_SIZE | 598 | #ifndef MEM_RSRC_PRINTK_SIZE |
| 605 | #define MEM_RSRC_PRINTK_SIZE 8 | 599 | #define MEM_RSRC_PRINTK_SIZE 10 |
| 606 | #endif | 600 | #endif |
| 607 | struct printf_spec num_spec = { | 601 | static const struct printf_spec io_spec = { |
| 608 | .base = 16, | 602 | .base = 16, |
| 603 | .field_width = IO_RSRC_PRINTK_SIZE, | ||
| 609 | .precision = -1, | 604 | .precision = -1, |
| 610 | .flags = SPECIAL | SMALL | ZEROPAD, | 605 | .flags = SPECIAL | SMALL | ZEROPAD, |
| 611 | }; | 606 | }; |
| 612 | /* room for the actual numbers, the two "0x", -, [, ] and the final zero */ | 607 | static const struct printf_spec mem_spec = { |
| 613 | char sym[4*sizeof(resource_size_t) + 8]; | 608 | .base = 16, |
| 614 | char *p = sym, *pend = sym + sizeof(sym); | 609 | .field_width = MEM_RSRC_PRINTK_SIZE, |
| 615 | int size = -1; | 610 | .precision = -1, |
| 611 | .flags = SPECIAL | SMALL | ZEROPAD, | ||
| 612 | }; | ||
| 613 | static const struct printf_spec bus_spec = { | ||
| 614 | .base = 16, | ||
| 615 | .field_width = 2, | ||
| 616 | .precision = -1, | ||
| 617 | .flags = SMALL | ZEROPAD, | ||
| 618 | }; | ||
| 619 | static const struct printf_spec dec_spec = { | ||
| 620 | .base = 10, | ||
| 621 | .precision = -1, | ||
| 622 | .flags = 0, | ||
| 623 | }; | ||
| 624 | static const struct printf_spec str_spec = { | ||
| 625 | .field_width = -1, | ||
| 626 | .precision = 10, | ||
| 627 | .flags = LEFT, | ||
| 628 | }; | ||
| 629 | static const struct printf_spec flag_spec = { | ||
| 630 | .base = 16, | ||
| 631 | .precision = -1, | ||
| 632 | .flags = SPECIAL | SMALL, | ||
| 633 | }; | ||
| 616 | 634 | ||
| 617 | if (res->flags & IORESOURCE_IO) | 635 | /* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8) |
| 618 | size = IO_RSRC_PRINTK_SIZE; | 636 | * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */ |
| 619 | else if (res->flags & IORESOURCE_MEM) | 637 | #define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4) |
| 620 | size = MEM_RSRC_PRINTK_SIZE; | 638 | #define FLAG_BUF_SIZE (2 * sizeof(res->flags)) |
| 639 | #define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]") | ||
| 640 | #define RAW_BUF_SIZE sizeof("[mem - flags 0x]") | ||
| 641 | char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE, | ||
| 642 | 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)]; | ||
| 643 | |||
| 644 | char *p = sym, *pend = sym + sizeof(sym); | ||
| 645 | int decode = (fmt[0] == 'R') ? 1 : 0; | ||
| 646 | const struct printf_spec *specp; | ||
| 621 | 647 | ||
| 622 | *p++ = '['; | 648 | *p++ = '['; |
| 623 | num_spec.field_width = size; | 649 | if (res->flags & IORESOURCE_IO) { |
| 624 | p = number(p, pend, res->start, num_spec); | 650 | p = string(p, pend, "io ", str_spec); |
| 625 | *p++ = '-'; | 651 | specp = &io_spec; |
| 626 | p = number(p, pend, res->end, num_spec); | 652 | } else if (res->flags & IORESOURCE_MEM) { |
| 653 | p = string(p, pend, "mem ", str_spec); | ||
| 654 | specp = &mem_spec; | ||
| 655 | } else if (res->flags & IORESOURCE_IRQ) { | ||
| 656 | p = string(p, pend, "irq ", str_spec); | ||
| 657 | specp = &dec_spec; | ||
| 658 | } else if (res->flags & IORESOURCE_DMA) { | ||
| 659 | p = string(p, pend, "dma ", str_spec); | ||
| 660 | specp = &dec_spec; | ||
| 661 | } else if (res->flags & IORESOURCE_BUS) { | ||
| 662 | p = string(p, pend, "bus ", str_spec); | ||
| 663 | specp = &bus_spec; | ||
| 664 | } else { | ||
| 665 | p = string(p, pend, "??? ", str_spec); | ||
| 666 | specp = &mem_spec; | ||
| 667 | decode = 0; | ||
| 668 | } | ||
| 669 | p = number(p, pend, res->start, *specp); | ||
| 670 | if (res->start != res->end) { | ||
| 671 | *p++ = '-'; | ||
| 672 | p = number(p, pend, res->end, *specp); | ||
| 673 | } | ||
| 674 | if (decode) { | ||
| 675 | if (res->flags & IORESOURCE_MEM_64) | ||
| 676 | p = string(p, pend, " 64bit", str_spec); | ||
| 677 | if (res->flags & IORESOURCE_PREFETCH) | ||
| 678 | p = string(p, pend, " pref", str_spec); | ||
| 679 | if (res->flags & IORESOURCE_WINDOW) | ||
| 680 | p = string(p, pend, " window", str_spec); | ||
| 681 | if (res->flags & IORESOURCE_DISABLED) | ||
| 682 | p = string(p, pend, " disabled", str_spec); | ||
| 683 | } else { | ||
| 684 | p = string(p, pend, " flags ", str_spec); | ||
| 685 | p = number(p, pend, res->flags, flag_spec); | ||
| 686 | } | ||
| 627 | *p++ = ']'; | 687 | *p++ = ']'; |
| 628 | *p = 0; | 688 | *p = '\0'; |
| 629 | 689 | ||
| 630 | return string(buf, end, sym, spec); | 690 | return string(buf, end, sym, spec); |
| 631 | } | 691 | } |
| @@ -636,24 +696,55 @@ static char *mac_address_string(char *buf, char *end, u8 *addr, | |||
| 636 | char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; | 696 | char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; |
| 637 | char *p = mac_addr; | 697 | char *p = mac_addr; |
| 638 | int i; | 698 | int i; |
| 699 | char separator; | ||
| 700 | |||
| 701 | if (fmt[1] == 'F') { /* FDDI canonical format */ | ||
| 702 | separator = '-'; | ||
| 703 | } else { | ||
| 704 | separator = ':'; | ||
| 705 | } | ||
| 639 | 706 | ||
| 640 | for (i = 0; i < 6; i++) { | 707 | for (i = 0; i < 6; i++) { |
| 641 | p = pack_hex_byte(p, addr[i]); | 708 | p = pack_hex_byte(p, addr[i]); |
| 642 | if (fmt[0] == 'M' && i != 5) | 709 | if (fmt[0] == 'M' && i != 5) |
| 643 | *p++ = ':'; | 710 | *p++ = separator; |
| 644 | } | 711 | } |
| 645 | *p = '\0'; | 712 | *p = '\0'; |
| 646 | 713 | ||
| 647 | return string(buf, end, mac_addr, spec); | 714 | return string(buf, end, mac_addr, spec); |
| 648 | } | 715 | } |
| 649 | 716 | ||
| 650 | static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) | 717 | static char *ip4_string(char *p, const u8 *addr, const char *fmt) |
| 651 | { | 718 | { |
| 652 | int i; | 719 | int i; |
| 653 | 720 | bool leading_zeros = (fmt[0] == 'i'); | |
| 721 | int index; | ||
| 722 | int step; | ||
| 723 | |||
| 724 | switch (fmt[2]) { | ||
| 725 | case 'h': | ||
| 726 | #ifdef __BIG_ENDIAN | ||
| 727 | index = 0; | ||
| 728 | step = 1; | ||
| 729 | #else | ||
| 730 | index = 3; | ||
| 731 | step = -1; | ||
| 732 | #endif | ||
| 733 | break; | ||
| 734 | case 'l': | ||
| 735 | index = 3; | ||
| 736 | step = -1; | ||
| 737 | break; | ||
| 738 | case 'n': | ||
| 739 | case 'b': | ||
| 740 | default: | ||
| 741 | index = 0; | ||
| 742 | step = 1; | ||
| 743 | break; | ||
| 744 | } | ||
| 654 | for (i = 0; i < 4; i++) { | 745 | for (i = 0; i < 4; i++) { |
| 655 | char temp[3]; /* hold each IP quad in reverse order */ | 746 | char temp[3]; /* hold each IP quad in reverse order */ |
| 656 | int digits = put_dec_trunc(temp, addr[i]) - temp; | 747 | int digits = put_dec_trunc(temp, addr[index]) - temp; |
| 657 | if (leading_zeros) { | 748 | if (leading_zeros) { |
| 658 | if (digits < 3) | 749 | if (digits < 3) |
| 659 | *p++ = '0'; | 750 | *p++ = '0'; |
| @@ -665,23 +756,21 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) | |||
| 665 | *p++ = temp[digits]; | 756 | *p++ = temp[digits]; |
| 666 | if (i < 3) | 757 | if (i < 3) |
| 667 | *p++ = '.'; | 758 | *p++ = '.'; |
| 759 | index += step; | ||
| 668 | } | 760 | } |
| 669 | |||
| 670 | *p = '\0'; | 761 | *p = '\0'; |
| 762 | |||
| 671 | return p; | 763 | return p; |
| 672 | } | 764 | } |
| 673 | 765 | ||
| 674 | static char *ip6_compressed_string(char *p, const char *addr) | 766 | static char *ip6_compressed_string(char *p, const char *addr) |
| 675 | { | 767 | { |
| 676 | int i; | 768 | int i, j, range; |
| 677 | int j; | ||
| 678 | int range; | ||
| 679 | unsigned char zerolength[8]; | 769 | unsigned char zerolength[8]; |
| 680 | int longest = 1; | 770 | int longest = 1; |
| 681 | int colonpos = -1; | 771 | int colonpos = -1; |
| 682 | u16 word; | 772 | u16 word; |
| 683 | u8 hi; | 773 | u8 hi, lo; |
| 684 | u8 lo; | ||
| 685 | bool needcolon = false; | 774 | bool needcolon = false; |
| 686 | bool useIPv4; | 775 | bool useIPv4; |
| 687 | struct in6_addr in6; | 776 | struct in6_addr in6; |
| @@ -735,8 +824,9 @@ static char *ip6_compressed_string(char *p, const char *addr) | |||
| 735 | p = pack_hex_byte(p, hi); | 824 | p = pack_hex_byte(p, hi); |
| 736 | else | 825 | else |
| 737 | *p++ = hex_asc_lo(hi); | 826 | *p++ = hex_asc_lo(hi); |
| 827 | p = pack_hex_byte(p, lo); | ||
| 738 | } | 828 | } |
| 739 | if (hi || lo > 0x0f) | 829 | else if (lo > 0x0f) |
| 740 | p = pack_hex_byte(p, lo); | 830 | p = pack_hex_byte(p, lo); |
| 741 | else | 831 | else |
| 742 | *p++ = hex_asc_lo(lo); | 832 | *p++ = hex_asc_lo(lo); |
| @@ -746,24 +836,25 @@ static char *ip6_compressed_string(char *p, const char *addr) | |||
| 746 | if (useIPv4) { | 836 | if (useIPv4) { |
| 747 | if (needcolon) | 837 | if (needcolon) |
| 748 | *p++ = ':'; | 838 | *p++ = ':'; |
| 749 | p = ip4_string(p, &in6.s6_addr[12], false); | 839 | p = ip4_string(p, &in6.s6_addr[12], "I4"); |
| 750 | } | 840 | } |
| 751 | |||
| 752 | *p = '\0'; | 841 | *p = '\0'; |
| 842 | |||
| 753 | return p; | 843 | return p; |
| 754 | } | 844 | } |
| 755 | 845 | ||
| 756 | static char *ip6_string(char *p, const char *addr, const char *fmt) | 846 | static char *ip6_string(char *p, const char *addr, const char *fmt) |
| 757 | { | 847 | { |
| 758 | int i; | 848 | int i; |
| 849 | |||
| 759 | for (i = 0; i < 8; i++) { | 850 | for (i = 0; i < 8; i++) { |
| 760 | p = pack_hex_byte(p, *addr++); | 851 | p = pack_hex_byte(p, *addr++); |
| 761 | p = pack_hex_byte(p, *addr++); | 852 | p = pack_hex_byte(p, *addr++); |
| 762 | if (fmt[0] == 'I' && i != 7) | 853 | if (fmt[0] == 'I' && i != 7) |
| 763 | *p++ = ':'; | 854 | *p++ = ':'; |
| 764 | } | 855 | } |
| 765 | |||
| 766 | *p = '\0'; | 856 | *p = '\0'; |
| 857 | |||
| 767 | return p; | 858 | return p; |
| 768 | } | 859 | } |
| 769 | 860 | ||
| @@ -785,11 +876,57 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr, | |||
| 785 | { | 876 | { |
| 786 | char ip4_addr[sizeof("255.255.255.255")]; | 877 | char ip4_addr[sizeof("255.255.255.255")]; |
| 787 | 878 | ||
| 788 | ip4_string(ip4_addr, addr, fmt[0] == 'i'); | 879 | ip4_string(ip4_addr, addr, fmt); |
| 789 | 880 | ||
| 790 | return string(buf, end, ip4_addr, spec); | 881 | return string(buf, end, ip4_addr, spec); |
| 791 | } | 882 | } |
| 792 | 883 | ||
| 884 | static char *uuid_string(char *buf, char *end, const u8 *addr, | ||
| 885 | struct printf_spec spec, const char *fmt) | ||
| 886 | { | ||
| 887 | char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")]; | ||
| 888 | char *p = uuid; | ||
| 889 | int i; | ||
| 890 | static const u8 be[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; | ||
| 891 | static const u8 le[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15}; | ||
| 892 | const u8 *index = be; | ||
| 893 | bool uc = false; | ||
| 894 | |||
| 895 | switch (*(++fmt)) { | ||
| 896 | case 'L': | ||
| 897 | uc = true; /* fall-through */ | ||
| 898 | case 'l': | ||
| 899 | index = le; | ||
| 900 | break; | ||
| 901 | case 'B': | ||
| 902 | uc = true; | ||
| 903 | break; | ||
| 904 | } | ||
| 905 | |||
| 906 | for (i = 0; i < 16; i++) { | ||
| 907 | p = pack_hex_byte(p, addr[index[i]]); | ||
| 908 | switch (i) { | ||
| 909 | case 3: | ||
| 910 | case 5: | ||
| 911 | case 7: | ||
| 912 | case 9: | ||
| 913 | *p++ = '-'; | ||
| 914 | break; | ||
| 915 | } | ||
| 916 | } | ||
| 917 | |||
| 918 | *p = 0; | ||
| 919 | |||
| 920 | if (uc) { | ||
| 921 | p = uuid; | ||
| 922 | do { | ||
| 923 | *p = toupper(*p); | ||
| 924 | } while (*(++p)); | ||
| 925 | } | ||
| 926 | |||
| 927 | return string(buf, end, uuid, spec); | ||
| 928 | } | ||
| 929 | |||
| 793 | /* | 930 | /* |
| 794 | * Show a '%p' thing. A kernel extension is that the '%p' is followed | 931 | * Show a '%p' thing. A kernel extension is that the '%p' is followed |
| 795 | * by an extra set of alphanumeric characters that are extended format | 932 | * by an extra set of alphanumeric characters that are extended format |
| @@ -801,19 +938,34 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr, | |||
| 801 | * - 'f' For simple symbolic function names without offset | 938 | * - 'f' For simple symbolic function names without offset |
| 802 | * - 'S' For symbolic direct pointers with offset | 939 | * - 'S' For symbolic direct pointers with offset |
| 803 | * - 's' For symbolic direct pointers without offset | 940 | * - 's' For symbolic direct pointers without offset |
| 804 | * - 'R' For a struct resource pointer, it prints the range of | 941 | * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] |
| 805 | * addresses (not the name nor the flags) | 942 | * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201] |
| 806 | * - 'M' For a 6-byte MAC address, it prints the address in the | 943 | * - 'M' For a 6-byte MAC address, it prints the address in the |
| 807 | * usual colon-separated hex notation | 944 | * usual colon-separated hex notation |
| 808 | * - 'm' For a 6-byte MAC address, it prints the hex address without colons | 945 | * - 'm' For a 6-byte MAC address, it prints the hex address without colons |
| 946 | * - 'MF' For a 6-byte MAC FDDI address, it prints the address | ||
| 947 | * with a dash-separated hex notation | ||
| 809 | * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way | 948 | * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way |
| 810 | * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) | 949 | * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) |
| 811 | * IPv6 uses colon separated network-order 16 bit hex with leading 0's | 950 | * IPv6 uses colon separated network-order 16 bit hex with leading 0's |
| 812 | * - 'i' [46] for 'raw' IPv4/IPv6 addresses | 951 | * - 'i' [46] for 'raw' IPv4/IPv6 addresses |
| 813 | * IPv6 omits the colons (01020304...0f) | 952 | * IPv6 omits the colons (01020304...0f) |
| 814 | * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) | 953 | * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) |
| 954 | * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order | ||
| 815 | * - 'I6c' for IPv6 addresses printed as specified by | 955 | * - 'I6c' for IPv6 addresses printed as specified by |
| 816 | * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt | 956 | * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 |
| 957 | * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form | ||
| 958 | * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" | ||
| 959 | * Options for %pU are: | ||
| 960 | * b big endian lower case hex (default) | ||
| 961 | * B big endian UPPER case hex | ||
| 962 | * l little endian lower case hex | ||
| 963 | * L little endian UPPER case hex | ||
| 964 | * big endian output byte order is: | ||
| 965 | * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15] | ||
| 966 | * little endian output byte order is: | ||
| 967 | * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15] | ||
| 968 | * | ||
| 817 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 | 969 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 |
| 818 | * function pointers are really function descriptors, which contain a | 970 | * function pointers are really function descriptors, which contain a |
| 819 | * pointer to the real address. | 971 | * pointer to the real address. |
| @@ -828,14 +980,16 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
| 828 | case 'F': | 980 | case 'F': |
| 829 | case 'f': | 981 | case 'f': |
| 830 | ptr = dereference_function_descriptor(ptr); | 982 | ptr = dereference_function_descriptor(ptr); |
| 831 | case 's': | ||
| 832 | /* Fallthrough */ | 983 | /* Fallthrough */ |
| 833 | case 'S': | 984 | case 'S': |
| 985 | case 's': | ||
| 834 | return symbol_string(buf, end, ptr, spec, *fmt); | 986 | return symbol_string(buf, end, ptr, spec, *fmt); |
| 835 | case 'R': | 987 | case 'R': |
| 836 | return resource_string(buf, end, ptr, spec); | 988 | case 'r': |
| 989 | return resource_string(buf, end, ptr, spec, fmt); | ||
| 837 | case 'M': /* Colon separated: 00:01:02:03:04:05 */ | 990 | case 'M': /* Colon separated: 00:01:02:03:04:05 */ |
| 838 | case 'm': /* Contiguous: 000102030405 */ | 991 | case 'm': /* Contiguous: 000102030405 */ |
| 992 | /* [mM]F (FDDI, bit reversed) */ | ||
| 839 | return mac_address_string(buf, end, ptr, spec, fmt); | 993 | return mac_address_string(buf, end, ptr, spec, fmt); |
| 840 | case 'I': /* Formatted IP supported | 994 | case 'I': /* Formatted IP supported |
| 841 | * 4: 1.2.3.4 | 995 | * 4: 1.2.3.4 |
| @@ -853,6 +1007,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
| 853 | return ip4_addr_string(buf, end, ptr, spec, fmt); | 1007 | return ip4_addr_string(buf, end, ptr, spec, fmt); |
| 854 | } | 1008 | } |
| 855 | break; | 1009 | break; |
| 1010 | case 'U': | ||
| 1011 | return uuid_string(buf, end, ptr, spec, fmt); | ||
| 856 | } | 1012 | } |
| 857 | spec.flags |= SMALL; | 1013 | spec.flags |= SMALL; |
| 858 | if (spec.field_width == -1) { | 1014 | if (spec.field_width == -1) { |
| @@ -970,8 +1126,8 @@ precision: | |||
| 970 | qualifier: | 1126 | qualifier: |
| 971 | /* get the conversion qualifier */ | 1127 | /* get the conversion qualifier */ |
| 972 | spec->qualifier = -1; | 1128 | spec->qualifier = -1; |
| 973 | if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || | 1129 | if (*fmt == 'h' || TOLOWER(*fmt) == 'l' || |
| 974 | *fmt == 'Z' || *fmt == 'z' || *fmt == 't') { | 1130 | TOLOWER(*fmt) == 'z' || *fmt == 't') { |
| 975 | spec->qualifier = *fmt++; | 1131 | spec->qualifier = *fmt++; |
| 976 | if (unlikely(spec->qualifier == *fmt)) { | 1132 | if (unlikely(spec->qualifier == *fmt)) { |
| 977 | if (spec->qualifier == 'l') { | 1133 | if (spec->qualifier == 'l') { |
| @@ -1038,7 +1194,7 @@ qualifier: | |||
| 1038 | spec->type = FORMAT_TYPE_LONG; | 1194 | spec->type = FORMAT_TYPE_LONG; |
| 1039 | else | 1195 | else |
| 1040 | spec->type = FORMAT_TYPE_ULONG; | 1196 | spec->type = FORMAT_TYPE_ULONG; |
| 1041 | } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') { | 1197 | } else if (TOLOWER(spec->qualifier) == 'z') { |
| 1042 | spec->type = FORMAT_TYPE_SIZE_T; | 1198 | spec->type = FORMAT_TYPE_SIZE_T; |
| 1043 | } else if (spec->qualifier == 't') { | 1199 | } else if (spec->qualifier == 't') { |
| 1044 | spec->type = FORMAT_TYPE_PTRDIFF; | 1200 | spec->type = FORMAT_TYPE_PTRDIFF; |
| @@ -1074,7 +1230,18 @@ qualifier: | |||
| 1074 | * %ps output the name of a text symbol without offset | 1230 | * %ps output the name of a text symbol without offset |
| 1075 | * %pF output the name of a function pointer with its offset | 1231 | * %pF output the name of a function pointer with its offset |
| 1076 | * %pf output the name of a function pointer without its offset | 1232 | * %pf output the name of a function pointer without its offset |
| 1077 | * %pR output the address range in a struct resource | 1233 | * %pR output the address range in a struct resource with decoded flags |
| 1234 | * %pr output the address range in a struct resource with raw flags | ||
| 1235 | * %pM output a 6-byte MAC address with colons | ||
| 1236 | * %pm output a 6-byte MAC address without colons | ||
| 1237 | * %pI4 print an IPv4 address without leading zeros | ||
| 1238 | * %pi4 print an IPv4 address with leading zeros | ||
| 1239 | * %pI6 print an IPv6 address with colons | ||
| 1240 | * %pi6 print an IPv6 address without colons | ||
| 1241 | * %pI6c print an IPv6 address as specified by | ||
| 1242 | * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 | ||
| 1243 | * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper | ||
| 1244 | * case. | ||
| 1078 | * %n is ignored | 1245 | * %n is ignored |
| 1079 | * | 1246 | * |
| 1080 | * The return value is the number of characters which would | 1247 | * The return value is the number of characters which would |
| @@ -1091,8 +1258,7 @@ qualifier: | |||
| 1091 | int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | 1258 | int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) |
| 1092 | { | 1259 | { |
| 1093 | unsigned long long num; | 1260 | unsigned long long num; |
| 1094 | char *str, *end, c; | 1261 | char *str, *end; |
| 1095 | int read; | ||
| 1096 | struct printf_spec spec = {0}; | 1262 | struct printf_spec spec = {0}; |
| 1097 | 1263 | ||
| 1098 | /* Reject out-of-range values early. Large positive sizes are | 1264 | /* Reject out-of-range values early. Large positive sizes are |
| @@ -1111,8 +1277,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1111 | 1277 | ||
| 1112 | while (*fmt) { | 1278 | while (*fmt) { |
| 1113 | const char *old_fmt = fmt; | 1279 | const char *old_fmt = fmt; |
| 1114 | 1280 | int read = format_decode(fmt, &spec); | |
| 1115 | read = format_decode(fmt, &spec); | ||
| 1116 | 1281 | ||
| 1117 | fmt += read; | 1282 | fmt += read; |
| 1118 | 1283 | ||
| @@ -1136,7 +1301,9 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1136 | spec.precision = va_arg(args, int); | 1301 | spec.precision = va_arg(args, int); |
| 1137 | break; | 1302 | break; |
| 1138 | 1303 | ||
| 1139 | case FORMAT_TYPE_CHAR: | 1304 | case FORMAT_TYPE_CHAR: { |
| 1305 | char c; | ||
| 1306 | |||
| 1140 | if (!(spec.flags & LEFT)) { | 1307 | if (!(spec.flags & LEFT)) { |
| 1141 | while (--spec.field_width > 0) { | 1308 | while (--spec.field_width > 0) { |
| 1142 | if (str < end) | 1309 | if (str < end) |
| @@ -1155,6 +1322,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1155 | ++str; | 1322 | ++str; |
| 1156 | } | 1323 | } |
| 1157 | break; | 1324 | break; |
| 1325 | } | ||
| 1158 | 1326 | ||
| 1159 | case FORMAT_TYPE_STR: | 1327 | case FORMAT_TYPE_STR: |
| 1160 | str = string(str, end, va_arg(args, char *), spec); | 1328 | str = string(str, end, va_arg(args, char *), spec); |
| @@ -1180,13 +1348,12 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1180 | break; | 1348 | break; |
| 1181 | 1349 | ||
| 1182 | case FORMAT_TYPE_NRCHARS: { | 1350 | case FORMAT_TYPE_NRCHARS: { |
| 1183 | int qualifier = spec.qualifier; | 1351 | u8 qualifier = spec.qualifier; |
| 1184 | 1352 | ||
| 1185 | if (qualifier == 'l') { | 1353 | if (qualifier == 'l') { |
| 1186 | long *ip = va_arg(args, long *); | 1354 | long *ip = va_arg(args, long *); |
| 1187 | *ip = (str - buf); | 1355 | *ip = (str - buf); |
| 1188 | } else if (qualifier == 'Z' || | 1356 | } else if (TOLOWER(qualifier) == 'z') { |
| 1189 | qualifier == 'z') { | ||
| 1190 | size_t *ip = va_arg(args, size_t *); | 1357 | size_t *ip = va_arg(args, size_t *); |
| 1191 | *ip = (str - buf); | 1358 | *ip = (str - buf); |
| 1192 | } else { | 1359 | } else { |
| @@ -1269,7 +1436,8 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1269 | { | 1436 | { |
| 1270 | int i; | 1437 | int i; |
| 1271 | 1438 | ||
| 1272 | i=vsnprintf(buf,size,fmt,args); | 1439 | i = vsnprintf(buf, size, fmt, args); |
| 1440 | |||
| 1273 | return (i >= size) ? (size - 1) : i; | 1441 | return (i >= size) ? (size - 1) : i; |
| 1274 | } | 1442 | } |
| 1275 | EXPORT_SYMBOL(vscnprintf); | 1443 | EXPORT_SYMBOL(vscnprintf); |
| @@ -1288,14 +1456,15 @@ EXPORT_SYMBOL(vscnprintf); | |||
| 1288 | * | 1456 | * |
| 1289 | * See the vsnprintf() documentation for format string extensions over C99. | 1457 | * See the vsnprintf() documentation for format string extensions over C99. |
| 1290 | */ | 1458 | */ |
| 1291 | int snprintf(char * buf, size_t size, const char *fmt, ...) | 1459 | int snprintf(char *buf, size_t size, const char *fmt, ...) |
| 1292 | { | 1460 | { |
| 1293 | va_list args; | 1461 | va_list args; |
| 1294 | int i; | 1462 | int i; |
| 1295 | 1463 | ||
| 1296 | va_start(args, fmt); | 1464 | va_start(args, fmt); |
| 1297 | i=vsnprintf(buf,size,fmt,args); | 1465 | i = vsnprintf(buf, size, fmt, args); |
| 1298 | va_end(args); | 1466 | va_end(args); |
| 1467 | |||
| 1299 | return i; | 1468 | return i; |
| 1300 | } | 1469 | } |
| 1301 | EXPORT_SYMBOL(snprintf); | 1470 | EXPORT_SYMBOL(snprintf); |
| @@ -1311,7 +1480,7 @@ EXPORT_SYMBOL(snprintf); | |||
| 1311 | * the trailing '\0'. If @size is <= 0 the function returns 0. | 1480 | * the trailing '\0'. If @size is <= 0 the function returns 0. |
| 1312 | */ | 1481 | */ |
| 1313 | 1482 | ||
| 1314 | int scnprintf(char * buf, size_t size, const char *fmt, ...) | 1483 | int scnprintf(char *buf, size_t size, const char *fmt, ...) |
| 1315 | { | 1484 | { |
| 1316 | va_list args; | 1485 | va_list args; |
| 1317 | int i; | 1486 | int i; |
| @@ -1319,6 +1488,7 @@ int scnprintf(char * buf, size_t size, const char *fmt, ...) | |||
| 1319 | va_start(args, fmt); | 1488 | va_start(args, fmt); |
| 1320 | i = vsnprintf(buf, size, fmt, args); | 1489 | i = vsnprintf(buf, size, fmt, args); |
| 1321 | va_end(args); | 1490 | va_end(args); |
| 1491 | |||
| 1322 | return (i >= size) ? (size - 1) : i; | 1492 | return (i >= size) ? (size - 1) : i; |
| 1323 | } | 1493 | } |
| 1324 | EXPORT_SYMBOL(scnprintf); | 1494 | EXPORT_SYMBOL(scnprintf); |
| @@ -1356,14 +1526,15 @@ EXPORT_SYMBOL(vsprintf); | |||
| 1356 | * | 1526 | * |
| 1357 | * See the vsnprintf() documentation for format string extensions over C99. | 1527 | * See the vsnprintf() documentation for format string extensions over C99. |
| 1358 | */ | 1528 | */ |
| 1359 | int sprintf(char * buf, const char *fmt, ...) | 1529 | int sprintf(char *buf, const char *fmt, ...) |
| 1360 | { | 1530 | { |
| 1361 | va_list args; | 1531 | va_list args; |
| 1362 | int i; | 1532 | int i; |
| 1363 | 1533 | ||
| 1364 | va_start(args, fmt); | 1534 | va_start(args, fmt); |
| 1365 | i=vsnprintf(buf, INT_MAX, fmt, args); | 1535 | i = vsnprintf(buf, INT_MAX, fmt, args); |
| 1366 | va_end(args); | 1536 | va_end(args); |
| 1537 | |||
| 1367 | return i; | 1538 | return i; |
| 1368 | } | 1539 | } |
| 1369 | EXPORT_SYMBOL(sprintf); | 1540 | EXPORT_SYMBOL(sprintf); |
| @@ -1396,7 +1567,6 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args) | |||
| 1396 | { | 1567 | { |
| 1397 | struct printf_spec spec = {0}; | 1568 | struct printf_spec spec = {0}; |
| 1398 | char *str, *end; | 1569 | char *str, *end; |
| 1399 | int read; | ||
| 1400 | 1570 | ||
| 1401 | str = (char *)bin_buf; | 1571 | str = (char *)bin_buf; |
| 1402 | end = (char *)(bin_buf + size); | 1572 | end = (char *)(bin_buf + size); |
| @@ -1421,14 +1591,15 @@ do { \ | |||
| 1421 | str += sizeof(type); \ | 1591 | str += sizeof(type); \ |
| 1422 | } while (0) | 1592 | } while (0) |
| 1423 | 1593 | ||
| 1424 | |||
| 1425 | while (*fmt) { | 1594 | while (*fmt) { |
| 1426 | read = format_decode(fmt, &spec); | 1595 | int read = format_decode(fmt, &spec); |
| 1427 | 1596 | ||
| 1428 | fmt += read; | 1597 | fmt += read; |
| 1429 | 1598 | ||
| 1430 | switch (spec.type) { | 1599 | switch (spec.type) { |
| 1431 | case FORMAT_TYPE_NONE: | 1600 | case FORMAT_TYPE_NONE: |
| 1601 | case FORMAT_TYPE_INVALID: | ||
| 1602 | case FORMAT_TYPE_PERCENT_CHAR: | ||
| 1432 | break; | 1603 | break; |
| 1433 | 1604 | ||
| 1434 | case FORMAT_TYPE_WIDTH: | 1605 | case FORMAT_TYPE_WIDTH: |
| @@ -1443,13 +1614,14 @@ do { \ | |||
| 1443 | case FORMAT_TYPE_STR: { | 1614 | case FORMAT_TYPE_STR: { |
| 1444 | const char *save_str = va_arg(args, char *); | 1615 | const char *save_str = va_arg(args, char *); |
| 1445 | size_t len; | 1616 | size_t len; |
| 1617 | |||
| 1446 | if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE | 1618 | if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE |
| 1447 | || (unsigned long)save_str < PAGE_SIZE) | 1619 | || (unsigned long)save_str < PAGE_SIZE) |
| 1448 | save_str = "<NULL>"; | 1620 | save_str = "(null)"; |
| 1449 | len = strlen(save_str); | 1621 | len = strlen(save_str) + 1; |
| 1450 | if (str + len + 1 < end) | 1622 | if (str + len < end) |
| 1451 | memcpy(str, save_str, len + 1); | 1623 | memcpy(str, save_str, len); |
| 1452 | str += len + 1; | 1624 | str += len; |
| 1453 | break; | 1625 | break; |
| 1454 | } | 1626 | } |
| 1455 | 1627 | ||
| @@ -1460,19 +1632,13 @@ do { \ | |||
| 1460 | fmt++; | 1632 | fmt++; |
| 1461 | break; | 1633 | break; |
| 1462 | 1634 | ||
| 1463 | case FORMAT_TYPE_PERCENT_CHAR: | ||
| 1464 | break; | ||
| 1465 | |||
| 1466 | case FORMAT_TYPE_INVALID: | ||
| 1467 | break; | ||
| 1468 | |||
| 1469 | case FORMAT_TYPE_NRCHARS: { | 1635 | case FORMAT_TYPE_NRCHARS: { |
| 1470 | /* skip %n 's argument */ | 1636 | /* skip %n 's argument */ |
| 1471 | int qualifier = spec.qualifier; | 1637 | u8 qualifier = spec.qualifier; |
| 1472 | void *skip_arg; | 1638 | void *skip_arg; |
| 1473 | if (qualifier == 'l') | 1639 | if (qualifier == 'l') |
| 1474 | skip_arg = va_arg(args, long *); | 1640 | skip_arg = va_arg(args, long *); |
| 1475 | else if (qualifier == 'Z' || qualifier == 'z') | 1641 | else if (TOLOWER(qualifier) == 'z') |
| 1476 | skip_arg = va_arg(args, size_t *); | 1642 | skip_arg = va_arg(args, size_t *); |
| 1477 | else | 1643 | else |
| 1478 | skip_arg = va_arg(args, int *); | 1644 | skip_arg = va_arg(args, int *); |
| @@ -1508,8 +1674,8 @@ do { \ | |||
| 1508 | } | 1674 | } |
| 1509 | } | 1675 | } |
| 1510 | } | 1676 | } |
| 1511 | return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf; | ||
| 1512 | 1677 | ||
| 1678 | return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf; | ||
| 1513 | #undef save_arg | 1679 | #undef save_arg |
| 1514 | } | 1680 | } |
| 1515 | EXPORT_SYMBOL_GPL(vbin_printf); | 1681 | EXPORT_SYMBOL_GPL(vbin_printf); |
| @@ -1538,11 +1704,9 @@ EXPORT_SYMBOL_GPL(vbin_printf); | |||
| 1538 | */ | 1704 | */ |
| 1539 | int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | 1705 | int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) |
| 1540 | { | 1706 | { |
| 1541 | unsigned long long num; | ||
| 1542 | char *str, *end, c; | ||
| 1543 | const char *args = (const char *)bin_buf; | ||
| 1544 | |||
| 1545 | struct printf_spec spec = {0}; | 1707 | struct printf_spec spec = {0}; |
| 1708 | char *str, *end; | ||
| 1709 | const char *args = (const char *)bin_buf; | ||
| 1546 | 1710 | ||
| 1547 | if (WARN_ON_ONCE((int) size < 0)) | 1711 | if (WARN_ON_ONCE((int) size < 0)) |
| 1548 | return 0; | 1712 | return 0; |
| @@ -1572,10 +1736,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 1572 | } | 1736 | } |
| 1573 | 1737 | ||
| 1574 | while (*fmt) { | 1738 | while (*fmt) { |
| 1575 | int read; | ||
| 1576 | const char *old_fmt = fmt; | 1739 | const char *old_fmt = fmt; |
| 1577 | 1740 | int read = format_decode(fmt, &spec); | |
| 1578 | read = format_decode(fmt, &spec); | ||
| 1579 | 1741 | ||
| 1580 | fmt += read; | 1742 | fmt += read; |
| 1581 | 1743 | ||
| @@ -1599,7 +1761,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 1599 | spec.precision = get_arg(int); | 1761 | spec.precision = get_arg(int); |
| 1600 | break; | 1762 | break; |
| 1601 | 1763 | ||
| 1602 | case FORMAT_TYPE_CHAR: | 1764 | case FORMAT_TYPE_CHAR: { |
| 1765 | char c; | ||
| 1766 | |||
| 1603 | if (!(spec.flags & LEFT)) { | 1767 | if (!(spec.flags & LEFT)) { |
| 1604 | while (--spec.field_width > 0) { | 1768 | while (--spec.field_width > 0) { |
| 1605 | if (str < end) | 1769 | if (str < end) |
| @@ -1617,11 +1781,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 1617 | ++str; | 1781 | ++str; |
| 1618 | } | 1782 | } |
| 1619 | break; | 1783 | break; |
| 1784 | } | ||
| 1620 | 1785 | ||
| 1621 | case FORMAT_TYPE_STR: { | 1786 | case FORMAT_TYPE_STR: { |
| 1622 | const char *str_arg = args; | 1787 | const char *str_arg = args; |
| 1623 | size_t len = strlen(str_arg); | 1788 | args += strlen(str_arg) + 1; |
| 1624 | args += len + 1; | ||
| 1625 | str = string(str, end, (char *)str_arg, spec); | 1789 | str = string(str, end, (char *)str_arg, spec); |
| 1626 | break; | 1790 | break; |
| 1627 | } | 1791 | } |
| @@ -1633,11 +1797,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 1633 | break; | 1797 | break; |
| 1634 | 1798 | ||
| 1635 | case FORMAT_TYPE_PERCENT_CHAR: | 1799 | case FORMAT_TYPE_PERCENT_CHAR: |
| 1636 | if (str < end) | ||
| 1637 | *str = '%'; | ||
| 1638 | ++str; | ||
| 1639 | break; | ||
| 1640 | |||
| 1641 | case FORMAT_TYPE_INVALID: | 1800 | case FORMAT_TYPE_INVALID: |
| 1642 | if (str < end) | 1801 | if (str < end) |
| 1643 | *str = '%'; | 1802 | *str = '%'; |
| @@ -1648,15 +1807,15 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 1648 | /* skip */ | 1807 | /* skip */ |
| 1649 | break; | 1808 | break; |
| 1650 | 1809 | ||
| 1651 | default: | 1810 | default: { |
| 1811 | unsigned long long num; | ||
| 1812 | |||
| 1652 | switch (spec.type) { | 1813 | switch (spec.type) { |
| 1653 | 1814 | ||
| 1654 | case FORMAT_TYPE_LONG_LONG: | 1815 | case FORMAT_TYPE_LONG_LONG: |
| 1655 | num = get_arg(long long); | 1816 | num = get_arg(long long); |
| 1656 | break; | 1817 | break; |
| 1657 | case FORMAT_TYPE_ULONG: | 1818 | case FORMAT_TYPE_ULONG: |
| 1658 | num = get_arg(unsigned long); | ||
| 1659 | break; | ||
| 1660 | case FORMAT_TYPE_LONG: | 1819 | case FORMAT_TYPE_LONG: |
| 1661 | num = get_arg(unsigned long); | 1820 | num = get_arg(unsigned long); |
| 1662 | break; | 1821 | break; |
| @@ -1686,8 +1845,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 1686 | } | 1845 | } |
| 1687 | 1846 | ||
| 1688 | str = number(str, end, num, spec); | 1847 | str = number(str, end, num, spec); |
| 1689 | } | 1848 | } /* default: */ |
| 1690 | } | 1849 | } /* switch(spec.type) */ |
| 1850 | } /* while(*fmt) */ | ||
| 1691 | 1851 | ||
| 1692 | if (size > 0) { | 1852 | if (size > 0) { |
| 1693 | if (str < end) | 1853 | if (str < end) |
| @@ -1721,6 +1881,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) | |||
| 1721 | va_start(args, fmt); | 1881 | va_start(args, fmt); |
| 1722 | ret = vbin_printf(bin_buf, size, fmt, args); | 1882 | ret = vbin_printf(bin_buf, size, fmt, args); |
| 1723 | va_end(args); | 1883 | va_end(args); |
| 1884 | |||
| 1724 | return ret; | 1885 | return ret; |
| 1725 | } | 1886 | } |
| 1726 | EXPORT_SYMBOL_GPL(bprintf); | 1887 | EXPORT_SYMBOL_GPL(bprintf); |
| @@ -1733,27 +1894,25 @@ EXPORT_SYMBOL_GPL(bprintf); | |||
| 1733 | * @fmt: format of buffer | 1894 | * @fmt: format of buffer |
| 1734 | * @args: arguments | 1895 | * @args: arguments |
| 1735 | */ | 1896 | */ |
| 1736 | int vsscanf(const char * buf, const char * fmt, va_list args) | 1897 | int vsscanf(const char *buf, const char *fmt, va_list args) |
| 1737 | { | 1898 | { |
| 1738 | const char *str = buf; | 1899 | const char *str = buf; |
| 1739 | char *next; | 1900 | char *next; |
| 1740 | char digit; | 1901 | char digit; |
| 1741 | int num = 0; | 1902 | int num = 0; |
| 1742 | int qualifier; | 1903 | u8 qualifier; |
| 1743 | int base; | 1904 | u8 base; |
| 1744 | int field_width; | 1905 | s16 field_width; |
| 1745 | int is_sign = 0; | 1906 | bool is_sign; |
| 1746 | 1907 | ||
| 1747 | while(*fmt && *str) { | 1908 | while (*fmt && *str) { |
| 1748 | /* skip any white space in format */ | 1909 | /* skip any white space in format */ |
| 1749 | /* white space in format matchs any amount of | 1910 | /* white space in format matchs any amount of |
| 1750 | * white space, including none, in the input. | 1911 | * white space, including none, in the input. |
| 1751 | */ | 1912 | */ |
| 1752 | if (isspace(*fmt)) { | 1913 | if (isspace(*fmt)) { |
| 1753 | while (isspace(*fmt)) | 1914 | fmt = skip_spaces(++fmt); |
| 1754 | ++fmt; | 1915 | str = skip_spaces(str); |
| 1755 | while (isspace(*str)) | ||
| 1756 | ++str; | ||
| 1757 | } | 1916 | } |
| 1758 | 1917 | ||
| 1759 | /* anything that is not a conversion must match exactly */ | 1918 | /* anything that is not a conversion must match exactly */ |
| @@ -1766,7 +1925,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
| 1766 | if (!*fmt) | 1925 | if (!*fmt) |
| 1767 | break; | 1926 | break; |
| 1768 | ++fmt; | 1927 | ++fmt; |
| 1769 | 1928 | ||
| 1770 | /* skip this conversion. | 1929 | /* skip this conversion. |
| 1771 | * advance both strings to next white space | 1930 | * advance both strings to next white space |
| 1772 | */ | 1931 | */ |
| @@ -1785,8 +1944,8 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
| 1785 | 1944 | ||
| 1786 | /* get conversion qualifier */ | 1945 | /* get conversion qualifier */ |
| 1787 | qualifier = -1; | 1946 | qualifier = -1; |
| 1788 | if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || | 1947 | if (*fmt == 'h' || TOLOWER(*fmt) == 'l' || |
| 1789 | *fmt == 'Z' || *fmt == 'z') { | 1948 | TOLOWER(*fmt) == 'z') { |
| 1790 | qualifier = *fmt++; | 1949 | qualifier = *fmt++; |
| 1791 | if (unlikely(qualifier == *fmt)) { | 1950 | if (unlikely(qualifier == *fmt)) { |
| 1792 | if (qualifier == 'h') { | 1951 | if (qualifier == 'h') { |
| @@ -1798,16 +1957,17 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
| 1798 | } | 1957 | } |
| 1799 | } | 1958 | } |
| 1800 | } | 1959 | } |
| 1801 | base = 10; | ||
| 1802 | is_sign = 0; | ||
| 1803 | 1960 | ||
| 1804 | if (!*fmt || !*str) | 1961 | if (!*fmt || !*str) |
| 1805 | break; | 1962 | break; |
| 1806 | 1963 | ||
| 1807 | switch(*fmt++) { | 1964 | base = 10; |
| 1965 | is_sign = 0; | ||
| 1966 | |||
| 1967 | switch (*fmt++) { | ||
| 1808 | case 'c': | 1968 | case 'c': |
| 1809 | { | 1969 | { |
| 1810 | char *s = (char *) va_arg(args,char*); | 1970 | char *s = (char *)va_arg(args, char*); |
| 1811 | if (field_width == -1) | 1971 | if (field_width == -1) |
| 1812 | field_width = 1; | 1972 | field_width = 1; |
| 1813 | do { | 1973 | do { |
| @@ -1818,17 +1978,15 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
| 1818 | continue; | 1978 | continue; |
| 1819 | case 's': | 1979 | case 's': |
| 1820 | { | 1980 | { |
| 1821 | char *s = (char *) va_arg(args, char *); | 1981 | char *s = (char *)va_arg(args, char *); |
| 1822 | if(field_width == -1) | 1982 | if (field_width == -1) |
| 1823 | field_width = INT_MAX; | 1983 | field_width = SHORT_MAX; |
| 1824 | /* first, skip leading white space in buffer */ | 1984 | /* first, skip leading white space in buffer */ |
| 1825 | while (isspace(*str)) | 1985 | str = skip_spaces(str); |
| 1826 | str++; | ||
| 1827 | 1986 | ||
| 1828 | /* now copy until next white space */ | 1987 | /* now copy until next white space */ |
| 1829 | while (*str && !isspace(*str) && field_width--) { | 1988 | while (*str && !isspace(*str) && field_width--) |
| 1830 | *s++ = *str++; | 1989 | *s++ = *str++; |
| 1831 | } | ||
| 1832 | *s = '\0'; | 1990 | *s = '\0'; |
| 1833 | num++; | 1991 | num++; |
| 1834 | } | 1992 | } |
| @@ -1836,7 +1994,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
| 1836 | case 'n': | 1994 | case 'n': |
| 1837 | /* return number of characters read so far */ | 1995 | /* return number of characters read so far */ |
| 1838 | { | 1996 | { |
| 1839 | int *i = (int *)va_arg(args,int*); | 1997 | int *i = (int *)va_arg(args, int*); |
| 1840 | *i = str - buf; | 1998 | *i = str - buf; |
| 1841 | } | 1999 | } |
| 1842 | continue; | 2000 | continue; |
| @@ -1848,14 +2006,14 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
| 1848 | base = 16; | 2006 | base = 16; |
| 1849 | break; | 2007 | break; |
| 1850 | case 'i': | 2008 | case 'i': |
| 1851 | base = 0; | 2009 | base = 0; |
| 1852 | case 'd': | 2010 | case 'd': |
| 1853 | is_sign = 1; | 2011 | is_sign = 1; |
| 1854 | case 'u': | 2012 | case 'u': |
| 1855 | break; | 2013 | break; |
| 1856 | case '%': | 2014 | case '%': |
| 1857 | /* looking for '%' in str */ | 2015 | /* looking for '%' in str */ |
| 1858 | if (*str++ != '%') | 2016 | if (*str++ != '%') |
| 1859 | return num; | 2017 | return num; |
| 1860 | continue; | 2018 | continue; |
| 1861 | default: | 2019 | default: |
| @@ -1866,71 +2024,70 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
| 1866 | /* have some sort of integer conversion. | 2024 | /* have some sort of integer conversion. |
| 1867 | * first, skip white space in buffer. | 2025 | * first, skip white space in buffer. |
| 1868 | */ | 2026 | */ |
| 1869 | while (isspace(*str)) | 2027 | str = skip_spaces(str); |
| 1870 | str++; | ||
| 1871 | 2028 | ||
| 1872 | digit = *str; | 2029 | digit = *str; |
| 1873 | if (is_sign && digit == '-') | 2030 | if (is_sign && digit == '-') |
| 1874 | digit = *(str + 1); | 2031 | digit = *(str + 1); |
| 1875 | 2032 | ||
| 1876 | if (!digit | 2033 | if (!digit |
| 1877 | || (base == 16 && !isxdigit(digit)) | 2034 | || (base == 16 && !isxdigit(digit)) |
| 1878 | || (base == 10 && !isdigit(digit)) | 2035 | || (base == 10 && !isdigit(digit)) |
| 1879 | || (base == 8 && (!isdigit(digit) || digit > '7')) | 2036 | || (base == 8 && (!isdigit(digit) || digit > '7')) |
| 1880 | || (base == 0 && !isdigit(digit))) | 2037 | || (base == 0 && !isdigit(digit))) |
| 1881 | break; | 2038 | break; |
| 1882 | 2039 | ||
| 1883 | switch(qualifier) { | 2040 | switch (qualifier) { |
| 1884 | case 'H': /* that's 'hh' in format */ | 2041 | case 'H': /* that's 'hh' in format */ |
| 1885 | if (is_sign) { | 2042 | if (is_sign) { |
| 1886 | signed char *s = (signed char *) va_arg(args,signed char *); | 2043 | signed char *s = (signed char *)va_arg(args, signed char *); |
| 1887 | *s = (signed char) simple_strtol(str,&next,base); | 2044 | *s = (signed char)simple_strtol(str, &next, base); |
| 1888 | } else { | 2045 | } else { |
| 1889 | unsigned char *s = (unsigned char *) va_arg(args, unsigned char *); | 2046 | unsigned char *s = (unsigned char *)va_arg(args, unsigned char *); |
| 1890 | *s = (unsigned char) simple_strtoul(str, &next, base); | 2047 | *s = (unsigned char)simple_strtoul(str, &next, base); |
| 1891 | } | 2048 | } |
| 1892 | break; | 2049 | break; |
| 1893 | case 'h': | 2050 | case 'h': |
| 1894 | if (is_sign) { | 2051 | if (is_sign) { |
| 1895 | short *s = (short *) va_arg(args,short *); | 2052 | short *s = (short *)va_arg(args, short *); |
| 1896 | *s = (short) simple_strtol(str,&next,base); | 2053 | *s = (short)simple_strtol(str, &next, base); |
| 1897 | } else { | 2054 | } else { |
| 1898 | unsigned short *s = (unsigned short *) va_arg(args, unsigned short *); | 2055 | unsigned short *s = (unsigned short *)va_arg(args, unsigned short *); |
| 1899 | *s = (unsigned short) simple_strtoul(str, &next, base); | 2056 | *s = (unsigned short)simple_strtoul(str, &next, base); |
| 1900 | } | 2057 | } |
| 1901 | break; | 2058 | break; |
| 1902 | case 'l': | 2059 | case 'l': |
| 1903 | if (is_sign) { | 2060 | if (is_sign) { |
| 1904 | long *l = (long *) va_arg(args,long *); | 2061 | long *l = (long *)va_arg(args, long *); |
| 1905 | *l = simple_strtol(str,&next,base); | 2062 | *l = simple_strtol(str, &next, base); |
| 1906 | } else { | 2063 | } else { |
| 1907 | unsigned long *l = (unsigned long*) va_arg(args,unsigned long*); | 2064 | unsigned long *l = (unsigned long *)va_arg(args, unsigned long *); |
| 1908 | *l = simple_strtoul(str,&next,base); | 2065 | *l = simple_strtoul(str, &next, base); |
| 1909 | } | 2066 | } |
| 1910 | break; | 2067 | break; |
| 1911 | case 'L': | 2068 | case 'L': |
| 1912 | if (is_sign) { | 2069 | if (is_sign) { |
| 1913 | long long *l = (long long*) va_arg(args,long long *); | 2070 | long long *l = (long long *)va_arg(args, long long *); |
| 1914 | *l = simple_strtoll(str,&next,base); | 2071 | *l = simple_strtoll(str, &next, base); |
| 1915 | } else { | 2072 | } else { |
| 1916 | unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*); | 2073 | unsigned long long *l = (unsigned long long *)va_arg(args, unsigned long long *); |
| 1917 | *l = simple_strtoull(str,&next,base); | 2074 | *l = simple_strtoull(str, &next, base); |
| 1918 | } | 2075 | } |
| 1919 | break; | 2076 | break; |
| 1920 | case 'Z': | 2077 | case 'Z': |
| 1921 | case 'z': | 2078 | case 'z': |
| 1922 | { | 2079 | { |
| 1923 | size_t *s = (size_t*) va_arg(args,size_t*); | 2080 | size_t *s = (size_t *)va_arg(args, size_t *); |
| 1924 | *s = (size_t) simple_strtoul(str,&next,base); | 2081 | *s = (size_t)simple_strtoul(str, &next, base); |
| 1925 | } | 2082 | } |
| 1926 | break; | 2083 | break; |
| 1927 | default: | 2084 | default: |
| 1928 | if (is_sign) { | 2085 | if (is_sign) { |
| 1929 | int *i = (int *) va_arg(args, int*); | 2086 | int *i = (int *)va_arg(args, int *); |
| 1930 | *i = (int) simple_strtol(str,&next,base); | 2087 | *i = (int)simple_strtol(str, &next, base); |
| 1931 | } else { | 2088 | } else { |
| 1932 | unsigned int *i = (unsigned int*) va_arg(args, unsigned int*); | 2089 | unsigned int *i = (unsigned int *)va_arg(args, unsigned int*); |
| 1933 | *i = (unsigned int) simple_strtoul(str,&next,base); | 2090 | *i = (unsigned int)simple_strtoul(str, &next, base); |
| 1934 | } | 2091 | } |
| 1935 | break; | 2092 | break; |
| 1936 | } | 2093 | } |
| @@ -1961,14 +2118,15 @@ EXPORT_SYMBOL(vsscanf); | |||
| 1961 | * @fmt: formatting of buffer | 2118 | * @fmt: formatting of buffer |
| 1962 | * @...: resulting arguments | 2119 | * @...: resulting arguments |
| 1963 | */ | 2120 | */ |
| 1964 | int sscanf(const char * buf, const char * fmt, ...) | 2121 | int sscanf(const char *buf, const char *fmt, ...) |
| 1965 | { | 2122 | { |
| 1966 | va_list args; | 2123 | va_list args; |
| 1967 | int i; | 2124 | int i; |
| 1968 | 2125 | ||
| 1969 | va_start(args,fmt); | 2126 | va_start(args, fmt); |
| 1970 | i = vsscanf(buf,fmt,args); | 2127 | i = vsscanf(buf, fmt, args); |
| 1971 | va_end(args); | 2128 | va_end(args); |
| 2129 | |||
| 1972 | return i; | 2130 | return i; |
| 1973 | } | 2131 | } |
| 1974 | EXPORT_SYMBOL(sscanf); | 2132 | EXPORT_SYMBOL(sscanf); |
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c index 8550b0c05d00..2c13ecc5bb2c 100644 --- a/lib/zlib_inflate/inffast.c +++ b/lib/zlib_inflate/inffast.c | |||
| @@ -21,12 +21,31 @@ | |||
| 21 | - Pentium III (Anderson) | 21 | - Pentium III (Anderson) |
| 22 | - M68060 (Nikl) | 22 | - M68060 (Nikl) |
| 23 | */ | 23 | */ |
| 24 | union uu { | ||
| 25 | unsigned short us; | ||
| 26 | unsigned char b[2]; | ||
| 27 | }; | ||
| 28 | |||
| 29 | /* Endian independed version */ | ||
| 30 | static inline unsigned short | ||
| 31 | get_unaligned16(const unsigned short *p) | ||
| 32 | { | ||
| 33 | union uu mm; | ||
| 34 | unsigned char *b = (unsigned char *)p; | ||
| 35 | |||
| 36 | mm.b[0] = b[0]; | ||
| 37 | mm.b[1] = b[1]; | ||
| 38 | return mm.us; | ||
| 39 | } | ||
| 40 | |||
| 24 | #ifdef POSTINC | 41 | #ifdef POSTINC |
| 25 | # define OFF 0 | 42 | # define OFF 0 |
| 26 | # define PUP(a) *(a)++ | 43 | # define PUP(a) *(a)++ |
| 44 | # define UP_UNALIGNED(a) get_unaligned16((a)++) | ||
| 27 | #else | 45 | #else |
| 28 | # define OFF 1 | 46 | # define OFF 1 |
| 29 | # define PUP(a) *++(a) | 47 | # define PUP(a) *++(a) |
| 48 | # define UP_UNALIGNED(a) get_unaligned16(++(a)) | ||
| 30 | #endif | 49 | #endif |
| 31 | 50 | ||
| 32 | /* | 51 | /* |
| @@ -239,18 +258,50 @@ void inflate_fast(z_streamp strm, unsigned start) | |||
| 239 | } | 258 | } |
| 240 | } | 259 | } |
| 241 | else { | 260 | else { |
| 261 | unsigned short *sout; | ||
| 262 | unsigned long loops; | ||
| 263 | |||
| 242 | from = out - dist; /* copy direct from output */ | 264 | from = out - dist; /* copy direct from output */ |
| 243 | do { /* minimum length is three */ | 265 | /* minimum length is three */ |
| 244 | PUP(out) = PUP(from); | 266 | /* Align out addr */ |
| 245 | PUP(out) = PUP(from); | 267 | if (!((long)(out - 1 + OFF) & 1)) { |
| 246 | PUP(out) = PUP(from); | 268 | PUP(out) = PUP(from); |
| 247 | len -= 3; | 269 | len--; |
| 248 | } while (len > 2); | 270 | } |
| 249 | if (len) { | 271 | sout = (unsigned short *)(out - OFF); |
| 250 | PUP(out) = PUP(from); | 272 | if (dist > 2) { |
| 251 | if (len > 1) | 273 | unsigned short *sfrom; |
| 252 | PUP(out) = PUP(from); | 274 | |
| 253 | } | 275 | sfrom = (unsigned short *)(from - OFF); |
| 276 | loops = len >> 1; | ||
| 277 | do | ||
| 278 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
| 279 | PUP(sout) = PUP(sfrom); | ||
| 280 | #else | ||
| 281 | PUP(sout) = UP_UNALIGNED(sfrom); | ||
| 282 | #endif | ||
| 283 | while (--loops); | ||
| 284 | out = (unsigned char *)sout + OFF; | ||
| 285 | from = (unsigned char *)sfrom + OFF; | ||
| 286 | } else { /* dist == 1 or dist == 2 */ | ||
| 287 | unsigned short pat16; | ||
| 288 | |||
| 289 | pat16 = *(sout-1+OFF); | ||
| 290 | if (dist == 1) { | ||
| 291 | union uu mm; | ||
| 292 | /* copy one char pattern to both bytes */ | ||
| 293 | mm.us = pat16; | ||
| 294 | mm.b[0] = mm.b[1]; | ||
| 295 | pat16 = mm.us; | ||
| 296 | } | ||
| 297 | loops = len >> 1; | ||
| 298 | do | ||
| 299 | PUP(sout) = pat16; | ||
| 300 | while (--loops); | ||
| 301 | out = (unsigned char *)sout + OFF; | ||
| 302 | } | ||
| 303 | if (len & 1) | ||
| 304 | PUP(out) = PUP(from); | ||
| 254 | } | 305 | } |
| 255 | } | 306 | } |
| 256 | else if ((op & 64) == 0) { /* 2nd level distance code */ | 307 | else if ((op & 64) == 0) { /* 2nd level distance code */ |
