Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                        13
-rw-r--r--  lib/Kconfig.debug                 178
-rw-r--r--  lib/Kconfig.kgdb                   24
-rw-r--r--  lib/Makefile                       14
-rw-r--r--  lib/argv_split.c                   13
-rw-r--r--  lib/atomic64.c                      4
-rw-r--r--  lib/atomic64_test.c               166
-rw-r--r--  lib/bitmap.c                      100
-rw-r--r--  lib/btree.c                       798
-rw-r--r--  lib/bug.c                           2
-rw-r--r--  lib/checksum.c                     14
-rw-r--r--  lib/cpu-notifier-error-inject.c    63
-rw-r--r--  lib/cpumask.c                       1
-rw-r--r--  lib/crc32.c                       150
-rw-r--r--  lib/ctype.c                        50
-rw-r--r--  lib/debug_locks.c                   1
-rw-r--r--  lib/debugobjects.c                139
-rw-r--r--  lib/decompress.c                    5
-rw-r--r--  lib/decompress_bunzip2.c           12
-rw-r--r--  lib/decompress_unlzo.c            217
-rw-r--r--  lib/devres.c                        3
-rw-r--r--  lib/dma-debug.c                    27
-rw-r--r--  lib/dynamic_debug.c                 9
-rw-r--r--  lib/fault-inject.c                  1
-rw-r--r--  lib/flex_array.c                    2
-rw-r--r--  lib/gen_crc32table.c               47
-rw-r--r--  lib/genalloc.c                     35
-rw-r--r--  lib/hexdump.c                      54
-rw-r--r--  lib/hweight.c                      26
-rw-r--r--  lib/idr.c                          27
-rw-r--r--  lib/inflate.c                       1
-rw-r--r--  lib/iommu-helper.c                 59
-rw-r--r--  lib/ioremap.c                      10
-rw-r--r--  lib/kasprintf.c                     1
-rw-r--r--  lib/kernel_lock.c                  46
-rw-r--r--  lib/kobject.c                     121
-rw-r--r--  lib/kobject_uevent.c              115
-rw-r--r--  lib/kref.c                         16
-rw-r--r--  lib/lcm.c                          15
-rw-r--r--  lib/list_sort.c                   217
-rw-r--r--  lib/lmb.c                         527
-rw-r--r--  lib/lru_cache.c                   560
-rw-r--r--  lib/lzo/lzo1x_decompress.c          9
-rw-r--r--  lib/parser.c                       11
-rw-r--r--  lib/plist.c                         8
-rw-r--r--  lib/radix-tree.c                   46
-rw-r--r--  lib/raid6/Makefile                 22
-rw-r--r--  lib/raid6/raid6algos.c             21
-rw-r--r--  lib/raid6/raid6altivec.uc           2
-rw-r--r--  lib/raid6/raid6int.uc               2
-rw-r--r--  lib/raid6/raid6test/Makefile       42
-rw-r--r--  lib/raid6/unroll.awk               20
-rw-r--r--  lib/raid6/unroll.pl                24
-rw-r--r--  lib/random32.c                     40
-rw-r--r--  lib/ratelimit.c                    54
-rw-r--r--  lib/rational.c                      1
-rw-r--r--  lib/rbtree.c                       68
-rw-r--r--  lib/rwsem-spinlock.c               37
-rw-r--r--  lib/rwsem.c                         5
-rw-r--r--  lib/scatterlist.c                   1
-rw-r--r--  lib/show_mem.c                     14
-rw-r--r--  lib/spinlock_debug.c               64
-rw-r--r--  lib/string.c                      106
-rw-r--r--  lib/swiotlb.c                     217
-rw-r--r--  lib/textsearch.c                    1
-rw-r--r--  lib/uuid.c                         53
-rw-r--r--  lib/vsprintf.c                    652
-rw-r--r--  lib/zlib_inflate/inffast.c         73
68 files changed, 3935 insertions, 1541 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 1cc756c4c78c..fa9bf2c06199 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -120,6 +120,10 @@ config DECOMPRESS_BZIP2
 config DECOMPRESS_LZMA
 	tristate
 
+config DECOMPRESS_LZO
+	select LZO_DECOMPRESS
+	tristate
+
 #
 # Generic allocator support is selected if needed
 #
@@ -159,6 +163,9 @@ config TEXTSEARCH_BM
 config TEXTSEARCH_FSM
 	tristate
 
+config BTREE
+	boolean
+
 config HAS_IOMEM
 	boolean
 	depends on !NO_IOMEM
@@ -177,9 +184,6 @@ config HAS_DMA
 config CHECK_SIGNATURE
 	bool
 
-config HAVE_LMB
-	boolean
-
 config CPUMASK_OFFSTACK
 	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
 	help
@@ -203,4 +207,7 @@ config NLATTR
 config GENERIC_ATOMIC64
 	bool
 
+config LRU_CACHE
+	tristate
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 891155817bc6..79e0dff1cdcb 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -76,7 +76,6 @@ config UNUSED_SYMBOLS
 
 config DEBUG_FS
 	bool "Debug Filesystem"
-	depends on SYSFS
 	help
 	  debugfs is a virtual file system that kernel developers use to put
 	  debugging files into. Enable this option to be able to read and
@@ -103,9 +102,10 @@ config HEADERS_CHECK
 
 config DEBUG_SECTION_MISMATCH
 	bool "Enable full Section mismatch analysis"
-	depends on UNDEFINED
+	depends on UNDEFINED || (BLACKFIN)
+	default y
 	# This option is on purpose disabled for now.
-	# It will be enabled when we are down to a resonable number
+	# It will be enabled when we are down to a reasonable number
 	# of section mismatch warnings (< 10 for an allyesconfig build)
 	help
 	  The section mismatch analysis checks if there are illegal
@@ -151,28 +151,33 @@ config DEBUG_SHIRQ
 	  Drivers ought to be able to handle interrupts coming in at those
 	  points; some don't and need to be caught.
 
-config DETECT_SOFTLOCKUP
-	bool "Detect Soft Lockups"
+config LOCKUP_DETECTOR
+	bool "Detect Hard and Soft Lockups"
 	depends on DEBUG_KERNEL && !S390
-	default y
 	help
-	  Say Y here to enable the kernel to detect "soft lockups",
-	  which are bugs that cause the kernel to loop in kernel
+	  Say Y here to enable the kernel to act as a watchdog to detect
+	  hard and soft lockups.
+
+	  Softlockups are bugs that cause the kernel to loop in kernel
 	  mode for more than 60 seconds, without giving other tasks a
-	  chance to run.
+	  chance to run. The current stack trace is displayed upon
+	  detection and the system will stay locked up.
 
-	  When a soft-lockup is detected, the kernel will print the
-	  current stack trace (which you should report), but the
-	  system will stay locked up. This feature has negligible
-	  overhead.
+	  Hardlockups are bugs that cause the CPU to loop in kernel mode
+	  for more than 60 seconds, without letting other interrupts have a
+	  chance to run. The current stack trace is displayed upon detection
+	  and the system will stay locked up.
+
+	  The overhead should be minimal. A periodic hrtimer runs to
+	  generate interrupts and kick the watchdog task every 10-12 seconds.
+	  An NMI is generated every 60 seconds or so to check for hardlockups.
 
-	  (Note that "hard lockups" are separate type of bugs that
-	  can be detected via the NMI-watchdog, on platforms that
-	  support it.)
+config HARDLOCKUP_DETECTOR
+	def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 
 config BOOTPARAM_SOFTLOCKUP_PANIC
 	bool "Panic (Reboot) On Soft Lockups"
-	depends on DETECT_SOFTLOCKUP
+	depends on LOCKUP_DETECTOR
 	help
 	  Say Y here to enable the kernel to panic on "soft lockups",
 	  which are bugs that cause the kernel to loop in kernel
@@ -189,7 +194,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC
 
 config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
 	int
-	depends on DETECT_SOFTLOCKUP
+	depends on LOCKUP_DETECTOR
 	range 0 1
 	default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
 	default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
@@ -298,6 +303,20 @@ config DEBUG_OBJECTS_TIMERS
 	  timer routines to track the life time of timer objects and
 	  validate the timer operations.
 
+config DEBUG_OBJECTS_WORK
+	bool "Debug work objects"
+	depends on DEBUG_OBJECTS
+	help
+	  If you say Y here, additional code will be inserted into the
+	  work queue routines to track the life time of work objects and
+	  validate the work operations.
+
+config DEBUG_OBJECTS_RCU_HEAD
+	bool "Debug RCU callbacks objects"
+	depends on DEBUG_OBJECTS && PREEMPT
+	help
+	  Enable this to turn on debugging of RCU list heads (call_rcu() usage).
+
 config DEBUG_OBJECTS_ENABLE_DEFAULT
 	int "debug_objects bootup default value (0-1)"
 	range 0 1
@@ -346,11 +365,13 @@ config SLUB_STATS
 
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
-	depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM || PPC) && \
-		!MEMORY_HOTPLUG
+	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
+		(X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
+
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
 	select KALLSYMS
+	select CRC32
 	help
 	  Say Y here if you want to enable the memory leak
 	  detector. The memory allocation/freeing is traced in a way
@@ -370,7 +391,7 @@ config DEBUG_KMEMLEAK
 config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
 	int "Maximum kmemleak early log entries"
 	depends on DEBUG_KMEMLEAK
-	range 200 2000
+	range 200 40000
 	default 400
 	help
 	  Kmemleak must track all the memory allocations to avoid
@@ -391,7 +412,7 @@ config DEBUG_KMEMLEAK_TEST
 
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
-	depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64)
+	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
 	default y
 	help
 	  If you say Y here then the kernel will use a debug variant of the
@@ -489,11 +510,35 @@ config PROVE_LOCKING
 
 	  For more details, see Documentation/lockdep-design.txt.
 
+config PROVE_RCU
+	bool "RCU debugging: prove RCU correctness"
+	depends on PROVE_LOCKING
+	default n
+	help
+	  This feature enables lockdep extensions that check for correct
+	  use of RCU APIs. This is currently under development. Say Y
+	  if you want to debug RCU usage or help work on the PROVE_RCU
+	  feature.
+
+	  Say N if you are unsure.
+
+config PROVE_RCU_REPEATEDLY
+	bool "RCU debugging: don't disable PROVE_RCU on first splat"
+	depends on PROVE_RCU
+	default n
+	help
+	  By itself, PROVE_RCU will disable checking upon issuing the
+	  first warning (or "splat"). This feature prevents such
+	  disabling, allowing multiple RCU-lockdep warnings to be printed
+	  on a single reboot.
+
+	  Say N if you are unsure.
+
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select KALLSYMS_ALL
 
@@ -510,6 +555,14 @@ config LOCK_STAT
 
 	  For more details, see Documentation/lockstat.txt
 
+	  This also enables lock events required by "perf lock",
+	  subcommand of perf.
+	  If you want to use "perf lock", you also need to turn on
+	  CONFIG_EVENT_TRACING.
+
+	  CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
+	  (CONFIG_LOCKDEP defines "acquire" and "release" events.)
+
 config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
 	depends on DEBUG_KERNEL && LOCKDEP
@@ -566,7 +619,7 @@ config DEBUG_BUGVERBOSE
 	depends on BUG
 	depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
 		   FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
-	default !EMBEDDED
+	default y
 	help
 	  Say Y here to make BUG() panics output the file name and line number
 	  of the BUG call as well as the EIP and oops trace. This aids
@@ -585,6 +638,19 @@ config DEBUG_INFO
 
 	  If unsure, say N.
 
+config DEBUG_INFO_REDUCED
+	bool "Reduce debugging information"
+	depends on DEBUG_INFO
+	help
+	  If you say Y here gcc is instructed to generate less debugging
+	  information for structure types. This means that tools that
+	  need full debugging information (like kgdb or systemtap) won't
+	  be happy. But if you merely need debugging information to
+	  resolve line numbers there is no loss. Advantage is that
+	  build directory object sizes shrink dramatically over a full
+	  DEBUG_INFO build and compile times are reduced too.
+	  Only works with newer gcc versions.
+
 config DEBUG_VM
 	bool "Debug VM"
 	depends on DEBUG_KERNEL
@@ -749,16 +815,28 @@ config RCU_TORTURE_TEST_RUNNABLE
 config RCU_CPU_STALL_DETECTOR
 	bool "Check for stalled CPUs delaying RCU grace periods"
 	depends on TREE_RCU || TREE_PREEMPT_RCU
-	default n
+	default y
 	help
 	  This option causes RCU to printk information on which
 	  CPUs are delaying the current grace period, but only when
 	  the grace period extends for excessive time periods.
 
-	  Say Y if you want RCU to perform such checks.
+	  Say N if you want to disable such checks.
+
+	  Say Y if you are unsure.
+
+config RCU_CPU_STALL_VERBOSE
+	bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
+	depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
+	default y
+	help
+	  This option causes RCU to printk detailed per-task information
+	  for any tasks that are stalling the current RCU grace period.
 
 	  Say N if you are unsure.
 
+	  Say Y if you want to enable such checks.
+
 config KPROBES_SANITY_TEST
 	bool "Kprobes sanity tests"
 	depends on DEBUG_KERNEL
@@ -830,8 +908,7 @@ config DEBUG_FORCE_WEAK_PER_CPU
 
 config LKDTM
 	tristate "Linux Kernel Dump Test Tool Module"
-	depends on DEBUG_KERNEL
-	depends on KPROBES
+	depends on DEBUG_FS
 	depends on BLOCK
 	default n
 	help
@@ -842,7 +919,19 @@ config LKDTM
 	  called lkdtm.
 
 	  Documentation on how to use the module can be found in
-	  drivers/misc/lkdtm.c
+	  Documentation/fault-injection/provoke-crashes.txt
+
+config CPU_NOTIFIER_ERROR_INJECT
+	tristate "CPU notifier error injection module"
+	depends on HOTPLUG_CPU && DEBUG_KERNEL
+	help
+	  This option provides a kernel module that can be used to test
+	  the error handling of the cpu notifiers
+
+	  To compile this code as a module, choose M here: the module will
+	  be called cpu-notifier-error-inject.
+
+	  If unsure, say N.
 
 config FAULT_INJECTION
 	bool "Fault-injection framework"
@@ -871,7 +960,7 @@ config FAIL_MAKE_REQUEST
 	  Provide fault-injection capability for disk IO.
 
 config FAIL_IO_TIMEOUT
-	bool "Faul-injection capability for faking disk interrupts"
+	bool "Fault-injection capability for faking disk interrupts"
 	depends on FAULT_INJECTION && BLOCK
 	help
 	  Provide fault-injection capability on end IO handling. This
@@ -892,13 +981,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !PPC && !S390
+	select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
 config LATENCYTOP
 	bool "Latency measuring infrastructure"
-	select FRAME_POINTER if !MIPS && !PPC && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select KALLSYMS_ALL
 	select STACKTRACE
@@ -911,7 +1000,7 @@ config LATENCYTOP
 
 config SYSCTL_SYSCALL_CHECK
 	bool "Sysctl checks"
-	depends on SYSCTL_SYSCALL
+	depends on SYSCTL
 	---help---
 	  sys_sysctl uses binary paths that have been found challenging
 	  to properly maintain and use. This enables checks that help
@@ -985,10 +1074,10 @@ config DYNAMIC_DEBUG
 
 	  Usage:
 
-	  Dynamic debugging is controlled via the 'dynamic_debug/ddebug' file,
+	  Dynamic debugging is controlled via the 'dynamic_debug/control' file,
 	  which is contained in the 'debugfs' filesystem. Thus, the debugfs
 	  filesystem must first be mounted before making use of this feature.
-	  We refer the control file as: <debugfs>/dynamic_debug/ddebug. This
+	  We refer the control file as: <debugfs>/dynamic_debug/control. This
 	  file contains a list of the debug statements that can be enabled. The
 	  format for each line of the file is:
 
@@ -1003,7 +1092,7 @@ config DYNAMIC_DEBUG
 
 	  From a live system:
 
-		nullarbor:~ # cat <debugfs>/dynamic_debug/ddebug
+		nullarbor:~ # cat <debugfs>/dynamic_debug/control
 		# filename:lineno [module]function flags format
 		fs/aio.c:222 [aio]__put_ioctx - "__put_ioctx:\040freeing\040%p\012"
 		fs/aio.c:248 [aio]ioctx_alloc - "ENOMEM:\040nr_events\040too\040high\012"
@@ -1013,23 +1102,23 @@ config DYNAMIC_DEBUG
 
 		// enable the message at line 1603 of file svcsock.c
 		nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// enable all the messages in file svcsock.c
 		nullarbor:~ # echo -n 'file svcsock.c +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// enable all the messages in the NFS server module
 		nullarbor:~ # echo -n 'module nfsd +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// enable all 12 messages in the function svc_process()
 		nullarbor:~ # echo -n 'func svc_process +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// disable all 12 messages in the function svc_process()
 		nullarbor:~ # echo -n 'func svc_process -p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 	  See Documentation/dynamic-debug-howto.txt for additional information.
 
@@ -1044,6 +1133,13 @@ config DMA_API_DEBUG
 	  This option causes a performance degredation.  Use only if you want
 	  to debug device drivers. If unsure, say N.
 
+config ATOMIC64_SELFTEST
+	bool "Perform an atomic64_t self-test at boot"
+	help
+	  Enable this option to test the atomic64_t functions at boot.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
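The PROVE_RCU option added above teaches lockdep to flag RCU API misuse. As a hedged illustration only (not part of this patch; `struct foo`, `gp` and `rcu_demo` are made-up names), this is the kind of error it catches:

#include <linux/kernel.h>
#include <linux/rcupdate.h>

struct foo { int a; };
static struct foo *gp;			/* an RCU-protected pointer */

static void rcu_demo(void)
{
	struct foo *p;

	rcu_read_lock();
	p = rcu_dereference(gp);	/* fine: inside a read-side section */
	if (p)
		pr_info("a = %d\n", p->a);
	rcu_read_unlock();

	p = rcu_dereference(gp);	/* PROVE_RCU: lockdep splat here */
	(void)p;
}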
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 9b5d1d7f2ef7..43cb93fa2651 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -3,7 +3,7 @@ config HAVE_ARCH_KGDB
 	bool
 
 menuconfig KGDB
-	bool "KGDB: kernel debugging with remote gdb"
+	bool "KGDB: kernel debugger"
 	depends on HAVE_ARCH_KGDB
 	depends on DEBUG_KERNEL && EXPERIMENTAL
 	help
@@ -57,4 +57,26 @@ config KGDB_TESTS_BOOT_STRING
 	  information about other strings you could use beyond the
 	  default of V1F100.
 
+config KGDB_LOW_LEVEL_TRAP
+	bool "KGDB: Allow debugging with traps in notifiers"
+	depends on X86 || MIPS
+	default n
+	help
+	  This will add an extra call back to kgdb for the breakpoint
+	  exception handler on which will will allow kgdb to step
+	  through a notify handler.
+
+config KGDB_KDB
+	bool "KGDB_KDB: include kdb frontend for kgdb"
+	default n
+	help
+	  KDB frontend for kernel
+
+config KDB_KEYBOARD
+	bool "KGDB_KDB: keyboard as input device"
+	depends on VT && KGDB_KDB
+	default n
+	help
+	  KDB can use a PS/2 type keyboard for an input device
+
 endif # KGDB
diff --git a/lib/Makefile b/lib/Makefile
index b25faf0d1d5c..e6a3763b8212 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-	 string_helpers.o gcd.o
+	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -39,8 +39,12 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
+
+CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+obj-$(CONFIG_BTREE) += btree.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
@@ -70,6 +74,7 @@ obj-$(CONFIG_RAID6_PQ) += raid6/
 lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
 lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
 lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
+lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o
 
 obj-$(CONFIG_TEXTSEARCH) += textsearch.o
 obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
@@ -81,23 +86,26 @@ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
 obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
-obj-$(CONFIG_HAVE_LMB) += lmb.o
-
 obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
 
 obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
 
 obj-$(CONFIG_NLATTR) += nlattr.o
 
+obj-$(CONFIG_LRU_CACHE) += lru_cache.o
+
 obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
 
 obj-$(CONFIG_GENERIC_CSUM) += checksum.o
 
 obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
 
+obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 5205a8dae5bc..4b1b083f219c 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -4,17 +4,10 @@
 
 #include <linux/kernel.h>
 #include <linux/ctype.h>
+#include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 
-static const char *skip_sep(const char *cp)
-{
-	while (*cp && isspace(*cp))
-		cp++;
-
-	return cp;
-}
-
 static const char *skip_arg(const char *cp)
 {
 	while (*cp && !isspace(*cp))
@@ -28,7 +21,7 @@ static int count_argc(const char *str)
 	int count = 0;
 
 	while (*str) {
-		str = skip_sep(str);
+		str = skip_spaces(str);
 		if (*str) {
 			count++;
 			str = skip_arg(str);
@@ -82,7 +75,7 @@ char **argv_split(gfp_t gfp, const char *str, int *argcp)
 	argvp = argv;
 
 	while (*str) {
-		str = skip_sep(str);
+		str = skip_spaces(str);
 
 		if (*str) {
 			const char *p = str;
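These hunks drop the file-local skip_sep() in favour of the shared skip_spaces() from lib/string.c (hence the new <linux/string.h> include). A minimal userspace sketch of the contract the new code relies on, for reference only (`skip_spaces_demo` is an illustrative name, not the kernel implementation):

#include <ctype.h>

/* Return a pointer past any leading whitespace; never walks past
 * the terminating NUL, matching what skip_sep() used to do.
 */
static const char *skip_spaces_demo(const char *str)
{
	while (isspace((unsigned char)*str))
		str++;
	return str;
}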
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 8bee16ec7524..a21c12bc727c 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -162,12 +162,12 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
 	spinlock_t *lock = lock_addr(v);
-	int ret = 1;
+	int ret = 0;
 
 	spin_lock_irqsave(lock, flags);
 	if (v->counter != u) {
 		v->counter += a;
-		ret = 0;
+		ret = 1;
 	}
 	spin_unlock_irqrestore(lock, flags);
 	return ret;
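This hunk fixes an inverted return value: atomic64_add_unless() must return non-zero when it performed the addition and zero when *v already equals u, but the generic fallback did the opposite. A minimal sketch of the corrected contract, in the style of the new lib/atomic64_test.c below (`add_unless_demo` is an illustrative name):

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/atomic.h>

static __init int add_unless_demo(void)
{
	atomic64_t v = ATOMIC64_INIT(5);

	BUG_ON(atomic64_add_unless(&v, 1, 5));	/* v == u: no add, returns 0 */
	BUG_ON(!atomic64_add_unless(&v, 1, 9));	/* v != u: adds, returns 1 */
	BUG_ON(atomic64_read(&v) != 6);
	return 0;
}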
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
new file mode 100644
index 000000000000..44524cc8c32a
--- /dev/null
+++ b/lib/atomic64_test.c
@@ -0,0 +1,166 @@
+/*
+ * Testsuite for atomic64_t functions
+ *
+ * Copyright © 2010 Luca Barbieri
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/atomic.h>
+
+#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
+static __init int test_atomic64(void)
+{
+	long long v0 = 0xaaa31337c001d00dLL;
+	long long v1 = 0xdeadbeefdeafcafeLL;
+	long long v2 = 0xfaceabadf00df001LL;
+	long long onestwos = 0x1111111122222222LL;
+	long long one = 1LL;
+
+	atomic64_t v = ATOMIC64_INIT(v0);
+	long long r = v0;
+	BUG_ON(v.counter != r);
+
+	atomic64_set(&v, v1);
+	r = v1;
+	BUG_ON(v.counter != r);
+	BUG_ON(atomic64_read(&v) != r);
+
+	INIT(v0);
+	atomic64_add(onestwos, &v);
+	r += onestwos;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_add(-one, &v);
+	r += -one;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r += onestwos;
+	BUG_ON(atomic64_add_return(onestwos, &v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r += -one;
+	BUG_ON(atomic64_add_return(-one, &v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_sub(onestwos, &v);
+	r -= onestwos;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_sub(-one, &v);
+	r -= -one;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r -= onestwos;
+	BUG_ON(atomic64_sub_return(onestwos, &v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r -= -one;
+	BUG_ON(atomic64_sub_return(-one, &v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_inc(&v);
+	r += one;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r += one;
+	BUG_ON(atomic64_inc_return(&v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_dec(&v);
+	r -= one;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r -= one;
+	BUG_ON(atomic64_dec_return(&v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(atomic64_xchg(&v, v1) != v0);
+	r = v1;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
+	r = v1;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(atomic64_add_unless(&v, one, v0));
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(!atomic64_add_unless(&v, one, v1));
+	r += one;
+	BUG_ON(v.counter != r);
+
+#if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \
+	defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) || defined(CONFIG_ARM)
+	INIT(onestwos);
+	BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
+	r -= one;
+	BUG_ON(v.counter != r);
+
+	INIT(0);
+	BUG_ON(atomic64_dec_if_positive(&v) != -one);
+	BUG_ON(v.counter != r);
+
+	INIT(-one);
+	BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
+	BUG_ON(v.counter != r);
+#else
+#warning Please implement atomic64_dec_if_positive for your architecture, and add it to the IF above
+#endif
+
+	INIT(onestwos);
+	BUG_ON(!atomic64_inc_not_zero(&v));
+	r += one;
+	BUG_ON(v.counter != r);
+
+	INIT(0);
+	BUG_ON(atomic64_inc_not_zero(&v));
+	BUG_ON(v.counter != r);
+
+	INIT(-one);
+	BUG_ON(!atomic64_inc_not_zero(&v));
+	r += one;
+	BUG_ON(v.counter != r);
+
+#ifdef CONFIG_X86
+	printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n",
+#ifdef CONFIG_X86_64
+	       "x86-64",
+#elif defined(CONFIG_X86_CMPXCHG64)
+	       "i586+",
+#else
+	       "i386+",
+#endif
+	       boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
+	       boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
+#else
+	printk(KERN_INFO "atomic64 test passed\n");
+#endif
+
+	return 0;
+}
+
+core_initcall(test_atomic64);
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 702565821c99..ffb78c916ccd 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -271,6 +271,87 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
 }
 EXPORT_SYMBOL(__bitmap_weight);
 
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
+
+void bitmap_set(unsigned long *map, int start, int nr)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const int size = start + nr;
+	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+	while (nr - bits_to_set >= 0) {
+		*p |= mask_to_set;
+		nr -= bits_to_set;
+		bits_to_set = BITS_PER_LONG;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (nr) {
+		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+		*p |= mask_to_set;
+	}
+}
+EXPORT_SYMBOL(bitmap_set);
+
+void bitmap_clear(unsigned long *map, int start, int nr)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const int size = start + nr;
+	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+	while (nr - bits_to_clear >= 0) {
+		*p &= ~mask_to_clear;
+		nr -= bits_to_clear;
+		bits_to_clear = BITS_PER_LONG;
+		mask_to_clear = ~0UL;
+		p++;
+	}
+	if (nr) {
+		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+		*p &= ~mask_to_clear;
+	}
+}
+EXPORT_SYMBOL(bitmap_clear);
+
+/*
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @align_mask: Alignment mask for zero area
+ *
+ * The @align_mask should be one less than a power of 2; the effect is that
+ * the bit offset of all zero areas this function finds is multiples of that
+ * power of 2. A @align_mask of 0 means no alignment is required.
+ */
+unsigned long bitmap_find_next_zero_area(unsigned long *map,
+					 unsigned long size,
+					 unsigned long start,
+					 unsigned int nr,
+					 unsigned long align_mask)
+{
+	unsigned long index, end, i;
+again:
+	index = find_next_zero_bit(map, size, start);
+
+	/* Align allocation */
+	index = __ALIGN_MASK(index, align_mask);
+
+	end = index + nr;
+	if (end > size)
+		return end;
+	i = find_next_bit(map, end, index);
+	if (i < end) {
+		start = i + 1;
+		goto again;
+	}
+	return index;
+}
+EXPORT_SYMBOL(bitmap_find_next_zero_area);
+
 /*
  * Bitmap printing & parsing functions: first version by Bill Irwin,
  * second version by Paul Jackson, third by Joe Korty.
@@ -406,7 +487,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
 EXPORT_SYMBOL(__bitmap_parse);
 
 /**
- * bitmap_parse_user()
+ * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
  *
  * @ubuf: pointer to user buffer containing string.
 * @ulen: buffer size in bytes.  If string is smaller than this
@@ -538,7 +619,7 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
 EXPORT_SYMBOL(bitmap_parselist);
 
 /**
- * bitmap_pos_to_ord(buf, pos, bits)
+ * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
  *	@buf: pointer to a bitmap
 *	@pos: a bit position in @buf (0 <= @pos < @bits)
 *	@bits: number of valid bit positions in @buf
@@ -574,7 +655,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
 }
 
 /**
- * bitmap_ord_to_pos(buf, ord, bits)
+ * bitmap_ord_to_pos - find position of n-th set bit in bitmap
 *	@buf: pointer to bitmap
 *	@ord: ordinal bit position (n-th set bit, n >= 0)
 *	@bits: number of valid bit positions in @buf
@@ -652,10 +733,9 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src,
 	bitmap_zero(dst, bits);
 
 	w = bitmap_weight(new, bits);
-	for (oldbit = find_first_bit(src, bits);
-	     oldbit < bits;
-	     oldbit = find_next_bit(src, bits, oldbit + 1)) {
+	for_each_set_bit(oldbit, src, bits) {
 		int n = bitmap_pos_to_ord(old, oldbit, bits);
+
 		if (n < 0 || w == 0)
 			set_bit(oldbit, dst);	/* identity map */
 		else
@@ -822,9 +902,7 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig,
 	 */
 
 	m = 0;
-	for (n = find_first_bit(relmap, bits);
-	     n < bits;
-	     n = find_next_bit(relmap, bits, n + 1)) {
+	for_each_set_bit(n, relmap, bits) {
 		/* m == bitmap_pos_to_ord(relmap, n, bits) */
 		if (test_bit(m, orig))
 			set_bit(n, dst);
@@ -853,9 +931,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
 		return;
 	bitmap_zero(dst, bits);
 
-	for (oldbit = find_first_bit(orig, bits);
-	     oldbit < bits;
-	     oldbit = find_next_bit(orig, bits, oldbit + 1))
+	for_each_set_bit(oldbit, orig, bits)
 		set_bit(oldbit % sz, dst);
 }
 EXPORT_SYMBOL(bitmap_fold);
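A hedged usage sketch for the three helpers added to bitmap.c above; the function name and values are illustrative, not from the patch. It finds a 4-bit zero area at an offset that is a multiple of four, claims it, then releases it:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static int bitmap_helpers_demo(void)
{
	DECLARE_BITMAP(map, 64);
	unsigned long pos;

	bitmap_zero(map, 64);
	bitmap_set(map, 0, 3);		/* bits 0-2 are now busy */

	/* 4 free bits, offset aligned to a multiple of 4 (mask 3) */
	pos = bitmap_find_next_zero_area(map, 64, 0, 4, 3);
	if (pos >= 64)
		return -ENOSPC;		/* failure: returned end > size */

	bitmap_set(map, pos, 4);	/* claim the area (pos == 4 here) */
	bitmap_clear(map, pos, 4);	/* and release it again */
	return 0;
}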
diff --git a/lib/btree.c b/lib/btree.c
new file mode 100644
index 000000000000..c9c6f0351526
--- /dev/null
+++ b/lib/btree.c
@@ -0,0 +1,798 @@
1/*
2 * lib/btree.c - Simple In-memory B+Tree
3 *
4 * As should be obvious for Linux kernel code, license is GPLv2
5 *
6 * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
7 * Bits and pieces stolen from Peter Zijlstra's code, which is
8 * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
9 * GPLv2
10 *
11 * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
12 *
13 * A relatively simple B+Tree implementation. I have written it as a learning
14 * excercise to understand how B+Trees work. Turned out to be useful as well.
15 *
16 * B+Trees can be used similar to Linux radix trees (which don't have anything
17 * in common with textbook radix trees, beware). Prerequisite for them working
18 * well is that access to a random tree node is much faster than a large number
19 * of operations within each node.
20 *
21 * Disks have fulfilled the prerequisite for a long time. More recently DRAM
22 * has gained similar properties, as memory access times, when measured in cpu
23 * cycles, have increased. Cacheline sizes have increased as well, which also
24 * helps B+Trees.
25 *
26 * Compared to radix trees, B+Trees are more efficient when dealing with a
27 * sparsely populated address space. Between 25% and 50% of the memory is
28 * occupied with valid pointers. When densely populated, radix trees contain
29 * ~98% pointers - hard to beat. Very sparse radix trees contain only ~2%
30 * pointers.
31 *
32 * This particular implementation stores pointers identified by a long value.
33 * Storing NULL pointers is illegal, lookup will return NULL when no entry
34 * was found.
35 *
36 * A tricks was used that is not commonly found in textbooks. The lowest
37 * values are to the right, not to the left. All used slots within a node
38 * are on the left, all unused slots contain NUL values. Most operations
39 * simply loop once over all slots and terminate on the first NUL.
40 */
41
42#include <linux/btree.h>
43#include <linux/cache.h>
44#include <linux/kernel.h>
45#include <linux/slab.h>
46#include <linux/module.h>
47
48#define MAX(a, b) ((a) > (b) ? (a) : (b))
49#define NODESIZE MAX(L1_CACHE_BYTES, 128)
50
51struct btree_geo {
52 int keylen;
53 int no_pairs;
54 int no_longs;
55};
56
57struct btree_geo btree_geo32 = {
58 .keylen = 1,
59 .no_pairs = NODESIZE / sizeof(long) / 2,
60 .no_longs = NODESIZE / sizeof(long) / 2,
61};
62EXPORT_SYMBOL_GPL(btree_geo32);
63
64#define LONG_PER_U64 (64 / BITS_PER_LONG)
65struct btree_geo btree_geo64 = {
66 .keylen = LONG_PER_U64,
67 .no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64),
68 .no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)),
69};
70EXPORT_SYMBOL_GPL(btree_geo64);
71
72struct btree_geo btree_geo128 = {
73 .keylen = 2 * LONG_PER_U64,
74 .no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64),
75 .no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)),
76};
77EXPORT_SYMBOL_GPL(btree_geo128);
78
79static struct kmem_cache *btree_cachep;
80
81void *btree_alloc(gfp_t gfp_mask, void *pool_data)
82{
83 return kmem_cache_alloc(btree_cachep, gfp_mask);
84}
85EXPORT_SYMBOL_GPL(btree_alloc);
86
87void btree_free(void *element, void *pool_data)
88{
89 kmem_cache_free(btree_cachep, element);
90}
91EXPORT_SYMBOL_GPL(btree_free);
92
93static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp)
94{
95 unsigned long *node;
96
97 node = mempool_alloc(head->mempool, gfp);
98 if (likely(node))
99 memset(node, 0, NODESIZE);
100 return node;
101}
102
103static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n)
104{
105 size_t i;
106
107 for (i = 0; i < n; i++) {
108 if (l1[i] < l2[i])
109 return -1;
110 if (l1[i] > l2[i])
111 return 1;
112 }
113 return 0;
114}
115
116static unsigned long *longcpy(unsigned long *dest, const unsigned long *src,
117 size_t n)
118{
119 size_t i;
120
121 for (i = 0; i < n; i++)
122 dest[i] = src[i];
123 return dest;
124}
125
126static unsigned long *longset(unsigned long *s, unsigned long c, size_t n)
127{
128 size_t i;
129
130 for (i = 0; i < n; i++)
131 s[i] = c;
132 return s;
133}
134
135static void dec_key(struct btree_geo *geo, unsigned long *key)
136{
137 unsigned long val;
138 int i;
139
140 for (i = geo->keylen - 1; i >= 0; i--) {
141 val = key[i];
142 key[i] = val - 1;
143 if (val)
144 break;
145 }
146}
147
148static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n)
149{
150 return &node[n * geo->keylen];
151}
152
153static void *bval(struct btree_geo *geo, unsigned long *node, int n)
154{
155 return (void *)node[geo->no_longs + n];
156}
157
158static void setkey(struct btree_geo *geo, unsigned long *node, int n,
159 unsigned long *key)
160{
161 longcpy(bkey(geo, node, n), key, geo->keylen);
162}
163
164static void setval(struct btree_geo *geo, unsigned long *node, int n,
165 void *val)
166{
167 node[geo->no_longs + n] = (unsigned long) val;
168}
169
170static void clearpair(struct btree_geo *geo, unsigned long *node, int n)
171{
172 longset(bkey(geo, node, n), 0, geo->keylen);
173 node[geo->no_longs + n] = 0;
174}
175
176static inline void __btree_init(struct btree_head *head)
177{
178 head->node = NULL;
179 head->height = 0;
180}
181
182void btree_init_mempool(struct btree_head *head, mempool_t *mempool)
183{
184 __btree_init(head);
185 head->mempool = mempool;
186}
187EXPORT_SYMBOL_GPL(btree_init_mempool);
188
189int btree_init(struct btree_head *head)
190{
191 __btree_init(head);
192 head->mempool = mempool_create(0, btree_alloc, btree_free, NULL);
193 if (!head->mempool)
194 return -ENOMEM;
195 return 0;
196}
197EXPORT_SYMBOL_GPL(btree_init);
198
199void btree_destroy(struct btree_head *head)
200{
201 mempool_destroy(head->mempool);
202 head->mempool = NULL;
203}
204EXPORT_SYMBOL_GPL(btree_destroy);
205
206void *btree_last(struct btree_head *head, struct btree_geo *geo,
207 unsigned long *key)
208{
209 int height = head->height;
210 unsigned long *node = head->node;
211
212 if (height == 0)
213 return NULL;
214
215 for ( ; height > 1; height--)
216 node = bval(geo, node, 0);
217
218 longcpy(key, bkey(geo, node, 0), geo->keylen);
219 return bval(geo, node, 0);
220}
221EXPORT_SYMBOL_GPL(btree_last);
222
223static int keycmp(struct btree_geo *geo, unsigned long *node, int pos,
224 unsigned long *key)
225{
226 return longcmp(bkey(geo, node, pos), key, geo->keylen);
227}
228
229static int keyzero(struct btree_geo *geo, unsigned long *key)
230{
231 int i;
232
233 for (i = 0; i < geo->keylen; i++)
234 if (key[i])
235 return 0;
236
237 return 1;
238}
239
240void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
241 unsigned long *key)
242{
243 int i, height = head->height;
244 unsigned long *node = head->node;
245
246 if (height == 0)
247 return NULL;
248
249 for ( ; height > 1; height--) {
250 for (i = 0; i < geo->no_pairs; i++)
251 if (keycmp(geo, node, i, key) <= 0)
252 break;
253 if (i == geo->no_pairs)
254 return NULL;
255 node = bval(geo, node, i);
256 if (!node)
257 return NULL;
258 }
259
260 if (!node)
261 return NULL;
262
263 for (i = 0; i < geo->no_pairs; i++)
264 if (keycmp(geo, node, i, key) == 0)
265 return bval(geo, node, i);
266 return NULL;
267}
268EXPORT_SYMBOL_GPL(btree_lookup);
269
270int btree_update(struct btree_head *head, struct btree_geo *geo,
271 unsigned long *key, void *val)
272{
273 int i, height = head->height;
274 unsigned long *node = head->node;
275
276 if (height == 0)
277 return -ENOENT;
278
279 for ( ; height > 1; height--) {
280 for (i = 0; i < geo->no_pairs; i++)
281 if (keycmp(geo, node, i, key) <= 0)
282 break;
283 if (i == geo->no_pairs)
284 return -ENOENT;
285 node = bval(geo, node, i);
286 if (!node)
287 return -ENOENT;
288 }
289
290 if (!node)
291 return -ENOENT;
292
293 for (i = 0; i < geo->no_pairs; i++)
294 if (keycmp(geo, node, i, key) == 0) {
295 setval(geo, node, i, val);
296 return 0;
297 }
298 return -ENOENT;
299}
300EXPORT_SYMBOL_GPL(btree_update);
301
302/*
303 * Usually this function is quite similar to normal lookup. But the key of
304 * a parent node may be smaller than the smallest key of all its siblings.
305 * In such a case we cannot just return NULL, as we have only proven that no
306 * key smaller than __key, but larger than this parent key exists.
307 * So we set __key to the parent key and retry. We have to use the smallest
308 * such parent key, which is the last parent key we encountered.
309 */
310void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
311 unsigned long *__key)
312{
313 int i, height;
314 unsigned long *node, *oldnode;
315 unsigned long *retry_key = NULL, key[geo->keylen];
316
317 if (keyzero(geo, __key))
318 return NULL;
319
320 if (head->height == 0)
321 return NULL;
322retry:
323 longcpy(key, __key, geo->keylen);
324 dec_key(geo, key);
325
326 node = head->node;
327 for (height = head->height ; height > 1; height--) {
328 for (i = 0; i < geo->no_pairs; i++)
329 if (keycmp(geo, node, i, key) <= 0)
330 break;
331 if (i == geo->no_pairs)
332 goto miss;
333 oldnode = node;
334 node = bval(geo, node, i);
335 if (!node)
336 goto miss;
337 retry_key = bkey(geo, oldnode, i);
338 }
339
340 if (!node)
341 goto miss;
342
343 for (i = 0; i < geo->no_pairs; i++) {
344 if (keycmp(geo, node, i, key) <= 0) {
345 if (bval(geo, node, i)) {
346 longcpy(__key, bkey(geo, node, i), geo->keylen);
347 return bval(geo, node, i);
348 } else
349 goto miss;
350 }
351 }
352miss:
353 if (retry_key) {
354 __key = retry_key;
355 retry_key = NULL;
356 goto retry;
357 }
358 return NULL;
359}
360
361static int getpos(struct btree_geo *geo, unsigned long *node,
362 unsigned long *key)
363{
364 int i;
365
366 for (i = 0; i < geo->no_pairs; i++) {
367 if (keycmp(geo, node, i, key) <= 0)
368 break;
369 }
370 return i;
371}
372
373static int getfill(struct btree_geo *geo, unsigned long *node, int start)
374{
375 int i;
376
377 for (i = start; i < geo->no_pairs; i++)
378 if (!bval(geo, node, i))
379 break;
380 return i;
381}
382
383/*
384 * locate the correct leaf node in the btree
385 */
386static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo,
387 unsigned long *key, int level)
388{
389 unsigned long *node = head->node;
390 int i, height;
391
392 for (height = head->height; height > level; height--) {
393 for (i = 0; i < geo->no_pairs; i++)
394 if (keycmp(geo, node, i, key) <= 0)
395 break;
396
397 if ((i == geo->no_pairs) || !bval(geo, node, i)) {
398 /* right-most key is too large, update it */
399 /* FIXME: If the right-most key on higher levels is
400 * always zero, this wouldn't be necessary. */
401 i--;
402 setkey(geo, node, i, key);
403 }
404 BUG_ON(i < 0);
405 node = bval(geo, node, i);
406 }
407 BUG_ON(!node);
408 return node;
409}
410
411static int btree_grow(struct btree_head *head, struct btree_geo *geo,
412 gfp_t gfp)
413{
414 unsigned long *node;
415 int fill;
416
417 node = btree_node_alloc(head, gfp);
418 if (!node)
419 return -ENOMEM;
420 if (head->node) {
421 fill = getfill(geo, head->node, 0);
422 setkey(geo, node, 0, bkey(geo, head->node, fill - 1));
423 setval(geo, node, 0, head->node);
424 }
425 head->node = node;
426 head->height++;
427 return 0;
428}
429
430static void btree_shrink(struct btree_head *head, struct btree_geo *geo)
431{
432 unsigned long *node;
433 int fill;
434
435 if (head->height <= 1)
436 return;
437
438 node = head->node;
439 fill = getfill(geo, node, 0);
440 BUG_ON(fill > 1);
441 head->node = bval(geo, node, 0);
442 head->height--;
443 mempool_free(node, head->mempool);
444}
445
446static int btree_insert_level(struct btree_head *head, struct btree_geo *geo,
447 unsigned long *key, void *val, int level,
448 gfp_t gfp)
449{
450 unsigned long *node;
451 int i, pos, fill, err;
452
453 BUG_ON(!val);
454 if (head->height < level) {
455 err = btree_grow(head, geo, gfp);
456 if (err)
457 return err;
458 }
459
460retry:
461 node = find_level(head, geo, key, level);
462 pos = getpos(geo, node, key);
463 fill = getfill(geo, node, pos);
464 /* two identical keys are not allowed */
465 BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0);
466
467 if (fill == geo->no_pairs) {
468 /* need to split node */
469 unsigned long *new;
470
471 new = btree_node_alloc(head, gfp);
472 if (!new)
473 return -ENOMEM;
474 err = btree_insert_level(head, geo,
475 bkey(geo, node, fill / 2 - 1),
476 new, level + 1, gfp);
477 if (err) {
478 mempool_free(new, head->mempool);
479 return err;
480 }
481 for (i = 0; i < fill / 2; i++) {
482 setkey(geo, new, i, bkey(geo, node, i));
483 setval(geo, new, i, bval(geo, node, i));
484 setkey(geo, node, i, bkey(geo, node, i + fill / 2));
485 setval(geo, node, i, bval(geo, node, i + fill / 2));
486 clearpair(geo, node, i + fill / 2);
487 }
488 if (fill & 1) {
489 setkey(geo, node, i, bkey(geo, node, fill - 1));
490 setval(geo, node, i, bval(geo, node, fill - 1));
491 clearpair(geo, node, fill - 1);
492 }
493 goto retry;
494 }
495 BUG_ON(fill >= geo->no_pairs);
496
497 /* shift and insert */
498 for (i = fill; i > pos; i--) {
499 setkey(geo, node, i, bkey(geo, node, i - 1));
500 setval(geo, node, i, bval(geo, node, i - 1));
501 }
502 setkey(geo, node, pos, key);
503 setval(geo, node, pos, val);
504
505 return 0;
506}
507
508int btree_insert(struct btree_head *head, struct btree_geo *geo,
509 unsigned long *key, void *val, gfp_t gfp)
510{
511 return btree_insert_level(head, geo, key, val, 1, gfp);
512}
513EXPORT_SYMBOL_GPL(btree_insert);
514
515static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
516 unsigned long *key, int level);
517static void merge(struct btree_head *head, struct btree_geo *geo, int level,
518 unsigned long *left, int lfill,
519 unsigned long *right, int rfill,
520 unsigned long *parent, int lpos)
521{
522 int i;
523
524 for (i = 0; i < rfill; i++) {
525 /* Move all keys to the left */
526 setkey(geo, left, lfill + i, bkey(geo, right, i));
527 setval(geo, left, lfill + i, bval(geo, right, i));
528 }
529 /* Exchange left and right child in parent */
530 setval(geo, parent, lpos, right);
531 setval(geo, parent, lpos + 1, left);
532 /* Remove left (formerly right) child from parent */
533 btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1);
534 mempool_free(right, head->mempool);
535}
536
537static void rebalance(struct btree_head *head, struct btree_geo *geo,
538 unsigned long *key, int level, unsigned long *child, int fill)
539{
540 unsigned long *parent, *left = NULL, *right = NULL;
541 int i, no_left, no_right;
542
543 if (fill == 0) {
544 /* Because we don't steal entries from a neigbour, this case
545 * can happen. Parent node contains a single child, this
546 * node, so merging with a sibling never happens.
547 */
548 btree_remove_level(head, geo, key, level + 1);
549 mempool_free(child, head->mempool);
550 return;
551 }
552
553 parent = find_level(head, geo, key, level + 1);
554 i = getpos(geo, parent, key);
555 BUG_ON(bval(geo, parent, i) != child);
556
557 if (i > 0) {
558 left = bval(geo, parent, i - 1);
559 no_left = getfill(geo, left, 0);
560 if (fill + no_left <= geo->no_pairs) {
561 merge(head, geo, level,
562 left, no_left,
563 child, fill,
564 parent, i - 1);
565 return;
566 }
567 }
568 if (i + 1 < getfill(geo, parent, i)) {
569 right = bval(geo, parent, i + 1);
570 no_right = getfill(geo, right, 0);
571 if (fill + no_right <= geo->no_pairs) {
572 merge(head, geo, level,
573 child, fill,
574 right, no_right,
575 parent, i);
576 return;
577 }
578 }
579 /*
580 * We could also try to steal one entry from the left or right
581 * neighbor. By not doing so we changed the invariant from
582 * "all nodes are at least half full" to "no two neighboring
583 * nodes can be merged". Which means that the average fill of
584 * all nodes is still half or better.
585 */
586}
587
588static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
589 unsigned long *key, int level)
590{
591 unsigned long *node;
592 int i, pos, fill;
593 void *ret;
594
595 if (level > head->height) {
596 /* we recursed all the way up */
597 head->height = 0;
598 head->node = NULL;
599 return NULL;
600 }
601
602 node = find_level(head, geo, key, level);
603 pos = getpos(geo, node, key);
604 fill = getfill(geo, node, pos);
605 if ((level == 1) && (keycmp(geo, node, pos, key) != 0))
606 return NULL;
607 ret = bval(geo, node, pos);
608
609 /* remove and shift */
610 for (i = pos; i < fill - 1; i++) {
611 setkey(geo, node, i, bkey(geo, node, i + 1));
612 setval(geo, node, i, bval(geo, node, i + 1));
613 }
614 clearpair(geo, node, fill - 1);
615
616 if (fill - 1 < geo->no_pairs / 2) {
617 if (level < head->height)
618 rebalance(head, geo, key, level, node, fill - 1);
619 else if (fill - 1 == 1)
620 btree_shrink(head, geo);
621 }
622
623 return ret;
624}
625
626void *btree_remove(struct btree_head *head, struct btree_geo *geo,
627 unsigned long *key)
628{
629 if (head->height == 0)
630 return NULL;
631
632 return btree_remove_level(head, geo, key, 1);
633}
634EXPORT_SYMBOL_GPL(btree_remove);
635
636int btree_merge(struct btree_head *target, struct btree_head *victim,
637 struct btree_geo *geo, gfp_t gfp)
638{
639 unsigned long key[geo->keylen];
640 unsigned long dup[geo->keylen];
641 void *val;
642 int err;
643
644 BUG_ON(target == victim);
645
646 if (!(target->node)) {
647 /* target is empty, just copy fields over */
648 target->node = victim->node;
649 target->height = victim->height;
650 __btree_init(victim);
651 return 0;
652 }
653
654 /* TODO: This needs some optimization. Currently we do three tree
655 * walks to remove a single object from the victim.
656 */
657 for (;;) {
658 if (!btree_last(victim, geo, key))
659 break;
660 val = btree_lookup(victim, geo, key);
661 err = btree_insert(target, geo, key, val, gfp);
662 if (err)
663 return err;
664 /* We must make a copy of the key, as the original will get
665 * mangled inside btree_remove. */
666 longcpy(dup, key, geo->keylen);
667 btree_remove(victim, geo, dup);
668 }
669 return 0;
670}
671EXPORT_SYMBOL_GPL(btree_merge);
672
673static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo,
674 unsigned long *node, unsigned long opaque,
675 void (*func)(void *elem, unsigned long opaque,
676 unsigned long *key, size_t index,
677 void *func2),
678 void *func2, int reap, int height, size_t count)
679{
680 int i;
681 unsigned long *child;
682
683 for (i = 0; i < geo->no_pairs; i++) {
684 child = bval(geo, node, i);
685 if (!child)
686 break;
687 if (height > 1)
688 count = __btree_for_each(head, geo, child, opaque,
689 func, func2, reap, height - 1, count);
690 else
691 func(child, opaque, bkey(geo, node, i), count++,
692 func2);
693 }
694 if (reap)
695 mempool_free(node, head->mempool);
696 return count;
697}
698
699static void empty(void *elem, unsigned long opaque, unsigned long *key,
700 size_t index, void *func2)
701{
702}
703
704void visitorl(void *elem, unsigned long opaque, unsigned long *key,
705 size_t index, void *__func)
706{
707 visitorl_t func = __func;
708
709 func(elem, opaque, *key, index);
710}
711EXPORT_SYMBOL_GPL(visitorl);
712
713void visitor32(void *elem, unsigned long opaque, unsigned long *__key,
714 size_t index, void *__func)
715{
716 visitor32_t func = __func;
717 u32 *key = (void *)__key;
718
719 func(elem, opaque, *key, index);
720}
721EXPORT_SYMBOL_GPL(visitor32);
722
723void visitor64(void *elem, unsigned long opaque, unsigned long *__key,
724 size_t index, void *__func)
725{
726 visitor64_t func = __func;
727 u64 *key = (void *)__key;
728
729 func(elem, opaque, *key, index);
730}
731EXPORT_SYMBOL_GPL(visitor64);
732
733void visitor128(void *elem, unsigned long opaque, unsigned long *__key,
734 size_t index, void *__func)
735{
736 visitor128_t func = __func;
737 u64 *key = (void *)__key;
738
739 func(elem, opaque, key[0], key[1], index);
740}
741EXPORT_SYMBOL_GPL(visitor128);
742
743size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
744 unsigned long opaque,
745 void (*func)(void *elem, unsigned long opaque,
746 unsigned long *key,
747 size_t index, void *func2),
748 void *func2)
749{
750 size_t count = 0;
751
752 if (!func2)
753 func = empty;
754 if (head->node)
755 count = __btree_for_each(head, geo, head->node, opaque, func,
756 func2, 0, head->height, 0);
757 return count;
758}
759EXPORT_SYMBOL_GPL(btree_visitor);
760
761size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
762 unsigned long opaque,
763 void (*func)(void *elem, unsigned long opaque,
764 unsigned long *key,
765 size_t index, void *func2),
766 void *func2)
767{
768 size_t count = 0;
769
770 if (!func2)
771 func = empty;
772 if (head->node)
773 count = __btree_for_each(head, geo, head->node, opaque, func,
774 func2, 1, head->height, 0);
775 __btree_init(head);
776 return count;
777}
778EXPORT_SYMBOL_GPL(btree_grim_visitor);
779
780static int __init btree_module_init(void)
781{
782 btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0,
783 SLAB_HWCACHE_ALIGN, NULL);
784 return 0;
785}
786
787static void __exit btree_module_exit(void)
788{
789 kmem_cache_destroy(btree_cachep);
790}
791
792/* If core code starts using btree, initialization should happen even earlier */
793module_init(btree_module_init);
794module_exit(btree_module_exit);
795
796MODULE_AUTHOR("Joern Engel <joern@logfs.org>");
797MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
798MODULE_LICENSE("GPL");
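
A minimal sketch of how a caller might drive the API above, assuming the declarations in include/linux/btree.h (btree_init, btree_destroy and the btree_geo32 geometry are taken on faith here); error handling abbreviated:

static int btree_demo(void)
{
        struct btree_head head;
        unsigned long key = 42;
        void *val;
        int err;

        err = btree_init(&head);
        if (err)
                return err;

        err = btree_insert(&head, &btree_geo32, &key, (void *)0xdead,
                           GFP_KERNEL);
        if (err)
                goto out;

        val = btree_lookup(&head, &btree_geo32, &key);  /* (void *)0xdead */
        val = btree_remove(&head, &btree_geo32, &key);  /* tree empty again */
        (void)val;
out:
        btree_destroy(&head);
        return err;
}

Values must be non-NULL: __btree_for_each() above treats a NULL slot as the end of a node.
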
diff --git a/lib/bug.c b/lib/bug.c
index 300e41afbf97..f13daf435211 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -165,7 +165,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
165 (void *)bugaddr); 165 (void *)bugaddr);
166 166
167 show_regs(regs); 167 show_regs(regs);
168 add_taint(TAINT_WARN); 168 add_taint(BUG_GET_TAINT(bug));
169 return BUG_TRAP_TYPE_WARN; 169 return BUG_TRAP_TYPE_WARN;
170 } 170 }
171 171
diff --git a/lib/checksum.c b/lib/checksum.c
index b2e2fd468461..097508732f34 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -37,7 +37,8 @@
37 37
38#include <asm/byteorder.h> 38#include <asm/byteorder.h>
39 39
40static inline unsigned short from32to16(unsigned long x) 40#ifndef do_csum
41static inline unsigned short from32to16(unsigned int x)
41{ 42{
42 /* add up 16-bit and 16-bit for 16+c bit */ 43 /* add up 16-bit and 16-bit for 16+c bit */
43 x = (x & 0xffff) + (x >> 16); 44 x = (x & 0xffff) + (x >> 16);
@@ -49,16 +50,16 @@ static inline unsigned short from32to16(unsigned long x)
49static unsigned int do_csum(const unsigned char *buff, int len) 50static unsigned int do_csum(const unsigned char *buff, int len)
50{ 51{
51 int odd, count; 52 int odd, count;
52 unsigned long result = 0; 53 unsigned int result = 0;
53 54
54 if (len <= 0) 55 if (len <= 0)
55 goto out; 56 goto out;
56 odd = 1 & (unsigned long) buff; 57 odd = 1 & (unsigned long) buff;
57 if (odd) { 58 if (odd) {
58#ifdef __LITTLE_ENDIAN 59#ifdef __LITTLE_ENDIAN
59 result = *buff;
60#else
61 result += (*buff << 8); 60 result += (*buff << 8);
61#else
62 result = *buff;
62#endif 63#endif
63 len--; 64 len--;
64 buff++; 65 buff++;
@@ -73,9 +74,9 @@ static unsigned int do_csum(const unsigned char *buff, int len)
73 } 74 }
74 count >>= 1; /* nr of 32-bit words.. */ 75 count >>= 1; /* nr of 32-bit words.. */
75 if (count) { 76 if (count) {
76 unsigned long carry = 0; 77 unsigned int carry = 0;
77 do { 78 do {
78 unsigned long w = *(unsigned int *) buff; 79 unsigned int w = *(unsigned int *) buff;
79 count--; 80 count--;
80 buff += 4; 81 buff += 4;
81 result += carry; 82 result += carry;
@@ -102,6 +103,7 @@ static unsigned int do_csum(const unsigned char *buff, int len)
102out: 103out:
103 return result; 104 return result;
104} 105}
106#endif
105 107
106/* 108/*
107 * This is a version of ip_compute_csum() optimized for IP headers, 109 * This is a version of ip_compute_csum() optimized for IP headers,
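
from32to16(), whose argument this patch narrows to unsigned int, folds a 32-bit sum down to 16 bits in two steps. A standalone userspace illustration mirroring the helper:

#include <stdio.h>

static unsigned short from32to16(unsigned int x)
{
        x = (x & 0xffff) + (x >> 16);   /* result is at most 0x1fffe */
        x = (x & 0xffff) + (x >> 16);   /* second fold absorbs the carry */
        return x;
}

int main(void)
{
        printf("%#x\n", from32to16(0xffff0001));        /* prints 0x1 */
        return 0;
}
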
diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c
new file mode 100644
index 000000000000..4dc20321b0d5
--- /dev/null
+++ b/lib/cpu-notifier-error-inject.c
@@ -0,0 +1,63 @@
1#include <linux/kernel.h>
2#include <linux/cpu.h>
3#include <linux/module.h>
4#include <linux/notifier.h>
5
6static int priority;
7static int cpu_up_prepare_error;
8static int cpu_down_prepare_error;
9
10module_param(priority, int, 0);
11MODULE_PARM_DESC(priority, "specify cpu notifier priority");
12
13module_param(cpu_up_prepare_error, int, 0644);
14MODULE_PARM_DESC(cpu_up_prepare_error,
15 "specify error code to inject CPU_UP_PREPARE action");
16
17module_param(cpu_down_prepare_error, int, 0644);
18MODULE_PARM_DESC(cpu_down_prepare_error,
19 "specify error code to inject CPU_DOWN_PREPARE action");
20
21static int err_inject_cpu_callback(struct notifier_block *nfb,
22 unsigned long action, void *hcpu)
23{
24 int err = 0;
25
26 switch (action) {
27 case CPU_UP_PREPARE:
28 case CPU_UP_PREPARE_FROZEN:
29 err = cpu_up_prepare_error;
30 break;
31 case CPU_DOWN_PREPARE:
32 case CPU_DOWN_PREPARE_FROZEN:
33 err = cpu_down_prepare_error;
34 break;
35 }
36 if (err)
37 printk(KERN_INFO "Injecting error (%d) at cpu notifier\n", err);
38
39 return notifier_from_errno(err);
40}
41
42static struct notifier_block err_inject_cpu_notifier = {
43 .notifier_call = err_inject_cpu_callback,
44};
45
46static int err_inject_init(void)
47{
48 err_inject_cpu_notifier.priority = priority;
49
50 return register_hotcpu_notifier(&err_inject_cpu_notifier);
51}
52
53static void err_inject_exit(void)
54{
55 unregister_hotcpu_notifier(&err_inject_cpu_notifier);
56}
57
58module_init(err_inject_init);
59module_exit(err_inject_exit);
60
61MODULE_DESCRIPTION("CPU notifier error injection module");
62MODULE_LICENSE("GPL");
63MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
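
Because notifier chains run callbacks in descending .priority order, a notifier registered with a larger priority than the injector still sees CPU_UP_PREPARE first and then the CPU_UP_CANCELED rollback once the injected error aborts the bring-up. A hypothetical companion notifier (all names invented) showing where the failure becomes visible:

#include <linux/cpu.h>
#include <linux/notifier.h>

static int my_cpu_callback(struct notifier_block *nb,
                           unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_UP_PREPARE:
                /* runs before err_inject_cpu_callback() when
                 * .priority is larger than the injector's */
                break;
        case CPU_UP_CANCELED:
                /* rollback delivered after an injected failure */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_cpu_notifier = {
        .notifier_call  = my_cpu_callback,
        .priority       = 10,   /* injector loaded with priority=0 */
};

Loading the module with, say, cpu_up_prepare_error=-12 (-ENOMEM) then makes every attempt to online a CPU fail in CPU_UP_PREPARE.
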
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 7bb4142a502f..05d6aca7fc19 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -1,3 +1,4 @@
1#include <linux/slab.h>
1#include <linux/kernel.h> 2#include <linux/kernel.h>
2#include <linux/bitops.h> 3#include <linux/bitops.h>
3#include <linux/cpumask.h> 4#include <linux/cpumask.h>
diff --git a/lib/crc32.c b/lib/crc32.c
index 49d1c9e3ce38..4855995fcde9 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -25,16 +25,19 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/compiler.h> 26#include <linux/compiler.h>
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/slab.h>
29#include <linux/init.h> 28#include <linux/init.h>
30#include <asm/atomic.h> 29#include <asm/atomic.h>
31#include "crc32defs.h" 30#include "crc32defs.h"
32#if CRC_LE_BITS == 8 31#if CRC_LE_BITS == 8
33#define tole(x) __constant_cpu_to_le32(x) 32# define tole(x) __constant_cpu_to_le32(x)
34#define tobe(x) __constant_cpu_to_be32(x)
35#else 33#else
36#define tole(x) (x) 34# define tole(x) (x)
37#define tobe(x) (x) 35#endif
36
37#if CRC_BE_BITS == 8
38# define tobe(x) __constant_cpu_to_be32(x)
39#else
40# define tobe(x) (x)
38#endif 41#endif
39#include "crc32table.h" 42#include "crc32table.h"
40 43
@@ -42,6 +45,54 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
42MODULE_DESCRIPTION("Ethernet CRC32 calculations"); 45MODULE_DESCRIPTION("Ethernet CRC32 calculations");
43MODULE_LICENSE("GPL"); 46MODULE_LICENSE("GPL");
44 47
48#if CRC_LE_BITS == 8 || CRC_BE_BITS == 8
49
50static inline u32
51crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
52{
53# ifdef __LITTLE_ENDIAN
54# define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8)
55# define DO_CRC4 crc = tab[3][(crc) & 255] ^ \
56 tab[2][(crc >> 8) & 255] ^ \
57 tab[1][(crc >> 16) & 255] ^ \
58 tab[0][(crc >> 24) & 255]
59# else
60# define DO_CRC(x) crc = tab[0][((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
61# define DO_CRC4 crc = tab[0][(crc) & 255] ^ \
62 tab[1][(crc >> 8) & 255] ^ \
63 tab[2][(crc >> 16) & 255] ^ \
64 tab[3][(crc >> 24) & 255]
65# endif
66 const u32 *b;
67 size_t rem_len;
68
69 /* Align it */
70 if (unlikely((long)buf & 3 && len)) {
71 do {
72 DO_CRC(*buf++);
73 } while ((--len) && ((long)buf)&3);
74 }
75 rem_len = len & 3;
76 /* load data 32 bits wide, xor data 32 bits wide. */
77 len = len >> 2;
78 b = (const u32 *)buf;
79 for (--b; len; --len) {
80 crc ^= *++b; /* use pre increment for speed */
81 DO_CRC4;
82 }
83 len = rem_len;
84 /* And the last few bytes */
85 if (len) {
86 u8 *p = (u8 *)(b + 1) - 1;
87 do {
88 DO_CRC(*++p); /* use pre increment for speed */
89 } while (--len);
90 }
91 return crc;
92#undef DO_CRC
93#undef DO_CRC4
94}
95#endif
45/** 96/**
46 * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 97 * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
47 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for 98 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
@@ -72,52 +123,11 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
72u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) 123u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
73{ 124{
74# if CRC_LE_BITS == 8 125# if CRC_LE_BITS == 8
75 const u32 *b =(u32 *)p; 126 const u32 (*tab)[] = crc32table_le;
76 const u32 *tab = crc32table_le;
77
78# ifdef __LITTLE_ENDIAN
79# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
80# else
81# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
82# endif
83 127
84 crc = __cpu_to_le32(crc); 128 crc = __cpu_to_le32(crc);
85 /* Align it */ 129 crc = crc32_body(crc, p, len, tab);
86 if(unlikely(((long)b)&3 && len)){
87 do {
88 u8 *p = (u8 *)b;
89 DO_CRC(*p++);
90 b = (void *)p;
91 } while ((--len) && ((long)b)&3 );
92 }
93 if(likely(len >= 4)){
94 /* load data 32 bits wide, xor data 32 bits wide. */
95 size_t save_len = len & 3;
96 len = len >> 2;
97 --b; /* use pre increment below(*++b) for speed */
98 do {
99 crc ^= *++b;
100 DO_CRC(0);
101 DO_CRC(0);
102 DO_CRC(0);
103 DO_CRC(0);
104 } while (--len);
105 b++; /* point to next byte(s) */
106 len = save_len;
107 }
108 /* And the last few bytes */
109 if(len){
110 do {
111 u8 *p = (u8 *)b;
112 DO_CRC(*p++);
113 b = (void *)p;
114 } while (--len);
115 }
116
117 return __le32_to_cpu(crc); 130 return __le32_to_cpu(crc);
118#undef ENDIAN_SHIFT
119#undef DO_CRC
120
121# elif CRC_LE_BITS == 4 131# elif CRC_LE_BITS == 4
122 while (len--) { 132 while (len--) {
123 crc ^= *p++; 133 crc ^= *p++;
@@ -170,51 +180,11 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
170u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) 180u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
171{ 181{
172# if CRC_BE_BITS == 8 182# if CRC_BE_BITS == 8
173 const u32 *b =(u32 *)p; 183 const u32 (*tab)[] = crc32table_be;
174 const u32 *tab = crc32table_be;
175
176# ifdef __LITTLE_ENDIAN
177# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
178# else
179# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
180# endif
181 184
182 crc = __cpu_to_be32(crc); 185 crc = __cpu_to_be32(crc);
183 /* Align it */ 186 crc = crc32_body(crc, p, len, tab);
184 if(unlikely(((long)b)&3 && len)){
185 do {
186 u8 *p = (u8 *)b;
187 DO_CRC(*p++);
188 b = (u32 *)p;
189 } while ((--len) && ((long)b)&3 );
190 }
191 if(likely(len >= 4)){
192 /* load data 32 bits wide, xor data 32 bits wide. */
193 size_t save_len = len & 3;
194 len = len >> 2;
195 --b; /* use pre increment below(*++b) for speed */
196 do {
197 crc ^= *++b;
198 DO_CRC(0);
199 DO_CRC(0);
200 DO_CRC(0);
201 DO_CRC(0);
202 } while (--len);
203 b++; /* point to next byte(s) */
204 len = save_len;
205 }
206 /* And the last few bytes */
207 if(len){
208 do {
209 u8 *p = (u8 *)b;
210 DO_CRC(*p++);
211 b = (void *)p;
212 } while (--len);
213 }
214 return __be32_to_cpu(crc); 187 return __be32_to_cpu(crc);
215#undef ENDIAN_SHIFT
216#undef DO_CRC
217
218# elif CRC_BE_BITS == 4 188# elif CRC_BE_BITS == 4
219 while (len--) { 189 while (len--) {
220 crc ^= *p++ << 24; 190 crc ^= *p++ << 24;
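
The crc32_body() rewrite is a slicing-by-4 scheme: one 32-bit load plus four table lookups replace four byte-at-a-time iterations. A self-contained userspace rendition of the little-endian path, using the same 4x256 table layout the gen_crc32table.c change below produces (assumes a little-endian host; the standard check value for "123456789" is 0xcbf43926):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CRCPOLY_LE 0xedb88320

static uint32_t tab[4][256];

static void crc32init(void)
{
        uint32_t crc;
        int i, j;

        for (i = 0; i < 256; i++) {     /* classic one-byte table */
                for (crc = i, j = 0; j < 8; j++)
                        crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
                tab[0][i] = crc;
        }
        for (i = 0; i < 256; i++) {     /* tab[j]: tab[0] advanced j bytes */
                for (crc = tab[0][i], j = 1; j < 4; j++) {
                        crc = tab[0][crc & 0xff] ^ (crc >> 8);
                        tab[j][i] = crc;
                }
        }
}

static uint32_t crc32_le(uint32_t crc, const unsigned char *p, size_t len)
{
        while (((uintptr_t)p & 3) && len) {     /* align to 32 bits */
                crc = tab[0][(crc ^ *p++) & 0xff] ^ (crc >> 8);
                len--;
        }
        while (len >= 4) {                      /* the DO_CRC4 step */
                crc ^= *(const uint32_t *)p;    /* host-endian load */
                crc = tab[3][crc & 0xff] ^ tab[2][(crc >> 8) & 0xff] ^
                      tab[1][(crc >> 16) & 0xff] ^ tab[0][(crc >> 24) & 0xff];
                p += 4;
                len -= 4;
        }
        while (len) {                           /* trailing bytes */
                crc = tab[0][(crc ^ *p++) & 0xff] ^ (crc >> 8);
                len--;
        }
        return crc;
}

int main(void)
{
        crc32init();
        /* expect 0xcbf43926 */
        printf("%#x\n", ~crc32_le(~0u, (const unsigned char *)"123456789", 9));
        return 0;
}
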
diff --git a/lib/ctype.c b/lib/ctype.c
index d02ace14a322..26baa620e95b 100644
--- a/lib/ctype.c
+++ b/lib/ctype.c
@@ -7,30 +7,30 @@
7#include <linux/ctype.h> 7#include <linux/ctype.h>
8#include <linux/module.h> 8#include <linux/module.h>
9 9
10unsigned char _ctype[] = { 10const unsigned char _ctype[] = {
11_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ 11_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
12_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ 12_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
13_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ 13_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
14_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ 14_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
15_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ 15_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
16_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ 16_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
17_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ 17_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
18_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ 18_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
19_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ 19_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
20_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ 20_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
21_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ 21_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
22_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ 22_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
23_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ 23_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
24_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ 24_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
25_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ 25_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
26_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ 26_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
270,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ 270,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
280,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ 280,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
29_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ 29_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
30_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ 30_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
31_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ 31_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
32_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ 32_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
33_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ 33_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
34_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ 34_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
35 35
36EXPORT_SYMBOL(_ctype); 36EXPORT_SYMBOL(_ctype);
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index bc3b11731b9c..5bf0020b9248 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -23,6 +23,7 @@
23 * shut up after that. 23 * shut up after that.
24 */ 24 */
25int debug_locks = 1; 25int debug_locks = 1;
26EXPORT_SYMBOL_GPL(debug_locks);
26 27
27/* 28/*
28 * The locking-testsuite uses <debug_locks_silent> to get a 29 * The locking-testsuite uses <debug_locks_silent> to get a
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 2755a3bd16a1..deebcc57d4e6 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -9,8 +9,10 @@
9 */ 9 */
10#include <linux/debugobjects.h> 10#include <linux/debugobjects.h>
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/sched.h>
12#include <linux/seq_file.h> 13#include <linux/seq_file.h>
13#include <linux/debugfs.h> 14#include <linux/debugfs.h>
15#include <linux/slab.h>
14#include <linux/hash.h> 16#include <linux/hash.h>
15 17
16#define ODEBUG_HASH_BITS 14 18#define ODEBUG_HASH_BITS 14
@@ -25,14 +27,14 @@
25 27
26struct debug_bucket { 28struct debug_bucket {
27 struct hlist_head list; 29 struct hlist_head list;
28 spinlock_t lock; 30 raw_spinlock_t lock;
29}; 31};
30 32
31static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; 33static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
32 34
33static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; 35static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
34 36
35static DEFINE_SPINLOCK(pool_lock); 37static DEFINE_RAW_SPINLOCK(pool_lock);
36 38
37static HLIST_HEAD(obj_pool); 39static HLIST_HEAD(obj_pool);
38 40
@@ -95,10 +97,10 @@ static int fill_pool(void)
95 if (!new) 97 if (!new)
96 return obj_pool_free; 98 return obj_pool_free;
97 99
98 spin_lock_irqsave(&pool_lock, flags); 100 raw_spin_lock_irqsave(&pool_lock, flags);
99 hlist_add_head(&new->node, &obj_pool); 101 hlist_add_head(&new->node, &obj_pool);
100 obj_pool_free++; 102 obj_pool_free++;
101 spin_unlock_irqrestore(&pool_lock, flags); 103 raw_spin_unlock_irqrestore(&pool_lock, flags);
102 } 104 }
103 return obj_pool_free; 105 return obj_pool_free;
104} 106}
@@ -132,13 +134,14 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
132{ 134{
133 struct debug_obj *obj = NULL; 135 struct debug_obj *obj = NULL;
134 136
135 spin_lock(&pool_lock); 137 raw_spin_lock(&pool_lock);
136 if (obj_pool.first) { 138 if (obj_pool.first) {
137 obj = hlist_entry(obj_pool.first, typeof(*obj), node); 139 obj = hlist_entry(obj_pool.first, typeof(*obj), node);
138 140
139 obj->object = addr; 141 obj->object = addr;
140 obj->descr = descr; 142 obj->descr = descr;
141 obj->state = ODEBUG_STATE_NONE; 143 obj->state = ODEBUG_STATE_NONE;
144 obj->astate = 0;
142 hlist_del(&obj->node); 145 hlist_del(&obj->node);
143 146
144 hlist_add_head(&obj->node, &b->list); 147 hlist_add_head(&obj->node, &b->list);
@@ -151,7 +154,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
151 if (obj_pool_free < obj_pool_min_free) 154 if (obj_pool_free < obj_pool_min_free)
152 obj_pool_min_free = obj_pool_free; 155 obj_pool_min_free = obj_pool_free;
153 } 156 }
154 spin_unlock(&pool_lock); 157 raw_spin_unlock(&pool_lock);
155 158
156 return obj; 159 return obj;
157} 160}
@@ -164,7 +167,7 @@ static void free_obj_work(struct work_struct *work)
164 struct debug_obj *obj; 167 struct debug_obj *obj;
165 unsigned long flags; 168 unsigned long flags;
166 169
167 spin_lock_irqsave(&pool_lock, flags); 170 raw_spin_lock_irqsave(&pool_lock, flags);
168 while (obj_pool_free > ODEBUG_POOL_SIZE) { 171 while (obj_pool_free > ODEBUG_POOL_SIZE) {
169 obj = hlist_entry(obj_pool.first, typeof(*obj), node); 172 obj = hlist_entry(obj_pool.first, typeof(*obj), node);
170 hlist_del(&obj->node); 173 hlist_del(&obj->node);
@@ -173,11 +176,11 @@ static void free_obj_work(struct work_struct *work)
173 * We release pool_lock across kmem_cache_free() to 176 * We release pool_lock across kmem_cache_free() to
174 * avoid contention on pool_lock. 177 * avoid contention on pool_lock.
175 */ 178 */
176 spin_unlock_irqrestore(&pool_lock, flags); 179 raw_spin_unlock_irqrestore(&pool_lock, flags);
177 kmem_cache_free(obj_cache, obj); 180 kmem_cache_free(obj_cache, obj);
178 spin_lock_irqsave(&pool_lock, flags); 181 raw_spin_lock_irqsave(&pool_lock, flags);
179 } 182 }
180 spin_unlock_irqrestore(&pool_lock, flags); 183 raw_spin_unlock_irqrestore(&pool_lock, flags);
181} 184}
182 185
183/* 186/*
@@ -189,7 +192,7 @@ static void free_object(struct debug_obj *obj)
189 unsigned long flags; 192 unsigned long flags;
190 int sched = 0; 193 int sched = 0;
191 194
192 spin_lock_irqsave(&pool_lock, flags); 195 raw_spin_lock_irqsave(&pool_lock, flags);
193 /* 196 /*
194 * schedule work when the pool is filled and the cache is 197 * schedule work when the pool is filled and the cache is
195 * initialized: 198 * initialized:
@@ -199,7 +202,7 @@ static void free_object(struct debug_obj *obj)
199 hlist_add_head(&obj->node, &obj_pool); 202 hlist_add_head(&obj->node, &obj_pool);
200 obj_pool_free++; 203 obj_pool_free++;
201 obj_pool_used--; 204 obj_pool_used--;
202 spin_unlock_irqrestore(&pool_lock, flags); 205 raw_spin_unlock_irqrestore(&pool_lock, flags);
203 if (sched) 206 if (sched)
204 schedule_work(&debug_obj_work); 207 schedule_work(&debug_obj_work);
205} 208}
@@ -220,9 +223,9 @@ static void debug_objects_oom(void)
220 printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); 223 printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
221 224
222 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { 225 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
223 spin_lock_irqsave(&db->lock, flags); 226 raw_spin_lock_irqsave(&db->lock, flags);
224 hlist_move_list(&db->list, &freelist); 227 hlist_move_list(&db->list, &freelist);
225 spin_unlock_irqrestore(&db->lock, flags); 228 raw_spin_unlock_irqrestore(&db->lock, flags);
226 229
227 /* Now free them */ 230 /* Now free them */
228 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { 231 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -250,8 +253,10 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
250 253
251 if (limit < 5 && obj->descr != descr_test) { 254 if (limit < 5 && obj->descr != descr_test) {
252 limit++; 255 limit++;
253 WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, 256 WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
254 obj_states[obj->state], obj->descr->name); 257 "object type: %s\n",
258 msg, obj_states[obj->state], obj->astate,
259 obj->descr->name);
255 } 260 }
256 debug_objects_warnings++; 261 debug_objects_warnings++;
257} 262}
@@ -302,14 +307,14 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
302 307
303 db = get_bucket((unsigned long) addr); 308 db = get_bucket((unsigned long) addr);
304 309
305 spin_lock_irqsave(&db->lock, flags); 310 raw_spin_lock_irqsave(&db->lock, flags);
306 311
307 obj = lookup_object(addr, db); 312 obj = lookup_object(addr, db);
308 if (!obj) { 313 if (!obj) {
309 obj = alloc_object(addr, db, descr); 314 obj = alloc_object(addr, db, descr);
310 if (!obj) { 315 if (!obj) {
311 debug_objects_enabled = 0; 316 debug_objects_enabled = 0;
312 spin_unlock_irqrestore(&db->lock, flags); 317 raw_spin_unlock_irqrestore(&db->lock, flags);
313 debug_objects_oom(); 318 debug_objects_oom();
314 return; 319 return;
315 } 320 }
@@ -326,7 +331,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
326 case ODEBUG_STATE_ACTIVE: 331 case ODEBUG_STATE_ACTIVE:
327 debug_print_object(obj, "init"); 332 debug_print_object(obj, "init");
328 state = obj->state; 333 state = obj->state;
329 spin_unlock_irqrestore(&db->lock, flags); 334 raw_spin_unlock_irqrestore(&db->lock, flags);
330 debug_object_fixup(descr->fixup_init, addr, state); 335 debug_object_fixup(descr->fixup_init, addr, state);
331 return; 336 return;
332 337
@@ -337,7 +342,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
337 break; 342 break;
338 } 343 }
339 344
340 spin_unlock_irqrestore(&db->lock, flags); 345 raw_spin_unlock_irqrestore(&db->lock, flags);
341} 346}
342 347
343/** 348/**
@@ -384,7 +389,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
384 389
385 db = get_bucket((unsigned long) addr); 390 db = get_bucket((unsigned long) addr);
386 391
387 spin_lock_irqsave(&db->lock, flags); 392 raw_spin_lock_irqsave(&db->lock, flags);
388 393
389 obj = lookup_object(addr, db); 394 obj = lookup_object(addr, db);
390 if (obj) { 395 if (obj) {
@@ -397,7 +402,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
397 case ODEBUG_STATE_ACTIVE: 402 case ODEBUG_STATE_ACTIVE:
398 debug_print_object(obj, "activate"); 403 debug_print_object(obj, "activate");
399 state = obj->state; 404 state = obj->state;
400 spin_unlock_irqrestore(&db->lock, flags); 405 raw_spin_unlock_irqrestore(&db->lock, flags);
401 debug_object_fixup(descr->fixup_activate, addr, state); 406 debug_object_fixup(descr->fixup_activate, addr, state);
402 return; 407 return;
403 408
@@ -407,11 +412,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
407 default: 412 default:
408 break; 413 break;
409 } 414 }
410 spin_unlock_irqrestore(&db->lock, flags); 415 raw_spin_unlock_irqrestore(&db->lock, flags);
411 return; 416 return;
412 } 417 }
413 418
414 spin_unlock_irqrestore(&db->lock, flags); 419 raw_spin_unlock_irqrestore(&db->lock, flags);
415 /* 420 /*
416 * This happens when a static object is activated. We 421 * This happens when a static object is activated. We
417 * let the type specific code decide whether this is 422 * let the type specific code decide whether this is
@@ -437,7 +442,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
437 442
438 db = get_bucket((unsigned long) addr); 443 db = get_bucket((unsigned long) addr);
439 444
440 spin_lock_irqsave(&db->lock, flags); 445 raw_spin_lock_irqsave(&db->lock, flags);
441 446
442 obj = lookup_object(addr, db); 447 obj = lookup_object(addr, db);
443 if (obj) { 448 if (obj) {
@@ -445,7 +450,10 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
445 case ODEBUG_STATE_INIT: 450 case ODEBUG_STATE_INIT:
446 case ODEBUG_STATE_INACTIVE: 451 case ODEBUG_STATE_INACTIVE:
447 case ODEBUG_STATE_ACTIVE: 452 case ODEBUG_STATE_ACTIVE:
448 obj->state = ODEBUG_STATE_INACTIVE; 453 if (!obj->astate)
454 obj->state = ODEBUG_STATE_INACTIVE;
455 else
456 debug_print_object(obj, "deactivate");
449 break; 457 break;
450 458
451 case ODEBUG_STATE_DESTROYED: 459 case ODEBUG_STATE_DESTROYED:
@@ -462,7 +470,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
462 debug_print_object(&o, "deactivate"); 470 debug_print_object(&o, "deactivate");
463 } 471 }
464 472
465 spin_unlock_irqrestore(&db->lock, flags); 473 raw_spin_unlock_irqrestore(&db->lock, flags);
466} 474}
467 475
468/** 476/**
@@ -482,7 +490,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
482 490
483 db = get_bucket((unsigned long) addr); 491 db = get_bucket((unsigned long) addr);
484 492
485 spin_lock_irqsave(&db->lock, flags); 493 raw_spin_lock_irqsave(&db->lock, flags);
486 494
487 obj = lookup_object(addr, db); 495 obj = lookup_object(addr, db);
488 if (!obj) 496 if (!obj)
@@ -497,7 +505,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
497 case ODEBUG_STATE_ACTIVE: 505 case ODEBUG_STATE_ACTIVE:
498 debug_print_object(obj, "destroy"); 506 debug_print_object(obj, "destroy");
499 state = obj->state; 507 state = obj->state;
500 spin_unlock_irqrestore(&db->lock, flags); 508 raw_spin_unlock_irqrestore(&db->lock, flags);
501 debug_object_fixup(descr->fixup_destroy, addr, state); 509 debug_object_fixup(descr->fixup_destroy, addr, state);
502 return; 510 return;
503 511
@@ -508,7 +516,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
508 break; 516 break;
509 } 517 }
510out_unlock: 518out_unlock:
511 spin_unlock_irqrestore(&db->lock, flags); 519 raw_spin_unlock_irqrestore(&db->lock, flags);
512} 520}
513 521
514/** 522/**
@@ -528,7 +536,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
528 536
529 db = get_bucket((unsigned long) addr); 537 db = get_bucket((unsigned long) addr);
530 538
531 spin_lock_irqsave(&db->lock, flags); 539 raw_spin_lock_irqsave(&db->lock, flags);
532 540
533 obj = lookup_object(addr, db); 541 obj = lookup_object(addr, db);
534 if (!obj) 542 if (!obj)
@@ -538,17 +546,64 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
538 case ODEBUG_STATE_ACTIVE: 546 case ODEBUG_STATE_ACTIVE:
539 debug_print_object(obj, "free"); 547 debug_print_object(obj, "free");
540 state = obj->state; 548 state = obj->state;
541 spin_unlock_irqrestore(&db->lock, flags); 549 raw_spin_unlock_irqrestore(&db->lock, flags);
542 debug_object_fixup(descr->fixup_free, addr, state); 550 debug_object_fixup(descr->fixup_free, addr, state);
543 return; 551 return;
544 default: 552 default:
545 hlist_del(&obj->node); 553 hlist_del(&obj->node);
546 spin_unlock_irqrestore(&db->lock, flags); 554 raw_spin_unlock_irqrestore(&db->lock, flags);
547 free_object(obj); 555 free_object(obj);
548 return; 556 return;
549 } 557 }
550out_unlock: 558out_unlock:
551 spin_unlock_irqrestore(&db->lock, flags); 559 raw_spin_unlock_irqrestore(&db->lock, flags);
560}
561
562/**
563 * debug_object_active_state - debug checks object usage state machine
564 * @addr: address of the object
565 * @descr: pointer to an object specific debug description structure
566 * @expect: expected state
567 * @next: state to move to if expected state is found
568 */
569void
570debug_object_active_state(void *addr, struct debug_obj_descr *descr,
571 unsigned int expect, unsigned int next)
572{
573 struct debug_bucket *db;
574 struct debug_obj *obj;
575 unsigned long flags;
576
577 if (!debug_objects_enabled)
578 return;
579
580 db = get_bucket((unsigned long) addr);
581
582 raw_spin_lock_irqsave(&db->lock, flags);
583
584 obj = lookup_object(addr, db);
585 if (obj) {
586 switch (obj->state) {
587 case ODEBUG_STATE_ACTIVE:
588 if (obj->astate == expect)
589 obj->astate = next;
590 else
591 debug_print_object(obj, "active_state");
592 break;
593
594 default:
595 debug_print_object(obj, "active_state");
596 break;
597 }
598 } else {
599 struct debug_obj o = { .object = addr,
600 .state = ODEBUG_STATE_NOTAVAILABLE,
601 .descr = descr };
602
603 debug_print_object(&o, "active_state");
604 }
605
606 raw_spin_unlock_irqrestore(&db->lock, flags);
552} 607}
553 608
554#ifdef CONFIG_DEBUG_OBJECTS_FREE 609#ifdef CONFIG_DEBUG_OBJECTS_FREE
@@ -574,7 +629,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
574 629
575repeat: 630repeat:
576 cnt = 0; 631 cnt = 0;
577 spin_lock_irqsave(&db->lock, flags); 632 raw_spin_lock_irqsave(&db->lock, flags);
578 hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { 633 hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
579 cnt++; 634 cnt++;
580 oaddr = (unsigned long) obj->object; 635 oaddr = (unsigned long) obj->object;
@@ -586,7 +641,7 @@ repeat:
586 debug_print_object(obj, "free"); 641 debug_print_object(obj, "free");
587 descr = obj->descr; 642 descr = obj->descr;
588 state = obj->state; 643 state = obj->state;
589 spin_unlock_irqrestore(&db->lock, flags); 644 raw_spin_unlock_irqrestore(&db->lock, flags);
590 debug_object_fixup(descr->fixup_free, 645 debug_object_fixup(descr->fixup_free,
591 (void *) oaddr, state); 646 (void *) oaddr, state);
592 goto repeat; 647 goto repeat;
@@ -596,7 +651,7 @@ repeat:
596 break; 651 break;
597 } 652 }
598 } 653 }
599 spin_unlock_irqrestore(&db->lock, flags); 654 raw_spin_unlock_irqrestore(&db->lock, flags);
600 655
601 /* Now free them */ 656 /* Now free them */
602 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { 657 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -772,7 +827,7 @@ static int __init fixup_free(void *addr, enum debug_obj_state state)
772 } 827 }
773} 828}
774 829
775static int 830static int __init
776check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) 831check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
777{ 832{
778 struct debug_bucket *db; 833 struct debug_bucket *db;
@@ -782,7 +837,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
782 837
783 db = get_bucket((unsigned long) addr); 838 db = get_bucket((unsigned long) addr);
784 839
785 spin_lock_irqsave(&db->lock, flags); 840 raw_spin_lock_irqsave(&db->lock, flags);
786 841
787 obj = lookup_object(addr, db); 842 obj = lookup_object(addr, db);
788 if (!obj && state != ODEBUG_STATE_NONE) { 843 if (!obj && state != ODEBUG_STATE_NONE) {
@@ -806,7 +861,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
806 } 861 }
807 res = 0; 862 res = 0;
808out: 863out:
809 spin_unlock_irqrestore(&db->lock, flags); 864 raw_spin_unlock_irqrestore(&db->lock, flags);
810 if (res) 865 if (res)
811 debug_objects_enabled = 0; 866 debug_objects_enabled = 0;
812 return res; 867 return res;
@@ -906,7 +961,7 @@ void __init debug_objects_early_init(void)
906 int i; 961 int i;
907 962
908 for (i = 0; i < ODEBUG_HASH_SIZE; i++) 963 for (i = 0; i < ODEBUG_HASH_SIZE; i++)
909 spin_lock_init(&obj_hash[i].lock); 964 raw_spin_lock_init(&obj_hash[i].lock);
910 965
911 for (i = 0; i < ODEBUG_POOL_SIZE; i++) 966 for (i = 0; i < ODEBUG_POOL_SIZE; i++)
912 hlist_add_head(&obj_static_pool[i].node, &obj_pool); 967 hlist_add_head(&obj_static_pool[i].node, &obj_pool);
@@ -915,7 +970,7 @@ void __init debug_objects_early_init(void)
915/* 970/*
916 * Convert the statically allocated objects to dynamic ones: 971 * Convert the statically allocated objects to dynamic ones:
917 */ 972 */
918static int debug_objects_replace_static_objects(void) 973static int __init debug_objects_replace_static_objects(void)
919{ 974{
920 struct debug_bucket *db = obj_hash; 975 struct debug_bucket *db = obj_hash;
921 struct hlist_node *node, *tmp; 976 struct hlist_node *node, *tmp;
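
debug_object_active_state(), added above, lets a subsystem assert a transition of its private astate while an object is ACTIVE; deactivation now also warns when astate is nonzero. A hypothetical consumer sketch (descriptor and state names invented):

#include <linux/debugobjects.h>

enum { MY_STATE_QUEUED, MY_STATE_RUNNING };

static struct debug_obj_descr my_descr = {
        .name   = "my_work",
};

static void my_work_run(void *obj)
{
        /* warns unless the object is ACTIVE with astate == QUEUED,
         * then advances astate to RUNNING */
        debug_object_active_state(obj, &my_descr, MY_STATE_QUEUED,
                                  MY_STATE_RUNNING);
        /* ... run the work ... */
}
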
diff --git a/lib/decompress.c b/lib/decompress.c
index d2842f571674..a7606815541f 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -9,6 +9,7 @@
9#include <linux/decompress/bunzip2.h> 9#include <linux/decompress/bunzip2.h>
10#include <linux/decompress/unlzma.h> 10#include <linux/decompress/unlzma.h>
11#include <linux/decompress/inflate.h> 11#include <linux/decompress/inflate.h>
12#include <linux/decompress/unlzo.h>
12 13
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/string.h> 15#include <linux/string.h>
@@ -22,6 +23,9 @@
22#ifndef CONFIG_DECOMPRESS_LZMA 23#ifndef CONFIG_DECOMPRESS_LZMA
23# define unlzma NULL 24# define unlzma NULL
24#endif 25#endif
26#ifndef CONFIG_DECOMPRESS_LZO
27# define unlzo NULL
28#endif
25 29
26static const struct compress_format { 30static const struct compress_format {
27 unsigned char magic[2]; 31 unsigned char magic[2];
@@ -32,6 +36,7 @@ static const struct compress_format {
32 { {037, 0236}, "gzip", gunzip }, 36 { {037, 0236}, "gzip", gunzip },
33 { {0x42, 0x5a}, "bzip2", bunzip2 }, 37 { {0x42, 0x5a}, "bzip2", bunzip2 },
34 { {0x5d, 0x00}, "lzma", unlzma }, 38 { {0x5d, 0x00}, "lzma", unlzma },
39 { {0x89, 0x4c}, "lzo", unlzo },
35 { {0, 0}, NULL, NULL } 40 { {0, 0}, NULL, NULL }
36}; 41};
37 42
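
For reference, the rough shape of the dispatcher that consumes this magic table; the real loop lives in decompress_method() outside this hunk, so treat the names here as approximate:

static decompress_fn find_decompressor(const unsigned char *inbuf,
                                       const char **name)
{
        const struct compress_format *cf;

        for (cf = compressed_formats; cf->name; cf++)
                if (inbuf[0] == cf->magic[0] && inbuf[1] == cf->magic[1])
                        break;
        *name = cf->name;       /* NULL when nothing matched */
        return cf->decompressor;
}
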
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 600f473a5610..a4e971dee102 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -299,7 +299,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
299 again when using them (during symbol decoding).*/ 299 again when using them (during symbol decoding).*/
300 base = hufGroup->base-1; 300 base = hufGroup->base-1;
301 limit = hufGroup->limit-1; 301 limit = hufGroup->limit-1;
302 /* Calculate permute[]. Concurently, initialize 302 /* Calculate permute[]. Concurrently, initialize
303 * temp[] and limit[]. */ 303 * temp[] and limit[]. */
304 pp = 0; 304 pp = 0;
305 for (i = minLen; i <= maxLen; i++) { 305 for (i = minLen; i <= maxLen; i++) {
@@ -637,6 +637,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
637 637
638 /* Allocate bunzip_data. Most fields initialize to zero. */ 638 /* Allocate bunzip_data. Most fields initialize to zero. */
639 bd = *bdp = malloc(i); 639 bd = *bdp = malloc(i);
640 if (!bd)
641 return RETVAL_OUT_OF_MEMORY;
640 memset(bd, 0, sizeof(struct bunzip_data)); 642 memset(bd, 0, sizeof(struct bunzip_data));
641 /* Setup input buffer */ 643 /* Setup input buffer */
642 bd->inbuf = inbuf; 644 bd->inbuf = inbuf;
@@ -664,6 +666,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
664 bd->dbufSize = 100000*(i-BZh0); 666 bd->dbufSize = 100000*(i-BZh0);
665 667
666 bd->dbuf = large_malloc(bd->dbufSize * sizeof(int)); 668 bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
669 if (!bd->dbuf)
670 return RETVAL_OUT_OF_MEMORY;
667 return RETVAL_OK; 671 return RETVAL_OK;
668} 672}
669 673
@@ -686,7 +690,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
686 690
687 if (!outbuf) { 691 if (!outbuf) {
688 error("Could not allocate output buffer"); 692 error("Could not allocate output buffer");
689 return -1; 693 return RETVAL_OUT_OF_MEMORY;
690 } 694 }
691 if (buf) 695 if (buf)
692 inbuf = buf; 696 inbuf = buf;
@@ -694,6 +698,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
694 inbuf = malloc(BZIP2_IOBUF_SIZE); 698 inbuf = malloc(BZIP2_IOBUF_SIZE);
695 if (!inbuf) { 699 if (!inbuf) {
696 error("Could not allocate input buffer"); 700 error("Could not allocate input buffer");
701 i = RETVAL_OUT_OF_MEMORY;
697 goto exit_0; 702 goto exit_0;
698 } 703 }
699 i = start_bunzip(&bd, inbuf, len, fill); 704 i = start_bunzip(&bd, inbuf, len, fill);
@@ -720,11 +725,14 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
720 } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) { 725 } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) {
721 error("Compressed file ends unexpectedly"); 726 error("Compressed file ends unexpectedly");
722 } 727 }
728 if (!bd)
729 goto exit_1;
723 if (bd->dbuf) 730 if (bd->dbuf)
724 large_free(bd->dbuf); 731 large_free(bd->dbuf);
725 if (pos) 732 if (pos)
726 *pos = bd->inbufPos; 733 *pos = bd->inbufPos;
727 free(bd); 734 free(bd);
735exit_1:
728 if (!buf) 736 if (!buf)
729 free(inbuf); 737 free(inbuf);
730exit_0: 738exit_0:
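
Taken together, these hunks make the bunzip2() error path safe when start_bunzip() itself runs out of memory: bd may then be NULL, so the bd-dependent cleanup has to be skipped. Condensed control flow after the patch (a sketch, not the full function):

        i = start_bunzip(&bd, inbuf, len, fill);
        if (i == RETVAL_OK) {
                /* ... decompress and flush blocks ... */
        }
        /* ... report errors in i ... */
        if (!bd)
                goto exit_1;    /* allocation failed inside start_bunzip() */
        if (bd->dbuf)
                large_free(bd->dbuf);
        if (pos)
                *pos = bd->inbufPos;
        free(bd);
exit_1:
        if (!buf)
                free(inbuf);
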
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
new file mode 100644
index 000000000000..bcb3a4bd68ff
--- /dev/null
+++ b/lib/decompress_unlzo.c
@@ -0,0 +1,217 @@
1/*
2 * LZO decompressor for the Linux kernel. Code borrowed from the lzo
3 * implementation by Markus Franz Xaver Johannes Oberhumer.
4 *
5 * Linux kernel adaptation:
6 * Copyright (C) 2009
7 * Albin Tonnerre, Free Electrons <albin.tonnerre@free-electrons.com>
8 *
9 * Original code:
10 * Copyright (C) 1996-2005 Markus Franz Xaver Johannes Oberhumer
11 * All Rights Reserved.
12 *
13 * lzop and the LZO library are free software; you can redistribute them
14 * and/or modify them under the terms of the GNU General Public License as
15 * published by the Free Software Foundation; either version 2 of
16 * the License, or (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; see the file COPYING.
25 * If not, write to the Free Software Foundation, Inc.,
26 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 *
28 * Markus F.X.J. Oberhumer
29 * <markus@oberhumer.com>
30 * http://www.oberhumer.com/opensource/lzop/
31 */
32
33#ifdef STATIC
34#include "lzo/lzo1x_decompress.c"
35#else
36#include <linux/slab.h>
37#include <linux/decompress/unlzo.h>
38#endif
39
40#include <linux/types.h>
41#include <linux/lzo.h>
42#include <linux/decompress/mm.h>
43
44#include <linux/compiler.h>
45#include <asm/unaligned.h>
46
47static const unsigned char lzop_magic[] = {
48 0x89, 0x4c, 0x5a, 0x4f, 0x00, 0x0d, 0x0a, 0x1a, 0x0a };
49
50#define LZO_BLOCK_SIZE (256*1024l)
51#define HEADER_HAS_FILTER 0x00000800L
52
53STATIC inline int INIT parse_header(u8 *input, u8 *skip)
54{
55 int l;
56 u8 *parse = input;
57 u8 level = 0;
58 u16 version;
59
60 /* read magic: first 9 bytes */
61 for (l = 0; l < 9; l++) {
62 if (*parse++ != lzop_magic[l])
63 return 0;
64 }
65 /* get version (2 bytes), skip library version (2),
66 * 'need to be extracted' version (2) and
67 * method (1) */
68 version = get_unaligned_be16(parse);
69 parse += 7;
70 if (version >= 0x0940)
71 level = *parse++;
72 if (get_unaligned_be32(parse) & HEADER_HAS_FILTER)
73 parse += 8; /* flags + filter info */
74 else
75 parse += 4; /* flags */
76
77 /* skip mode and mtime_low */
78 parse += 8;
79 if (version >= 0x0940)
80 parse += 4; /* skip mtime_high */
81
82 l = *parse++;
83 /* don't care about the file name, and skip checksum */
84 parse += l + 4;
85
86 *skip = parse - input;
87 return 1;
88}
89
90STATIC inline int INIT unlzo(u8 *input, int in_len,
91 int (*fill) (void *, unsigned int),
92 int (*flush) (void *, unsigned int),
93 u8 *output, int *posp,
94 void (*error_fn) (char *x))
95{
96 u8 skip = 0, r = 0;
97 u32 src_len, dst_len;
98 size_t tmp;
99 u8 *in_buf, *in_buf_save, *out_buf;
100 int ret = -1;
101
102 set_error_fn(error_fn);
103
104 if (output) {
105 out_buf = output;
106 } else if (!flush) {
107 error("NULL output pointer and no flush function provided");
108 goto exit;
109 } else {
110 out_buf = malloc(LZO_BLOCK_SIZE);
111 if (!out_buf) {
112 error("Could not allocate output buffer");
113 goto exit;
114 }
115 }
116
117 if (input && fill) {
118 error("Both input pointer and fill function provided, don't know what to do");
119 goto exit_1;
120 } else if (input) {
121 in_buf = input;
122 } else if (!fill || !posp) {
123 error("NULL input pointer and missing position pointer or fill function");
124 goto exit_1;
125 } else {
126 in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE));
127 if (!in_buf) {
128 error("Could not allocate input buffer");
129 goto exit_1;
130 }
131 }
132 in_buf_save = in_buf;
133
134 if (posp)
135 *posp = 0;
136
137 if (fill)
138 fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE));
139
140 if (!parse_header(input, &skip)) {
141 error("invalid header");
142 goto exit_2;
143 }
144 in_buf += skip;
145
146 if (posp)
147 *posp = skip;
148
149 for (;;) {
150 /* read uncompressed block size */
151 dst_len = get_unaligned_be32(in_buf);
152 in_buf += 4;
153
154 /* exit if last block */
155 if (dst_len == 0) {
156 if (posp)
157 *posp += 4;
158 break;
159 }
160
161 if (dst_len > LZO_BLOCK_SIZE) {
162 error("dest len longer than block size");
163 goto exit_2;
164 }
165
166 /* read compressed block size, and skip block checksum info */
167 src_len = get_unaligned_be32(in_buf);
168 in_buf += 8;
169
170 if (src_len <= 0 || src_len > dst_len) {
171 error("file corrupted");
172 goto exit_2;
173 }
174
175 /* decompress */
176 tmp = dst_len;
177
178 /* When the input data is not compressed at all,
179 * lzo1x_decompress_safe will fail, so call memcpy()
180 * instead */
181 if (unlikely(dst_len == src_len))
182 memcpy(out_buf, in_buf, src_len);
183 else {
184 r = lzo1x_decompress_safe((u8 *) in_buf, src_len,
185 out_buf, &tmp);
186
187 if (r != LZO_E_OK || dst_len != tmp) {
188 error("Compressed data violation");
189 goto exit_2;
190 }
191 }
192
193 if (flush)
194 flush(out_buf, dst_len);
195 if (output)
196 out_buf += dst_len;
197 if (posp)
198 *posp += src_len + 12;
199 if (fill) {
200 in_buf = in_buf_save;
201 fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE));
202 } else
203 in_buf += src_len;
204 }
205
206 ret = 0;
207exit_2:
208 if (!input)
209 free(in_buf);
210exit_1:
211 if (!output)
212 free(out_buf);
213exit:
214 return ret;
215}
216
217#define decompress unlzo
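
The block loop above relies on lzop's simple framing: each block is [be32 uncompressed_len][be32 compressed_len][be32 checksum][compressed data], and a zero uncompressed length terminates the stream. A userspace sketch that only walks the framing (decompression stubbed out, bounds checks added):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t be32(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | p[3];
}

static int walk_blocks(const uint8_t *in, size_t len)
{
        size_t pos = 0;

        for (;;) {
                uint32_t dst_len, src_len;

                if (pos + 4 > len)
                        return -1;
                dst_len = be32(in + pos);
                pos += 4;
                if (dst_len == 0)               /* last block */
                        return 0;
                if (pos + 8 > len)
                        return -1;
                src_len = be32(in + pos);
                pos += 8;                       /* skip the checksum word */
                if (src_len == 0 || src_len > dst_len || pos + src_len > len)
                        return -1;              /* corrupt stream */
                printf("block: %u -> %u bytes\n",
                       (unsigned)src_len, (unsigned)dst_len);
                pos += src_len;
        }
}

int main(void)
{
        static const uint8_t end_marker[4];     /* lone terminator */
        return walk_blocks(end_marker, sizeof(end_marker));
}
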
diff --git a/lib/devres.c b/lib/devres.c
index 72c8909006da..6efddf53b90c 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -1,5 +1,6 @@
1#include <linux/pci.h> 1#include <linux/pci.h>
2#include <linux/io.h> 2#include <linux/io.h>
3#include <linux/gfp.h>
3#include <linux/module.h> 4#include <linux/module.h>
4 5
5void devm_ioremap_release(struct device *dev, void *res) 6void devm_ioremap_release(struct device *dev, void *res)
@@ -327,7 +328,7 @@ EXPORT_SYMBOL(pcim_iomap_regions_request_all);
327 * @pdev: PCI device to map IO resources for 328 * @pdev: PCI device to map IO resources for
328 * @mask: Mask of BARs to unmap and release 329 * @mask: Mask of BARs to unmap and release
329 * 330 *
330 * Unamp and release regions specified by @mask. 331 * Unmap and release regions specified by @mask.
331 */ 332 */
332void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask) 333void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask)
333{ 334{
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 58a9f9fc609a..01e64270e246 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -259,7 +259,7 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
259 * times. Without a hardware IOMMU this results in the 259 * times. Without a hardware IOMMU this results in the
260 * same device addresses being put into the dma-debug 260 * same device addresses being put into the dma-debug
261 * hash multiple times too. This can result in false 261 * hash multiple times too. This can result in false
262 * positives being reported. Therfore we implement a 262 * positives being reported. Therefore we implement a
263 * best-fit algorithm here which returns the entry from 263 * best-fit algorithm here which returns the entry from
264 * the hash which fits best to the reference value 264 * the hash which fits best to the reference value
265 * instead of the first-fit. 265 * instead of the first-fit.
@@ -570,7 +570,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf,
570 * Now parse out the first token and use it as the name for the 570 * Now parse out the first token and use it as the name for the
571 * driver to filter for. 571 * driver to filter for.
572 */ 572 */
573 for (i = 0; i < NAME_MAX_LEN; ++i) { 573 for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
574 current_driver_name[i] = buf[i]; 574 current_driver_name[i] = buf[i];
575 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) 575 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
576 break; 576 break;
@@ -587,7 +587,7 @@ out_unlock:
587 return count; 587 return count;
588} 588}
589 589
590const struct file_operations filter_fops = { 590static const struct file_operations filter_fops = {
591 .read = filter_read, 591 .read = filter_read,
592 .write = filter_write, 592 .write = filter_write,
593}; 593};
@@ -670,12 +670,13 @@ static int device_dma_allocations(struct device *dev)
670 return count; 670 return count;
671} 671}
672 672
673static int dma_debug_device_change(struct notifier_block *nb, 673static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
674 unsigned long action, void *data)
675{ 674{
676 struct device *dev = data; 675 struct device *dev = data;
677 int count; 676 int count;
678 677
678 if (global_disable)
679 return 0;
679 680
680 switch (action) { 681 switch (action) {
681 case BUS_NOTIFY_UNBOUND_DRIVER: 682 case BUS_NOTIFY_UNBOUND_DRIVER:
@@ -697,6 +698,9 @@ void dma_debug_add_bus(struct bus_type *bus)
697{ 698{
698 struct notifier_block *nb; 699 struct notifier_block *nb;
699 700
701 if (global_disable)
702 return;
703
700 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); 704 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
701 if (nb == NULL) { 705 if (nb == NULL) {
702 pr_err("dma_debug_add_bus: out of memory\n"); 706 pr_err("dma_debug_add_bus: out of memory\n");
@@ -819,9 +823,11 @@ static void check_unmap(struct dma_debug_entry *ref)
819 err_printk(ref->dev, entry, "DMA-API: device driver frees " 823 err_printk(ref->dev, entry, "DMA-API: device driver frees "
820 "DMA memory with different CPU address " 824 "DMA memory with different CPU address "
821 "[device address=0x%016llx] [size=%llu bytes] " 825 "[device address=0x%016llx] [size=%llu bytes] "
822 "[cpu alloc address=%p] [cpu free address=%p]", 826 "[cpu alloc address=0x%016llx] "
827 "[cpu free address=0x%016llx]",
823 ref->dev_addr, ref->size, 828 ref->dev_addr, ref->size,
824 (void *)entry->paddr, (void *)ref->paddr); 829 (unsigned long long)entry->paddr,
830 (unsigned long long)ref->paddr);
825 } 831 }
826 832
827 if (ref->sg_call_ents && ref->type == dma_debug_sg && 833 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
@@ -907,6 +913,9 @@ static void check_sync(struct device *dev,
907 ref->size); 913 ref->size);
908 } 914 }
909 915
916 if (entry->direction == DMA_BIDIRECTIONAL)
917 goto out;
918
910 if (ref->direction != entry->direction) { 919 if (ref->direction != entry->direction) {
911 err_printk(dev, entry, "DMA-API: device driver syncs " 920 err_printk(dev, entry, "DMA-API: device driver syncs "
912 "DMA memory with different direction " 921 "DMA memory with different direction "
@@ -917,9 +926,6 @@ static void check_sync(struct device *dev,
917 dir2name[ref->direction]); 926 dir2name[ref->direction]);
918 } 927 }
919 928
920 if (entry->direction == DMA_BIDIRECTIONAL)
921 goto out;
922
923 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && 929 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
924 !(ref->direction == DMA_TO_DEVICE)) 930 !(ref->direction == DMA_TO_DEVICE))
925 err_printk(dev, entry, "DMA-API: device driver syncs " 931 err_printk(dev, entry, "DMA-API: device driver syncs "
@@ -942,7 +948,6 @@ static void check_sync(struct device *dev,
942 948
943out: 949out:
944 put_hash_bucket(bucket, &flags); 950 put_hash_bucket(bucket, &flags);
945
946} 951}
947 952
948void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, 953void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
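
The filter_write() change is a fencepost fix: the copy loop must stop at NAME_MAX_LEN - 1 so the NUL terminator written after the loop stays inside the buffer. The pattern, distilled (buffer size assumed nonzero):

#include <ctype.h>
#include <stddef.h>

static void copy_token(char *dst, size_t size, const char *src)
{
        size_t i;

        for (i = 0; i < size - 1; i++) {        /* was: i < size */
                dst[i] = src[i];
                if (isspace((unsigned char)src[i]) || src[i] == '\0')
                        break;
        }
        dst[i] = '\0';  /* in bounds even when the loop runs to the end */
}
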
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index e22c148e4b7f..02afc2533728 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -21,9 +21,11 @@
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/sysctl.h> 22#include <linux/sysctl.h>
23#include <linux/ctype.h> 23#include <linux/ctype.h>
24#include <linux/string.h>
24#include <linux/uaccess.h> 25#include <linux/uaccess.h>
25#include <linux/dynamic_debug.h> 26#include <linux/dynamic_debug.h>
26#include <linux/debugfs.h> 27#include <linux/debugfs.h>
28#include <linux/slab.h>
27 29
28extern struct _ddebug __start___verbose[]; 30extern struct _ddebug __start___verbose[];
29extern struct _ddebug __stop___verbose[]; 31extern struct _ddebug __stop___verbose[];
@@ -209,8 +211,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
209 char *end; 211 char *end;
210 212
211 /* Skip leading whitespace */ 213 /* Skip leading whitespace */
212 while (*buf && isspace(*buf)) 214 buf = skip_spaces(buf);
213 buf++;
214 if (!*buf) 215 if (!*buf)
215 break; /* oh, it was trailing whitespace */ 216 break; /* oh, it was trailing whitespace */
216 217
@@ -455,7 +456,7 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
455 __func__, (int)len); 456 __func__, (int)len);
456 457
457 nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS); 458 nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS);
458 if (nwords < 0) 459 if (nwords <= 0)
459 return -EINVAL; 460 return -EINVAL;
460 if (ddebug_parse_query(words, nwords-1, &query)) 461 if (ddebug_parse_query(words, nwords-1, &query))
461 return -EINVAL; 462 return -EINVAL;
@@ -691,7 +692,7 @@ static void ddebug_table_free(struct ddebug_table *dt)
691 * Called in response to a module being unloaded. Removes 692 * Called in response to a module being unloaded. Removes
692 * any ddebug_table's which point at the module. 693 * any ddebug_table's which point at the module.
693 */ 694 */
694int ddebug_remove_module(char *mod_name) 695int ddebug_remove_module(const char *mod_name)
695{ 696{
696 struct ddebug_table *dt, *nextdt; 697 struct ddebug_table *dt, *nextdt;
697 int ret = -ENOENT; 698 int ret = -ENOENT;
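
skip_spaces(), used above, comes from lib/string.c in this same series and returns the first non-whitespace position. Its shape is roughly the following; note that the old open-coded loop's explicit *buf test was redundant, since isspace('\0') is false:

char *skip_spaces(const char *str)
{
        while (isspace(*str))
                ++str;
        return (char *)str;
}
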
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index f97af55bdd96..7e65af70635e 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -1,6 +1,7 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/init.h> 2#include <linux/init.h>
3#include <linux/random.h> 3#include <linux/random.h>
4#include <linux/sched.h>
4#include <linux/stat.h> 5#include <linux/stat.h>
5#include <linux/types.h> 6#include <linux/types.h>
6#include <linux/fs.h> 7#include <linux/fs.h>
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 66eef2e4483e..41b1804fa728 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
99 ret->element_size = element_size; 99 ret->element_size = element_size;
100 ret->total_nr_elements = total; 100 ret->total_nr_elements = total;
101 if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) 101 if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
102 memset(ret->parts[0], FLEX_ARRAY_FREE, 102 memset(&ret->parts[0], FLEX_ARRAY_FREE,
103 FLEX_ARRAY_BASE_BYTES_LEFT); 103 FLEX_ARRAY_BASE_BYTES_LEFT);
104 return ret; 104 return ret;
105} 105}
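
The one-character flex_array fix is easy to misread: ret->parts[0] is the value of the first, still uninitialized, pointer slot, while &ret->parts[0] is the address of the slot array itself, i.e. the spare room in the base structure that should be poisoned. A standalone illustration (layout and poison value hypothetical):

#include <stddef.h>
#include <string.h>

struct base {
        int total_nr_elements;
        void *parts[1];         /* overlays the struct's trailing space */
};

static void poison(struct base *b, size_t spare_bytes)
{
        /* memset(b->parts[0], ...) would dereference a garbage pointer;
         * this poisons the trailing storage itself */
        memset(&b->parts[0], 0x6c, spare_bytes);        /* FLEX_ARRAY_FREE */
}
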
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index bea5d97df991..85d0e412a04f 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -7,8 +7,8 @@
7#define LE_TABLE_SIZE (1 << CRC_LE_BITS) 7#define LE_TABLE_SIZE (1 << CRC_LE_BITS)
8#define BE_TABLE_SIZE (1 << CRC_BE_BITS) 8#define BE_TABLE_SIZE (1 << CRC_BE_BITS)
9 9
10static uint32_t crc32table_le[LE_TABLE_SIZE]; 10static uint32_t crc32table_le[4][LE_TABLE_SIZE];
11static uint32_t crc32table_be[BE_TABLE_SIZE]; 11static uint32_t crc32table_be[4][BE_TABLE_SIZE];
12 12
13/** 13/**
14 * crc32init_le() - allocate and initialize LE table data 14 * crc32init_le() - allocate and initialize LE table data
@@ -22,12 +22,19 @@ static void crc32init_le(void)
22 unsigned i, j; 22 unsigned i, j;
23 uint32_t crc = 1; 23 uint32_t crc = 1;
24 24
25 crc32table_le[0] = 0; 25 crc32table_le[0][0] = 0;
26 26
27 for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) { 27 for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) {
28 crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); 28 crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
29 for (j = 0; j < LE_TABLE_SIZE; j += 2 * i) 29 for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
30 crc32table_le[i + j] = crc ^ crc32table_le[j]; 30 crc32table_le[0][i + j] = crc ^ crc32table_le[0][j];
31 }
32 for (i = 0; i < LE_TABLE_SIZE; i++) {
33 crc = crc32table_le[0][i];
34 for (j = 1; j < 4; j++) {
35 crc = crc32table_le[0][crc & 0xff] ^ (crc >> 8);
36 crc32table_le[j][i] = crc;
37 }
31 } 38 }
32} 39}
33 40
@@ -39,25 +46,35 @@ static void crc32init_be(void)
39 unsigned i, j; 46 unsigned i, j;
40 uint32_t crc = 0x80000000; 47 uint32_t crc = 0x80000000;
41 48
42 crc32table_be[0] = 0; 49 crc32table_be[0][0] = 0;
43 50
44 for (i = 1; i < BE_TABLE_SIZE; i <<= 1) { 51 for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
45 crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0); 52 crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
46 for (j = 0; j < i; j++) 53 for (j = 0; j < i; j++)
47 crc32table_be[i + j] = crc ^ crc32table_be[j]; 54 crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
55 }
56 for (i = 0; i < BE_TABLE_SIZE; i++) {
57 crc = crc32table_be[0][i];
58 for (j = 1; j < 4; j++) {
59 crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8);
60 crc32table_be[j][i] = crc;
61 }
48 } 62 }
49} 63}
50 64
51static void output_table(uint32_t table[], int len, char *trans) 65static void output_table(uint32_t table[4][256], int len, char *trans)
52{ 66{
53 int i; 67 int i, j;
54 68
55 for (i = 0; i < len - 1; i++) { 69 for (j = 0 ; j < 4; j++) {
56 if (i % ENTRIES_PER_LINE == 0) 70 printf("{");
57 printf("\n"); 71 for (i = 0; i < len - 1; i++) {
58 printf("%s(0x%8.8xL), ", trans, table[i]); 72 if (i % ENTRIES_PER_LINE == 0)
73 printf("\n");
74 printf("%s(0x%8.8xL), ", trans, table[j][i]);
75 }
76 printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]);
59 } 77 }
60 printf("%s(0x%8.8xL)\n", trans, table[len - 1]);
61} 78}
62 79
63int main(int argc, char** argv) 80int main(int argc, char** argv)
@@ -66,14 +83,14 @@ int main(int argc, char** argv)
66 83
67 if (CRC_LE_BITS > 1) { 84 if (CRC_LE_BITS > 1) {
68 crc32init_le(); 85 crc32init_le();
69 printf("static const u32 crc32table_le[] = {"); 86 printf("static const u32 crc32table_le[4][256] = {");
70 output_table(crc32table_le, LE_TABLE_SIZE, "tole"); 87 output_table(crc32table_le, LE_TABLE_SIZE, "tole");
71 printf("};\n"); 88 printf("};\n");
72 } 89 }
73 90
74 if (CRC_BE_BITS > 1) { 91 if (CRC_BE_BITS > 1) {
75 crc32init_be(); 92 crc32init_be();
76 printf("static const u32 crc32table_be[] = {"); 93 printf("static const u32 crc32table_be[4][256] = {");
77 output_table(crc32table_be, BE_TABLE_SIZE, "tobe"); 94 output_table(crc32table_be, BE_TABLE_SIZE, "tobe");
78 printf("};\n"); 95 printf("};\n");
79 } 96 }
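
The generator change above is the table half of a "slicing-by-4" CRC32: four 256-entry tables where table[j][b] is the CRC contribution of byte b followed by j zero bytes, letting the main loop fold a whole 32-bit word per iteration instead of one byte. A hedged sketch of how such tables are typically consumed on little-endian data (the function name and loop are illustrative; the real consumer in lib/crc32.c differs in detail):

    #include <stdint.h>

    /* One slicing-by-4 step: fold an aligned little-endian word into crc.
     * tab[0] is the classic byte-at-a-time table; tab[1..3] extend it. */
    static uint32_t crc32_le_word(uint32_t crc, uint32_t word,
                                  const uint32_t tab[4][256])
    {
        crc ^= word;
        return tab[3][ crc        & 0xff] ^
               tab[2][(crc >>  8) & 0xff] ^
               tab[1][(crc >> 16) & 0xff] ^
               tab[0][(crc >> 24) & 0xff];
    }
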
diff --git a/lib/genalloc.c b/lib/genalloc.c
index eed2bdb865e7..1923f1490e72 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -10,7 +10,9 @@
10 * Version 2. See the file COPYING for more details. 10 * Version 2. See the file COPYING for more details.
11 */ 11 */
12 12
13#include <linux/slab.h>
13#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/bitmap.h>
14#include <linux/genalloc.h> 16#include <linux/genalloc.h>
15 17
16 18
@@ -114,7 +116,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
114 struct gen_pool_chunk *chunk; 116 struct gen_pool_chunk *chunk;
115 unsigned long addr, flags; 117 unsigned long addr, flags;
116 int order = pool->min_alloc_order; 118 int order = pool->min_alloc_order;
117 int nbits, bit, start_bit, end_bit; 119 int nbits, start_bit, end_bit;
118 120
119 if (size == 0) 121 if (size == 0)
120 return 0; 122 return 0;
@@ -126,32 +128,21 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
126 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); 128 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
127 129
128 end_bit = (chunk->end_addr - chunk->start_addr) >> order; 130 end_bit = (chunk->end_addr - chunk->start_addr) >> order;
129 end_bit -= nbits + 1;
130 131
131 spin_lock_irqsave(&chunk->lock, flags); 132 spin_lock_irqsave(&chunk->lock, flags);
132 bit = -1; 133 start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
133 while (bit + 1 < end_bit) { 134 nbits, 0);
134 bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1); 135 if (start_bit >= end_bit) {
135 if (bit >= end_bit)
136 break;
137
138 start_bit = bit;
139 if (nbits > 1) {
140 bit = find_next_bit(chunk->bits, bit + nbits,
141 bit + 1);
142 if (bit - start_bit < nbits)
143 continue;
144 }
145
146 addr = chunk->start_addr +
147 ((unsigned long)start_bit << order);
148 while (nbits--)
149 __set_bit(start_bit++, chunk->bits);
150 spin_unlock_irqrestore(&chunk->lock, flags); 136 spin_unlock_irqrestore(&chunk->lock, flags);
151 read_unlock(&pool->lock); 137 continue;
152 return addr;
153 } 138 }
139
140 addr = chunk->start_addr + ((unsigned long)start_bit << order);
141
142 bitmap_set(chunk->bits, start_bit, nbits);
154 spin_unlock_irqrestore(&chunk->lock, flags); 143 spin_unlock_irqrestore(&chunk->lock, flags);
144 read_unlock(&pool->lock);
145 return addr;
155 } 146 }
156 read_unlock(&pool->lock); 147 read_unlock(&pool->lock);
157 return 0; 148 return 0;
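
gen_pool_alloc() now delegates the free-run search to bitmap_find_next_zero_area(), whose contract is: return the first index of a run of nbits clear bits within the first end_bit bits (optionally aligned), or a value >= end_bit when no such run exists; that is exactly the start_bit >= end_bit test above. A self-contained userspace model of that contract (simplified: bit-at-a-time, no alignment mask):

    #include <stdio.h>

    /* Return the start of a run of nr zero bits within the first size bits
     * of map, or size if there is no such run. */
    static unsigned long find_zero_area(const unsigned char *map,
                                        unsigned long size, unsigned long nr)
    {
        unsigned long start = 0, run = 0, i;

        for (i = 0; i < size; i++) {
            if (map[i / 8] & (1u << (i % 8))) {
                run = 0;
                start = i + 1;      /* run broken; restart after this bit */
            } else if (++run == nr) {
                return start;
            }
        }
        return size;                /* callers test index >= size */
    }

    int main(void)
    {
        unsigned char map[2] = { 0x0f, 0x00 };          /* bits 0-3 set */
        printf("%lu\n", find_zero_area(map, 16, 3));    /* prints 4 */
        return 0;
    }
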
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 39af2560f765..5d7a4802c562 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -16,6 +16,24 @@ const char hex_asc[] = "0123456789abcdef";
16EXPORT_SYMBOL(hex_asc); 16EXPORT_SYMBOL(hex_asc);
17 17
18/** 18/**
19 * hex_to_bin - convert a hex digit to its real value
20 * @ch: ascii character represents hex digit
21 *
22 * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
23 * input.
24 */
25int hex_to_bin(char ch)
26{
27 if ((ch >= '0') && (ch <= '9'))
28 return ch - '0';
29 ch = tolower(ch);
30 if ((ch >= 'a') && (ch <= 'f'))
31 return ch - 'a' + 10;
32 return -1;
33}
34EXPORT_SYMBOL(hex_to_bin);
35
36/**
19 * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory 37 * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
20 * @buf: data blob to dump 38 * @buf: data blob to dump
21 * @len: number of bytes in the @buf 39 * @len: number of bytes in the @buf
@@ -34,7 +52,7 @@ EXPORT_SYMBOL(hex_asc);
34 * 52 *
35 * E.g.: 53 * E.g.:
36 * hex_dump_to_buffer(frame->data, frame->len, 16, 1, 54 * hex_dump_to_buffer(frame->data, frame->len, 16, 1,
37 * linebuf, sizeof(linebuf), 1); 55 * linebuf, sizeof(linebuf), true);
38 * 56 *
39 * example output buffer: 57 * example output buffer:
40 * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO 58 * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
@@ -65,8 +83,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
65 83
66 for (j = 0; j < ngroups; j++) 84 for (j = 0; j < ngroups; j++)
67 lx += scnprintf(linebuf + lx, linebuflen - lx, 85 lx += scnprintf(linebuf + lx, linebuflen - lx,
68 "%s%16.16llx", j ? " " : "", 86 "%s%16.16llx", j ? " " : "",
69 (unsigned long long)*(ptr8 + j)); 87 (unsigned long long)*(ptr8 + j));
70 ascii_column = 17 * ngroups + 2; 88 ascii_column = 17 * ngroups + 2;
71 break; 89 break;
72 } 90 }
@@ -77,7 +95,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
77 95
78 for (j = 0; j < ngroups; j++) 96 for (j = 0; j < ngroups; j++)
79 lx += scnprintf(linebuf + lx, linebuflen - lx, 97 lx += scnprintf(linebuf + lx, linebuflen - lx,
80 "%s%8.8x", j ? " " : "", *(ptr4 + j)); 98 "%s%8.8x", j ? " " : "", *(ptr4 + j));
81 ascii_column = 9 * ngroups + 2; 99 ascii_column = 9 * ngroups + 2;
82 break; 100 break;
83 } 101 }
@@ -88,7 +106,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
88 106
89 for (j = 0; j < ngroups; j++) 107 for (j = 0; j < ngroups; j++)
90 lx += scnprintf(linebuf + lx, linebuflen - lx, 108 lx += scnprintf(linebuf + lx, linebuflen - lx,
91 "%s%4.4x", j ? " " : "", *(ptr2 + j)); 109 "%s%4.4x", j ? " " : "", *(ptr2 + j));
92 ascii_column = 5 * ngroups + 2; 110 ascii_column = 5 * ngroups + 2;
93 break; 111 break;
94 } 112 }
@@ -111,9 +129,10 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
111 129
112 while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) 130 while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
113 linebuf[lx++] = ' '; 131 linebuf[lx++] = ' ';
114 for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) 132 for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) {
115 linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] 133 ch = ptr[j];
116 : '.'; 134 linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
135 }
117nil: 136nil:
118 linebuf[lx++] = '\0'; 137 linebuf[lx++] = '\0';
119} 138}
@@ -143,7 +162,7 @@ EXPORT_SYMBOL(hex_dump_to_buffer);
143 * 162 *
144 * E.g.: 163 * E.g.:
145 * print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS, 164 * print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
146 * 16, 1, frame->data, frame->len, 1); 165 * 16, 1, frame->data, frame->len, true);
147 * 166 *
148 * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode: 167 * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
149 * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO 168 * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
@@ -151,12 +170,12 @@ EXPORT_SYMBOL(hex_dump_to_buffer);
151 * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~. 170 * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~.
152 */ 171 */
153void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, 172void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
154 int rowsize, int groupsize, 173 int rowsize, int groupsize,
155 const void *buf, size_t len, bool ascii) 174 const void *buf, size_t len, bool ascii)
156{ 175{
157 const u8 *ptr = buf; 176 const u8 *ptr = buf;
158 int i, linelen, remaining = len; 177 int i, linelen, remaining = len;
159 unsigned char linebuf[200]; 178 unsigned char linebuf[32 * 3 + 2 + 32 + 1];
160 179
161 if (rowsize != 16 && rowsize != 32) 180 if (rowsize != 16 && rowsize != 32)
162 rowsize = 16; 181 rowsize = 16;
@@ -164,13 +183,14 @@ void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
164 for (i = 0; i < len; i += rowsize) { 183 for (i = 0; i < len; i += rowsize) {
165 linelen = min(remaining, rowsize); 184 linelen = min(remaining, rowsize);
166 remaining -= rowsize; 185 remaining -= rowsize;
186
167 hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, 187 hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
168 linebuf, sizeof(linebuf), ascii); 188 linebuf, sizeof(linebuf), ascii);
169 189
170 switch (prefix_type) { 190 switch (prefix_type) {
171 case DUMP_PREFIX_ADDRESS: 191 case DUMP_PREFIX_ADDRESS:
172 printk("%s%s%*p: %s\n", level, prefix_str, 192 printk("%s%s%p: %s\n",
173 (int)(2 * sizeof(void *)), ptr + i, linebuf); 193 level, prefix_str, ptr + i, linebuf);
174 break; 194 break;
175 case DUMP_PREFIX_OFFSET: 195 case DUMP_PREFIX_OFFSET:
176 printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); 196 printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
@@ -196,9 +216,9 @@ EXPORT_SYMBOL(print_hex_dump);
196 * rowsize of 16, groupsize of 1, and ASCII output included. 216 * rowsize of 16, groupsize of 1, and ASCII output included.
197 */ 217 */
198void print_hex_dump_bytes(const char *prefix_str, int prefix_type, 218void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
199 const void *buf, size_t len) 219 const void *buf, size_t len)
200{ 220{
201 print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1, 221 print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
202 buf, len, 1); 222 buf, len, true);
203} 223}
204EXPORT_SYMBOL(print_hex_dump_bytes); 224EXPORT_SYMBOL(print_hex_dump_bytes);
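
The new hex_to_bin() centralizes single-digit hex parsing: it returns the digit's value 0..15, or -1 for anything that is not a hex digit, so callers can validate as they parse. A typical two-digit caller built on it (illustrative, not part of this diff):

    /* Parse "4f" -> 0x4f; any invalid digit yields -1. */
    static int parse_hex_byte(const char *s)
    {
        int hi = hex_to_bin(s[0]);
        int lo = hex_to_bin(s[1]);

        if (hi < 0 || lo < 0)
            return -1;
        return (hi << 4) | lo;
    }
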
diff --git a/lib/hweight.c b/lib/hweight.c
index 389424ecb129..3c79d50814cf 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -9,37 +9,45 @@
9 * The Hamming Weight of a number is the total number of bits set in it. 9 * The Hamming Weight of a number is the total number of bits set in it.
10 */ 10 */
11 11
12unsigned int hweight32(unsigned int w) 12unsigned int __sw_hweight32(unsigned int w)
13{ 13{
14#ifdef ARCH_HAS_FAST_MULTIPLIER
15 w -= (w >> 1) & 0x55555555;
16 w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
17 w = (w + (w >> 4)) & 0x0f0f0f0f;
18 return (w * 0x01010101) >> 24;
19#else
14 unsigned int res = w - ((w >> 1) & 0x55555555); 20 unsigned int res = w - ((w >> 1) & 0x55555555);
15 res = (res & 0x33333333) + ((res >> 2) & 0x33333333); 21 res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
16 res = (res + (res >> 4)) & 0x0F0F0F0F; 22 res = (res + (res >> 4)) & 0x0F0F0F0F;
17 res = res + (res >> 8); 23 res = res + (res >> 8);
18 return (res + (res >> 16)) & 0x000000FF; 24 return (res + (res >> 16)) & 0x000000FF;
25#endif
19} 26}
20EXPORT_SYMBOL(hweight32); 27EXPORT_SYMBOL(__sw_hweight32);
21 28
22unsigned int hweight16(unsigned int w) 29unsigned int __sw_hweight16(unsigned int w)
23{ 30{
24 unsigned int res = w - ((w >> 1) & 0x5555); 31 unsigned int res = w - ((w >> 1) & 0x5555);
25 res = (res & 0x3333) + ((res >> 2) & 0x3333); 32 res = (res & 0x3333) + ((res >> 2) & 0x3333);
26 res = (res + (res >> 4)) & 0x0F0F; 33 res = (res + (res >> 4)) & 0x0F0F;
27 return (res + (res >> 8)) & 0x00FF; 34 return (res + (res >> 8)) & 0x00FF;
28} 35}
29EXPORT_SYMBOL(hweight16); 36EXPORT_SYMBOL(__sw_hweight16);
30 37
31unsigned int hweight8(unsigned int w) 38unsigned int __sw_hweight8(unsigned int w)
32{ 39{
33 unsigned int res = w - ((w >> 1) & 0x55); 40 unsigned int res = w - ((w >> 1) & 0x55);
34 res = (res & 0x33) + ((res >> 2) & 0x33); 41 res = (res & 0x33) + ((res >> 2) & 0x33);
35 return (res + (res >> 4)) & 0x0F; 42 return (res + (res >> 4)) & 0x0F;
36} 43}
37EXPORT_SYMBOL(hweight8); 44EXPORT_SYMBOL(__sw_hweight8);
38 45
39unsigned long hweight64(__u64 w) 46unsigned long __sw_hweight64(__u64 w)
40{ 47{
41#if BITS_PER_LONG == 32 48#if BITS_PER_LONG == 32
42 return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); 49 return __sw_hweight32((unsigned int)(w >> 32)) +
50 __sw_hweight32((unsigned int)w);
43#elif BITS_PER_LONG == 64 51#elif BITS_PER_LONG == 64
44#ifdef ARCH_HAS_FAST_MULTIPLIER 52#ifdef ARCH_HAS_FAST_MULTIPLIER
45 w -= (w >> 1) & 0x5555555555555555ul; 53 w -= (w >> 1) & 0x5555555555555555ul;
@@ -56,4 +64,4 @@ unsigned long hweight64(__u64 w)
56#endif 64#endif
57#endif 65#endif
58} 66}
59EXPORT_SYMBOL(hweight64); 67EXPORT_SYMBOL(__sw_hweight64);
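
The renamed __sw_hweight32() also gains the ARCH_HAS_FAST_MULTIPLIER path: after three folding steps each byte of w holds its own bit count, and multiplying by 0x01010101 adds the four bytes into the top byte, so the shift by 24 extracts the total with one multiply instead of two more shift-add rounds. The same arithmetic, runnable in userspace with a worked example:

    #include <stdio.h>
    #include <stdint.h>

    /* The fast-multiplier popcount path from the diff, step by step. */
    static unsigned int popcount32(uint32_t w)
    {
        w -= (w >> 1) & 0x55555555;                      /* 2-bit sums */
        w  = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* 4-bit sums */
        w  = (w + (w >> 4)) & 0x0f0f0f0f;                /* per-byte sums */
        return (w * 0x01010101) >> 24;                   /* sum the 4 bytes */
    }

    int main(void)
    {
        /* 0x0000ffff: per-byte sums are 0x00000808; multiplying by
         * 0x01010101 gives 0x10101008, whose top byte is 16. */
        printf("%u\n", popcount32(0x0000ffffu));         /* prints 16 */
        return 0;
    }
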
diff --git a/lib/idr.c b/lib/idr.c
index 80ca9aca038b..7f1a4f0acf50 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
156 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; 156 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
157 157
158 /* if already at the top layer, we need to grow */ 158 /* if already at the top layer, we need to grow */
159 if (!(p = pa[l])) { 159 if (id >= 1 << (idp->layers * IDR_BITS)) {
160 *starting_id = id; 160 *starting_id = id;
161 return IDR_NEED_TO_GROW; 161 return IDR_NEED_TO_GROW;
162 } 162 }
163 p = pa[l];
164 BUG_ON(!p);
163 165
164 /* If we need to go up one layer, continue the 166 /* If we need to go up one layer, continue the
165 * loop; otherwise, restart from the top. 167 * loop; otherwise, restart from the top.
@@ -281,7 +283,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
281/** 283/**
282 * idr_get_new_above - allocate new idr entry above or equal to a start id 284 * idr_get_new_above - allocate new idr entry above or equal to a start id
283 * @idp: idr handle 285 * @idp: idr handle
284 * @ptr: pointer you want associated with the ide 286 * @ptr: pointer you want associated with the id
285 * @start_id: id to start search at 287 * @start_id: id to start search at
286 * @id: pointer to the allocated handle 288 * @id: pointer to the allocated handle
287 * 289 *
@@ -313,7 +315,7 @@ EXPORT_SYMBOL(idr_get_new_above);
313/** 315/**
314 * idr_get_new - allocate new idr entry 316 * idr_get_new - allocate new idr entry
315 * @idp: idr handle 317 * @idp: idr handle
316 * @ptr: pointer you want associated with the ide 318 * @ptr: pointer you want associated with the id
317 * @id: pointer to the allocated handle 319 * @id: pointer to the allocated handle
318 * 320 *
319 * This is the allocate id function. It should be called with any 321 * This is the allocate id function. It should be called with any
@@ -443,6 +445,7 @@ EXPORT_SYMBOL(idr_remove);
443void idr_remove_all(struct idr *idp) 445void idr_remove_all(struct idr *idp)
444{ 446{
445 int n, id, max; 447 int n, id, max;
448 int bt_mask;
446 struct idr_layer *p; 449 struct idr_layer *p;
447 struct idr_layer *pa[MAX_LEVEL]; 450 struct idr_layer *pa[MAX_LEVEL];
448 struct idr_layer **paa = &pa[0]; 451 struct idr_layer **paa = &pa[0];
@@ -460,8 +463,10 @@ void idr_remove_all(struct idr *idp)
460 p = p->ary[(id >> n) & IDR_MASK]; 463 p = p->ary[(id >> n) & IDR_MASK];
461 } 464 }
462 465
466 bt_mask = id;
463 id += 1 << n; 467 id += 1 << n;
464 while (n < fls(id)) { 468 /* Get the highest bit that the above add changed from 0->1. */
469 while (n < fls(id ^ bt_mask)) {
465 if (p) 470 if (p)
466 free_layer(p); 471 free_layer(p);
467 n += IDR_BITS; 472 n += IDR_BITS;
@@ -502,7 +507,7 @@ void *idr_find(struct idr *idp, int id)
502 int n; 507 int n;
503 struct idr_layer *p; 508 struct idr_layer *p;
504 509
505 p = rcu_dereference(idp->top); 510 p = rcu_dereference_raw(idp->top);
506 if (!p) 511 if (!p)
507 return NULL; 512 return NULL;
508 n = (p->layer+1) * IDR_BITS; 513 n = (p->layer+1) * IDR_BITS;
@@ -517,7 +522,7 @@ void *idr_find(struct idr *idp, int id)
517 while (n > 0 && p) { 522 while (n > 0 && p) {
518 n -= IDR_BITS; 523 n -= IDR_BITS;
519 BUG_ON(n != p->layer*IDR_BITS); 524 BUG_ON(n != p->layer*IDR_BITS);
520 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); 525 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
521 } 526 }
522 return((void *)p); 527 return((void *)p);
523} 528}
@@ -550,7 +555,7 @@ int idr_for_each(struct idr *idp,
550 struct idr_layer **paa = &pa[0]; 555 struct idr_layer **paa = &pa[0];
551 556
552 n = idp->layers * IDR_BITS; 557 n = idp->layers * IDR_BITS;
553 p = rcu_dereference(idp->top); 558 p = rcu_dereference_raw(idp->top);
554 max = 1 << n; 559 max = 1 << n;
555 560
556 id = 0; 561 id = 0;
@@ -558,7 +563,7 @@ int idr_for_each(struct idr *idp,
558 while (n > 0 && p) { 563 while (n > 0 && p) {
559 n -= IDR_BITS; 564 n -= IDR_BITS;
560 *paa++ = p; 565 *paa++ = p;
561 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); 566 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
562 } 567 }
563 568
564 if (p) { 569 if (p) {
@@ -597,7 +602,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
597 /* find first ent */ 602 /* find first ent */
598 n = idp->layers * IDR_BITS; 603 n = idp->layers * IDR_BITS;
599 max = 1 << n; 604 max = 1 << n;
600 p = rcu_dereference(idp->top); 605 p = rcu_dereference_raw(idp->top);
601 if (!p) 606 if (!p)
602 return NULL; 607 return NULL;
603 608
@@ -605,7 +610,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
605 while (n > 0 && p) { 610 while (n > 0 && p) {
606 n -= IDR_BITS; 611 n -= IDR_BITS;
607 *paa++ = p; 612 *paa++ = p;
608 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); 613 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
609 } 614 }
610 615
611 if (p) { 616 if (p) {
@@ -621,7 +626,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
621 } 626 }
622 return NULL; 627 return NULL;
623} 628}
624 629EXPORT_SYMBOL(idr_get_next);
625 630
626 631
627/** 632/**
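
The idr_remove_all() change is subtle: after id += 1 << n, the layers that may be freed are exactly those whose sub-trees the increment completed, and fls(id ^ bt_mask) finds the highest bit the addition flipped from 0 to 1, which bounds that set; the old fls(id) could overshoot when the add did not carry. A small demonstration of the two tests diverging (fls_demo models the kernel's fls()):

    #include <stdio.h>

    /* fls(x): 1-based index of the highest set bit, 0 for x == 0. */
    static int fls_demo(unsigned int x)
    {
        int r = 0;
        while (x) { r++; x >>= 1; }
        return r;
    }

    int main(void)
    {
        unsigned int bt_mask = 64, id = bt_mask + 32; /* add set bit 5, no carry */

        /* old bound: fls(id) = 7 -> walks one layer too far
         * new bound: fls(id ^ bt_mask) = 6 -> stops at the completed sub-tree */
        printf("%d %d\n", fls_demo(id), fls_demo(id ^ bt_mask)); /* 7 6 */
        return 0;
    }
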
diff --git a/lib/inflate.c b/lib/inflate.c
index d10255973a9f..677b738c2204 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -103,6 +103,7 @@
103 the two sets of lengths. 103 the two sets of lengths.
104 */ 104 */
105#include <linux/compiler.h> 105#include <linux/compiler.h>
106#include <linux/slab.h>
106 107
107#ifdef RCSID 108#ifdef RCSID
108static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #"; 109static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #";
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index 75dbda03f4fb..c0251f4ad08b 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -3,41 +3,7 @@
3 */ 3 */
4 4
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/bitops.h> 6#include <linux/bitmap.h>
7
8static unsigned long find_next_zero_area(unsigned long *map,
9 unsigned long size,
10 unsigned long start,
11 unsigned int nr,
12 unsigned long align_mask)
13{
14 unsigned long index, end, i;
15again:
16 index = find_next_zero_bit(map, size, start);
17
18 /* Align allocation */
19 index = (index + align_mask) & ~align_mask;
20
21 end = index + nr;
22 if (end >= size)
23 return -1;
24 for (i = index; i < end; i++) {
25 if (test_bit(i, map)) {
26 start = i+1;
27 goto again;
28 }
29 }
30 return index;
31}
32
33void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
34{
35 unsigned long end = i + len;
36 while (i < end) {
37 __set_bit(i, map);
38 i++;
39 }
40}
41 7
42int iommu_is_span_boundary(unsigned int index, unsigned int nr, 8int iommu_is_span_boundary(unsigned int index, unsigned int nr,
43 unsigned long shift, 9 unsigned long shift,
@@ -55,31 +21,24 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
55 unsigned long align_mask) 21 unsigned long align_mask)
56{ 22{
57 unsigned long index; 23 unsigned long index;
24
25 /* We don't want the last of the limit */
26 size -= 1;
58again: 27again:
59 index = find_next_zero_area(map, size, start, nr, align_mask); 28 index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
60 if (index != -1) { 29 if (index < size) {
61 if (iommu_is_span_boundary(index, nr, shift, boundary_size)) { 30 if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
62 /* we could do this more efficiently */ 31 /* we could do this more efficiently */
63 start = index + 1; 32 start = index + 1;
64 goto again; 33 goto again;
65 } 34 }
66 iommu_area_reserve(map, index, nr); 35 bitmap_set(map, index, nr);
36 return index;
67 } 37 }
68 return index; 38 return -1;
69} 39}
70EXPORT_SYMBOL(iommu_area_alloc); 40EXPORT_SYMBOL(iommu_area_alloc);
71 41
72void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
73{
74 unsigned long end = start + nr;
75
76 while (start < end) {
77 __clear_bit(start, map);
78 start++;
79 }
80}
81EXPORT_SYMBOL(iommu_area_free);
82
83unsigned long iommu_num_pages(unsigned long addr, unsigned long len, 42unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
84 unsigned long io_page_size) 43 unsigned long io_page_size)
85{ 44{
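
Here the driver-local scan and the iommu_area_reserve()/iommu_area_free() loops are replaced by the shared bitmap helpers. One convention changes along the way: the old find_next_zero_area() returned -1 on failure, while bitmap_find_next_zero_area() reports failure as an index >= size, hence the rewritten index < size test (iommu_area_alloc() still returns -1 to its own callers); size -= 1 preserves the old behaviour of never handing out the very last slot. The translation between the two conventions, in miniature:

    #include <stdio.h>

    /* found is what bitmap_find_next_zero_area() returned for a map of
     * 'size' bits; map the ">= size means failure" convention back to -1. */
    static long to_legacy_result(unsigned long found, unsigned long size)
    {
        return (found < size) ? (long)found : -1;
    }

    int main(void)
    {
        printf("%ld %ld\n", to_legacy_result(5, 32),
                            to_legacy_result(32, 32));   /* prints: 5 -1 */
        return 0;
    }
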
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 14c6078f17a2..5730ecd3eb66 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -13,10 +13,10 @@
13#include <asm/pgtable.h> 13#include <asm/pgtable.h>
14 14
15static int ioremap_pte_range(pmd_t *pmd, unsigned long addr, 15static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
16 unsigned long end, unsigned long phys_addr, pgprot_t prot) 16 unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
17{ 17{
18 pte_t *pte; 18 pte_t *pte;
19 unsigned long pfn; 19 u64 pfn;
20 20
21 pfn = phys_addr >> PAGE_SHIFT; 21 pfn = phys_addr >> PAGE_SHIFT;
22 pte = pte_alloc_kernel(pmd, addr); 22 pte = pte_alloc_kernel(pmd, addr);
@@ -31,7 +31,7 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
31} 31}
32 32
33static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, 33static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
34 unsigned long end, unsigned long phys_addr, pgprot_t prot) 34 unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
35{ 35{
36 pmd_t *pmd; 36 pmd_t *pmd;
37 unsigned long next; 37 unsigned long next;
@@ -49,7 +49,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
49} 49}
50 50
51static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, 51static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
52 unsigned long end, unsigned long phys_addr, pgprot_t prot) 52 unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
53{ 53{
54 pud_t *pud; 54 pud_t *pud;
55 unsigned long next; 55 unsigned long next;
@@ -67,7 +67,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
67} 67}
68 68
69int ioremap_page_range(unsigned long addr, 69int ioremap_page_range(unsigned long addr,
70 unsigned long end, unsigned long phys_addr, pgprot_t prot) 70 unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
71{ 71{
72 pgd_t *pgd; 72 pgd_t *pgd;
73 unsigned long start; 73 unsigned long start;
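
Widening phys_addr from unsigned long to phys_addr_t (and pfn to u64) matters on 32-bit kernels with extended physical addressing such as PAE, where a physical address can exceed 32 bits even though unsigned long cannot hold it. The truncation the old signatures invited, shown in userspace:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t phys = 0x100000000ULL + 0x2000;  /* >4GiB physical address */
        uint32_t as_ulong32 = (uint32_t)phys;     /* what a 32-bit ulong keeps */

        printf("full=%#llx truncated=%#x\n",
               (unsigned long long)phys, as_ulong32); /* 0x100002000 vs 0x2000 */
        return 0;
    }
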
diff --git a/lib/kasprintf.c b/lib/kasprintf.c
index c5ff1fd10030..9c4233b23783 100644
--- a/lib/kasprintf.c
+++ b/lib/kasprintf.c
@@ -6,6 +6,7 @@
6 6
7#include <stdarg.h> 7#include <stdarg.h>
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/slab.h>
9#include <linux/types.h> 10#include <linux/types.h>
10#include <linux/string.h> 11#include <linux/string.h>
11 12
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 39f1029e3525..b135d04aa48a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -5,10 +5,13 @@
5 * relegated to obsolescence, but used by various less 5 * relegated to obsolescence, but used by various less
6 * important (or lazy) subsystems. 6 * important (or lazy) subsystems.
7 */ 7 */
8#include <linux/smp_lock.h>
9#include <linux/module.h> 8#include <linux/module.h>
10#include <linux/kallsyms.h> 9#include <linux/kallsyms.h>
11#include <linux/semaphore.h> 10#include <linux/semaphore.h>
11#include <linux/smp_lock.h>
12
13#define CREATE_TRACE_POINTS
14#include <trace/events/bkl.h>
12 15
13/* 16/*
14 * The 'big kernel lock' 17 * The 'big kernel lock'
@@ -20,7 +23,7 @@
20 * 23 *
21 * Don't use in new code. 24 * Don't use in new code.
22 */ 25 */
23static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); 26static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
24 27
25 28
26/* 29/*
@@ -33,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
33 * If it successfully gets the lock, it should increment 36 * If it successfully gets the lock, it should increment
34 * the preemption count like any spinlock does. 37 * the preemption count like any spinlock does.
35 * 38 *
36 * (This works on UP too - _raw_spin_trylock will never 39 * (This works on UP too - do_raw_spin_trylock will never
37 * return false in that case) 40 * return false in that case)
38 */ 41 */
39int __lockfunc __reacquire_kernel_lock(void) 42int __lockfunc __reacquire_kernel_lock(void)
40{ 43{
41 while (!_raw_spin_trylock(&kernel_flag)) { 44 while (!do_raw_spin_trylock(&kernel_flag)) {
42 if (need_resched()) 45 if (need_resched())
43 return -EAGAIN; 46 return -EAGAIN;
44 cpu_relax(); 47 cpu_relax();
@@ -49,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void)
49 52
50void __lockfunc __release_kernel_lock(void) 53void __lockfunc __release_kernel_lock(void)
51{ 54{
52 _raw_spin_unlock(&kernel_flag); 55 do_raw_spin_unlock(&kernel_flag);
53 preempt_enable_no_resched(); 56 preempt_enable_no_resched();
54} 57}
55 58
56/* 59/*
57 * These are the BKL spinlocks - we try to be polite about preemption. 60 * These are the BKL spinlocks - we try to be polite about preemption.
58 * If SMP is not on (ie UP preemption), this all goes away because the 61 * If SMP is not on (ie UP preemption), this all goes away because the
59 * _raw_spin_trylock() will always succeed. 62 * do_raw_spin_trylock() will always succeed.
60 */ 63 */
61#ifdef CONFIG_PREEMPT 64#ifdef CONFIG_PREEMPT
62static inline void __lock_kernel(void) 65static inline void __lock_kernel(void)
63{ 66{
64 preempt_disable(); 67 preempt_disable();
65 if (unlikely(!_raw_spin_trylock(&kernel_flag))) { 68 if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
66 /* 69 /*
67 * If preemption was disabled even before this 70 * If preemption was disabled even before this
68 * was called, there's nothing we can be polite 71 * was called, there's nothing we can be polite
69 * about - just spin. 72 * about - just spin.
70 */ 73 */
71 if (preempt_count() > 1) { 74 if (preempt_count() > 1) {
72 _raw_spin_lock(&kernel_flag); 75 do_raw_spin_lock(&kernel_flag);
73 return; 76 return;
74 } 77 }
75 78
@@ -79,10 +82,10 @@ static inline void __lock_kernel(void)
79 */ 82 */
80 do { 83 do {
81 preempt_enable(); 84 preempt_enable();
82 while (spin_is_locked(&kernel_flag)) 85 while (raw_spin_is_locked(&kernel_flag))
83 cpu_relax(); 86 cpu_relax();
84 preempt_disable(); 87 preempt_disable();
85 } while (!_raw_spin_trylock(&kernel_flag)); 88 } while (!do_raw_spin_trylock(&kernel_flag));
86 } 89 }
87} 90}
88 91
@@ -93,7 +96,7 @@ static inline void __lock_kernel(void)
93 */ 96 */
94static inline void __lock_kernel(void) 97static inline void __lock_kernel(void)
95{ 98{
96 _raw_spin_lock(&kernel_flag); 99 do_raw_spin_lock(&kernel_flag);
97} 100}
98#endif 101#endif
99 102
@@ -103,7 +106,7 @@ static inline void __unlock_kernel(void)
103 * the BKL is not covered by lockdep, so we open-code the 106 * the BKL is not covered by lockdep, so we open-code the
104 * unlocking sequence (and thus avoid the dep-chain ops): 107 * unlocking sequence (and thus avoid the dep-chain ops):
105 */ 108 */
106 _raw_spin_unlock(&kernel_flag); 109 do_raw_spin_unlock(&kernel_flag);
107 preempt_enable(); 110 preempt_enable();
108} 111}
109 112
@@ -113,21 +116,28 @@ static inline void __unlock_kernel(void)
113 * This cannot happen asynchronously, so we only need to 116 * This cannot happen asynchronously, so we only need to
114 * worry about other CPU's. 117 * worry about other CPU's.
115 */ 118 */
116void __lockfunc lock_kernel(void) 119void __lockfunc _lock_kernel(const char *func, const char *file, int line)
117{ 120{
118 int depth = current->lock_depth+1; 121 int depth = current->lock_depth + 1;
119 if (likely(!depth)) 122
123 trace_lock_kernel(func, file, line);
124
125 if (likely(!depth)) {
126 might_sleep();
120 __lock_kernel(); 127 __lock_kernel();
128 }
121 current->lock_depth = depth; 129 current->lock_depth = depth;
122} 130}
123 131
124void __lockfunc unlock_kernel(void) 132void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
125{ 133{
126 BUG_ON(current->lock_depth < 0); 134 BUG_ON(current->lock_depth < 0);
127 if (likely(--current->lock_depth < 0)) 135 if (likely(--current->lock_depth < 0))
128 __unlock_kernel(); 136 __unlock_kernel();
137
138 trace_unlock_kernel(func, file, line);
129} 139}
130 140
131EXPORT_SYMBOL(lock_kernel); 141EXPORT_SYMBOL(_lock_kernel);
132EXPORT_SYMBOL(unlock_kernel); 142EXPORT_SYMBOL(_unlock_kernel);
133 143
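
Two independent changes land here: the BKL's spinlock moves to the raw_spinlock API (do_raw_spin_lock() and friends), and lock_kernel()/unlock_kernel() become _lock_kernel()/_unlock_kernel() carrying func/file/line so the new trace/events/bkl.h tracepoints can record each acquisition. The recursion rule is unchanged: current->lock_depth makes the BKL re-entrant per task, and only the -1 -> 0 transition touches the real lock. That rule, modeled in isolation (the demo_* names are illustrative):

    #include <stdio.h>

    static int lock_depth = -1;   /* models current->lock_depth */
    static int bkl_held;          /* models the underlying spinlock */

    /* Only the -1 -> 0 transition takes the lock; nesting just counts. */
    static void demo_lock(void)   { if (++lock_depth == 0) bkl_held = 1; }
    static void demo_unlock(void) { if (--lock_depth < 0)  bkl_held = 0; }

    int main(void)
    {
        demo_lock();
        demo_lock();                                         /* nested acquire */
        printf("held=%d depth=%d\n", bkl_held, lock_depth);  /* held=1 depth=1 */
        demo_unlock();
        demo_unlock();
        printf("held=%d depth=%d\n", bkl_held, lock_depth);  /* held=0 depth=-1 */
        return 0;
    }
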
diff --git a/lib/kobject.c b/lib/kobject.c
index b512b746d2af..f07c57252e82 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
700 return ret; 700 return ret;
701} 701}
702 702
703struct sysfs_ops kobj_sysfs_ops = { 703const struct sysfs_ops kobj_sysfs_ops = {
704 .show = kobj_attr_show, 704 .show = kobj_attr_show,
705 .store = kobj_attr_store, 705 .store = kobj_attr_store,
706}; 706};
@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
789 * If the kset was not able to be created, NULL will be returned. 789 * If the kset was not able to be created, NULL will be returned.
790 */ 790 */
791static struct kset *kset_create(const char *name, 791static struct kset *kset_create(const char *name,
792 struct kset_uevent_ops *uevent_ops, 792 const struct kset_uevent_ops *uevent_ops,
793 struct kobject *parent_kobj) 793 struct kobject *parent_kobj)
794{ 794{
795 struct kset *kset; 795 struct kset *kset;
@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
832 * If the kset was not able to be created, NULL will be returned. 832 * If the kset was not able to be created, NULL will be returned.
833 */ 833 */
834struct kset *kset_create_and_add(const char *name, 834struct kset *kset_create_and_add(const char *name,
835 struct kset_uevent_ops *uevent_ops, 835 const struct kset_uevent_ops *uevent_ops,
836 struct kobject *parent_kobj) 836 struct kobject *parent_kobj)
837{ 837{
838 struct kset *kset; 838 struct kset *kset;
@@ -850,6 +850,121 @@ struct kset *kset_create_and_add(const char *name,
850} 850}
851EXPORT_SYMBOL_GPL(kset_create_and_add); 851EXPORT_SYMBOL_GPL(kset_create_and_add);
852 852
853
854static DEFINE_SPINLOCK(kobj_ns_type_lock);
855static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
856
857int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
858{
859 enum kobj_ns_type type = ops->type;
860 int error;
861
862 spin_lock(&kobj_ns_type_lock);
863
864 error = -EINVAL;
865 if (type >= KOBJ_NS_TYPES)
866 goto out;
867
868 error = -EINVAL;
869 if (type <= KOBJ_NS_TYPE_NONE)
870 goto out;
871
872 error = -EBUSY;
873 if (kobj_ns_ops_tbl[type])
874 goto out;
875
876 error = 0;
877 kobj_ns_ops_tbl[type] = ops;
878
879out:
880 spin_unlock(&kobj_ns_type_lock);
881 return error;
882}
883
884int kobj_ns_type_registered(enum kobj_ns_type type)
885{
886 int registered = 0;
887
888 spin_lock(&kobj_ns_type_lock);
889 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES))
890 registered = kobj_ns_ops_tbl[type] != NULL;
891 spin_unlock(&kobj_ns_type_lock);
892
893 return registered;
894}
895
896const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent)
897{
898 const struct kobj_ns_type_operations *ops = NULL;
899
900 if (parent && parent->ktype->child_ns_type)
901 ops = parent->ktype->child_ns_type(parent);
902
903 return ops;
904}
905
906const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
907{
908 return kobj_child_ns_ops(kobj->parent);
909}
910
911
912const void *kobj_ns_current(enum kobj_ns_type type)
913{
914 const void *ns = NULL;
915
916 spin_lock(&kobj_ns_type_lock);
917 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
918 kobj_ns_ops_tbl[type])
919 ns = kobj_ns_ops_tbl[type]->current_ns();
920 spin_unlock(&kobj_ns_type_lock);
921
922 return ns;
923}
924
925const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
926{
927 const void *ns = NULL;
928
929 spin_lock(&kobj_ns_type_lock);
930 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
931 kobj_ns_ops_tbl[type])
932 ns = kobj_ns_ops_tbl[type]->netlink_ns(sk);
933 spin_unlock(&kobj_ns_type_lock);
934
935 return ns;
936}
937
938const void *kobj_ns_initial(enum kobj_ns_type type)
939{
940 const void *ns = NULL;
941
942 spin_lock(&kobj_ns_type_lock);
943 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
944 kobj_ns_ops_tbl[type])
945 ns = kobj_ns_ops_tbl[type]->initial_ns();
946 spin_unlock(&kobj_ns_type_lock);
947
948 return ns;
949}
950
951/*
952 * kobj_ns_exit - invalidate a namespace tag
953 *
954 * @type: the namespace type (i.e. KOBJ_NS_TYPE_NET)
955 * @ns: the actual namespace being invalidated
956 *
957 * This is called when a tag is no longer valid. For instance,
958 * when a network namespace exits, it uses this helper to
959 * make sure no sb's sysfs_info points to the now-invalidated
960 * netns.
961 */
962void kobj_ns_exit(enum kobj_ns_type type, const void *ns)
963{
964 sysfs_exit_ns(type, ns);
965}
966
967
853EXPORT_SYMBOL(kobject_get); 968EXPORT_SYMBOL(kobject_get);
854EXPORT_SYMBOL(kobject_put); 969EXPORT_SYMBOL(kobject_put);
855EXPORT_SYMBOL(kobject_del); 970EXPORT_SYMBOL(kobject_del);
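
The new kobj_ns_* layer lets a kobject type tag its sysfs entries with a namespace so that, for example, two network namespaces each see only their own devices; kobj_ns_type_register() wires a namespace type's callbacks into the table consulted above. A registration sketch grounded in the ops this diff uses (the demo_* callbacks are placeholders, not real kernel symbols):

    static const void *demo_current_ns(void);            /* placeholder */
    static const void *demo_netlink_ns(struct sock *sk); /* placeholder */
    static const void *demo_initial_ns(void);            /* placeholder */

    /* Hypothetical registration for the network-namespace type. */
    static const struct kobj_ns_type_operations demo_net_ns_ops = {
        .type       = KOBJ_NS_TYPE_NET,
        .current_ns = demo_current_ns,   /* tag for the calling task */
        .netlink_ns = demo_netlink_ns,   /* tag for a netlink socket */
        .initial_ns = demo_initial_ns,   /* tag of the boot namespace */
    };

    static int __init demo_ns_init(void)
    {
        /* -EINVAL for a bad type, -EBUSY if the slot is already taken */
        return kobj_ns_type_register(&demo_net_ns_ops);
    }
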
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 920a3ca6e259..b93579504dfa 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -18,18 +18,25 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/kobject.h> 19#include <linux/kobject.h>
20#include <linux/module.h> 20#include <linux/module.h>
21 21#include <linux/slab.h>
22#include <linux/user_namespace.h>
22#include <linux/socket.h> 23#include <linux/socket.h>
23#include <linux/skbuff.h> 24#include <linux/skbuff.h>
24#include <linux/netlink.h> 25#include <linux/netlink.h>
25#include <net/sock.h> 26#include <net/sock.h>
27#include <net/net_namespace.h>
26 28
27 29
28u64 uevent_seqnum; 30u64 uevent_seqnum;
29char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; 31char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
30static DEFINE_SPINLOCK(sequence_lock); 32static DEFINE_SPINLOCK(sequence_lock);
31#if defined(CONFIG_NET) 33#ifdef CONFIG_NET
32static struct sock *uevent_sock; 34struct uevent_sock {
35 struct list_head list;
36 struct sock *sk;
37};
38static LIST_HEAD(uevent_sock_list);
39static DEFINE_MUTEX(uevent_sock_mutex);
33#endif 40#endif
34 41
35/* the strings here must match the enum in include/linux/kobject.h */ 42/* the strings here must match the enum in include/linux/kobject.h */
@@ -76,6 +83,39 @@ out:
76 return ret; 83 return ret;
77} 84}
78 85
86#ifdef CONFIG_NET
87static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
88{
89 struct kobject *kobj = data;
90 const struct kobj_ns_type_operations *ops;
91
92 ops = kobj_ns_ops(kobj);
93 if (ops) {
94 const void *sock_ns, *ns;
95 ns = kobj->ktype->namespace(kobj);
96 sock_ns = ops->netlink_ns(dsk);
97 return sock_ns != ns;
98 }
99
100 return 0;
101}
102#endif
103
104static int kobj_usermode_filter(struct kobject *kobj)
105{
106 const struct kobj_ns_type_operations *ops;
107
108 ops = kobj_ns_ops(kobj);
109 if (ops) {
110 const void *init_ns, *ns;
111 ns = kobj->ktype->namespace(kobj);
112 init_ns = ops->initial_ns();
113 return ns != init_ns;
114 }
115
116 return 0;
117}
118
79/** 119/**
80 * kobject_uevent_env - send an uevent with environmental data 120 * kobject_uevent_env - send an uevent with environmental data
81 * 121 *
@@ -95,10 +135,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
95 const char *subsystem; 135 const char *subsystem;
96 struct kobject *top_kobj; 136 struct kobject *top_kobj;
97 struct kset *kset; 137 struct kset *kset;
98 struct kset_uevent_ops *uevent_ops; 138 const struct kset_uevent_ops *uevent_ops;
99 u64 seq; 139 u64 seq;
100 int i = 0; 140 int i = 0;
101 int retval = 0; 141 int retval = 0;
142#ifdef CONFIG_NET
143 struct uevent_sock *ue_sk;
144#endif
102 145
103 pr_debug("kobject: '%s' (%p): %s\n", 146 pr_debug("kobject: '%s' (%p): %s\n",
104 kobject_name(kobj), kobj, __func__); 147 kobject_name(kobj), kobj, __func__);
@@ -210,7 +253,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
210 253
211#if defined(CONFIG_NET) 254#if defined(CONFIG_NET)
212 /* send netlink message */ 255 /* send netlink message */
213 if (uevent_sock) { 256 mutex_lock(&uevent_sock_mutex);
257 list_for_each_entry(ue_sk, &uevent_sock_list, list) {
258 struct sock *uevent_sock = ue_sk->sk;
214 struct sk_buff *skb; 259 struct sk_buff *skb;
215 size_t len; 260 size_t len;
216 261
@@ -232,18 +277,21 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
232 } 277 }
233 278
234 NETLINK_CB(skb).dst_group = 1; 279 NETLINK_CB(skb).dst_group = 1;
235 retval = netlink_broadcast(uevent_sock, skb, 0, 1, 280 retval = netlink_broadcast_filtered(uevent_sock, skb,
236 GFP_KERNEL); 281 0, 1, GFP_KERNEL,
282 kobj_bcast_filter,
283 kobj);
237 /* ENOBUFS should be handled in userspace */ 284 /* ENOBUFS should be handled in userspace */
238 if (retval == -ENOBUFS) 285 if (retval == -ENOBUFS)
239 retval = 0; 286 retval = 0;
240 } else 287 } else
241 retval = -ENOMEM; 288 retval = -ENOMEM;
242 } 289 }
290 mutex_unlock(&uevent_sock_mutex);
243#endif 291#endif
244 292
245 /* call uevent_helper, usually only enabled during early boot */ 293 /* call uevent_helper, usually only enabled during early boot */
246 if (uevent_helper[0]) { 294 if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
247 char *argv [3]; 295 char *argv [3];
248 296
249 argv [0] = uevent_helper; 297 argv [0] = uevent_helper;
@@ -319,18 +367,59 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
319EXPORT_SYMBOL_GPL(add_uevent_var); 367EXPORT_SYMBOL_GPL(add_uevent_var);
320 368
321#if defined(CONFIG_NET) 369#if defined(CONFIG_NET)
322static int __init kobject_uevent_init(void) 370static int uevent_net_init(struct net *net)
323{ 371{
324 uevent_sock = netlink_kernel_create(&init_net, NETLINK_KOBJECT_UEVENT, 372 struct uevent_sock *ue_sk;
325 1, NULL, NULL, THIS_MODULE); 373
326 if (!uevent_sock) { 374 ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
375 if (!ue_sk)
376 return -ENOMEM;
377
378 ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT,
379 1, NULL, NULL, THIS_MODULE);
380 if (!ue_sk->sk) {
327 printk(KERN_ERR 381 printk(KERN_ERR
328 "kobject_uevent: unable to create netlink socket!\n"); 382 "kobject_uevent: unable to create netlink socket!\n");
383 kfree(ue_sk);
329 return -ENODEV; 384 return -ENODEV;
330 } 385 }
331 netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV); 386 mutex_lock(&uevent_sock_mutex);
387 list_add_tail(&ue_sk->list, &uevent_sock_list);
388 mutex_unlock(&uevent_sock_mutex);
332 return 0; 389 return 0;
333} 390}
334 391
392static void uevent_net_exit(struct net *net)
393{
394 struct uevent_sock *ue_sk;
395
396 mutex_lock(&uevent_sock_mutex);
397 list_for_each_entry(ue_sk, &uevent_sock_list, list) {
398 if (sock_net(ue_sk->sk) == net)
399 goto found;
400 }
401 mutex_unlock(&uevent_sock_mutex);
402 return;
403
404found:
405 list_del(&ue_sk->list);
406 mutex_unlock(&uevent_sock_mutex);
407
408 netlink_kernel_release(ue_sk->sk);
409 kfree(ue_sk);
410}
411
412static struct pernet_operations uevent_net_ops = {
413 .init = uevent_net_init,
414 .exit = uevent_net_exit,
415};
416
417static int __init kobject_uevent_init(void)
418{
419 netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
420 return register_pernet_subsys(&uevent_net_ops);
421}
422
423
335postcore_initcall(kobject_uevent_init); 424postcore_initcall(kobject_uevent_init);
336#endif 425#endif
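
Uevent delivery becomes per-network-namespace: the single global uevent socket is replaced by one socket per netns, kept on uevent_sock_list by a pernet_operations init/exit pair, and kobj_bcast_filter() suppresses broadcasts to listeners whose namespace does not match the kobject's tag (kobj_usermode_filter() does the same for the usermode helper). The pernet pattern in isolation (demo_* names are illustrative, not kernel symbols):

    /* Shape of the per-netns registration used above: init runs for every
     * namespace, existing and future; exit runs as each one is torn down. */
    static int demo_net_init(struct net *net)
    {
        /* allocate this namespace's state and link it on a global list */
        return 0;
    }

    static void demo_net_exit(struct net *net)
    {
        /* find this namespace's state on the list, unlink and free it */
    }

    static struct pernet_operations demo_net_ops = {
        .init = demo_net_init,
        .exit = demo_net_exit,
    };

    static int __init demo_init(void)
    {
        return register_pernet_subsys(&demo_net_ops);
    }
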
diff --git a/lib/kref.c b/lib/kref.c
index 9ecd6e865610..d3d227a08a4b 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -13,17 +13,7 @@
13 13
14#include <linux/kref.h> 14#include <linux/kref.h>
15#include <linux/module.h> 15#include <linux/module.h>
16 16#include <linux/slab.h>
17/**
18 * kref_set - initialize object and set refcount to requested number.
19 * @kref: object in question.
20 * @num: initial reference counter
21 */
22void kref_set(struct kref *kref, int num)
23{
24 atomic_set(&kref->refcount, num);
25 smp_mb();
26}
27 17
28/** 18/**
29 * kref_init - initialize object. 19 * kref_init - initialize object.
@@ -31,7 +21,8 @@ void kref_set(struct kref *kref, int num)
31 */ 21 */
32void kref_init(struct kref *kref) 22void kref_init(struct kref *kref)
33{ 23{
34 kref_set(kref, 1); 24 atomic_set(&kref->refcount, 1);
25 smp_mb();
35} 26}
36 27
37/** 28/**
@@ -71,7 +62,6 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref))
71 return 0; 62 return 0;
72} 63}
73 64
74EXPORT_SYMBOL(kref_set);
75EXPORT_SYMBOL(kref_init); 65EXPORT_SYMBOL(kref_init);
76EXPORT_SYMBOL(kref_get); 66EXPORT_SYMBOL(kref_get);
77EXPORT_SYMBOL(kref_put); 67EXPORT_SYMBOL(kref_put);
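
kref_set() is removed because a refcount that starts anywhere but 1 is almost always a bug waiting to happen; kref_init() now does the atomic_set(..., 1) itself. For reference, the standard lifecycle the remaining API supports (the widget names are illustrative):

    /* The release callback runs exactly once, when the last ref drops. */
    struct widget {
        struct kref ref;
        /* ... payload ... */
    };

    static void widget_release(struct kref *kref)
    {
        struct widget *w = container_of(kref, struct widget, ref);
        kfree(w);
    }

    /* kref_init(&w->ref);                 refcount = 1
     * kref_get(&w->ref);                  hand out a second reference
     * kref_put(&w->ref, widget_release);  drop one
     * kref_put(&w->ref, widget_release);  last put -> widget_release(w) */
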
diff --git a/lib/lcm.c b/lib/lcm.c
new file mode 100644
index 000000000000..157cd88a6ffc
--- /dev/null
+++ b/lib/lcm.c
@@ -0,0 +1,15 @@
1#include <linux/kernel.h>
2#include <linux/gcd.h>
3#include <linux/module.h>
4
5/* Lowest common multiple */
6unsigned long lcm(unsigned long a, unsigned long b)
7{
8 if (a && b)
9 return (a * b) / gcd(a, b);
10 else if (b)
11 return b;
12
13 return a;
14}
15EXPORT_SYMBOL_GPL(lcm);
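
The new helper computes the identity lcm(a, b) = a * b / gcd(a, b) directly, which is exact but requires a * b to fit in unsigned long, fine for small operands. When larger inputs are possible, dividing before multiplying keeps the intermediate small; a userspace sketch of that variant:

    #include <stdio.h>

    static unsigned long gcd(unsigned long a, unsigned long b)
    {
        while (b) {
            unsigned long t = a % b;
            a = b;
            b = t;
        }
        return a;
    }

    /* lcm(a, b) = (a / gcd(a, b)) * b: the quotient is exact, so the
     * only product involves a value no larger than the result itself. */
    static unsigned long lcm_safe(unsigned long a, unsigned long b)
    {
        if (!a || !b)
            return a ? a : b;
        return (a / gcd(a, b)) * b;
    }

    int main(void)
    {
        printf("%lu\n", lcm_safe(4, 6));   /* prints 12 */
        return 0;
    }
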
diff --git a/lib/list_sort.c b/lib/list_sort.c
new file mode 100644
index 000000000000..4b5cb794c38b
--- /dev/null
+++ b/lib/list_sort.c
@@ -0,0 +1,217 @@
1#include <linux/kernel.h>
2#include <linux/module.h>
3#include <linux/list_sort.h>
4#include <linux/slab.h>
5#include <linux/list.h>
6
7#define MAX_LIST_LENGTH_BITS 20
8
9/*
10 * Returns a list organized in an intermediate format suited
11 * to chaining of merge() calls: null-terminated, no reserved or
12 * sentinel head node, "prev" links not maintained.
13 */
14static struct list_head *merge(void *priv,
15 int (*cmp)(void *priv, struct list_head *a,
16 struct list_head *b),
17 struct list_head *a, struct list_head *b)
18{
19 struct list_head head, *tail = &head;
20
21 while (a && b) {
22 /* if equal, take 'a' -- important for sort stability */
23 if ((*cmp)(priv, a, b) <= 0) {
24 tail->next = a;
25 a = a->next;
26 } else {
27 tail->next = b;
28 b = b->next;
29 }
30 tail = tail->next;
31 }
32 tail->next = a?:b;
33 return head.next;
34}
35
36/*
37 * Combine final list merge with restoration of standard doubly-linked
38 * list structure. This approach duplicates code from merge(), but
39 * runs faster than the tidier alternatives of either a separate final
40 * prev-link restoration pass, or maintaining the prev links
41 * throughout.
42 */
43static void merge_and_restore_back_links(void *priv,
44 int (*cmp)(void *priv, struct list_head *a,
45 struct list_head *b),
46 struct list_head *head,
47 struct list_head *a, struct list_head *b)
48{
49 struct list_head *tail = head;
50
51 while (a && b) {
52 /* if equal, take 'a' -- important for sort stability */
53 if ((*cmp)(priv, a, b) <= 0) {
54 tail->next = a;
55 a->prev = tail;
56 a = a->next;
57 } else {
58 tail->next = b;
59 b->prev = tail;
60 b = b->next;
61 }
62 tail = tail->next;
63 }
64 tail->next = a ? : b;
65
66 do {
67 /*
68 * In worst cases this loop may run many iterations.
69 * Continue callbacks to the client even though no
70 * element comparison is needed, so the client's cmp()
71 * routine can invoke cond_resched() periodically.
72 */
73 (*cmp)(priv, tail, tail);
74
75 tail->next->prev = tail;
76 tail = tail->next;
77 } while (tail->next);
78
79 tail->next = head;
80 head->prev = tail;
81}
82
83/**
84 * list_sort - sort a list
85 * @priv: private data, opaque to list_sort(), passed to @cmp
86 * @head: the list to sort
87 * @cmp: the elements comparison function
88 *
89 * This function implements "merge sort", which has O(nlog(n))
90 * complexity.
91 *
92 * The comparison function @cmp must return a negative value if @a
93 * should sort before @b, and a positive value if @a should sort after
94 * @b. If @a and @b are equivalent, and their original relative
95 * ordering is to be preserved, @cmp must return 0.
96 */
97void list_sort(void *priv, struct list_head *head,
98 int (*cmp)(void *priv, struct list_head *a,
99 struct list_head *b))
100{
101 struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
102 -- last slot is a sentinel */
103 int lev; /* index into part[] */
104 int max_lev = 0;
105 struct list_head *list;
106
107 if (list_empty(head))
108 return;
109
110 memset(part, 0, sizeof(part));
111
112 head->prev->next = NULL;
113 list = head->next;
114
115 while (list) {
116 struct list_head *cur = list;
117 list = list->next;
118 cur->next = NULL;
119
120 for (lev = 0; part[lev]; lev++) {
121 cur = merge(priv, cmp, part[lev], cur);
122 part[lev] = NULL;
123 }
124 if (lev > max_lev) {
125 if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
126 printk_once(KERN_DEBUG "list passed to"
127 " list_sort() too long for"
128 " efficiency\n");
129 lev--;
130 }
131 max_lev = lev;
132 }
133 part[lev] = cur;
134 }
135
136 for (lev = 0; lev < max_lev; lev++)
137 if (part[lev])
138 list = merge(priv, cmp, part[lev], list);
139
140 merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
141}
142EXPORT_SYMBOL(list_sort);
143
144#ifdef DEBUG_LIST_SORT
145struct debug_el {
146 struct list_head l_h;
147 int value;
148 unsigned serial;
149};
150
151static int cmp(void *priv, struct list_head *a, struct list_head *b)
152{
153 return container_of(a, struct debug_el, l_h)->value
154 - container_of(b, struct debug_el, l_h)->value;
155}
156
157/*
158 * The pattern of set bits in the list length determines which cases
159 * are hit in list_sort().
160 */
161#define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */
162
163static int __init list_sort_test(void)
164{
165 int i, r = 1, count;
166 struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL);
167 struct list_head *cur;
168
169 printk(KERN_WARNING "testing list_sort()\n");
170
171 cur = head;
172 for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) {
173 struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL);
174 BUG_ON(!el);
175 /* force some equivalencies */
176 el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3);
177 el->serial = i;
178
179 el->l_h.prev = cur;
180 cur->next = &el->l_h;
181 cur = cur->next;
182 }
183 head->prev = cur;
184
185 list_sort(NULL, head, cmp);
186
187 count = 1;
188 for (cur = head->next; cur->next != head; cur = cur->next) {
189 struct debug_el *el = container_of(cur, struct debug_el, l_h);
190 int cmp_result = cmp(NULL, cur, cur->next);
191 if (cur->next->prev != cur) {
192 printk(KERN_EMERG "list_sort() returned "
193 "a corrupted list!\n");
194 return 1;
195 } else if (cmp_result > 0) {
196 printk(KERN_EMERG "list_sort() failed to sort!\n");
197 return 1;
198 } else if (cmp_result == 0 &&
199 el->serial >= container_of(cur->next,
200 struct debug_el, l_h)->serial) {
201 printk(KERN_EMERG "list_sort() failed to preserve order"
202 " of equivalent elements!\n");
203 return 1;
204 }
205 kfree(cur->prev);
206 count++;
207 }
208 kfree(cur);
209 if (count != LIST_SORT_TEST_LENGTH) {
210 printk(KERN_EMERG "list_sort() returned a list of"
211 " different length!\n");
212 return 1;
213 }
214 return 0;
215}
216module_init(list_sort_test);
217#endif
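
list_sort() above is a bottom-up mergesort: part[k] is either NULL or a sorted sublist of exactly 2^k elements, so feeding in each new element cascades merges exactly like incrementing a binary counter with carries; after five elements, part[0] and part[2] are occupied (5 = 0b101). The final pass merges whatever levels remain and restores the prev links. A caller only supplies the three-way comparison; an illustrative cmp for a keyed element (struct item is hypothetical):

    struct item {
        struct list_head node;
        int key;
    };

    /* Negative/zero/positive as documented above; returning 0 for equal
     * keys is what preserves their original order (stability). */
    static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
    {
        struct item *ia = container_of(a, struct item, node);
        struct item *ib = container_of(b, struct item, node);

        if (ia->key != ib->key)
            return ia->key < ib->key ? -1 : 1;
        return 0;
    }

    /* usage: list_sort(NULL, &my_list, item_cmp); */
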
diff --git a/lib/lmb.c b/lib/lmb.c
deleted file mode 100644
index 0343c05609f0..000000000000
--- a/lib/lmb.c
+++ /dev/null
@@ -1,527 +0,0 @@
1/*
2 * Procedures for maintaining information about logical memory blocks.
3 *
4 * Peter Bergner, IBM Corp. June 2001.
5 * Copyright (C) 2001 Peter Bergner.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/bitops.h>
16#include <linux/lmb.h>
17
18#define LMB_ALLOC_ANYWHERE 0
19
20struct lmb lmb;
21
22static int lmb_debug;
23
24static int __init early_lmb(char *p)
25{
26 if (p && strstr(p, "debug"))
27 lmb_debug = 1;
28 return 0;
29}
30early_param("lmb", early_lmb);
31
32static void lmb_dump(struct lmb_region *region, char *name)
33{
34 unsigned long long base, size;
35 int i;
36
37 pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
38
39 for (i = 0; i < region->cnt; i++) {
40 base = region->region[i].base;
41 size = region->region[i].size;
42
43 pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
44 name, i, base, base + size - 1, size);
45 }
46}
47
48void lmb_dump_all(void)
49{
50 if (!lmb_debug)
51 return;
52
53 pr_info("LMB configuration:\n");
54 pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size);
55 pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);
56
57 lmb_dump(&lmb.memory, "memory");
58 lmb_dump(&lmb.reserved, "reserved");
59}
60
61static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
62 u64 size2)
63{
64 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
65}
66
67static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
68{
69 if (base2 == base1 + size1)
70 return 1;
71 else if (base1 == base2 + size2)
72 return -1;
73
74 return 0;
75}
76
77static long lmb_regions_adjacent(struct lmb_region *rgn,
78 unsigned long r1, unsigned long r2)
79{
80 u64 base1 = rgn->region[r1].base;
81 u64 size1 = rgn->region[r1].size;
82 u64 base2 = rgn->region[r2].base;
83 u64 size2 = rgn->region[r2].size;
84
85 return lmb_addrs_adjacent(base1, size1, base2, size2);
86}
87
88static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
89{
90 unsigned long i;
91
92 for (i = r; i < rgn->cnt - 1; i++) {
93 rgn->region[i].base = rgn->region[i + 1].base;
94 rgn->region[i].size = rgn->region[i + 1].size;
95 }
96 rgn->cnt--;
97}
98
99/* Assumption: base addr of region 1 < base addr of region 2 */
100static void lmb_coalesce_regions(struct lmb_region *rgn,
101 unsigned long r1, unsigned long r2)
102{
103 rgn->region[r1].size += rgn->region[r2].size;
104 lmb_remove_region(rgn, r2);
105}
106
107void __init lmb_init(void)
108{
109 /* Create a dummy zero size LMB which will get coalesced away later.
110 * This simplifies the lmb_add() code below...
111 */
112 lmb.memory.region[0].base = 0;
113 lmb.memory.region[0].size = 0;
114 lmb.memory.cnt = 1;
115
116 /* Ditto. */
117 lmb.reserved.region[0].base = 0;
118 lmb.reserved.region[0].size = 0;
119 lmb.reserved.cnt = 1;
120}
121
122void __init lmb_analyze(void)
123{
124 int i;
125
126 lmb.memory.size = 0;
127
128 for (i = 0; i < lmb.memory.cnt; i++)
129 lmb.memory.size += lmb.memory.region[i].size;
130}
131
132static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
133{
134 unsigned long coalesced = 0;
135 long adjacent, i;
136
137 if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
138 rgn->region[0].base = base;
139 rgn->region[0].size = size;
140 return 0;
141 }
142
143 /* First try and coalesce this LMB with another. */
144 for (i = 0; i < rgn->cnt; i++) {
145 u64 rgnbase = rgn->region[i].base;
146 u64 rgnsize = rgn->region[i].size;
147
148 if ((rgnbase == base) && (rgnsize == size))
149 /* Already have this region, so we're done */
150 return 0;
151
152 adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
153 if (adjacent > 0) {
154 rgn->region[i].base -= size;
155 rgn->region[i].size += size;
156 coalesced++;
157 break;
158 } else if (adjacent < 0) {
159 rgn->region[i].size += size;
160 coalesced++;
161 break;
162 }
163 }
164
165 if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
166 lmb_coalesce_regions(rgn, i, i+1);
167 coalesced++;
168 }
169
170 if (coalesced)
171 return coalesced;
172 if (rgn->cnt >= MAX_LMB_REGIONS)
173 return -1;
174
175 /* Couldn't coalesce the LMB, so add it to the sorted table. */
176 for (i = rgn->cnt - 1; i >= 0; i--) {
177 if (base < rgn->region[i].base) {
178 rgn->region[i+1].base = rgn->region[i].base;
179 rgn->region[i+1].size = rgn->region[i].size;
180 } else {
181 rgn->region[i+1].base = base;
182 rgn->region[i+1].size = size;
183 break;
184 }
185 }
186
187 if (base < rgn->region[0].base) {
188 rgn->region[0].base = base;
189 rgn->region[0].size = size;
190 }
191 rgn->cnt++;
192
193 return 0;
194}
195
196long lmb_add(u64 base, u64 size)
197{
198 struct lmb_region *_rgn = &lmb.memory;
199
200 /* On pSeries LPAR systems, the first LMB is our RMO region. */
201 if (base == 0)
202 lmb.rmo_size = size;
203
204 return lmb_add_region(_rgn, base, size);
205
206}
207
208long lmb_remove(u64 base, u64 size)
209{
210 struct lmb_region *rgn = &(lmb.memory);
211 u64 rgnbegin, rgnend;
212 u64 end = base + size;
213 int i;
214
215 rgnbegin = rgnend = 0; /* suppress gcc warnings */
216
217 /* Find the region where (base, size) belongs to */
218 for (i=0; i < rgn->cnt; i++) {
219 rgnbegin = rgn->region[i].base;
220 rgnend = rgnbegin + rgn->region[i].size;
221
222 if ((rgnbegin <= base) && (end <= rgnend))
223 break;
224 }
225
226 /* Didn't find the region */
227 if (i == rgn->cnt)
228 return -1;
229
230 /* Check to see if we are removing entire region */
231 if ((rgnbegin == base) && (rgnend == end)) {
232 lmb_remove_region(rgn, i);
233 return 0;
234 }
235
236 /* Check to see if region is matching at the front */
237 if (rgnbegin == base) {
238 rgn->region[i].base = end;
239 rgn->region[i].size -= size;
240 return 0;
241 }
242
243 /* Check to see if the region is matching at the end */
244 if (rgnend == end) {
245 rgn->region[i].size -= size;
246 return 0;
247 }
248
249 /*
250 * We need to split the entry - adjust the current one to the
251 * beginning of the hole and add the region after the hole.
252 */
253 rgn->region[i].size = base - rgn->region[i].base;
254 return lmb_add_region(rgn, end, rgnend - end);
255}
256
257long __init lmb_reserve(u64 base, u64 size)
258{
259 struct lmb_region *_rgn = &lmb.reserved;
260
261 BUG_ON(0 == size);
262
263 return lmb_add_region(_rgn, base, size);
264}
265
266long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
267{
268 unsigned long i;
269
270 for (i = 0; i < rgn->cnt; i++) {
271 u64 rgnbase = rgn->region[i].base;
272 u64 rgnsize = rgn->region[i].size;
273 if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
274 break;
275 }
276
277 return (i < rgn->cnt) ? i : -1;
278}
279
280static u64 lmb_align_down(u64 addr, u64 size)
281{
282 return addr & ~(size - 1);
283}
284
285static u64 lmb_align_up(u64 addr, u64 size)
286{
287 return (addr + (size - 1)) & ~(size - 1);
288}
289
290static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
291 u64 size, u64 align)
292{
293 u64 base, res_base;
294 long j;
295
296 base = lmb_align_down((end - size), align);
297 while (start <= base) {
298 j = lmb_overlaps_region(&lmb.reserved, base, size);
299 if (j < 0) {
300 /* this area isn't reserved, take it */
301 if (lmb_add_region(&lmb.reserved, base, size) < 0)
302 base = ~(u64)0;
303 return base;
304 }
305 res_base = lmb.reserved.region[j].base;
306 if (res_base < size)
307 break;
308 base = lmb_align_down(res_base - size, align);
309 }
310
311 return ~(u64)0;
312}
313
314static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
315 u64 (*nid_range)(u64, u64, int *),
316 u64 size, u64 align, int nid)
317{
318 u64 start, end;
319
320 start = mp->base;
321 end = start + mp->size;
322
323 start = lmb_align_up(start, align);
324 while (start < end) {
325 u64 this_end;
326 int this_nid;
327
328 this_end = nid_range(start, end, &this_nid);
329 if (this_nid == nid) {
330 u64 ret = lmb_alloc_nid_unreserved(start, this_end,
331 size, align);
332 if (ret != ~(u64)0)
333 return ret;
334 }
335 start = this_end;
336 }
337
338 return ~(u64)0;
339}
340
341u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
342 u64 (*nid_range)(u64 start, u64 end, int *nid))
343{
344 struct lmb_region *mem = &lmb.memory;
345 int i;
346
347 BUG_ON(0 == size);
348
349 size = lmb_align_up(size, align);
350
351 for (i = 0; i < mem->cnt; i++) {
352 u64 ret = lmb_alloc_nid_region(&mem->region[i],
353 nid_range,
354 size, align, nid);
355 if (ret != ~(u64)0)
356 return ret;
357 }
358
359 return lmb_alloc(size, align);
360}
361
362u64 __init lmb_alloc(u64 size, u64 align)
363{
364 return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
365}
366
367u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
368{
369 u64 alloc;
370
371 alloc = __lmb_alloc_base(size, align, max_addr);
372
373 if (alloc == 0)
374 panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
375 (unsigned long long) size, (unsigned long long) max_addr);
376
377 return alloc;
378}
379
380u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
381{
382 long i, j;
383 u64 base = 0;
384 u64 res_base;
385
386 BUG_ON(0 == size);
387
388 size = lmb_align_up(size, align);
389
390 /* On some platforms, make sure we allocate lowmem */
391 /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
392 if (max_addr == LMB_ALLOC_ANYWHERE)
393 max_addr = LMB_REAL_LIMIT;
394
395 for (i = lmb.memory.cnt - 1; i >= 0; i--) {
396 u64 lmbbase = lmb.memory.region[i].base;
397 u64 lmbsize = lmb.memory.region[i].size;
398
399 if (lmbsize < size)
400 continue;
401 if (max_addr == LMB_ALLOC_ANYWHERE)
402 base = lmb_align_down(lmbbase + lmbsize - size, align);
403 else if (lmbbase < max_addr) {
404 base = min(lmbbase + lmbsize, max_addr);
405 base = lmb_align_down(base - size, align);
406 } else
407 continue;
408
409 while (base && lmbbase <= base) {
410 j = lmb_overlaps_region(&lmb.reserved, base, size);
411 if (j < 0) {
412 /* this area isn't reserved, take it */
413 if (lmb_add_region(&lmb.reserved, base, size) < 0)
414 return 0;
415 return base;
416 }
417 res_base = lmb.reserved.region[j].base;
418 if (res_base < size)
419 break;
420 base = lmb_align_down(res_base - size, align);
421 }
422 }
423 return 0;
424}
425
426/* You must call lmb_analyze() before this. */
427u64 __init lmb_phys_mem_size(void)
428{
429 return lmb.memory.size;
430}
431
432u64 lmb_end_of_DRAM(void)
433{
434 int idx = lmb.memory.cnt - 1;
435
436 return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
437}
438
439/* You must call lmb_analyze() after this. */
440void __init lmb_enforce_memory_limit(u64 memory_limit)
441{
442 unsigned long i;
443 u64 limit;
444 struct lmb_property *p;
445
446 if (!memory_limit)
447 return;
448
449 /* Truncate the lmb regions to satisfy the memory limit. */
450 limit = memory_limit;
451 for (i = 0; i < lmb.memory.cnt; i++) {
452 if (limit > lmb.memory.region[i].size) {
453 limit -= lmb.memory.region[i].size;
454 continue;
455 }
456
457 lmb.memory.region[i].size = limit;
458 lmb.memory.cnt = i + 1;
459 break;
460 }
461
462 if (lmb.memory.region[0].size < lmb.rmo_size)
463 lmb.rmo_size = lmb.memory.region[0].size;
464
465 memory_limit = lmb_end_of_DRAM();
466
467 /* And truncate any reserves above the limit also. */
468 for (i = 0; i < lmb.reserved.cnt; i++) {
469 p = &lmb.reserved.region[i];
470
471 if (p->base > memory_limit)
472 p->size = 0;
473 else if ((p->base + p->size) > memory_limit)
474 p->size = memory_limit - p->base;
475
476 if (p->size == 0) {
477 lmb_remove_region(&lmb.reserved, i);
478 i--;
479 }
480 }
481}
482
483int __init lmb_is_reserved(u64 addr)
484{
485 int i;
486
487 for (i = 0; i < lmb.reserved.cnt; i++) {
488 u64 upper = lmb.reserved.region[i].base +
489 lmb.reserved.region[i].size - 1;
490 if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
491 return 1;
492 }
493 return 0;
494}
495
496/*
497 * Given a <base, len>, find which memory regions belong to this range.
498 * Adjust the request and return a contiguous chunk.
499 */
500int lmb_find(struct lmb_property *res)
501{
502 int i;
503 u64 rstart, rend;
504
505 rstart = res->base;
506 rend = rstart + res->size - 1;
507
508 for (i = 0; i < lmb.memory.cnt; i++) {
509 u64 start = lmb.memory.region[i].base;
510 u64 end = start + lmb.memory.region[i].size - 1;
511
512 if (start > rend)
513 return -1;
514
515 if ((end >= rstart) && (start < rend)) {
516 /* adjust the request */
517 if (rstart < start)
518 rstart = start;
519 if (rend > end)
520 rend = end;
521 res->base = rstart;
522 res->size = rend - rstart + 1;
523 return 0;
524 }
525 }
526 return -1;
527}
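A hedged usage sketch for lmb_find(): the request below and the consumer function are illustrative only, not taken from any caller in this patch.

	struct lmb_property res = {
		.base = 0x0fff0000,	/* hypothetical request */
		.size = 0x00020000,
	};

	if (lmb_find(&res) == 0) {
		/* res now describes the part of the request that falls
		 * inside one of the registered memory regions */
		use_range(res.base, res.size);	/* made-up consumer */
	}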
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
new file mode 100644
index 000000000000..270de9d31b8c
--- /dev/null
+++ b/lib/lru_cache.c
@@ -0,0 +1,560 @@
1/*
2 lru_cache.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24 */
25
26#include <linux/module.h>
27#include <linux/bitops.h>
28#include <linux/slab.h>
29#include <linux/string.h> /* for memset */
30#include <linux/seq_file.h> /* for seq_printf */
31#include <linux/lru_cache.h>
32
33MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
34 "Lars Ellenberg <lars@linbit.com>");
35MODULE_DESCRIPTION("lru_cache - Track sets of hot objects");
36MODULE_LICENSE("GPL");
37
38/* this is a developer's aid only.
39 * it catches concurrent access (lack of locking on the user's part) */
40#define PARANOIA_ENTRY() do { \
41 BUG_ON(!lc); \
42 BUG_ON(!lc->nr_elements); \
43 BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags)); \
44} while (0)
45
46#define RETURN(x...) do { \
47 clear_bit(__LC_PARANOIA, &lc->flags); \
48 smp_mb__after_clear_bit(); return x ; } while (0)
49
50/* BUG() if e is not one of the elements tracked by lc */
51#define PARANOIA_LC_ELEMENT(lc, e) do { \
52 struct lru_cache *lc_ = (lc); \
53 struct lc_element *e_ = (e); \
54 unsigned i = e_->lc_index; \
55 BUG_ON(i >= lc_->nr_elements); \
56 BUG_ON(lc_->lc_element[i] != e_); } while (0)
57
58/**
59 * lc_create - prepares to track objects in an active set
60 * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
61 * @e_count: number of elements allowed to be active simultaneously
62 * @e_size: size of the tracked objects
63 * @e_off: offset to the &struct lc_element member in a tracked object
64 *
65 * Returns a pointer to a newly initialized struct lru_cache on success,
66 * or NULL on (allocation) failure.
67 */
68struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
69 unsigned e_count, size_t e_size, size_t e_off)
70{
71 struct hlist_head *slot = NULL;
72 struct lc_element **element = NULL;
73 struct lru_cache *lc;
74 struct lc_element *e;
75 unsigned cache_obj_size = kmem_cache_size(cache);
76 unsigned i;
77
78 WARN_ON(cache_obj_size < e_size);
79 if (cache_obj_size < e_size)
80 return NULL;
81
82	/* e_count too big; would probably fail the allocation below anyway.
83	 * for typical use cases, e_count should be a few thousand at most. */
84 if (e_count > LC_MAX_ACTIVE)
85 return NULL;
86
87 slot = kzalloc(e_count * sizeof(struct hlist_head*), GFP_KERNEL);
88 if (!slot)
89 goto out_fail;
90 element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL);
91 if (!element)
92 goto out_fail;
93
94 lc = kzalloc(sizeof(*lc), GFP_KERNEL);
95 if (!lc)
96 goto out_fail;
97
98 INIT_LIST_HEAD(&lc->in_use);
99 INIT_LIST_HEAD(&lc->lru);
100 INIT_LIST_HEAD(&lc->free);
101
102 lc->name = name;
103 lc->element_size = e_size;
104 lc->element_off = e_off;
105 lc->nr_elements = e_count;
106 lc->new_number = LC_FREE;
107 lc->lc_cache = cache;
108 lc->lc_element = element;
109 lc->lc_slot = slot;
110
111 /* preallocate all objects */
112 for (i = 0; i < e_count; i++) {
113 void *p = kmem_cache_alloc(cache, GFP_KERNEL);
114 if (!p)
115 break;
116 memset(p, 0, lc->element_size);
117 e = p + e_off;
118 e->lc_index = i;
119 e->lc_number = LC_FREE;
120 list_add(&e->list, &lc->free);
121 element[i] = e;
122 }
123 if (i == e_count)
124 return lc;
125
126 /* else: could not allocate all elements, give up */
127 for (i--; i; i--) {
128 void *p = element[i];
129 kmem_cache_free(cache, p - e_off);
130 }
131 kfree(lc);
132out_fail:
133 kfree(element);
134 kfree(slot);
135 return NULL;
136}
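Note that, beyond the four parameters described in the kerneldoc above, lc_create() also takes the kmem_cache the tracked objects are allocated from. A hedged sketch of a typical caller (the struct and names are illustrative):

	/* the tracked object embeds a struct lc_element */
	struct my_extent {
		struct lc_element lce;
		/* ... user data ... */
	};

	struct kmem_cache *cache = kmem_cache_create("my_extents",
			sizeof(struct my_extent), 0, 0, NULL);
	struct lru_cache *lc = lc_create("my_extents", cache, 64,
			sizeof(struct my_extent),
			offsetof(struct my_extent, lce));
	if (!lc)
		goto fail;	/* allocation failed */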
137
138void lc_free_by_index(struct lru_cache *lc, unsigned i)
139{
140 void *p = lc->lc_element[i];
141 WARN_ON(!p);
142 if (p) {
143 p -= lc->element_off;
144 kmem_cache_free(lc->lc_cache, p);
145 }
146}
147
148/**
149 * lc_destroy - frees memory allocated by lc_create()
150 * @lc: the lru cache to destroy
151 */
152void lc_destroy(struct lru_cache *lc)
153{
154 unsigned i;
155 if (!lc)
156 return;
157 for (i = 0; i < lc->nr_elements; i++)
158 lc_free_by_index(lc, i);
159 kfree(lc->lc_element);
160 kfree(lc->lc_slot);
161 kfree(lc);
162}
163
164/**
165 * lc_reset - does a full reset for @lc and the hash table slots.
166 * @lc: the lru cache to operate on
167 *
168 * It is roughly the equivalent of re-allocating a fresh lru_cache object,
169 * basically a short cut to lc_destroy(lc); lc = lc_create(...);
170 */
171void lc_reset(struct lru_cache *lc)
172{
173 unsigned i;
174
175 INIT_LIST_HEAD(&lc->in_use);
176 INIT_LIST_HEAD(&lc->lru);
177 INIT_LIST_HEAD(&lc->free);
178 lc->used = 0;
179 lc->hits = 0;
180 lc->misses = 0;
181 lc->starving = 0;
182 lc->dirty = 0;
183 lc->changed = 0;
184 lc->flags = 0;
185 lc->changing_element = NULL;
186 lc->new_number = LC_FREE;
187 memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements);
188
189 for (i = 0; i < lc->nr_elements; i++) {
190 struct lc_element *e = lc->lc_element[i];
191 void *p = e;
192 p -= lc->element_off;
193 memset(p, 0, lc->element_size);
194 /* re-init it */
195 e->lc_index = i;
196 e->lc_number = LC_FREE;
197 list_add(&e->list, &lc->free);
198 }
199}
200
201/**
202 * lc_seq_printf_stats - print stats about @lc into @seq
203 * @seq: the seq_file to print into
204 * @lc: the lru cache to print statistics of
205 */
206size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
207{
208 /* NOTE:
209 * total calls to lc_get are
210 * (starving + hits + misses)
211	 * misses include the "dirty" count (an update from another thread
212	 * was in progress) and "changed", where the miss in fact led to a
213	 * successful update of the cache.
214 */
215 return seq_printf(seq, "\t%s: used:%u/%u "
216 "hits:%lu misses:%lu starving:%lu dirty:%lu changed:%lu\n",
217 lc->name, lc->used, lc->nr_elements,
218 lc->hits, lc->misses, lc->starving, lc->dirty, lc->changed);
219}
220
221static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
222{
223 return lc->lc_slot + (enr % lc->nr_elements);
224}
225
226
227/**
228 * lc_find - find element by label, if present in the hash table
229 * @lc: The lru_cache object
230 * @enr: element number
231 *
232 * Returns the pointer to an element, if the element with the requested
233 * "label" or element number is present in the hash table,
234 * or NULL if not found. Does not change the refcnt.
235 */
236struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr)
237{
238 struct hlist_node *n;
239 struct lc_element *e;
240
241 BUG_ON(!lc);
242 BUG_ON(!lc->nr_elements);
243 hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) {
244 if (e->lc_number == enr)
245 return e;
246 }
247 return NULL;
248}
249
250/* returned element will be "recycled" immediately */
251static struct lc_element *lc_evict(struct lru_cache *lc)
252{
253 struct list_head *n;
254 struct lc_element *e;
255
256 if (list_empty(&lc->lru))
257 return NULL;
258
259 n = lc->lru.prev;
260 e = list_entry(n, struct lc_element, list);
261
262 PARANOIA_LC_ELEMENT(lc, e);
263
264 list_del(&e->list);
265 hlist_del(&e->colision);
266 return e;
267}
268
269/**
270 * lc_del - removes an element from the cache
271 * @lc: The lru_cache object
272 * @e: The element to remove
273 *
274 * @e must be unused (refcnt == 0). Moves @e from "lru" to "free" list,
275 * sets @e->enr to %LC_FREE.
276 */
277void lc_del(struct lru_cache *lc, struct lc_element *e)
278{
279 PARANOIA_ENTRY();
280 PARANOIA_LC_ELEMENT(lc, e);
281 BUG_ON(e->refcnt);
282
283 e->lc_number = LC_FREE;
284 hlist_del_init(&e->colision);
285 list_move(&e->list, &lc->free);
286 RETURN();
287}
288
289static struct lc_element *lc_get_unused_element(struct lru_cache *lc)
290{
291 struct list_head *n;
292
293 if (list_empty(&lc->free))
294 return lc_evict(lc);
295
296 n = lc->free.next;
297 list_del(n);
298 return list_entry(n, struct lc_element, list);
299}
300
301static int lc_unused_element_available(struct lru_cache *lc)
302{
303 if (!list_empty(&lc->free))
304 return 1; /* something on the free list */
305 if (!list_empty(&lc->lru))
306 return 1; /* something to evict */
307
308 return 0;
309}
310
311
312/**
313 * lc_get - get element by label, maybe change the active set
314 * @lc: the lru cache to operate on
315 * @enr: the label to look up
316 *
317 * Finds an element in the cache, increases its usage count,
318 * "touches" and returns it.
319 *
320 * In case the requested number is not present, it needs to be added to the
321 * cache. Therefore it is possible that another element gets evicted from
322 * the cache. In either case, the user is notified so he is able to e.g. keep
323 * a persistent log of the cache changes, and thereby of the objects in use.
324 *
325 * Return values:
326 * NULL
327 * The cache was marked %LC_STARVING,
328 * or the requested label was not in the active set
329 * and a changing transaction is still pending (@lc was marked %LC_DIRTY).
330 * Or no unused or free element could be recycled (@lc will be marked as
331 * %LC_STARVING, blocking further lc_get() operations).
332 *
333 * pointer to the element with the REQUESTED element number.
334 * In this case, it can be used right away
335 *
336 * pointer to an UNUSED element with some different element number,
337 * where that different number may also be %LC_FREE.
338 *
339 * In this case, the cache is marked %LC_DIRTY (blocking further changes),
340 * and the returned element pointer is removed from the lru list and
341 * hash collision chains. The user now should do whatever housekeeping
342 * is necessary.
343 * Then he must call lc_changed(lc, element_pointer), to finish
344 * the change.
345 *
346 * NOTE: The user needs to check the lc_number on EACH use, so he recognizes
347 * any cache set change.
348 */
349struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
350{
351 struct lc_element *e;
352
353 PARANOIA_ENTRY();
354 if (lc->flags & LC_STARVING) {
355 ++lc->starving;
356 RETURN(NULL);
357 }
358
359 e = lc_find(lc, enr);
360 if (e) {
361 ++lc->hits;
362 if (e->refcnt++ == 0)
363 lc->used++;
364 list_move(&e->list, &lc->in_use); /* Not evictable... */
365 RETURN(e);
366 }
367
368 ++lc->misses;
369
370	/* In case there is nothing available and we cannot kick out
371 * the LRU element, we have to wait ...
372 */
373 if (!lc_unused_element_available(lc)) {
374 __set_bit(__LC_STARVING, &lc->flags);
375 RETURN(NULL);
376 }
377
378 /* it was not present in the active set.
379 * we are going to recycle an unused (or even "free") element.
380 * user may need to commit a transaction to record that change.
381 * we serialize on flags & TF_DIRTY */
382 if (test_and_set_bit(__LC_DIRTY, &lc->flags)) {
383 ++lc->dirty;
384 RETURN(NULL);
385 }
386
387 e = lc_get_unused_element(lc);
388 BUG_ON(!e);
389
390 clear_bit(__LC_STARVING, &lc->flags);
391 BUG_ON(++e->refcnt != 1);
392 lc->used++;
393
394 lc->changing_element = e;
395 lc->new_number = enr;
396
397 RETURN(e);
398}
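Putting the return-value contract above into code, a typical lc_get() caller looks roughly like this (hedged sketch; commit_change() stands in for whatever persistent housekeeping the user does):

	struct lc_element *e = lc_get(lc, enr);

	if (!e)
		return -EBUSY;	/* starving or dirty; retry later */

	if (e->lc_number != enr) {
		/* a recycled element: record the label change
		 * persistently, then finish it */
		commit_change(lc, e, enr);	/* made-up housekeeping */
		lc_changed(lc, e);
	}
	/* ... use the object embedding e ... */
	lc_put(lc, e);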
399
400/* similar to lc_get,
401 * but only gets a new reference on an existing element.
402 * you either get the requested element, or NULL.
403 * will be consolidated into one function.
404 */
405struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr)
406{
407 struct lc_element *e;
408
409 PARANOIA_ENTRY();
410 if (lc->flags & LC_STARVING) {
411 ++lc->starving;
412 RETURN(NULL);
413 }
414
415 e = lc_find(lc, enr);
416 if (e) {
417 ++lc->hits;
418 if (e->refcnt++ == 0)
419 lc->used++;
420 list_move(&e->list, &lc->in_use); /* Not evictable... */
421 }
422 RETURN(e);
423}
424
425/**
426 * lc_changed - tell @lc that the change has been recorded
427 * @lc: the lru cache to operate on
428 * @e: the element pending label change
429 */
430void lc_changed(struct lru_cache *lc, struct lc_element *e)
431{
432 PARANOIA_ENTRY();
433 BUG_ON(e != lc->changing_element);
434 PARANOIA_LC_ELEMENT(lc, e);
435 ++lc->changed;
436 e->lc_number = lc->new_number;
437 list_add(&e->list, &lc->in_use);
438 hlist_add_head(&e->colision, lc_hash_slot(lc, lc->new_number));
439 lc->changing_element = NULL;
440 lc->new_number = LC_FREE;
441 clear_bit(__LC_DIRTY, &lc->flags);
442 smp_mb__after_clear_bit();
443 RETURN();
444}
445
446
447/**
448 * lc_put - give up refcnt of @e
449 * @lc: the lru cache to operate on
450 * @e: the element to put
451 *
452 * If refcnt reaches zero, the element is moved to the lru list,
453 * and a %LC_STARVING (if set) is cleared.
454 * Returns the new (post-decrement) refcnt.
455 */
456unsigned int lc_put(struct lru_cache *lc, struct lc_element *e)
457{
458 PARANOIA_ENTRY();
459 PARANOIA_LC_ELEMENT(lc, e);
460 BUG_ON(e->refcnt == 0);
461 BUG_ON(e == lc->changing_element);
462 if (--e->refcnt == 0) {
463 /* move it to the front of LRU. */
464 list_move(&e->list, &lc->lru);
465 lc->used--;
466 clear_bit(__LC_STARVING, &lc->flags);
467 smp_mb__after_clear_bit();
468 }
469 RETURN(e->refcnt);
470}
471
472/**
473 * lc_element_by_index
474 * @lc: the lru cache to operate on
475 * @i: the index of the element to return
476 */
477struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i)
478{
479 BUG_ON(i >= lc->nr_elements);
480 BUG_ON(lc->lc_element[i] == NULL);
481 BUG_ON(lc->lc_element[i]->lc_index != i);
482 return lc->lc_element[i];
483}
484
485/**
486 * lc_index_of
487 * @lc: the lru cache to operate on
488 * @e: the element to query for its index position in lc->element
489 */
490unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e)
491{
492 PARANOIA_LC_ELEMENT(lc, e);
493 return e->lc_index;
494}
495
496/**
497 * lc_set - associate index with label
498 * @lc: the lru cache to operate on
499 * @enr: the label to set
500 * @index: the element index to associate label with.
501 *
502 * Used to initialize the active set to some previously recorded state.
503 */
504void lc_set(struct lru_cache *lc, unsigned int enr, int index)
505{
506 struct lc_element *e;
507
508 if (index < 0 || index >= lc->nr_elements)
509 return;
510
511 e = lc_element_by_index(lc, index);
512 e->lc_number = enr;
513
514 hlist_del_init(&e->colision);
515 hlist_add_head(&e->colision, lc_hash_slot(lc, enr));
516 list_move(&e->list, e->refcnt ? &lc->in_use : &lc->lru);
517}
518
519/**
520 * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form.
521 * @seq: the &struct seq_file pointer to seq_printf into
522 * @lc: the lru cache to operate on
523 * @utext: user supplied "heading" or other info
524 * @detail: function pointer the user may provide to dump further details
525 * of the object the lc_element is embedded in.
526 */
527void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
528 void (*detail) (struct seq_file *, struct lc_element *))
529{
530 unsigned int nr_elements = lc->nr_elements;
531 struct lc_element *e;
532 int i;
533
534 seq_printf(seq, "\tnn: lc_number refcnt %s\n ", utext);
535 for (i = 0; i < nr_elements; i++) {
536 e = lc_element_by_index(lc, i);
537 if (e->lc_number == LC_FREE) {
538 seq_printf(seq, "\t%2d: FREE\n", i);
539 } else {
540 seq_printf(seq, "\t%2d: %4u %4u ", i,
541 e->lc_number, e->refcnt);
542 detail(seq, e);
543 }
544 }
545}
546
547EXPORT_SYMBOL(lc_create);
548EXPORT_SYMBOL(lc_reset);
549EXPORT_SYMBOL(lc_destroy);
550EXPORT_SYMBOL(lc_set);
551EXPORT_SYMBOL(lc_del);
552EXPORT_SYMBOL(lc_try_get);
553EXPORT_SYMBOL(lc_find);
554EXPORT_SYMBOL(lc_get);
555EXPORT_SYMBOL(lc_put);
556EXPORT_SYMBOL(lc_changed);
557EXPORT_SYMBOL(lc_element_by_index);
558EXPORT_SYMBOL(lc_index_of);
559EXPORT_SYMBOL(lc_seq_printf_stats);
560EXPORT_SYMBOL(lc_seq_dump_details);
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c
index 5dc6b29c1575..f2fd09850223 100644
--- a/lib/lzo/lzo1x_decompress.c
+++ b/lib/lzo/lzo1x_decompress.c
@@ -11,11 +11,13 @@
11 * Richard Purdie <rpurdie@openedhand.com> 11 * Richard Purdie <rpurdie@openedhand.com>
12 */ 12 */
13 13
14#ifndef STATIC
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <linux/lzo.h> 17#endif
17#include <asm/byteorder.h> 18
18#include <asm/unaligned.h> 19#include <asm/unaligned.h>
20#include <linux/lzo.h>
19#include "lzodefs.h" 21#include "lzodefs.h"
20 22
21#define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x)) 23#define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x))
@@ -244,9 +246,10 @@ lookbehind_overrun:
244 *out_len = op - out; 246 *out_len = op - out;
245 return LZO_E_LOOKBEHIND_OVERRUN; 247 return LZO_E_LOOKBEHIND_OVERRUN;
246} 248}
247 249#ifndef STATIC
248EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); 250EXPORT_SYMBOL_GPL(lzo1x_decompress_safe);
249 251
250MODULE_LICENSE("GPL"); 252MODULE_LICENSE("GPL");
251MODULE_DESCRIPTION("LZO1X Decompressor"); 253MODULE_DESCRIPTION("LZO1X Decompressor");
252 254
255#endif
diff --git a/lib/parser.c b/lib/parser.c
index b00d02059a5f..fb34977246bb 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -56,13 +56,16 @@ static int match_one(char *s, const char *p, substring_t args[])
56 56
57 args[argc].from = s; 57 args[argc].from = s;
58 switch (*p++) { 58 switch (*p++) {
59 case 's': 59 case 's': {
60 if (strlen(s) == 0) 60 size_t str_len = strlen(s);
61
62 if (str_len == 0)
61 return 0; 63 return 0;
62 else if (len == -1 || len > strlen(s)) 64 if (len == -1 || len > str_len)
63 len = strlen(s); 65 len = str_len;
64 args[argc].to = s + len; 66 args[argc].to = s + len;
65 break; 67 break;
68 }
66 case 'd': 69 case 'd':
67 simple_strtol(s, &args[argc].to, 0); 70 simple_strtol(s, &args[argc].to, 0);
68 goto num; 71 goto num;
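For reference, match_one() is the backend of match_token(); a hedged reminder of how the %s pattern fixed here is consumed by callers (the table and names are illustrative, the helpers come from <linux/parser.h>):

	enum { Opt_user, Opt_err };

	static const match_table_t tokens = {
		{ Opt_user, "user=%s" },
		{ Opt_err, NULL }
	};

	substring_t args[MAX_OPT_ARGS];
	char *name;

	switch (match_token(opt, tokens, args)) {
	case Opt_user:
		name = match_strdup(&args[0]);	/* copies the from..to span */
		break;
	}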
diff --git a/lib/plist.c b/lib/plist.c
index d6c64a824e1d..1471988d9190 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top)
54 54
55static void plist_check_head(struct plist_head *head) 55static void plist_check_head(struct plist_head *head)
56{ 56{
57 WARN_ON(!head->lock); 57 WARN_ON(!head->rawlock && !head->spinlock);
58 if (head->lock) 58 if (head->rawlock)
59 WARN_ON_SMP(!spin_is_locked(head->lock)); 59 WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
60 if (head->spinlock)
61 WARN_ON_SMP(!spin_is_locked(head->spinlock));
60 plist_check_list(&head->prio_list); 62 plist_check_list(&head->prio_list);
61 plist_check_list(&head->node_list); 63 plist_check_list(&head->node_list);
62} 64}
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 23abbd93cae1..05da38bcc298 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -28,7 +28,6 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/notifier.h> 29#include <linux/notifier.h>
30#include <linux/cpu.h> 30#include <linux/cpu.h>
31#include <linux/gfp.h>
32#include <linux/string.h> 31#include <linux/string.h>
33#include <linux/bitops.h> 32#include <linux/bitops.h>
34#include <linux/rcupdate.h> 33#include <linux/rcupdate.h>
@@ -200,6 +199,9 @@ radix_tree_node_free(struct radix_tree_node *node)
200 * ensure that the addition of a single element in the tree cannot fail. On 199 * ensure that the addition of a single element in the tree cannot fail. On
201 * success, return zero, with preemption disabled. On error, return -ENOMEM 200 * success, return zero, with preemption disabled. On error, return -ENOMEM
202 * with preemption not disabled. 201 * with preemption not disabled.
202 *
203 * To make use of this facility, the radix tree must be initialised without
204 * __GFP_WAIT being passed to INIT_RADIX_TREE().
203 */ 205 */
204int radix_tree_preload(gfp_t gfp_mask) 206int radix_tree_preload(gfp_t gfp_mask)
205{ 207{
@@ -361,7 +363,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
361 unsigned int height, shift; 363 unsigned int height, shift;
362 struct radix_tree_node *node, **slot; 364 struct radix_tree_node *node, **slot;
363 365
364 node = rcu_dereference(root->rnode); 366 node = rcu_dereference_raw(root->rnode);
365 if (node == NULL) 367 if (node == NULL)
366 return NULL; 368 return NULL;
367 369
@@ -381,7 +383,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
381 do { 383 do {
382 slot = (struct radix_tree_node **) 384 slot = (struct radix_tree_node **)
383 (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); 385 (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
384 node = rcu_dereference(*slot); 386 node = rcu_dereference_raw(*slot);
385 if (node == NULL) 387 if (node == NULL)
386 return NULL; 388 return NULL;
387 389
@@ -543,7 +545,6 @@ out:
543} 545}
544EXPORT_SYMBOL(radix_tree_tag_clear); 546EXPORT_SYMBOL(radix_tree_tag_clear);
545 547
546#ifndef __KERNEL__ /* Only the test harness uses this at present */
547/** 548/**
548 * radix_tree_tag_get - get a tag on a radix tree node 549 * radix_tree_tag_get - get a tag on a radix tree node
549 * @root: radix tree root 550 * @root: radix tree root
@@ -554,6 +555,10 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
554 * 555 *
555 * 0: tag not present or not set 556 * 0: tag not present or not set
556 * 1: tag set 557 * 1: tag set
558 *
559 * Note that the return value of this function may not be relied on, even if
560 * the RCU lock is held, unless tag modification and node deletion are excluded
561 * from concurrency.
557 */ 562 */
558int radix_tree_tag_get(struct radix_tree_root *root, 563int radix_tree_tag_get(struct radix_tree_root *root,
559 unsigned long index, unsigned int tag) 564 unsigned long index, unsigned int tag)
@@ -566,7 +571,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
566 if (!root_tag_get(root, tag)) 571 if (!root_tag_get(root, tag))
567 return 0; 572 return 0;
568 573
569 node = rcu_dereference(root->rnode); 574 node = rcu_dereference_raw(root->rnode);
570 if (node == NULL) 575 if (node == NULL)
571 return 0; 576 return 0;
572 577
@@ -594,19 +599,14 @@ int radix_tree_tag_get(struct radix_tree_root *root,
594 */ 599 */
595 if (!tag_get(node, tag, offset)) 600 if (!tag_get(node, tag, offset))
596 saw_unset_tag = 1; 601 saw_unset_tag = 1;
597 if (height == 1) { 602 if (height == 1)
598 int ret = tag_get(node, tag, offset); 603 return !!tag_get(node, tag, offset);
599 604 node = rcu_dereference_raw(node->slots[offset]);
600 BUG_ON(ret && saw_unset_tag);
601 return !!ret;
602 }
603 node = rcu_dereference(node->slots[offset]);
604 shift -= RADIX_TREE_MAP_SHIFT; 605 shift -= RADIX_TREE_MAP_SHIFT;
605 height--; 606 height--;
606 } 607 }
607} 608}
608EXPORT_SYMBOL(radix_tree_tag_get); 609EXPORT_SYMBOL(radix_tree_tag_get);
609#endif
610 610
611/** 611/**
612 * radix_tree_next_hole - find the next hole (not-present entry) 612 * radix_tree_next_hole - find the next hole (not-present entry)
@@ -656,7 +656,7 @@ EXPORT_SYMBOL(radix_tree_next_hole);
656 * 656 *
657 * Returns: the index of the hole if found, otherwise returns an index 657 * Returns: the index of the hole if found, otherwise returns an index
658 * outside of the set specified (in which case 'index - return >= max_scan' 658 * outside of the set specified (in which case 'index - return >= max_scan'
659 * will be true). In rare cases of wrap-around, LONG_MAX will be returned. 659 * will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
660 * 660 *
661 * radix_tree_next_hole may be called under rcu_read_lock. However, like 661 * radix_tree_next_hole may be called under rcu_read_lock. However, like
662 * radix_tree_gang_lookup, this will not atomically search a snapshot of 662 * radix_tree_gang_lookup, this will not atomically search a snapshot of
@@ -674,7 +674,7 @@ unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
674 if (!radix_tree_lookup(root, index)) 674 if (!radix_tree_lookup(root, index))
675 break; 675 break;
676 index--; 676 index--;
677 if (index == LONG_MAX) 677 if (index == ULONG_MAX)
678 break; 678 break;
679 } 679 }
680 680
@@ -710,7 +710,7 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
710 } 710 }
711 711
712 shift -= RADIX_TREE_MAP_SHIFT; 712 shift -= RADIX_TREE_MAP_SHIFT;
713 slot = rcu_dereference(slot->slots[i]); 713 slot = rcu_dereference_raw(slot->slots[i]);
714 if (slot == NULL) 714 if (slot == NULL)
715 goto out; 715 goto out;
716 } 716 }
@@ -757,7 +757,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
757 unsigned long cur_index = first_index; 757 unsigned long cur_index = first_index;
758 unsigned int ret; 758 unsigned int ret;
759 759
760 node = rcu_dereference(root->rnode); 760 node = rcu_dereference_raw(root->rnode);
761 if (!node) 761 if (!node)
762 return 0; 762 return 0;
763 763
@@ -786,7 +786,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
786 slot = *(((void ***)results)[ret + i]); 786 slot = *(((void ***)results)[ret + i]);
787 if (!slot) 787 if (!slot)
788 continue; 788 continue;
789 results[ret + nr_found] = rcu_dereference(slot); 789 results[ret + nr_found] = rcu_dereference_raw(slot);
790 nr_found++; 790 nr_found++;
791 } 791 }
792 ret += nr_found; 792 ret += nr_found;
@@ -825,7 +825,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
825 unsigned long cur_index = first_index; 825 unsigned long cur_index = first_index;
826 unsigned int ret; 826 unsigned int ret;
827 827
828 node = rcu_dereference(root->rnode); 828 node = rcu_dereference_raw(root->rnode);
829 if (!node) 829 if (!node)
830 return 0; 830 return 0;
831 831
@@ -914,7 +914,7 @@ __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index,
914 } 914 }
915 } 915 }
916 shift -= RADIX_TREE_MAP_SHIFT; 916 shift -= RADIX_TREE_MAP_SHIFT;
917 slot = rcu_dereference(slot->slots[i]); 917 slot = rcu_dereference_raw(slot->slots[i]);
918 if (slot == NULL) 918 if (slot == NULL)
919 break; 919 break;
920 } 920 }
@@ -950,7 +950,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
950 if (!root_tag_get(root, tag)) 950 if (!root_tag_get(root, tag))
951 return 0; 951 return 0;
952 952
953 node = rcu_dereference(root->rnode); 953 node = rcu_dereference_raw(root->rnode);
954 if (!node) 954 if (!node)
955 return 0; 955 return 0;
956 956
@@ -979,7 +979,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
979 slot = *(((void ***)results)[ret + i]); 979 slot = *(((void ***)results)[ret + i]);
980 if (!slot) 980 if (!slot)
981 continue; 981 continue;
982 results[ret + nr_found] = rcu_dereference(slot); 982 results[ret + nr_found] = rcu_dereference_raw(slot);
983 nr_found++; 983 nr_found++;
984 } 984 }
985 ret += nr_found; 985 ret += nr_found;
@@ -1019,7 +1019,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
1019 if (!root_tag_get(root, tag)) 1019 if (!root_tag_get(root, tag))
1020 return 0; 1020 return 0;
1021 1021
1022 node = rcu_dereference(root->rnode); 1022 node = rcu_dereference_raw(root->rnode);
1023 if (!node) 1023 if (!node)
1024 return 0; 1024 return 0;
1025 1025
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index b2fe4baa90e0..19bf32da644f 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -9,7 +9,7 @@ raid6_pq-y += raid6algos.o raid6recov.o raid6tables.o \
9hostprogs-y += mktables 9hostprogs-y += mktables
10 10
11quiet_cmd_unroll = UNROLL $@ 11quiet_cmd_unroll = UNROLL $@
12 cmd_unroll = $(PERL) $(srctree)/$(src)/unroll.pl $(UNROLL) \ 12 cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
13 < $< > $@ || ( rm -f $@ && exit 1 ) 13 < $< > $@ || ( rm -f $@ && exit 1 )
14 14
15ifeq ($(CONFIG_ALTIVEC),y) 15ifeq ($(CONFIG_ALTIVEC),y)
@@ -18,56 +18,56 @@ endif
18 18
19targets += raid6int1.c 19targets += raid6int1.c
20$(obj)/raid6int1.c: UNROLL := 1 20$(obj)/raid6int1.c: UNROLL := 1
21$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE 21$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
22 $(call if_changed,unroll) 22 $(call if_changed,unroll)
23 23
24targets += raid6int2.c 24targets += raid6int2.c
25$(obj)/raid6int2.c: UNROLL := 2 25$(obj)/raid6int2.c: UNROLL := 2
26$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE 26$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
27 $(call if_changed,unroll) 27 $(call if_changed,unroll)
28 28
29targets += raid6int4.c 29targets += raid6int4.c
30$(obj)/raid6int4.c: UNROLL := 4 30$(obj)/raid6int4.c: UNROLL := 4
31$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE 31$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
32 $(call if_changed,unroll) 32 $(call if_changed,unroll)
33 33
34targets += raid6int8.c 34targets += raid6int8.c
35$(obj)/raid6int8.c: UNROLL := 8 35$(obj)/raid6int8.c: UNROLL := 8
36$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE 36$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
37 $(call if_changed,unroll) 37 $(call if_changed,unroll)
38 38
39targets += raid6int16.c 39targets += raid6int16.c
40$(obj)/raid6int16.c: UNROLL := 16 40$(obj)/raid6int16.c: UNROLL := 16
41$(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE 41$(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
42 $(call if_changed,unroll) 42 $(call if_changed,unroll)
43 43
44targets += raid6int32.c 44targets += raid6int32.c
45$(obj)/raid6int32.c: UNROLL := 32 45$(obj)/raid6int32.c: UNROLL := 32
46$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE 46$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE
47 $(call if_changed,unroll) 47 $(call if_changed,unroll)
48 48
49CFLAGS_raid6altivec1.o += $(altivec_flags) 49CFLAGS_raid6altivec1.o += $(altivec_flags)
50targets += raid6altivec1.c 50targets += raid6altivec1.c
51$(obj)/raid6altivec1.c: UNROLL := 1 51$(obj)/raid6altivec1.c: UNROLL := 1
52$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE 52$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
53 $(call if_changed,unroll) 53 $(call if_changed,unroll)
54 54
55CFLAGS_raid6altivec2.o += $(altivec_flags) 55CFLAGS_raid6altivec2.o += $(altivec_flags)
56targets += raid6altivec2.c 56targets += raid6altivec2.c
57$(obj)/raid6altivec2.c: UNROLL := 2 57$(obj)/raid6altivec2.c: UNROLL := 2
58$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE 58$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
59 $(call if_changed,unroll) 59 $(call if_changed,unroll)
60 60
61CFLAGS_raid6altivec4.o += $(altivec_flags) 61CFLAGS_raid6altivec4.o += $(altivec_flags)
62targets += raid6altivec4.c 62targets += raid6altivec4.c
63$(obj)/raid6altivec4.c: UNROLL := 4 63$(obj)/raid6altivec4.c: UNROLL := 4
64$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE 64$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
65 $(call if_changed,unroll) 65 $(call if_changed,unroll)
66 66
67CFLAGS_raid6altivec8.o += $(altivec_flags) 67CFLAGS_raid6altivec8.o += $(altivec_flags)
68targets += raid6altivec8.c 68targets += raid6altivec8.c
69$(obj)/raid6altivec8.c: UNROLL := 8 69$(obj)/raid6altivec8.c: UNROLL := 8
70$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE 70$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
71 $(call if_changed,unroll) 71 $(call if_changed,unroll)
72 72
73quiet_cmd_mktable = TABLE $@ 73quiet_cmd_mktable = TABLE $@
diff --git a/lib/raid6/raid6algos.c b/lib/raid6/raid6algos.c
index 866215ac7f25..1f8784bfd44d 100644
--- a/lib/raid6/raid6algos.c
+++ b/lib/raid6/raid6algos.c
@@ -17,6 +17,7 @@
17 */ 17 */
18 18
19#include <linux/raid/pq.h> 19#include <linux/raid/pq.h>
20#include <linux/gfp.h>
20#ifndef __KERNEL__ 21#ifndef __KERNEL__
21#include <sys/mman.h> 22#include <sys/mman.h>
22#include <stdio.h> 23#include <stdio.h>
@@ -31,25 +32,6 @@ EXPORT_SYMBOL(raid6_empty_zero_page);
31struct raid6_calls raid6_call; 32struct raid6_calls raid6_call;
32EXPORT_SYMBOL_GPL(raid6_call); 33EXPORT_SYMBOL_GPL(raid6_call);
33 34
34/* Various routine sets */
35extern const struct raid6_calls raid6_intx1;
36extern const struct raid6_calls raid6_intx2;
37extern const struct raid6_calls raid6_intx4;
38extern const struct raid6_calls raid6_intx8;
39extern const struct raid6_calls raid6_intx16;
40extern const struct raid6_calls raid6_intx32;
41extern const struct raid6_calls raid6_mmxx1;
42extern const struct raid6_calls raid6_mmxx2;
43extern const struct raid6_calls raid6_sse1x1;
44extern const struct raid6_calls raid6_sse1x2;
45extern const struct raid6_calls raid6_sse2x1;
46extern const struct raid6_calls raid6_sse2x2;
47extern const struct raid6_calls raid6_sse2x4;
48extern const struct raid6_calls raid6_altivec1;
49extern const struct raid6_calls raid6_altivec2;
50extern const struct raid6_calls raid6_altivec4;
51extern const struct raid6_calls raid6_altivec8;
52
53const struct raid6_calls * const raid6_algos[] = { 35const struct raid6_calls * const raid6_algos[] = {
54 &raid6_intx1, 36 &raid6_intx1,
55 &raid6_intx2, 37 &raid6_intx2,
@@ -169,3 +151,4 @@ static void raid6_exit(void)
169subsys_initcall(raid6_select_algo); 151subsys_initcall(raid6_select_algo);
170module_exit(raid6_exit); 152module_exit(raid6_exit);
171MODULE_LICENSE("GPL"); 153MODULE_LICENSE("GPL");
154MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");
diff --git a/lib/raid6/raid6altivec.uc b/lib/raid6/raid6altivec.uc
index 699dfeee4944..2654d5c854be 100644
--- a/lib/raid6/raid6altivec.uc
+++ b/lib/raid6/raid6altivec.uc
@@ -15,7 +15,7 @@
15 * 15 *
16 * $#-way unrolled portable integer math RAID-6 instruction set 16 * $#-way unrolled portable integer math RAID-6 instruction set
17 * 17 *
18 * This file is postprocessed using unroll.pl 18 * This file is postprocessed using unroll.awk
19 * 19 *
20 * <benh> hpa: in process, 20 * <benh> hpa: in process,
21 * you can just "steal" the vec unit with enable_kernel_altivec() (but 21 * you can just "steal" the vec unit with enable_kernel_altivec() (but
diff --git a/lib/raid6/raid6int.uc b/lib/raid6/raid6int.uc
index f9bf9cba357f..d1e276a14fab 100644
--- a/lib/raid6/raid6int.uc
+++ b/lib/raid6/raid6int.uc
@@ -15,7 +15,7 @@
15 * 15 *
16 * $#-way unrolled portable integer math RAID-6 instruction set 16 * $#-way unrolled portable integer math RAID-6 instruction set
17 * 17 *
18 * This file is postprocessed using unroll.pl 18 * This file is postprocessed using unroll.awk
19 */ 19 */
20 20
21#include <linux/raid/pq.h> 21#include <linux/raid/pq.h>
diff --git a/lib/raid6/raid6test/Makefile b/lib/raid6/raid6test/Makefile
index 58ffdf4f5161..2874cbef529d 100644
--- a/lib/raid6/raid6test/Makefile
+++ b/lib/raid6/raid6test/Makefile
@@ -7,7 +7,7 @@ CC = gcc
7OPTFLAGS = -O2 # Adjust as desired 7OPTFLAGS = -O2 # Adjust as desired
8CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS) 8CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS)
9LD = ld 9LD = ld
10PERL = perl 10AWK = awk
11AR = ar 11AR = ar
12RANLIB = ranlib 12RANLIB = ranlib
13 13
@@ -35,35 +35,35 @@ raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
35raid6test: test.c raid6.a 35raid6test: test.c raid6.a
36 $(CC) $(CFLAGS) -o raid6test $^ 36 $(CC) $(CFLAGS) -o raid6test $^
37 37
38raid6altivec1.c: raid6altivec.uc ../unroll.pl 38raid6altivec1.c: raid6altivec.uc ../unroll.awk
39 $(PERL) ../unroll.pl 1 < raid6altivec.uc > $@ 39 $(AWK) ../unroll.awk -vN=1 < raid6altivec.uc > $@
40 40
41raid6altivec2.c: raid6altivec.uc ../unroll.pl 41raid6altivec2.c: raid6altivec.uc ../unroll.awk
42 $(PERL) ../unroll.pl 2 < raid6altivec.uc > $@ 42 $(AWK) ../unroll.awk -vN=2 < raid6altivec.uc > $@
43 43
44raid6altivec4.c: raid6altivec.uc ../unroll.pl 44raid6altivec4.c: raid6altivec.uc ../unroll.awk
45 $(PERL) ../unroll.pl 4 < raid6altivec.uc > $@ 45 $(AWK) ../unroll.awk -vN=4 < raid6altivec.uc > $@
46 46
47raid6altivec8.c: raid6altivec.uc ../unroll.pl 47raid6altivec8.c: raid6altivec.uc ../unroll.awk
48 $(PERL) ../unroll.pl 8 < raid6altivec.uc > $@ 48 $(AWK) ../unroll.awk -vN=8 < raid6altivec.uc > $@
49 49
50raid6int1.c: raid6int.uc ../unroll.pl 50raid6int1.c: raid6int.uc ../unroll.awk
51 $(PERL) ../unroll.pl 1 < raid6int.uc > $@ 51 $(AWK) ../unroll.awk -vN=1 < raid6int.uc > $@
52 52
53raid6int2.c: raid6int.uc ../unroll.pl 53raid6int2.c: raid6int.uc ../unroll.awk
54 $(PERL) ../unroll.pl 2 < raid6int.uc > $@ 54 $(AWK) ../unroll.awk -vN=2 < raid6int.uc > $@
55 55
56raid6int4.c: raid6int.uc ../unroll.pl 56raid6int4.c: raid6int.uc ../unroll.awk
57 $(PERL) ../unroll.pl 4 < raid6int.uc > $@ 57 $(AWK) ../unroll.awk -vN=4 < raid6int.uc > $@
58 58
59raid6int8.c: raid6int.uc ../unroll.pl 59raid6int8.c: raid6int.uc ../unroll.awk
60 $(PERL) ../unroll.pl 8 < raid6int.uc > $@ 60 $(AWK) ../unroll.awk -vN=8 < raid6int.uc > $@
61 61
62raid6int16.c: raid6int.uc ../unroll.pl 62raid6int16.c: raid6int.uc ../unroll.awk
63 $(PERL) ../unroll.pl 16 < raid6int.uc > $@ 63 $(AWK) ../unroll.awk -vN=16 < raid6int.uc > $@
64 64
65raid6int32.c: raid6int.uc ../unroll.pl 65raid6int32.c: raid6int.uc ../unroll.awk
66 $(PERL) ../unroll.pl 32 < raid6int.uc > $@ 66 $(AWK) ../unroll.awk -vN=32 < raid6int.uc > $@
67 67
68raid6tables.c: mktables 68raid6tables.c: mktables
69 ./mktables > raid6tables.c 69 ./mktables > raid6tables.c
diff --git a/lib/raid6/unroll.awk b/lib/raid6/unroll.awk
new file mode 100644
index 000000000000..c6aa03631df8
--- /dev/null
+++ b/lib/raid6/unroll.awk
@@ -0,0 +1,20 @@
1
2# This filter requires one command line option of form -vN=n
3# where n must be a decimal number.
4#
5# Repeat each input line containing $$ n times, replacing $$ with 0...n-1.
6# Replace each $# with n, and each $* with a single $.
7
8BEGIN {
9 n = N + 0
10}
11{
12 if (/\$\$/) { rep = n } else { rep = 1 }
13 for (i = 0; i < rep; ++i) {
14 tmp = $0
15 gsub(/\$\$/, i, tmp)
16 gsub(/\$\#/, n, tmp)
17 gsub(/\$\*/, "$", tmp)
18 print tmp
19 }
20}
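Concretely, with -vN=2 the filter turns a .uc line like the first one below (made up in the style of raid6int.uc) into the two lines after it:

	wp$$ = wq$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];

	wp0 = wq0 = *(unative_t *)&dptr[z0][d+0*NSIZE];
	wp1 = wq1 = *(unative_t *)&dptr[z0][d+1*NSIZE];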
diff --git a/lib/raid6/unroll.pl b/lib/raid6/unroll.pl
deleted file mode 100644
index 3acc710a20ea..000000000000
--- a/lib/raid6/unroll.pl
+++ /dev/null
@@ -1,24 +0,0 @@
1#!/usr/bin/perl
2#
3# Take a piece of C code and for each line which contains the sequence $$
4# repeat n times with $ replaced by 0...n-1; the sequence $# is replaced
5# by the unrolling factor, and $* with a single $
6#
7
8($n) = @ARGV;
9$n += 0;
10
11while ( defined($line = <STDIN>) ) {
12 if ( $line =~ /\$\$/ ) {
13 $rep = $n;
14 } else {
15 $rep = 1;
16 }
17 for ( $i = 0 ; $i < $rep ; $i++ ) {
18 $tmp = $line;
19 $tmp =~ s/\$\$/$i/g;
20 $tmp =~ s/\$\#/$n/g;
21 $tmp =~ s/\$\*/\$/g;
22 print $tmp;
23 }
24}
diff --git a/lib/random32.c b/lib/random32.c
index 217d5c4b666d..fc3545a32771 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -39,13 +39,16 @@
39#include <linux/jiffies.h> 39#include <linux/jiffies.h>
40#include <linux/random.h> 40#include <linux/random.h>
41 41
42struct rnd_state {
43 u32 s1, s2, s3;
44};
45
46static DEFINE_PER_CPU(struct rnd_state, net_rand_state); 42static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
47 43
48static u32 __random32(struct rnd_state *state) 44/**
45 * prandom32 - seeded pseudo-random number generator.
46 * @state: pointer to state structure holding seeded state.
47 *
48 * This is used for pseudo-randomness with no outside seeding.
49 * For more random results, use random32().
50 */
51u32 prandom32(struct rnd_state *state)
49{ 52{
50#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b) 53#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
51 54
@@ -55,14 +58,7 @@ static u32 __random32(struct rnd_state *state)
55 58
56 return (state->s1 ^ state->s2 ^ state->s3); 59 return (state->s1 ^ state->s2 ^ state->s3);
57} 60}
58 61EXPORT_SYMBOL(prandom32);
59/*
60 * Handle minimum values for seeds
61 */
62static inline u32 __seed(u32 x, u32 m)
63{
64 return (x < m) ? x + m : x;
65}
66 62
67/** 63/**
68 * random32 - pseudo random number generator 64 * random32 - pseudo random number generator
@@ -75,7 +71,7 @@ u32 random32(void)
75{ 71{
76 unsigned long r; 72 unsigned long r;
77 struct rnd_state *state = &get_cpu_var(net_rand_state); 73 struct rnd_state *state = &get_cpu_var(net_rand_state);
78 r = __random32(state); 74 r = prandom32(state);
79 put_cpu_var(state); 75 put_cpu_var(state);
80 return r; 76 return r;
81} 77}
@@ -118,12 +114,12 @@ static int __init random32_init(void)
118 state->s3 = __seed(LCG(state->s2), 15); 114 state->s3 = __seed(LCG(state->s2), 15);
119 115
120 /* "warm it up" */ 116 /* "warm it up" */
121 __random32(state); 117 prandom32(state);
122 __random32(state); 118 prandom32(state);
123 __random32(state); 119 prandom32(state);
124 __random32(state); 120 prandom32(state);
125 __random32(state); 121 prandom32(state);
126 __random32(state); 122 prandom32(state);
127 } 123 }
128 return 0; 124 return 0;
129} 125}
@@ -131,7 +127,7 @@ core_initcall(random32_init);
131 127
132/* 128/*
133 * Generate better values after random number generator 129 * Generate better values after random number generator
134 * is fully initalized. 130 * is fully initialized.
135 */ 131 */
136static int __init random32_reseed(void) 132static int __init random32_reseed(void)
137{ 133{
@@ -147,7 +143,7 @@ static int __init random32_reseed(void)
147 state->s3 = __seed(seeds[2], 15); 143 state->s3 = __seed(seeds[2], 15);
148 144
149 /* mix it in */ 145 /* mix it in */
150 __random32(state); 146 prandom32(state);
151 } 147 }
152 return 0; 148 return 0;
153} 149}
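For readers without the full file at hand, a standalone sketch of the three-stage Tausworthe generator that prandom32() implements; the shift/mask constants below are L'Ecuyer's taus88 parameters and are assumed, not copied from the elided part of this hunk:

	struct rnd_state { u32 s1, s2, s3; };

	#define TAUSWORTHE(s, a, b, c, d) \
		(((s & c) << d) ^ (((s << a) ^ s) >> b))

	static u32 taus88_next(struct rnd_state *st)
	{
		st->s1 = TAUSWORTHE(st->s1, 13, 19, 4294967294U, 12);
		st->s2 = TAUSWORTHE(st->s2,  2, 25, 4294967288U,  4);
		st->s3 = TAUSWORTHE(st->s3,  3, 11, 4294967280U, 17);
		return st->s1 ^ st->s2 ^ st->s3;	/* combine stages */
	}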
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 26187edcc7ea..027a03f4c56d 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -7,51 +7,61 @@
7 * parameter. Now every user can use their own standalone ratelimit_state. 7 * parameter. Now every user can use their own standalone ratelimit_state.
8 * 8 *
9 * This file is released under the GPLv2. 9 * This file is released under the GPLv2.
10 *
11 */ 10 */
12 11
13#include <linux/kernel.h> 12#include <linux/ratelimit.h>
14#include <linux/jiffies.h> 13#include <linux/jiffies.h>
15#include <linux/module.h> 14#include <linux/module.h>
16 15
17static DEFINE_SPINLOCK(ratelimit_lock);
18
19/* 16/*
20 * __ratelimit - rate limiting 17 * __ratelimit - rate limiting
21 * @rs: ratelimit_state data 18 * @rs: ratelimit_state data
19 * @func: name of calling function
20 *
21 * This enforces a rate limit: not more than @rs->burst callbacks
22 * in every @rs->interval
22 * 23 *
23 * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks 24 * RETURNS:
24 * in every @rs->ratelimit_jiffies 25 * 0 means callbacks will be suppressed.
26 * 1 means go ahead and do it.
25 */ 27 */
26int __ratelimit(struct ratelimit_state *rs) 28int ___ratelimit(struct ratelimit_state *rs, const char *func)
27{ 29{
28 unsigned long flags; 30 unsigned long flags;
31 int ret;
29 32
30 if (!rs->interval) 33 if (!rs->interval)
31 return 1; 34 return 1;
32 35
33 spin_lock_irqsave(&ratelimit_lock, flags); 36 /*
37 * If we contend on this state's lock then almost
38 * by definition we are too busy to print a message,
39 * in addition to the one that will be printed by
40 * the entity that is holding the lock already:
41 */
42 if (!spin_trylock_irqsave(&rs->lock, flags))
43 return 0;
44
34 if (!rs->begin) 45 if (!rs->begin)
35 rs->begin = jiffies; 46 rs->begin = jiffies;
36 47
37 if (time_is_before_jiffies(rs->begin + rs->interval)) { 48 if (time_is_before_jiffies(rs->begin + rs->interval)) {
38 if (rs->missed) 49 if (rs->missed)
39 printk(KERN_WARNING "%s: %d callbacks suppressed\n", 50 printk(KERN_WARNING "%s: %d callbacks suppressed\n",
40 __func__, rs->missed); 51 func, rs->missed);
41 rs->begin = 0; 52 rs->begin = 0;
42 rs->printed = 0; 53 rs->printed = 0;
43 rs->missed = 0; 54 rs->missed = 0;
44 } 55 }
45 if (rs->burst && rs->burst > rs->printed) 56 if (rs->burst && rs->burst > rs->printed) {
46 goto print; 57 rs->printed++;
47 58 ret = 1;
48 rs->missed++; 59 } else {
49 spin_unlock_irqrestore(&ratelimit_lock, flags); 60 rs->missed++;
50 return 0; 61 ret = 0;
62 }
63 spin_unlock_irqrestore(&rs->lock, flags);
51 64
52print: 65 return ret;
53 rs->printed++;
54 spin_unlock_irqrestore(&ratelimit_lock, flags);
55 return 1;
56} 66}
57EXPORT_SYMBOL(__ratelimit); 67EXPORT_SYMBOL(___ratelimit);
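A hedged usage sketch of the reworked interface; DEFINE_RATELIMIT_STATE() lives in <linux/ratelimit.h>, and the driver name is made up. Most callers presumably keep using the __ratelimit() wrapper, which would pass __func__ through to ___ratelimit():

	/* at most 10 messages per 5 seconds, then suppression */
	static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);

	if (___ratelimit(&my_rs, __func__))
		printk(KERN_WARNING "mydrv: transient error\n");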
diff --git a/lib/rational.c b/lib/rational.c
index b3c099b5478e..3ed247b80662 100644
--- a/lib/rational.c
+++ b/lib/rational.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/rational.h> 9#include <linux/rational.h>
10#include <linux/module.h>
10 11
11/* 12/*
12 * calculate best rational approximation for a given fraction 13 * calculate best rational approximation for a given fraction
diff --git a/lib/rbtree.c b/lib/rbtree.c
index e2aa3be29858..4693f79195d3 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -283,6 +283,74 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
283} 283}
284EXPORT_SYMBOL(rb_erase); 284EXPORT_SYMBOL(rb_erase);
285 285
286static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data)
287{
288 struct rb_node *parent;
289
290up:
291 func(node, data);
292 parent = rb_parent(node);
293 if (!parent)
294 return;
295
296 if (node == parent->rb_left && parent->rb_right)
297 func(parent->rb_right, data);
298 else if (parent->rb_left)
299 func(parent->rb_left, data);
300
301 node = parent;
302 goto up;
303}
304
305/*
306 * after inserting @node into the tree, update the tree to account for
307 * both the new entry and any damage done by rebalance
308 */
309void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
310{
311 if (node->rb_left)
312 node = node->rb_left;
313 else if (node->rb_right)
314 node = node->rb_right;
315
316 rb_augment_path(node, func, data);
317}
318
319/*
320 * before removing the node, find the deepest node on the rebalance path
321 * that will still be there after @node gets removed
322 */
323struct rb_node *rb_augment_erase_begin(struct rb_node *node)
324{
325 struct rb_node *deepest;
326
327 if (!node->rb_right && !node->rb_left)
328 deepest = rb_parent(node);
329 else if (!node->rb_right)
330 deepest = node->rb_left;
331 else if (!node->rb_left)
332 deepest = node->rb_right;
333 else {
334 deepest = rb_next(node);
335 if (deepest->rb_right)
336 deepest = deepest->rb_right;
337 else if (rb_parent(deepest) != node)
338 deepest = rb_parent(deepest);
339 }
340
341 return deepest;
342}
343
344/*
345 * after removal, update the tree to account for the removed entry
346 * and any rebalance damage.
347 */
348void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
349{
350 if (node)
351 rb_augment_path(node, func, data);
352}
353
286/* 354/*
287 * This function returns the first node (in sort order) of the tree. 355 * This function returns the first node (in sort order) of the tree.
288 */ 356 */
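A hedged sketch of how the new augment hooks are meant to be driven: the callback maintains, per node, the maximum of a per-entry value over that node's subtree (the classic interval-tree use). struct my_node and its fields are illustrative, not from this patch:

	struct my_node {
		struct rb_node rb;
		u64 last;		/* per-entry value */
		u64 subtree_max;	/* max of 'last' over the subtree */
	};

	static void my_augment_cb(struct rb_node *rb, void *unused)
	{
		struct my_node *n = rb_entry(rb, struct my_node, rb);
		u64 max = n->last;

		if (rb->rb_left) {
			struct my_node *l = rb_entry(rb->rb_left,
						     struct my_node, rb);
			if (l->subtree_max > max)
				max = l->subtree_max;
		}
		if (rb->rb_right) {
			struct my_node *r = rb_entry(rb->rb_right,
						     struct my_node, rb);
			if (r->subtree_max > max)
				max = r->subtree_max;
		}
		n->subtree_max = max;
	}

After rb_insert_color(), the caller would run rb_augment_insert(&n->rb, my_augment_cb, NULL); around removal, it would take deepest = rb_augment_erase_begin(&n->rb) before rb_erase(), then call rb_augment_erase_end(deepest, my_augment_cb, NULL).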
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 9df3ca56db11..ffc9fc7f3b05 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,6 +17,19 @@ struct rwsem_waiter {
17#define RWSEM_WAITING_FOR_WRITE 0x00000002 17#define RWSEM_WAITING_FOR_WRITE 0x00000002
18}; 18};
19 19
20int rwsem_is_locked(struct rw_semaphore *sem)
21{
22 int ret = 1;
23 unsigned long flags;
24
25 if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
26 ret = (sem->activity != 0);
27 spin_unlock_irqrestore(&sem->wait_lock, flags);
28 }
29 return ret;
30}
31EXPORT_SYMBOL(rwsem_is_locked);
32
20/* 33/*
21 * initialise the semaphore 34 * initialise the semaphore
22 */ 35 */
@@ -34,6 +47,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
34 spin_lock_init(&sem->wait_lock); 47 spin_lock_init(&sem->wait_lock);
35 INIT_LIST_HEAD(&sem->wait_list); 48 INIT_LIST_HEAD(&sem->wait_list);
36} 49}
50EXPORT_SYMBOL(__init_rwsem);
37 51
38/* 52/*
39 * handle the lock release when processes blocked on it that can now run 53 * handle the lock release when processes blocked on it that can now run
@@ -129,13 +143,14 @@ void __sched __down_read(struct rw_semaphore *sem)
129{ 143{
130 struct rwsem_waiter waiter; 144 struct rwsem_waiter waiter;
131 struct task_struct *tsk; 145 struct task_struct *tsk;
146 unsigned long flags;
132 147
133 spin_lock_irq(&sem->wait_lock); 148 spin_lock_irqsave(&sem->wait_lock, flags);
134 149
135 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 150 if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
136 /* granted */ 151 /* granted */
137 sem->activity++; 152 sem->activity++;
138 spin_unlock_irq(&sem->wait_lock); 153 spin_unlock_irqrestore(&sem->wait_lock, flags);
139 goto out; 154 goto out;
140 } 155 }
141 156
@@ -150,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
150 list_add_tail(&waiter.list, &sem->wait_list); 165 list_add_tail(&waiter.list, &sem->wait_list);
151 166
152 /* we don't need to touch the semaphore struct anymore */ 167 /* we don't need to touch the semaphore struct anymore */
153 spin_unlock_irq(&sem->wait_lock); 168 spin_unlock_irqrestore(&sem->wait_lock, flags);
154 169
155 /* wait to be given the lock */ 170 /* wait to be given the lock */
156 for (;;) { 171 for (;;) {
@@ -195,13 +210,14 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
195{ 210{
196 struct rwsem_waiter waiter; 211 struct rwsem_waiter waiter;
197 struct task_struct *tsk; 212 struct task_struct *tsk;
213 unsigned long flags;
198 214
199 spin_lock_irq(&sem->wait_lock); 215 spin_lock_irqsave(&sem->wait_lock, flags);
200 216
201 if (sem->activity == 0 && list_empty(&sem->wait_list)) { 217 if (sem->activity == 0 && list_empty(&sem->wait_list)) {
202 /* granted */ 218 /* granted */
203 sem->activity = -1; 219 sem->activity = -1;
204 spin_unlock_irq(&sem->wait_lock); 220 spin_unlock_irqrestore(&sem->wait_lock, flags);
205 goto out; 221 goto out;
206 } 222 }
207 223
@@ -216,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
216 list_add_tail(&waiter.list, &sem->wait_list); 232 list_add_tail(&waiter.list, &sem->wait_list);
217 233
218 /* we don't need to touch the semaphore struct anymore */ 234 /* we don't need to touch the semaphore struct anymore */
219 spin_unlock_irq(&sem->wait_lock); 235 spin_unlock_irqrestore(&sem->wait_lock, flags);
220 236
221 /* wait to be given the lock */ 237 /* wait to be given the lock */
222 for (;;) { 238 for (;;) {
@@ -305,12 +321,3 @@ void __downgrade_write(struct rw_semaphore *sem)
305 spin_unlock_irqrestore(&sem->wait_lock, flags); 321 spin_unlock_irqrestore(&sem->wait_lock, flags);
306} 322}
307 323
308EXPORT_SYMBOL(__init_rwsem);
309EXPORT_SYMBOL(__down_read);
310EXPORT_SYMBOL(__down_read_trylock);
311EXPORT_SYMBOL(__down_write_nested);
312EXPORT_SYMBOL(__down_write);
313EXPORT_SYMBOL(__down_write_trylock);
314EXPORT_SYMBOL(__up_read);
315EXPORT_SYMBOL(__up_write);
316EXPORT_SYMBOL(__downgrade_write);
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 3e3365e5665e..ceba8e28807a 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -136,9 +136,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
136 out: 136 out:
137 return sem; 137 return sem;
138 138
139 /* undo the change to count, but check for a transition 1->0 */ 139 /* undo the change to the active count, but check for a transition
140 * 1->0 */
140 undo: 141 undo:
141 if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0) 142 if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
142 goto out; 143 goto out;
143 goto try_again; 144 goto try_again;
144} 145}
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 0d475d8167bf..9afa25b52a83 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -7,6 +7,7 @@
7 * Version 2. See the file COPYING for more details. 7 * Version 2. See the file COPYING for more details.
8 */ 8 */
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/slab.h>
10#include <linux/scatterlist.h> 11#include <linux/scatterlist.h>
11#include <linux/highmem.h> 12#include <linux/highmem.h>
12 13
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 238e72a18ce1..fdc77c82f922 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -15,7 +15,7 @@ void show_mem(void)
15 unsigned long total = 0, reserved = 0, shared = 0, 15 unsigned long total = 0, reserved = 0, shared = 0,
16 nonshared = 0, highmem = 0; 16 nonshared = 0, highmem = 0;
17 17
18 printk(KERN_INFO "Mem-Info:\n"); 18 printk("Mem-Info:\n");
19 show_free_areas(); 19 show_free_areas();
20 20
21 for_each_online_pgdat(pgdat) { 21 for_each_online_pgdat(pgdat) {
@@ -49,15 +49,15 @@ void show_mem(void)
49 pgdat_resize_unlock(pgdat, &flags); 49 pgdat_resize_unlock(pgdat, &flags);
50 } 50 }
51 51
52 printk(KERN_INFO "%lu pages RAM\n", total); 52 printk("%lu pages RAM\n", total);
53#ifdef CONFIG_HIGHMEM 53#ifdef CONFIG_HIGHMEM
54 printk(KERN_INFO "%lu pages HighMem\n", highmem); 54 printk("%lu pages HighMem\n", highmem);
55#endif 55#endif
56 printk(KERN_INFO "%lu pages reserved\n", reserved); 56 printk("%lu pages reserved\n", reserved);
57 printk(KERN_INFO "%lu pages shared\n", shared); 57 printk("%lu pages shared\n", shared);
58 printk(KERN_INFO "%lu pages non-shared\n", nonshared); 58 printk("%lu pages non-shared\n", nonshared);
59#ifdef CONFIG_QUICKLIST 59#ifdef CONFIG_QUICKLIST
60 printk(KERN_INFO "%lu pages in pagetable cache\n", 60 printk("%lu pages in pagetable cache\n",
61 quicklist_total_size()); 61 quicklist_total_size());
62#endif 62#endif
63} 63}
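
Dropping the explicit KERN_INFO means every line of show_mem() prints at the default message loglevel rather than being pinned to INFO, presumably so the dump is filtered (or not) as one unit with the surrounding memory-debug output. For reference:

    printk(KERN_INFO "always loglevel 6\n");
    printk("default message loglevel, typically 4 (KERN_WARNING)\n");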
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 9c4b0256490b..4755b98b6dfb 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -13,8 +13,8 @@
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/module.h> 14#include <linux/module.h>
15 15
16void __spin_lock_init(spinlock_t *lock, const char *name, 16void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
17 struct lock_class_key *key) 17 struct lock_class_key *key)
18{ 18{
19#ifdef CONFIG_DEBUG_LOCK_ALLOC 19#ifdef CONFIG_DEBUG_LOCK_ALLOC
20 /* 20 /*
@@ -23,13 +23,13 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
23 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); 23 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
24 lockdep_init_map(&lock->dep_map, name, key, 0); 24 lockdep_init_map(&lock->dep_map, name, key, 0);
25#endif 25#endif
26 lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 26 lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
27 lock->magic = SPINLOCK_MAGIC; 27 lock->magic = SPINLOCK_MAGIC;
28 lock->owner = SPINLOCK_OWNER_INIT; 28 lock->owner = SPINLOCK_OWNER_INIT;
29 lock->owner_cpu = -1; 29 lock->owner_cpu = -1;
30} 30}
31 31
32EXPORT_SYMBOL(__spin_lock_init); 32EXPORT_SYMBOL(__raw_spin_lock_init);
33 33
34void __rwlock_init(rwlock_t *lock, const char *name, 34void __rwlock_init(rwlock_t *lock, const char *name,
35 struct lock_class_key *key) 35 struct lock_class_key *key)
@@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
41 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); 41 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
42 lockdep_init_map(&lock->dep_map, name, key, 0); 42 lockdep_init_map(&lock->dep_map, name, key, 0);
43#endif 43#endif
44 lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; 44 lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
45 lock->magic = RWLOCK_MAGIC; 45 lock->magic = RWLOCK_MAGIC;
46 lock->owner = SPINLOCK_OWNER_INIT; 46 lock->owner = SPINLOCK_OWNER_INIT;
47 lock->owner_cpu = -1; 47 lock->owner_cpu = -1;
@@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
49 49
50EXPORT_SYMBOL(__rwlock_init); 50EXPORT_SYMBOL(__rwlock_init);
51 51
52static void spin_bug(spinlock_t *lock, const char *msg) 52static void spin_bug(raw_spinlock_t *lock, const char *msg)
53{ 53{
54 struct task_struct *owner = NULL; 54 struct task_struct *owner = NULL;
55 55
@@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg)
73#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) 73#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
74 74
75static inline void 75static inline void
76debug_spin_lock_before(spinlock_t *lock) 76debug_spin_lock_before(raw_spinlock_t *lock)
77{ 77{
78 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); 78 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
79 SPIN_BUG_ON(lock->owner == current, lock, "recursion"); 79 SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
81 lock, "cpu recursion"); 81 lock, "cpu recursion");
82} 82}
83 83
84static inline void debug_spin_lock_after(spinlock_t *lock) 84static inline void debug_spin_lock_after(raw_spinlock_t *lock)
85{ 85{
86 lock->owner_cpu = raw_smp_processor_id(); 86 lock->owner_cpu = raw_smp_processor_id();
87 lock->owner = current; 87 lock->owner = current;
88} 88}
89 89
90static inline void debug_spin_unlock(spinlock_t *lock) 90static inline void debug_spin_unlock(raw_spinlock_t *lock)
91{ 91{
92 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); 92 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
93 SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked"); 93 SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
94 SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); 94 SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
95 SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), 95 SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
96 lock, "wrong CPU"); 96 lock, "wrong CPU");
@@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock)
98 lock->owner_cpu = -1; 98 lock->owner_cpu = -1;
99} 99}
100 100
101static void __spin_lock_debug(spinlock_t *lock) 101static void __spin_lock_debug(raw_spinlock_t *lock)
102{ 102{
103 u64 i; 103 u64 i;
104 u64 loops = loops_per_jiffy * HZ; 104 u64 loops = loops_per_jiffy * HZ;
@@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock)
106 106
107 for (;;) { 107 for (;;) {
108 for (i = 0; i < loops; i++) { 108 for (i = 0; i < loops; i++) {
109 if (__raw_spin_trylock(&lock->raw_lock)) 109 if (arch_spin_trylock(&lock->raw_lock))
110 return; 110 return;
111 __delay(1); 111 __delay(1);
112 } 112 }
@@ -125,17 +125,17 @@ static void __spin_lock_debug(spinlock_t *lock)
125 } 125 }
126} 126}
127 127
128void _raw_spin_lock(spinlock_t *lock) 128void do_raw_spin_lock(raw_spinlock_t *lock)
129{ 129{
130 debug_spin_lock_before(lock); 130 debug_spin_lock_before(lock);
131 if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) 131 if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
132 __spin_lock_debug(lock); 132 __spin_lock_debug(lock);
133 debug_spin_lock_after(lock); 133 debug_spin_lock_after(lock);
134} 134}
135 135
136int _raw_spin_trylock(spinlock_t *lock) 136int do_raw_spin_trylock(raw_spinlock_t *lock)
137{ 137{
138 int ret = __raw_spin_trylock(&lock->raw_lock); 138 int ret = arch_spin_trylock(&lock->raw_lock);
139 139
140 if (ret) 140 if (ret)
141 debug_spin_lock_after(lock); 141 debug_spin_lock_after(lock);
@@ -148,10 +148,10 @@ int _raw_spin_trylock(spinlock_t *lock)
148 return ret; 148 return ret;
149} 149}
150 150
151void _raw_spin_unlock(spinlock_t *lock) 151void do_raw_spin_unlock(raw_spinlock_t *lock)
152{ 152{
153 debug_spin_unlock(lock); 153 debug_spin_unlock(lock);
154 __raw_spin_unlock(&lock->raw_lock); 154 arch_spin_unlock(&lock->raw_lock);
155} 155}
156 156
157static void rwlock_bug(rwlock_t *lock, const char *msg) 157static void rwlock_bug(rwlock_t *lock, const char *msg)
@@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock)
176 176
177 for (;;) { 177 for (;;) {
178 for (i = 0; i < loops; i++) { 178 for (i = 0; i < loops; i++) {
179 if (__raw_read_trylock(&lock->raw_lock)) 179 if (arch_read_trylock(&lock->raw_lock))
180 return; 180 return;
181 __delay(1); 181 __delay(1);
182 } 182 }
@@ -193,15 +193,15 @@ static void __read_lock_debug(rwlock_t *lock)
193} 193}
194#endif 194#endif
195 195
196void _raw_read_lock(rwlock_t *lock) 196void do_raw_read_lock(rwlock_t *lock)
197{ 197{
198 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); 198 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
199 __raw_read_lock(&lock->raw_lock); 199 arch_read_lock(&lock->raw_lock);
200} 200}
201 201
202int _raw_read_trylock(rwlock_t *lock) 202int do_raw_read_trylock(rwlock_t *lock)
203{ 203{
204 int ret = __raw_read_trylock(&lock->raw_lock); 204 int ret = arch_read_trylock(&lock->raw_lock);
205 205
206#ifndef CONFIG_SMP 206#ifndef CONFIG_SMP
207 /* 207 /*
@@ -212,10 +212,10 @@ int _raw_read_trylock(rwlock_t *lock)
212 return ret; 212 return ret;
213} 213}
214 214
215void _raw_read_unlock(rwlock_t *lock) 215void do_raw_read_unlock(rwlock_t *lock)
216{ 216{
217 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); 217 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
218 __raw_read_unlock(&lock->raw_lock); 218 arch_read_unlock(&lock->raw_lock);
219} 219}
220 220
221static inline void debug_write_lock_before(rwlock_t *lock) 221static inline void debug_write_lock_before(rwlock_t *lock)
@@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock)
251 251
252 for (;;) { 252 for (;;) {
253 for (i = 0; i < loops; i++) { 253 for (i = 0; i < loops; i++) {
254 if (__raw_write_trylock(&lock->raw_lock)) 254 if (arch_write_trylock(&lock->raw_lock))
255 return; 255 return;
256 __delay(1); 256 __delay(1);
257 } 257 }
@@ -268,16 +268,16 @@ static void __write_lock_debug(rwlock_t *lock)
268} 268}
269#endif 269#endif
270 270
271void _raw_write_lock(rwlock_t *lock) 271void do_raw_write_lock(rwlock_t *lock)
272{ 272{
273 debug_write_lock_before(lock); 273 debug_write_lock_before(lock);
274 __raw_write_lock(&lock->raw_lock); 274 arch_write_lock(&lock->raw_lock);
275 debug_write_lock_after(lock); 275 debug_write_lock_after(lock);
276} 276}
277 277
278int _raw_write_trylock(rwlock_t *lock) 278int do_raw_write_trylock(rwlock_t *lock)
279{ 279{
280 int ret = __raw_write_trylock(&lock->raw_lock); 280 int ret = arch_write_trylock(&lock->raw_lock);
281 281
282 if (ret) 282 if (ret)
283 debug_write_lock_after(lock); 283 debug_write_lock_after(lock);
@@ -290,8 +290,8 @@ int _raw_write_trylock(rwlock_t *lock)
290 return ret; 290 return ret;
291} 291}
292 292
293void _raw_write_unlock(rwlock_t *lock) 293void do_raw_write_unlock(rwlock_t *lock)
294{ 294{
295 debug_write_unlock(lock); 295 debug_write_unlock(lock);
296 __raw_write_unlock(&lock->raw_lock); 296 arch_write_unlock(&lock->raw_lock);
297} 297}
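
The renames in lib/spinlock_debug.c track the spinlock type split: the arch-provided type is now arch_spinlock_t, the kernel's always-spinning lock is raw_spinlock_t, and spinlock_t wraps the latter (so a preempt-rt tree can turn spinlock_t into a sleeping lock without touching arch code). The layering, simplified here with the debug/lockdep fields omitted and an arch-specific lock word assumed:

    typedef struct arch_spinlock {
            unsigned int slock;             /* arch ticket/flag word */
    } arch_spinlock_t;

    typedef struct raw_spinlock {
            arch_spinlock_t raw_lock;       /* always spins */
    } raw_spinlock_t;

    typedef struct spinlock {
            struct raw_spinlock rlock;      /* may become a sleeping lock on -rt */
    } spinlock_t;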
diff --git a/lib/string.c b/lib/string.c
index b19b87af65a3..f71bead1be3e 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -36,25 +36,21 @@ int strnicmp(const char *s1, const char *s2, size_t len)
36 /* Yes, Virginia, it had better be unsigned */ 36 /* Yes, Virginia, it had better be unsigned */
37 unsigned char c1, c2; 37 unsigned char c1, c2;
38 38
39 c1 = c2 = 0; 39 if (!len)
40 if (len) { 40 return 0;
41 do { 41
42 c1 = *s1; 42 do {
43 c2 = *s2; 43 c1 = *s1++;
44 s1++; 44 c2 = *s2++;
45 s2++; 45 if (!c1 || !c2)
46 if (!c1) 46 break;
47 break; 47 if (c1 == c2)
48 if (!c2) 48 continue;
49 break; 49 c1 = tolower(c1);
50 if (c1 == c2) 50 c2 = tolower(c2);
51 continue; 51 if (c1 != c2)
52 c1 = tolower(c1); 52 break;
53 c2 = tolower(c2); 53 } while (--len);
54 if (c1 != c2)
55 break;
56 } while (--len);
57 }
58 return (int)c1 - (int)c2; 54 return (int)c1 - (int)c2;
59} 55}
60EXPORT_SYMBOL(strnicmp); 56EXPORT_SYMBOL(strnicmp);
@@ -246,13 +242,17 @@ EXPORT_SYMBOL(strlcat);
246#undef strcmp 242#undef strcmp
247int strcmp(const char *cs, const char *ct) 243int strcmp(const char *cs, const char *ct)
248{ 244{
249 signed char __res; 245 unsigned char c1, c2;
250 246
251 while (1) { 247 while (1) {
252 if ((__res = *cs - *ct++) != 0 || !*cs++) 248 c1 = *cs++;
249 c2 = *ct++;
250 if (c1 != c2)
251 return c1 < c2 ? -1 : 1;
252 if (!c1)
253 break; 253 break;
254 } 254 }
255 return __res; 255 return 0;
256} 256}
257EXPORT_SYMBOL(strcmp); 257EXPORT_SYMBOL(strcmp);
258#endif 258#endif
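
The strcmp() rewrite fixes a sign bug: `*cs - *ct` can overflow the signed char __res for bytes above 0x7f, flipping the sign of the result. Comparing as unsigned char and returning a fixed -1/0/1 avoids that (the standard only requires a negative, zero, or positive value). An illustration of the overflow, with example byte values:

    #include <stdio.h>

    int main(void)
    {
            unsigned char c1 = 0x61;        /* 'a' */
            unsigned char c2 = 0xe9;        /* e.g. Latin-1 e-acute */

            signed char old = c1 - c2;      /* 0x61 - 0xe9 = -136, wraps to +120 */
            printf("old: %d (wrong sign)\n", old);
            printf("new: %d\n", c1 < c2 ? -1 : 1);  /* -1, correct */
            return 0;
    }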
@@ -266,14 +266,18 @@ EXPORT_SYMBOL(strcmp);
266 */ 266 */
267int strncmp(const char *cs, const char *ct, size_t count) 267int strncmp(const char *cs, const char *ct, size_t count)
268{ 268{
269 signed char __res = 0; 269 unsigned char c1, c2;
270 270
271 while (count) { 271 while (count) {
272 if ((__res = *cs - *ct++) != 0 || !*cs++) 272 c1 = *cs++;
273 c2 = *ct++;
274 if (c1 != c2)
275 return c1 < c2 ? -1 : 1;
276 if (!c1)
273 break; 277 break;
274 count--; 278 count--;
275 } 279 }
276 return __res; 280 return 0;
277} 281}
278EXPORT_SYMBOL(strncmp); 282EXPORT_SYMBOL(strncmp);
279#endif 283#endif
@@ -330,20 +334,34 @@ EXPORT_SYMBOL(strnchr);
330#endif 334#endif
331 335
332/** 336/**
333 * strstrip - Removes leading and trailing whitespace from @s. 337 * skip_spaces - Removes leading whitespace from @str.
338 * @str: The string to be stripped.
339 *
340 * Returns a pointer to the first non-whitespace character in @str.
341 */
342char *skip_spaces(const char *str)
343{
344 while (isspace(*str))
345 ++str;
346 return (char *)str;
347}
348EXPORT_SYMBOL(skip_spaces);
349
350/**
351 * strim - Removes leading and trailing whitespace from @s.
334 * @s: The string to be stripped. 352 * @s: The string to be stripped.
335 * 353 *
336 * Note that the first trailing whitespace is replaced with a %NUL-terminator 354 * Note that the first trailing whitespace is replaced with a %NUL-terminator
337 * in the given string @s. Returns a pointer to the first non-whitespace 355 * in the given string @s. Returns a pointer to the first non-whitespace
338 * character in @s. 356 * character in @s.
339 */ 357 */
340char *strstrip(char *s) 358char *strim(char *s)
341{ 359{
342 size_t size; 360 size_t size;
343 char *end; 361 char *end;
344 362
363 s = skip_spaces(s);
345 size = strlen(s); 364 size = strlen(s);
346
347 if (!size) 365 if (!size)
348 return s; 366 return s;
349 367
@@ -352,12 +370,9 @@ char *strstrip(char *s)
352 end--; 370 end--;
353 *(end + 1) = '\0'; 371 *(end + 1) = '\0';
354 372
355 while (*s && isspace(*s))
356 s++;
357
358 return s; 373 return s;
359} 374}
360EXPORT_SYMBOL(strstrip); 375EXPORT_SYMBOL(strim);
361 376
362#ifndef __HAVE_ARCH_STRLEN 377#ifndef __HAVE_ARCH_STRLEN
363/** 378/**
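
strstrip() becomes strim() and picks up the new skip_spaces() helper for the leading side; the semantics are unchanged: trailing whitespace is cut by writing a NUL in place, and the returned pointer skips the leading run. Usage:

    char buf[] = "  hello world \n";
    char *s = strim(buf);   /* buf becomes "  hello world", s points at "hello world" */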
@@ -648,7 +663,7 @@ EXPORT_SYMBOL(memscan);
648 */ 663 */
649char *strstr(const char *s1, const char *s2) 664char *strstr(const char *s1, const char *s2)
650{ 665{
651 int l1, l2; 666 size_t l1, l2;
652 667
653 l2 = strlen(s2); 668 l2 = strlen(s2);
654 if (!l2) 669 if (!l2)
@@ -665,6 +680,31 @@ char *strstr(const char *s1, const char *s2)
665EXPORT_SYMBOL(strstr); 680EXPORT_SYMBOL(strstr);
666#endif 681#endif
667 682
683#ifndef __HAVE_ARCH_STRNSTR
684/**
685 * strnstr - Find the first substring in a length-limited string
686 * @s1: The string to be searched
687 * @s2: The string to search for
688 * @len: the maximum number of characters to search
689 */
690char *strnstr(const char *s1, const char *s2, size_t len)
691{
692 size_t l2;
693
694 l2 = strlen(s2);
695 if (!l2)
696 return (char *)s1;
697 while (len >= l2) {
698 len--;
699 if (!memcmp(s1, s2, l2))
700 return (char *)s1;
701 s1++;
702 }
703 return NULL;
704}
705EXPORT_SYMBOL(strnstr);
706#endif
707
668#ifndef __HAVE_ARCH_MEMCHR 708#ifndef __HAVE_ARCH_MEMCHR
669/** 709/**
670 * memchr - Find a character in an area of memory. 710 * memchr - Find a character in an area of memory.
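
strnstr() bounds the search to the first @len bytes of the haystack, which suits buffers that are not NUL-terminated at the point of interest. Note that @len limits where the match may end, not just where it may start:

    const char *buf = "abcdefgh";

    strnstr(buf, "cde", 8); /* -> &buf[2] */
    strnstr(buf, "cde", 4); /* -> NULL: the match would need byte 4, outside len */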
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index ac25cd28e807..34e3082632d8 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -28,6 +28,7 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/ctype.h> 29#include <linux/ctype.h>
30#include <linux/highmem.h> 30#include <linux/highmem.h>
31#include <linux/gfp.h>
31 32
32#include <asm/io.h> 33#include <asm/io.h>
33#include <asm/dma.h> 34#include <asm/dma.h>
@@ -49,19 +50,11 @@
49 */ 50 */
50#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) 51#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
51 52
52/*
53 * Enumeration for sync targets
54 */
55enum dma_sync_target {
56 SYNC_FOR_CPU = 0,
57 SYNC_FOR_DEVICE = 1,
58};
59
60int swiotlb_force; 53int swiotlb_force;
61 54
62/* 55/*
63 * Used to do a quick range check in unmap_single and 56 * Used to do a quick range check in swiotlb_tbl_unmap_single and
64 * sync_single_*, to see if the memory was in fact allocated by this 57 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
65 * API. 58 * API.
66 */ 59 */
67static char *io_tlb_start, *io_tlb_end; 60static char *io_tlb_start, *io_tlb_end;
@@ -97,6 +90,8 @@ static phys_addr_t *io_tlb_orig_addr;
97 */ 90 */
98static DEFINE_SPINLOCK(io_tlb_lock); 91static DEFINE_SPINLOCK(io_tlb_lock);
99 92
93static int late_alloc;
94
100static int __init 95static int __init
101setup_io_tlb_npages(char *str) 96setup_io_tlb_npages(char *str)
102{ 97{
@@ -109,6 +104,7 @@ setup_io_tlb_npages(char *str)
109 ++str; 104 ++str;
110 if (!strcmp(str, "force")) 105 if (!strcmp(str, "force"))
111 swiotlb_force = 1; 106 swiotlb_force = 1;
107
112 return 1; 108 return 1;
113} 109}
114__setup("swiotlb=", setup_io_tlb_npages); 110__setup("swiotlb=", setup_io_tlb_npages);
@@ -121,8 +117,9 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
121 return phys_to_dma(hwdev, virt_to_phys(address)); 117 return phys_to_dma(hwdev, virt_to_phys(address));
122} 118}
123 119
124static void swiotlb_print_info(unsigned long bytes) 120void swiotlb_print_info(void)
125{ 121{
122 unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
126 phys_addr_t pstart, pend; 123 phys_addr_t pstart, pend;
127 124
128 pstart = virt_to_phys(io_tlb_start); 125 pstart = virt_to_phys(io_tlb_start);
@@ -135,28 +132,14 @@ static void swiotlb_print_info(unsigned long bytes)
135 (unsigned long long)pend); 132 (unsigned long long)pend);
136} 133}
137 134
138/* 135void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
139 * Statically reserve bounce buffer space and initialize bounce buffer data
140 * structures for the software IO TLB used to implement the DMA API.
141 */
142void __init
143swiotlb_init_with_default_size(size_t default_size)
144{ 136{
145 unsigned long i, bytes; 137 unsigned long i, bytes;
146 138
147 if (!io_tlb_nslabs) { 139 bytes = nslabs << IO_TLB_SHIFT;
148 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
149 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
150 }
151
152 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
153 140
154 /* 141 io_tlb_nslabs = nslabs;
155 * Get IO TLB memory from the low pages 142 io_tlb_start = tlb;
156 */
157 io_tlb_start = alloc_bootmem_low_pages(bytes);
158 if (!io_tlb_start)
159 panic("Cannot allocate SWIOTLB buffer");
160 io_tlb_end = io_tlb_start + bytes; 143 io_tlb_end = io_tlb_start + bytes;
161 144
162 /* 145 /*
@@ -176,14 +159,40 @@ swiotlb_init_with_default_size(size_t default_size)
176 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); 159 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
177 if (!io_tlb_overflow_buffer) 160 if (!io_tlb_overflow_buffer)
178 panic("Cannot allocate SWIOTLB overflow buffer!\n"); 161 panic("Cannot allocate SWIOTLB overflow buffer!\n");
162 if (verbose)
163 swiotlb_print_info();
164}
165
166/*
167 * Statically reserve bounce buffer space and initialize bounce buffer data
168 * structures for the software IO TLB used to implement the DMA API.
169 */
170void __init
171swiotlb_init_with_default_size(size_t default_size, int verbose)
172{
173 unsigned long bytes;
174
175 if (!io_tlb_nslabs) {
176 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
177 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
178 }
179
180 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
181
182 /*
183 * Get IO TLB memory from the low pages
184 */
185 io_tlb_start = alloc_bootmem_low_pages(bytes);
186 if (!io_tlb_start)
187 panic("Cannot allocate SWIOTLB buffer");
179 188
180 swiotlb_print_info(bytes); 189 swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
181} 190}
182 191
183void __init 192void __init
184swiotlb_init(void) 193swiotlb_init(int verbose)
185{ 194{
186 swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ 195 swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */
187} 196}
188 197
189/* 198/*
@@ -260,7 +269,9 @@ swiotlb_late_init_with_default_size(size_t default_size)
260 if (!io_tlb_overflow_buffer) 269 if (!io_tlb_overflow_buffer)
261 goto cleanup4; 270 goto cleanup4;
262 271
263 swiotlb_print_info(bytes); 272 swiotlb_print_info();
273
274 late_alloc = 1;
264 275
265 return 0; 276 return 0;
266 277
@@ -281,6 +292,32 @@ cleanup1:
281 return -ENOMEM; 292 return -ENOMEM;
282} 293}
283 294
295void __init swiotlb_free(void)
296{
297 if (!io_tlb_overflow_buffer)
298 return;
299
300 if (late_alloc) {
301 free_pages((unsigned long)io_tlb_overflow_buffer,
302 get_order(io_tlb_overflow));
303 free_pages((unsigned long)io_tlb_orig_addr,
304 get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
305 free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
306 sizeof(int)));
307 free_pages((unsigned long)io_tlb_start,
308 get_order(io_tlb_nslabs << IO_TLB_SHIFT));
309 } else {
310 free_bootmem_late(__pa(io_tlb_overflow_buffer),
311 io_tlb_overflow);
312 free_bootmem_late(__pa(io_tlb_orig_addr),
313 io_tlb_nslabs * sizeof(phys_addr_t));
314 free_bootmem_late(__pa(io_tlb_list),
315 io_tlb_nslabs * sizeof(int));
316 free_bootmem_late(__pa(io_tlb_start),
317 io_tlb_nslabs << IO_TLB_SHIFT);
318 }
319}
320
284static int is_swiotlb_buffer(phys_addr_t paddr) 321static int is_swiotlb_buffer(phys_addr_t paddr)
285{ 322{
286 return paddr >= virt_to_phys(io_tlb_start) && 323 return paddr >= virt_to_phys(io_tlb_start) &&
@@ -290,8 +327,8 @@ static int is_swiotlb_buffer(phys_addr_t paddr)
290/* 327/*
291 * Bounce: copy the swiotlb buffer back to the original dma location 328 * Bounce: copy the swiotlb buffer back to the original dma location
292 */ 329 */
293static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, 330void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
294 enum dma_data_direction dir) 331 enum dma_data_direction dir)
295{ 332{
296 unsigned long pfn = PFN_DOWN(phys); 333 unsigned long pfn = PFN_DOWN(phys);
297 334
@@ -327,26 +364,25 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
327 memcpy(phys_to_virt(phys), dma_addr, size); 364 memcpy(phys_to_virt(phys), dma_addr, size);
328 } 365 }
329} 366}
367EXPORT_SYMBOL_GPL(swiotlb_bounce);
330 368
331/* 369void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
332 * Allocates bounce buffer and returns its kernel virtual address. 370 phys_addr_t phys, size_t size,
333 */ 371 enum dma_data_direction dir)
334static void *
335map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
336{ 372{
337 unsigned long flags; 373 unsigned long flags;
338 char *dma_addr; 374 char *dma_addr;
339 unsigned int nslots, stride, index, wrap; 375 unsigned int nslots, stride, index, wrap;
340 int i; 376 int i;
341 unsigned long start_dma_addr;
342 unsigned long mask; 377 unsigned long mask;
343 unsigned long offset_slots; 378 unsigned long offset_slots;
344 unsigned long max_slots; 379 unsigned long max_slots;
345 380
346 mask = dma_get_seg_boundary(hwdev); 381 mask = dma_get_seg_boundary(hwdev);
347 start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
348 382
349 offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 383 tbl_dma_addr &= mask;
384
385 offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
350 386
351 /* 387 /*
352 * Carefully handle integer overflow which can occur when mask == ~0UL. 388 * Carefully handle integer overflow which can occur when mask == ~0UL.
@@ -433,12 +469,27 @@ found:
433 469
434 return dma_addr; 470 return dma_addr;
435} 471}
472EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
473
474/*
475 * Allocates bounce buffer and returns its kernel virtual address.
476 */
477
478static void *
479map_single(struct device *hwdev, phys_addr_t phys, size_t size,
480 enum dma_data_direction dir)
481{
482 dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
483
484 return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
485}
436 486
437/* 487/*
438 * dma_addr is the kernel virtual address of the bounce buffer to unmap. 488 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
439 */ 489 */
440static void 490void
441do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) 491swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
492 enum dma_data_direction dir)
442{ 493{
443 unsigned long flags; 494 unsigned long flags;
444 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 495 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -453,7 +504,7 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
453 504
454 /* 505 /*
455 * Return the buffer to the free list by setting the corresponding 506 * Return the buffer to the free list by setting the corresponding
456 * entries to indicate the number of contigous entries available. 507 * entries to indicate the number of contiguous entries available.
457 * While returning the entries to the free list, we merge the entries 508 * While returning the entries to the free list, we merge the entries
458 * with slots below and above the pool being returned. 509 * with slots below and above the pool being returned.
459 */ 510 */
@@ -476,10 +527,12 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
476 } 527 }
477 spin_unlock_irqrestore(&io_tlb_lock, flags); 528 spin_unlock_irqrestore(&io_tlb_lock, flags);
478} 529}
530EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
479 531
480static void 532void
481sync_single(struct device *hwdev, char *dma_addr, size_t size, 533swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
482 int dir, int target) 534 enum dma_data_direction dir,
535 enum dma_sync_target target)
483{ 536{
484 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 537 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
485 phys_addr_t phys = io_tlb_orig_addr[index]; 538 phys_addr_t phys = io_tlb_orig_addr[index];
@@ -503,6 +556,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
503 BUG(); 556 BUG();
504 } 557 }
505} 558}
559EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
506 560
507void * 561void *
508swiotlb_alloc_coherent(struct device *hwdev, size_t size, 562swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -517,7 +571,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
517 dma_mask = hwdev->coherent_dma_mask; 571 dma_mask = hwdev->coherent_dma_mask;
518 572
519 ret = (void *)__get_free_pages(flags, order); 573 ret = (void *)__get_free_pages(flags, order);
520 if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) { 574 if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
521 /* 575 /*
522 * The allocated memory isn't reachable by the device. 576 * The allocated memory isn't reachable by the device.
523 */ 577 */
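
The `+ size - 1` fix is an off-by-one: the test must check the address of the buffer's last byte, not the first byte past the end. Worked through with a 32-bit coherent mask:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t dma_mask = 0xffffffffULL;      /* device reaches 0..0xffffffff */
            uint64_t dev_addr = 0xfffff000ULL;      /* last page below 4G */
            uint64_t size     = 0x1000;

            /* old test wrongly rejects a buffer ending exactly at the mask */
            assert(dev_addr + size > dma_mask);
            /* new test: last byte is 0xffffffff, which is reachable */
            assert(!(dev_addr + size - 1 > dma_mask));
            return 0;
    }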
@@ -526,8 +580,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
526 } 580 }
527 if (!ret) { 581 if (!ret) {
528 /* 582 /*
529 * We are either out of memory or the device can't DMA 583 * We are either out of memory or the device can't DMA to
530 * to GFP_DMA memory; fall back on map_single(), which 584 * GFP_DMA memory; fall back on map_single(), which
531 * will grab memory from the lowest available address range. 585 * will grab memory from the lowest available address range.
532 */ 586 */
533 ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE); 587 ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
@@ -539,13 +593,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
539 dev_addr = swiotlb_virt_to_bus(hwdev, ret); 593 dev_addr = swiotlb_virt_to_bus(hwdev, ret);
540 594
541 /* Confirm address can be DMA'd by device */ 595 /* Confirm address can be DMA'd by device */
542 if (dev_addr + size > dma_mask) { 596 if (dev_addr + size - 1 > dma_mask) {
543 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", 597 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
544 (unsigned long long)dma_mask, 598 (unsigned long long)dma_mask,
545 (unsigned long long)dev_addr); 599 (unsigned long long)dev_addr);
546 600
547 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 601 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
548 do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE); 602 swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
549 return NULL; 603 return NULL;
550 } 604 }
551 *dma_handle = dev_addr; 605 *dma_handle = dev_addr;
@@ -563,13 +617,14 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
563 if (!is_swiotlb_buffer(paddr)) 617 if (!is_swiotlb_buffer(paddr))
564 free_pages((unsigned long)vaddr, get_order(size)); 618 free_pages((unsigned long)vaddr, get_order(size));
565 else 619 else
566 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 620 /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
567 do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); 621 swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
568} 622}
569EXPORT_SYMBOL(swiotlb_free_coherent); 623EXPORT_SYMBOL(swiotlb_free_coherent);
570 624
571static void 625static void
572swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) 626swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
627 int do_panic)
573{ 628{
574 /* 629 /*
575 * Ran out of IOMMU space for this operation. This is very bad. 630 * Ran out of IOMMU space for this operation. This is very bad.
@@ -647,14 +702,14 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
647 * whatever the device wrote there. 702 * whatever the device wrote there.
648 */ 703 */
649static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, 704static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
650 size_t size, int dir) 705 size_t size, enum dma_data_direction dir)
651{ 706{
652 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); 707 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
653 708
654 BUG_ON(dir == DMA_NONE); 709 BUG_ON(dir == DMA_NONE);
655 710
656 if (is_swiotlb_buffer(paddr)) { 711 if (is_swiotlb_buffer(paddr)) {
657 do_unmap_single(hwdev, phys_to_virt(paddr), size, dir); 712 swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
658 return; 713 return;
659 } 714 }
660 715
@@ -690,14 +745,16 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
690 */ 745 */
691static void 746static void
692swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, 747swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
693 size_t size, int dir, int target) 748 size_t size, enum dma_data_direction dir,
749 enum dma_sync_target target)
694{ 750{
695 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); 751 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
696 752
697 BUG_ON(dir == DMA_NONE); 753 BUG_ON(dir == DMA_NONE);
698 754
699 if (is_swiotlb_buffer(paddr)) { 755 if (is_swiotlb_buffer(paddr)) {
700 sync_single(hwdev, phys_to_virt(paddr), size, dir, target); 756 swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
757 target);
701 return; 758 return;
702 } 759 }
703 760
@@ -724,37 +781,6 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
724EXPORT_SYMBOL(swiotlb_sync_single_for_device); 781EXPORT_SYMBOL(swiotlb_sync_single_for_device);
725 782
726/* 783/*
727 * Same as above, but for a sub-range of the mapping.
728 */
729static void
730swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
731 unsigned long offset, size_t size,
732 int dir, int target)
733{
734 swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
735}
736
737void
738swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
739 unsigned long offset, size_t size,
740 enum dma_data_direction dir)
741{
742 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
743 SYNC_FOR_CPU);
744}
745EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
746
747void
748swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
749 unsigned long offset, size_t size,
750 enum dma_data_direction dir)
751{
752 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
753 SYNC_FOR_DEVICE);
754}
755EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
756
757/*
758 * Map a set of buffers described by scatterlist in streaming mode for DMA. 784 * Map a set of buffers described by scatterlist in streaming mode for DMA.
759 * This is the scatter-gather version of the above swiotlb_map_page 785 * This is the scatter-gather version of the above swiotlb_map_page
760 * interface. Here the scatter gather list elements are each tagged with the 786 * interface. Here the scatter gather list elements are each tagged with the
@@ -807,7 +833,7 @@ EXPORT_SYMBOL(swiotlb_map_sg_attrs);
807 833
808int 834int
809swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, 835swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
810 int dir) 836 enum dma_data_direction dir)
811{ 837{
812 return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); 838 return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
813} 839}
@@ -834,7 +860,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
834 860
835void 861void
836swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, 862swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
837 int dir) 863 enum dma_data_direction dir)
838{ 864{
839 return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); 865 return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
840} 866}
@@ -849,7 +875,8 @@ EXPORT_SYMBOL(swiotlb_unmap_sg);
849 */ 875 */
850static void 876static void
851swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, 877swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
852 int nelems, int dir, int target) 878 int nelems, enum dma_data_direction dir,
879 enum dma_sync_target target)
853{ 880{
854 struct scatterlist *sg; 881 struct scatterlist *sg;
855 int i; 882 int i;
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 9fbcb44c554f..d608331b3e47 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -103,6 +103,7 @@
103#include <linux/rcupdate.h> 103#include <linux/rcupdate.h>
104#include <linux/err.h> 104#include <linux/err.h>
105#include <linux/textsearch.h> 105#include <linux/textsearch.h>
106#include <linux/slab.h>
106 107
107static LIST_HEAD(ts_ops); 108static LIST_HEAD(ts_ops);
108static DEFINE_SPINLOCK(ts_mod_lock); 109static DEFINE_SPINLOCK(ts_mod_lock);
diff --git a/lib/uuid.c b/lib/uuid.c
new file mode 100644
index 000000000000..8fadd7cef46c
--- /dev/null
+++ b/lib/uuid.c
@@ -0,0 +1,53 @@
1/*
2 * Unified UUID/GUID definition
3 *
4 * Copyright (C) 2009, Intel Corp.
5 * Huang Ying <ying.huang@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation;
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/uuid.h>
24#include <linux/random.h>
25
26static void __uuid_gen_common(__u8 b[16])
27{
28 int i;
29 u32 r;
30
31 for (i = 0; i < 4; i++) {
32 r = random32();
33 memcpy(b + i * 4, &r, 4);
34 }
35 /* variant 0b10 */
36 b[8] = (b[8] & 0x3F) | 0x80;
37}
38
39void uuid_le_gen(uuid_le *lu)
40{
41 __uuid_gen_common(lu->b);
42 /* version 4 : random generation */
43 lu->b[7] = (lu->b[7] & 0x0F) | 0x40;
44}
45EXPORT_SYMBOL_GPL(uuid_le_gen);
46
47void uuid_be_gen(uuid_be *bu)
48{
49 __uuid_gen_common(bu->b);
50 /* version 4 : random generation */
51 bu->b[6] = (bu->b[6] & 0x0F) | 0x40;
52}
53EXPORT_SYMBOL_GPL(uuid_be_gen);
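
Both generators fill 16 random bytes and then stamp the RFC 4122 variant (top two bits of clock_seq_hi_and_reserved = 10) and version (top nibble of time_hi_and_version = 4, "randomly generated"). The version byte sits at offset 7 for uuid_le and offset 6 for uuid_be because the little-endian GUID layout byte-swaps the first three fields. A check of the masking arithmetic:

    #include <assert.h>

    int main(void)
    {
            unsigned char b8 = 0xff, b6 = 0xff;     /* worst-case random bytes */

            b8 = (b8 & 0x3F) | 0x80;        /* variant: 10xxxxxx */
            assert((b8 & 0xC0) == 0x80);

            b6 = (b6 & 0x0F) | 0x40;        /* version: 0100xxxx */
            assert((b6 >> 4) == 4);
            return 0;
    }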
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index b91839e9e892..4ee19d0d3910 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -9,7 +9,7 @@
9 * Wirzenius wrote this portably, Torvalds fucked it up :-) 9 * Wirzenius wrote this portably, Torvalds fucked it up :-)
10 */ 10 */
11 11
12/* 12/*
13 * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com> 13 * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
14 * - changed to provide snprintf and vsnprintf functions 14 * - changed to provide snprintf and vsnprintf functions
15 * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de> 15 * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de>
@@ -47,14 +47,14 @@ static unsigned int simple_guess_base(const char *cp)
47} 47}
48 48
49/** 49/**
50 * simple_strtoul - convert a string to an unsigned long 50 * simple_strtoull - convert a string to an unsigned long long
51 * @cp: The start of the string 51 * @cp: The start of the string
52 * @endp: A pointer to the end of the parsed string will be placed here 52 * @endp: A pointer to the end of the parsed string will be placed here
53 * @base: The number base to use 53 * @base: The number base to use
54 */ 54 */
55unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) 55unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
56{ 56{
57 unsigned long result = 0; 57 unsigned long long result = 0;
58 58
59 if (!base) 59 if (!base)
60 base = simple_guess_base(cp); 60 base = simple_guess_base(cp);
@@ -71,58 +71,39 @@ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
71 result = result * base + value; 71 result = result * base + value;
72 cp++; 72 cp++;
73 } 73 }
74
75 if (endp) 74 if (endp)
76 *endp = (char *)cp; 75 *endp = (char *)cp;
76
77 return result; 77 return result;
78} 78}
79EXPORT_SYMBOL(simple_strtoul); 79EXPORT_SYMBOL(simple_strtoull);
80 80
81/** 81/**
82 * simple_strtol - convert a string to a signed long 82 * simple_strtoul - convert a string to an unsigned long
83 * @cp: The start of the string 83 * @cp: The start of the string
84 * @endp: A pointer to the end of the parsed string will be placed here 84 * @endp: A pointer to the end of the parsed string will be placed here
85 * @base: The number base to use 85 * @base: The number base to use
86 */ 86 */
87long simple_strtol(const char *cp, char **endp, unsigned int base) 87unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
88{ 88{
89 if(*cp == '-') 89 return simple_strtoull(cp, endp, base);
90 return -simple_strtoul(cp + 1, endp, base);
91 return simple_strtoul(cp, endp, base);
92} 90}
93EXPORT_SYMBOL(simple_strtol); 91EXPORT_SYMBOL(simple_strtoul);
94 92
95/** 93/**
96 * simple_strtoull - convert a string to an unsigned long long 94 * simple_strtol - convert a string to a signed long
97 * @cp: The start of the string 95 * @cp: The start of the string
98 * @endp: A pointer to the end of the parsed string will be placed here 96 * @endp: A pointer to the end of the parsed string will be placed here
99 * @base: The number base to use 97 * @base: The number base to use
100 */ 98 */
101unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) 99long simple_strtol(const char *cp, char **endp, unsigned int base)
102{ 100{
103 unsigned long long result = 0; 101 if (*cp == '-')
104 102 return -simple_strtoul(cp + 1, endp, base);
105 if (!base)
106 base = simple_guess_base(cp);
107
108 if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
109 cp += 2;
110
111 while (isxdigit(*cp)) {
112 unsigned int value;
113
114 value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
115 if (value >= base)
116 break;
117 result = result * base + value;
118 cp++;
119 }
120 103
121 if (endp) 104 return simple_strtoul(cp, endp, base);
122 *endp = (char *)cp;
123 return result;
124} 105}
125EXPORT_SYMBOL(simple_strtoull); 106EXPORT_SYMBOL(simple_strtol);
126 107
127/** 108/**
128 * simple_strtoll - convert a string to a signed long long 109 * simple_strtoll - convert a string to a signed long long
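
The reordering makes simple_strtoull() the primitive and turns simple_strtoul()/simple_strtol() into thin wrappers, removing a second copy of the parse loop. Behaviour, including base auto-detection via simple_guess_base(), is unchanged:

    unsigned long long v;
    char *end;

    v = simple_strtoull("0x1aF", &end, 0);  /* base 0 + "0x" prefix -> hex, v == 431 */
    v = simple_strtoull("0755", &end, 0);   /* leading '0' -> octal, v == 493 */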
@@ -132,10 +113,12 @@ EXPORT_SYMBOL(simple_strtoull);
132 */ 113 */
133long long simple_strtoll(const char *cp, char **endp, unsigned int base) 114long long simple_strtoll(const char *cp, char **endp, unsigned int base)
134{ 115{
135 if(*cp=='-') 116 if (*cp == '-')
136 return -simple_strtoull(cp + 1, endp, base); 117 return -simple_strtoull(cp + 1, endp, base);
118
137 return simple_strtoull(cp, endp, base); 119 return simple_strtoull(cp, endp, base);
138} 120}
121EXPORT_SYMBOL(simple_strtoll);
139 122
140/** 123/**
141 * strict_strtoul - convert a string to an unsigned long strictly 124 * strict_strtoul - convert a string to an unsigned long strictly
@@ -173,6 +156,7 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
173 val = simple_strtoul(cp, &tail, base); 156 val = simple_strtoul(cp, &tail, base);
174 if (tail == cp) 157 if (tail == cp)
175 return -EINVAL; 158 return -EINVAL;
159
176 if ((*tail == '\0') || 160 if ((*tail == '\0') ||
177 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { 161 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
178 *res = val; 162 *res = val;
@@ -283,12 +267,14 @@ int strict_strtoll(const char *cp, unsigned int base, long long *res)
283} 267}
284EXPORT_SYMBOL(strict_strtoll); 268EXPORT_SYMBOL(strict_strtoll);
285 269
286static int skip_atoi(const char **s) 270static noinline_for_stack
271int skip_atoi(const char **s)
287{ 272{
288 int i=0; 273 int i = 0;
289 274
290 while (isdigit(**s)) 275 while (isdigit(**s))
291 i = i*10 + *((*s)++) - '0'; 276 i = i*10 + *((*s)++) - '0';
277
292 return i; 278 return i;
293} 279}
294 280
@@ -302,7 +288,8 @@ static int skip_atoi(const char **s)
302/* Formats correctly any integer in [0,99999]. 288/* Formats correctly any integer in [0,99999].
303 * Outputs from one to five digits depending on input. 289 * Outputs from one to five digits depending on input.
304 * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */ 290 * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
305static char* put_dec_trunc(char *buf, unsigned q) 291static noinline_for_stack
292char *put_dec_trunc(char *buf, unsigned q)
306{ 293{
307 unsigned d3, d2, d1, d0; 294 unsigned d3, d2, d1, d0;
308 d1 = (q>>4) & 0xf; 295 d1 = (q>>4) & 0xf;
@@ -331,14 +318,16 @@ static char* put_dec_trunc(char *buf, unsigned q)
331 d3 = d3 - 10*q; 318 d3 = d3 - 10*q;
332 *buf++ = d3 + '0'; /* next digit */ 319 *buf++ = d3 + '0'; /* next digit */
333 if (q != 0) 320 if (q != 0)
334 *buf++ = q + '0'; /* most sign. digit */ 321 *buf++ = q + '0'; /* most sign. digit */
335 } 322 }
336 } 323 }
337 } 324 }
325
338 return buf; 326 return buf;
339} 327}
340/* Same with if's removed. Always emits five digits */ 328/* Same with if's removed. Always emits five digits */
341static char* put_dec_full(char *buf, unsigned q) 329static noinline_for_stack
330char *put_dec_full(char *buf, unsigned q)
342{ 331{
343 /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ 332 /* BTW, if q is in [0,9999], 8-bit ints will be enough, */
344 /* but anyway, gcc produces better code with full-sized ints */ 333 /* but anyway, gcc produces better code with full-sized ints */
@@ -347,14 +336,15 @@ static char* put_dec_full(char *buf, unsigned q)
347 d2 = (q>>8) & 0xf; 336 d2 = (q>>8) & 0xf;
348 d3 = (q>>12); 337 d3 = (q>>12);
349 338
350 /* Possible ways to approx. divide by 10 */ 339 /*
351 /* gcc -O2 replaces multiply with shifts and adds */ 340 * Possible ways to approx. divide by 10
352 // (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) 341 * gcc -O2 replaces multiply with shifts and adds
353 // (x * 0x67) >> 10: 1100111 342 * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386)
354 // (x * 0x34) >> 9: 110100 - same 343 * (x * 0x67) >> 10: 1100111
355 // (x * 0x1a) >> 8: 11010 - same 344 * (x * 0x34) >> 9: 110100 - same
356 // (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) 345 * (x * 0x1a) >> 8: 11010 - same
357 346 * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386)
347 */
358 d0 = 6*(d3 + d2 + d1) + (q & 0xf); 348 d0 = 6*(d3 + d2 + d1) + (q & 0xf);
359 q = (d0 * 0xcd) >> 11; 349 q = (d0 * 0xcd) >> 11;
360 d0 = d0 - 10*q; 350 d0 = d0 - 10*q;
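
The reflowed comment documents the reciprocal multiplications used in place of a hardware divide. `(x * 0xcd) >> 11` equals x / 10 exactly for every input these helpers produce (d0 is at most 6*45 + 15 = 285; the identity in fact holds for all x up to 1028):

    #include <assert.h>

    int main(void)
    {
            unsigned x;

            for (x = 0; x <= 1028; x++)
                    assert(((x * 0xcd) >> 11) == x / 10);
            return 0;
    }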
@@ -375,10 +365,12 @@ static char* put_dec_full(char *buf, unsigned q)
375 d3 = d3 - 10*q; 365 d3 = d3 - 10*q;
376 *buf++ = d3 + '0'; 366 *buf++ = d3 + '0';
377 *buf++ = q + '0'; 367 *buf++ = q + '0';
368
378 return buf; 369 return buf;
379} 370}
380/* No inlining helps gcc to use registers better */ 371/* No inlining helps gcc to use registers better */
381static noinline char* put_dec(char *buf, unsigned long long num) 372static noinline_for_stack
373char *put_dec(char *buf, unsigned long long num)
382{ 374{
383 while (1) { 375 while (1) {
384 unsigned rem; 376 unsigned rem;
@@ -394,8 +386,8 @@ static noinline char* put_dec(char *buf, unsigned long long num)
394#define PLUS 4 /* show plus */ 386#define PLUS 4 /* show plus */
395#define SPACE 8 /* space if plus */ 387#define SPACE 8 /* space if plus */
396#define LEFT 16 /* left justified */ 388#define LEFT 16 /* left justified */
397#define SMALL 32 /* Must be 32 == 0x20 */ 389#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */
398#define SPECIAL 64 /* 0x */ 390#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */
399 391
400enum format_type { 392enum format_type {
401 FORMAT_TYPE_NONE, /* Just a string part */ 393 FORMAT_TYPE_NONE, /* Just a string part */
@@ -421,16 +413,17 @@ enum format_type {
421}; 413};
422 414
423struct printf_spec { 415struct printf_spec {
424 enum format_type type; 416 u8 type; /* format_type enum */
425 int flags; /* flags to number() */ 417 u8 flags; /* flags to number() */
426 int field_width; /* width of output field */ 418 u8 base; /* number base, 8, 10 or 16 only */
427 int base; 419 u8 qualifier; /* number qualifier, one of 'hHlLtzZ' */
428 int precision; /* # of digits/chars */ 420 s16 field_width; /* width of output field */
429 int qualifier; 421 s16 precision; /* # of digits/chars */
430}; 422};
431 423
432static char *number(char *buf, char *end, unsigned long long num, 424static noinline_for_stack
433 struct printf_spec spec) 425char *number(char *buf, char *end, unsigned long long num,
426 struct printf_spec spec)
434{ 427{
435 /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ 428 /* we are called with base 8, 10 or 16, only, thus don't need "G..." */
436 static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ 429 static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
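
Packing struct printf_spec into four u8s and two s16s shrinks it from 24 bytes (an enum plus five ints on common ABIs) to 8, which makes the pervasive pass-by-value through number()/string() cheap now that those helpers are noinline_for_stack. A sanity check of the new layout, mirrored in plain C99 types:

    #include <assert.h>
    #include <stdint.h>

    struct printf_spec {
            uint8_t  type;
            uint8_t  flags;
            uint8_t  base;
            uint8_t  qualifier;
            int16_t  field_width;
            int16_t  precision;
    };

    int main(void)
    {
            assert(sizeof(struct printf_spec) == 8);
            return 0;
    }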
@@ -448,9 +441,9 @@ static char *number(char *buf, char *end, unsigned long long num,
448 spec.flags &= ~ZEROPAD; 441 spec.flags &= ~ZEROPAD;
449 sign = 0; 442 sign = 0;
450 if (spec.flags & SIGN) { 443 if (spec.flags & SIGN) {
451 if ((signed long long) num < 0) { 444 if ((signed long long)num < 0) {
452 sign = '-'; 445 sign = '-';
453 num = - (signed long long) num; 446 num = -(signed long long)num;
454 spec.field_width--; 447 spec.field_width--;
455 } else if (spec.flags & PLUS) { 448 } else if (spec.flags & PLUS) {
456 sign = '+'; 449 sign = '+';
@@ -478,7 +471,9 @@ static char *number(char *buf, char *end, unsigned long long num,
478 else if (spec.base != 10) { /* 8 or 16 */ 471 else if (spec.base != 10) { /* 8 or 16 */
479 int mask = spec.base - 1; 472 int mask = spec.base - 1;
480 int shift = 3; 473 int shift = 3;
481 if (spec.base == 16) shift = 4; 474
475 if (spec.base == 16)
476 shift = 4;
482 do { 477 do {
483 tmp[i++] = (digits[((unsigned char)num) & mask] | locase); 478 tmp[i++] = (digits[((unsigned char)num) & mask] | locase);
484 num >>= shift; 479 num >>= shift;
@@ -493,7 +488,7 @@ static char *number(char *buf, char *end, unsigned long long num,
493 /* leading space padding */ 488 /* leading space padding */
494 spec.field_width -= spec.precision; 489 spec.field_width -= spec.precision;
495 if (!(spec.flags & (ZEROPAD+LEFT))) { 490 if (!(spec.flags & (ZEROPAD+LEFT))) {
496 while(--spec.field_width >= 0) { 491 while (--spec.field_width >= 0) {
497 if (buf < end) 492 if (buf < end)
498 *buf = ' '; 493 *buf = ' ';
499 ++buf; 494 ++buf;
@@ -543,15 +538,17 @@ static char *number(char *buf, char *end, unsigned long long num,
543 *buf = ' '; 538 *buf = ' ';
544 ++buf; 539 ++buf;
545 } 540 }
541
546 return buf; 542 return buf;
547} 543}
548 544
549static char *string(char *buf, char *end, char *s, struct printf_spec spec) 545static noinline_for_stack
546char *string(char *buf, char *end, const char *s, struct printf_spec spec)
550{ 547{
551 int len, i; 548 int len, i;
552 549
553 if ((unsigned long)s < PAGE_SIZE) 550 if ((unsigned long)s < PAGE_SIZE)
554 s = "<NULL>"; 551 s = "(null)";
555 552
556 len = strnlen(s, spec.precision); 553 len = strnlen(s, spec.precision);
557 554
@@ -572,11 +569,13 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec)
572 *buf = ' '; 569 *buf = ' ';
573 ++buf; 570 ++buf;
574 } 571 }
572
575 return buf; 573 return buf;
576} 574}
577 575
578static char *symbol_string(char *buf, char *end, void *ptr, 576static noinline_for_stack
579 struct printf_spec spec, char ext) 577char *symbol_string(char *buf, char *end, void *ptr,
578 struct printf_spec spec, char ext)
580{ 579{
581 unsigned long value = (unsigned long) ptr; 580 unsigned long value = (unsigned long) ptr;
582#ifdef CONFIG_KALLSYMS 581#ifdef CONFIG_KALLSYMS
@@ -585,75 +584,177 @@ static char *symbol_string(char *buf, char *end, void *ptr,
585 sprint_symbol(sym, value); 584 sprint_symbol(sym, value);
586 else 585 else
587 kallsyms_lookup(value, NULL, NULL, NULL, sym); 586 kallsyms_lookup(value, NULL, NULL, NULL, sym);
587
588 return string(buf, end, sym, spec); 588 return string(buf, end, sym, spec);
589#else 589#else
590 spec.field_width = 2*sizeof(void *); 590 spec.field_width = 2 * sizeof(void *);
591 spec.flags |= SPECIAL | SMALL | ZEROPAD; 591 spec.flags |= SPECIAL | SMALL | ZEROPAD;
592 spec.base = 16; 592 spec.base = 16;
593
593 return number(buf, end, value, spec); 594 return number(buf, end, value, spec);
594#endif 595#endif
595} 596}
596 597
597static char *resource_string(char *buf, char *end, struct resource *res, 598static noinline_for_stack
598 struct printf_spec spec) 599char *resource_string(char *buf, char *end, struct resource *res,
600 struct printf_spec spec, const char *fmt)
599{ 601{
600#ifndef IO_RSRC_PRINTK_SIZE 602#ifndef IO_RSRC_PRINTK_SIZE
601#define IO_RSRC_PRINTK_SIZE 4 603#define IO_RSRC_PRINTK_SIZE 6
602#endif 604#endif
603 605
604#ifndef MEM_RSRC_PRINTK_SIZE 606#ifndef MEM_RSRC_PRINTK_SIZE
605#define MEM_RSRC_PRINTK_SIZE 8 607#define MEM_RSRC_PRINTK_SIZE 10
606#endif 608#endif
607 struct printf_spec num_spec = { 609 static const struct printf_spec io_spec = {
608 .base = 16, 610 .base = 16,
611 .field_width = IO_RSRC_PRINTK_SIZE,
609 .precision = -1, 612 .precision = -1,
610 .flags = SPECIAL | SMALL | ZEROPAD, 613 .flags = SPECIAL | SMALL | ZEROPAD,
611 }; 614 };
612 /* room for the actual numbers, the two "0x", -, [, ] and the final zero */ 615 static const struct printf_spec mem_spec = {
613 char sym[4*sizeof(resource_size_t) + 8]; 616 .base = 16,
614 char *p = sym, *pend = sym + sizeof(sym); 617 .field_width = MEM_RSRC_PRINTK_SIZE,
615 int size = -1; 618 .precision = -1,
619 .flags = SPECIAL | SMALL | ZEROPAD,
620 };
621 static const struct printf_spec bus_spec = {
622 .base = 16,
623 .field_width = 2,
624 .precision = -1,
625 .flags = SMALL | ZEROPAD,
626 };
627 static const struct printf_spec dec_spec = {
628 .base = 10,
629 .precision = -1,
630 .flags = 0,
631 };
632 static const struct printf_spec str_spec = {
633 .field_width = -1,
634 .precision = 10,
635 .flags = LEFT,
636 };
637 static const struct printf_spec flag_spec = {
638 .base = 16,
639 .precision = -1,
640 .flags = SPECIAL | SMALL,
641 };
616 642
617 if (res->flags & IORESOURCE_IO) 643 /* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8)
618 size = IO_RSRC_PRINTK_SIZE; 644 * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
619 else if (res->flags & IORESOURCE_MEM) 645#define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4)
620 size = MEM_RSRC_PRINTK_SIZE; 646#define FLAG_BUF_SIZE (2 * sizeof(res->flags))
647#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]")
648#define RAW_BUF_SIZE sizeof("[mem - flags 0x]")
649 char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
650 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
651
652 char *p = sym, *pend = sym + sizeof(sym);
653 int decode = (fmt[0] == 'R') ? 1 : 0;
654 const struct printf_spec *specp;
621 655
622 *p++ = '['; 656 *p++ = '[';
623 num_spec.field_width = size; 657 if (res->flags & IORESOURCE_IO) {
624 p = number(p, pend, res->start, num_spec); 658 p = string(p, pend, "io ", str_spec);
625 *p++ = '-'; 659 specp = &io_spec;
626 p = number(p, pend, res->end, num_spec); 660 } else if (res->flags & IORESOURCE_MEM) {
661 p = string(p, pend, "mem ", str_spec);
662 specp = &mem_spec;
663 } else if (res->flags & IORESOURCE_IRQ) {
664 p = string(p, pend, "irq ", str_spec);
665 specp = &dec_spec;
666 } else if (res->flags & IORESOURCE_DMA) {
667 p = string(p, pend, "dma ", str_spec);
668 specp = &dec_spec;
669 } else if (res->flags & IORESOURCE_BUS) {
670 p = string(p, pend, "bus ", str_spec);
671 specp = &bus_spec;
672 } else {
673 p = string(p, pend, "??? ", str_spec);
674 specp = &mem_spec;
675 decode = 0;
676 }
677 p = number(p, pend, res->start, *specp);
678 if (res->start != res->end) {
679 *p++ = '-';
680 p = number(p, pend, res->end, *specp);
681 }
682 if (decode) {
683 if (res->flags & IORESOURCE_MEM_64)
684 p = string(p, pend, " 64bit", str_spec);
685 if (res->flags & IORESOURCE_PREFETCH)
686 p = string(p, pend, " pref", str_spec);
687 if (res->flags & IORESOURCE_WINDOW)
688 p = string(p, pend, " window", str_spec);
689 if (res->flags & IORESOURCE_DISABLED)
690 p = string(p, pend, " disabled", str_spec);
691 } else {
692 p = string(p, pend, " flags ", str_spec);
693 p = number(p, pend, res->flags, flag_spec);
694 }
627 *p++ = ']'; 695 *p++ = ']';
628 *p = 0; 696 *p = '\0';
629 697
630 return string(buf, end, sym, spec); 698 return string(buf, end, sym, spec);
631} 699}
632 700
633static char *mac_address_string(char *buf, char *end, u8 *addr, 701static noinline_for_stack
634 struct printf_spec spec, const char *fmt) 702char *mac_address_string(char *buf, char *end, u8 *addr,
703 struct printf_spec spec, const char *fmt)
635{ 704{
636 char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; 705 char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
637 char *p = mac_addr; 706 char *p = mac_addr;
638 int i; 707 int i;
708 char separator;
709
710 if (fmt[1] == 'F') { /* FDDI canonical format */
711 separator = '-';
712 } else {
713 separator = ':';
714 }
639 715
640 for (i = 0; i < 6; i++) { 716 for (i = 0; i < 6; i++) {
641 p = pack_hex_byte(p, addr[i]); 717 p = pack_hex_byte(p, addr[i]);
642 if (fmt[0] == 'M' && i != 5) 718 if (fmt[0] == 'M' && i != 5)
643 *p++ = ':'; 719 *p++ = separator;
644 } 720 }
645 *p = '\0'; 721 *p = '\0';
646 722
647 return string(buf, end, mac_addr, spec); 723 return string(buf, end, mac_addr, spec);
648} 724}
649 725
650static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) 726static noinline_for_stack
727char *ip4_string(char *p, const u8 *addr, const char *fmt)
651{ 728{
652 int i; 729 int i;
653 730 bool leading_zeros = (fmt[0] == 'i');
731 int index;
732 int step;
733
734 switch (fmt[2]) {
735 case 'h':
736#ifdef __BIG_ENDIAN
737 index = 0;
738 step = 1;
739#else
740 index = 3;
741 step = -1;
742#endif
743 break;
744 case 'l':
745 index = 3;
746 step = -1;
747 break;
748 case 'n':
749 case 'b':
750 default:
751 index = 0;
752 step = 1;
753 break;
754 }
654 for (i = 0; i < 4; i++) { 755 for (i = 0; i < 4; i++) {
655 char temp[3]; /* hold each IP quad in reverse order */ 756 char temp[3]; /* hold each IP quad in reverse order */
656 int digits = put_dec_trunc(temp, addr[i]) - temp; 757 int digits = put_dec_trunc(temp, addr[index]) - temp;
657 if (leading_zeros) { 758 if (leading_zeros) {
658 if (digits < 3) 759 if (digits < 3)
659 *p++ = '0'; 760 *p++ = '0';
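
With this rewrite %pR decodes the resource type and the interesting flag bits, while the raw form (fmt[0] != 'R') appends the flags word in hex instead. Illustrative output, assuming a 64-bit prefetchable memory BAR:

    struct resource res = {
            .start = 0xc0000000,
            .end   = 0xc1ffffff,
            .flags = IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH,
    };

    printk("%pR\n", &res);  /* [mem 0xc0000000-0xc1ffffff 64bit pref] */
    printk("%pr\n", &res);  /* [mem 0xc0000000-0xc1ffffff flags 0x...] (raw) */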
@@ -665,23 +766,22 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
665 *p++ = temp[digits]; 766 *p++ = temp[digits];
666 if (i < 3) 767 if (i < 3)
667 *p++ = '.'; 768 *p++ = '.';
769 index += step;
668 } 770 }
669
670 *p = '\0'; 771 *p = '\0';
772
671 return p; 773 return p;
672} 774}
673 775
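The new fmt[2] switch drives the optional [hnbl] endianness suffix. A sketch of how a caller might exercise it; report_ip4 is hypothetical, the specifiers are from the patch:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void report_ip4(__be32 daddr) /* network byte order, as on the wire */
    {
        pr_info("%pI4\n", &daddr);  /* bytes as stored: 1.2.3.4 */
        pr_info("%pi4\n", &daddr);  /* same, with leading zeros: 001.002.003.004 */
        pr_info("%pI4h\n", &daddr); /* quads reordered to host endianness */
    }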
674static char *ip6_compressed_string(char *p, const char *addr) 776static noinline_for_stack
777char *ip6_compressed_string(char *p, const char *addr)
675{ 778{
676 int i; 779 int i, j, range;
677 int j;
678 int range;
679 unsigned char zerolength[8]; 780 unsigned char zerolength[8];
680 int longest = 1; 781 int longest = 1;
681 int colonpos = -1; 782 int colonpos = -1;
682 u16 word; 783 u16 word;
683 u8 hi; 784 u8 hi, lo;
684 u8 lo;
685 bool needcolon = false; 785 bool needcolon = false;
686 bool useIPv4; 786 bool useIPv4;
687 struct in6_addr in6; 787 struct in6_addr in6;
@@ -735,8 +835,9 @@ static char *ip6_compressed_string(char *p, const char *addr)
735 p = pack_hex_byte(p, hi); 835 p = pack_hex_byte(p, hi);
736 else 836 else
737 *p++ = hex_asc_lo(hi); 837 *p++ = hex_asc_lo(hi);
838 p = pack_hex_byte(p, lo);
738 } 839 }
739 if (hi || lo > 0x0f) 840 else if (lo > 0x0f)
740 p = pack_hex_byte(p, lo); 841 p = pack_hex_byte(p, lo);
741 else 842 else
742 *p++ = hex_asc_lo(lo); 843 *p++ = hex_asc_lo(lo);
@@ -746,29 +847,32 @@ static char *ip6_compressed_string(char *p, const char *addr)
746 if (useIPv4) { 847 if (useIPv4) {
747 if (needcolon) 848 if (needcolon)
748 *p++ = ':'; 849 *p++ = ':';
749 p = ip4_string(p, &in6.s6_addr[12], false); 850 p = ip4_string(p, &in6.s6_addr[12], "I4");
750 } 851 }
751
752 *p = '\0'; 852 *p = '\0';
853
753 return p; 854 return p;
754} 855}
755 856
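A sketch contrasting the full and compressed IPv6 forms; report_ip6 is a hypothetical caller:

    #include <linux/in6.h>
    #include <linux/kernel.h>

    static void report_ip6(const struct in6_addr *a)
    {
        pr_info("%pI6\n", a);  /* full form, e.g. 2001:0db8:0000:0000:0000:0000:0000:0001 */
        pr_info("%pI6c\n", a); /* compressed per the draft, e.g. 2001:db8::1 */
    }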
756static char *ip6_string(char *p, const char *addr, const char *fmt) 857static noinline_for_stack
858char *ip6_string(char *p, const char *addr, const char *fmt)
757{ 859{
758 int i; 860 int i;
861
759 for (i = 0; i < 8; i++) { 862 for (i = 0; i < 8; i++) {
760 p = pack_hex_byte(p, *addr++); 863 p = pack_hex_byte(p, *addr++);
761 p = pack_hex_byte(p, *addr++); 864 p = pack_hex_byte(p, *addr++);
762 if (fmt[0] == 'I' && i != 7) 865 if (fmt[0] == 'I' && i != 7)
763 *p++ = ':'; 866 *p++ = ':';
764 } 867 }
765
766 *p = '\0'; 868 *p = '\0';
869
767 return p; 870 return p;
768} 871}
769 872
770static char *ip6_addr_string(char *buf, char *end, const u8 *addr, 873static noinline_for_stack
771 struct printf_spec spec, const char *fmt) 874char *ip6_addr_string(char *buf, char *end, const u8 *addr,
875 struct printf_spec spec, const char *fmt)
772{ 876{
773 char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")]; 877 char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
774 878
@@ -780,16 +884,64 @@ static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
780 return string(buf, end, ip6_addr, spec); 884 return string(buf, end, ip6_addr, spec);
781} 885}
782 886
783static char *ip4_addr_string(char *buf, char *end, const u8 *addr, 887static noinline_for_stack
784 struct printf_spec spec, const char *fmt) 888char *ip4_addr_string(char *buf, char *end, const u8 *addr,
889 struct printf_spec spec, const char *fmt)
785{ 890{
786 char ip4_addr[sizeof("255.255.255.255")]; 891 char ip4_addr[sizeof("255.255.255.255")];
787 892
788 ip4_string(ip4_addr, addr, fmt[0] == 'i'); 893 ip4_string(ip4_addr, addr, fmt);
789 894
790 return string(buf, end, ip4_addr, spec); 895 return string(buf, end, ip4_addr, spec);
791} 896}
792 897
898static noinline_for_stack
899char *uuid_string(char *buf, char *end, const u8 *addr,
900 struct printf_spec spec, const char *fmt)
901{
902 char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")];
903 char *p = uuid;
904 int i;
905 static const u8 be[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
906 static const u8 le[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
907 const u8 *index = be;
908 bool uc = false;
909
910 switch (*(++fmt)) {
911 case 'L':
912 uc = true; /* fall-through */
913 case 'l':
914 index = le;
915 break;
916 case 'B':
917 uc = true;
918 break;
919 }
920
921 for (i = 0; i < 16; i++) {
922 p = pack_hex_byte(p, addr[index[i]]);
923 switch (i) {
924 case 3:
925 case 5:
926 case 7:
927 case 9:
928 *p++ = '-';
929 break;
930 }
931 }
932
933 *p = 0;
934
935 if (uc) {
936 p = uuid;
937 do {
938 *p = toupper(*p);
939 } while (*(++p));
940 }
941
942 return string(buf, end, uuid, spec);
943}
944
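A sketch of the four %pU variants; report_uuid is hypothetical, and the be[]/le[] index tables above determine which half-words get byte-swapped:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void report_uuid(const u8 uuid[16])
    {
        pr_info("%pUb\n", uuid); /* big endian, lower case: the default */
        pr_info("%pUB\n", uuid); /* big endian, UPPER case */
        pr_info("%pUl\n", uuid); /* little endian, lower case */
        pr_info("%pUL\n", uuid); /* little endian, UPPER case */
    }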
793/* 945/*
794 * Show a '%p' thing. A kernel extension is that the '%p' is followed 946 * Show a '%p' thing. A kernel extension is that the '%p' is followed
795 * by an extra set of alphanumeric characters that are extended format 947 * by an extra set of alphanumeric characters that are extended format
@@ -801,25 +953,46 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
801 * - 'f' For simple symbolic function names without offset 953 * - 'f' For simple symbolic function names without offset
802 * - 'S' For symbolic direct pointers with offset 954 * - 'S' For symbolic direct pointers with offset
803 * - 's' For symbolic direct pointers without offset 955 * - 's' For symbolic direct pointers without offset
804 * - 'R' For a struct resource pointer, it prints the range of 956 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
805 * addresses (not the name nor the flags) 957 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
806 * - 'M' For a 6-byte MAC address, it prints the address in the 958 * - 'M' For a 6-byte MAC address, it prints the address in the
807 * usual colon-separated hex notation 959 * usual colon-separated hex notation
808 * - 'm' For a 6-byte MAC address, it prints the hex address without colons 960 * - 'm' For a 6-byte MAC address, it prints the hex address without colons
961 * - 'MF' For a 6-byte MAC FDDI address, it prints the address
962 * with a dash-separated hex notation
809 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way 963 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
810 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) 964 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
811 * IPv6 uses colon separated network-order 16 bit hex with leading 0's 965 * IPv6 uses colon separated network-order 16 bit hex with leading 0's
812 * - 'i' [46] for 'raw' IPv4/IPv6 addresses 966 * - 'i' [46] for 'raw' IPv4/IPv6 addresses
813 * IPv6 omits the colons (01020304...0f) 967 * IPv6 omits the colons (01020304...0f)
814 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) 968 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
969 * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order
815 * - 'I6c' for IPv6 addresses printed as specified by 970 * - 'I6c' for IPv6 addresses printed as specified by
816 * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt 971 * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00
972 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
973 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
974 * Options for %pU are:
975 * b big endian lower case hex (default)
976 * B big endian UPPER case hex
977 * l little endian lower case hex
978 * L little endian UPPER case hex
979 * big endian output byte order is:
980 * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15]
981 * little endian output byte order is:
982 * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15]
983 * - 'V' For a struct va_format which contains a format string * and va_list *,
984 * call vsnprintf(->format, *->va_list).
985 * Implements a "recursive vsnprintf".
986 * Do not use this feature without some mechanism to verify the
987 * correctness of the format string and va_list arguments.
988 *
817 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 989 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
818 * function pointers are really function descriptors, which contain a 990 * function pointers are really function descriptors, which contain a
819 * pointer to the real address. 991 * pointer to the real address.
820 */ 992 */
821static char *pointer(const char *fmt, char *buf, char *end, void *ptr, 993static noinline_for_stack
822 struct printf_spec spec) 994char *pointer(const char *fmt, char *buf, char *end, void *ptr,
995 struct printf_spec spec)
823{ 996{
824 if (!ptr) 997 if (!ptr)
825 return string(buf, end, "(null)", spec); 998 return string(buf, end, "(null)", spec);
@@ -828,14 +1001,16 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
828 case 'F': 1001 case 'F':
829 case 'f': 1002 case 'f':
830 ptr = dereference_function_descriptor(ptr); 1003 ptr = dereference_function_descriptor(ptr);
831 case 's':
832 /* Fallthrough */ 1004 /* Fallthrough */
833 case 'S': 1005 case 'S':
1006 case 's':
834 return symbol_string(buf, end, ptr, spec, *fmt); 1007 return symbol_string(buf, end, ptr, spec, *fmt);
835 case 'R': 1008 case 'R':
836 return resource_string(buf, end, ptr, spec); 1009 case 'r':
1010 return resource_string(buf, end, ptr, spec, fmt);
837 case 'M': /* Colon separated: 00:01:02:03:04:05 */ 1011 case 'M': /* Colon separated: 00:01:02:03:04:05 */
838 case 'm': /* Contiguous: 000102030405 */ 1012 case 'm': /* Contiguous: 000102030405 */
1013 /* [mM]F (FDDI, bit reversed) */
839 return mac_address_string(buf, end, ptr, spec, fmt); 1014 return mac_address_string(buf, end, ptr, spec, fmt);
840 case 'I': /* Formatted IP supported 1015 case 'I': /* Formatted IP supported
841 * 4: 1.2.3.4 1016 * 4: 1.2.3.4
@@ -853,6 +1028,12 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
853 return ip4_addr_string(buf, end, ptr, spec, fmt); 1028 return ip4_addr_string(buf, end, ptr, spec, fmt);
854 } 1029 }
855 break; 1030 break;
1031 case 'U':
1032 return uuid_string(buf, end, ptr, spec, fmt);
1033 case 'V':
1034 return buf + vsnprintf(buf, end - buf,
1035 ((struct va_format *)ptr)->fmt,
1036 *(((struct va_format *)ptr)->va));
856 } 1037 }
857 spec.flags |= SMALL; 1038 spec.flags |= SMALL;
858 if (spec.field_width == -1) { 1039 if (spec.field_width == -1) {
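The %pV case above implements the "recursive vsnprintf". A sketch of the intended calling convention, assuming a hypothetical wrapper my_dbg; struct va_format is the type this patch formats:

    #include <linux/kernel.h>

    static void my_dbg(const char *fmt, ...)
    {
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        /* one printk call, so the message cannot be interleaved */
        printk(KERN_DEBUG "mydrv: %pV", &vaf);
        va_end(args);
    }

As the comment block warns, the format string and va_list must be trusted; %pV must never be handed user-controlled data.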
@@ -884,7 +1065,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
884 * @precision: precision of a number 1065 * @precision: precision of a number
885 * @qualifier: qualifier of a number (long, size_t, ...) 1066 * @qualifier: qualifier of a number (long, size_t, ...)
886 */ 1067 */
887static int format_decode(const char *fmt, struct printf_spec *spec) 1068static noinline_for_stack
1069int format_decode(const char *fmt, struct printf_spec *spec)
888{ 1070{
889 const char *start = fmt; 1071 const char *start = fmt;
890 1072
@@ -970,8 +1152,8 @@ precision:
970qualifier: 1152qualifier:
971 /* get the conversion qualifier */ 1153 /* get the conversion qualifier */
972 spec->qualifier = -1; 1154 spec->qualifier = -1;
973 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || 1155 if (*fmt == 'h' || TOLOWER(*fmt) == 'l' ||
974 *fmt == 'Z' || *fmt == 'z' || *fmt == 't') { 1156 TOLOWER(*fmt) == 'z' || *fmt == 't') {
975 spec->qualifier = *fmt++; 1157 spec->qualifier = *fmt++;
976 if (unlikely(spec->qualifier == *fmt)) { 1158 if (unlikely(spec->qualifier == *fmt)) {
977 if (spec->qualifier == 'l') { 1159 if (spec->qualifier == 'l') {
@@ -1038,7 +1220,7 @@ qualifier:
1038 spec->type = FORMAT_TYPE_LONG; 1220 spec->type = FORMAT_TYPE_LONG;
1039 else 1221 else
1040 spec->type = FORMAT_TYPE_ULONG; 1222 spec->type = FORMAT_TYPE_ULONG;
1041 } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') { 1223 } else if (TOLOWER(spec->qualifier) == 'z') {
1042 spec->type = FORMAT_TYPE_SIZE_T; 1224 spec->type = FORMAT_TYPE_SIZE_T;
1043 } else if (spec->qualifier == 't') { 1225 } else if (spec->qualifier == 't') {
1044 spec->type = FORMAT_TYPE_PTRDIFF; 1226 spec->type = FORMAT_TYPE_PTRDIFF;
@@ -1074,7 +1256,18 @@ qualifier:
1074 * %ps output the name of a text symbol without offset 1256 * %ps output the name of a text symbol without offset
1075 * %pF output the name of a function pointer with its offset 1257 * %pF output the name of a function pointer with its offset
1076 * %pf output the name of a function pointer without its offset 1258 * %pf output the name of a function pointer without its offset
1077 * %pR output the address range in a struct resource 1259 * %pR output the address range in a struct resource with decoded flags
1260 * %pr output the address range in a struct resource with raw flags
1261 * %pM output a 6-byte MAC address with colons
1262 * %pm output a 6-byte MAC address without colons
1263 * %pI4 print an IPv4 address without leading zeros
1264 * %pi4 print an IPv4 address with leading zeros
1265 * %pI6 print an IPv6 address with colons
1266 * %pi6 print an IPv6 address without colons
1267 * %pI6c print an IPv6 address as specified by
1268 * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00
1269 * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper
1270 * case.
1078 * %n is ignored 1271 * %n is ignored
1079 * 1272 *
1080 * The return value is the number of characters which would 1273 * The return value is the number of characters which would
@@ -1091,8 +1284,7 @@ qualifier:
1091int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) 1284int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1092{ 1285{
1093 unsigned long long num; 1286 unsigned long long num;
1094 char *str, *end, c; 1287 char *str, *end;
1095 int read;
1096 struct printf_spec spec = {0}; 1288 struct printf_spec spec = {0};
1097 1289
1098 /* Reject out-of-range values early. Large positive sizes are 1290 /* Reject out-of-range values early. Large positive sizes are
@@ -1111,8 +1303,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1111 1303
1112 while (*fmt) { 1304 while (*fmt) {
1113 const char *old_fmt = fmt; 1305 const char *old_fmt = fmt;
1114 1306 int read = format_decode(fmt, &spec);
1115 read = format_decode(fmt, &spec);
1116 1307
1117 fmt += read; 1308 fmt += read;
1118 1309
@@ -1136,7 +1327,9 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1136 spec.precision = va_arg(args, int); 1327 spec.precision = va_arg(args, int);
1137 break; 1328 break;
1138 1329
1139 case FORMAT_TYPE_CHAR: 1330 case FORMAT_TYPE_CHAR: {
1331 char c;
1332
1140 if (!(spec.flags & LEFT)) { 1333 if (!(spec.flags & LEFT)) {
1141 while (--spec.field_width > 0) { 1334 while (--spec.field_width > 0) {
1142 if (str < end) 1335 if (str < end)
@@ -1155,6 +1348,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1155 ++str; 1348 ++str;
1156 } 1349 }
1157 break; 1350 break;
1351 }
1158 1352
1159 case FORMAT_TYPE_STR: 1353 case FORMAT_TYPE_STR:
1160 str = string(str, end, va_arg(args, char *), spec); 1354 str = string(str, end, va_arg(args, char *), spec);
@@ -1180,13 +1374,12 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1180 break; 1374 break;
1181 1375
1182 case FORMAT_TYPE_NRCHARS: { 1376 case FORMAT_TYPE_NRCHARS: {
1183 int qualifier = spec.qualifier; 1377 u8 qualifier = spec.qualifier;
1184 1378
1185 if (qualifier == 'l') { 1379 if (qualifier == 'l') {
1186 long *ip = va_arg(args, long *); 1380 long *ip = va_arg(args, long *);
1187 *ip = (str - buf); 1381 *ip = (str - buf);
1188 } else if (qualifier == 'Z' || 1382 } else if (TOLOWER(qualifier) == 'z') {
1189 qualifier == 'z') {
1190 size_t *ip = va_arg(args, size_t *); 1383 size_t *ip = va_arg(args, size_t *);
1191 *ip = (str - buf); 1384 *ip = (str - buf);
1192 } else { 1385 } else {
@@ -1269,7 +1462,8 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
1269{ 1462{
1270 int i; 1463 int i;
1271 1464
1272 i=vsnprintf(buf,size,fmt,args); 1465 i = vsnprintf(buf, size, fmt, args);
1466
1273 return (i >= size) ? (size - 1) : i; 1467 return (i >= size) ? (size - 1) : i;
1274} 1468}
1275EXPORT_SYMBOL(vscnprintf); 1469EXPORT_SYMBOL(vscnprintf);
@@ -1288,14 +1482,15 @@ EXPORT_SYMBOL(vscnprintf);
1288 * 1482 *
1289 * See the vsnprintf() documentation for format string extensions over C99. 1483 * See the vsnprintf() documentation for format string extensions over C99.
1290 */ 1484 */
1291int snprintf(char * buf, size_t size, const char *fmt, ...) 1485int snprintf(char *buf, size_t size, const char *fmt, ...)
1292{ 1486{
1293 va_list args; 1487 va_list args;
1294 int i; 1488 int i;
1295 1489
1296 va_start(args, fmt); 1490 va_start(args, fmt);
1297 i=vsnprintf(buf,size,fmt,args); 1491 i = vsnprintf(buf, size, fmt, args);
1298 va_end(args); 1492 va_end(args);
1493
1299 return i; 1494 return i;
1300} 1495}
1301EXPORT_SYMBOL(snprintf); 1496EXPORT_SYMBOL(snprintf);
@@ -1311,7 +1506,7 @@ EXPORT_SYMBOL(snprintf);
1311 * the trailing '\0'. If @size is <= 0 the function returns 0. 1506 * the trailing '\0'. If @size is <= 0 the function returns 0.
1312 */ 1507 */
1313 1508
1314int scnprintf(char * buf, size_t size, const char *fmt, ...) 1509int scnprintf(char *buf, size_t size, const char *fmt, ...)
1315{ 1510{
1316 va_list args; 1511 va_list args;
1317 int i; 1512 int i;
@@ -1319,6 +1514,7 @@ int scnprintf(char * buf, size_t size, const char *fmt, ...)
1319 va_start(args, fmt); 1514 va_start(args, fmt);
1320 i = vsnprintf(buf, size, fmt, args); 1515 i = vsnprintf(buf, size, fmt, args);
1321 va_end(args); 1516 va_end(args);
1517
1322 return (i >= size) ? (size - 1) : i; 1518 return (i >= size) ? (size - 1) : i;
1323} 1519}
1324EXPORT_SYMBOL(scnprintf); 1520EXPORT_SYMBOL(scnprintf);
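The return-value contrast between the two families is easy to get wrong; a sketch with a hypothetical demo function, the buffer sized to force truncation:

    #include <linux/kernel.h>

    static void demo_return_values(void)
    {
        char buf[8];
        int would_be, stored;

        would_be = snprintf(buf, sizeof(buf), "%s", "hello, world");
        stored = scnprintf(buf, sizeof(buf), "%s", "hello, world");
        pr_info("would_be=%d stored=%d\n", would_be, stored); /* 12 and 7 */
    }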
@@ -1356,14 +1552,15 @@ EXPORT_SYMBOL(vsprintf);
1356 * 1552 *
1357 * See the vsnprintf() documentation for format string extensions over C99. 1553 * See the vsnprintf() documentation for format string extensions over C99.
1358 */ 1554 */
1359int sprintf(char * buf, const char *fmt, ...) 1555int sprintf(char *buf, const char *fmt, ...)
1360{ 1556{
1361 va_list args; 1557 va_list args;
1362 int i; 1558 int i;
1363 1559
1364 va_start(args, fmt); 1560 va_start(args, fmt);
1365 i=vsnprintf(buf, INT_MAX, fmt, args); 1561 i = vsnprintf(buf, INT_MAX, fmt, args);
1366 va_end(args); 1562 va_end(args);
1563
1367 return i; 1564 return i;
1368} 1565}
1369EXPORT_SYMBOL(sprintf); 1566EXPORT_SYMBOL(sprintf);
@@ -1396,7 +1593,6 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
1396{ 1593{
1397 struct printf_spec spec = {0}; 1594 struct printf_spec spec = {0};
1398 char *str, *end; 1595 char *str, *end;
1399 int read;
1400 1596
1401 str = (char *)bin_buf; 1597 str = (char *)bin_buf;
1402 end = (char *)(bin_buf + size); 1598 end = (char *)(bin_buf + size);
@@ -1421,14 +1617,15 @@ do { \
1421 str += sizeof(type); \ 1617 str += sizeof(type); \
1422} while (0) 1618} while (0)
1423 1619
1424
1425 while (*fmt) { 1620 while (*fmt) {
1426 read = format_decode(fmt, &spec); 1621 int read = format_decode(fmt, &spec);
1427 1622
1428 fmt += read; 1623 fmt += read;
1429 1624
1430 switch (spec.type) { 1625 switch (spec.type) {
1431 case FORMAT_TYPE_NONE: 1626 case FORMAT_TYPE_NONE:
1627 case FORMAT_TYPE_INVALID:
1628 case FORMAT_TYPE_PERCENT_CHAR:
1432 break; 1629 break;
1433 1630
1434 case FORMAT_TYPE_WIDTH: 1631 case FORMAT_TYPE_WIDTH:
@@ -1443,13 +1640,14 @@ do { \
1443 case FORMAT_TYPE_STR: { 1640 case FORMAT_TYPE_STR: {
1444 const char *save_str = va_arg(args, char *); 1641 const char *save_str = va_arg(args, char *);
1445 size_t len; 1642 size_t len;
1643
1446 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE 1644 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
1447 || (unsigned long)save_str < PAGE_SIZE) 1645 || (unsigned long)save_str < PAGE_SIZE)
1448 save_str = "<NULL>"; 1646 save_str = "(null)";
1449 len = strlen(save_str); 1647 len = strlen(save_str) + 1;
1450 if (str + len + 1 < end) 1648 if (str + len < end)
1451 memcpy(str, save_str, len + 1); 1649 memcpy(str, save_str, len);
1452 str += len + 1; 1650 str += len;
1453 break; 1651 break;
1454 } 1652 }
1455 1653
@@ -1460,19 +1658,13 @@ do { \
1460 fmt++; 1658 fmt++;
1461 break; 1659 break;
1462 1660
1463 case FORMAT_TYPE_PERCENT_CHAR:
1464 break;
1465
1466 case FORMAT_TYPE_INVALID:
1467 break;
1468
1469 case FORMAT_TYPE_NRCHARS: { 1661 case FORMAT_TYPE_NRCHARS: {
1470 /* skip %n 's argument */ 1662 /* skip %n 's argument */
1471 int qualifier = spec.qualifier; 1663 u8 qualifier = spec.qualifier;
1472 void *skip_arg; 1664 void *skip_arg;
1473 if (qualifier == 'l') 1665 if (qualifier == 'l')
1474 skip_arg = va_arg(args, long *); 1666 skip_arg = va_arg(args, long *);
1475 else if (qualifier == 'Z' || qualifier == 'z') 1667 else if (TOLOWER(qualifier) == 'z')
1476 skip_arg = va_arg(args, size_t *); 1668 skip_arg = va_arg(args, size_t *);
1477 else 1669 else
1478 skip_arg = va_arg(args, int *); 1670 skip_arg = va_arg(args, int *);
@@ -1508,8 +1700,8 @@ do { \
1508 } 1700 }
1509 } 1701 }
1510 } 1702 }
1511 return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
1512 1703
1704 return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
1513#undef save_arg 1705#undef save_arg
1514} 1706}
1515EXPORT_SYMBOL_GPL(vbin_printf); 1707EXPORT_SYMBOL_GPL(vbin_printf);
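vbin_printf() is one half of the bprintf()/bstr_printf() pair: the arguments are packed now and rendered later against the same format string. A sketch; demo_bprintf is hypothetical:

    #include <linux/kernel.h>

    static void demo_bprintf(void)
    {
        u32 bin[64];
        char out[128];

        /* pack the arguments in binary form... */
        bprintf(bin, ARRAY_SIZE(bin), "pid %d comm %s", 1, "init");
        /* ...and format them later, reusing the identical format */
        bstr_printf(out, sizeof(out), "pid %d comm %s", bin);
    }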
@@ -1538,11 +1730,9 @@ EXPORT_SYMBOL_GPL(vbin_printf);
1538 */ 1730 */
1539int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) 1731int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1540{ 1732{
1541 unsigned long long num;
1542 char *str, *end, c;
1543 const char *args = (const char *)bin_buf;
1544
1545 struct printf_spec spec = {0}; 1733 struct printf_spec spec = {0};
1734 char *str, *end;
1735 const char *args = (const char *)bin_buf;
1546 1736
1547 if (WARN_ON_ONCE((int) size < 0)) 1737 if (WARN_ON_ONCE((int) size < 0))
1548 return 0; 1738 return 0;
@@ -1572,10 +1762,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1572 } 1762 }
1573 1763
1574 while (*fmt) { 1764 while (*fmt) {
1575 int read;
1576 const char *old_fmt = fmt; 1765 const char *old_fmt = fmt;
1577 1766 int read = format_decode(fmt, &spec);
1578 read = format_decode(fmt, &spec);
1579 1767
1580 fmt += read; 1768 fmt += read;
1581 1769
@@ -1599,7 +1787,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1599 spec.precision = get_arg(int); 1787 spec.precision = get_arg(int);
1600 break; 1788 break;
1601 1789
1602 case FORMAT_TYPE_CHAR: 1790 case FORMAT_TYPE_CHAR: {
1791 char c;
1792
1603 if (!(spec.flags & LEFT)) { 1793 if (!(spec.flags & LEFT)) {
1604 while (--spec.field_width > 0) { 1794 while (--spec.field_width > 0) {
1605 if (str < end) 1795 if (str < end)
@@ -1617,11 +1807,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1617 ++str; 1807 ++str;
1618 } 1808 }
1619 break; 1809 break;
1810 }
1620 1811
1621 case FORMAT_TYPE_STR: { 1812 case FORMAT_TYPE_STR: {
1622 const char *str_arg = args; 1813 const char *str_arg = args;
1623 size_t len = strlen(str_arg); 1814 args += strlen(str_arg) + 1;
1624 args += len + 1;
1625 str = string(str, end, (char *)str_arg, spec); 1815 str = string(str, end, (char *)str_arg, spec);
1626 break; 1816 break;
1627 } 1817 }
@@ -1633,11 +1823,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1633 break; 1823 break;
1634 1824
1635 case FORMAT_TYPE_PERCENT_CHAR: 1825 case FORMAT_TYPE_PERCENT_CHAR:
1636 if (str < end)
1637 *str = '%';
1638 ++str;
1639 break;
1640
1641 case FORMAT_TYPE_INVALID: 1826 case FORMAT_TYPE_INVALID:
1642 if (str < end) 1827 if (str < end)
1643 *str = '%'; 1828 *str = '%';
@@ -1648,15 +1833,15 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1648 /* skip */ 1833 /* skip */
1649 break; 1834 break;
1650 1835
1651 default: 1836 default: {
1837 unsigned long long num;
1838
1652 switch (spec.type) { 1839 switch (spec.type) {
1653 1840
1654 case FORMAT_TYPE_LONG_LONG: 1841 case FORMAT_TYPE_LONG_LONG:
1655 num = get_arg(long long); 1842 num = get_arg(long long);
1656 break; 1843 break;
1657 case FORMAT_TYPE_ULONG: 1844 case FORMAT_TYPE_ULONG:
1658 num = get_arg(unsigned long);
1659 break;
1660 case FORMAT_TYPE_LONG: 1845 case FORMAT_TYPE_LONG:
1661 num = get_arg(unsigned long); 1846 num = get_arg(unsigned long);
1662 break; 1847 break;
@@ -1686,8 +1871,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1686 } 1871 }
1687 1872
1688 str = number(str, end, num, spec); 1873 str = number(str, end, num, spec);
1689 } 1874 } /* default: */
1690 } 1875 } /* switch(spec.type) */
1876 } /* while(*fmt) */
1691 1877
1692 if (size > 0) { 1878 if (size > 0) {
1693 if (str < end) 1879 if (str < end)
@@ -1721,6 +1907,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...)
1721 va_start(args, fmt); 1907 va_start(args, fmt);
1722 ret = vbin_printf(bin_buf, size, fmt, args); 1908 ret = vbin_printf(bin_buf, size, fmt, args);
1723 va_end(args); 1909 va_end(args);
1910
1724 return ret; 1911 return ret;
1725} 1912}
1726EXPORT_SYMBOL_GPL(bprintf); 1913EXPORT_SYMBOL_GPL(bprintf);
@@ -1733,27 +1920,25 @@ EXPORT_SYMBOL_GPL(bprintf);
1733 * @fmt: format of buffer 1920 * @fmt: format of buffer
1734 * @args: arguments 1921 * @args: arguments
1735 */ 1922 */
1736int vsscanf(const char * buf, const char * fmt, va_list args) 1923int vsscanf(const char *buf, const char *fmt, va_list args)
1737{ 1924{
1738 const char *str = buf; 1925 const char *str = buf;
1739 char *next; 1926 char *next;
1740 char digit; 1927 char digit;
1741 int num = 0; 1928 int num = 0;
1742 int qualifier; 1929 u8 qualifier;
1743 int base; 1930 u8 base;
1744 int field_width; 1931 s16 field_width;
1745 int is_sign = 0; 1932 bool is_sign;
1746 1933
1747 while(*fmt && *str) { 1934 while (*fmt && *str) {
1748 /* skip any white space in format */ 1935 /* skip any white space in format */
1749 /* white space in format matches any amount of 1936 /* white space in format matches any amount of
1750 * white space, including none, in the input. 1937 * white space, including none, in the input.
1751 */ 1938 */
1752 if (isspace(*fmt)) { 1939 if (isspace(*fmt)) {
1753 while (isspace(*fmt)) 1940 fmt = skip_spaces(++fmt);
1754 ++fmt; 1941 str = skip_spaces(str);
1755 while (isspace(*str))
1756 ++str;
1757 } 1942 }
1758 1943
1759 /* anything that is not a conversion must match exactly */ 1944 /* anything that is not a conversion must match exactly */
@@ -1766,12 +1951,12 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1766 if (!*fmt) 1951 if (!*fmt)
1767 break; 1952 break;
1768 ++fmt; 1953 ++fmt;
1769 1954
1770 /* skip this conversion. 1955 /* skip this conversion.
1771 * advance both strings to next white space 1956 * advance both strings to next white space
1772 */ 1957 */
1773 if (*fmt == '*') { 1958 if (*fmt == '*') {
1774 while (!isspace(*fmt) && *fmt) 1959 while (!isspace(*fmt) && *fmt != '%' && *fmt)
1775 fmt++; 1960 fmt++;
1776 while (!isspace(*str) && *str) 1961 while (!isspace(*str) && *str)
1777 str++; 1962 str++;
@@ -1785,8 +1970,8 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1785 1970
1786 /* get conversion qualifier */ 1971 /* get conversion qualifier */
1787 qualifier = -1; 1972 qualifier = -1;
1788 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || 1973 if (*fmt == 'h' || TOLOWER(*fmt) == 'l' ||
1789 *fmt == 'Z' || *fmt == 'z') { 1974 TOLOWER(*fmt) == 'z') {
1790 qualifier = *fmt++; 1975 qualifier = *fmt++;
1791 if (unlikely(qualifier == *fmt)) { 1976 if (unlikely(qualifier == *fmt)) {
1792 if (qualifier == 'h') { 1977 if (qualifier == 'h') {
@@ -1798,16 +1983,17 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1798 } 1983 }
1799 } 1984 }
1800 } 1985 }
1801 base = 10;
1802 is_sign = 0;
1803 1986
1804 if (!*fmt || !*str) 1987 if (!*fmt || !*str)
1805 break; 1988 break;
1806 1989
1807 switch(*fmt++) { 1990 base = 10;
1991 is_sign = 0;
1992
1993 switch (*fmt++) {
1808 case 'c': 1994 case 'c':
1809 { 1995 {
1810 char *s = (char *) va_arg(args,char*); 1996 char *s = (char *)va_arg(args, char *);
1811 if (field_width == -1) 1997 if (field_width == -1)
1812 field_width = 1; 1998 field_width = 1;
1813 do { 1999 do {
@@ -1818,17 +2004,15 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1818 continue; 2004 continue;
1819 case 's': 2005 case 's':
1820 { 2006 {
1821 char *s = (char *) va_arg(args, char *); 2007 char *s = (char *)va_arg(args, char *);
1822 if(field_width == -1) 2008 if (field_width == -1)
1823 field_width = INT_MAX; 2009 field_width = SHRT_MAX;
1824 /* first, skip leading white space in buffer */ 2010 /* first, skip leading white space in buffer */
1825 while (isspace(*str)) 2011 str = skip_spaces(str);
1826 str++;
1827 2012
1828 /* now copy until next white space */ 2013 /* now copy until next white space */
1829 while (*str && !isspace(*str) && field_width--) { 2014 while (*str && !isspace(*str) && field_width--)
1830 *s++ = *str++; 2015 *s++ = *str++;
1831 }
1832 *s = '\0'; 2016 *s = '\0';
1833 num++; 2017 num++;
1834 } 2018 }
@@ -1836,7 +2020,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1836 case 'n': 2020 case 'n':
1837 /* return number of characters read so far */ 2021 /* return number of characters read so far */
1838 { 2022 {
1839 int *i = (int *)va_arg(args,int*); 2023 int *i = (int *)va_arg(args, int *);
1840 *i = str - buf; 2024 *i = str - buf;
1841 } 2025 }
1842 continue; 2026 continue;
@@ -1848,14 +2032,14 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1848 base = 16; 2032 base = 16;
1849 break; 2033 break;
1850 case 'i': 2034 case 'i':
1851 base = 0; 2035 base = 0;
1852 case 'd': 2036 case 'd':
1853 is_sign = 1; 2037 is_sign = 1;
1854 case 'u': 2038 case 'u':
1855 break; 2039 break;
1856 case '%': 2040 case '%':
1857 /* looking for '%' in str */ 2041 /* looking for '%' in str */
1858 if (*str++ != '%') 2042 if (*str++ != '%')
1859 return num; 2043 return num;
1860 continue; 2044 continue;
1861 default: 2045 default:
@@ -1866,71 +2050,70 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1866 /* have some sort of integer conversion. 2050 /* have some sort of integer conversion.
1867 * first, skip white space in buffer. 2051 * first, skip white space in buffer.
1868 */ 2052 */
1869 while (isspace(*str)) 2053 str = skip_spaces(str);
1870 str++;
1871 2054
1872 digit = *str; 2055 digit = *str;
1873 if (is_sign && digit == '-') 2056 if (is_sign && digit == '-')
1874 digit = *(str + 1); 2057 digit = *(str + 1);
1875 2058
1876 if (!digit 2059 if (!digit
1877 || (base == 16 && !isxdigit(digit)) 2060 || (base == 16 && !isxdigit(digit))
1878 || (base == 10 && !isdigit(digit)) 2061 || (base == 10 && !isdigit(digit))
1879 || (base == 8 && (!isdigit(digit) || digit > '7')) 2062 || (base == 8 && (!isdigit(digit) || digit > '7'))
1880 || (base == 0 && !isdigit(digit))) 2063 || (base == 0 && !isdigit(digit)))
1881 break; 2064 break;
1882 2065
1883 switch(qualifier) { 2066 switch (qualifier) {
1884 case 'H': /* that's 'hh' in format */ 2067 case 'H': /* that's 'hh' in format */
1885 if (is_sign) { 2068 if (is_sign) {
1886 signed char *s = (signed char *) va_arg(args,signed char *); 2069 signed char *s = (signed char *)va_arg(args, signed char *);
1887 *s = (signed char) simple_strtol(str,&next,base); 2070 *s = (signed char)simple_strtol(str, &next, base);
1888 } else { 2071 } else {
1889 unsigned char *s = (unsigned char *) va_arg(args, unsigned char *); 2072 unsigned char *s = (unsigned char *)va_arg(args, unsigned char *);
1890 *s = (unsigned char) simple_strtoul(str, &next, base); 2073 *s = (unsigned char)simple_strtoul(str, &next, base);
1891 } 2074 }
1892 break; 2075 break;
1893 case 'h': 2076 case 'h':
1894 if (is_sign) { 2077 if (is_sign) {
1895 short *s = (short *) va_arg(args,short *); 2078 short *s = (short *)va_arg(args, short *);
1896 *s = (short) simple_strtol(str,&next,base); 2079 *s = (short)simple_strtol(str, &next, base);
1897 } else { 2080 } else {
1898 unsigned short *s = (unsigned short *) va_arg(args, unsigned short *); 2081 unsigned short *s = (unsigned short *)va_arg(args, unsigned short *);
1899 *s = (unsigned short) simple_strtoul(str, &next, base); 2082 *s = (unsigned short)simple_strtoul(str, &next, base);
1900 } 2083 }
1901 break; 2084 break;
1902 case 'l': 2085 case 'l':
1903 if (is_sign) { 2086 if (is_sign) {
1904 long *l = (long *) va_arg(args,long *); 2087 long *l = (long *)va_arg(args, long *);
1905 *l = simple_strtol(str,&next,base); 2088 *l = simple_strtol(str, &next, base);
1906 } else { 2089 } else {
1907 unsigned long *l = (unsigned long*) va_arg(args,unsigned long*); 2090 unsigned long *l = (unsigned long *)va_arg(args, unsigned long *);
1908 *l = simple_strtoul(str,&next,base); 2091 *l = simple_strtoul(str, &next, base);
1909 } 2092 }
1910 break; 2093 break;
1911 case 'L': 2094 case 'L':
1912 if (is_sign) { 2095 if (is_sign) {
1913 long long *l = (long long*) va_arg(args,long long *); 2096 long long *l = (long long *)va_arg(args, long long *);
1914 *l = simple_strtoll(str,&next,base); 2097 *l = simple_strtoll(str, &next, base);
1915 } else { 2098 } else {
1916 unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*); 2099 unsigned long long *l = (unsigned long long *)va_arg(args, unsigned long long *);
1917 *l = simple_strtoull(str,&next,base); 2100 *l = simple_strtoull(str, &next, base);
1918 } 2101 }
1919 break; 2102 break;
1920 case 'Z': 2103 case 'Z':
1921 case 'z': 2104 case 'z':
1922 { 2105 {
1923 size_t *s = (size_t*) va_arg(args,size_t*); 2106 size_t *s = (size_t *)va_arg(args, size_t *);
1924 *s = (size_t) simple_strtoul(str,&next,base); 2107 *s = (size_t)simple_strtoul(str, &next, base);
1925 } 2108 }
1926 break; 2109 break;
1927 default: 2110 default:
1928 if (is_sign) { 2111 if (is_sign) {
1929 int *i = (int *) va_arg(args, int*); 2112 int *i = (int *)va_arg(args, int *);
1930 *i = (int) simple_strtol(str,&next,base); 2113 *i = (int)simple_strtol(str, &next, base);
1931 } else { 2114 } else {
1932 unsigned int *i = (unsigned int*) va_arg(args, unsigned int*); 2115 unsigned int *i = (unsigned int *)va_arg(args, unsigned int *);
1933 *i = (unsigned int) simple_strtoul(str,&next,base); 2116 *i = (unsigned int)simple_strtoul(str, &next, base);
1934 } 2117 }
1935 break; 2118 break;
1936 } 2119 }
@@ -1961,14 +2144,15 @@ EXPORT_SYMBOL(vsscanf);
1961 * @fmt: formatting of buffer 2144 * @fmt: formatting of buffer
1962 * @...: resulting arguments 2145 * @...: resulting arguments
1963 */ 2146 */
1964int sscanf(const char * buf, const char * fmt, ...) 2147int sscanf(const char *buf, const char *fmt, ...)
1965{ 2148{
1966 va_list args; 2149 va_list args;
1967 int i; 2150 int i;
1968 2151
1969 va_start(args,fmt); 2152 va_start(args, fmt);
1970 i = vsscanf(buf,fmt,args); 2153 i = vsscanf(buf, fmt, args);
1971 va_end(args); 2154 va_end(args);
2155
1972 return i; 2156 return i;
1973} 2157}
1974EXPORT_SYMBOL(sscanf); 2158EXPORT_SYMBOL(sscanf);
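A sketch of the whitespace rule the hunks above tighten: any run of whitespace in the format matches any run (or none) in the input, while other characters must match exactly. demo_sscanf is hypothetical:

    #include <linux/kernel.h>

    static void demo_sscanf(void)
    {
        int major, minor;

        if (sscanf("2 . 6", "%d . %d", &major, &minor) == 2)
            pr_info("version %d.%d\n", major, minor); /* version 2.6 */
    }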
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 8550b0c05d00..2c13ecc5bb2c 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -21,12 +21,31 @@
21 - Pentium III (Anderson) 21 - Pentium III (Anderson)
22 - M68060 (Nikl) 22 - M68060 (Nikl)
23 */ 23 */
24union uu {
25 unsigned short us;
26 unsigned char b[2];
27};
28
29/* Endian independent version */
30static inline unsigned short
31get_unaligned16(const unsigned short *p)
32{
33 union uu mm;
34 unsigned char *b = (unsigned char *)p;
35
36 mm.b[0] = b[0];
37 mm.b[1] = b[1];
38 return mm.us;
39}
40
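The union copy above is one endian-neutral way to do an alignment-safe 16-bit load; a memcpy-based equivalent (a sketch, not part of this patch) behaves the same on strict-alignment architectures:

    #include <linux/string.h>

    static inline unsigned short get_unaligned16_alt(const unsigned short *p)
    {
        unsigned short v;

        memcpy(&v, p, sizeof(v)); /* compilers lower this to safe loads */
        return v;
    }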
24#ifdef POSTINC 41#ifdef POSTINC
25# define OFF 0 42# define OFF 0
26# define PUP(a) *(a)++ 43# define PUP(a) *(a)++
44# define UP_UNALIGNED(a) get_unaligned16((a)++)
27#else 45#else
28# define OFF 1 46# define OFF 1
29# define PUP(a) *++(a) 47# define PUP(a) *++(a)
48# define UP_UNALIGNED(a) get_unaligned16(++(a))
30#endif 49#endif
31 50
32/* 51/*
@@ -239,18 +258,50 @@ void inflate_fast(z_streamp strm, unsigned start)
239 } 258 }
240 } 259 }
241 else { 260 else {
261 unsigned short *sout;
262 unsigned long loops;
263
242 from = out - dist; /* copy direct from output */ 264 from = out - dist; /* copy direct from output */
243 do { /* minimum length is three */ 265 /* minimum length is three */
244 PUP(out) = PUP(from); 266 /* Align out addr */
245 PUP(out) = PUP(from); 267 if (!((long)(out - 1 + OFF) & 1)) {
246 PUP(out) = PUP(from); 268 PUP(out) = PUP(from);
247 len -= 3; 269 len--;
248 } while (len > 2); 270 }
249 if (len) { 271 sout = (unsigned short *)(out - OFF);
250 PUP(out) = PUP(from); 272 if (dist > 2) {
251 if (len > 1) 273 unsigned short *sfrom;
252 PUP(out) = PUP(from); 274
253 } 275 sfrom = (unsigned short *)(from - OFF);
276 loops = len >> 1;
277 do
278#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
279 PUP(sout) = PUP(sfrom);
280#else
281 PUP(sout) = UP_UNALIGNED(sfrom);
282#endif
283 while (--loops);
284 out = (unsigned char *)sout + OFF;
285 from = (unsigned char *)sfrom + OFF;
286 } else { /* dist == 1 or dist == 2 */
287 unsigned short pat16;
288
289 pat16 = *(sout-1+OFF);
290 if (dist == 1) {
291 union uu mm;
292 /* copy one char pattern to both bytes */
293 mm.us = pat16;
294 mm.b[0] = mm.b[1];
295 pat16 = mm.us;
296 }
297 loops = len >> 1;
298 do
299 PUP(sout) = pat16;
300 while (--loops);
301 out = (unsigned char *)sout + OFF;
302 }
303 if (len & 1)
304 PUP(out) = PUP(from);
254 } 305 }
255 } 306 }
256 else if ((op & 64) == 0) { /* 2nd level distance code */ 307 else if ((op & 64) == 0) { /* 2nd level distance code */
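For dist == 1, the new code doubles the last output byte into a 16-bit pattern so the run can be stored a halfword at a time; a standalone sketch of that set-up, with hypothetical names:

    union uu_sketch { unsigned short us; unsigned char b[2]; };

    static unsigned short double_byte(unsigned char last)
    {
        union uu_sketch mm;

        mm.b[0] = last;
        mm.b[1] = last;
        return mm.us; /* both bytes equal, so host endianness is irrelevant */
    }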