Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                 |   38
-rw-r--r--  lib/Kconfig.debug           |  223
-rw-r--r--  lib/Kconfig.kmemcheck       |   91
-rw-r--r--  lib/Makefile                |   23
-rw-r--r--  lib/atomic64.c              |  186
-rw-r--r--  lib/bitmap.c                |   28
-rw-r--r--  lib/checksum.c              |  201
-rw-r--r--  lib/cpumask.c               |   27
-rw-r--r--  lib/debug_locks.c           |    2
-rw-r--r--  lib/debugobjects.c          |  127
-rw-r--r--  lib/dec_and_lock.c          |    3
-rw-r--r--  lib/decompress.c            |   54
-rw-r--r--  lib/decompress_bunzip2.c    |  746
-rw-r--r--  lib/decompress_inflate.c    |  168
-rw-r--r--  lib/decompress_unlzma.c     |  659
-rw-r--r--  lib/dma-debug.c             | 1297
-rw-r--r--  lib/dynamic_debug.c         |  769
-rw-r--r--  lib/dynamic_printk.c        |  414
-rw-r--r--  lib/extable.c               |   21
-rw-r--r--  lib/flex_array.c            |  268
-rw-r--r--  lib/gcd.c                   |   18
-rw-r--r--  lib/genalloc.c              |    1
-rw-r--r--  lib/hexdump.c               |   15
-rw-r--r--  lib/idr.c                   |   48
-rw-r--r--  lib/is_single_threaded.c    |   61
-rw-r--r--  lib/kernel_lock.c           |    2
-rw-r--r--  lib/kobject.c               |   12
-rw-r--r--  lib/kobject_uevent.c        |   12
-rw-r--r--  lib/lmb.c                   |   44
-rw-r--r--  lib/locking-selftest.c      |    4
-rw-r--r--  lib/nlattr.c                |  502
-rw-r--r--  lib/radix-tree.c            |  110
-rw-r--r--  lib/rational.c              |   62
-rw-r--r--  lib/rbtree.c                |   48
-rw-r--r--  lib/scatterlist.c           |   25
-rw-r--r--  lib/smp_processor_id.c      |    2
-rw-r--r--  lib/swiotlb.c               |  255
-rw-r--r--  lib/vsprintf.c              | 1203
-rw-r--r--  lib/zlib_inflate/inflate.h  |    4
-rw-r--r--  lib/zlib_inflate/inftrees.h |    4
40 files changed, 6705 insertions, 1072 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 03c2c24b9083..bb1326d3839c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -2,11 +2,17 @@
2# Library configuration 2# Library configuration
3# 3#
4 4
5config BINARY_PRINTF
6 def_bool n
7
5menu "Library routines" 8menu "Library routines"
6 9
7config BITREVERSE 10config BITREVERSE
8 tristate 11 tristate
9 12
13config RATIONAL
14 boolean
15
10config GENERIC_FIND_FIRST_BIT 16config GENERIC_FIND_FIRST_BIT
11 bool 17 bool
12 18
@@ -98,6 +104,20 @@ config LZO_DECOMPRESS
98 tristate 104 tristate
99 105
100# 106#
107# These all provide a common interface (hence the apparent duplication with
108# ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
109#
110config DECOMPRESS_GZIP
111 select ZLIB_INFLATE
112 tristate
113
114config DECOMPRESS_BZIP2
115 tristate
116
117config DECOMPRESS_LZMA
118 tristate
119
120#
101# Generic allocator support is selected if needed 121# Generic allocator support is selected if needed
102# 122#
103config GENERIC_ALLOCATOR 123config GENERIC_ALLOCATOR
@@ -136,12 +156,6 @@ config TEXTSEARCH_BM
136config TEXTSEARCH_FSM 156config TEXTSEARCH_FSM
137 tristate 157 tristate
138 158
139#
140# plist support is selected if needed
141#
142config PLIST
143 boolean
144
145config HAS_IOMEM 159config HAS_IOMEM
146 boolean 160 boolean
147 depends on !NO_IOMEM 161 depends on !NO_IOMEM
@@ -174,4 +188,16 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
174 bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS 188 bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
175 depends on EXPERIMENTAL && BROKEN 189 depends on EXPERIMENTAL && BROKEN
176 190
191#
192# Netlink attribute parsing support is select'ed if needed
193#
194config NLATTR
195 bool
196
197#
198# Generic 64-bit atomic support is selected if needed
199#
200config GENERIC_ATOMIC64
201 bool
202
177endmenu 203endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4c9ae6085c75..7dbd5d9c29a4 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -186,6 +186,44 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
186 default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC 186 default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
187 default 1 if BOOTPARAM_SOFTLOCKUP_PANIC 187 default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
188 188
189config DETECT_HUNG_TASK
190 bool "Detect Hung Tasks"
191 depends on DEBUG_KERNEL
192 default DETECT_SOFTLOCKUP
193 help
194 Say Y here to enable the kernel to detect "hung tasks",
195 which are bugs that cause the task to be stuck in
196 uninterruptible "D" state indefinitely.
197
198 When a hung task is detected, the kernel will print the
199 current stack trace (which you should report), but the
200 task will stay in uninterruptible state. If lockdep is
201 enabled then all held locks will also be reported. This
202 feature has negligible overhead.
203
204config BOOTPARAM_HUNG_TASK_PANIC
205 bool "Panic (Reboot) On Hung Tasks"
206 depends on DETECT_HUNG_TASK
207 help
208 Say Y here to enable the kernel to panic on "hung tasks",
209 which are bugs that cause the kernel to leave a task stuck
210 in uninterruptible "D" state.
211
212 The panic can be used in combination with panic_timeout,
213 to cause the system to reboot automatically after a
214 hung task has been detected. This feature is useful for
215 high-availability systems that have uptime guarantees and
216 where hung tasks must be resolved ASAP.
217
218 Say N if unsure.
219
220config BOOTPARAM_HUNG_TASK_PANIC_VALUE
221 int
222 depends on DETECT_HUNG_TASK
223 range 0 1
224 default 0 if !BOOTPARAM_HUNG_TASK_PANIC
225 default 1 if BOOTPARAM_HUNG_TASK_PANIC
226
189config SCHED_DEBUG 227config SCHED_DEBUG
190 bool "Collect scheduler debugging info" 228 bool "Collect scheduler debugging info"
191 depends on DEBUG_KERNEL && PROC_FS 229 depends on DEBUG_KERNEL && PROC_FS
@@ -262,7 +300,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
262 300
263config DEBUG_SLAB 301config DEBUG_SLAB
264 bool "Debug slab memory allocations" 302 bool "Debug slab memory allocations"
265 depends on DEBUG_KERNEL && SLAB 303 depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
266 help 304 help
267 Say Y here to have the kernel do limited verification on memory 305 Say Y here to have the kernel do limited verification on memory
268 allocation as well as poisoning memory on free to catch use of freed 306 allocation as well as poisoning memory on free to catch use of freed
@@ -274,7 +312,7 @@ config DEBUG_SLAB_LEAK
274 312
275config SLUB_DEBUG_ON 313config SLUB_DEBUG_ON
276 bool "SLUB debugging on by default" 314 bool "SLUB debugging on by default"
277 depends on SLUB && SLUB_DEBUG 315 depends on SLUB && SLUB_DEBUG && !KMEMCHECK
278 default n 316 default n
279 help 317 help
280 Boot with debugging on by default. SLUB boots by default with 318 Boot with debugging on by default. SLUB boots by default with
@@ -298,6 +336,51 @@ config SLUB_STATS
298 out which slabs are relevant to a particular load. 336 out which slabs are relevant to a particular load.
299 Try running: slabinfo -DA 337 Try running: slabinfo -DA
300 338
339config DEBUG_KMEMLEAK
340 bool "Kernel memory leak detector"
341 depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \
342 !MEMORY_HOTPLUG
343 select DEBUG_FS if SYSFS
344 select STACKTRACE if STACKTRACE_SUPPORT
345 select KALLSYMS
346 help
347 Say Y here if you want to enable the memory leak
348 detector. The memory allocation/freeing is traced in a way
349 similar to Boehm's conservative garbage collector, the
350 difference being that the orphan objects are not freed but
351 only shown in /sys/kernel/debug/kmemleak. Enabling this
352 feature will introduce an overhead to memory
353 allocations. See Documentation/kmemleak.txt for more
354 details.
355
356 Enabling DEBUG_SLAB or SLUB_DEBUG may increase the chances
357 of finding leaks due to the slab objects poisoning.
358
359 In order to access the kmemleak file, debugfs needs to be
360 mounted (usually at /sys/kernel/debug).
361
362config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
363 int "Maximum kmemleak early log entries"
364 depends on DEBUG_KMEMLEAK
365 range 200 2000
366 default 400
367 help
368 Kmemleak must track all the memory allocations to avoid
369 reporting false positives. Since memory may be allocated or
370 freed before kmemleak is initialised, an early log buffer is
371 used to store these actions. If kmemleak reports "early log
372 buffer exceeded", please increase this value.
373
374config DEBUG_KMEMLEAK_TEST
375 tristate "Simple test for the kernel memory leak detector"
376 depends on DEBUG_KMEMLEAK
377 help
378 Say Y or M here to build a test for the kernel memory leak
379 detector. This option enables a module that explicitly leaks
380 memory.
381
382 If unsure, say N.
383
301config DEBUG_PREEMPT 384config DEBUG_PREEMPT
302 bool "Debug preemptible kernel" 385 bool "Debug preemptible kernel"
303 depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64) 386 depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64)
@@ -402,7 +485,7 @@ config LOCKDEP
402 bool 485 bool
403 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 486 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
404 select STACKTRACE 487 select STACKTRACE
405 select FRAME_POINTER if !X86 && !MIPS && !PPC 488 select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
406 select KALLSYMS 489 select KALLSYMS
407 select KALLSYMS_ALL 490 select KALLSYMS_ALL
408 491
@@ -570,10 +653,34 @@ config DEBUG_NOTIFIERS
570 This is a relatively cheap check but if you care about maximum 653 This is a relatively cheap check but if you care about maximum
571 performance, say N. 654 performance, say N.
572 655
656config DEBUG_CREDENTIALS
657 bool "Debug credential management"
658 depends on DEBUG_KERNEL
659 help
660 Enable this to turn on some debug checking for credential
661 management. The additional code keeps track of the number of
662 pointers from task_structs to any given cred struct, and checks to
663 see that this number never exceeds the usage count of the cred
664 struct.
665
666 Furthermore, if SELinux is enabled, this also checks that the
667 security pointer in the cred struct is never seen to be invalid.
668
669 If unsure, say N.
670
671#
672# Select this config option from the architecture Kconfig, if it
673# is preferred to always offer frame pointers as a config
674# option on the architecture (regardless of DEBUG_KERNEL):
675#
676config ARCH_WANT_FRAME_POINTERS
677 bool
678 help
679
573config FRAME_POINTER 680config FRAME_POINTER
574 bool "Compile the kernel with frame pointers" 681 bool "Compile the kernel with frame pointers"
575 depends on DEBUG_KERNEL && \ 682 depends on DEBUG_KERNEL && \
576 (CRIS || M68K || M68KNOMMU || FRV || UML || S390 || \ 683 (CRIS || M68K || M68KNOMMU || FRV || UML || \
577 AVR32 || SUPERH || BLACKFIN || MN10300) || \ 684 AVR32 || SUPERH || BLACKFIN || MN10300) || \
578 ARCH_WANT_FRAME_POINTERS 685 ARCH_WANT_FRAME_POINTERS
579 default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS 686 default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
@@ -633,20 +740,7 @@ config RCU_TORTURE_TEST_RUNNABLE
633 740
634config RCU_CPU_STALL_DETECTOR 741config RCU_CPU_STALL_DETECTOR
635 bool "Check for stalled CPUs delaying RCU grace periods" 742 bool "Check for stalled CPUs delaying RCU grace periods"
636 depends on CLASSIC_RCU 743 depends on TREE_RCU || TREE_PREEMPT_RCU
637 default n
638 help
639 This option causes RCU to printk information on which
640 CPUs are delaying the current grace period, but only when
641 the grace period extends for excessive time periods.
642
643 Say Y if you want RCU to perform such checks.
644
645 Say N if you are unsure.
646
647config RCU_CPU_STALL_DETECTOR
648 bool "Check for stalled CPUs delaying RCU grace periods"
649 depends on CLASSIC_RCU || TREE_RCU
650 default n 744 default n
651 help 745 help
652 This option causes RCU to printk information on which 746 This option causes RCU to printk information on which
@@ -775,13 +869,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
775 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT 869 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
776 depends on !X86_64 870 depends on !X86_64
777 select STACKTRACE 871 select STACKTRACE
778 select FRAME_POINTER if !PPC 872 select FRAME_POINTER if !PPC && !S390
779 help 873 help
780 Provide stacktrace filter for fault-injection capabilities 874 Provide stacktrace filter for fault-injection capabilities
781 875
782config LATENCYTOP 876config LATENCYTOP
783 bool "Latency measuring infrastructure" 877 bool "Latency measuring infrastructure"
784 select FRAME_POINTER if !MIPS && !PPC 878 select FRAME_POINTER if !MIPS && !PPC && !S390
785 select KALLSYMS 879 select KALLSYMS
786 select KALLSYMS_ALL 880 select KALLSYMS_ALL
787 select STACKTRACE 881 select STACKTRACE
@@ -800,6 +894,7 @@ config SYSCTL_SYSCALL_CHECK
800 to properly maintain and use. This enables checks that help 894 to properly maintain and use. This enables checks that help
801 you to keep things correct. 895 you to keep things correct.
802 896
897source mm/Kconfig.debug
803source kernel/trace/Kconfig 898source kernel/trace/Kconfig
804 899
805config PROVIDE_OHCI1394_DMA_INIT 900config PROVIDE_OHCI1394_DMA_INIT
@@ -842,7 +937,7 @@ config FIREWIRE_OHCI_REMOTE_DMA
842 937
843 If unsure, say N. 938 If unsure, say N.
844 939
845menuconfig BUILD_DOCSRC 940config BUILD_DOCSRC
846 bool "Build targets in Documentation/ tree" 941 bool "Build targets in Documentation/ tree"
847 depends on HEADERS_CHECK 942 depends on HEADERS_CHECK
848 help 943 help
@@ -851,61 +946,83 @@ menuconfig BUILD_DOCSRC
851 946
852 Say N if you are unsure. 947 Say N if you are unsure.
853 948
854config DYNAMIC_PRINTK_DEBUG 949config DYNAMIC_DEBUG
855 bool "Enable dynamic printk() call support" 950 bool "Enable dynamic printk() support"
856 default n 951 default n
857 depends on PRINTK 952 depends on PRINTK
858 select PRINTK_DEBUG 953 depends on DEBUG_FS
859 help 954 help
860 955
861 Compiles debug level messages into the kernel, which would not 956 Compiles debug level messages into the kernel, which would not
862 otherwise be available at runtime. These messages can then be 957 otherwise be available at runtime. These messages can then be
863 enabled/disabled on a per module basis. This mechanism implicitly 958 enabled/disabled based on various levels of scope - per source file,
864 enables all pr_debug() and dev_dbg() calls. The impact of this 959 function, module, format string, and line number. This mechanism
865 compile option is a larger kernel text size of about 2%. 960 implicitly enables all pr_debug() and dev_dbg() calls. The impact of
961 this compile option is a larger kernel text size of about 2%.
866 962
867 Usage: 963 Usage:
868 964
869 Dynamic debugging is controlled by the debugfs file, 965 Dynamic debugging is controlled via the 'dynamic_debug/ddebug' file,
870 dynamic_printk/modules. This file contains a list of the modules that 966 which is contained in the 'debugfs' filesystem. Thus, the debugfs
871 can be enabled. The format of the file is the module name, followed 967 filesystem must first be mounted before making use of this feature.
872 by a set of flags that can be enabled. The first flag is always the 968 We refer to the control file as: <debugfs>/dynamic_debug/ddebug. This
873 'enabled' flag. For example: 969 file contains a list of the debug statements that can be enabled. The
970 format for each line of the file is:
874 971
875 <module_name> <enabled=0/1> 972 filename:lineno [module]function flags format
876 .
877 .
878 .
879 973
880 <module_name> : Name of the module in which the debug call resides 974 filename : source file of the debug statement
881 <enabled=0/1> : whether the messages are enabled or not 975 lineno : line number of the debug statement
976 module : module that contains the debug statement
977 function : function that contains the debug statement
978 flags : 'p' means the line is turned 'on' for printing
979 format : the format used for the debug statement
882 980
883 From a live system: 981 From a live system:
884 982
885 snd_hda_intel enabled=0 983 nullarbor:~ # cat <debugfs>/dynamic_debug/ddebug
886 fixup enabled=0 984 # filename:lineno [module]function flags format
887 driver enabled=0 985 fs/aio.c:222 [aio]__put_ioctx - "__put_ioctx:\040freeing\040%p\012"
888 986 fs/aio.c:248 [aio]ioctx_alloc - "ENOMEM:\040nr_events\040too\040high\012"
889 Enable a module: 987 fs/aio.c:1770 [aio]sys_io_cancel - "calling\040cancel\012"
890 988
891 $echo "set enabled=1 <module_name>" > dynamic_printk/modules 989 Example usage:
892 990
893 Disable a module: 991 // enable the message at line 1603 of file svcsock.c
992 nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
993 <debugfs>/dynamic_debug/ddebug
894 994
895 $echo "set enabled=0 <module_name>" > dynamic_printk/modules 995 // enable all the messages in file svcsock.c
996 nullarbor:~ # echo -n 'file svcsock.c +p' >
997 <debugfs>/dynamic_debug/ddebug
896 998
897 Enable all modules: 999 // enable all the messages in the NFS server module
1000 nullarbor:~ # echo -n 'module nfsd +p' >
1001 <debugfs>/dynamic_debug/ddebug
898 1002
899 $echo "set enabled=1 all" > dynamic_printk/modules 1003 // enable all 12 messages in the function svc_process()
1004 nullarbor:~ # echo -n 'func svc_process +p' >
1005 <debugfs>/dynamic_debug/ddebug
900 1006
901 Disable all modules: 1007 // disable all 12 messages in the function svc_process()
1008 nullarbor:~ # echo -n 'func svc_process -p' >
1009 <debugfs>/dynamic_debug/ddebug
902 1010
903 $echo "set enabled=0 all" > dynamic_printk/modules 1011 See Documentation/dynamic-debug-howto.txt for additional information.
904 1012
905 Finally, passing "dynamic_printk" at the command line enables 1013config DMA_API_DEBUG
906 debugging for all modules. This mode can be turned off via the above 1014 bool "Enable debugging of DMA-API usage"
907 disable command. 1015 depends on HAVE_DMA_API_DEBUG
1016 help
1017 Enable this option to debug the use of the DMA API by device drivers.
1018 With this option you will be able to detect common bugs in device
1019 drivers like double-freeing of DMA mappings or freeing mappings that
1020 were never allocated.
1021 This option causes a performance degradation. Use only if you want
1022 to debug device drivers. If unsure, say N.
908 1023
909source "samples/Kconfig" 1024source "samples/Kconfig"
910 1025
911source "lib/Kconfig.kgdb" 1026source "lib/Kconfig.kgdb"
1027
1028source "lib/Kconfig.kmemcheck"
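
As a hedged illustration of the class of driver bug that the new DMA_API_DEBUG option is meant to catch, the sketch below unmaps the same streaming mapping twice. The device, handle, and function names are hypothetical and not part of this patch.

#include <linux/dma-mapping.h>
#include <linux/device.h>

/* Hypothetical driver teardown path: the second dma_unmap_single() on the
 * same handle is the kind of double free that CONFIG_DMA_API_DEBUG reports. */
static void buggy_teardown(struct device *dev, dma_addr_t handle, size_t size)
{
	dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);
	dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);	/* BUG: already unmapped */
}
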
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
new file mode 100644
index 000000000000..603c81b66549
--- /dev/null
+++ b/lib/Kconfig.kmemcheck
@@ -0,0 +1,91 @@
1config HAVE_ARCH_KMEMCHECK
2 bool
3
4menuconfig KMEMCHECK
5 bool "kmemcheck: trap use of uninitialized memory"
6 depends on DEBUG_KERNEL
7 depends on !X86_USE_3DNOW
8 depends on SLUB || SLAB
9 depends on !CC_OPTIMIZE_FOR_SIZE
10 depends on !FUNCTION_TRACER
11 select FRAME_POINTER
12 select STACKTRACE
13 default n
14 help
15 This option enables tracing of dynamically allocated kernel memory
16 to see if memory is used before it has been given an initial value.
17 Be aware that this requires half of your memory for bookkeeping and
18 will insert extra code at *every* read and write to tracked memory
19 and thus slow down the kernel code (but user code is unaffected).
20
21 The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
22 or enable kmemcheck at boot-time. If the kernel is started with
23 kmemcheck=0, the large memory and CPU overhead is not incurred.
24
25choice
26 prompt "kmemcheck: default mode at boot"
27 depends on KMEMCHECK
28 default KMEMCHECK_ONESHOT_BY_DEFAULT
29 help
30 This option controls the default behaviour of kmemcheck when the
31 kernel boots and no kmemcheck= parameter is given.
32
33config KMEMCHECK_DISABLED_BY_DEFAULT
34 bool "disabled"
35 depends on KMEMCHECK
36
37config KMEMCHECK_ENABLED_BY_DEFAULT
38 bool "enabled"
39 depends on KMEMCHECK
40
41config KMEMCHECK_ONESHOT_BY_DEFAULT
42 bool "one-shot"
43 depends on KMEMCHECK
44 help
45 In one-shot mode, only the first error detected is reported before
46 kmemcheck is disabled.
47
48endchoice
49
50config KMEMCHECK_QUEUE_SIZE
51 int "kmemcheck: error queue size"
52 depends on KMEMCHECK
53 default 64
54 help
55 Select the maximum number of errors to store in the queue. Since
56 errors can occur virtually anywhere and in any context, we need a
57 temporary storage area which is guaranteed not to generate any
58 other faults. The queue will be emptied as soon as a tasklet may
59 be scheduled. If the queue is full, new error reports will be
60 lost.
61
62config KMEMCHECK_SHADOW_COPY_SHIFT
63 int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
64 depends on KMEMCHECK
65 range 2 8
66 default 5
67 help
68 Select the number of shadow bytes to save along with each entry of
69 the queue. These bytes indicate what parts of an allocation are
70 initialized, uninitialized, etc. and will be displayed when an
71 error is detected to help the debugging of a particular problem.
72
73config KMEMCHECK_PARTIAL_OK
74 bool "kmemcheck: allow partially uninitialized memory"
75 depends on KMEMCHECK
76 default y
77 help
78 This option works around certain GCC optimizations that produce
79 32-bit reads from 16-bit variables where the upper 16 bits are
80 thrown away afterwards. This may of course also hide some real
81 bugs.
82
83config KMEMCHECK_BITOPS_OK
84 bool "kmemcheck: allow bit-field manipulation"
85 depends on KMEMCHECK
86 default n
87 help
88 This option silences warnings that would be generated for bit-field
89 accesses where not all the bits are initialized at the same time.
90 This may also hide some real bugs.
91
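
For context, here is a minimal sketch (not part of this patch) of the kind of uninitialized read that kmemcheck traps at runtime; the struct and function names are hypothetical.

#include <linux/slab.h>
#include <linux/errno.h>

struct sample { int count; };

/* kmemcheck would warn here: the kmalloc()ed memory is read before it is
 * ever written, so p->count carries an uninitialized value. */
static int read_uninitialized(void)
{
	struct sample *p = kmalloc(sizeof(*p), GFP_KERNEL);
	int val;

	if (!p)
		return -ENOMEM;
	val = p->count;		/* uninitialized read */
	kfree(p);
	return val;
}
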
diff --git a/lib/Makefile b/lib/Makefile
index 32b0e64ded27..2e78277eff9d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,7 +11,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
11 rbtree.o radix-tree.o dump_stack.o \ 11 rbtree.o radix-tree.o dump_stack.o \
12 idr.o int_sqrt.o extable.o prio_tree.o \ 12 idr.o int_sqrt.o extable.o prio_tree.o \
13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
14 proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o 14 proportions.o prio_heap.o ratelimit.o show_mem.o \
15 is_single_threaded.o plist.o decompress.o flex_array.o
15 16
16lib-$(CONFIG_MMU) += ioremap.o 17lib-$(CONFIG_MMU) += ioremap.o
17lib-$(CONFIG_SMP) += cpumask.o 18lib-$(CONFIG_SMP) += cpumask.o
@@ -20,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
20 21
21obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
22 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
23 string_helpers.o 24 string_helpers.o gcd.o
24 25
25ifeq ($(CONFIG_DEBUG_KOBJECT),y) 26ifeq ($(CONFIG_DEBUG_KOBJECT),y)
26CFLAGS_kobject.o += -DDEBUG 27CFLAGS_kobject.o += -DDEBUG
@@ -37,10 +38,9 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
37lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 38lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
38lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o 39lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
39lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o 40lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
40lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o 41obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
41obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 42obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
42obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o 43obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
43obj-$(CONFIG_PLIST) += plist.o
44obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o 44obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
45obj-$(CONFIG_DEBUG_LIST) += list_debug.o 45obj-$(CONFIG_DEBUG_LIST) += list_debug.o
46obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o 46obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
@@ -50,6 +50,7 @@ ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
50endif 50endif
51 51
52obj-$(CONFIG_BITREVERSE) += bitrev.o 52obj-$(CONFIG_BITREVERSE) += bitrev.o
53obj-$(CONFIG_RATIONAL) += rational.o
53obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o 54obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
54obj-$(CONFIG_CRC16) += crc16.o 55obj-$(CONFIG_CRC16) += crc16.o
55obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o 56obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
@@ -65,6 +66,10 @@ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
65obj-$(CONFIG_LZO_COMPRESS) += lzo/ 66obj-$(CONFIG_LZO_COMPRESS) += lzo/
66obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ 67obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
67 68
69lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
70lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
71lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
72
68obj-$(CONFIG_TEXTSEARCH) += textsearch.o 73obj-$(CONFIG_TEXTSEARCH) += textsearch.o
69obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o 74obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
70obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o 75obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
@@ -82,7 +87,15 @@ obj-$(CONFIG_HAVE_LMB) += lmb.o
82 87
83obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o 88obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
84 89
85obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o 90obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
91
92obj-$(CONFIG_NLATTR) += nlattr.o
93
94obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
95
96obj-$(CONFIG_GENERIC_CSUM) += checksum.o
97
98obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
86 99
87hostprogs-y := gen_crc32table 100hostprogs-y := gen_crc32table
88clean-files := crc32table.h 101clean-files := crc32table.h
diff --git a/lib/atomic64.c b/lib/atomic64.c
new file mode 100644
index 000000000000..8bee16ec7524
--- /dev/null
+++ b/lib/atomic64.c
@@ -0,0 +1,186 @@
1/*
2 * Generic implementation of 64-bit atomics using spinlocks,
3 * useful on processors that don't have 64-bit atomic instructions.
4 *
5 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12#include <linux/types.h>
13#include <linux/cache.h>
14#include <linux/spinlock.h>
15#include <linux/init.h>
16#include <linux/module.h>
17#include <asm/atomic.h>
18
19/*
20 * We use a hashed array of spinlocks to provide exclusive access
21 * to each atomic64_t variable. Since this is expected to be used on
22 * systems with small numbers of CPUs (<= 4 or so), we use a
23 * relatively small array of 16 spinlocks to avoid wasting too much
24 * memory on the spinlock array.
25 */
26#define NR_LOCKS 16
27
28/*
29 * Ensure each lock is in a separate cacheline.
30 */
31static union {
32 spinlock_t lock;
33 char pad[L1_CACHE_BYTES];
34} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
35
36static inline spinlock_t *lock_addr(const atomic64_t *v)
37{
38 unsigned long addr = (unsigned long) v;
39
40 addr >>= L1_CACHE_SHIFT;
41 addr ^= (addr >> 8) ^ (addr >> 16);
42 return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
43}
44
45long long atomic64_read(const atomic64_t *v)
46{
47 unsigned long flags;
48 spinlock_t *lock = lock_addr(v);
49 long long val;
50
51 spin_lock_irqsave(lock, flags);
52 val = v->counter;
53 spin_unlock_irqrestore(lock, flags);
54 return val;
55}
56EXPORT_SYMBOL(atomic64_read);
57
58void atomic64_set(atomic64_t *v, long long i)
59{
60 unsigned long flags;
61 spinlock_t *lock = lock_addr(v);
62
63 spin_lock_irqsave(lock, flags);
64 v->counter = i;
65 spin_unlock_irqrestore(lock, flags);
66}
67EXPORT_SYMBOL(atomic64_set);
68
69void atomic64_add(long long a, atomic64_t *v)
70{
71 unsigned long flags;
72 spinlock_t *lock = lock_addr(v);
73
74 spin_lock_irqsave(lock, flags);
75 v->counter += a;
76 spin_unlock_irqrestore(lock, flags);
77}
78EXPORT_SYMBOL(atomic64_add);
79
80long long atomic64_add_return(long long a, atomic64_t *v)
81{
82 unsigned long flags;
83 spinlock_t *lock = lock_addr(v);
84 long long val;
85
86 spin_lock_irqsave(lock, flags);
87 val = v->counter += a;
88 spin_unlock_irqrestore(lock, flags);
89 return val;
90}
91EXPORT_SYMBOL(atomic64_add_return);
92
93void atomic64_sub(long long a, atomic64_t *v)
94{
95 unsigned long flags;
96 spinlock_t *lock = lock_addr(v);
97
98 spin_lock_irqsave(lock, flags);
99 v->counter -= a;
100 spin_unlock_irqrestore(lock, flags);
101}
102EXPORT_SYMBOL(atomic64_sub);
103
104long long atomic64_sub_return(long long a, atomic64_t *v)
105{
106 unsigned long flags;
107 spinlock_t *lock = lock_addr(v);
108 long long val;
109
110 spin_lock_irqsave(lock, flags);
111 val = v->counter -= a;
112 spin_unlock_irqrestore(lock, flags);
113 return val;
114}
115EXPORT_SYMBOL(atomic64_sub_return);
116
117long long atomic64_dec_if_positive(atomic64_t *v)
118{
119 unsigned long flags;
120 spinlock_t *lock = lock_addr(v);
121 long long val;
122
123 spin_lock_irqsave(lock, flags);
124 val = v->counter - 1;
125 if (val >= 0)
126 v->counter = val;
127 spin_unlock_irqrestore(lock, flags);
128 return val;
129}
130EXPORT_SYMBOL(atomic64_dec_if_positive);
131
132long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
133{
134 unsigned long flags;
135 spinlock_t *lock = lock_addr(v);
136 long long val;
137
138 spin_lock_irqsave(lock, flags);
139 val = v->counter;
140 if (val == o)
141 v->counter = n;
142 spin_unlock_irqrestore(lock, flags);
143 return val;
144}
145EXPORT_SYMBOL(atomic64_cmpxchg);
146
147long long atomic64_xchg(atomic64_t *v, long long new)
148{
149 unsigned long flags;
150 spinlock_t *lock = lock_addr(v);
151 long long val;
152
153 spin_lock_irqsave(lock, flags);
154 val = v->counter;
155 v->counter = new;
156 spin_unlock_irqrestore(lock, flags);
157 return val;
158}
159EXPORT_SYMBOL(atomic64_xchg);
160
161int atomic64_add_unless(atomic64_t *v, long long a, long long u)
162{
163 unsigned long flags;
164 spinlock_t *lock = lock_addr(v);
165 int ret = 1;
166
167 spin_lock_irqsave(lock, flags);
168 if (v->counter != u) {
169 v->counter += a;
170 ret = 0;
171 }
172 spin_unlock_irqrestore(lock, flags);
173 return ret;
174}
175EXPORT_SYMBOL(atomic64_add_unless);
176
177static int init_atomic64_lock(void)
178{
179 int i;
180
181 for (i = 0; i < NR_LOCKS; ++i)
182 spin_lock_init(&atomic64_lock[i].lock);
183 return 0;
184}
185
186pure_initcall(init_atomic64_lock);
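
A minimal usage sketch of the API implemented above; the counter name is ours, and ATOMIC64_INIT is assumed to come from the matching asm-generic header rather than from this file.

#include <asm/atomic.h>

static atomic64_t bytes_transferred = ATOMIC64_INIT(0);

/* Callers on 32-bit machines still see a coherent 64-bit counter: each
 * update takes one of the hashed spinlocks above, so no torn values leak. */
static void account_bytes(long long n)
{
	atomic64_add(n, &bytes_transferred);
}

static long long total_bytes(void)
{
	return atomic64_read(&bytes_transferred);
}
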
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 1338469ac849..702565821c99 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -179,14 +179,16 @@ void __bitmap_shift_left(unsigned long *dst,
179} 179}
180EXPORT_SYMBOL(__bitmap_shift_left); 180EXPORT_SYMBOL(__bitmap_shift_left);
181 181
182void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, 182int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
183 const unsigned long *bitmap2, int bits) 183 const unsigned long *bitmap2, int bits)
184{ 184{
185 int k; 185 int k;
186 int nr = BITS_TO_LONGS(bits); 186 int nr = BITS_TO_LONGS(bits);
187 unsigned long result = 0;
187 188
188 for (k = 0; k < nr; k++) 189 for (k = 0; k < nr; k++)
189 dst[k] = bitmap1[k] & bitmap2[k]; 190 result |= (dst[k] = bitmap1[k] & bitmap2[k]);
191 return result != 0;
190} 192}
191EXPORT_SYMBOL(__bitmap_and); 193EXPORT_SYMBOL(__bitmap_and);
192 194
@@ -212,14 +214,16 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
212} 214}
213EXPORT_SYMBOL(__bitmap_xor); 215EXPORT_SYMBOL(__bitmap_xor);
214 216
215void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, 217int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
216 const unsigned long *bitmap2, int bits) 218 const unsigned long *bitmap2, int bits)
217{ 219{
218 int k; 220 int k;
219 int nr = BITS_TO_LONGS(bits); 221 int nr = BITS_TO_LONGS(bits);
222 unsigned long result = 0;
220 223
221 for (k = 0; k < nr; k++) 224 for (k = 0; k < nr; k++)
222 dst[k] = bitmap1[k] & ~bitmap2[k]; 225 result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
226 return result != 0;
223} 227}
224EXPORT_SYMBOL(__bitmap_andnot); 228EXPORT_SYMBOL(__bitmap_andnot);
225 229
@@ -948,15 +952,15 @@ done:
948 */ 952 */
949int bitmap_find_free_region(unsigned long *bitmap, int bits, int order) 953int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
950{ 954{
951 int pos; /* scans bitmap by regions of size order */ 955 int pos, end; /* scans bitmap by regions of size order */
952 956
953 for (pos = 0; pos < bits; pos += (1 << order)) 957 for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) {
954 if (__reg_op(bitmap, pos, order, REG_OP_ISFREE)) 958 if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
955 break; 959 continue;
956 if (pos == bits) 960 __reg_op(bitmap, pos, order, REG_OP_ALLOC);
957 return -ENOMEM; 961 return pos;
958 __reg_op(bitmap, pos, order, REG_OP_ALLOC); 962 }
959 return pos; 963 return -ENOMEM;
960} 964}
961EXPORT_SYMBOL(bitmap_find_free_region); 965EXPORT_SYMBOL(bitmap_find_free_region);
962 966
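
As a hedged aside, this is the usual caller pattern for bitmap_find_free_region(): the returned position is already marked allocated and is later handed back with bitmap_release_region(). The pool below is hypothetical.

#include <linux/bitmap.h>
#include <linux/errno.h>

#define POOL_BITS 64
static DECLARE_BITMAP(pool, POOL_BITS);

/* Grab a naturally aligned run of (1 << order) bits, use it, release it. */
static int use_region(int order)
{
	int pos = bitmap_find_free_region(pool, POOL_BITS, order);

	if (pos < 0)
		return pos;		/* -ENOMEM: no free region of that size */
	/* ... bits pos .. pos + (1 << order) - 1 now belong to us ... */
	bitmap_release_region(pool, pos, order);
	return 0;
}
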
diff --git a/lib/checksum.c b/lib/checksum.c
new file mode 100644
index 000000000000..b2e2fd468461
--- /dev/null
+++ b/lib/checksum.c
@@ -0,0 +1,201 @@
1/*
2 *
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * IP/TCP/UDP checksumming routines
8 *
9 * Authors: Jorge Cwik, <jorge@laser.satlink.net>
10 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
11 * Tom May, <ftom@netcom.com>
12 * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
13 * Lots of code moved from tcp.c and ip.c; see those files
14 * for more names.
15 *
16 * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
17 * Fixed some nasty bugs, causing some horrible crashes.
18 * A: At some points, the sum (%0) was used as
19 * length-counter instead of the length counter
20 * (%1). Thanks to Roman Hodek for pointing this out.
21 * B: GCC seems to mess up if one uses too many
22 * data-registers to hold input values and one tries to
23 * specify d0 and d1 as scratch registers. Letting gcc
24 * choose these registers itself solves the problem.
25 *
26 * This program is free software; you can redistribute it and/or
27 * modify it under the terms of the GNU General Public License
28 * as published by the Free Software Foundation; either version
29 * 2 of the License, or (at your option) any later version.
30 */
31
32/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
33 kills, so most of the assembly has to go. */
34
35#include <linux/module.h>
36#include <net/checksum.h>
37
38#include <asm/byteorder.h>
39
40static inline unsigned short from32to16(unsigned long x)
41{
42 /* add up 16-bit and 16-bit for 16+c bit */
43 x = (x & 0xffff) + (x >> 16);
44 /* add up carry.. */
45 x = (x & 0xffff) + (x >> 16);
46 return x;
47}
48
49static unsigned int do_csum(const unsigned char *buff, int len)
50{
51 int odd, count;
52 unsigned long result = 0;
53
54 if (len <= 0)
55 goto out;
56 odd = 1 & (unsigned long) buff;
57 if (odd) {
58#ifdef __LITTLE_ENDIAN
59 result = *buff;
60#else
61 result += (*buff << 8);
62#endif
63 len--;
64 buff++;
65 }
66 count = len >> 1; /* nr of 16-bit words.. */
67 if (count) {
68 if (2 & (unsigned long) buff) {
69 result += *(unsigned short *) buff;
70 count--;
71 len -= 2;
72 buff += 2;
73 }
74 count >>= 1; /* nr of 32-bit words.. */
75 if (count) {
76 unsigned long carry = 0;
77 do {
78 unsigned long w = *(unsigned int *) buff;
79 count--;
80 buff += 4;
81 result += carry;
82 result += w;
83 carry = (w > result);
84 } while (count);
85 result += carry;
86 result = (result & 0xffff) + (result >> 16);
87 }
88 if (len & 2) {
89 result += *(unsigned short *) buff;
90 buff += 2;
91 }
92 }
93 if (len & 1)
94#ifdef __LITTLE_ENDIAN
95 result += *buff;
96#else
97 result += (*buff << 8);
98#endif
99 result = from32to16(result);
100 if (odd)
101 result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
102out:
103 return result;
104}
105
106/*
107 * This is a version of ip_compute_csum() optimized for IP headers,
108 * which always checksum on 4 octet boundaries.
109 */
110__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
111{
112 return (__force __sum16)~do_csum(iph, ihl*4);
113}
114EXPORT_SYMBOL(ip_fast_csum);
115
116/*
117 * computes the checksum of a memory block at buff, length len,
118 * and adds in "sum" (32-bit)
119 *
120 * returns a 32-bit number suitable for feeding into itself
121 * or csum_tcpudp_magic
122 *
123 * this function must be called with even lengths, except
124 * for the last fragment, which may be odd
125 *
126 * it's best to have buff aligned on a 32-bit boundary
127 */
128__wsum csum_partial(const void *buff, int len, __wsum wsum)
129{
130 unsigned int sum = (__force unsigned int)wsum;
131 unsigned int result = do_csum(buff, len);
132
133 /* add in old sum, and carry.. */
134 result += sum;
135 if (sum > result)
136 result += 1;
137 return (__force __wsum)result;
138}
139EXPORT_SYMBOL(csum_partial);
140
141/*
142 * this routine is used for miscellaneous IP-like checksums, mainly
143 * in icmp.c
144 */
145__sum16 ip_compute_csum(const void *buff, int len)
146{
147 return (__force __sum16)~do_csum(buff, len);
148}
149EXPORT_SYMBOL(ip_compute_csum);
150
151/*
152 * copy from fs while checksumming, otherwise like csum_partial
153 */
154__wsum
155csum_partial_copy_from_user(const void __user *src, void *dst, int len,
156 __wsum sum, int *csum_err)
157{
158 int missing;
159
160 missing = __copy_from_user(dst, src, len);
161 if (missing) {
162 memset(dst + len - missing, 0, missing);
163 *csum_err = -EFAULT;
164 } else
165 *csum_err = 0;
166
167 return csum_partial(dst, len, sum);
168}
169EXPORT_SYMBOL(csum_partial_copy_from_user);
170
171/*
172 * copy from ds while checksumming, otherwise like csum_partial
173 */
174__wsum
175csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
176{
177 memcpy(dst, src, len);
178 return csum_partial(dst, len, sum);
179}
180EXPORT_SYMBOL(csum_partial_copy);
181
182#ifndef csum_tcpudp_nofold
183__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
184 unsigned short len,
185 unsigned short proto,
186 __wsum sum)
187{
188 unsigned long long s = (__force u32)sum;
189
190 s += (__force u32)saddr;
191 s += (__force u32)daddr;
192#ifdef __BIG_ENDIAN
193 s += proto + len;
194#else
195 s += (proto + len) << 8;
196#endif
197 s += (s >> 32);
198 return (__force __wsum)s;
199}
200EXPORT_SYMBOL(csum_tcpudp_nofold);
201#endif
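
A hedged usage sketch of the generic routines above: csum_partial() produces the running 32-bit sum, and csum_fold() (provided elsewhere in <net/checksum.h>) collapses it into the final 16-bit Internet checksum. The wrapper name is hypothetical.

#include <net/checksum.h>

static __sum16 checksum_of(const void *buf, int len)
{
	/* 32-bit running sum; it can be fed back into csum_partial() for
	 * further fragments before folding. */
	__wsum sum = csum_partial(buf, len, 0);

	return csum_fold(sum);
}
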
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 3389e2440da0..7bb4142a502f 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -92,15 +92,8 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
92 */ 92 */
93bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) 93bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
94{ 94{
95 if (likely(slab_is_available())) 95 *mask = kmalloc_node(cpumask_size(), flags, node);
96 *mask = kmalloc_node(cpumask_size(), flags, node); 96
97 else {
98#ifdef CONFIG_DEBUG_PER_CPU_MAPS
99 printk(KERN_ERR
100 "=> alloc_cpumask_var: kmalloc not available!\n");
101#endif
102 *mask = NULL;
103 }
104#ifdef CONFIG_DEBUG_PER_CPU_MAPS 97#ifdef CONFIG_DEBUG_PER_CPU_MAPS
105 if (!*mask) { 98 if (!*mask) {
106 printk(KERN_ERR "=> alloc_cpumask_var: failed!\n"); 99 printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
@@ -109,16 +102,22 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
109#endif 102#endif
110 /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */ 103 /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
111 if (*mask) { 104 if (*mask) {
105 unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
112 unsigned int tail; 106 unsigned int tail;
113 tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long); 107 tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
114 memset(cpumask_bits(*mask) + cpumask_size() - tail, 108 memset(ptr + cpumask_size() - tail, 0, tail);
115 0, tail);
116 } 109 }
117 110
118 return *mask != NULL; 111 return *mask != NULL;
119} 112}
120EXPORT_SYMBOL(alloc_cpumask_var_node); 113EXPORT_SYMBOL(alloc_cpumask_var_node);
121 114
115bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
116{
117 return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
118}
119EXPORT_SYMBOL(zalloc_cpumask_var_node);
120
122/** 121/**
123 * alloc_cpumask_var - allocate a struct cpumask 122 * alloc_cpumask_var - allocate a struct cpumask
124 * @mask: pointer to cpumask_var_t where the cpumask is returned 123 * @mask: pointer to cpumask_var_t where the cpumask is returned
@@ -135,6 +134,12 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
135} 134}
136EXPORT_SYMBOL(alloc_cpumask_var); 135EXPORT_SYMBOL(alloc_cpumask_var);
137 136
137bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
138{
139 return alloc_cpumask_var(mask, flags | __GFP_ZERO);
140}
141EXPORT_SYMBOL(zalloc_cpumask_var);
142
138/** 143/**
139 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena. 144 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
140 * @mask: pointer to cpumask_var_t where the cpumask is returned 145 * @mask: pointer to cpumask_var_t where the cpumask is returned
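
A short, hedged example of the new zalloc helpers introduced here: the mask starts out all-clear, so the caller can drop an explicit cpumask_clear(). The function name is hypothetical.

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Allocate a zeroed cpumask, mark one CPU, hand it off, then free it. */
static int mark_cpu(int cpu)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_set_cpu(cpu, mask);
	/* ... pass the mask to whatever needs it ... */
	free_cpumask_var(mask);
	return 0;
}
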
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 0218b4693dd8..bc3b11731b9c 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -36,7 +36,7 @@ int debug_locks_silent;
36 */ 36 */
37int debug_locks_off(void) 37int debug_locks_off(void)
38{ 38{
39 if (xchg(&debug_locks, 0)) { 39 if (__debug_locks_off()) {
40 if (!debug_locks_silent) { 40 if (!debug_locks_silent) {
41 oops_in_progress = 1; 41 oops_in_progress = 1;
42 console_verbose(); 42 console_verbose();
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 5d99be1fd988..2755a3bd16a1 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -30,7 +30,7 @@ struct debug_bucket {
30 30
31static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; 31static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
32 32
33static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE]; 33static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
34 34
35static DEFINE_SPINLOCK(pool_lock); 35static DEFINE_SPINLOCK(pool_lock);
36 36
@@ -50,12 +50,23 @@ static int debug_objects_enabled __read_mostly
50 50
51static struct debug_obj_descr *descr_test __read_mostly; 51static struct debug_obj_descr *descr_test __read_mostly;
52 52
53static void free_obj_work(struct work_struct *work);
54static DECLARE_WORK(debug_obj_work, free_obj_work);
55
53static int __init enable_object_debug(char *str) 56static int __init enable_object_debug(char *str)
54{ 57{
55 debug_objects_enabled = 1; 58 debug_objects_enabled = 1;
56 return 0; 59 return 0;
57} 60}
61
62static int __init disable_object_debug(char *str)
63{
64 debug_objects_enabled = 0;
65 return 0;
66}
67
58early_param("debug_objects", enable_object_debug); 68early_param("debug_objects", enable_object_debug);
69early_param("no_debug_objects", disable_object_debug);
59 70
60static const char *obj_states[ODEBUG_STATE_MAX] = { 71static const char *obj_states[ODEBUG_STATE_MAX] = {
61 [ODEBUG_STATE_NONE] = "none", 72 [ODEBUG_STATE_NONE] = "none",
@@ -146,25 +157,51 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
146} 157}
147 158
148/* 159/*
149 * Put the object back into the pool or give it back to kmem_cache: 160 * workqueue function to free objects.
150 */ 161 */
151static void free_object(struct debug_obj *obj) 162static void free_obj_work(struct work_struct *work)
152{ 163{
153 unsigned long idx = (unsigned long)(obj - obj_static_pool); 164 struct debug_obj *obj;
154 unsigned long flags; 165 unsigned long flags;
155 166
156 if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) { 167 spin_lock_irqsave(&pool_lock, flags);
157 spin_lock_irqsave(&pool_lock, flags); 168 while (obj_pool_free > ODEBUG_POOL_SIZE) {
158 hlist_add_head(&obj->node, &obj_pool); 169 obj = hlist_entry(obj_pool.first, typeof(*obj), node);
159 obj_pool_free++; 170 hlist_del(&obj->node);
160 obj_pool_used--; 171 obj_pool_free--;
161 spin_unlock_irqrestore(&pool_lock, flags); 172 /*
162 } else { 173 * We release pool_lock across kmem_cache_free() to
163 spin_lock_irqsave(&pool_lock, flags); 174 * avoid contention on pool_lock.
164 obj_pool_used--; 175 */
165 spin_unlock_irqrestore(&pool_lock, flags); 176 spin_unlock_irqrestore(&pool_lock, flags);
166 kmem_cache_free(obj_cache, obj); 177 kmem_cache_free(obj_cache, obj);
178 spin_lock_irqsave(&pool_lock, flags);
167 } 179 }
180 spin_unlock_irqrestore(&pool_lock, flags);
181}
182
183/*
184 * Put the object back into the pool and schedule work to free objects
185 * if necessary.
186 */
187static void free_object(struct debug_obj *obj)
188{
189 unsigned long flags;
190 int sched = 0;
191
192 spin_lock_irqsave(&pool_lock, flags);
193 /*
194 * schedule work when the pool is filled and the cache is
195 * initialized:
196 */
197 if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
198 sched = !work_pending(&debug_obj_work);
199 hlist_add_head(&obj->node, &obj_pool);
200 obj_pool_free++;
201 obj_pool_used--;
202 spin_unlock_irqrestore(&pool_lock, flags);
203 if (sched)
204 schedule_work(&debug_obj_work);
168} 205}
169 206
170/* 207/*
@@ -876,6 +913,63 @@ void __init debug_objects_early_init(void)
876} 913}
877 914
878/* 915/*
916 * Convert the statically allocated objects to dynamic ones:
917 */
918static int debug_objects_replace_static_objects(void)
919{
920 struct debug_bucket *db = obj_hash;
921 struct hlist_node *node, *tmp;
922 struct debug_obj *obj, *new;
923 HLIST_HEAD(objects);
924 int i, cnt = 0;
925
926 for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
927 obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
928 if (!obj)
929 goto free;
930 hlist_add_head(&obj->node, &objects);
931 }
932
933 /*
934 * When debug_objects_mem_init() is called we know that only
935 * one CPU is up, so disabling interrupts is enough
936 * protection. This avoids the lockdep hell of lock ordering.
937 */
938 local_irq_disable();
939
940 /* Remove the statically allocated objects from the pool */
941 hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
942 hlist_del(&obj->node);
943 /* Move the allocated objects to the pool */
944 hlist_move_list(&objects, &obj_pool);
945
946 /* Replace the active object references */
947 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
948 hlist_move_list(&db->list, &objects);
949
950 hlist_for_each_entry(obj, node, &objects, node) {
951 new = hlist_entry(obj_pool.first, typeof(*obj), node);
952 hlist_del(&new->node);
953 /* copy object data */
954 *new = *obj;
955 hlist_add_head(&new->node, &db->list);
956 cnt++;
957 }
958 }
959
960 printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
961 obj_pool_used);
962 local_irq_enable();
963 return 0;
964free:
965 hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
966 hlist_del(&obj->node);
967 kmem_cache_free(obj_cache, obj);
968 }
969 return -ENOMEM;
970}
971
972/*
879 * Called after the kmem_caches are functional to setup a dedicated 973 * Called after the kmem_caches are functional to setup a dedicated
880 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag 974 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
881 * prevents that the debug code is called on kmem_cache_free() for the 975 * prevents that the debug code is called on kmem_cache_free() for the
@@ -890,8 +984,11 @@ void __init debug_objects_mem_init(void)
890 sizeof (struct debug_obj), 0, 984 sizeof (struct debug_obj), 0,
891 SLAB_DEBUG_OBJECTS, NULL); 985 SLAB_DEBUG_OBJECTS, NULL);
892 986
893 if (!obj_cache) 987 if (!obj_cache || debug_objects_replace_static_objects()) {
894 debug_objects_enabled = 0; 988 debug_objects_enabled = 0;
895 else 989 if (obj_cache)
990 kmem_cache_destroy(obj_cache);
991 printk(KERN_WARNING "ODEBUG: out of memory.\n");
992 } else
896 debug_objects_selftest(); 993 debug_objects_selftest();
897} 994}
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index a65c31455541..e73822aa6e9a 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -19,11 +19,10 @@
19 */ 19 */
20int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) 20int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
21{ 21{
22#ifdef CONFIG_SMP
23 /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ 22 /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
24 if (atomic_add_unless(atomic, -1, 1)) 23 if (atomic_add_unless(atomic, -1, 1))
25 return 0; 24 return 0;
26#endif 25
27 /* Otherwise do it the slow way */ 26 /* Otherwise do it the slow way */
28 spin_lock(lock); 27 spin_lock(lock);
29 if (atomic_dec_and_test(atomic)) 28 if (atomic_dec_and_test(atomic))
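
For reference, the usual caller pattern for _atomic_dec_and_lock(), via the atomic_dec_and_lock() wrapper: the lock is only taken when the reference count actually reaches zero. The object type and lock below are hypothetical.

#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct obj { atomic_t refcnt; };
static DEFINE_SPINLOCK(obj_list_lock);

static void obj_put(struct obj *o)
{
	/* Fast path decrements without the lock; the slow path returns with
	 * obj_list_lock held once the count has dropped to zero. */
	if (atomic_dec_and_lock(&o->refcnt, &obj_list_lock)) {
		/* unlink o from any shared list here, then drop the lock */
		spin_unlock(&obj_list_lock);
		kfree(o);
	}
}
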
diff --git a/lib/decompress.c b/lib/decompress.c
new file mode 100644
index 000000000000..d2842f571674
--- /dev/null
+++ b/lib/decompress.c
@@ -0,0 +1,54 @@
1/*
2 * decompress.c
3 *
4 * Detect the decompression method based on magic number
5 */
6
7#include <linux/decompress/generic.h>
8
9#include <linux/decompress/bunzip2.h>
10#include <linux/decompress/unlzma.h>
11#include <linux/decompress/inflate.h>
12
13#include <linux/types.h>
14#include <linux/string.h>
15
16#ifndef CONFIG_DECOMPRESS_GZIP
17# define gunzip NULL
18#endif
19#ifndef CONFIG_DECOMPRESS_BZIP2
20# define bunzip2 NULL
21#endif
22#ifndef CONFIG_DECOMPRESS_LZMA
23# define unlzma NULL
24#endif
25
26static const struct compress_format {
27 unsigned char magic[2];
28 const char *name;
29 decompress_fn decompressor;
30} compressed_formats[] = {
31 { {037, 0213}, "gzip", gunzip },
32 { {037, 0236}, "gzip", gunzip },
33 { {0x42, 0x5a}, "bzip2", bunzip2 },
34 { {0x5d, 0x00}, "lzma", unlzma },
35 { {0, 0}, NULL, NULL }
36};
37
38decompress_fn decompress_method(const unsigned char *inbuf, int len,
39 const char **name)
40{
41 const struct compress_format *cf;
42
43 if (len < 2)
44 return NULL; /* Need at least this much... */
45
46 for (cf = compressed_formats; cf->name; cf++) {
47 if (!memcmp(inbuf, cf->magic, 2))
48 break;
49
50 }
51 if (name)
52 *name = cf->name;
53 return cf->decompressor;
54}
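
A hedged usage sketch for decompress_method(): given the first bytes of a compressed image, it returns the matching decompressor (or NULL) and optionally its name. The caller below is hypothetical.

#include <linux/decompress/generic.h>
#include <linux/kernel.h>

static decompress_fn pick_decompressor(const unsigned char *image, int len)
{
	const char *name;
	decompress_fn fn = decompress_method(image, len, &name);

	if (fn)
		pr_debug("image compressed with %s\n", name);
	return fn;
}
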
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
new file mode 100644
index 000000000000..600f473a5610
--- /dev/null
+++ b/lib/decompress_bunzip2.c
@@ -0,0 +1,746 @@
1/* vi: set sw = 4 ts = 4: */
2/* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
3
4 Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
5 which also acknowledges contributions by Mike Burrows, David Wheeler,
6 Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
7 Robert Sedgewick, and Jon L. Bentley.
8
9 This code is licensed under the LGPLv2:
10 LGPL (http://www.gnu.org/copyleft/lgpl.html)
11*/
12
13/*
14 Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org).
15
16 More efficient reading of Huffman codes, a streamlined read_bunzip()
17 function, and various other tweaks. In (limited) tests, approximately
18 20% faster than bzcat on x86 and about 10% faster on arm.
19
20 Note that about 2/3 of the time is spent in read_bunzip() reversing
21 the Burrows-Wheeler transformation. Much of that time is delay
22 resulting from cache misses.
23
24 I would ask that anyone benefiting from this work, especially those
25 using it in commercial products, consider making a donation to my local
26 non-profit hospice organization in the name of the woman I loved, who
27 passed away Feb. 12, 2003.
28
29 In memory of Toni W. Hagan
30
31 Hospice of Acadiana, Inc.
32 2600 Johnston St., Suite 200
33 Lafayette, LA 70503-3240
34
35 Phone (337) 232-1234 or 1-800-738-2226
36 Fax (337) 232-1297
37
38 http://www.hospiceacadiana.com/
39
40 Manuel
41 */
42
43/*
44 Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu)
45*/
46
47
48#ifdef STATIC
49#define PREBOOT
50#else
51#include <linux/decompress/bunzip2.h>
52#include <linux/slab.h>
53#endif /* STATIC */
54
55#include <linux/decompress/mm.h>
56
57#ifndef INT_MAX
58#define INT_MAX 0x7fffffff
59#endif
60
61/* Constants for Huffman coding */
62#define MAX_GROUPS 6
63#define GROUP_SIZE 50 /* 64 would have been more efficient */
64#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */
65#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */
66#define SYMBOL_RUNA 0
67#define SYMBOL_RUNB 1
68
69/* Status return values */
70#define RETVAL_OK 0
71#define RETVAL_LAST_BLOCK (-1)
72#define RETVAL_NOT_BZIP_DATA (-2)
73#define RETVAL_UNEXPECTED_INPUT_EOF (-3)
74#define RETVAL_UNEXPECTED_OUTPUT_EOF (-4)
75#define RETVAL_DATA_ERROR (-5)
76#define RETVAL_OUT_OF_MEMORY (-6)
77#define RETVAL_OBSOLETE_INPUT (-7)
78
79/* Other housekeeping constants */
80#define BZIP2_IOBUF_SIZE 4096
81
82/* This is what we know about each Huffman coding group */
83struct group_data {
84 /* We have an extra slot at the end of limit[] for a sentinel value. */
85 int limit[MAX_HUFCODE_BITS+1];
86 int base[MAX_HUFCODE_BITS];
87 int permute[MAX_SYMBOLS];
88 int minLen, maxLen;
89};
90
91/* Structure holding all the housekeeping data, including IO buffers and
92 memory that persists between calls to bunzip */
93struct bunzip_data {
94 /* State for interrupting output loop */
95 int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent;
96 /* I/O tracking data (file handles, buffers, positions, etc.) */
97 int (*fill)(void*, unsigned int);
98 int inbufCount, inbufPos /*, outbufPos*/;
99 unsigned char *inbuf /*,*outbuf*/;
100 unsigned int inbufBitCount, inbufBits;
101 /* The CRC values stored in the block header and calculated from the
102 data */
103 unsigned int crc32Table[256], headerCRC, totalCRC, writeCRC;
104 /* Intermediate buffer and its size (in bytes) */
105 unsigned int *dbuf, dbufSize;
106 /* These things are a bit too big to go on the stack */
107 unsigned char selectors[32768]; /* nSelectors = 15 bits */
108 struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */
109 int io_error; /* non-zero if we have IO error */
110};
111
112
113/* Return the next nnn bits of input. All reads from the compressed input
114 are done through this function. All reads are big endian */
115static unsigned int INIT get_bits(struct bunzip_data *bd, char bits_wanted)
116{
117 unsigned int bits = 0;
118
119 /* If we need to get more data from the byte buffer, do so.
120 (Loop getting one byte at a time to enforce endianness and avoid
121 unaligned access.) */
122 while (bd->inbufBitCount < bits_wanted) {
123 /* If we need to read more data from file into byte buffer, do
124 so */
125 if (bd->inbufPos == bd->inbufCount) {
126 if (bd->io_error)
127 return 0;
128 bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE);
129 if (bd->inbufCount <= 0) {
130 bd->io_error = RETVAL_UNEXPECTED_INPUT_EOF;
131 return 0;
132 }
133 bd->inbufPos = 0;
134 }
135 /* Avoid 32-bit overflow (dump bit buffer to top of output) */
136 if (bd->inbufBitCount >= 24) {
137 bits = bd->inbufBits&((1 << bd->inbufBitCount)-1);
138 bits_wanted -= bd->inbufBitCount;
139 bits <<= bits_wanted;
140 bd->inbufBitCount = 0;
141 }
142 /* Grab next 8 bits of input from buffer. */
143 bd->inbufBits = (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
144 bd->inbufBitCount += 8;
145 }
146 /* Calculate result */
147 bd->inbufBitCount -= bits_wanted;
148 bits |= (bd->inbufBits >> bd->inbufBitCount)&((1 << bits_wanted)-1);
149
150 return bits;
151}
152
153/* Unpacks the next block and sets up for the inverse Burrows-Wheeler step. */
154
155static int INIT get_next_block(struct bunzip_data *bd)
156{
157 struct group_data *hufGroup = NULL;
158 int *base = NULL;
159 int *limit = NULL;
160 int dbufCount, nextSym, dbufSize, groupCount, selector,
161 i, j, k, t, runPos, symCount, symTotal, nSelectors,
162 byteCount[256];
163 unsigned char uc, symToByte[256], mtfSymbol[256], *selectors;
164 unsigned int *dbuf, origPtr;
165
166 dbuf = bd->dbuf;
167 dbufSize = bd->dbufSize;
168 selectors = bd->selectors;
169
170 /* Read in header signature and CRC, then validate signature.
171 (last block signature means CRC is for whole file, return now) */
172 i = get_bits(bd, 24);
173 j = get_bits(bd, 24);
174 bd->headerCRC = get_bits(bd, 32);
175 if ((i == 0x177245) && (j == 0x385090))
176 return RETVAL_LAST_BLOCK;
177 if ((i != 0x314159) || (j != 0x265359))
178 return RETVAL_NOT_BZIP_DATA;
179 /* We can add support for blockRandomised if anybody complains.
180 There was some code for this in busybox 1.0.0-pre3, but nobody ever
181 noticed that it didn't actually work. */
182 if (get_bits(bd, 1))
183 return RETVAL_OBSOLETE_INPUT;
184 origPtr = get_bits(bd, 24);
185 if (origPtr > dbufSize)
186 return RETVAL_DATA_ERROR;
187 /* mapping table: if some byte values are never used (encoding things
188 like ascii text), the compression code removes the gaps to have fewer
189 symbols to deal with, and writes a sparse bitfield indicating which
190 values were present. We make a translation table to convert the
191 symbols back to the corresponding bytes. */
192 t = get_bits(bd, 16);
193 symTotal = 0;
194 for (i = 0; i < 16; i++) {
195 if (t&(1 << (15-i))) {
196 k = get_bits(bd, 16);
197 for (j = 0; j < 16; j++)
198 if (k&(1 << (15-j)))
199 symToByte[symTotal++] = (16*i)+j;
200 }
201 }
202 /* How many different Huffman coding groups does this block use? */
203 groupCount = get_bits(bd, 3);
204 if (groupCount < 2 || groupCount > MAX_GROUPS)
205 return RETVAL_DATA_ERROR;
206 /* nSelectors: Every GROUP_SIZE many symbols we select a new
207 Huffman coding group. Read in the group selector list,
208 which is stored as MTF encoded bit runs. (MTF = Move To
209 Front, as each value is used it's moved to the start of the
210 list.) */
211 nSelectors = get_bits(bd, 15);
212 if (!nSelectors)
213 return RETVAL_DATA_ERROR;
214 for (i = 0; i < groupCount; i++)
215 mtfSymbol[i] = i;
216 for (i = 0; i < nSelectors; i++) {
217 /* Get next value */
218 for (j = 0; get_bits(bd, 1); j++)
219 if (j >= groupCount)
220 return RETVAL_DATA_ERROR;
221 /* Decode MTF to get the next selector */
222 uc = mtfSymbol[j];
223 for (; j; j--)
224 mtfSymbol[j] = mtfSymbol[j-1];
225 mtfSymbol[0] = selectors[i] = uc;
226 }
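/* Illustrative sketch (not part of this patch): move-to-front decoding as
 * used for the selector list above. Each index selects the symbol currently
 * at that position, which is then moved to the front of the table. */
#include <stdio.h>

static unsigned char mtf_decode(unsigned char *table, int j)
{
        unsigned char uc = table[j];

        for (; j; j--)
                table[j] = table[j - 1];
        table[0] = uc;
        return uc;
}

int main(void)
{
        unsigned char table[4] = { 0, 1, 2, 3 };        /* identity order */
        static const int indices[4] = { 2, 2, 0, 3 };
        int i;

        for (i = 0; i < 4; i++)
                printf("%d ", mtf_decode(table, indices[i]));
        printf("\n");   /* prints: 2 1 1 3 */
        return 0;
}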
227 /* Read the Huffman coding tables for each group, which code
228 for symTotal literal symbols, plus two run symbols (RUNA,
229 RUNB) */
230 symCount = symTotal+2;
231 for (j = 0; j < groupCount; j++) {
232 unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
233 int minLen, maxLen, pp;
234 /* Read Huffman code lengths for each symbol. They're
235 stored in a way similar to mtf; record a starting
236 value for the first symbol, and an offset from the
237 previous value for every symbol after that.
238 (Subtracting 1 before the loop and then adding it
239 back at the end is an optimization that makes the
240 test inside the loop simpler: symbol length 0
241 becomes negative, so an unsigned inequality catches
242 it.) */
243 t = get_bits(bd, 5)-1;
244 for (i = 0; i < symCount; i++) {
245 for (;;) {
246 if (((unsigned)t) > (MAX_HUFCODE_BITS-1))
247 return RETVAL_DATA_ERROR;
248
249 /* If first bit is 0, stop. Else
250 second bit indicates whether to
251 increment or decrement the value.
252 Optimization: grab 2 bits and unget
253 the second if the first was 0. */
254
255 k = get_bits(bd, 2);
256 if (k < 2) {
257 bd->inbufBitCount++;
258 break;
259 }
260 /* Add one if second bit 1, else
261 * subtract 1. Avoids if/else */
262 t += (((k+1)&2)-1);
263 }
264 /* Correct for the initial -1, to get the
265 * final symbol length */
266 length[i] = t+1;
267 }
268 /* Find largest and smallest lengths in this group */
269 minLen = maxLen = length[0];
270
271 for (i = 1; i < symCount; i++) {
272 if (length[i] > maxLen)
273 maxLen = length[i];
274 else if (length[i] < minLen)
275 minLen = length[i];
276 }
277
278 /* Calculate permute[], base[], and limit[] tables from
279 * length[].
280 *
281 * permute[] is the lookup table for converting
282 * Huffman coded symbols into decoded symbols. base[]
283 * is the amount to subtract from the value of a
284 * Huffman symbol of a given length when using
285 * permute[].
286 *
287 * limit[] indicates the largest numerical value a
288 * symbol with a given number of bits can have. This
289 * is how the Huffman codes can vary in length: each
290 * code with a value > limit[length] needs another
291 * bit.
292 */
293 hufGroup = bd->groups+j;
294 hufGroup->minLen = minLen;
295 hufGroup->maxLen = maxLen;
296 /* Note that minLen can't be smaller than 1, so we
297 adjust the base and limit array pointers so we're
298 not always wasting the first entry. We do this
299 again when using them (during symbol decoding).*/
300 base = hufGroup->base-1;
301 limit = hufGroup->limit-1;
302 /* Calculate permute[]. Concurrently, initialize
303 * temp[] and limit[]. */
304 pp = 0;
305 for (i = minLen; i <= maxLen; i++) {
306 temp[i] = limit[i] = 0;
307 for (t = 0; t < symCount; t++)
308 if (length[t] == i)
309 hufGroup->permute[pp++] = t;
310 }
311 /* Count symbols coded for at each bit length */
312 for (i = 0; i < symCount; i++)
313 temp[length[i]]++;
314 /* Calculate limit[] (the largest symbol-coding value
315 * at each bit length, which is (previous limit <<
316 * 1) + symbols at this level), and base[] (number of
317 * symbols to ignore at each bit length, which is limit
318 * minus the cumulative count of symbols coded for
319 * already). */
320 pp = t = 0;
321 for (i = minLen; i < maxLen; i++) {
322 pp += temp[i];
323 /* We read the largest possible symbol size
324 and then unget bits after determining how
325 many we need, and those extra bits could be
326 set to anything. (They're noise from
327 future symbols.) At each level we're
328 really only interested in the first few
329 bits, so here we set all the trailing
330 to-be-ignored bits to 1 so they don't
331 affect the value > limit[length]
332 comparison. */
333 limit[i] = (pp << (maxLen - i)) - 1;
334 pp <<= 1;
335 base[i+1] = pp-(t += temp[i]);
336 }
337 limit[maxLen+1] = INT_MAX; /* Sentinel value for
338 * reading next sym. */
339 limit[maxLen] = pp+temp[maxLen]-1;
340 base[minLen] = 0;
341 }
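/* Illustrative sketch (not part of this patch): the limit[]/base[]/permute[]
 * construction above, applied to a tiny 4-symbol code with lengths
 * {1, 2, 3, 3}, followed by one decode. The decoder reads maxLen bits, finds
 * the first bit length whose limit is not exceeded, and indexes permute[]
 * through base[]; trailing over-read bits are simply not consumed. */
#include <stdio.h>
#include <limits.h>

#define NSYM 4
#define MAXBITS 3

int main(void)
{
        static const int length[NSYM] = { 1, 2, 3, 3 };
        int permute[NSYM], temp[MAXBITS + 2] = { 0 };
        int limit[MAXBITS + 3], base[MAXBITS + 2] = { 0 };
        int minLen = 1, maxLen = MAXBITS;
        int i, t, pp, j;

        /* Same table construction as the loop above (arrays used 1-based) */
        pp = 0;
        for (i = minLen; i <= maxLen; i++) {
                temp[i] = limit[i] = 0;
                for (t = 0; t < NSYM; t++)
                        if (length[t] == i)
                                permute[pp++] = t;
        }
        for (i = 0; i < NSYM; i++)
                temp[length[i]]++;
        pp = t = 0;
        for (i = minLen; i < maxLen; i++) {
                pp += temp[i];
                limit[i] = (pp << (maxLen - i)) - 1;
                pp <<= 1;
                base[i + 1] = pp - (t += temp[i]);
        }
        limit[maxLen + 1] = INT_MAX;
        limit[maxLen] = pp + temp[maxLen] - 1;
        base[minLen] = 0;

        /* Decode maxLen bits "100": the code "10" (symbol 1, 2 bits long);
         * the third bit would be pushed back by the real decoder. */
        j = 0x4;
        for (i = minLen; j > limit[i]; i++)
                ;
        printf("symbol %d, %d bits\n",
               permute[(j >> (maxLen - i)) - base[i]], i);
        return 0;
}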
342 /* We've finished reading and digesting the block header. Now
343 read this block's Huffman coded symbols from the file and
344 undo the Huffman coding and run length encoding, saving the
345 result into dbuf[dbufCount++] = uc */
346
347 /* Initialize symbol occurrence counters and symbol Move To
348 * Front table */
349 for (i = 0; i < 256; i++) {
350 byteCount[i] = 0;
351 mtfSymbol[i] = (unsigned char)i;
352 }
353 /* Loop through compressed symbols. */
354 runPos = dbufCount = symCount = selector = 0;
355 for (;;) {
356 /* Determine which Huffman coding group to use. */
357 if (!(symCount--)) {
358 symCount = GROUP_SIZE-1;
359 if (selector >= nSelectors)
360 return RETVAL_DATA_ERROR;
361 hufGroup = bd->groups+selectors[selector++];
362 base = hufGroup->base-1;
363 limit = hufGroup->limit-1;
364 }
365 /* Read next Huffman-coded symbol. */
366 /* Note: It is far cheaper to read maxLen bits and
367 back up than it is to read minLen bits and then an
368 additional bit at a time, testing as we go.
369 Because there is a trailing last block (with file
370 CRC), there is no danger of the overread causing an
371 unexpected EOF for a valid compressed file. As a
372 further optimization, we do the read inline
373 (falling back to a call to get_bits if the buffer
374 runs dry). The following (up to got_huff_bits:) is
375 equivalent to j = get_bits(bd, hufGroup->maxLen);
376 */
377 while (bd->inbufBitCount < hufGroup->maxLen) {
378 if (bd->inbufPos == bd->inbufCount) {
379 j = get_bits(bd, hufGroup->maxLen);
380 goto got_huff_bits;
381 }
382 bd->inbufBits =
383 (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
384 bd->inbufBitCount += 8;
385 };
386 bd->inbufBitCount -= hufGroup->maxLen;
387 j = (bd->inbufBits >> bd->inbufBitCount)&
388 ((1 << hufGroup->maxLen)-1);
389got_huff_bits:
390 /* Figure out how many bits are in next symbol and
391 * unget extras */
392 i = hufGroup->minLen;
393 while (j > limit[i])
394 ++i;
395 bd->inbufBitCount += (hufGroup->maxLen - i);
396 /* Huffman decode value to get nextSym (with bounds checking) */
397 if ((i > hufGroup->maxLen)
398 || (((unsigned)(j = (j>>(hufGroup->maxLen-i))-base[i]))
399 >= MAX_SYMBOLS))
400 return RETVAL_DATA_ERROR;
401 nextSym = hufGroup->permute[j];
402 /* We have now decoded the symbol, which indicates
403 either a new literal byte, or a repeated run of the
404 most recent literal byte. First, check if nextSym
405 indicates a repeated run, and if so loop collecting
406 how many times to repeat the last literal. */
407 if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */
408 /* If this is the start of a new run, zero out
409 * counter */
410 if (!runPos) {
411 runPos = 1;
412 t = 0;
413 }
414 /* Neat trick that saves 1 symbol: instead of
415 or-ing 0 or 1 at each bit position, add 1
416 or 2 instead. For example, 1011 is 1 << 0
417 + 1 << 1 + 2 << 2. 1010 is 2 << 0 + 2 << 1
418 + 1 << 2. You can make any bit pattern
419 that way using 1 less symbol than the basic
420 or 0/1 method (except all bits 0, which
421 would use no symbols, but a run of length 0
422 doesn't mean anything in this context).
423 Thus space is saved. */
424 t += (runPos << nextSym);
425 /* +runPos if RUNA; +2*runPos if RUNB */
426
427 runPos <<= 1;
428 continue;
429 }
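/* Illustrative sketch (not part of this patch): the RUNA/RUNB run-length
 * trick above. RUNA adds runPos, RUNB adds 2*runPos, and runPos doubles
 * after each symbol, so the run length is built up in bijective base 2
 * (digits 1 and 2 rather than 0 and 1). */
#include <stdio.h>

int main(void)
{
        /* 0 = RUNA, 1 = RUNB: the sequence RUNB RUNA RUNB encodes
         * 2*1 + 1*2 + 2*4 = 12 repeated copies of the current literal. */
        static const int run_syms[3] = { 1, 0, 1 };
        int t = 0, runPos = 1;
        int i;

        for (i = 0; i < 3; i++) {
                t += runPos << run_syms[i];     /* +runPos or +2*runPos */
                runPos <<= 1;
        }
        printf("run length = %d\n", t); /* prints: run length = 12 */
        return 0;
}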
430 /* When we hit the first non-run symbol after a run,
431 we now know how many times to repeat the last
432 literal, so append that many copies to our buffer
433 of decoded symbols (dbuf) now. (The last literal
434 used is the one at the head of the mtfSymbol
435 array.) */
436 if (runPos) {
437 runPos = 0;
438 if (dbufCount+t >= dbufSize)
439 return RETVAL_DATA_ERROR;
440
441 uc = symToByte[mtfSymbol[0]];
442 byteCount[uc] += t;
443 while (t--)
444 dbuf[dbufCount++] = uc;
445 }
446 /* Is this the terminating symbol? */
447 if (nextSym > symTotal)
448 break;
449 /* At this point, nextSym indicates a new literal
450 character. Subtract one to get the position in the
451 MTF array at which this literal is currently to be
452 found. (Note that the result can't be -1 or 0,
453 because 0 and 1 are RUNA and RUNB. But another
454 instance of the first symbol in the mtf array,
455 position 0, would have been handled as part of a
456 run above. Therefore 1 unused mtf position minus 2
457 non-literal nextSym values equals -1.) */
458 if (dbufCount >= dbufSize)
459 return RETVAL_DATA_ERROR;
460 i = nextSym - 1;
461 uc = mtfSymbol[i];
462 /* Adjust the MTF array. Since we typically expect to
463 * move only a small number of symbols, and are bound
464 * by 256 in any case, using memmove here would
465 * typically be bigger and slower due to function call
466 * overhead and other assorted setup costs. */
467 do {
468 mtfSymbol[i] = mtfSymbol[i-1];
469 } while (--i);
470 mtfSymbol[0] = uc;
471 uc = symToByte[uc];
472 /* We have our literal byte. Save it into dbuf. */
473 byteCount[uc]++;
474 dbuf[dbufCount++] = (unsigned int)uc;
475 }
476 /* At this point, we've read all the Huffman-coded symbols
477 (and repeated runs) for this block from the input stream,
478 and decoded them into the intermediate buffer. There are
479 dbufCount many decoded bytes in dbuf[]. Now undo the
480 Burrows-Wheeler transform on dbuf. See
481 http://dogma.net/markn/articles/bwt/bwt.htm
482 */
483 /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
484 j = 0;
485 for (i = 0; i < 256; i++) {
486 k = j+byteCount[i];
487 byteCount[i] = j;
488 j = k;
489 }
490 /* Figure out what order dbuf would be in if we sorted it. */
491 for (i = 0; i < dbufCount; i++) {
492 uc = (unsigned char)(dbuf[i] & 0xff);
493 dbuf[byteCount[uc]] |= (i << 8);
494 byteCount[uc]++;
495 }
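/* Illustrative sketch (not part of this patch): the same counting-sort
 * construction of the inverse Burrows-Wheeler sequence vector, on a plain
 * byte array instead of the packed dbuf[] representation used above. The
 * input values are chosen for illustration only. */
#include <stdio.h>

int main(void)
{
        /* BWT last column of "banana" (no end-of-string marker), and the
         * index of the row holding the original string. */
        static const char bwt[] = "nnbaaa";
        int origPtr = 3, n = 6;
        int count[256] = { 0 }, next[6];
        int i, j, k, pos;

        /* Cumulative occurrence counts, as done for byteCount[] above */
        for (i = 0; i < n; i++)
                count[(unsigned char)bwt[i]]++;
        for (i = 0, j = 0; i < 256; i++) {
                k = j + count[i];
                count[i] = j;
                j = k;
        }
        /* next[] plays the role of the index packed into dbuf[i] >> 8 */
        for (i = 0; i < n; i++)
                next[count[(unsigned char)bwt[i]]++] = i;

        /* Follow the sequence vector, as read_bunzip() does */
        for (pos = next[origPtr], i = 0; i < n; i++) {
                putchar(bwt[pos]);
                pos = next[pos];
        }
        putchar('\n');  /* prints: banana */
        return 0;
}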
496 /* Decode first byte by hand to initialize "previous" byte.
497 Note that it doesn't get output, and if the first three
498 characters are identical it doesn't qualify as a run (hence
499 writeRunCountdown = 5). */
500 if (dbufCount) {
501 if (origPtr >= dbufCount)
502 return RETVAL_DATA_ERROR;
503 bd->writePos = dbuf[origPtr];
504 bd->writeCurrent = (unsigned char)(bd->writePos&0xff);
505 bd->writePos >>= 8;
506 bd->writeRunCountdown = 5;
507 }
508 bd->writeCount = dbufCount;
509
510 return RETVAL_OK;
511}
512
513/* Undo the Burrows-Wheeler transform on the intermediate buffer to produce
514 output. Up to len bytes of decoded data are written to outbuf. The return
515 value is the number of bytes written, or an error (all errors are negative
516 numbers). When a flush callback is in use, the caller simply invokes this
517 function repeatedly, writing out each chunk, until it returns <= 0.
518*/
519
520static int INIT read_bunzip(struct bunzip_data *bd, char *outbuf, int len)
521{
522 const unsigned int *dbuf;
523 int pos, xcurrent, previous, gotcount;
524
525 /* If last read was short due to end of file, return last block now */
526 if (bd->writeCount < 0)
527 return bd->writeCount;
528
529 gotcount = 0;
530 dbuf = bd->dbuf;
531 pos = bd->writePos;
532 xcurrent = bd->writeCurrent;
533
534 /* We will always have pending decoded data to write into the output
535 buffer unless this is the very first call (in which case we haven't
536 Huffman-decoded a block into the intermediate buffer yet). */
537
538 if (bd->writeCopies) {
539 /* Inside the loop, writeCopies means extra copies (beyond 1) */
540 --bd->writeCopies;
541 /* Loop outputting bytes */
542 for (;;) {
543 /* If the output buffer is full, snapshot
544 * state and return */
545 if (gotcount >= len) {
546 bd->writePos = pos;
547 bd->writeCurrent = xcurrent;
548 bd->writeCopies++;
549 return len;
550 }
551 /* Write next byte into output buffer, updating CRC */
552 outbuf[gotcount++] = xcurrent;
553 bd->writeCRC = (((bd->writeCRC) << 8)
554 ^bd->crc32Table[((bd->writeCRC) >> 24)
555 ^xcurrent]);
556 /* Loop now if we're outputting multiple
557 * copies of this byte */
558 if (bd->writeCopies) {
559 --bd->writeCopies;
560 continue;
561 }
562decode_next_byte:
563 if (!bd->writeCount--)
564 break;
565 /* Follow sequence vector to undo
566 * Burrows-Wheeler transform */
567 previous = xcurrent;
568 pos = dbuf[pos];
569 xcurrent = pos&0xff;
570 pos >>= 8;
571 /* After 3 consecutive copies of the same
572 byte, the 4th is a repeat count. We count
573 down from 4 instead of counting up because
574 testing for non-zero is faster */
575 if (--bd->writeRunCountdown) {
576 if (xcurrent != previous)
577 bd->writeRunCountdown = 4;
578 } else {
579 /* We have a repeated run, this byte
580 * indicates the count */
581 bd->writeCopies = xcurrent;
582 xcurrent = previous;
583 bd->writeRunCountdown = 5;
584 /* Sometimes there are just 3 bytes
585 * (run length 0) */
586 if (!bd->writeCopies)
587 goto decode_next_byte;
588 /* Subtract the 1 copy we'd output
589 * anyway to get extras */
590 --bd->writeCopies;
591 }
592 }
593 /* Decompression of this block completed successfully */
594 bd->writeCRC = ~bd->writeCRC;
595 bd->totalCRC = ((bd->totalCRC << 1) |
596 (bd->totalCRC >> 31)) ^ bd->writeCRC;
597 /* If this block had a CRC error, force file level CRC error. */
598 if (bd->writeCRC != bd->headerCRC) {
599 bd->totalCRC = bd->headerCRC+1;
600 return RETVAL_LAST_BLOCK;
601 }
602 }
603
604 /* Refill the intermediate buffer by Huffman-decoding next
605 * block of input */
606 /* (previous is just a convenient unused temp variable here) */
607 previous = get_next_block(bd);
608 if (previous) {
609 bd->writeCount = previous;
610 return (previous != RETVAL_LAST_BLOCK) ? previous : gotcount;
611 }
612 bd->writeCRC = 0xffffffffUL;
613 pos = bd->writePos;
614 xcurrent = bd->writeCurrent;
615 goto decode_next_byte;
616}
617
618static int INIT nofill(void *buf, unsigned int len)
619{
620 return -1;
621}
622
623/* Allocate the structure, read the file header. If fill is NULL, inbuf must
624 contain a complete bzip2 stream (len bytes long). Otherwise, further input is
625 read into inbuf through the fill callback as the stream is consumed. */
626static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
627 int (*fill)(void*, unsigned int))
628{
629 struct bunzip_data *bd;
630 unsigned int i, j, c;
631 const unsigned int BZh0 =
632 (((unsigned int)'B') << 24)+(((unsigned int)'Z') << 16)
633 +(((unsigned int)'h') << 8)+(unsigned int)'0';
634
635 /* Figure out how much data to allocate */
636 i = sizeof(struct bunzip_data);
637
638 /* Allocate bunzip_data. Most fields initialize to zero. */
639 bd = *bdp = malloc(i);
640 memset(bd, 0, sizeof(struct bunzip_data));
641 /* Setup input buffer */
642 bd->inbuf = inbuf;
643 bd->inbufCount = len;
644 if (fill != NULL)
645 bd->fill = fill;
646 else
647 bd->fill = nofill;
648
649 /* Init the CRC32 table (big endian) */
650 for (i = 0; i < 256; i++) {
651 c = i << 24;
652 for (j = 8; j; j--)
653 c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1);
654 bd->crc32Table[i] = c;
655 }
656
657 /* Ensure that file starts with "BZh['1'-'9']." */
658 i = get_bits(bd, 32);
659 if (((unsigned int)(i-BZh0-1)) >= 9)
660 return RETVAL_NOT_BZIP_DATA;
661
662 /* Fourth byte (ascii '1'-'9') indicates block size in units of 100k of
663 uncompressed data. Allocate intermediate buffer for block. */
664 bd->dbufSize = 100000*(i-BZh0);
665
666 bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
667 return RETVAL_OK;
668}
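/* Illustrative sketch (not part of this patch): how the big-endian CRC table
 * built above pairs with the per-byte update used in read_bunzip()
 * (crc = (crc << 8) ^ table[(crc >> 24) ^ byte], seeded with ~0 and
 * finalized by inversion). The message below is arbitrary. */
#include <stdio.h>

int main(void)
{
        static const unsigned char msg[] = "hello";
        unsigned int table[256], crc = 0xffffffffU;
        unsigned int i, c;
        int j;

        for (i = 0; i < 256; i++) {
                c = i << 24;
                for (j = 8; j; j--)
                        c = c & 0x80000000 ? (c << 1) ^ 0x04c11db7 : (c << 1);
                table[i] = c;
        }
        for (i = 0; msg[i]; i++)
                crc = (crc << 8) ^ table[(crc >> 24) ^ msg[i]];
        printf("block CRC would be 0x%08x\n", ~crc);
        return 0;
}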
669
670/* Decompress bzip2 data from buf (or the fill callback) to outbuf (or the
671 flush callback). (Stops at end of bzip2 data, not end of input.) */
672STATIC int INIT bunzip2(unsigned char *buf, int len,
673 int(*fill)(void*, unsigned int),
674 int(*flush)(void*, unsigned int),
675 unsigned char *outbuf,
676 int *pos,
677 void(*error_fn)(char *x))
678{
679 struct bunzip_data *bd;
680 int i = -1;
681 unsigned char *inbuf;
682
683 set_error_fn(error_fn);
684 if (flush)
685 outbuf = malloc(BZIP2_IOBUF_SIZE);
686
687 if (!outbuf) {
688 error("Could not allocate output buffer");
689 return -1;
690 }
691 if (buf)
692 inbuf = buf;
693 else
694 inbuf = malloc(BZIP2_IOBUF_SIZE);
695 if (!inbuf) {
696 error("Could not allocate input buffer");
697 goto exit_0;
698 }
699 i = start_bunzip(&bd, inbuf, len, fill);
700 if (!i) {
701 for (;;) {
702 i = read_bunzip(bd, outbuf, BZIP2_IOBUF_SIZE);
703 if (i <= 0)
704 break;
705 if (!flush)
706 outbuf += i;
707 else
708 if (i != flush(outbuf, i)) {
709 i = RETVAL_UNEXPECTED_OUTPUT_EOF;
710 break;
711 }
712 }
713 }
714 /* Check CRC and release memory */
715 if (i == RETVAL_LAST_BLOCK) {
716 if (bd->headerCRC != bd->totalCRC)
717 error("Data integrity error when decompressing.");
718 else
719 i = RETVAL_OK;
720 } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) {
721 error("Compressed file ends unexpectedly");
722 }
723 if (bd->dbuf)
724 large_free(bd->dbuf);
725 if (pos)
726 *pos = bd->inbufPos;
727 free(bd);
728 if (!buf)
729 free(inbuf);
730exit_0:
731 if (flush)
732 free(outbuf);
733 return i;
734}
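/* Illustrative sketch (not part of this patch): one way a caller could drive
 * bunzip2() in flush mode with in-memory callbacks. src/src_len/src_off and
 * all example_* names are hypothetical; in the kernel this entry point is
 * normally reached through the decompress() alias below or via the initramfs
 * unpacking code rather than being called directly like this. */
static const unsigned char *src;        /* hypothetical compressed input */
static int src_len, src_off;

static int example_fill(void *buf, unsigned int len)
{
        int i, n = src_len - src_off;

        if (n > (int)len)
                n = len;
        for (i = 0; i < n; i++)
                ((unsigned char *)buf)[i] = src[src_off + i];
        src_off += n;
        return n;
}

static int example_flush(void *buf, unsigned int len)
{
        /* consume len decompressed bytes from buf here */
        return len;
}

static void example_error(char *msg)
{
        /* report msg */
}

/* ... and then:
 *      err = bunzip2(NULL, 0, example_fill, example_flush, NULL, NULL,
 *                    example_error);
 */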
735
736#ifdef PREBOOT
737STATIC int INIT decompress(unsigned char *buf, int len,
738 int(*fill)(void*, unsigned int),
739 int(*flush)(void*, unsigned int),
740 unsigned char *outbuf,
741 int *pos,
742 void(*error_fn)(char *x))
743{
744 return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error_fn);
745}
746#endif
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
new file mode 100644
index 000000000000..68dfce59c1b8
--- /dev/null
+++ b/lib/decompress_inflate.c
@@ -0,0 +1,168 @@
1#ifdef STATIC
2/* Pre-boot environment: included */
3
4/* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots
5 * of errors about console_printk etc... on ARM */
6#define _LINUX_KERNEL_H
7
8#include "zlib_inflate/inftrees.c"
9#include "zlib_inflate/inffast.c"
10#include "zlib_inflate/inflate.c"
11
12#else /* STATIC */
13/* initramfs et al: linked */
14
15#include <linux/zutil.h>
16
17#include "zlib_inflate/inftrees.h"
18#include "zlib_inflate/inffast.h"
19#include "zlib_inflate/inflate.h"
20
21#include "zlib_inflate/infutil.h"
22#include <linux/slab.h>
23
24#endif /* STATIC */
25
26#include <linux/decompress/mm.h>
27
28#define GZIP_IOBUF_SIZE (16*1024)
29
30/* Included from initramfs et al code */
31STATIC int INIT gunzip(unsigned char *buf, int len,
32 int(*fill)(void*, unsigned int),
33 int(*flush)(void*, unsigned int),
34 unsigned char *out_buf,
35 int *pos,
36 void(*error_fn)(char *x)) {
37 u8 *zbuf;
38 struct z_stream_s *strm;
39 int rc;
40 size_t out_len;
41
42 set_error_fn(error_fn);
43 rc = -1;
44 if (flush) {
45 out_len = 0x8000; /* 32 K */
46 out_buf = malloc(out_len);
47 } else {
48 out_len = 0x7fffffff; /* no limit */
49 }
50 if (!out_buf) {
51 error("Out of memory while allocating output buffer");
52 goto gunzip_nomem1;
53 }
54
55 if (buf)
56 zbuf = buf;
57 else {
58 zbuf = malloc(GZIP_IOBUF_SIZE);
59 len = 0;
60 }
61 if (!zbuf) {
62 error("Out of memory while allocating input buffer");
63 goto gunzip_nomem2;
64 }
65
66 strm = malloc(sizeof(*strm));
67 if (strm == NULL) {
68 error("Out of memory while allocating z_stream");
69 goto gunzip_nomem3;
70 }
71
72 strm->workspace = malloc(flush ? zlib_inflate_workspacesize() :
73 sizeof(struct inflate_state));
74 if (strm->workspace == NULL) {
75 error("Out of memory while allocating workspace");
76 goto gunzip_nomem4;
77 }
78
79 if (len == 0)
80 len = fill(zbuf, GZIP_IOBUF_SIZE);
81
82 /* verify the gzip header */
83 if (len < 10 ||
84 zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 0x08) {
85 if (pos)
86 *pos = 0;
87 error("Not a gzip file");
88 goto gunzip_5;
89 }
90
91 /* skip over gzip header (1f,8b,08... 10 bytes total +
92 * possible asciz filename)
93 */
94 strm->next_in = zbuf + 10;
95 /* skip over asciz filename */
96 if (zbuf[3] & 0x8) {
97 while (strm->next_in[0])
98 strm->next_in++;
99 strm->next_in++;
100 }
101 strm->avail_in = len - (strm->next_in - zbuf);
102
103 strm->next_out = out_buf;
104 strm->avail_out = out_len;
105
106 rc = zlib_inflateInit2(strm, -MAX_WBITS);
107
108 if (!flush) {
109 WS(strm)->inflate_state.wsize = 0;
110 WS(strm)->inflate_state.window = NULL;
111 }
112
113 while (rc == Z_OK) {
114 if (strm->avail_in == 0) {
115 /* TODO: handle case where both pos and fill are set */
116 len = fill(zbuf, GZIP_IOBUF_SIZE);
117 if (len < 0) {
118 rc = -1;
119 error("read error");
120 break;
121 }
122 strm->next_in = zbuf;
123 strm->avail_in = len;
124 }
125 rc = zlib_inflate(strm, 0);
126
127 /* Write any data generated */
128 if (flush && strm->next_out > out_buf) {
129 int l = strm->next_out - out_buf;
130 if (l != flush(out_buf, l)) {
131 rc = -1;
132 error("write error");
133 break;
134 }
135 strm->next_out = out_buf;
136 strm->avail_out = out_len;
137 }
138
139 /* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
140 if (rc == Z_STREAM_END) {
141 rc = 0;
142 break;
143 } else if (rc != Z_OK) {
144 error("uncompression error");
145 rc = -1;
146 }
147 }
148
149 zlib_inflateEnd(strm);
150 if (pos)
151 /* add + 8 to skip over trailer */
152 *pos = strm->next_in - zbuf+8;
153
154gunzip_5:
155 free(strm->workspace);
156gunzip_nomem4:
157 free(strm);
158gunzip_nomem3:
159 if (!buf)
160 free(zbuf);
161gunzip_nomem2:
162 if (flush)
163 free(out_buf);
164gunzip_nomem1:
165 return rc; /* returns Z_OK (0) if successful */
166}
167
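/* Illustrative sketch (not part of this patch): the fixed gzip member header
 * (RFC 1952) that the checks above rely on. Offsets 0-1 are the magic bytes,
 * offset 2 the compression method (8 = deflate), offset 3 the flag byte; of
 * the flags, only FNAME (0x08) is handled by gunzip() above. The bytes below
 * are illustrative and do not form a complete .gz stream. */
#include <stdio.h>

int main(void)
{
        static const unsigned char hdr[] = {
                0x1f, 0x8b,                     /* ID1, ID2: gzip magic */
                0x08,                           /* CM: deflate */
                0x08,                           /* FLG: FNAME */
                0x00, 0x00, 0x00, 0x00,         /* MTIME */
                0x00, 0x00,                     /* XFL, OS */
                'x', 0x00                       /* NUL-terminated name */
        };
        const unsigned char *p = hdr + 10;

        if (hdr[0] != 0x1f || hdr[1] != 0x8b || hdr[2] != 0x08)
                return 1;
        if (hdr[3] & 0x08) {            /* skip asciz file name, as above */
                while (*p)
                        p++;
                p++;
        }
        printf("deflate data starts at offset %d\n", (int)(p - hdr));
        return 0;
}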
168#define decompress gunzip
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
new file mode 100644
index 000000000000..0b954e04bd30
--- /dev/null
+++ b/lib/decompress_unlzma.c
@@ -0,0 +1,659 @@
1/* Lzma decompressor for Linux kernel. Shamelessly snarfed
2 *from busybox 1.1.1
3 *
4 *Linux kernel adaptation
5 *Copyright (C) 2006 Alain < alain@knaff.lu >
6 *
7 *Based on small lzma deflate implementation/Small range coder
8 *implementation for lzma.
9 *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
10 *
11 *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
12 *Copyright (C) 1999-2005 Igor Pavlov
13 *
14 *Copyrights of the parts, see headers below.
15 *
16 *
17 *This program is free software; you can redistribute it and/or
18 *modify it under the terms of the GNU Lesser General Public
19 *License as published by the Free Software Foundation; either
20 *version 2.1 of the License, or (at your option) any later version.
21 *
22 *This program is distributed in the hope that it will be useful,
23 *but WITHOUT ANY WARRANTY; without even the implied warranty of
24 *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
25 *Lesser General Public License for more details.
26 *
27 *You should have received a copy of the GNU Lesser General Public
28 *License along with this library; if not, write to the Free Software
29 *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
30 */
31
32#ifdef STATIC
33#define PREBOOT
34#else
35#include <linux/decompress/unlzma.h>
36#include <linux/slab.h>
37#endif /* STATIC */
38
39#include <linux/decompress/mm.h>
40
41#define MIN(a, b) (((a) < (b)) ? (a) : (b))
42
43static long long INIT read_int(unsigned char *ptr, int size)
44{
45 int i;
46 long long ret = 0;
47
48 for (i = 0; i < size; i++)
49 ret = (ret << 8) | ptr[size-i-1];
50 return ret;
51}
52
53#define ENDIAN_CONVERT(x) \
54 x = (typeof(x))read_int((unsigned char *)&x, sizeof(x))
55
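/* Illustrative sketch (not part of this patch): read_int() assembles a
 * little-endian integer byte by byte, so ENDIAN_CONVERT() yields the same
 * result on any host byte order. A standalone check of the same loop: */
#include <stdio.h>

static long long read_int_example(const unsigned char *ptr, int size)
{
        long long ret = 0;
        int i;

        for (i = 0; i < size; i++)
                ret = (ret << 8) | ptr[size - i - 1];
        return ret;
}

int main(void)
{
        /* dict_size = 0x00800000 (8 MiB) as it might appear in a header */
        static const unsigned char raw[4] = { 0x00, 0x00, 0x80, 0x00 };

        printf("0x%llx\n",
               (unsigned long long)read_int_example(raw, 4)); /* 0x800000 */
        return 0;
}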
56
57/* Small range coder implementation for lzma.
58 *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
59 *
60 *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
61 *Copyright (c) 1999-2005 Igor Pavlov
62 */
63
64#include <linux/compiler.h>
65
66#define LZMA_IOBUF_SIZE 0x10000
67
68struct rc {
69 int (*fill)(void*, unsigned int);
70 uint8_t *ptr;
71 uint8_t *buffer;
72 uint8_t *buffer_end;
73 int buffer_size;
74 uint32_t code;
75 uint32_t range;
76 uint32_t bound;
77};
78
79
80#define RC_TOP_BITS 24
81#define RC_MOVE_BITS 5
82#define RC_MODEL_TOTAL_BITS 11
83
84
85/* Called twice: once at startup and once in rc_normalize() */
86static void INIT rc_read(struct rc *rc)
87{
88 rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE);
89 if (rc->buffer_size <= 0)
90 error("unexpected EOF");
91 rc->ptr = rc->buffer;
92 rc->buffer_end = rc->buffer + rc->buffer_size;
93}
94
95/* Called once */
96static inline void INIT rc_init(struct rc *rc,
97 int (*fill)(void*, unsigned int),
98 char *buffer, int buffer_size)
99{
100 rc->fill = fill;
101 rc->buffer = (uint8_t *)buffer;
102 rc->buffer_size = buffer_size;
103 rc->buffer_end = rc->buffer + rc->buffer_size;
104 rc->ptr = rc->buffer;
105
106 rc->code = 0;
107 rc->range = 0xFFFFFFFF;
108}
109
110static inline void INIT rc_init_code(struct rc *rc)
111{
112 int i;
113
114 for (i = 0; i < 5; i++) {
115 if (rc->ptr >= rc->buffer_end)
116 rc_read(rc);
117 rc->code = (rc->code << 8) | *rc->ptr++;
118 }
119}
120
121
122/* Called once. TODO: bb_maybe_free() */
123static inline void INIT rc_free(struct rc *rc)
124{
125 free(rc->buffer);
126}
127
128/* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */
129static void INIT rc_do_normalize(struct rc *rc)
130{
131 if (rc->ptr >= rc->buffer_end)
132 rc_read(rc);
133 rc->range <<= 8;
134 rc->code = (rc->code << 8) | *rc->ptr++;
135}
136static inline void INIT rc_normalize(struct rc *rc)
137{
138 if (rc->range < (1 << RC_TOP_BITS))
139 rc_do_normalize(rc);
140}
141
142/* Called 9 times */
143/* Why does rc_is_bit_0_helper exist?
144 * Because we always want to expose (rc->code < rc->bound) to the optimizer
145 */
146static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p)
147{
148 rc_normalize(rc);
149 rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
150 return rc->bound;
151}
152static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p)
153{
154 uint32_t t = rc_is_bit_0_helper(rc, p);
155 return rc->code < t;
156}
157
158/* Called ~10 times, but very small, thus inlined */
159static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p)
160{
161 rc->range = rc->bound;
162 *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
163}
164static inline void rc_update_bit_1(struct rc *rc, uint16_t *p)
165{
166 rc->range -= rc->bound;
167 rc->code -= rc->bound;
168 *p -= *p >> RC_MOVE_BITS;
169}
170
171/* Called 4 times in unlzma loop */
172static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol)
173{
174 if (rc_is_bit_0(rc, p)) {
175 rc_update_bit_0(rc, p);
176 *symbol *= 2;
177 return 0;
178 } else {
179 rc_update_bit_1(rc, p);
180 *symbol = *symbol * 2 + 1;
181 return 1;
182 }
183}
184
185/* Called once */
186static inline int INIT rc_direct_bit(struct rc *rc)
187{
188 rc_normalize(rc);
189 rc->range >>= 1;
190 if (rc->code >= rc->range) {
191 rc->code -= rc->range;
192 return 1;
193 }
194 return 0;
195}
196
197/* Called twice */
198static inline void INIT
199rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
200{
201 int i = num_levels;
202
203 *symbol = 1;
204 while (i--)
205 rc_get_bit(rc, p + *symbol, symbol);
206 *symbol -= 1 << num_levels;
207}
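/* Illustrative sketch (not part of this patch): the index walk performed by
 * rc_bit_tree_decode(). The probability array is laid out as a binary tree
 * rooted at index 1; each decoded bit picks a child, and subtracting
 * 1 << num_levels at the end turns the final node index into the symbol.
 * Here the range coder is replaced by a fixed bit string. */
#include <stdio.h>

int main(void)
{
        static const int bits[3] = { 1, 0, 1 }; /* decoded high bit first */
        int num_levels = 3, symbol = 1;
        int i;

        for (i = 0; i < num_levels; i++)
                symbol = symbol * 2 + bits[i];  /* what rc_get_bit() does */
        symbol -= 1 << num_levels;
        printf("symbol = %d\n", symbol);        /* prints: symbol = 5 */
        return 0;
}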
208
209
210/*
211 * Small lzma deflate implementation.
212 * Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
213 *
214 * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
215 * Copyright (C) 1999-2005 Igor Pavlov
216 */
217
218
219struct lzma_header {
220 uint8_t pos;
221 uint32_t dict_size;
222 uint64_t dst_size;
223} __attribute__ ((packed)) ;
224
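/* Illustrative sketch (not part of this patch): the first header byte (pos)
 * encodes the lc/lp/pb model parameters as ((pb * 5) + lp) * 9 + lc, which
 * is what the two while-loops in unlzma() below unpack. Example for the
 * common default lc=3, lp=0, pb=2 (properties byte 0x5d): */
#include <stdio.h>

int main(void)
{
        int pos = 0x5d;         /* header.pos for default LZMA settings */
        int lc, lp, pb, mi;

        mi = 0;
        lc = pos;
        while (lc >= 9) {
                mi++;
                lc -= 9;
        }
        pb = 0;
        lp = mi;
        while (lp >= 5) {
                pb++;
                lp -= 5;
        }
        printf("lc=%d lp=%d pb=%d\n", lc, lp, pb);      /* lc=3 lp=0 pb=2 */
        return 0;
}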
225
226#define LZMA_BASE_SIZE 1846
227#define LZMA_LIT_SIZE 768
228
229#define LZMA_NUM_POS_BITS_MAX 4
230
231#define LZMA_LEN_NUM_LOW_BITS 3
232#define LZMA_LEN_NUM_MID_BITS 3
233#define LZMA_LEN_NUM_HIGH_BITS 8
234
235#define LZMA_LEN_CHOICE 0
236#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
237#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
238#define LZMA_LEN_MID (LZMA_LEN_LOW \
239 + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
240#define LZMA_LEN_HIGH (LZMA_LEN_MID \
241 +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
242#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))
243
244#define LZMA_NUM_STATES 12
245#define LZMA_NUM_LIT_STATES 7
246
247#define LZMA_START_POS_MODEL_INDEX 4
248#define LZMA_END_POS_MODEL_INDEX 14
249#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))
250
251#define LZMA_NUM_POS_SLOT_BITS 6
252#define LZMA_NUM_LEN_TO_POS_STATES 4
253
254#define LZMA_NUM_ALIGN_BITS 4
255
256#define LZMA_MATCH_MIN_LEN 2
257
258#define LZMA_IS_MATCH 0
259#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
260#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
261#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
262#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
263#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
264#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
265 + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
266#define LZMA_SPEC_POS (LZMA_POS_SLOT \
267 +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
268#define LZMA_ALIGN (LZMA_SPEC_POS \
269 + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
270#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
271#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
272#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)
273
274
275struct writer {
276 uint8_t *buffer;
277 uint8_t previous_byte;
278 size_t buffer_pos;
279 int bufsize;
280 size_t global_pos;
281 int(*flush)(void*, unsigned int);
282 struct lzma_header *header;
283};
284
285struct cstate {
286 int state;
287 uint32_t rep0, rep1, rep2, rep3;
288};
289
290static inline size_t INIT get_pos(struct writer *wr)
291{
292 return
293 wr->global_pos + wr->buffer_pos;
294}
295
296static inline uint8_t INIT peek_old_byte(struct writer *wr,
297 uint32_t offs)
298{
299 if (!wr->flush) {
300 int32_t pos;
301 while (offs > wr->header->dict_size)
302 offs -= wr->header->dict_size;
303 pos = wr->buffer_pos - offs;
304 return wr->buffer[pos];
305 } else {
306 uint32_t pos = wr->buffer_pos - offs;
307 while (pos >= wr->header->dict_size)
308 pos += wr->header->dict_size;
309 return wr->buffer[pos];
310 }
311
312}
313
314static inline void INIT write_byte(struct writer *wr, uint8_t byte)
315{
316 wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte;
317 if (wr->flush && wr->buffer_pos == wr->header->dict_size) {
318 wr->buffer_pos = 0;
319 wr->global_pos += wr->header->dict_size;
320 wr->flush((char *)wr->buffer, wr->header->dict_size);
321 }
322}
323
324
325static inline void INIT copy_byte(struct writer *wr, uint32_t offs)
326{
327 write_byte(wr, peek_old_byte(wr, offs));
328}
329
330static inline void INIT copy_bytes(struct writer *wr,
331 uint32_t rep0, int len)
332{
333 do {
334 copy_byte(wr, rep0);
335 len--;
336 } while (len != 0 && wr->buffer_pos < wr->header->dst_size);
337}
338
339static inline void INIT process_bit0(struct writer *wr, struct rc *rc,
340 struct cstate *cst, uint16_t *p,
341 int pos_state, uint16_t *prob,
342 int lc, uint32_t literal_pos_mask) {
343 int mi = 1;
344 rc_update_bit_0(rc, prob);
345 prob = (p + LZMA_LITERAL +
346 (LZMA_LIT_SIZE
347 * (((get_pos(wr) & literal_pos_mask) << lc)
348 + (wr->previous_byte >> (8 - lc))))
349 );
350
351 if (cst->state >= LZMA_NUM_LIT_STATES) {
352 int match_byte = peek_old_byte(wr, cst->rep0);
353 do {
354 int bit;
355 uint16_t *prob_lit;
356
357 match_byte <<= 1;
358 bit = match_byte & 0x100;
359 prob_lit = prob + 0x100 + bit + mi;
360 if (rc_get_bit(rc, prob_lit, &mi)) {
361 if (!bit)
362 break;
363 } else {
364 if (bit)
365 break;
366 }
367 } while (mi < 0x100);
368 }
369 while (mi < 0x100) {
370 uint16_t *prob_lit = prob + mi;
371 rc_get_bit(rc, prob_lit, &mi);
372 }
373 write_byte(wr, mi);
374 if (cst->state < 4)
375 cst->state = 0;
376 else if (cst->state < 10)
377 cst->state -= 3;
378 else
379 cst->state -= 6;
380}
381
382static inline void INIT process_bit1(struct writer *wr, struct rc *rc,
383 struct cstate *cst, uint16_t *p,
384 int pos_state, uint16_t *prob) {
385 int offset;
386 uint16_t *prob_len;
387 int num_bits;
388 int len;
389
390 rc_update_bit_1(rc, prob);
391 prob = p + LZMA_IS_REP + cst->state;
392 if (rc_is_bit_0(rc, prob)) {
393 rc_update_bit_0(rc, prob);
394 cst->rep3 = cst->rep2;
395 cst->rep2 = cst->rep1;
396 cst->rep1 = cst->rep0;
397 cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3;
398 prob = p + LZMA_LEN_CODER;
399 } else {
400 rc_update_bit_1(rc, prob);
401 prob = p + LZMA_IS_REP_G0 + cst->state;
402 if (rc_is_bit_0(rc, prob)) {
403 rc_update_bit_0(rc, prob);
404 prob = (p + LZMA_IS_REP_0_LONG
405 + (cst->state <<
406 LZMA_NUM_POS_BITS_MAX) +
407 pos_state);
408 if (rc_is_bit_0(rc, prob)) {
409 rc_update_bit_0(rc, prob);
410
411 cst->state = cst->state < LZMA_NUM_LIT_STATES ?
412 9 : 11;
413 copy_byte(wr, cst->rep0);
414 return;
415 } else {
416 rc_update_bit_1(rc, prob);
417 }
418 } else {
419 uint32_t distance;
420
421 rc_update_bit_1(rc, prob);
422 prob = p + LZMA_IS_REP_G1 + cst->state;
423 if (rc_is_bit_0(rc, prob)) {
424 rc_update_bit_0(rc, prob);
425 distance = cst->rep1;
426 } else {
427 rc_update_bit_1(rc, prob);
428 prob = p + LZMA_IS_REP_G2 + cst->state;
429 if (rc_is_bit_0(rc, prob)) {
430 rc_update_bit_0(rc, prob);
431 distance = cst->rep2;
432 } else {
433 rc_update_bit_1(rc, prob);
434 distance = cst->rep3;
435 cst->rep3 = cst->rep2;
436 }
437 cst->rep2 = cst->rep1;
438 }
439 cst->rep1 = cst->rep0;
440 cst->rep0 = distance;
441 }
442 cst->state = cst->state < LZMA_NUM_LIT_STATES ? 8 : 11;
443 prob = p + LZMA_REP_LEN_CODER;
444 }
445
446 prob_len = prob + LZMA_LEN_CHOICE;
447 if (rc_is_bit_0(rc, prob_len)) {
448 rc_update_bit_0(rc, prob_len);
449 prob_len = (prob + LZMA_LEN_LOW
450 + (pos_state <<
451 LZMA_LEN_NUM_LOW_BITS));
452 offset = 0;
453 num_bits = LZMA_LEN_NUM_LOW_BITS;
454 } else {
455 rc_update_bit_1(rc, prob_len);
456 prob_len = prob + LZMA_LEN_CHOICE_2;
457 if (rc_is_bit_0(rc, prob_len)) {
458 rc_update_bit_0(rc, prob_len);
459 prob_len = (prob + LZMA_LEN_MID
460 + (pos_state <<
461 LZMA_LEN_NUM_MID_BITS));
462 offset = 1 << LZMA_LEN_NUM_LOW_BITS;
463 num_bits = LZMA_LEN_NUM_MID_BITS;
464 } else {
465 rc_update_bit_1(rc, prob_len);
466 prob_len = prob + LZMA_LEN_HIGH;
467 offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
468 + (1 << LZMA_LEN_NUM_MID_BITS));
469 num_bits = LZMA_LEN_NUM_HIGH_BITS;
470 }
471 }
472
473 rc_bit_tree_decode(rc, prob_len, num_bits, &len);
474 len += offset;
475
476 if (cst->state < 4) {
477 int pos_slot;
478
479 cst->state += LZMA_NUM_LIT_STATES;
480 prob =
481 p + LZMA_POS_SLOT +
482 ((len <
483 LZMA_NUM_LEN_TO_POS_STATES ? len :
484 LZMA_NUM_LEN_TO_POS_STATES - 1)
485 << LZMA_NUM_POS_SLOT_BITS);
486 rc_bit_tree_decode(rc, prob,
487 LZMA_NUM_POS_SLOT_BITS,
488 &pos_slot);
489 if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
490 int i, mi;
491 num_bits = (pos_slot >> 1) - 1;
492 cst->rep0 = 2 | (pos_slot & 1);
493 if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
494 cst->rep0 <<= num_bits;
495 prob = p + LZMA_SPEC_POS +
496 cst->rep0 - pos_slot - 1;
497 } else {
498 num_bits -= LZMA_NUM_ALIGN_BITS;
499 while (num_bits--)
500 cst->rep0 = (cst->rep0 << 1) |
501 rc_direct_bit(rc);
502 prob = p + LZMA_ALIGN;
503 cst->rep0 <<= LZMA_NUM_ALIGN_BITS;
504 num_bits = LZMA_NUM_ALIGN_BITS;
505 }
506 i = 1;
507 mi = 1;
508 while (num_bits--) {
509 if (rc_get_bit(rc, prob + mi, &mi))
510 cst->rep0 |= i;
511 i <<= 1;
512 }
513 } else
514 cst->rep0 = pos_slot;
515 if (++(cst->rep0) == 0)
516 return;
517 }
518
519 len += LZMA_MATCH_MIN_LEN;
520
521 copy_bytes(wr, cst->rep0, len);
522}
523
524
525
526STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
527 int(*fill)(void*, unsigned int),
528 int(*flush)(void*, unsigned int),
529 unsigned char *output,
530 int *posp,
531 void(*error_fn)(char *x)
532 )
533{
534 struct lzma_header header;
535 int lc, pb, lp;
536 uint32_t pos_state_mask;
537 uint32_t literal_pos_mask;
538 uint16_t *p;
539 int num_probs;
540 struct rc rc;
541 int i, mi;
542 struct writer wr;
543 struct cstate cst;
544 unsigned char *inbuf;
545 int ret = -1;
546
547 set_error_fn(error_fn);
548
549 if (buf)
550 inbuf = buf;
551 else
552 inbuf = malloc(LZMA_IOBUF_SIZE);
553 if (!inbuf) {
554 error("Could not allocate input buffer");
555 goto exit_0;
556 }
557
558 cst.state = 0;
559 cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1;
560
561 wr.header = &header;
562 wr.flush = flush;
563 wr.global_pos = 0;
564 wr.previous_byte = 0;
565 wr.buffer_pos = 0;
566
567 rc_init(&rc, fill, inbuf, in_len);
568
569 for (i = 0; i < sizeof(header); i++) {
570 if (rc.ptr >= rc.buffer_end)
571 rc_read(&rc);
572 ((unsigned char *)&header)[i] = *rc.ptr++;
573 }
574
575 if (header.pos >= (9 * 5 * 5))
576 error("bad header");
577
578 mi = 0;
579 lc = header.pos;
580 while (lc >= 9) {
581 mi++;
582 lc -= 9;
583 }
584 pb = 0;
585 lp = mi;
586 while (lp >= 5) {
587 pb++;
588 lp -= 5;
589 }
590 pos_state_mask = (1 << pb) - 1;
591 literal_pos_mask = (1 << lp) - 1;
592
593 ENDIAN_CONVERT(header.dict_size);
594 ENDIAN_CONVERT(header.dst_size);
595
596 if (header.dict_size == 0)
597 header.dict_size = 1;
598
599 if (output)
600 wr.buffer = output;
601 else {
602 wr.bufsize = MIN(header.dst_size, header.dict_size);
603 wr.buffer = large_malloc(wr.bufsize);
604 }
605 if (wr.buffer == NULL)
606 goto exit_1;
607
608 num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
609 p = (uint16_t *) large_malloc(num_probs * sizeof(*p));
610 if (p == 0)
611 goto exit_2;
612 num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
613 for (i = 0; i < num_probs; i++)
614 p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
615
616 rc_init_code(&rc);
617
618 while (get_pos(&wr) < header.dst_size) {
619 int pos_state = get_pos(&wr) & pos_state_mask;
620 uint16_t *prob = p + LZMA_IS_MATCH +
621 (cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state;
622 if (rc_is_bit_0(&rc, prob))
623 process_bit0(&wr, &rc, &cst, p, pos_state, prob,
624 lc, literal_pos_mask);
625 else {
626 process_bit1(&wr, &rc, &cst, p, pos_state, prob);
627 if (cst.rep0 == 0)
628 break;
629 }
630 }
631
632 if (posp)
633 *posp = rc.ptr-rc.buffer;
634 if (wr.flush)
635 wr.flush(wr.buffer, wr.buffer_pos);
636 ret = 0;
637 large_free(p);
638exit_2:
639 if (!output)
640 large_free(wr.buffer);
641exit_1:
642 if (!buf)
643 free(inbuf);
644exit_0:
645 return ret;
646}
647
648#ifdef PREBOOT
649STATIC int INIT decompress(unsigned char *buf, int in_len,
650 int(*fill)(void*, unsigned int),
651 int(*flush)(void*, unsigned int),
652 unsigned char *output,
653 int *posp,
654 void(*error_fn)(char *x)
655 )
656{
657 return unlzma(buf, in_len - 4, fill, flush, output, posp, error_fn);
658}
659#endif
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
new file mode 100644
index 000000000000..58a9f9fc609a
--- /dev/null
+++ b/lib/dma-debug.c
@@ -0,0 +1,1297 @@
1/*
2 * Copyright (C) 2008 Advanced Micro Devices, Inc.
3 *
4 * Author: Joerg Roedel <joerg.roedel@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/scatterlist.h>
21#include <linux/dma-mapping.h>
22#include <linux/stacktrace.h>
23#include <linux/dma-debug.h>
24#include <linux/spinlock.h>
25#include <linux/debugfs.h>
26#include <linux/uaccess.h>
27#include <linux/device.h>
28#include <linux/types.h>
29#include <linux/sched.h>
30#include <linux/ctype.h>
31#include <linux/list.h>
32#include <linux/slab.h>
33
34#include <asm/sections.h>
35
36#define HASH_SIZE 1024ULL
37#define HASH_FN_SHIFT 13
38#define HASH_FN_MASK (HASH_SIZE - 1)
39
40enum {
41 dma_debug_single,
42 dma_debug_page,
43 dma_debug_sg,
44 dma_debug_coherent,
45};
46
47#define DMA_DEBUG_STACKTRACE_ENTRIES 5
48
49struct dma_debug_entry {
50 struct list_head list;
51 struct device *dev;
52 int type;
53 phys_addr_t paddr;
54 u64 dev_addr;
55 u64 size;
56 int direction;
57 int sg_call_ents;
58 int sg_mapped_ents;
59#ifdef CONFIG_STACKTRACE
60 struct stack_trace stacktrace;
61 unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
62#endif
63};
64
65struct hash_bucket {
66 struct list_head list;
67 spinlock_t lock;
68} ____cacheline_aligned_in_smp;
69
70/* Hash list to save the allocated dma addresses */
71static struct hash_bucket dma_entry_hash[HASH_SIZE];
72/* List of pre-allocated dma_debug_entry's */
73static LIST_HEAD(free_entries);
74/* Lock for the list above */
75static DEFINE_SPINLOCK(free_entries_lock);
76
77/* Global disable flag - will be set in case of an error */
78static bool global_disable __read_mostly;
79
80/* Global error count */
81static u32 error_count;
82
83/* Global error show enable */
84static u32 show_all_errors __read_mostly;
85/* Number of errors to show */
86static u32 show_num_errors = 1;
87
88static u32 num_free_entries;
89static u32 min_free_entries;
90static u32 nr_total_entries;
91
92/* number of preallocated entries requested by kernel cmdline */
93static u32 req_entries;
94
95/* debugfs dentry's for the stuff above */
96static struct dentry *dma_debug_dent __read_mostly;
97static struct dentry *global_disable_dent __read_mostly;
98static struct dentry *error_count_dent __read_mostly;
99static struct dentry *show_all_errors_dent __read_mostly;
100static struct dentry *show_num_errors_dent __read_mostly;
101static struct dentry *num_free_entries_dent __read_mostly;
102static struct dentry *min_free_entries_dent __read_mostly;
103static struct dentry *filter_dent __read_mostly;
104
105/* per-driver filter related state */
106
107#define NAME_MAX_LEN 64
108
109static char current_driver_name[NAME_MAX_LEN] __read_mostly;
110static struct device_driver *current_driver __read_mostly;
111
112static DEFINE_RWLOCK(driver_name_lock);
113
114static const char *type2name[4] = { "single", "page",
115 "scatter-gather", "coherent" };
116
117static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
118 "DMA_FROM_DEVICE", "DMA_NONE" };
119
120/* little merge helper - remove it after the merge window */
121#ifndef BUS_NOTIFY_UNBOUND_DRIVER
122#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
123#endif
124
125/*
126 * The access to some variables in this macro is racy. We can't use atomic_t
127 * here because all these variables are exported to debugfs. Some of them are
128 * even writeable. This is also the reason why a lock won't help much. But anyway,
129 * the races are no big deal. Here is why:
130 *
131 * error_count: the addition is racy, but the worst thing that can happen is
132 * that we don't count some errors
133 * show_num_errors: the subtraction is racy. Also no big deal because in
134 * worst case this will result in one warning more in the
135 * system log than the user configured. This variable is
136 * writeable via debugfs.
137 */
138static inline void dump_entry_trace(struct dma_debug_entry *entry)
139{
140#ifdef CONFIG_STACKTRACE
141 if (entry) {
142 pr_warning("Mapped at:\n");
143 print_stack_trace(&entry->stacktrace, 0);
144 }
145#endif
146}
147
148static bool driver_filter(struct device *dev)
149{
150 struct device_driver *drv;
151 unsigned long flags;
152 bool ret;
153
154 /* driver filter off */
155 if (likely(!current_driver_name[0]))
156 return true;
157
158 /* driver filter on and initialized */
159 if (current_driver && dev && dev->driver == current_driver)
160 return true;
161
162 /* driver filter on, but we can't filter on a NULL device... */
163 if (!dev)
164 return false;
165
166 if (current_driver || !current_driver_name[0])
167 return false;
168
169 /* driver filter on but not yet initialized */
170 drv = get_driver(dev->driver);
171 if (!drv)
172 return false;
173
174 /* lock to protect against change of current_driver_name */
175 read_lock_irqsave(&driver_name_lock, flags);
176
177 ret = false;
178 if (drv->name &&
179 strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
180 current_driver = drv;
181 ret = true;
182 }
183
184 read_unlock_irqrestore(&driver_name_lock, flags);
185 put_driver(drv);
186
187 return ret;
188}
189
190#define err_printk(dev, entry, format, arg...) do { \
191 error_count += 1; \
192 if (driver_filter(dev) && \
193 (show_all_errors || show_num_errors > 0)) { \
194 WARN(1, "%s %s: " format, \
195 dev ? dev_driver_string(dev) : "NULL", \
196 dev ? dev_name(dev) : "NULL", ## arg); \
197 dump_entry_trace(entry); \
198 } \
199 if (!show_all_errors && show_num_errors > 0) \
200 show_num_errors -= 1; \
201 } while (0);
202
203/*
204 * Hash related functions
205 *
206 * Every DMA-API request is saved into a struct dma_debug_entry. To
207 * have quick access to these structs they are stored into a hash.
208 */
209static int hash_fn(struct dma_debug_entry *entry)
210{
211 /*
212 * Hash function is based on the dma address.
213 * We use bits 13-22 (HASH_FN_SHIFT/HASH_FN_MASK) as the index into the hash
214 */
215 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
216}
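/* Illustrative sketch (not part of this patch): with HASH_FN_SHIFT == 13 and
 * HASH_SIZE == 1024, two mappings whose dma addresses differ only below
 * bit 13 land in the same bucket. The addresses are arbitrary examples. */
#include <stdio.h>

int main(void)
{
        unsigned long long a = 0x12345000ULL, b = 0x12345800ULL;
        unsigned long long size = 1024, shift = 13;

        printf("%llu %llu\n", (a >> shift) & (size - 1),
               (b >> shift) & (size - 1));      /* same bucket index twice */
        return 0;
}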
217
218/*
219 * Request exclusive access to a hash bucket for a given dma_debug_entry.
220 */
221static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
222 unsigned long *flags)
223{
224 int idx = hash_fn(entry);
225 unsigned long __flags;
226
227 spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
228 *flags = __flags;
229 return &dma_entry_hash[idx];
230}
231
232/*
233 * Give up exclusive access to the hash bucket
234 */
235static void put_hash_bucket(struct hash_bucket *bucket,
236 unsigned long *flags)
237{
238 unsigned long __flags = *flags;
239
240 spin_unlock_irqrestore(&bucket->lock, __flags);
241}
242
243/*
244 * Search a given entry in the hash bucket list
245 */
246static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
247 struct dma_debug_entry *ref)
248{
249 struct dma_debug_entry *entry, *ret = NULL;
250 int matches = 0, match_lvl, last_lvl = 0;
251
252 list_for_each_entry(entry, &bucket->list, list) {
253 if ((entry->dev_addr != ref->dev_addr) ||
254 (entry->dev != ref->dev))
255 continue;
256
257 /*
258 * Some drivers map the same physical address multiple
259 * times. Without a hardware IOMMU this results in the
260 * same device addresses being put into the dma-debug
261 * hash multiple times too. This can result in false
262 * positives being reported. Therefore we implement a
263 * best-fit algorithm here which returns the entry from
264 * the hash which fits best to the reference value
265 * instead of the first-fit.
266 */
267 matches += 1;
268 match_lvl = 0;
269 entry->size == ref->size ? ++match_lvl : 0;
270 entry->type == ref->type ? ++match_lvl : 0;
271 entry->direction == ref->direction ? ++match_lvl : 0;
272 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
273
274 if (match_lvl == 4) {
275 /* perfect-fit - return the result */
276 return entry;
277 } else if (match_lvl > last_lvl) {
278 /*
279 * We found an entry that fits better than the
280 * previous one
281 */
282 last_lvl = match_lvl;
283 ret = entry;
284 }
285 }
286
287 /*
288 * If we have multiple matches but no perfect-fit, just return
289 * NULL.
290 */
291 ret = (matches == 1) ? ret : NULL;
292
293 return ret;
294}
295
296/*
297 * Add an entry to a hash bucket
298 */
299static void hash_bucket_add(struct hash_bucket *bucket,
300 struct dma_debug_entry *entry)
301{
302 list_add_tail(&entry->list, &bucket->list);
303}
304
305/*
306 * Remove entry from a hash bucket list
307 */
308static void hash_bucket_del(struct dma_debug_entry *entry)
309{
310 list_del(&entry->list);
311}
312
313/*
314 * Dump mapping entries for debugging purposes
315 */
316void debug_dma_dump_mappings(struct device *dev)
317{
318 int idx;
319
320 for (idx = 0; idx < HASH_SIZE; idx++) {
321 struct hash_bucket *bucket = &dma_entry_hash[idx];
322 struct dma_debug_entry *entry;
323 unsigned long flags;
324
325 spin_lock_irqsave(&bucket->lock, flags);
326
327 list_for_each_entry(entry, &bucket->list, list) {
328 if (!dev || dev == entry->dev) {
329 dev_info(entry->dev,
330 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
331 type2name[entry->type], idx,
332 (unsigned long long)entry->paddr,
333 entry->dev_addr, entry->size,
334 dir2name[entry->direction]);
335 }
336 }
337
338 spin_unlock_irqrestore(&bucket->lock, flags);
339 }
340}
341EXPORT_SYMBOL(debug_dma_dump_mappings);
342
343/*
344 * Wrapper function for adding an entry to the hash.
345 * This function takes care of locking itself.
346 */
347static void add_dma_entry(struct dma_debug_entry *entry)
348{
349 struct hash_bucket *bucket;
350 unsigned long flags;
351
352 bucket = get_hash_bucket(entry, &flags);
353 hash_bucket_add(bucket, entry);
354 put_hash_bucket(bucket, &flags);
355}
356
357static struct dma_debug_entry *__dma_entry_alloc(void)
358{
359 struct dma_debug_entry *entry;
360
361 entry = list_entry(free_entries.next, struct dma_debug_entry, list);
362 list_del(&entry->list);
363 memset(entry, 0, sizeof(*entry));
364
365 num_free_entries -= 1;
366 if (num_free_entries < min_free_entries)
367 min_free_entries = num_free_entries;
368
369 return entry;
370}
371
372/* struct dma_entry allocator
373 *
374 * The next two functions implement the allocator for
375 * struct dma_debug_entries.
376 */
377static struct dma_debug_entry *dma_entry_alloc(void)
378{
379 struct dma_debug_entry *entry = NULL;
380 unsigned long flags;
381
382 spin_lock_irqsave(&free_entries_lock, flags);
383
384 if (list_empty(&free_entries)) {
385 pr_err("DMA-API: debugging out of memory - disabling\n");
386 global_disable = true;
387 goto out;
388 }
389
390 entry = __dma_entry_alloc();
391
392#ifdef CONFIG_STACKTRACE
393 entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
394 entry->stacktrace.entries = entry->st_entries;
395 entry->stacktrace.skip = 2;
396 save_stack_trace(&entry->stacktrace);
397#endif
398
399out:
400 spin_unlock_irqrestore(&free_entries_lock, flags);
401
402 return entry;
403}
404
405static void dma_entry_free(struct dma_debug_entry *entry)
406{
407 unsigned long flags;
408
409 /*
410 * add to beginning of the list - this way the entries are
411 * more likely cache hot when they are reallocated.
412 */
413 spin_lock_irqsave(&free_entries_lock, flags);
414 list_add(&entry->list, &free_entries);
415 num_free_entries += 1;
416 spin_unlock_irqrestore(&free_entries_lock, flags);
417}
418
419int dma_debug_resize_entries(u32 num_entries)
420{
421 int i, delta, ret = 0;
422 unsigned long flags;
423 struct dma_debug_entry *entry;
424 LIST_HEAD(tmp);
425
426 spin_lock_irqsave(&free_entries_lock, flags);
427
428 if (nr_total_entries < num_entries) {
429 delta = num_entries - nr_total_entries;
430
431 spin_unlock_irqrestore(&free_entries_lock, flags);
432
433 for (i = 0; i < delta; i++) {
434 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
435 if (!entry)
436 break;
437
438 list_add_tail(&entry->list, &tmp);
439 }
440
441 spin_lock_irqsave(&free_entries_lock, flags);
442
443 list_splice(&tmp, &free_entries);
444 nr_total_entries += i;
445 num_free_entries += i;
446 } else {
447 delta = nr_total_entries - num_entries;
448
449 for (i = 0; i < delta && !list_empty(&free_entries); i++) {
450 entry = __dma_entry_alloc();
451 kfree(entry);
452 }
453
454 nr_total_entries -= i;
455 }
456
457 if (nr_total_entries != num_entries)
458 ret = 1;
459
460 spin_unlock_irqrestore(&free_entries_lock, flags);
461
462 return ret;
463}
464EXPORT_SYMBOL(dma_debug_resize_entries);
465
466/*
467 * DMA-API debugging init code
468 *
469 * The init code does two things:
470 * 1. Initialize core data structures
471 * 2. Preallocate a given number of dma_debug_entry structs
472 */
473
474static int prealloc_memory(u32 num_entries)
475{
476 struct dma_debug_entry *entry, *next_entry;
477 int i;
478
479 for (i = 0; i < num_entries; ++i) {
480 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
481 if (!entry)
482 goto out_err;
483
484 list_add_tail(&entry->list, &free_entries);
485 }
486
487 num_free_entries = num_entries;
488 min_free_entries = num_entries;
489
490 pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
491
492 return 0;
493
494out_err:
495
496 list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
497 list_del(&entry->list);
498 kfree(entry);
499 }
500
501 return -ENOMEM;
502}
503
504static ssize_t filter_read(struct file *file, char __user *user_buf,
505 size_t count, loff_t *ppos)
506{
507 char buf[NAME_MAX_LEN + 1];
508 unsigned long flags;
509 int len;
510
511 if (!current_driver_name[0])
512 return 0;
513
514 /*
515 * We can't copy to userspace directly because current_driver_name can
516 * only be read under the driver_name_lock with irqs disabled. So
517 * create a temporary copy first.
518 */
519 read_lock_irqsave(&driver_name_lock, flags);
520 len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
521 read_unlock_irqrestore(&driver_name_lock, flags);
522
523 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
524}
525
526static ssize_t filter_write(struct file *file, const char __user *userbuf,
527 size_t count, loff_t *ppos)
528{
529 char buf[NAME_MAX_LEN];
530 unsigned long flags;
531 size_t len;
532 int i;
533
534 /*
535 * We can't copy from userspace directly. Access to
536 * current_driver_name is protected with a write_lock with irqs
537 * disabled. Since copy_from_user can fault and may sleep we
538 * need to copy to temporary buffer first
539 */
540 len = min(count, (size_t)(NAME_MAX_LEN - 1));
541 if (copy_from_user(buf, userbuf, len))
542 return -EFAULT;
543
544 buf[len] = 0;
545
546 write_lock_irqsave(&driver_name_lock, flags);
547
548 /*
549 * Now handle the string we got from userspace very carefully.
550 * The rules are:
551 * - only use the first token we got
552 * - token delimiter is everything looking like a space
553 * character (' ', '\n', '\t' ...)
554 *
555 */
556 if (!isalnum(buf[0])) {
557 /*
558 * If the first character userspace gave us is not
559 * alphanumerical then assume the filter should be
560 * switched off.
561 */
562 if (current_driver_name[0])
563 pr_info("DMA-API: switching off dma-debug driver filter\n");
564 current_driver_name[0] = 0;
565 current_driver = NULL;
566 goto out_unlock;
567 }
568
569 /*
570 * Now parse out the first token and use it as the name for the
571 * driver to filter for.
572 */
573 for (i = 0; i < NAME_MAX_LEN; ++i) {
574 current_driver_name[i] = buf[i];
575 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
576 break;
577 }
578 current_driver_name[i] = 0;
579 current_driver = NULL;
580
581 pr_info("DMA-API: enable driver filter for driver [%s]\n",
582 current_driver_name);
583
584out_unlock:
585 write_unlock_irqrestore(&driver_name_lock, flags);
586
587 return count;
588}
589
590const struct file_operations filter_fops = {
591 .read = filter_read,
592 .write = filter_write,
593};
594
595static int dma_debug_fs_init(void)
596{
597 dma_debug_dent = debugfs_create_dir("dma-api", NULL);
598 if (!dma_debug_dent) {
599 pr_err("DMA-API: can not create debugfs directory\n");
600 return -ENOMEM;
601 }
602
603 global_disable_dent = debugfs_create_bool("disabled", 0444,
604 dma_debug_dent,
605 (u32 *)&global_disable);
606 if (!global_disable_dent)
607 goto out_err;
608
609 error_count_dent = debugfs_create_u32("error_count", 0444,
610 dma_debug_dent, &error_count);
611 if (!error_count_dent)
612 goto out_err;
613
614 show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
615 dma_debug_dent,
616 &show_all_errors);
617 if (!show_all_errors_dent)
618 goto out_err;
619
620 show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
621 dma_debug_dent,
622 &show_num_errors);
623 if (!show_num_errors_dent)
624 goto out_err;
625
626 num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
627 dma_debug_dent,
628 &num_free_entries);
629 if (!num_free_entries_dent)
630 goto out_err;
631
632 min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
633 dma_debug_dent,
634 &min_free_entries);
635 if (!min_free_entries_dent)
636 goto out_err;
637
638 filter_dent = debugfs_create_file("driver_filter", 0644,
639 dma_debug_dent, NULL, &filter_fops);
640 if (!filter_dent)
641 goto out_err;
642
643 return 0;
644
645out_err:
646 debugfs_remove_recursive(dma_debug_dent);
647
648 return -ENOMEM;
649}
650
651static int device_dma_allocations(struct device *dev)
652{
653 struct dma_debug_entry *entry;
654 unsigned long flags;
655 int count = 0, i;
656
657 local_irq_save(flags);
658
659 for (i = 0; i < HASH_SIZE; ++i) {
660 spin_lock(&dma_entry_hash[i].lock);
661 list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
662 if (entry->dev == dev)
663 count += 1;
664 }
665 spin_unlock(&dma_entry_hash[i].lock);
666 }
667
668 local_irq_restore(flags);
669
670 return count;
671}
672
673static int dma_debug_device_change(struct notifier_block *nb,
674 unsigned long action, void *data)
675{
676 struct device *dev = data;
677 int count;
678
679
680 switch (action) {
681 case BUS_NOTIFY_UNBOUND_DRIVER:
682 count = device_dma_allocations(dev);
683 if (count == 0)
684 break;
685 err_printk(dev, NULL, "DMA-API: device driver has pending "
686 "DMA allocations while released from device "
687 "[count=%d]\n", count);
688 break;
689 default:
690 break;
691 }
692
693 return 0;
694}
695
696void dma_debug_add_bus(struct bus_type *bus)
697{
698 struct notifier_block *nb;
699
700 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
701 if (nb == NULL) {
702 pr_err("dma_debug_add_bus: out of memory\n");
703 return;
704 }
705
706 nb->notifier_call = dma_debug_device_change;
707
708 bus_register_notifier(bus, nb);
709}
710
711/*
712 * Let the architectures decide how many entries should be preallocated.
713 */
714void dma_debug_init(u32 num_entries)
715{
716 int i;
717
718 if (global_disable)
719 return;
720
721 for (i = 0; i < HASH_SIZE; ++i) {
722 INIT_LIST_HEAD(&dma_entry_hash[i].list);
723 spin_lock_init(&dma_entry_hash[i].lock);
724 }
725
726 if (dma_debug_fs_init() != 0) {
727 pr_err("DMA-API: error creating debugfs entries - disabling\n");
728 global_disable = true;
729
730 return;
731 }
732
733 if (req_entries)
734 num_entries = req_entries;
735
736 if (prealloc_memory(num_entries) != 0) {
737 pr_err("DMA-API: debugging out of memory error - disabled\n");
738 global_disable = true;
739
740 return;
741 }
742
743 nr_total_entries = num_free_entries;
744
745 pr_info("DMA-API: debugging enabled by kernel config\n");
746}
747
748static __init int dma_debug_cmdline(char *str)
749{
750 if (!str)
751 return -EINVAL;
752
753 if (strncmp(str, "off", 3) == 0) {
754 pr_info("DMA-API: debugging disabled on kernel command line\n");
755 global_disable = true;
756 }
757
758 return 0;
759}
760
761static __init int dma_debug_entries_cmdline(char *str)
762{
763 int res;
764
765 if (!str)
766 return -EINVAL;
767
768 res = get_option(&str, &req_entries);
769
770 if (!res)
771 req_entries = 0;
772
773 return 0;
774}
775
776__setup("dma_debug=", dma_debug_cmdline);
777__setup("dma_debug_entries=", dma_debug_entries_cmdline);
778
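(Editor's note: the two __setup() handlers above are fed by the dma_debug=off and dma_debug_entries=<count> boot parameters; the actual enabling is done by an architecture calling dma_debug_init() early during boot, and optionally dma_debug_add_bus() so that unbinding a driver reports leaked mappings. A minimal sketch of that glue code, assuming a PCI-based architecture; the initcall name and the preallocation constant are illustrative and not part of this patch:

	/* illustrative architecture glue, not part of this patch */
	#include <linux/init.h>
	#include <linux/dma-debug.h>
	#include <linux/pci.h>

	#define EXAMPLE_DMA_DEBUG_ENTRIES	(1 << 15)	/* arch picks the preallocation */

	static int __init example_dma_debug_init(void)
	{
		/* preallocate the tracking entries and create the debugfs files */
		dma_debug_init(EXAMPLE_DMA_DEBUG_ENTRIES);
		/* warn about pending mappings when a driver is unbound from a PCI device */
		dma_debug_add_bus(&pci_bus_type);
		return 0;
	}
	fs_initcall(example_dma_debug_init);
)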
779static void check_unmap(struct dma_debug_entry *ref)
780{
781 struct dma_debug_entry *entry;
782 struct hash_bucket *bucket;
783 unsigned long flags;
784
785 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
786 err_printk(ref->dev, NULL, "DMA-API: device driver tries "
787 "to free an invalid DMA memory address\n");
788 return;
789 }
790
791 bucket = get_hash_bucket(ref, &flags);
792 entry = hash_bucket_find(bucket, ref);
793
794 if (!entry) {
795 err_printk(ref->dev, NULL, "DMA-API: device driver tries "
796 "to free DMA memory it has not allocated "
797 "[device address=0x%016llx] [size=%llu bytes]\n",
798 ref->dev_addr, ref->size);
799 goto out;
800 }
801
802 if (ref->size != entry->size) {
803 err_printk(ref->dev, entry, "DMA-API: device driver frees "
804 "DMA memory with different size "
805 "[device address=0x%016llx] [map size=%llu bytes] "
806 "[unmap size=%llu bytes]\n",
807 ref->dev_addr, entry->size, ref->size);
808 }
809
810 if (ref->type != entry->type) {
811 err_printk(ref->dev, entry, "DMA-API: device driver frees "
812 "DMA memory with wrong function "
813 "[device address=0x%016llx] [size=%llu bytes] "
814 "[mapped as %s] [unmapped as %s]\n",
815 ref->dev_addr, ref->size,
816 type2name[entry->type], type2name[ref->type]);
817 } else if ((entry->type == dma_debug_coherent) &&
818 (ref->paddr != entry->paddr)) {
819 err_printk(ref->dev, entry, "DMA-API: device driver frees "
820 "DMA memory with different CPU address "
821 "[device address=0x%016llx] [size=%llu bytes] "
822 "[cpu alloc address=%p] [cpu free address=%p]",
823 ref->dev_addr, ref->size,
824 (void *)entry->paddr, (void *)ref->paddr);
825 }
826
827 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
828 ref->sg_call_ents != entry->sg_call_ents) {
829 err_printk(ref->dev, entry, "DMA-API: device driver frees "
830 "DMA sg list with different entry count "
831 "[map count=%d] [unmap count=%d]\n",
832 entry->sg_call_ents, ref->sg_call_ents);
833 }
834
835 /*
 836 * This may not be a bug in reality - but most implementations of the
837 * DMA API don't handle this properly, so check for it here
838 */
839 if (ref->direction != entry->direction) {
840 err_printk(ref->dev, entry, "DMA-API: device driver frees "
841 "DMA memory with different direction "
842 "[device address=0x%016llx] [size=%llu bytes] "
843 "[mapped with %s] [unmapped with %s]\n",
844 ref->dev_addr, ref->size,
845 dir2name[entry->direction],
846 dir2name[ref->direction]);
847 }
848
849 hash_bucket_del(entry);
850 dma_entry_free(entry);
851
852out:
853 put_hash_bucket(bucket, &flags);
854}
855
856static void check_for_stack(struct device *dev, void *addr)
857{
858 if (object_is_on_stack(addr))
 859 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
860 "stack [addr=%p]\n", addr);
861}
862
863static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
864{
865 unsigned long a1 = (unsigned long)addr;
866 unsigned long b1 = a1 + len;
867 unsigned long a2 = (unsigned long)start;
868 unsigned long b2 = (unsigned long)end;
869
870 return !(b1 <= a2 || a1 >= b2);
871}
872
873static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
874{
875 if (overlap(addr, len, _text, _etext) ||
876 overlap(addr, len, __start_rodata, __end_rodata))
877 err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
878}
879
880static void check_sync(struct device *dev,
881 struct dma_debug_entry *ref,
882 bool to_cpu)
883{
884 struct dma_debug_entry *entry;
885 struct hash_bucket *bucket;
886 unsigned long flags;
887
888 bucket = get_hash_bucket(ref, &flags);
889
890 entry = hash_bucket_find(bucket, ref);
891
892 if (!entry) {
893 err_printk(dev, NULL, "DMA-API: device driver tries "
894 "to sync DMA memory it has not allocated "
895 "[device address=0x%016llx] [size=%llu bytes]\n",
896 (unsigned long long)ref->dev_addr, ref->size);
897 goto out;
898 }
899
900 if (ref->size > entry->size) {
901 err_printk(dev, entry, "DMA-API: device driver syncs"
902 " DMA memory outside allocated range "
903 "[device address=0x%016llx] "
904 "[allocation size=%llu bytes] "
905 "[sync offset+size=%llu]\n",
906 entry->dev_addr, entry->size,
907 ref->size);
908 }
909
910 if (ref->direction != entry->direction) {
911 err_printk(dev, entry, "DMA-API: device driver syncs "
912 "DMA memory with different direction "
913 "[device address=0x%016llx] [size=%llu bytes] "
914 "[mapped with %s] [synced with %s]\n",
915 (unsigned long long)ref->dev_addr, entry->size,
916 dir2name[entry->direction],
917 dir2name[ref->direction]);
918 }
919
920 if (entry->direction == DMA_BIDIRECTIONAL)
921 goto out;
922
923 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
924 !(ref->direction == DMA_TO_DEVICE))
925 err_printk(dev, entry, "DMA-API: device driver syncs "
926 "device read-only DMA memory for cpu "
927 "[device address=0x%016llx] [size=%llu bytes] "
928 "[mapped with %s] [synced with %s]\n",
929 (unsigned long long)ref->dev_addr, entry->size,
930 dir2name[entry->direction],
931 dir2name[ref->direction]);
932
933 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
934 !(ref->direction == DMA_FROM_DEVICE))
935 err_printk(dev, entry, "DMA-API: device driver syncs "
936 "device write-only DMA memory to device "
937 "[device address=0x%016llx] [size=%llu bytes] "
938 "[mapped with %s] [synced with %s]\n",
939 (unsigned long long)ref->dev_addr, entry->size,
940 dir2name[entry->direction],
941 dir2name[ref->direction]);
942
943out:
944 put_hash_bucket(bucket, &flags);
945
946}
947
948void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
949 size_t size, int direction, dma_addr_t dma_addr,
950 bool map_single)
951{
952 struct dma_debug_entry *entry;
953
954 if (unlikely(global_disable))
955 return;
956
957 if (unlikely(dma_mapping_error(dev, dma_addr)))
958 return;
959
960 entry = dma_entry_alloc();
961 if (!entry)
962 return;
963
964 entry->dev = dev;
965 entry->type = dma_debug_page;
966 entry->paddr = page_to_phys(page) + offset;
967 entry->dev_addr = dma_addr;
968 entry->size = size;
969 entry->direction = direction;
970
971 if (map_single)
972 entry->type = dma_debug_single;
973
974 if (!PageHighMem(page)) {
975 void *addr = page_address(page) + offset;
976
977 check_for_stack(dev, addr);
978 check_for_illegal_area(dev, addr, size);
979 }
980
981 add_dma_entry(entry);
982}
983EXPORT_SYMBOL(debug_dma_map_page);
984
985void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
986 size_t size, int direction, bool map_single)
987{
988 struct dma_debug_entry ref = {
989 .type = dma_debug_page,
990 .dev = dev,
991 .dev_addr = addr,
992 .size = size,
993 .direction = direction,
994 };
995
996 if (unlikely(global_disable))
997 return;
998
999 if (map_single)
1000 ref.type = dma_debug_single;
1001
1002 check_unmap(&ref);
1003}
1004EXPORT_SYMBOL(debug_dma_unmap_page);
1005
1006void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1007 int nents, int mapped_ents, int direction)
1008{
1009 struct dma_debug_entry *entry;
1010 struct scatterlist *s;
1011 int i;
1012
1013 if (unlikely(global_disable))
1014 return;
1015
1016 for_each_sg(sg, s, mapped_ents, i) {
1017 entry = dma_entry_alloc();
1018 if (!entry)
1019 return;
1020
1021 entry->type = dma_debug_sg;
1022 entry->dev = dev;
1023 entry->paddr = sg_phys(s);
1024 entry->size = sg_dma_len(s);
1025 entry->dev_addr = sg_dma_address(s);
1026 entry->direction = direction;
1027 entry->sg_call_ents = nents;
1028 entry->sg_mapped_ents = mapped_ents;
1029
1030 if (!PageHighMem(sg_page(s))) {
1031 check_for_stack(dev, sg_virt(s));
1032 check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
1033 }
1034
1035 add_dma_entry(entry);
1036 }
1037}
1038EXPORT_SYMBOL(debug_dma_map_sg);
1039
1040static int get_nr_mapped_entries(struct device *dev,
1041 struct dma_debug_entry *ref)
1042{
1043 struct dma_debug_entry *entry;
1044 struct hash_bucket *bucket;
1045 unsigned long flags;
1046 int mapped_ents;
1047
1048 bucket = get_hash_bucket(ref, &flags);
1049 entry = hash_bucket_find(bucket, ref);
1050 mapped_ents = 0;
1051
1052 if (entry)
1053 mapped_ents = entry->sg_mapped_ents;
1054 put_hash_bucket(bucket, &flags);
1055
1056 return mapped_ents;
1057}
1058
1059void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1060 int nelems, int dir)
1061{
1062 struct scatterlist *s;
1063 int mapped_ents = 0, i;
1064
1065 if (unlikely(global_disable))
1066 return;
1067
1068 for_each_sg(sglist, s, nelems, i) {
1069
1070 struct dma_debug_entry ref = {
1071 .type = dma_debug_sg,
1072 .dev = dev,
1073 .paddr = sg_phys(s),
1074 .dev_addr = sg_dma_address(s),
1075 .size = sg_dma_len(s),
1076 .direction = dir,
1077 .sg_call_ents = nelems,
1078 };
1079
1080 if (mapped_ents && i >= mapped_ents)
1081 break;
1082
1083 if (!i)
1084 mapped_ents = get_nr_mapped_entries(dev, &ref);
1085
1086 check_unmap(&ref);
1087 }
1088}
1089EXPORT_SYMBOL(debug_dma_unmap_sg);
1090
1091void debug_dma_alloc_coherent(struct device *dev, size_t size,
1092 dma_addr_t dma_addr, void *virt)
1093{
1094 struct dma_debug_entry *entry;
1095
1096 if (unlikely(global_disable))
1097 return;
1098
1099 if (unlikely(virt == NULL))
1100 return;
1101
1102 entry = dma_entry_alloc();
1103 if (!entry)
1104 return;
1105
1106 entry->type = dma_debug_coherent;
1107 entry->dev = dev;
1108 entry->paddr = virt_to_phys(virt);
1109 entry->size = size;
1110 entry->dev_addr = dma_addr;
1111 entry->direction = DMA_BIDIRECTIONAL;
1112
1113 add_dma_entry(entry);
1114}
1115EXPORT_SYMBOL(debug_dma_alloc_coherent);
1116
1117void debug_dma_free_coherent(struct device *dev, size_t size,
1118 void *virt, dma_addr_t addr)
1119{
1120 struct dma_debug_entry ref = {
1121 .type = dma_debug_coherent,
1122 .dev = dev,
1123 .paddr = virt_to_phys(virt),
1124 .dev_addr = addr,
1125 .size = size,
1126 .direction = DMA_BIDIRECTIONAL,
1127 };
1128
1129 if (unlikely(global_disable))
1130 return;
1131
1132 check_unmap(&ref);
1133}
1134EXPORT_SYMBOL(debug_dma_free_coherent);
1135
1136void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1137 size_t size, int direction)
1138{
1139 struct dma_debug_entry ref;
1140
1141 if (unlikely(global_disable))
1142 return;
1143
1144 ref.type = dma_debug_single;
1145 ref.dev = dev;
1146 ref.dev_addr = dma_handle;
1147 ref.size = size;
1148 ref.direction = direction;
1149 ref.sg_call_ents = 0;
1150
1151 check_sync(dev, &ref, true);
1152}
1153EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
1154
1155void debug_dma_sync_single_for_device(struct device *dev,
1156 dma_addr_t dma_handle, size_t size,
1157 int direction)
1158{
1159 struct dma_debug_entry ref;
1160
1161 if (unlikely(global_disable))
1162 return;
1163
1164 ref.type = dma_debug_single;
1165 ref.dev = dev;
1166 ref.dev_addr = dma_handle;
1167 ref.size = size;
1168 ref.direction = direction;
1169 ref.sg_call_ents = 0;
1170
1171 check_sync(dev, &ref, false);
1172}
1173EXPORT_SYMBOL(debug_dma_sync_single_for_device);
1174
1175void debug_dma_sync_single_range_for_cpu(struct device *dev,
1176 dma_addr_t dma_handle,
1177 unsigned long offset, size_t size,
1178 int direction)
1179{
1180 struct dma_debug_entry ref;
1181
1182 if (unlikely(global_disable))
1183 return;
1184
1185 ref.type = dma_debug_single;
1186 ref.dev = dev;
1187 ref.dev_addr = dma_handle;
1188 ref.size = offset + size;
1189 ref.direction = direction;
1190 ref.sg_call_ents = 0;
1191
1192 check_sync(dev, &ref, true);
1193}
1194EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
1195
1196void debug_dma_sync_single_range_for_device(struct device *dev,
1197 dma_addr_t dma_handle,
1198 unsigned long offset,
1199 size_t size, int direction)
1200{
1201 struct dma_debug_entry ref;
1202
1203 if (unlikely(global_disable))
1204 return;
1205
1206 ref.type = dma_debug_single;
1207 ref.dev = dev;
1208 ref.dev_addr = dma_handle;
1209 ref.size = offset + size;
1210 ref.direction = direction;
1211 ref.sg_call_ents = 0;
1212
1213 check_sync(dev, &ref, false);
1214}
1215EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
1216
1217void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1218 int nelems, int direction)
1219{
1220 struct scatterlist *s;
1221 int mapped_ents = 0, i;
1222
1223 if (unlikely(global_disable))
1224 return;
1225
1226 for_each_sg(sg, s, nelems, i) {
1227
1228 struct dma_debug_entry ref = {
1229 .type = dma_debug_sg,
1230 .dev = dev,
1231 .paddr = sg_phys(s),
1232 .dev_addr = sg_dma_address(s),
1233 .size = sg_dma_len(s),
1234 .direction = direction,
1235 .sg_call_ents = nelems,
1236 };
1237
1238 if (!i)
1239 mapped_ents = get_nr_mapped_entries(dev, &ref);
1240
1241 if (i >= mapped_ents)
1242 break;
1243
1244 check_sync(dev, &ref, true);
1245 }
1246}
1247EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
1248
1249void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1250 int nelems, int direction)
1251{
1252 struct scatterlist *s;
1253 int mapped_ents = 0, i;
1254
1255 if (unlikely(global_disable))
1256 return;
1257
1258 for_each_sg(sg, s, nelems, i) {
1259
1260 struct dma_debug_entry ref = {
1261 .type = dma_debug_sg,
1262 .dev = dev,
1263 .paddr = sg_phys(s),
1264 .dev_addr = sg_dma_address(s),
1265 .size = sg_dma_len(s),
1266 .direction = direction,
1267 .sg_call_ents = nelems,
1268 };
1269 if (!i)
1270 mapped_ents = get_nr_mapped_entries(dev, &ref);
1271
1272 if (i >= mapped_ents)
1273 break;
1274
1275 check_sync(dev, &ref, false);
1276 }
1277}
1278EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
1279
1280static int __init dma_debug_driver_setup(char *str)
1281{
1282 int i;
1283
1284 for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1285 current_driver_name[i] = *str;
1286 if (*str == 0)
1287 break;
1288 }
1289
1290 if (current_driver_name[0])
1291 pr_info("DMA-API: enable driver filter for driver [%s]\n",
1292 current_driver_name);
1293
1294
1295 return 1;
1296}
1297__setup("dma_debug_driver=", dma_debug_driver_setup);
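(Editor's note: as a rough illustration of what check_unmap() in the dma-debug code above reports, here is a hedged driver fragment; the device, buffer and sizes are made up. The mapping is created with dma_map_single() but torn down with dma_unmap_page(), so the unmap lookup finds an entry of a different type and prints the "mapped as"/"unmapped as" error; size and direction mismatches are caught the same way. The driver_filter debugfs file and the dma_debug_driver= parameter above can narrow such reports to a single driver.

	/* illustrative driver fragment, not part of this patch */
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, 4096, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... device performs its DMA ... */

	/*
	 * BUG (intentional, for illustration): mapped with dma_map_single()
	 * but unmapped as a page.  debug_dma_unmap_page() builds a reference
	 * entry, check_unmap() finds the stored entry with a different type
	 * and complains about the mismatched map/unmap functions.
	 */
	dma_unmap_page(dev, handle, 4096, DMA_TO_DEVICE);
)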
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
new file mode 100644
index 000000000000..e22c148e4b7f
--- /dev/null
+++ b/lib/dynamic_debug.c
@@ -0,0 +1,769 @@
1/*
2 * lib/dynamic_debug.c
3 *
4 * make pr_debug()/dev_dbg() calls runtime configurable based upon their
5 * source module.
6 *
7 * Copyright (C) 2008 Jason Baron <jbaron@redhat.com>
8 * By Greg Banks <gnb@melbourne.sgi.com>
9 * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/kallsyms.h>
16#include <linux/version.h>
17#include <linux/types.h>
18#include <linux/mutex.h>
19#include <linux/proc_fs.h>
20#include <linux/seq_file.h>
21#include <linux/list.h>
22#include <linux/sysctl.h>
23#include <linux/ctype.h>
24#include <linux/uaccess.h>
25#include <linux/dynamic_debug.h>
26#include <linux/debugfs.h>
27
28extern struct _ddebug __start___verbose[];
29extern struct _ddebug __stop___verbose[];
30
 31/* dynamic_debug_enabled and dynamic_debug_enabled2 are bitmasks in which
32 * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
33 * use independent hash functions, to reduce the chance of false positives.
34 */
35long long dynamic_debug_enabled;
36EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
37long long dynamic_debug_enabled2;
38EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
39
40struct ddebug_table {
41 struct list_head link;
42 char *mod_name;
43 unsigned int num_ddebugs;
44 unsigned int num_enabled;
45 struct _ddebug *ddebugs;
46};
47
48struct ddebug_query {
49 const char *filename;
50 const char *module;
51 const char *function;
52 const char *format;
53 unsigned int first_lineno, last_lineno;
54};
55
56struct ddebug_iter {
57 struct ddebug_table *table;
58 unsigned int idx;
59};
60
61static DEFINE_MUTEX(ddebug_lock);
62static LIST_HEAD(ddebug_tables);
63static int verbose = 0;
64
65/* Return the last part of a pathname */
66static inline const char *basename(const char *path)
67{
68 const char *tail = strrchr(path, '/');
69 return tail ? tail+1 : path;
70}
71
72/* format a string into buf[] which describes the _ddebug's flags */
73static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
74 size_t maxlen)
75{
76 char *p = buf;
77
78 BUG_ON(maxlen < 4);
79 if (dp->flags & _DPRINTK_FLAGS_PRINT)
80 *p++ = 'p';
81 if (p == buf)
82 *p++ = '-';
83 *p = '\0';
84
85 return buf;
86}
87
88/*
89 * must be called with ddebug_lock held
90 */
91
92static int disabled_hash(char hash, bool first_table)
93{
94 struct ddebug_table *dt;
95 char table_hash_value;
96
97 list_for_each_entry(dt, &ddebug_tables, link) {
98 if (first_table)
99 table_hash_value = dt->ddebugs->primary_hash;
100 else
101 table_hash_value = dt->ddebugs->secondary_hash;
102 if (dt->num_enabled && (hash == table_hash_value))
103 return 0;
104 }
105 return 1;
106}
107
108/*
109 * Search the tables for _ddebug's which match the given
110 * `query' and apply the `flags' and `mask' to them. Tells
111 * the user which ddebug's were changed, or whether none
112 * were matched.
113 */
114static void ddebug_change(const struct ddebug_query *query,
115 unsigned int flags, unsigned int mask)
116{
117 int i;
118 struct ddebug_table *dt;
119 unsigned int newflags;
120 unsigned int nfound = 0;
121 char flagbuf[8];
122
123 /* search for matching ddebugs */
124 mutex_lock(&ddebug_lock);
125 list_for_each_entry(dt, &ddebug_tables, link) {
126
127 /* match against the module name */
128 if (query->module != NULL &&
129 strcmp(query->module, dt->mod_name))
130 continue;
131
132 for (i = 0 ; i < dt->num_ddebugs ; i++) {
133 struct _ddebug *dp = &dt->ddebugs[i];
134
135 /* match against the source filename */
136 if (query->filename != NULL &&
137 strcmp(query->filename, dp->filename) &&
138 strcmp(query->filename, basename(dp->filename)))
139 continue;
140
141 /* match against the function */
142 if (query->function != NULL &&
143 strcmp(query->function, dp->function))
144 continue;
145
146 /* match against the format */
147 if (query->format != NULL &&
148 strstr(dp->format, query->format) == NULL)
149 continue;
150
151 /* match against the line number range */
152 if (query->first_lineno &&
153 dp->lineno < query->first_lineno)
154 continue;
155 if (query->last_lineno &&
156 dp->lineno > query->last_lineno)
157 continue;
158
159 nfound++;
160
161 newflags = (dp->flags & mask) | flags;
162 if (newflags == dp->flags)
163 continue;
164
165 if (!newflags)
166 dt->num_enabled--;
167 else if (!dp->flags)
168 dt->num_enabled++;
169 dp->flags = newflags;
170 if (newflags) {
171 dynamic_debug_enabled |=
172 (1LL << dp->primary_hash);
173 dynamic_debug_enabled2 |=
174 (1LL << dp->secondary_hash);
175 } else {
176 if (disabled_hash(dp->primary_hash, true))
177 dynamic_debug_enabled &=
178 ~(1LL << dp->primary_hash);
179 if (disabled_hash(dp->secondary_hash, false))
180 dynamic_debug_enabled2 &=
181 ~(1LL << dp->secondary_hash);
182 }
183 if (verbose)
184 printk(KERN_INFO
185 "ddebug: changed %s:%d [%s]%s %s\n",
186 dp->filename, dp->lineno,
187 dt->mod_name, dp->function,
188 ddebug_describe_flags(dp, flagbuf,
189 sizeof(flagbuf)));
190 }
191 }
192 mutex_unlock(&ddebug_lock);
193
194 if (!nfound && verbose)
195 printk(KERN_INFO "ddebug: no matches for query\n");
196}
197
198/*
199 * Split the buffer `buf' into space-separated words.
200 * Handles simple " and ' quoting, i.e. without nested,
201 * embedded or escaped \". Return the number of words
202 * or <0 on error.
203 */
204static int ddebug_tokenize(char *buf, char *words[], int maxwords)
205{
206 int nwords = 0;
207
208 while (*buf) {
209 char *end;
210
211 /* Skip leading whitespace */
212 while (*buf && isspace(*buf))
213 buf++;
214 if (!*buf)
215 break; /* oh, it was trailing whitespace */
216
217 /* Run `end' over a word, either whitespace separated or quoted */
218 if (*buf == '"' || *buf == '\'') {
219 int quote = *buf++;
220 for (end = buf ; *end && *end != quote ; end++)
221 ;
222 if (!*end)
223 return -EINVAL; /* unclosed quote */
224 } else {
225 for (end = buf ; *end && !isspace(*end) ; end++)
226 ;
227 BUG_ON(end == buf);
228 }
229 /* Here `buf' is the start of the word, `end' is one past the end */
230
231 if (nwords == maxwords)
232 return -EINVAL; /* ran out of words[] before bytes */
233 if (*end)
234 *end++ = '\0'; /* terminate the word */
235 words[nwords++] = buf;
236 buf = end;
237 }
238
239 if (verbose) {
240 int i;
241 printk(KERN_INFO "%s: split into words:", __func__);
242 for (i = 0 ; i < nwords ; i++)
243 printk(" \"%s\"", words[i]);
244 printk("\n");
245 }
246
247 return nwords;
248}
249
250/*
251 * Parse a single line number. Note that the empty string ""
252 * is treated as a special case and converted to zero, which
253 * is later treated as a "don't care" value.
254 */
255static inline int parse_lineno(const char *str, unsigned int *val)
256{
257 char *end = NULL;
258 BUG_ON(str == NULL);
259 if (*str == '\0') {
260 *val = 0;
261 return 0;
262 }
263 *val = simple_strtoul(str, &end, 10);
264 return end == NULL || end == str || *end != '\0' ? -EINVAL : 0;
265}
266
267/*
 268 * Undo octal escaping in a string, in place. This is useful to
269 * allow the user to express a query which matches a format
270 * containing embedded spaces.
271 */
272#define isodigit(c) ((c) >= '0' && (c) <= '7')
273static char *unescape(char *str)
274{
275 char *in = str;
276 char *out = str;
277
278 while (*in) {
279 if (*in == '\\') {
280 if (in[1] == '\\') {
281 *out++ = '\\';
282 in += 2;
283 continue;
284 } else if (in[1] == 't') {
285 *out++ = '\t';
286 in += 2;
287 continue;
288 } else if (in[1] == 'n') {
289 *out++ = '\n';
290 in += 2;
291 continue;
292 } else if (isodigit(in[1]) &&
293 isodigit(in[2]) &&
294 isodigit(in[3])) {
295 *out++ = ((in[1] - '0')<<6) |
296 ((in[2] - '0')<<3) |
297 (in[3] - '0');
298 in += 4;
299 continue;
300 }
301 }
302 *out++ = *in++;
303 }
304 *out = '\0';
305
306 return str;
307}
308
309/*
310 * Parse words[] as a ddebug query specification, which is a series
311 * of (keyword, value) pairs chosen from these possibilities:
312 *
313 * func <function-name>
314 * file <full-pathname>
315 * file <base-filename>
316 * module <module-name>
317 * format <escaped-string-to-find-in-format>
318 * line <lineno>
319 * line <first-lineno>-<last-lineno> // where either may be empty
320 */
321static int ddebug_parse_query(char *words[], int nwords,
322 struct ddebug_query *query)
323{
324 unsigned int i;
325
326 /* check we have an even number of words */
327 if (nwords % 2 != 0)
328 return -EINVAL;
329 memset(query, 0, sizeof(*query));
330
331 for (i = 0 ; i < nwords ; i += 2) {
332 if (!strcmp(words[i], "func"))
333 query->function = words[i+1];
334 else if (!strcmp(words[i], "file"))
335 query->filename = words[i+1];
336 else if (!strcmp(words[i], "module"))
337 query->module = words[i+1];
338 else if (!strcmp(words[i], "format"))
339 query->format = unescape(words[i+1]);
340 else if (!strcmp(words[i], "line")) {
341 char *first = words[i+1];
342 char *last = strchr(first, '-');
343 if (last)
344 *last++ = '\0';
345 if (parse_lineno(first, &query->first_lineno) < 0)
346 return -EINVAL;
347 if (last != NULL) {
348 /* range <first>-<last> */
349 if (parse_lineno(last, &query->last_lineno) < 0)
350 return -EINVAL;
351 } else {
352 query->last_lineno = query->first_lineno;
353 }
354 } else {
355 if (verbose)
356 printk(KERN_ERR "%s: unknown keyword \"%s\"\n",
357 __func__, words[i]);
358 return -EINVAL;
359 }
360 }
361
362 if (verbose)
363 printk(KERN_INFO "%s: q->function=\"%s\" q->filename=\"%s\" "
364 "q->module=\"%s\" q->format=\"%s\" q->lineno=%u-%u\n",
365 __func__, query->function, query->filename,
366 query->module, query->format, query->first_lineno,
367 query->last_lineno);
368
369 return 0;
370}
371
372/*
373 * Parse `str' as a flags specification, format [-+=][p]+.
374 * Sets up *maskp and *flagsp to be used when changing the
375 * flags fields of matched _ddebug's. Returns 0 on success
376 * or <0 on error.
377 */
378static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
379 unsigned int *maskp)
380{
381 unsigned flags = 0;
382 int op = '=';
383
384 switch (*str) {
385 case '+':
386 case '-':
387 case '=':
388 op = *str++;
389 break;
390 default:
391 return -EINVAL;
392 }
393 if (verbose)
394 printk(KERN_INFO "%s: op='%c'\n", __func__, op);
395
396 for ( ; *str ; ++str) {
397 switch (*str) {
398 case 'p':
399 flags |= _DPRINTK_FLAGS_PRINT;
400 break;
401 default:
402 return -EINVAL;
403 }
404 }
405 if (flags == 0)
406 return -EINVAL;
407 if (verbose)
408 printk(KERN_INFO "%s: flags=0x%x\n", __func__, flags);
409
410 /* calculate final *flagsp, *maskp according to mask and op */
411 switch (op) {
412 case '=':
413 *maskp = 0;
414 *flagsp = flags;
415 break;
416 case '+':
417 *maskp = ~0U;
418 *flagsp = flags;
419 break;
420 case '-':
421 *maskp = ~flags;
422 *flagsp = 0;
423 break;
424 }
425 if (verbose)
426 printk(KERN_INFO "%s: *flagsp=0x%x *maskp=0x%x\n",
427 __func__, *flagsp, *maskp);
428 return 0;
429}
430
431/*
 432 * File_ops->write method for <debugfs>/dynamic_debug/control. Gathers the
433 * command text from userspace, parses and executes it.
434 */
435static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
436 size_t len, loff_t *offp)
437{
438 unsigned int flags = 0, mask = 0;
439 struct ddebug_query query;
440#define MAXWORDS 9
441 int nwords;
442 char *words[MAXWORDS];
443 char tmpbuf[256];
444
445 if (len == 0)
446 return 0;
447 /* we don't check *offp -- multiple writes() are allowed */
448 if (len > sizeof(tmpbuf)-1)
449 return -E2BIG;
450 if (copy_from_user(tmpbuf, ubuf, len))
451 return -EFAULT;
452 tmpbuf[len] = '\0';
453 if (verbose)
454 printk(KERN_INFO "%s: read %d bytes from userspace\n",
455 __func__, (int)len);
456
457 nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS);
458 if (nwords < 0)
459 return -EINVAL;
460 if (ddebug_parse_query(words, nwords-1, &query))
461 return -EINVAL;
462 if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
463 return -EINVAL;
464
465 /* actually go and implement the change */
466 ddebug_change(&query, flags, mask);
467
468 *offp += len;
469 return len;
470}
471
472/*
473 * Set the iterator to point to the first _ddebug object
474 * and return a pointer to that first object. Returns
475 * NULL if there are no _ddebugs at all.
476 */
477static struct _ddebug *ddebug_iter_first(struct ddebug_iter *iter)
478{
479 if (list_empty(&ddebug_tables)) {
480 iter->table = NULL;
481 iter->idx = 0;
482 return NULL;
483 }
484 iter->table = list_entry(ddebug_tables.next,
485 struct ddebug_table, link);
486 iter->idx = 0;
487 return &iter->table->ddebugs[iter->idx];
488}
489
490/*
491 * Advance the iterator to point to the next _ddebug
492 * object from the one the iterator currently points at,
493 * and returns a pointer to the new _ddebug. Returns
494 * NULL if the iterator has seen all the _ddebugs.
495 */
496static struct _ddebug *ddebug_iter_next(struct ddebug_iter *iter)
497{
498 if (iter->table == NULL)
499 return NULL;
500 if (++iter->idx == iter->table->num_ddebugs) {
501 /* iterate to next table */
502 iter->idx = 0;
503 if (list_is_last(&iter->table->link, &ddebug_tables)) {
504 iter->table = NULL;
505 return NULL;
506 }
507 iter->table = list_entry(iter->table->link.next,
508 struct ddebug_table, link);
509 }
510 return &iter->table->ddebugs[iter->idx];
511}
512
513/*
514 * Seq_ops start method. Called at the start of every
515 * read() call from userspace. Takes the ddebug_lock and
516 * seeks the seq_file's iterator to the given position.
517 */
518static void *ddebug_proc_start(struct seq_file *m, loff_t *pos)
519{
520 struct ddebug_iter *iter = m->private;
521 struct _ddebug *dp;
522 int n = *pos;
523
524 if (verbose)
525 printk(KERN_INFO "%s: called m=%p *pos=%lld\n",
526 __func__, m, (unsigned long long)*pos);
527
528 mutex_lock(&ddebug_lock);
529
530 if (!n)
531 return SEQ_START_TOKEN;
532 if (n < 0)
533 return NULL;
534 dp = ddebug_iter_first(iter);
535 while (dp != NULL && --n > 0)
536 dp = ddebug_iter_next(iter);
537 return dp;
538}
539
540/*
541 * Seq_ops next method. Called several times within a read()
542 * call from userspace, with ddebug_lock held. Walks to the
543 * next _ddebug object with a special case for the header line.
544 */
545static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
546{
547 struct ddebug_iter *iter = m->private;
548 struct _ddebug *dp;
549
550 if (verbose)
551 printk(KERN_INFO "%s: called m=%p p=%p *pos=%lld\n",
552 __func__, m, p, (unsigned long long)*pos);
553
554 if (p == SEQ_START_TOKEN)
555 dp = ddebug_iter_first(iter);
556 else
557 dp = ddebug_iter_next(iter);
558 ++*pos;
559 return dp;
560}
561
562/*
563 * Seq_ops show method. Called several times within a read()
564 * call from userspace, with ddebug_lock held. Formats the
565 * current _ddebug as a single human-readable line, with a
566 * special case for the header line.
567 */
568static int ddebug_proc_show(struct seq_file *m, void *p)
569{
570 struct ddebug_iter *iter = m->private;
571 struct _ddebug *dp = p;
572 char flagsbuf[8];
573
574 if (verbose)
575 printk(KERN_INFO "%s: called m=%p p=%p\n",
576 __func__, m, p);
577
578 if (p == SEQ_START_TOKEN) {
579 seq_puts(m,
580 "# filename:lineno [module]function flags format\n");
581 return 0;
582 }
583
584 seq_printf(m, "%s:%u [%s]%s %s \"",
585 dp->filename, dp->lineno,
586 iter->table->mod_name, dp->function,
587 ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf)));
588 seq_escape(m, dp->format, "\t\r\n\"");
589 seq_puts(m, "\"\n");
590
591 return 0;
592}
593
594/*
595 * Seq_ops stop method. Called at the end of each read()
596 * call from userspace. Drops ddebug_lock.
597 */
598static void ddebug_proc_stop(struct seq_file *m, void *p)
599{
600 if (verbose)
601 printk(KERN_INFO "%s: called m=%p p=%p\n",
602 __func__, m, p);
603 mutex_unlock(&ddebug_lock);
604}
605
606static const struct seq_operations ddebug_proc_seqops = {
607 .start = ddebug_proc_start,
608 .next = ddebug_proc_next,
609 .show = ddebug_proc_show,
610 .stop = ddebug_proc_stop
611};
612
613/*
614 * File_ops->open method for <debugfs>/dynamic_debug/control. Does the seq_file
615 * setup dance, and also creates an iterator to walk the _ddebugs.
616 * Note that we create a seq_file always, even for O_WRONLY files
617 * where it's not needed, as doing so simplifies the ->release method.
618 */
619static int ddebug_proc_open(struct inode *inode, struct file *file)
620{
621 struct ddebug_iter *iter;
622 int err;
623
624 if (verbose)
625 printk(KERN_INFO "%s: called\n", __func__);
626
627 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
628 if (iter == NULL)
629 return -ENOMEM;
630
631 err = seq_open(file, &ddebug_proc_seqops);
632 if (err) {
633 kfree(iter);
634 return err;
635 }
636 ((struct seq_file *) file->private_data)->private = iter;
637 return 0;
638}
639
640static const struct file_operations ddebug_proc_fops = {
641 .owner = THIS_MODULE,
642 .open = ddebug_proc_open,
643 .read = seq_read,
644 .llseek = seq_lseek,
645 .release = seq_release_private,
646 .write = ddebug_proc_write
647};
648
649/*
650 * Allocate a new ddebug_table for the given module
651 * and add it to the global list.
652 */
653int ddebug_add_module(struct _ddebug *tab, unsigned int n,
654 const char *name)
655{
656 struct ddebug_table *dt;
657 char *new_name;
658
659 dt = kzalloc(sizeof(*dt), GFP_KERNEL);
660 if (dt == NULL)
661 return -ENOMEM;
662 new_name = kstrdup(name, GFP_KERNEL);
663 if (new_name == NULL) {
664 kfree(dt);
665 return -ENOMEM;
666 }
667 dt->mod_name = new_name;
668 dt->num_ddebugs = n;
669 dt->num_enabled = 0;
670 dt->ddebugs = tab;
671
672 mutex_lock(&ddebug_lock);
673 list_add_tail(&dt->link, &ddebug_tables);
674 mutex_unlock(&ddebug_lock);
675
676 if (verbose)
677 printk(KERN_INFO "%u debug prints in module %s\n",
678 n, dt->mod_name);
679 return 0;
680}
681EXPORT_SYMBOL_GPL(ddebug_add_module);
682
683static void ddebug_table_free(struct ddebug_table *dt)
684{
685 list_del_init(&dt->link);
686 kfree(dt->mod_name);
687 kfree(dt);
688}
689
690/*
691 * Called in response to a module being unloaded. Removes
692 * any ddebug_table's which point at the module.
693 */
694int ddebug_remove_module(char *mod_name)
695{
696 struct ddebug_table *dt, *nextdt;
697 int ret = -ENOENT;
698
699 if (verbose)
700 printk(KERN_INFO "%s: removing module \"%s\"\n",
701 __func__, mod_name);
702
703 mutex_lock(&ddebug_lock);
704 list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
705 if (!strcmp(dt->mod_name, mod_name)) {
706 ddebug_table_free(dt);
707 ret = 0;
708 }
709 }
710 mutex_unlock(&ddebug_lock);
711 return ret;
712}
713EXPORT_SYMBOL_GPL(ddebug_remove_module);
714
715static void ddebug_remove_all_tables(void)
716{
717 mutex_lock(&ddebug_lock);
718 while (!list_empty(&ddebug_tables)) {
719 struct ddebug_table *dt = list_entry(ddebug_tables.next,
720 struct ddebug_table,
721 link);
722 ddebug_table_free(dt);
723 }
724 mutex_unlock(&ddebug_lock);
725}
726
727static int __init dynamic_debug_init(void)
728{
729 struct dentry *dir, *file;
730 struct _ddebug *iter, *iter_start;
731 const char *modname = NULL;
732 int ret = 0;
733 int n = 0;
734
735 dir = debugfs_create_dir("dynamic_debug", NULL);
736 if (!dir)
737 return -ENOMEM;
738 file = debugfs_create_file("control", 0644, dir, NULL,
739 &ddebug_proc_fops);
740 if (!file) {
741 debugfs_remove(dir);
742 return -ENOMEM;
743 }
744 if (__start___verbose != __stop___verbose) {
745 iter = __start___verbose;
746 modname = iter->modname;
747 iter_start = iter;
748 for (; iter < __stop___verbose; iter++) {
749 if (strcmp(modname, iter->modname)) {
750 ret = ddebug_add_module(iter_start, n, modname);
751 if (ret)
752 goto out_free;
753 n = 0;
754 modname = iter->modname;
755 iter_start = iter;
756 }
757 n++;
758 }
759 ret = ddebug_add_module(iter_start, n, modname);
760 }
761out_free:
762 if (ret) {
763 ddebug_remove_all_tables();
764 debugfs_remove(dir);
765 debugfs_remove(file);
766 }
767 return 0;
768}
769module_init(dynamic_debug_init);
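(Editor's note: the control file created by dynamic_debug_init() is driven by writing one query plus a flags specification per write(); ddebug_tokenize(), ddebug_parse_query() and ddebug_parse_flags() above split and interpret it, and ddebug_change() applies it. A small userspace sketch, assuming debugfs is mounted at /sys/kernel/debug; the module name and file/line query in the comment are arbitrary examples:

	/* userspace sketch, not part of this patch */
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/*
		 * One command per write(), e.g. "module nfsd +p" to set the
		 * print flag on every pr_debug() site in that module, or
		 * "file svcsock.c line 100-200 -p" to clear it again for a
		 * line range in one file.
		 */
		const char cmd[] = "module nfsd +p";
		int fd = open("/sys/kernel/debug/dynamic_debug/control", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
			return 1;
		return close(fd);
	}
)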
diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c
deleted file mode 100644
index 165a19763dc9..000000000000
--- a/lib/dynamic_printk.c
+++ /dev/null
@@ -1,414 +0,0 @@
1/*
2 * lib/dynamic_printk.c
3 *
4 * make pr_debug()/dev_dbg() calls runtime configurable based upon their
5 * their source module.
6 *
7 * Copyright (C) 2008 Red Hat, Inc., Jason Baron <jbaron@redhat.com>
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/uaccess.h>
13#include <linux/seq_file.h>
14#include <linux/debugfs.h>
15#include <linux/fs.h>
16
17extern struct mod_debug __start___verbose[];
18extern struct mod_debug __stop___verbose[];
19
20struct debug_name {
21 struct hlist_node hlist;
22 struct hlist_node hlist2;
23 int hash1;
24 int hash2;
25 char *name;
26 int enable;
27 int type;
28};
29
30static int nr_entries;
31static int num_enabled;
32int dynamic_enabled = DYNAMIC_ENABLED_NONE;
33static struct hlist_head module_table[DEBUG_HASH_TABLE_SIZE] =
34 { [0 ... DEBUG_HASH_TABLE_SIZE-1] = HLIST_HEAD_INIT };
35static struct hlist_head module_table2[DEBUG_HASH_TABLE_SIZE] =
36 { [0 ... DEBUG_HASH_TABLE_SIZE-1] = HLIST_HEAD_INIT };
37static DECLARE_MUTEX(debug_list_mutex);
38
39/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
40 * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
41 * use independent hash functions, to reduce the chance of false positives.
42 */
43long long dynamic_printk_enabled;
44EXPORT_SYMBOL_GPL(dynamic_printk_enabled);
45long long dynamic_printk_enabled2;
46EXPORT_SYMBOL_GPL(dynamic_printk_enabled2);
47
48/* returns the debug module pointer. */
49static struct debug_name *find_debug_module(char *module_name)
50{
51 int i;
52 struct hlist_head *head;
53 struct hlist_node *node;
54 struct debug_name *element;
55
56 element = NULL;
57 for (i = 0; i < DEBUG_HASH_TABLE_SIZE; i++) {
58 head = &module_table[i];
59 hlist_for_each_entry_rcu(element, node, head, hlist)
60 if (!strcmp(element->name, module_name))
61 return element;
62 }
63 return NULL;
64}
65
66/* returns the debug module pointer. */
67static struct debug_name *find_debug_module_hash(char *module_name, int hash)
68{
69 struct hlist_head *head;
70 struct hlist_node *node;
71 struct debug_name *element;
72
73 element = NULL;
74 head = &module_table[hash];
75 hlist_for_each_entry_rcu(element, node, head, hlist)
76 if (!strcmp(element->name, module_name))
77 return element;
78 return NULL;
79}
80
81/* caller must hold mutex*/
82static int __add_debug_module(char *mod_name, int hash, int hash2)
83{
84 struct debug_name *new;
85 char *module_name;
86 int ret = 0;
87
88 if (find_debug_module(mod_name)) {
89 ret = -EINVAL;
90 goto out;
91 }
92 module_name = kmalloc(strlen(mod_name) + 1, GFP_KERNEL);
93 if (!module_name) {
94 ret = -ENOMEM;
95 goto out;
96 }
97 module_name = strcpy(module_name, mod_name);
98 module_name[strlen(mod_name)] = '\0';
99 new = kzalloc(sizeof(struct debug_name), GFP_KERNEL);
100 if (!new) {
101 kfree(module_name);
102 ret = -ENOMEM;
103 goto out;
104 }
105 INIT_HLIST_NODE(&new->hlist);
106 INIT_HLIST_NODE(&new->hlist2);
107 new->name = module_name;
108 new->hash1 = hash;
109 new->hash2 = hash2;
110 hlist_add_head_rcu(&new->hlist, &module_table[hash]);
111 hlist_add_head_rcu(&new->hlist2, &module_table2[hash2]);
112 nr_entries++;
113out:
114 return ret;
115}
116
117int unregister_dynamic_debug_module(char *mod_name)
118{
119 struct debug_name *element;
120 int ret = 0;
121
122 down(&debug_list_mutex);
123 element = find_debug_module(mod_name);
124 if (!element) {
125 ret = -EINVAL;
126 goto out;
127 }
128 hlist_del_rcu(&element->hlist);
129 hlist_del_rcu(&element->hlist2);
130 synchronize_rcu();
131 kfree(element->name);
132 if (element->enable)
133 num_enabled--;
134 kfree(element);
135 nr_entries--;
136out:
137 up(&debug_list_mutex);
138 return ret;
139}
140EXPORT_SYMBOL_GPL(unregister_dynamic_debug_module);
141
142int register_dynamic_debug_module(char *mod_name, int type, char *share_name,
143 char *flags, int hash, int hash2)
144{
145 struct debug_name *elem;
146 int ret = 0;
147
148 down(&debug_list_mutex);
149 elem = find_debug_module(mod_name);
150 if (!elem) {
151 if (__add_debug_module(mod_name, hash, hash2))
152 goto out;
153 elem = find_debug_module(mod_name);
154 if (dynamic_enabled == DYNAMIC_ENABLED_ALL &&
155 !strcmp(mod_name, share_name)) {
156 elem->enable = true;
157 num_enabled++;
158 }
159 }
160 elem->type |= type;
161out:
162 up(&debug_list_mutex);
163 return ret;
164}
165EXPORT_SYMBOL_GPL(register_dynamic_debug_module);
166
167int __dynamic_dbg_enabled_helper(char *mod_name, int type, int value, int hash)
168{
169 struct debug_name *elem;
170 int ret = 0;
171
172 if (dynamic_enabled == DYNAMIC_ENABLED_ALL)
173 return 1;
174 rcu_read_lock();
175 elem = find_debug_module_hash(mod_name, hash);
176 if (elem && elem->enable)
177 ret = 1;
178 rcu_read_unlock();
179 return ret;
180}
181EXPORT_SYMBOL_GPL(__dynamic_dbg_enabled_helper);
182
183static void set_all(bool enable)
184{
185 struct debug_name *e;
186 struct hlist_node *node;
187 int i;
188 long long enable_mask;
189
190 for (i = 0; i < DEBUG_HASH_TABLE_SIZE; i++) {
191 if (module_table[i].first != NULL) {
192 hlist_for_each_entry(e, node, &module_table[i], hlist) {
193 e->enable = enable;
194 }
195 }
196 }
197 if (enable)
198 enable_mask = ULLONG_MAX;
199 else
200 enable_mask = 0;
201 dynamic_printk_enabled = enable_mask;
202 dynamic_printk_enabled2 = enable_mask;
203}
204
205static int disabled_hash(int i, bool first_table)
206{
207 struct debug_name *e;
208 struct hlist_node *node;
209
210 if (first_table) {
211 hlist_for_each_entry(e, node, &module_table[i], hlist) {
212 if (e->enable)
213 return 0;
214 }
215 } else {
216 hlist_for_each_entry(e, node, &module_table2[i], hlist2) {
217 if (e->enable)
218 return 0;
219 }
220 }
221 return 1;
222}
223
224static ssize_t pr_debug_write(struct file *file, const char __user *buf,
225 size_t length, loff_t *ppos)
226{
227 char *buffer, *s, *value_str, *setting_str;
228 int err, value;
229 struct debug_name *elem = NULL;
230 int all = 0;
231
232 if (length > PAGE_SIZE || length < 0)
233 return -EINVAL;
234
235 buffer = (char *)__get_free_page(GFP_KERNEL);
236 if (!buffer)
237 return -ENOMEM;
238
239 err = -EFAULT;
240 if (copy_from_user(buffer, buf, length))
241 goto out;
242
243 err = -EINVAL;
244 if (length < PAGE_SIZE)
245 buffer[length] = '\0';
246 else if (buffer[PAGE_SIZE-1])
247 goto out;
248
249 err = -EINVAL;
250 down(&debug_list_mutex);
251
252 if (strncmp("set", buffer, 3))
253 goto out_up;
254 s = buffer + 3;
255 setting_str = strsep(&s, "=");
256 if (s == NULL)
257 goto out_up;
258 setting_str = strstrip(setting_str);
259 value_str = strsep(&s, " ");
260 if (s == NULL)
261 goto out_up;
262 s = strstrip(s);
263 if (!strncmp(s, "all", 3))
264 all = 1;
265 else
266 elem = find_debug_module(s);
267 if (!strncmp(setting_str, "enable", 6)) {
268 value = !!simple_strtol(value_str, NULL, 10);
269 if (all) {
270 if (value) {
271 set_all(true);
272 num_enabled = nr_entries;
273 dynamic_enabled = DYNAMIC_ENABLED_ALL;
274 } else {
275 set_all(false);
276 num_enabled = 0;
277 dynamic_enabled = DYNAMIC_ENABLED_NONE;
278 }
279 err = 0;
280 } else if (elem) {
281 if (value && (elem->enable == 0)) {
282 dynamic_printk_enabled |= (1LL << elem->hash1);
283 dynamic_printk_enabled2 |= (1LL << elem->hash2);
284 elem->enable = 1;
285 num_enabled++;
286 dynamic_enabled = DYNAMIC_ENABLED_SOME;
287 err = 0;
288 printk(KERN_DEBUG
289 "debugging enabled for module %s\n",
290 elem->name);
291 } else if (!value && (elem->enable == 1)) {
292 elem->enable = 0;
293 num_enabled--;
294 if (disabled_hash(elem->hash1, true))
295 dynamic_printk_enabled &=
296 ~(1LL << elem->hash1);
297 if (disabled_hash(elem->hash2, false))
298 dynamic_printk_enabled2 &=
299 ~(1LL << elem->hash2);
300 if (num_enabled)
301 dynamic_enabled = DYNAMIC_ENABLED_SOME;
302 else
303 dynamic_enabled = DYNAMIC_ENABLED_NONE;
304 err = 0;
305 printk(KERN_DEBUG
306 "debugging disabled for module %s\n",
307 elem->name);
308 }
309 }
310 }
311 if (!err)
312 err = length;
313out_up:
314 up(&debug_list_mutex);
315out:
316 free_page((unsigned long)buffer);
317 return err;
318}
319
320static void *pr_debug_seq_start(struct seq_file *f, loff_t *pos)
321{
322 return (*pos < DEBUG_HASH_TABLE_SIZE) ? pos : NULL;
323}
324
325static void *pr_debug_seq_next(struct seq_file *s, void *v, loff_t *pos)
326{
327 (*pos)++;
328 if (*pos >= DEBUG_HASH_TABLE_SIZE)
329 return NULL;
330 return pos;
331}
332
333static void pr_debug_seq_stop(struct seq_file *s, void *v)
334{
335 /* Nothing to do */
336}
337
338static int pr_debug_seq_show(struct seq_file *s, void *v)
339{
340 struct hlist_head *head;
341 struct hlist_node *node;
342 struct debug_name *elem;
343 unsigned int i = *(loff_t *) v;
344
345 rcu_read_lock();
346 head = &module_table[i];
347 hlist_for_each_entry_rcu(elem, node, head, hlist) {
348 seq_printf(s, "%s enabled=%d", elem->name, elem->enable);
349 seq_printf(s, "\n");
350 }
351 rcu_read_unlock();
352 return 0;
353}
354
355static struct seq_operations pr_debug_seq_ops = {
356 .start = pr_debug_seq_start,
357 .next = pr_debug_seq_next,
358 .stop = pr_debug_seq_stop,
359 .show = pr_debug_seq_show
360};
361
362static int pr_debug_open(struct inode *inode, struct file *filp)
363{
364 return seq_open(filp, &pr_debug_seq_ops);
365}
366
367static const struct file_operations pr_debug_operations = {
368 .open = pr_debug_open,
369 .read = seq_read,
370 .write = pr_debug_write,
371 .llseek = seq_lseek,
372 .release = seq_release,
373};
374
375static int __init dynamic_printk_init(void)
376{
377 struct dentry *dir, *file;
378 struct mod_debug *iter;
379 unsigned long value;
380
381 dir = debugfs_create_dir("dynamic_printk", NULL);
382 if (!dir)
383 return -ENOMEM;
384 file = debugfs_create_file("modules", 0644, dir, NULL,
385 &pr_debug_operations);
386 if (!file) {
387 debugfs_remove(dir);
388 return -ENOMEM;
389 }
390 for (value = (unsigned long)__start___verbose;
391 value < (unsigned long)__stop___verbose;
392 value += sizeof(struct mod_debug)) {
393 iter = (struct mod_debug *)value;
394 register_dynamic_debug_module(iter->modname,
395 iter->type,
396 iter->logical_modname,
397 iter->flag_names, iter->hash, iter->hash2);
398 }
399 if (dynamic_enabled == DYNAMIC_ENABLED_ALL)
400 set_all(true);
401 return 0;
402}
403module_init(dynamic_printk_init);
404/* may want to move this earlier so we can get traces as early as possible */
405
406static int __init dynamic_printk_setup(char *str)
407{
408 if (str)
409 return -ENOENT;
410 dynamic_enabled = DYNAMIC_ENABLED_ALL;
411 return 0;
412}
413/* Use early_param(), so we can get debug output as early as possible */
414early_param("dynamic_printk", dynamic_printk_setup);
diff --git a/lib/extable.c b/lib/extable.c
index 179c08745595..4cac81ec225e 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -39,7 +39,26 @@ void sort_extable(struct exception_table_entry *start,
39 sort(start, finish - start, sizeof(struct exception_table_entry), 39 sort(start, finish - start, sizeof(struct exception_table_entry),
40 cmp_ex, NULL); 40 cmp_ex, NULL);
41} 41}
42#endif 42
43#ifdef CONFIG_MODULES
44/*
45 * If the exception table is sorted, any entries referring to the module init
46 * will be at the beginning or the end.
47 */
48void trim_init_extable(struct module *m)
49{
50 /*trim the beginning*/
51 while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
52 m->extable++;
53 m->num_exentries--;
54 }
55 /*trim the end*/
56 while (m->num_exentries &&
57 within_module_init(m->extable[m->num_exentries-1].insn, m))
58 m->num_exentries--;
59}
60#endif /* CONFIG_MODULES */
61#endif /* !ARCH_HAS_SORT_EXTABLE */
43 62
44#ifndef ARCH_HAS_SEARCH_EXTABLE 63#ifndef ARCH_HAS_SEARCH_EXTABLE
45/* 64/*
diff --git a/lib/flex_array.c b/lib/flex_array.c
new file mode 100644
index 000000000000..7baed2fc3bc8
--- /dev/null
+++ b/lib/flex_array.c
@@ -0,0 +1,268 @@
1/*
2 * Flexible array managed in PAGE_SIZE parts
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2009
19 *
20 * Author: Dave Hansen <dave@linux.vnet.ibm.com>
21 */
22
23#include <linux/flex_array.h>
24#include <linux/slab.h>
25#include <linux/stddef.h>
26
27struct flex_array_part {
28 char elements[FLEX_ARRAY_PART_SIZE];
29};
30
31static inline int __elements_per_part(int element_size)
32{
33 return FLEX_ARRAY_PART_SIZE / element_size;
34}
35
36static inline int bytes_left_in_base(void)
37{
38 int element_offset = offsetof(struct flex_array, parts);
39 int bytes_left = FLEX_ARRAY_BASE_SIZE - element_offset;
40 return bytes_left;
41}
42
43static inline int nr_base_part_ptrs(void)
44{
45 return bytes_left_in_base() / sizeof(struct flex_array_part *);
46}
47
48/*
49 * If a user requests an allocation which is small
50 * enough, we may simply use the space in the
51 * flex_array->parts[] array to store the user
52 * data.
53 */
54static inline int elements_fit_in_base(struct flex_array *fa)
55{
56 int data_size = fa->element_size * fa->total_nr_elements;
57 if (data_size <= bytes_left_in_base())
58 return 1;
59 return 0;
60}
61
62/**
63 * flex_array_alloc - allocate a new flexible array
64 * @element_size: the size of individual elements in the array
65 * @total: total number of elements that this should hold
66 *
67 * Note: all locking must be provided by the caller.
68 *
69 * @total is used to size internal structures. If the user ever
70 * accesses any array indexes >=@total, it will produce errors.
71 *
72 * The maximum number of elements is defined as: the number of
73 * elements that can be stored in a page times the number of
74 * page pointers that we can fit in the base structure or (using
75 * integer math):
76 *
77 * (PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
78 *
79 * Here's a table showing example capacities. Note that the maximum
 80 * index that the get/put() functions accept is just nr_objects-1. This
81 * basically means that you get 4MB of storage on 32-bit and 2MB on
82 * 64-bit.
83 *
84 *
85 * Element size | Objects | Objects |
86 * PAGE_SIZE=4k | 32-bit | 64-bit |
87 * ---------------------------------|
88 * 1 bytes | 4186112 | 2093056 |
89 * 2 bytes | 2093056 | 1046528 |
90 * 3 bytes | 1395030 | 697515 |
91 * 4 bytes | 1046528 | 523264 |
92 * 32 bytes | 130816 | 65408 |
93 * 33 bytes | 126728 | 63364 |
94 * 2048 bytes | 2044 | 1022 |
95 * 2049 bytes | 1022 | 511 |
96 * void * | 1046528 | 261632 |
97 *
98 * Since 64-bit pointers are twice the size, we lose half the
99 * capacity in the base structure. Also note that no effort is made
100 * to efficiently pack objects across page boundaries.
101 */
102struct flex_array *flex_array_alloc(int element_size, unsigned int total,
103 gfp_t flags)
104{
105 struct flex_array *ret;
106 int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
107
108 /* max_size will end up 0 if element_size > PAGE_SIZE */
109 if (total > max_size)
110 return NULL;
111 ret = kzalloc(sizeof(struct flex_array), flags);
112 if (!ret)
113 return NULL;
114 ret->element_size = element_size;
115 ret->total_nr_elements = total;
116 return ret;
117}
118
119static int fa_element_to_part_nr(struct flex_array *fa,
120 unsigned int element_nr)
121{
122 return element_nr / __elements_per_part(fa->element_size);
123}
124
125/**
126 * flex_array_free_parts - just free the second-level pages
127 *
128 * This is to be used in cases where the base 'struct flex_array'
 129 * has been statically allocated and should not be freed.
130 */
131void flex_array_free_parts(struct flex_array *fa)
132{
133 int part_nr;
134 int max_part = nr_base_part_ptrs();
135
136 if (elements_fit_in_base(fa))
137 return;
138 for (part_nr = 0; part_nr < max_part; part_nr++)
139 kfree(fa->parts[part_nr]);
140}
141
142void flex_array_free(struct flex_array *fa)
143{
144 flex_array_free_parts(fa);
145 kfree(fa);
146}
147
148static unsigned int index_inside_part(struct flex_array *fa,
149 unsigned int element_nr)
150{
151 unsigned int part_offset;
152
153 part_offset = element_nr % __elements_per_part(fa->element_size);
154 return part_offset * fa->element_size;
155}
156
157static struct flex_array_part *
158__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
159{
160 struct flex_array_part *part = fa->parts[part_nr];
161 if (!part) {
162 /*
163 * This leaves the part pages uninitialized
164 * and with potentially random data, just
 165 * as if the user had kmalloc()'d the whole array.
166 * __GFP_ZERO can be used to zero it.
167 */
168 part = kmalloc(FLEX_ARRAY_PART_SIZE, flags);
169 if (!part)
170 return NULL;
171 fa->parts[part_nr] = part;
172 }
173 return part;
174}
175
176/**
177 * flex_array_put - copy data into the array at @element_nr
178 * @src: address of data to copy into the array
179 * @element_nr: index of the position in which to insert
180 * the new element.
181 *
182 * Note that this *copies* the contents of @src into
183 * the array. If you are trying to store an array of
184 * pointers, make sure to pass in &ptr instead of ptr.
185 *
186 * Locking must be provided by the caller.
187 */
188int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
189 gfp_t flags)
190{
191 int part_nr = fa_element_to_part_nr(fa, element_nr);
192 struct flex_array_part *part;
193 void *dst;
194
195 if (element_nr >= fa->total_nr_elements)
196 return -ENOSPC;
197 if (elements_fit_in_base(fa))
198 part = (struct flex_array_part *)&fa->parts[0];
199 else {
200 part = __fa_get_part(fa, part_nr, flags);
201 if (!part)
202 return -ENOMEM;
203 }
204 dst = &part->elements[index_inside_part(fa, element_nr)];
205 memcpy(dst, src, fa->element_size);
206 return 0;
207}
208
209/**
210 * flex_array_prealloc - guarantee that array space exists
211 * @start: index of first array element for which space is allocated
212 * @end: index of last (inclusive) element for which space is allocated
213 *
214 * This will guarantee that no future calls to flex_array_put()
215 * will allocate memory. It can be used if you are expecting to
216 * be holding a lock or in some atomic context while writing
217 * data into the array.
218 *
219 * Locking must be provided by the caller.
220 */
221int flex_array_prealloc(struct flex_array *fa, unsigned int start,
222 unsigned int end, gfp_t flags)
223{
224 int start_part;
225 int end_part;
226 int part_nr;
227 struct flex_array_part *part;
228
229 if (start >= fa->total_nr_elements || end >= fa->total_nr_elements)
230 return -ENOSPC;
231 if (elements_fit_in_base(fa))
232 return 0;
233 start_part = fa_element_to_part_nr(fa, start);
234 end_part = fa_element_to_part_nr(fa, end);
235 for (part_nr = start_part; part_nr <= end_part; part_nr++) {
236 part = __fa_get_part(fa, part_nr, flags);
237 if (!part)
238 return -ENOMEM;
239 }
240 return 0;
241}
242
243/**
244 * flex_array_get - pull data back out of the array
245 * @element_nr: index of the element to fetch from the array
246 *
247 * Returns a pointer to the data at index @element_nr. Note
248 * that this is a copy of the data that was passed in. If you
249 * are using this to store pointers, you'll get back &ptr.
250 *
251 * Locking must be provided by the caller.
252 */
253void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
254{
255 int part_nr = fa_element_to_part_nr(fa, element_nr);
256 struct flex_array_part *part;
257
258 if (element_nr >= fa->total_nr_elements)
259 return NULL;
260 if (elements_fit_in_base(fa))
261 part = (struct flex_array_part *)&fa->parts[0];
262 else {
263 part = fa->parts[part_nr];
264 if (!part)
265 return NULL;
266 }
267 return &part->elements[index_inside_part(fa, element_nr)];
268}
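For orientation, a minimal caller-side sketch of the flex_array interface added above (not part of this patch; flex_array_alloc() comes from the earlier, unshown part of lib/flex_array.c and the declarations are assumed to live in <linux/flex_array.h>):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/flex_array.h>

struct sample {
	int a;
	int b;
};

static int flex_array_demo(void)
{
	struct flex_array *fa;
	struct sample s = { .a = 1, .b = 2 };
	struct sample *p;
	int err;

	/* room for 1000 elements; backing parts are allocated lazily */
	fa = flex_array_alloc(sizeof(struct sample), 1000, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	/* copies s into slot 42, allocating that part if needed */
	err = flex_array_put(fa, 42, &s, GFP_KERNEL);
	if (!err) {
		/* returns a pointer to the array's own copy of the data */
		p = flex_array_get(fa, 42);
		if (p)
			pr_info("flex_array demo: a=%d b=%d\n", p->a, p->b);
	}

	flex_array_free(fa);
	return err;
}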
diff --git a/lib/gcd.c b/lib/gcd.c
new file mode 100644
index 000000000000..f879033d9822
--- /dev/null
+++ b/lib/gcd.c
@@ -0,0 +1,18 @@
1#include <linux/kernel.h>
2#include <linux/gcd.h>
3#include <linux/module.h>
4
5/* Greatest common divisor */
6unsigned long gcd(unsigned long a, unsigned long b)
7{
8 unsigned long r;
9
10 if (a < b)
11 swap(a, b);
12 while ((r = a % b) != 0) {
13 a = b;
14 b = r;
15 }
16 return b;
17}
18EXPORT_SYMBOL_GPL(gcd);
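A tiny usage sketch of the new gcd() helper (hypothetical caller, not in this patch). Note that this implementation divides by b unconditionally, so both arguments are assumed to be non-zero:

#include <linux/kernel.h>
#include <linux/gcd.h>

/* reduce a fraction num/den to lowest terms; both assumed non-zero */
static void reduce_fraction(unsigned long *num, unsigned long *den)
{
	unsigned long g = gcd(*num, *den);

	*num /= g;
	*den /= g;
}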
diff --git a/lib/genalloc.c b/lib/genalloc.c
index f6d276db2d58..eed2bdb865e7 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -85,7 +85,6 @@ void gen_pool_destroy(struct gen_pool *pool)
85 int bit, end_bit; 85 int bit, end_bit;
86 86
87 87
88 write_lock(&pool->lock);
89 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { 88 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
90 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); 89 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
91 list_del(&chunk->next_chunk); 90 list_del(&chunk->next_chunk);
diff --git a/lib/hexdump.c b/lib/hexdump.c
index f07c0db81d26..39af2560f765 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -65,7 +65,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
65 65
66 for (j = 0; j < ngroups; j++) 66 for (j = 0; j < ngroups; j++)
67 lx += scnprintf(linebuf + lx, linebuflen - lx, 67 lx += scnprintf(linebuf + lx, linebuflen - lx,
68 "%16.16llx ", (unsigned long long)*(ptr8 + j)); 68 "%s%16.16llx", j ? " " : "",
69 (unsigned long long)*(ptr8 + j));
69 ascii_column = 17 * ngroups + 2; 70 ascii_column = 17 * ngroups + 2;
70 break; 71 break;
71 } 72 }
@@ -76,7 +77,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
76 77
77 for (j = 0; j < ngroups; j++) 78 for (j = 0; j < ngroups; j++)
78 lx += scnprintf(linebuf + lx, linebuflen - lx, 79 lx += scnprintf(linebuf + lx, linebuflen - lx,
79 "%8.8x ", *(ptr4 + j)); 80 "%s%8.8x", j ? " " : "", *(ptr4 + j));
80 ascii_column = 9 * ngroups + 2; 81 ascii_column = 9 * ngroups + 2;
81 break; 82 break;
82 } 83 }
@@ -87,19 +88,21 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
87 88
88 for (j = 0; j < ngroups; j++) 89 for (j = 0; j < ngroups; j++)
89 lx += scnprintf(linebuf + lx, linebuflen - lx, 90 lx += scnprintf(linebuf + lx, linebuflen - lx,
90 "%4.4x ", *(ptr2 + j)); 91 "%s%4.4x", j ? " " : "", *(ptr2 + j));
91 ascii_column = 5 * ngroups + 2; 92 ascii_column = 5 * ngroups + 2;
92 break; 93 break;
93 } 94 }
94 95
95 default: 96 default:
96 for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen; 97 for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
97 j++) {
98 ch = ptr[j]; 98 ch = ptr[j];
99 linebuf[lx++] = hex_asc_hi(ch); 99 linebuf[lx++] = hex_asc_hi(ch);
100 linebuf[lx++] = hex_asc_lo(ch); 100 linebuf[lx++] = hex_asc_lo(ch);
101 linebuf[lx++] = ' '; 101 linebuf[lx++] = ' ';
102 } 102 }
103 if (j)
104 lx--;
105
103 ascii_column = 3 * rowsize + 2; 106 ascii_column = 3 * rowsize + 2;
104 break; 107 break;
105 } 108 }
@@ -108,7 +111,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
108 111
109 while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) 112 while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
110 linebuf[lx++] = ' '; 113 linebuf[lx++] = ' ';
111 for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++) 114 for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
112 linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] 115 linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
113 : '.'; 116 : '.';
114nil: 117nil:
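The hexdump hunks above stop emitting a trailing space after the last group in each row. For reference, a hypothetical caller of hex_dump_to_buffer() (not part of this patch):

#include <linux/kernel.h>

static void dump_frame(const void *buf, size_t len)
{
	char line[80];
	size_t off;

	for (off = 0; off < len; off += 16) {
		/* rowsize 16, groupsize 1, with the ASCII column appended */
		hex_dump_to_buffer(buf + off, min_t(size_t, len - off, 16),
				   16, 1, line, sizeof(line), true);
		printk(KERN_DEBUG "frame %04zx: %s\n", off, line);
	}
}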
diff --git a/lib/idr.c b/lib/idr.c
index c11c5765cdef..80ca9aca038b 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -449,6 +449,7 @@ void idr_remove_all(struct idr *idp)
449 449
450 n = idp->layers * IDR_BITS; 450 n = idp->layers * IDR_BITS;
451 p = idp->top; 451 p = idp->top;
452 rcu_assign_pointer(idp->top, NULL);
452 max = 1 << n; 453 max = 1 << n;
453 454
454 id = 0; 455 id = 0;
@@ -467,7 +468,6 @@ void idr_remove_all(struct idr *idp)
467 p = *--paa; 468 p = *--paa;
468 } 469 }
469 } 470 }
470 rcu_assign_pointer(idp->top, NULL);
471 idp->layers = 0; 471 idp->layers = 0;
472} 472}
473EXPORT_SYMBOL(idr_remove_all); 473EXPORT_SYMBOL(idr_remove_all);
@@ -579,6 +579,52 @@ int idr_for_each(struct idr *idp,
579EXPORT_SYMBOL(idr_for_each); 579EXPORT_SYMBOL(idr_for_each);
580 580
581/** 581/**
 582 * idr_get_next - find the next registered object at or above a given id
 583 * @idp: idr handle
 584 * @nextidp: pointer to the lookup key, updated to the id actually found
 585 *
 586 * Returns a pointer to the registered object with the smallest id greater
 587 * than or equal to *nextidp, and updates *nextidp to that id.
588 */
589
590void *idr_get_next(struct idr *idp, int *nextidp)
591{
592 struct idr_layer *p, *pa[MAX_LEVEL];
593 struct idr_layer **paa = &pa[0];
594 int id = *nextidp;
595 int n, max;
596
597 /* find first ent */
598 n = idp->layers * IDR_BITS;
599 max = 1 << n;
600 p = rcu_dereference(idp->top);
601 if (!p)
602 return NULL;
603
604 while (id < max) {
605 while (n > 0 && p) {
606 n -= IDR_BITS;
607 *paa++ = p;
608 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
609 }
610
611 if (p) {
612 *nextidp = id;
613 return p;
614 }
615
616 id += 1 << n;
617 while (n < fls(id)) {
618 n += IDR_BITS;
619 p = *--paa;
620 }
621 }
622 return NULL;
623}
624
625
626
627/**
582 * idr_replace - replace pointer for given id 628 * idr_replace - replace pointer for given id
583 * @idp: idr handle 629 * @idp: idr handle
584 * @ptr: pointer you want associated with the id 630 * @ptr: pointer you want associated with the id
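A caller-side sketch of the new idr_get_next() (hypothetical, not in this patch; the prototype is assumed to be exported via <linux/idr.h>), iterating over every object registered in an idr:

#include <linux/kernel.h>
#include <linux/idr.h>

/* visit every pointer stored in @idr, in increasing id order */
static void idr_walk(struct idr *idr)
{
	void *p;
	int id = 0;

	while ((p = idr_get_next(idr, &id)) != NULL) {
		pr_info("id %d -> %p\n", id, p);
		id++;	/* idr_get_next() finds the entry at or after id */
	}
}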
diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
index f1ed2fe76c65..bd2bea963364 100644
--- a/lib/is_single_threaded.c
+++ b/lib/is_single_threaded.c
@@ -12,34 +12,47 @@
12 12
13#include <linux/sched.h> 13#include <linux/sched.h>
14 14
15/** 15/*
16 * is_single_threaded - Determine if a thread group is single-threaded or not 16 * Returns true if the task does not share ->mm with another thread/process.
17 * @p: A task in the thread group in question
18 *
19 * This returns true if the thread group to which a task belongs is single
20 * threaded, false if it is not.
21 */ 17 */
22bool is_single_threaded(struct task_struct *p) 18bool current_is_single_threaded(void)
23{ 19{
24 struct task_struct *g, *t; 20 struct task_struct *task = current;
25 struct mm_struct *mm = p->mm; 21 struct mm_struct *mm = task->mm;
22 struct task_struct *p, *t;
23 bool ret;
26 24
27 if (atomic_read(&p->signal->count) != 1) 25 if (atomic_read(&task->signal->live) != 1)
28 goto no; 26 return false;
29 27
30 if (atomic_read(&p->mm->mm_users) != 1) { 28 if (atomic_read(&mm->mm_users) == 1)
31 read_lock(&tasklist_lock); 29 return true;
32 do_each_thread(g, t) {
33 if (t->mm == mm && t != p)
34 goto no_unlock;
35 } while_each_thread(g, t);
36 read_unlock(&tasklist_lock);
37 }
38 30
39 return true; 31 ret = false;
32 rcu_read_lock();
33 for_each_process(p) {
34 if (unlikely(p->flags & PF_KTHREAD))
35 continue;
36 if (unlikely(p == task->group_leader))
37 continue;
38
39 t = p;
40 do {
41 if (unlikely(t->mm == mm))
42 goto found;
43 if (likely(t->mm))
44 break;
45 /*
46 * t->mm == NULL. Make sure next_thread/next_task
47 * will see other CLONE_VM tasks which might be
48 * forked before exiting.
49 */
50 smp_rmb();
51 } while_each_thread(p, t);
52 }
53 ret = true;
54found:
55 rcu_read_unlock();
40 56
41no_unlock: 57 return ret;
42 read_unlock(&tasklist_lock);
43no:
44 return false;
45} 58}
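A hypothetical caller sketch for the renamed current_is_single_threaded() (not in this patch; the declaration is assumed to be in <linux/sched.h>). It is typically used before an operation that is only safe when no sibling thread can race with the caller:

#include <linux/sched.h>
#include <linux/errno.h>

static int change_something_exclusive(void)
{
	/* bail out if another task might be using our mm concurrently */
	if (!current_is_single_threaded())
		return -EBUSY;

	/* ... perform the single-threaded-only update here ... */
	return 0;
}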
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 01a3c22c1b5a..39f1029e3525 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -39,7 +39,7 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
39int __lockfunc __reacquire_kernel_lock(void) 39int __lockfunc __reacquire_kernel_lock(void)
40{ 40{
41 while (!_raw_spin_trylock(&kernel_flag)) { 41 while (!_raw_spin_trylock(&kernel_flag)) {
42 if (test_thread_flag(TIF_NEED_RESCHED)) 42 if (need_resched())
43 return -EAGAIN; 43 return -EAGAIN;
44 cpu_relax(); 44 cpu_relax();
45 } 45 }
diff --git a/lib/kobject.c b/lib/kobject.c
index 0487d1f64806..b512b746d2af 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -212,12 +212,15 @@ static int kobject_add_internal(struct kobject *kobj)
212 * @fmt: format string used to build the name 212 * @fmt: format string used to build the name
213 * @vargs: vargs to format the string. 213 * @vargs: vargs to format the string.
214 */ 214 */
215static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, 215int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
216 va_list vargs) 216 va_list vargs)
217{ 217{
218 const char *old_name = kobj->name; 218 const char *old_name = kobj->name;
219 char *s; 219 char *s;
220 220
221 if (kobj->name && !fmt)
222 return 0;
223
221 kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); 224 kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
222 if (!kobj->name) 225 if (!kobj->name)
223 return -ENOMEM; 226 return -ENOMEM;
@@ -790,11 +793,16 @@ static struct kset *kset_create(const char *name,
790 struct kobject *parent_kobj) 793 struct kobject *parent_kobj)
791{ 794{
792 struct kset *kset; 795 struct kset *kset;
796 int retval;
793 797
794 kset = kzalloc(sizeof(*kset), GFP_KERNEL); 798 kset = kzalloc(sizeof(*kset), GFP_KERNEL);
795 if (!kset) 799 if (!kset)
796 return NULL; 800 return NULL;
797 kobject_set_name(&kset->kobj, name); 801 retval = kobject_set_name(&kset->kobj, name);
802 if (retval) {
803 kfree(kset);
804 return NULL;
805 }
798 kset->uevent_ops = uevent_ops; 806 kset->uevent_ops = uevent_ops;
799 kset->kobj.parent = parent_kobj; 807 kset->kobj.parent = parent_kobj;
800 808
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 318328ddbd1c..920a3ca6e259 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -118,6 +118,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
118 kset = top_kobj->kset; 118 kset = top_kobj->kset;
119 uevent_ops = kset->uevent_ops; 119 uevent_ops = kset->uevent_ops;
120 120
 121 /* skip the event, if uevent_suppress is set */
122 if (kobj->uevent_suppress) {
123 pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
124 "caused the event to drop!\n",
125 kobject_name(kobj), kobj, __func__);
126 return 0;
127 }
121 /* skip the event, if the filter returns zero. */ 128 /* skip the event, if the filter returns zero. */
122 if (uevent_ops && uevent_ops->filter) 129 if (uevent_ops && uevent_ops->filter)
123 if (!uevent_ops->filter(kset, kobj)) { 130 if (!uevent_ops->filter(kset, kobj)) {
@@ -227,6 +234,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
227 NETLINK_CB(skb).dst_group = 1; 234 NETLINK_CB(skb).dst_group = 1;
228 retval = netlink_broadcast(uevent_sock, skb, 0, 1, 235 retval = netlink_broadcast(uevent_sock, skb, 0, 1,
229 GFP_KERNEL); 236 GFP_KERNEL);
237 /* ENOBUFS should be handled in userspace */
238 if (retval == -ENOBUFS)
239 retval = 0;
230 } else 240 } else
231 retval = -ENOMEM; 241 retval = -ENOMEM;
232 } 242 }
@@ -318,7 +328,7 @@ static int __init kobject_uevent_init(void)
318 "kobject_uevent: unable to create netlink socket!\n"); 328 "kobject_uevent: unable to create netlink socket!\n");
319 return -ENODEV; 329 return -ENODEV;
320 } 330 }
321 331 netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
322 return 0; 332 return 0;
323} 333}
324 334
diff --git a/lib/lmb.c b/lib/lmb.c
index 97e547037084..0343c05609f0 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -29,33 +29,33 @@ static int __init early_lmb(char *p)
29} 29}
30early_param("lmb", early_lmb); 30early_param("lmb", early_lmb);
31 31
32void lmb_dump_all(void) 32static void lmb_dump(struct lmb_region *region, char *name)
33{ 33{
34 unsigned long i; 34 unsigned long long base, size;
35 int i;
36
37 pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
38
39 for (i = 0; i < region->cnt; i++) {
40 base = region->region[i].base;
41 size = region->region[i].size;
42
43 pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
44 name, i, base, base + size - 1, size);
45 }
46}
35 47
48void lmb_dump_all(void)
49{
36 if (!lmb_debug) 50 if (!lmb_debug)
37 return; 51 return;
38 52
39 pr_info("lmb_dump_all:\n"); 53 pr_info("LMB configuration:\n");
40 pr_info(" memory.cnt = 0x%lx\n", lmb.memory.cnt); 54 pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size);
41 pr_info(" memory.size = 0x%llx\n", 55 pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);
42 (unsigned long long)lmb.memory.size);
43 for (i=0; i < lmb.memory.cnt ;i++) {
44 pr_info(" memory.region[0x%lx].base = 0x%llx\n",
45 i, (unsigned long long)lmb.memory.region[i].base);
46 pr_info(" .size = 0x%llx\n",
47 (unsigned long long)lmb.memory.region[i].size);
48 }
49 56
50 pr_info(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt); 57 lmb_dump(&lmb.memory, "memory");
51 pr_info(" reserved.size = 0x%llx\n", 58 lmb_dump(&lmb.reserved, "reserved");
52 (unsigned long long)lmb.memory.size);
53 for (i=0; i < lmb.reserved.cnt ;i++) {
54 pr_info(" reserved.region[0x%lx].base = 0x%llx\n",
55 i, (unsigned long long)lmb.reserved.region[i].base);
56 pr_info(" .size = 0x%llx\n",
57 (unsigned long long)lmb.reserved.region[i].size);
58 }
59} 59}
60 60
61static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2, 61static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
@@ -429,7 +429,7 @@ u64 __init lmb_phys_mem_size(void)
429 return lmb.memory.size; 429 return lmb.memory.size;
430} 430}
431 431
432u64 __init lmb_end_of_DRAM(void) 432u64 lmb_end_of_DRAM(void)
433{ 433{
434 int idx = lmb.memory.cnt - 1; 434 int idx = lmb.memory.cnt - 1;
435 435
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 280332c1827c..619313ed6c46 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -157,11 +157,11 @@ static void init_shared_classes(void)
157#define SOFTIRQ_ENTER() \ 157#define SOFTIRQ_ENTER() \
158 local_bh_disable(); \ 158 local_bh_disable(); \
159 local_irq_disable(); \ 159 local_irq_disable(); \
160 trace_softirq_enter(); \ 160 lockdep_softirq_enter(); \
161 WARN_ON(!in_softirq()); 161 WARN_ON(!in_softirq());
162 162
163#define SOFTIRQ_EXIT() \ 163#define SOFTIRQ_EXIT() \
164 trace_softirq_exit(); \ 164 lockdep_softirq_exit(); \
165 local_irq_enable(); \ 165 local_irq_enable(); \
166 local_bh_enable(); 166 local_bh_enable();
167 167
diff --git a/lib/nlattr.c b/lib/nlattr.c
new file mode 100644
index 000000000000..c4706eb98d3d
--- /dev/null
+++ b/lib/nlattr.c
@@ -0,0 +1,502 @@
1/*
2 * NETLINK Netlink attributes
3 *
4 * Authors: Thomas Graf <tgraf@suug.ch>
5 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6 */
7
8#include <linux/module.h>
9#include <linux/kernel.h>
10#include <linux/errno.h>
11#include <linux/jiffies.h>
12#include <linux/netdevice.h>
13#include <linux/skbuff.h>
14#include <linux/string.h>
15#include <linux/types.h>
16#include <net/netlink.h>
17
18static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = {
19 [NLA_U8] = sizeof(u8),
20 [NLA_U16] = sizeof(u16),
21 [NLA_U32] = sizeof(u32),
22 [NLA_U64] = sizeof(u64),
23 [NLA_NESTED] = NLA_HDRLEN,
24};
25
26static int validate_nla(struct nlattr *nla, int maxtype,
27 const struct nla_policy *policy)
28{
29 const struct nla_policy *pt;
30 int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
31
32 if (type <= 0 || type > maxtype)
33 return 0;
34
35 pt = &policy[type];
36
37 BUG_ON(pt->type > NLA_TYPE_MAX);
38
39 switch (pt->type) {
40 case NLA_FLAG:
41 if (attrlen > 0)
42 return -ERANGE;
43 break;
44
45 case NLA_NUL_STRING:
46 if (pt->len)
47 minlen = min_t(int, attrlen, pt->len + 1);
48 else
49 minlen = attrlen;
50
51 if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL)
52 return -EINVAL;
53 /* fall through */
54
55 case NLA_STRING:
56 if (attrlen < 1)
57 return -ERANGE;
58
59 if (pt->len) {
60 char *buf = nla_data(nla);
61
62 if (buf[attrlen - 1] == '\0')
63 attrlen--;
64
65 if (attrlen > pt->len)
66 return -ERANGE;
67 }
68 break;
69
70 case NLA_BINARY:
71 if (pt->len && attrlen > pt->len)
72 return -ERANGE;
73 break;
74
75 case NLA_NESTED_COMPAT:
76 if (attrlen < pt->len)
77 return -ERANGE;
78 if (attrlen < NLA_ALIGN(pt->len))
79 break;
80 if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
81 return -ERANGE;
82 nla = nla_data(nla) + NLA_ALIGN(pt->len);
83 if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
84 return -ERANGE;
85 break;
86 case NLA_NESTED:
 87 * a nested attribute is allowed to be empty; if it's not,
88 * it must have a size of at least NLA_HDRLEN.
89 */
90 if (attrlen == 0)
91 break;
92 default:
93 if (pt->len)
94 minlen = pt->len;
95 else if (pt->type != NLA_UNSPEC)
96 minlen = nla_attr_minlen[pt->type];
97
98 if (attrlen < minlen)
99 return -ERANGE;
100 }
101
102 return 0;
103}
104
105/**
106 * nla_validate - Validate a stream of attributes
107 * @head: head of attribute stream
108 * @len: length of attribute stream
109 * @maxtype: maximum attribute type to be expected
110 * @policy: validation policy
111 *
112 * Validates all attributes in the specified attribute stream against the
113 * specified policy. Attributes with a type exceeding maxtype will be
 114 * ignored. See documentation of struct nla_policy for more details.
115 *
116 * Returns 0 on success or a negative error code.
117 */
118int nla_validate(struct nlattr *head, int len, int maxtype,
119 const struct nla_policy *policy)
120{
121 struct nlattr *nla;
122 int rem, err;
123
124 nla_for_each_attr(nla, head, len, rem) {
125 err = validate_nla(nla, maxtype, policy);
126 if (err < 0)
127 goto errout;
128 }
129
130 err = 0;
131errout:
132 return err;
133}
134
135/**
 136 * nla_policy_len - Determine the max. length of a policy
137 * @policy: policy to use
138 * @n: number of policies
139 *
140 * Determines the max. length of the policy. It is currently used
 141 * to allocate Netlink buffers roughly the size of the actual
142 * message.
143 *
 144 * Returns the maximum total length the attributes can take.
145 */
146int
147nla_policy_len(const struct nla_policy *p, int n)
148{
149 int i, len = 0;
150
151 for (i = 0; i < n; i++) {
152 if (p->len)
153 len += nla_total_size(p->len);
154 else if (nla_attr_minlen[p->type])
155 len += nla_total_size(nla_attr_minlen[p->type]);
156 }
157
158 return len;
159}
160
161/**
162 * nla_parse - Parse a stream of attributes into a tb buffer
163 * @tb: destination array with maxtype+1 elements
164 * @maxtype: maximum attribute type to be expected
165 * @head: head of attribute stream
166 * @len: length of attribute stream
167 * @policy: validation policy
168 *
169 * Parses a stream of attributes and stores a pointer to each attribute in
 170 * the tb array accessible via the attribute type. Attributes with a type
171 * exceeding maxtype will be silently ignored for backwards compatibility
172 * reasons. policy may be set to NULL if no validation is required.
173 *
174 * Returns 0 on success or a negative error code.
175 */
176int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
177 const struct nla_policy *policy)
178{
179 struct nlattr *nla;
180 int rem, err;
181
182 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
183
184 nla_for_each_attr(nla, head, len, rem) {
185 u16 type = nla_type(nla);
186
187 if (type > 0 && type <= maxtype) {
188 if (policy) {
189 err = validate_nla(nla, maxtype, policy);
190 if (err < 0)
191 goto errout;
192 }
193
194 tb[type] = nla;
195 }
196 }
197
198 if (unlikely(rem > 0))
199 printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
200 "attributes.\n", rem);
201
202 err = 0;
203errout:
204 return err;
205}
206
207/**
208 * nla_find - Find a specific attribute in a stream of attributes
209 * @head: head of attribute stream
210 * @len: length of attribute stream
211 * @attrtype: type of attribute to look for
212 *
213 * Returns the first attribute in the stream matching the specified type.
214 */
215struct nlattr *nla_find(struct nlattr *head, int len, int attrtype)
216{
217 struct nlattr *nla;
218 int rem;
219
220 nla_for_each_attr(nla, head, len, rem)
221 if (nla_type(nla) == attrtype)
222 return nla;
223
224 return NULL;
225}
226
227/**
228 * nla_strlcpy - Copy string attribute payload into a sized buffer
229 * @dst: where to copy the string to
230 * @nla: attribute to copy the string from
231 * @dstsize: size of destination buffer
232 *
233 * Copies at most dstsize - 1 bytes into the destination buffer.
234 * The result is always a valid NUL-terminated string. Unlike
235 * strlcpy the destination buffer is always padded out.
236 *
237 * Returns the length of the source buffer.
238 */
239size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
240{
241 size_t srclen = nla_len(nla);
242 char *src = nla_data(nla);
243
244 if (srclen > 0 && src[srclen - 1] == '\0')
245 srclen--;
246
247 if (dstsize > 0) {
248 size_t len = (srclen >= dstsize) ? dstsize - 1 : srclen;
249
250 memset(dst, 0, dstsize);
251 memcpy(dst, src, len);
252 }
253
254 return srclen;
255}
256
257/**
258 * nla_memcpy - Copy a netlink attribute into another memory area
 259 * @dest: where to copy to
260 * @src: netlink attribute to copy from
261 * @count: size of the destination area
262 *
263 * Note: The number of bytes copied is limited by the length of
 264 * the attribute's payload.
265 *
266 * Returns the number of bytes copied.
267 */
268int nla_memcpy(void *dest, const struct nlattr *src, int count)
269{
270 int minlen = min_t(int, count, nla_len(src));
271
272 memcpy(dest, nla_data(src), minlen);
273
274 return minlen;
275}
276
277/**
278 * nla_memcmp - Compare an attribute with sized memory area
279 * @nla: netlink attribute
280 * @data: memory area
281 * @size: size of memory area
282 */
283int nla_memcmp(const struct nlattr *nla, const void *data,
284 size_t size)
285{
286 int d = nla_len(nla) - size;
287
288 if (d == 0)
289 d = memcmp(nla_data(nla), data, size);
290
291 return d;
292}
293
294/**
295 * nla_strcmp - Compare a string attribute against a string
296 * @nla: netlink string attribute
297 * @str: another string
298 */
299int nla_strcmp(const struct nlattr *nla, const char *str)
300{
301 int len = strlen(str) + 1;
302 int d = nla_len(nla) - len;
303
304 if (d == 0)
305 d = memcmp(nla_data(nla), str, len);
306
307 return d;
308}
309
310#ifdef CONFIG_NET
311/**
312 * __nla_reserve - reserve room for attribute on the skb
313 * @skb: socket buffer to reserve room on
314 * @attrtype: attribute type
315 * @attrlen: length of attribute payload
316 *
317 * Adds a netlink attribute header to a socket buffer and reserves
318 * room for the payload but does not copy it.
319 *
320 * The caller is responsible to ensure that the skb provides enough
321 * tailroom for the attribute header and payload.
322 */
323struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
324{
325 struct nlattr *nla;
326
327 nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
328 nla->nla_type = attrtype;
329 nla->nla_len = nla_attr_size(attrlen);
330
331 memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen));
332
333 return nla;
334}
335EXPORT_SYMBOL(__nla_reserve);
336
337/**
338 * __nla_reserve_nohdr - reserve room for attribute without header
339 * @skb: socket buffer to reserve room on
340 * @attrlen: length of attribute payload
341 *
342 * Reserves room for attribute payload without a header.
343 *
344 * The caller is responsible to ensure that the skb provides enough
345 * tailroom for the payload.
346 */
347void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
348{
349 void *start;
350
351 start = skb_put(skb, NLA_ALIGN(attrlen));
352 memset(start, 0, NLA_ALIGN(attrlen));
353
354 return start;
355}
356EXPORT_SYMBOL(__nla_reserve_nohdr);
357
358/**
359 * nla_reserve - reserve room for attribute on the skb
360 * @skb: socket buffer to reserve room on
361 * @attrtype: attribute type
362 * @attrlen: length of attribute payload
363 *
364 * Adds a netlink attribute header to a socket buffer and reserves
365 * room for the payload but does not copy it.
366 *
367 * Returns NULL if the tailroom of the skb is insufficient to store
368 * the attribute header and payload.
369 */
370struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
371{
372 if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
373 return NULL;
374
375 return __nla_reserve(skb, attrtype, attrlen);
376}
377EXPORT_SYMBOL(nla_reserve);
378
379/**
380 * nla_reserve_nohdr - reserve room for attribute without header
381 * @skb: socket buffer to reserve room on
382 * @attrlen: length of attribute payload
383 *
384 * Reserves room for attribute payload without a header.
385 *
386 * Returns NULL if the tailroom of the skb is insufficient to store
387 * the attribute payload.
388 */
389void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
390{
391 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
392 return NULL;
393
394 return __nla_reserve_nohdr(skb, attrlen);
395}
396EXPORT_SYMBOL(nla_reserve_nohdr);
397
398/**
399 * __nla_put - Add a netlink attribute to a socket buffer
400 * @skb: socket buffer to add attribute to
401 * @attrtype: attribute type
402 * @attrlen: length of attribute payload
403 * @data: head of attribute payload
404 *
405 * The caller is responsible to ensure that the skb provides enough
406 * tailroom for the attribute header and payload.
407 */
408void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
409 const void *data)
410{
411 struct nlattr *nla;
412
413 nla = __nla_reserve(skb, attrtype, attrlen);
414 memcpy(nla_data(nla), data, attrlen);
415}
416EXPORT_SYMBOL(__nla_put);
417
418/**
419 * __nla_put_nohdr - Add a netlink attribute without header
420 * @skb: socket buffer to add attribute to
421 * @attrlen: length of attribute payload
422 * @data: head of attribute payload
423 *
424 * The caller is responsible to ensure that the skb provides enough
425 * tailroom for the attribute payload.
426 */
427void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
428{
429 void *start;
430
431 start = __nla_reserve_nohdr(skb, attrlen);
432 memcpy(start, data, attrlen);
433}
434EXPORT_SYMBOL(__nla_put_nohdr);
435
436/**
437 * nla_put - Add a netlink attribute to a socket buffer
438 * @skb: socket buffer to add attribute to
439 * @attrtype: attribute type
440 * @attrlen: length of attribute payload
441 * @data: head of attribute payload
442 *
443 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
444 * the attribute header and payload.
445 */
446int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
447{
448 if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
449 return -EMSGSIZE;
450
451 __nla_put(skb, attrtype, attrlen, data);
452 return 0;
453}
454EXPORT_SYMBOL(nla_put);
455
456/**
457 * nla_put_nohdr - Add a netlink attribute without header
458 * @skb: socket buffer to add attribute to
459 * @attrlen: length of attribute payload
460 * @data: head of attribute payload
461 *
462 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
463 * the attribute payload.
464 */
465int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
466{
467 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
468 return -EMSGSIZE;
469
470 __nla_put_nohdr(skb, attrlen, data);
471 return 0;
472}
473EXPORT_SYMBOL(nla_put_nohdr);
474
475/**
476 * nla_append - Add a netlink attribute without header or padding
477 * @skb: socket buffer to add attribute to
478 * @attrlen: length of attribute payload
479 * @data: head of attribute payload
480 *
481 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
482 * the attribute payload.
483 */
484int nla_append(struct sk_buff *skb, int attrlen, const void *data)
485{
486 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
487 return -EMSGSIZE;
488
489 memcpy(skb_put(skb, attrlen), data, attrlen);
490 return 0;
491}
492EXPORT_SYMBOL(nla_append);
493#endif
494
495EXPORT_SYMBOL(nla_validate);
496EXPORT_SYMBOL(nla_policy_len);
497EXPORT_SYMBOL(nla_parse);
498EXPORT_SYMBOL(nla_find);
499EXPORT_SYMBOL(nla_strlcpy);
500EXPORT_SYMBOL(nla_memcpy);
501EXPORT_SYMBOL(nla_memcmp);
502EXPORT_SYMBOL(nla_strcmp);
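A short, hypothetical example of the nla_parse()/nla_policy interface added above. The attribute enum and message layout are invented for illustration; only the nla_* helpers themselves come from this file and <net/netlink.h>:

#include <linux/kernel.h>
#include <net/netlink.h>

enum {
	DEMO_ATTR_UNSPEC,
	DEMO_ATTR_ID,		/* u32 */
	DEMO_ATTR_NAME,		/* NUL-terminated string, at most 15 chars */
	__DEMO_ATTR_MAX,
};
#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_ID]	 = { .type = NLA_U32 },
	[DEMO_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = 15 },
};

/* parse a flat attribute stream starting at @head and spanning @len bytes */
static int demo_parse(struct nlattr *head, int len)
{
	struct nlattr *tb[DEMO_ATTR_MAX + 1];
	char name[16];
	int err;

	err = nla_parse(tb, DEMO_ATTR_MAX, head, len, demo_policy);
	if (err < 0)
		return err;

	if (tb[DEMO_ATTR_ID])
		pr_info("id=%u\n", nla_get_u32(tb[DEMO_ATTR_ID]));
	if (tb[DEMO_ATTR_NAME]) {
		nla_strlcpy(name, tb[DEMO_ATTR_NAME], sizeof(name));
		pr_info("name=%s\n", name);
	}
	return 0;
}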
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 4bb42a0344ec..23abbd93cae1 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -351,20 +351,12 @@ int radix_tree_insert(struct radix_tree_root *root,
351} 351}
352EXPORT_SYMBOL(radix_tree_insert); 352EXPORT_SYMBOL(radix_tree_insert);
353 353
354/** 354/*
355 * radix_tree_lookup_slot - lookup a slot in a radix tree 355 * is_slot == 1 : search for the slot.
356 * @root: radix tree root 356 * is_slot == 0 : search for the node.
357 * @index: index key
358 *
359 * Returns: the slot corresponding to the position @index in the
360 * radix tree @root. This is useful for update-if-exists operations.
361 *
362 * This function can be called under rcu_read_lock iff the slot is not
363 * modified by radix_tree_replace_slot, otherwise it must be called
364 * exclusive from other writers. Any dereference of the slot must be done
365 * using radix_tree_deref_slot.
366 */ 357 */
367void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) 358static void *radix_tree_lookup_element(struct radix_tree_root *root,
359 unsigned long index, int is_slot)
368{ 360{
369 unsigned int height, shift; 361 unsigned int height, shift;
370 struct radix_tree_node *node, **slot; 362 struct radix_tree_node *node, **slot;
@@ -376,7 +368,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
376 if (!radix_tree_is_indirect_ptr(node)) { 368 if (!radix_tree_is_indirect_ptr(node)) {
377 if (index > 0) 369 if (index > 0)
378 return NULL; 370 return NULL;
379 return (void **)&root->rnode; 371 return is_slot ? (void *)&root->rnode : node;
380 } 372 }
381 node = radix_tree_indirect_to_ptr(node); 373 node = radix_tree_indirect_to_ptr(node);
382 374
@@ -397,7 +389,25 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
397 height--; 389 height--;
398 } while (height > 0); 390 } while (height > 0);
399 391
400 return (void **)slot; 392 return is_slot ? (void *)slot:node;
393}
394
395/**
396 * radix_tree_lookup_slot - lookup a slot in a radix tree
397 * @root: radix tree root
398 * @index: index key
399 *
400 * Returns: the slot corresponding to the position @index in the
401 * radix tree @root. This is useful for update-if-exists operations.
402 *
403 * This function can be called under rcu_read_lock iff the slot is not
404 * modified by radix_tree_replace_slot, otherwise it must be called
405 * exclusive from other writers. Any dereference of the slot must be done
406 * using radix_tree_deref_slot.
407 */
408void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
409{
410 return (void **)radix_tree_lookup_element(root, index, 1);
401} 411}
402EXPORT_SYMBOL(radix_tree_lookup_slot); 412EXPORT_SYMBOL(radix_tree_lookup_slot);
403 413
@@ -415,38 +425,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
415 */ 425 */
416void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) 426void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
417{ 427{
418 unsigned int height, shift; 428 return radix_tree_lookup_element(root, index, 0);
419 struct radix_tree_node *node, **slot;
420
421 node = rcu_dereference(root->rnode);
422 if (node == NULL)
423 return NULL;
424
425 if (!radix_tree_is_indirect_ptr(node)) {
426 if (index > 0)
427 return NULL;
428 return node;
429 }
430 node = radix_tree_indirect_to_ptr(node);
431
432 height = node->height;
433 if (index > radix_tree_maxindex(height))
434 return NULL;
435
436 shift = (height-1) * RADIX_TREE_MAP_SHIFT;
437
438 do {
439 slot = (struct radix_tree_node **)
440 (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
441 node = rcu_dereference(*slot);
442 if (node == NULL)
443 return NULL;
444
445 shift -= RADIX_TREE_MAP_SHIFT;
446 height--;
447 } while (height > 0);
448
449 return node;
450} 429}
451EXPORT_SYMBOL(radix_tree_lookup); 430EXPORT_SYMBOL(radix_tree_lookup);
452 431
@@ -666,6 +645,43 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
666} 645}
667EXPORT_SYMBOL(radix_tree_next_hole); 646EXPORT_SYMBOL(radix_tree_next_hole);
668 647
648/**
649 * radix_tree_prev_hole - find the prev hole (not-present entry)
650 * @root: tree root
651 * @index: index key
652 * @max_scan: maximum range to search
653 *
654 * Search backwards in the range [max(index-max_scan+1, 0), index]
655 * for the first hole.
656 *
657 * Returns: the index of the hole if found, otherwise returns an index
658 * outside of the set specified (in which case 'index - return >= max_scan'
659 * will be true). In rare cases of wrap-around, LONG_MAX will be returned.
660 *
 661 * radix_tree_prev_hole may be called under rcu_read_lock. However, like
662 * radix_tree_gang_lookup, this will not atomically search a snapshot of
663 * the tree at a single point in time. For example, if a hole is created
664 * at index 10, then subsequently a hole is created at index 5,
665 * radix_tree_prev_hole covering both indexes may return 5 if called under
666 * rcu_read_lock.
667 */
668unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
669 unsigned long index, unsigned long max_scan)
670{
671 unsigned long i;
672
673 for (i = 0; i < max_scan; i++) {
674 if (!radix_tree_lookup(root, index))
675 break;
676 index--;
677 if (index == LONG_MAX)
678 break;
679 }
680
681 return index;
682}
683EXPORT_SYMBOL(radix_tree_prev_hole);
684
669static unsigned int 685static unsigned int
670__lookup(struct radix_tree_node *slot, void ***results, unsigned long index, 686__lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
671 unsigned int max_items, unsigned long *next_index) 687 unsigned int max_items, unsigned long *next_index)
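A hypothetical sketch of how the new radix_tree_prev_hole() might be consumed (not from this patch; the matching declaration is assumed in <linux/radix-tree.h>). The readahead-style use case scans backwards from an index to see how long the run of already-present entries is:

#include <linux/radix-tree.h>

/*
 * Count how many consecutive entries are present in @root at and
 * immediately before @index, scanning back at most @max slots.
 */
static unsigned long count_populated_run(struct radix_tree_root *root,
					 unsigned long index,
					 unsigned long max)
{
	unsigned long hole = radix_tree_prev_hole(root, index, max);

	if (hole == index)
		return 0;		/* @index itself is a hole */
	return index - hole;		/* entries (hole, index] are present */
}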
diff --git a/lib/rational.c b/lib/rational.c
new file mode 100644
index 000000000000..b3c099b5478e
--- /dev/null
+++ b/lib/rational.c
@@ -0,0 +1,62 @@
1/*
2 * rational fractions
3 *
4 * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <os@emlix.com>
5 *
6 * helper functions when coping with rational numbers
7 */
8
9#include <linux/rational.h>
10
11/*
12 * calculate best rational approximation for a given fraction
13 * taking into account restricted register size, e.g. to find
14 * appropriate values for a pll with 5 bit denominator and
15 * 8 bit numerator register fields, trying to set up with a
16 * frequency ratio of 3.1415, one would say:
17 *
18 * rational_best_approximation(31415, 10000,
19 * (1 << 8) - 1, (1 << 5) - 1, &n, &d);
20 *
21 * you may look at given_numerator as a fixed point number,
22 * with the fractional part size described in given_denominator.
23 *
24 * for theoretical background, see:
25 * http://en.wikipedia.org/wiki/Continued_fraction
26 */
27
28void rational_best_approximation(
29 unsigned long given_numerator, unsigned long given_denominator,
30 unsigned long max_numerator, unsigned long max_denominator,
31 unsigned long *best_numerator, unsigned long *best_denominator)
32{
33 unsigned long n, d, n0, d0, n1, d1;
34 n = given_numerator;
35 d = given_denominator;
36 n0 = d1 = 0;
37 n1 = d0 = 1;
38 for (;;) {
39 unsigned long t, a;
40 if ((n1 > max_numerator) || (d1 > max_denominator)) {
41 n1 = n0;
42 d1 = d0;
43 break;
44 }
45 if (d == 0)
46 break;
47 t = d;
48 a = n / d;
49 d = n % d;
50 n = t;
51 t = n0 + a * n1;
52 n0 = n1;
53 n1 = t;
54 t = d0 + a * d1;
55 d0 = d1;
56 d1 = t;
57 }
58 *best_numerator = n1;
59 *best_denominator = d1;
60}
61
62EXPORT_SYMBOL(rational_best_approximation);
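A worked call matching the example in the comment above, approximating 3.1415 for a PLL with an 8-bit numerator and 5-bit denominator field (hypothetical caller, not in this patch):

#include <linux/kernel.h>
#include <linux/rational.h>

static void pll_ratio_demo(void)
{
	unsigned long n, d;

	/* best n/d for 31415/10000 with n <= 255 and d <= 31 */
	rational_best_approximation(31415, 10000,
			(1 << 8) - 1, (1 << 5) - 1, &n, &d);

	/* with these limits the continued-fraction walk settles on 22/7 */
	pr_info("pll ratio: %lu/%lu\n", n, d);
}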
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 9956b99649f0..e2aa3be29858 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -163,17 +163,14 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
163 { 163 {
164 if (!other->rb_right || rb_is_black(other->rb_right)) 164 if (!other->rb_right || rb_is_black(other->rb_right))
165 { 165 {
166 struct rb_node *o_left; 166 rb_set_black(other->rb_left);
167 if ((o_left = other->rb_left))
168 rb_set_black(o_left);
169 rb_set_red(other); 167 rb_set_red(other);
170 __rb_rotate_right(other, root); 168 __rb_rotate_right(other, root);
171 other = parent->rb_right; 169 other = parent->rb_right;
172 } 170 }
173 rb_set_color(other, rb_color(parent)); 171 rb_set_color(other, rb_color(parent));
174 rb_set_black(parent); 172 rb_set_black(parent);
175 if (other->rb_right) 173 rb_set_black(other->rb_right);
176 rb_set_black(other->rb_right);
177 __rb_rotate_left(parent, root); 174 __rb_rotate_left(parent, root);
178 node = root->rb_node; 175 node = root->rb_node;
179 break; 176 break;
@@ -200,17 +197,14 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
200 { 197 {
201 if (!other->rb_left || rb_is_black(other->rb_left)) 198 if (!other->rb_left || rb_is_black(other->rb_left))
202 { 199 {
203 register struct rb_node *o_right; 200 rb_set_black(other->rb_right);
204 if ((o_right = other->rb_right))
205 rb_set_black(o_right);
206 rb_set_red(other); 201 rb_set_red(other);
207 __rb_rotate_left(other, root); 202 __rb_rotate_left(other, root);
208 other = parent->rb_left; 203 other = parent->rb_left;
209 } 204 }
210 rb_set_color(other, rb_color(parent)); 205 rb_set_color(other, rb_color(parent));
211 rb_set_black(parent); 206 rb_set_black(parent);
212 if (other->rb_left) 207 rb_set_black(other->rb_left);
213 rb_set_black(other->rb_left);
214 __rb_rotate_right(parent, root); 208 __rb_rotate_right(parent, root);
215 node = root->rb_node; 209 node = root->rb_node;
216 break; 210 break;
@@ -237,34 +231,34 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
237 node = node->rb_right; 231 node = node->rb_right;
238 while ((left = node->rb_left) != NULL) 232 while ((left = node->rb_left) != NULL)
239 node = left; 233 node = left;
234
235 if (rb_parent(old)) {
236 if (rb_parent(old)->rb_left == old)
237 rb_parent(old)->rb_left = node;
238 else
239 rb_parent(old)->rb_right = node;
240 } else
241 root->rb_node = node;
242
240 child = node->rb_right; 243 child = node->rb_right;
241 parent = rb_parent(node); 244 parent = rb_parent(node);
242 color = rb_color(node); 245 color = rb_color(node);
243 246
244 if (child)
245 rb_set_parent(child, parent);
246 if (parent == old) { 247 if (parent == old) {
247 parent->rb_right = child;
248 parent = node; 248 parent = node;
249 } else 249 } else {
250 if (child)
251 rb_set_parent(child, parent);
250 parent->rb_left = child; 252 parent->rb_left = child;
251 253
254 node->rb_right = old->rb_right;
255 rb_set_parent(old->rb_right, node);
256 }
257
252 node->rb_parent_color = old->rb_parent_color; 258 node->rb_parent_color = old->rb_parent_color;
253 node->rb_right = old->rb_right;
254 node->rb_left = old->rb_left; 259 node->rb_left = old->rb_left;
255
256 if (rb_parent(old))
257 {
258 if (rb_parent(old)->rb_left == old)
259 rb_parent(old)->rb_left = node;
260 else
261 rb_parent(old)->rb_right = node;
262 } else
263 root->rb_node = node;
264
265 rb_set_parent(old->rb_left, node); 260 rb_set_parent(old->rb_left, node);
266 if (old->rb_right) 261
267 rb_set_parent(old->rb_right, node);
268 goto color; 262 goto color;
269 } 263 }
270 264
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b7b449dafbe5..0d475d8167bf 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -314,6 +314,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
314 miter->__sg = sgl; 314 miter->__sg = sgl;
315 miter->__nents = nents; 315 miter->__nents = nents;
316 miter->__offset = 0; 316 miter->__offset = 0;
317 WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
317 miter->__flags = flags; 318 miter->__flags = flags;
318} 319}
319EXPORT_SYMBOL(sg_miter_start); 320EXPORT_SYMBOL(sg_miter_start);
@@ -347,9 +348,12 @@ bool sg_miter_next(struct sg_mapping_iter *miter)
347 sg_miter_stop(miter); 348 sg_miter_stop(miter);
348 349
349 /* get to the next sg if necessary. __offset is adjusted by stop */ 350 /* get to the next sg if necessary. __offset is adjusted by stop */
350 if (miter->__offset == miter->__sg->length && --miter->__nents) { 351 while (miter->__offset == miter->__sg->length) {
351 miter->__sg = sg_next(miter->__sg); 352 if (--miter->__nents) {
352 miter->__offset = 0; 353 miter->__sg = sg_next(miter->__sg);
354 miter->__offset = 0;
355 } else
356 return false;
353 } 357 }
354 358
355 /* map the next page */ 359 /* map the next page */
@@ -391,6 +395,9 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
391 if (miter->addr) { 395 if (miter->addr) {
392 miter->__offset += miter->consumed; 396 miter->__offset += miter->consumed;
393 397
398 if (miter->__flags & SG_MITER_TO_SG)
399 flush_kernel_dcache_page(miter->page);
400
394 if (miter->__flags & SG_MITER_ATOMIC) { 401 if (miter->__flags & SG_MITER_ATOMIC) {
395 WARN_ON(!irqs_disabled()); 402 WARN_ON(!irqs_disabled());
396 kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ); 403 kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
@@ -423,8 +430,14 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
423 unsigned int offset = 0; 430 unsigned int offset = 0;
424 struct sg_mapping_iter miter; 431 struct sg_mapping_iter miter;
425 unsigned long flags; 432 unsigned long flags;
433 unsigned int sg_flags = SG_MITER_ATOMIC;
434
435 if (to_buffer)
436 sg_flags |= SG_MITER_FROM_SG;
437 else
438 sg_flags |= SG_MITER_TO_SG;
426 439
427 sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC); 440 sg_miter_start(&miter, sgl, nents, sg_flags);
428 441
429 local_irq_save(flags); 442 local_irq_save(flags);
430 443
@@ -435,10 +448,8 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
435 448
436 if (to_buffer) 449 if (to_buffer)
437 memcpy(buf + offset, miter.addr, len); 450 memcpy(buf + offset, miter.addr, len);
438 else { 451 else
439 memcpy(miter.addr, buf + offset, len); 452 memcpy(miter.addr, buf + offset, len);
440 flush_kernel_dcache_page(miter.page);
441 }
442 453
443 offset += len; 454 offset += len;
444 } 455 }
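A hypothetical sketch of the sg_mapping_iter usage that the new SG_MITER_TO_SG/SG_MITER_FROM_SG flags expect (not part of this patch). The direction flag is what lets sg_miter_stop() decide when it must flush the kernel dcache:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

/* fill an sg list with @val, page by page, using atomic kmaps */
static void sg_fill(struct scatterlist *sgl, unsigned int nents, u8 val)
{
	struct sg_mapping_iter miter;
	unsigned long flags;

	/* we write into the sg list, so pass SG_MITER_TO_SG */
	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);

	local_irq_save(flags);	/* SG_MITER_ATOMIC mappings need irqs off */
	while (sg_miter_next(&miter))
		memset(miter.addr, val, miter.length);
	sg_miter_stop(&miter);
	local_irq_restore(flags);
}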
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 0f8fc22ed103..4689cb073da4 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
22 * Kernel threads bound to a single CPU can safely use 22 * Kernel threads bound to a single CPU can safely use
23 * smp_processor_id(): 23 * smp_processor_id():
24 */ 24 */
25 if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu))) 25 if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
26 goto out; 26 goto out;
27 27
28 /* 28 /*
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 1f991acc2a05..ac25cd28e807 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -60,8 +60,8 @@ enum dma_sync_target {
60int swiotlb_force; 60int swiotlb_force;
61 61
62/* 62/*
63 * Used to do a quick range check in swiotlb_unmap_single and 63 * Used to do a quick range check in unmap_single and
64 * swiotlb_sync_single_*, to see if the memory was in fact allocated by this 64 * sync_single_*, to see if the memory was in fact allocated by this
65 * API. 65 * API.
66 */ 66 */
67static char *io_tlb_start, *io_tlb_end; 67static char *io_tlb_start, *io_tlb_end;
@@ -114,40 +114,11 @@ setup_io_tlb_npages(char *str)
114__setup("swiotlb=", setup_io_tlb_npages); 114__setup("swiotlb=", setup_io_tlb_npages);
115/* make io_tlb_overflow tunable too? */ 115/* make io_tlb_overflow tunable too? */
116 116
117void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs) 117/* Note that this doesn't work with highmem page */
118{
119 return alloc_bootmem_low_pages(size);
120}
121
122void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
123{
124 return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
125}
126
127dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
128{
129 return paddr;
130}
131
132phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
133{
134 return baddr;
135}
136
137static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, 118static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
138 volatile void *address) 119 volatile void *address)
139{ 120{
140 return swiotlb_phys_to_bus(hwdev, virt_to_phys(address)); 121 return phys_to_dma(hwdev, virt_to_phys(address));
141}
142
143static void *swiotlb_bus_to_virt(dma_addr_t address)
144{
145 return phys_to_virt(swiotlb_bus_to_phys(address));
146}
147
148int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
149{
150 return 0;
151} 122}
152 123
153static void swiotlb_print_info(unsigned long bytes) 124static void swiotlb_print_info(unsigned long bytes)
@@ -183,7 +154,7 @@ swiotlb_init_with_default_size(size_t default_size)
183 /* 154 /*
184 * Get IO TLB memory from the low pages 155 * Get IO TLB memory from the low pages
185 */ 156 */
186 io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs); 157 io_tlb_start = alloc_bootmem_low_pages(bytes);
187 if (!io_tlb_start) 158 if (!io_tlb_start)
188 panic("Cannot allocate SWIOTLB buffer"); 159 panic("Cannot allocate SWIOTLB buffer");
189 io_tlb_end = io_tlb_start + bytes; 160 io_tlb_end = io_tlb_start + bytes;
@@ -239,7 +210,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
239 bytes = io_tlb_nslabs << IO_TLB_SHIFT; 210 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
240 211
241 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { 212 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
242 io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs); 213 io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
214 order);
243 if (io_tlb_start) 215 if (io_tlb_start)
244 break; 216 break;
245 order--; 217 order--;
@@ -309,20 +281,10 @@ cleanup1:
309 return -ENOMEM; 281 return -ENOMEM;
310} 282}
311 283
312static int 284static int is_swiotlb_buffer(phys_addr_t paddr)
313address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
314{ 285{
315 return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); 286 return paddr >= virt_to_phys(io_tlb_start) &&
316} 287 paddr < virt_to_phys(io_tlb_end);
317
318static inline int range_needs_mapping(void *ptr, size_t size)
319{
320 return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size);
321}
322
323static int is_swiotlb_buffer(char *addr)
324{
325 return addr >= io_tlb_start && addr < io_tlb_end;
326} 288}
327 289
328/* 290/*
@@ -341,7 +303,7 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
341 unsigned long flags; 303 unsigned long flags;
342 304
343 while (size) { 305 while (size) {
344 sz = min(PAGE_SIZE - offset, size); 306 sz = min_t(size_t, PAGE_SIZE - offset, size);
345 307
346 local_irq_save(flags); 308 local_irq_save(flags);
347 buffer = kmap_atomic(pfn_to_page(pfn), 309 buffer = kmap_atomic(pfn_to_page(pfn),
@@ -476,7 +438,7 @@ found:
476 * dma_addr is the kernel virtual address of the bounce buffer to unmap. 438 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
477 */ 439 */
478static void 440static void
479unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) 441do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
480{ 442{
481 unsigned long flags; 443 unsigned long flags;
482 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 444 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -549,18 +511,15 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
549 dma_addr_t dev_addr; 511 dma_addr_t dev_addr;
550 void *ret; 512 void *ret;
551 int order = get_order(size); 513 int order = get_order(size);
552 u64 dma_mask = DMA_32BIT_MASK; 514 u64 dma_mask = DMA_BIT_MASK(32);
553 515
554 if (hwdev && hwdev->coherent_dma_mask) 516 if (hwdev && hwdev->coherent_dma_mask)
555 dma_mask = hwdev->coherent_dma_mask; 517 dma_mask = hwdev->coherent_dma_mask;
556 518
557 ret = (void *)__get_free_pages(flags, order); 519 ret = (void *)__get_free_pages(flags, order);
558 if (ret && 520 if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
559 !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
560 size)) {
561 /* 521 /*
562 * The allocated memory isn't reachable by the device. 522 * The allocated memory isn't reachable by the device.
563 * Fall back on swiotlb_map_single().
564 */ 523 */
565 free_pages((unsigned long) ret, order); 524 free_pages((unsigned long) ret, order);
566 ret = NULL; 525 ret = NULL;
@@ -568,9 +527,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
568 if (!ret) { 527 if (!ret) {
569 /* 528 /*
570 * We are either out of memory or the device can't DMA 529 * We are either out of memory or the device can't DMA
571 * to GFP_DMA memory; fall back on 530 * to GFP_DMA memory; fall back on map_single(), which
572 * swiotlb_map_single(), which will grab memory from 531 * will grab memory from the lowest available address range.
573 * the lowest available address range.
574 */ 532 */
575 ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE); 533 ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
576 if (!ret) 534 if (!ret)
@@ -581,13 +539,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
581 dev_addr = swiotlb_virt_to_bus(hwdev, ret); 539 dev_addr = swiotlb_virt_to_bus(hwdev, ret);
582 540
583 /* Confirm address can be DMA'd by device */ 541 /* Confirm address can be DMA'd by device */
584 if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) { 542 if (dev_addr + size > dma_mask) {
585 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", 543 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
586 (unsigned long long)dma_mask, 544 (unsigned long long)dma_mask,
587 (unsigned long long)dev_addr); 545 (unsigned long long)dev_addr);
588 546
589 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 547 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
590 unmap_single(hwdev, ret, size, DMA_TO_DEVICE); 548 do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
591 return NULL; 549 return NULL;
592 } 550 }
593 *dma_handle = dev_addr; 551 *dma_handle = dev_addr;
@@ -597,14 +555,16 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
597 555
598void 556void
599swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, 557swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
600 dma_addr_t dma_handle) 558 dma_addr_t dev_addr)
601{ 559{
560 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
561
602 WARN_ON(irqs_disabled()); 562 WARN_ON(irqs_disabled());
603 if (!is_swiotlb_buffer(vaddr)) 563 if (!is_swiotlb_buffer(paddr))
604 free_pages((unsigned long) vaddr, get_order(size)); 564 free_pages((unsigned long)vaddr, get_order(size));
605 else 565 else
606 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 566 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
607 unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); 567 do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
608} 568}
609EXPORT_SYMBOL(swiotlb_free_coherent); 569EXPORT_SYMBOL(swiotlb_free_coherent);
610 570
@@ -621,12 +581,15 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
621 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at " 581 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
622 "device %s\n", size, dev ? dev_name(dev) : "?"); 582 "device %s\n", size, dev ? dev_name(dev) : "?");
623 583
624 if (size > io_tlb_overflow && do_panic) { 584 if (size <= io_tlb_overflow || !do_panic)
625 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) 585 return;
626 panic("DMA: Memory would be corrupted\n"); 586
627 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) 587 if (dir == DMA_BIDIRECTIONAL)
628 panic("DMA: Random memory would be DMAed\n"); 588 panic("DMA: Random memory could be DMA accessed\n");
629 } 589 if (dir == DMA_FROM_DEVICE)
590 panic("DMA: Random memory could be DMA written\n");
591 if (dir == DMA_TO_DEVICE)
592 panic("DMA: Random memory could be DMA read\n");
630} 593}
631 594
632/* 595/*
@@ -634,88 +597,92 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
634 * physical address to use is returned. 597 * physical address to use is returned.
635 * 598 *
636 * Once the device is given the dma address, the device owns this memory until 599 * Once the device is given the dma address, the device owns this memory until
637 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. 600 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
638 */ 601 */
639dma_addr_t 602dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
640swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, 603 unsigned long offset, size_t size,
641 int dir, struct dma_attrs *attrs) 604 enum dma_data_direction dir,
605 struct dma_attrs *attrs)
642{ 606{
643 dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr); 607 phys_addr_t phys = page_to_phys(page) + offset;
608 dma_addr_t dev_addr = phys_to_dma(dev, phys);
644 void *map; 609 void *map;
645 610
646 BUG_ON(dir == DMA_NONE); 611 BUG_ON(dir == DMA_NONE);
647 /* 612 /*
648 * If the pointer passed in happens to be in the device's DMA window, 613 * If the address happens to be in the device's DMA window,
649 * we can safely return the device addr and not worry about bounce 614 * we can safely return the device addr and not worry about bounce
650 * buffering it. 615 * buffering it.
651 */ 616 */
652 if (!address_needs_mapping(hwdev, dev_addr, size) && 617 if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
653 !range_needs_mapping(ptr, size))
654 return dev_addr; 618 return dev_addr;
655 619
656 /* 620 /*
657 * Oh well, have to allocate and map a bounce buffer. 621 * Oh well, have to allocate and map a bounce buffer.
658 */ 622 */
659 map = map_single(hwdev, virt_to_phys(ptr), size, dir); 623 map = map_single(dev, phys, size, dir);
660 if (!map) { 624 if (!map) {
661 swiotlb_full(hwdev, size, dir, 1); 625 swiotlb_full(dev, size, dir, 1);
662 map = io_tlb_overflow_buffer; 626 map = io_tlb_overflow_buffer;
663 } 627 }
664 628
665 dev_addr = swiotlb_virt_to_bus(hwdev, map); 629 dev_addr = swiotlb_virt_to_bus(dev, map);
666 630
667 /* 631 /*
668 * Ensure that the address returned is DMA'ble 632 * Ensure that the address returned is DMA'ble
669 */ 633 */
670 if (address_needs_mapping(hwdev, dev_addr, size)) 634 if (!dma_capable(dev, dev_addr, size))
671 panic("map_single: bounce buffer is not DMA'ble"); 635 panic("map_single: bounce buffer is not DMA'ble");
672 636
673 return dev_addr; 637 return dev_addr;
674} 638}
675EXPORT_SYMBOL(swiotlb_map_single_attrs); 639EXPORT_SYMBOL_GPL(swiotlb_map_page);
676
677dma_addr_t
678swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
679{
680 return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
681}
682EXPORT_SYMBOL(swiotlb_map_single);
683 640
684/* 641/*
685 * Unmap a single streaming mode DMA translation. The dma_addr and size must 642 * Unmap a single streaming mode DMA translation. The dma_addr and size must
686 * match what was provided for in a previous swiotlb_map_single call. All 643 * match what was provided for in a previous swiotlb_map_page call. All
687 * other usages are undefined. 644 * other usages are undefined.
688 * 645 *
689 * After this call, reads by the cpu to the buffer are guaranteed to see 646 * After this call, reads by the cpu to the buffer are guaranteed to see
690 * whatever the device wrote there. 647 * whatever the device wrote there.
691 */ 648 */
692void 649static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
693swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, 650 size_t size, int dir)
694 size_t size, int dir, struct dma_attrs *attrs)
695{ 651{
696 char *dma_addr = swiotlb_bus_to_virt(dev_addr); 652 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
697 653
698 BUG_ON(dir == DMA_NONE); 654 BUG_ON(dir == DMA_NONE);
699 if (is_swiotlb_buffer(dma_addr)) 655
700 unmap_single(hwdev, dma_addr, size, dir); 656 if (is_swiotlb_buffer(paddr)) {
701 else if (dir == DMA_FROM_DEVICE) 657 do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
702 dma_mark_clean(dma_addr, size); 658 return;
659 }
660
661 if (dir != DMA_FROM_DEVICE)
662 return;
663
664 /*
 665 * phys_to_virt doesn't work with highmem pages, but we could
 666 * call dma_mark_clean() with a highmem page here. However, we
667 * are fine since dma_mark_clean() is null on POWERPC. We can
668 * make dma_mark_clean() take a physical address if necessary.
669 */
670 dma_mark_clean(phys_to_virt(paddr), size);
703} 671}
704EXPORT_SYMBOL(swiotlb_unmap_single_attrs);
705 672
706void 673void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
707swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, 674 size_t size, enum dma_data_direction dir,
708 int dir) 675 struct dma_attrs *attrs)
709{ 676{
710 return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL); 677 unmap_single(hwdev, dev_addr, size, dir);
711} 678}
712EXPORT_SYMBOL(swiotlb_unmap_single); 679EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
713 680
714/* 681/*
715 * Make physical memory consistent for a single streaming mode DMA translation 682 * Make physical memory consistent for a single streaming mode DMA translation
716 * after a transfer. 683 * after a transfer.
717 * 684 *
718 * If you perform a swiotlb_map_single() but wish to interrogate the buffer 685 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
719 * using the cpu, yet do not wish to teardown the dma mapping, you must 686 * using the cpu, yet do not wish to teardown the dma mapping, you must
720 * call this function before doing so. At the next point you give the dma 687 * call this function before doing so. At the next point you give the dma
721 * address back to the card, you must first perform a 688 * address back to the card, you must first perform a
@@ -725,18 +692,24 @@ static void
725swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, 692swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
726 size_t size, int dir, int target) 693 size_t size, int dir, int target)
727{ 694{
728 char *dma_addr = swiotlb_bus_to_virt(dev_addr); 695 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
729 696
730 BUG_ON(dir == DMA_NONE); 697 BUG_ON(dir == DMA_NONE);
731 if (is_swiotlb_buffer(dma_addr)) 698
732 sync_single(hwdev, dma_addr, size, dir, target); 699 if (is_swiotlb_buffer(paddr)) {
733 else if (dir == DMA_FROM_DEVICE) 700 sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
734 dma_mark_clean(dma_addr, size); 701 return;
702 }
703
704 if (dir != DMA_FROM_DEVICE)
705 return;
706
707 dma_mark_clean(phys_to_virt(paddr), size);
735} 708}
736 709
737void 710void
738swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, 711swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
739 size_t size, int dir) 712 size_t size, enum dma_data_direction dir)
740{ 713{
741 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); 714 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
742} 715}
@@ -744,7 +717,7 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
744 717
745void 718void
746swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, 719swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
747 size_t size, int dir) 720 size_t size, enum dma_data_direction dir)
748{ 721{
749 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); 722 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
750} 723}
@@ -758,18 +731,13 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
758 unsigned long offset, size_t size, 731 unsigned long offset, size_t size,
759 int dir, int target) 732 int dir, int target)
760{ 733{
761 char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset; 734 swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
762
763 BUG_ON(dir == DMA_NONE);
764 if (is_swiotlb_buffer(dma_addr))
765 sync_single(hwdev, dma_addr, size, dir, target);
766 else if (dir == DMA_FROM_DEVICE)
767 dma_mark_clean(dma_addr, size);
768} 735}
769 736
770void 737void
771swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, 738swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
772 unsigned long offset, size_t size, int dir) 739 unsigned long offset, size_t size,
740 enum dma_data_direction dir)
773{ 741{
774 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, 742 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
775 SYNC_FOR_CPU); 743 SYNC_FOR_CPU);
@@ -778,7 +746,8 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
778 746
779void 747void
780swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, 748swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
781 unsigned long offset, size_t size, int dir) 749 unsigned long offset, size_t size,
750 enum dma_data_direction dir)
782{ 751{
783 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, 752 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
784 SYNC_FOR_DEVICE); 753 SYNC_FOR_DEVICE);
@@ -787,7 +756,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
787 756
788/* 757/*
789 * Map a set of buffers described by scatterlist in streaming mode for DMA. 758 * Map a set of buffers described by scatterlist in streaming mode for DMA.
790 * This is the scatter-gather version of the above swiotlb_map_single 759 * This is the scatter-gather version of the above swiotlb_map_page
791 * interface. Here the scatter gather list elements are each tagged with the 760 * interface. Here the scatter gather list elements are each tagged with the
792 * appropriate dma address and length. They are obtained via 761 * appropriate dma address and length. They are obtained via
793 * sg_dma_{address,length}(SG). 762 * sg_dma_{address,length}(SG).
@@ -798,12 +767,12 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
798 * The routine returns the number of addr/length pairs actually 767 * The routine returns the number of addr/length pairs actually
799 * used, at most nents. 768 * used, at most nents.
800 * 769 *
801 * Device ownership issues as mentioned above for swiotlb_map_single are the 770 * Device ownership issues as mentioned above for swiotlb_map_page are the
802 * same here. 771 * same here.
803 */ 772 */
804int 773int
805swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, 774swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
806 int dir, struct dma_attrs *attrs) 775 enum dma_data_direction dir, struct dma_attrs *attrs)
807{ 776{
808 struct scatterlist *sg; 777 struct scatterlist *sg;
809 int i; 778 int i;
@@ -811,11 +780,11 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
811 BUG_ON(dir == DMA_NONE); 780 BUG_ON(dir == DMA_NONE);
812 781
813 for_each_sg(sgl, sg, nelems, i) { 782 for_each_sg(sgl, sg, nelems, i) {
814 void *addr = sg_virt(sg); 783 phys_addr_t paddr = sg_phys(sg);
815 dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, addr); 784 dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
816 785
817 if (range_needs_mapping(addr, sg->length) || 786 if (swiotlb_force ||
818 address_needs_mapping(hwdev, dev_addr, sg->length)) { 787 !dma_capable(hwdev, dev_addr, sg->length)) {
819 void *map = map_single(hwdev, sg_phys(sg), 788 void *map = map_single(hwdev, sg_phys(sg),
820 sg->length, dir); 789 sg->length, dir);
821 if (!map) { 790 if (!map) {
@@ -846,24 +815,20 @@ EXPORT_SYMBOL(swiotlb_map_sg);
846 815
847/* 816/*
848 * Unmap a set of streaming mode DMA translations. Again, cpu read rules 817 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
849 * concerning calls here are the same as for swiotlb_unmap_single() above. 818 * concerning calls here are the same as for swiotlb_unmap_page() above.
850 */ 819 */
851void 820void
852swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, 821swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
853 int nelems, int dir, struct dma_attrs *attrs) 822 int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
854{ 823{
855 struct scatterlist *sg; 824 struct scatterlist *sg;
856 int i; 825 int i;
857 826
858 BUG_ON(dir == DMA_NONE); 827 BUG_ON(dir == DMA_NONE);
859 828
860 for_each_sg(sgl, sg, nelems, i) { 829 for_each_sg(sgl, sg, nelems, i)
861 if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg))) 830 unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
862 unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), 831
863 sg->dma_length, dir);
864 else if (dir == DMA_FROM_DEVICE)
865 dma_mark_clean(sg_virt(sg), sg->dma_length);
866 }
867} 832}
868EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); 833EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
869 834
@@ -889,20 +854,14 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
889 struct scatterlist *sg; 854 struct scatterlist *sg;
890 int i; 855 int i;
891 856
892 BUG_ON(dir == DMA_NONE); 857 for_each_sg(sgl, sg, nelems, i)
893 858 swiotlb_sync_single(hwdev, sg->dma_address,
894 for_each_sg(sgl, sg, nelems, i) {
895 if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg)))
896 sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
897 sg->dma_length, dir, target); 859 sg->dma_length, dir, target);
898 else if (dir == DMA_FROM_DEVICE)
899 dma_mark_clean(sg_virt(sg), sg->dma_length);
900 }
901} 860}
902 861
903void 862void
904swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, 863swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
905 int nelems, int dir) 864 int nelems, enum dma_data_direction dir)
906{ 865{
907 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); 866 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
908} 867}
@@ -910,7 +869,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
910 869
911void 870void
912swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, 871swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
913 int nelems, int dir) 872 int nelems, enum dma_data_direction dir)
914{ 873{
915 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); 874 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
916} 875}
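
The swiotlb hunks above replace the old virtual-address plumbing (swiotlb_virt_to_bus()/swiotlb_bus_to_virt() on kernel pointers) with physical addresses plus the generic dma_capable() test, and switch the exported entry points to enum dma_data_direction. As a minimal sketch using only the helpers visible in the diff (phys_to_dma(), dma_capable(), map_single(), swiotlb_force) and abbreviating the overflow handling, the mapping decision in swiotlb_map_page() now boils down to:

	phys_addr_t phys = page_to_phys(page) + offset;	/* work in physical addresses */
	dma_addr_t dev_addr = phys_to_dma(dev, phys);

	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
		return dev_addr;		/* device can reach the buffer: no bounce */

	map = map_single(dev, phys, size, dir);	/* otherwise copy through the io_tlb */
	if (!map)
		map = io_tlb_overflow_buffer;	/* swiotlb_full() has reported the failure */

	return swiotlb_virt_to_bus(dev, map);	/* the bounce buffer itself must be DMA'ble */

The unmap and sync paths mirror this: dma_to_phys() recovers the physical address, is_swiotlb_buffer() decides whether a bounce buffer must be copied back, and only non-bounce DMA_FROM_DEVICE buffers are handed to dma_mark_clean().
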
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0fbd0121d91d..a1941f8d205f 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -25,6 +25,7 @@
25#include <linux/kallsyms.h> 25#include <linux/kallsyms.h>
26#include <linux/uaccess.h> 26#include <linux/uaccess.h>
27#include <linux/ioport.h> 27#include <linux/ioport.h>
28#include <net/addrconf.h>
28 29
29#include <asm/page.h> /* for PAGE_SIZE */ 30#include <asm/page.h> /* for PAGE_SIZE */
30#include <asm/div64.h> 31#include <asm/div64.h>
@@ -396,7 +397,40 @@ static noinline char* put_dec(char *buf, unsigned long long num)
396#define SMALL 32 /* Must be 32 == 0x20 */ 397#define SMALL 32 /* Must be 32 == 0x20 */
397#define SPECIAL 64 /* 0x */ 398#define SPECIAL 64 /* 0x */
398 399
399static char *number(char *buf, char *end, unsigned long long num, int base, int size, int precision, int type) 400enum format_type {
401 FORMAT_TYPE_NONE, /* Just a string part */
402 FORMAT_TYPE_WIDTH,
403 FORMAT_TYPE_PRECISION,
404 FORMAT_TYPE_CHAR,
405 FORMAT_TYPE_STR,
406 FORMAT_TYPE_PTR,
407 FORMAT_TYPE_PERCENT_CHAR,
408 FORMAT_TYPE_INVALID,
409 FORMAT_TYPE_LONG_LONG,
410 FORMAT_TYPE_ULONG,
411 FORMAT_TYPE_LONG,
412 FORMAT_TYPE_UBYTE,
413 FORMAT_TYPE_BYTE,
414 FORMAT_TYPE_USHORT,
415 FORMAT_TYPE_SHORT,
416 FORMAT_TYPE_UINT,
417 FORMAT_TYPE_INT,
418 FORMAT_TYPE_NRCHARS,
419 FORMAT_TYPE_SIZE_T,
420 FORMAT_TYPE_PTRDIFF
421};
422
423struct printf_spec {
424 enum format_type type;
425 int flags; /* flags to number() */
426 int field_width; /* width of output field */
427 int base;
428 int precision; /* # of digits/chars */
429 int qualifier;
430};
431
432static char *number(char *buf, char *end, unsigned long long num,
433 struct printf_spec spec)
400{ 434{
401 /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ 435 /* we are called with base 8, 10 or 16, only, thus don't need "G..." */
402 static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ 436 static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
@@ -404,32 +438,32 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
404 char tmp[66]; 438 char tmp[66];
405 char sign; 439 char sign;
406 char locase; 440 char locase;
407 int need_pfx = ((type & SPECIAL) && base != 10); 441 int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
408 int i; 442 int i;
409 443
410 /* locase = 0 or 0x20. ORing digits or letters with 'locase' 444 /* locase = 0 or 0x20. ORing digits or letters with 'locase'
411 * produces same digits or (maybe lowercased) letters */ 445 * produces same digits or (maybe lowercased) letters */
412 locase = (type & SMALL); 446 locase = (spec.flags & SMALL);
413 if (type & LEFT) 447 if (spec.flags & LEFT)
414 type &= ~ZEROPAD; 448 spec.flags &= ~ZEROPAD;
415 sign = 0; 449 sign = 0;
416 if (type & SIGN) { 450 if (spec.flags & SIGN) {
417 if ((signed long long) num < 0) { 451 if ((signed long long) num < 0) {
418 sign = '-'; 452 sign = '-';
419 num = - (signed long long) num; 453 num = - (signed long long) num;
420 size--; 454 spec.field_width--;
421 } else if (type & PLUS) { 455 } else if (spec.flags & PLUS) {
422 sign = '+'; 456 sign = '+';
423 size--; 457 spec.field_width--;
424 } else if (type & SPACE) { 458 } else if (spec.flags & SPACE) {
425 sign = ' '; 459 sign = ' ';
426 size--; 460 spec.field_width--;
427 } 461 }
428 } 462 }
429 if (need_pfx) { 463 if (need_pfx) {
430 size--; 464 spec.field_width--;
431 if (base == 16) 465 if (spec.base == 16)
432 size--; 466 spec.field_width--;
433 } 467 }
434 468
435 /* generate full string in tmp[], in reverse order */ 469 /* generate full string in tmp[], in reverse order */
@@ -441,10 +475,10 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
441 tmp[i++] = (digits[do_div(num,base)] | locase); 475 tmp[i++] = (digits[do_div(num,base)] | locase);
442 } while (num != 0); 476 } while (num != 0);
443 */ 477 */
444 else if (base != 10) { /* 8 or 16 */ 478 else if (spec.base != 10) { /* 8 or 16 */
445 int mask = base - 1; 479 int mask = spec.base - 1;
446 int shift = 3; 480 int shift = 3;
447 if (base == 16) shift = 4; 481 if (spec.base == 16) shift = 4;
448 do { 482 do {
449 tmp[i++] = (digits[((unsigned char)num) & mask] | locase); 483 tmp[i++] = (digits[((unsigned char)num) & mask] | locase);
450 num >>= shift; 484 num >>= shift;
@@ -454,12 +488,12 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
454 } 488 }
455 489
456 /* printing 100 using %2d gives "100", not "00" */ 490 /* printing 100 using %2d gives "100", not "00" */
457 if (i > precision) 491 if (i > spec.precision)
458 precision = i; 492 spec.precision = i;
459 /* leading space padding */ 493 /* leading space padding */
460 size -= precision; 494 spec.field_width -= spec.precision;
461 if (!(type & (ZEROPAD+LEFT))) { 495 if (!(spec.flags & (ZEROPAD+LEFT))) {
462 while(--size >= 0) { 496 while(--spec.field_width >= 0) {
463 if (buf < end) 497 if (buf < end)
464 *buf = ' '; 498 *buf = ' ';
465 ++buf; 499 ++buf;
@@ -476,23 +510,23 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
476 if (buf < end) 510 if (buf < end)
477 *buf = '0'; 511 *buf = '0';
478 ++buf; 512 ++buf;
479 if (base == 16) { 513 if (spec.base == 16) {
480 if (buf < end) 514 if (buf < end)
481 *buf = ('X' | locase); 515 *buf = ('X' | locase);
482 ++buf; 516 ++buf;
483 } 517 }
484 } 518 }
485 /* zero or space padding */ 519 /* zero or space padding */
486 if (!(type & LEFT)) { 520 if (!(spec.flags & LEFT)) {
487 char c = (type & ZEROPAD) ? '0' : ' '; 521 char c = (spec.flags & ZEROPAD) ? '0' : ' ';
488 while (--size >= 0) { 522 while (--spec.field_width >= 0) {
489 if (buf < end) 523 if (buf < end)
490 *buf = c; 524 *buf = c;
491 ++buf; 525 ++buf;
492 } 526 }
493 } 527 }
494 /* hmm even more zero padding? */ 528 /* hmm even more zero padding? */
495 while (i <= --precision) { 529 while (i <= --spec.precision) {
496 if (buf < end) 530 if (buf < end)
497 *buf = '0'; 531 *buf = '0';
498 ++buf; 532 ++buf;
@@ -504,7 +538,7 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
504 ++buf; 538 ++buf;
505 } 539 }
506 /* trailing space padding */ 540 /* trailing space padding */
507 while (--size >= 0) { 541 while (--spec.field_width >= 0) {
508 if (buf < end) 542 if (buf < end)
509 *buf = ' '; 543 *buf = ' ';
510 ++buf; 544 ++buf;
@@ -512,17 +546,17 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
512 return buf; 546 return buf;
513} 547}
514 548
515static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags) 549static char *string(char *buf, char *end, char *s, struct printf_spec spec)
516{ 550{
517 int len, i; 551 int len, i;
518 552
519 if ((unsigned long)s < PAGE_SIZE) 553 if ((unsigned long)s < PAGE_SIZE)
520 s = "<NULL>"; 554 s = "<NULL>";
521 555
522 len = strnlen(s, precision); 556 len = strnlen(s, spec.precision);
523 557
524 if (!(flags & LEFT)) { 558 if (!(spec.flags & LEFT)) {
525 while (len < field_width--) { 559 while (len < spec.field_width--) {
526 if (buf < end) 560 if (buf < end)
527 *buf = ' '; 561 *buf = ' ';
528 ++buf; 562 ++buf;
@@ -533,7 +567,7 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio
533 *buf = *s; 567 *buf = *s;
534 ++buf; ++s; 568 ++buf; ++s;
535 } 569 }
536 while (len < field_width--) { 570 while (len < spec.field_width--) {
537 if (buf < end) 571 if (buf < end)
538 *buf = ' '; 572 *buf = ' ';
539 ++buf; 573 ++buf;
@@ -541,21 +575,27 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio
541 return buf; 575 return buf;
542} 576}
543 577
544static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags) 578static char *symbol_string(char *buf, char *end, void *ptr,
579 struct printf_spec spec, char ext)
545{ 580{
546 unsigned long value = (unsigned long) ptr; 581 unsigned long value = (unsigned long) ptr;
547#ifdef CONFIG_KALLSYMS 582#ifdef CONFIG_KALLSYMS
548 char sym[KSYM_SYMBOL_LEN]; 583 char sym[KSYM_SYMBOL_LEN];
549 sprint_symbol(sym, value); 584 if (ext != 'f')
550 return string(buf, end, sym, field_width, precision, flags); 585 sprint_symbol(sym, value);
586 else
587 kallsyms_lookup(value, NULL, NULL, NULL, sym);
588 return string(buf, end, sym, spec);
551#else 589#else
552 field_width = 2*sizeof(void *); 590 spec.field_width = 2*sizeof(void *);
553 flags |= SPECIAL | SMALL | ZEROPAD; 591 spec.flags |= SPECIAL | SMALL | ZEROPAD;
554 return number(buf, end, value, 16, field_width, precision, flags); 592 spec.base = 16;
593 return number(buf, end, value, spec);
555#endif 594#endif
556} 595}
557 596
558static char *resource_string(char *buf, char *end, struct resource *res, int field_width, int precision, int flags) 597static char *resource_string(char *buf, char *end, struct resource *res,
598 struct printf_spec spec)
559{ 599{
560#ifndef IO_RSRC_PRINTK_SIZE 600#ifndef IO_RSRC_PRINTK_SIZE
561#define IO_RSRC_PRINTK_SIZE 4 601#define IO_RSRC_PRINTK_SIZE 4
@@ -564,7 +604,11 @@ static char *resource_string(char *buf, char *end, struct resource *res, int fie
564#ifndef MEM_RSRC_PRINTK_SIZE 604#ifndef MEM_RSRC_PRINTK_SIZE
565#define MEM_RSRC_PRINTK_SIZE 8 605#define MEM_RSRC_PRINTK_SIZE 8
566#endif 606#endif
567 607 struct printf_spec num_spec = {
608 .base = 16,
609 .precision = -1,
610 .flags = SPECIAL | SMALL | ZEROPAD,
611 };
568 /* room for the actual numbers, the two "0x", -, [, ] and the final zero */ 612 /* room for the actual numbers, the two "0x", -, [, ] and the final zero */
569 char sym[4*sizeof(resource_size_t) + 8]; 613 char sym[4*sizeof(resource_size_t) + 8];
570 char *p = sym, *pend = sym + sizeof(sym); 614 char *p = sym, *pend = sym + sizeof(sym);
@@ -576,69 +620,174 @@ static char *resource_string(char *buf, char *end, struct resource *res, int fie
576 size = MEM_RSRC_PRINTK_SIZE; 620 size = MEM_RSRC_PRINTK_SIZE;
577 621
578 *p++ = '['; 622 *p++ = '[';
579 p = number(p, pend, res->start, 16, size, -1, SPECIAL | SMALL | ZEROPAD); 623 num_spec.field_width = size;
624 p = number(p, pend, res->start, num_spec);
580 *p++ = '-'; 625 *p++ = '-';
581 p = number(p, pend, res->end, 16, size, -1, SPECIAL | SMALL | ZEROPAD); 626 p = number(p, pend, res->end, num_spec);
582 *p++ = ']'; 627 *p++ = ']';
583 *p = 0; 628 *p = 0;
584 629
585 return string(buf, end, sym, field_width, precision, flags); 630 return string(buf, end, sym, spec);
586} 631}
587 632
588static char *mac_address_string(char *buf, char *end, u8 *addr, int field_width, 633static char *mac_address_string(char *buf, char *end, u8 *addr,
589 int precision, int flags) 634 struct printf_spec spec, const char *fmt)
590{ 635{
591 char mac_addr[6 * 3]; /* (6 * 2 hex digits), 5 colons and trailing zero */ 636 char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
592 char *p = mac_addr; 637 char *p = mac_addr;
593 int i; 638 int i;
594 639
595 for (i = 0; i < 6; i++) { 640 for (i = 0; i < 6; i++) {
596 p = pack_hex_byte(p, addr[i]); 641 p = pack_hex_byte(p, addr[i]);
597 if (!(flags & SPECIAL) && i != 5) 642 if (fmt[0] == 'M' && i != 5)
598 *p++ = ':'; 643 *p++ = ':';
599 } 644 }
600 *p = '\0'; 645 *p = '\0';
601 646
602 return string(buf, end, mac_addr, field_width, precision, flags & ~SPECIAL); 647 return string(buf, end, mac_addr, spec);
603} 648}
604 649
605static char *ip6_addr_string(char *buf, char *end, u8 *addr, int field_width, 650static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
606 int precision, int flags)
607{ 651{
608 char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */
609 char *p = ip6_addr;
610 int i; 652 int i;
611 653
654 for (i = 0; i < 4; i++) {
655 char temp[3]; /* hold each IP quad in reverse order */
656 int digits = put_dec_trunc(temp, addr[i]) - temp;
657 if (leading_zeros) {
658 if (digits < 3)
659 *p++ = '0';
660 if (digits < 2)
661 *p++ = '0';
662 }
663 /* reverse the digits in the quad */
664 while (digits--)
665 *p++ = temp[digits];
666 if (i < 3)
667 *p++ = '.';
668 }
669
670 *p = '\0';
671 return p;
672}
673
674static char *ip6_compressed_string(char *p, const char *addr)
675{
676 int i;
677 int j;
678 int range;
679 unsigned char zerolength[8];
680 int longest = 1;
681 int colonpos = -1;
682 u16 word;
683 u8 hi;
684 u8 lo;
685 bool needcolon = false;
686 bool useIPv4;
687 struct in6_addr in6;
688
689 memcpy(&in6, addr, sizeof(struct in6_addr));
690
691 useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);
692
693 memset(zerolength, 0, sizeof(zerolength));
694
695 if (useIPv4)
696 range = 6;
697 else
698 range = 8;
699
700 /* find position of longest 0 run */
701 for (i = 0; i < range; i++) {
702 for (j = i; j < range; j++) {
703 if (in6.s6_addr16[j] != 0)
704 break;
705 zerolength[i]++;
706 }
707 }
708 for (i = 0; i < range; i++) {
709 if (zerolength[i] > longest) {
710 longest = zerolength[i];
711 colonpos = i;
712 }
713 }
714
715 /* emit address */
716 for (i = 0; i < range; i++) {
717 if (i == colonpos) {
718 if (needcolon || i == 0)
719 *p++ = ':';
720 *p++ = ':';
721 needcolon = false;
722 i += longest - 1;
723 continue;
724 }
725 if (needcolon) {
726 *p++ = ':';
727 needcolon = false;
728 }
729 /* hex u16 without leading 0s */
730 word = ntohs(in6.s6_addr16[i]);
731 hi = word >> 8;
732 lo = word & 0xff;
733 if (hi) {
734 if (hi > 0x0f)
735 p = pack_hex_byte(p, hi);
736 else
737 *p++ = hex_asc_lo(hi);
738 }
739 if (hi || lo > 0x0f)
740 p = pack_hex_byte(p, lo);
741 else
742 *p++ = hex_asc_lo(lo);
743 needcolon = true;
744 }
745
746 if (useIPv4) {
747 if (needcolon)
748 *p++ = ':';
749 p = ip4_string(p, &in6.s6_addr[12], false);
750 }
751
752 *p = '\0';
753 return p;
754}
755
756static char *ip6_string(char *p, const char *addr, const char *fmt)
757{
758 int i;
612 for (i = 0; i < 8; i++) { 759 for (i = 0; i < 8; i++) {
613 p = pack_hex_byte(p, addr[2 * i]); 760 p = pack_hex_byte(p, *addr++);
614 p = pack_hex_byte(p, addr[2 * i + 1]); 761 p = pack_hex_byte(p, *addr++);
615 if (!(flags & SPECIAL) && i != 7) 762 if (fmt[0] == 'I' && i != 7)
616 *p++ = ':'; 763 *p++ = ':';
617 } 764 }
765
618 *p = '\0'; 766 *p = '\0';
767 return p;
768}
769
770static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
771 struct printf_spec spec, const char *fmt)
772{
773 char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
619 774
620 return string(buf, end, ip6_addr, field_width, precision, flags & ~SPECIAL); 775 if (fmt[0] == 'I' && fmt[2] == 'c')
776 ip6_compressed_string(ip6_addr, addr);
777 else
778 ip6_string(ip6_addr, addr, fmt);
779
780 return string(buf, end, ip6_addr, spec);
621} 781}
622 782
623static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width, 783static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
624 int precision, int flags) 784 struct printf_spec spec, const char *fmt)
625{ 785{
626 char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */ 786 char ip4_addr[sizeof("255.255.255.255")];
627 char temp[3]; /* hold each IP quad in reverse order */
628 char *p = ip4_addr;
629 int i, digits;
630 787
631 for (i = 0; i < 4; i++) { 788 ip4_string(ip4_addr, addr, fmt[0] == 'i');
632 digits = put_dec_trunc(temp, addr[i]) - temp;
633 /* reverse the digits in the quad */
634 while (digits--)
635 *p++ = temp[digits];
636 if (i != 3)
637 *p++ = '.';
638 }
639 *p = '\0';
640 789
641 return string(buf, end, ip4_addr, field_width, precision, flags & ~SPECIAL); 790 return string(buf, end, ip4_addr, spec);
642} 791}
643 792
644/* 793/*
@@ -648,56 +797,267 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width,
648 * 797 *
649 * Right now we handle: 798 * Right now we handle:
650 * 799 *
651 * - 'F' For symbolic function descriptor pointers 800 * - 'F' For symbolic function descriptor pointers with offset
801 * - 'f' For simple symbolic function names without offset
652 * - 'S' For symbolic direct pointers 802 * - 'S' For symbolic direct pointers
653 * - 'R' For a struct resource pointer, it prints the range of 803 * - 'R' For a struct resource pointer, it prints the range of
654 * addresses (not the name nor the flags) 804 * addresses (not the name nor the flags)
655 * - 'M' For a 6-byte MAC address, it prints the address in the 805 * - 'M' For a 6-byte MAC address, it prints the address in the
656 * usual colon-separated hex notation 806 * usual colon-separated hex notation
657 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way (dot-separated 807 * - 'm' For a 6-byte MAC address, it prints the hex address without colons
658 * decimal for v4 and colon separated network-order 16 bit hex for v6) 808 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
659 * - 'i' [46] for 'raw' IPv4/IPv6 addresses, IPv6 omits the colons, IPv4 is 809 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
660 * currently the same 810 * IPv6 uses colon separated network-order 16 bit hex with leading 0's
661 * 811 * - 'i' [46] for 'raw' IPv4/IPv6 addresses
812 * IPv6 omits the colons (01020304...0f)
813 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
814 * - 'I6c' for IPv6 addresses printed as specified by
815 * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt
662 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 816 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
663 * function pointers are really function descriptors, which contain a 817 * function pointers are really function descriptors, which contain a
664 * pointer to the real address. 818 * pointer to the real address.
665 */ 819 */
666static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags) 820static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
821 struct printf_spec spec)
667{ 822{
668 if (!ptr) 823 if (!ptr)
669 return string(buf, end, "(null)", field_width, precision, flags); 824 return string(buf, end, "(null)", spec);
670 825
671 switch (*fmt) { 826 switch (*fmt) {
672 case 'F': 827 case 'F':
828 case 'f':
673 ptr = dereference_function_descriptor(ptr); 829 ptr = dereference_function_descriptor(ptr);
674 /* Fallthrough */ 830 /* Fallthrough */
675 case 'S': 831 case 'S':
676 return symbol_string(buf, end, ptr, field_width, precision, flags); 832 return symbol_string(buf, end, ptr, spec, *fmt);
677 case 'R': 833 case 'R':
678 return resource_string(buf, end, ptr, field_width, precision, flags); 834 return resource_string(buf, end, ptr, spec);
679 case 'm': 835 case 'M': /* Colon separated: 00:01:02:03:04:05 */
680 flags |= SPECIAL; 836 case 'm': /* Contiguous: 000102030405 */
681 /* Fallthrough */ 837 return mac_address_string(buf, end, ptr, spec, fmt);
682 case 'M': 838 case 'I': /* Formatted IP supported
683 return mac_address_string(buf, end, ptr, field_width, precision, flags); 839 * 4: 1.2.3.4
840 * 6: 0001:0203:...:0708
841 * 6c: 1::708 or 1::1.2.3.4
842 */
843 case 'i': /* Contiguous:
844 * 4: 001.002.003.004
845 * 6: 000102...0f
846 */
847 switch (fmt[1]) {
848 case '6':
849 return ip6_addr_string(buf, end, ptr, spec, fmt);
850 case '4':
851 return ip4_addr_string(buf, end, ptr, spec, fmt);
852 }
853 break;
854 }
855 spec.flags |= SMALL;
856 if (spec.field_width == -1) {
857 spec.field_width = 2*sizeof(void *);
858 spec.flags |= ZEROPAD;
859 }
860 spec.base = 16;
861
862 return number(buf, end, (unsigned long) ptr, spec);
863}
864
865/*
866 * Helper function to decode printf style format.
867 * Each call decodes a token from the format and returns the
868 * number of characters read (or likely the delta where it wants
869 * to go on the next call).
870 * The decoded token is returned through the parameters
871 *
872 * 'h', 'l', or 'L' for integer fields
873 * 'z' support added 23/7/1999 S.H.
874 * 'z' changed to 'Z' --davidm 1/25/99
875 * 't' added for ptrdiff_t
876 *
877 * @fmt: the format string
878 * @type: the type of the token returned
879 * @flags: various flags such as +, -, # tokens..
880 * @field_width: overwritten width
881 * @base: base of the number (octal, hex, ...)
882 * @precision: precision of a number
883 * @qualifier: qualifier of a number (long, size_t, ...)
884 */
885static int format_decode(const char *fmt, struct printf_spec *spec)
886{
887 const char *start = fmt;
888
889 /* we finished early by reading the field width */
890 if (spec->type == FORMAT_TYPE_WIDTH) {
891 if (spec->field_width < 0) {
892 spec->field_width = -spec->field_width;
893 spec->flags |= LEFT;
894 }
895 spec->type = FORMAT_TYPE_NONE;
896 goto precision;
897 }
898
899 /* we finished early by reading the precision */
900 if (spec->type == FORMAT_TYPE_PRECISION) {
901 if (spec->precision < 0)
902 spec->precision = 0;
903
904 spec->type = FORMAT_TYPE_NONE;
905 goto qualifier;
906 }
907
908 /* By default */
909 spec->type = FORMAT_TYPE_NONE;
910
911 for (; *fmt ; ++fmt) {
912 if (*fmt == '%')
913 break;
914 }
915
916 /* Return the current non-format string */
917 if (fmt != start || !*fmt)
918 return fmt - start;
919
920 /* Process flags */
921 spec->flags = 0;
922
923 while (1) { /* this also skips first '%' */
924 bool found = true;
925
926 ++fmt;
927
928 switch (*fmt) {
929 case '-': spec->flags |= LEFT; break;
930 case '+': spec->flags |= PLUS; break;
931 case ' ': spec->flags |= SPACE; break;
932 case '#': spec->flags |= SPECIAL; break;
933 case '0': spec->flags |= ZEROPAD; break;
934 default: found = false;
935 }
936
937 if (!found)
938 break;
939 }
940
941 /* get field width */
942 spec->field_width = -1;
943
944 if (isdigit(*fmt))
945 spec->field_width = skip_atoi(&fmt);
946 else if (*fmt == '*') {
947 /* it's the next argument */
948 spec->type = FORMAT_TYPE_WIDTH;
949 return ++fmt - start;
950 }
951
952precision:
953 /* get the precision */
954 spec->precision = -1;
955 if (*fmt == '.') {
956 ++fmt;
957 if (isdigit(*fmt)) {
958 spec->precision = skip_atoi(&fmt);
959 if (spec->precision < 0)
960 spec->precision = 0;
961 } else if (*fmt == '*') {
962 /* it's the next argument */
963 spec->type = FORMAT_TYPE_PRECISION;
964 return ++fmt - start;
965 }
966 }
967
968qualifier:
969 /* get the conversion qualifier */
970 spec->qualifier = -1;
971 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
972 *fmt == 'Z' || *fmt == 'z' || *fmt == 't') {
973 spec->qualifier = *fmt++;
974 if (unlikely(spec->qualifier == *fmt)) {
975 if (spec->qualifier == 'l') {
976 spec->qualifier = 'L';
977 ++fmt;
978 } else if (spec->qualifier == 'h') {
979 spec->qualifier = 'H';
980 ++fmt;
981 }
982 }
983 }
984
985 /* default base */
986 spec->base = 10;
987 switch (*fmt) {
988 case 'c':
989 spec->type = FORMAT_TYPE_CHAR;
990 return ++fmt - start;
991
992 case 's':
993 spec->type = FORMAT_TYPE_STR;
994 return ++fmt - start;
995
996 case 'p':
997 spec->type = FORMAT_TYPE_PTR;
998 return fmt - start;
999 /* skip alnum */
1000
1001 case 'n':
1002 spec->type = FORMAT_TYPE_NRCHARS;
1003 return ++fmt - start;
1004
1005 case '%':
1006 spec->type = FORMAT_TYPE_PERCENT_CHAR;
1007 return ++fmt - start;
1008
1009 /* integer number formats - set up the flags and "break" */
1010 case 'o':
1011 spec->base = 8;
1012 break;
1013
1014 case 'x':
1015 spec->flags |= SMALL;
1016
1017 case 'X':
1018 spec->base = 16;
1019 break;
1020
1021 case 'd':
684 case 'i': 1022 case 'i':
685 flags |= SPECIAL; 1023 spec->flags |= SIGN;
686 /* Fallthrough */ 1024 case 'u':
687 case 'I':
688 if (fmt[1] == '6')
689 return ip6_addr_string(buf, end, ptr, field_width, precision, flags);
690 if (fmt[1] == '4')
691 return ip4_addr_string(buf, end, ptr, field_width, precision, flags);
692 flags &= ~SPECIAL;
693 break; 1025 break;
1026
1027 default:
1028 spec->type = FORMAT_TYPE_INVALID;
1029 return fmt - start;
694 } 1030 }
695 flags |= SMALL; 1031
696 if (field_width == -1) { 1032 if (spec->qualifier == 'L')
697 field_width = 2*sizeof(void *); 1033 spec->type = FORMAT_TYPE_LONG_LONG;
698 flags |= ZEROPAD; 1034 else if (spec->qualifier == 'l') {
1035 if (spec->flags & SIGN)
1036 spec->type = FORMAT_TYPE_LONG;
1037 else
1038 spec->type = FORMAT_TYPE_ULONG;
1039 } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') {
1040 spec->type = FORMAT_TYPE_SIZE_T;
1041 } else if (spec->qualifier == 't') {
1042 spec->type = FORMAT_TYPE_PTRDIFF;
1043 } else if (spec->qualifier == 'H') {
1044 if (spec->flags & SIGN)
1045 spec->type = FORMAT_TYPE_BYTE;
1046 else
1047 spec->type = FORMAT_TYPE_UBYTE;
1048 } else if (spec->qualifier == 'h') {
1049 if (spec->flags & SIGN)
1050 spec->type = FORMAT_TYPE_SHORT;
1051 else
1052 spec->type = FORMAT_TYPE_USHORT;
1053 } else {
1054 if (spec->flags & SIGN)
1055 spec->type = FORMAT_TYPE_INT;
1056 else
1057 spec->type = FORMAT_TYPE_UINT;
699 } 1058 }
700 return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags); 1059
1060 return ++fmt - start;
701} 1061}
702 1062
703/** 1063/**
@@ -709,7 +1069,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field
709 * 1069 *
710 * This function follows C99 vsnprintf, but has some extensions: 1070 * This function follows C99 vsnprintf, but has some extensions:
711 * %pS output the name of a text symbol 1071 * %pS output the name of a text symbol
712 * %pF output the name of a function pointer 1072 * %pF output the name of a function pointer with its offset
1073 * %pf output the name of a function pointer without its offset
713 * %pR output the address range in a struct resource 1074 * %pR output the address range in a struct resource
714 * 1075 *
715 * The return value is the number of characters which would 1076 * The return value is the number of characters which would
@@ -726,18 +1087,9 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field
726int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) 1087int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
727{ 1088{
728 unsigned long long num; 1089 unsigned long long num;
729 int base;
730 char *str, *end, c; 1090 char *str, *end, c;
731 1091 int read;
732 int flags; /* flags to number() */ 1092 struct printf_spec spec = {0};
733
734 int field_width; /* width of output field */
735 int precision; /* min. # of digits for integers; max
736 number of chars for from string */
737 int qualifier; /* 'h', 'l', or 'L' for integer fields */
738 /* 'z' support added 23/7/1999 S.H. */
739 /* 'z' changed to 'Z' --davidm 1/25/99 */
740 /* 't' added for ptrdiff_t */
741 1093
742 /* Reject out-of-range values early. Large positive sizes are 1094 /* Reject out-of-range values early. Large positive sizes are
743 used for unknown buffer sizes. */ 1095 used for unknown buffer sizes. */
@@ -758,184 +1110,143 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
758 size = end - buf; 1110 size = end - buf;
759 } 1111 }
760 1112
761 for (; *fmt ; ++fmt) { 1113 while (*fmt) {
762 if (*fmt != '%') { 1114 const char *old_fmt = fmt;
763 if (str < end)
764 *str = *fmt;
765 ++str;
766 continue;
767 }
768 1115
769 /* process flags */ 1116 read = format_decode(fmt, &spec);
770 flags = 0;
771 repeat:
772 ++fmt; /* this also skips first '%' */
773 switch (*fmt) {
774 case '-': flags |= LEFT; goto repeat;
775 case '+': flags |= PLUS; goto repeat;
776 case ' ': flags |= SPACE; goto repeat;
777 case '#': flags |= SPECIAL; goto repeat;
778 case '0': flags |= ZEROPAD; goto repeat;
779 }
780 1117
781 /* get field width */ 1118 fmt += read;
782 field_width = -1;
783 if (isdigit(*fmt))
784 field_width = skip_atoi(&fmt);
785 else if (*fmt == '*') {
786 ++fmt;
787 /* it's the next argument */
788 field_width = va_arg(args, int);
789 if (field_width < 0) {
790 field_width = -field_width;
791 flags |= LEFT;
792 }
793 }
794 1119
795 /* get the precision */ 1120 switch (spec.type) {
796 precision = -1; 1121 case FORMAT_TYPE_NONE: {
797 if (*fmt == '.') { 1122 int copy = read;
798 ++fmt; 1123 if (str < end) {
799 if (isdigit(*fmt)) 1124 if (copy > end - str)
800 precision = skip_atoi(&fmt); 1125 copy = end - str;
801 else if (*fmt == '*') { 1126 memcpy(str, old_fmt, copy);
802 ++fmt;
803 /* it's the next argument */
804 precision = va_arg(args, int);
805 } 1127 }
806 if (precision < 0) 1128 str += read;
807 precision = 0; 1129 break;
808 } 1130 }
809 1131
810 /* get the conversion qualifier */ 1132 case FORMAT_TYPE_WIDTH:
811 qualifier = -1; 1133 spec.field_width = va_arg(args, int);
812 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || 1134 break;
813 *fmt =='Z' || *fmt == 'z' || *fmt == 't') {
814 qualifier = *fmt;
815 ++fmt;
816 if (qualifier == 'l' && *fmt == 'l') {
817 qualifier = 'L';
818 ++fmt;
819 }
820 }
821 1135
822 /* default base */ 1136 case FORMAT_TYPE_PRECISION:
823 base = 10; 1137 spec.precision = va_arg(args, int);
1138 break;
824 1139
825 switch (*fmt) { 1140 case FORMAT_TYPE_CHAR:
826 case 'c': 1141 if (!(spec.flags & LEFT)) {
827 if (!(flags & LEFT)) { 1142 while (--spec.field_width > 0) {
828 while (--field_width > 0) {
829 if (str < end)
830 *str = ' ';
831 ++str;
832 }
833 }
834 c = (unsigned char) va_arg(args, int);
835 if (str < end)
836 *str = c;
837 ++str;
838 while (--field_width > 0) {
839 if (str < end) 1143 if (str < end)
840 *str = ' '; 1144 *str = ' ';
841 ++str; 1145 ++str;
842 }
843 continue;
844
845 case 's':
846 str = string(str, end, va_arg(args, char *), field_width, precision, flags);
847 continue;
848
849 case 'p':
850 str = pointer(fmt+1, str, end,
851 va_arg(args, void *),
852 field_width, precision, flags);
853 /* Skip all alphanumeric pointer suffixes */
854 while (isalnum(fmt[1]))
855 fmt++;
856 continue;
857
858 case 'n':
859 /* FIXME:
860 * What does C99 say about the overflow case here? */
861 if (qualifier == 'l') {
862 long * ip = va_arg(args, long *);
863 *ip = (str - buf);
864 } else if (qualifier == 'Z' || qualifier == 'z') {
865 size_t * ip = va_arg(args, size_t *);
866 *ip = (str - buf);
867 } else {
868 int * ip = va_arg(args, int *);
869 *ip = (str - buf);
870 }
871 continue;
872 1146
873 case '%': 1147 }
1148 }
1149 c = (unsigned char) va_arg(args, int);
1150 if (str < end)
1151 *str = c;
1152 ++str;
1153 while (--spec.field_width > 0) {
874 if (str < end) 1154 if (str < end)
875 *str = '%'; 1155 *str = ' ';
876 ++str; 1156 ++str;
877 continue; 1157 }
1158 break;
878 1159
879 /* integer number formats - set up the flags and "break" */ 1160 case FORMAT_TYPE_STR:
880 case 'o': 1161 str = string(str, end, va_arg(args, char *), spec);
881 base = 8; 1162 break;
882 break;
883 1163
884 case 'x': 1164 case FORMAT_TYPE_PTR:
885 flags |= SMALL; 1165 str = pointer(fmt+1, str, end, va_arg(args, void *),
886 case 'X': 1166 spec);
887 base = 16; 1167 while (isalnum(*fmt))
888 break; 1168 fmt++;
1169 break;
889 1170
890 case 'd': 1171 case FORMAT_TYPE_PERCENT_CHAR:
891 case 'i': 1172 if (str < end)
892 flags |= SIGN; 1173 *str = '%';
893 case 'u': 1174 ++str;
894 break; 1175 break;
895 1176
896 default: 1177 case FORMAT_TYPE_INVALID:
897 if (str < end) 1178 if (str < end)
898 *str = '%'; 1179 *str = '%';
899 ++str; 1180 ++str;
900 if (*fmt) { 1181 break;
901 if (str < end) 1182
902 *str = *fmt; 1183 case FORMAT_TYPE_NRCHARS: {
903 ++str; 1184 int qualifier = spec.qualifier;
904 } else { 1185
905 --fmt; 1186 if (qualifier == 'l') {
906 } 1187 long *ip = va_arg(args, long *);
907 continue; 1188 *ip = (str - buf);
1189 } else if (qualifier == 'Z' ||
1190 qualifier == 'z') {
1191 size_t *ip = va_arg(args, size_t *);
1192 *ip = (str - buf);
1193 } else {
1194 int *ip = va_arg(args, int *);
1195 *ip = (str - buf);
1196 }
1197 break;
908 } 1198 }
909 if (qualifier == 'L') 1199
910 num = va_arg(args, long long); 1200 default:
911 else if (qualifier == 'l') { 1201 switch (spec.type) {
912 num = va_arg(args, unsigned long); 1202 case FORMAT_TYPE_LONG_LONG:
913 if (flags & SIGN) 1203 num = va_arg(args, long long);
914 num = (signed long) num; 1204 break;
915 } else if (qualifier == 'Z' || qualifier == 'z') { 1205 case FORMAT_TYPE_ULONG:
916 num = va_arg(args, size_t); 1206 num = va_arg(args, unsigned long);
917 } else if (qualifier == 't') { 1207 break;
918 num = va_arg(args, ptrdiff_t); 1208 case FORMAT_TYPE_LONG:
919 } else if (qualifier == 'h') { 1209 num = va_arg(args, long);
920 num = (unsigned short) va_arg(args, int); 1210 break;
921 if (flags & SIGN) 1211 case FORMAT_TYPE_SIZE_T:
922 num = (signed short) num; 1212 num = va_arg(args, size_t);
923 } else { 1213 break;
924 num = va_arg(args, unsigned int); 1214 case FORMAT_TYPE_PTRDIFF:
925 if (flags & SIGN) 1215 num = va_arg(args, ptrdiff_t);
926 num = (signed int) num; 1216 break;
1217 case FORMAT_TYPE_UBYTE:
1218 num = (unsigned char) va_arg(args, int);
1219 break;
1220 case FORMAT_TYPE_BYTE:
1221 num = (signed char) va_arg(args, int);
1222 break;
1223 case FORMAT_TYPE_USHORT:
1224 num = (unsigned short) va_arg(args, int);
1225 break;
1226 case FORMAT_TYPE_SHORT:
1227 num = (short) va_arg(args, int);
1228 break;
1229 case FORMAT_TYPE_INT:
1230 num = (int) va_arg(args, int);
1231 break;
1232 default:
1233 num = va_arg(args, unsigned int);
1234 }
1235
1236 str = number(str, end, num, spec);
927 } 1237 }
928 str = number(str, end, num, base,
929 field_width, precision, flags);
930 } 1238 }
1239
931 if (size > 0) { 1240 if (size > 0) {
932 if (str < end) 1241 if (str < end)
933 *str = '\0'; 1242 *str = '\0';
934 else 1243 else
935 end[-1] = '\0'; 1244 end[-1] = '\0';
936 } 1245 }
1246
937 /* the trailing null byte doesn't count towards the total */ 1247 /* the trailing null byte doesn't count towards the total */
938 return str-buf; 1248 return str-buf;
1249
939} 1250}
940EXPORT_SYMBOL(vsnprintf); 1251EXPORT_SYMBOL(vsnprintf);
941 1252
@@ -1058,6 +1369,374 @@ int sprintf(char * buf, const char *fmt, ...)
1058} 1369}
1059EXPORT_SYMBOL(sprintf); 1370EXPORT_SYMBOL(sprintf);
1060 1371
1372#ifdef CONFIG_BINARY_PRINTF
1373/*
1374 * bprintf service:
1375 * vbin_printf() - VA arguments to binary data
1376 * bstr_printf() - Binary data to text string
1377 */
1378
1379/**
1380 * vbin_printf - Parse a format string and place args' binary value in a buffer
1381 * @bin_buf: The buffer to place args' binary value
1382 * @size: The size of the buffer (in 32-bit words, not characters)
1383 * @fmt: The format string to use
1384 * @args: Arguments for the format string
1385 *
1386 * The format follows C99 vsnprintf, except %n is ignored, and its argument
1387 * is skipped.
1388 *
1389 * The return value is the number of 32-bit words which would be generated for
1390 * the given input.
1391 *
1392 * NOTE:
1393 * If the return value is greater than @size, the resulting bin_buf is NOT
1394 * valid for bstr_printf().
1395 */
1396int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
1397{
1398 struct printf_spec spec = {0};
1399 char *str, *end;
1400 int read;
1401
1402 str = (char *)bin_buf;
1403 end = (char *)(bin_buf + size);
1404
1405#define save_arg(type) \
1406do { \
1407 if (sizeof(type) == 8) { \
1408 unsigned long long value; \
1409 str = PTR_ALIGN(str, sizeof(u32)); \
1410 value = va_arg(args, unsigned long long); \
1411 if (str + sizeof(type) <= end) { \
1412 *(u32 *)str = *(u32 *)&value; \
1413 *(u32 *)(str + 4) = *((u32 *)&value + 1); \
1414 } \
1415 } else { \
1416 unsigned long value; \
1417 str = PTR_ALIGN(str, sizeof(type)); \
1418 value = va_arg(args, int); \
1419 if (str + sizeof(type) <= end) \
1420 *(typeof(type) *)str = (type)value; \
1421 } \
1422 str += sizeof(type); \
1423} while (0)
1424
1425
1426 while (*fmt) {
1427 read = format_decode(fmt, &spec);
1428
1429 fmt += read;
1430
1431 switch (spec.type) {
1432 case FORMAT_TYPE_NONE:
1433 break;
1434
1435 case FORMAT_TYPE_WIDTH:
1436 case FORMAT_TYPE_PRECISION:
1437 save_arg(int);
1438 break;
1439
1440 case FORMAT_TYPE_CHAR:
1441 save_arg(char);
1442 break;
1443
1444 case FORMAT_TYPE_STR: {
1445 const char *save_str = va_arg(args, char *);
1446 size_t len;
1447 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
1448 || (unsigned long)save_str < PAGE_SIZE)
1449 save_str = "<NULL>";
1450 len = strlen(save_str);
1451 if (str + len + 1 < end)
1452 memcpy(str, save_str, len + 1);
1453 str += len + 1;
1454 break;
1455 }
1456
1457 case FORMAT_TYPE_PTR:
1458 save_arg(void *);
1459 /* skip all alphanumeric pointer suffixes */
1460 while (isalnum(*fmt))
1461 fmt++;
1462 break;
1463
1464 case FORMAT_TYPE_PERCENT_CHAR:
1465 break;
1466
1467 case FORMAT_TYPE_INVALID:
1468 break;
1469
1470 case FORMAT_TYPE_NRCHARS: {
1471 /* skip %n 's argument */
1472 int qualifier = spec.qualifier;
1473 void *skip_arg;
1474 if (qualifier == 'l')
1475 skip_arg = va_arg(args, long *);
1476 else if (qualifier == 'Z' || qualifier == 'z')
1477 skip_arg = va_arg(args, size_t *);
1478 else
1479 skip_arg = va_arg(args, int *);
1480 break;
1481 }
1482
1483 default:
1484 switch (spec.type) {
1485
1486 case FORMAT_TYPE_LONG_LONG:
1487 save_arg(long long);
1488 break;
1489 case FORMAT_TYPE_ULONG:
1490 case FORMAT_TYPE_LONG:
1491 save_arg(unsigned long);
1492 break;
1493 case FORMAT_TYPE_SIZE_T:
1494 save_arg(size_t);
1495 break;
1496 case FORMAT_TYPE_PTRDIFF:
1497 save_arg(ptrdiff_t);
1498 break;
1499 case FORMAT_TYPE_UBYTE:
1500 case FORMAT_TYPE_BYTE:
1501 save_arg(char);
1502 break;
1503 case FORMAT_TYPE_USHORT:
1504 case FORMAT_TYPE_SHORT:
1505 save_arg(short);
1506 break;
1507 default:
1508 save_arg(int);
1509 }
1510 }
1511 }
1512 return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
1513
1514#undef save_arg
1515}
1516EXPORT_SYMBOL_GPL(vbin_printf);
1517
1518/**
1519 * bstr_printf - Format a string from binary arguments and place it in a buffer
1520 * @buf: The buffer to place the result into
1521 * @size: The size of the buffer, including the trailing null space
1522 * @fmt: The format string to use
1523 * @bin_buf: Binary arguments for the format string
1524 *
1525 * This function is like C99 vsnprintf, but the difference is that vsnprintf gets
1526 * arguments from the stack, and bstr_printf gets arguments from @bin_buf, which is
1527 * a binary buffer generated by vbin_printf.
1528 *
1529 * The format follows C99 vsnprintf, but has some extensions:
1530 * %pS output the name of a text symbol
1531 * %pF output the name of a function pointer with its offset
1532 * %pf output the name of a function pointer without its offset
1533 * %pR output the address range in a struct resource
1534 * %n is ignored
1535 *
1536 * The return value is the number of characters which would
1537 * be generated for the given input, excluding the trailing
1538 * '\0', as per ISO C99. If you want to have the exact
1539 * number of characters written into @buf as return value
1540 * (not including the trailing '\0'), use vscnprintf(). If the
1541 * return is greater than or equal to @size, the resulting
1542 * string is truncated.
1543 */
1544int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1545{
1546 unsigned long long num;
1547 char *str, *end, c;
1548 const char *args = (const char *)bin_buf;
1549
1550 struct printf_spec spec = {0};
1551
1552 if (unlikely((int) size < 0)) {
1553 /* There can be only one.. */
1554 static char warn = 1;
1555 WARN_ON(warn);
1556 warn = 0;
1557 return 0;
1558 }
1559
1560 str = buf;
1561 end = buf + size;
1562
1563#define get_arg(type) \
1564({ \
1565 typeof(type) value; \
1566 if (sizeof(type) == 8) { \
1567 args = PTR_ALIGN(args, sizeof(u32)); \
1568 *(u32 *)&value = *(u32 *)args; \
1569 *((u32 *)&value + 1) = *(u32 *)(args + 4); \
1570 } else { \
1571 args = PTR_ALIGN(args, sizeof(type)); \
1572 value = *(typeof(type) *)args; \
1573 } \
1574 args += sizeof(type); \
1575 value; \
1576})
1577
1578 /* Make sure end is always >= buf */
1579 if (end < buf) {
1580 end = ((void *)-1);
1581 size = end - buf;
1582 }
1583
1584 while (*fmt) {
1585 int read;
1586 const char *old_fmt = fmt;
1587
1588 read = format_decode(fmt, &spec);
1589
1590 fmt += read;
1591
1592 switch (spec.type) {
1593 case FORMAT_TYPE_NONE: {
1594 int copy = read;
1595 if (str < end) {
1596 if (copy > end - str)
1597 copy = end - str;
1598 memcpy(str, old_fmt, copy);
1599 }
1600 str += read;
1601 break;
1602 }
1603
1604 case FORMAT_TYPE_WIDTH:
1605 spec.field_width = get_arg(int);
1606 break;
1607
1608 case FORMAT_TYPE_PRECISION:
1609 spec.precision = get_arg(int);
1610 break;
1611
1612 case FORMAT_TYPE_CHAR:
1613 if (!(spec.flags & LEFT)) {
1614 while (--spec.field_width > 0) {
1615 if (str < end)
1616 *str = ' ';
1617 ++str;
1618 }
1619 }
1620 c = (unsigned char) get_arg(char);
1621 if (str < end)
1622 *str = c;
1623 ++str;
1624 while (--spec.field_width > 0) {
1625 if (str < end)
1626 *str = ' ';
1627 ++str;
1628 }
1629 break;
1630
1631 case FORMAT_TYPE_STR: {
1632 const char *str_arg = args;
1633 size_t len = strlen(str_arg);
1634 args += len + 1;
1635 str = string(str, end, (char *)str_arg, spec);
1636 break;
1637 }
1638
1639 case FORMAT_TYPE_PTR:
1640 str = pointer(fmt+1, str, end, get_arg(void *), spec);
1641 while (isalnum(*fmt))
1642 fmt++;
1643 break;
1644
1645 case FORMAT_TYPE_PERCENT_CHAR:
1646 if (str < end)
1647 *str = '%';
1648 ++str;
1649 break;
1650
1651 case FORMAT_TYPE_INVALID:
1652 if (str < end)
1653 *str = '%';
1654 ++str;
1655 break;
1656
1657 case FORMAT_TYPE_NRCHARS:
1658 /* skip */
1659 break;
1660
1661 default:
1662 switch (spec.type) {
1663
1664 case FORMAT_TYPE_LONG_LONG:
1665 num = get_arg(long long);
1666 break;
1667 case FORMAT_TYPE_ULONG:
1668 num = get_arg(unsigned long);
1669 break;
1670 case FORMAT_TYPE_LONG:
1671 num = get_arg(unsigned long);
1672 break;
1673 case FORMAT_TYPE_SIZE_T:
1674 num = get_arg(size_t);
1675 break;
1676 case FORMAT_TYPE_PTRDIFF:
1677 num = get_arg(ptrdiff_t);
1678 break;
1679 case FORMAT_TYPE_UBYTE:
1680 num = get_arg(unsigned char);
1681 break;
1682 case FORMAT_TYPE_BYTE:
1683 num = get_arg(signed char);
1684 break;
1685 case FORMAT_TYPE_USHORT:
1686 num = get_arg(unsigned short);
1687 break;
1688 case FORMAT_TYPE_SHORT:
1689 num = get_arg(short);
1690 break;
1691 case FORMAT_TYPE_UINT:
1692 num = get_arg(unsigned int);
1693 break;
1694 default:
1695 num = get_arg(int);
1696 }
1697
1698 str = number(str, end, num, spec);
1699 }
1700 }
1701
1702 if (size > 0) {
1703 if (str < end)
1704 *str = '\0';
1705 else
1706 end[-1] = '\0';
1707 }
1708
1709#undef get_arg
1710
1711 /* the trailing null byte doesn't count towards the total */
1712 return str - buf;
1713}
1714EXPORT_SYMBOL_GPL(bstr_printf);
1715
1716/**
1717 * bprintf - Parse a format string and place args' binary value in a buffer
1718 * @bin_buf: The buffer to place args' binary value
1719 * @size: The size of the buffer (in 32-bit words, not characters)
1720 * @fmt: The format string to use
1721 * @...: Arguments for the format string
1722 *
1723 * The function returns the number of words (u32) written
1724 * into @bin_buf.
1725 */
1726int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...)
1727{
1728 va_list args;
1729 int ret;
1730
1731 va_start(args, fmt);
1732 ret = vbin_printf(bin_buf, size, fmt, args);
1733 va_end(args);
1734 return ret;
1735}
1736EXPORT_SYMBOL_GPL(bprintf);
1737
1738#endif /* CONFIG_BINARY_PRINTF */
1739
1061/** 1740/**
1062 * vsscanf - Unformat a buffer into a list of arguments 1741 * vsscanf - Unformat a buffer into a list of arguments
1063 * @buf: input buffer 1742 * @buf: input buffer
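
The vsprintf rework above splits format parsing into format_decode()/struct printf_spec and extends %p handling (%pf, %pM/%pm, %pI4/%pi4, %pI6/%pi6, %pI6c), alongside the new CONFIG_BINARY_PRINTF pair vbin_printf()/bstr_printf(). An illustrative sketch of the new specifiers as seen from a caller — demo_fn() and demo_show() are hypothetical names and the byte arrays are made-up addresses — could look like:

	#include <linux/kernel.h>

	static void demo_fn(void) { }

	static void demo_show(void)
	{
		u8 mac[6]   = { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 };
		u8 ipv4[4]  = { 192, 168, 0, 1 };
		u8 ipv6[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 0x01 };	/* 2001:db8::1 */

		printk(KERN_INFO "mac  %pM / %pm\n", mac, mac);		/* colons / contiguous */
		printk(KERN_INFO "ipv4 %pI4 / %pi4\n", ipv4, ipv4);	/* 192.168.0.1 / 192.168.000.001 */
		printk(KERN_INFO "ipv6 %pI6 / %pI6c\n", ipv6, ipv6);	/* full / compressed form */
		printk(KERN_INFO "fn   %pf vs %pF\n", demo_fn, demo_fn);/* name vs name+offset */
	}

With CONFIG_BINARY_PRINTF enabled, the same format strings can be split into a capture step and a later rendering step; a hedged usage sketch, respecting the "return value greater than @size" caveat from the vbin_printf documentation:

	u32  bin[64];
	char out[128];
	int  words = bprintf(bin, ARRAY_SIZE(bin), "pid=%d comm=%s", 42, "demo");

	if (words <= ARRAY_SIZE(bin))		/* otherwise bin[] is not valid input */
		bstr_printf(out, sizeof(out), "pid=%d comm=%s", bin);
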
diff --git a/lib/zlib_inflate/inflate.h b/lib/zlib_inflate/inflate.h
index df8a6c92052d..3d17b3d1b21f 100644
--- a/lib/zlib_inflate/inflate.h
+++ b/lib/zlib_inflate/inflate.h
@@ -1,3 +1,6 @@
1#ifndef INFLATE_H
2#define INFLATE_H
3
1/* inflate.h -- internal inflate state definition 4/* inflate.h -- internal inflate state definition
2 * Copyright (C) 1995-2004 Mark Adler 5 * Copyright (C) 1995-2004 Mark Adler
3 * For conditions of distribution and use, see copyright notice in zlib.h 6 * For conditions of distribution and use, see copyright notice in zlib.h
@@ -105,3 +108,4 @@ struct inflate_state {
105 unsigned short work[288]; /* work area for code table building */ 108 unsigned short work[288]; /* work area for code table building */
106 code codes[ENOUGH]; /* space for code tables */ 109 code codes[ENOUGH]; /* space for code tables */
107}; 110};
111#endif
diff --git a/lib/zlib_inflate/inftrees.h b/lib/zlib_inflate/inftrees.h
index 5f5219b1240e..b70b4731ac7a 100644
--- a/lib/zlib_inflate/inftrees.h
+++ b/lib/zlib_inflate/inftrees.h
@@ -1,3 +1,6 @@
1#ifndef INFTREES_H
2#define INFTREES_H
3
1/* inftrees.h -- header to use inftrees.c 4/* inftrees.h -- header to use inftrees.c
2 * Copyright (C) 1995-2005 Mark Adler 5 * Copyright (C) 1995-2005 Mark Adler
3 * For conditions of distribution and use, see copyright notice in zlib.h 6 * For conditions of distribution and use, see copyright notice in zlib.h
@@ -53,3 +56,4 @@ typedef enum {
53extern int zlib_inflate_table (codetype type, unsigned short *lens, 56extern int zlib_inflate_table (codetype type, unsigned short *lens,
54 unsigned codes, code **table, 57 unsigned codes, code **table,
55 unsigned *bits, unsigned short *work); 58 unsigned *bits, unsigned short *work);
59#endif