author		Ingo Molnar <mingo@elte.hu>	2009-04-07 05:15:40 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-07 05:15:40 -0400
commit		5e34437840d33554f69380584311743b39e8fbeb (patch)
tree		e081135619ee146af5efb9ee883afca950df5757 /lib
parent		77d05632baee21b1cef8730d7c06aa69601e4dca (diff)
parent		d508afb437daee7cf07da085b635c44a4ebf9b38 (diff)
Merge branch 'linus' into core/softlockup

Conflicts:
	kernel/sysctl.c
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig			29
-rw-r--r--	lib/Kconfig.debug		88
-rw-r--r--	lib/Makefile			14
-rw-r--r--	lib/bitmap.c			16
-rw-r--r--	lib/cpumask.c			4
-rw-r--r--	lib/debugobjects.c		127
-rw-r--r--	lib/decompress.c		54
-rw-r--r--	lib/decompress_bunzip2.c	736
-rw-r--r--	lib/decompress_inflate.c	168
-rw-r--r--	lib/decompress_unlzma.c		648
-rw-r--r--	lib/dma-debug.c			955
-rw-r--r--	lib/dynamic_debug.c		769
-rw-r--r--	lib/dynamic_printk.c		414
-rw-r--r--	lib/idr.c			48
-rw-r--r--	lib/kernel_lock.c		2
-rw-r--r--	lib/kobject.c			2
-rw-r--r--	lib/kobject_uevent.c		12
-rw-r--r--	lib/lmb.c			42
-rw-r--r--	lib/locking-selftest.c		4
-rw-r--r--	lib/nlattr.c			502
-rw-r--r--	lib/rbtree.c			14
-rw-r--r--	lib/swiotlb.c			88
-rw-r--r--	lib/vsprintf.c			1005
-rw-r--r--	lib/zlib_inflate/inflate.h	4
-rw-r--r--	lib/zlib_inflate/inftrees.h	4
25 files changed, 4955 insertions(+), 794 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 03c2c24b9083..8ade0a7a91e0 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -2,6 +2,9 @@
 # Library configuration
 #
 
+config BINARY_PRINTF
+	def_bool n
+
 menu "Library routines"
 
 config BITREVERSE
@@ -98,6 +101,20 @@ config LZO_DECOMPRESS
 	tristate
 
 #
+# These all provide a common interface (hence the apparent duplication with
+# ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
+#
+config DECOMPRESS_GZIP
+	select ZLIB_INFLATE
+	tristate
+
+config DECOMPRESS_BZIP2
+	tristate
+
+config DECOMPRESS_LZMA
+	tristate
+
+#
 # Generic allocator support is selected if needed
 #
 config GENERIC_ALLOCATOR
@@ -136,12 +153,6 @@ config TEXTSEARCH_BM
 config TEXTSEARCH_FSM
 	tristate
 
-#
-# plist support is select#ed if needed
-#
-config PLIST
-	boolean
-
 config HAS_IOMEM
 	boolean
 	depends on !NO_IOMEM
@@ -174,4 +185,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 	bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
 	depends on EXPERIMENTAL && BROKEN
 
+#
+# Netlink attribute parsing support is select'ed if needed
+#
+config NLATTR
+	bool
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 898e07c33c3c..c6e854f215fa 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -440,7 +440,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !X86 && !MIPS && !PPC
+	select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND
 	select KALLSYMS
 	select KALLSYMS_ALL
 
@@ -834,6 +834,7 @@ config SYSCTL_SYSCALL_CHECK
 	  to properly maintain and use. This enables checks that help
 	  you to keep things correct.
 
+source mm/Kconfig.debug
 source kernel/trace/Kconfig
 
 config PROVIDE_OHCI1394_DMA_INIT
@@ -876,7 +877,7 @@ config FIREWIRE_OHCI_REMOTE_DMA
 
 	  If unsure, say N.
 
-menuconfig BUILD_DOCSRC
+config BUILD_DOCSRC
 	bool "Build targets in Documentation/ tree"
 	depends on HEADERS_CHECK
 	help
@@ -885,60 +886,81 @@ menuconfig BUILD_DOCSRC
 
 	  Say N if you are unsure.
 
-config DYNAMIC_PRINTK_DEBUG
-	bool "Enable dynamic printk() call support"
+config DYNAMIC_DEBUG
+	bool "Enable dynamic printk() support"
 	default n
 	depends on PRINTK
+	depends on DEBUG_FS
 	select PRINTK_DEBUG
 	help
 
 	  Compiles debug level messages into the kernel, which would not
 	  otherwise be available at runtime. These messages can then be
-	  enabled/disabled on a per module basis. This mechanism implicitly
-	  enables all pr_debug() and dev_dbg() calls. The impact of this
-	  compile option is a larger kernel text size of about 2%.
+	  enabled/disabled based on various levels of scope - per source file,
+	  function, module, format string, and line number. This mechanism
+	  implicitly enables all pr_debug() and dev_dbg() calls. The impact of
+	  this compile option is a larger kernel text size of about 2%.
 
 	  Usage:
 
-	  Dynamic debugging is controlled by the debugfs file,
-	  dynamic_printk/modules. This file contains a list of the modules that
-	  can be enabled. The format of the file is the module name, followed
-	  by a set of flags that can be enabled. The first flag is always the
-	  'enabled' flag. For example:
+	  Dynamic debugging is controlled via the 'dynamic_debug/ddebug' file,
+	  which is contained in the 'debugfs' filesystem. Thus, the debugfs
+	  filesystem must first be mounted before making use of this feature.
+	  We refer to the control file as: <debugfs>/dynamic_debug/ddebug. This
+	  file contains a list of the debug statements that can be enabled. The
+	  format for each line of the file is:
 
-		<module_name> <enabled=0/1>
-			.
-			.
-			.
+		filename:lineno [module]function flags format
 
-	  <module_name> : Name of the module in which the debug call resides
-	  <enabled=0/1> : whether the messages are enabled or not
+	  filename : source file of the debug statement
+	  lineno : line number of the debug statement
+	  module : module that contains the debug statement
+	  function : function that contains the debug statement
+	  flags : 'p' means the line is turned 'on' for printing
+	  format : the format used for the debug statement
 
 	  From a live system:
 
-		snd_hda_intel enabled=0
-		fixup enabled=0
-		driver enabled=0
+		nullarbor:~ # cat <debugfs>/dynamic_debug/ddebug
+		# filename:lineno [module]function flags format
+		fs/aio.c:222 [aio]__put_ioctx - "__put_ioctx:\040freeing\040%p\012"
+		fs/aio.c:248 [aio]ioctx_alloc - "ENOMEM:\040nr_events\040too\040high\012"
+		fs/aio.c:1770 [aio]sys_io_cancel - "calling\040cancel\012"
 
-	  Enable a module:
+	  Example usage:
 
-		$echo "set enabled=1 <module_name>" > dynamic_printk/modules
+		// enable the message at line 1603 of file svcsock.c
+		nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
+						<debugfs>/dynamic_debug/ddebug
 
-	  Disable a module:
+		// enable all the messages in file svcsock.c
+		nullarbor:~ # echo -n 'file svcsock.c +p' >
+						<debugfs>/dynamic_debug/ddebug
 
-		$echo "set enabled=0 <module_name>" > dynamic_printk/modules
+		// enable all the messages in the NFS server module
+		nullarbor:~ # echo -n 'module nfsd +p' >
+						<debugfs>/dynamic_debug/ddebug
 
-	  Enable all modules:
+		// enable all 12 messages in the function svc_process()
+		nullarbor:~ # echo -n 'func svc_process +p' >
+						<debugfs>/dynamic_debug/ddebug
 
-		$echo "set enabled=1 all" > dynamic_printk/modules
+		// disable all 12 messages in the function svc_process()
+		nullarbor:~ # echo -n 'func svc_process -p' >
+						<debugfs>/dynamic_debug/ddebug
 
-	  Disable all modules:
+	  See Documentation/dynamic-debug-howto.txt for additional information.
 
-		$echo "set enabled=0 all" > dynamic_printk/modules
-
-	  Finally, passing "dynamic_printk" at the command line enables
-	  debugging for all modules. This mode can be turned off via the above
-	  disable command.
+config DMA_API_DEBUG
+	bool "Enable debugging of DMA-API usage"
+	depends on HAVE_DMA_API_DEBUG
+	help
+	  Enable this option to debug the use of the DMA API by device drivers.
+	  With this option you will be able to detect common bugs in device
+	  drivers like double-freeing of DMA mappings or freeing mappings that
+	  were never allocated.
+	  This option causes a performance degradation. Use only if you want
+	  to debug device drivers. If unsure, say N.
 
 source "samples/Kconfig"
 
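For context, a minimal sketch (not part of this patch) of the call sites that DYNAMIC_DEBUG controls at runtime; the driver name and messages below are invented for illustration:

#include <linux/kernel.h>
#include <linux/device.h>

/*
 * Hypothetical driver code. With CONFIG_DYNAMIC_DEBUG=y these calls are
 * compiled in but dormant; they can be switched on at runtime, e.g.:
 *   echo -n 'func mydrv_probe +p' > <debugfs>/dynamic_debug/ddebug
 */
static int mydrv_probe(struct device *dev)
{
	pr_debug("mydrv: probing\n");		/* selectable by file/line/func */
	dev_dbg(dev, "resources mapped\n");	/* same, prefixed with dev name */
	return 0;
}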
diff --git a/lib/Makefile b/lib/Makefile
index 32b0e64ded27..d6edd6753f40 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,7 +11,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
-	 proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o
+	 proportions.o prio_heap.o ratelimit.o show_mem.o \
+	 is_single_threaded.o plist.o decompress.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -40,7 +41,6 @@ lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
-obj-$(CONFIG_PLIST) += plist.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
@@ -65,6 +65,10 @@ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
 obj-$(CONFIG_LZO_COMPRESS) += lzo/
 obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
 
+lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
+lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
+lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
+
 obj-$(CONFIG_TEXTSEARCH) += textsearch.o
 obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
 obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
@@ -82,7 +86,11 @@ obj-$(CONFIG_HAVE_LMB) += lmb.o
 
 obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
 
-obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o
+obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
+
+obj-$(CONFIG_NLATTR) += nlattr.o
+
+obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
 
 hostprogs-y := gen_crc32table
 clean-files := crc32table.h
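Since the Makefile now links dma-debug.o when DMA_API_DEBUG is set, here is a hedged sketch (not from this patch) of the mapping discipline the checker validates; the driver names are invented:

#include <linux/dma-mapping.h>

/* Hypothetical driver path: map a buffer, run the transfer, unmap once.
 * DMA_API_DEBUG reports the classic errors: freeing a mapping twice,
 * freeing one that was never allocated, or unmapping with a mismatched
 * size or direction. */
static int mydrv_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... kick off the hardware and wait for completion ... */

	/* Must mirror the map call exactly; a second unmap here would
	 * trigger a DMA-API debug warning. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}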
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 1338469ac849..35a1f7ff4149 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -948,15 +948,15 @@ done:
  */
 int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
 {
-	int pos;		/* scans bitmap by regions of size order */
+	int pos, end;		/* scans bitmap by regions of size order */
 
-	for (pos = 0; pos < bits; pos += (1 << order))
-		if (__reg_op(bitmap, pos, order, REG_OP_ISFREE))
-			break;
-	if (pos == bits)
-		return -ENOMEM;
-	__reg_op(bitmap, pos, order, REG_OP_ALLOC);
-	return pos;
+	for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) {
+		if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
+			continue;
+		__reg_op(bitmap, pos, order, REG_OP_ALLOC);
+		return pos;
+	}
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(bitmap_find_free_region);
 
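The rewritten loop also makes the bound explicit: a candidate region must end at or before bits, since the check is now against pos + (1 << order) rather than pos. A hedged usage sketch of this API; the bitmap and its consumer are invented:

#include <linux/bitmap.h>

/* Hypothetical: carve naturally aligned power-of-two blocks out of a
 * 128-bit map. bitmap_find_free_region() claims a free region of
 * 1 << order bits and returns its starting bit, or -ENOMEM. */
static DECLARE_BITMAP(slots, 128);

static int grab_four_slots(void)
{
	int pos = bitmap_find_free_region(slots, 128, 2);	/* 4 bits */

	if (pos < 0)
		return pos;			/* no free region */
	/* ... use bits pos..pos+3 ... */
	bitmap_release_region(slots, pos, 2);
	return 0;
}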
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 3389e2440da0..1f71b97de0f9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -109,10 +109,10 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 #endif
 	/* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
 	if (*mask) {
+		unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
 		unsigned int tail;
 		tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
-		memset(cpumask_bits(*mask) + cpumask_size() - tail,
-		       0, tail);
+		memset(ptr + cpumask_size() - tail, 0, tail);
 	}
 
 	return *mask != NULL;
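The new unsigned char * cast is the point of this hunk: cpumask_size() and tail are byte counts, while cpumask_bits() yields an unsigned long *, so the old pointer arithmetic was scaled by sizeof(long). A standalone illustration (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long words[4];
	unsigned long *lp = words;
	unsigned char *cp = (unsigned char *)words;

	/* Pointer arithmetic advances by the pointee size: */
	printf("lp + 1 advances %zu bytes\n",
	       (size_t)((char *)(lp + 1) - (char *)lp));	/* sizeof(long) */
	printf("cp + 1 advances %zu bytes\n",
	       (size_t)((char *)(cp + 1) - (char *)cp));	/* exactly 1 */
	return 0;
}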
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 5d99be1fd988..2755a3bd16a1 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -30,7 +30,7 @@ struct debug_bucket {
 
 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
 
-static struct debug_obj	obj_static_pool[ODEBUG_POOL_SIZE];
+static struct debug_obj	obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 
 static DEFINE_SPINLOCK(pool_lock);
 
@@ -50,12 +50,23 @@ static int debug_objects_enabled __read_mostly
 
 static struct debug_obj_descr	*descr_test  __read_mostly;
 
+static void free_obj_work(struct work_struct *work);
+static DECLARE_WORK(debug_obj_work, free_obj_work);
+
 static int __init enable_object_debug(char *str)
 {
 	debug_objects_enabled = 1;
 	return 0;
 }
+
+static int __init disable_object_debug(char *str)
+{
+	debug_objects_enabled = 0;
+	return 0;
+}
+
 early_param("debug_objects", enable_object_debug);
+early_param("no_debug_objects", disable_object_debug);
 
 static const char *obj_states[ODEBUG_STATE_MAX] = {
 	[ODEBUG_STATE_NONE]		= "none",
@@ -146,25 +157,51 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 }
 
 /*
- * Put the object back into the pool or give it back to kmem_cache:
+ * workqueue function to free objects.
  */
-static void free_object(struct debug_obj *obj)
+static void free_obj_work(struct work_struct *work)
 {
-	unsigned long idx = (unsigned long)(obj - obj_static_pool);
+	struct debug_obj *obj;
 	unsigned long flags;
 
-	if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
-		spin_lock_irqsave(&pool_lock, flags);
-		hlist_add_head(&obj->node, &obj_pool);
-		obj_pool_free++;
-		obj_pool_used--;
-		spin_unlock_irqrestore(&pool_lock, flags);
-	} else {
-		spin_lock_irqsave(&pool_lock, flags);
-		obj_pool_used--;
-		spin_unlock_irqrestore(&pool_lock, flags);
-		kmem_cache_free(obj_cache, obj);
+	spin_lock_irqsave(&pool_lock, flags);
+	while (obj_pool_free > ODEBUG_POOL_SIZE) {
+		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
+		hlist_del(&obj->node);
+		obj_pool_free--;
+		/*
+		 * We release pool_lock across kmem_cache_free() to
+		 * avoid contention on pool_lock.
+		 */
+		spin_unlock_irqrestore(&pool_lock, flags);
+		kmem_cache_free(obj_cache, obj);
+		spin_lock_irqsave(&pool_lock, flags);
 	}
+	spin_unlock_irqrestore(&pool_lock, flags);
+}
+
+/*
+ * Put the object back into the pool and schedule work to free objects
+ * if necessary.
+ */
+static void free_object(struct debug_obj *obj)
+{
+	unsigned long flags;
+	int sched = 0;
+
+	spin_lock_irqsave(&pool_lock, flags);
+	/*
+	 * schedule work when the pool is filled and the cache is
+	 * initialized:
+	 */
+	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
+		sched = !work_pending(&debug_obj_work);
+	hlist_add_head(&obj->node, &obj_pool);
+	obj_pool_free++;
+	obj_pool_used--;
+	spin_unlock_irqrestore(&pool_lock, flags);
+	if (sched)
+		schedule_work(&debug_obj_work);
 }
 
 /*
@@ -876,6 +913,63 @@ void __init debug_objects_early_init(void)
 }
 
 /*
+ * Convert the statically allocated objects to dynamic ones:
+ */
+static int debug_objects_replace_static_objects(void)
+{
+	struct debug_bucket *db = obj_hash;
+	struct hlist_node *node, *tmp;
+	struct debug_obj *obj, *new;
+	HLIST_HEAD(objects);
+	int i, cnt = 0;
+
+	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
+		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
+		if (!obj)
+			goto free;
+		hlist_add_head(&obj->node, &objects);
+	}
+
+	/*
+	 * When debug_objects_mem_init() is called we know that only
+	 * one CPU is up, so disabling interrupts is enough
+	 * protection. This avoids the lockdep hell of lock ordering.
+	 */
+	local_irq_disable();
+
+	/* Remove the statically allocated objects from the pool */
+	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+		hlist_del(&obj->node);
+	/* Move the allocated objects to the pool */
+	hlist_move_list(&objects, &obj_pool);
+
+	/* Replace the active object references */
+	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+		hlist_move_list(&db->list, &objects);
+
+		hlist_for_each_entry(obj, node, &objects, node) {
+			new = hlist_entry(obj_pool.first, typeof(*obj), node);
+			hlist_del(&new->node);
+			/* copy object data */
+			*new = *obj;
+			hlist_add_head(&new->node, &db->list);
+			cnt++;
+		}
+	}
+
+	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
+	       obj_pool_used);
+	local_irq_enable();
+	return 0;
+free:
+	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+		hlist_del(&obj->node);
+		kmem_cache_free(obj_cache, obj);
+	}
+	return -ENOMEM;
+}
+
+/*
  * Called after the kmem_caches are functional to setup a dedicated
  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
  * prevents that the debug code is called on kmem_cache_free() for the
@@ -890,8 +984,11 @@ void __init debug_objects_mem_init(void)
 				      sizeof (struct debug_obj), 0,
 				      SLAB_DEBUG_OBJECTS, NULL);
 
-	if (!obj_cache)
+	if (!obj_cache || debug_objects_replace_static_objects()) {
 		debug_objects_enabled = 0;
-	else
+		if (obj_cache)
+			kmem_cache_destroy(obj_cache);
+		printk(KERN_WARNING "ODEBUG: out of memory.\n");
+	} else
 		debug_objects_selftest();
 }
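The free path above is a common kernel pattern: keep the hot path cheap by batching expensive frees into a work item that is scheduled only when none is pending. A reduced, hypothetical version of the pattern (all names invented):

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

struct surplus_node {
	struct list_head link;
};

static LIST_HEAD(surplus);
static DEFINE_SPINLOCK(surplus_lock);

/* Work function: drain the list, dropping the lock around each kfree()
 * to keep lock hold times short, as free_obj_work() does above. */
static void drain_surplus(struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&surplus_lock, flags);
	while (!list_empty(&surplus)) {
		struct surplus_node *n =
			list_first_entry(&surplus, struct surplus_node, link);

		list_del(&n->link);
		spin_unlock_irqrestore(&surplus_lock, flags);
		kfree(n);			/* expensive part, unlocked */
		spin_lock_irqsave(&surplus_lock, flags);
	}
	spin_unlock_irqrestore(&surplus_lock, flags);
}
static DECLARE_WORK(surplus_work, drain_surplus);

/* Producer side: queue the node, schedule the drain only if the work
 * item is not already pending. */
static void defer_free(struct surplus_node *n)
{
	unsigned long flags;
	int sched;

	spin_lock_irqsave(&surplus_lock, flags);
	list_add(&n->link, &surplus);
	sched = !work_pending(&surplus_work);
	spin_unlock_irqrestore(&surplus_lock, flags);
	if (sched)
		schedule_work(&surplus_work);
}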
diff --git a/lib/decompress.c b/lib/decompress.c
new file mode 100644
index 000000000000..d2842f571674
--- /dev/null
+++ b/lib/decompress.c
@@ -0,0 +1,54 @@
1/*
2 * decompress.c
3 *
4 * Detect the decompression method based on magic number
5 */
6
7#include <linux/decompress/generic.h>
8
9#include <linux/decompress/bunzip2.h>
10#include <linux/decompress/unlzma.h>
11#include <linux/decompress/inflate.h>
12
13#include <linux/types.h>
14#include <linux/string.h>
15
16#ifndef CONFIG_DECOMPRESS_GZIP
17# define gunzip NULL
18#endif
19#ifndef CONFIG_DECOMPRESS_BZIP2
20# define bunzip2 NULL
21#endif
22#ifndef CONFIG_DECOMPRESS_LZMA
23# define unlzma NULL
24#endif
25
26static const struct compress_format {
27 unsigned char magic[2];
28 const char *name;
29 decompress_fn decompressor;
30} compressed_formats[] = {
31 { {037, 0213}, "gzip", gunzip },
32 { {037, 0236}, "gzip", gunzip },
33 { {0x42, 0x5a}, "bzip2", bunzip2 },
34 { {0x5d, 0x00}, "lzma", unlzma },
35 { {0, 0}, NULL, NULL }
36};
37
38decompress_fn decompress_method(const unsigned char *inbuf, int len,
39 const char **name)
40{
41 const struct compress_format *cf;
42
43 if (len < 2)
44 return NULL; /* Need at least this much... */
45
46 for (cf = compressed_formats; cf->name; cf++) {
47 if (!memcmp(inbuf, cf->magic, 2))
48 break;
49
50 }
51 if (name)
52 *name = cf->name;
53 return cf->decompressor;
54}
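A hedged sketch of how a caller might use decompress_method(); the image buffer, caller, and error callback are invented, and the in-memory call form (no fill/flush callbacks) follows the decompress_fn signature used by the decompressors below:

#include <linux/decompress/generic.h>
#include <linux/kernel.h>

/* Hypothetical: sniff the magic bytes, then run the matching decompressor
 * over an in-memory image. decompress_method() returns NULL when the
 * format is unknown or was compiled out. */
static int unpack_image(unsigned char *img, int len, unsigned char *out,
			void (*err)(char *))
{
	const char *name;
	decompress_fn decomp = decompress_method(img, len, &name);

	if (!decomp)
		return -1;
	pr_debug("detected %s compressed image\n", name);
	/* in-memory variant: no fill/flush callbacks, no position report */
	return decomp(img, len, NULL, NULL, out, NULL, err);
}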
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
new file mode 100644
index 000000000000..708e2a86d87b
--- /dev/null
+++ b/lib/decompress_bunzip2.c
@@ -0,0 +1,736 @@
1/* vi: set sw = 4 ts = 4: */
2/* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
3
4 Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
5 which also acknowledges contributions by Mike Burrows, David Wheeler,
6 Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
7 Robert Sedgewick, and Jon L. Bentley.
8
9 This code is licensed under the LGPLv2:
10 LGPL (http://www.gnu.org/copyleft/lgpl.html
11*/
12
13/*
14 Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org).
15
16 More efficient reading of Huffman codes, a streamlined read_bunzip()
17 function, and various other tweaks. In (limited) tests, approximately
18 20% faster than bzcat on x86 and about 10% faster on arm.
19
20	Note that about 2/3 of the time is spent in read_bunzip() reversing
21 the Burrows-Wheeler transformation. Much of that time is delay
22 resulting from cache misses.
23
24 I would ask that anyone benefiting from this work, especially those
25 using it in commercial products, consider making a donation to my local
26 non-profit hospice organization in the name of the woman I loved, who
27 passed away Feb. 12, 2003.
28
29 In memory of Toni W. Hagan
30
31 Hospice of Acadiana, Inc.
32 2600 Johnston St., Suite 200
33 Lafayette, LA 70503-3240
34
35 Phone (337) 232-1234 or 1-800-738-2226
36 Fax (337) 232-1297
37
38 http://www.hospiceacadiana.com/
39
40 Manuel
41 */
42
43/*
44 Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu)
45*/
46
47
48#ifndef STATIC
49#include <linux/decompress/bunzip2.h>
50#endif /* !STATIC */
51
52#include <linux/decompress/mm.h>
53#include <linux/slab.h>
54
55#ifndef INT_MAX
56#define INT_MAX 0x7fffffff
57#endif
58
59/* Constants for Huffman coding */
60#define MAX_GROUPS 6
61#define GROUP_SIZE 50 /* 64 would have been more efficient */
62#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */
63#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */
64#define SYMBOL_RUNA 0
65#define SYMBOL_RUNB 1
66
67/* Status return values */
68#define RETVAL_OK 0
69#define RETVAL_LAST_BLOCK (-1)
70#define RETVAL_NOT_BZIP_DATA (-2)
71#define RETVAL_UNEXPECTED_INPUT_EOF (-3)
72#define RETVAL_UNEXPECTED_OUTPUT_EOF (-4)
73#define RETVAL_DATA_ERROR (-5)
74#define RETVAL_OUT_OF_MEMORY (-6)
75#define RETVAL_OBSOLETE_INPUT (-7)
76
77/* Other housekeeping constants */
78#define BZIP2_IOBUF_SIZE 4096
79
80/* This is what we know about each Huffman coding group */
81struct group_data {
82	/* We have an extra slot at the end of limit[] for a sentinel value. */
83 int limit[MAX_HUFCODE_BITS+1];
84 int base[MAX_HUFCODE_BITS];
85 int permute[MAX_SYMBOLS];
86 int minLen, maxLen;
87};
88
89/* Structure holding all the housekeeping data, including IO buffers and
90 memory that persists between calls to bunzip */
91struct bunzip_data {
92 /* State for interrupting output loop */
93 int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent;
94 /* I/O tracking data (file handles, buffers, positions, etc.) */
95 int (*fill)(void*, unsigned int);
96 int inbufCount, inbufPos /*, outbufPos*/;
97 unsigned char *inbuf /*,*outbuf*/;
98 unsigned int inbufBitCount, inbufBits;
99 /* The CRC values stored in the block header and calculated from the
100 data */
101 unsigned int crc32Table[256], headerCRC, totalCRC, writeCRC;
102 /* Intermediate buffer and its size (in bytes) */
103 unsigned int *dbuf, dbufSize;
104 /* These things are a bit too big to go on the stack */
105 unsigned char selectors[32768]; /* nSelectors = 15 bits */
106 struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */
107 int io_error; /* non-zero if we have IO error */
108};
109
110
111/* Return the next nnn bits of input. All reads from the compressed input
112 are done through this function. All reads are big endian */
113static unsigned int INIT get_bits(struct bunzip_data *bd, char bits_wanted)
114{
115 unsigned int bits = 0;
116
117 /* If we need to get more data from the byte buffer, do so.
118 (Loop getting one byte at a time to enforce endianness and avoid
119 unaligned access.) */
120 while (bd->inbufBitCount < bits_wanted) {
121 /* If we need to read more data from file into byte buffer, do
122 so */
123 if (bd->inbufPos == bd->inbufCount) {
124 if (bd->io_error)
125 return 0;
126 bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE);
127 if (bd->inbufCount <= 0) {
128 bd->io_error = RETVAL_UNEXPECTED_INPUT_EOF;
129 return 0;
130 }
131 bd->inbufPos = 0;
132 }
133 /* Avoid 32-bit overflow (dump bit buffer to top of output) */
134 if (bd->inbufBitCount >= 24) {
135 bits = bd->inbufBits&((1 << bd->inbufBitCount)-1);
136 bits_wanted -= bd->inbufBitCount;
137 bits <<= bits_wanted;
138 bd->inbufBitCount = 0;
139 }
140 /* Grab next 8 bits of input from buffer. */
141 bd->inbufBits = (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
142 bd->inbufBitCount += 8;
143 }
144 /* Calculate result */
145 bd->inbufBitCount -= bits_wanted;
146 bits |= (bd->inbufBits >> bd->inbufBitCount)&((1 << bits_wanted)-1);
147
148 return bits;
149}
150
151/* Unpacks the next block and sets up for the inverse burrows-wheeler step. */
152
153static int INIT get_next_block(struct bunzip_data *bd)
154{
155 struct group_data *hufGroup = NULL;
156 int *base = NULL;
157 int *limit = NULL;
158 int dbufCount, nextSym, dbufSize, groupCount, selector,
159 i, j, k, t, runPos, symCount, symTotal, nSelectors,
160 byteCount[256];
161 unsigned char uc, symToByte[256], mtfSymbol[256], *selectors;
162 unsigned int *dbuf, origPtr;
163
164 dbuf = bd->dbuf;
165 dbufSize = bd->dbufSize;
166 selectors = bd->selectors;
167
168 /* Read in header signature and CRC, then validate signature.
169 (last block signature means CRC is for whole file, return now) */
170 i = get_bits(bd, 24);
171 j = get_bits(bd, 24);
172 bd->headerCRC = get_bits(bd, 32);
173 if ((i == 0x177245) && (j == 0x385090))
174 return RETVAL_LAST_BLOCK;
175 if ((i != 0x314159) || (j != 0x265359))
176 return RETVAL_NOT_BZIP_DATA;
177 /* We can add support for blockRandomised if anybody complains.
178 There was some code for this in busybox 1.0.0-pre3, but nobody ever
179 noticed that it didn't actually work. */
180 if (get_bits(bd, 1))
181 return RETVAL_OBSOLETE_INPUT;
182 origPtr = get_bits(bd, 24);
183 if (origPtr > dbufSize)
184 return RETVAL_DATA_ERROR;
185 /* mapping table: if some byte values are never used (encoding things
186 like ascii text), the compression code removes the gaps to have fewer
187 symbols to deal with, and writes a sparse bitfield indicating which
188 values were present. We make a translation table to convert the
189 symbols back to the corresponding bytes. */
190 t = get_bits(bd, 16);
191 symTotal = 0;
192 for (i = 0; i < 16; i++) {
193 if (t&(1 << (15-i))) {
194 k = get_bits(bd, 16);
195 for (j = 0; j < 16; j++)
196 if (k&(1 << (15-j)))
197 symToByte[symTotal++] = (16*i)+j;
198 }
199 }
200 /* How many different Huffman coding groups does this block use? */
201 groupCount = get_bits(bd, 3);
202 if (groupCount < 2 || groupCount > MAX_GROUPS)
203 return RETVAL_DATA_ERROR;
204 /* nSelectors: Every GROUP_SIZE many symbols we select a new
205 Huffman coding group. Read in the group selector list,
206 which is stored as MTF encoded bit runs. (MTF = Move To
207 Front, as each value is used it's moved to the start of the
208 list.) */
209 nSelectors = get_bits(bd, 15);
210 if (!nSelectors)
211 return RETVAL_DATA_ERROR;
212 for (i = 0; i < groupCount; i++)
213 mtfSymbol[i] = i;
214 for (i = 0; i < nSelectors; i++) {
215 /* Get next value */
216 for (j = 0; get_bits(bd, 1); j++)
217 if (j >= groupCount)
218 return RETVAL_DATA_ERROR;
219 /* Decode MTF to get the next selector */
220 uc = mtfSymbol[j];
221 for (; j; j--)
222 mtfSymbol[j] = mtfSymbol[j-1];
223 mtfSymbol[0] = selectors[i] = uc;
224 }
225 /* Read the Huffman coding tables for each group, which code
226 for symTotal literal symbols, plus two run symbols (RUNA,
227 RUNB) */
228 symCount = symTotal+2;
229 for (j = 0; j < groupCount; j++) {
230 unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
231 int minLen, maxLen, pp;
232 /* Read Huffman code lengths for each symbol. They're
233 stored in a way similar to mtf; record a starting
234 value for the first symbol, and an offset from the
235		   previous value for every symbol after that.
236 (Subtracting 1 before the loop and then adding it
237 back at the end is an optimization that makes the
238 test inside the loop simpler: symbol length 0
239 becomes negative, so an unsigned inequality catches
240 it.) */
241 t = get_bits(bd, 5)-1;
242 for (i = 0; i < symCount; i++) {
243 for (;;) {
244 if (((unsigned)t) > (MAX_HUFCODE_BITS-1))
245 return RETVAL_DATA_ERROR;
246
247 /* If first bit is 0, stop. Else
248 second bit indicates whether to
249 increment or decrement the value.
250 Optimization: grab 2 bits and unget
251 the second if the first was 0. */
252
253 k = get_bits(bd, 2);
254 if (k < 2) {
255 bd->inbufBitCount++;
256 break;
257 }
258 /* Add one if second bit 1, else
259 * subtract 1. Avoids if/else */
260 t += (((k+1)&2)-1);
261 }
262 /* Correct for the initial -1, to get the
263 * final symbol length */
264 length[i] = t+1;
265 }
266 /* Find largest and smallest lengths in this group */
267 minLen = maxLen = length[0];
268
269 for (i = 1; i < symCount; i++) {
270 if (length[i] > maxLen)
271 maxLen = length[i];
272 else if (length[i] < minLen)
273 minLen = length[i];
274 }
275
276 /* Calculate permute[], base[], and limit[] tables from
277 * length[].
278 *
279 * permute[] is the lookup table for converting
280 * Huffman coded symbols into decoded symbols. base[]
281 * is the amount to subtract from the value of a
282 * Huffman symbol of a given length when using
283 * permute[].
284 *
285 * limit[] indicates the largest numerical value a
286 * symbol with a given number of bits can have. This
287 * is how the Huffman codes can vary in length: each
288 * code with a value > limit[length] needs another
289 * bit.
290 */
291 hufGroup = bd->groups+j;
292 hufGroup->minLen = minLen;
293 hufGroup->maxLen = maxLen;
294 /* Note that minLen can't be smaller than 1, so we
295 adjust the base and limit array pointers so we're
296 not always wasting the first entry. We do this
297 again when using them (during symbol decoding).*/
298 base = hufGroup->base-1;
299 limit = hufGroup->limit-1;
300		/* Calculate permute[]. Concurrently, initialize
301 * temp[] and limit[]. */
302 pp = 0;
303 for (i = minLen; i <= maxLen; i++) {
304 temp[i] = limit[i] = 0;
305 for (t = 0; t < symCount; t++)
306 if (length[t] == i)
307 hufGroup->permute[pp++] = t;
308 }
309 /* Count symbols coded for at each bit length */
310 for (i = 0; i < symCount; i++)
311 temp[length[i]]++;
312 /* Calculate limit[] (the largest symbol-coding value
313 *at each bit length, which is (previous limit <<
314 *1)+symbols at this level), and base[] (number of
315 *symbols to ignore at each bit length, which is limit
316 *minus the cumulative count of symbols coded for
317 *already). */
318 pp = t = 0;
319 for (i = minLen; i < maxLen; i++) {
320 pp += temp[i];
321 /* We read the largest possible symbol size
322 and then unget bits after determining how
323 many we need, and those extra bits could be
324 set to anything. (They're noise from
325 future symbols.) At each level we're
326 really only interested in the first few
327 bits, so here we set all the trailing
328 to-be-ignored bits to 1 so they don't
329 affect the value > limit[length]
330 comparison. */
331 limit[i] = (pp << (maxLen - i)) - 1;
332 pp <<= 1;
333 base[i+1] = pp-(t += temp[i]);
334 }
335		limit[maxLen+1] = INT_MAX; /* Sentinel value for
336 * reading next sym. */
337 limit[maxLen] = pp+temp[maxLen]-1;
338 base[minLen] = 0;
339 }
340 /* We've finished reading and digesting the block header. Now
341 read this block's Huffman coded symbols from the file and
342 undo the Huffman coding and run length encoding, saving the
343 result into dbuf[dbufCount++] = uc */
344
345 /* Initialize symbol occurrence counters and symbol Move To
346 * Front table */
347 for (i = 0; i < 256; i++) {
348 byteCount[i] = 0;
349 mtfSymbol[i] = (unsigned char)i;
350 }
351 /* Loop through compressed symbols. */
352 runPos = dbufCount = symCount = selector = 0;
353 for (;;) {
354 /* Determine which Huffman coding group to use. */
355 if (!(symCount--)) {
356 symCount = GROUP_SIZE-1;
357 if (selector >= nSelectors)
358 return RETVAL_DATA_ERROR;
359 hufGroup = bd->groups+selectors[selector++];
360 base = hufGroup->base-1;
361 limit = hufGroup->limit-1;
362 }
363 /* Read next Huffman-coded symbol. */
364 /* Note: It is far cheaper to read maxLen bits and
365 back up than it is to read minLen bits and then an
366 additional bit at a time, testing as we go.
367 Because there is a trailing last block (with file
368 CRC), there is no danger of the overread causing an
369 unexpected EOF for a valid compressed file. As a
370 further optimization, we do the read inline
371 (falling back to a call to get_bits if the buffer
372 runs dry). The following (up to got_huff_bits:) is
373 equivalent to j = get_bits(bd, hufGroup->maxLen);
374 */
375 while (bd->inbufBitCount < hufGroup->maxLen) {
376 if (bd->inbufPos == bd->inbufCount) {
377 j = get_bits(bd, hufGroup->maxLen);
378 goto got_huff_bits;
379 }
380 bd->inbufBits =
381 (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
382 bd->inbufBitCount += 8;
383 };
384 bd->inbufBitCount -= hufGroup->maxLen;
385 j = (bd->inbufBits >> bd->inbufBitCount)&
386 ((1 << hufGroup->maxLen)-1);
387got_huff_bits:
388		/* Figure out how many bits are in the next symbol and
389 * unget extras */
390 i = hufGroup->minLen;
391 while (j > limit[i])
392 ++i;
393 bd->inbufBitCount += (hufGroup->maxLen - i);
394 /* Huffman decode value to get nextSym (with bounds checking) */
395 if ((i > hufGroup->maxLen)
396 || (((unsigned)(j = (j>>(hufGroup->maxLen-i))-base[i]))
397 >= MAX_SYMBOLS))
398 return RETVAL_DATA_ERROR;
399 nextSym = hufGroup->permute[j];
400 /* We have now decoded the symbol, which indicates
401 either a new literal byte, or a repeated run of the
402 most recent literal byte. First, check if nextSym
403 indicates a repeated run, and if so loop collecting
404 how many times to repeat the last literal. */
405 if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */
406 /* If this is the start of a new run, zero out
407 * counter */
408 if (!runPos) {
409 runPos = 1;
410 t = 0;
411 }
412 /* Neat trick that saves 1 symbol: instead of
413 or-ing 0 or 1 at each bit position, add 1
414 or 2 instead. For example, 1011 is 1 << 0
415 + 1 << 1 + 2 << 2. 1010 is 2 << 0 + 2 << 1
416 + 1 << 2. You can make any bit pattern
417 that way using 1 less symbol than the basic
418 or 0/1 method (except all bits 0, which
419 would use no symbols, but a run of length 0
420 doesn't mean anything in this context).
421 Thus space is saved. */
422 t += (runPos << nextSym);
423 /* +runPos if RUNA; +2*runPos if RUNB */
424
425 runPos <<= 1;
426 continue;
427 }
428 /* When we hit the first non-run symbol after a run,
429 we now know how many times to repeat the last
430 literal, so append that many copies to our buffer
431 of decoded symbols (dbuf) now. (The last literal
432 used is the one at the head of the mtfSymbol
433 array.) */
434 if (runPos) {
435 runPos = 0;
436 if (dbufCount+t >= dbufSize)
437 return RETVAL_DATA_ERROR;
438
439 uc = symToByte[mtfSymbol[0]];
440 byteCount[uc] += t;
441 while (t--)
442 dbuf[dbufCount++] = uc;
443 }
444 /* Is this the terminating symbol? */
445 if (nextSym > symTotal)
446 break;
447 /* At this point, nextSym indicates a new literal
448 character. Subtract one to get the position in the
449 MTF array at which this literal is currently to be
450 found. (Note that the result can't be -1 or 0,
451 because 0 and 1 are RUNA and RUNB. But another
452 instance of the first symbol in the mtf array,
453 position 0, would have been handled as part of a
454 run above. Therefore 1 unused mtf position minus 2
455 non-literal nextSym values equals -1.) */
456 if (dbufCount >= dbufSize)
457 return RETVAL_DATA_ERROR;
458 i = nextSym - 1;
459 uc = mtfSymbol[i];
460 /* Adjust the MTF array. Since we typically expect to
461 *move only a small number of symbols, and are bound
462 *by 256 in any case, using memmove here would
463 *typically be bigger and slower due to function call
464 *overhead and other assorted setup costs. */
465 do {
466 mtfSymbol[i] = mtfSymbol[i-1];
467 } while (--i);
468 mtfSymbol[0] = uc;
469 uc = symToByte[uc];
470 /* We have our literal byte. Save it into dbuf. */
471 byteCount[uc]++;
472 dbuf[dbufCount++] = (unsigned int)uc;
473 }
474 /* At this point, we've read all the Huffman-coded symbols
475 (and repeated runs) for this block from the input stream,
476 and decoded them into the intermediate buffer. There are
477 dbufCount many decoded bytes in dbuf[]. Now undo the
478 Burrows-Wheeler transform on dbuf. See
479 http://dogma.net/markn/articles/bwt/bwt.htm
480 */
481 /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
482 j = 0;
483 for (i = 0; i < 256; i++) {
484 k = j+byteCount[i];
485 byteCount[i] = j;
486 j = k;
487 }
488 /* Figure out what order dbuf would be in if we sorted it. */
489 for (i = 0; i < dbufCount; i++) {
490 uc = (unsigned char)(dbuf[i] & 0xff);
491 dbuf[byteCount[uc]] |= (i << 8);
492 byteCount[uc]++;
493 }
494 /* Decode first byte by hand to initialize "previous" byte.
495 Note that it doesn't get output, and if the first three
496 characters are identical it doesn't qualify as a run (hence
497 writeRunCountdown = 5). */
498 if (dbufCount) {
499 if (origPtr >= dbufCount)
500 return RETVAL_DATA_ERROR;
501 bd->writePos = dbuf[origPtr];
502 bd->writeCurrent = (unsigned char)(bd->writePos&0xff);
503 bd->writePos >>= 8;
504 bd->writeRunCountdown = 5;
505 }
506 bd->writeCount = dbufCount;
507
508 return RETVAL_OK;
509}
510
511/* Undo Burrows-Wheeler transform on intermediate buffer to produce output.
512   Up to len bytes of decoded data are written to outbuf per call. The
513   return value is the number of bytes written, or an error (all errors
514   are negative numbers); the caller loops until a call returns less
515   than len or an error.
516*/
517
518static int INIT read_bunzip(struct bunzip_data *bd, char *outbuf, int len)
519{
520 const unsigned int *dbuf;
521 int pos, xcurrent, previous, gotcount;
522
523 /* If last read was short due to end of file, return last block now */
524 if (bd->writeCount < 0)
525 return bd->writeCount;
526
527 gotcount = 0;
528 dbuf = bd->dbuf;
529 pos = bd->writePos;
530 xcurrent = bd->writeCurrent;
531
532 /* We will always have pending decoded data to write into the output
533 buffer unless this is the very first call (in which case we haven't
534 Huffman-decoded a block into the intermediate buffer yet). */
535
536 if (bd->writeCopies) {
537 /* Inside the loop, writeCopies means extra copies (beyond 1) */
538 --bd->writeCopies;
539 /* Loop outputting bytes */
540 for (;;) {
541 /* If the output buffer is full, snapshot
542 * state and return */
543 if (gotcount >= len) {
544 bd->writePos = pos;
545 bd->writeCurrent = xcurrent;
546 bd->writeCopies++;
547 return len;
548 }
549 /* Write next byte into output buffer, updating CRC */
550 outbuf[gotcount++] = xcurrent;
551 bd->writeCRC = (((bd->writeCRC) << 8)
552 ^bd->crc32Table[((bd->writeCRC) >> 24)
553 ^xcurrent]);
554 /* Loop now if we're outputting multiple
555 * copies of this byte */
556 if (bd->writeCopies) {
557 --bd->writeCopies;
558 continue;
559 }
560decode_next_byte:
561 if (!bd->writeCount--)
562 break;
563 /* Follow sequence vector to undo
564 * Burrows-Wheeler transform */
565 previous = xcurrent;
566 pos = dbuf[pos];
567 xcurrent = pos&0xff;
568 pos >>= 8;
569 /* After 3 consecutive copies of the same
570 byte, the 4th is a repeat count. We count
571			   down from 4 instead of counting up because
572 testing for non-zero is faster */
573 if (--bd->writeRunCountdown) {
574 if (xcurrent != previous)
575 bd->writeRunCountdown = 4;
576 } else {
577 /* We have a repeated run, this byte
578 * indicates the count */
579 bd->writeCopies = xcurrent;
580 xcurrent = previous;
581 bd->writeRunCountdown = 5;
582 /* Sometimes there are just 3 bytes
583 * (run length 0) */
584 if (!bd->writeCopies)
585 goto decode_next_byte;
586 /* Subtract the 1 copy we'd output
587 * anyway to get extras */
588 --bd->writeCopies;
589 }
590 }
591 /* Decompression of this block completed successfully */
592 bd->writeCRC = ~bd->writeCRC;
593 bd->totalCRC = ((bd->totalCRC << 1) |
594 (bd->totalCRC >> 31)) ^ bd->writeCRC;
595 /* If this block had a CRC error, force file level CRC error. */
596 if (bd->writeCRC != bd->headerCRC) {
597 bd->totalCRC = bd->headerCRC+1;
598 return RETVAL_LAST_BLOCK;
599 }
600 }
601
602 /* Refill the intermediate buffer by Huffman-decoding next
603 * block of input */
604 /* (previous is just a convenient unused temp variable here) */
605 previous = get_next_block(bd);
606 if (previous) {
607 bd->writeCount = previous;
608 return (previous != RETVAL_LAST_BLOCK) ? previous : gotcount;
609 }
610 bd->writeCRC = 0xffffffffUL;
611 pos = bd->writePos;
612 xcurrent = bd->writeCurrent;
613 goto decode_next_byte;
614}
615
616static int INIT nofill(void *buf, unsigned int len)
617{
618 return -1;
619}
620
621/* Allocate the structure, read file header. If fill is NULL, inbuf must
622   contain a complete bunzip2 stream (len bytes long); otherwise, more
623   data is pulled in through the fill() callback as the buffer drains. */
624static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
625 int (*fill)(void*, unsigned int))
626{
627 struct bunzip_data *bd;
628 unsigned int i, j, c;
629 const unsigned int BZh0 =
630 (((unsigned int)'B') << 24)+(((unsigned int)'Z') << 16)
631 +(((unsigned int)'h') << 8)+(unsigned int)'0';
632
633 /* Figure out how much data to allocate */
634 i = sizeof(struct bunzip_data);
635
636 /* Allocate bunzip_data. Most fields initialize to zero. */
637 bd = *bdp = malloc(i);
638 memset(bd, 0, sizeof(struct bunzip_data));
639 /* Setup input buffer */
640 bd->inbuf = inbuf;
641 bd->inbufCount = len;
642 if (fill != NULL)
643 bd->fill = fill;
644 else
645 bd->fill = nofill;
646
647 /* Init the CRC32 table (big endian) */
648 for (i = 0; i < 256; i++) {
649 c = i << 24;
650 for (j = 8; j; j--)
651 c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1);
652 bd->crc32Table[i] = c;
653 }
654
655 /* Ensure that file starts with "BZh['1'-'9']." */
656 i = get_bits(bd, 32);
657 if (((unsigned int)(i-BZh0-1)) >= 9)
658 return RETVAL_NOT_BZIP_DATA;
659
660 /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of
661 uncompressed data. Allocate intermediate buffer for block. */
662 bd->dbufSize = 100000*(i-BZh0);
663
664 bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
665 return RETVAL_OK;
666}
667
668/* Decompress the given buffer or stream. (Stops at the end of the bzip2
669   data, not the end of the input.) */
670STATIC int INIT bunzip2(unsigned char *buf, int len,
671 int(*fill)(void*, unsigned int),
672 int(*flush)(void*, unsigned int),
673 unsigned char *outbuf,
674 int *pos,
675 void(*error_fn)(char *x))
676{
677 struct bunzip_data *bd;
678 int i = -1;
679 unsigned char *inbuf;
680
681 set_error_fn(error_fn);
682 if (flush)
683 outbuf = malloc(BZIP2_IOBUF_SIZE);
684 else
685 len -= 4; /* Uncompressed size hack active in pre-boot
686 environment */
687 if (!outbuf) {
688		error("Could not allocate output buffer");
689 return -1;
690 }
691 if (buf)
692 inbuf = buf;
693 else
694 inbuf = malloc(BZIP2_IOBUF_SIZE);
695 if (!inbuf) {
696		error("Could not allocate input buffer");
697 goto exit_0;
698 }
699 i = start_bunzip(&bd, inbuf, len, fill);
700 if (!i) {
701 for (;;) {
702 i = read_bunzip(bd, outbuf, BZIP2_IOBUF_SIZE);
703 if (i <= 0)
704 break;
705 if (!flush)
706 outbuf += i;
707 else
708 if (i != flush(outbuf, i)) {
709 i = RETVAL_UNEXPECTED_OUTPUT_EOF;
710 break;
711 }
712 }
713 }
714 /* Check CRC and release memory */
715 if (i == RETVAL_LAST_BLOCK) {
716 if (bd->headerCRC != bd->totalCRC)
717 error("Data integrity error when decompressing.");
718 else
719 i = RETVAL_OK;
720 } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) {
721 error("Compressed file ends unexpectedly");
722 }
723 if (bd->dbuf)
724 large_free(bd->dbuf);
725 if (pos)
726 *pos = bd->inbufPos;
727 free(bd);
728 if (!buf)
729 free(inbuf);
730exit_0:
731 if (flush)
732 free(outbuf);
733 return i;
734}
735
736#define decompress bunzip2
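A hedged usage sketch for the streaming mode of bunzip2() above; the fill/flush callbacks and their backing I/O helpers are invented:

#include <linux/kernel.h>

/* Hypothetical I/O helpers supplied elsewhere. */
extern int read_from_source(void *buf, unsigned int len);
extern int write_to_sink(void *buf, unsigned int len);

static int my_fill(void *buf, unsigned int len)
{
	return read_from_source(buf, len);	/* bytes read, <= 0 on EOF */
}

static int my_flush(void *buf, unsigned int len)
{
	return write_to_sink(buf, len);		/* must return len on success */
}

static void my_error(char *msg)
{
	pr_err("bunzip2: %s\n", msg);
}

static int unpack_bz2_stream(void)
{
	int pos = 0;

	/* NULL inbuf: bunzip2() allocates its own buffer and pulls data in
	 * through my_fill(); output is pushed out through my_flush(). */
	return bunzip2(NULL, 0, my_fill, my_flush, NULL, &pos, my_error);
}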
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
new file mode 100644
index 000000000000..e36b296fc9f8
--- /dev/null
+++ b/lib/decompress_inflate.c
@@ -0,0 +1,168 @@
1#ifdef STATIC
2/* Pre-boot environment: included */
3
4/* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots of
5 * errors about console_printk etc... on ARM */
6#define _LINUX_KERNEL_H
7
8#include "zlib_inflate/inftrees.c"
9#include "zlib_inflate/inffast.c"
10#include "zlib_inflate/inflate.c"
11
12#else /* STATIC */
13/* initramfs et al: linked */
14
15#include <linux/zutil.h>
16
17#include "zlib_inflate/inftrees.h"
18#include "zlib_inflate/inffast.h"
19#include "zlib_inflate/inflate.h"
20
21#include "zlib_inflate/infutil.h"
22
23#endif /* STATIC */
24
25#include <linux/decompress/mm.h>
26#include <linux/slab.h>
27
28#define INBUF_LEN (16*1024)
29
30/* Included from initramfs et al code */
31STATIC int INIT gunzip(unsigned char *buf, int len,
32 int(*fill)(void*, unsigned int),
33 int(*flush)(void*, unsigned int),
34 unsigned char *out_buf,
35 int *pos,
36 void(*error_fn)(char *x)) {
37 u8 *zbuf;
38 struct z_stream_s *strm;
39 int rc;
40 size_t out_len;
41
42 set_error_fn(error_fn);
43 rc = -1;
44 if (flush) {
45 out_len = 0x8000; /* 32 K */
46 out_buf = malloc(out_len);
47 } else {
48 out_len = 0x7fffffff; /* no limit */
49 }
50 if (!out_buf) {
51 error("Out of memory while allocating output buffer");
52 goto gunzip_nomem1;
53 }
54
55 if (buf)
56 zbuf = buf;
57 else {
58 zbuf = malloc(INBUF_LEN);
59 len = 0;
60 }
61 if (!zbuf) {
62 error("Out of memory while allocating input buffer");
63 goto gunzip_nomem2;
64 }
65
66 strm = malloc(sizeof(*strm));
67 if (strm == NULL) {
68 error("Out of memory while allocating z_stream");
69 goto gunzip_nomem3;
70 }
71
72 strm->workspace = malloc(flush ? zlib_inflate_workspacesize() :
73 sizeof(struct inflate_state));
74 if (strm->workspace == NULL) {
75 error("Out of memory while allocating workspace");
76 goto gunzip_nomem4;
77 }
78
79 if (len == 0)
80 len = fill(zbuf, INBUF_LEN);
81
82 /* verify the gzip header */
83 if (len < 10 ||
84 zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 0x08) {
85 if (pos)
86 *pos = 0;
87 error("Not a gzip file");
88 goto gunzip_5;
89 }
90
91 /* skip over gzip header (1f,8b,08... 10 bytes total +
92 * possible asciz filename)
93 */
94 strm->next_in = zbuf + 10;
95 /* skip over asciz filename */
96 if (zbuf[3] & 0x8) {
97 while (strm->next_in[0])
98 strm->next_in++;
99 strm->next_in++;
100 }
101 strm->avail_in = len - (strm->next_in - zbuf);
102
103 strm->next_out = out_buf;
104 strm->avail_out = out_len;
105
106 rc = zlib_inflateInit2(strm, -MAX_WBITS);
107
108 if (!flush) {
109 WS(strm)->inflate_state.wsize = 0;
110 WS(strm)->inflate_state.window = NULL;
111 }
112
113 while (rc == Z_OK) {
114 if (strm->avail_in == 0) {
115 /* TODO: handle case where both pos and fill are set */
116 len = fill(zbuf, INBUF_LEN);
117 if (len < 0) {
118 rc = -1;
119 error("read error");
120 break;
121 }
122 strm->next_in = zbuf;
123 strm->avail_in = len;
124 }
125 rc = zlib_inflate(strm, 0);
126
127 /* Write any data generated */
128 if (flush && strm->next_out > out_buf) {
129 int l = strm->next_out - out_buf;
130 if (l != flush(out_buf, l)) {
131 rc = -1;
132 error("write error");
133 break;
134 }
135 strm->next_out = out_buf;
136 strm->avail_out = out_len;
137 }
138
139 /* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
140 if (rc == Z_STREAM_END) {
141 rc = 0;
142 break;
143 } else if (rc != Z_OK) {
144			error("decompression error");
145 rc = -1;
146 }
147 }
148
149 zlib_inflateEnd(strm);
150 if (pos)
151		/* add 8 to skip over the trailer (CRC32 and size) */
152 *pos = strm->next_in - zbuf+8;
153
154gunzip_5:
155 free(strm->workspace);
156gunzip_nomem4:
157 free(strm);
158gunzip_nomem3:
159 if (!buf)
160 free(zbuf);
161gunzip_nomem2:
162 if (flush)
163 free(out_buf);
164gunzip_nomem1:
165 return rc; /* returns Z_OK (0) if successful */
166}
167
168#define decompress gunzip
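The header handling in gunzip() above can be summarized with a standalone sketch; this helper is illustrative only (like the kernel code, it handles the FNAME field and ignores the other optional gzip header fields):

/* Locate the start of the deflate payload in a gzip buffer: a 10-byte
 * fixed header (magic 1f 8b, method 08), then an optional NUL-terminated
 * file name when FNAME (0x08) is set in the flags byte. Returns the
 * payload offset, or -1 if the buffer does not look like gzip data. */
static long gzip_payload_offset(const unsigned char *z, long len)
{
	long off = 10;

	if (len < 10 || z[0] != 0x1f || z[1] != 0x8b || z[2] != 0x08)
		return -1;
	if (z[3] & 0x08) {			/* FNAME: skip asciz name */
		while (off < len && z[off])
			off++;
		off++;				/* and the NUL itself */
	}
	return off <= len ? off : -1;
}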
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
new file mode 100644
index 000000000000..32123a1340e6
--- /dev/null
+++ b/lib/decompress_unlzma.c
@@ -0,0 +1,648 @@
1/* Lzma decompressor for Linux kernel. Shamelessly snarfed
2 *from busybox 1.1.1
3 *
4 *Linux kernel adaptation
5 *Copyright (C) 2006 Alain < alain@knaff.lu >
6 *
7 *Based on small lzma deflate implementation/Small range coder
8 *implementation for lzma.
9 *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
10 *
11 *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
12 *Copyright (C) 1999-2005 Igor Pavlov
13 *
14 *Copyrights of the parts, see headers below.
15 *
16 *
17 *This program is free software; you can redistribute it and/or
18 *modify it under the terms of the GNU Lesser General Public
19 *License as published by the Free Software Foundation; either
20 *version 2.1 of the License, or (at your option) any later version.
21 *
22 *This program is distributed in the hope that it will be useful,
23 *but WITHOUT ANY WARRANTY; without even the implied warranty of
24 *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
25 *Lesser General Public License for more details.
26 *
27 *You should have received a copy of the GNU Lesser General Public
28 *License along with this library; if not, write to the Free Software
29 *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
30 */
31
32#ifndef STATIC
33#include <linux/decompress/unlzma.h>
34#endif /* STATIC */
35
36#include <linux/decompress/mm.h>
37#include <linux/slab.h>
38
39#define MIN(a, b) (((a) < (b)) ? (a) : (b))
40
41static long long INIT read_int(unsigned char *ptr, int size)
42{
43 int i;
44 long long ret = 0;
45
46 for (i = 0; i < size; i++)
47 ret = (ret << 8) | ptr[size-i-1];
48 return ret;
49}
50
51#define ENDIAN_CONVERT(x) \
52 x = (typeof(x))read_int((unsigned char *)&x, sizeof(x))
53
54
55/* Small range coder implementation for lzma.
56 *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
57 *
58 *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
59 *Copyright (c) 1999-2005 Igor Pavlov
60 */
61
62#include <linux/compiler.h>
63
64#define LZMA_IOBUF_SIZE 0x10000
65
66struct rc {
67 int (*fill)(void*, unsigned int);
68 uint8_t *ptr;
69 uint8_t *buffer;
70 uint8_t *buffer_end;
71 int buffer_size;
72 uint32_t code;
73 uint32_t range;
74 uint32_t bound;
75};
76
77
78#define RC_TOP_BITS 24
79#define RC_MOVE_BITS 5
80#define RC_MODEL_TOTAL_BITS 11
81
82
83/* Called twice: once at startup and once in rc_normalize() */
84static void INIT rc_read(struct rc *rc)
85{
86 rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE);
87 if (rc->buffer_size <= 0)
88 error("unexpected EOF");
89 rc->ptr = rc->buffer;
90 rc->buffer_end = rc->buffer + rc->buffer_size;
91}
92
93/* Called once */
94static inline void INIT rc_init(struct rc *rc,
95 int (*fill)(void*, unsigned int),
96 char *buffer, int buffer_size)
97{
98 rc->fill = fill;
99 rc->buffer = (uint8_t *)buffer;
100 rc->buffer_size = buffer_size;
101 rc->buffer_end = rc->buffer + rc->buffer_size;
102 rc->ptr = rc->buffer;
103
104 rc->code = 0;
105 rc->range = 0xFFFFFFFF;
106}
107
108static inline void INIT rc_init_code(struct rc *rc)
109{
110 int i;
111
112 for (i = 0; i < 5; i++) {
113 if (rc->ptr >= rc->buffer_end)
114 rc_read(rc);
115 rc->code = (rc->code << 8) | *rc->ptr++;
116 }
117}
118
119
120/* Called once. TODO: bb_maybe_free() */
121static inline void INIT rc_free(struct rc *rc)
122{
123 free(rc->buffer);
124}
125
126/* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */
127static void INIT rc_do_normalize(struct rc *rc)
128{
129 if (rc->ptr >= rc->buffer_end)
130 rc_read(rc);
131 rc->range <<= 8;
132 rc->code = (rc->code << 8) | *rc->ptr++;
133}
134static inline void INIT rc_normalize(struct rc *rc)
135{
136 if (rc->range < (1 << RC_TOP_BITS))
137 rc_do_normalize(rc);
138}
139
140/* Called 9 times */
141/* Why does rc_is_bit_0_helper() exist?
142 *Because we want to always expose (rc->code < rc->bound) to the optimizer
143 */
144static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p)
145{
146 rc_normalize(rc);
147 rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
148 return rc->bound;
149}
150static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p)
151{
152 uint32_t t = rc_is_bit_0_helper(rc, p);
153 return rc->code < t;
154}
155
156/* Called ~10 times, but very small, thus inlined */
157static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p)
158{
159 rc->range = rc->bound;
160 *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
161}
162static inline void rc_update_bit_1(struct rc *rc, uint16_t *p)
163{
164 rc->range -= rc->bound;
165 rc->code -= rc->bound;
166 *p -= *p >> RC_MOVE_BITS;
167}
168
169/* Called 4 times in unlzma loop */
170static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol)
171{
172 if (rc_is_bit_0(rc, p)) {
173 rc_update_bit_0(rc, p);
174 *symbol *= 2;
175 return 0;
176 } else {
177 rc_update_bit_1(rc, p);
178 *symbol = *symbol * 2 + 1;
179 return 1;
180 }
181}
182
183/* Called once */
184static inline int INIT rc_direct_bit(struct rc *rc)
185{
186 rc_normalize(rc);
187 rc->range >>= 1;
188 if (rc->code >= rc->range) {
189 rc->code -= rc->range;
190 return 1;
191 }
192 return 0;
193}
194
195/* Called twice */
196static inline void INIT
197rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
198{
199 int i = num_levels;
200
201 *symbol = 1;
202 while (i--)
203 rc_get_bit(rc, p + *symbol, symbol);
204 *symbol -= 1 << num_levels;
205}
206
207
208/*
209 * Small lzma deflate implementation.
210 * Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
211 *
212 * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
213 * Copyright (C) 1999-2005 Igor Pavlov
214 */
215
216
217struct lzma_header {
218 uint8_t pos;
219 uint32_t dict_size;
220 uint64_t dst_size;
221} __attribute__ ((packed)) ;
222
223
224#define LZMA_BASE_SIZE 1846
225#define LZMA_LIT_SIZE 768
226
227#define LZMA_NUM_POS_BITS_MAX 4
228
229#define LZMA_LEN_NUM_LOW_BITS 3
230#define LZMA_LEN_NUM_MID_BITS 3
231#define LZMA_LEN_NUM_HIGH_BITS 8
232
233#define LZMA_LEN_CHOICE 0
234#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
235#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
236#define LZMA_LEN_MID (LZMA_LEN_LOW \
237 + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
238#define LZMA_LEN_HIGH (LZMA_LEN_MID \
239 +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
240#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))
241
242#define LZMA_NUM_STATES 12
243#define LZMA_NUM_LIT_STATES 7
244
245#define LZMA_START_POS_MODEL_INDEX 4
246#define LZMA_END_POS_MODEL_INDEX 14
247#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))
248
249#define LZMA_NUM_POS_SLOT_BITS 6
250#define LZMA_NUM_LEN_TO_POS_STATES 4
251
252#define LZMA_NUM_ALIGN_BITS 4
253
254#define LZMA_MATCH_MIN_LEN 2
255
256#define LZMA_IS_MATCH 0
257#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
258#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
259#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
260#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
261#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
262#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
263 + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
264#define LZMA_SPEC_POS (LZMA_POS_SLOT \
265 +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
266#define LZMA_ALIGN (LZMA_SPEC_POS \
267 + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
268#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
269#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
270#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)
271
272
273struct writer {
274 uint8_t *buffer;
275 uint8_t previous_byte;
276 size_t buffer_pos;
277 int bufsize;
278 size_t global_pos;
279 int(*flush)(void*, unsigned int);
280 struct lzma_header *header;
281};
282
283struct cstate {
284 int state;
285 uint32_t rep0, rep1, rep2, rep3;
286};
287
288static inline size_t INIT get_pos(struct writer *wr)
289{
290 return
291 wr->global_pos + wr->buffer_pos;
292}
293
294static inline uint8_t INIT peek_old_byte(struct writer *wr,
295 uint32_t offs)
296{
297 if (!wr->flush) {
298 int32_t pos;
299 while (offs > wr->header->dict_size)
300 offs -= wr->header->dict_size;
301 pos = wr->buffer_pos - offs;
302 return wr->buffer[pos];
303 } else {
304 uint32_t pos = wr->buffer_pos - offs;
305 while (pos >= wr->header->dict_size)
306 pos += wr->header->dict_size;
307 return wr->buffer[pos];
308 }
309
310}
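/*
 * Note on the flush branch of peek_old_byte() above: `pos' is unsigned,
 * so when buffer_pos < offs the subtraction wraps around; adding
 * dict_size then wraps it back to the intended slot near the end of the
 * circular dictionary window.
 */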
311
312static inline void INIT write_byte(struct writer *wr, uint8_t byte)
313{
314 wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte;
315 if (wr->flush && wr->buffer_pos == wr->header->dict_size) {
316 wr->buffer_pos = 0;
317 wr->global_pos += wr->header->dict_size;
318 wr->flush((char *)wr->buffer, wr->header->dict_size);
319 }
320}
321
322
323static inline void INIT copy_byte(struct writer *wr, uint32_t offs)
324{
325 write_byte(wr, peek_old_byte(wr, offs));
326}
327
328static inline void INIT copy_bytes(struct writer *wr,
329 uint32_t rep0, int len)
330{
331 do {
332 copy_byte(wr, rep0);
333 len--;
334 } while (len != 0 && wr->buffer_pos < wr->header->dst_size);
335}
336
337static inline void INIT process_bit0(struct writer *wr, struct rc *rc,
338 struct cstate *cst, uint16_t *p,
339 int pos_state, uint16_t *prob,
340 int lc, uint32_t literal_pos_mask) {
341 int mi = 1;
342 rc_update_bit_0(rc, prob);
343 prob = (p + LZMA_LITERAL +
344 (LZMA_LIT_SIZE
345 * (((get_pos(wr) & literal_pos_mask) << lc)
346 + (wr->previous_byte >> (8 - lc))))
347 );
348
349 if (cst->state >= LZMA_NUM_LIT_STATES) {
350 int match_byte = peek_old_byte(wr, cst->rep0);
351 do {
352 int bit;
353 uint16_t *prob_lit;
354
355 match_byte <<= 1;
356 bit = match_byte & 0x100;
357 prob_lit = prob + 0x100 + bit + mi;
358 if (rc_get_bit(rc, prob_lit, &mi)) {
359 if (!bit)
360 break;
361 } else {
362 if (bit)
363 break;
364 }
365 } while (mi < 0x100);
366 }
367 while (mi < 0x100) {
368 uint16_t *prob_lit = prob + mi;
369 rc_get_bit(rc, prob_lit, &mi);
370 }
371 write_byte(wr, mi);
372 if (cst->state < 4)
373 cst->state = 0;
374 else if (cst->state < 10)
375 cst->state -= 3;
376 else
377 cst->state -= 6;
378}
379
380static inline void INIT process_bit1(struct writer *wr, struct rc *rc,
381 struct cstate *cst, uint16_t *p,
382 int pos_state, uint16_t *prob) {
383 int offset;
384 uint16_t *prob_len;
385 int num_bits;
386 int len;
387
388 rc_update_bit_1(rc, prob);
389 prob = p + LZMA_IS_REP + cst->state;
390 if (rc_is_bit_0(rc, prob)) {
391 rc_update_bit_0(rc, prob);
392 cst->rep3 = cst->rep2;
393 cst->rep2 = cst->rep1;
394 cst->rep1 = cst->rep0;
395 cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3;
396 prob = p + LZMA_LEN_CODER;
397 } else {
398 rc_update_bit_1(rc, prob);
399 prob = p + LZMA_IS_REP_G0 + cst->state;
400 if (rc_is_bit_0(rc, prob)) {
401 rc_update_bit_0(rc, prob);
402 prob = (p + LZMA_IS_REP_0_LONG
403 + (cst->state <<
404 LZMA_NUM_POS_BITS_MAX) +
405 pos_state);
406 if (rc_is_bit_0(rc, prob)) {
407 rc_update_bit_0(rc, prob);
408
409 cst->state = cst->state < LZMA_NUM_LIT_STATES ?
410 9 : 11;
411 copy_byte(wr, cst->rep0);
412 return;
413 } else {
414 rc_update_bit_1(rc, prob);
415 }
416 } else {
417 uint32_t distance;
418
419 rc_update_bit_1(rc, prob);
420 prob = p + LZMA_IS_REP_G1 + cst->state;
421 if (rc_is_bit_0(rc, prob)) {
422 rc_update_bit_0(rc, prob);
423 distance = cst->rep1;
424 } else {
425 rc_update_bit_1(rc, prob);
426 prob = p + LZMA_IS_REP_G2 + cst->state;
427 if (rc_is_bit_0(rc, prob)) {
428 rc_update_bit_0(rc, prob);
429 distance = cst->rep2;
430 } else {
431 rc_update_bit_1(rc, prob);
432 distance = cst->rep3;
433 cst->rep3 = cst->rep2;
434 }
435 cst->rep2 = cst->rep1;
436 }
437 cst->rep1 = cst->rep0;
438 cst->rep0 = distance;
439 }
440 cst->state = cst->state < LZMA_NUM_LIT_STATES ? 8 : 11;
441 prob = p + LZMA_REP_LEN_CODER;
442 }
443
444 prob_len = prob + LZMA_LEN_CHOICE;
445 if (rc_is_bit_0(rc, prob_len)) {
446 rc_update_bit_0(rc, prob_len);
447 prob_len = (prob + LZMA_LEN_LOW
448 + (pos_state <<
449 LZMA_LEN_NUM_LOW_BITS));
450 offset = 0;
451 num_bits = LZMA_LEN_NUM_LOW_BITS;
452 } else {
453 rc_update_bit_1(rc, prob_len);
454 prob_len = prob + LZMA_LEN_CHOICE_2;
455 if (rc_is_bit_0(rc, prob_len)) {
456 rc_update_bit_0(rc, prob_len);
457 prob_len = (prob + LZMA_LEN_MID
458 + (pos_state <<
459 LZMA_LEN_NUM_MID_BITS));
460 offset = 1 << LZMA_LEN_NUM_LOW_BITS;
461 num_bits = LZMA_LEN_NUM_MID_BITS;
462 } else {
463 rc_update_bit_1(rc, prob_len);
464 prob_len = prob + LZMA_LEN_HIGH;
465 offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
466 + (1 << LZMA_LEN_NUM_MID_BITS));
467 num_bits = LZMA_LEN_NUM_HIGH_BITS;
468 }
469 }
470
471 rc_bit_tree_decode(rc, prob_len, num_bits, &len);
472 len += offset;
473
474 if (cst->state < 4) {
475 int pos_slot;
476
477 cst->state += LZMA_NUM_LIT_STATES;
478 prob =
479 p + LZMA_POS_SLOT +
480 ((len <
481 LZMA_NUM_LEN_TO_POS_STATES ? len :
482 LZMA_NUM_LEN_TO_POS_STATES - 1)
483 << LZMA_NUM_POS_SLOT_BITS);
484 rc_bit_tree_decode(rc, prob,
485 LZMA_NUM_POS_SLOT_BITS,
486 &pos_slot);
487 if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
488 int i, mi;
489 num_bits = (pos_slot >> 1) - 1;
490 cst->rep0 = 2 | (pos_slot & 1);
491 if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
492 cst->rep0 <<= num_bits;
493 prob = p + LZMA_SPEC_POS +
494 cst->rep0 - pos_slot - 1;
495 } else {
496 num_bits -= LZMA_NUM_ALIGN_BITS;
497 while (num_bits--)
498 cst->rep0 = (cst->rep0 << 1) |
499 rc_direct_bit(rc);
500 prob = p + LZMA_ALIGN;
501 cst->rep0 <<= LZMA_NUM_ALIGN_BITS;
502 num_bits = LZMA_NUM_ALIGN_BITS;
503 }
504 i = 1;
505 mi = 1;
506 while (num_bits--) {
507 if (rc_get_bit(rc, prob + mi, &mi))
508 cst->rep0 |= i;
509 i <<= 1;
510 }
511 } else
512 cst->rep0 = pos_slot;
513 if (++(cst->rep0) == 0)
514 return;
515 }
516
517 len += LZMA_MATCH_MIN_LEN;
518
519 copy_bytes(wr, cst->rep0, len);
520}
521
522
523
524STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
525 int(*fill)(void*, unsigned int),
526 int(*flush)(void*, unsigned int),
527 unsigned char *output,
528 int *posp,
529 void(*error_fn)(char *x)
530 )
531{
532 struct lzma_header header;
533 int lc, pb, lp;
534 uint32_t pos_state_mask;
535 uint32_t literal_pos_mask;
536 uint16_t *p;
537 int num_probs;
538 struct rc rc;
539 int i, mi;
540 struct writer wr;
541 struct cstate cst;
542 unsigned char *inbuf;
543 int ret = -1;
544
545 set_error_fn(error_fn);
546 if (!flush)
547 in_len -= 4; /* Uncompressed size hack active in pre-boot
548 environment */
549 if (buf)
550 inbuf = buf;
551 else
552 inbuf = malloc(LZMA_IOBUF_SIZE);
553 if (!inbuf) {
554		error("Could not allocate input buffer");
555 goto exit_0;
556 }
557
558 cst.state = 0;
559 cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1;
560
561 wr.header = &header;
562 wr.flush = flush;
563 wr.global_pos = 0;
564 wr.previous_byte = 0;
565 wr.buffer_pos = 0;
566
567 rc_init(&rc, fill, inbuf, in_len);
568
569 for (i = 0; i < sizeof(header); i++) {
570 if (rc.ptr >= rc.buffer_end)
571 rc_read(&rc);
572 ((unsigned char *)&header)[i] = *rc.ptr++;
573 }
574
575 if (header.pos >= (9 * 5 * 5))
576 error("bad header");
577
578 mi = 0;
579 lc = header.pos;
580 while (lc >= 9) {
581 mi++;
582 lc -= 9;
583 }
584 pb = 0;
585 lp = mi;
586 while (lp >= 5) {
587 pb++;
588 lp -= 5;
589 }
590 pos_state_mask = (1 << pb) - 1;
591 literal_pos_mask = (1 << lp) - 1;
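	/*
	 * The single properties byte encodes pos = (pb * 5 + lp) * 9 + lc,
	 * so the loops above recover lc = pos % 9, lp = (pos / 9) % 5 and
	 * pb = pos / 45; e.g. the common default byte 0x5D (93) yields
	 * lc = 3, lp = 0, pb = 2.
	 */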
592
593 ENDIAN_CONVERT(header.dict_size);
594 ENDIAN_CONVERT(header.dst_size);
595
596 if (header.dict_size == 0)
597 header.dict_size = 1;
598
599 if (output)
600 wr.buffer = output;
601 else {
602 wr.bufsize = MIN(header.dst_size, header.dict_size);
603 wr.buffer = large_malloc(wr.bufsize);
604 }
605 if (wr.buffer == NULL)
606 goto exit_1;
607
608 num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
609 p = (uint16_t *) large_malloc(num_probs * sizeof(*p));
610 if (p == 0)
611 goto exit_2;
612 num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
613 for (i = 0; i < num_probs; i++)
614 p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
615
616 rc_init_code(&rc);
617
618 while (get_pos(&wr) < header.dst_size) {
619 int pos_state = get_pos(&wr) & pos_state_mask;
620 uint16_t *prob = p + LZMA_IS_MATCH +
621 (cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state;
622 if (rc_is_bit_0(&rc, prob))
623 process_bit0(&wr, &rc, &cst, p, pos_state, prob,
624 lc, literal_pos_mask);
625 else {
626 process_bit1(&wr, &rc, &cst, p, pos_state, prob);
627 if (cst.rep0 == 0)
628 break;
629 }
630 }
631
632 if (posp)
633 *posp = rc.ptr-rc.buffer;
634 if (wr.flush)
635 wr.flush(wr.buffer, wr.buffer_pos);
636 ret = 0;
637 large_free(p);
638exit_2:
639 if (!output)
640 large_free(wr.buffer);
641exit_1:
642 if (!buf)
643 free(inbuf);
644exit_0:
645 return ret;
646}
647
648#define decompress unlzma
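As an aside, the adaptive probability update used by rc_update_bit_0() and
rc_update_bit_1() above is easy to explore outside the kernel. The following
stand-alone sketch assumes the decoder's usual constants
RC_MODEL_TOTAL_BITS = 11 and RC_MOVE_BITS = 5 (defined earlier in this file):
each decoded bit moves the estimate 1/32 of the way toward one end of the
[0, 2048) scale.

#include <stdint.h>
#include <stdio.h>

#define MODEL_TOTAL_BITS 11	/* probabilities live in [0, 1 << 11) */
#define MOVE_BITS 5		/* adaptation step: 1/32 of the remaining gap */

int main(void)
{
	uint16_t p = (1 << MODEL_TOTAL_BITS) >> 1;	/* start at 1024, P(bit=0) = 0.5 */
	int i;

	for (i = 0; i < 8; i++) {
		/* what rc_update_bit_0() does: move p toward 2048 */
		p += ((1 << MODEL_TOTAL_BITS) - p) >> MOVE_BITS;
		printf("after %d zero bits: p = %u\n", i + 1, p);
	}
	return 0;
}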
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
new file mode 100644
index 000000000000..d3da7edc034f
--- /dev/null
+++ b/lib/dma-debug.c
@@ -0,0 +1,955 @@
1/*
2 * Copyright (C) 2008 Advanced Micro Devices, Inc.
3 *
4 * Author: Joerg Roedel <joerg.roedel@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/scatterlist.h>
21#include <linux/dma-mapping.h>
22#include <linux/stacktrace.h>
23#include <linux/dma-debug.h>
24#include <linux/spinlock.h>
25#include <linux/debugfs.h>
26#include <linux/device.h>
27#include <linux/types.h>
28#include <linux/sched.h>
29#include <linux/list.h>
30#include <linux/slab.h>
31
32#include <asm/sections.h>
33
34#define HASH_SIZE 1024ULL
35#define HASH_FN_SHIFT 13
36#define HASH_FN_MASK (HASH_SIZE - 1)
37
38enum {
39 dma_debug_single,
40 dma_debug_page,
41 dma_debug_sg,
42 dma_debug_coherent,
43};
44
45#define DMA_DEBUG_STACKTRACE_ENTRIES 5
46
47struct dma_debug_entry {
48 struct list_head list;
49 struct device *dev;
50 int type;
51 phys_addr_t paddr;
52 u64 dev_addr;
53 u64 size;
54 int direction;
55 int sg_call_ents;
56 int sg_mapped_ents;
57#ifdef CONFIG_STACKTRACE
58 struct stack_trace stacktrace;
59 unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
60#endif
61};
62
63struct hash_bucket {
64 struct list_head list;
65 spinlock_t lock;
66} ____cacheline_aligned_in_smp;
67
68/* Hash list to save the allocated dma addresses */
69static struct hash_bucket dma_entry_hash[HASH_SIZE];
70/* List of pre-allocated dma_debug_entry's */
71static LIST_HEAD(free_entries);
72/* Lock for the list above */
73static DEFINE_SPINLOCK(free_entries_lock);
74
75/* Global disable flag - will be set in case of an error */
76static bool global_disable __read_mostly;
77
78/* Global error count */
79static u32 error_count;
80
82/* Global error show enable */
82static u32 show_all_errors __read_mostly;
83/* Number of errors to show */
84static u32 show_num_errors = 1;
85
86static u32 num_free_entries;
87static u32 min_free_entries;
88
89/* number of preallocated entries requested by kernel cmdline */
90static u32 req_entries;
91
92/* debugfs dentries for the stuff above */
93static struct dentry *dma_debug_dent __read_mostly;
94static struct dentry *global_disable_dent __read_mostly;
95static struct dentry *error_count_dent __read_mostly;
96static struct dentry *show_all_errors_dent __read_mostly;
97static struct dentry *show_num_errors_dent __read_mostly;
98static struct dentry *num_free_entries_dent __read_mostly;
99static struct dentry *min_free_entries_dent __read_mostly;
100
101static const char *type2name[4] = { "single", "page",
102		"scatter-gather", "coherent" };
103
104static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
105 "DMA_FROM_DEVICE", "DMA_NONE" };
106
107/*
108 * The access to some variables in this macro is racy. We can't use atomic_t
109 * here because all these variables are exported to debugfs. Some of them are
110 * even writable. This is also the reason why a lock won't help much. But anyway,
111 * the races are no big deal. Here is why:
112 *
113 * error_count: the addition is racy, but the worst thing that can happen is
114 * that we don't count some errors
115 * show_num_errors: the subtraction is racy. Also no big deal because in
116 * worst case this will result in one warning more in the
117 * system log than the user configured. This variable is
118 * writeable via debugfs.
119 */
120static inline void dump_entry_trace(struct dma_debug_entry *entry)
121{
122#ifdef CONFIG_STACKTRACE
123 if (entry) {
124 printk(KERN_WARNING "Mapped at:\n");
125 print_stack_trace(&entry->stacktrace, 0);
126 }
127#endif
128}
129
130#define err_printk(dev, entry, format, arg...) do { \
131 error_count += 1; \
132 if (show_all_errors || show_num_errors > 0) { \
133 WARN(1, "%s %s: " format, \
134 dev_driver_string(dev), \
135 dev_name(dev) , ## arg); \
136 dump_entry_trace(entry); \
137 } \
138 if (!show_all_errors && show_num_errors > 0) \
139 show_num_errors -= 1; \
140 } while (0);
141
142/*
143 * Hash related functions
144 *
145 * Every DMA-API request is saved into a struct dma_debug_entry. To
146 * have quick access to these structs they are stored into a hash.
147 */
148static int hash_fn(struct dma_debug_entry *entry)
149{
150 /*
151 * Hash function is based on the dma address.
152	 * We use bits 13-22 here as the index into the hash
153 */
154 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
155}
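/*
 * With HASH_SIZE 1024 and HASH_FN_SHIFT 13 this means device addresses
 * in the same 8 KiB block share a bucket, and the bucket index wraps
 * around every 8 MiB of device address space.
 */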
156
157/*
158 * Request exclusive access to a hash bucket for a given dma_debug_entry.
159 */
160static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
161 unsigned long *flags)
162{
163 int idx = hash_fn(entry);
164 unsigned long __flags;
165
166 spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
167 *flags = __flags;
168 return &dma_entry_hash[idx];
169}
170
171/*
172 * Give up exclusive access to the hash bucket
173 */
174static void put_hash_bucket(struct hash_bucket *bucket,
175 unsigned long *flags)
176{
177 unsigned long __flags = *flags;
178
179 spin_unlock_irqrestore(&bucket->lock, __flags);
180}
181
182/*
183 * Search a given entry in the hash bucket list
184 */
185static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
186 struct dma_debug_entry *ref)
187{
188 struct dma_debug_entry *entry;
189
190 list_for_each_entry(entry, &bucket->list, list) {
191 if ((entry->dev_addr == ref->dev_addr) &&
192 (entry->dev == ref->dev))
193 return entry;
194 }
195
196 return NULL;
197}
198
199/*
200 * Add an entry to a hash bucket
201 */
202static void hash_bucket_add(struct hash_bucket *bucket,
203 struct dma_debug_entry *entry)
204{
205 list_add_tail(&entry->list, &bucket->list);
206}
207
208/*
209 * Remove entry from a hash bucket list
210 */
211static void hash_bucket_del(struct dma_debug_entry *entry)
212{
213 list_del(&entry->list);
214}
215
216/*
217 * Dump mapping entries for debugging purposes
218 */
219void debug_dma_dump_mappings(struct device *dev)
220{
221 int idx;
222
223 for (idx = 0; idx < HASH_SIZE; idx++) {
224 struct hash_bucket *bucket = &dma_entry_hash[idx];
225 struct dma_debug_entry *entry;
226 unsigned long flags;
227
228 spin_lock_irqsave(&bucket->lock, flags);
229
230 list_for_each_entry(entry, &bucket->list, list) {
231 if (!dev || dev == entry->dev) {
232 dev_info(entry->dev,
233 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
234 type2name[entry->type], idx,
235 (unsigned long long)entry->paddr,
236 entry->dev_addr, entry->size,
237 dir2name[entry->direction]);
238 }
239 }
240
241 spin_unlock_irqrestore(&bucket->lock, flags);
242 }
243}
244EXPORT_SYMBOL(debug_dma_dump_mappings);
245
246/*
247 * Wrapper function for adding an entry to the hash.
248 * This function takes care of locking itself.
249 */
250static void add_dma_entry(struct dma_debug_entry *entry)
251{
252 struct hash_bucket *bucket;
253 unsigned long flags;
254
255 bucket = get_hash_bucket(entry, &flags);
256 hash_bucket_add(bucket, entry);
257 put_hash_bucket(bucket, &flags);
258}
259
260/* struct dma_debug_entry allocator
261 *
262 * The next two functions implement the allocator for
263 * struct dma_debug_entries.
264 */
265static struct dma_debug_entry *dma_entry_alloc(void)
266{
267 struct dma_debug_entry *entry = NULL;
268 unsigned long flags;
269
270 spin_lock_irqsave(&free_entries_lock, flags);
271
272 if (list_empty(&free_entries)) {
273 printk(KERN_ERR "DMA-API: debugging out of memory "
274 "- disabling\n");
275 global_disable = true;
276 goto out;
277 }
278
279 entry = list_entry(free_entries.next, struct dma_debug_entry, list);
280 list_del(&entry->list);
281 memset(entry, 0, sizeof(*entry));
282
283#ifdef CONFIG_STACKTRACE
284 entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
285 entry->stacktrace.entries = entry->st_entries;
286 entry->stacktrace.skip = 2;
287 save_stack_trace(&entry->stacktrace);
288#endif
289 num_free_entries -= 1;
290 if (num_free_entries < min_free_entries)
291 min_free_entries = num_free_entries;
292
293out:
294 spin_unlock_irqrestore(&free_entries_lock, flags);
295
296 return entry;
297}
298
299static void dma_entry_free(struct dma_debug_entry *entry)
300{
301 unsigned long flags;
302
303 /*
304 * add to beginning of the list - this way the entries are
305 * more likely cache hot when they are reallocated.
306 */
307 spin_lock_irqsave(&free_entries_lock, flags);
308 list_add(&entry->list, &free_entries);
309 num_free_entries += 1;
310 spin_unlock_irqrestore(&free_entries_lock, flags);
311}
312
313/*
314 * DMA-API debugging init code
315 *
316 * The init code does two things:
317 * 1. Initialize core data structures
318 * 2. Preallocate a given number of dma_debug_entry structs
319 */
320
321static int prealloc_memory(u32 num_entries)
322{
323 struct dma_debug_entry *entry, *next_entry;
324 int i;
325
326 for (i = 0; i < num_entries; ++i) {
327 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
328 if (!entry)
329 goto out_err;
330
331 list_add_tail(&entry->list, &free_entries);
332 }
333
334 num_free_entries = num_entries;
335 min_free_entries = num_entries;
336
337 printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
338 num_entries);
339
340 return 0;
341
342out_err:
343
344 list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
345 list_del(&entry->list);
346 kfree(entry);
347 }
348
349 return -ENOMEM;
350}
351
352static int dma_debug_fs_init(void)
353{
354 dma_debug_dent = debugfs_create_dir("dma-api", NULL);
355 if (!dma_debug_dent) {
356 printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
357 return -ENOMEM;
358 }
359
360 global_disable_dent = debugfs_create_bool("disabled", 0444,
361 dma_debug_dent,
362 (u32 *)&global_disable);
363 if (!global_disable_dent)
364 goto out_err;
365
366 error_count_dent = debugfs_create_u32("error_count", 0444,
367 dma_debug_dent, &error_count);
368 if (!error_count_dent)
369 goto out_err;
370
371 show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
372 dma_debug_dent,
373 &show_all_errors);
374 if (!show_all_errors_dent)
375 goto out_err;
376
377 show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
378 dma_debug_dent,
379 &show_num_errors);
380 if (!show_num_errors_dent)
381 goto out_err;
382
383 num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
384 dma_debug_dent,
385 &num_free_entries);
386 if (!num_free_entries_dent)
387 goto out_err;
388
389 min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
390 dma_debug_dent,
391 &min_free_entries);
392 if (!min_free_entries_dent)
393 goto out_err;
394
395 return 0;
396
397out_err:
398 debugfs_remove_recursive(dma_debug_dent);
399
400 return -ENOMEM;
401}
402
403static int device_dma_allocations(struct device *dev)
404{
405 struct dma_debug_entry *entry;
406 unsigned long flags;
407 int count = 0, i;
408
409 for (i = 0; i < HASH_SIZE; ++i) {
410 spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
411 list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
412 if (entry->dev == dev)
413 count += 1;
414 }
415 spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
416 }
417
418 return count;
419}
420
421static int dma_debug_device_change(struct notifier_block *nb,
422 unsigned long action, void *data)
423{
424 struct device *dev = data;
425 int count;
426
427
428 switch (action) {
429 case BUS_NOTIFY_UNBIND_DRIVER:
430 count = device_dma_allocations(dev);
431 if (count == 0)
432 break;
433 err_printk(dev, NULL, "DMA-API: device driver has pending "
434			"DMA allocations while being released from device "
435 "[count=%d]\n", count);
436 break;
437 default:
438 break;
439 }
440
441 return 0;
442}
443
444void dma_debug_add_bus(struct bus_type *bus)
445{
446 struct notifier_block *nb;
447
448 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
449 if (nb == NULL) {
450 printk(KERN_ERR "dma_debug_add_bus: out of memory\n");
451 return;
452 }
453
454 nb->notifier_call = dma_debug_device_change;
455
456 bus_register_notifier(bus, nb);
457}
458
459/*
460 * Let the architectures decide how many entries should be preallocated.
461 */
462void dma_debug_init(u32 num_entries)
463{
464 int i;
465
466 if (global_disable)
467 return;
468
469 for (i = 0; i < HASH_SIZE; ++i) {
470 INIT_LIST_HEAD(&dma_entry_hash[i].list);
471 dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
472 }
473
474 if (dma_debug_fs_init() != 0) {
475 printk(KERN_ERR "DMA-API: error creating debugfs entries "
476 "- disabling\n");
477 global_disable = true;
478
479 return;
480 }
481
482 if (req_entries)
483 num_entries = req_entries;
484
485 if (prealloc_memory(num_entries) != 0) {
486 printk(KERN_ERR "DMA-API: debugging out of memory error "
487 "- disabled\n");
488 global_disable = true;
489
490 return;
491 }
492
493 printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
494}
495
496static __init int dma_debug_cmdline(char *str)
497{
498 if (!str)
499 return -EINVAL;
500
501 if (strncmp(str, "off", 3) == 0) {
502 printk(KERN_INFO "DMA-API: debugging disabled on kernel "
503 "command line\n");
504 global_disable = true;
505 }
506
507 return 0;
508}
509
510static __init int dma_debug_entries_cmdline(char *str)
511{
512 int res;
513
514 if (!str)
515 return -EINVAL;
516
517 res = get_option(&str, &req_entries);
518
519 if (!res)
520 req_entries = 0;
521
522 return 0;
523}
524
525__setup("dma_debug=", dma_debug_cmdline);
526__setup("dma_debug_entries=", dma_debug_entries_cmdline);
527
528static void check_unmap(struct dma_debug_entry *ref)
529{
530 struct dma_debug_entry *entry;
531 struct hash_bucket *bucket;
532 unsigned long flags;
533
534 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
535 err_printk(ref->dev, NULL, "DMA-API: device driver tries "
536 "to free an invalid DMA memory address\n");
537 return;
538 }
539
540 bucket = get_hash_bucket(ref, &flags);
541 entry = hash_bucket_find(bucket, ref);
542
543 if (!entry) {
544 err_printk(ref->dev, NULL, "DMA-API: device driver tries "
545 "to free DMA memory it has not allocated "
546 "[device address=0x%016llx] [size=%llu bytes]\n",
547 ref->dev_addr, ref->size);
548 goto out;
549 }
550
551 if (ref->size != entry->size) {
552 err_printk(ref->dev, entry, "DMA-API: device driver frees "
553 "DMA memory with different size "
554 "[device address=0x%016llx] [map size=%llu bytes] "
555 "[unmap size=%llu bytes]\n",
556 ref->dev_addr, entry->size, ref->size);
557 }
558
559 if (ref->type != entry->type) {
560 err_printk(ref->dev, entry, "DMA-API: device driver frees "
561 "DMA memory with wrong function "
562 "[device address=0x%016llx] [size=%llu bytes] "
563 "[mapped as %s] [unmapped as %s]\n",
564 ref->dev_addr, ref->size,
565 type2name[entry->type], type2name[ref->type]);
566 } else if ((entry->type == dma_debug_coherent) &&
567 (ref->paddr != entry->paddr)) {
568 err_printk(ref->dev, entry, "DMA-API: device driver frees "
569 "DMA memory with different CPU address "
570 "[device address=0x%016llx] [size=%llu bytes] "
571 "[cpu alloc address=%p] [cpu free address=%p]",
572 ref->dev_addr, ref->size,
573 (void *)entry->paddr, (void *)ref->paddr);
574 }
575
576 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
577 ref->sg_call_ents != entry->sg_call_ents) {
578 err_printk(ref->dev, entry, "DMA-API: device driver frees "
579 "DMA sg list with different entry count "
580 "[map count=%d] [unmap count=%d]\n",
581 entry->sg_call_ents, ref->sg_call_ents);
582 }
583
584 /*
585	 * This may not be a bug in reality - but most implementations of the
586 * DMA API don't handle this properly, so check for it here
587 */
588 if (ref->direction != entry->direction) {
589 err_printk(ref->dev, entry, "DMA-API: device driver frees "
590 "DMA memory with different direction "
591 "[device address=0x%016llx] [size=%llu bytes] "
592 "[mapped with %s] [unmapped with %s]\n",
593 ref->dev_addr, ref->size,
594 dir2name[entry->direction],
595 dir2name[ref->direction]);
596 }
597
598 hash_bucket_del(entry);
599 dma_entry_free(entry);
600
601out:
602 put_hash_bucket(bucket, &flags);
603}
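/*
 * A typical bug the direction check above catches, sketched against the
 * generic mapping API (illustrative, not taken from a real driver):
 *
 *	addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(dev, addr, size, DMA_FROM_DEVICE);
 */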
604
605static void check_for_stack(struct device *dev, void *addr)
606{
607 if (object_is_on_stack(addr))
608		err_printk(dev, NULL, "DMA-API: device driver maps memory from "
609 "stack [addr=%p]\n", addr);
610}
611
612static inline bool overlap(void *addr, u64 size, void *start, void *end)
613{
614 void *addr2 = (char *)addr + size;
615
616 return ((addr >= start && addr < end) ||
617 (addr2 >= start && addr2 < end) ||
618 ((addr < start) && (addr2 >= end)));
619}
620
621static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
622{
623 if (overlap(addr, size, _text, _etext) ||
624 overlap(addr, size, __start_rodata, __end_rodata))
625 err_printk(dev, NULL, "DMA-API: device driver maps "
626 "memory from kernel text or rodata "
627 "[addr=%p] [size=%llu]\n", addr, size);
628}
629
630static void check_sync(struct device *dev, dma_addr_t addr,
631 u64 size, u64 offset, int direction, bool to_cpu)
632{
633 struct dma_debug_entry ref = {
634 .dev = dev,
635 .dev_addr = addr,
636 .size = size,
637 .direction = direction,
638 };
639 struct dma_debug_entry *entry;
640 struct hash_bucket *bucket;
641 unsigned long flags;
642
643 bucket = get_hash_bucket(&ref, &flags);
644
645 entry = hash_bucket_find(bucket, &ref);
646
647 if (!entry) {
648 err_printk(dev, NULL, "DMA-API: device driver tries "
649 "to sync DMA memory it has not allocated "
650 "[device address=0x%016llx] [size=%llu bytes]\n",
651 (unsigned long long)addr, size);
652 goto out;
653 }
654
655 if ((offset + size) > entry->size) {
656 err_printk(dev, entry, "DMA-API: device driver syncs"
657 " DMA memory outside allocated range "
658 "[device address=0x%016llx] "
659 "[allocation size=%llu bytes] [sync offset=%llu] "
660 "[sync size=%llu]\n", entry->dev_addr, entry->size,
661 offset, size);
662 }
663
664 if (direction != entry->direction) {
665 err_printk(dev, entry, "DMA-API: device driver syncs "
666 "DMA memory with different direction "
667 "[device address=0x%016llx] [size=%llu bytes] "
668 "[mapped with %s] [synced with %s]\n",
669 (unsigned long long)addr, entry->size,
670 dir2name[entry->direction],
671 dir2name[direction]);
672 }
673
674 if (entry->direction == DMA_BIDIRECTIONAL)
675 goto out;
676
677 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
678 !(direction == DMA_TO_DEVICE))
679 err_printk(dev, entry, "DMA-API: device driver syncs "
680 "device read-only DMA memory for cpu "
681 "[device address=0x%016llx] [size=%llu bytes] "
682 "[mapped with %s] [synced with %s]\n",
683 (unsigned long long)addr, entry->size,
684 dir2name[entry->direction],
685 dir2name[direction]);
686
687 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
688 !(direction == DMA_FROM_DEVICE))
689 err_printk(dev, entry, "DMA-API: device driver syncs "
690 "device write-only DMA memory to device "
691 "[device address=0x%016llx] [size=%llu bytes] "
692 "[mapped with %s] [synced with %s]\n",
693 (unsigned long long)addr, entry->size,
694 dir2name[entry->direction],
695 dir2name[direction]);
696
697out:
698 put_hash_bucket(bucket, &flags);
699
700}
701
702void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
703 size_t size, int direction, dma_addr_t dma_addr,
704 bool map_single)
705{
706 struct dma_debug_entry *entry;
707
708 if (unlikely(global_disable))
709 return;
710
711 if (unlikely(dma_mapping_error(dev, dma_addr)))
712 return;
713
714 entry = dma_entry_alloc();
715 if (!entry)
716 return;
717
718 entry->dev = dev;
719 entry->type = dma_debug_page;
720 entry->paddr = page_to_phys(page) + offset;
721 entry->dev_addr = dma_addr;
722 entry->size = size;
723 entry->direction = direction;
724
725 if (map_single)
726 entry->type = dma_debug_single;
727
728 if (!PageHighMem(page)) {
729 void *addr = ((char *)page_address(page)) + offset;
730 check_for_stack(dev, addr);
731 check_for_illegal_area(dev, addr, size);
732 }
733
734 add_dma_entry(entry);
735}
736EXPORT_SYMBOL(debug_dma_map_page);
737
738void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
739 size_t size, int direction, bool map_single)
740{
741 struct dma_debug_entry ref = {
742 .type = dma_debug_page,
743 .dev = dev,
744 .dev_addr = addr,
745 .size = size,
746 .direction = direction,
747 };
748
749 if (unlikely(global_disable))
750 return;
751
752 if (map_single)
753 ref.type = dma_debug_single;
754
755 check_unmap(&ref);
756}
757EXPORT_SYMBOL(debug_dma_unmap_page);
758
759void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
760 int nents, int mapped_ents, int direction)
761{
762 struct dma_debug_entry *entry;
763 struct scatterlist *s;
764 int i;
765
766 if (unlikely(global_disable))
767 return;
768
769 for_each_sg(sg, s, mapped_ents, i) {
770 entry = dma_entry_alloc();
771 if (!entry)
772 return;
773
774 entry->type = dma_debug_sg;
775 entry->dev = dev;
776 entry->paddr = sg_phys(s);
777 entry->size = s->length;
778 entry->dev_addr = s->dma_address;
779 entry->direction = direction;
780 entry->sg_call_ents = nents;
781 entry->sg_mapped_ents = mapped_ents;
782
783 if (!PageHighMem(sg_page(s))) {
784 check_for_stack(dev, sg_virt(s));
785 check_for_illegal_area(dev, sg_virt(s), s->length);
786 }
787
788 add_dma_entry(entry);
789 }
790}
791EXPORT_SYMBOL(debug_dma_map_sg);
792
793void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
794 int nelems, int dir)
795{
796 struct dma_debug_entry *entry;
797 struct scatterlist *s;
798 int mapped_ents = 0, i;
799 unsigned long flags;
800
801 if (unlikely(global_disable))
802 return;
803
804 for_each_sg(sglist, s, nelems, i) {
805
806 struct dma_debug_entry ref = {
807 .type = dma_debug_sg,
808 .dev = dev,
809 .paddr = sg_phys(s),
810 .dev_addr = s->dma_address,
811 .size = s->length,
812 .direction = dir,
813 .sg_call_ents = 0,
814 };
815
816 if (mapped_ents && i >= mapped_ents)
817 break;
818
819 if (mapped_ents == 0) {
820 struct hash_bucket *bucket;
821 ref.sg_call_ents = nelems;
822 bucket = get_hash_bucket(&ref, &flags);
823 entry = hash_bucket_find(bucket, &ref);
824 if (entry)
825 mapped_ents = entry->sg_mapped_ents;
826 put_hash_bucket(bucket, &flags);
827 }
828
829 check_unmap(&ref);
830 }
831}
832EXPORT_SYMBOL(debug_dma_unmap_sg);
833
834void debug_dma_alloc_coherent(struct device *dev, size_t size,
835 dma_addr_t dma_addr, void *virt)
836{
837 struct dma_debug_entry *entry;
838
839 if (unlikely(global_disable))
840 return;
841
842 if (unlikely(virt == NULL))
843 return;
844
845 entry = dma_entry_alloc();
846 if (!entry)
847 return;
848
849 entry->type = dma_debug_coherent;
850 entry->dev = dev;
851 entry->paddr = virt_to_phys(virt);
852 entry->size = size;
853 entry->dev_addr = dma_addr;
854 entry->direction = DMA_BIDIRECTIONAL;
855
856 add_dma_entry(entry);
857}
858EXPORT_SYMBOL(debug_dma_alloc_coherent);
859
860void debug_dma_free_coherent(struct device *dev, size_t size,
861 void *virt, dma_addr_t addr)
862{
863 struct dma_debug_entry ref = {
864 .type = dma_debug_coherent,
865 .dev = dev,
866 .paddr = virt_to_phys(virt),
867 .dev_addr = addr,
868 .size = size,
869 .direction = DMA_BIDIRECTIONAL,
870 };
871
872 if (unlikely(global_disable))
873 return;
874
875 check_unmap(&ref);
876}
877EXPORT_SYMBOL(debug_dma_free_coherent);
878
879void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
880 size_t size, int direction)
881{
882 if (unlikely(global_disable))
883 return;
884
885 check_sync(dev, dma_handle, size, 0, direction, true);
886}
887EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
888
889void debug_dma_sync_single_for_device(struct device *dev,
890 dma_addr_t dma_handle, size_t size,
891 int direction)
892{
893 if (unlikely(global_disable))
894 return;
895
896 check_sync(dev, dma_handle, size, 0, direction, false);
897}
898EXPORT_SYMBOL(debug_dma_sync_single_for_device);
899
900void debug_dma_sync_single_range_for_cpu(struct device *dev,
901 dma_addr_t dma_handle,
902 unsigned long offset, size_t size,
903 int direction)
904{
905 if (unlikely(global_disable))
906 return;
907
908 check_sync(dev, dma_handle, size, offset, direction, true);
909}
910EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
911
912void debug_dma_sync_single_range_for_device(struct device *dev,
913 dma_addr_t dma_handle,
914 unsigned long offset,
915 size_t size, int direction)
916{
917 if (unlikely(global_disable))
918 return;
919
920 check_sync(dev, dma_handle, size, offset, direction, false);
921}
922EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
923
924void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
925 int nelems, int direction)
926{
927 struct scatterlist *s;
928 int i;
929
930 if (unlikely(global_disable))
931 return;
932
933 for_each_sg(sg, s, nelems, i) {
934 check_sync(dev, s->dma_address, s->dma_length, 0,
935 direction, true);
936 }
937}
938EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
939
940void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
941 int nelems, int direction)
942{
943 struct scatterlist *s;
944 int i;
945
946 if (unlikely(global_disable))
947 return;
948
949 for_each_sg(sg, s, nelems, i) {
950 check_sync(dev, s->dma_address, s->dma_length, 0,
951 direction, false);
952 }
953}
954EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
955
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
new file mode 100644
index 000000000000..833139ce1e22
--- /dev/null
+++ b/lib/dynamic_debug.c
@@ -0,0 +1,769 @@
1/*
2 * lib/dynamic_debug.c
3 *
4 * make pr_debug()/dev_dbg() calls runtime configurable based upon their
5 * source module.
6 *
7 * Copyright (C) 2008 Jason Baron <jbaron@redhat.com>
8 * By Greg Banks <gnb@melbourne.sgi.com>
9 * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/kallsyms.h>
16#include <linux/version.h>
17#include <linux/types.h>
18#include <linux/mutex.h>
19#include <linux/proc_fs.h>
20#include <linux/seq_file.h>
21#include <linux/list.h>
22#include <linux/sysctl.h>
23#include <linux/ctype.h>
24#include <linux/uaccess.h>
25#include <linux/dynamic_debug.h>
26#include <linux/debugfs.h>
27
28extern struct _ddebug __start___verbose[];
29extern struct _ddebug __stop___verbose[];
30
31/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
32 * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
33 * use independent hash functions, to reduce the chance of false positives.
34 */
35long long dynamic_debug_enabled;
36EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
37long long dynamic_debug_enabled2;
38EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
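/*
 * The fast path compiled into each pr_debug()/dev_dbg() call site
 * (see include/linux/dynamic_debug.h) boils down to a sketch like
 *
 *	if ((dynamic_debug_enabled & (1LL << primary_hash)) &&
 *	    (dynamic_debug_enabled2 & (1LL << secondary_hash)))
 *		...emit the message...
 *
 * Requiring both bits keeps false positives from a single 64-bucket hash
 * rare, and a false positive only costs one superfluous runtime check.
 */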
39
40struct ddebug_table {
41 struct list_head link;
42 char *mod_name;
43 unsigned int num_ddebugs;
44 unsigned int num_enabled;
45 struct _ddebug *ddebugs;
46};
47
48struct ddebug_query {
49 const char *filename;
50 const char *module;
51 const char *function;
52 const char *format;
53 unsigned int first_lineno, last_lineno;
54};
55
56struct ddebug_iter {
57 struct ddebug_table *table;
58 unsigned int idx;
59};
60
61static DEFINE_MUTEX(ddebug_lock);
62static LIST_HEAD(ddebug_tables);
63static int verbose = 0;
64
65/* Return the last part of a pathname */
66static inline const char *basename(const char *path)
67{
68 const char *tail = strrchr(path, '/');
69 return tail ? tail+1 : path;
70}
71
72/* format a string into buf[] which describes the _ddebug's flags */
73static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
74 size_t maxlen)
75{
76 char *p = buf;
77
78 BUG_ON(maxlen < 4);
79 if (dp->flags & _DPRINTK_FLAGS_PRINT)
80 *p++ = 'p';
81 if (p == buf)
82 *p++ = '-';
83 *p = '\0';
84
85 return buf;
86}
87
88/*
89 * must be called with ddebug_lock held
90 */
91
92static int disabled_hash(char hash, bool first_table)
93{
94 struct ddebug_table *dt;
95 char table_hash_value;
96
97 list_for_each_entry(dt, &ddebug_tables, link) {
98 if (first_table)
99 table_hash_value = dt->ddebugs->primary_hash;
100 else
101 table_hash_value = dt->ddebugs->secondary_hash;
102 if (dt->num_enabled && (hash == table_hash_value))
103 return 0;
104 }
105 return 1;
106}
107
108/*
109 * Search the tables for _ddebug's which match the given
110 * `query' and apply the `flags' and `mask' to them. Tells
111 * the user which ddebug's were changed, or whether none
112 * were matched.
113 */
114static void ddebug_change(const struct ddebug_query *query,
115 unsigned int flags, unsigned int mask)
116{
117 int i;
118 struct ddebug_table *dt;
119 unsigned int newflags;
120 unsigned int nfound = 0;
121 char flagbuf[8];
122
123 /* search for matching ddebugs */
124 mutex_lock(&ddebug_lock);
125 list_for_each_entry(dt, &ddebug_tables, link) {
126
127 /* match against the module name */
128 if (query->module != NULL &&
129 strcmp(query->module, dt->mod_name))
130 continue;
131
132 for (i = 0 ; i < dt->num_ddebugs ; i++) {
133 struct _ddebug *dp = &dt->ddebugs[i];
134
135 /* match against the source filename */
136 if (query->filename != NULL &&
137 strcmp(query->filename, dp->filename) &&
138 strcmp(query->filename, basename(dp->filename)))
139 continue;
140
141 /* match against the function */
142 if (query->function != NULL &&
143 strcmp(query->function, dp->function))
144 continue;
145
146 /* match against the format */
147 if (query->format != NULL &&
148 strstr(dp->format, query->format) == NULL)
149 continue;
150
151 /* match against the line number range */
152 if (query->first_lineno &&
153 dp->lineno < query->first_lineno)
154 continue;
155 if (query->last_lineno &&
156 dp->lineno > query->last_lineno)
157 continue;
158
159 nfound++;
160
161 newflags = (dp->flags & mask) | flags;
162 if (newflags == dp->flags)
163 continue;
164
165 if (!newflags)
166 dt->num_enabled--;
167			else if (!dp->flags)
168 dt->num_enabled++;
169 dp->flags = newflags;
170 if (newflags) {
171 dynamic_debug_enabled |=
172 (1LL << dp->primary_hash);
173 dynamic_debug_enabled2 |=
174 (1LL << dp->secondary_hash);
175 } else {
176 if (disabled_hash(dp->primary_hash, true))
177 dynamic_debug_enabled &=
178 ~(1LL << dp->primary_hash);
179 if (disabled_hash(dp->secondary_hash, false))
180 dynamic_debug_enabled2 &=
181 ~(1LL << dp->secondary_hash);
182 }
183 if (verbose)
184 printk(KERN_INFO
185 "ddebug: changed %s:%d [%s]%s %s\n",
186 dp->filename, dp->lineno,
187 dt->mod_name, dp->function,
188 ddebug_describe_flags(dp, flagbuf,
189 sizeof(flagbuf)));
190 }
191 }
192 mutex_unlock(&ddebug_lock);
193
194 if (!nfound && verbose)
195 printk(KERN_INFO "ddebug: no matches for query\n");
196}
197
198/*
199 * Split the buffer `buf' into space-separated words.
200 * Handles simple " and ' quoting, i.e. without nested,
201 * embedded or escaped \". Return the number of words
202 * or <0 on error.
203 */
204static int ddebug_tokenize(char *buf, char *words[], int maxwords)
205{
206 int nwords = 0;
207
208 while (*buf) {
209 char *end;
210
211 /* Skip leading whitespace */
212 while (*buf && isspace(*buf))
213 buf++;
214 if (!*buf)
215 break; /* oh, it was trailing whitespace */
216
217 /* Run `end' over a word, either whitespace separated or quoted */
218 if (*buf == '"' || *buf == '\'') {
219 int quote = *buf++;
220 for (end = buf ; *end && *end != quote ; end++)
221 ;
222 if (!*end)
223 return -EINVAL; /* unclosed quote */
224 } else {
225 for (end = buf ; *end && !isspace(*end) ; end++)
226 ;
227 BUG_ON(end == buf);
228 }
229 /* Here `buf' is the start of the word, `end' is one past the end */
230
231 if (nwords == maxwords)
232 return -EINVAL; /* ran out of words[] before bytes */
233 if (*end)
234 *end++ = '\0'; /* terminate the word */
235 words[nwords++] = buf;
236 buf = end;
237 }
238
239 if (verbose) {
240 int i;
241 printk(KERN_INFO "%s: split into words:", __func__);
242 for (i = 0 ; i < nwords ; i++)
243 printk(" \"%s\"", words[i]);
244 printk("\n");
245 }
246
247 return nwords;
248}
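/*
 * For example, the buffer `file svcsock.c format "svc: " +p' splits into
 * the four words "file", "svcsock.c", "svc: " and "+p"; the quoting keeps
 * the embedded space inside the third word.
 */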
249
250/*
251 * Parse a single line number. Note that the empty string ""
252 * is treated as a special case and converted to zero, which
253 * is later treated as a "don't care" value.
254 */
255static inline int parse_lineno(const char *str, unsigned int *val)
256{
257 char *end = NULL;
258 BUG_ON(str == NULL);
259 if (*str == '\0') {
260 *val = 0;
261 return 0;
262 }
263 *val = simple_strtoul(str, &end, 10);
264 return end == NULL || end == str || *end != '\0' ? -EINVAL : 0;
265}
266
267/*
268 * Undo octal escaping in a string, inplace. This is useful to
269 * allow the user to express a query which matches a format
270 * containing embedded spaces.
271 */
272#define isodigit(c) ((c) >= '0' && (c) <= '7')
273static char *unescape(char *str)
274{
275 char *in = str;
276 char *out = str;
277
278 while (*in) {
279 if (*in == '\\') {
280 if (in[1] == '\\') {
281 *out++ = '\\';
282 in += 2;
283 continue;
284 } else if (in[1] == 't') {
285 *out++ = '\t';
286 in += 2;
287 continue;
288 } else if (in[1] == 'n') {
289 *out++ = '\n';
290 in += 2;
291 continue;
292 } else if (isodigit(in[1]) &&
293 isodigit(in[2]) &&
294 isodigit(in[3])) {
295 *out++ = ((in[1] - '0')<<6) |
296 ((in[2] - '0')<<3) |
297 (in[3] - '0');
298 in += 4;
299 continue;
300 }
301 }
302 *out++ = *in++;
303 }
304 *out = '\0';
305
306 return str;
307}
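/*
 * For example, a word arriving here as "eth0:\040up" leaves as
 * "eth0: up" -- the octal escape lets an embedded space survive the
 * whitespace tokenizer above.
 */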
308
309/*
310 * Parse words[] as a ddebug query specification, which is a series
311 * of (keyword, value) pairs chosen from these possibilities:
312 *
313 * func <function-name>
314 * file <full-pathname>
315 * file <base-filename>
316 * module <module-name>
317 * format <escaped-string-to-find-in-format>
318 * line <lineno>
319 * line <first-lineno>-<last-lineno> // where either may be empty
320 */
321static int ddebug_parse_query(char *words[], int nwords,
322 struct ddebug_query *query)
323{
324 unsigned int i;
325
326 /* check we have an even number of words */
327 if (nwords % 2 != 0)
328 return -EINVAL;
329 memset(query, 0, sizeof(*query));
330
331 for (i = 0 ; i < nwords ; i += 2) {
332 if (!strcmp(words[i], "func"))
333 query->function = words[i+1];
334 else if (!strcmp(words[i], "file"))
335 query->filename = words[i+1];
336 else if (!strcmp(words[i], "module"))
337 query->module = words[i+1];
338 else if (!strcmp(words[i], "format"))
339 query->format = unescape(words[i+1]);
340 else if (!strcmp(words[i], "line")) {
341 char *first = words[i+1];
342 char *last = strchr(first, '-');
343 if (last)
344 *last++ = '\0';
345 if (parse_lineno(first, &query->first_lineno) < 0)
346 return -EINVAL;
347 if (last != NULL) {
348 /* range <first>-<last> */
349 if (parse_lineno(last, &query->last_lineno) < 0)
350 return -EINVAL;
351 } else {
352 query->last_lineno = query->first_lineno;
353 }
354 } else {
355 if (verbose)
356 printk(KERN_ERR "%s: unknown keyword \"%s\"\n",
357 __func__, words[i]);
358 return -EINVAL;
359 }
360 }
361
362 if (verbose)
363 printk(KERN_INFO "%s: q->function=\"%s\" q->filename=\"%s\" "
364 "q->module=\"%s\" q->format=\"%s\" q->lineno=%u-%u\n",
365 __func__, query->function, query->filename,
366 query->module, query->format, query->first_lineno,
367 query->last_lineno);
368
369 return 0;
370}
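/*
 * For instance, the words from `file svcsock.c line 1603-1605 +p' (minus
 * the trailing flags word) parse into query->filename = "svcsock.c",
 * query->first_lineno = 1603 and query->last_lineno = 1605.
 */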
371
372/*
373 * Parse `str' as a flags specification, format [-+=][p]+.
374 * Sets up *maskp and *flagsp to be used when changing the
375 * flags fields of matched _ddebug's. Returns 0 on success
376 * or <0 on error.
377 */
378static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
379 unsigned int *maskp)
380{
381 unsigned flags = 0;
382 int op = '=';
383
384 switch (*str) {
385 case '+':
386 case '-':
387 case '=':
388 op = *str++;
389 break;
390 default:
391 return -EINVAL;
392 }
393 if (verbose)
394 printk(KERN_INFO "%s: op='%c'\n", __func__, op);
395
396 for ( ; *str ; ++str) {
397 switch (*str) {
398 case 'p':
399 flags |= _DPRINTK_FLAGS_PRINT;
400 break;
401 default:
402 return -EINVAL;
403 }
404 }
405 if (flags == 0)
406 return -EINVAL;
407 if (verbose)
408 printk(KERN_INFO "%s: flags=0x%x\n", __func__, flags);
409
410 /* calculate final *flagsp, *maskp according to mask and op */
411 switch (op) {
412 case '=':
413 *maskp = 0;
414 *flagsp = flags;
415 break;
416 case '+':
417 *maskp = ~0U;
418 *flagsp = flags;
419 break;
420 case '-':
421 *maskp = ~flags;
422 *flagsp = 0;
423 break;
424 }
425 if (verbose)
426 printk(KERN_INFO "%s: *flagsp=0x%x *maskp=0x%x\n",
427 __func__, *flagsp, *maskp);
428 return 0;
429}
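/*
 * Combined with ddebug_change() above: `+p' sets _DPRINTK_FLAGS_PRINT
 * while preserving any other flags (mask ~0), `-p' clears just that flag
 * (mask ~flags), and `=p' replaces the flags outright (mask 0).
 */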
430
431/*
432 * File_ops->write method for <debugfs>/dynamic_debug/control. Gathers the
433 * command text from userspace, parses and executes it.
434 */
435static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
436 size_t len, loff_t *offp)
437{
438 unsigned int flags = 0, mask = 0;
439 struct ddebug_query query;
440#define MAXWORDS 9
441 int nwords;
442 char *words[MAXWORDS];
443 char tmpbuf[256];
444
445 if (len == 0)
446 return 0;
447 /* we don't check *offp -- multiple writes() are allowed */
448 if (len > sizeof(tmpbuf)-1)
449 return -E2BIG;
450 if (copy_from_user(tmpbuf, ubuf, len))
451 return -EFAULT;
452 tmpbuf[len] = '\0';
453 if (verbose)
454 printk(KERN_INFO "%s: read %d bytes from userspace\n",
455 __func__, (int)len);
456
457 nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS);
458 if (nwords < 0)
459 return -EINVAL;
460 if (ddebug_parse_query(words, nwords-1, &query))
461 return -EINVAL;
462 if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
463 return -EINVAL;
464
465 /* actually go and implement the change */
466 ddebug_change(&query, flags, mask);
467
468 *offp += len;
469 return len;
470}
471
472/*
473 * Set the iterator to point to the first _ddebug object
474 * and return a pointer to that first object. Returns
475 * NULL if there are no _ddebugs at all.
476 */
477static struct _ddebug *ddebug_iter_first(struct ddebug_iter *iter)
478{
479 if (list_empty(&ddebug_tables)) {
480 iter->table = NULL;
481 iter->idx = 0;
482 return NULL;
483 }
484 iter->table = list_entry(ddebug_tables.next,
485 struct ddebug_table, link);
486 iter->idx = 0;
487 return &iter->table->ddebugs[iter->idx];
488}
489
490/*
491 * Advance the iterator to point to the next _ddebug
492 * object from the one the iterator currently points at,
493 * and returns a pointer to the new _ddebug. Returns
494 * NULL if the iterator has seen all the _ddebugs.
495 */
496static struct _ddebug *ddebug_iter_next(struct ddebug_iter *iter)
497{
498 if (iter->table == NULL)
499 return NULL;
500 if (++iter->idx == iter->table->num_ddebugs) {
501 /* iterate to next table */
502 iter->idx = 0;
503 if (list_is_last(&iter->table->link, &ddebug_tables)) {
504 iter->table = NULL;
505 return NULL;
506 }
507 iter->table = list_entry(iter->table->link.next,
508 struct ddebug_table, link);
509 }
510 return &iter->table->ddebugs[iter->idx];
511}
512
513/*
514 * Seq_ops start method. Called at the start of every
515 * read() call from userspace. Takes the ddebug_lock and
516 * seeks the seq_file's iterator to the given position.
517 */
518static void *ddebug_proc_start(struct seq_file *m, loff_t *pos)
519{
520 struct ddebug_iter *iter = m->private;
521 struct _ddebug *dp;
522 int n = *pos;
523
524 if (verbose)
525 printk(KERN_INFO "%s: called m=%p *pos=%lld\n",
526 __func__, m, (unsigned long long)*pos);
527
528 mutex_lock(&ddebug_lock);
529
530 if (!n)
531 return SEQ_START_TOKEN;
532 if (n < 0)
533 return NULL;
534 dp = ddebug_iter_first(iter);
535 while (dp != NULL && --n > 0)
536 dp = ddebug_iter_next(iter);
537 return dp;
538}
539
540/*
541 * Seq_ops next method. Called several times within a read()
542 * call from userspace, with ddebug_lock held. Walks to the
543 * next _ddebug object with a special case for the header line.
544 */
545static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
546{
547 struct ddebug_iter *iter = m->private;
548 struct _ddebug *dp;
549
550 if (verbose)
551 printk(KERN_INFO "%s: called m=%p p=%p *pos=%lld\n",
552 __func__, m, p, (unsigned long long)*pos);
553
554 if (p == SEQ_START_TOKEN)
555 dp = ddebug_iter_first(iter);
556 else
557 dp = ddebug_iter_next(iter);
558 ++*pos;
559 return dp;
560}
561
562/*
563 * Seq_ops show method. Called several times within a read()
564 * call from userspace, with ddebug_lock held. Formats the
565 * current _ddebug as a single human-readable line, with a
566 * special case for the header line.
567 */
568static int ddebug_proc_show(struct seq_file *m, void *p)
569{
570 struct ddebug_iter *iter = m->private;
571 struct _ddebug *dp = p;
572 char flagsbuf[8];
573
574 if (verbose)
575 printk(KERN_INFO "%s: called m=%p p=%p\n",
576 __func__, m, p);
577
578 if (p == SEQ_START_TOKEN) {
579 seq_puts(m,
580 "# filename:lineno [module]function flags format\n");
581 return 0;
582 }
583
584 seq_printf(m, "%s:%u [%s]%s %s \"",
585 dp->filename, dp->lineno,
586 iter->table->mod_name, dp->function,
587 ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf)));
588 seq_escape(m, dp->format, "\t\r\n\"");
589 seq_puts(m, "\"\n");
590
591 return 0;
592}
593
594/*
595 * Seq_ops stop method. Called at the end of each read()
596 * call from userspace. Drops ddebug_lock.
597 */
598static void ddebug_proc_stop(struct seq_file *m, void *p)
599{
600 if (verbose)
601 printk(KERN_INFO "%s: called m=%p p=%p\n",
602 __func__, m, p);
603 mutex_unlock(&ddebug_lock);
604}
605
606static const struct seq_operations ddebug_proc_seqops = {
607 .start = ddebug_proc_start,
608 .next = ddebug_proc_next,
609 .show = ddebug_proc_show,
610 .stop = ddebug_proc_stop
611};
612
613/*
614 * File_ops->open method for <debugfs>/dynamic_debug/control. Does the seq_file
615 * setup dance, and also creates an iterator to walk the _ddebugs.
616 * Note that we create a seq_file always, even for O_WRONLY files
617 * where it's not needed, as doing so simplifies the ->release method.
618 */
619static int ddebug_proc_open(struct inode *inode, struct file *file)
620{
621 struct ddebug_iter *iter;
622 int err;
623
624 if (verbose)
625 printk(KERN_INFO "%s: called\n", __func__);
626
627 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
628 if (iter == NULL)
629 return -ENOMEM;
630
631 err = seq_open(file, &ddebug_proc_seqops);
632 if (err) {
633 kfree(iter);
634 return err;
635 }
636 ((struct seq_file *) file->private_data)->private = iter;
637 return 0;
638}
639
640static const struct file_operations ddebug_proc_fops = {
641 .owner = THIS_MODULE,
642 .open = ddebug_proc_open,
643 .read = seq_read,
644 .llseek = seq_lseek,
645 .release = seq_release_private,
646 .write = ddebug_proc_write
647};
648
649/*
650 * Allocate a new ddebug_table for the given module
651 * and add it to the global list.
652 */
653int ddebug_add_module(struct _ddebug *tab, unsigned int n,
654 const char *name)
655{
656 struct ddebug_table *dt;
657 char *new_name;
658
659 dt = kzalloc(sizeof(*dt), GFP_KERNEL);
660 if (dt == NULL)
661 return -ENOMEM;
662 new_name = kstrdup(name, GFP_KERNEL);
663 if (new_name == NULL) {
664 kfree(dt);
665 return -ENOMEM;
666 }
667 dt->mod_name = new_name;
668 dt->num_ddebugs = n;
669 dt->num_enabled = 0;
670 dt->ddebugs = tab;
671
672 mutex_lock(&ddebug_lock);
673 list_add_tail(&dt->link, &ddebug_tables);
674 mutex_unlock(&ddebug_lock);
675
676 if (verbose)
677 printk(KERN_INFO "%u debug prints in module %s\n",
678 n, dt->mod_name);
679 return 0;
680}
681EXPORT_SYMBOL_GPL(ddebug_add_module);
682
683static void ddebug_table_free(struct ddebug_table *dt)
684{
685 list_del_init(&dt->link);
686 kfree(dt->mod_name);
687 kfree(dt);
688}
689
690/*
691 * Called in response to a module being unloaded. Removes
692 * any ddebug_table's which point at the module.
693 */
694int ddebug_remove_module(char *mod_name)
695{
696 struct ddebug_table *dt, *nextdt;
697 int ret = -ENOENT;
698
699 if (verbose)
700 printk(KERN_INFO "%s: removing module \"%s\"\n",
701 __func__, mod_name);
702
703 mutex_lock(&ddebug_lock);
704 list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
705 if (!strcmp(dt->mod_name, mod_name)) {
706 ddebug_table_free(dt);
707 ret = 0;
708 }
709 }
710 mutex_unlock(&ddebug_lock);
711 return ret;
712}
713EXPORT_SYMBOL_GPL(ddebug_remove_module);
714
715static void ddebug_remove_all_tables(void)
716{
717 mutex_lock(&ddebug_lock);
718 while (!list_empty(&ddebug_tables)) {
719 struct ddebug_table *dt = list_entry(ddebug_tables.next,
720 struct ddebug_table,
721 link);
722 ddebug_table_free(dt);
723 }
724 mutex_unlock(&ddebug_lock);
725}
726
727static int __init dynamic_debug_init(void)
728{
729 struct dentry *dir, *file;
730 struct _ddebug *iter, *iter_start;
731 const char *modname = NULL;
732 int ret = 0;
733 int n = 0;
734
735 dir = debugfs_create_dir("dynamic_debug", NULL);
736 if (!dir)
737 return -ENOMEM;
738 file = debugfs_create_file("control", 0644, dir, NULL,
739 &ddebug_proc_fops);
740 if (!file) {
741 debugfs_remove(dir);
742 return -ENOMEM;
743 }
744 if (__start___verbose != __stop___verbose) {
745 iter = __start___verbose;
746 modname = iter->modname;
747 iter_start = iter;
748 for (; iter < __stop___verbose; iter++) {
749 if (strcmp(modname, iter->modname)) {
750 ret = ddebug_add_module(iter_start, n, modname);
751 if (ret)
752 goto out_free;
753 n = 0;
754 modname = iter->modname;
755 iter_start = iter;
756 }
757 n++;
758 }
759 ret = ddebug_add_module(iter_start, n, modname);
760 }
761out_free:
762 if (ret) {
763 ddebug_remove_all_tables();
764		debugfs_remove(file);
765		debugfs_remove(dir);
766 }
767 return 0;
768}
769module_init(dynamic_debug_init);
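For completeness, the control file created above can be driven from user
space with plain stdio. The following is a hypothetical sketch -- the query
string is illustrative and it assumes debugfs is mounted at
/sys/kernel/debug:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/dynamic_debug/control", "w");

	if (!f) {
		perror("dynamic_debug control");
		return 1;
	}
	/* enable the pr_debug() calls in svcsock.c, lines 1600-1700 */
	fprintf(f, "file svcsock.c line 1600-1700 +p\n");
	return fclose(f) ? 1 : 0;
}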
diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c
deleted file mode 100644
index 165a19763dc9..000000000000
--- a/lib/dynamic_printk.c
+++ /dev/null
@@ -1,414 +0,0 @@
1/*
2 * lib/dynamic_printk.c
3 *
4 * make pr_debug()/dev_dbg() calls runtime configurable based upon their
5 * source module.
6 *
7 * Copyright (C) 2008 Red Hat, Inc., Jason Baron <jbaron@redhat.com>
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/uaccess.h>
13#include <linux/seq_file.h>
14#include <linux/debugfs.h>
15#include <linux/fs.h>
16
17extern struct mod_debug __start___verbose[];
18extern struct mod_debug __stop___verbose[];
19
20struct debug_name {
21 struct hlist_node hlist;
22 struct hlist_node hlist2;
23 int hash1;
24 int hash2;
25 char *name;
26 int enable;
27 int type;
28};
29
30static int nr_entries;
31static int num_enabled;
32int dynamic_enabled = DYNAMIC_ENABLED_NONE;
33static struct hlist_head module_table[DEBUG_HASH_TABLE_SIZE] =
34 { [0 ... DEBUG_HASH_TABLE_SIZE-1] = HLIST_HEAD_INIT };
35static struct hlist_head module_table2[DEBUG_HASH_TABLE_SIZE] =
36 { [0 ... DEBUG_HASH_TABLE_SIZE-1] = HLIST_HEAD_INIT };
37static DECLARE_MUTEX(debug_list_mutex);
38
39/* dynamic_printk_enabled and dynamic_printk_enabled2 are bitmasks in which
40 * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
41 * use independent hash functions, to reduce the chance of false positives.
42 */
43long long dynamic_printk_enabled;
44EXPORT_SYMBOL_GPL(dynamic_printk_enabled);
45long long dynamic_printk_enabled2;
46EXPORT_SYMBOL_GPL(dynamic_printk_enabled2);
47
48/* returns the debug module pointer. */
49static struct debug_name *find_debug_module(char *module_name)
50{
51 int i;
52 struct hlist_head *head;
53 struct hlist_node *node;
54 struct debug_name *element;
55
56 element = NULL;
57 for (i = 0; i < DEBUG_HASH_TABLE_SIZE; i++) {
58 head = &module_table[i];
59 hlist_for_each_entry_rcu(element, node, head, hlist)
60 if (!strcmp(element->name, module_name))
61 return element;
62 }
63 return NULL;
64}
65
66/* returns the debug module pointer. */
67static struct debug_name *find_debug_module_hash(char *module_name, int hash)
68{
69 struct hlist_head *head;
70 struct hlist_node *node;
71 struct debug_name *element;
72
73 element = NULL;
74 head = &module_table[hash];
75 hlist_for_each_entry_rcu(element, node, head, hlist)
76 if (!strcmp(element->name, module_name))
77 return element;
78 return NULL;
79}
80
81/* caller must hold mutex */
82static int __add_debug_module(char *mod_name, int hash, int hash2)
83{
84 struct debug_name *new;
85 char *module_name;
86 int ret = 0;
87
88 if (find_debug_module(mod_name)) {
89 ret = -EINVAL;
90 goto out;
91 }
92 module_name = kmalloc(strlen(mod_name) + 1, GFP_KERNEL);
93 if (!module_name) {
94 ret = -ENOMEM;
95 goto out;
96 }
97 module_name = strcpy(module_name, mod_name);
98 module_name[strlen(mod_name)] = '\0';
99 new = kzalloc(sizeof(struct debug_name), GFP_KERNEL);
100 if (!new) {
101 kfree(module_name);
102 ret = -ENOMEM;
103 goto out;
104 }
105 INIT_HLIST_NODE(&new->hlist);
106 INIT_HLIST_NODE(&new->hlist2);
107 new->name = module_name;
108 new->hash1 = hash;
109 new->hash2 = hash2;
110 hlist_add_head_rcu(&new->hlist, &module_table[hash]);
111 hlist_add_head_rcu(&new->hlist2, &module_table2[hash2]);
112 nr_entries++;
113out:
114 return ret;
115}
116
117int unregister_dynamic_debug_module(char *mod_name)
118{
119 struct debug_name *element;
120 int ret = 0;
121
122 down(&debug_list_mutex);
123 element = find_debug_module(mod_name);
124 if (!element) {
125 ret = -EINVAL;
126 goto out;
127 }
128 hlist_del_rcu(&element->hlist);
129 hlist_del_rcu(&element->hlist2);
130 synchronize_rcu();
131 kfree(element->name);
132 if (element->enable)
133 num_enabled--;
134 kfree(element);
135 nr_entries--;
136out:
137 up(&debug_list_mutex);
138 return ret;
139}
140EXPORT_SYMBOL_GPL(unregister_dynamic_debug_module);
141
142int register_dynamic_debug_module(char *mod_name, int type, char *share_name,
143 char *flags, int hash, int hash2)
144{
145 struct debug_name *elem;
146 int ret = 0;
147
148 down(&debug_list_mutex);
149 elem = find_debug_module(mod_name);
150 if (!elem) {
151 if (__add_debug_module(mod_name, hash, hash2))
152 goto out;
153 elem = find_debug_module(mod_name);
154 if (dynamic_enabled == DYNAMIC_ENABLED_ALL &&
155 !strcmp(mod_name, share_name)) {
156 elem->enable = true;
157 num_enabled++;
158 }
159 }
160 elem->type |= type;
161out:
162 up(&debug_list_mutex);
163 return ret;
164}
165EXPORT_SYMBOL_GPL(register_dynamic_debug_module);
166
167int __dynamic_dbg_enabled_helper(char *mod_name, int type, int value, int hash)
168{
169 struct debug_name *elem;
170 int ret = 0;
171
172 if (dynamic_enabled == DYNAMIC_ENABLED_ALL)
173 return 1;
174 rcu_read_lock();
175 elem = find_debug_module_hash(mod_name, hash);
176 if (elem && elem->enable)
177 ret = 1;
178 rcu_read_unlock();
179 return ret;
180}
181EXPORT_SYMBOL_GPL(__dynamic_dbg_enabled_helper);
182
183static void set_all(bool enable)
184{
185 struct debug_name *e;
186 struct hlist_node *node;
187 int i;
188 long long enable_mask;
189
190 for (i = 0; i < DEBUG_HASH_TABLE_SIZE; i++) {
191 if (module_table[i].first != NULL) {
192 hlist_for_each_entry(e, node, &module_table[i], hlist) {
193 e->enable = enable;
194 }
195 }
196 }
197 if (enable)
198 enable_mask = ULLONG_MAX;
199 else
200 enable_mask = 0;
201 dynamic_printk_enabled = enable_mask;
202 dynamic_printk_enabled2 = enable_mask;
203}
204
205static int disabled_hash(int i, bool first_table)
206{
207 struct debug_name *e;
208 struct hlist_node *node;
209
210 if (first_table) {
211 hlist_for_each_entry(e, node, &module_table[i], hlist) {
212 if (e->enable)
213 return 0;
214 }
215 } else {
216 hlist_for_each_entry(e, node, &module_table2[i], hlist2) {
217 if (e->enable)
218 return 0;
219 }
220 }
221 return 1;
222}
223
224static ssize_t pr_debug_write(struct file *file, const char __user *buf,
225 size_t length, loff_t *ppos)
226{
227 char *buffer, *s, *value_str, *setting_str;
228 int err, value;
229 struct debug_name *elem = NULL;
230 int all = 0;
231
232 if (length > PAGE_SIZE || length < 0)
233 return -EINVAL;
234
235 buffer = (char *)__get_free_page(GFP_KERNEL);
236 if (!buffer)
237 return -ENOMEM;
238
239 err = -EFAULT;
240 if (copy_from_user(buffer, buf, length))
241 goto out;
242
243 err = -EINVAL;
244 if (length < PAGE_SIZE)
245 buffer[length] = '\0';
246 else if (buffer[PAGE_SIZE-1])
247 goto out;
248
249 err = -EINVAL;
250 down(&debug_list_mutex);
251
252 if (strncmp("set", buffer, 3))
253 goto out_up;
254 s = buffer + 3;
255 setting_str = strsep(&s, "=");
256 if (s == NULL)
257 goto out_up;
258 setting_str = strstrip(setting_str);
259 value_str = strsep(&s, " ");
260 if (s == NULL)
261 goto out_up;
262 s = strstrip(s);
263 if (!strncmp(s, "all", 3))
264 all = 1;
265 else
266 elem = find_debug_module(s);
267 if (!strncmp(setting_str, "enable", 6)) {
268 value = !!simple_strtol(value_str, NULL, 10);
269 if (all) {
270 if (value) {
271 set_all(true);
272 num_enabled = nr_entries;
273 dynamic_enabled = DYNAMIC_ENABLED_ALL;
274 } else {
275 set_all(false);
276 num_enabled = 0;
277 dynamic_enabled = DYNAMIC_ENABLED_NONE;
278 }
279 err = 0;
280 } else if (elem) {
281 if (value && (elem->enable == 0)) {
282 dynamic_printk_enabled |= (1LL << elem->hash1);
283 dynamic_printk_enabled2 |= (1LL << elem->hash2);
284 elem->enable = 1;
285 num_enabled++;
286 dynamic_enabled = DYNAMIC_ENABLED_SOME;
287 err = 0;
288 printk(KERN_DEBUG
289 "debugging enabled for module %s\n",
290 elem->name);
291 } else if (!value && (elem->enable == 1)) {
292 elem->enable = 0;
293 num_enabled--;
294 if (disabled_hash(elem->hash1, true))
295 dynamic_printk_enabled &=
296 ~(1LL << elem->hash1);
297 if (disabled_hash(elem->hash2, false))
298 dynamic_printk_enabled2 &=
299 ~(1LL << elem->hash2);
300 if (num_enabled)
301 dynamic_enabled = DYNAMIC_ENABLED_SOME;
302 else
303 dynamic_enabled = DYNAMIC_ENABLED_NONE;
304 err = 0;
305 printk(KERN_DEBUG
306 "debugging disabled for module %s\n",
307 elem->name);
308 }
309 }
310 }
311 if (!err)
312 err = length;
313out_up:
314 up(&debug_list_mutex);
315out:
316 free_page((unsigned long)buffer);
317 return err;
318}
319
320static void *pr_debug_seq_start(struct seq_file *f, loff_t *pos)
321{
322 return (*pos < DEBUG_HASH_TABLE_SIZE) ? pos : NULL;
323}
324
325static void *pr_debug_seq_next(struct seq_file *s, void *v, loff_t *pos)
326{
327 (*pos)++;
328 if (*pos >= DEBUG_HASH_TABLE_SIZE)
329 return NULL;
330 return pos;
331}
332
333static void pr_debug_seq_stop(struct seq_file *s, void *v)
334{
335 /* Nothing to do */
336}
337
338static int pr_debug_seq_show(struct seq_file *s, void *v)
339{
340 struct hlist_head *head;
341 struct hlist_node *node;
342 struct debug_name *elem;
343 unsigned int i = *(loff_t *) v;
344
345 rcu_read_lock();
346 head = &module_table[i];
347 hlist_for_each_entry_rcu(elem, node, head, hlist) {
348 seq_printf(s, "%s enabled=%d", elem->name, elem->enable);
349 seq_printf(s, "\n");
350 }
351 rcu_read_unlock();
352 return 0;
353}
354
355static struct seq_operations pr_debug_seq_ops = {
356 .start = pr_debug_seq_start,
357 .next = pr_debug_seq_next,
358 .stop = pr_debug_seq_stop,
359 .show = pr_debug_seq_show
360};
361
362static int pr_debug_open(struct inode *inode, struct file *filp)
363{
364 return seq_open(filp, &pr_debug_seq_ops);
365}
366
367static const struct file_operations pr_debug_operations = {
368 .open = pr_debug_open,
369 .read = seq_read,
370 .write = pr_debug_write,
371 .llseek = seq_lseek,
372 .release = seq_release,
373};
374
375static int __init dynamic_printk_init(void)
376{
377 struct dentry *dir, *file;
378 struct mod_debug *iter;
379 unsigned long value;
380
381 dir = debugfs_create_dir("dynamic_printk", NULL);
382 if (!dir)
383 return -ENOMEM;
384 file = debugfs_create_file("modules", 0644, dir, NULL,
385 &pr_debug_operations);
386 if (!file) {
387 debugfs_remove(dir);
388 return -ENOMEM;
389 }
390 for (value = (unsigned long)__start___verbose;
391 value < (unsigned long)__stop___verbose;
392 value += sizeof(struct mod_debug)) {
393 iter = (struct mod_debug *)value;
394 register_dynamic_debug_module(iter->modname,
395 iter->type,
396 iter->logical_modname,
397 iter->flag_names, iter->hash, iter->hash2);
398 }
399 if (dynamic_enabled == DYNAMIC_ENABLED_ALL)
400 set_all(true);
401 return 0;
402}
403module_init(dynamic_printk_init);
404/* may want to move this earlier so we can get traces as early as possible */
405
406static int __init dynamic_printk_setup(char *str)
407{
408 if (str)
409 return -ENOENT;
410 dynamic_enabled = DYNAMIC_ENABLED_ALL;
411 return 0;
412}
413/* Use early_param(), so we can get debug output as early as possible */
414early_param("dynamic_printk", dynamic_printk_setup);
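
A reconstructed sketch of the fast path this deleted file served: pr_debug() tested the two bitmasks before falling back to __dynamic_dbg_enabled_helper(). The macro shape below is an assumption based on the dynamic_printk_enabled/dynamic_printk_enabled2 comment above, not the literal header code:

/* both independent hash bits must be set; false positives are
 * possible but rare, which is why the helper re-checks by name */
#define EXAMPLE_DYNAMIC_DBG_ENABLED(hash1, hash2)			\
	unlikely((dynamic_printk_enabled  & (1LL << (hash1))) &&	\
		 (dynamic_printk_enabled2 & (1LL << (hash2))))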
diff --git a/lib/idr.c b/lib/idr.c
index c11c5765cdef..80ca9aca038b 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -449,6 +449,7 @@ void idr_remove_all(struct idr *idp)
449 449
450 n = idp->layers * IDR_BITS; 450 n = idp->layers * IDR_BITS;
451 p = idp->top; 451 p = idp->top;
452 rcu_assign_pointer(idp->top, NULL);
452 max = 1 << n; 453 max = 1 << n;
453 454
454 id = 0; 455 id = 0;
@@ -467,7 +468,6 @@ void idr_remove_all(struct idr *idp)
467 p = *--paa; 468 p = *--paa;
468 } 469 }
469 } 470 }
470 rcu_assign_pointer(idp->top, NULL);
471 idp->layers = 0; 471 idp->layers = 0;
472} 472}
473EXPORT_SYMBOL(idr_remove_all); 473EXPORT_SYMBOL(idr_remove_all);
@@ -579,6 +579,52 @@ int idr_for_each(struct idr *idp,
579EXPORT_SYMBOL(idr_for_each); 579EXPORT_SYMBOL(idr_for_each);
580 580
581/** 581/**
582 * idr_get_next - lookup the next object starting from the given id
583 * @idp: idr handle
584 * @nextidp: pointer to the lookup key; updated to the id actually found
585 *
586 * Returns a pointer to the registered object with the smallest id
587 * greater than or equal to the given id, or NULL if none exists.
588 */
589
590void *idr_get_next(struct idr *idp, int *nextidp)
591{
592 struct idr_layer *p, *pa[MAX_LEVEL];
593 struct idr_layer **paa = &pa[0];
594 int id = *nextidp;
595 int n, max;
596
597 /* find first ent */
598 n = idp->layers * IDR_BITS;
599 max = 1 << n;
600 p = rcu_dereference(idp->top);
601 if (!p)
602 return NULL;
603
604 while (id < max) {
605 while (n > 0 && p) {
606 n -= IDR_BITS;
607 *paa++ = p;
608 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
609 }
610
611 if (p) {
612 *nextidp = id;
613 return p;
614 }
615
616 id += 1 << n;
617 while (n < fls(id)) {
618 n += IDR_BITS;
619 p = *--paa;
620 }
621 }
622 return NULL;
623}
624
625
626
627/**
582 * idr_replace - replace pointer for given id 628 * idr_replace - replace pointer for given id
583 * @idp: idr handle 629 * @idp: idr handle
584 * @ptr: pointer you want associated with the id 630 * @ptr: pointer you want associated with the id
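
A hedged usage sketch for the idr_get_next() helper added above; the loop pattern follows from its contract, and process_entry() is a hypothetical consumer:

static void process_entry(int id, void *entry);	/* hypothetical */

static void example_idr_walk(struct idr *idp)
{
	void *entry;
	int id = 0;

	while ((entry = idr_get_next(idp, &id)) != NULL) {
		process_entry(id, entry);
		id++;	/* step past the id just returned */
	}
}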
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 01a3c22c1b5a..39f1029e3525 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -39,7 +39,7 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
39int __lockfunc __reacquire_kernel_lock(void) 39int __lockfunc __reacquire_kernel_lock(void)
40{ 40{
41 while (!_raw_spin_trylock(&kernel_flag)) { 41 while (!_raw_spin_trylock(&kernel_flag)) {
42 if (test_thread_flag(TIF_NEED_RESCHED)) 42 if (need_resched())
43 return -EAGAIN; 43 return -EAGAIN;
44 cpu_relax(); 44 cpu_relax();
45 } 45 }
diff --git a/lib/kobject.c b/lib/kobject.c
index 0487d1f64806..a6dec32f2ddd 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -212,7 +212,7 @@ static int kobject_add_internal(struct kobject *kobj)
212 * @fmt: format string used to build the name 212 * @fmt: format string used to build the name
213 * @vargs: vargs to format the string. 213 * @vargs: vargs to format the string.
214 */ 214 */
215static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, 215int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
216 va_list vargs) 216 va_list vargs)
217{ 217{
218 const char *old_name = kobj->name; 218 const char *old_name = kobj->name;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 318328ddbd1c..97a777ad4f59 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -118,6 +118,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
118 kset = top_kobj->kset; 118 kset = top_kobj->kset;
119 uevent_ops = kset->uevent_ops; 119 uevent_ops = kset->uevent_ops;
120 120
 121 /* skip the event, if uevent_suppress is set */
122 if (kobj->uevent_suppress) {
123 pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
124 "caused the event to drop!\n",
125 kobject_name(kobj), kobj, __func__);
126 return 0;
127 }
121 /* skip the event, if the filter returns zero. */ 128 /* skip the event, if the filter returns zero. */
122 if (uevent_ops && uevent_ops->filter) 129 if (uevent_ops && uevent_ops->filter)
123 if (!uevent_ops->filter(kset, kobj)) { 130 if (!uevent_ops->filter(kset, kobj)) {
@@ -227,6 +234,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
227 NETLINK_CB(skb).dst_group = 1; 234 NETLINK_CB(skb).dst_group = 1;
228 retval = netlink_broadcast(uevent_sock, skb, 0, 1, 235 retval = netlink_broadcast(uevent_sock, skb, 0, 1,
229 GFP_KERNEL); 236 GFP_KERNEL);
237 /* ENOBUFS should be handled in userspace */
238 if (retval == -ENOBUFS)
239 retval = 0;
230 } else 240 } else
231 retval = -ENOMEM; 241 retval = -ENOMEM;
232 } 242 }
@@ -248,7 +258,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
248 goto exit; 258 goto exit;
249 259
250 retval = call_usermodehelper(argv[0], argv, 260 retval = call_usermodehelper(argv[0], argv,
251 env->envp, UMH_WAIT_EXEC); 261 env->envp, UMH_NO_WAIT);
252 } 262 }
253 263
254exit: 264exit:
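
A short caller-side sketch (illustrative, not from this patch) of the new uevent_suppress flag: setting it before teardown makes kobject_uevent_env() drop the event, as the hunk above shows.

/* assumes <linux/kobject.h> */
static void example_quiet_teardown(struct kobject *kobj)
{
	kobj->uevent_suppress = 1;	/* silence KOBJ_REMOVE etc. */
	kobject_del(kobj);
	kobject_put(kobj);
}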
diff --git a/lib/lmb.c b/lib/lmb.c
index 97e547037084..e4a6482d8b26 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -29,33 +29,33 @@ static int __init early_lmb(char *p)
29} 29}
30early_param("lmb", early_lmb); 30early_param("lmb", early_lmb);
31 31
32void lmb_dump_all(void) 32static void lmb_dump(struct lmb_region *region, char *name)
33{ 33{
34 unsigned long i; 34 unsigned long long base, size;
35 int i;
36
37 pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
38
39 for (i = 0; i < region->cnt; i++) {
40 base = region->region[i].base;
41 size = region->region[i].size;
42
43 pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
44 name, i, base, base + size - 1, size);
45 }
46}
35 47
48void lmb_dump_all(void)
49{
36 if (!lmb_debug) 50 if (!lmb_debug)
37 return; 51 return;
38 52
39 pr_info("lmb_dump_all:\n"); 53 pr_info("LMB configuration:\n");
40 pr_info(" memory.cnt = 0x%lx\n", lmb.memory.cnt); 54 pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size);
41 pr_info(" memory.size = 0x%llx\n", 55 pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);
42 (unsigned long long)lmb.memory.size);
43 for (i=0; i < lmb.memory.cnt ;i++) {
44 pr_info(" memory.region[0x%lx].base = 0x%llx\n",
45 i, (unsigned long long)lmb.memory.region[i].base);
46 pr_info(" .size = 0x%llx\n",
47 (unsigned long long)lmb.memory.region[i].size);
48 }
49 56
50 pr_info(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt); 57 lmb_dump(&lmb.memory, "memory");
51 pr_info(" reserved.size = 0x%llx\n", 58 lmb_dump(&lmb.reserved, "reserved");
52 (unsigned long long)lmb.memory.size);
53 for (i=0; i < lmb.reserved.cnt ;i++) {
54 pr_info(" reserved.region[0x%lx].base = 0x%llx\n",
55 i, (unsigned long long)lmb.reserved.region[i].base);
56 pr_info(" .size = 0x%llx\n",
57 (unsigned long long)lmb.reserved.region[i].size);
58 }
59} 59}
60 60
61static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2, 61static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 280332c1827c..619313ed6c46 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -157,11 +157,11 @@ static void init_shared_classes(void)
157#define SOFTIRQ_ENTER() \ 157#define SOFTIRQ_ENTER() \
158 local_bh_disable(); \ 158 local_bh_disable(); \
159 local_irq_disable(); \ 159 local_irq_disable(); \
160 trace_softirq_enter(); \ 160 lockdep_softirq_enter(); \
161 WARN_ON(!in_softirq()); 161 WARN_ON(!in_softirq());
162 162
163#define SOFTIRQ_EXIT() \ 163#define SOFTIRQ_EXIT() \
164 trace_softirq_exit(); \ 164 lockdep_softirq_exit(); \
165 local_irq_enable(); \ 165 local_irq_enable(); \
166 local_bh_enable(); 166 local_bh_enable();
167 167
diff --git a/lib/nlattr.c b/lib/nlattr.c
new file mode 100644
index 000000000000..c4706eb98d3d
--- /dev/null
+++ b/lib/nlattr.c
@@ -0,0 +1,502 @@
1/*
2 * NETLINK Netlink attributes
3 *
4 * Authors: Thomas Graf <tgraf@suug.ch>
5 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6 */
7
8#include <linux/module.h>
9#include <linux/kernel.h>
10#include <linux/errno.h>
11#include <linux/jiffies.h>
12#include <linux/netdevice.h>
13#include <linux/skbuff.h>
14#include <linux/string.h>
15#include <linux/types.h>
16#include <net/netlink.h>
17
18static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = {
19 [NLA_U8] = sizeof(u8),
20 [NLA_U16] = sizeof(u16),
21 [NLA_U32] = sizeof(u32),
22 [NLA_U64] = sizeof(u64),
23 [NLA_NESTED] = NLA_HDRLEN,
24};
25
26static int validate_nla(struct nlattr *nla, int maxtype,
27 const struct nla_policy *policy)
28{
29 const struct nla_policy *pt;
30 int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
31
32 if (type <= 0 || type > maxtype)
33 return 0;
34
35 pt = &policy[type];
36
37 BUG_ON(pt->type > NLA_TYPE_MAX);
38
39 switch (pt->type) {
40 case NLA_FLAG:
41 if (attrlen > 0)
42 return -ERANGE;
43 break;
44
45 case NLA_NUL_STRING:
46 if (pt->len)
47 minlen = min_t(int, attrlen, pt->len + 1);
48 else
49 minlen = attrlen;
50
51 if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL)
52 return -EINVAL;
53 /* fall through */
54
55 case NLA_STRING:
56 if (attrlen < 1)
57 return -ERANGE;
58
59 if (pt->len) {
60 char *buf = nla_data(nla);
61
62 if (buf[attrlen - 1] == '\0')
63 attrlen--;
64
65 if (attrlen > pt->len)
66 return -ERANGE;
67 }
68 break;
69
70 case NLA_BINARY:
71 if (pt->len && attrlen > pt->len)
72 return -ERANGE;
73 break;
74
75 case NLA_NESTED_COMPAT:
76 if (attrlen < pt->len)
77 return -ERANGE;
78 if (attrlen < NLA_ALIGN(pt->len))
79 break;
80 if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
81 return -ERANGE;
82 nla = nla_data(nla) + NLA_ALIGN(pt->len);
83 if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
84 return -ERANGE;
85 break;
86 case NLA_NESTED:
87 /* a nested attribute is allowed to be empty; if it's not,
88 * it must have a size of at least NLA_HDRLEN.
89 */
90 if (attrlen == 0)
91 break;
92 default:
93 if (pt->len)
94 minlen = pt->len;
95 else if (pt->type != NLA_UNSPEC)
96 minlen = nla_attr_minlen[pt->type];
97
98 if (attrlen < minlen)
99 return -ERANGE;
100 }
101
102 return 0;
103}
104
105/**
106 * nla_validate - Validate a stream of attributes
107 * @head: head of attribute stream
108 * @len: length of attribute stream
109 * @maxtype: maximum attribute type to be expected
110 * @policy: validation policy
111 *
112 * Validates all attributes in the specified attribute stream against the
113 * specified policy. Attributes with a type exceeding maxtype will be
114 * ignored. See documentation of struct nla_policy for more details.
115 *
116 * Returns 0 on success or a negative error code.
117 */
118int nla_validate(struct nlattr *head, int len, int maxtype,
119 const struct nla_policy *policy)
120{
121 struct nlattr *nla;
122 int rem, err;
123
124 nla_for_each_attr(nla, head, len, rem) {
125 err = validate_nla(nla, maxtype, policy);
126 if (err < 0)
127 goto errout;
128 }
129
130 err = 0;
131errout:
132 return err;
133}
134
135/**
136 * nla_policy_len - Determine the maximum length of a policy
137 * @p: policy to use
138 * @n: number of policies
139 *
140 * Determines the maximum length of the policy. It is currently used
141 * to allocate Netlink buffers roughly the size of the actual
142 * message.
143 *
144 * Returns the computed maximum length in bytes.
145 */
146int
147nla_policy_len(const struct nla_policy *p, int n)
148{
149 int i, len = 0;
150
151 for (i = 0; i < n; i++, p++) {
152 if (p->len)
153 len += nla_total_size(p->len);
154 else if (nla_attr_minlen[p->type])
155 len += nla_total_size(nla_attr_minlen[p->type]);
156 }
157
158 return len;
159}
160
161/**
162 * nla_parse - Parse a stream of attributes into a tb buffer
163 * @tb: destination array with maxtype+1 elements
164 * @maxtype: maximum attribute type to be expected
165 * @head: head of attribute stream
166 * @len: length of attribute stream
167 * @policy: validation policy
168 *
169 * Parses a stream of attributes and stores a pointer to each attribute in
170 * the tb array, accessible via the attribute type. Attributes with a type
171 * exceeding maxtype will be silently ignored for backwards compatibility
172 * reasons. policy may be set to NULL if no validation is required.
173 *
174 * Returns 0 on success or a negative error code.
175 */
176int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
177 const struct nla_policy *policy)
178{
179 struct nlattr *nla;
180 int rem, err;
181
182 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
183
184 nla_for_each_attr(nla, head, len, rem) {
185 u16 type = nla_type(nla);
186
187 if (type > 0 && type <= maxtype) {
188 if (policy) {
189 err = validate_nla(nla, maxtype, policy);
190 if (err < 0)
191 goto errout;
192 }
193
194 tb[type] = nla;
195 }
196 }
197
198 if (unlikely(rem > 0))
199 printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
200 "attributes.\n", rem);
201
202 err = 0;
203errout:
204 return err;
205}
206
207/**
208 * nla_find - Find a specific attribute in a stream of attributes
209 * @head: head of attribute stream
210 * @len: length of attribute stream
211 * @attrtype: type of attribute to look for
212 *
213 * Returns the first attribute in the stream matching the specified type.
214 */
215struct nlattr *nla_find(struct nlattr *head, int len, int attrtype)
216{
217 struct nlattr *nla;
218 int rem;
219
220 nla_for_each_attr(nla, head, len, rem)
221 if (nla_type(nla) == attrtype)
222 return nla;
223
224 return NULL;
225}
226
227/**
228 * nla_strlcpy - Copy string attribute payload into a sized buffer
229 * @dst: where to copy the string to
230 * @nla: attribute to copy the string from
231 * @dstsize: size of destination buffer
232 *
233 * Copies at most dstsize - 1 bytes into the destination buffer.
234 * The result is always a valid NUL-terminated string. Unlike
235 * strlcpy the destination buffer is always padded out.
236 *
237 * Returns the length of the source buffer.
238 */
239size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
240{
241 size_t srclen = nla_len(nla);
242 char *src = nla_data(nla);
243
244 if (srclen > 0 && src[srclen - 1] == '\0')
245 srclen--;
246
247 if (dstsize > 0) {
248 size_t len = (srclen >= dstsize) ? dstsize - 1 : srclen;
249
250 memset(dst, 0, dstsize);
251 memcpy(dst, src, len);
252 }
253
254 return srclen;
255}
256
257/**
258 * nla_memcpy - Copy a netlink attribute into another memory area
259 * @dest: where to copy to
260 * @src: netlink attribute to copy from
261 * @count: size of the destination area
262 *
263 * Note: The number of bytes copied is limited by the length of
264 * the attribute's payload.
265 *
266 * Returns the number of bytes copied.
267 */
268int nla_memcpy(void *dest, const struct nlattr *src, int count)
269{
270 int minlen = min_t(int, count, nla_len(src));
271
272 memcpy(dest, nla_data(src), minlen);
273
274 return minlen;
275}
276
277/**
278 * nla_memcmp - Compare an attribute with sized memory area
279 * @nla: netlink attribute
280 * @data: memory area
281 * @size: size of memory area
282 */
283int nla_memcmp(const struct nlattr *nla, const void *data,
284 size_t size)
285{
286 int d = nla_len(nla) - size;
287
288 if (d == 0)
289 d = memcmp(nla_data(nla), data, size);
290
291 return d;
292}
293
294/**
295 * nla_strcmp - Compare a string attribute against a string
296 * @nla: netlink string attribute
297 * @str: another string
298 */
299int nla_strcmp(const struct nlattr *nla, const char *str)
300{
301 int len = strlen(str) + 1;
302 int d = nla_len(nla) - len;
303
304 if (d == 0)
305 d = memcmp(nla_data(nla), str, len);
306
307 return d;
308}
309
310#ifdef CONFIG_NET
311/**
312 * __nla_reserve - reserve room for attribute on the skb
313 * @skb: socket buffer to reserve room on
314 * @attrtype: attribute type
315 * @attrlen: length of attribute payload
316 *
317 * Adds a netlink attribute header to a socket buffer and reserves
318 * room for the payload but does not copy it.
319 *
320 * The caller is responsible to ensure that the skb provides enough
321 * tailroom for the attribute header and payload.
322 */
323struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
324{
325 struct nlattr *nla;
326
327 nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
328 nla->nla_type = attrtype;
329 nla->nla_len = nla_attr_size(attrlen);
330
331 memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen));
332
333 return nla;
334}
335EXPORT_SYMBOL(__nla_reserve);
336
337/**
338 * __nla_reserve_nohdr - reserve room for attribute without header
339 * @skb: socket buffer to reserve room on
340 * @attrlen: length of attribute payload
341 *
342 * Reserves room for attribute payload without a header.
343 *
344 * The caller is responsible to ensure that the skb provides enough
345 * tailroom for the payload.
346 */
347void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
348{
349 void *start;
350
351 start = skb_put(skb, NLA_ALIGN(attrlen));
352 memset(start, 0, NLA_ALIGN(attrlen));
353
354 return start;
355}
356EXPORT_SYMBOL(__nla_reserve_nohdr);
357
358/**
359 * nla_reserve - reserve room for attribute on the skb
360 * @skb: socket buffer to reserve room on
361 * @attrtype: attribute type
362 * @attrlen: length of attribute payload
363 *
364 * Adds a netlink attribute header to a socket buffer and reserves
365 * room for the payload but does not copy it.
366 *
367 * Returns NULL if the tailroom of the skb is insufficient to store
368 * the attribute header and payload.
369 */
370struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
371{
372 if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
373 return NULL;
374
375 return __nla_reserve(skb, attrtype, attrlen);
376}
377EXPORT_SYMBOL(nla_reserve);
378
379/**
380 * nla_reserve_nohdr - reserve room for attribute without header
381 * @skb: socket buffer to reserve room on
382 * @attrlen: length of attribute payload
383 *
384 * Reserves room for attribute payload without a header.
385 *
386 * Returns NULL if the tailroom of the skb is insufficient to store
387 * the attribute payload.
388 */
389void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
390{
391 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
392 return NULL;
393
394 return __nla_reserve_nohdr(skb, attrlen);
395}
396EXPORT_SYMBOL(nla_reserve_nohdr);
397
398/**
399 * __nla_put - Add a netlink attribute to a socket buffer
400 * @skb: socket buffer to add attribute to
401 * @attrtype: attribute type
402 * @attrlen: length of attribute payload
403 * @data: head of attribute payload
404 *
405 * The caller is responsible to ensure that the skb provides enough
406 * tailroom for the attribute header and payload.
407 */
408void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
409 const void *data)
410{
411 struct nlattr *nla;
412
413 nla = __nla_reserve(skb, attrtype, attrlen);
414 memcpy(nla_data(nla), data, attrlen);
415}
416EXPORT_SYMBOL(__nla_put);
417
418/**
419 * __nla_put_nohdr - Add a netlink attribute without header
420 * @skb: socket buffer to add attribute to
421 * @attrlen: length of attribute payload
422 * @data: head of attribute payload
423 *
424 * The caller is responsible to ensure that the skb provides enough
425 * tailroom for the attribute payload.
426 */
427void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
428{
429 void *start;
430
431 start = __nla_reserve_nohdr(skb, attrlen);
432 memcpy(start, data, attrlen);
433}
434EXPORT_SYMBOL(__nla_put_nohdr);
435
436/**
437 * nla_put - Add a netlink attribute to a socket buffer
438 * @skb: socket buffer to add attribute to
439 * @attrtype: attribute type
440 * @attrlen: length of attribute payload
441 * @data: head of attribute payload
442 *
443 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
444 * the attribute header and payload.
445 */
446int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
447{
448 if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
449 return -EMSGSIZE;
450
451 __nla_put(skb, attrtype, attrlen, data);
452 return 0;
453}
454EXPORT_SYMBOL(nla_put);
455
456/**
457 * nla_put_nohdr - Add a netlink attribute without header
458 * @skb: socket buffer to add attribute to
459 * @attrlen: length of attribute payload
460 * @data: head of attribute payload
461 *
462 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
463 * the attribute payload.
464 */
465int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
466{
467 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
468 return -EMSGSIZE;
469
470 __nla_put_nohdr(skb, attrlen, data);
471 return 0;
472}
473EXPORT_SYMBOL(nla_put_nohdr);
474
475/**
476 * nla_append - Add a netlink attribute without header or padding
477 * @skb: socket buffer to add attribute to
478 * @attrlen: length of attribute payload
479 * @data: head of attribute payload
480 *
481 * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
482 * the attribute payload.
483 */
484int nla_append(struct sk_buff *skb, int attrlen, const void *data)
485{
486 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
487 return -EMSGSIZE;
488
489 memcpy(skb_put(skb, attrlen), data, attrlen);
490 return 0;
491}
492EXPORT_SYMBOL(nla_append);
493#endif
494
495EXPORT_SYMBOL(nla_validate);
496EXPORT_SYMBOL(nla_policy_len);
497EXPORT_SYMBOL(nla_parse);
498EXPORT_SYMBOL(nla_find);
499EXPORT_SYMBOL(nla_strlcpy);
500EXPORT_SYMBOL(nla_memcpy);
501EXPORT_SYMBOL(nla_memcmp);
502EXPORT_SYMBOL(nla_strcmp);
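
A hedged usage sketch for the parse-and-validate API above. The attribute enum and policy values are illustrative; only struct nla_policy, nla_parse() and nla_get_u32() come from the real interface (<net/netlink.h>):

enum { EX_ATTR_UNSPEC, EX_ATTR_NAME, EX_ATTR_MTU, __EX_ATTR_MAX };
#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
	[EX_ATTR_NAME]	= { .type = NLA_NUL_STRING, .len = 16 },
	[EX_ATTR_MTU]	= { .type = NLA_U32 },
};

static int example_parse(struct nlattr *head, int len)
{
	struct nlattr *tb[EX_ATTR_MAX + 1];
	int err;

	err = nla_parse(tb, EX_ATTR_MAX, head, len, ex_policy);
	if (err < 0)
		return err;	/* validate_nla() rejected an attribute */
	if (tb[EX_ATTR_MTU])
		printk(KERN_DEBUG "mtu=%u\n",
		       nla_get_u32(tb[EX_ATTR_MTU]));
	return 0;
}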
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 9956b99649f0..f653659e0bc1 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -163,17 +163,14 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
163 { 163 {
164 if (!other->rb_right || rb_is_black(other->rb_right)) 164 if (!other->rb_right || rb_is_black(other->rb_right))
165 { 165 {
166 struct rb_node *o_left; 166 rb_set_black(other->rb_left);
167 if ((o_left = other->rb_left))
168 rb_set_black(o_left);
169 rb_set_red(other); 167 rb_set_red(other);
170 __rb_rotate_right(other, root); 168 __rb_rotate_right(other, root);
171 other = parent->rb_right; 169 other = parent->rb_right;
172 } 170 }
173 rb_set_color(other, rb_color(parent)); 171 rb_set_color(other, rb_color(parent));
174 rb_set_black(parent); 172 rb_set_black(parent);
175 if (other->rb_right) 173 rb_set_black(other->rb_right);
176 rb_set_black(other->rb_right);
177 __rb_rotate_left(parent, root); 174 __rb_rotate_left(parent, root);
178 node = root->rb_node; 175 node = root->rb_node;
179 break; 176 break;
@@ -200,17 +197,14 @@ static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
200 { 197 {
201 if (!other->rb_left || rb_is_black(other->rb_left)) 198 if (!other->rb_left || rb_is_black(other->rb_left))
202 { 199 {
203 register struct rb_node *o_right; 200 rb_set_black(other->rb_right);
204 if ((o_right = other->rb_right))
205 rb_set_black(o_right);
206 rb_set_red(other); 201 rb_set_red(other);
207 __rb_rotate_left(other, root); 202 __rb_rotate_left(other, root);
208 other = parent->rb_left; 203 other = parent->rb_left;
209 } 204 }
210 rb_set_color(other, rb_color(parent)); 205 rb_set_color(other, rb_color(parent));
211 rb_set_black(parent); 206 rb_set_black(parent);
212 if (other->rb_left) 207 rb_set_black(other->rb_left);
213 rb_set_black(other->rb_left);
214 __rb_rotate_right(parent, root); 208 __rb_rotate_right(parent, root);
215 node = root->rb_node; 209 node = root->rb_node;
216 break; 210 break;
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 1f991acc2a05..32e2bd3b1142 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -145,7 +145,7 @@ static void *swiotlb_bus_to_virt(dma_addr_t address)
145 return phys_to_virt(swiotlb_bus_to_phys(address)); 145 return phys_to_virt(swiotlb_bus_to_phys(address));
146} 146}
147 147
148int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size) 148int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
149{ 149{
150 return 0; 150 return 0;
151} 151}
@@ -315,9 +315,9 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
315 return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); 315 return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
316} 316}
317 317
318static inline int range_needs_mapping(void *ptr, size_t size) 318static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
319{ 319{
320 return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size); 320 return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
321} 321}
322 322
323static int is_swiotlb_buffer(char *addr) 323static int is_swiotlb_buffer(char *addr)
@@ -636,11 +636,14 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
636 * Once the device is given the dma address, the device owns this memory until 636 * Once the device is given the dma address, the device owns this memory until
637 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. 637 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
638 */ 638 */
639dma_addr_t 639dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
640swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, 640 unsigned long offset, size_t size,
641 int dir, struct dma_attrs *attrs) 641 enum dma_data_direction dir,
642{ 642 struct dma_attrs *attrs)
643 dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr); 643{
644 phys_addr_t phys = page_to_phys(page) + offset;
645 void *ptr = page_address(page) + offset;
646 dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
644 void *map; 647 void *map;
645 648
646 BUG_ON(dir == DMA_NONE); 649 BUG_ON(dir == DMA_NONE);
@@ -649,37 +652,30 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
649 * we can safely return the device addr and not worry about bounce 652 * we can safely return the device addr and not worry about bounce
650 * buffering it. 653 * buffering it.
651 */ 654 */
652 if (!address_needs_mapping(hwdev, dev_addr, size) && 655 if (!address_needs_mapping(dev, dev_addr, size) &&
653 !range_needs_mapping(ptr, size)) 656 !range_needs_mapping(virt_to_phys(ptr), size))
654 return dev_addr; 657 return dev_addr;
655 658
656 /* 659 /*
657 * Oh well, have to allocate and map a bounce buffer. 660 * Oh well, have to allocate and map a bounce buffer.
658 */ 661 */
659 map = map_single(hwdev, virt_to_phys(ptr), size, dir); 662 map = map_single(dev, phys, size, dir);
660 if (!map) { 663 if (!map) {
661 swiotlb_full(hwdev, size, dir, 1); 664 swiotlb_full(dev, size, dir, 1);
662 map = io_tlb_overflow_buffer; 665 map = io_tlb_overflow_buffer;
663 } 666 }
664 667
665 dev_addr = swiotlb_virt_to_bus(hwdev, map); 668 dev_addr = swiotlb_virt_to_bus(dev, map);
666 669
667 /* 670 /*
668 * Ensure that the address returned is DMA'ble 671 * Ensure that the address returned is DMA'ble
669 */ 672 */
670 if (address_needs_mapping(hwdev, dev_addr, size)) 673 if (address_needs_mapping(dev, dev_addr, size))
671 panic("map_single: bounce buffer is not DMA'ble"); 674 panic("map_single: bounce buffer is not DMA'ble");
672 675
673 return dev_addr; 676 return dev_addr;
674} 677}
675EXPORT_SYMBOL(swiotlb_map_single_attrs); 678EXPORT_SYMBOL_GPL(swiotlb_map_page);
676
677dma_addr_t
678swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
679{
680 return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
681}
682EXPORT_SYMBOL(swiotlb_map_single);
683 679
684/* 680/*
685 * Unmap a single streaming mode DMA translation. The dma_addr and size must 681 * Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -689,9 +685,9 @@ EXPORT_SYMBOL(swiotlb_map_single);
689 * After this call, reads by the cpu to the buffer are guaranteed to see 685 * After this call, reads by the cpu to the buffer are guaranteed to see
690 * whatever the device wrote there. 686 * whatever the device wrote there.
691 */ 687 */
692void 688void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
693swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, 689 size_t size, enum dma_data_direction dir,
694 size_t size, int dir, struct dma_attrs *attrs) 690 struct dma_attrs *attrs)
695{ 691{
696 char *dma_addr = swiotlb_bus_to_virt(dev_addr); 692 char *dma_addr = swiotlb_bus_to_virt(dev_addr);
697 693
@@ -701,15 +697,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
701 else if (dir == DMA_FROM_DEVICE) 697 else if (dir == DMA_FROM_DEVICE)
702 dma_mark_clean(dma_addr, size); 698 dma_mark_clean(dma_addr, size);
703} 699}
704EXPORT_SYMBOL(swiotlb_unmap_single_attrs); 700EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
705
706void
707swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
708 int dir)
709{
710 return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
711}
712EXPORT_SYMBOL(swiotlb_unmap_single);
713 701
714/* 702/*
715 * Make physical memory consistent for a single streaming mode DMA translation 703 * Make physical memory consistent for a single streaming mode DMA translation
@@ -736,7 +724,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
736 724
737void 725void
738swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, 726swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
739 size_t size, int dir) 727 size_t size, enum dma_data_direction dir)
740{ 728{
741 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); 729 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
742} 730}
@@ -744,7 +732,7 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
744 732
745void 733void
746swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, 734swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
747 size_t size, int dir) 735 size_t size, enum dma_data_direction dir)
748{ 736{
749 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); 737 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
750} 738}
@@ -769,7 +757,8 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
769 757
770void 758void
771swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, 759swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
772 unsigned long offset, size_t size, int dir) 760 unsigned long offset, size_t size,
761 enum dma_data_direction dir)
773{ 762{
774 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, 763 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
775 SYNC_FOR_CPU); 764 SYNC_FOR_CPU);
@@ -778,7 +767,8 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
778 767
779void 768void
780swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, 769swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
781 unsigned long offset, size_t size, int dir) 770 unsigned long offset, size_t size,
771 enum dma_data_direction dir)
782{ 772{
783 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, 773 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
784 SYNC_FOR_DEVICE); 774 SYNC_FOR_DEVICE);
@@ -803,7 +793,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
803 */ 793 */
804int 794int
805swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, 795swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
806 int dir, struct dma_attrs *attrs) 796 enum dma_data_direction dir, struct dma_attrs *attrs)
807{ 797{
808 struct scatterlist *sg; 798 struct scatterlist *sg;
809 int i; 799 int i;
@@ -811,10 +801,10 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
811 BUG_ON(dir == DMA_NONE); 801 BUG_ON(dir == DMA_NONE);
812 802
813 for_each_sg(sgl, sg, nelems, i) { 803 for_each_sg(sgl, sg, nelems, i) {
814 void *addr = sg_virt(sg); 804 phys_addr_t paddr = sg_phys(sg);
815 dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, addr); 805 dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);
816 806
817 if (range_needs_mapping(addr, sg->length) || 807 if (range_needs_mapping(paddr, sg->length) ||
818 address_needs_mapping(hwdev, dev_addr, sg->length)) { 808 address_needs_mapping(hwdev, dev_addr, sg->length)) {
819 void *map = map_single(hwdev, sg_phys(sg), 809 void *map = map_single(hwdev, sg_phys(sg),
820 sg->length, dir); 810 sg->length, dir);
@@ -850,7 +840,7 @@ EXPORT_SYMBOL(swiotlb_map_sg);
850 */ 840 */
851void 841void
852swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, 842swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
853 int nelems, int dir, struct dma_attrs *attrs) 843 int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
854{ 844{
855 struct scatterlist *sg; 845 struct scatterlist *sg;
856 int i; 846 int i;
@@ -858,11 +848,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
858 BUG_ON(dir == DMA_NONE); 848 BUG_ON(dir == DMA_NONE);
859 849
860 for_each_sg(sgl, sg, nelems, i) { 850 for_each_sg(sgl, sg, nelems, i) {
861 if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg))) 851 if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
862 unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), 852 unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
863 sg->dma_length, dir); 853 sg->dma_length, dir);
864 else if (dir == DMA_FROM_DEVICE) 854 else if (dir == DMA_FROM_DEVICE)
865 dma_mark_clean(sg_virt(sg), sg->dma_length); 855 dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
866 } 856 }
867} 857}
868EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); 858EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
@@ -892,17 +882,17 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
892 BUG_ON(dir == DMA_NONE); 882 BUG_ON(dir == DMA_NONE);
893 883
894 for_each_sg(sgl, sg, nelems, i) { 884 for_each_sg(sgl, sg, nelems, i) {
895 if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg))) 885 if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
896 sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), 886 sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
897 sg->dma_length, dir, target); 887 sg->dma_length, dir, target);
898 else if (dir == DMA_FROM_DEVICE) 888 else if (dir == DMA_FROM_DEVICE)
899 dma_mark_clean(sg_virt(sg), sg->dma_length); 889 dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
900 } 890 }
901} 891}
902 892
903void 893void
904swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, 894swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
905 int nelems, int dir) 895 int nelems, enum dma_data_direction dir)
906{ 896{
907 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); 897 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
908} 898}
@@ -910,7 +900,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
910 900
911void 901void
912swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, 902swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
913 int nelems, int dir) 903 int nelems, enum dma_data_direction dir)
914{ 904{
915 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); 905 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
916} 906}
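
A hedged caller's-eye sketch of the reworked page-based entry points (device, page and length are illustrative; the signatures are the ones introduced in the hunks above):

/* assumes <linux/swiotlb.h> and <linux/dma-mapping.h> */
static void example_dma_roundtrip(struct device *dev, struct page *page,
				  unsigned long offset, size_t len)
{
	dma_addr_t dma = swiotlb_map_page(dev, page, offset, len,
					  DMA_TO_DEVICE, NULL);
	/* ... point the hardware at "dma" ... */
	swiotlb_unmap_page(dev, dma, len, DMA_TO_DEVICE, NULL);
}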
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0fbd0121d91d..be3001f912e4 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -396,7 +396,38 @@ static noinline char* put_dec(char *buf, unsigned long long num)
396#define SMALL 32 /* Must be 32 == 0x20 */ 396#define SMALL 32 /* Must be 32 == 0x20 */
397#define SPECIAL 64 /* 0x */ 397#define SPECIAL 64 /* 0x */
398 398
399static char *number(char *buf, char *end, unsigned long long num, int base, int size, int precision, int type) 399enum format_type {
400 FORMAT_TYPE_NONE, /* Just a string part */
401 FORMAT_TYPE_WIDTH,
402 FORMAT_TYPE_PRECISION,
403 FORMAT_TYPE_CHAR,
404 FORMAT_TYPE_STR,
405 FORMAT_TYPE_PTR,
406 FORMAT_TYPE_PERCENT_CHAR,
407 FORMAT_TYPE_INVALID,
408 FORMAT_TYPE_LONG_LONG,
409 FORMAT_TYPE_ULONG,
410 FORMAT_TYPE_LONG,
411 FORMAT_TYPE_USHORT,
412 FORMAT_TYPE_SHORT,
413 FORMAT_TYPE_UINT,
414 FORMAT_TYPE_INT,
415 FORMAT_TYPE_NRCHARS,
416 FORMAT_TYPE_SIZE_T,
417 FORMAT_TYPE_PTRDIFF
418};
419
420struct printf_spec {
421 enum format_type type;
422 int flags; /* flags to number() */
423 int field_width; /* width of output field */
424 int base;
425 int precision; /* # of digits/chars */
426 int qualifier;
427};
428
429static char *number(char *buf, char *end, unsigned long long num,
430 struct printf_spec spec)
400{ 431{
401 /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ 432 /* we are called with base 8, 10 or 16, only, thus don't need "G..." */
402 static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ 433 static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
@@ -404,32 +435,32 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
404 char tmp[66]; 435 char tmp[66];
405 char sign; 436 char sign;
406 char locase; 437 char locase;
407 int need_pfx = ((type & SPECIAL) && base != 10); 438 int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
408 int i; 439 int i;
409 440
410 /* locase = 0 or 0x20. ORing digits or letters with 'locase' 441 /* locase = 0 or 0x20. ORing digits or letters with 'locase'
411 * produces same digits or (maybe lowercased) letters */ 442 * produces same digits or (maybe lowercased) letters */
412 locase = (type & SMALL); 443 locase = (spec.flags & SMALL);
413 if (type & LEFT) 444 if (spec.flags & LEFT)
414 type &= ~ZEROPAD; 445 spec.flags &= ~ZEROPAD;
415 sign = 0; 446 sign = 0;
416 if (type & SIGN) { 447 if (spec.flags & SIGN) {
417 if ((signed long long) num < 0) { 448 if ((signed long long) num < 0) {
418 sign = '-'; 449 sign = '-';
419 num = - (signed long long) num; 450 num = - (signed long long) num;
420 size--; 451 spec.field_width--;
421 } else if (type & PLUS) { 452 } else if (spec.flags & PLUS) {
422 sign = '+'; 453 sign = '+';
423 size--; 454 spec.field_width--;
424 } else if (type & SPACE) { 455 } else if (spec.flags & SPACE) {
425 sign = ' '; 456 sign = ' ';
426 size--; 457 spec.field_width--;
427 } 458 }
428 } 459 }
429 if (need_pfx) { 460 if (need_pfx) {
430 size--; 461 spec.field_width--;
431 if (base == 16) 462 if (spec.base == 16)
432 size--; 463 spec.field_width--;
433 } 464 }
434 465
435 /* generate full string in tmp[], in reverse order */ 466 /* generate full string in tmp[], in reverse order */
@@ -441,10 +472,10 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
441 tmp[i++] = (digits[do_div(num,base)] | locase); 472 tmp[i++] = (digits[do_div(num,base)] | locase);
442 } while (num != 0); 473 } while (num != 0);
443 */ 474 */
444 else if (base != 10) { /* 8 or 16 */ 475 else if (spec.base != 10) { /* 8 or 16 */
445 int mask = base - 1; 476 int mask = spec.base - 1;
446 int shift = 3; 477 int shift = 3;
447 if (base == 16) shift = 4; 478 if (spec.base == 16) shift = 4;
448 do { 479 do {
449 tmp[i++] = (digits[((unsigned char)num) & mask] | locase); 480 tmp[i++] = (digits[((unsigned char)num) & mask] | locase);
450 num >>= shift; 481 num >>= shift;
@@ -454,12 +485,12 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
454 } 485 }
455 486
456 /* printing 100 using %2d gives "100", not "00" */ 487 /* printing 100 using %2d gives "100", not "00" */
457 if (i > precision) 488 if (i > spec.precision)
458 precision = i; 489 spec.precision = i;
459 /* leading space padding */ 490 /* leading space padding */
460 size -= precision; 491 spec.field_width -= spec.precision;
461 if (!(type & (ZEROPAD+LEFT))) { 492 if (!(spec.flags & (ZEROPAD+LEFT))) {
462 while(--size >= 0) { 493 while(--spec.field_width >= 0) {
463 if (buf < end) 494 if (buf < end)
464 *buf = ' '; 495 *buf = ' ';
465 ++buf; 496 ++buf;
@@ -476,23 +507,23 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
476 if (buf < end) 507 if (buf < end)
477 *buf = '0'; 508 *buf = '0';
478 ++buf; 509 ++buf;
479 if (base == 16) { 510 if (spec.base == 16) {
480 if (buf < end) 511 if (buf < end)
481 *buf = ('X' | locase); 512 *buf = ('X' | locase);
482 ++buf; 513 ++buf;
483 } 514 }
484 } 515 }
485 /* zero or space padding */ 516 /* zero or space padding */
486 if (!(type & LEFT)) { 517 if (!(spec.flags & LEFT)) {
487 char c = (type & ZEROPAD) ? '0' : ' '; 518 char c = (spec.flags & ZEROPAD) ? '0' : ' ';
488 while (--size >= 0) { 519 while (--spec.field_width >= 0) {
489 if (buf < end) 520 if (buf < end)
490 *buf = c; 521 *buf = c;
491 ++buf; 522 ++buf;
492 } 523 }
493 } 524 }
494 /* hmm even more zero padding? */ 525 /* hmm even more zero padding? */
495 while (i <= --precision) { 526 while (i <= --spec.precision) {
496 if (buf < end) 527 if (buf < end)
497 *buf = '0'; 528 *buf = '0';
498 ++buf; 529 ++buf;
@@ -504,7 +535,7 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
504 ++buf; 535 ++buf;
505 } 536 }
506 /* trailing space padding */ 537 /* trailing space padding */
507 while (--size >= 0) { 538 while (--spec.field_width >= 0) {
508 if (buf < end) 539 if (buf < end)
509 *buf = ' '; 540 *buf = ' ';
510 ++buf; 541 ++buf;
@@ -512,17 +543,17 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
512 return buf; 543 return buf;
513} 544}
514 545
515static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags) 546static char *string(char *buf, char *end, char *s, struct printf_spec spec)
516{ 547{
517 int len, i; 548 int len, i;
518 549
519 if ((unsigned long)s < PAGE_SIZE) 550 if ((unsigned long)s < PAGE_SIZE)
520 s = "<NULL>"; 551 s = "<NULL>";
521 552
522 len = strnlen(s, precision); 553 len = strnlen(s, spec.precision);
523 554
524 if (!(flags & LEFT)) { 555 if (!(spec.flags & LEFT)) {
525 while (len < field_width--) { 556 while (len < spec.field_width--) {
526 if (buf < end) 557 if (buf < end)
527 *buf = ' '; 558 *buf = ' ';
528 ++buf; 559 ++buf;
@@ -533,7 +564,7 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio
533 *buf = *s; 564 *buf = *s;
534 ++buf; ++s; 565 ++buf; ++s;
535 } 566 }
536 while (len < field_width--) { 567 while (len < spec.field_width--) {
537 if (buf < end) 568 if (buf < end)
538 *buf = ' '; 569 *buf = ' ';
539 ++buf; 570 ++buf;
@@ -541,21 +572,24 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio
541 return buf; 572 return buf;
542} 573}
543 574
544static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags) 575static char *symbol_string(char *buf, char *end, void *ptr,
576 struct printf_spec spec)
545{ 577{
546 unsigned long value = (unsigned long) ptr; 578 unsigned long value = (unsigned long) ptr;
547#ifdef CONFIG_KALLSYMS 579#ifdef CONFIG_KALLSYMS
548 char sym[KSYM_SYMBOL_LEN]; 580 char sym[KSYM_SYMBOL_LEN];
549 sprint_symbol(sym, value); 581 sprint_symbol(sym, value);
550 return string(buf, end, sym, field_width, precision, flags); 582 return string(buf, end, sym, spec);
551#else 583#else
552 field_width = 2*sizeof(void *); 584 spec.field_width = 2*sizeof(void *);
553 flags |= SPECIAL | SMALL | ZEROPAD; 585 spec.flags |= SPECIAL | SMALL | ZEROPAD;
554 return number(buf, end, value, 16, field_width, precision, flags); 586 spec.base = 16;
587 return number(buf, end, value, spec);
555#endif 588#endif
556} 589}
557 590
558static char *resource_string(char *buf, char *end, struct resource *res, int field_width, int precision, int flags) 591static char *resource_string(char *buf, char *end, struct resource *res,
592 struct printf_spec spec)
559{ 593{
560#ifndef IO_RSRC_PRINTK_SIZE 594#ifndef IO_RSRC_PRINTK_SIZE
561#define IO_RSRC_PRINTK_SIZE 4 595#define IO_RSRC_PRINTK_SIZE 4
@@ -564,7 +598,11 @@ static char *resource_string(char *buf, char *end, struct resource *res, int fie
564#ifndef MEM_RSRC_PRINTK_SIZE 598#ifndef MEM_RSRC_PRINTK_SIZE
565#define MEM_RSRC_PRINTK_SIZE 8 599#define MEM_RSRC_PRINTK_SIZE 8
566#endif 600#endif
567 601 struct printf_spec num_spec = {
602 .base = 16,
603 .precision = -1,
604 .flags = SPECIAL | SMALL | ZEROPAD,
605 };
568 /* room for the actual numbers, the two "0x", -, [, ] and the final zero */ 606 /* room for the actual numbers, the two "0x", -, [, ] and the final zero */
569 char sym[4*sizeof(resource_size_t) + 8]; 607 char sym[4*sizeof(resource_size_t) + 8];
570 char *p = sym, *pend = sym + sizeof(sym); 608 char *p = sym, *pend = sym + sizeof(sym);
@@ -576,17 +614,18 @@ static char *resource_string(char *buf, char *end, struct resource *res, int fie
576 size = MEM_RSRC_PRINTK_SIZE; 614 size = MEM_RSRC_PRINTK_SIZE;
577 615
578 *p++ = '['; 616 *p++ = '[';
579 p = number(p, pend, res->start, 16, size, -1, SPECIAL | SMALL | ZEROPAD); 617 num_spec.field_width = size;
618 p = number(p, pend, res->start, num_spec);
580 *p++ = '-'; 619 *p++ = '-';
581 p = number(p, pend, res->end, 16, size, -1, SPECIAL | SMALL | ZEROPAD); 620 p = number(p, pend, res->end, num_spec);
582 *p++ = ']'; 621 *p++ = ']';
583 *p = 0; 622 *p = 0;
584 623
585 return string(buf, end, sym, field_width, precision, flags); 624 return string(buf, end, sym, spec);
586} 625}
587 626
588static char *mac_address_string(char *buf, char *end, u8 *addr, int field_width, 627static char *mac_address_string(char *buf, char *end, u8 *addr,
589 int precision, int flags) 628 struct printf_spec spec)
590{ 629{
591 char mac_addr[6 * 3]; /* (6 * 2 hex digits), 5 colons and trailing zero */ 630 char mac_addr[6 * 3]; /* (6 * 2 hex digits), 5 colons and trailing zero */
592 char *p = mac_addr; 631 char *p = mac_addr;
@@ -594,16 +633,17 @@ static char *mac_address_string(char *buf, char *end, u8 *addr, int field_width,
594 633
595 for (i = 0; i < 6; i++) { 634 for (i = 0; i < 6; i++) {
596 p = pack_hex_byte(p, addr[i]); 635 p = pack_hex_byte(p, addr[i]);
597 if (!(flags & SPECIAL) && i != 5) 636 if (!(spec.flags & SPECIAL) && i != 5)
598 *p++ = ':'; 637 *p++ = ':';
599 } 638 }
600 *p = '\0'; 639 *p = '\0';
640 spec.flags &= ~SPECIAL;
601 641
602 return string(buf, end, mac_addr, field_width, precision, flags & ~SPECIAL); 642 return string(buf, end, mac_addr, spec);
603} 643}
604 644
605static char *ip6_addr_string(char *buf, char *end, u8 *addr, int field_width, 645static char *ip6_addr_string(char *buf, char *end, u8 *addr,
606 int precision, int flags) 646 struct printf_spec spec)
607{ 647{
608 char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */ 648 char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */
609 char *p = ip6_addr; 649 char *p = ip6_addr;
@@ -612,16 +652,17 @@ static char *ip6_addr_string(char *buf, char *end, u8 *addr, int field_width,
612 for (i = 0; i < 8; i++) { 652 for (i = 0; i < 8; i++) {
613 p = pack_hex_byte(p, addr[2 * i]); 653 p = pack_hex_byte(p, addr[2 * i]);
614 p = pack_hex_byte(p, addr[2 * i + 1]); 654 p = pack_hex_byte(p, addr[2 * i + 1]);
615 if (!(flags & SPECIAL) && i != 7) 655 if (!(spec.flags & SPECIAL) && i != 7)
616 *p++ = ':'; 656 *p++ = ':';
617 } 657 }
618 *p = '\0'; 658 *p = '\0';
659 spec.flags &= ~SPECIAL;
619 660
620 return string(buf, end, ip6_addr, field_width, precision, flags & ~SPECIAL); 661 return string(buf, end, ip6_addr, spec);
621} 662}
622 663
623static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width, 664static char *ip4_addr_string(char *buf, char *end, u8 *addr,
624 int precision, int flags) 665 struct printf_spec spec)
625{ 666{
626 char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */ 667 char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */
627 char temp[3]; /* hold each IP quad in reverse order */ 668 char temp[3]; /* hold each IP quad in reverse order */
@@ -637,8 +678,9 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width,
637 *p++ = '.'; 678 *p++ = '.';
638 } 679 }
639 *p = '\0'; 680 *p = '\0';
681 spec.flags &= ~SPECIAL;
640 682
641 return string(buf, end, ip4_addr, field_width, precision, flags & ~SPECIAL); 683 return string(buf, end, ip4_addr, spec);
642} 684}
643 685
644/* 686/*
@@ -663,41 +705,233 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr, int field_width,
663 * function pointers are really function descriptors, which contain a 705 * function pointers are really function descriptors, which contain a
664 * pointer to the real address. 706 * pointer to the real address.
665 */ 707 */
666static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags) 708static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
709 struct printf_spec spec)
667{ 710{
668 if (!ptr) 711 if (!ptr)
669 return string(buf, end, "(null)", field_width, precision, flags); 712 return string(buf, end, "(null)", spec);
670 713
671 switch (*fmt) { 714 switch (*fmt) {
672 case 'F': 715 case 'F':
673 ptr = dereference_function_descriptor(ptr); 716 ptr = dereference_function_descriptor(ptr);
674 /* Fallthrough */ 717 /* Fallthrough */
675 case 'S': 718 case 'S':
676 return symbol_string(buf, end, ptr, field_width, precision, flags); 719 return symbol_string(buf, end, ptr, spec);
677 case 'R': 720 case 'R':
678 return resource_string(buf, end, ptr, field_width, precision, flags); 721 return resource_string(buf, end, ptr, spec);
679 case 'm': 722 case 'm':
680 flags |= SPECIAL; 723 spec.flags |= SPECIAL;
681 /* Fallthrough */ 724 /* Fallthrough */
682 case 'M': 725 case 'M':
683 return mac_address_string(buf, end, ptr, field_width, precision, flags); 726 return mac_address_string(buf, end, ptr, spec);
684 case 'i': 727 case 'i':
685 flags |= SPECIAL; 728 spec.flags |= SPECIAL;
686 /* Fallthrough */ 729 /* Fallthrough */
687 case 'I': 730 case 'I':
688 if (fmt[1] == '6') 731 if (fmt[1] == '6')
689 return ip6_addr_string(buf, end, ptr, field_width, precision, flags); 732 return ip6_addr_string(buf, end, ptr, spec);
690 if (fmt[1] == '4') 733 if (fmt[1] == '4')
691 return ip4_addr_string(buf, end, ptr, field_width, precision, flags); 734 return ip4_addr_string(buf, end, ptr, spec);
692 flags &= ~SPECIAL; 735 spec.flags &= ~SPECIAL;
736 break;
737 }
738 spec.flags |= SMALL;
739 if (spec.field_width == -1) {
740 spec.field_width = 2*sizeof(void *);
741 spec.flags |= ZEROPAD;
742 }
743 spec.base = 16;
744
745 return number(buf, end, (unsigned long) ptr, spec);
746}
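When no %p extension matches, the fallback above prints the pointer as lowercase hex, zero-padded to 2 * sizeof(void *) digits if no explicit width was given. The same effect with plain printf, assuming unsigned long is pointer-sized as the kernel code itself does (a sketch, not the kernel path):

#include <stdio.h>

int main(void)
{
    int x;
    void *ptr = &x;

    /* lowercase hex, zero-padded to two digits per pointer byte */
    printf("%0*lx\n", (int)(2 * sizeof(void *)), (unsigned long)ptr);
    return 0;
}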
747
748/*
749 * Helper function to decode printf style format.
750 * Each call decode a token from the format and return the
751 * number of characters read (or likely the delta where it wants
752 * to go on the next call).
753 * The decoded token is returned through the parameters
754 *
755 * 'h', 'l', or 'L' for integer fields
756 * 'z' support added 23/7/1999 S.H.
757 * 'z' changed to 'Z' --davidm 1/25/99
758 * 't' added for ptrdiff_t
759 *
760 * @fmt: the format string
761 * @type: type of the token returned
762 * @flags: various flags such as +, -, #
763 * @field_width: the decoded field width
764 * @base: base of the number (octal, hex, ...)
765 * @precision: precision of a number
766 * @qualifier: qualifier of a number (long, size_t, ...)
767 */
768static int format_decode(const char *fmt, struct printf_spec *spec)
769{
770 const char *start = fmt;
771
772 /* we finished early by reading the field width */
773 if (spec->type == FORMAT_TYPE_WIDTH) {
774 if (spec->field_width < 0) {
775 spec->field_width = -spec->field_width;
776 spec->flags |= LEFT;
777 }
778 spec->type = FORMAT_TYPE_NONE;
779 goto precision;
780 }
781
782 /* we finished early by reading the precision */
783 if (spec->type == FORMAT_TYPE_PRECISION) {
784 if (spec->precision < 0)
785 spec->precision = 0;
786
787 spec->type = FORMAT_TYPE_NONE;
788 goto qualifier;
789 }
790
791 /* By default */
792 spec->type = FORMAT_TYPE_NONE;
793
794 for (; *fmt ; ++fmt) {
795 if (*fmt == '%')
796 break;
797 }
798
799 /* Return the current non-format string */
800 if (fmt != start || !*fmt)
801 return fmt - start;
802
803 /* Process flags */
804 spec->flags = 0;
805
806 while (1) { /* this also skips first '%' */
807 bool found = true;
808
809 ++fmt;
810
811 switch (*fmt) {
812 case '-': spec->flags |= LEFT; break;
813 case '+': spec->flags |= PLUS; break;
814 case ' ': spec->flags |= SPACE; break;
815 case '#': spec->flags |= SPECIAL; break;
816 case '0': spec->flags |= ZEROPAD; break;
817 default: found = false;
818 }
819
820 if (!found)
821 break;
822 }
823
824 /* get field width */
825 spec->field_width = -1;
826
827 if (isdigit(*fmt))
828 spec->field_width = skip_atoi(&fmt);
829 else if (*fmt == '*') {
830 /* it's the next argument */
831 spec->type = FORMAT_TYPE_WIDTH;
832 return ++fmt - start;
833 }
834
835precision:
836 /* get the precision */
837 spec->precision = -1;
838 if (*fmt == '.') {
839 ++fmt;
840 if (isdigit(*fmt)) {
841 spec->precision = skip_atoi(&fmt);
842 if (spec->precision < 0)
843 spec->precision = 0;
844 } else if (*fmt == '*') {
845 /* it's the next argument */
846 spec->type = FORMAT_TYPE_PRECISION;
847 return ++fmt - start;
848 }
849 }
850
851qualifier:
852 /* get the conversion qualifier */
853 spec->qualifier = -1;
854 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
855 *fmt == 'Z' || *fmt == 'z' || *fmt == 't') {
856 spec->qualifier = *fmt;
857 ++fmt;
858 if (spec->qualifier == 'l' && *fmt == 'l') {
859 spec->qualifier = 'L';
860 ++fmt;
861 }
862 }
863
864 /* default base */
865 spec->base = 10;
866 switch (*fmt) {
867 case 'c':
868 spec->type = FORMAT_TYPE_CHAR;
869 return ++fmt - start;
870
871 case 's':
872 spec->type = FORMAT_TYPE_STR;
873 return ++fmt - start;
874
875 case 'p':
876 spec->type = FORMAT_TYPE_PTR;
877 return fmt - start;
878 /* skip alnum */
879
880 case 'n':
881 spec->type = FORMAT_TYPE_NRCHARS;
882 return ++fmt - start;
883
884 case '%':
885 spec->type = FORMAT_TYPE_PERCENT_CHAR;
886 return ++fmt - start;
887
888 /* integer number formats - set up the flags and "break" */
889 case 'o':
890 spec->base = 8;
693 break; 891 break;
892
893 case 'x':
894 spec->flags |= SMALL;
895
896 case 'X':
897 spec->base = 16;
898 break;
899
900 case 'd':
901 case 'i':
902 spec->flags |= SIGN;
903 case 'u':
904 break;
905
906 default:
907 spec->type = FORMAT_TYPE_INVALID;
908 return fmt - start;
694 } 909 }
695 flags |= SMALL; 910
696 if (field_width == -1) { 911 if (spec->qualifier == 'L')
697 field_width = 2*sizeof(void *); 912 spec->type = FORMAT_TYPE_LONG_LONG;
698 flags |= ZEROPAD; 913 else if (spec->qualifier == 'l') {
914 if (spec->flags & SIGN)
915 spec->type = FORMAT_TYPE_LONG;
916 else
917 spec->type = FORMAT_TYPE_ULONG;
918 } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') {
919 spec->type = FORMAT_TYPE_SIZE_T;
920 } else if (spec->qualifier == 't') {
921 spec->type = FORMAT_TYPE_PTRDIFF;
922 } else if (spec->qualifier == 'h') {
923 if (spec->flags & SIGN)
924 spec->type = FORMAT_TYPE_SHORT;
925 else
926 spec->type = FORMAT_TYPE_USHORT;
927 } else {
928 if (spec->flags & SIGN)
929 spec->type = FORMAT_TYPE_INT;
930 else
931 spec->type = FORMAT_TYPE_UINT;
699 } 932 }
700 return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags); 933
934 return ++fmt - start;
701} 935}
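The subtle part of the contract is the early return for '*': format_decode() hands back FORMAT_TYPE_WIDTH (or FORMAT_TYPE_PRECISION), the caller fetches the value with va_arg(), and the next call resumes where parsing stopped. A toy userspace model of that two-call handshake (hypothetical, heavily abbreviated; not the kernel code):

#include <ctype.h>
#include <stdio.h>

/* Toy model of the format_decode() contract: one token per call; a
 * '*' width makes the parser return early so the caller can fetch
 * the width argument, then call again to finish the specifier. */
enum tok { T_NONE, T_WIDTH, T_INT, T_STR };
struct spec { enum tok type; int field_width; };

static int decode(const char *fmt, struct spec *s)
{
    const char *start = fmt;

    if (s->type == T_WIDTH) {       /* resuming after a '*' width */
        s->type = T_NONE;
        goto conversion;
    }
    s->type = T_NONE;
    while (*fmt && *fmt != '%')     /* literal run */
        ++fmt;
    if (fmt != start || !*fmt)
        return fmt - start;
    ++fmt;                          /* skip '%' */
    if (*fmt == '*') {
        s->type = T_WIDTH;          /* caller: va_arg the width now */
        return ++fmt - start;
    }
    s->field_width = isdigit((unsigned char)*fmt) ? *fmt++ - '0' : -1;
conversion:
    s->type = (*fmt == 's') ? T_STR : T_INT;
    return ++fmt - start;
}

int main(void)
{
    struct spec s = { T_NONE, -1 };
    const char *fmt = "%*d";

    fmt += decode(fmt, &s);         /* first call: consumed "%*" */
    printf("got WIDTH token: %d\n", s.type == T_WIDTH);
    s.field_width = 8;              /* what va_arg(args, int) supplies */
    fmt += decode(fmt, &s);         /* second call: consumed "d" */
    printf("type INT, width %d\n", s.field_width);
    return 0;
}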
702 936
703/** 937/**
@@ -726,18 +960,9 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field
726int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) 960int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
727{ 961{
728 unsigned long long num; 962 unsigned long long num;
729 int base;
730 char *str, *end, c; 963 char *str, *end, c;
731 964 int read;
732 int flags; /* flags to number() */ 965 struct printf_spec spec = {0};
733
734 int field_width; /* width of output field */
735 int precision; /* min. # of digits for integers; max
736 number of chars for from string */
737 int qualifier; /* 'h', 'l', or 'L' for integer fields */
738 /* 'z' support added 23/7/1999 S.H. */
739 /* 'z' changed to 'Z' --davidm 1/25/99 */
740 /* 't' added for ptrdiff_t */
741 966
742 /* Reject out-of-range values early. Large positive sizes are 967 /* Reject out-of-range values early. Large positive sizes are
743 used for unknown buffer sizes. */ 968 used for unknown buffer sizes. */
@@ -758,184 +983,144 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
758 size = end - buf; 983 size = end - buf;
759 } 984 }
760 985
761 for (; *fmt ; ++fmt) { 986 while (*fmt) {
762 if (*fmt != '%') { 987 const char *old_fmt = fmt;
763 if (str < end)
764 *str = *fmt;
765 ++str;
766 continue;
767 }
768 988
769 /* process flags */ 989 read = format_decode(fmt, &spec);
770 flags = 0;
771 repeat:
772 ++fmt; /* this also skips first '%' */
773 switch (*fmt) {
774 case '-': flags |= LEFT; goto repeat;
775 case '+': flags |= PLUS; goto repeat;
776 case ' ': flags |= SPACE; goto repeat;
777 case '#': flags |= SPECIAL; goto repeat;
778 case '0': flags |= ZEROPAD; goto repeat;
779 }
780 990
781 /* get field width */ 991 fmt += read;
782 field_width = -1;
783 if (isdigit(*fmt))
784 field_width = skip_atoi(&fmt);
785 else if (*fmt == '*') {
786 ++fmt;
787 /* it's the next argument */
788 field_width = va_arg(args, int);
789 if (field_width < 0) {
790 field_width = -field_width;
791 flags |= LEFT;
792 }
793 }
794 992
795 /* get the precision */ 993 switch (spec.type) {
796 precision = -1; 994 case FORMAT_TYPE_NONE: {
797 if (*fmt == '.') { 995 int copy = read;
798 ++fmt; 996 if (str < end) {
799 if (isdigit(*fmt)) 997 if (copy > end - str)
800 precision = skip_atoi(&fmt); 998 copy = end - str;
801 else if (*fmt == '*') { 999 memcpy(str, old_fmt, copy);
802 ++fmt;
803 /* it's the next argument */
804 precision = va_arg(args, int);
805 } 1000 }
806 if (precision < 0) 1001 str += read;
807 precision = 0; 1002 break;
808 } 1003 }
809 1004
810 /* get the conversion qualifier */ 1005 case FORMAT_TYPE_WIDTH:
811 qualifier = -1; 1006 spec.field_width = va_arg(args, int);
812 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || 1007 break;
813 *fmt =='Z' || *fmt == 'z' || *fmt == 't') {
814 qualifier = *fmt;
815 ++fmt;
816 if (qualifier == 'l' && *fmt == 'l') {
817 qualifier = 'L';
818 ++fmt;
819 }
820 }
821 1008
822 /* default base */ 1009 case FORMAT_TYPE_PRECISION:
823 base = 10; 1010 spec.precision = va_arg(args, int);
1011 break;
824 1012
825 switch (*fmt) { 1013 case FORMAT_TYPE_CHAR:
826 case 'c': 1014 if (!(spec.flags & LEFT)) {
827 if (!(flags & LEFT)) { 1015 while (--spec.field_width > 0) {
828 while (--field_width > 0) {
829 if (str < end)
830 *str = ' ';
831 ++str;
832 }
833 }
834 c = (unsigned char) va_arg(args, int);
835 if (str < end)
836 *str = c;
837 ++str;
838 while (--field_width > 0) {
839 if (str < end) 1016 if (str < end)
840 *str = ' '; 1017 *str = ' ';
841 ++str; 1018 ++str;
842 }
843 continue;
844
845 case 's':
846 str = string(str, end, va_arg(args, char *), field_width, precision, flags);
847 continue;
848
849 case 'p':
850 str = pointer(fmt+1, str, end,
851 va_arg(args, void *),
852 field_width, precision, flags);
853 /* Skip all alphanumeric pointer suffixes */
854 while (isalnum(fmt[1]))
855 fmt++;
856 continue;
857
858 case 'n':
859 /* FIXME:
860 * What does C99 say about the overflow case here? */
861 if (qualifier == 'l') {
862 long * ip = va_arg(args, long *);
863 *ip = (str - buf);
864 } else if (qualifier == 'Z' || qualifier == 'z') {
865 size_t * ip = va_arg(args, size_t *);
866 *ip = (str - buf);
867 } else {
868 int * ip = va_arg(args, int *);
869 *ip = (str - buf);
870 }
871 continue;
872 1019
873 case '%': 1020 }
1021 }
1022 c = (unsigned char) va_arg(args, int);
1023 if (str < end)
1024 *str = c;
1025 ++str;
1026 while (--spec.field_width > 0) {
874 if (str < end) 1027 if (str < end)
875 *str = '%'; 1028 *str = ' ';
876 ++str; 1029 ++str;
877 continue; 1030 }
1031 break;
878 1032
879 /* integer number formats - set up the flags and "break" */ 1033 case FORMAT_TYPE_STR:
880 case 'o': 1034 str = string(str, end, va_arg(args, char *), spec);
881 base = 8; 1035 break;
882 break;
883 1036
884 case 'x': 1037 case FORMAT_TYPE_PTR:
885 flags |= SMALL; 1038 str = pointer(fmt+1, str, end, va_arg(args, void *),
886 case 'X': 1039 spec);
887 base = 16; 1040 while (isalnum(*fmt))
888 break; 1041 fmt++;
1042 break;
889 1043
890 case 'd': 1044 case FORMAT_TYPE_PERCENT_CHAR:
891 case 'i': 1045 if (str < end)
892 flags |= SIGN; 1046 *str = '%';
893 case 'u': 1047 ++str;
894 break; 1048 break;
895 1049
896 default: 1050 case FORMAT_TYPE_INVALID:
1051 if (str < end)
1052 *str = '%';
1053 ++str;
1054 if (*fmt) {
897 if (str < end) 1055 if (str < end)
898 *str = '%'; 1056 *str = *fmt;
899 ++str; 1057 ++str;
900 if (*fmt) { 1058 } else {
901 if (str < end) 1059 --fmt;
902 *str = *fmt; 1060 }
903 ++str; 1061 break;
904 } else { 1062
905 --fmt; 1063 case FORMAT_TYPE_NRCHARS: {
906 } 1064 int qualifier = spec.qualifier;
907 continue; 1065
1066 if (qualifier == 'l') {
1067 long *ip = va_arg(args, long *);
1068 *ip = (str - buf);
1069 } else if (qualifier == 'Z' ||
1070 qualifier == 'z') {
1071 size_t *ip = va_arg(args, size_t *);
1072 *ip = (str - buf);
1073 } else {
1074 int *ip = va_arg(args, int *);
1075 *ip = (str - buf);
1076 }
1077 break;
908 } 1078 }
909 if (qualifier == 'L') 1079
910 num = va_arg(args, long long); 1080 default:
911 else if (qualifier == 'l') { 1081 switch (spec.type) {
912 num = va_arg(args, unsigned long); 1082 case FORMAT_TYPE_LONG_LONG:
913 if (flags & SIGN) 1083 num = va_arg(args, long long);
914 num = (signed long) num; 1084 break;
915 } else if (qualifier == 'Z' || qualifier == 'z') { 1085 case FORMAT_TYPE_ULONG:
916 num = va_arg(args, size_t); 1086 num = va_arg(args, unsigned long);
917 } else if (qualifier == 't') { 1087 break;
918 num = va_arg(args, ptrdiff_t); 1088 case FORMAT_TYPE_LONG:
919 } else if (qualifier == 'h') { 1089 num = va_arg(args, long);
920 num = (unsigned short) va_arg(args, int); 1090 break;
921 if (flags & SIGN) 1091 case FORMAT_TYPE_SIZE_T:
922 num = (signed short) num; 1092 num = va_arg(args, size_t);
923 } else { 1093 break;
924 num = va_arg(args, unsigned int); 1094 case FORMAT_TYPE_PTRDIFF:
925 if (flags & SIGN) 1095 num = va_arg(args, ptrdiff_t);
926 num = (signed int) num; 1096 break;
1097 case FORMAT_TYPE_USHORT:
1098 num = (unsigned short) va_arg(args, int);
1099 break;
1100 case FORMAT_TYPE_SHORT:
1101 num = (short) va_arg(args, int);
1102 break;
1103 case FORMAT_TYPE_INT:
1104 num = (int) va_arg(args, int);
1105 break;
1106 default:
1107 num = va_arg(args, unsigned int);
1108 }
1109
1110 str = number(str, end, num, spec);
927 } 1111 }
928 str = number(str, end, num, base,
929 field_width, precision, flags);
930 } 1112 }
1113
931 if (size > 0) { 1114 if (size > 0) {
932 if (str < end) 1115 if (str < end)
933 *str = '\0'; 1116 *str = '\0';
934 else 1117 else
935 end[-1] = '\0'; 1118 end[-1] = '\0';
936 } 1119 }
1120
937 /* the trailing null byte doesn't count towards the total */ 1121 /* the trailing null byte doesn't count towards the total */
938 return str-buf; 1122 return str-buf;
1123
939} 1124}
940EXPORT_SYMBOL(vsnprintf); 1125EXPORT_SYMBOL(vsnprintf);
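The refactoring does not change vsnprintf()'s return contract: the result is the length the output would have had, excluding the trailing '\0', even when truncated. Userspace snprintf shares the same C99 semantics:

#include <stdio.h>

int main(void)
{
    char buf[8];

    /* would need 11 characters; only 7 plus the NUL fit */
    int n = snprintf(buf, sizeof(buf), "value=%d", 12345);
    printf("n=%d buf=\"%s\"\n", n, buf);    /* n=11 buf="value=1" */
    return 0;
}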
941 1126
@@ -1058,6 +1243,372 @@ int sprintf(char * buf, const char *fmt, ...)
1058} 1243}
1059EXPORT_SYMBOL(sprintf); 1244EXPORT_SYMBOL(sprintf);
1060 1245
1246#ifdef CONFIG_BINARY_PRINTF
1247/*
1248 * bprintf service:
1249 * vbin_printf() - VA arguments to binary data
1250 * bstr_printf() - Binary data to text string
1251 */
1252
1253/**
1254 * vbin_printf - Parse a format string and place args' binary value in a buffer
1255 * @bin_buf: The buffer to place args' binary value
1256 * @size: The size of the buffer (in 32-bit words, not characters)
1257 * @fmt: The format string to use
1258 * @args: Arguments for the format string
1259 *
1260 * The format follows C99 vsnprintf, except %n is ignored, and its argument
1261 * is skipped.
1262 *
1263 * The return value is the number of 32-bit words which would be generated for
1264 * the given input.
1265 *
1266 * NOTE:
1267 * If the return value is greater than @size, the resulting bin_buf is NOT
1268 * valid for bstr_printf().
1269 */
1270int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
1271{
1272 struct printf_spec spec = {0};
1273 char *str, *end;
1274 int read;
1275
1276 str = (char *)bin_buf;
1277 end = (char *)(bin_buf + size);
1278
1279#define save_arg(type) \
1280do { \
1281 if (sizeof(type) == 8) { \
1282 unsigned long long value; \
1283 str = PTR_ALIGN(str, sizeof(u32)); \
1284 value = va_arg(args, unsigned long long); \
1285 if (str + sizeof(type) <= end) { \
1286 *(u32 *)str = *(u32 *)&value; \
1287 *(u32 *)(str + 4) = *((u32 *)&value + 1); \
1288 } \
1289 } else { \
1290 unsigned long value; \
1291 str = PTR_ALIGN(str, sizeof(type)); \
1292 value = va_arg(args, int); \
1293 if (str + sizeof(type) <= end) \
1294 *(typeof(type) *)str = (type)value; \
1295 } \
1296 str += sizeof(type); \
1297} while (0)
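In effect, save_arg() lays each argument down at its natural alignment inside the u32 stream, with 64-bit values aligned only to 4 bytes and stored as two u32 halves. A userspace sketch of that packing and its replay, where align_up() stands in for the kernel's PTR_ALIGN() and memcpy replaces the two explicit u32 stores:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* align_up() stands in for the kernel's PTR_ALIGN() */
static char *align_up(char *p, size_t a)
{
    return (char *)(((uintptr_t)p + a - 1) & ~(uintptr_t)(a - 1));
}

int main(void)
{
    uint32_t words[8];
    char *str = (char *)words;
    long long in = 0x1122334455667788LL, out;

    /* pack: save_arg(int), then save_arg(long long) */
    str = align_up(str, sizeof(int));
    *(int *)str = 42;
    str += sizeof(int);
    str = align_up(str, sizeof(uint32_t));  /* 64-bit: only u32-aligned */
    memcpy(str, &in, sizeof(in));
    str += sizeof(in);

    /* replay: get_arg(int), then get_arg(long long) */
    char *args = (char *)words;
    args = align_up(args, sizeof(int));
    printf("int: %d\n", *(int *)args);
    args += sizeof(int);
    args = align_up(args, sizeof(uint32_t));
    memcpy(&out, args, sizeof(out));
    printf("long long: %llx\n", out);
    return 0;
}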
1298
1299
1300 while (*fmt) {
1301 read = format_decode(fmt, &spec);
1302
1303 fmt += read;
1304
1305 switch (spec.type) {
1306 case FORMAT_TYPE_NONE:
1307 break;
1308
1309 case FORMAT_TYPE_WIDTH:
1310 case FORMAT_TYPE_PRECISION:
1311 save_arg(int);
1312 break;
1313
1314 case FORMAT_TYPE_CHAR:
1315 save_arg(char);
1316 break;
1317
1318 case FORMAT_TYPE_STR: {
1319 const char *save_str = va_arg(args, char *);
1320 size_t len;
1321 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
1322 || (unsigned long)save_str < PAGE_SIZE)
1323 save_str = "<NULL>";
1324 len = strlen(save_str);
1325 if (str + len + 1 < end)
1326 memcpy(str, save_str, len + 1);
1327 str += len + 1;
1328 break;
1329 }
1330
1331 case FORMAT_TYPE_PTR:
1332 save_arg(void *);
1333 /* skip all alphanumeric pointer suffixes */
1334 while (isalnum(*fmt))
1335 fmt++;
1336 break;
1337
1338 case FORMAT_TYPE_PERCENT_CHAR:
1339 break;
1340
1341 case FORMAT_TYPE_INVALID:
1342 if (!*fmt)
1343 --fmt;
1344 break;
1345
1346 case FORMAT_TYPE_NRCHARS: {
1347 /* skip %n's argument */
1348 int qualifier = spec.qualifier;
1349 void *skip_arg;
1350 if (qualifier == 'l')
1351 skip_arg = va_arg(args, long *);
1352 else if (qualifier == 'Z' || qualifier == 'z')
1353 skip_arg = va_arg(args, size_t *);
1354 else
1355 skip_arg = va_arg(args, int *);
1356 break;
1357 }
1358
1359 default:
1360 switch (spec.type) {
1361
1362 case FORMAT_TYPE_LONG_LONG:
1363 save_arg(long long);
1364 break;
1365 case FORMAT_TYPE_ULONG:
1366 case FORMAT_TYPE_LONG:
1367 save_arg(unsigned long);
1368 break;
1369 case FORMAT_TYPE_SIZE_T:
1370 save_arg(size_t);
1371 break;
1372 case FORMAT_TYPE_PTRDIFF:
1373 save_arg(ptrdiff_t);
1374 break;
1375 case FORMAT_TYPE_USHORT:
1376 case FORMAT_TYPE_SHORT:
1377 save_arg(short);
1378 break;
1379 default:
1380 save_arg(int);
1381 }
1382 }
1383 }
1384 return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
1385
1386#undef save_arg
1387}
1388EXPORT_SYMBOL_GPL(vbin_printf);
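One subtlety in the FORMAT_TYPE_STR case above: before calling strlen(), the string pointer is range-checked, and anything in the first or last page of the address space is replaced with "<NULL>", since NULL, small offsets and ERR_PTR-style values all land there. The predicate in isolation, assuming a 4K page size purely for the sake of the sketch:

#include <stdio.h>

#define PAGE_SIZE 4096UL    /* assumption for this sketch */

/* mirrors: (unsigned long)s > (unsigned long)-PAGE_SIZE
 *       || (unsigned long)s < PAGE_SIZE                  */
static int bad_str_ptr(const char *s)
{
    unsigned long a = (unsigned long)s;
    return a > (unsigned long)-PAGE_SIZE || a < PAGE_SIZE;
}

int main(void)
{
    printf("NULL -> %d, \"hi\" -> %d\n",
           bad_str_ptr(NULL), bad_str_ptr("hi"));    /* 1, 0 */
    return 0;
}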
1389
1390/**
1391 * bstr_printf - Format a string from binary arguments and place it in a buffer
1392 * @buf: The buffer to place the result into
1393 * @size: The size of the buffer, including the trailing null space
1394 * @fmt: The format string to use
1395 * @bin_buf: Binary arguments for the format string
1396 *
1397 * This function is like C99 vsnprintf, except that vsnprintf gets its
1398 * arguments from the stack while bstr_printf gets them from @bin_buf,
1399 * a binary buffer generated by vbin_printf.
1400 *
1401 * The format follows C99 vsnprintf, but has some extensions:
1402 * %pS output the name of a text symbol
1403 * %pF output the name of a function pointer
1404 * %pR output the address range in a struct resource
1405 * %n is ignored
1406 *
1407 * The return value is the number of characters which would
1408 * be generated for the given input, excluding the trailing
1409 * '\0', as per ISO C99. If you want to have the exact
1410 * number of characters written into @buf as return value
1411 * (not including the trailing '\0'), use vscnprintf(). If the
1412 * return is greater than or equal to @size, the resulting
1413 * string is truncated.
1414 */
1415int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1416{
1417 unsigned long long num;
1418 char *str, *end, c;
1419 const char *args = (const char *)bin_buf;
1420
1421 struct printf_spec spec = {0};
1422
1423 if (unlikely((int) size < 0)) {
1424 /* There can be only one.. */
1425 static char warn = 1;
1426 WARN_ON(warn);
1427 warn = 0;
1428 return 0;
1429 }
1430
1431 str = buf;
1432 end = buf + size;
1433
1434#define get_arg(type) \
1435({ \
1436 typeof(type) value; \
1437 if (sizeof(type) == 8) { \
1438 args = PTR_ALIGN(args, sizeof(u32)); \
1439 *(u32 *)&value = *(u32 *)args; \
1440 *((u32 *)&value + 1) = *(u32 *)(args + 4); \
1441 } else { \
1442 args = PTR_ALIGN(args, sizeof(type)); \
1443 value = *(typeof(type) *)args; \
1444 } \
1445 args += sizeof(type); \
1446 value; \
1447})
1448
1449 /* Make sure end is always >= buf */
1450 if (end < buf) {
1451 end = ((void *)-1);
1452 size = end - buf;
1453 }
1454
1455 while (*fmt) {
1456 int read;
1457 const char *old_fmt = fmt;
1458
1459 read = format_decode(fmt, &spec);
1460
1461 fmt += read;
1462
1463 switch (spec.type) {
1464 case FORMAT_TYPE_NONE: {
1465 int copy = read;
1466 if (str < end) {
1467 if (copy > end - str)
1468 copy = end - str;
1469 memcpy(str, old_fmt, copy);
1470 }
1471 str += read;
1472 break;
1473 }
1474
1475 case FORMAT_TYPE_WIDTH:
1476 spec.field_width = get_arg(int);
1477 break;
1478
1479 case FORMAT_TYPE_PRECISION:
1480 spec.precision = get_arg(int);
1481 break;
1482
1483 case FORMAT_TYPE_CHAR:
1484 if (!(spec.flags & LEFT)) {
1485 while (--spec.field_width > 0) {
1486 if (str < end)
1487 *str = ' ';
1488 ++str;
1489 }
1490 }
1491 c = (unsigned char) get_arg(char);
1492 if (str < end)
1493 *str = c;
1494 ++str;
1495 while (--spec.field_width > 0) {
1496 if (str < end)
1497 *str = ' ';
1498 ++str;
1499 }
1500 break;
1501
1502 case FORMAT_TYPE_STR: {
1503 const char *str_arg = args;
1504 size_t len = strlen(str_arg);
1505 args += len + 1;
1506 str = string(str, end, (char *)str_arg, spec);
1507 break;
1508 }
1509
1510 case FORMAT_TYPE_PTR:
1511 str = pointer(fmt+1, str, end, get_arg(void *), spec);
1512 while (isalnum(*fmt))
1513 fmt++;
1514 break;
1515
1516 case FORMAT_TYPE_PERCENT_CHAR:
1517 if (str < end)
1518 *str = '%';
1519 ++str;
1520 break;
1521
1522 case FORMAT_TYPE_INVALID:
1523 if (str < end)
1524 *str = '%';
1525 ++str;
1526 if (*fmt) {
1527 if (str < end)
1528 *str = *fmt;
1529 ++str;
1530 } else {
1531 --fmt;
1532 }
1533 break;
1534
1535 case FORMAT_TYPE_NRCHARS:
1536 /* skip */
1537 break;
1538
1539 default:
1540 switch (spec.type) {
1541
1542 case FORMAT_TYPE_LONG_LONG:
1543 num = get_arg(long long);
1544 break;
1545 case FORMAT_TYPE_ULONG:
1546 num = get_arg(unsigned long);
1547 break;
1548 case FORMAT_TYPE_LONG:
1549 num = get_arg(unsigned long);
1550 break;
1551 case FORMAT_TYPE_SIZE_T:
1552 num = get_arg(size_t);
1553 break;
1554 case FORMAT_TYPE_PTRDIFF:
1555 num = get_arg(ptrdiff_t);
1556 break;
1557 case FORMAT_TYPE_USHORT:
1558 num = get_arg(unsigned short);
1559 break;
1560 case FORMAT_TYPE_SHORT:
1561 num = get_arg(short);
1562 break;
1563 case FORMAT_TYPE_UINT:
1564 num = get_arg(unsigned int);
1565 break;
1566 default:
1567 num = get_arg(int);
1568 }
1569
1570 str = number(str, end, num, spec);
1571 }
1572 }
1573
1574 if (size > 0) {
1575 if (str < end)
1576 *str = '\0';
1577 else
1578 end[-1] = '\0';
1579 }
1580
1581#undef get_arg
1582
1583 /* the trailing null byte doesn't count towards the total */
1584 return str - buf;
1585}
1586EXPORT_SYMBOL_GPL(bstr_printf);
1587
1588/**
1589 * bprintf - Parse a format string and place args' binary value in a buffer
1590 * @bin_buf: The buffer to place args' binary value
1591 * @size: The size of the buffer (in 32-bit words, not characters)
1592 * @fmt: The format string to use
1593 * @...: Arguments for the format string
1594 *
1595 * The function returns the number of words (u32) written
1596 * into @bin_buf.
1597 */
1598int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...)
1599{
1600 va_list args;
1601 int ret;
1602
1603 va_start(args, fmt);
1604 ret = vbin_printf(bin_buf, size, fmt, args);
1605 va_end(args);
1606 return ret;
1607}
1608EXPORT_SYMBOL_GPL(bprintf);
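A sketch of the intended calling pattern (kernel context, hypothetical caller; u32 and ARRAY_SIZE come from kernel headers, and cpu/delta are made-up inputs): record the arguments cheaply now with bprintf(), then expand them to text later with bstr_printf(), passing the same format string both times. A return value larger than the word count means the binary buffer is not valid for replay.

/* hypothetical tracer-style caller; not a standalone program */
static void record_and_render(int cpu, unsigned long long delta)
{
    u32 words[64];
    char line[128];
    int n;

    n = bprintf(words, ARRAY_SIZE(words), "cpu=%d delta=%llu", cpu, delta);
    if (n <= ARRAY_SIZE(words))     /* larger return => bin buffer invalid */
        bstr_printf(line, sizeof(line), "cpu=%d delta=%llu", words);
}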
1609
1610#endif /* CONFIG_BINARY_PRINTF */
1611
1061/** 1612/**
1062 * vsscanf - Unformat a buffer into a list of arguments 1613 * vsscanf - Unformat a buffer into a list of arguments
1063 * @buf: input buffer 1614 * @buf: input buffer
diff --git a/lib/zlib_inflate/inflate.h b/lib/zlib_inflate/inflate.h
index df8a6c92052d..3d17b3d1b21f 100644
--- a/lib/zlib_inflate/inflate.h
+++ b/lib/zlib_inflate/inflate.h
@@ -1,3 +1,6 @@
1#ifndef INFLATE_H
2#define INFLATE_H
3
1/* inflate.h -- internal inflate state definition 4/* inflate.h -- internal inflate state definition
2 * Copyright (C) 1995-2004 Mark Adler 5 * Copyright (C) 1995-2004 Mark Adler
3 * For conditions of distribution and use, see copyright notice in zlib.h 6 * For conditions of distribution and use, see copyright notice in zlib.h
@@ -105,3 +108,4 @@ struct inflate_state {
105 unsigned short work[288]; /* work area for code table building */ 108 unsigned short work[288]; /* work area for code table building */
106 code codes[ENOUGH]; /* space for code tables */ 109 code codes[ENOUGH]; /* space for code tables */
107}; 110};
111#endif
diff --git a/lib/zlib_inflate/inftrees.h b/lib/zlib_inflate/inftrees.h
index 5f5219b1240e..b70b4731ac7a 100644
--- a/lib/zlib_inflate/inftrees.h
+++ b/lib/zlib_inflate/inftrees.h
@@ -1,3 +1,6 @@
1#ifndef INFTREES_H
2#define INFTREES_H
3
1/* inftrees.h -- header to use inftrees.c 4/* inftrees.h -- header to use inftrees.c
2 * Copyright (C) 1995-2005 Mark Adler 5 * Copyright (C) 1995-2005 Mark Adler
3 * For conditions of distribution and use, see copyright notice in zlib.h 6 * For conditions of distribution and use, see copyright notice in zlib.h
@@ -53,3 +56,4 @@ typedef enum {
53extern int zlib_inflate_table (codetype type, unsigned short *lens, 56extern int zlib_inflate_table (codetype type, unsigned short *lens,
54 unsigned codes, code **table, 57 unsigned codes, code **table,
55 unsigned *bits, unsigned short *work); 58 unsigned *bits, unsigned short *work);
59#endif