Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug      |   2
-rw-r--r--  lib/Makefile           |   9
-rw-r--r--  lib/bitrev.c           |   3
-rw-r--r--  lib/bug.c              |   2
-rw-r--r--  lib/debugobjects.c     |  15
-rw-r--r--  lib/div64.c            |  10
-rw-r--r--  lib/percpu_counter.c   |   7
-rw-r--r--  lib/radix-tree.c       | 122
-rw-r--r--  lib/smp_processor_id.c |   6
-rw-r--r--  lib/ts_bm.c            |   2
-rw-r--r--  lib/vsprintf.c         | 128
11 files changed, 197 insertions, 109 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6263e2d851f1..c459e8547bd8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -636,6 +636,8 @@ config LATENCYTOP
	  Enable this option if you want to use the LatencyTOP tool
	  to find out which userspace is blocking on what kernel operations.
 
+source kernel/trace/Kconfig
+
 config PROVIDE_OHCI1394_DMA_INIT
	bool "Remote debugging over FireWire early on boot"
	depends on PCI && X86
diff --git a/lib/Makefile b/lib/Makefile
index 74b0cfb1fcc3..4b836a53c08f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -8,6 +8,15 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
	 proportions.o prio_heap.o ratelimit.o
 
+ifdef CONFIG_FTRACE
+# Do not profile string.o, since it may be used in early boot or vdso
+CFLAGS_REMOVE_string.o = -pg
+# Also do not profile any debug utilities
+CFLAGS_REMOVE_spinlock_debug.o = -pg
+CFLAGS_REMOVE_list_debug.o = -pg
+CFLAGS_REMOVE_debugobjects.o = -pg
+endif
+
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 
diff --git a/lib/bitrev.c b/lib/bitrev.c
index 989aff73f881..3956203456d4 100644
--- a/lib/bitrev.c
+++ b/lib/bitrev.c
@@ -42,10 +42,11 @@ const u8 byte_rev_table[256] = {
 };
 EXPORT_SYMBOL_GPL(byte_rev_table);
 
-static __always_inline u16 bitrev16(u16 x)
+u16 bitrev16(u16 x)
 {
	return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8);
 }
+EXPORT_SYMBOL(bitrev16);
 
 /**
  * bitrev32 - reverse the order of bits in a u32 value
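bitrev16() becomes a real, exported function built from two bitrev8() reversals. A minimal userspace sketch of the same composition (it uses a loop-based bitrev8() instead of the kernel's byte_rev_table lookup, an assumption made here to keep the sketch self-contained):

#include <stdint.h>
#include <stdio.h>

/* Loop-based byte reversal; the kernel uses a 256-entry lookup table instead. */
static uint8_t bitrev8(uint8_t x)
{
	uint8_t r = 0;
	int i;

	for (i = 0; i < 8; i++)
		r |= ((x >> i) & 1) << (7 - i);
	return r;
}

/* Same composition as lib/bitrev.c: swap the bytes and reverse each one. */
static uint16_t bitrev16(uint16_t x)
{
	return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8);
}

int main(void)
{
	/* 0x1234 = 0001001000110100b reversed is 0010110001001000b = 0x2c48 */
	printf("bitrev16(0x1234) = 0x%04x\n", (unsigned)bitrev16(0x1234));
	return 0;
}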
diff --git a/lib/bug.c b/lib/bug.c
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -37,6 +37,7 @@
  */
 #include <linux/list.h>
 #include <linux/module.h>
+#include <linux/kernel.h>
 #include <linux/bug.h>
 #include <linux/sched.h>
 
@@ -149,6 +150,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
		(void *)bugaddr);
 
	show_regs(regs);
+	add_taint(TAINT_WARN);
	return BUG_TRAP_TYPE_WARN;
 }
 
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a76a5e122ae1..85b18d79be89 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -68,6 +68,7 @@ static int fill_pool(void)
 {
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
+	unsigned long flags;
 
	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;
@@ -81,10 +82,10 @@
		if (!new)
			return obj_pool_free;
 
-		spin_lock(&pool_lock);
+		spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
-		spin_unlock(&pool_lock);
+		spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
 }
@@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 }
 
 /*
- * Allocate a new object. If the pool is empty and no refill possible,
- * switch off the debugger.
+ * Allocate a new object. If the pool is empty, switch off the debugger.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 {
	struct debug_obj *obj = NULL;
-	int retry = 0;
 
-repeat:
	spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -141,9 +139,6 @@ repeat:
	}
	spin_unlock(&pool_lock);
 
-	if (fill_pool() && !obj && !retry++)
-		goto repeat;
-
	return obj;
 }
 
@@ -261,6 +256,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
	struct debug_obj *obj;
	unsigned long flags;
 
+	fill_pool();
+
	db = get_bucket((unsigned long) addr);
 
	spin_lock_irqsave(&db->lock, flags);
diff --git a/lib/div64.c b/lib/div64.c
index bb5bd0c0f030..a111eb8de9cf 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -98,3 +98,13 @@ EXPORT_SYMBOL(div64_u64);
 #endif
 
 #endif /* BITS_PER_LONG == 32 */
+
+/*
+ * Iterative div/mod for use when dividend is not expected to be much
+ * bigger than divisor.
+ */
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	return __iter_div_u64_rem(dividend, divisor, remainder);
+}
+EXPORT_SYMBOL(iter_div_u64_rem);
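The new export wraps the inline __iter_div_u64_rem() helper, which divides by repeated subtraction and is only worthwhile when the dividend is not much larger than the divisor (for example when splitting a nanosecond count into seconds and remainder). A rough userspace sketch of the iterative approach, assuming the simple subtract loop used by the inline helper:

#include <stdint.h>
#include <stdio.h>

/*
 * Iterative div/mod: cheap when the quotient is small, because it avoids a
 * full 64-bit division. The kernel's inline helper additionally keeps the
 * compiler from collapsing the loop back into a real division.
 */
static uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
				 uint64_t *remainder)
{
	uint32_t ret = 0;

	while (dividend >= divisor) {
		dividend -= divisor;
		ret++;
	}
	*remainder = dividend;
	return ret;
}

int main(void)
{
	uint64_t rem;
	uint32_t sec = iter_div_u64_rem(3123456789ULL, 1000000000U, &rem);

	/* prints "3 s, 123456789 ns" */
	printf("%u s, %llu ns\n", (unsigned)sec, (unsigned long long)rem);
	return 0;
}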
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 119174494cb5..4a8ba4bf5f6f 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
  * Add up all the per-cpu counts, return the result. This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
 {
	s64 ret;
	int cpu;
@@ -62,7 +62,12 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
+		if (set)
+			*pcount = 0;
	}
+	if (set)
+		fbc->count = ret;
+
	spin_unlock(&fbc->lock);
	return ret;
 }
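The added set argument lets __percpu_counter_sum() fold the per-CPU deltas back into the central count and zero them, instead of only reading them. A small userspace model of that fold; the per-CPU pointers and fbc->lock are replaced by a plain array and no locking, both assumptions made for brevity:

#include <stdio.h>

#define NR_CPUS 4

struct percpu_counter {
	long long count;	/* central count */
	int counters[NR_CPUS];	/* per-CPU deltas */
};

/* Sum all per-CPU deltas; if set != 0, fold them into ->count and zero them. */
static long long percpu_counter_sum(struct percpu_counter *fbc, int set)
{
	long long ret = fbc->count;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ret += fbc->counters[cpu];
		if (set)
			fbc->counters[cpu] = 0;
	}
	if (set)
		fbc->count = ret;
	return ret;
}

int main(void)
{
	struct percpu_counter c = { .count = 10, .counters = { 1, -2, 3, 0 } };

	printf("sum = %lld\n", percpu_counter_sum(&c, 1));	/* 12 */
	printf("central count is now %lld\n", c.count);		/* 12, deltas zeroed */
	return 0;
}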
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bd521716ab1a..56ec21a7f73d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2001 Momchil Velikov
  * Portions Copyright (C) 2001 Christoph Hellwig
- * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
+ * Copyright (C) 2005 SGI, Christoph Lameter
  * Copyright (C) 2006 Nick Piggin
  *
  * This program is free software; you can redistribute it and/or
@@ -88,6 +88,57 @@ static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
	return root->gfp_mask & __GFP_BITS_MASK;
 }
 
+static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
+		int offset)
+{
+	__set_bit(offset, node->tags[tag]);
+}
+
+static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
+		int offset)
+{
+	__clear_bit(offset, node->tags[tag]);
+}
+
+static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
+		int offset)
+{
+	return test_bit(offset, node->tags[tag]);
+}
+
+static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
+{
+	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
+{
+	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear_all(struct radix_tree_root *root)
+{
+	root->gfp_mask &= __GFP_BITS_MASK;
+}
+
+static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
+{
+	return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+}
+
+/*
+ * Returns 1 if any slot in the node has this tag set.
+ * Otherwise returns 0.
+ */
+static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
+{
+	int idx;
+	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+		if (node->tags[tag][idx])
+			return 1;
+	}
+	return 0;
+}
 /*
  * This assumes that the caller has performed appropriate preallocation, and
  * that the caller has pinned this thread of control to the current CPU.
@@ -124,6 +175,17 @@ static void radix_tree_node_rcu_free(struct rcu_head *head)
 {
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);
+
+	/*
+	 * must only free zeroed nodes into the slab. radix_tree_shrink
+	 * can leave us with a non-NULL entry in the first slot, so clear
+	 * that here to make sure.
+	 */
+	tag_clear(node, 0, 0);
+	tag_clear(node, 1, 0);
+	node->slots[0] = NULL;
+	node->count = 0;
+
	kmem_cache_free(radix_tree_node_cachep, node);
 }
 
@@ -165,59 +227,6 @@ out:
 }
 EXPORT_SYMBOL(radix_tree_preload);
 
-static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
-		int offset)
-{
-	__set_bit(offset, node->tags[tag]);
-}
-
-static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
-		int offset)
-{
-	__clear_bit(offset, node->tags[tag]);
-}
-
-static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
-		int offset)
-{
-	return test_bit(offset, node->tags[tag]);
-}
-
-static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
-{
-	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-
-static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
-{
-	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-static inline void root_tag_clear_all(struct radix_tree_root *root)
-{
-	root->gfp_mask &= __GFP_BITS_MASK;
-}
-
-static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
-{
-	return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
-}
-
-/*
- * Returns 1 if any slot in the node has this tag set.
- * Otherwise returns 0.
- */
-static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
-{
-	int idx;
-	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-		if (node->tags[tag][idx])
-			return 1;
-	}
-	return 0;
-}
-
 /*
  * Return the maximum key which can be store into a
  * radix tree with height HEIGHT.
@@ -930,11 +939,6 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
		newptr = radix_tree_ptr_to_indirect(newptr);
		root->rnode = newptr;
		root->height--;
-		/* must only free zeroed nodes into the slab */
-		tag_clear(to_free, 0, 0);
-		tag_clear(to_free, 1, 0);
-		to_free->slots[0] = NULL;
-		to_free->count = 0;
		radix_tree_node_free(to_free);
	}
 }
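The tag helpers move earlier in the file so that radix_tree_node_rcu_free() can call tag_clear() when it scrubs a node before returning it to the slab. A userspace model of the per-node tag bitmaps; it packs all slots into a single unsigned long and uses plain shifts instead of __set_bit()/test_bit(), both simplifications rather than the kernel's actual layout:

#include <stdio.h>

#define RADIX_TREE_MAX_TAGS 2	/* illustrative; mirrors the two tags used above */

struct node {
	unsigned long tags[RADIX_TREE_MAX_TAGS];	/* one bit per slot */
};

static void tag_set(struct node *node, unsigned int tag, int offset)
{
	node->tags[tag] |= 1UL << offset;
}

static void tag_clear(struct node *node, unsigned int tag, int offset)
{
	node->tags[tag] &= ~(1UL << offset);
}

static int tag_get(struct node *node, unsigned int tag, int offset)
{
	return (node->tags[tag] >> offset) & 1;
}

/* Returns 1 if any slot in the node has this tag set, 0 otherwise. */
static int any_tag_set(struct node *node, unsigned int tag)
{
	return node->tags[tag] != 0;
}

int main(void)
{
	struct node n = { { 0, 0 } };

	tag_set(&n, 0, 5);
	printf("tag 0, slot 5: %d, any: %d\n", tag_get(&n, 0, 5), any_tag_set(&n, 0));
	tag_clear(&n, 0, 5);
	printf("after clear, any: %d\n", any_tag_set(&n, 0));
	return 0;
}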
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 6c90fb90e19c..3b4dc098181e 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -7,7 +7,7 @@
 #include <linux/kallsyms.h>
 #include <linux/sched.h>
 
-unsigned int debug_smp_processor_id(void)
+notrace unsigned int debug_smp_processor_id(void)
 {
	unsigned long preempt_count = preempt_count();
	int this_cpu = raw_smp_processor_id();
@@ -37,7 +37,7 @@ unsigned int debug_smp_processor_id(void)
	/*
	 * Avoid recursion:
	 */
-	preempt_disable();
+	preempt_disable_notrace();
 
	if (!printk_ratelimit())
		goto out_enable;
@@ -49,7 +49,7 @@ unsigned int debug_smp_processor_id(void)
	dump_stack();
 
 out_enable:
-	preempt_enable_no_resched();
+	preempt_enable_no_resched_notrace();
 out:
	return this_cpu;
 }
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index d90822c378a4..4a7fce72898e 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -63,7 +63,7 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
	struct ts_bm *bm = ts_config_priv(conf);
	unsigned int i, text_len, consumed = state->offset;
	const u8 *text;
-	int shift = bm->patlen, bs;
+	int shift = bm->patlen - 1, bs;
 
	for (;;) {
		text_len = conf->get_next_block(consumed, &text, conf, state);
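In bm_find(), shift is the text index aligned with the last pattern character, so initialising it to patlen instead of patlen - 1 made the Boyer-Moore scanner skip a possible match at the very start of the text. A simplified userspace model of the backward comparison (the bad-character shift table is omitted and the window advances by one on mismatch, assumptions made for brevity):

#include <stdio.h>
#include <string.h>

/*
 * Simplified backward-matching scan: 'shift' is the text index aligned with
 * the LAST pattern character, so it must start at patlen - 1 or a match at
 * offset 0 is never examined.
 */
static int find(const char *text, const char *pat)
{
	size_t text_len = strlen(text), patlen = strlen(pat);
	size_t shift, i;

	for (shift = patlen - 1; shift < text_len; shift++) {
		for (i = 0; i < patlen; i++)
			if (text[shift - i] != pat[patlen - 1 - i])
				break;
		if (i == patlen)
			return (int)(shift - (patlen - 1));	/* match offset */
	}
	return -1;
}

int main(void)
{
	/* With the old initial value (patlen), this match at offset 0 was missed. */
	printf("offset = %d\n", find("abcdef", "abc"));	/* prints 0 */
	return 0;
}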
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6021757a4496..1dc2d1d18fa8 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -22,6 +22,8 @@
 #include <linux/string.h>
 #include <linux/ctype.h>
 #include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
 
 #include <asm/page.h>		/* for PAGE_SIZE */
 #include <asm/div64.h>
@@ -482,6 +484,89 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
	return buf;
 }
 
+static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags)
+{
+	int len, i;
+
+	if ((unsigned long)s < PAGE_SIZE)
+		s = "<NULL>";
+
+	len = strnlen(s, precision);
+
+	if (!(flags & LEFT)) {
+		while (len < field_width--) {
+			if (buf < end)
+				*buf = ' ';
+			++buf;
+		}
+	}
+	for (i = 0; i < len; ++i) {
+		if (buf < end)
+			*buf = *s;
+		++buf; ++s;
+	}
+	while (len < field_width--) {
+		if (buf < end)
+			*buf = ' ';
+		++buf;
+	}
+	return buf;
+}
+
+static inline void *dereference_function_descriptor(void *ptr)
+{
+#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
+	void *p;
+	if (!probe_kernel_address(ptr, p))
+		ptr = p;
+#endif
+	return ptr;
+}
+
+static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags)
+{
+	unsigned long value = (unsigned long) ptr;
+#ifdef CONFIG_KALLSYMS
+	char sym[KSYM_SYMBOL_LEN];
+	sprint_symbol(sym, value);
+	return string(buf, end, sym, field_width, precision, flags);
+#else
+	field_width = 2*sizeof(void *);
+	flags |= SPECIAL | SMALL | ZEROPAD;
+	return number(buf, end, value, 16, field_width, precision, flags);
+#endif
+}
+
+/*
+ * Show a '%p' thing. A kernel extension is that the '%p' is followed
+ * by an extra set of alphanumeric characters that are extended format
+ * specifiers.
+ *
+ * Right now we just handle 'F' (for symbolic Function descriptor pointers)
+ * and 'S' (for Symbolic direct pointers), but this can easily be
+ * extended in the future (network address types etc).
+ *
+ * The difference between 'S' and 'F' is that on ia64 and ppc64 function
+ * pointers are really function descriptors, which contain a pointer to the
+ * real address.
+ */
+static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
+{
+	switch (*fmt) {
+	case 'F':
+		ptr = dereference_function_descriptor(ptr);
+		/* Fallthrough */
+	case 'S':
+		return symbol_string(buf, end, ptr, field_width, precision, flags);
+	}
+	flags |= SMALL;
+	if (field_width == -1) {
+		field_width = 2*sizeof(void *);
+		flags |= ZEROPAD;
+	}
+	return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags);
+}
+
 /**
  * vsnprintf - Format a string and place it in a buffer
  * @buf: The buffer to place the result into
@@ -502,11 +587,9 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
  */
 int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 {
-	int len;
	unsigned long long num;
-	int i, base;
+	int base;
	char *str, *end, c;
-	const char *s;
 
	int flags;		/* flags to number() */
 
@@ -622,43 +705,18 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
			continue;
 
		case 's':
-			s = va_arg(args, char *);
-			if ((unsigned long)s < PAGE_SIZE)
-				s = "<NULL>";
-
-			len = strnlen(s, precision);
-
-			if (!(flags & LEFT)) {
-				while (len < field_width--) {
-					if (str < end)
-						*str = ' ';
-					++str;
-				}
-			}
-			for (i = 0; i < len; ++i) {
-				if (str < end)
-					*str = *s;
-				++str; ++s;
-			}
-			while (len < field_width--) {
-				if (str < end)
-					*str = ' ';
-				++str;
-			}
+			str = string(str, end, va_arg(args, char *), field_width, precision, flags);
			continue;
 
		case 'p':
-			flags |= SMALL;
-			if (field_width == -1) {
-				field_width = 2*sizeof(void *);
-				flags |= ZEROPAD;
-			}
-			str = number(str, end,
-				(unsigned long) va_arg(args, void *),
-				16, field_width, precision, flags);
+			str = pointer(fmt+1, str, end,
+					va_arg(args, void *),
+					field_width, precision, flags);
+			/* Skip all alphanumeric pointer suffixes */
+			while (isalnum(fmt[1]))
+				fmt++;
			continue;
 
-
		case 'n':
			/* FIXME:
			 * What does C99 say about the overflow case here? */
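The new pointer() helper gives '%p' single-character extensions ('F' and 'S' so far), and vsnprintf() then skips any alphanumeric suffix so that unknown extensions still fall back to printing a plain pointer. A userspace sketch of just that parsing step; the real formatting callbacks (symbol_string(), number()) are stubbed out with printf(), an assumption made for self-containment:

#include <ctype.h>
#include <stdio.h>

/* Walk a format string and report each %p extension, mirroring the
 * "while (isalnum(fmt[1])) fmt++;" suffix skip added to vsnprintf(). */
static void scan(const char *fmt)
{
	for (; *fmt; fmt++) {
		if (fmt[0] != '%' || fmt[1] != 'p')
			continue;
		fmt++;				/* now on 'p' */
		if (isalnum((unsigned char)fmt[1]))
			printf("pointer with extension '%c'\n", fmt[1]);
		else
			printf("plain pointer\n");
		while (isalnum((unsigned char)fmt[1]))
			fmt++;			/* consume the suffix */
	}
}

int main(void)
{
	scan("irq handler at %pS, descriptor %pF, raw %p\n");
	return 0;
}

In the kernel itself the visible effect is that something like printk("%pS\n", addr) prints a resolved symbol name when CONFIG_KALLSYMS is enabled, and a zero-padded hex address otherwise.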