Diffstat (limited to 'lib')

 lib/Kconfig           |   9 +
 lib/Kconfig.debug     |  38 +-
 lib/Kconfig.kmemcheck |  91 +
 lib/Makefile          |   7 +-
 lib/atomic64.c        | 175 +
 lib/checksum.c        | 193 +
 lib/cpumask.c         |  23 +-
 lib/dec_and_lock.c    |   3 +-
 lib/extable.c         |  21 +-
 lib/gcd.c             |  18 +
 lib/genalloc.c        |   1 -
 lib/hexdump.c         |  15 +-
 lib/kobject.c         |   7 +-
 lib/radix-tree.c      | 110 +-
 lib/rational.c        |  62 +
 lib/rbtree.c          |  34 +-
 lib/vsprintf.c        |  56 +-
 17 files changed, 765 insertions(+), 98 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 8ade0a7a91e..bb1326d3839 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -10,6 +10,9 @@ menu "Library routines"
 config BITREVERSE
 	tristate
 
+config RATIONAL
+	boolean
+
 config GENERIC_FIND_FIRST_BIT
 	bool
 
@@ -191,4 +194,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 config NLATTR
 	bool
 
+#
+# Generic 64-bit atomic support is selected if needed
+#
+config GENERIC_ATOMIC64
+	bool
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3be4b7caf5b..23067ab1a73 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -300,7 +300,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
 
 config DEBUG_SLAB
 	bool "Debug slab memory allocations"
-	depends on DEBUG_KERNEL && SLAB
+	depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
@@ -312,7 +312,7 @@ config DEBUG_SLAB_LEAK
 
 config SLUB_DEBUG_ON
 	bool "SLUB debugging on by default"
-	depends on SLUB && SLUB_DEBUG
+	depends on SLUB && SLUB_DEBUG && !KMEMCHECK
 	default n
 	help
 	  Boot with debugging on by default. SLUB boots by default with
@@ -336,6 +336,38 @@ config SLUB_STATS
 	  out which slabs are relevant to a particular load.
 	  Try running: slabinfo -DA
 
+config DEBUG_KMEMLEAK
+	bool "Kernel memory leak detector"
+	depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \
+		!MEMORY_HOTPLUG
+	select DEBUG_SLAB if SLAB
+	select SLUB_DEBUG if SLUB
+	select DEBUG_FS if SYSFS
+	select STACKTRACE if STACKTRACE_SUPPORT
+	select KALLSYMS
+	help
+	  Say Y here if you want to enable the memory leak
+	  detector. The memory allocation/freeing is traced in a way
+	  similar to Boehm's conservative garbage collector, the
+	  difference being that the orphan objects are not freed but
+	  only shown in /sys/kernel/debug/kmemleak. Enabling this
+	  feature will introduce an overhead to memory
+	  allocations. See Documentation/kmemleak.txt for more
+	  details.
+
+	  In order to access the kmemleak file, debugfs needs to be
+	  mounted (usually at /sys/kernel/debug).
+
+config DEBUG_KMEMLEAK_TEST
+	tristate "Simple test for the kernel memory leak detector"
+	depends on DEBUG_KMEMLEAK
+	help
+	  Say Y or M here to build a test for the kernel memory leak
+	  detector. This option enables a module that explicitly leaks
+	  memory.
+
+	  If unsure, say N.
+
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64)
@@ -964,3 +996,5 @@ config DMA_API_DEBUG
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
+
+source "lib/Kconfig.kmemcheck"
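
For illustration (not part of this diff): the DEBUG_KMEMLEAK_TEST module described above comes down to allocations whose pointers are deliberately dropped. A minimal sketch of such a module, with illustrative names:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init leak_init(void)
{
	int i;

	/* The returned pointers are discarded, so the objects become
	 * unreachable "orphans" and should appear in
	 * /sys/kernel/debug/kmemleak after a scan. */
	for (i = 0; i < 10; i++)
		kmalloc(32, GFP_KERNEL);
	return 0;
}

static void __exit leak_exit(void)
{
}

module_init(leak_init);
module_exit(leak_exit);
MODULE_LICENSE("GPL");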
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
new file mode 100644
index 00000000000..603c81b6654
--- /dev/null
+++ b/lib/Kconfig.kmemcheck
@@ -0,0 +1,91 @@
+config HAVE_ARCH_KMEMCHECK
+	bool
+
+menuconfig KMEMCHECK
+	bool "kmemcheck: trap use of uninitialized memory"
+	depends on DEBUG_KERNEL
+	depends on !X86_USE_3DNOW
+	depends on SLUB || SLAB
+	depends on !CC_OPTIMIZE_FOR_SIZE
+	depends on !FUNCTION_TRACER
+	select FRAME_POINTER
+	select STACKTRACE
+	default n
+	help
+	  This option enables tracing of dynamically allocated kernel memory
+	  to see if memory is used before it has been given an initial value.
+	  Be aware that this requires half of your memory for bookkeeping and
+	  will insert extra code at *every* read and write to tracked memory,
+	  thus slowing down the kernel code (but user code is unaffected).
+
+	  The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
+	  or enable kmemcheck at boot-time. If the kernel is started with
+	  kmemcheck=0, the large memory and CPU overhead is not incurred.
+
+choice
+	prompt "kmemcheck: default mode at boot"
+	depends on KMEMCHECK
+	default KMEMCHECK_ONESHOT_BY_DEFAULT
+	help
+	  This option controls the default behaviour of kmemcheck when the
+	  kernel boots and no kmemcheck= parameter is given.
+
+config KMEMCHECK_DISABLED_BY_DEFAULT
+	bool "disabled"
+	depends on KMEMCHECK
+
+config KMEMCHECK_ENABLED_BY_DEFAULT
+	bool "enabled"
+	depends on KMEMCHECK
+
+config KMEMCHECK_ONESHOT_BY_DEFAULT
+	bool "one-shot"
+	depends on KMEMCHECK
+	help
+	  In one-shot mode, only the first error detected is reported before
+	  kmemcheck is disabled.
+
+endchoice
+
+config KMEMCHECK_QUEUE_SIZE
+	int "kmemcheck: error queue size"
+	depends on KMEMCHECK
+	default 64
+	help
+	  Select the maximum number of errors to store in the queue. Since
+	  errors can occur virtually anywhere and in any context, we need a
+	  temporary storage area which is guaranteed not to generate any
+	  other faults. The queue will be emptied as soon as a tasklet may
+	  be scheduled. If the queue is full, new error reports will be
+	  lost.
+
+config KMEMCHECK_SHADOW_COPY_SHIFT
+	int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
+	depends on KMEMCHECK
+	range 2 8
+	default 5
+	help
+	  Select the number of shadow bytes to save along with each entry of
+	  the queue. These bytes indicate what parts of an allocation are
+	  initialized, uninitialized, etc. and will be displayed when an
+	  error is detected to help the debugging of a particular problem.
+
+config KMEMCHECK_PARTIAL_OK
+	bool "kmemcheck: allow partially uninitialized memory"
+	depends on KMEMCHECK
+	default y
+	help
+	  This option works around certain GCC optimizations that produce
+	  32-bit reads from 16-bit variables where the upper 16 bits are
+	  thrown away afterwards. This may of course also hide some real
+	  bugs.
+
+config KMEMCHECK_BITOPS_OK
+	bool "kmemcheck: allow bit-field manipulation"
+	depends on KMEMCHECK
+	default n
+	help
+	  This option silences warnings that would be generated for bit-field
+	  accesses where not all the bits are initialized at the same time.
+	  This may also hide some real bugs.
+
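
For illustration (not part of this diff): the class of bug kmemcheck traps is an ordinary read of allocated-but-never-written memory. A hedged sketch in module code, with illustrative names:

#include <linux/kernel.h>
#include <linux/slab.h>

struct foo {
	int a;
	int b;		/* never written below */
};

static int check_foo(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	f->a = 1;
	/* This read of f->b touches uninitialized memory; with
	 * KMEMCHECK=y the access is trapped and reported. */
	if (f->b)
		pr_info("b was set\n");
	kfree(f);
	return 0;
}

Allocating with kzalloc() instead would initialize the object and silence the report.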
diff --git a/lib/Makefile b/lib/Makefile
index 33a40e40e3e..b6d1857bbf0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-	 string_helpers.o
+	 string_helpers.o gcd.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -50,6 +50,7 @@ ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
 endif
 
 obj-$(CONFIG_BITREVERSE) += bitrev.o
+obj-$(CONFIG_RATIONAL)	+= rational.o
 obj-$(CONFIG_CRC_CCITT)	+= crc-ccitt.o
 obj-$(CONFIG_CRC16)	+= crc16.o
 obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
@@ -92,6 +93,10 @@ obj-$(CONFIG_NLATTR) += nlattr.o
 
 obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
 
+obj-$(CONFIG_GENERIC_CSUM) += checksum.o
+
+obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/atomic64.c b/lib/atomic64.c
new file mode 100644
index 00000000000..c5e72556241
--- /dev/null
+++ b/lib/atomic64.c
@@ -0,0 +1,175 @@
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+
+/*
+ * We use a hashed array of spinlocks to provide exclusive access
+ * to each atomic64_t variable. Since this is expected to be used on
+ * systems with small numbers of CPUs (<= 4 or so), we use a
+ * relatively small array of 16 spinlocks to avoid wasting too much
+ * memory on the spinlock array.
+ */
+#define NR_LOCKS	16
+
+/*
+ * Ensure each lock is in a separate cacheline.
+ */
+static union {
+	spinlock_t lock;
+	char pad[L1_CACHE_BYTES];
+} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
+
+static inline spinlock_t *lock_addr(const atomic64_t *v)
+{
+	unsigned long addr = (unsigned long) v;
+
+	addr >>= L1_CACHE_SHIFT;
+	addr ^= (addr >> 8) ^ (addr >> 16);
+	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
+}
+
+long long atomic64_read(const atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+void atomic64_set(atomic64_t *v, long long i)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter = i;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+void atomic64_add(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter += a;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+long long atomic64_add_return(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter += a;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+void atomic64_sub(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter -= a;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+long long atomic64_sub_return(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter -= a;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_dec_if_positive(atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter - 1;
+	if (val >= 0)
+		v->counter = val;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	if (val == o)
+		v->counter = n;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_xchg(atomic64_t *v, long long new)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	v->counter = new;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	int ret = 1;
+
+	spin_lock_irqsave(lock, flags);
+	if (v->counter != u) {
+		v->counter += a;
+		ret = 0;
+	}
+	spin_unlock_irqrestore(lock, flags);
+	return ret;
+}
+
+static int init_atomic64_lock(void)
+{
+	int i;
+
+	for (i = 0; i < NR_LOCKS; ++i)
+		spin_lock_init(&atomic64_lock[i].lock);
+	return 0;
+}
+
+pure_initcall(init_atomic64_lock);
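
For illustration (not part of this diff): callers use the same atomic64 API whether it is backed by native instructions or by the hashed spinlocks above. A hedged usage sketch, assuming the usual ATOMIC64_INIT initializer from <asm/atomic.h>, with illustrative names:

#include <linux/kernel.h>
#include <asm/atomic.h>

static atomic64_t bytes_seen = ATOMIC64_INIT(0);

static void account(long long n)
{
	/* On GENERIC_ATOMIC64 this takes one spin_lock_irqsave/restore
	 * pair; lock_addr() hashes &bytes_seen to one of the 16 locks. */
	long long total = atomic64_add_return(n, &bytes_seen);

	if (total > (1LL << 40))
		pr_info("over a terabyte seen\n");
}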
diff --git a/lib/checksum.c b/lib/checksum.c
new file mode 100644
index 00000000000..12e5a1c91cd
--- /dev/null
+++ b/lib/checksum.c
@@ -0,0 +1,193 @@
+/*
+ *
+ * INET		An implementation of the TCP/IP protocol suite for the LINUX
+ *		operating system. INET is implemented using the BSD Socket
+ *		interface as the means of communication with the user level.
+ *
+ *		IP/TCP/UDP checksumming routines
+ *
+ * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
+ *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ *		Tom May, <ftom@netcom.com>
+ *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
+ *		Lots of code moved from tcp.c and ip.c; see those files
+ *		for more names.
+ *
+ * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
+ *		Fixed some nasty bugs, causing some horrible crashes.
+ *		A: At some points, the sum (%0) was used as
+ *		length-counter instead of the length counter
+ *		(%1). Thanks to Roman Hodek for pointing this out.
+ *		B: GCC seems to mess up if one uses too many
+ *		data-registers to hold input values and one tries to
+ *		specify d0 and d1 as scratch registers. Letting gcc
+ *		choose these registers itself solves the problem.
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ */
+
+/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
+   kills, so most of the assembly has to go. */
+
+#include <linux/module.h>
+#include <net/checksum.h>
+
+#include <asm/byteorder.h>
+
+static inline unsigned short from32to16(unsigned long x)
+{
+	/* add up 16-bit and 16-bit for 16+c bit */
+	x = (x & 0xffff) + (x >> 16);
+	/* add up carry.. */
+	x = (x & 0xffff) + (x >> 16);
+	return x;
+}
+
+static unsigned int do_csum(const unsigned char *buff, int len)
+{
+	int odd, count;
+	unsigned long result = 0;
+
+	if (len <= 0)
+		goto out;
+	odd = 1 & (unsigned long) buff;
+	if (odd) {
+		result = *buff;
+		len--;
+		buff++;
+	}
+	count = len >> 1;		/* nr of 16-bit words.. */
+	if (count) {
+		if (2 & (unsigned long) buff) {
+			result += *(unsigned short *) buff;
+			count--;
+			len -= 2;
+			buff += 2;
+		}
+		count >>= 1;		/* nr of 32-bit words.. */
+		if (count) {
+			unsigned long carry = 0;
+			do {
+				unsigned long w = *(unsigned long *) buff;
+				count--;
+				buff += 4;
+				result += carry;
+				result += w;
+				carry = (w > result);
+			} while (count);
+			result += carry;
+			result = (result & 0xffff) + (result >> 16);
+		}
+		if (len & 2) {
+			result += *(unsigned short *) buff;
+			buff += 2;
+		}
+	}
+	if (len & 1)
+		result += (*buff << 8);
+	result = from32to16(result);
+	if (odd)
+		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+out:
+	return result;
+}
+
+/*
+ *	This is a version of ip_compute_csum() optimized for IP headers,
+ *	which always checksum on 4 octet boundaries.
+ */
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	return (__force __sum16)~do_csum(iph, ihl*4);
+}
+EXPORT_SYMBOL(ip_fast_csum);
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum wsum)
+{
+	unsigned int sum = (__force unsigned int)wsum;
+	unsigned int result = do_csum(buff, len);
+
+	/* add in old sum, and carry.. */
+	result += sum;
+	if (sum > result)
+		result += 1;
+	return (__force __wsum)result;
+}
+EXPORT_SYMBOL(csum_partial);
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+__sum16 ip_compute_csum(const void *buff, int len)
+{
+	return (__force __sum16)~do_csum(buff, len);
+}
+EXPORT_SYMBOL(ip_compute_csum);
+
+/*
+ * copy from fs while checksumming, otherwise like csum_partial
+ */
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+						__wsum sum, int *csum_err)
+{
+	int missing;
+
+	missing = __copy_from_user(dst, src, len);
+	if (missing) {
+		memset(dst + len - missing, 0, missing);
+		*csum_err = -EFAULT;
+	} else
+		*csum_err = 0;
+
+	return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+
+/*
+ * copy from ds while checksumming, otherwise like csum_partial
+ */
+__wsum
+csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
+{
+	memcpy(dst, src, len);
+	return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy);
+
+#ifndef csum_tcpudp_nofold
+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+			unsigned short len,
+			unsigned short proto,
+			__wsum sum)
+{
+	unsigned long long s = (__force u32)sum;
+
+	s += (__force u32)saddr;
+	s += (__force u32)daddr;
+#ifdef __BIG_ENDIAN
+	s += proto + len;
+#else
+	s += (proto + len) << 8;
+#endif
+	s += (s >> 32);
+	return (__force __wsum)s;
+}
+EXPORT_SYMBOL(csum_tcpudp_nofold);
+#endif
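
For illustration (not part of this diff): the arithmetic above is RFC 1071 ones-complement summation; fold the 32-bit accumulator with its carries, then invert. A self-contained userspace sketch of the same folding:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint16_t rfc1071_csum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)	/* 16-bit words */
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (len & 1)				/* odd trailing byte */
		sum += (uint32_t)buf[len - 1] << 8;
	while (sum >> 16)			/* fold carries, like from32to16() */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* A 20-byte IPv4 header with its checksum field (bytes 10-11)
	 * zeroed; the checksum works out to 0xb1e6. */
	const uint8_t iph[20] = {
		0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
		0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
		0xac, 0x10, 0x0a, 0x0c,
	};

	printf("checksum: 0x%04x\n", rfc1071_csum(iph, sizeof(iph)));
	return 0;
}

Unlike do_csum() above, this sketch sums in network byte order; the kernel version works in native byte order and byte-swaps the result when the buffer starts at an odd address.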
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 1f71b97de0f..7bb4142a502 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -92,15 +92,8 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
  */
 bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 {
-	if (likely(slab_is_available()))
-		*mask = kmalloc_node(cpumask_size(), flags, node);
-	else {
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-		printk(KERN_ERR
-			"=> alloc_cpumask_var: kmalloc not available!\n");
-#endif
-		*mask = NULL;
-	}
+	*mask = kmalloc_node(cpumask_size(), flags, node);
+
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 	if (!*mask) {
 		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
@@ -119,6 +112,12 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 }
 EXPORT_SYMBOL(alloc_cpumask_var_node);
 
+bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
+{
+	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
+}
+EXPORT_SYMBOL(zalloc_cpumask_var_node);
+
 /**
  * alloc_cpumask_var - allocate a struct cpumask
  * @mask: pointer to cpumask_var_t where the cpumask is returned
@@ -135,6 +134,12 @@ bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
 }
 EXPORT_SYMBOL(alloc_cpumask_var);
 
+bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
+}
+EXPORT_SYMBOL(zalloc_cpumask_var);
+
 /**
  * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
  * @mask: pointer to cpumask_var_t where the cpumask is returned
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index a65c3145554..e73822aa6e9 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -19,11 +19,10 @@
  */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
-#ifdef CONFIG_SMP
 	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
 	if (atomic_add_unless(atomic, -1, 1))
 		return 0;
-#endif
+
 	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
diff --git a/lib/extable.c b/lib/extable.c
index 179c0874559..4cac81ec225 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -39,7 +39,26 @@ void sort_extable(struct exception_table_entry *start,
 	sort(start, finish - start, sizeof(struct exception_table_entry),
 	     cmp_ex, NULL);
 }
-#endif
+
+#ifdef CONFIG_MODULES
+/*
+ * If the exception table is sorted, any entries referring to the module
+ * init area will be at the beginning or the end.
+ */
+void trim_init_extable(struct module *m)
+{
+	/* trim the beginning */
+	while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
+		m->extable++;
+		m->num_exentries--;
+	}
+	/* trim the end */
+	while (m->num_exentries &&
+	       within_module_init(m->extable[m->num_exentries-1].insn, m))
+		m->num_exentries--;
+}
+#endif /* CONFIG_MODULES */
+#endif /* !ARCH_HAS_SORT_EXTABLE */
 
 #ifndef ARCH_HAS_SEARCH_EXTABLE
 /*
diff --git a/lib/gcd.c b/lib/gcd.c
new file mode 100644
index 00000000000..f879033d982
--- /dev/null
+++ b/lib/gcd.c
@@ -0,0 +1,18 @@
+#include <linux/kernel.h>
+#include <linux/gcd.h>
+#include <linux/module.h>
+
+/* Greatest common divisor */
+unsigned long gcd(unsigned long a, unsigned long b)
+{
+	unsigned long r;
+
+	if (a < b)
+		swap(a, b);
+	while ((r = a % b) != 0) {
+		a = b;
+		b = r;
+	}
+	return b;
+}
+EXPORT_SYMBOL_GPL(gcd);
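
For illustration (not part of this diff): a typical use is reducing a ratio to lowest terms, e.g. a pair of sample rates. A self-contained userspace sketch of the same Euclidean loop; note that gcd() above divides by b, so the smaller argument must be non-zero:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {			/* explicit b != 0 guard */
		unsigned long r = a % b;

		a = b;
		b = r;
	}
	return a;
}

int main(void)
{
	unsigned long a = 48000, b = 44100;	/* Hz */
	unsigned long g = gcd(a, b);

	/* prints "48000/44100 reduces to 160/147" */
	printf("%lu/%lu reduces to %lu/%lu\n", a, b, a / g, b / g);
	return 0;
}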
diff --git a/lib/genalloc.c b/lib/genalloc.c
index f6d276db2d5..eed2bdb865e 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -85,7 +85,6 @@ void gen_pool_destroy(struct gen_pool *pool)
 	int bit, end_bit;
 
 
-	write_lock(&pool->lock);
 	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
 		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
 		list_del(&chunk->next_chunk);
diff --git a/lib/hexdump.c b/lib/hexdump.c
index f07c0db81d2..39af2560f76 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -65,7 +65,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 		for (j = 0; j < ngroups; j++)
 			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%16.16llx ", (unsigned long long)*(ptr8 + j));
+				"%s%16.16llx", j ? " " : "",
+				(unsigned long long)*(ptr8 + j));
 		ascii_column = 17 * ngroups + 2;
 		break;
 	}
@@ -76,7 +77,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 		for (j = 0; j < ngroups; j++)
 			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%8.8x ", *(ptr4 + j));
+				"%s%8.8x", j ? " " : "", *(ptr4 + j));
 		ascii_column = 9 * ngroups + 2;
 		break;
 	}
@@ -87,19 +88,21 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 		for (j = 0; j < ngroups; j++)
 			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%4.4x ", *(ptr2 + j));
+				"%s%4.4x", j ? " " : "", *(ptr2 + j));
 		ascii_column = 5 * ngroups + 2;
 		break;
 	}
 
 	default:
-		for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen;
-		     j++) {
+		for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
 			ch = ptr[j];
 			linebuf[lx++] = hex_asc_hi(ch);
 			linebuf[lx++] = hex_asc_lo(ch);
 			linebuf[lx++] = ' ';
 		}
+		if (j)
+			lx--;
+
 		ascii_column = 3 * rowsize + 2;
 		break;
 	}
@@ -108,7 +111,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 	while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
 		linebuf[lx++] = ' ';
-	for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++)
+	for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
 		linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
 				: '.';
 nil:
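
For illustration (not part of this diff): the "%s...", j ? " " : "" pattern used in the hunks above emits the separator between items instead of after each one, so lines no longer carry a trailing blank. A userspace sketch of the idiom:

#include <stdio.h>

int main(void)
{
	const unsigned char buf[] = { 0xde, 0xad, 0xbe, 0xef };
	char line[64];
	int lx = 0;
	size_t j;

	for (j = 0; j < sizeof(buf); j++)
		lx += snprintf(line + lx, sizeof(line) - lx,
			       "%s%02x", j ? " " : "", buf[j]);
	printf("[%s]\n", line);		/* prints "[de ad be ef]" */
	return 0;
}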
diff --git a/lib/kobject.c b/lib/kobject.c
index bacf6fe4f7a..b512b746d2a 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -793,11 +793,16 @@ static struct kset *kset_create(const char *name,
 				  struct kobject *parent_kobj)
 {
 	struct kset *kset;
+	int retval;
 
 	kset = kzalloc(sizeof(*kset), GFP_KERNEL);
 	if (!kset)
 		return NULL;
-	kobject_set_name(&kset->kobj, name);
+	retval = kobject_set_name(&kset->kobj, name);
+	if (retval) {
+		kfree(kset);
+		return NULL;
+	}
 	kset->uevent_ops = uevent_ops;
 	kset->kobj.parent = parent_kobj;
 
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 4bb42a0344e..23abbd93cae 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -351,20 +351,12 @@ int radix_tree_insert(struct radix_tree_root *root,
 }
 EXPORT_SYMBOL(radix_tree_insert);
 
-/**
- * radix_tree_lookup_slot - lookup a slot in a radix tree
- * @root:	radix tree root
- * @index:	index key
- *
- * Returns: the slot corresponding to the position @index in the
- * radix tree @root. This is useful for update-if-exists operations.
- *
- * This function can be called under rcu_read_lock iff the slot is not
- * modified by radix_tree_replace_slot, otherwise it must be called
- * exclusive from other writers. Any dereference of the slot must be done
- * using radix_tree_deref_slot.
+/*
+ * is_slot == 1 : search for the slot.
+ * is_slot == 0 : search for the node.
  */
-void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+static void *radix_tree_lookup_element(struct radix_tree_root *root,
+				unsigned long index, int is_slot)
 {
 	unsigned int height, shift;
 	struct radix_tree_node *node, **slot;
@@ -376,7 +368,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
 	if (!radix_tree_is_indirect_ptr(node)) {
 		if (index > 0)
 			return NULL;
-		return (void **)&root->rnode;
+		return is_slot ? (void *)&root->rnode : node;
 	}
 	node = radix_tree_indirect_to_ptr(node);
 
@@ -397,7 +389,25 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
 		height--;
 	} while (height > 0);
 
-	return (void **)slot;
+	return is_slot ? (void *)slot:node;
+}
+
+/**
+ * radix_tree_lookup_slot - lookup a slot in a radix tree
+ * @root:	radix tree root
+ * @index:	index key
+ *
+ * Returns: the slot corresponding to the position @index in the
+ * radix tree @root. This is useful for update-if-exists operations.
+ *
+ * This function can be called under rcu_read_lock iff the slot is not
+ * modified by radix_tree_replace_slot, otherwise it must be called
+ * exclusive from other writers. Any dereference of the slot must be done
+ * using radix_tree_deref_slot.
+ */
+void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+{
+	return (void **)radix_tree_lookup_element(root, index, 1);
 }
 EXPORT_SYMBOL(radix_tree_lookup_slot);
 
@@ -415,38 +425,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
  */
 void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
 {
-	unsigned int height, shift;
-	struct radix_tree_node *node, **slot;
-
-	node = rcu_dereference(root->rnode);
-	if (node == NULL)
-		return NULL;
-
-	if (!radix_tree_is_indirect_ptr(node)) {
-		if (index > 0)
-			return NULL;
-		return node;
-	}
-	node = radix_tree_indirect_to_ptr(node);
-
-	height = node->height;
-	if (index > radix_tree_maxindex(height))
-		return NULL;
-
-	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-
-	do {
-		slot = (struct radix_tree_node **)
-			(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
-		node = rcu_dereference(*slot);
-		if (node == NULL)
-			return NULL;
-
-		shift -= RADIX_TREE_MAP_SHIFT;
-		height--;
-	} while (height > 0);
-
-	return node;
+	return radix_tree_lookup_element(root, index, 0);
 }
 EXPORT_SYMBOL(radix_tree_lookup);
 
@@ -666,6 +645,43 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 }
 EXPORT_SYMBOL(radix_tree_next_hole);
 
+/**
+ * radix_tree_prev_hole - find the prev hole (not-present entry)
+ * @root:	tree root
+ * @index:	index key
+ * @max_scan:	maximum range to search
+ *
+ * Search backwards in the range [max(index-max_scan+1, 0), index]
+ * for the first hole.
+ *
+ * Returns: the index of the hole if found, otherwise returns an index
+ * outside of the set specified (in which case 'index - return >= max_scan'
+ * will be true). In rare cases of wrap-around, LONG_MAX will be returned.
+ *
+ * radix_tree_prev_hole may be called under rcu_read_lock. However, like
+ * radix_tree_gang_lookup, this will not atomically search a snapshot of
+ * the tree at a single point in time. For example, if a hole is created
+ * at index 10, then subsequently a hole is created at index 5,
+ * radix_tree_prev_hole covering both indexes may return 5 if called under
+ * rcu_read_lock.
+ */
+unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
+				   unsigned long index, unsigned long max_scan)
+{
+	unsigned long i;
+
+	for (i = 0; i < max_scan; i++) {
+		if (!radix_tree_lookup(root, index))
+			break;
+		index--;
+		if (index == LONG_MAX)
+			break;
+	}
+
+	return index;
+}
+EXPORT_SYMBOL(radix_tree_prev_hole);
+
 static unsigned int
 __lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
 	 unsigned int max_items, unsigned long *next_index)
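
For illustration (not part of this diff): a backward scan like radix_tree_prev_hole() lets a caller measure how many consecutive entries precede an index, e.g. to size a readahead history. A hedged kernel-style sketch with illustrative names:

#include <linux/radix-tree.h>

static unsigned long history_count(struct radix_tree_root *root,
				   unsigned long index, unsigned long max)
{
	/* First missing index at or below index - 1; everything
	 * between the hole and index is contiguously present. */
	unsigned long hole = radix_tree_prev_hole(root, index - 1, max);

	return index - 1 - hole;
}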
diff --git a/lib/rational.c b/lib/rational.c
new file mode 100644
index 00000000000..b3c099b5478
--- /dev/null
+++ b/lib/rational.c
@@ -0,0 +1,62 @@
+/*
+ * rational fractions
+ *
+ * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <os@emlix.com>
+ *
+ * helper functions when coping with rational numbers
+ */
+
+#include <linux/rational.h>
+
+/*
+ * calculate best rational approximation for a given fraction
+ * taking into account restricted register size, e.g. to find
+ * appropriate values for a pll with 5 bit denominator and
+ * 8 bit numerator register fields, trying to set up with a
+ * frequency ratio of 3.1415, one would say:
+ *
+ * rational_best_approximation(31415, 10000,
+ *		(1 << 8) - 1, (1 << 5) - 1, &n, &d);
+ *
+ * you may look at given_numerator as a fixed point number,
+ * with the fractional part size described in given_denominator.
+ *
+ * for theoretical background, see:
+ * http://en.wikipedia.org/wiki/Continued_fraction
+ */
+
+void rational_best_approximation(
+	unsigned long given_numerator, unsigned long given_denominator,
+	unsigned long max_numerator, unsigned long max_denominator,
+	unsigned long *best_numerator, unsigned long *best_denominator)
+{
+	unsigned long n, d, n0, d0, n1, d1;
+	n = given_numerator;
+	d = given_denominator;
+	n0 = d1 = 0;
+	n1 = d0 = 1;
+	for (;;) {
+		unsigned long t, a;
+		if ((n1 > max_numerator) || (d1 > max_denominator)) {
+			n1 = n0;
+			d1 = d0;
+			break;
+		}
+		if (d == 0)
+			break;
+		t = d;
+		a = n / d;
+		d = n % d;
+		n = t;
+		t = n0 + a * n1;
+		n0 = n1;
+		n1 = t;
+		t = d0 + a * d1;
+		d0 = d1;
+		d1 = t;
+	}
+	*best_numerator = n1;
+	*best_denominator = d1;
+}
+
+EXPORT_SYMBOL(rational_best_approximation);
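For illustration (not part of this diff): running the algorithm on the example from its own comment. A self-contained userspace port with illustrative names:

#include <stdio.h>

static void best_approx(unsigned long n, unsigned long d,
			unsigned long maxn, unsigned long maxd,
			unsigned long *bn, unsigned long *bd)
{
	unsigned long n0 = 0, n1 = 1, d0 = 1, d1 = 0;

	for (;;) {
		unsigned long t, a;

		if (n1 > maxn || d1 > maxd) {
			n1 = n0;	/* fall back to previous convergent */
			d1 = d0;
			break;
		}
		if (d == 0)
			break;
		t = d;
		a = n / d;
		d = n % d;
		n = t;
		t = n0 + a * n1;	/* next numerator convergent */
		n0 = n1;
		n1 = t;
		t = d0 + a * d1;	/* next denominator convergent */
		d0 = d1;
		d1 = t;
	}
	*bn = n1;
	*bd = d1;
}

int main(void)
{
	unsigned long n, d;

	best_approx(31415, 10000, (1 << 8) - 1, (1 << 5) - 1, &n, &d);
	/* The convergents run 3/1, 22/7, 311/99; 311/99 exceeds the
	 * 5-bit denominator limit, so this prints "22/7". */
	printf("31415/10000 ~= %lu/%lu\n", n, d);
	return 0;
}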
diff --git a/lib/rbtree.c b/lib/rbtree.c
index f653659e0bc..e2aa3be2985 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -231,34 +231,34 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 		node = node->rb_right;
 		while ((left = node->rb_left) != NULL)
 			node = left;
+
+		if (rb_parent(old)) {
+			if (rb_parent(old)->rb_left == old)
+				rb_parent(old)->rb_left = node;
+			else
+				rb_parent(old)->rb_right = node;
+		} else
+			root->rb_node = node;
+
 		child = node->rb_right;
 		parent = rb_parent(node);
 		color = rb_color(node);
 
-		if (child)
-			rb_set_parent(child, parent);
 		if (parent == old) {
-			parent->rb_right = child;
 			parent = node;
-		} else
+		} else {
+			if (child)
+				rb_set_parent(child, parent);
 			parent->rb_left = child;
 
+			node->rb_right = old->rb_right;
+			rb_set_parent(old->rb_right, node);
+		}
+
 		node->rb_parent_color = old->rb_parent_color;
-		node->rb_right = old->rb_right;
 		node->rb_left = old->rb_left;
-
-		if (rb_parent(old))
-		{
-			if (rb_parent(old)->rb_left == old)
-				rb_parent(old)->rb_left = node;
-			else
-				rb_parent(old)->rb_right = node;
-		} else
-			root->rb_node = node;
-
 		rb_set_parent(old->rb_left, node);
-		if (old->rb_right)
-			rb_set_parent(old->rb_right, node);
+
 		goto color;
 	}
 
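
For illustration (not part of this diff): rb_erase() is only reached through user code that links nodes in by hand. A hedged sketch of the usual insert/erase pairing, following the pattern documented in Documentation/rbtree.txt, with illustrative names:

#include <linux/rbtree.h>

struct mynode {
	struct rb_node node;
	unsigned long key;
};

static struct rb_root mytree = RB_ROOT;

static void my_insert(struct mynode *new)
{
	struct rb_node **link = &mytree.rb_node, *parent = NULL;

	while (*link) {
		struct mynode *this = rb_entry(*link, struct mynode, node);

		parent = *link;
		link = new->key < this->key ? &(*link)->rb_left
					    : &(*link)->rb_right;
	}
	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &mytree);	/* rebalance */
}

static void my_remove(struct mynode *victim)
{
	rb_erase(&victim->node, &mytree);	/* the path patched above */
}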
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 7536acea135..756ccafa9ce 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -408,6 +408,8 @@ enum format_type {
 	FORMAT_TYPE_LONG_LONG,
 	FORMAT_TYPE_ULONG,
 	FORMAT_TYPE_LONG,
+	FORMAT_TYPE_UBYTE,
+	FORMAT_TYPE_BYTE,
 	FORMAT_TYPE_USHORT,
 	FORMAT_TYPE_SHORT,
 	FORMAT_TYPE_UINT,
@@ -573,12 +575,15 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec)
 }
 
 static char *symbol_string(char *buf, char *end, void *ptr,
-				struct printf_spec spec)
+				struct printf_spec spec, char ext)
 {
 	unsigned long value = (unsigned long) ptr;
 #ifdef CONFIG_KALLSYMS
 	char sym[KSYM_SYMBOL_LEN];
-	sprint_symbol(sym, value);
+	if (ext != 'f')
+		sprint_symbol(sym, value);
+	else
+		kallsyms_lookup(value, NULL, NULL, NULL, sym);
 	return string(buf, end, sym, spec);
 #else
 	spec.field_width = 2*sizeof(void *);
@@ -690,7 +695,8 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr,
  *
  * Right now we handle:
  *
- * - 'F' For symbolic function descriptor pointers
+ * - 'F' For symbolic function descriptor pointers with offset
+ * - 'f' For simple symbolic function names without offset
  * - 'S' For symbolic direct pointers
  * - 'R' For a struct resource pointer, it prints the range of
  *       addresses (not the name nor the flags)
@@ -713,10 +719,11 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 
 	switch (*fmt) {
 	case 'F':
+	case 'f':
 		ptr = dereference_function_descriptor(ptr);
 		/* Fallthrough */
 	case 'S':
-		return symbol_string(buf, end, ptr, spec);
+		return symbol_string(buf, end, ptr, spec, *fmt);
 	case 'R':
 		return resource_string(buf, end, ptr, spec);
 	case 'm':
@@ -853,11 +860,15 @@ qualifier:
 	spec->qualifier = -1;
 	if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
 	    *fmt == 'Z' || *fmt == 'z' || *fmt == 't') {
-		spec->qualifier = *fmt;
-		++fmt;
-		if (spec->qualifier == 'l' && *fmt == 'l') {
-			spec->qualifier = 'L';
-			++fmt;
+		spec->qualifier = *fmt++;
+		if (unlikely(spec->qualifier == *fmt)) {
+			if (spec->qualifier == 'l') {
+				spec->qualifier = 'L';
+				++fmt;
+			} else if (spec->qualifier == 'h') {
+				spec->qualifier = 'H';
+				++fmt;
+			}
 		}
 	}
 
@@ -919,6 +930,11 @@ qualifier:
 		spec->type = FORMAT_TYPE_SIZE_T;
 	} else if (spec->qualifier == 't') {
 		spec->type = FORMAT_TYPE_PTRDIFF;
+	} else if (spec->qualifier == 'H') {
+		if (spec->flags & SIGN)
+			spec->type = FORMAT_TYPE_BYTE;
+		else
+			spec->type = FORMAT_TYPE_UBYTE;
 	} else if (spec->qualifier == 'h') {
 		if (spec->flags & SIGN)
 			spec->type = FORMAT_TYPE_SHORT;
@@ -943,7 +959,8 @@ qualifier:
  *
  * This function follows C99 vsnprintf, but has some extensions:
  * %pS output the name of a text symbol
- * %pF output the name of a function pointer
+ * %pF output the name of a function pointer with its offset
+ * %pf output the name of a function pointer without its offset
  * %pR output the address range in a struct resource
  *
  * The return value is the number of characters which would
@@ -1087,6 +1104,12 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 		case FORMAT_TYPE_PTRDIFF:
 			num = va_arg(args, ptrdiff_t);
 			break;
+		case FORMAT_TYPE_UBYTE:
+			num = (unsigned char) va_arg(args, int);
+			break;
+		case FORMAT_TYPE_BYTE:
+			num = (signed char) va_arg(args, int);
+			break;
 		case FORMAT_TYPE_USHORT:
 			num = (unsigned short) va_arg(args, int);
 			break;
@@ -1363,6 +1386,10 @@ do { \
 		case FORMAT_TYPE_PTRDIFF:
 			save_arg(ptrdiff_t);
 			break;
+		case FORMAT_TYPE_UBYTE:
+		case FORMAT_TYPE_BYTE:
+			save_arg(char);
+			break;
 		case FORMAT_TYPE_USHORT:
 		case FORMAT_TYPE_SHORT:
 			save_arg(short);
@@ -1391,7 +1418,8 @@ EXPORT_SYMBOL_GPL(vbin_printf);
  *
  * The format follows C99 vsnprintf, but has some extensions:
  *  %pS output the name of a text symbol
- *  %pF output the name of a function pointer
+ *  %pF output the name of a function pointer with its offset
+ *  %pf output the name of a function pointer without its offset
  *  %pR output the address range in a struct resource
  *  %n is ignored
 *
@@ -1538,6 +1566,12 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
 		case FORMAT_TYPE_PTRDIFF:
 			num = get_arg(ptrdiff_t);
 			break;
+		case FORMAT_TYPE_UBYTE:
+			num = get_arg(unsigned char);
+			break;
+		case FORMAT_TYPE_BYTE:
+			num = get_arg(signed char);
+			break;
 		case FORMAT_TYPE_USHORT:
 			num = get_arg(unsigned short);
 			break;
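
For illustration (not part of this diff): the two extensions this file gains, as they would be used from module code ('my_handler' is a stand-in symbol):

#include <linux/kernel.h>

static void my_handler(void)
{
}

static void demo(void)
{
	u8 status = 0xfe;

	pr_info("handler: %pF\n", my_handler);	/* e.g. "my_handler+0x0/0x8" */
	pr_info("handler: %pf\n", my_handler);	/* just "my_handler" */
	pr_info("status: %hhu (%hhd)\n", status, status);	/* "254 (-2)" */
}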