Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug            54
-rw-r--r--  lib/Kconfig.kmemcheck         3
-rw-r--r--  lib/Makefile                  2
-rw-r--r--  lib/atomic64.c               11
-rw-r--r--  lib/bitmap.c                 12
-rw-r--r--  lib/decompress_bunzip2.c     24
-rw-r--r--  lib/decompress_inflate.c     18
-rw-r--r--  lib/decompress_unlzma.c      33
-rw-r--r--  lib/dma-debug.c              54
-rw-r--r--  lib/dynamic_debug.c           2
-rw-r--r--  lib/flex_array.c            327
-rw-r--r--  lib/inflate.c                 2
-rw-r--r--  lib/is_single_threaded.c     61
-rw-r--r--  lib/lmb.c                     2
-rw-r--r--  lib/scatterlist.c            16
-rw-r--r--  lib/swiotlb.c               124
-rw-r--r--  lib/vsprintf.c              236
-rw-r--r--  lib/zlib_deflate/deflate.c    4
18 files changed, 753 insertions, 232 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4c32b1a1a06e..891155817bc6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -50,6 +50,14 @@ config MAGIC_SYSRQ
 	  keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
 	  unless you really know what this hack does.
 
+config STRIP_ASM_SYMS
+	bool "Strip assembler-generated symbols during link"
+	default n
+	help
+	  Strip internal assembler-generated symbols during a link (symbols
+	  that look like '.Lxxx') so they don't pollute the output of
+	  get_wchan() and suchlike.
+
 config UNUSED_SYMBOLS
 	bool "Enable unused/obsolete exported symbols"
 	default y if X86
@@ -338,7 +346,7 @@ config SLUB_STATS
 
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
-	depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \
+	depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM || PPC) && \
 		!MEMORY_HOTPLUG
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
@@ -359,6 +367,18 @@ config DEBUG_KMEMLEAK
 	  In order to access the kmemleak file, debugfs needs to be
 	  mounted (usually at /sys/kernel/debug).
 
+config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
+	int "Maximum kmemleak early log entries"
+	depends on DEBUG_KMEMLEAK
+	range 200 2000
+	default 400
+	help
+	  Kmemleak must track all the memory allocations to avoid
+	  reporting false positives. Since memory may be allocated or
+	  freed before kmemleak is initialised, an early log buffer is
+	  used to store these actions. If kmemleak reports "early log
+	  buffer exceeded", please increase this value.
+
 config DEBUG_KMEMLEAK_TEST
 	tristate "Simple test for the kernel memory leak detector"
 	depends on DEBUG_KMEMLEAK
@@ -641,6 +661,21 @@ config DEBUG_NOTIFIERS
 	  This is a relatively cheap check but if you care about maximum
 	  performance, say N.
 
+config DEBUG_CREDENTIALS
+	bool "Debug credential management"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to turn on some debug checking for credential
+	  management.  The additional code keeps track of the number of
+	  pointers from task_structs to any given cred struct, and checks to
+	  see that this number never exceeds the usage count of the cred
+	  struct.
+
+	  Furthermore, if SELinux is enabled, this also checks that the
+	  security pointer in the cred struct is never seen to be invalid.
+
+	  If unsure, say N.
+
 #
 # Select this config option from the architecture Kconfig, if it
 # it is preferred to always offer frame pointers as a config
@@ -713,7 +748,7 @@ config RCU_TORTURE_TEST_RUNNABLE
 
 config RCU_CPU_STALL_DETECTOR
 	bool "Check for stalled CPUs delaying RCU grace periods"
-	depends on CLASSIC_RCU || TREE_RCU
+	depends on TREE_RCU || TREE_PREEMPT_RCU
 	default n
 	help
 	  This option causes RCU to printk information on which
@@ -778,6 +813,21 @@ config DEBUG_BLOCK_EXT_DEVT
 
 	  Say N if you are unsure.
 
+config DEBUG_FORCE_WEAK_PER_CPU
+	bool "Force weak per-cpu definitions"
+	depends on DEBUG_KERNEL
+	help
+	  s390 and alpha require percpu variables in modules to be
+	  defined weak to work around addressing range issue which
+	  puts the following two restrictions on percpu variable
+	  definitions.
+
+	  1. percpu symbols must be unique whether static or not
+	  2. percpu variables can't be defined inside a function
+
+	  To ensure that generic code follows the above rules, this
+	  option forces all percpu variables to be defined as weak.
+
 config LKDTM
 	tristate "Linux Kernel Dump Test Tool Module"
 	depends on DEBUG_KERNEL
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
index 603c81b66549..846e039a86b4 100644
--- a/lib/Kconfig.kmemcheck
+++ b/lib/Kconfig.kmemcheck
@@ -1,6 +1,8 @@
 config HAVE_ARCH_KMEMCHECK
 	bool
 
+if HAVE_ARCH_KMEMCHECK
+
 menuconfig KMEMCHECK
 	bool "kmemcheck: trap use of uninitialized memory"
 	depends on DEBUG_KERNEL
@@ -89,3 +91,4 @@ config KMEMCHECK_BITOPS_OK
 	  accesses where not all the bits are initialized at the same time.
 	  This may also hide some real bugs.
 
+endif
diff --git a/lib/Makefile b/lib/Makefile
index b6d1857bbf08..2e78277eff9d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o
+	 is_single_threaded.o plist.o decompress.o flex_array.o
 
 lib-$(CONFIG_MMU)	+= ioremap.o
 lib-$(CONFIG_SMP)	+= cpumask.o
diff --git a/lib/atomic64.c b/lib/atomic64.c
index c5e725562416..8bee16ec7524 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -13,6 +13,7 @@
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <asm/atomic.h>
 
 /*
@@ -52,6 +53,7 @@ long long atomic64_read(const atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_read);
 
 void atomic64_set(atomic64_t *v, long long i)
 {
@@ -62,6 +64,7 @@ void atomic64_set(atomic64_t *v, long long i)
 	v->counter = i;
 	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_set);
 
 void atomic64_add(long long a, atomic64_t *v)
 {
@@ -72,6 +75,7 @@ void atomic64_add(long long a, atomic64_t *v)
 	v->counter += a;
 	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_add);
 
 long long atomic64_add_return(long long a, atomic64_t *v)
 {
@@ -84,6 +88,7 @@ long long atomic64_add_return(long long a, atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_add_return);
 
 void atomic64_sub(long long a, atomic64_t *v)
 {
@@ -94,6 +99,7 @@ void atomic64_sub(long long a, atomic64_t *v)
 	v->counter -= a;
 	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_sub);
 
 long long atomic64_sub_return(long long a, atomic64_t *v)
 {
@@ -106,6 +112,7 @@ long long atomic64_sub_return(long long a, atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_sub_return);
 
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
@@ -120,6 +127,7 @@ long long atomic64_dec_if_positive(atomic64_t *v)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_dec_if_positive);
 
 long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 {
@@ -134,6 +142,7 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_cmpxchg);
 
 long long atomic64_xchg(atomic64_t *v, long long new)
 {
@@ -147,6 +156,7 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 	spin_unlock_irqrestore(lock, flags);
 	return val;
 }
+EXPORT_SYMBOL(atomic64_xchg);
 
 int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
@@ -162,6 +172,7 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	spin_unlock_irqrestore(lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL(atomic64_add_unless);
 
 static int init_atomic64_lock(void)
 {
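
The exports above make the generic spinlock-hashed atomic64_t emulation usable from modules on 32-bit architectures that lack native 64-bit atomics. A minimal usage sketch (illustrative only; the counter and its callers are hypothetical, not part of this patch):

	#include <asm/atomic.h>

	static atomic64_t bytes_transferred = ATOMIC64_INIT(0);	/* hypothetical counter */

	static void account_io(long long nbytes)
	{
		/* internally: hash &bytes_transferred to a spinlock, lock, update, unlock */
		atomic64_add(nbytes, &bytes_transferred);
	}

	static long long snapshot_and_reset(void)
	{
		/* atomically install 0 and return the previous total */
		return atomic64_xchg(&bytes_transferred, 0);
	}
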
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 35a1f7ff4149..702565821c99 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -179,14 +179,16 @@ void __bitmap_shift_left(unsigned long *dst,
 }
 EXPORT_SYMBOL(__bitmap_shift_left);
 
-void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
 				const unsigned long *bitmap2, int bits)
 {
 	int k;
 	int nr = BITS_TO_LONGS(bits);
+	unsigned long result = 0;
 
 	for (k = 0; k < nr; k++)
-		dst[k] = bitmap1[k] & bitmap2[k];
+		result |= (dst[k] = bitmap1[k] & bitmap2[k]);
+	return result != 0;
 }
 EXPORT_SYMBOL(__bitmap_and);
 
@@ -212,14 +214,16 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
 }
 EXPORT_SYMBOL(__bitmap_xor);
 
-void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
 			const unsigned long *bitmap2, int bits)
 {
 	int k;
 	int nr = BITS_TO_LONGS(bits);
+	unsigned long result = 0;
 
 	for (k = 0; k < nr; k++)
-		dst[k] = bitmap1[k] & ~bitmap2[k];
+		result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
+	return result != 0;
 }
 EXPORT_SYMBOL(__bitmap_andnot);
 
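
The signature change is the point of this hunk: __bitmap_and() and __bitmap_andnot() now report whether any bit survived the operation, so callers can detect an empty result without rescanning the words. A hedged sketch of the calling pattern (the mask names and width are made up):

	#include <linux/bitmap.h>
	#include <linux/errno.h>

	#define MY_NBITS 128				/* hypothetical bitmap width */

	static int intersect_or_fail(unsigned long *dst,
				     const unsigned long *allowed,
				     const unsigned long *online)
	{
		/* returns nonzero iff dst ends up with at least one bit set */
		if (!__bitmap_and(dst, allowed, online, MY_NBITS))
			return -EINVAL;		/* empty intersection */
		return 0;
	}
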
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 708e2a86d87b..600f473a5610 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -45,12 +45,14 @@
 */
 
 
-#ifndef STATIC
+#ifdef STATIC
+#define PREBOOT
+#else
 #include <linux/decompress/bunzip2.h>
-#endif /* !STATIC */
+#include <linux/slab.h>
+#endif /* STATIC */
 
 #include <linux/decompress/mm.h>
-#include <linux/slab.h>
 
 #ifndef INT_MAX
 #define INT_MAX 0x7fffffff
@@ -681,9 +683,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
 	set_error_fn(error_fn);
 	if (flush)
 		outbuf = malloc(BZIP2_IOBUF_SIZE);
-	else
-		len -= 4; /* Uncompressed size hack active in pre-boot
-			     environment */
+
 	if (!outbuf) {
 		error("Could not allocate output bufer");
 		return -1;
@@ -733,4 +733,14 @@ exit_0:
 	return i;
 }
 
-#define decompress bunzip2
+#ifdef PREBOOT
+STATIC int INIT decompress(unsigned char *buf, int len,
+			int(*fill)(void*, unsigned int),
+			int(*flush)(void*, unsigned int),
+			unsigned char *outbuf,
+			int *pos,
+			void(*error_fn)(char *x))
+{
+	return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error_fn);
+}
+#endif
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index e36b296fc9f8..fc686c7a0a0d 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -19,13 +19,18 @@
 #include "zlib_inflate/inflate.h"
 
 #include "zlib_inflate/infutil.h"
+#include <linux/slab.h>
 
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
-#include <linux/slab.h>
 
-#define INBUF_LEN (16*1024)
+#define GZIP_IOBUF_SIZE (16*1024)
+
+static int nofill(void *buffer, unsigned int len)
+{
+	return -1;
+}
 
 /* Included from initramfs et al code */
 STATIC int INIT gunzip(unsigned char *buf, int len,
@@ -55,7 +60,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
 	if (buf)
 		zbuf = buf;
 	else {
-		zbuf = malloc(INBUF_LEN);
+		zbuf = malloc(GZIP_IOBUF_SIZE);
 		len = 0;
 	}
 	if (!zbuf) {
@@ -76,8 +81,11 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
 		goto gunzip_nomem4;
 	}
 
+	if (!fill)
+		fill = nofill;
+
 	if (len == 0)
-		len = fill(zbuf, INBUF_LEN);
+		len = fill(zbuf, GZIP_IOBUF_SIZE);
 
 	/* verify the gzip header */
 	if (len < 10 ||
@@ -113,7 +121,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
 	while (rc == Z_OK) {
 		if (strm->avail_in == 0) {
 			/* TODO: handle case where both pos and fill are set */
-			len = fill(zbuf, INBUF_LEN);
+			len = fill(zbuf, GZIP_IOBUF_SIZE);
 			if (len < 0) {
 				rc = -1;
 				error("read error");
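
The new nofill() callback is a defensive default: when a caller passes fill == NULL (everything already in memory), a stray refill attempt now fails with -1 instead of calling through a NULL pointer. Roughly, the two calling styles look like this (a sketch; flush_fn, error_fn and read_from_source() are hypothetical stand-ins, not part of this patch):

	/* streaming caller: supplies a refill callback */
	static int my_fill(void *buf, unsigned int size)
	{
		return read_from_source(buf, size);	/* < 0 on error, like nofill() */
	}

	static void example(unsigned char *image, int image_len, int *pos)
	{
		/* whole image in memory: fill == NULL is safe, nofill() backstops it */
		gunzip(image, image_len, NULL, flush_fn, NULL, pos, error_fn);

		/* streaming: gunzip() calls my_fill() whenever avail_in hits 0 */
		gunzip(NULL, 0, my_fill, flush_fn, NULL, pos, error_fn);
	}
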
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 32123a1340e6..ca82fde81c8f 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -29,12 +29,14 @@
  *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#ifndef STATIC
+#ifdef STATIC
+#define PREBOOT
+#else
 #include <linux/decompress/unlzma.h>
+#include <linux/slab.h>
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
-#include <linux/slab.h>
 
 #define MIN(a, b) (((a) < (b)) ? (a) : (b))
 
@@ -80,6 +82,11 @@ struct rc {
 #define RC_MODEL_TOTAL_BITS 11
 
 
+static int nofill(void *buffer, unsigned int len)
+{
+	return -1;
+}
+
 /* Called twice: once at startup and once in rc_normalize() */
 static void INIT rc_read(struct rc *rc)
 {
@@ -95,7 +102,10 @@ static inline void INIT rc_init(struct rc *rc,
 			 int (*fill)(void*, unsigned int),
 			 char *buffer, int buffer_size)
 {
-	rc->fill = fill;
+	if (fill)
+		rc->fill = fill;
+	else
+		rc->fill = nofill;
 	rc->buffer = (uint8_t *)buffer;
 	rc->buffer_size = buffer_size;
 	rc->buffer_end = rc->buffer + rc->buffer_size;
@@ -543,9 +553,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
 	int ret = -1;
 
 	set_error_fn(error_fn);
-	if (!flush)
-		in_len -= 4; /* Uncompressed size hack active in pre-boot
-				environment */
+
 	if (buf)
 		inbuf = buf;
 	else
@@ -645,4 +653,15 @@ exit_0:
 	return ret;
 }
 
-#define decompress unlzma
+#ifdef PREBOOT
+STATIC int INIT decompress(unsigned char *buf, int in_len,
+			      int(*fill)(void*, unsigned int),
+			      int(*flush)(void*, unsigned int),
+			      unsigned char *output,
+			      int *posp,
+			      void(*error_fn)(char *x)
+	)
+{
+	return unlzma(buf, in_len - 4, fill, flush, output, posp, error_fn);
+}
+#endif
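
Both PREBOOT wrappers (here and in decompress_bunzip2.c above) replace the old "#define decompress ..." aliases and hoist the pre-boot "uncompressed size hack" out of the decompressor cores: the build appends a 4-byte size trailer to the compressed image, so the wrapper passes len - 4 as the real stream length. A hedged sketch of reading that trailer, assuming (as the removed comments imply) a little-endian 32-bit uncompressed size:

	#include <stdint.h>

	/*
	 * Assumed pre-boot payload layout:
	 *	[ compressed stream, len - 4 bytes ][ 4-byte LE uncompressed size ]
	 */
	static uint32_t payload_output_size(const unsigned char *payload, int len)
	{
		const unsigned char *p = payload + len - 4;

		return p[0] | (p[1] << 8) |
		       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	}
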
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 3b93129a968c..58a9f9fc609a 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -156,9 +156,13 @@ static bool driver_filter(struct device *dev)
 		return true;
 
 	/* driver filter on and initialized */
-	if (current_driver && dev->driver == current_driver)
+	if (current_driver && dev && dev->driver == current_driver)
 		return true;
 
+	/* driver filter on, but we can't filter on a NULL device... */
+	if (!dev)
+		return false;
+
 	if (current_driver || !current_driver_name[0])
 		return false;
 
@@ -183,17 +187,17 @@ static bool driver_filter(struct device *dev)
 	return ret;
 }
 
 #define err_printk(dev, entry, format, arg...) do {		\
 		error_count += 1;				\
 		if (driver_filter(dev) &&			\
 		    (show_all_errors || show_num_errors > 0)) {	\
 			WARN(1, "%s %s: " format,		\
-			     dev_driver_string(dev),		\
-			     dev_name(dev) , ## arg);		\
+			     dev ? dev_driver_string(dev) : "NULL", \
+			     dev ? dev_name(dev) : "NULL", ## arg);	\
 			dump_entry_trace(entry);		\
 		}						\
 		if (!show_all_errors && show_num_errors > 0)	\
 			show_num_errors -= 1;			\
 	} while (0);
 
 /*
@@ -716,7 +720,7 @@ void dma_debug_init(u32 num_entries)
 
 	for (i = 0; i < HASH_SIZE; ++i) {
 		INIT_LIST_HEAD(&dma_entry_hash[i].list);
-		dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&dma_entry_hash[i].lock);
 	}
 
 	if (dma_debug_fs_init() != 0) {
@@ -856,22 +860,21 @@ static void check_for_stack(struct device *dev, void *addr)
856 "stack [addr=%p]\n", addr); 860 "stack [addr=%p]\n", addr);
857} 861}
858 862
859static inline bool overlap(void *addr, u64 size, void *start, void *end) 863static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
860{ 864{
861 void *addr2 = (char *)addr + size; 865 unsigned long a1 = (unsigned long)addr;
866 unsigned long b1 = a1 + len;
867 unsigned long a2 = (unsigned long)start;
868 unsigned long b2 = (unsigned long)end;
862 869
863 return ((addr >= start && addr < end) || 870 return !(b1 <= a2 || a1 >= b2);
864 (addr2 >= start && addr2 < end) ||
865 ((addr < start) && (addr2 >= end)));
866} 871}
867 872
868static void check_for_illegal_area(struct device *dev, void *addr, u64 size) 873static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
869{ 874{
870 if (overlap(addr, size, _text, _etext) || 875 if (overlap(addr, len, _text, _etext) ||
871 overlap(addr, size, __start_rodata, __end_rodata)) 876 overlap(addr, len, __start_rodata, __end_rodata))
872 err_printk(dev, NULL, "DMA-API: device driver maps " 877 err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
873 "memory from kernel text or rodata "
874 "[addr=%p] [size=%llu]\n", addr, size);
875} 878}
876 879
877static void check_sync(struct device *dev, 880static void check_sync(struct device *dev,
@@ -969,7 +972,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 	entry->type = dma_debug_single;
 
 	if (!PageHighMem(page)) {
-		void *addr = ((char *)page_address(page)) + offset;
+		void *addr = page_address(page) + offset;
+
 		check_for_stack(dev, addr);
 		check_for_illegal_area(dev, addr, size);
 	}
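
The rewritten overlap() is the textbook half-open interval intersection test: [a1, b1) and [a2, b2) intersect exactly when neither range ends before the other begins, which covers partial overlap and full containment in one expression. A standalone illustration:

	#include <assert.h>
	#include <stdbool.h>

	/* [a1, b1) and [a2, b2) intersect iff neither lies entirely before the other */
	static bool ranges_overlap(unsigned long a1, unsigned long b1,
				   unsigned long a2, unsigned long b2)
	{
		return !(b1 <= a2 || a1 >= b2);
	}

	int main(void)
	{
		assert( ranges_overlap(10, 20, 15, 30));	/* partial overlap */
		assert( ranges_overlap(10, 40, 15, 30));	/* full containment */
		assert(!ranges_overlap(10, 20, 20, 30));	/* merely adjacent */
		return 0;
	}
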
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 833139ce1e22..e22c148e4b7f 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -164,7 +164,7 @@ static void ddebug_change(const struct ddebug_query *query,
 
 	if (!newflags)
 		dt->num_enabled--;
-	else if (!dp-flags)
+	else if (!dp->flags)
 		dt->num_enabled++;
 	dp->flags = newflags;
 	if (newflags) {
diff --git a/lib/flex_array.c b/lib/flex_array.c
new file mode 100644
index 000000000000..66eef2e4483e
--- /dev/null
+++ b/lib/flex_array.c
@@ -0,0 +1,327 @@
+/*
+ * Flexible array managed in PAGE_SIZE parts
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2009
+ *
+ * Author: Dave Hansen <dave@linux.vnet.ibm.com>
+ */
+
+#include <linux/flex_array.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+
+struct flex_array_part {
+	char elements[FLEX_ARRAY_PART_SIZE];
+};
+
+/*
+ * If a user requests an allocation which is small
+ * enough, we may simply use the space in the
+ * flex_array->parts[] array to store the user
+ * data.
+ */
+static inline int elements_fit_in_base(struct flex_array *fa)
+{
+	int data_size = fa->element_size * fa->total_nr_elements;
+	if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT)
+		return 1;
+	return 0;
+}
+
+/**
+ * flex_array_alloc - allocate a new flexible array
+ * @element_size:	the size of individual elements in the array
+ * @total:		total number of elements that this should hold
+ * @flags:		page allocation flags to use for base array
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * @total is used to size internal structures.  If the user ever
+ * accesses any array indexes >=@total, it will produce errors.
+ *
+ * The maximum number of elements is defined as: the number of
+ * elements that can be stored in a page times the number of
+ * page pointers that we can fit in the base structure or (using
+ * integer math):
+ *
+ *	(PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
+ *
+ * Here's a table showing example capacities.  Note that the maximum
+ * index that the get/put() functions is just nr_objects-1.  This
+ * basically means that you get 4MB of storage on 32-bit and 2MB on
+ * 64-bit.
+ *
+ *
+ * Element size | Objects | Objects |
+ * PAGE_SIZE=4k |  32-bit |  64-bit |
+ * ---------------------------------|
+ *      1 bytes | 4186112 | 2093056 |
+ *      2 bytes | 2093056 | 1046528 |
+ *      3 bytes | 1395030 |  697515 |
+ *      4 bytes | 1046528 |  523264 |
+ *     32 bytes |  130816 |   65408 |
+ *     33 bytes |  126728 |   63364 |
+ *   2048 bytes |    2044 |    1022 |
+ *   2049 bytes |    1022 |     511 |
+ *       void * | 1046528 |  261632 |
+ *
+ * Since 64-bit pointers are twice the size, we lose half the
+ * capacity in the base structure.  Also note that no effort is made
+ * to efficiently pack objects across page boundaries.
+ */
+struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+					gfp_t flags)
+{
+	struct flex_array *ret;
+	int max_size = FLEX_ARRAY_NR_BASE_PTRS *
+				FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
+
+	/* max_size will end up 0 if element_size > PAGE_SIZE */
+	if (total > max_size)
+		return NULL;
+	ret = kzalloc(sizeof(struct flex_array), flags);
+	if (!ret)
+		return NULL;
+	ret->element_size = element_size;
+	ret->total_nr_elements = total;
+	if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
+		memset(ret->parts[0], FLEX_ARRAY_FREE,
+						FLEX_ARRAY_BASE_BYTES_LEFT);
+	return ret;
+}
+
+static int fa_element_to_part_nr(struct flex_array *fa,
+					unsigned int element_nr)
+{
+	return element_nr / FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
+}
+
+/**
+ * flex_array_free_parts - just free the second-level pages
+ * @fa:		the flex array from which to free parts
+ *
+ * This is to be used in cases where the base 'struct flex_array'
+ * has been statically allocated and should not be free.
+ */
+void flex_array_free_parts(struct flex_array *fa)
+{
+	int part_nr;
+
+	if (elements_fit_in_base(fa))
+		return;
+	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
+		kfree(fa->parts[part_nr]);
+}
+
+void flex_array_free(struct flex_array *fa)
+{
+	flex_array_free_parts(fa);
+	kfree(fa);
+}
+
+static unsigned int index_inside_part(struct flex_array *fa,
+					unsigned int element_nr)
+{
+	unsigned int part_offset;
+
+	part_offset = element_nr %
+				FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
+	return part_offset * fa->element_size;
+}
+
+static struct flex_array_part *
+__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
+{
+	struct flex_array_part *part = fa->parts[part_nr];
+	if (!part) {
+		part = kmalloc(sizeof(struct flex_array_part), flags);
+		if (!part)
+			return NULL;
+		if (!(flags & __GFP_ZERO))
+			memset(part, FLEX_ARRAY_FREE,
+				sizeof(struct flex_array_part));
+		fa->parts[part_nr] = part;
+	}
+	return part;
+}
+
+/**
+ * flex_array_put - copy data into the array at @element_nr
+ * @fa:		the flex array to copy data into
+ * @element_nr:	index of the position in which to insert
+ *		the new element.
+ * @src:	address of data to copy into the array
+ * @flags:	page allocation flags to use for array expansion
+ *
+ *
+ * Note that this *copies* the contents of @src into
+ * the array.  If you are trying to store an array of
+ * pointers, make sure to pass in &ptr instead of ptr.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
+			gfp_t flags)
+{
+	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	struct flex_array_part *part;
+	void *dst;
+
+	if (element_nr >= fa->total_nr_elements)
+		return -ENOSPC;
+	if (elements_fit_in_base(fa))
+		part = (struct flex_array_part *)&fa->parts[0];
+	else {
+		part = __fa_get_part(fa, part_nr, flags);
+		if (!part)
+			return -ENOMEM;
+	}
+	dst = &part->elements[index_inside_part(fa, element_nr)];
+	memcpy(dst, src, fa->element_size);
+	return 0;
+}
+
+/**
+ * flex_array_clear - clear element in array at @element_nr
+ * @fa:		the flex array of the element.
+ * @element_nr:	index of the position to clear.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
+{
+	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	struct flex_array_part *part;
+	void *dst;
+
+	if (element_nr >= fa->total_nr_elements)
+		return -ENOSPC;
+	if (elements_fit_in_base(fa))
+		part = (struct flex_array_part *)&fa->parts[0];
+	else {
+		part = fa->parts[part_nr];
+		if (!part)
+			return -EINVAL;
+	}
+	dst = &part->elements[index_inside_part(fa, element_nr)];
+	memset(dst, FLEX_ARRAY_FREE, fa->element_size);
+	return 0;
+}
+
+/**
+ * flex_array_prealloc - guarantee that array space exists
+ * @fa:		the flex array for which to preallocate parts
+ * @start:	index of first array element for which space is allocated
+ * @end:	index of last (inclusive) element for which space is allocated
+ * @flags:	page allocation flags
+ *
+ * This will guarantee that no future calls to flex_array_put()
+ * will allocate memory.  It can be used if you are expecting to
+ * be holding a lock or in some atomic context while writing
+ * data into the array.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+			unsigned int end, gfp_t flags)
+{
+	int start_part;
+	int end_part;
+	int part_nr;
+	struct flex_array_part *part;
+
+	if (start >= fa->total_nr_elements || end >= fa->total_nr_elements)
+		return -ENOSPC;
+	if (elements_fit_in_base(fa))
+		return 0;
+	start_part = fa_element_to_part_nr(fa, start);
+	end_part = fa_element_to_part_nr(fa, end);
+	for (part_nr = start_part; part_nr <= end_part; part_nr++) {
+		part = __fa_get_part(fa, part_nr, flags);
+		if (!part)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * flex_array_get - pull data back out of the array
+ * @fa:		the flex array from which to extract data
+ * @element_nr:	index of the element to fetch from the array
+ *
+ * Returns a pointer to the data at index @element_nr.  Note
+ * that this is a copy of the data that was passed in.  If you
+ * are using this to store pointers, you'll get back &ptr.
+ *
+ * Locking must be provided by the caller.
+ */
+void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
+{
+	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	struct flex_array_part *part;
+
+	if (element_nr >= fa->total_nr_elements)
+		return NULL;
+	if (elements_fit_in_base(fa))
+		part = (struct flex_array_part *)&fa->parts[0];
+	else {
+		part = fa->parts[part_nr];
+		if (!part)
+			return NULL;
+	}
+	return &part->elements[index_inside_part(fa, element_nr)];
+}
+
+static int part_is_free(struct flex_array_part *part)
+{
+	int i;
+
+	for (i = 0; i < sizeof(struct flex_array_part); i++)
+		if (part->elements[i] != FLEX_ARRAY_FREE)
+			return 0;
+	return 1;
+}
+
+/**
+ * flex_array_shrink - free unused second-level pages
+ * @fa:		the flex array to shrink
+ *
+ * Frees all second-level pages that consist solely of unused
+ * elements.  Returns the number of pages freed.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_shrink(struct flex_array *fa)
+{
+	struct flex_array_part *part;
+	int part_nr;
+	int ret = 0;
+
+	if (elements_fit_in_base(fa))
+		return ret;
+	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
+		part = fa->parts[part_nr];
+		if (!part)
+			continue;
+		if (part_is_free(part)) {
+			fa->parts[part_nr] = NULL;
+			kfree(part);
+			ret++;
+		}
+	}
+	return ret;
+}
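
For orientation, a hedged sketch of how a caller might exercise the new flex_array API end to end, based on the kerneldoc above (locking elided; the element type and values are made up):

	#include <linux/flex_array.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>

	struct sample {			/* hypothetical element type */
		int id;
		int weight;
	};

	static int flex_array_demo(void)
	{
		struct flex_array *fa;
		struct sample s = { .id = 1, .weight = 42 };
		struct sample *out;
		int err;

		fa = flex_array_alloc(sizeof(struct sample), 1000, GFP_KERNEL);
		if (!fa)
			return -ENOMEM;

		/* flex_array_put() copies *into* the array, so pass &s */
		err = flex_array_put(fa, 0, &s, GFP_KERNEL);
		if (err)
			goto out;

		/* flex_array_get() returns a pointer to the stored copy */
		out = flex_array_get(fa, 0);
		if (out)
			pr_info("id=%d weight=%d\n", out->id, out->weight);
	out:
		flex_array_free(fa);
		return err;
	}
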
diff --git a/lib/inflate.c b/lib/inflate.c
index 1a8e8a978128..d10255973a9f 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -7,7 +7,7 @@
  * Adapted for booting Linux by Hannu Savolainen 1993
  * based on gzip-1.0.3
  *
- * Nicolas Pitre <nico@cam.org>, 1999/04/14 :
+ * Nicolas Pitre <nico@fluxnic.net>, 1999/04/14 :
  *   Little mods for all variable to reside either into rodata or bss segments
  *   by marking constant variables with 'const' and initializing all the others
  *   at run-time only.  This allows for the kernel uncompressor to run
diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
index f1ed2fe76c65..bd2bea963364 100644
--- a/lib/is_single_threaded.c
+++ b/lib/is_single_threaded.c
@@ -12,34 +12,47 @@
 
 #include <linux/sched.h>
 
-/**
- * is_single_threaded - Determine if a thread group is single-threaded or not
- * @p: A task in the thread group in question
- *
- * This returns true if the thread group to which a task belongs is single
- * threaded, false if it is not.
+/*
+ * Returns true if the task does not share ->mm with another thread/process.
  */
-bool is_single_threaded(struct task_struct *p)
+bool current_is_single_threaded(void)
 {
-	struct task_struct *g, *t;
-	struct mm_struct *mm = p->mm;
+	struct task_struct *task = current;
+	struct mm_struct *mm = task->mm;
+	struct task_struct *p, *t;
+	bool ret;
 
-	if (atomic_read(&p->signal->count) != 1)
-		goto no;
+	if (atomic_read(&task->signal->live) != 1)
+		return false;
 
-	if (atomic_read(&p->mm->mm_users) != 1) {
-		read_lock(&tasklist_lock);
-		do_each_thread(g, t) {
-			if (t->mm == mm && t != p)
-				goto no_unlock;
-		} while_each_thread(g, t);
-		read_unlock(&tasklist_lock);
-	}
+	if (atomic_read(&mm->mm_users) == 1)
+		return true;
 
-	return true;
+	ret = false;
+	rcu_read_lock();
+	for_each_process(p) {
+		if (unlikely(p->flags & PF_KTHREAD))
+			continue;
+		if (unlikely(p == task->group_leader))
+			continue;
+
+		t = p;
+		do {
+			if (unlikely(t->mm == mm))
+				goto found;
+			if (likely(t->mm))
+				break;
+			/*
+			 * t->mm == NULL. Make sure next_thread/next_task
+			 * will see other CLONE_VM tasks which might be
+			 * forked before exiting.
+			 */
+			smp_rmb();
+		} while_each_thread(p, t);
+	}
+	ret = true;
+found:
+	rcu_read_unlock();
 
-no_unlock:
-	read_unlock(&tasklist_lock);
-no:
-	return false;
+	return ret;
 }
diff --git a/lib/lmb.c b/lib/lmb.c
index e4a6482d8b26..0343c05609f0 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -429,7 +429,7 @@ u64 __init lmb_phys_mem_size(void)
 	return lmb.memory.size;
 }
 
-u64 __init lmb_end_of_DRAM(void)
+u64 lmb_end_of_DRAM(void)
 {
 	int idx = lmb.memory.cnt - 1;
 
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a295e404e908..0d475d8167bf 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -314,6 +314,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
 	miter->__sg = sgl;
 	miter->__nents = nents;
 	miter->__offset = 0;
+	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
 	miter->__flags = flags;
 }
 EXPORT_SYMBOL(sg_miter_start);
@@ -394,6 +395,9 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 	if (miter->addr) {
 		miter->__offset += miter->consumed;
 
+		if (miter->__flags & SG_MITER_TO_SG)
+			flush_kernel_dcache_page(miter->page);
+
 		if (miter->__flags & SG_MITER_ATOMIC) {
 			WARN_ON(!irqs_disabled());
 			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
@@ -426,8 +430,14 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 	unsigned int offset = 0;
 	struct sg_mapping_iter miter;
 	unsigned long flags;
+	unsigned int sg_flags = SG_MITER_ATOMIC;
+
+	if (to_buffer)
+		sg_flags |= SG_MITER_FROM_SG;
+	else
+		sg_flags |= SG_MITER_TO_SG;
 
-	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+	sg_miter_start(&miter, sgl, nents, sg_flags);
 
 	local_irq_save(flags);
 
@@ -438,10 +448,8 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 
 		if (to_buffer)
 			memcpy(buf + offset, miter.addr, len);
-		else {
+		else
 			memcpy(miter.addr, buf + offset, len);
-			flush_kernel_dcache_page(miter.page);
-		}
 
 		offset += len;
 	}
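
Moving the direction into the miter flags lets sg_miter_stop() perform the flush_kernel_dcache_page() that used to be open-coded in sg_copy_buffer(), so every SG_MITER_TO_SG user gets it, not just the copy helper. A hedged sketch of the resulting calling convention (illustrative; not part of this patch):

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>

	/* reading out of an sglist: data flows from the SG pages to the CPU */
	static unsigned long sum_sg_bytes(struct scatterlist *sgl, unsigned int nents)
	{
		struct sg_mapping_iter miter;
		unsigned long total = 0;
		unsigned int i;

		sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			for (i = 0; i < miter.length; i++)
				total += ((u8 *)miter.addr)[i];
		sg_miter_stop(&miter);	/* FROM_SG: no dcache flush needed */
		return total;
	}
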
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bffe6d7ef9d9..ac25cd28e807 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -114,46 +114,11 @@ setup_io_tlb_npages(char *str)
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
-void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
-{
-	return alloc_bootmem_low_pages(size);
-}
-
-void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
-{
-	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
-}
-
-dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
-{
-	return paddr;
-}
-
-phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
-{
-	return baddr;
-}
-
+/* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 				      volatile void *address)
 {
-	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
-}
-
-void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
-{
-	return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
-}
-
-int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
-					      dma_addr_t addr, size_t size)
-{
-	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
-}
-
-int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
-{
-	return 0;
+	return phys_to_dma(hwdev, virt_to_phys(address));
 }
 
 static void swiotlb_print_info(unsigned long bytes)
@@ -189,7 +154,7 @@ swiotlb_init_with_default_size(size_t default_size)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
+	io_tlb_start = alloc_bootmem_low_pages(bytes);
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + bytes;
@@ -245,7 +210,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-		io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
+		io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+							order);
 		if (io_tlb_start)
 			break;
 		order--;
@@ -315,20 +281,10 @@ cleanup1:
 	return -ENOMEM;
 }
 
-static inline int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
+static int is_swiotlb_buffer(phys_addr_t paddr)
 {
-	return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
-}
-
-static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
-{
-	return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
-}
-
-static int is_swiotlb_buffer(char *addr)
-{
-	return addr >= io_tlb_start && addr < io_tlb_end;
+	return paddr >= virt_to_phys(io_tlb_start) &&
+		paddr < virt_to_phys(io_tlb_end);
 }
 
 /*
@@ -561,9 +517,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret &&
-	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
-				   size)) {
+	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 */
@@ -585,7 +539,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = swiotlb_virt_to_bus(hwdev, ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
+	if (dev_addr + size > dma_mask) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
 		       (unsigned long long)dma_mask,
 		       (unsigned long long)dev_addr);
@@ -601,11 +555,13 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
 void
 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-		      dma_addr_t dma_handle)
+		      dma_addr_t dev_addr)
 {
+	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+
 	WARN_ON(irqs_disabled());
-	if (!is_swiotlb_buffer(vaddr))
-		free_pages((unsigned long) vaddr, get_order(size));
+	if (!is_swiotlb_buffer(paddr))
+		free_pages((unsigned long)vaddr, get_order(size));
 	else
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
 		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
@@ -625,12 +581,15 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
 	       "device %s\n", size, dev ? dev_name(dev) : "?");
 
-	if (size > io_tlb_overflow && do_panic) {
-		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
-			panic("DMA: Memory would be corrupted\n");
-		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-			panic("DMA: Random memory would be DMAed\n");
-	}
+	if (size <= io_tlb_overflow || !do_panic)
+		return;
+
+	if (dir == DMA_BIDIRECTIONAL)
+		panic("DMA: Random memory could be DMA accessed\n");
+	if (dir == DMA_FROM_DEVICE)
+		panic("DMA: Random memory could be DMA written\n");
+	if (dir == DMA_TO_DEVICE)
+		panic("DMA: Random memory could be DMA read\n");
 }
 
 /*
@@ -646,7 +605,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 		    struct dma_attrs *attrs)
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
-	dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
+	dma_addr_t dev_addr = phys_to_dma(dev, phys);
 	void *map;
 
 	BUG_ON(dir == DMA_NONE);
@@ -655,8 +614,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (!address_needs_mapping(dev, dev_addr, size) &&
-	    !range_needs_mapping(phys, size))
+	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
 		return dev_addr;
 
 	/*
673 /* 631 /*
674 * Ensure that the address returned is DMA'ble 632 * Ensure that the address returned is DMA'ble
675 */ 633 */
676 if (address_needs_mapping(dev, dev_addr, size)) 634 if (!dma_capable(dev, dev_addr, size))
677 panic("map_single: bounce buffer is not DMA'ble"); 635 panic("map_single: bounce buffer is not DMA'ble");
678 636
679 return dev_addr; 637 return dev_addr;
@@ -691,19 +649,25 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 			 size_t size, int dir)
 {
-	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (is_swiotlb_buffer(dma_addr)) {
-		do_unmap_single(hwdev, dma_addr, size, dir);
+	if (is_swiotlb_buffer(paddr)) {
+		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
 		return;
 	}
 
 	if (dir != DMA_FROM_DEVICE)
 		return;
 
-	dma_mark_clean(dma_addr, size);
+	/*
+	 * phys_to_virt doesn't work with hihgmem page but we could
+	 * call dma_mark_clean() with hihgmem page here. However, we
+	 * are fine since dma_mark_clean() is null on POWERPC. We can
+	 * make dma_mark_clean() take a physical address if necessary.
+	 */
+	dma_mark_clean(phys_to_virt(paddr), size);
 }
 
 void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
@@ -728,19 +692,19 @@ static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 		    size_t size, int dir, int target)
 {
-	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (is_swiotlb_buffer(dma_addr)) {
-		sync_single(hwdev, dma_addr, size, dir, target);
+	if (is_swiotlb_buffer(paddr)) {
+		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
 		return;
 	}
 
 	if (dir != DMA_FROM_DEVICE)
 		return;
 
-	dma_mark_clean(dma_addr, size);
+	dma_mark_clean(phys_to_virt(paddr), size);
 }
 
 void
@@ -817,10 +781,10 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 
 	for_each_sg(sgl, sg, nelems, i) {
 		phys_addr_t paddr = sg_phys(sg);
-		dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);
+		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
 
-		if (range_needs_mapping(paddr, sg->length) ||
-		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
+		if (swiotlb_force ||
+		    !dma_capable(hwdev, dev_addr, sg->length)) {
 			void *map = map_single(hwdev, sg_phys(sg),
 					       sg->length, dir);
 			if (!map) {
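
The helpers this conversion switches to live in the generic DMA headers rather than in swiotlb itself: phys_to_dma()/dma_to_phys() do the per-architecture physical-to-bus translation, and dma_capable() answers "does [dev_addr, dev_addr + size) fit under the device's DMA mask?". A hedged sketch of the predicate on a flat-mapped architecture (the real definition is per-arch; this is a conceptual model, not the kernel's code):

	#include <linux/types.h>

	/* conceptual model of dma_capable() where bus address == physical address */
	static inline bool dma_capable_model(u64 dma_mask, u64 dev_addr, size_t size)
	{
		return dev_addr + size <= dma_mask;
	}
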
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 756ccafa9cec..33bed5e67a21 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -25,6 +25,7 @@
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
 #include <linux/ioport.h>
+#include <net/addrconf.h>
 
 #include <asm/page.h>		/* for PAGE_SIZE */
 #include <asm/div64.h>
@@ -580,7 +581,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
 	unsigned long value = (unsigned long) ptr;
 #ifdef CONFIG_KALLSYMS
 	char sym[KSYM_SYMBOL_LEN];
-	if (ext != 'f')
+	if (ext != 'f' && ext != 's')
 		sprint_symbol(sym, value);
 	else
 		kallsyms_lookup(value, NULL, NULL, NULL, sym);
@@ -630,60 +631,161 @@ static char *resource_string(char *buf, char *end, struct resource *res,
 }
 
 static char *mac_address_string(char *buf, char *end, u8 *addr,
-				struct printf_spec spec)
+				struct printf_spec spec, const char *fmt)
 {
-	char mac_addr[6 * 3]; /* (6 * 2 hex digits), 5 colons and trailing zero */
+	char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
 	char *p = mac_addr;
 	int i;
 
 	for (i = 0; i < 6; i++) {
 		p = pack_hex_byte(p, addr[i]);
-		if (!(spec.flags & SPECIAL) && i != 5)
+		if (fmt[0] == 'M' && i != 5)
 			*p++ = ':';
 	}
 	*p = '\0';
-	spec.flags &= ~SPECIAL;
 
 	return string(buf, end, mac_addr, spec);
 }
 
-static char *ip6_addr_string(char *buf, char *end, u8 *addr,
-			     struct printf_spec spec)
+static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
 {
-	char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */
-	char *p = ip6_addr;
 	int i;
 
+	for (i = 0; i < 4; i++) {
+		char temp[3];	/* hold each IP quad in reverse order */
+		int digits = put_dec_trunc(temp, addr[i]) - temp;
+		if (leading_zeros) {
+			if (digits < 3)
+				*p++ = '0';
+			if (digits < 2)
+				*p++ = '0';
+		}
+		/* reverse the digits in the quad */
+		while (digits--)
+			*p++ = temp[digits];
+		if (i < 3)
+			*p++ = '.';
+	}
+
+	*p = '\0';
+	return p;
+}
+
+static char *ip6_compressed_string(char *p, const char *addr)
+{
+	int i;
+	int j;
+	int range;
+	unsigned char zerolength[8];
+	int longest = 1;
+	int colonpos = -1;
+	u16 word;
+	u8 hi;
+	u8 lo;
+	bool needcolon = false;
+	bool useIPv4;
+	struct in6_addr in6;
+
+	memcpy(&in6, addr, sizeof(struct in6_addr));
+
+	useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);
+
+	memset(zerolength, 0, sizeof(zerolength));
+
+	if (useIPv4)
+		range = 6;
+	else
+		range = 8;
+
+	/* find position of longest 0 run */
+	for (i = 0; i < range; i++) {
+		for (j = i; j < range; j++) {
+			if (in6.s6_addr16[j] != 0)
+				break;
+			zerolength[i]++;
+		}
+	}
+	for (i = 0; i < range; i++) {
+		if (zerolength[i] > longest) {
+			longest = zerolength[i];
+			colonpos = i;
+		}
+	}
+
+	/* emit address */
+	for (i = 0; i < range; i++) {
+		if (i == colonpos) {
+			if (needcolon || i == 0)
+				*p++ = ':';
+			*p++ = ':';
+			needcolon = false;
+			i += longest - 1;
+			continue;
+		}
+		if (needcolon) {
+			*p++ = ':';
+			needcolon = false;
+		}
+		/* hex u16 without leading 0s */
+		word = ntohs(in6.s6_addr16[i]);
+		hi = word >> 8;
+		lo = word & 0xff;
+		if (hi) {
+			if (hi > 0x0f)
+				p = pack_hex_byte(p, hi);
+			else
+				*p++ = hex_asc_lo(hi);
+		}
+		if (hi || lo > 0x0f)
+			p = pack_hex_byte(p, lo);
+		else
+			*p++ = hex_asc_lo(lo);
+		needcolon = true;
+	}
+
+	if (useIPv4) {
+		if (needcolon)
+			*p++ = ':';
+		p = ip4_string(p, &in6.s6_addr[12], false);
+	}
+
+	*p = '\0';
+	return p;
+}
+
+static char *ip6_string(char *p, const char *addr, const char *fmt)
+{
+	int i;
 	for (i = 0; i < 8; i++) {
-		p = pack_hex_byte(p, addr[2 * i]);
-		p = pack_hex_byte(p, addr[2 * i + 1]);
-		if (!(spec.flags & SPECIAL) && i != 7)
+		p = pack_hex_byte(p, *addr++);
+		p = pack_hex_byte(p, *addr++);
+		if (fmt[0] == 'I' && i != 7)
 			*p++ = ':';
 	}
+
 	*p = '\0';
-	spec.flags &= ~SPECIAL;
+	return p;
+}
+
+static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
+			     struct printf_spec spec, const char *fmt)
+{
+	char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
+
+	if (fmt[0] == 'I' && fmt[2] == 'c')
+		ip6_compressed_string(ip6_addr, addr);
+	else
+		ip6_string(ip6_addr, addr, fmt);
 
 	return string(buf, end, ip6_addr, spec);
 }
 
-static char *ip4_addr_string(char *buf, char *end, u8 *addr,
-			     struct printf_spec spec)
+static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
+			     struct printf_spec spec, const char *fmt)
 {
-	char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */
-	char temp[3];	/* hold each IP quad in reverse order */
-	char *p = ip4_addr;
-	int i, digits;
+	char ip4_addr[sizeof("255.255.255.255")];
 
-	for (i = 0; i < 4; i++) {
-		digits = put_dec_trunc(temp, addr[i]) - temp;
-		/* reverse the digits in the quad */
-		while (digits--)
-			*p++ = temp[digits];
-		if (i != 3)
-			*p++ = '.';
-	}
-	*p = '\0';
-	spec.flags &= ~SPECIAL;
+	ip4_string(ip4_addr, addr, fmt[0] == 'i');
 
 	return string(buf, end, ip4_addr, spec);
 }
@@ -697,16 +799,21 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr,
697 * 799 *
698 * - 'F' For symbolic function descriptor pointers with offset 800 * - 'F' For symbolic function descriptor pointers with offset
699 * - 'f' For simple symbolic function names without offset 801 * - 'f' For simple symbolic function names without offset
700 * - 'S' For symbolic direct pointers 802 * - 'S' For symbolic direct pointers with offset
803 * - 's' For symbolic direct pointers without offset
701 * - 'R' For a struct resource pointer, it prints the range of 804 * - 'R' For a struct resource pointer, it prints the range of
702 * addresses (not the name nor the flags) 805 * addresses (not the name nor the flags)
703 * - 'M' For a 6-byte MAC address, it prints the address in the 806 * - 'M' For a 6-byte MAC address, it prints the address in the
704 * usual colon-separated hex notation 807 * usual colon-separated hex notation
705 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way (dot-separated 808 * - 'm' For a 6-byte MAC address, it prints the hex address without colons
706 * decimal for v4 and colon separated network-order 16 bit hex for v6) 809 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
707 * - 'i' [46] for 'raw' IPv4/IPv6 addresses, IPv6 omits the colons, IPv4 is 810 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
708 * currently the same 811 * IPv6 uses colon separated network-order 16 bit hex with leading 0's
709 * 812 * - 'i' [46] for 'raw' IPv4/IPv6 addresses
813 * IPv6 omits the colons (01020304...0f)
814 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
815 * - 'I6c' for IPv6 addresses printed as specified by
816 * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt
710 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 817 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
711 * function pointers are really function descriptors, which contain a 818 * function pointers are really function descriptors, which contain a
712 * pointer to the real address. 819 * pointer to the real address.
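
To make the table above concrete, a sketch of how callers would exercise the new specifiers (kernel context assumed; the byte values are arbitrary examples):

#include <linux/kernel.h>
#include <linux/in6.h>

static void fmt_demo(void)
{
        u8 ip[4]  = { 192, 168, 0, 1 };
        u8 mac[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
        struct in6_addr lo6 = IN6ADDR_LOOPBACK_INIT;

        printk(KERN_INFO "%pI4\n",  ip);    /* 192.168.0.1 */
        printk(KERN_INFO "%pi4\n",  ip);    /* 192.168.000.001 */
        printk(KERN_INFO "%pM\n",   mac);   /* 00:01:02:03:04:05 */
        printk(KERN_INFO "%pm\n",   mac);   /* 000102030405 */
        printk(KERN_INFO "%pI6c\n", &lo6);  /* ::1 */
}
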
@@ -721,25 +828,30 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
721 case 'F': 828 case 'F':
722 case 'f': 829 case 'f':
723 ptr = dereference_function_descriptor(ptr); 830 ptr = dereference_function_descriptor(ptr);
831 case 's':
724 /* Fallthrough */ 832 /* Fallthrough */
725 case 'S': 833 case 'S':
726 return symbol_string(buf, end, ptr, spec, *fmt); 834 return symbol_string(buf, end, ptr, spec, *fmt);
727 case 'R': 835 case 'R':
728 return resource_string(buf, end, ptr, spec); 836 return resource_string(buf, end, ptr, spec);
729 case 'm': 837 case 'M': /* Colon separated: 00:01:02:03:04:05 */
730 spec.flags |= SPECIAL; 838 case 'm': /* Contiguous: 000102030405 */
731 /* Fallthrough */ 839 return mac_address_string(buf, end, ptr, spec, fmt);
732 case 'M': 840 case 'I': /* Formatted IP supported
733 return mac_address_string(buf, end, ptr, spec); 841 * 4: 1.2.3.4
734 case 'i': 842 * 6: 0001:0203:...:0708
735 spec.flags |= SPECIAL; 843 * 6c: 1::708 or 1::1.2.3.4
736 /* Fallthrough */ 844 */
737 case 'I': 845 case 'i': /* Contiguous:
738 if (fmt[1] == '6') 846 * 4: 001.002.003.004
739 return ip6_addr_string(buf, end, ptr, spec); 847 * 6: 000102...0f
740 if (fmt[1] == '4') 848 */
741 return ip4_addr_string(buf, end, ptr, spec); 849 switch (fmt[1]) {
742 spec.flags &= ~SPECIAL; 850 case '6':
851 return ip6_addr_string(buf, end, ptr, spec, fmt);
852 case '4':
853 return ip4_addr_string(buf, end, ptr, spec, fmt);
854 }
743 break; 855 break;
744 } 856 }
745 spec.flags |= SMALL; 857 spec.flags |= SMALL;
@@ -958,10 +1070,12 @@ qualifier:
958 * @args: Arguments for the format string 1070 * @args: Arguments for the format string
959 * 1071 *
960 * This function follows C99 vsnprintf, but has some extensions: 1072 * This function follows C99 vsnprintf, but has some extensions:
961 * %pS output the name of a text symbol 1073 * %pS output the name of a text symbol with offset
1074 * %ps output the name of a text symbol without offset
962 * %pF output the name of a function pointer with its offset 1075 * %pF output the name of a function pointer with its offset
963 * %pf output the name of a function pointer without its offset 1076 * %pf output the name of a function pointer without its offset
964 * %pR output the address range in a struct resource 1077 * %pR output the address range in a struct resource
1078 * %n is ignored
965 * 1079 *
966 * The return value is the number of characters which would 1080 * The return value is the number of characters which would
967 * be generated for the given input, excluding the trailing 1081 * be generated for the given input, excluding the trailing
@@ -983,13 +1097,8 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
983 1097
984 /* Reject out-of-range values early. Large positive sizes are 1098 /* Reject out-of-range values early. Large positive sizes are
985 used for unknown buffer sizes. */ 1099 used for unknown buffer sizes. */
986 if (unlikely((int) size < 0)) { 1100 if (WARN_ON_ONCE((int) size < 0))
987 /* There can be only one.. */
988 static char warn = 1;
989 WARN_ON(warn);
990 warn = 0;
991 return 0; 1101 return 0;
992 }
993 1102
994 str = buf; 1103 str = buf;
995 end = buf + size; 1104 end = buf + size;
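
The WARN_ON_ONCE() conversion above folds the old open-coded static-flag dance into a single macro that both warns once and yields the condition, so it can sit directly in the if (). A userspace sketch of the behaviour being preserved; warn_on_once() here is a stand-in, not the kernel macro:

#include <stdbool.h>
#include <stdio.h>

/* Report the first violation only; later calls stay quiet. */
static bool warn_on_once(bool cond)
{
        static bool warned;

        if (cond && !warned) {
                warned = true;
                fprintf(stderr, "vsnprintf: negative size\n");
        }
        return cond;    /* caller still branches on the condition */
}
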
@@ -1417,11 +1526,7 @@ EXPORT_SYMBOL_GPL(vbin_printf);
1417 * a binary buffer that was generated by vbin_printf. 1526 * a binary buffer that was generated by vbin_printf.
1418 * 1527 *
1419 * The format follows C99 vsnprintf, but has some extensions: 1528 * The format follows C99 vsnprintf, but has some extensions:
1420 * %pS output the name of a text symbol 1529 * see vsnprintf comment for details.
1421 * %pF output the name of a function pointer with its offset
1422 * %pf output the name of a function pointer without its offset
1423 * %pR output the address range in a struct resource
1424 * %n is ignored
1425 * 1530 *
1426 * The return value is the number of characters which would 1531 * The return value is the number of characters which would
1427 * be generated for the given input, excluding the trailing 1532 * be generated for the given input, excluding the trailing
@@ -1439,13 +1544,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1439 1544
1440 struct printf_spec spec = {0}; 1545 struct printf_spec spec = {0};
1441 1546
1442 if (unlikely((int) size < 0)) { 1547 if (WARN_ON_ONCE((int) size < 0))
1443 /* There can be only one.. */
1444 static char warn = 1;
1445 WARN_ON(warn);
1446 warn = 0;
1447 return 0; 1548 return 0;
1448 }
1449 1549
1450 str = buf; 1550 str = buf;
1451 end = buf + size; 1551 end = buf + size;
@@ -1671,7 +1771,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1671 * advance both strings to next white space 1771 * advance both strings to next white space
1672 */ 1772 */
1673 if (*fmt == '*') { 1773 if (*fmt == '*') {
1674 while (!isspace(*fmt) && *fmt) 1774 while (!isspace(*fmt) && *fmt != '%' && *fmt)
1675 fmt++; 1775 fmt++;
1676 while (!isspace(*str) && *str) 1776 while (!isspace(*str) && *str)
1677 str++; 1777 str++;
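
The added *fmt != '%' test matters when a suppressed conversion is immediately followed by another specifier, e.g. "%*s%d": the old loop skipped every non-space character after the '*' and would run straight through the next '%', eating that conversion. A userspace illustration of the intended behaviour (libc's sscanf already handles this case):

#include <stdio.h>

int main(void)
{
        int n = 0;

        /* "%*s" consumes and discards "skipme"; "%d" must still run. */
        sscanf("skipme 42", "%*s%d", &n);
        printf("%d\n", n);      /* 42 */
        return 0;
}
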
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index c3e4a2baf835..46a31e5f49c3 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -135,7 +135,7 @@ static const config configuration_table[10] = {
135 135
136/* =========================================================================== 136/* ===========================================================================
137 * Update a hash value with the given input byte 137 * Update a hash value with the given input byte
138 * IN assertion: all calls to to UPDATE_HASH are made with consecutive 138 * IN assertion: all calls to UPDATE_HASH are made with consecutive
139 * input characters, so that a running hash key can be computed from the 139 * input characters, so that a running hash key can be computed from the
140 * previous key instead of complete recalculation each time. 140 * previous key instead of complete recalculation each time.
141 */ 141 */
@@ -146,7 +146,7 @@ static const config configuration_table[10] = {
146 * Insert string str in the dictionary and set match_head to the previous head 146 * Insert string str in the dictionary and set match_head to the previous head
147 * of the hash chain (the most recent string with same hash key). Return 147 * of the hash chain (the most recent string with same hash key). Return
148 * the previous length of the hash chain. 148 * the previous length of the hash chain.
149 * IN assertion: all calls to to INSERT_STRING are made with consecutive 149 * IN assertion: all calls to INSERT_STRING are made with consecutive
150 * input characters and the first MIN_MATCH bytes of str are valid 150 * input characters and the first MIN_MATCH bytes of str are valid
151 * (except for the last MIN_MATCH-1 bytes of the input file). 151 * (except for the last MIN_MATCH-1 bytes of the input file).
152 */ 152 */
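
Both corrected comments describe the same invariant: because every call sees consecutive input bytes, the hash over the last MIN_MATCH bytes is updated incrementally instead of being recomputed. A sketch of that rolling update, in the spirit of deflate's UPDATE_HASH; the shift and mask values here are illustrative, not zlib's exact configuration:

#define HASH_BITS  15
#define HASH_MASK  ((1U << HASH_BITS) - 1)
#define HASH_SHIFT 5    /* roughly HASH_BITS / MIN_MATCH */

/* Mix the next input byte into the running hash key. */
static unsigned int update_hash(unsigned int h, unsigned char c)
{
        return ((h << HASH_SHIFT) ^ c) & HASH_MASK;
}

With these values a byte is shifted out of the masked key after three updates, so the key always depends on just the last three input bytes, which is exactly the window INSERT_STRING needs to index the dictionary cheaply.
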