Diffstat (limited to 'lib')
-rw-r--r--    lib/Kconfig.debug             42
-rw-r--r--    lib/Kconfig.kmemcheck          3
-rw-r--r--    lib/decompress_inflate.c       8
-rw-r--r--    lib/decompress_unlzma.c       10
-rw-r--r--    lib/flex_array.c             121
-rw-r--r--    lib/inflate.c                  2
-rw-r--r--    lib/is_single_threaded.c      61
-rw-r--r--    lib/swiotlb.c                124
-rw-r--r--    lib/vsprintf.c               229
-rw-r--r--    lib/zlib_deflate/deflate.c     4
10 files changed, 396 insertions(+), 208 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 12327b2bb785..891155817bc6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -50,6 +50,14 @@ config MAGIC_SYSRQ
          keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
          unless you really know what this hack does.

+config STRIP_ASM_SYMS
+        bool "Strip assembler-generated symbols during link"
+        default n
+        help
+          Strip internal assembler-generated symbols during a link (symbols
+          that look like '.Lxxx') so they don't pollute the output of
+          get_wchan() and suchlike.
+
 config UNUSED_SYMBOLS
        bool "Enable unused/obsolete exported symbols"
        default y if X86
@@ -338,7 +346,7 @@ config SLUB_STATS
 
 config DEBUG_KMEMLEAK
        bool "Kernel memory leak detector"
-       depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \
+       depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM || PPC) && \
                !MEMORY_HOTPLUG
        select DEBUG_FS if SYSFS
        select STACKTRACE if STACKTRACE_SUPPORT
@@ -653,6 +661,21 @@ config DEBUG_NOTIFIERS
          This is a relatively cheap check but if you care about maximum
          performance, say N.
 
+config DEBUG_CREDENTIALS
+        bool "Debug credential management"
+        depends on DEBUG_KERNEL
+        help
+          Enable this to turn on some debug checking for credential
+          management.  The additional code keeps track of the number of
+          pointers from task_structs to any given cred struct, and checks to
+          see that this number never exceeds the usage count of the cred
+          struct.
+
+          Furthermore, if SELinux is enabled, this also checks that the
+          security pointer in the cred struct is never seen to be invalid.
+
+          If unsure, say N.
+
 #
 # Select this config option from the architecture Kconfig, if it
 # it is preferred to always offer frame pointers as a config
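The invariant described in the DEBUG_CREDENTIALS help text can be restated compactly. The sketch below is illustrative only -- it is not the checking code this option compiles in, and the struct and helper names are made up (the real fields live in struct cred):

/*
 * Illustrative sketch of the DEBUG_CREDENTIALS invariant: the number of
 * task_structs pointing at a cred must never exceed its usage count.
 * Not kernel code; struct and helper names are hypothetical.
 */
#include <linux/kernel.h>
#include <asm/atomic.h>

struct cred_like {
        atomic_t usage;         /* references of any kind */
        atomic_t subscribers;   /* task_structs pointing at this cred */
};

static inline void check_cred_subscribers(struct cred_like *cred)
{
        WARN_ON_ONCE(atomic_read(&cred->subscribers) >
                     atomic_read(&cred->usage));
}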
@@ -725,7 +748,7 @@ config RCU_TORTURE_TEST_RUNNABLE
 
 config RCU_CPU_STALL_DETECTOR
        bool "Check for stalled CPUs delaying RCU grace periods"
-       depends on CLASSIC_RCU || TREE_RCU
+       depends on TREE_RCU || TREE_PREEMPT_RCU
        default n
        help
          This option causes RCU to printk information on which
@@ -790,6 +813,21 @@ config DEBUG_BLOCK_EXT_DEVT
 
          Say N if you are unsure.
 
+config DEBUG_FORCE_WEAK_PER_CPU
+        bool "Force weak per-cpu definitions"
+        depends on DEBUG_KERNEL
+        help
+          s390 and alpha require percpu variables in modules to be
+          defined weak to work around addressing range issue which
+          puts the following two restrictions on percpu variable
+          definitions.
+
+          1. percpu symbols must be unique whether static or not
+          2. percpu variables can't be defined inside a function
+
+          To ensure that generic code follows the above rules, this
+          option forces all percpu variables to be defined as weak.
+
 config LKDTM
        tristate "Linux Kernel Dump Test Tool Module"
        depends on DEBUG_KERNEL
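The two rules in the DEBUG_FORCE_WEAK_PER_CPU help text are easiest to see in module code. A minimal sketch, using the real DEFINE_PER_CPU/get_cpu_var API but made-up variable names:

#include <linux/percpu.h>

/* Rule 1: the symbol name must be unique kernel-wide, static or not. */
static DEFINE_PER_CPU(unsigned long, demo_hits);

void demo_count_hit(void)
{
        get_cpu_var(demo_hits)++;       /* disables preemption while used */
        put_cpu_var(demo_hits);

        /*
         * Rule 2: a function-local definition like
         *
         *      static DEFINE_PER_CPU(int, bad_local);
         *
         * is not allowed; forcing weak definitions makes such
         * violations fail at build time on all architectures.
         */
}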
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
index 603c81b66549..846e039a86b4 100644
--- a/lib/Kconfig.kmemcheck
+++ b/lib/Kconfig.kmemcheck
@@ -1,6 +1,8 @@
 config HAVE_ARCH_KMEMCHECK
        bool
 
+if HAVE_ARCH_KMEMCHECK
+
 menuconfig KMEMCHECK
        bool "kmemcheck: trap use of uninitialized memory"
        depends on DEBUG_KERNEL
@@ -89,3 +91,4 @@ config KMEMCHECK_BITOPS_OK
          accesses where not all the bits are initialized at the same time.
          This may also hide some real bugs.
 
+endif
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 68dfce59c1b8..fc686c7a0a0d 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -27,6 +27,11 @@
 
 #define GZIP_IOBUF_SIZE (16*1024)
 
+static int nofill(void *buffer, unsigned int len)
+{
+        return -1;
+}
+
 /* Included from initramfs et al code */
 STATIC int INIT gunzip(unsigned char *buf, int len,
                       int(*fill)(void*, unsigned int),
@@ -76,6 +81,9 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
                goto gunzip_nomem4;
        }
 
+       if (!fill)
+               fill = nofill;
+
        if (len == 0)
                len = fill(zbuf, GZIP_IOBUF_SIZE);
 
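With the nofill() fallback above, callers that already hold the entire gzip image in memory may pass a NULL fill callback. A hypothetical caller sketch (demo_flush and demo_error are stand-in callbacks; only gunzip() itself is from this file):

static void demo_error(char *msg)
{
        printk(KERN_ERR "decompress: %s\n", msg);
}

static int demo_flush(void *data, unsigned int len)
{
        return len;             /* pretend the output was consumed */
}

static int demo_unpack(unsigned char *image, int len, unsigned char *out)
{
        /*
         * fill == NULL: gunzip() substitutes nofill(), which fails any
         * refill attempt, so the stream must be complete in 'image'.
         */
        return gunzip(image, len, NULL, demo_flush, out, NULL, demo_error);
}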
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 0b954e04bd30..ca82fde81c8f 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -82,6 +82,11 @@ struct rc {
 #define RC_MODEL_TOTAL_BITS    11
 
 
+static int nofill(void *buffer, unsigned int len)
+{
+        return -1;
+}
+
 /* Called twice: once at startup and once in rc_normalize() */
 static void INIT rc_read(struct rc *rc)
 {
@@ -97,7 +102,10 @@ static inline void INIT rc_init(struct rc *rc,
                               int (*fill)(void*, unsigned int),
                               char *buffer, int buffer_size)
 {
-       rc->fill = fill;
+       if (fill)
+               rc->fill = fill;
+       else
+               rc->fill = nofill;
        rc->buffer = (uint8_t *)buffer;
        rc->buffer_size = buffer_size;
        rc->buffer_end = rc->buffer + rc->buffer_size;
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 7baed2fc3bc8..66eef2e4483e 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -28,23 +28,6 @@ struct flex_array_part {
        char elements[FLEX_ARRAY_PART_SIZE];
 };
 
-static inline int __elements_per_part(int element_size)
-{
-       return FLEX_ARRAY_PART_SIZE / element_size;
-}
-
-static inline int bytes_left_in_base(void)
-{
-       int element_offset = offsetof(struct flex_array, parts);
-       int bytes_left = FLEX_ARRAY_BASE_SIZE - element_offset;
-       return bytes_left;
-}
-
-static inline int nr_base_part_ptrs(void)
-{
-       return bytes_left_in_base() / sizeof(struct flex_array_part *);
-}
-
 /*
  * If a user requests an allocation which is small
  * enough, we may simply use the space in the
@@ -54,7 +37,7 @@ static inline int nr_base_part_ptrs(void)
 static inline int elements_fit_in_base(struct flex_array *fa)
 {
        int data_size = fa->element_size * fa->total_nr_elements;
-       if (data_size <= bytes_left_in_base())
+       if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT)
                return 1;
        return 0;
 }
@@ -63,6 +46,7 @@ static inline int elements_fit_in_base(struct flex_array *fa)
  * flex_array_alloc - allocate a new flexible array
  * @element_size:      the size of individual elements in the array
  * @total:             total number of elements that this should hold
+ * @flags:             page allocation flags to use for base array
  *
  * Note: all locking must be provided by the caller.
  *
@@ -103,7 +87,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
                        gfp_t flags)
 {
        struct flex_array *ret;
-       int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
+       int max_size = FLEX_ARRAY_NR_BASE_PTRS *
+                               FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
 
        /* max_size will end up 0 if element_size > PAGE_SIZE */
        if (total > max_size)
@@ -113,17 +98,21 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
                return NULL;
        ret->element_size = element_size;
        ret->total_nr_elements = total;
+       if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
+               memset(ret->parts[0], FLEX_ARRAY_FREE,
+                                       FLEX_ARRAY_BASE_BYTES_LEFT);
        return ret;
 }
 
 static int fa_element_to_part_nr(struct flex_array *fa,
                                        unsigned int element_nr)
 {
-       return element_nr / __elements_per_part(fa->element_size);
+       return element_nr / FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
 }
 
 /**
  * flex_array_free_parts - just free the second-level pages
+ * @fa:                the flex array from which to free parts
  *
  * This is to be used in cases where the base 'struct flex_array'
  * has been statically allocated and should not be free.
@@ -131,11 +120,10 @@ static int fa_element_to_part_nr(struct flex_array *fa,
 void flex_array_free_parts(struct flex_array *fa)
 {
        int part_nr;
-       int max_part = nr_base_part_ptrs();
 
        if (elements_fit_in_base(fa))
                return;
-       for (part_nr = 0; part_nr < max_part; part_nr++)
+       for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
                kfree(fa->parts[part_nr]);
 }
 
@@ -150,7 +138,8 @@ static unsigned int index_inside_part(struct flex_array *fa,
 {
        unsigned int part_offset;
 
-       part_offset = element_nr % __elements_per_part(fa->element_size);
+       part_offset = element_nr %
+                               FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
        return part_offset * fa->element_size;
 }
 
@@ -159,15 +148,12 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
 {
        struct flex_array_part *part = fa->parts[part_nr];
        if (!part) {
-               /*
-                * This leaves the part pages uninitialized
-                * and with potentially random data, just
-                * as if the user had kmalloc()'d the whole.
-                * __GFP_ZERO can be used to zero it.
-                */
-               part = kmalloc(FLEX_ARRAY_PART_SIZE, flags);
+               part = kmalloc(sizeof(struct flex_array_part), flags);
                if (!part)
                        return NULL;
+               if (!(flags & __GFP_ZERO))
+                       memset(part, FLEX_ARRAY_FREE,
+                               sizeof(struct flex_array_part));
                fa->parts[part_nr] = part;
        }
        return part;
@@ -175,9 +161,12 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
 
 /**
  * flex_array_put - copy data into the array at @element_nr
- * @src:       address of data to copy into the array
+ * @fa:        the flex array to copy data into
  * @element_nr:        index of the position in which to insert
  *             the new element.
+ * @src:       address of data to copy into the array
+ * @flags:     page allocation flags to use for array expansion
+ *
  *
  * Note that this *copies* the contents of @src into
  * the array.  If you are trying to store an array of
@@ -207,9 +196,38 @@ int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
 }
 
 /**
+ * flex_array_clear - clear element in array at @element_nr
+ * @fa:        the flex array of the element.
+ * @element_nr:        index of the position to clear.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
+{
+       int part_nr = fa_element_to_part_nr(fa, element_nr);
+       struct flex_array_part *part;
+       void *dst;
+
+       if (element_nr >= fa->total_nr_elements)
+               return -ENOSPC;
+       if (elements_fit_in_base(fa))
+               part = (struct flex_array_part *)&fa->parts[0];
+       else {
+               part = fa->parts[part_nr];
+               if (!part)
+                       return -EINVAL;
+       }
+       dst = &part->elements[index_inside_part(fa, element_nr)];
+       memset(dst, FLEX_ARRAY_FREE, fa->element_size);
+       return 0;
+}
+
+/**
  * flex_array_prealloc - guarantee that array space exists
+ * @fa:        the flex array for which to preallocate parts
  * @start:     index of first array element for which space is allocated
  * @end:       index of last (inclusive) element for which space is allocated
+ * @flags:     page allocation flags
  *
  * This will guarantee that no future calls to flex_array_put()
  * will allocate memory.  It can be used if you are expecting to
@@ -242,6 +260,7 @@ int flex_array_prealloc(struct flex_array *fa, unsigned int start,
 
 /**
  * flex_array_get - pull data back out of the array
+ * @fa:        the flex array from which to extract data
  * @element_nr:        index of the element to fetch from the array
  *
  * Returns a pointer to the data at index @element_nr.  Note
@@ -266,3 +285,43 @@ void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
        }
        return &part->elements[index_inside_part(fa, element_nr)];
 }
+
+static int part_is_free(struct flex_array_part *part)
+{
+       int i;
+
+       for (i = 0; i < sizeof(struct flex_array_part); i++)
+               if (part->elements[i] != FLEX_ARRAY_FREE)
+                       return 0;
+       return 1;
+}
+
+/**
+ * flex_array_shrink - free unused second-level pages
+ * @fa:        the flex array to shrink
+ *
+ * Frees all second-level pages that consist solely of unused
+ * elements.  Returns the number of pages freed.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_shrink(struct flex_array *fa)
+{
+       struct flex_array_part *part;
+       int part_nr;
+       int ret = 0;
+
+       if (elements_fit_in_base(fa))
+               return ret;
+       for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
+               part = fa->parts[part_nr];
+               if (!part)
+                       continue;
+               if (part_is_free(part)) {
+                       fa->parts[part_nr] = NULL;
+                       kfree(part);
+                       ret++;
+               }
+       }
+       return ret;
+}
diff --git a/lib/inflate.c b/lib/inflate.c
index 1a8e8a978128..d10255973a9f 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -7,7 +7,7 @@
  * Adapted for booting Linux by Hannu Savolainen 1993
  * based on gzip-1.0.3
  *
- * Nicolas Pitre <nico@cam.org>, 1999/04/14 :
+ * Nicolas Pitre <nico@fluxnic.net>, 1999/04/14 :
  *   Little mods for all variable to reside either into rodata or bss segments
  *   by marking constant variables with 'const' and initializing all the others
  *   at run-time only.  This allows for the kernel uncompressor to run
diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
index f1ed2fe76c65..bd2bea963364 100644
--- a/lib/is_single_threaded.c
+++ b/lib/is_single_threaded.c
@@ -12,34 +12,47 @@
 
 #include <linux/sched.h>
 
-/**
- * is_single_threaded - Determine if a thread group is single-threaded or not
- * @p: A task in the thread group in question
- *
- * This returns true if the thread group to which a task belongs is single
- * threaded, false if it is not.
+/*
+ * Returns true if the task does not share ->mm with another thread/process.
  */
-bool is_single_threaded(struct task_struct *p)
+bool current_is_single_threaded(void)
 {
-       struct task_struct *g, *t;
-       struct mm_struct *mm = p->mm;
+       struct task_struct *task = current;
+       struct mm_struct *mm = task->mm;
+       struct task_struct *p, *t;
+       bool ret;
 
-       if (atomic_read(&p->signal->count) != 1)
-               goto no;
+       if (atomic_read(&task->signal->live) != 1)
+               return false;
 
-       if (atomic_read(&p->mm->mm_users) != 1) {
-               read_lock(&tasklist_lock);
-               do_each_thread(g, t) {
-                       if (t->mm == mm && t != p)
-                               goto no_unlock;
-               } while_each_thread(g, t);
-               read_unlock(&tasklist_lock);
-       }
+       if (atomic_read(&mm->mm_users) == 1)
+               return true;
 
-       return true;
+       ret = false;
+       rcu_read_lock();
+       for_each_process(p) {
+               if (unlikely(p->flags & PF_KTHREAD))
+                       continue;
+               if (unlikely(p == task->group_leader))
+                       continue;
+
+               t = p;
+               do {
+                       if (unlikely(t->mm == mm))
+                               goto found;
+                       if (likely(t->mm))
+                               break;
+                       /*
+                        * t->mm == NULL. Make sure next_thread/next_task
+                        * will see other CLONE_VM tasks which might be
+                        * forked before exiting.
+                        */
+                       smp_rmb();
+               } while_each_thread(p, t);
+       }
+       ret = true;
+found:
+       rcu_read_unlock();
 
-no_unlock:
-       read_unlock(&tasklist_lock);
-no:
-       return false;
+       return ret;
 }
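The rewritten helper answers "can anything else be using current->mm?" without taking tasklist_lock, relying on RCU plus the ->signal->live and mm_users fast paths. A hypothetical caller pattern, for illustration only:

static int demo_do_unshared_work(void)
{
        if (!current_is_single_threaded())
                return -EBUSY;  /* ->mm is, or may become, shared */

        /* safe to assume no other task is using current->mm */
        return 0;
}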
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bffe6d7ef9d9..ac25cd28e807 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -114,46 +114,11 @@ setup_io_tlb_npages(char *str)
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
-void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
-{
-       return alloc_bootmem_low_pages(size);
-}
-
-void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
-{
-       return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
-}
-
-dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
-{
-       return paddr;
-}
-
-phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
-{
-       return baddr;
-}
-
+/* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
                                      volatile void *address)
 {
-       return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
-}
-
-void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
-{
-       return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
-}
-
-int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
-                                             dma_addr_t addr, size_t size)
-{
-       return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
-}
-
-int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
-{
-       return 0;
+       return phys_to_dma(hwdev, virt_to_phys(address));
 }
 
 static void swiotlb_print_info(unsigned long bytes)
@@ -189,7 +154,7 @@ swiotlb_init_with_default_size(size_t default_size)
        /*
         * Get IO TLB memory from the low pages
         */
-       io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
+       io_tlb_start = alloc_bootmem_low_pages(bytes);
        if (!io_tlb_start)
                panic("Cannot allocate SWIOTLB buffer");
        io_tlb_end = io_tlb_start + bytes;
@@ -245,7 +210,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
        bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-               io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
+               io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+                                                       order);
                if (io_tlb_start)
                        break;
                order--;
@@ -315,20 +281,10 @@ cleanup1:
        return -ENOMEM;
 }
 
-static inline int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
+static int is_swiotlb_buffer(phys_addr_t paddr)
 {
-       return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
+       return paddr >= virt_to_phys(io_tlb_start) &&
+              paddr < virt_to_phys(io_tlb_end);
 }
-
-static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
-{
-       return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
-}
-
-static int is_swiotlb_buffer(char *addr)
-{
-       return addr >= io_tlb_start && addr < io_tlb_end;
-}
 
 /*
@@ -561,9 +517,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
        dma_mask = hwdev->coherent_dma_mask;
 
        ret = (void *)__get_free_pages(flags, order);
-       if (ret &&
-           !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
-                                  size)) {
+       if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
                /*
                 * The allocated memory isn't reachable by the device.
                 */
@@ -585,7 +539,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
        dev_addr = swiotlb_virt_to_bus(hwdev, ret);
 
        /* Confirm address can be DMA'd by device */
-       if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
+       if (dev_addr + size > dma_mask) {
                printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
                       (unsigned long long)dma_mask,
                       (unsigned long long)dev_addr);
@@ -601,11 +555,13 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
 void
 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-                     dma_addr_t dma_handle)
+                     dma_addr_t dev_addr)
 {
+       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+
        WARN_ON(irqs_disabled());
-       if (!is_swiotlb_buffer(vaddr))
-               free_pages((unsigned long) vaddr, get_order(size));
+       if (!is_swiotlb_buffer(paddr))
+               free_pages((unsigned long)vaddr, get_order(size));
        else
                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
                do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
@@ -625,12 +581,15 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
        printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
               "device %s\n", size, dev ? dev_name(dev) : "?");
 
-       if (size > io_tlb_overflow && do_panic) {
-               if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
-                       panic("DMA: Memory would be corrupted\n");
-               if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-                       panic("DMA: Random memory would be DMAed\n");
-       }
+       if (size <= io_tlb_overflow || !do_panic)
+               return;
+
+       if (dir == DMA_BIDIRECTIONAL)
+               panic("DMA: Random memory could be DMA accessed\n");
+       if (dir == DMA_FROM_DEVICE)
+               panic("DMA: Random memory could be DMA written\n");
+       if (dir == DMA_TO_DEVICE)
+               panic("DMA: Random memory could be DMA read\n");
 }
 
 /*
@@ -646,7 +605,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                            struct dma_attrs *attrs)
 {
        phys_addr_t phys = page_to_phys(page) + offset;
-       dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
+       dma_addr_t dev_addr = phys_to_dma(dev, phys);
        void *map;
 
        BUG_ON(dir == DMA_NONE);
@@ -655,8 +614,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
-       if (!address_needs_mapping(dev, dev_addr, size) &&
-           !range_needs_mapping(phys, size))
+       if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
                return dev_addr;
 
        /*
@@ -673,7 +631,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
        /*
         * Ensure that the address returned is DMA'ble
         */
-       if (address_needs_mapping(dev, dev_addr, size))
+       if (!dma_capable(dev, dev_addr, size))
                panic("map_single: bounce buffer is not DMA'ble");
 
        return dev_addr;
@@ -691,19 +649,25 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                         size_t size, int dir)
 {
-       char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
        BUG_ON(dir == DMA_NONE);
 
-       if (is_swiotlb_buffer(dma_addr)) {
-               do_unmap_single(hwdev, dma_addr, size, dir);
+       if (is_swiotlb_buffer(paddr)) {
+               do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
                return;
        }
 
        if (dir != DMA_FROM_DEVICE)
                return;
 
-       dma_mark_clean(dma_addr, size);
+       /*
+        * phys_to_virt doesn't work with highmem pages, but we could
+        * call dma_mark_clean() with a highmem page here. However, we
+        * are fine since dma_mark_clean() is a no-op on POWERPC. We can
+        * make dma_mark_clean() take a physical address if necessary.
+        */
+       dma_mark_clean(phys_to_virt(paddr), size);
 }
 
 void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
@@ -728,19 +692,19 @@ static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                    size_t size, int dir, int target)
 {
-       char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
        BUG_ON(dir == DMA_NONE);
 
-       if (is_swiotlb_buffer(dma_addr)) {
-               sync_single(hwdev, dma_addr, size, dir, target);
+       if (is_swiotlb_buffer(paddr)) {
+               sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
                return;
        }
 
        if (dir != DMA_FROM_DEVICE)
                return;
 
-       dma_mark_clean(dma_addr, size);
+       dma_mark_clean(phys_to_virt(paddr), size);
 }
 
 void
@@ -817,10 +781,10 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 
        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
-               dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);
+               dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
 
-               if (range_needs_mapping(paddr, sg->length) ||
-                   address_needs_mapping(hwdev, dev_addr, sg->length)) {
+               if (swiotlb_force ||
+                   !dma_capable(hwdev, dev_addr, sg->length)) {
                        void *map = map_single(hwdev, sg_phys(sg),
                                               sg->length, dir);
                        if (!map) {
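After this cleanup the decision to bounce comes down to the generic dma_capable() helper plus the global swiotlb_force flag, as seen in swiotlb_map_page() and swiotlb_map_sg_attrs() above. A condensed restatement for reference only (dma_capable() and swiotlb_force are the real symbols; the wrapper name is made up):

static inline bool demo_needs_bounce(struct device *dev, dma_addr_t dev_addr,
                                     size_t size)
{
        return swiotlb_force || !dma_capable(dev, dev_addr, size);
}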
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 756ccafa9cec..73a14b8c6d1f 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -25,6 +25,7 @@
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
 #include <linux/ioport.h>
+#include <net/addrconf.h>
 
 #include <asm/page.h>          /* for PAGE_SIZE */
 #include <asm/div64.h>
@@ -580,7 +581,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
        unsigned long value = (unsigned long) ptr;
 #ifdef CONFIG_KALLSYMS
        char sym[KSYM_SYMBOL_LEN];
-       if (ext != 'f')
+       if (ext != 'f' && ext != 's')
                sprint_symbol(sym, value);
        else
                kallsyms_lookup(value, NULL, NULL, NULL, sym);
@@ -630,60 +631,156 @@ static char *resource_string(char *buf, char *end, struct resource *res,
 }
 
 static char *mac_address_string(char *buf, char *end, u8 *addr,
-                               struct printf_spec spec)
+                               struct printf_spec spec, const char *fmt)
 {
-       char mac_addr[6 * 3]; /* (6 * 2 hex digits), 5 colons and trailing zero */
+       char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
        char *p = mac_addr;
        int i;
 
        for (i = 0; i < 6; i++) {
                p = pack_hex_byte(p, addr[i]);
-               if (!(spec.flags & SPECIAL) && i != 5)
+               if (fmt[0] == 'M' && i != 5)
                        *p++ = ':';
        }
        *p = '\0';
-       spec.flags &= ~SPECIAL;
 
        return string(buf, end, mac_addr, spec);
 }
 
-static char *ip6_addr_string(char *buf, char *end, u8 *addr,
-                            struct printf_spec spec)
+static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
 {
-       char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */
-       char *p = ip6_addr;
        int i;
 
+       for (i = 0; i < 4; i++) {
+               char temp[3];   /* hold each IP quad in reverse order */
+               int digits = put_dec_trunc(temp, addr[i]) - temp;
+               if (leading_zeros) {
+                       if (digits < 3)
+                               *p++ = '0';
+                       if (digits < 2)
+                               *p++ = '0';
+               }
+               /* reverse the digits in the quad */
+               while (digits--)
+                       *p++ = temp[digits];
+               if (i < 3)
+                       *p++ = '.';
+       }
+
+       *p = '\0';
+       return p;
+}
+
+static char *ip6_compressed_string(char *p, const struct in6_addr *addr)
+{
+       int i;
+       int j;
+       int range;
+       unsigned char zerolength[8];
+       int longest = 1;
+       int colonpos = -1;
+       u16 word;
+       u8 hi;
+       u8 lo;
+       bool needcolon = false;
+       bool useIPv4 = ipv6_addr_v4mapped(addr) || ipv6_addr_is_isatap(addr);
+
+       memset(zerolength, 0, sizeof(zerolength));
+
+       if (useIPv4)
+               range = 6;
+       else
+               range = 8;
+
+       /* find position of longest 0 run */
+       for (i = 0; i < range; i++) {
+               for (j = i; j < range; j++) {
+                       if (addr->s6_addr16[j] != 0)
+                               break;
+                       zerolength[i]++;
+               }
+       }
+       for (i = 0; i < range; i++) {
+               if (zerolength[i] > longest) {
+                       longest = zerolength[i];
+                       colonpos = i;
+               }
+       }
+
+       /* emit address */
+       for (i = 0; i < range; i++) {
+               if (i == colonpos) {
+                       if (needcolon || i == 0)
+                               *p++ = ':';
+                       *p++ = ':';
+                       needcolon = false;
+                       i += longest - 1;
+                       continue;
+               }
+               if (needcolon) {
+                       *p++ = ':';
+                       needcolon = false;
+               }
+               /* hex u16 without leading 0s */
+               word = ntohs(addr->s6_addr16[i]);
+               hi = word >> 8;
+               lo = word & 0xff;
+               if (hi) {
+                       if (hi > 0x0f)
+                               p = pack_hex_byte(p, hi);
+                       else
+                               *p++ = hex_asc_lo(hi);
+               }
+               if (hi || lo > 0x0f)
+                       p = pack_hex_byte(p, lo);
+               else
+                       *p++ = hex_asc_lo(lo);
+               needcolon = true;
+       }
+
+       if (useIPv4) {
+               if (needcolon)
+                       *p++ = ':';
+               p = ip4_string(p, &addr->s6_addr[12], false);
+       }
+
+       *p = '\0';
+       return p;
+}
+
+static char *ip6_string(char *p, const struct in6_addr *addr, const char *fmt)
+{
+       int i;
        for (i = 0; i < 8; i++) {
-               p = pack_hex_byte(p, addr[2 * i]);
-               p = pack_hex_byte(p, addr[2 * i + 1]);
-               if (!(spec.flags & SPECIAL) && i != 7)
+               p = pack_hex_byte(p, addr->s6_addr[2 * i]);
+               p = pack_hex_byte(p, addr->s6_addr[2 * i + 1]);
+               if (fmt[0] == 'I' && i != 7)
                        *p++ = ':';
        }
+
        *p = '\0';
-       spec.flags &= ~SPECIAL;
+       return p;
+}
+
+static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
+                            struct printf_spec spec, const char *fmt)
+{
+       char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
+
+       if (fmt[0] == 'I' && fmt[2] == 'c')
+               ip6_compressed_string(ip6_addr, (const struct in6_addr *)addr);
+       else
+               ip6_string(ip6_addr, (const struct in6_addr *)addr, fmt);
 
        return string(buf, end, ip6_addr, spec);
 }
 
-static char *ip4_addr_string(char *buf, char *end, u8 *addr,
-                            struct printf_spec spec)
+static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
+                            struct printf_spec spec, const char *fmt)
 {
-       char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */
-       char temp[3];   /* hold each IP quad in reverse order */
-       char *p = ip4_addr;
-       int i, digits;
+       char ip4_addr[sizeof("255.255.255.255")];
 
-       for (i = 0; i < 4; i++) {
-               digits = put_dec_trunc(temp, addr[i]) - temp;
-               /* reverse the digits in the quad */
-               while (digits--)
-                       *p++ = temp[digits];
-               if (i != 3)
-                       *p++ = '.';
-       }
-       *p = '\0';
-       spec.flags &= ~SPECIAL;
+       ip4_string(ip4_addr, addr, fmt[0] == 'i');
 
        return string(buf, end, ip4_addr, spec);
 }
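Some hand-worked examples of what ip6_compressed_string() above emits; these are derived from reading the code, not test vectors shipped with the patch:

/*
 * 2001:0db8:0000:0000:0000:0000:0000:0001 -> "2001:db8::1"
 *      (longest zero run collapsed to "::", leading zeros dropped)
 * 0000:0000:0000:0000:0000:0000:0000:0001 -> "::1"
 *      (colonpos == 0, so the run is emitted as a leading "::")
 * ::ffff:192.168.0.1                      -> "::ffff:192.168.0.1"
 *      (ipv6_addr_v4mapped() sets useIPv4, so the last four bytes
 *       are rendered by ip4_string() without leading zeros)
 */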
@@ -697,16 +794,21 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr,
  *
  * - 'F' For symbolic function descriptor pointers with offset
  * - 'f' For simple symbolic function names without offset
- * - 'S' For symbolic direct pointers
+ * - 'S' For symbolic direct pointers with offset
+ * - 's' For symbolic direct pointers without offset
  * - 'R' For a struct resource pointer, it prints the range of
  *       addresses (not the name nor the flags)
  * - 'M' For a 6-byte MAC address, it prints the address in the
  *       usual colon-separated hex notation
- * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way (dot-separated
- *       decimal for v4 and colon separated network-order 16 bit hex for v6)
- * - 'i' [46] for 'raw' IPv4/IPv6 addresses, IPv6 omits the colons, IPv4 is
- *       currently the same
- *
+ * - 'm' For a 6-byte MAC address, it prints the hex address without colons
+ * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
+ *       IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
+ *       IPv6 uses colon separated network-order 16 bit hex with leading 0's
+ * - 'i' [46] for 'raw' IPv4/IPv6 addresses
+ *       IPv6 omits the colons (01020304...0f)
+ *       IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
+ * - 'I6c' for IPv6 addresses printed as specified by
+ *       http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
  * pointer to the real address.
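Illustrative uses of the specifiers documented above (made-up values; printk() and the %p extensions themselves are real):

static void demo_print(void)
{
        u8 mac[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
        u8 ip[4] = { 192, 168, 0, 1 };

        printk(KERN_INFO "%pM\n", mac);         /* 00:01:02:03:04:05 */
        printk(KERN_INFO "%pm\n", mac);         /* 000102030405 */
        printk(KERN_INFO "%pI4\n", ip);         /* 192.168.0.1 */
        printk(KERN_INFO "%pi4\n", ip);         /* 192.168.000.001 */
}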
@@ -721,25 +823,30 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
        case 'F':
        case 'f':
                ptr = dereference_function_descriptor(ptr);
+       case 's':
                /* Fallthrough */
        case 'S':
                return symbol_string(buf, end, ptr, spec, *fmt);
        case 'R':
                return resource_string(buf, end, ptr, spec);
-       case 'm':
-               spec.flags |= SPECIAL;
-               /* Fallthrough */
-       case 'M':
-               return mac_address_string(buf, end, ptr, spec);
-       case 'i':
-               spec.flags |= SPECIAL;
-               /* Fallthrough */
-       case 'I':
-               if (fmt[1] == '6')
-                       return ip6_addr_string(buf, end, ptr, spec);
-               if (fmt[1] == '4')
-                       return ip4_addr_string(buf, end, ptr, spec);
-               spec.flags &= ~SPECIAL;
+       case 'M':                       /* Colon separated: 00:01:02:03:04:05 */
+       case 'm':                       /* Contiguous: 000102030405 */
+               return mac_address_string(buf, end, ptr, spec, fmt);
+       case 'I':                       /* Formatted IP supported
+                                        * 4:   1.2.3.4
+                                        * 6:   0001:0203:...:0708
+                                        * 6c:  1::708 or 1::1.2.3.4
+                                        */
+       case 'i':                       /* Contiguous:
+                                        * 4:   001.002.003.004
+                                        * 6:   000102...0f
+                                        */
+               switch (fmt[1]) {
+               case '6':
+                       return ip6_addr_string(buf, end, ptr, spec, fmt);
+               case '4':
+                       return ip4_addr_string(buf, end, ptr, spec, fmt);
+               }
                break;
        }
        spec.flags |= SMALL;
@@ -958,10 +1065,12 @@ qualifier:
  * @args: Arguments for the format string
  *
  * This function follows C99 vsnprintf, but has some extensions:
- * %pS output the name of a text symbol
+ * %pS output the name of a text symbol with offset
+ * %ps output the name of a text symbol without offset
  * %pF output the name of a function pointer with its offset
  * %pf output the name of a function pointer without its offset
  * %pR output the address range in a struct resource
+ * %n is ignored
  *
  * The return value is the number of characters which would
  * be generated for the given input, excluding the trailing
@@ -983,13 +1092,8 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
983 1092
984 /* Reject out-of-range values early. Large positive sizes are 1093 /* Reject out-of-range values early. Large positive sizes are
985 used for unknown buffer sizes. */ 1094 used for unknown buffer sizes. */
986 if (unlikely((int) size < 0)) { 1095 if (WARN_ON_ONCE((int) size < 0))
987 /* There can be only one.. */
988 static char warn = 1;
989 WARN_ON(warn);
990 warn = 0;
991 return 0; 1096 return 0;
992 }
993 1097
994 str = buf; 1098 str = buf;
995 end = buf + size; 1099 end = buf + size;
@@ -1417,11 +1521,7 @@ EXPORT_SYMBOL_GPL(vbin_printf);
  * a binary buffer that generated by vbin_printf.
  *
  * The format follows C99 vsnprintf, but has some extensions:
- * %pS output the name of a text symbol
- * %pF output the name of a function pointer with its offset
- * %pf output the name of a function pointer without its offset
- * %pR output the address range in a struct resource
- * %n is ignored
+ *  see vsnprintf comment for details.
  *
  * The return value is the number of characters which would
  * be generated for the given input, excluding the trailing
@@ -1439,13 +1539,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
 
        struct printf_spec spec = {0};
 
-       if (unlikely((int) size < 0)) {
-               /* There can be only one.. */
-               static char warn = 1;
-               WARN_ON(warn);
-               warn = 0;
+       if (WARN_ON_ONCE((int) size < 0))
                return 0;
-       }
 
        str = buf;
        end = buf + size;
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index c3e4a2baf835..46a31e5f49c3 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -135,7 +135,7 @@ static const config configuration_table[10] = {
 
 /* ===========================================================================
  * Update a hash value with the given input byte
- * IN  assertion: all calls to to UPDATE_HASH are made with consecutive
+ * IN  assertion: all calls to UPDATE_HASH are made with consecutive
  *    input characters, so that a running hash key can be computed from the
  *    previous key instead of complete recalculation each time.
  */
@@ -146,7 +146,7 @@ static const config configuration_table[10] = {
  * Insert string str in the dictionary and set match_head to the previous head
  * of the hash chain (the most recent string with same hash key). Return
  * the previous length of the hash chain.
- * IN  assertion: all calls to to INSERT_STRING are made with consecutive
+ * IN  assertion: all calls to INSERT_STRING are made with consecutive
  *    input characters and the first MIN_MATCH bytes of str are valid
  *    (except for the last MIN_MATCH-1 bytes of the input file).
  */