Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug          17
-rw-r--r--  lib/bitmap.c               12
-rw-r--r--  lib/dma-debug.c            28
-rw-r--r--  lib/flex_array.c           41
-rw-r--r--  lib/inflate.c               2
-rw-r--r--  lib/is_single_threaded.c   61
-rw-r--r--  lib/lmb.c                   2
-rw-r--r--  lib/swiotlb.c             124
-rw-r--r--  lib/vsprintf.c            199
9 files changed, 296 insertions, 190 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 43173c4e0ade..55d2acc607a1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -653,6 +653,21 @@ config DEBUG_NOTIFIERS
           This is a relatively cheap check but if you care about maximum
           performance, say N.
 
+config DEBUG_CREDENTIALS
+        bool "Debug credential management"
+        depends on DEBUG_KERNEL
+        help
+          Enable this to turn on some debug checking for credential
+          management. The additional code keeps track of the number of
+          pointers from task_structs to any given cred struct, and checks to
+          see that this number never exceeds the usage count of the cred
+          struct.
+
+          Furthermore, if SELinux is enabled, this also checks that the
+          security pointer in the cred struct is never seen to be invalid.
+
+          If unsure, say N.
+
 #
 # Select this config option from the architecture Kconfig, if it
 # it is preferred to always offer frame pointers as a config
@@ -725,7 +740,7 @@ config RCU_TORTURE_TEST_RUNNABLE
 
 config RCU_CPU_STALL_DETECTOR
         bool "Check for stalled CPUs delaying RCU grace periods"
-        depends on CLASSIC_RCU || TREE_RCU
+        depends on TREE_RCU || TREE_PREEMPT_RCU
         default n
         help
           This option causes RCU to printk information on which
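
[Note] DEBUG_CREDENTIALS nests under DEBUG_KERNEL, so enabling it in a
kernel .config takes both symbols. A minimal illustrative fragment (not
part of the patch):

    CONFIG_DEBUG_KERNEL=y
    CONFIG_DEBUG_CREDENTIALS=y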
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 35a1f7ff4149..702565821c99 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -179,14 +179,16 @@ void __bitmap_shift_left(unsigned long *dst,
 }
 EXPORT_SYMBOL(__bitmap_shift_left);
 
-void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
                 const unsigned long *bitmap2, int bits)
 {
         int k;
         int nr = BITS_TO_LONGS(bits);
+        unsigned long result = 0;
 
         for (k = 0; k < nr; k++)
-                dst[k] = bitmap1[k] & bitmap2[k];
+                result |= (dst[k] = bitmap1[k] & bitmap2[k]);
+        return result != 0;
 }
 EXPORT_SYMBOL(__bitmap_and);
 
@@ -212,14 +214,16 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
 }
 EXPORT_SYMBOL(__bitmap_xor);
 
-void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
                         const unsigned long *bitmap2, int bits)
 {
         int k;
         int nr = BITS_TO_LONGS(bits);
+        unsigned long result = 0;
 
         for (k = 0; k < nr; k++)
-                dst[k] = bitmap1[k] & ~bitmap2[k];
+                result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
+        return result != 0;
 }
 EXPORT_SYMBOL(__bitmap_andnot);
 
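
[Note] The point of the new int return is that callers learn whether the
destination ended up empty without rescanning it. A minimal caller sketch,
assuming the bitmap_and() inline wrapper in include/linux/bitmap.h was
updated in the same series to propagate the result code:

    #include <linux/bitmap.h>

    /* Sketch: dst receives a & b; returns nonzero iff any bit is set.
     * Assumes bitmap_and() forwards __bitmap_and()'s result code. */
    static int intersection_nonempty(unsigned long *dst,
                                     const unsigned long *a,
                                     const unsigned long *b, int bits)
    {
            return bitmap_and(dst, a, b, bits);
    }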
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 65b0d99b6d0a..58a9f9fc609a 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -156,9 +156,13 @@ static bool driver_filter(struct device *dev)
                 return true;
 
         /* driver filter on and initialized */
-        if (current_driver && dev->driver == current_driver)
+        if (current_driver && dev && dev->driver == current_driver)
                 return true;
 
+        /* driver filter on, but we can't filter on a NULL device... */
+        if (!dev)
+                return false;
+
         if (current_driver || !current_driver_name[0])
                 return false;
 
@@ -183,17 +187,17 @@ static bool driver_filter(struct device *dev)
         return ret;
 }
 
 #define err_printk(dev, entry, format, arg...) do {                    \
                 error_count += 1;                                      \
                 if (driver_filter(dev) &&                              \
                     (show_all_errors || show_num_errors > 0)) {        \
                         WARN(1, "%s %s: " format,                      \
-                             dev_driver_string(dev),                   \
-                             dev_name(dev) , ## arg);                  \
+                             dev ? dev_driver_string(dev) : "NULL",    \
+                             dev ? dev_name(dev) : "NULL", ## arg);    \
                         dump_entry_trace(entry);                       \
                 }                                                      \
                 if (!show_all_errors && show_num_errors > 0)           \
                         show_num_errors -= 1;                          \
         } while (0);
 
 /*
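
[Note] The NULL checks matter because, at this point in history, the DMA
API could legitimately be called with a NULL struct device (legacy
ISA-style users), and dma-debug intercepts those calls too. A hypothetical
caller sketch, not taken from the patch:

    #include <linux/dma-mapping.h>

    /* Hypothetical legacy-style mapping with no struct device; after this
     * change driver_filter() and err_printk() tolerate dev == NULL. */
    static dma_addr_t legacy_map(void *buf, size_t len)
    {
            return dma_map_single(NULL, buf, len, DMA_TO_DEVICE);
    }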
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 08f1636d296a..7baed2fc3bc8 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -99,7 +99,8 @@ static inline int elements_fit_in_base(struct flex_array *fa)
  * capacity in the base structure. Also note that no effort is made
  * to efficiently pack objects across page boundaries.
  */
-struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
+struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+                                        gfp_t flags)
 {
         struct flex_array *ret;
         int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
@@ -115,16 +116,14 @@ struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
         return ret;
 }
 
-static int fa_element_to_part_nr(struct flex_array *fa, int element_nr)
+static int fa_element_to_part_nr(struct flex_array *fa,
+                                        unsigned int element_nr)
 {
         return element_nr / __elements_per_part(fa->element_size);
 }
 
 /**
  * flex_array_free_parts - just free the second-level pages
- * @src: address of data to copy into the array
- * @element_nr: index of the position in which to insert
- *              the new element.
  *
  * This is to be used in cases where the base 'struct flex_array'
  * has been statically allocated and should not be free.
@@ -146,14 +145,12 @@ void flex_array_free(struct flex_array *fa)
         kfree(fa);
 }
 
-static int fa_index_inside_part(struct flex_array *fa, int element_nr)
+static unsigned int index_inside_part(struct flex_array *fa,
+                                        unsigned int element_nr)
 {
-        return element_nr % __elements_per_part(fa->element_size);
-}
+        unsigned int part_offset;
 
-static int index_inside_part(struct flex_array *fa, int element_nr)
-{
-        int part_offset = fa_index_inside_part(fa, element_nr);
+        part_offset = element_nr % __elements_per_part(fa->element_size);
         return part_offset * fa->element_size;
 }
 
@@ -188,7 +185,8 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
  *
  * Locking must be provided by the caller.
  */
-int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags)
+int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
+                        gfp_t flags)
 {
         int part_nr = fa_element_to_part_nr(fa, element_nr);
         struct flex_array_part *part;
@@ -198,10 +196,11 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
                 return -ENOSPC;
         if (elements_fit_in_base(fa))
                 part = (struct flex_array_part *)&fa->parts[0];
-        else
+        else {
                 part = __fa_get_part(fa, part_nr, flags);
-        if (!part)
-                return -ENOMEM;
+                if (!part)
+                        return -ENOMEM;
+        }
         dst = &part->elements[index_inside_part(fa, element_nr)];
         memcpy(dst, src, fa->element_size);
         return 0;
@@ -219,7 +218,8 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
  *
  * Locking must be provided by the caller.
  */
-int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
+int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+                        unsigned int end, gfp_t flags)
 {
         int start_part;
         int end_part;
@@ -250,18 +250,19 @@ int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
  *
  * Locking must be provided by the caller.
  */
-void *flex_array_get(struct flex_array *fa, int element_nr)
+void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
 {
         int part_nr = fa_element_to_part_nr(fa, element_nr);
         struct flex_array_part *part;
 
         if (element_nr >= fa->total_nr_elements)
                 return NULL;
-        if (!fa->parts[part_nr])
-                return NULL;
         if (elements_fit_in_base(fa))
                 part = (struct flex_array_part *)&fa->parts[0];
-        else
+        else {
                 part = fa->parts[part_nr];
+                if (!part)
+                        return NULL;
+        }
         return &part->elements[index_inside_part(fa, element_nr)];
 }
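
[Note] Putting the reworked API together, a minimal usage sketch (error
paths trimmed; assumes the include/linux/flex_array.h declarations match
the definitions above):

    #include <linux/flex_array.h>

    struct foo { int a, b; };

    static int flex_array_demo(void)
    {
            struct flex_array *fa;
            struct foo v = { 1, 2 }, *p;
            int err;

            fa = flex_array_alloc(sizeof(struct foo), 128, GFP_KERNEL);
            if (!fa)
                    return -ENOMEM;
            err = flex_array_put(fa, 42, &v, GFP_KERNEL); /* copies v in */
            if (!err)
                    p = flex_array_get(fa, 42); /* now NULL-safe when the
                                                 * part was never allocated */
            flex_array_free(fa);
            return err;
    }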
diff --git a/lib/inflate.c b/lib/inflate.c
index 1a8e8a978128..d10255973a9f 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -7,7 +7,7 @@
  * Adapted for booting Linux by Hannu Savolainen 1993
  * based on gzip-1.0.3
  *
- * Nicolas Pitre <nico@cam.org>, 1999/04/14 :
+ * Nicolas Pitre <nico@fluxnic.net>, 1999/04/14 :
  *   Little mods for all variable to reside either into rodata or bss segments
  *   by marking constant variables with 'const' and initializing all the others
  *   at run-time only. This allows for the kernel uncompressor to run
diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
index f1ed2fe76c65..bd2bea963364 100644
--- a/lib/is_single_threaded.c
+++ b/lib/is_single_threaded.c
@@ -12,34 +12,47 @@
 
 #include <linux/sched.h>
 
-/**
- * is_single_threaded - Determine if a thread group is single-threaded or not
- * @p: A task in the thread group in question
- *
- * This returns true if the thread group to which a task belongs is single
- * threaded, false if it is not.
+/*
+ * Returns true if the task does not share ->mm with another thread/process.
  */
-bool is_single_threaded(struct task_struct *p)
+bool current_is_single_threaded(void)
 {
-        struct task_struct *g, *t;
-        struct mm_struct *mm = p->mm;
+        struct task_struct *task = current;
+        struct mm_struct *mm = task->mm;
+        struct task_struct *p, *t;
+        bool ret;
 
-        if (atomic_read(&p->signal->count) != 1)
-                goto no;
+        if (atomic_read(&task->signal->live) != 1)
+                return false;
 
-        if (atomic_read(&p->mm->mm_users) != 1) {
-                read_lock(&tasklist_lock);
-                do_each_thread(g, t) {
-                        if (t->mm == mm && t != p)
-                                goto no_unlock;
-                } while_each_thread(g, t);
-                read_unlock(&tasklist_lock);
-        }
+        if (atomic_read(&mm->mm_users) == 1)
+                return true;
 
-        return true;
+        ret = false;
+        rcu_read_lock();
+        for_each_process(p) {
+                if (unlikely(p->flags & PF_KTHREAD))
+                        continue;
+                if (unlikely(p == task->group_leader))
+                        continue;
+
+                t = p;
+                do {
+                        if (unlikely(t->mm == mm))
+                                goto found;
+                        if (likely(t->mm))
+                                break;
+                        /*
+                         * t->mm == NULL. Make sure next_thread/next_task
+                         * will see other CLONE_VM tasks which might be
+                         * forked before exiting.
+                         */
+                        smp_rmb();
+                } while_each_thread(p, t);
+        }
+        ret = true;
+found:
+        rcu_read_unlock();
 
-no_unlock:
-        read_unlock(&tasklist_lock);
-no:
-        return false;
+        return ret;
 }
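
[Note] The interface changed along with the fix: the helper no longer
takes a task and answers only for current. A hypothetical caller sketch:

    #include <linux/sched.h>

    /*
     * Hypothetical use: if no other task can be sharing current->mm,
     * some credential/LSM state updates can skip cross-thread
     * synchronization.
     */
    static bool can_take_fast_path(void)
    {
            return current_is_single_threaded();
    }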
diff --git a/lib/lmb.c b/lib/lmb.c
index e4a6482d8b26..0343c05609f0 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -429,7 +429,7 @@ u64 __init lmb_phys_mem_size(void)
         return lmb.memory.size;
 }
 
-u64 __init lmb_end_of_DRAM(void)
+u64 lmb_end_of_DRAM(void)
 {
         int idx = lmb.memory.cnt - 1;
 
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bffe6d7ef9d9..ac25cd28e807 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -114,46 +114,11 @@ setup_io_tlb_npages(char *str)
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
-void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
-{
-        return alloc_bootmem_low_pages(size);
-}
-
-void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
-{
-        return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
-}
-
-dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
-{
-        return paddr;
-}
-
-phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
-{
-        return baddr;
-}
-
+/* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
                                       volatile void *address)
 {
-        return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
-}
-
-void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
-{
-        return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
-}
-
-int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
-                                              dma_addr_t addr, size_t size)
-{
-        return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
-}
-
-int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
-{
-        return 0;
+        return phys_to_dma(hwdev, virt_to_phys(address));
 }
 
 static void swiotlb_print_info(unsigned long bytes)
@@ -189,7 +154,7 @@ swiotlb_init_with_default_size(size_t default_size)
         /*
          * Get IO TLB memory from the low pages
          */
-        io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
+        io_tlb_start = alloc_bootmem_low_pages(bytes);
         if (!io_tlb_start)
                 panic("Cannot allocate SWIOTLB buffer");
         io_tlb_end = io_tlb_start + bytes;
@@ -245,7 +210,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
         bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
         while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-                io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
+                io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+                                                        order);
                 if (io_tlb_start)
                         break;
                 order--;
@@ -315,20 +281,10 @@ cleanup1:
         return -ENOMEM;
 }
 
-static inline int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
+static int is_swiotlb_buffer(phys_addr_t paddr)
 {
-        return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
-}
-
-static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
-{
-        return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
-}
-
-static int is_swiotlb_buffer(char *addr)
-{
-        return addr >= io_tlb_start && addr < io_tlb_end;
+        return paddr >= virt_to_phys(io_tlb_start) &&
+               paddr < virt_to_phys(io_tlb_end);
 }
 
 /*
@@ -561,9 +517,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                 dma_mask = hwdev->coherent_dma_mask;
 
         ret = (void *)__get_free_pages(flags, order);
-        if (ret &&
-            !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
-                                   size)) {
+        if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
                 /*
                  * The allocated memory isn't reachable by the device.
                  */
@@ -585,7 +539,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         dev_addr = swiotlb_virt_to_bus(hwdev, ret);
 
         /* Confirm address can be DMA'd by device */
-        if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
+        if (dev_addr + size > dma_mask) {
                 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
                        (unsigned long long)dma_mask,
                        (unsigned long long)dev_addr);
@@ -601,11 +555,13 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
 void
 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-                      dma_addr_t dma_handle)
+                      dma_addr_t dev_addr)
 {
+        phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+
         WARN_ON(irqs_disabled());
-        if (!is_swiotlb_buffer(vaddr))
-                free_pages((unsigned long) vaddr, get_order(size));
+        if (!is_swiotlb_buffer(paddr))
+                free_pages((unsigned long)vaddr, get_order(size));
         else
                 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
                 do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
@@ -625,12 +581,15 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
         printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
                "device %s\n", size, dev ? dev_name(dev) : "?");
 
-        if (size > io_tlb_overflow && do_panic) {
-                if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
-                        panic("DMA: Memory would be corrupted\n");
-                if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-                        panic("DMA: Random memory would be DMAed\n");
-        }
+        if (size <= io_tlb_overflow || !do_panic)
+                return;
+
+        if (dir == DMA_BIDIRECTIONAL)
+                panic("DMA: Random memory could be DMA accessed\n");
+        if (dir == DMA_FROM_DEVICE)
+                panic("DMA: Random memory could be DMA written\n");
+        if (dir == DMA_TO_DEVICE)
+                panic("DMA: Random memory could be DMA read\n");
 }
 
 /*
@@ -646,7 +605,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                         struct dma_attrs *attrs)
 {
         phys_addr_t phys = page_to_phys(page) + offset;
-        dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
+        dma_addr_t dev_addr = phys_to_dma(dev, phys);
         void *map;
 
         BUG_ON(dir == DMA_NONE);
@@ -655,8 +614,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
          * we can safely return the device addr and not worry about bounce
          * buffering it.
          */
-        if (!address_needs_mapping(dev, dev_addr, size) &&
-            !range_needs_mapping(phys, size))
+        if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
                 return dev_addr;
 
@@ -673,7 +631,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
         /*
          * Ensure that the address returned is DMA'ble
          */
-        if (address_needs_mapping(dev, dev_addr, size))
+        if (!dma_capable(dev, dev_addr, size))
                 panic("map_single: bounce buffer is not DMA'ble");
 
         return dev_addr;
@@ -691,19 +649,25 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                          size_t size, int dir)
 {
-        char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+        phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
         BUG_ON(dir == DMA_NONE);
 
-        if (is_swiotlb_buffer(dma_addr)) {
-                do_unmap_single(hwdev, dma_addr, size, dir);
+        if (is_swiotlb_buffer(paddr)) {
+                do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
                 return;
         }
 
         if (dir != DMA_FROM_DEVICE)
                 return;
 
-        dma_mark_clean(dma_addr, size);
+        /*
+         * phys_to_virt doesn't work with highmem pages, but we could
+         * call dma_mark_clean() with a highmem page here. However, we
+         * are fine since dma_mark_clean() is null on POWERPC. We can
+         * make dma_mark_clean() take a physical address if necessary.
+         */
+        dma_mark_clean(phys_to_virt(paddr), size);
 }
 
@@ -728,19 +692,19 @@ static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                     size_t size, int dir, int target)
 {
-        char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+        phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
         BUG_ON(dir == DMA_NONE);
 
-        if (is_swiotlb_buffer(dma_addr)) {
-                sync_single(hwdev, dma_addr, size, dir, target);
+        if (is_swiotlb_buffer(paddr)) {
+                sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
                 return;
         }
 
         if (dir != DMA_FROM_DEVICE)
                 return;
 
-        dma_mark_clean(dma_addr, size);
+        dma_mark_clean(phys_to_virt(paddr), size);
 }
 
 void
@@ -817,10 +781,10 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 
         for_each_sg(sgl, sg, nelems, i) {
                 phys_addr_t paddr = sg_phys(sg);
-                dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);
+                dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
 
-                if (range_needs_mapping(paddr, sg->length) ||
-                    address_needs_mapping(hwdev, dev_addr, sg->length)) {
+                if (swiotlb_force ||
+                    !dma_capable(hwdev, dev_addr, sg->length)) {
                         void *map = map_single(hwdev, sg_phys(sg),
                                                sg->length, dir);
                         if (!map) {
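
[Note] The conversion replaces the removed __weak hooks with the generic
phys_to_dma()/dma_capable() helpers. A rough sketch of their common
no-IOMMU shape, for orientation only (the real versions come from the
architecture's dma-mapping headers; treat this as an assumption, not the
canonical code):

    /* Identity translation, typical when bus address == physical address. */
    static inline dma_addr_t phys_to_dma_sketch(struct device *dev,
                                                phys_addr_t paddr)
    {
            return paddr;
    }

    /* Can the device reach [addr, addr + size) given its DMA mask? */
    static inline bool dma_capable_sketch(struct device *dev,
                                          dma_addr_t addr, size_t size)
    {
            return dev->dma_mask && addr + size <= *dev->dma_mask;
    }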
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 756ccafa9cec..cb8a112030bb 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -25,6 +25,7 @@
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
 #include <linux/ioport.h>
+#include <net/addrconf.h>
 
 #include <asm/page.h>           /* for PAGE_SIZE */
 #include <asm/div64.h>
@@ -630,60 +631,156 @@ static char *resource_string(char *buf, char *end, struct resource *res,
 }
 
 static char *mac_address_string(char *buf, char *end, u8 *addr,
-                                struct printf_spec spec)
+                                struct printf_spec spec, const char *fmt)
 {
-        char mac_addr[6 * 3]; /* (6 * 2 hex digits), 5 colons and trailing zero */
+        char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
         char *p = mac_addr;
         int i;
 
         for (i = 0; i < 6; i++) {
                 p = pack_hex_byte(p, addr[i]);
-                if (!(spec.flags & SPECIAL) && i != 5)
+                if (fmt[0] == 'M' && i != 5)
                         *p++ = ':';
         }
         *p = '\0';
-        spec.flags &= ~SPECIAL;
 
         return string(buf, end, mac_addr, spec);
 }
 
-static char *ip6_addr_string(char *buf, char *end, u8 *addr,
-                             struct printf_spec spec)
+static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
+{
+        int i;
+
+        for (i = 0; i < 4; i++) {
+                char temp[3];   /* hold each IP quad in reverse order */
+                int digits = put_dec_trunc(temp, addr[i]) - temp;
+                if (leading_zeros) {
+                        if (digits < 3)
+                                *p++ = '0';
+                        if (digits < 2)
+                                *p++ = '0';
+                }
+                /* reverse the digits in the quad */
+                while (digits--)
+                        *p++ = temp[digits];
+                if (i < 3)
+                        *p++ = '.';
+        }
+
+        *p = '\0';
+        return p;
+}
+
+static char *ip6_compressed_string(char *p, const struct in6_addr *addr)
 {
-        char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */
-        char *p = ip6_addr;
         int i;
+        int j;
+        int range;
+        unsigned char zerolength[8];
+        int longest = 1;
+        int colonpos = -1;
+        u16 word;
+        u8 hi;
+        u8 lo;
+        bool needcolon = false;
+        bool useIPv4 = ipv6_addr_v4mapped(addr) || ipv6_addr_is_isatap(addr);
+
+        memset(zerolength, 0, sizeof(zerolength));
+
+        if (useIPv4)
+                range = 6;
+        else
+                range = 8;
+
+        /* find position of longest 0 run */
+        for (i = 0; i < range; i++) {
+                for (j = i; j < range; j++) {
+                        if (addr->s6_addr16[j] != 0)
+                                break;
+                        zerolength[i]++;
+                }
+        }
+        for (i = 0; i < range; i++) {
+                if (zerolength[i] > longest) {
+                        longest = zerolength[i];
+                        colonpos = i;
+                }
+        }
+
+        /* emit address */
+        for (i = 0; i < range; i++) {
+                if (i == colonpos) {
+                        if (needcolon || i == 0)
+                                *p++ = ':';
+                        *p++ = ':';
+                        needcolon = false;
+                        i += longest - 1;
+                        continue;
+                }
+                if (needcolon) {
+                        *p++ = ':';
+                        needcolon = false;
+                }
+                /* hex u16 without leading 0s */
+                word = ntohs(addr->s6_addr16[i]);
+                hi = word >> 8;
+                lo = word & 0xff;
+                if (hi) {
+                        if (hi > 0x0f)
+                                p = pack_hex_byte(p, hi);
+                        else
+                                *p++ = hex_asc_lo(hi);
+                }
+                if (hi || lo > 0x0f)
+                        p = pack_hex_byte(p, lo);
+                else
+                        *p++ = hex_asc_lo(lo);
+                needcolon = true;
+        }
+
+        if (useIPv4) {
+                if (needcolon)
+                        *p++ = ':';
+                p = ip4_string(p, &addr->s6_addr[12], false);
+        }
 
+        *p = '\0';
+        return p;
+}
+
+static char *ip6_string(char *p, const struct in6_addr *addr, const char *fmt)
+{
+        int i;
         for (i = 0; i < 8; i++) {
-                p = pack_hex_byte(p, addr[2 * i]);
-                p = pack_hex_byte(p, addr[2 * i + 1]);
-                if (!(spec.flags & SPECIAL) && i != 7)
+                p = pack_hex_byte(p, addr->s6_addr[2 * i]);
+                p = pack_hex_byte(p, addr->s6_addr[2 * i + 1]);
+                if (fmt[0] == 'I' && i != 7)
                         *p++ = ':';
         }
+
         *p = '\0';
-        spec.flags &= ~SPECIAL;
+        return p;
+}
+
+static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
+                             struct printf_spec spec, const char *fmt)
+{
+        char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
+
+        if (fmt[0] == 'I' && fmt[2] == 'c')
+                ip6_compressed_string(ip6_addr, (const struct in6_addr *)addr);
+        else
+                ip6_string(ip6_addr, (const struct in6_addr *)addr, fmt);
 
         return string(buf, end, ip6_addr, spec);
 }
 
-static char *ip4_addr_string(char *buf, char *end, u8 *addr,
-                             struct printf_spec spec)
+static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
+                             struct printf_spec spec, const char *fmt)
 {
-        char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */
-        char temp[3];   /* hold each IP quad in reverse order */
-        char *p = ip4_addr;
-        int i, digits;
+        char ip4_addr[sizeof("255.255.255.255")];
 
-        for (i = 0; i < 4; i++) {
-                digits = put_dec_trunc(temp, addr[i]) - temp;
-                /* reverse the digits in the quad */
-                while (digits--)
-                        *p++ = temp[digits];
-                if (i != 3)
-                        *p++ = '.';
-        }
-        *p = '\0';
-        spec.flags &= ~SPECIAL;
+        ip4_string(ip4_addr, addr, fmt[0] == 'i');
 
         return string(buf, end, ip4_addr, spec);
 }
@@ -702,11 +799,15 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr,
  *       addresses (not the name nor the flags)
  * - 'M' For a 6-byte MAC address, it prints the address in the
  *       usual colon-separated hex notation
- * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way (dot-separated
- *       decimal for v4 and colon separated network-order 16 bit hex for v6)
- * - 'i' [46] for 'raw' IPv4/IPv6 addresses, IPv6 omits the colons, IPv4 is
- *       currently the same
- *
+ * - 'm' For a 6-byte MAC address, it prints the hex address without colons
+ * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
+ *       IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
+ *       IPv6 uses colon separated network-order 16 bit hex with leading 0's
+ * - 'i' [46] for 'raw' IPv4/IPv6 addresses
+ *       IPv6 omits the colons (01020304...0f)
+ *       IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
+ * - 'I6c' for IPv6 addresses printed as specified by
+ *       http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt
 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  *       function pointers are really function descriptors, which contain a
  *       pointer to the real address.
@@ -726,20 +827,24 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                 return symbol_string(buf, end, ptr, spec, *fmt);
         case 'R':
                 return resource_string(buf, end, ptr, spec);
-        case 'm':
-                spec.flags |= SPECIAL;
-                /* Fallthrough */
-        case 'M':
-                return mac_address_string(buf, end, ptr, spec);
-        case 'i':
-                spec.flags |= SPECIAL;
-                /* Fallthrough */
-        case 'I':
-                if (fmt[1] == '6')
-                        return ip6_addr_string(buf, end, ptr, spec);
-                if (fmt[1] == '4')
-                        return ip4_addr_string(buf, end, ptr, spec);
-                spec.flags &= ~SPECIAL;
+        case 'M':               /* Colon separated: 00:01:02:03:04:05 */
+        case 'm':               /* Contiguous: 000102030405 */
+                return mac_address_string(buf, end, ptr, spec, fmt);
+        case 'I':               /* Formatted IP supported
+                                 * 4:   1.2.3.4
+                                 * 6:   0001:0203:...:0708
+                                 * 6c:  1::708 or 1::1.2.3.4
+                                 */
+        case 'i':               /* Contiguous:
+                                 * 4:   001.002.003.004
+                                 * 6:   000102...0f
+                                 */
+                switch (fmt[1]) {
+                case '6':
+                        return ip6_addr_string(buf, end, ptr, spec, fmt);
+                case '4':
+                        return ip4_addr_string(buf, end, ptr, spec, fmt);
+                }
                 break;
         }
         spec.flags |= SMALL;
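
[Note] A quick usage sketch of the extended %p specifiers (illustrative
only; the address values are made up):

    #include <linux/kernel.h>
    #include <linux/in6.h>

    static void show_addrs(const u8 *mac, const u8 *ip4,
                           const struct in6_addr *ip6)
    {
            printk(KERN_INFO "mac %pM mac-raw %pm\n", mac, mac);
            printk(KERN_INFO "v4 %pI4 v4-padded %pi4\n", ip4, ip4);
            printk(KERN_INFO "v6 %pI6 v6-compressed %pI6c\n", ip6, ip6);
    }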