Diffstat (limited to 'lib')

 lib/Kconfig.debug        |  17
 lib/bitmap.c             |  12
 lib/dma-debug.c          |  28
 lib/flex_array.c         |  41
 lib/is_single_threaded.c |  61
 lib/lmb.c                |   2
 lib/swiotlb.c            | 124
 7 files changed, 143 insertions(+), 142 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 12327b2bb785..7dbd5d9c29a4 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -653,6 +653,21 @@ config DEBUG_NOTIFIERS
 	  This is a relatively cheap check but if you care about maximum
 	  performance, say N.
 
+config DEBUG_CREDENTIALS
+	bool "Debug credential management"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to turn on some debug checking for credential
+	  management.  The additional code keeps track of the number of
+	  pointers from task_structs to any given cred struct, and checks to
+	  see that this number never exceeds the usage count of the cred
+	  struct.
+
+	  Furthermore, if SELinux is enabled, this also checks that the
+	  security pointer in the cred struct is never seen to be invalid.
+
+	  If unsure, say N.
+
 #
 # Select this config option from the architecture Kconfig, if it
 # it is preferred to always offer frame pointers as a config
@@ -725,7 +740,7 @@ config RCU_TORTURE_TEST_RUNNABLE
 
 config RCU_CPU_STALL_DETECTOR
 	bool "Check for stalled CPUs delaying RCU grace periods"
-	depends on CLASSIC_RCU || TREE_RCU
+	depends on TREE_RCU || TREE_PREEMPT_RCU
 	default n
 	help
 	  This option causes RCU to printk information on which
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 35a1f7ff4149..702565821c99 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -179,14 +179,16 @@ void __bitmap_shift_left(unsigned long *dst,
 }
 EXPORT_SYMBOL(__bitmap_shift_left);
 
-void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
 				const unsigned long *bitmap2, int bits)
 {
 	int k;
 	int nr = BITS_TO_LONGS(bits);
+	unsigned long result = 0;
 
 	for (k = 0; k < nr; k++)
-		dst[k] = bitmap1[k] & bitmap2[k];
+		result |= (dst[k] = bitmap1[k] & bitmap2[k]);
+	return result != 0;
 }
 EXPORT_SYMBOL(__bitmap_and);
 
@@ -212,14 +214,16 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
 }
 EXPORT_SYMBOL(__bitmap_xor);
 
-void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
 				const unsigned long *bitmap2, int bits)
 {
 	int k;
 	int nr = BITS_TO_LONGS(bits);
+	unsigned long result = 0;
 
 	for (k = 0; k < nr; k++)
-		dst[k] = bitmap1[k] & ~bitmap2[k];
+		result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
+	return result != 0;
 }
 EXPORT_SYMBOL(__bitmap_andnot);
 
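Note on the lib/bitmap.c hunks above: __bitmap_and() and __bitmap_andnot() now accumulate the OR of every result word and return whether any bit survived, so a caller can build a mask and test it for emptiness in one pass. A minimal sketch of the calling pattern; the wrapper below is hypothetical and not part of this patch:

/*
 * Hypothetical wrapper: returns true if masks a and b intersect,
 * leaving the intersection in dst as a side effect.
 */
static bool masks_intersect(unsigned long *dst, const unsigned long *a,
                            const unsigned long *b, int bits)
{
        return __bitmap_and(dst, a, b, bits) != 0;
}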
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 65b0d99b6d0a..58a9f9fc609a 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -156,9 +156,13 @@ static bool driver_filter(struct device *dev)
 		return true;
 
 	/* driver filter on and initialized */
-	if (current_driver && dev->driver == current_driver)
+	if (current_driver && dev && dev->driver == current_driver)
 		return true;
 
+	/* driver filter on, but we can't filter on a NULL device... */
+	if (!dev)
+		return false;
+
 	if (current_driver || !current_driver_name[0])
 		return false;
 
@@ -183,17 +187,17 @@ static bool driver_filter(struct device *dev)
 	return ret;
 }
 
 #define err_printk(dev, entry, format, arg...) do {		\
 		error_count += 1;				\
 		if (driver_filter(dev) &&			\
 		    (show_all_errors || show_num_errors > 0)) {	\
 			WARN(1, "%s %s: " format,		\
-			     dev_driver_string(dev),		\
-			     dev_name(dev) , ## arg);		\
+			     dev ? dev_driver_string(dev) : "NULL", \
+			     dev ? dev_name(dev) : "NULL", ## arg); \
 			dump_entry_trace(entry);		\
 		}						\
 		if (!show_all_errors && show_num_errors > 0)	\
 			show_num_errors -= 1;			\
 	} while (0);
 
 /*
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 08f1636d296a..7baed2fc3bc8 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -99,7 +99,8 @@ static inline int elements_fit_in_base(struct flex_array *fa)
  * capacity in the base structure.  Also note that no effort is made
  * to efficiently pack objects across page boundaries.
  */
-struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
+struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+					gfp_t flags)
 {
 	struct flex_array *ret;
 	int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
@@ -115,16 +116,14 @@ struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
 	return ret;
 }
 
-static int fa_element_to_part_nr(struct flex_array *fa, int element_nr)
+static int fa_element_to_part_nr(struct flex_array *fa,
+					unsigned int element_nr)
 {
 	return element_nr / __elements_per_part(fa->element_size);
 }
 
 /**
  * flex_array_free_parts - just free the second-level pages
- * @src:	address of data to copy into the array
- * @element_nr:	index of the position in which to insert
- *		the new element.
  *
  * This is to be used in cases where the base 'struct flex_array'
  * has been statically allocated and should not be free.
@@ -146,14 +145,12 @@ void flex_array_free(struct flex_array *fa)
 	kfree(fa);
 }
 
-static int fa_index_inside_part(struct flex_array *fa, int element_nr)
+static unsigned int index_inside_part(struct flex_array *fa,
+					unsigned int element_nr)
 {
-	return element_nr % __elements_per_part(fa->element_size);
-}
+	unsigned int part_offset;
 
-static int index_inside_part(struct flex_array *fa, int element_nr)
-{
-	int part_offset = fa_index_inside_part(fa, element_nr);
+	part_offset = element_nr % __elements_per_part(fa->element_size);
 	return part_offset * fa->element_size;
 }
 
@@ -188,7 +185,8 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
  *
  * Locking must be provided by the caller.
  */
-int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags)
+int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
+			gfp_t flags)
 {
 	int part_nr = fa_element_to_part_nr(fa, element_nr);
 	struct flex_array_part *part;
@@ -198,10 +196,11 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
 		return -ENOSPC;
 	if (elements_fit_in_base(fa))
 		part = (struct flex_array_part *)&fa->parts[0];
-	else
+	else {
 		part = __fa_get_part(fa, part_nr, flags);
 		if (!part)
 			return -ENOMEM;
+	}
 	dst = &part->elements[index_inside_part(fa, element_nr)];
 	memcpy(dst, src, fa->element_size);
 	return 0;
@@ -219,7 +218,8 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
 *
  * Locking must be provided by the caller.
  */
-int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
+int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+			unsigned int end, gfp_t flags)
 {
 	int start_part;
 	int end_part;
@@ -250,18 +250,19 @@ int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
 *
  * Locking must be provided by the caller.
  */
-void *flex_array_get(struct flex_array *fa, int element_nr)
+void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
 {
 	int part_nr = fa_element_to_part_nr(fa, element_nr);
 	struct flex_array_part *part;
 
 	if (element_nr >= fa->total_nr_elements)
 		return NULL;
-	if (!fa->parts[part_nr])
-		return NULL;
 	if (elements_fit_in_base(fa))
 		part = (struct flex_array_part *)&fa->parts[0];
-	else
+	else {
 		part = fa->parts[part_nr];
+		if (!part)
+			return NULL;
+	}
 	return &part->elements[index_inside_part(fa, element_nr)];
 }
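Note on the lib/flex_array.c hunks above: element indices become unsigned int throughout, and flex_array_get() now checks for a missing part only on the out-of-line path, returning NULL instead of touching fa->parts[] when the elements fit in the base. A rough usage sketch under those assumptions; struct foo and the function below are illustrative, not from this patch:

/*
 * Hypothetical usage of the reworked API: copy *src into slot nr,
 * then read it back out.
 */
struct foo { int a, b; };

static int stash_and_fetch(struct foo *src, struct foo *out, unsigned int nr)
{
        struct flex_array *fa;
        struct foo *elem;
        int err = -ENOMEM;

        fa = flex_array_alloc(sizeof(struct foo), nr + 1, GFP_KERNEL);
        if (!fa)
                return -ENOMEM;
        if (flex_array_put(fa, nr, src, GFP_KERNEL) == 0) {
                /* NULL means the element's part was never allocated */
                elem = flex_array_get(fa, nr);
                if (elem) {
                        *out = *elem;
                        err = 0;
                }
        }
        flex_array_free(fa);
        return err;
}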
diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
index f1ed2fe76c65..bd2bea963364 100644
--- a/lib/is_single_threaded.c
+++ b/lib/is_single_threaded.c
@@ -12,34 +12,47 @@
 
 #include <linux/sched.h>
 
-/**
- * is_single_threaded - Determine if a thread group is single-threaded or not
- * @p: A task in the thread group in question
- *
- * This returns true if the thread group to which a task belongs is single
- * threaded, false if it is not.
+/*
+ * Returns true if the task does not share ->mm with another thread/process.
  */
-bool is_single_threaded(struct task_struct *p)
+bool current_is_single_threaded(void)
 {
-	struct task_struct *g, *t;
-	struct mm_struct *mm = p->mm;
+	struct task_struct *task = current;
+	struct mm_struct *mm = task->mm;
+	struct task_struct *p, *t;
+	bool ret;
 
-	if (atomic_read(&p->signal->count) != 1)
-		goto no;
+	if (atomic_read(&task->signal->live) != 1)
+		return false;
 
-	if (atomic_read(&p->mm->mm_users) != 1) {
-		read_lock(&tasklist_lock);
-		do_each_thread(g, t) {
-			if (t->mm == mm && t != p)
-				goto no_unlock;
-		} while_each_thread(g, t);
-		read_unlock(&tasklist_lock);
-	}
+	if (atomic_read(&mm->mm_users) == 1)
+		return true;
 
-	return true;
+	ret = false;
+	rcu_read_lock();
+	for_each_process(p) {
+		if (unlikely(p->flags & PF_KTHREAD))
+			continue;
+		if (unlikely(p == task->group_leader))
+			continue;
+
+		t = p;
+		do {
+			if (unlikely(t->mm == mm))
+				goto found;
+			if (likely(t->mm))
+				break;
+			/*
+			 * t->mm == NULL. Make sure next_thread/next_task
+			 * will see other CLONE_VM tasks which might be
+			 * forked before exiting.
+			 */
+			smp_rmb();
+		} while_each_thread(p, t);
+	}
+	ret = true;
+found:
+	rcu_read_unlock();
 
-no_unlock:
-	read_unlock(&tasklist_lock);
-no:
-	return false;
+	return ret;
 }
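Note on the lib/is_single_threaded.c rewrite above: the helper becomes current_is_single_threaded(), takes no task argument, and walks the process list under rcu_read_lock() rather than tasklist_lock, so it can only answer the question for the calling task. A hedged sketch of a call site; revalidate_mm() and the wrapper are illustrative helpers, not kernel functions:

/*
 * Hypothetical call site: skip an expensive revalidation only when
 * no other thread or CLONE_VM process can share current->mm.
 */
static int maybe_revalidate_mm(void)
{
        if (current_is_single_threaded())
                return 0;       /* nothing else can observe the mm */

        return revalidate_mm(current->mm);
}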
diff --git a/lib/lmb.c b/lib/lmb.c
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -429,7 +429,7 @@ u64 __init lmb_phys_mem_size(void)
 	return lmb.memory.size;
 }
 
-u64 __init lmb_end_of_DRAM(void)
+u64 lmb_end_of_DRAM(void)
 {
 	int idx = lmb.memory.cnt - 1;
 
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bffe6d7ef9d9..ac25cd28e807 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -114,46 +114,11 @@ setup_io_tlb_npages(char *str)
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
-void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
-{
-	return alloc_bootmem_low_pages(size);
-}
-
-void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
-{
-	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
-}
-
-dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
-{
-	return paddr;
-}
-
-phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
-{
-	return baddr;
-}
-
+/* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 				      volatile void *address)
 {
-	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
-}
-
-void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
-{
-	return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
-}
-
-int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
-					      dma_addr_t addr, size_t size)
-{
-	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
-}
-
-int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
-{
-	return 0;
-}
+	return phys_to_dma(hwdev, virt_to_phys(address));
 }
 
 static void swiotlb_print_info(unsigned long bytes)
@@ -189,7 +154,7 @@ swiotlb_init_with_default_size(size_t default_size)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
+	io_tlb_start = alloc_bootmem_low_pages(bytes);
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + bytes;
@@ -245,7 +210,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-		io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
+		io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+							order);
 		if (io_tlb_start)
 			break;
 		order--;
@@ -315,20 +281,10 @@ cleanup1:
 	return -ENOMEM;
 }
 
-static inline int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
+static int is_swiotlb_buffer(phys_addr_t paddr)
 {
-	return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
-}
-
-static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
-{
-	return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
-}
-
-static int is_swiotlb_buffer(char *addr)
-{
-	return addr >= io_tlb_start && addr < io_tlb_end;
+	return paddr >= virt_to_phys(io_tlb_start) &&
+	       paddr < virt_to_phys(io_tlb_end);
 }
 
 /*
@@ -561,9 +517,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret &&
-	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
-				   size)) {
+	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 */
@@ -585,7 +539,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = swiotlb_virt_to_bus(hwdev, ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
+	if (dev_addr + size > dma_mask) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
 		       (unsigned long long)dma_mask,
 		       (unsigned long long)dev_addr);
@@ -601,11 +555,13 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
 void
 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-		      dma_addr_t dma_handle)
+		      dma_addr_t dev_addr)
 {
+	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+
 	WARN_ON(irqs_disabled());
-	if (!is_swiotlb_buffer(vaddr))
-		free_pages((unsigned long) vaddr, get_order(size));
+	if (!is_swiotlb_buffer(paddr))
+		free_pages((unsigned long)vaddr, get_order(size));
 	else
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
 		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
@@ -625,12 +581,15 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
 	       "device %s\n", size, dev ? dev_name(dev) : "?");
 
-	if (size > io_tlb_overflow && do_panic) {
-		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
-			panic("DMA: Memory would be corrupted\n");
-		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-			panic("DMA: Random memory would be DMAed\n");
-	}
+	if (size <= io_tlb_overflow || !do_panic)
+		return;
+
+	if (dir == DMA_BIDIRECTIONAL)
+		panic("DMA: Random memory could be DMA accessed\n");
+	if (dir == DMA_FROM_DEVICE)
+		panic("DMA: Random memory could be DMA written\n");
+	if (dir == DMA_TO_DEVICE)
+		panic("DMA: Random memory could be DMA read\n");
 }
 
 /*
@@ -646,7 +605,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    struct dma_attrs *attrs)
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
-	dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
+	dma_addr_t dev_addr = phys_to_dma(dev, phys);
 	void *map;
 
 	BUG_ON(dir == DMA_NONE);
@@ -655,8 +614,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (!address_needs_mapping(dev, dev_addr, size) &&
-	    !range_needs_mapping(phys, size))
+	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
 		return dev_addr;
 
 	/*
@@ -673,7 +631,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (address_needs_mapping(dev, dev_addr, size))
+	if (!dma_capable(dev, dev_addr, size))
 		panic("map_single: bounce buffer is not DMA'ble");
 
 	return dev_addr;
@@ -691,19 +649,25 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 			 size_t size, int dir)
 {
-	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (is_swiotlb_buffer(dma_addr)) {
-		do_unmap_single(hwdev, dma_addr, size, dir);
+	if (is_swiotlb_buffer(paddr)) {
+		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
 		return;
 	}
 
 	if (dir != DMA_FROM_DEVICE)
 		return;
 
-	dma_mark_clean(dma_addr, size);
+	/*
+	 * phys_to_virt doesn't work with highmem pages but we could
+	 * call dma_mark_clean() with a highmem page here. However, we
+	 * are fine since dma_mark_clean() is null on POWERPC. We can
+	 * make dma_mark_clean() take a physical address if necessary.
+	 */
+	dma_mark_clean(phys_to_virt(paddr), size);
 }
 
 void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
@@ -728,19 +692,19 @@ static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 		    size_t size, int dir, int target)
 {
-	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (is_swiotlb_buffer(dma_addr)) {
-		sync_single(hwdev, dma_addr, size, dir, target);
+	if (is_swiotlb_buffer(paddr)) {
+		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
 		return;
 	}
 
 	if (dir != DMA_FROM_DEVICE)
 		return;
 
-	dma_mark_clean(dma_addr, size);
+	dma_mark_clean(phys_to_virt(paddr), size);
 }
 
 void
@@ -817,10 +781,10 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 
 	for_each_sg(sgl, sg, nelems, i) {
 		phys_addr_t paddr = sg_phys(sg);
-		dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);
+		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
 
-		if (range_needs_mapping(paddr, sg->length) ||
-		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
+		if (swiotlb_force ||
+		    !dma_capable(hwdev, dev_addr, sg->length)) {
 			void *map = map_single(hwdev, sg_phys(sg),
 					       sg->length, dir);
 			if (!map) {
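Note on the lib/swiotlb.c conversion above: the per-arch __weak hooks (swiotlb_phys_to_bus(), swiotlb_bus_to_virt(), the *_needs_mapping() pair) are replaced by the generic phys_to_dma()/dma_to_phys() and dma_capable() helpers, and is_swiotlb_buffer() now takes a physical address. The mapping decision in swiotlb_map_page() then reduces to the pattern sketched below; the wrapper function is illustrative, not from the patch:

/*
 * Hypothetical wrapper mirroring the new swiotlb_map_page() test:
 * a page needs bounce buffering if swiotlb is forced or the device
 * cannot reach the page's bus address.
 */
static bool needs_bounce(struct device *dev, struct page *page,
                         unsigned long offset, size_t size)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = phys_to_dma(dev, phys);

        return swiotlb_force || !dma_capable(dev, dev_addr, size);
}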