author    Ingo Molnar <mingo@elte.hu>    2008-10-12 06:35:23 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-10-12 06:37:32 -0400
commit    365d46dc9be9b3c833990a06f3994b1987eda578 (patch)
tree      9397d1304144a288411f2118707f44ff5e862fa6 /lib
parent    5dc64a3442b98eaa0e3730c35fcf00cf962a93e7 (diff)
parent    fd048088306656824958e7783ffcee27e241b361 (diff)

Merge branch 'linus' into x86/xen

Conflicts:
	arch/x86/kernel/cpu/common.c
	arch/x86/kernel/process_64.c
	arch/x86/xen/enlighten.c
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug    | 48
-rw-r--r--  lib/Makefile         |  3
-rw-r--r--  lib/iommu-helper.c   |  5
-rw-r--r--  lib/klist.c          | 96
-rw-r--r--  lib/percpu_counter.c |  8
-rw-r--r--  lib/string_helpers.c | 64
-rw-r--r--  lib/swiotlb.c        | 49

7 files changed, 208 insertions(+), 65 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0b504814e378..ce697e0b319e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -597,6 +597,19 @@ config RCU_TORTURE_TEST_RUNNABLE
 	  Say N here if you want the RCU torture tests to start only
 	  after being manually enabled via /proc.
 
+config RCU_CPU_STALL_DETECTOR
+	bool "Check for stalled CPUs delaying RCU grace periods"
+	depends on CLASSIC_RCU
+	default n
+	help
+	  This option causes RCU to printk information on which
+	  CPUs are delaying the current grace period, but only when
+	  the grace period extends for excessive time periods.
+
+	  Say Y if you want RCU to perform such checks.
+
+	  Say N if you are unsure.
+
 config KPROBES_SANITY_TEST
 	bool "Kprobes sanity tests"
 	depends on DEBUG_KERNEL
@@ -624,6 +637,28 @@ config BACKTRACE_SELF_TEST
 
 	  Say N if you are unsure.
 
+config DEBUG_BLOCK_EXT_DEVT
+	bool "Force extended block device numbers and spread them"
+	depends on DEBUG_KERNEL
+	depends on BLOCK
+	default n
+	help
+	  Conventionally, block device numbers are allocated from a
+	  predetermined contiguous area.  However, the extended block
+	  area may introduce non-contiguous block device numbers.  This
+	  option forces most block device numbers to be allocated from
+	  the extended space and spreads them to discover kernel or
+	  userland code paths which assume predetermined contiguous
+	  device number allocation.
+
+	  Note that turning on this debug option shuffles all the
+	  device numbers for all IDE and SCSI devices including libata
+	  ones, so a root partition specified using a device number
+	  directly (via rdev or root=MAJ:MIN) won't work anymore.
+	  Textual device names (root=/dev/sdXn) will continue to work.
+
+	  Say N if you are unsure.
+
 config LKDTM
 	tristate "Linux Kernel Dump Test Tool Module"
 	depends on DEBUG_KERNEL
@@ -661,10 +696,21 @@ config FAIL_PAGE_ALLOC
 
 config FAIL_MAKE_REQUEST
 	bool "Fault-injection capability for disk IO"
-	depends on FAULT_INJECTION
+	depends on FAULT_INJECTION && BLOCK
 	help
 	  Provide fault-injection capability for disk IO.
 
+config FAIL_IO_TIMEOUT
+	bool "Fault-injection capability for faking disk interrupts"
+	depends on FAULT_INJECTION && BLOCK
+	help
+	  Provide fault-injection capability on end IO handling. This
+	  will make the block layer "forget" an interrupt as configured,
+	  thus exercising the error handling.
+
+	  Only works with drivers that use the generic timeout handling;
+	  for others it won't do anything.
+
 config FAULT_INJECTION_DEBUG_FS
 	bool "Debugfs entries for fault-injection capabilities"
 	depends on FAULT_INJECTION && SYSFS && DEBUG_FS
diff --git a/lib/Makefile b/lib/Makefile
index 3b1f94bbe9de..44001af76a7d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -19,7 +19,8 @@ lib-$(CONFIG_SMP) += cpumask.o
 lib-y += kobject.o kref.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
-	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o
+	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
+	 string_helpers.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index a3b8d4c3f77a..5d90074dca75 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -30,8 +30,7 @@ again:
 		return index;
 }
 
-static inline void set_bit_area(unsigned long *map, unsigned long i,
-				int len)
+void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
 {
 	unsigned long end = i + len;
 	while (i < end) {
@@ -64,7 +63,7 @@ again:
 			start = index + 1;
 			goto again;
 		}
-		set_bit_area(map, index, nr);
+		iommu_area_reserve(map, index, nr);
 	}
 	return index;
 }
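
set_bit_area() is made public as iommu_area_reserve() so that IOMMU code can pre-reserve ranges in its allocation bitmaps. A minimal sketch of a hypothetical caller (IO_PAGES, io_map and reserve_bad_range() are illustrative only, not part of this patch):

#include <linux/bitmap.h>
#include <linux/iommu-helper.h>

#define IO_PAGES 1024				/* illustrative bitmap size */
static unsigned long io_map[BITS_TO_LONGS(IO_PAGES)];

static void reserve_bad_range(void)
{
	/* mark pages 0..15 busy so the allocator never hands them out */
	iommu_area_reserve(io_map, 0, 16);
}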
diff --git a/lib/klist.c b/lib/klist.c
index cca37f96faa2..bbdd3015c2c7 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -37,6 +37,37 @@
 #include <linux/klist.h>
 #include <linux/module.h>
 
+/*
+ * Use the lowest bit of n_klist to mark deleted nodes and exclude
+ * dead ones from iteration.
+ */
+#define KNODE_DEAD		1LU
+#define KNODE_KLIST_MASK	~KNODE_DEAD
+
+static struct klist *knode_klist(struct klist_node *knode)
+{
+	return (struct klist *)
+		((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
+}
+
+static bool knode_dead(struct klist_node *knode)
+{
+	return (unsigned long)knode->n_klist & KNODE_DEAD;
+}
+
+static void knode_set_klist(struct klist_node *knode, struct klist *klist)
+{
+	knode->n_klist = klist;
+	/* no knode deserves to start its life dead */
+	WARN_ON(knode_dead(knode));
+}
+
+static void knode_kill(struct klist_node *knode)
+{
+	/* and no knode should die twice ever either, see we're very humane */
+	WARN_ON(knode_dead(knode));
+	*(unsigned long *)&knode->n_klist |= KNODE_DEAD;
+}
 
 /**
  * klist_init - Initialize a klist structure.
@@ -79,7 +110,7 @@ static void klist_node_init(struct klist *k, struct klist_node *n)
 	INIT_LIST_HEAD(&n->n_node);
 	init_completion(&n->n_removed);
 	kref_init(&n->n_ref);
-	n->n_klist = k;
+	knode_set_klist(n, k);
 	if (k->get)
 		k->get(n);
 }
@@ -115,7 +146,7 @@ EXPORT_SYMBOL_GPL(klist_add_tail);
  */
 void klist_add_after(struct klist_node *n, struct klist_node *pos)
 {
-	struct klist *k = pos->n_klist;
+	struct klist *k = knode_klist(pos);
 
 	klist_node_init(k, n);
 	spin_lock(&k->k_lock);
@@ -131,7 +162,7 @@ EXPORT_SYMBOL_GPL(klist_add_after);
  */
 void klist_add_before(struct klist_node *n, struct klist_node *pos)
 {
-	struct klist *k = pos->n_klist;
+	struct klist *k = knode_klist(pos);
 
 	klist_node_init(k, n);
 	spin_lock(&k->k_lock);
@@ -144,9 +175,10 @@ static void klist_release(struct kref *kref)
 {
 	struct klist_node *n = container_of(kref, struct klist_node, n_ref);
 
+	WARN_ON(!knode_dead(n));
 	list_del(&n->n_node);
 	complete(&n->n_removed);
-	n->n_klist = NULL;
+	knode_set_klist(n, NULL);
 }
 
 static int klist_dec_and_del(struct klist_node *n)
152static int klist_dec_and_del(struct klist_node *n) 184static int klist_dec_and_del(struct klist_node *n)
@@ -154,22 +186,29 @@ static int klist_dec_and_del(struct klist_node *n)
154 return kref_put(&n->n_ref, klist_release); 186 return kref_put(&n->n_ref, klist_release);
155} 187}
156 188
157/** 189static void klist_put(struct klist_node *n, bool kill)
158 * klist_del - Decrement the reference count of node and try to remove.
159 * @n: node we're deleting.
160 */
161void klist_del(struct klist_node *n)
162{ 190{
163 struct klist *k = n->n_klist; 191 struct klist *k = knode_klist(n);
164 void (*put)(struct klist_node *) = k->put; 192 void (*put)(struct klist_node *) = k->put;
165 193
166 spin_lock(&k->k_lock); 194 spin_lock(&k->k_lock);
195 if (kill)
196 knode_kill(n);
167 if (!klist_dec_and_del(n)) 197 if (!klist_dec_and_del(n))
168 put = NULL; 198 put = NULL;
169 spin_unlock(&k->k_lock); 199 spin_unlock(&k->k_lock);
170 if (put) 200 if (put)
171 put(n); 201 put(n);
172} 202}
203
204/**
205 * klist_del - Decrement the reference count of node and try to remove.
206 * @n: node we're deleting.
207 */
208void klist_del(struct klist_node *n)
209{
210 klist_put(n, true);
211}
173EXPORT_SYMBOL_GPL(klist_del); 212EXPORT_SYMBOL_GPL(klist_del);
174 213
175/** 214/**
@@ -206,7 +245,6 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
 			  struct klist_node *n)
 {
 	i->i_klist = k;
-	i->i_head = &k->k_list;
 	i->i_cur = n;
 	if (n)
 		kref_get(&n->n_ref);
@@ -237,7 +275,7 @@ EXPORT_SYMBOL_GPL(klist_iter_init);
 void klist_iter_exit(struct klist_iter *i)
 {
 	if (i->i_cur) {
-		klist_del(i->i_cur);
+		klist_put(i->i_cur, false);
 		i->i_cur = NULL;
 	}
 }
@@ -258,27 +296,33 @@ static struct klist_node *to_klist_node(struct list_head *n)
  */
 struct klist_node *klist_next(struct klist_iter *i)
 {
-	struct list_head *next;
-	struct klist_node *lnode = i->i_cur;
-	struct klist_node *knode = NULL;
 	void (*put)(struct klist_node *) = i->i_klist->put;
+	struct klist_node *last = i->i_cur;
+	struct klist_node *next;
 
 	spin_lock(&i->i_klist->k_lock);
-	if (lnode) {
-		next = lnode->n_node.next;
-		if (!klist_dec_and_del(lnode))
+
+	if (last) {
+		next = to_klist_node(last->n_node.next);
+		if (!klist_dec_and_del(last))
 			put = NULL;
 	} else
-		next = i->i_head->next;
+		next = to_klist_node(i->i_klist->k_list.next);
 
-	if (next != i->i_head) {
-		knode = to_klist_node(next);
-		kref_get(&knode->n_ref);
+	i->i_cur = NULL;
+	while (next != to_klist_node(&i->i_klist->k_list)) {
+		if (likely(!knode_dead(next))) {
+			kref_get(&next->n_ref);
+			i->i_cur = next;
+			break;
+		}
+		next = to_klist_node(next->n_node.next);
 	}
-	i->i_cur = knode;
+
 	spin_unlock(&i->i_klist->k_lock);
-	if (put && lnode)
-		put(lnode);
-	return knode;
+
+	if (put && last)
+		put(last);
+	return i->i_cur;
 }
 EXPORT_SYMBOL_GPL(klist_next);
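
With deletion now a two-step mark-and-release, an iterator can run concurrently with klist_del(): nodes marked KNODE_DEAD are simply skipped by klist_next(). A minimal sketch of a hypothetical client (struct widget, widget_list and widget_walk() are illustrative, not part of this patch):

#include <linux/kernel.h>
#include <linux/klist.h>

struct widget {
	struct klist_node node;
	int id;
};

static struct klist widget_list;	/* klist_init()'d elsewhere */

static void widget_walk(void)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init(&widget_list, &iter);
	while ((n = klist_next(&iter))) {
		struct widget *w = container_of(n, struct widget, node);

		/* nodes already marked dead by klist_del() never show up here */
		pr_info("widget %d\n", w->id);
	}
	klist_iter_exit(&iter);
}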
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 4a8ba4bf5f6f..a8663890a88c 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
  * Add up all the per-cpu counts, return the result. This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
 	s64 ret;
 	int cpu;
@@ -62,11 +62,9 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
-		if (set)
-			*pcount = 0;
+		*pcount = 0;
 	}
-	if (set)
-		fbc->count = ret;
+	fbc->count = ret;
 
 	spin_unlock(&fbc->lock);
 	return ret;
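
With the set argument gone, the accurate sum now always folds the per-cpu deltas back into fbc->count. A hypothetical caller illustrating the resulting semantics (nr_things and things_demo() are made up for illustration):

#include <linux/kernel.h>
#include <linux/percpu_counter.h>

static struct percpu_counter nr_things;		/* illustrative counter */

static void things_demo(void)
{
	if (percpu_counter_init(&nr_things, 0))
		return;

	percpu_counter_add(&nr_things, 1);	/* cheap, cpu-local */

	/* fast read: may lag by up to batch * num_online_cpus() */
	pr_info("approx %lld\n", (long long)percpu_counter_read(&nr_things));

	/* slow read: exact, and now always zeroes the per-cpu deltas */
	pr_info("exact  %lld\n", (long long)percpu_counter_sum(&nr_things));

	percpu_counter_destroy(&nr_things);
}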
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
new file mode 100644
index 000000000000..8347925030ff
--- /dev/null
+++ b/lib/string_helpers.c
@@ -0,0 +1,64 @@
+/*
+ * Helpers for formatting and printing strings
+ *
+ * Copyright 31 August 2008 James Bottomley
+ */
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/string_helpers.h>
+
+/**
+ * string_get_size - get the size in the specified units
+ * @size:	The size to be converted
+ * @units:	units to use (powers of 1000 or 1024)
+ * @buf:	buffer to format to
+ * @len:	length of buffer
+ *
+ * This function returns a string formatted to 3 significant figures
+ * giving the size in the required units.  Returns 0 on success or
+ * error on failure.  @buf is always zero terminated.
+ *
+ */
+int string_get_size(u64 size, const enum string_size_units units,
+		    char *buf, int len)
+{
+	const char *units_10[] = { "B", "KB", "MB", "GB", "TB", "PB",
+				   "EB", "ZB", "YB", NULL };
+	const char *units_2[] = { "B", "KiB", "MiB", "GiB", "TiB", "PiB",
+				  "EiB", "ZiB", "YiB", NULL };
+	const char **units_str[] = {
+		[STRING_UNITS_10] = units_10,
+		[STRING_UNITS_2] = units_2,
+	};
+	const int divisor[] = {
+		[STRING_UNITS_10] = 1000,
+		[STRING_UNITS_2] = 1024,
+	};
+	int i, j;
+	u64 remainder = 0, sf_cap;
+	char tmp[8];
+
+	tmp[0] = '\0';
+
+	for (i = 0; size > divisor[units] && units_str[units][i]; i++)
+		remainder = do_div(size, divisor[units]);
+
+	sf_cap = size;
+	for (j = 0; sf_cap*10 < 1000; j++)
+		sf_cap *= 10;
+
+	if (j) {
+		remainder *= 1000;
+		do_div(remainder, divisor[units]);
+		snprintf(tmp, sizeof(tmp), ".%03lld",
+			 (unsigned long long)remainder);
+		tmp[j+1] = '\0';
+	}
+
+	snprintf(buf, len, "%lld%s%s", (unsigned long long)size,
+		 tmp, units_str[units][i]);
+
+	return 0;
+}
+EXPORT_SYMBOL(string_get_size);
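
A hypothetical caller showing what the rounding above produces; the expected outputs in the comments are worked out from the code (1500/1024 = 1.4648..., truncated to three significant figures):

#include <linux/kernel.h>
#include <linux/string_helpers.h>

static void size_demo(void)
{
	char buf[16];

	string_get_size(1500, STRING_UNITS_2, buf, sizeof(buf));
	pr_info("%s\n", buf);		/* "1.46KiB" */

	string_get_size(1500, STRING_UNITS_10, buf, sizeof(buf));
	pr_info("%s\n", buf);		/* "1.50KB" */
}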
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 8826fdf0f180..f8eebd489149 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,14 @@ cleanup1:
 }
 
 static int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
 {
-	dma_addr_t mask = 0xffffffff;
-	/* If the device has a mask, use it, otherwise default to 32 bits */
-	if (hwdev && hwdev->dma_mask)
-		mask = *hwdev->dma_mask;
-	return (addr & ~mask) != 0;
+	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+}
+
+static int is_swiotlb_buffer(char *addr)
+{
+	return addr >= io_tlb_start && addr < io_tlb_end;
 }
 
 /*
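
The open-coded mask test is replaced by the generic helper, which is expected to look roughly like the sketch below (per include/linux/dma-mapping.h of this era); unlike the old check it considers the whole buffer, not just its first byte, against the device's DMA mask:

static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr,
					size_t size)
{
	return addr + size <= mask;
}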
@@ -467,15 +468,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	void *ret;
 	int order = get_order(size);
 
-	/*
-	 * XXX fix me: the DMA API should pass us an explicit DMA mask
-	 * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
-	 * bit range instead of a 16MB one).
-	 */
-	flags |= GFP_DMA;
-
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -490,19 +484,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 * swiotlb_map_single(), which will grab memory from
 		 * the lowest available address range.
 		 */
-		dma_addr_t handle;
-		handle = swiotlb_map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
-		if (swiotlb_dma_mapping_error(hwdev, handle))
+		ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+		if (!ret)
 			return NULL;
-
-		ret = bus_to_virt(handle);
 	}
 
 	memset(ret, 0, size);
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr)) {
+	if (address_needs_mapping(hwdev, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
 		       (unsigned long long)*hwdev->dma_mask,
 		       (unsigned long long)dev_addr);
@@ -518,12 +509,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 		      dma_addr_t dma_handle)
 {
 	WARN_ON(irqs_disabled());
-	if (!(vaddr >= (void *)io_tlb_start
-	      && vaddr < (void *)io_tlb_end))
+	if (!is_swiotlb_buffer(vaddr))
 		free_pages((unsigned long) vaddr, get_order(size));
 	else
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+		unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 
 static void
@@ -567,7 +557,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+	if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
 		return dev_addr;
 
 	/*
@@ -584,7 +574,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (address_needs_mapping(hwdev, dev_addr))
+	if (address_needs_mapping(hwdev, dev_addr, size))
 		panic("map_single: bounce buffer is not DMA'ble");
 
 	return dev_addr;
@@ -612,7 +602,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		unmap_single(hwdev, dma_addr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
@@ -642,7 +632,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
@@ -673,7 +663,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 	char *dma_addr = bus_to_virt(dev_addr) + offset;
 
 	BUG_ON(dir == DMA_NONE);
-	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+	if (is_swiotlb_buffer(dma_addr))
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
@@ -727,7 +717,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	for_each_sg(sgl, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		dev_addr = virt_to_bus(addr);
-		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+		if (swiotlb_force ||
+		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
 			void *map = map_single(hwdev, addr, sg->length, dir);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users