author		Linus Torvalds <torvalds@linux-foundation.org>	2013-11-14 23:34:37 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-14 23:34:37 -0500
commit		eda670c626a4f53eb8ac5f20d8c10d3f0b54c583 (patch)
tree		e8b31fdeddd520b0fc56483f0a33c0501ee3b692 /drivers
parent		b746f9c7941f227ad582b4f0bc981f3adcbc46b2 (diff)
parent		18c51e1a3fabb455ff1f5cd610097d89f577b8f7 (diff)
Merge tag 'stable/for-linus-3.13-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull Xen updates from Konrad Rzeszutek Wilk:
 "This has tons of fixes and two major features, which are concentrated
  around the Xen SWIOTLB library.

  The short <blurb> is that a tracing facility (just one function) has
  been added to SWIOTLB to make it easier to track I/O progress.
  Additionally, under Xen on ARM (32 & 64) the Xen-SWIOTLB driver "is
  used to translate physical to machine and machine to physical
  addresses of foreign [guest] pages for DMA operations" (Stefano) when
  booting on hardware without a proper IOMMU.

  There are also bug fixes, cleanups, compile warning fixes, etc.

  The commit times for some of the commits are a bit fresh - that is b/c
  we wanted to make sure we had the Acks from the ARM folks, which with
  the string of back-to-back conferences took a bit of time.  Rest
  assured - the code has been stewing in #linux-next for some time.

  Features:
   - SWIOTLB has tracing added when doing bounce buffering.
   - Xen ARM/ARM64 can use Xen-SWIOTLB.  This work allows Linux to
     safely program real devices for DMA operations when running as a
     guest on Xen on ARM, without IOMMU support. [*1]
   - xen_raw_printk works with PVHVM guests if needed.

  Bug fixes:
   - Make memory ballooning work under HVM with large MMIO regions.
   - Inform the hypervisor of MCFG regions found in the ACPI DSDT.
   - Remove deprecated IRQF_DISABLED.
   - Remove deprecated __cpuinit.

  [*1]: "On arm and arm64 all Xen guests, including dom0, run with
  second stage translation enabled.  As a consequence, when dom0
  programs a device for a DMA operation it is going to use (pseudo)
  physical addresses instead of machine addresses.  This work introduces
  two trees to track physical-to-machine and machine-to-physical
  mappings of foreign pages.  Local pages are assumed mapped 1:1
  (physical address == machine address).  It enables the SWIOTLB-Xen
  driver on ARM and ARM64, so that Linux can translate physical
  addresses to machine addresses for DMA operations when necessary."
  (Stefano)

* tag 'stable/for-linus-3.13-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (32 commits)
  xen/arm: pfn_to_mfn and mfn_to_pfn return the argument if nothing is in the p2m
  arm,arm64/include/asm/io.h: define struct bio_vec
  swiotlb-xen: missing include dma-direction.h
  pci-swiotlb-xen: call pci_request_acs only ifdef CONFIG_PCI
  arm: make SWIOTLB available
  xen: delete new instances of added __cpuinit
  xen/balloon: Set balloon's initial state to number of existing RAM pages
  xen/mcfg: Call PHYSDEVOP_pci_mmcfg_reserved for MCFG areas.
  xen: remove deprecated IRQF_DISABLED
  x86/xen: remove deprecated IRQF_DISABLED
  swiotlb-xen: fix error code returned by xen_swiotlb_map_sg_attrs
  swiotlb-xen: static inline xen_phys_to_bus, xen_bus_to_phys, xen_virt_to_bus and range_straddles_page_boundary
  grant-table: call set_phys_to_machine after mapping grant refs
  arm,arm64: do not always merge biovec if we are running on Xen
  swiotlb: print a warning when the swiotlb is full
  swiotlb-xen: use xen_dma_map/unmap_page, xen_dma_sync_single_for_cpu/device
  xen: introduce xen_dma_map/unmap_page and xen_dma_sync_single_for_cpu/device
  tracing/events: Fix swiotlb tracepoint creation
  swiotlb-xen: use xen_alloc/free_coherent_pages
  xen: introduce xen_alloc/free_coherent_pages
  ...
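As a rough illustration of the default-identity rule in [*1] - the behavior the
first shortlog entry names, where pfn_to_mfn()/mfn_to_pfn() return their
argument if nothing is in the p2m - here is a minimal standalone sketch. It is
not the kernel implementation: the kernel tracks foreign pages in a tree, for
which a small fixed table stands in below, and every name in it is
hypothetical.

	/* sketch.c - default-identity p2m lookup, illustrative only */
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t xen_pfn_t;	/* (pseudo) physical frame number */
	typedef uint64_t xen_mfn_t;	/* machine frame number */

	/* Stand-in for the tree of foreign (grant-mapped) pages. */
	static const struct { xen_pfn_t pfn; xen_mfn_t mfn; } p2m_table[] = {
		{ 0x1000, 0x80000 },	/* one foreign page, explicitly mapped */
	};

	static xen_mfn_t sketch_pfn_to_mfn(xen_pfn_t pfn)
	{
		size_t i;

		/* Foreign pages have an explicit entry... */
		for (i = 0; i < sizeof(p2m_table) / sizeof(p2m_table[0]); i++)
			if (p2m_table[i].pfn == pfn)
				return p2m_table[i].mfn;

		/* ...local pages are assumed mapped 1:1, so a miss
		 * translates to the argument itself. */
		return (xen_mfn_t)pfn;
	}

	int main(void)
	{
		printf("foreign: %#llx -> %#llx\n", 0x1000ULL,
		       (unsigned long long)sketch_pfn_to_mfn(0x1000));
		printf("local:   %#llx -> %#llx\n", 0x2000ULL,
		       (unsigned long long)sketch_pfn_to_mfn(0x2000));
		return 0;
	}

A miss is not an error under this rule: any pfn that was never grant-mapped
simply translates to itself, which is what lets SWIOTLB-Xen treat local memory
as 1:1 while still resolving foreign pages to their real machine addresses.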
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/tty/hvc/hvc_xen.c	19
-rw-r--r--	drivers/xen/Kconfig		1
-rw-r--r--	drivers/xen/balloon.c		6
-rw-r--r--	drivers/xen/evtchn.c		2
-rw-r--r--	drivers/xen/grant-table.c	19
-rw-r--r--	drivers/xen/pci.c		47
-rw-r--r--	drivers/xen/platform-pci.c	2
-rw-r--r--	drivers/xen/swiotlb-xen.c	119
8 files changed, 179 insertions, 36 deletions
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index c193af6a628f..636c9baad7a5 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -183,7 +183,7 @@ static int dom0_write_console(uint32_t vtermno, const char *str, int len)
 {
 	int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)str);
 	if (rc < 0)
-		return 0;
+		return rc;
 
 	return len;
 }
@@ -642,7 +642,22 @@ struct console xenboot_console = {
 
 void xen_raw_console_write(const char *str)
 {
-	dom0_write_console(0, str, strlen(str));
+	ssize_t len = strlen(str);
+	int rc = 0;
+
+	if (xen_domain()) {
+		rc = dom0_write_console(0, str, len);
+#ifdef CONFIG_X86
+		if (rc == -ENOSYS && xen_hvm_domain())
+			goto outb_print;
+
+	} else if (xen_cpuid_base()) {
+		int i;
+outb_print:
+		for (i = 0; i < len; i++)
+			outb(str[i], 0xe9);
+#endif
+	}
 }
 
 void xen_raw_printk(const char *fmt, ...)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 23eae5cb69c2..c794ea182140 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -140,7 +140,6 @@ config XEN_GRANT_DEV_ALLOC
 
 config SWIOTLB_XEN
 	def_bool y
-	depends on PCI && X86
 	select SWIOTLB
 
 config XEN_TMEM
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index b232908a6192..55ea73f7c70b 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -596,7 +596,7 @@ static void __init balloon_add_region(unsigned long start_pfn,
 	}
 }
 
-static int __cpuinit balloon_cpu_notify(struct notifier_block *self,
+static int balloon_cpu_notify(struct notifier_block *self,
 				    unsigned long action, void *hcpu)
 {
 	int cpu = (long)hcpu;
@@ -616,7 +616,7 @@ static int __cpuinit balloon_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block balloon_cpu_notifier __cpuinitdata = {
+static struct notifier_block balloon_cpu_notifier = {
 	.notifier_call = balloon_cpu_notify,
 };
 
@@ -641,7 +641,7 @@ static int __init balloon_init(void)
 
 	balloon_stats.current_pages = xen_pv_domain()
 		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
-		: max_pfn;
+		: get_num_physpages();
 	balloon_stats.target_pages = balloon_stats.current_pages;
 	balloon_stats.balloon_low = 0;
 	balloon_stats.balloon_high = 0;
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 8b3a69a06c39..5de2063e16d3 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -305,7 +305,7 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
 	if (rc < 0)
 		goto err;
 
-	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
+	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
 				       u->name, evtchn);
 	if (rc < 0)
 		goto err;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index c4d2298893b1..62ccf5424ba8 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -49,6 +49,7 @@
 #include <xen/grant_table.h>
 #include <xen/interface/memory.h>
 #include <xen/hvc-console.h>
+#include <xen/swiotlb-xen.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
@@ -898,8 +899,16 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 		gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
 					&map_ops[i].status, __func__);
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	/* this is basically a nop on x86 */
+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+		for (i = 0; i < count; i++) {
+			if (map_ops[i].status)
+				continue;
+			set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
+					map_ops[i].dev_bus_addr >> PAGE_SHIFT);
+		}
 		return ret;
+	}
 
 	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
 		arch_enter_lazy_mmu_mode();
@@ -942,8 +951,14 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 	if (ret)
 		return ret;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	/* this is basically a nop on x86 */
+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+		for (i = 0; i < count; i++) {
+			set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
+					INVALID_P2M_ENTRY);
+		}
 		return ret;
+	}
 
 	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
 		arch_enter_lazy_mmu_mode();
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 18fff88254eb..d15f6e80479f 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -26,6 +26,7 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 #include "../pci/pci.h"
+#include <asm/pci_x86.h>
 
 static bool __read_mostly pci_seg_supported = true;
 
@@ -192,3 +193,49 @@ static int __init register_xen_pci_notifier(void)
 }
 
 arch_initcall(register_xen_pci_notifier);
+
+#ifdef CONFIG_PCI_MMCONFIG
+static int __init xen_mcfg_late(void)
+{
+	struct pci_mmcfg_region *cfg;
+	int rc;
+
+	if (!xen_initial_domain())
+		return 0;
+
+	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
+		return 0;
+
+	if (list_empty(&pci_mmcfg_list))
+		return 0;
+
+	/* Check whether they are in the right area. */
+	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+		struct physdev_pci_mmcfg_reserved r;
+
+		r.address = cfg->address;
+		r.segment = cfg->segment;
+		r.start_bus = cfg->start_bus;
+		r.end_bus = cfg->end_bus;
+		r.flags = XEN_PCI_MMCFG_RESERVED;
+
+		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
+		switch (rc) {
+		case 0:
+		case -ENOSYS:
+			continue;
+
+		default:
+			pr_warn("Failed to report MMCONFIG reservation"
+				" state for %s to hypervisor"
+				" (%d)\n",
+				cfg->name, rc);
+		}
+	}
+	return 0;
+}
+/*
+ * Needs to be done after acpi_init which are subsys_initcall.
+ */
+subsys_initcall_sync(xen_mcfg_late);
+#endif
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 99db9e1eb8ba..2f3528e93cb9 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -84,7 +84,7 @@ static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
 static int xen_allocate_irq(struct pci_dev *pdev)
 {
 	return request_irq(pdev->irq, do_hvm_evtchn_intr,
-			IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+			IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
 			"xen-platform-pci", pdev);
 }
 
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 1b2277c311d2..a224bc74b6b9 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -42,12 +42,31 @@
 #include <xen/page.h>
 #include <xen/xen-ops.h>
 #include <xen/hvc-console.h>
+
+#include <asm/dma-mapping.h>
+#include <asm/xen/page-coherent.h>
+
+#include <trace/events/swiotlb.h>
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
  * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 
+#ifndef CONFIG_X86
+static unsigned long dma_alloc_coherent_mask(struct device *dev,
+					    gfp_t gfp)
+{
+	unsigned long dma_mask = 0;
+
+	dma_mask = dev->coherent_dma_mask;
+	if (!dma_mask)
+		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
+
+	return dma_mask;
+}
+#endif
+
 static char *xen_io_tlb_start, *xen_io_tlb_end;
 static unsigned long xen_io_tlb_nslabs;
 /*
@@ -56,17 +75,17 @@ static unsigned long xen_io_tlb_nslabs;
 
 static u64 start_dma_addr;
 
-static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
+static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 {
 	return phys_to_machine(XPADDR(paddr)).maddr;
 }
 
-static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
+static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 {
 	return machine_to_phys(XMADDR(baddr)).paddr;
 }
 
-static dma_addr_t xen_virt_to_bus(void *address)
+static inline dma_addr_t xen_virt_to_bus(void *address)
 {
 	return xen_phys_to_bus(virt_to_phys(address));
 }
@@ -89,7 +108,7 @@ static int check_pages_physically_contiguous(unsigned long pfn,
 	return 1;
 }
 
-static int range_straddles_page_boundary(phys_addr_t p, size_t size)
+static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 {
 	unsigned long pfn = PFN_DOWN(p);
 	unsigned int offset = p & ~PAGE_MASK;
@@ -126,6 +145,8 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 {
 	int i, rc;
 	int dma_bits;
+	dma_addr_t dma_handle;
+	phys_addr_t p = virt_to_phys(buf);
 
 	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
 
@@ -135,9 +156,9 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 
 	do {
 		rc = xen_create_contiguous_region(
-			(unsigned long)buf + (i << IO_TLB_SHIFT),
+			p + (i << IO_TLB_SHIFT),
 			get_order(slabs << IO_TLB_SHIFT),
-			dma_bits);
+			dma_bits, &dma_handle);
 	} while (rc && dma_bits++ < max_dma_bits);
 	if (rc)
 		return rc;
@@ -263,7 +284,6 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	void *ret;
 	int order = get_order(size);
 	u64 dma_mask = DMA_BIT_MASK(32);
-	unsigned long vstart;
 	phys_addr_t phys;
 	dma_addr_t dev_addr;
 
@@ -278,8 +298,12 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
 		return ret;
 
-	vstart = __get_free_pages(flags, order);
-	ret = (void *)vstart;
+	/* On ARM this function returns an ioremap'ped virtual address for
+	 * which virt_to_phys doesn't return the corresponding physical
+	 * address. In fact on ARM virt_to_phys only works for kernel direct
+	 * mapped RAM memory. Also see comment below.
+	 */
+	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
 
 	if (!ret)
 		return ret;
@@ -287,18 +311,21 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = dma_alloc_coherent_mask(hwdev, flags);
 
-	phys = virt_to_phys(ret);
+	/* At this point dma_handle is the physical address, next we are
+	 * going to set it to the machine address.
+	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
+	 * to *dma_handle. */
+	phys = *dma_handle;
 	dev_addr = xen_phys_to_bus(phys);
 	if (((dev_addr + size - 1 <= dma_mask)) &&
 	    !range_straddles_page_boundary(phys, size))
 		*dma_handle = dev_addr;
 	else {
-		if (xen_create_contiguous_region(vstart, order,
-						 fls64(dma_mask)) != 0) {
-			free_pages(vstart, order);
+		if (xen_create_contiguous_region(phys, order,
+						 fls64(dma_mask), dma_handle) != 0) {
+			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
 			return NULL;
 		}
-		*dma_handle = virt_to_machine(ret).maddr;
 	}
 	memset(ret, 0, size);
 	return ret;
@@ -319,13 +346,15 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = hwdev->coherent_dma_mask;
 
-	phys = virt_to_phys(vaddr);
+	/* do not use virt_to_phys because on ARM it doesn't return you the
+	 * physical address */
+	phys = xen_bus_to_phys(dev_addr);
 
 	if (((dev_addr + size - 1 > dma_mask)) ||
 	    range_straddles_page_boundary(phys, size))
-		xen_destroy_contiguous_region((unsigned long)vaddr, order);
+		xen_destroy_contiguous_region(phys, order);
 
-	free_pages((unsigned long)vaddr, order);
+	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
 
@@ -352,16 +381,25 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * buffering it.
 	 */
 	if (dma_capable(dev, dev_addr, size) &&
-	    !range_straddles_page_boundary(phys, size) && !swiotlb_force)
+	    !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
+		/* we are not interested in the dma_addr returned by
+		 * xen_dma_map_page, only in the potential cache flushes executed
+		 * by the function. */
+		xen_dma_map_page(dev, page, offset, size, dir, attrs);
 		return dev_addr;
+	}
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
+	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+
 	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
 	if (map == SWIOTLB_MAP_ERROR)
 		return DMA_ERROR_CODE;
 
+	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
+					map & ~PAGE_MASK, size, dir, attrs);
 	dev_addr = xen_phys_to_bus(map);
 
 	/*
@@ -384,12 +422,15 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			     size_t size, enum dma_data_direction dir)
+			     size_t size, enum dma_data_direction dir,
+			     struct dma_attrs *attrs)
 {
 	phys_addr_t paddr = xen_bus_to_phys(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
+	xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);
+
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr)) {
 		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
@@ -412,7 +453,7 @@ void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 			size_t size, enum dma_data_direction dir,
 			struct dma_attrs *attrs)
 {
-	xen_unmap_single(hwdev, dev_addr, size, dir);
+	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
 
@@ -435,11 +476,15 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	BUG_ON(dir == DMA_NONE);
 
+	if (target == SYNC_FOR_CPU)
+		xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+
 	/* NOTE: We use dev_addr here, not paddr! */
-	if (is_xen_swiotlb_buffer(dev_addr)) {
+	if (is_xen_swiotlb_buffer(dev_addr))
 		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
-		return;
-	}
+
+	if (target == SYNC_FOR_DEVICE)
+		xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
 
 	if (dir != DMA_FROM_DEVICE)
 		return;
@@ -502,16 +547,26 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 						 sg->length,
 						 dir);
 			if (map == SWIOTLB_MAP_ERROR) {
+				dev_warn(hwdev, "swiotlb buffer is full\n");
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
 							   attrs);
 				sg_dma_len(sgl) = 0;
-				return DMA_ERROR_CODE;
+				return 0;
 			}
 			sg->dma_address = xen_phys_to_bus(map);
-		} else
+		} else {
+			/* we are not interested in the dma_addr returned by
+			 * xen_dma_map_page, only in the potential cache flushes executed
+			 * by the function. */
+			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
+					 paddr & ~PAGE_MASK,
+					 sg->length,
+					 dir,
+					 attrs);
 			sg->dma_address = dev_addr;
+		}
 		sg_dma_len(sg) = sg->length;
 	}
 	return nelems;
@@ -533,7 +588,7 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i)
-		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
+		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
 
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
@@ -593,3 +648,15 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
+
+int
+xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);