author		Jiri Kosina <jkosina@suse.cz>	2013-12-19 09:08:03 -0500
committer	Jiri Kosina <jkosina@suse.cz>	2013-12-19 09:08:32 -0500
commit		e23c34bb41da65f354fb7eee04300c56ee48f60c (patch)
tree		549fbe449d55273b81ef104a9755109bf4ae7817 /drivers/xen
parent		b481c2cb3534c85dca625973b33eba15f9af3e4c (diff)
parent		319e2e3f63c348a9b66db4667efa73178e18b17d (diff)
Merge branch 'master' into for-next
Sync with Linus' tree to be able to apply fixes on top of newer things in tree (efi-stub).

Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'drivers/xen')
-rw-r--r--	drivers/xen/Kconfig                          |   1
-rw-r--r--	drivers/xen/balloon.c                        |  36
-rw-r--r--	drivers/xen/evtchn.c                         |   2
-rw-r--r--	drivers/xen/grant-table.c                    |  25
-rw-r--r--	drivers/xen/pci.c                            |  53
-rw-r--r--	drivers/xen/platform-pci.c                   |   2
-rw-r--r--	drivers/xen/swiotlb-xen.c                    | 124
-rw-r--r--	drivers/xen/xenbus/xenbus_probe.c            |  24
-rw-r--r--	drivers/xen/xenbus/xenbus_probe.h            |   2
-rw-r--r--	drivers/xen/xenbus/xenbus_probe_backend.c    |   2
-rw-r--r--	drivers/xen/xenbus/xenbus_probe_frontend.c   |   2
11 files changed, 210 insertions(+), 63 deletions(-)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index e8dc72c4bf11..12ba6db65142 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -140,7 +140,6 @@ config XEN_GRANT_DEV_ALLOC
 
 config SWIOTLB_XEN
 	def_bool y
-	depends on PCI && X86
 	select SWIOTLB
 
 config XEN_TMEM
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 3101cf6daf56..55ea73f7c70b 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -349,8 +349,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 		BUG_ON(page == NULL);
 
 		pfn = page_to_pfn(page);
-		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
-		       phys_to_machine_mapping_valid(pfn));
 
 		set_phys_to_machine(pfn, frame_list[i]);
 
@@ -380,6 +378,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	enum bp_state state = BP_DONE;
 	unsigned long pfn, i;
 	struct page *page;
+	struct page *scratch_page;
 	int ret;
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
@@ -412,34 +411,35 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
 		scrub_page(page);
 
+		/*
+		 * Ballooned out frames are effectively replaced with
+		 * a scratch frame.  Ensure direct mappings and the
+		 * p2m are consistent.
+		 */
+		scratch_page = get_balloon_scratch_page();
 #ifdef CONFIG_XEN_HAVE_PVMMU
 		if (xen_pv_domain() && !PageHighMem(page)) {
 			ret = HYPERVISOR_update_va_mapping(
 				(unsigned long)__va(pfn << PAGE_SHIFT),
-				pfn_pte(page_to_pfn(__get_cpu_var(balloon_scratch_page)),
+				pfn_pte(page_to_pfn(scratch_page),
 					PAGE_KERNEL_RO), 0);
 			BUG_ON(ret);
 		}
 #endif
-	}
-
-	/* Ensure that ballooned highmem pages don't have kmaps. */
-	kmap_flush_unused();
-	flush_tlb_all();
-
-	/* No more mappings: invalidate P2M and add to balloon. */
-	for (i = 0; i < nr_pages; i++) {
-		pfn = mfn_to_pfn(frame_list[i]);
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 			unsigned long p;
-			struct page *pg;
-			pg = __get_cpu_var(balloon_scratch_page);
-			p = page_to_pfn(pg);
+			p = page_to_pfn(scratch_page);
 			__set_phys_to_machine(pfn, pfn_to_mfn(p));
 		}
+		put_balloon_scratch_page();
+
 		balloon_append(pfn_to_page(pfn));
 	}
 
+	/* Ensure that ballooned highmem pages don't have kmaps. */
+	kmap_flush_unused();
+	flush_tlb_all();
+
 	set_xen_guest_handle(reservation.extent_start, frame_list);
 	reservation.nr_extents = nr_pages;
 	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
@@ -596,7 +596,7 @@ static void __init balloon_add_region(unsigned long start_pfn,
 	}
 }
 
-static int __cpuinit balloon_cpu_notify(struct notifier_block *self,
+static int balloon_cpu_notify(struct notifier_block *self,
 				       unsigned long action, void *hcpu)
 {
 	int cpu = (long)hcpu;
@@ -616,7 +616,7 @@ static int __cpuinit balloon_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block balloon_cpu_notifier __cpuinitdata = {
+static struct notifier_block balloon_cpu_notifier = {
 	.notifier_call	= balloon_cpu_notify,
 };
 
@@ -641,7 +641,7 @@ static int __init balloon_init(void)
 
 	balloon_stats.current_pages = xen_pv_domain()
 		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
-		: max_pfn;
+		: get_num_physpages();
 	balloon_stats.target_pages  = balloon_stats.current_pages;
 	balloon_stats.balloon_low   = 0;
 	balloon_stats.balloon_high  = 0;
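
Note (added context, not part of the patch): the decrease_reservation()
rework above replaces open-coded __get_cpu_var(balloon_scratch_page)
accesses with a get/put pair that holds the scratch page stable for the
whole per-page update. A minimal sketch of such a pair, assuming a
per-CPU scratch page populated at boot (the names match the callers
above; the real definitions live elsewhere in balloon.c):

	static DEFINE_PER_CPU(struct page *, balloon_scratch_page);

	struct page *get_balloon_scratch_page(void)
	{
		/* get_cpu_var() disables preemption, so the scratch
		 * page cannot change under us until the matching put. */
		struct page *ret = get_cpu_var(balloon_scratch_page);
		BUG_ON(ret == NULL);
		return ret;
	}

	void put_balloon_scratch_page(void)
	{
		put_cpu_var(balloon_scratch_page);
	}
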
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 8b3a69a06c39..5de2063e16d3 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -305,7 +305,7 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
 	if (rc < 0)
 		goto err;
 
-	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
+	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
 				       u->name, evtchn);
 	if (rc < 0)
 		goto err;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index c4d2298893b1..028387192b60 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -49,6 +49,7 @@
 #include <xen/grant_table.h>
 #include <xen/interface/memory.h>
 #include <xen/hvc-console.h>
+#include <xen/swiotlb-xen.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
@@ -898,8 +899,16 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 		gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
 					&map_ops[i].status, __func__);
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	/* this is basically a nop on x86 */
+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+		for (i = 0; i < count; i++) {
+			if (map_ops[i].status)
+				continue;
+			set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
+					map_ops[i].dev_bus_addr >> PAGE_SHIFT);
+		}
 		return ret;
+	}
 
 	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
 		arch_enter_lazy_mmu_mode();
@@ -921,9 +930,10 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 		ret = m2p_add_override(mfn, pages[i], kmap_ops ?
 				       &kmap_ops[i] : NULL);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
+ out:
 	if (lazy)
 		arch_leave_lazy_mmu_mode();
 
@@ -942,8 +952,14 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 	if (ret)
 		return ret;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
+	/* this is basically a nop on x86 */
+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+		for (i = 0; i < count; i++) {
+			set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
+					INVALID_P2M_ENTRY);
+		}
 		return ret;
+	}
 
 	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
 		arch_enter_lazy_mmu_mode();
@@ -954,9 +970,10 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 		ret = m2p_remove_override(pages[i], kmap_ops ?
 					  &kmap_ops[i] : NULL);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
+ out:
 	if (lazy)
 		arch_leave_lazy_mmu_mode();
 
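
Note (added context, not part of the patch): the "basically a nop on
x86" comments refer to set_phys_to_machine() on auto-translated
domains, where the p2m is an identity map and the call only
sanity-checks consistency. A simplified sketch of the x86 behaviour
being relied on (an assumption based on the arch p2m code of this era,
not part of this diff):

	bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
	{
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Identity p2m: just verify consistency. */
			BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
			return true;
		}
		return __set_phys_to_machine(pfn, mfn); /* real update (PV) */
	}
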
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 18fff88254eb..188825122aae 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -26,6 +26,7 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 #include "../pci/pci.h"
+#include <asm/pci_x86.h>
 
 static bool __read_mostly pci_seg_supported = true;
 
@@ -58,12 +59,12 @@ static int xen_add_device(struct device *dev)
 		add.flags = XEN_PCI_DEV_EXTFN;
 
 #ifdef CONFIG_ACPI
-		handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
+		handle = ACPI_HANDLE(&pci_dev->dev);
 		if (!handle && pci_dev->bus->bridge)
-			handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
+			handle = ACPI_HANDLE(pci_dev->bus->bridge);
 #ifdef CONFIG_PCI_IOV
 		if (!handle && pci_dev->is_virtfn)
-			handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge);
+			handle = ACPI_HANDLE(physfn->bus->bridge);
 #endif
 		if (handle) {
 			acpi_status status;
@@ -192,3 +193,49 @@ static int __init register_xen_pci_notifier(void)
 }
 
 arch_initcall(register_xen_pci_notifier);
+
+#ifdef CONFIG_PCI_MMCONFIG
+static int __init xen_mcfg_late(void)
+{
+	struct pci_mmcfg_region *cfg;
+	int rc;
+
+	if (!xen_initial_domain())
+		return 0;
+
+	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
+		return 0;
+
+	if (list_empty(&pci_mmcfg_list))
+		return 0;
+
+	/* Check whether they are in the right area. */
+	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+		struct physdev_pci_mmcfg_reserved r;
+
+		r.address = cfg->address;
+		r.segment = cfg->segment;
+		r.start_bus = cfg->start_bus;
+		r.end_bus = cfg->end_bus;
+		r.flags = XEN_PCI_MMCFG_RESERVED;
+
+		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
+		switch (rc) {
+		case 0:
+		case -ENOSYS:
+			continue;
+
+		default:
+			pr_warn("Failed to report MMCONFIG reservation"
+				" state for %s to hypervisor"
+				" (%d)\n",
+				cfg->name, rc);
+		}
+	}
+	return 0;
+}
+/*
+ * Needs to be done after acpi_init which are subsys_initcall.
+ */
+subsys_initcall_sync(xen_mcfg_late);
+#endif
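
Note (added context, not part of the patch): xen_mcfg_late() fills one
physdev_pci_mmcfg_reserved request per MMCONFIG region. The
field-for-field copy above implies a layout along these lines (sketched
from the usage here; see include/xen/interface/physdev.h for the
authoritative definition):

	struct physdev_pci_mmcfg_reserved {
		uint64_t address;	/* ECAM base of the region */
		uint16_t segment;	/* PCI segment/domain */
		uint8_t start_bus;	/* first bus covered */
		uint8_t end_bus;	/* last bus covered */
		uint32_t flags;		/* XEN_PCI_MMCFG_RESERVED */
	};
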
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 99db9e1eb8ba..2f3528e93cb9 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -84,7 +84,7 @@ static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
 static int xen_allocate_irq(struct pci_dev *pdev)
 {
 	return request_irq(pdev->irq, do_hvm_evtchn_intr,
-			IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+			IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
 			"xen-platform-pci", pdev);
 }
 
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 1b2277c311d2..1eac0731c349 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -42,12 +42,31 @@
 #include <xen/page.h>
 #include <xen/xen-ops.h>
 #include <xen/hvc-console.h>
+
+#include <asm/dma-mapping.h>
+#include <asm/xen/page-coherent.h>
+
+#include <trace/events/swiotlb.h>
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
  * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 
+#ifndef CONFIG_X86
+static unsigned long dma_alloc_coherent_mask(struct device *dev,
+					    gfp_t gfp)
+{
+	unsigned long dma_mask = 0;
+
+	dma_mask = dev->coherent_dma_mask;
+	if (!dma_mask)
+		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
+
+	return dma_mask;
+}
+#endif
+
 static char *xen_io_tlb_start, *xen_io_tlb_end;
 static unsigned long xen_io_tlb_nslabs;
 /*
@@ -56,17 +75,17 @@ static unsigned long xen_io_tlb_nslabs;
 
 static u64 start_dma_addr;
 
-static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
+static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 {
 	return phys_to_machine(XPADDR(paddr)).maddr;
 }
 
-static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
+static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 {
 	return machine_to_phys(XMADDR(baddr)).paddr;
 }
 
-static dma_addr_t xen_virt_to_bus(void *address)
+static inline dma_addr_t xen_virt_to_bus(void *address)
 {
 	return xen_phys_to_bus(virt_to_phys(address));
 }
@@ -89,7 +108,7 @@ static int check_pages_physically_contiguous(unsigned long pfn,
 	return 1;
 }
 
-static int range_straddles_page_boundary(phys_addr_t p, size_t size)
+static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 {
 	unsigned long pfn = PFN_DOWN(p);
 	unsigned int offset = p & ~PAGE_MASK;
@@ -126,6 +145,8 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 {
 	int i, rc;
 	int dma_bits;
+	dma_addr_t dma_handle;
+	phys_addr_t p = virt_to_phys(buf);
 
 	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
 
@@ -135,9 +156,9 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 
 		do {
 			rc = xen_create_contiguous_region(
-				(unsigned long)buf + (i << IO_TLB_SHIFT),
+				p + (i << IO_TLB_SHIFT),
 				get_order(slabs << IO_TLB_SHIFT),
-				dma_bits);
+				dma_bits, &dma_handle);
 		} while (rc && dma_bits++ < max_dma_bits);
 		if (rc)
 			return rc;
@@ -263,7 +284,6 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	void *ret;
 	int order = get_order(size);
 	u64 dma_mask = DMA_BIT_MASK(32);
-	unsigned long vstart;
 	phys_addr_t phys;
 	dma_addr_t dev_addr;
 
@@ -278,8 +298,12 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
 		return ret;
 
-	vstart = __get_free_pages(flags, order);
-	ret = (void *)vstart;
+	/* On ARM this function returns an ioremap'ped virtual address for
+	 * which virt_to_phys doesn't return the corresponding physical
+	 * address. In fact on ARM virt_to_phys only works for kernel direct
+	 * mapped RAM memory. Also see comment below.
+	 */
+	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
 
 	if (!ret)
 		return ret;
@@ -287,18 +311,21 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = dma_alloc_coherent_mask(hwdev, flags);
 
-	phys = virt_to_phys(ret);
+	/* At this point dma_handle is the physical address, next we are
+	 * going to set it to the machine address.
+	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
+	 * to *dma_handle. */
+	phys = *dma_handle;
 	dev_addr = xen_phys_to_bus(phys);
 	if (((dev_addr + size - 1 <= dma_mask)) &&
 	    !range_straddles_page_boundary(phys, size))
 		*dma_handle = dev_addr;
 	else {
-		if (xen_create_contiguous_region(vstart, order,
-						 fls64(dma_mask)) != 0) {
-			free_pages(vstart, order);
+		if (xen_create_contiguous_region(phys, order,
+						 fls64(dma_mask), dma_handle) != 0) {
+			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
 			return NULL;
 		}
-		*dma_handle = virt_to_machine(ret).maddr;
 	}
 	memset(ret, 0, size);
 	return ret;
@@ -319,13 +346,15 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = hwdev->coherent_dma_mask;
 
-	phys = virt_to_phys(vaddr);
+	/* do not use virt_to_phys because on ARM it doesn't return you the
+	 * physical address */
+	phys = xen_bus_to_phys(dev_addr);
 
 	if (((dev_addr + size - 1 > dma_mask)) ||
 	    range_straddles_page_boundary(phys, size))
-		xen_destroy_contiguous_region((unsigned long)vaddr, order);
+		xen_destroy_contiguous_region(phys, order);
 
-	free_pages((unsigned long)vaddr, order);
+	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
 
@@ -352,16 +381,25 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * buffering it.
 	 */
 	if (dma_capable(dev, dev_addr, size) &&
-	    !range_straddles_page_boundary(phys, size) && !swiotlb_force)
+	    !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
+		/* we are not interested in the dma_addr returned by
+		 * xen_dma_map_page, only in the potential cache flushes executed
+		 * by the function. */
+		xen_dma_map_page(dev, page, offset, size, dir, attrs);
 		return dev_addr;
+	}
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
+	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+
 	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
 	if (map == SWIOTLB_MAP_ERROR)
 		return DMA_ERROR_CODE;
 
+	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
+					map & ~PAGE_MASK, size, dir, attrs);
 	dev_addr = xen_phys_to_bus(map);
 
 	/*
@@ -384,12 +422,15 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			     size_t size, enum dma_data_direction dir)
+			     size_t size, enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
 {
 	phys_addr_t paddr = xen_bus_to_phys(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
+	xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);
+
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr)) {
 		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
@@ -412,7 +453,7 @@ void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 			    size_t size, enum dma_data_direction dir,
 			    struct dma_attrs *attrs)
 {
-	xen_unmap_single(hwdev, dev_addr, size, dir);
+	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
 
@@ -435,11 +476,15 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	BUG_ON(dir == DMA_NONE);
 
+	if (target == SYNC_FOR_CPU)
+		xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+
 	/* NOTE: We use dev_addr here, not paddr! */
-	if (is_xen_swiotlb_buffer(dev_addr)) {
+	if (is_xen_swiotlb_buffer(dev_addr))
 		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
-		return;
-	}
+
+	if (target == SYNC_FOR_DEVICE)
+		xen_dma_sync_single_for_device(hwdev, paddr, size, dir);
 
 	if (dir != DMA_FROM_DEVICE)
 		return;
@@ -502,16 +547,31 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 								 sg->length,
 								 dir);
 			if (map == SWIOTLB_MAP_ERROR) {
+				dev_warn(hwdev, "swiotlb buffer is full\n");
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
 							   attrs);
 				sg_dma_len(sgl) = 0;
-				return DMA_ERROR_CODE;
+				return 0;
 			}
+			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
+						map & ~PAGE_MASK,
+						sg->length,
+						dir,
+						attrs);
 			sg->dma_address = xen_phys_to_bus(map);
-		} else
+		} else {
+			/* we are not interested in the dma_addr returned by
+			 * xen_dma_map_page, only in the potential cache flushes executed
+			 * by the function. */
+			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
+						paddr & ~PAGE_MASK,
+						sg->length,
+						dir,
+						attrs);
 			sg->dma_address = dev_addr;
+		}
 		sg_dma_len(sg) = sg->length;
 	}
 	return nelems;
@@ -533,7 +593,7 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i)
-		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
+		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
 
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
@@ -593,3 +653,15 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
+
+int
+xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
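
Note (added context, not part of the patch): the new
xen_swiotlb_set_dma_mask() refuses any mask that the bounce pool itself
cannot satisfy before storing it. A hypothetical caller showing the
usual wide-then-narrow negotiation (example_probe and its device are
illustrative, not from this patch):

	static int example_probe(struct pci_dev *pdev)
	{
		/* Try 64-bit DMA first, then fall back to 32-bit. */
		if (xen_swiotlb_set_dma_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
		    xen_swiotlb_set_dma_mask(&pdev->dev, DMA_BIT_MASK(32)))
			return -EIO;	/* bounce pool not addressable */
		return 0;
	}
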
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 38e92b770e91..3c0a74b3e9b1 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -384,12 +384,14 @@ static ssize_t nodename_show(struct device *dev,
 {
 	return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
 }
+static DEVICE_ATTR_RO(nodename);
 
 static ssize_t devtype_show(struct device *dev,
 			    struct device_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
 }
+static DEVICE_ATTR_RO(devtype);
 
 static ssize_t modalias_show(struct device *dev,
 			     struct device_attribute *attr, char *buf)
@@ -397,14 +399,24 @@ static ssize_t modalias_show(struct device *dev,
 	return sprintf(buf, "%s:%s\n", dev->bus->name,
 		       to_xenbus_device(dev)->devicetype);
 }
+static DEVICE_ATTR_RO(modalias);
 
-struct device_attribute xenbus_dev_attrs[] = {
-	__ATTR_RO(nodename),
-	__ATTR_RO(devtype),
-	__ATTR_RO(modalias),
-	__ATTR_NULL
+static struct attribute *xenbus_dev_attrs[] = {
+	&dev_attr_nodename.attr,
+	&dev_attr_devtype.attr,
+	&dev_attr_modalias.attr,
+	NULL,
 };
-EXPORT_SYMBOL_GPL(xenbus_dev_attrs);
+
+static const struct attribute_group xenbus_dev_group = {
+	.attrs = xenbus_dev_attrs,
+};
+
+const struct attribute_group *xenbus_dev_groups[] = {
+	&xenbus_dev_group,
+	NULL,
+};
+EXPORT_SYMBOL_GPL(xenbus_dev_groups);
 
 int xenbus_probe_node(struct xen_bus_type *bus,
 		      const char *type,
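
Note (added context, not part of the patch): the conversion above moves
xenbus from a dev_attrs array to the attribute-group mechanism.
DEVICE_ATTR_RO(nodename) generates the dev_attr_nodename object that
the array references; roughly (simplified from <linux/device.h>):

	#define DEVICE_ATTR_RO(_name) \
		struct device_attribute dev_attr_##_name = __ATTR_RO(_name)

	/* __ATTR_RO() wires <name>_show as the read handler with mode
	 * 0444, so &dev_attr_nodename.attr can sit directly in the
	 * attribute array exposed through the bus's .dev_groups. */
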
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 146f857a36f8..1085ec294a19 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -54,7 +54,7 @@ enum xenstore_init {
 	XS_LOCAL,
 };
 
-extern struct device_attribute xenbus_dev_attrs[];
+extern const struct attribute_group *xenbus_dev_groups[];
 
 extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
 extern int xenbus_dev_probe(struct device *_dev);
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 998bbbab816b..5125dce11a60 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -200,7 +200,7 @@ static struct xen_bus_type xenbus_backend = {
 		.probe		= xenbus_dev_probe,
 		.remove		= xenbus_dev_remove,
 		.shutdown	= xenbus_dev_shutdown,
-		.dev_attrs	= xenbus_dev_attrs,
+		.dev_groups	= xenbus_dev_groups,
 	},
 };
 
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 34b20bfa4e8c..129bf84c19ec 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -154,7 +154,7 @@ static struct xen_bus_type xenbus_frontend = {
 		.probe		= xenbus_frontend_dev_probe,
 		.remove		= xenbus_dev_remove,
 		.shutdown	= xenbus_dev_shutdown,
-		.dev_attrs	= xenbus_dev_attrs,
+		.dev_groups	= xenbus_dev_groups,
 
 		.pm	= &xenbus_pm_ops,
 	},