Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/dig/Makefile                          |   4
-rw-r--r--  arch/ia64/dig/dig_vtd_iommu.c                   |  59
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c                | 165
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c                 |  79
-rw-r--r--  arch/ia64/hp/sim/simserial.c                    |  49
-rw-r--r--  arch/ia64/ia32/ia32_entry.S                     |   2
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h             | 194
-rw-r--r--  arch/ia64/include/asm/kvm.h                     |  49
-rw-r--r--  arch/ia64/include/asm/kvm_host.h                |  18
-rw-r--r--  arch/ia64/include/asm/machvec.h                 | 102
-rw-r--r--  arch/ia64/include/asm/machvec_dig_vtd.h         |  20
-rw-r--r--  arch/ia64/include/asm/machvec_hpzx1.h           |  23
-rw-r--r--  arch/ia64/include/asm/machvec_hpzx1_swiotlb.h   |  27
-rw-r--r--  arch/ia64/include/asm/machvec_sn2.h             |  27
-rw-r--r--  arch/ia64/include/asm/msidef.h                  |  42
-rw-r--r--  arch/ia64/include/asm/socket.h                  |   3
-rw-r--r--  arch/ia64/include/asm/topology.h                |   5
-rw-r--r--  arch/ia64/kernel/Makefile                       |   4
-rw-r--r--  arch/ia64/kernel/dma-mapping.c                  |  13
-rw-r--r--  arch/ia64/kernel/irq.c                          |   2
-rw-r--r--  arch/ia64/kernel/irq_ia64.c                     |  31
-rw-r--r--  arch/ia64/kernel/machvec.c                      |   8
-rw-r--r--  arch/ia64/kernel/msi_ia64.c                     |  55
-rw-r--r--  arch/ia64/kernel/palinfo.c                      |   2
-rw-r--r--  arch/ia64/kernel/pci-dma.c                      |  46
-rw-r--r--  arch/ia64/kernel/pci-swiotlb.c                  |  30
-rw-r--r--  arch/ia64/kernel/perfmon.c                      |   2
-rw-r--r--  arch/ia64/kernel/time.c                         |  16
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S                  |  12
-rw-r--r--  arch/ia64/kvm/Kconfig                           |   4
-rw-r--r--  arch/ia64/kvm/irq.h                             |   2
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c                        | 125
-rw-r--r--  arch/ia64/kvm/kvm_fw.c                          | 151
-rw-r--r--  arch/ia64/kvm/process.c                         |  71
-rw-r--r--  arch/ia64/kvm/vcpu.c                            |  44
-rw-r--r--  arch/ia64/kvm/vcpu.h                            |   4
-rw-r--r--  arch/ia64/kvm/vtlb.c                            |  44
-rw-r--r--  arch/ia64/sn/kernel/sn2/prominfo_proc.c         |   9
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c                      |  99
39 files changed, 836 insertions(+), 806 deletions(-)
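Much of this diff converts the ia64 machvec DMA hooks to the generic struct dma_map_ops interface. A condensed, illustrative sketch of the consumer-side dispatch the patch moves to (it follows the new arch/ia64/include/asm/dma-mapping.h further down; example_map_single is a hypothetical wrapper name, not part of the patch):

/* Fetch the per-device ops table, then dispatch through it. */
static inline dma_addr_t example_map_single(struct device *dev, void *cpu_addr,
					    size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);

	/* map_page is the primitive; a single mapping is page + offset */
	return ops->map_page(dev, virt_to_page(cpu_addr),
			     (unsigned long)cpu_addr & ~PAGE_MASK,
			     size, dir, NULL);
}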
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile
index 5c0283830bd6..2f7caddf093e 100644
--- a/arch/ia64/dig/Makefile
+++ b/arch/ia64/dig/Makefile
@@ -7,8 +7,8 @@
 
 obj-y := setup.o
 ifeq ($(CONFIG_DMAR), y)
-obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o
+obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o
 else
 obj-$(CONFIG_IA64_GENERIC) += machvec.o
 endif
-obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o
+
diff --git a/arch/ia64/dig/dig_vtd_iommu.c b/arch/ia64/dig/dig_vtd_iommu.c
deleted file mode 100644
index 1c8a079017a3..000000000000
--- a/arch/ia64/dig/dig_vtd_iommu.c
+++ /dev/null
@@ -1,59 +0,0 @@
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/intel-iommu.h>
-
-void *
-vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flags)
-{
-	return intel_alloc_coherent(dev, size, dma_handle, flags);
-}
-EXPORT_SYMBOL_GPL(vtd_alloc_coherent);
-
-void
-vtd_free_coherent(struct device *dev, size_t size, void *vaddr,
-		  dma_addr_t dma_handle)
-{
-	intel_free_coherent(dev, size, vaddr, dma_handle);
-}
-EXPORT_SYMBOL_GPL(vtd_free_coherent);
-
-dma_addr_t
-vtd_map_single_attrs(struct device *dev, void *addr, size_t size,
-		     int dir, struct dma_attrs *attrs)
-{
-	return intel_map_single(dev, (phys_addr_t)addr, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_single_attrs);
-
-void
-vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-		       int dir, struct dma_attrs *attrs)
-{
-	intel_unmap_single(dev, iova, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs);
-
-int
-vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		 int dir, struct dma_attrs *attrs)
-{
-	return intel_map_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_sg_attrs);
-
-void
-vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-		   int nents, int dir, struct dma_attrs *attrs)
-{
-	intel_unmap_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs);
-
-int
-vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-EXPORT_SYMBOL_GPL(vtd_dma_mapping_error);
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 2769dbfd03bf..e4a80d82e3d8 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -13,49 +13,34 @@
  */
 
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/swiotlb.h>
-
 #include <asm/machvec.h>
 
+extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
 
-/* hwiommu declarations & definitions: */
-
-extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent	sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs	sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported		sba_dma_supported;
-extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
-
-#define hwiommu_alloc_coherent		sba_alloc_coherent
-#define hwiommu_free_coherent		sba_free_coherent
-#define hwiommu_map_single_attrs	sba_map_single_attrs
-#define hwiommu_unmap_single_attrs	sba_unmap_single_attrs
-#define hwiommu_map_sg_attrs		sba_map_sg_attrs
-#define hwiommu_unmap_sg_attrs		sba_unmap_sg_attrs
-#define hwiommu_dma_supported		sba_dma_supported
-#define hwiommu_dma_mapping_error	sba_dma_mapping_error
-#define hwiommu_sync_single_for_cpu	machvec_dma_sync_single
-#define hwiommu_sync_sg_for_cpu		machvec_dma_sync_sg
-#define hwiommu_sync_single_for_device	machvec_dma_sync_single
-#define hwiommu_sync_sg_for_device	machvec_dma_sync_sg
-
-
 /*
  * Note: we need to make the determination of whether or not to use
  * the sw I/O TLB based purely on the device structure.  Anything else
  * would be unreliable or would be too intrusive.
  */
-static inline int
-use_swiotlb (struct device *dev)
+static inline int use_swiotlb(struct device *dev)
 {
-	return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask);
+	return dev && dev->dma_mask &&
+		!sba_dma_ops.dma_supported(dev, *dev->dma_mask);
 }
 
+struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
+{
+	if (use_swiotlb(dev))
+		return &swiotlb_dma_ops;
+	return &sba_dma_ops;
+}
+EXPORT_SYMBOL(hwsw_dma_get_ops);
+
 void __init
 hwsw_init (void)
 {
@@ -71,125 +56,3 @@ hwsw_init (void)
 #endif
 	}
 }
-
-void *
-hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-	else
-		return hwiommu_alloc_coherent(dev, size, dma_handle, flags);
-}
-
-void
-hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
-{
-	if (use_swiotlb(dev))
-		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-	else
-		hwiommu_free_coherent(dev, size, vaddr, dma_handle);
-}
-
-dma_addr_t
-hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-		      struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
-	else
-		return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_single_attrs);
-
-void
-hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-			int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
-	else
-		return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_single_attrs);
-
-int
-hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		  int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
-	else
-		return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_sg_attrs);
-
-void
-hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		    int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-	else
-		return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
-
-void
-hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_single_for_cpu(dev, addr, size, dir);
-	else
-		hwiommu_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir);
-	else
-		hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-void
-hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_single_for_device(dev, addr, size, dir);
-	else
-		hwiommu_sync_single_for_device(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_sg_for_device(dev, sg, nelems, dir);
-	else
-		hwiommu_sync_sg_for_device(dev, sg, nelems, dir);
-}
-
-int
-hwsw_dma_supported (struct device *dev, u64 mask)
-{
-	if (hwiommu_dma_supported(dev, mask))
-		return 1;
-	return swiotlb_dma_supported(dev, mask);
-}
-
-int
-hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return hwiommu_dma_mapping_error(dev, dma_addr) ||
-		swiotlb_dma_mapping_error(dev, dma_addr);
-}
-
-EXPORT_SYMBOL(hwsw_dma_mapping_error);
-EXPORT_SYMBOL(hwsw_dma_supported);
-EXPORT_SYMBOL(hwsw_alloc_coherent);
-EXPORT_SYMBOL(hwsw_free_coherent);
-EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_single_for_device);
-EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_sg_for_device);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 6d5e6c5630e3..56ceb68eb99d 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -36,6 +36,7 @@
 #include <linux/bitops.h>         /* hweight64() */
 #include <linux/crash_dump.h>
 #include <linux/iommu-helper.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/delay.h>		/* ia64_get_itc() */
 #include <asm/io.h>
@@ -908,11 +909,13 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-dma_addr_t
-sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-		     struct dma_attrs *attrs)
+static dma_addr_t sba_map_page(struct device *dev, struct page *page,
+			       unsigned long poff, size_t size,
+			       enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
+	void *addr = page_address(page) + poff;
 	dma_addr_t iovp;
 	dma_addr_t offset;
 	u64 *pdir_start;
@@ -990,7 +993,14 @@ sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
 #endif
 	return SBA_IOVA(ioc, iovp, offset);
 }
-EXPORT_SYMBOL(sba_map_single_attrs);
+
+static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
+				       size_t size, enum dma_data_direction dir,
+				       struct dma_attrs *attrs)
+{
+	return sba_map_page(dev, virt_to_page(addr),
+			    (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
+}
 
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
@@ -1026,8 +1036,8 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-			    int dir, struct dma_attrs *attrs)
+static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+			   enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1094,7 +1104,12 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
-EXPORT_SYMBOL(sba_unmap_single_attrs);
+
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			    enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	sba_unmap_page(dev, iova, size, dir, attrs);
+}
 
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1104,7 +1119,7 @@ EXPORT_SYMBOL(sba_unmap_single_attrs);
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void *
+static void *
 sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
 {
 	struct ioc *ioc;
@@ -1167,7 +1182,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
+			       dma_addr_t dma_handle)
 {
 	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
 	free_pages((unsigned long) vaddr, get_order(size));
@@ -1422,8 +1438,9 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		     int dir, struct dma_attrs *attrs)
+static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			    int nents, enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -1502,7 +1519,6 @@ int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
 
 	return filled;
 }
-EXPORT_SYMBOL(sba_map_sg_attrs);
 
 /**
  * sba_unmap_sg_attrs - unmap Scatter/Gather list
@@ -1514,8 +1530,9 @@ EXPORT_SYMBOL(sba_map_sg_attrs);
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-			int nents, int dir, struct dma_attrs *attrs)
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			       int nents, enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
 	struct ioc *ioc;
@@ -1551,7 +1568,6 @@ void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
 #endif
 
 }
-EXPORT_SYMBOL(sba_unmap_sg_attrs);
 
 /**************************************************************
 *
@@ -2064,6 +2080,8 @@ static struct acpi_driver acpi_sba_ioc_driver = {
 	},
 };
 
+extern struct dma_map_ops swiotlb_dma_ops;
+
 static int __init
 sba_init(void)
 {
@@ -2077,6 +2095,7 @@ sba_init(void)
 	 * a successful kdump kernel boot is to use the swiotlb.
 	 */
 	if (is_kdump_kernel()) {
+		dma_ops = &swiotlb_dma_ops;
 		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
 			panic("Unable to initialize software I/O TLB:"
 			      " Try machvec=dig boot option");
@@ -2092,6 +2111,7 @@ sba_init(void)
 	 * If we didn't find something sba_iommu can claim, we
 	 * need to setup the swiotlb and switch to the dig machvec.
 	 */
+	dma_ops = &swiotlb_dma_ops;
 	if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
 		panic("Unable to find SBA IOMMU or initialize "
 		      "software I/O TLB: Try machvec=dig boot option");
@@ -2138,15 +2158,13 @@ nosbagart(char *str)
 	return 1;
 }
 
-int
-sba_dma_supported (struct device *dev, u64 mask)
+static int sba_dma_supported (struct device *dev, u64 mask)
 {
 	/* make sure it's at least 32bit capable */
 	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
 }
 
-int
-sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
@@ -2176,7 +2194,22 @@ sba_page_override(char *str)
 
 __setup("sbapagesize=",sba_page_override);
 
-EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_dma_supported);
-EXPORT_SYMBOL(sba_alloc_coherent);
-EXPORT_SYMBOL(sba_free_coherent);
+struct dma_map_ops sba_dma_ops = {
+	.alloc_coherent		= sba_alloc_coherent,
+	.free_coherent		= sba_free_coherent,
+	.map_page		= sba_map_page,
+	.unmap_page		= sba_unmap_page,
+	.map_sg			= sba_map_sg_attrs,
+	.unmap_sg		= sba_unmap_sg_attrs,
+	.sync_single_for_cpu	= machvec_dma_sync_single,
+	.sync_sg_for_cpu	= machvec_dma_sync_sg,
+	.sync_single_for_device	= machvec_dma_sync_single,
+	.sync_sg_for_device	= machvec_dma_sync_sg,
+	.dma_supported		= sba_dma_supported,
+	.mapping_error		= sba_dma_mapping_error,
+};
+
+void sba_dma_init(void)
+{
+	dma_ops = &sba_dma_ops;
+}
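On the provider side, the pattern introduced above (sba_dma_ops plus sba_dma_init) comes down to filling in a dma_map_ops table and installing it as the global default. A hypothetical skeleton, with placeholder callback names (my_*), not taken from the patch:

static struct dma_map_ops my_iommu_dma_ops = {
	.alloc_coherent		= my_alloc_coherent,	/* placeholders for the real callbacks */
	.free_coherent		= my_free_coherent,
	.map_page		= my_map_page,
	.unmap_page		= my_unmap_page,
	.dma_supported		= my_dma_supported,
	.mapping_error		= my_mapping_error,
};

void my_dma_init(void)			/* wired up as the machvec's platform_dma_init */
{
	dma_ops = &my_iommu_dma_ops;	/* becomes the default table returned by dma_get_ops() */
}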
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 24b1ad5334cb..2bef5261d96d 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -24,6 +24,7 @@
 #include <linux/major.h>
 #include <linux/fcntl.h>
 #include <linux/mm.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/capability.h>
 #include <linux/console.h>
@@ -848,38 +849,36 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
  * /proc fs routines....
  */
 
-static inline int line_info(char *buf, struct serial_state *state)
+static inline void line_info(struct seq_file *m, struct serial_state *state)
 {
-	return sprintf(buf, "%d: uart:%s port:%lX irq:%d\n",
+	seq_printf(m, "%d: uart:%s port:%lX irq:%d\n",
 	       state->line, uart_config[state->type].name,
 	       state->port, state->irq);
 }
 
-static int rs_read_proc(char *page, char **start, off_t off, int count,
-			int *eof, void *data)
+static int rs_proc_show(struct seq_file *m, void *v)
 {
-	int i, len = 0, l;
-	off_t begin = 0;
-
-	len += sprintf(page, "simserinfo:1.0 driver:%s\n", serial_version);
-	for (i = 0; i < NR_PORTS && len < 4000; i++) {
-		l = line_info(page + len, &rs_table[i]);
-		len += l;
-		if (len+begin > off+count)
-			goto done;
-		if (len+begin < off) {
-			begin += len;
-			len = 0;
-		}
-	}
-	*eof = 1;
-done:
-	if (off >= len+begin)
-		return 0;
-	*start = page + (begin-off);
-	return ((count < begin+len-off) ? count : begin+len-off);
+	int i;
+
+	seq_printf(m, "simserinfo:1.0 driver:%s\n", serial_version);
+	for (i = 0; i < NR_PORTS; i++)
+		line_info(m, &rs_table[i]);
+	return 0;
 }
 
+static int rs_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rs_proc_show, NULL);
+}
+
+static const struct file_operations rs_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= rs_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 /*
  * ---------------------------------------------------------------------
  * rs_init() and friends
@@ -917,7 +916,7 @@ static const struct tty_operations hp_ops = {
 	.start = rs_start,
 	.hangup = rs_hangup,
 	.wait_until_sent = rs_wait_until_sent,
-	.read_proc = rs_read_proc,
+	.proc_fops = &rs_proc_fops,
 };
 
 /*
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index a46f8395e9a5..af9405cd70e5 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -240,7 +240,7 @@ ia32_syscall_table:
 	data8 sys_ni_syscall
 	data8 sys_umask	/* 60 */
 	data8 sys_chroot
-	data8 sys_ustat
+	data8 compat_sys_ustat
 	data8 sys_dup2
 	data8 sys_getppid
 	data8 sys_getpgrp	/* 65 */
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 1f912d927585..36c0009dbece 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -11,99 +11,128 @@
 
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
-struct dma_mapping_ops {
-	int (*mapping_error)(struct device *dev,
-			     dma_addr_t dma_addr);
-	void* (*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void (*free_coherent)(struct device *dev, size_t size,
-			      void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t (*map_single)(struct device *hwdev, unsigned long ptr,
-				 size_t size, int direction);
-	void (*unmap_single)(struct device *dev, dma_addr_t addr,
-			     size_t size, int direction);
-	void (*sync_single_for_cpu)(struct device *hwdev,
-				    dma_addr_t dma_handle, size_t size,
-				    int direction);
-	void (*sync_single_for_device)(struct device *hwdev,
-				       dma_addr_t dma_handle, size_t size,
-				       int direction);
-	void (*sync_single_range_for_cpu)(struct device *hwdev,
-					  dma_addr_t dma_handle, unsigned long offset,
-					  size_t size, int direction);
-	void (*sync_single_range_for_device)(struct device *hwdev,
-					     dma_addr_t dma_handle, unsigned long offset,
-					     size_t size, int direction);
-	void (*sync_sg_for_cpu)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	void (*sync_sg_for_device)(struct device *hwdev,
-				   struct scatterlist *sg, int nelems,
-				   int direction);
-	int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
-		      int nents, int direction);
-	void (*unmap_sg)(struct device *hwdev,
-			 struct scatterlist *sg, int nents,
-			 int direction);
-	int (*dma_supported_op)(struct device *hwdev, u64 mask);
-	int is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
+extern struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
 
-#define dma_alloc_coherent(dev, size, handle, gfp)	\
-	platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
+extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
+				    enum dma_data_direction);
+extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
+				enum dma_data_direction);
 
-/* coherent mem. is cheap */
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		      gfp_t flag)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *daddr, gfp_t gfp)
 {
-	return dma_alloc_coherent(dev, size, dma_handle, flag);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->alloc_coherent(dev, size, daddr, gfp);
 }
-#define dma_free_coherent	platform_dma_free_coherent
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
-		     dma_addr_t dma_handle)
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *caddr, dma_addr_t daddr)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->free_coherent(dev, size, caddr, daddr);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev,
+					      void *caddr, size_t size,
+					      enum dma_data_direction dir,
+					      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_page(dev, virt_to_page(caddr),
+			     (unsigned long)caddr & ~PAGE_MASK, size,
+			     dir, attrs);
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
+					  size_t size,
+					  enum dma_data_direction dir,
+					  struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->unmap_page(dev, daddr, size, dir, attrs);
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+				   int nents, enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_sg(dev, sgl, nents, dir, attrs);
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev,
+				      struct scatterlist *sgl, int nents,
+				      enum dma_data_direction dir,
+				      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->unmap_sg(dev, sgl, nents, dir, attrs);
+}
+
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
+					   size_t size,
+					   enum dma_data_direction dir)
 {
-	dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_single_for_cpu(dev, daddr, size, dir);
 }
-#define dma_map_single_attrs	platform_dma_map_single_attrs
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size, int dir)
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sgl,
+				       int nents, enum dma_data_direction dir)
 {
-	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
 }
-#define dma_map_sg_attrs	platform_dma_map_sg_attrs
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
-			     int nents, int dir)
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t daddr,
+					      size_t size,
+					      enum dma_data_direction dir)
 {
-	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_single_for_device(dev, daddr, size, dir);
 }
-#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
-static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
-				    size_t size, int dir)
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+					  struct scatterlist *sgl,
+					  int nents,
+					  enum dma_data_direction dir)
 {
-	return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_sg_for_device(dev, sgl, nents, dir);
 }
-#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-				int nents, int dir)
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->mapping_error(dev, daddr);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      size_t offset, size_t size,
+				      enum dma_data_direction dir)
 {
-	return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_page(dev, page, offset, size, dir, NULL);
 }
-#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
-#define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
-#define dma_sync_single_for_device platform_dma_sync_single_for_device
-#define dma_sync_sg_for_device	platform_dma_sync_sg_for_device
-#define dma_mapping_error	platform_dma_mapping_error
 
-#define dma_map_page(dev, pg, off, size, dir)				\
-	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
-#define dma_unmap_page(dev, dma_addr, size, dir)			\
-	dma_unmap_single(dev, dma_addr, size, dir)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+				  size_t size, enum dma_data_direction dir)
+{
+	dma_unmap_single(dev, addr, size, dir);
+}
 
 /*
  * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
@@ -115,7 +144,11 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
 	dma_sync_single_for_device(dev, dma_handle, size, dir)
 
-#define dma_supported		platform_dma_supported
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->dma_supported(dev, mask);
+}
 
 static inline int
 dma_set_mask (struct device *dev, u64 mask)
@@ -141,11 +174,4 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
 
 #define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */
 
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-	return dma_ops;
-}
-
-
-
 #endif /* _ASM_IA64_DMA_MAPPING_H */
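The inline wrappers above keep the driver-facing calls exactly as before; a hypothetical driver snippet for illustration (the buffer and length are placeholders, not taken from the patch):

	void *buf = kmalloc(256, GFP_KERNEL);
	dma_addr_t handle = dma_map_single(dev, buf, 256, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		goto fail;		/* mapping refused by the active dma_map_ops */
	/* ... hand "handle" to the device, wait for completion ... */
	dma_unmap_single(dev, handle, 256, DMA_TO_DEVICE);
	kfree(buf);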
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index 2b0a38e84705..18a7e49abbc5 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -165,7 +165,40 @@ struct saved_vpd {
 	unsigned long vcpuid[5];
 	unsigned long vpsr;
 	unsigned long vpr;
-	unsigned long vcr[128];
+	union {
+		unsigned long vcr[128];
+		struct {
+			unsigned long dcr;
+			unsigned long itm;
+			unsigned long iva;
+			unsigned long rsv1[5];
+			unsigned long pta;
+			unsigned long rsv2[7];
+			unsigned long ipsr;
+			unsigned long isr;
+			unsigned long rsv3;
+			unsigned long iip;
+			unsigned long ifa;
+			unsigned long itir;
+			unsigned long iipa;
+			unsigned long ifs;
+			unsigned long iim;
+			unsigned long iha;
+			unsigned long rsv4[38];
+			unsigned long lid;
+			unsigned long ivr;
+			unsigned long tpr;
+			unsigned long eoi;
+			unsigned long irr[4];
+			unsigned long itv;
+			unsigned long pmv;
+			unsigned long cmcv;
+			unsigned long rsv5[5];
+			unsigned long lrr0;
+			unsigned long lrr1;
+			unsigned long rsv6[46];
+		};
+	};
 };
 
 struct kvm_regs {
@@ -213,4 +246,18 @@ struct kvm_sregs {
 struct kvm_fpu {
 };
 
+#define KVM_IA64_VCPU_STACK_SHIFT	16
+#define KVM_IA64_VCPU_STACK_SIZE	(1UL << KVM_IA64_VCPU_STACK_SHIFT)
+
+struct kvm_ia64_vcpu_stack {
+	unsigned char stack[KVM_IA64_VCPU_STACK_SIZE];
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
 #endif
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 348663661659..4542651e6acb 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -112,7 +112,11 @@
 #define VCPU_STRUCT_SHIFT	16
 #define VCPU_STRUCT_SIZE	(__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)
 
-#define KVM_STK_OFFSET		VCPU_STRUCT_SIZE
+/*
+ * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h
+ */
+#define KVM_STK_SHIFT		16
+#define KVM_STK_OFFSET		(__IA64_UL_CONST(1)<< KVM_STK_SHIFT)
 
 #define KVM_VM_STRUCT_SHIFT	19
 #define KVM_VM_STRUCT_SIZE	(__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
@@ -153,10 +157,10 @@ struct kvm_vm_data {
 	struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
 };
 
-#define VCPU_BASE(n)	KVM_VM_DATA_BASE + \
-				offsetof(struct kvm_vm_data, vcpu_data[n])
-#define VM_BASE		KVM_VM_DATA_BASE + \
-				offsetof(struct kvm_vm_data, kvm_vm_struct)
+#define VCPU_BASE(n)	(KVM_VM_DATA_BASE + \
+				offsetof(struct kvm_vm_data, vcpu_data[n]))
+#define KVM_VM_BASE	(KVM_VM_DATA_BASE + \
+				offsetof(struct kvm_vm_data, kvm_vm_struct))
 #define KVM_MEM_DIRTY_LOG_BASE	KVM_VM_DATA_BASE + \
 				offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
 
@@ -235,8 +239,6 @@ struct kvm_vm_data {
 
 struct kvm;
 struct kvm_vcpu;
-struct kvm_guest_debug{
-};
 
 struct kvm_mmio_req {
 	uint64_t addr;		/* physical address */
@@ -462,6 +464,8 @@ struct kvm_arch {
 	unsigned long	metaphysical_rr4;
 	unsigned long	vmm_init_rr;
 
+	int		online_vcpus;
+
 	struct kvm_ioapic *vioapic;
 	struct kvm_vm_stat stat;
 	struct kvm_sal_data rdv_sal_data;
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index fe87b2121707..367d299d9938 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -11,7 +11,6 @@
 #define _ASM_IA64_MACHVEC_H
 
 #include <linux/types.h>
-#include <linux/swiotlb.h>
 
 /* forward declarations: */
 struct device;
@@ -45,24 +44,8 @@ typedef void ia64_mv_kernel_launch_event_t(void);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
-typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
-typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
-typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
-typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
-typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
-typedef int ia64_mv_dma_supported (struct device *, u64);
-
-typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
-typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
+typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
 
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
@@ -114,8 +97,6 @@ machvec_noop_bus (struct pci_bus *bus)
 
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *);
-extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
-extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
 extern void machvec_tlb_migrate_finish (struct mm_struct *);
 
 # if defined (CONFIG_IA64_HP_SIM)
@@ -148,19 +129,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 # define platform_global_tlb_purge	ia64_mv.global_tlb_purge
 # define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
 # define platform_dma_init		ia64_mv.dma_init
-# define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
-# define platform_dma_free_coherent	ia64_mv.dma_free_coherent
-# define platform_dma_map_single_attrs	ia64_mv.dma_map_single_attrs
-# define platform_dma_unmap_single_attrs	ia64_mv.dma_unmap_single_attrs
-# define platform_dma_map_sg_attrs	ia64_mv.dma_map_sg_attrs
-# define platform_dma_unmap_sg_attrs	ia64_mv.dma_unmap_sg_attrs
-# define platform_dma_sync_single_for_cpu	ia64_mv.dma_sync_single_for_cpu
-# define platform_dma_sync_sg_for_cpu	ia64_mv.dma_sync_sg_for_cpu
-# define platform_dma_sync_single_for_device	ia64_mv.dma_sync_single_for_device
-# define platform_dma_sync_sg_for_device	ia64_mv.dma_sync_sg_for_device
-# define platform_dma_mapping_error	ia64_mv.dma_mapping_error
-# define platform_dma_supported		ia64_mv.dma_supported
 # define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
+# define platform_dma_get_ops		ia64_mv.dma_get_ops
 # define platform_irq_to_vector		ia64_mv.irq_to_vector
 # define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
 # define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
@@ -203,19 +173,8 @@ struct ia64_machine_vector {
 	ia64_mv_global_tlb_purge_t *global_tlb_purge;
 	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
 	ia64_mv_dma_init *dma_init;
-	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
-	ia64_mv_dma_free_coherent *dma_free_coherent;
-	ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
-	ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
-	ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
-	ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
-	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
-	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
-	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
-	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
-	ia64_mv_dma_mapping_error *dma_mapping_error;
-	ia64_mv_dma_supported *dma_supported;
 	ia64_mv_dma_get_required_mask *dma_get_required_mask;
+	ia64_mv_dma_get_ops *dma_get_ops;
 	ia64_mv_irq_to_vector *irq_to_vector;
 	ia64_mv_local_vector_to_irq *local_vector_to_irq;
 	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -254,19 +213,8 @@ struct ia64_machine_vector {
 	platform_global_tlb_purge,		\
 	platform_tlb_migrate_finish,		\
 	platform_dma_init,			\
-	platform_dma_alloc_coherent,		\
-	platform_dma_free_coherent,		\
-	platform_dma_map_single_attrs,		\
-	platform_dma_unmap_single_attrs,	\
-	platform_dma_map_sg_attrs,		\
-	platform_dma_unmap_sg_attrs,		\
-	platform_dma_sync_single_for_cpu,	\
-	platform_dma_sync_sg_for_cpu,		\
-	platform_dma_sync_single_for_device,	\
-	platform_dma_sync_sg_for_device,	\
-	platform_dma_mapping_error,		\
-	platform_dma_supported,			\
 	platform_dma_get_required_mask,		\
+	platform_dma_get_ops,			\
 	platform_irq_to_vector,			\
 	platform_local_vector_to_irq,		\
 	platform_pci_get_legacy_mem,		\
@@ -302,6 +250,9 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # error Unknown configuration.  Update arch/ia64/include/asm/machvec.h.
 # endif /* CONFIG_IA64_GENERIC */
 
+extern void swiotlb_dma_init(void);
+extern struct dma_map_ops *dma_get_ops(struct device *);
+
 /*
  * Define default versions so we can extend machvec for new platforms without having
  * to update the machvec files for all existing platforms.
@@ -332,43 +283,10 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # define platform_kernel_launch_event	machvec_noop
 #endif
 #ifndef platform_dma_init
-# define platform_dma_init		swiotlb_init
-#endif
-#ifndef platform_dma_alloc_coherent
-# define platform_dma_alloc_coherent	swiotlb_alloc_coherent
-#endif
-#ifndef platform_dma_free_coherent
-# define platform_dma_free_coherent	swiotlb_free_coherent
-#endif
-#ifndef platform_dma_map_single_attrs
-# define platform_dma_map_single_attrs	swiotlb_map_single_attrs
-#endif
-#ifndef platform_dma_unmap_single_attrs
-# define platform_dma_unmap_single_attrs	swiotlb_unmap_single_attrs
-#endif
-#ifndef platform_dma_map_sg_attrs
-# define platform_dma_map_sg_attrs	swiotlb_map_sg_attrs
-#endif
-#ifndef platform_dma_unmap_sg_attrs
-# define platform_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs
-#endif
-#ifndef platform_dma_sync_single_for_cpu
-# define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
-#endif
-#ifndef platform_dma_sync_sg_for_cpu
-# define platform_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu
-#endif
-#ifndef platform_dma_sync_single_for_device
-# define platform_dma_sync_single_for_device	swiotlb_sync_single_for_device
-#endif
-#ifndef platform_dma_sync_sg_for_device
-# define platform_dma_sync_sg_for_device	swiotlb_sync_sg_for_device
-#endif
-#ifndef platform_dma_mapping_error
-# define platform_dma_mapping_error		swiotlb_dma_mapping_error
+# define platform_dma_init		swiotlb_dma_init
 #endif
-#ifndef platform_dma_supported
-# define platform_dma_supported		swiotlb_dma_supported
+#ifndef platform_dma_get_ops
+# define platform_dma_get_ops		dma_get_ops
 #endif
 #ifndef platform_dma_get_required_mask
 # define platform_dma_get_required_mask	ia64_dma_get_required_mask
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
index 3400b561e711..6ab1de5c45ef 100644
--- a/arch/ia64/include/asm/machvec_dig_vtd.h
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -2,14 +2,6 @@
 #define _ASM_IA64_MACHVEC_DIG_VTD_h
 
 extern ia64_mv_setup_t			dig_setup;
-extern ia64_mv_dma_alloc_coherent	vtd_alloc_coherent;
-extern ia64_mv_dma_free_coherent	vtd_free_coherent;
-extern ia64_mv_dma_map_single_attrs	vtd_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	vtd_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		vtd_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	vtd_unmap_sg_attrs;
-extern ia64_mv_dma_supported		iommu_dma_supported;
-extern ia64_mv_dma_mapping_error	vtd_dma_mapping_error;
 extern ia64_mv_dma_init			pci_iommu_alloc;
 
 /*
@@ -22,17 +14,5 @@ extern ia64_mv_dma_init pci_iommu_alloc;
 #define platform_name			"dig_vtd"
 #define platform_setup			dig_setup
 #define platform_dma_init		pci_iommu_alloc
-#define platform_dma_alloc_coherent		vtd_alloc_coherent
-#define platform_dma_free_coherent		vtd_free_coherent
-#define platform_dma_map_single_attrs		vtd_map_single_attrs
-#define platform_dma_unmap_single_attrs	vtd_unmap_single_attrs
-#define platform_dma_map_sg_attrs		vtd_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		vtd_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device	machvec_dma_sync_sg
-#define platform_dma_supported			iommu_dma_supported
-#define platform_dma_mapping_error		vtd_dma_mapping_error
 
 #endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1.h b/arch/ia64/include/asm/machvec_hpzx1.h
index 2f57f5144b9f..3bd83d78a412 100644
--- a/arch/ia64/include/asm/machvec_hpzx1.h
+++ b/arch/ia64/include/asm/machvec_hpzx1.h
@@ -2,14 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_h
 
 extern ia64_mv_setup_t			dig_setup;
-extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent	sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs	sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported		sba_dma_supported;
-extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
+extern ia64_mv_dma_init			sba_dma_init;
 
 /*
  * This stuff has dual use!
@@ -20,18 +13,6 @@ extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
  */
 #define platform_name				"hpzx1"
 #define platform_setup				dig_setup
-#define platform_dma_init			machvec_noop
-#define platform_dma_alloc_coherent		sba_alloc_coherent
-#define platform_dma_free_coherent		sba_free_coherent
-#define platform_dma_map_single_attrs		sba_map_single_attrs
-#define platform_dma_unmap_single_attrs	sba_unmap_single_attrs
-#define platform_dma_map_sg_attrs		sba_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		sba_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device	machvec_dma_sync_sg
-#define platform_dma_supported			sba_dma_supported
-#define platform_dma_mapping_error		sba_dma_mapping_error
+#define platform_dma_init			sba_dma_init
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
index a842cdda827b..1091ac39740c 100644
--- a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
+++ b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
@@ -2,18 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
 
 extern ia64_mv_setup_t				dig_setup;
-extern ia64_mv_dma_alloc_coherent		hwsw_alloc_coherent;
-extern ia64_mv_dma_free_coherent		hwsw_free_coherent;
-extern ia64_mv_dma_map_single_attrs		hwsw_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs		hwsw_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		hwsw_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs		hwsw_unmap_sg_attrs;
-extern ia64_mv_dma_supported			hwsw_dma_supported;
-extern ia64_mv_dma_mapping_error		hwsw_dma_mapping_error;
-extern ia64_mv_dma_sync_single_for_cpu	hwsw_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu		hwsw_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device	hwsw_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device		hwsw_sync_sg_for_device;
+extern ia64_mv_dma_get_ops			hwsw_dma_get_ops;
 
 /*
  * This stuff has dual use!
@@ -23,20 +12,8 @@ extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
  * the macros are used directly.
  */
 #define platform_name				"hpzx1_swiotlb"
-
 #define platform_setup				dig_setup
 #define platform_dma_init			machvec_noop
-#define platform_dma_alloc_coherent		hwsw_alloc_coherent
-#define platform_dma_free_coherent		hwsw_free_coherent
-#define platform_dma_map_single_attrs		hwsw_map_single_attrs
-#define platform_dma_unmap_single_attrs	hwsw_unmap_single_attrs
-#define platform_dma_map_sg_attrs		hwsw_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		hwsw_unmap_sg_attrs
-#define platform_dma_supported			hwsw_dma_supported
-#define platform_dma_mapping_error		hwsw_dma_mapping_error
-#define platform_dma_sync_single_for_cpu	hwsw_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu		hwsw_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device	hwsw_sync_single_for_device
-#define platform_dma_sync_sg_for_device	hwsw_sync_sg_for_device
+#define platform_dma_get_ops			hwsw_dma_get_ops
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index f1a6e0d6dfa5..f061a30aac42 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -55,19 +55,8 @@ extern ia64_mv_readb_t __sn_readb_relaxed;
 extern ia64_mv_readw_t __sn_readw_relaxed;
 extern ia64_mv_readl_t __sn_readl_relaxed;
 extern ia64_mv_readq_t __sn_readq_relaxed;
-extern ia64_mv_dma_alloc_coherent	sn_dma_alloc_coherent;
-extern ia64_mv_dma_free_coherent	sn_dma_free_coherent;
-extern ia64_mv_dma_map_single_attrs	sn_dma_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	sn_dma_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		sn_dma_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	sn_dma_unmap_sg_attrs;
-extern ia64_mv_dma_sync_single_for_cpu	sn_dma_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device	sn_dma_sync_sg_for_device;
-extern ia64_mv_dma_mapping_error	sn_dma_mapping_error;
-extern ia64_mv_dma_supported		sn_dma_supported;
 extern ia64_mv_dma_get_required_mask	sn_dma_get_required_mask;
+extern ia64_mv_dma_init			sn_dma_init;
 extern ia64_mv_migrate_t		sn_migrate;
 extern ia64_mv_kernel_launch_event_t	sn_kernel_launch_event;
 extern ia64_mv_setup_msi_irq_t		sn_setup_msi_irq;
@@ -111,20 +100,8 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
 #define platform_pci_get_legacy_mem	sn_pci_get_legacy_mem
 #define platform_pci_legacy_read	sn_pci_legacy_read
 #define platform_pci_legacy_write	sn_pci_legacy_write
-#define platform_dma_init		machvec_noop
-#define platform_dma_alloc_coherent	sn_dma_alloc_coherent
-#define platform_dma_free_coherent	sn_dma_free_coherent
-#define platform_dma_map_single_attrs	sn_dma_map_single_attrs
-#define platform_dma_unmap_single_attrs	sn_dma_unmap_single_attrs
-#define platform_dma_map_sg_attrs	sn_dma_map_sg_attrs
-#define platform_dma_unmap_sg_attrs	sn_dma_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
-#define platform_dma_sync_sg_for_device	sn_dma_sync_sg_for_device
-#define platform_dma_mapping_error	sn_dma_mapping_error
-#define platform_dma_supported		sn_dma_supported
 #define platform_dma_get_required_mask	sn_dma_get_required_mask
+#define platform_dma_init		sn_dma_init
 #define platform_migrate		sn_migrate
 #define platform_kernel_launch_event	sn_kernel_launch_event
 #ifdef CONFIG_PCI_MSI
diff --git a/arch/ia64/include/asm/msidef.h b/arch/ia64/include/asm/msidef.h
new file mode 100644
index 000000000000..592c1047a0c5
--- /dev/null
+++ b/arch/ia64/include/asm/msidef.h
@@ -0,0 +1,42 @@
1#ifndef _IA64_MSI_DEF_H
2#define _IA64_MSI_DEF_H
3
4/*
5 * Shifts for APIC-based data
6 */
7
8#define MSI_DATA_VECTOR_SHIFT 0
9#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT)
10#define MSI_DATA_VECTOR_MASK 0xffffff00
11
12#define MSI_DATA_DELIVERY_MODE_SHIFT 8
13#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
14#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
15
16#define MSI_DATA_LEVEL_SHIFT 14
17#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
18#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
19
20#define MSI_DATA_TRIGGER_SHIFT 15
21#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
22#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
23
24/*
25 * Shift/mask fields for APIC-based bus address
26 */
27
28#define MSI_ADDR_DEST_ID_SHIFT 4
29#define MSI_ADDR_HEADER 0xfee00000
30
31#define MSI_ADDR_DEST_ID_MASK 0xfff0000f
32#define MSI_ADDR_DEST_ID_CPU(cpu) ((cpu) << MSI_ADDR_DEST_ID_SHIFT)
33
34#define MSI_ADDR_DEST_MODE_SHIFT 2
35#define MSI_ADDR_DEST_MODE_PHYS (0 << MSI_ADDR_DEST_MODE_SHIFT)
36#define MSI_ADDR_DEST_MODE_LOGIC (1 << MSI_ADDR_DEST_MODE_SHIFT)
37
38#define MSI_ADDR_REDIRECTION_SHIFT 3
39#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
40#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
41
42#endif /* _IA64_MSI_DEF_H */
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
index d5ef0aa3e312..745421225ec6 100644
--- a/arch/ia64/include/asm/socket.h
+++ b/arch/ia64/include/asm/socket.h
@@ -63,4 +63,7 @@
63 63
64#define SO_MARK 36 64#define SO_MARK 36
65 65
66#define SO_TIMESTAMPING 37
67#define SCM_TIMESTAMPING SO_TIMESTAMPING
68
66#endif /* _ASM_IA64_SOCKET_H */ 69#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 3193f4417e16..f260dcf21515 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -44,11 +44,6 @@
44#define parent_node(nid) (nid) 44#define parent_node(nid) (nid)
45 45
46/* 46/*
47 * Returns the number of the first CPU on Node 'node'.
48 */
49#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node)))
50
51/*
52 * Determines the node for a given pci bus 47 * Determines the node for a given pci bus
53 */ 48 */
54#define pcibus_to_node(bus) PCI_CONTROLLER(bus)->node 49#define pcibus_to_node(bus) PCI_CONTROLLER(bus)->node
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index ab6e7ec0bba3..dc62df021673 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -11,7 +11,7 @@ extra-y := head.o init_task.o vmlinux.lds
11obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ 11obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
12 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ 12 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
13 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ 13 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
14 unwind.o mca.o mca_asm.o topology.o 14 unwind.o mca.o mca_asm.o topology.o dma-mapping.o
15 15
16obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o 16obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
17obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o 17obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
@@ -48,9 +48,7 @@ ifneq ($(CONFIG_IA64_ESI),)
48obj-y += esi_stub.o # must be in kernel proper 48obj-y += esi_stub.o # must be in kernel proper
49endif 49endif
50obj-$(CONFIG_DMAR) += pci-dma.o 50obj-$(CONFIG_DMAR) += pci-dma.o
51ifeq ($(CONFIG_DMAR), y)
52obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o 51obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
53endif
54 52
55# The gate DSO image is built using a special linker script. 53# The gate DSO image is built using a special linker script.
56targets += gate.so gate-syms.o 54targets += gate.so gate-syms.o
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
new file mode 100644
index 000000000000..086a2aeb0404
--- /dev/null
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -0,0 +1,13 @@
1#include <linux/dma-mapping.h>
2
3/* Set this to 1 if there is a HW IOMMU in the system */
4int iommu_detected __read_mostly;
5
6struct dma_map_ops *dma_ops;
7EXPORT_SYMBOL(dma_ops);
8
9struct dma_map_ops *dma_get_ops(struct device *dev)
10{
11 return dma_ops;
12}
13EXPORT_SYMBOL(dma_get_ops);
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 226233a6fa19..7429752ef5ad 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -80,7 +80,7 @@ int show_interrupts(struct seq_file *p, void *v)
80 seq_printf(p, "%10u ", kstat_irqs(i)); 80 seq_printf(p, "%10u ", kstat_irqs(i));
81#else 81#else
82 for_each_online_cpu(j) { 82 for_each_online_cpu(j) {
83 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 83 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
84 } 84 }
85#endif 85#endif
86 seq_printf(p, " %14s", irq_desc[i].chip->name); 86 seq_printf(p, " %14s", irq_desc[i].chip->name);
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 927ad027820c..acc4d19ae62a 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -493,16 +493,15 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
493 saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); 493 saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
494 ia64_srlz_d(); 494 ia64_srlz_d();
495 while (vector != IA64_SPURIOUS_INT_VECTOR) { 495 while (vector != IA64_SPURIOUS_INT_VECTOR) {
496 struct irq_desc *desc = irq_to_desc(vector); 496 int irq = local_vector_to_irq(vector);
497 struct irq_desc *desc = irq_to_desc(irq);
497 498
498 if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { 499 if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
499 smp_local_flush_tlb(); 500 smp_local_flush_tlb();
500 kstat_incr_irqs_this_cpu(vector, desc); 501 kstat_incr_irqs_this_cpu(irq, desc);
501 } else if (unlikely(IS_RESCHEDULE(vector))) 502 } else if (unlikely(IS_RESCHEDULE(vector))) {
502 kstat_incr_irqs_this_cpu(vector, desc); 503 kstat_incr_irqs_this_cpu(irq, desc);
503 else { 504 } else {
504 int irq = local_vector_to_irq(vector);
505
506 ia64_setreg(_IA64_REG_CR_TPR, vector); 505 ia64_setreg(_IA64_REG_CR_TPR, vector);
507 ia64_srlz_d(); 506 ia64_srlz_d();
508 507
@@ -545,24 +544,24 @@ void ia64_process_pending_intr(void)
545 544
546 vector = ia64_get_ivr(); 545 vector = ia64_get_ivr();
547 546
548 irq_enter(); 547 irq_enter();
549 saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); 548 saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
550 ia64_srlz_d(); 549 ia64_srlz_d();
551 550
552 /* 551 /*
553 * Perform normal interrupt style processing 552 * Perform normal interrupt style processing
554 */ 553 */
555 while (vector != IA64_SPURIOUS_INT_VECTOR) { 554 while (vector != IA64_SPURIOUS_INT_VECTOR) {
556 struct irq_desc *desc = irq_to_desc(vector); 555 int irq = local_vector_to_irq(vector);
556 struct irq_desc *desc = irq_to_desc(irq);
557 557
558 if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { 558 if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
559 smp_local_flush_tlb(); 559 smp_local_flush_tlb();
560 kstat_incr_irqs_this_cpu(vector, desc); 560 kstat_incr_irqs_this_cpu(irq, desc);
561 } else if (unlikely(IS_RESCHEDULE(vector))) 561 } else if (unlikely(IS_RESCHEDULE(vector))) {
562 kstat_incr_irqs_this_cpu(vector, desc); 562 kstat_incr_irqs_this_cpu(irq, desc);
563 else { 563 } else {
564 struct pt_regs *old_regs = set_irq_regs(NULL); 564 struct pt_regs *old_regs = set_irq_regs(NULL);
565 int irq = local_vector_to_irq(vector);
566 565
567 ia64_setreg(_IA64_REG_CR_TPR, vector); 566 ia64_setreg(_IA64_REG_CR_TPR, vector);
568 ia64_srlz_d(); 567 ia64_srlz_d();
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index 7ccb228ceedc..d41a40ef80c0 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -1,5 +1,5 @@
1#include <linux/module.h> 1#include <linux/module.h>
2 2#include <linux/dma-mapping.h>
3#include <asm/machvec.h> 3#include <asm/machvec.h>
4#include <asm/system.h> 4#include <asm/system.h>
5 5
@@ -75,14 +75,16 @@ machvec_timer_interrupt (int irq, void *dev_id)
75EXPORT_SYMBOL(machvec_timer_interrupt); 75EXPORT_SYMBOL(machvec_timer_interrupt);
76 76
77void 77void
78machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir) 78machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size,
79 enum dma_data_direction dir)
79{ 80{
80 mb(); 81 mb();
81} 82}
82EXPORT_SYMBOL(machvec_dma_sync_single); 83EXPORT_SYMBOL(machvec_dma_sync_single);
83 84
84void 85void
85machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir) 86machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n,
87 enum dma_data_direction dir)
86{ 88{
87 mb(); 89 mb();
88} 90}
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index dcb6b7c51ea7..2b15e233f7fe 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -7,44 +7,7 @@
7#include <linux/msi.h> 7#include <linux/msi.h>
8#include <linux/dmar.h> 8#include <linux/dmar.h>
9#include <asm/smp.h> 9#include <asm/smp.h>
10 10#include <asm/msidef.h>
11/*
12 * Shifts for APIC-based data
13 */
14
15#define MSI_DATA_VECTOR_SHIFT 0
16#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT)
17#define MSI_DATA_VECTOR_MASK 0xffffff00
18
19#define MSI_DATA_DELIVERY_SHIFT 8
20#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT)
21#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_SHIFT)
22
23#define MSI_DATA_LEVEL_SHIFT 14
24#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
25#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
26
27#define MSI_DATA_TRIGGER_SHIFT 15
28#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
29#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
30
31/*
32 * Shift/mask fields for APIC-based bus address
33 */
34
35#define MSI_TARGET_CPU_SHIFT 4
36#define MSI_ADDR_HEADER 0xfee00000
37
38#define MSI_ADDR_DESTID_MASK 0xfff0000f
39#define MSI_ADDR_DESTID_CPU(cpu) ((cpu) << MSI_TARGET_CPU_SHIFT)
40
41#define MSI_ADDR_DESTMODE_SHIFT 2
42#define MSI_ADDR_DESTMODE_PHYS (0 << MSI_ADDR_DESTMODE_SHIFT)
43#define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT)
44
45#define MSI_ADDR_REDIRECTION_SHIFT 3
46#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
47#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
48 11
49static struct irq_chip ia64_msi_chip; 12static struct irq_chip ia64_msi_chip;
50 13
@@ -65,8 +28,8 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
65 read_msi_msg(irq, &msg); 28 read_msi_msg(irq, &msg);
66 29
67 addr = msg.address_lo; 30 addr = msg.address_lo;
68 addr &= MSI_ADDR_DESTID_MASK; 31 addr &= MSI_ADDR_DEST_ID_MASK;
69 addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); 32 addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
70 msg.address_lo = addr; 33 msg.address_lo = addr;
71 34
72 data = msg.data; 35 data = msg.data;
@@ -98,9 +61,9 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
98 msg.address_hi = 0; 61 msg.address_hi = 0;
99 msg.address_lo = 62 msg.address_lo =
100 MSI_ADDR_HEADER | 63 MSI_ADDR_HEADER |
101 MSI_ADDR_DESTMODE_PHYS | 64 MSI_ADDR_DEST_MODE_PHYS |
102 MSI_ADDR_REDIRECTION_CPU | 65 MSI_ADDR_REDIRECTION_CPU |
103 MSI_ADDR_DESTID_CPU(dest_phys_id); 66 MSI_ADDR_DEST_ID_CPU(dest_phys_id);
104 67
105 msg.data = 68 msg.data =
106 MSI_DATA_TRIGGER_EDGE | 69 MSI_DATA_TRIGGER_EDGE |
@@ -183,8 +146,8 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
183 146
184 msg.data &= ~MSI_DATA_VECTOR_MASK; 147 msg.data &= ~MSI_DATA_VECTOR_MASK;
185 msg.data |= MSI_DATA_VECTOR(cfg->vector); 148 msg.data |= MSI_DATA_VECTOR(cfg->vector);
186 msg.address_lo &= ~MSI_ADDR_DESTID_MASK; 149 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
187 msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); 150 msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
188 151
189 dmar_msi_write(irq, &msg); 152 dmar_msi_write(irq, &msg);
190 cpumask_copy(irq_desc[irq].affinity, mask); 153 cpumask_copy(irq_desc[irq].affinity, mask);
@@ -215,9 +178,9 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
215 msg->address_hi = 0; 178 msg->address_hi = 0;
216 msg->address_lo = 179 msg->address_lo =
217 MSI_ADDR_HEADER | 180 MSI_ADDR_HEADER |
218 MSI_ADDR_DESTMODE_PHYS | 181 MSI_ADDR_DEST_MODE_PHYS |
219 MSI_ADDR_REDIRECTION_CPU | 182 MSI_ADDR_REDIRECTION_CPU |
220 MSI_ADDR_DESTID_CPU(dest); 183 MSI_ADDR_DEST_ID_CPU(dest);
221 184
222 msg->data = 185 msg->data =
223 MSI_DATA_TRIGGER_EDGE | 186 MSI_DATA_TRIGGER_EDGE |
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index e5c57f413ca2..a4f19c70aadd 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -1002,8 +1002,6 @@ create_palinfo_proc_entries(unsigned int cpu)
1002 *pdir = create_proc_read_entry( 1002 *pdir = create_proc_read_entry(
1003 palinfo_entries[j].name, 0, cpu_dir, 1003 palinfo_entries[j].name, 0, cpu_dir,
1004 palinfo_read_entry, (void *)f.value); 1004 palinfo_read_entry, (void *)f.value);
1005 if (*pdir)
1006 (*pdir)->owner = THIS_MODULE;
1007 pdir++; 1005 pdir++;
1008 } 1006 }
1009} 1007}
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index d0ada067a4af..e4cb443bb988 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -32,9 +32,6 @@ int force_iommu __read_mostly = 1;
32int force_iommu __read_mostly; 32int force_iommu __read_mostly;
33#endif 33#endif
34 34
35/* Set this to 1 if there is a HW IOMMU in the system */
36int iommu_detected __read_mostly;
37
38/* Dummy device used for NULL arguments (normally ISA). Better would 35/* Dummy device used for NULL arguments (normally ISA). Better would
39 be probably a smaller DMA mask, but this is bug-to-bug compatible 36 be probably a smaller DMA mask, but this is bug-to-bug compatible
40 to i386. */ 37 to i386. */
@@ -44,18 +41,7 @@ struct device fallback_dev = {
44 .dma_mask = &fallback_dev.coherent_dma_mask, 41 .dma_mask = &fallback_dev.coherent_dma_mask,
45}; 42};
46 43
47void __init pci_iommu_alloc(void) 44extern struct dma_map_ops intel_dma_ops;
48{
49 /*
50 * The order of these functions is important for
51 * fall-back/fail-over reasons
52 */
53 detect_intel_iommu();
54
55#ifdef CONFIG_SWIOTLB
56 pci_swiotlb_init();
57#endif
58}
59 45
60static int __init pci_iommu_init(void) 46static int __init pci_iommu_init(void)
61{ 47{
@@ -79,15 +65,12 @@ iommu_dma_init(void)
79 return; 65 return;
80} 66}
81 67
82struct dma_mapping_ops *dma_ops;
83EXPORT_SYMBOL(dma_ops);
84
85int iommu_dma_supported(struct device *dev, u64 mask) 68int iommu_dma_supported(struct device *dev, u64 mask)
86{ 69{
87 struct dma_mapping_ops *ops = get_dma_ops(dev); 70 struct dma_map_ops *ops = platform_dma_get_ops(dev);
88 71
89 if (ops->dma_supported_op) 72 if (ops->dma_supported)
90 return ops->dma_supported_op(dev, mask); 73 return ops->dma_supported(dev, mask);
91 74
92 /* Copied from i386. Doesn't make much sense, because it will 75 /* Copied from i386. Doesn't make much sense, because it will
93 only work for pci_alloc_coherent. 76 only work for pci_alloc_coherent.
@@ -116,4 +99,25 @@ int iommu_dma_supported(struct device *dev, u64 mask)
116} 99}
117EXPORT_SYMBOL(iommu_dma_supported); 100EXPORT_SYMBOL(iommu_dma_supported);
118 101
102void __init pci_iommu_alloc(void)
103{
104 dma_ops = &intel_dma_ops;
105
106 dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
107 dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
108 dma_ops->sync_single_for_device = machvec_dma_sync_single;
109 dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
110 dma_ops->dma_supported = iommu_dma_supported;
111
112 /*
113 * The order of these functions is important for
114 * fall-back/fail-over reasons
115 */
116 detect_intel_iommu();
117
118#ifdef CONFIG_SWIOTLB
119 pci_swiotlb_init();
120#endif
121}
122
119#endif 123#endif
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 16c50516dbc1..573f02c39a00 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -13,23 +13,37 @@
13int swiotlb __read_mostly; 13int swiotlb __read_mostly;
14EXPORT_SYMBOL(swiotlb); 14EXPORT_SYMBOL(swiotlb);
15 15
16struct dma_mapping_ops swiotlb_dma_ops = { 16static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
17 .mapping_error = swiotlb_dma_mapping_error, 17 dma_addr_t *dma_handle, gfp_t gfp)
18 .alloc_coherent = swiotlb_alloc_coherent, 18{
19 if (dev->coherent_dma_mask != DMA_64BIT_MASK)
20 gfp |= GFP_DMA;
21 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
22}
23
24struct dma_map_ops swiotlb_dma_ops = {
25 .alloc_coherent = ia64_swiotlb_alloc_coherent,
19 .free_coherent = swiotlb_free_coherent, 26 .free_coherent = swiotlb_free_coherent,
20 .map_single = swiotlb_map_single, 27 .map_page = swiotlb_map_page,
21 .unmap_single = swiotlb_unmap_single, 28 .unmap_page = swiotlb_unmap_page,
29 .map_sg = swiotlb_map_sg_attrs,
30 .unmap_sg = swiotlb_unmap_sg_attrs,
22 .sync_single_for_cpu = swiotlb_sync_single_for_cpu, 31 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
23 .sync_single_for_device = swiotlb_sync_single_for_device, 32 .sync_single_for_device = swiotlb_sync_single_for_device,
24 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, 33 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
25 .sync_single_range_for_device = swiotlb_sync_single_range_for_device, 34 .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
26 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 35 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
27 .sync_sg_for_device = swiotlb_sync_sg_for_device, 36 .sync_sg_for_device = swiotlb_sync_sg_for_device,
28 .map_sg = swiotlb_map_sg, 37 .dma_supported = swiotlb_dma_supported,
29 .unmap_sg = swiotlb_unmap_sg, 38 .mapping_error = swiotlb_dma_mapping_error,
30 .dma_supported_op = swiotlb_dma_supported,
31}; 39};
32 40
41void __init swiotlb_dma_init(void)
42{
43 dma_ops = &swiotlb_dma_ops;
44 swiotlb_init();
45}
46
33void __init pci_swiotlb_init(void) 47void __init pci_swiotlb_init(void)
34{ 48{
35 if (!iommu_detected) { 49 if (!iommu_detected) {
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 0e499757309b..5c0f408cfd71 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2196,7 +2196,7 @@ pfmfs_delete_dentry(struct dentry *dentry)
2196 return 1; 2196 return 1;
2197} 2197}
2198 2198
2199static struct dentry_operations pfmfs_dentry_operations = { 2199static const struct dentry_operations pfmfs_dentry_operations = {
2200 .d_delete = pfmfs_delete_dentry, 2200 .d_delete = pfmfs_delete_dentry,
2201}; 2201};
2202 2202
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index f0ebb342409d..d6747bae52d8 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -20,6 +20,7 @@
20#include <linux/efi.h> 20#include <linux/efi.h>
21#include <linux/timex.h> 21#include <linux/timex.h>
22#include <linux/clocksource.h> 22#include <linux/clocksource.h>
23#include <linux/platform_device.h>
23 24
24#include <asm/machvec.h> 25#include <asm/machvec.h>
25#include <asm/delay.h> 26#include <asm/delay.h>
@@ -405,6 +406,21 @@ static struct irqaction timer_irqaction = {
405 .name = "timer" 406 .name = "timer"
406}; 407};
407 408
409static struct platform_device rtc_efi_dev = {
410 .name = "rtc-efi",
411 .id = -1,
412};
413
414static int __init rtc_init(void)
415{
416 if (platform_device_register(&rtc_efi_dev) < 0)
417 printk(KERN_ERR "unable to register rtc device...\n");
418
419 /* not necessarily an error */
420 return 0;
421}
422module_init(rtc_init);
423
408void __init 424void __init
409time_init (void) 425time_init (void)
410{ 426{
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index f45e4e508eca..3765efc5f963 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -213,17 +213,9 @@ SECTIONS
213 { *(.data.cacheline_aligned) } 213 { *(.data.cacheline_aligned) }
214 214
215 /* Per-cpu data: */ 215 /* Per-cpu data: */
216 percpu : { } :percpu
217 . = ALIGN(PERCPU_PAGE_SIZE); 216 . = ALIGN(PERCPU_PAGE_SIZE);
218 __phys_per_cpu_start = .; 217 PERCPU_VADDR(PERCPU_ADDR, :percpu)
219 .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET) 218 __phys_per_cpu_start = __per_cpu_load;
220 {
221 __per_cpu_start = .;
222 *(.data.percpu.page_aligned)
223 *(.data.percpu)
224 *(.data.percpu.shared_aligned)
225 __per_cpu_end = .;
226 }
227 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits 219 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
228 * into percpu page size 220 * into percpu page size
229 */ 221 */
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index f833a0b4188d..0a2d6b86075a 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -4,6 +4,10 @@
4config HAVE_KVM 4config HAVE_KVM
5 bool 5 bool
6 6
7config HAVE_KVM_IRQCHIP
8 bool
9 default y
10
7menuconfig VIRTUALIZATION 11menuconfig VIRTUALIZATION
8 bool "Virtualization" 12 bool "Virtualization"
9 depends on HAVE_KVM || IA64 13 depends on HAVE_KVM || IA64
diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h
index c6786e8b1bf4..c0785a728271 100644
--- a/arch/ia64/kvm/irq.h
+++ b/arch/ia64/kvm/irq.h
@@ -23,6 +23,8 @@
23#ifndef __IRQ_H 23#ifndef __IRQ_H
24#define __IRQ_H 24#define __IRQ_H
25 25
26#include "lapic.h"
27
26static inline int irqchip_in_kernel(struct kvm *kvm) 28static inline int irqchip_in_kernel(struct kvm *kvm)
27{ 29{
28 return 1; 30 return 1;
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 28f982045f29..076b00d1dbff 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -182,7 +182,7 @@ int kvm_dev_ioctl_check_extension(long ext)
182 switch (ext) { 182 switch (ext) {
183 case KVM_CAP_IRQCHIP: 183 case KVM_CAP_IRQCHIP:
184 case KVM_CAP_MP_STATE: 184 case KVM_CAP_MP_STATE:
185 185 case KVM_CAP_IRQ_INJECT_STATUS:
186 r = 1; 186 r = 1;
187 break; 187 break;
188 case KVM_CAP_COALESCED_MMIO: 188 case KVM_CAP_COALESCED_MMIO:
@@ -314,7 +314,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
314 union ia64_lid lid; 314 union ia64_lid lid;
315 int i; 315 int i;
316 316
317 for (i = 0; i < KVM_MAX_VCPUS; i++) { 317 for (i = 0; i < kvm->arch.online_vcpus; i++) {
318 if (kvm->vcpus[i]) { 318 if (kvm->vcpus[i]) {
319 lid.val = VCPU_LID(kvm->vcpus[i]); 319 lid.val = VCPU_LID(kvm->vcpus[i]);
320 if (lid.id == id && lid.eid == eid) 320 if (lid.id == id && lid.eid == eid)
@@ -388,7 +388,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
388 388
389 call_data.ptc_g_data = p->u.ptc_g_data; 389 call_data.ptc_g_data = p->u.ptc_g_data;
390 390
391 for (i = 0; i < KVM_MAX_VCPUS; i++) { 391 for (i = 0; i < kvm->arch.online_vcpus; i++) {
392 if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state == 392 if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
393 KVM_MP_STATE_UNINITIALIZED || 393 KVM_MP_STATE_UNINITIALIZED ||
394 vcpu == kvm->vcpus[i]) 394 vcpu == kvm->vcpus[i])
@@ -788,6 +788,8 @@ struct kvm *kvm_arch_create_vm(void)
788 return ERR_PTR(-ENOMEM); 788 return ERR_PTR(-ENOMEM);
789 kvm_init_vm(kvm); 789 kvm_init_vm(kvm);
790 790
791 kvm->arch.online_vcpus = 0;
792
791 return kvm; 793 return kvm;
792 794
793} 795}
@@ -919,7 +921,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
919 r = kvm_ioapic_init(kvm); 921 r = kvm_ioapic_init(kvm);
920 if (r) 922 if (r)
921 goto out; 923 goto out;
924 r = kvm_setup_default_irq_routing(kvm);
925 if (r) {
926 kfree(kvm->arch.vioapic);
927 goto out;
928 }
922 break; 929 break;
930 case KVM_IRQ_LINE_STATUS:
923 case KVM_IRQ_LINE: { 931 case KVM_IRQ_LINE: {
924 struct kvm_irq_level irq_event; 932 struct kvm_irq_level irq_event;
925 933
@@ -927,10 +935,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
927 if (copy_from_user(&irq_event, argp, sizeof irq_event)) 935 if (copy_from_user(&irq_event, argp, sizeof irq_event))
928 goto out; 936 goto out;
929 if (irqchip_in_kernel(kvm)) { 937 if (irqchip_in_kernel(kvm)) {
938 __s32 status;
930 mutex_lock(&kvm->lock); 939 mutex_lock(&kvm->lock);
931 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 940 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
932 irq_event.irq, irq_event.level); 941 irq_event.irq, irq_event.level);
933 mutex_unlock(&kvm->lock); 942 mutex_unlock(&kvm->lock);
943 if (ioctl == KVM_IRQ_LINE_STATUS) {
944 irq_event.status = status;
945 if (copy_to_user(argp, &irq_event,
946 sizeof irq_event))
947 goto out;
948 }
934 r = 0; 949 r = 0;
935 } 950 }
936 break; 951 break;
@@ -1149,7 +1164,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1149 1164
1150 /*Initialize itc offset for vcpus*/ 1165 /*Initialize itc offset for vcpus*/
1151 itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC); 1166 itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
1152 for (i = 0; i < KVM_MAX_VCPUS; i++) { 1167 for (i = 0; i < kvm->arch.online_vcpus; i++) {
1153 v = (struct kvm_vcpu *)((char *)vcpu + 1168 v = (struct kvm_vcpu *)((char *)vcpu +
1154 sizeof(struct kvm_vcpu_data) * i); 1169 sizeof(struct kvm_vcpu_data) * i);
1155 v->arch.itc_offset = itc_offset; 1170 v->arch.itc_offset = itc_offset;
@@ -1283,6 +1298,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1283 goto fail; 1298 goto fail;
1284 } 1299 }
1285 1300
1301 kvm->arch.online_vcpus++;
1302
1286 return vcpu; 1303 return vcpu;
1287fail: 1304fail:
1288 return ERR_PTR(r); 1305 return ERR_PTR(r);
@@ -1303,8 +1320,8 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1303 return -EINVAL; 1320 return -EINVAL;
1304} 1321}
1305 1322
1306int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, 1323int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1307 struct kvm_debug_guest *dbg) 1324 struct kvm_guest_debug *dbg)
1308{ 1325{
1309 return -EINVAL; 1326 return -EINVAL;
1310} 1327}
@@ -1421,6 +1438,23 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1421 return 0; 1438 return 0;
1422} 1439}
1423 1440
1441int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
1442 struct kvm_ia64_vcpu_stack *stack)
1443{
1444 memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
1445 return 0;
1446}
1447
1448int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
1449 struct kvm_ia64_vcpu_stack *stack)
1450{
1451 memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
1452 sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));
1453
1454 vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
1455 return 0;
1456}
1457
1424void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) 1458void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
1425{ 1459{
1426 1460
@@ -1430,9 +1464,78 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
1430 1464
1431 1465
1432long kvm_arch_vcpu_ioctl(struct file *filp, 1466long kvm_arch_vcpu_ioctl(struct file *filp,
1433 unsigned int ioctl, unsigned long arg) 1467 unsigned int ioctl, unsigned long arg)
1434{ 1468{
1435 return -EINVAL; 1469 struct kvm_vcpu *vcpu = filp->private_data;
1470 void __user *argp = (void __user *)arg;
1471 struct kvm_ia64_vcpu_stack *stack = NULL;
1472 long r;
1473
1474 switch (ioctl) {
1475 case KVM_IA64_VCPU_GET_STACK: {
1476 struct kvm_ia64_vcpu_stack __user *user_stack;
1477 void __user *first_p = argp;
1478
1479 r = -EFAULT;
1480 if (copy_from_user(&user_stack, first_p, sizeof(void *)))
1481 goto out;
1482
1483 if (!access_ok(VERIFY_WRITE, user_stack,
1484 sizeof(struct kvm_ia64_vcpu_stack))) {
1485 printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
1486 "Illegal user destination address for stack\n");
1487 goto out;
1488 }
1489 stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
1490 if (!stack) {
1491 r = -ENOMEM;
1492 goto out;
1493 }
1494
1495 r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
1496 if (r)
1497 goto out;
1498
1499 if (copy_to_user(user_stack, stack,
1500 sizeof(struct kvm_ia64_vcpu_stack)))
1501 goto out;
1502
1503 break;
1504 }
1505 case KVM_IA64_VCPU_SET_STACK: {
1506 struct kvm_ia64_vcpu_stack __user *user_stack;
1507 void __user *first_p = argp;
1508
1509 r = -EFAULT;
1510 if (copy_from_user(&user_stack, first_p, sizeof(void *)))
1511 goto out;
1512
1513 if (!access_ok(VERIFY_READ, user_stack,
1514 sizeof(struct kvm_ia64_vcpu_stack))) {
1515 printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
1516 "Illegal user address for stack\n");
1517 goto out;
1518 }
1519 stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
1520 if (!stack) {
1521 r = -ENOMEM;
1522 goto out;
1523 }
1524 if (copy_from_user(stack, user_stack,
1525 sizeof(struct kvm_ia64_vcpu_stack)))
1526 goto out;
1527
1528 r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
1529 break;
1530 }
1531
1532 default:
1533 r = -EINVAL;
1534 }
1535
1536out:
1537 kfree(stack);
1538 return r;
1436} 1539}
1437 1540
1438int kvm_arch_set_memory_region(struct kvm *kvm, 1541int kvm_arch_set_memory_region(struct kvm *kvm,
@@ -1472,7 +1575,7 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
1472} 1575}
1473 1576
1474long kvm_arch_dev_ioctl(struct file *filp, 1577long kvm_arch_dev_ioctl(struct file *filp,
1475 unsigned int ioctl, unsigned long arg) 1578 unsigned int ioctl, unsigned long arg)
1476{ 1579{
1477 return -EINVAL; 1580 return -EINVAL;
1478} 1581}
@@ -1737,7 +1840,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
1737 struct kvm_vcpu *lvcpu = kvm->vcpus[0]; 1840 struct kvm_vcpu *lvcpu = kvm->vcpus[0];
1738 int i; 1841 int i;
1739 1842
1740 for (i = 1; i < KVM_MAX_VCPUS; i++) { 1843 for (i = 1; i < kvm->arch.online_vcpus; i++) {
1741 if (!kvm->vcpus[i]) 1844 if (!kvm->vcpus[i])
1742 continue; 1845 continue;
1743 if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp) 1846 if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index cb7600bdff9d..a8ae52ed5635 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -227,6 +227,18 @@ static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
227 return result; 227 return result;
228} 228}
229 229
230static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu)
231{
232
233 struct ia64_pal_retval result = {0, 0, 0, 0};
234 long in0, in1, in2, in3;
235
236 kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
237 result.status = ia64_pal_register_info(in1, &result.v1, &result.v2);
238
239 return result;
240}
241
230static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu) 242static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
231{ 243{
232 244
@@ -268,8 +280,12 @@ static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
268static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu) 280static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
269{ 281{
270 struct ia64_pal_retval result; 282 struct ia64_pal_retval result;
283 unsigned long in0, in1, in2, in3;
271 284
272 INIT_PAL_STATUS_UNIMPLEMENTED(result); 285 kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
286
287 result.status = ia64_pal_vm_info(in1, in2,
288 (pal_tc_info_u_t *)&result.v1, &result.v2);
273 289
274 return result; 290 return result;
275} 291}
@@ -292,6 +308,108 @@ static void prepare_for_halt(struct kvm_vcpu *vcpu)
292 vcpu->arch.timer_fired = 0; 308 vcpu->arch.timer_fired = 0;
293} 309}
294 310
311static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu)
312{
313 long status;
314 unsigned long in0, in1, in2, in3, r9;
315 unsigned long pm_buffer[16];
316
317 kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
318 status = ia64_pal_perf_mon_info(pm_buffer,
319 (pal_perf_mon_info_u_t *) &r9);
320 if (status != 0) {
321 printk(KERN_DEBUG"PAL_PERF_MON_INFO fails ret=%ld\n", status);
322 } else {
323 if (in1)
324 memcpy((void *)in1, pm_buffer, sizeof(pm_buffer));
325 else {
326 status = PAL_STATUS_EINVAL;
327 printk(KERN_WARNING"Invalid parameters "
328 "for PAL call:0x%lx!\n", in0);
329 }
330 }
331 return (struct ia64_pal_retval){status, r9, 0, 0};
332}
333
334static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu)
335{
336 unsigned long in0, in1, in2, in3;
337 long status;
338 unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
339 | (1UL << 61) | (1UL << 60);
340
341 kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
342 if (in1) {
343 memcpy((void *)in1, &res, sizeof(res));
344 status = 0;
 345 } else {
346 status = PAL_STATUS_EINVAL;
347 printk(KERN_WARNING"Invalid parameters "
348 "for PAL call:0x%lx!\n", in0);
349 }
350
351 return (struct ia64_pal_retval){status, 0, 0, 0};
352}
353
354static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu)
355{
356 unsigned long r9;
357 long status;
358
359 status = ia64_pal_mem_attrib(&r9);
360
361 return (struct ia64_pal_retval){status, r9, 0, 0};
362}
363
364static void remote_pal_prefetch_visibility(void *v)
365{
366 s64 trans_type = (s64)v;
367 ia64_pal_prefetch_visibility(trans_type);
368}
369
370static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu)
371{
372 struct ia64_pal_retval result = {0, 0, 0, 0};
373 unsigned long in0, in1, in2, in3;
374 kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
375 result.status = ia64_pal_prefetch_visibility(in1);
376 if (result.status == 0) {
377 /* Must be performed on all remote processors
378 in the coherence domain. */
379 smp_call_function(remote_pal_prefetch_visibility,
380 (void *)in1, 1);
 381 /* Unnecessary on remote processor for other vcpus! */
382 result.status = 1;
383 }
384 return result;
385}
386
387static void remote_pal_mc_drain(void *v)
388{
389 ia64_pal_mc_drain();
390}
391
392static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu)
393{
394 struct ia64_pal_retval result = {0, 0, 0, 0};
395 unsigned long in0, in1, in2, in3;
396
397 kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
398
399 if (in1 == 0 && in2) {
400 char brand_info[128];
401 result.status = ia64_pal_get_brand_info(brand_info);
402 if (result.status == PAL_STATUS_SUCCESS)
403 memcpy((void *)in2, brand_info, 128);
404 } else {
405 result.status = PAL_STATUS_REQUIRES_MEMORY;
406 printk(KERN_WARNING"Invalid parameters for "
407 "PAL call:0x%lx!\n", in0);
408 }
409
410 return result;
411}
412
295int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) 413int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
296{ 414{
297 415
@@ -300,14 +418,22 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
300 int ret = 1; 418 int ret = 1;
301 419
302 gr28 = kvm_get_pal_call_index(vcpu); 420 gr28 = kvm_get_pal_call_index(vcpu);
303 /*printk("pal_call index:%lx\n",gr28);*/
304 switch (gr28) { 421 switch (gr28) {
305 case PAL_CACHE_FLUSH: 422 case PAL_CACHE_FLUSH:
306 result = pal_cache_flush(vcpu); 423 result = pal_cache_flush(vcpu);
307 break; 424 break;
425 case PAL_MEM_ATTRIB:
426 result = pal_mem_attrib(vcpu);
427 break;
308 case PAL_CACHE_SUMMARY: 428 case PAL_CACHE_SUMMARY:
309 result = pal_cache_summary(vcpu); 429 result = pal_cache_summary(vcpu);
310 break; 430 break;
431 case PAL_PERF_MON_INFO:
432 result = pal_perf_mon_info(vcpu);
433 break;
434 case PAL_HALT_INFO:
435 result = pal_halt_info(vcpu);
436 break;
311 case PAL_HALT_LIGHT: 437 case PAL_HALT_LIGHT:
312 { 438 {
313 INIT_PAL_STATUS_SUCCESS(result); 439 INIT_PAL_STATUS_SUCCESS(result);
@@ -317,6 +443,16 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
317 } 443 }
318 break; 444 break;
319 445
446 case PAL_PREFETCH_VISIBILITY:
447 result = pal_prefetch_visibility(vcpu);
448 break;
449 case PAL_MC_DRAIN:
450 result.status = ia64_pal_mc_drain();
451 /* FIXME: All vcpus likely call PAL_MC_DRAIN.
 452 That causes congestion. */
453 smp_call_function(remote_pal_mc_drain, NULL, 1);
454 break;
455
320 case PAL_FREQ_RATIOS: 456 case PAL_FREQ_RATIOS:
321 result = pal_freq_ratios(vcpu); 457 result = pal_freq_ratios(vcpu);
322 break; 458 break;
@@ -346,6 +482,9 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
346 INIT_PAL_STATUS_SUCCESS(result); 482 INIT_PAL_STATUS_SUCCESS(result);
347 result.v1 = (1L << 32) | 1L; 483 result.v1 = (1L << 32) | 1L;
348 break; 484 break;
485 case PAL_REGISTER_INFO:
486 result = pal_register_info(vcpu);
487 break;
349 case PAL_VM_PAGE_SIZE: 488 case PAL_VM_PAGE_SIZE:
350 result.status = ia64_pal_vm_page_size(&result.v0, 489 result.status = ia64_pal_vm_page_size(&result.v0,
351 &result.v1); 490 &result.v1);
@@ -365,12 +504,18 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
365 result.status = ia64_pal_version( 504 result.status = ia64_pal_version(
366 (pal_version_u_t *)&result.v0, 505 (pal_version_u_t *)&result.v0,
367 (pal_version_u_t *)&result.v1); 506 (pal_version_u_t *)&result.v1);
368
369 break; 507 break;
370 case PAL_FIXED_ADDR: 508 case PAL_FIXED_ADDR:
371 result.status = PAL_STATUS_SUCCESS; 509 result.status = PAL_STATUS_SUCCESS;
372 result.v0 = vcpu->vcpu_id; 510 result.v0 = vcpu->vcpu_id;
373 break; 511 break;
512 case PAL_BRAND_INFO:
513 result = pal_get_brand_info(vcpu);
514 break;
515 case PAL_GET_PSTATE:
516 case PAL_CACHE_SHARED_INFO:
517 INIT_PAL_STATUS_UNIMPLEMENTED(result);
518 break;
374 default: 519 default:
375 INIT_PAL_STATUS_UNIMPLEMENTED(result); 520 INIT_PAL_STATUS_UNIMPLEMENTED(result);
376 printk(KERN_WARNING"kvm: Unsupported pal call," 521 printk(KERN_WARNING"kvm: Unsupported pal call,"
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 230eae482f32..b1dc80952d91 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -167,7 +167,6 @@ static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
167 return (rr1.val); 167 return (rr1.val);
168} 168}
169 169
170
171/* 170/*
172 * Set vIFA & vITIR & vIHA, when vPSR.ic =1 171 * Set vIFA & vITIR & vIHA, when vPSR.ic =1
173 * Parameter: 172 * Parameter:
@@ -222,8 +221,6 @@ void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
222 inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR); 221 inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
223} 222}
224 223
225
226
227/* 224/*
228 * Data Nested TLB Fault 225 * Data Nested TLB Fault
229 * @ Data Nested TLB Vector 226 * @ Data Nested TLB Vector
@@ -245,7 +242,6 @@ void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
245 inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR); 242 inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
246} 243}
247 244
248
249/* 245/*
250 * Data TLB Fault 246 * Data TLB Fault
251 * @ Data TLB vector 247 * @ Data TLB vector
@@ -265,8 +261,6 @@ static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
265 /* If vPSR.ic, IFA, ITIR, IHA*/ 261 /* If vPSR.ic, IFA, ITIR, IHA*/
266 set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); 262 set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
267 inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR); 263 inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
268
269
270} 264}
271 265
272/* 266/*
@@ -279,7 +273,6 @@ void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
279 _vhpt_fault(vcpu, vadr); 273 _vhpt_fault(vcpu, vadr);
280} 274}
281 275
282
283/* 276/*
284 * VHPT Data Fault 277 * VHPT Data Fault
285 * @ VHPT Translation vector 278 * @ VHPT Translation vector
@@ -290,8 +283,6 @@ void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
290 _vhpt_fault(vcpu, vadr); 283 _vhpt_fault(vcpu, vadr);
291} 284}
292 285
293
294
295/* 286/*
296 * Deal with: 287 * Deal with:
297 * General Exception vector 288 * General Exception vector
@@ -301,7 +292,6 @@ void _general_exception(struct kvm_vcpu *vcpu)
301 inject_guest_interruption(vcpu, IA64_GENEX_VECTOR); 292 inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
302} 293}
303 294
304
305/* 295/*
306 * Illegal Operation Fault 296 * Illegal Operation Fault
307 * @ General Exception Vector 297 * @ General Exception Vector
@@ -419,19 +409,16 @@ static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
419 inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR); 409 inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
420} 410}
421 411
422
423void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) 412void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
424{ 413{
425 __page_not_present(vcpu, vadr); 414 __page_not_present(vcpu, vadr);
426} 415}
427 416
428
429void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) 417void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
430{ 418{
431 __page_not_present(vcpu, vadr); 419 __page_not_present(vcpu, vadr);
432} 420}
433 421
434
435/* Deal with 422/* Deal with
436 * Data access rights vector 423 * Data access rights vector
437 */ 424 */
@@ -563,22 +550,64 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim,
563 inject_guest_interruption(vcpu, vector); 550 inject_guest_interruption(vcpu, vector);
564} 551}
565 552
553static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu,
554 unsigned long arg)
555{
556 struct thash_data *data;
557 unsigned long gpa, poff;
558
559 if (!is_physical_mode(vcpu)) {
560 /* Depends on caller to provide the DTR or DTC mapping.*/
561 data = vtlb_lookup(vcpu, arg, D_TLB);
562 if (data)
563 gpa = data->page_flags & _PAGE_PPN_MASK;
564 else {
565 data = vhpt_lookup(arg);
566 if (!data)
567 return 0;
568 gpa = data->gpaddr & _PAGE_PPN_MASK;
569 }
570
571 poff = arg & (PSIZE(data->ps) - 1);
572 arg = PAGEALIGN(gpa, data->ps) | poff;
573 }
574 arg = kvm_gpa_to_mpa(arg << 1 >> 1);
575
576 return (unsigned long)__va(arg);
577}
578
566static void set_pal_call_data(struct kvm_vcpu *vcpu) 579static void set_pal_call_data(struct kvm_vcpu *vcpu)
567{ 580{
568 struct exit_ctl_data *p = &vcpu->arch.exit_data; 581 struct exit_ctl_data *p = &vcpu->arch.exit_data;
582 unsigned long gr28 = vcpu_get_gr(vcpu, 28);
583 unsigned long gr29 = vcpu_get_gr(vcpu, 29);
584 unsigned long gr30 = vcpu_get_gr(vcpu, 30);
569 585
570 /*FIXME:For static and stacked convention, firmware 586 /*FIXME:For static and stacked convention, firmware
571 * has put the parameters in gr28-gr31 before 587 * has put the parameters in gr28-gr31 before
572 * break to vmm !!*/ 588 * break to vmm !!*/
573 589
574 p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28); 590 switch (gr28) {
575 p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29); 591 case PAL_PERF_MON_INFO:
576 p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); 592 case PAL_HALT_INFO:
593 p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29);
594 p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
595 break;
596 case PAL_BRAND_INFO:
 597 p->u.pal_data.gr29 = gr29;
598 p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
599 break;
600 default:
 601 p->u.pal_data.gr29 = gr29;
602 p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
603 }
604 p->u.pal_data.gr28 = gr28;
577 p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31); 605 p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
606
578 p->exit_reason = EXIT_REASON_PAL_CALL; 607 p->exit_reason = EXIT_REASON_PAL_CALL;
579} 608}
580 609
581static void set_pal_call_result(struct kvm_vcpu *vcpu) 610static void get_pal_call_result(struct kvm_vcpu *vcpu)
582{ 611{
583 struct exit_ctl_data *p = &vcpu->arch.exit_data; 612 struct exit_ctl_data *p = &vcpu->arch.exit_data;
584 613
@@ -606,7 +635,7 @@ static void set_sal_call_data(struct kvm_vcpu *vcpu)
606 p->exit_reason = EXIT_REASON_SAL_CALL; 635 p->exit_reason = EXIT_REASON_SAL_CALL;
607} 636}
608 637
609static void set_sal_call_result(struct kvm_vcpu *vcpu) 638static void get_sal_call_result(struct kvm_vcpu *vcpu)
610{ 639{
611 struct exit_ctl_data *p = &vcpu->arch.exit_data; 640 struct exit_ctl_data *p = &vcpu->arch.exit_data;
612 641
@@ -629,13 +658,13 @@ void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
629 if (iim == DOMN_PAL_REQUEST) { 658 if (iim == DOMN_PAL_REQUEST) {
630 set_pal_call_data(v); 659 set_pal_call_data(v);
631 vmm_transition(v); 660 vmm_transition(v);
632 set_pal_call_result(v); 661 get_pal_call_result(v);
633 vcpu_increment_iip(v); 662 vcpu_increment_iip(v);
634 return; 663 return;
635 } else if (iim == DOMN_SAL_REQUEST) { 664 } else if (iim == DOMN_SAL_REQUEST) {
636 set_sal_call_data(v); 665 set_sal_call_data(v);
637 vmm_transition(v); 666 vmm_transition(v);
638 set_sal_call_result(v); 667 get_sal_call_result(v);
639 vcpu_increment_iip(v); 668 vcpu_increment_iip(v);
640 return; 669 return;
641 } 670 }
@@ -703,7 +732,6 @@ void vhpi_detection(struct kvm_vcpu *vcpu)
703 } 732 }
704} 733}
705 734
706
707void leave_hypervisor_tail(void) 735void leave_hypervisor_tail(void)
708{ 736{
709 struct kvm_vcpu *v = current_vcpu; 737 struct kvm_vcpu *v = current_vcpu;
@@ -737,7 +765,6 @@ void leave_hypervisor_tail(void)
737 } 765 }
738} 766}
739 767
740
741static inline void handle_lds(struct kvm_pt_regs *regs) 768static inline void handle_lds(struct kvm_pt_regs *regs)
742{ 769{
743 regs->cr_ipsr |= IA64_PSR_ED; 770 regs->cr_ipsr |= IA64_PSR_ED;
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index ecd526b55323..d4d280505878 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -112,7 +112,6 @@ void switch_to_physical_rid(struct kvm_vcpu *vcpu)
112 return; 112 return;
113} 113}
114 114
115
116void switch_to_virtual_rid(struct kvm_vcpu *vcpu) 115void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
117{ 116{
118 unsigned long psr; 117 unsigned long psr;
@@ -166,8 +165,6 @@ void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
166 return; 165 return;
167} 166}
168 167
169
170
171/* 168/*
172 * In physical mode, insert tc/tr for region 0 and 4 uses 169 * In physical mode, insert tc/tr for region 0 and 4 uses
173 * RID[0] and RID[4] which is for physical mode emulation. 170 * RID[0] and RID[4] which is for physical mode emulation.
@@ -269,7 +266,6 @@ static inline unsigned long fph_index(struct kvm_pt_regs *regs,
269 return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR)); 266 return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
270} 267}
271 268
272
273/* 269/*
274 * The inverse of the above: given bspstore and the number of 270 * The inverse of the above: given bspstore and the number of
275 * registers, calculate ar.bsp. 271 * registers, calculate ar.bsp.
@@ -811,12 +807,15 @@ static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
811static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val) 807static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
812{ 808{
813 struct kvm_vcpu *v; 809 struct kvm_vcpu *v;
810 struct kvm *kvm;
814 int i; 811 int i;
815 long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC); 812 long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
816 unsigned long vitv = VCPU(vcpu, itv); 813 unsigned long vitv = VCPU(vcpu, itv);
817 814
815 kvm = (struct kvm *)KVM_VM_BASE;
816
818 if (vcpu->vcpu_id == 0) { 817 if (vcpu->vcpu_id == 0) {
819 for (i = 0; i < KVM_MAX_VCPUS; i++) { 818 for (i = 0; i < kvm->arch.online_vcpus; i++) {
820 v = (struct kvm_vcpu *)((char *)vcpu + 819 v = (struct kvm_vcpu *)((char *)vcpu +
821 sizeof(struct kvm_vcpu_data) * i); 820 sizeof(struct kvm_vcpu_data) * i);
822 VMX(v, itc_offset) = itc_offset; 821 VMX(v, itc_offset) = itc_offset;
@@ -1039,8 +1038,6 @@ u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
1039 return key; 1038 return key;
1040} 1039}
1041 1040
1042
1043
1044void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst) 1041void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
1045{ 1042{
1046 unsigned long thash, vadr; 1043 unsigned long thash, vadr;
@@ -1050,7 +1047,6 @@ void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
1050 vcpu_set_gr(vcpu, inst.M46.r1, thash, 0); 1047 vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
1051} 1048}
1052 1049
1053
1054void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst) 1050void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
1055{ 1051{
1056 unsigned long tag, vadr; 1052 unsigned long tag, vadr;
@@ -1131,7 +1127,6 @@ int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
1131 return IA64_NO_FAULT; 1127 return IA64_NO_FAULT;
1132} 1128}
1133 1129
1134
1135int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst) 1130int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
1136{ 1131{
1137 unsigned long r1, r3; 1132 unsigned long r1, r3;
@@ -1154,7 +1149,6 @@ void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
1154 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); 1149 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1155} 1150}
1156 1151
1157
1158/************************************ 1152/************************************
1159 * Insert/Purge translation register/cache 1153 * Insert/Purge translation register/cache
1160 ************************************/ 1154 ************************************/
@@ -1385,7 +1379,6 @@ void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1385 vcpu_set_itc(vcpu, r2); 1379 vcpu_set_itc(vcpu, r2);
1386} 1380}
1387 1381
1388
1389void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) 1382void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1390{ 1383{
1391 unsigned long r1; 1384 unsigned long r1;
@@ -1393,8 +1386,9 @@ void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1393 r1 = vcpu_get_itc(vcpu); 1386 r1 = vcpu_get_itc(vcpu);
1394 vcpu_set_gr(vcpu, inst.M31.r1, r1, 0); 1387 vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
1395} 1388}
1389
1396/************************************************************************** 1390/**************************************************************************
1397 struct kvm_vcpu*protection key register access routines 1391 struct kvm_vcpu protection key register access routines
1398 **************************************************************************/ 1392 **************************************************************************/
1399 1393
1400unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg) 1394unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
@@ -1407,20 +1401,6 @@ void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
1407 ia64_set_pkr(reg, val); 1401 ia64_set_pkr(reg, val);
1408} 1402}
1409 1403
1410
1411unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
1412{
1413 union ia64_rr rr, rr1;
1414
1415 rr.val = vcpu_get_rr(vcpu, ifa);
1416 rr1.val = 0;
1417 rr1.ps = rr.ps;
1418 rr1.rid = rr.rid;
1419 return (rr1.val);
1420}
1421
1422
1423
1424/******************************** 1404/********************************
1425 * Moves to privileged registers 1405 * Moves to privileged registers
1426 ********************************/ 1406 ********************************/
@@ -1464,8 +1444,6 @@ unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
1464 return (IA64_NO_FAULT); 1444 return (IA64_NO_FAULT);
1465} 1445}
1466 1446
1467
1468
1469void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst) 1447void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
1470{ 1448{
1471 unsigned long r3, r2; 1449 unsigned long r3, r2;
@@ -1510,8 +1488,6 @@ void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1510 vcpu_set_pkr(vcpu, r3, r2); 1488 vcpu_set_pkr(vcpu, r3, r2);
1511} 1489}
1512 1490
1513
1514
1515void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst) 1491void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
1516{ 1492{
1517 unsigned long r3, r1; 1493 unsigned long r3, r1;
@@ -1557,7 +1533,6 @@ void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1557 vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); 1533 vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1558} 1534}
1559 1535
1560
1561unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg) 1536unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
1562{ 1537{
1563 /* FIXME: This could get called as a result of a rsvd-reg fault */ 1538 /* FIXME: This could get called as a result of a rsvd-reg fault */
@@ -1609,7 +1584,6 @@ unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
1609 return 0; 1584 return 0;
1610} 1585}
1611 1586
1612
1613unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst) 1587unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
1614{ 1588{
1615 unsigned long tgt = inst.M33.r1; 1589 unsigned long tgt = inst.M33.r1;
@@ -1633,8 +1607,6 @@ unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
1633 return 0; 1607 return 0;
1634} 1608}
1635 1609
1636
1637
1638void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val) 1610void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
1639{ 1611{
1640 1612
@@ -1776,9 +1748,6 @@ void vcpu_bsw1(struct kvm_vcpu *vcpu)
1776 } 1748 }
1777} 1749}
1778 1750
1779
1780
1781
1782void vcpu_rfi(struct kvm_vcpu *vcpu) 1751void vcpu_rfi(struct kvm_vcpu *vcpu)
1783{ 1752{
1784 unsigned long ifs, psr; 1753 unsigned long ifs, psr;
@@ -1796,7 +1765,6 @@ void vcpu_rfi(struct kvm_vcpu *vcpu)
1796 regs->cr_iip = VCPU(vcpu, iip); 1765 regs->cr_iip = VCPU(vcpu, iip);
1797} 1766}
1798 1767
1799
1800/* 1768/*
1801 VPSR can't keep track of below bits of guest PSR 1769 VPSR can't keep track of below bits of guest PSR
1802 This function gets guest PSR 1770 This function gets guest PSR
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index b2f12a562bdf..042af92ced83 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -703,7 +703,7 @@ extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
703extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps); 703extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
704extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps); 704extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
705extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va); 705extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
706extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, 706extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
707 u64 itir, u64 ifa, int type); 707 u64 itir, u64 ifa, int type);
708extern void thash_purge_all(struct kvm_vcpu *v); 708extern void thash_purge_all(struct kvm_vcpu *v);
709extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v, 709extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
@@ -738,7 +738,7 @@ void kvm_init_vhpt(struct kvm_vcpu *v);
738void thash_init(struct thash_cb *hcb, u64 sz); 738void thash_init(struct thash_cb *hcb, u64 sz);
739 739
740void panic_vm(struct kvm_vcpu *v, const char *fmt, ...); 740void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
741 741u64 kvm_gpa_to_mpa(u64 gpa);
742extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3, 742extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
743 u64 arg4, u64 arg5, u64 arg6, u64 arg7); 743 u64 arg4, u64 arg5, u64 arg6, u64 arg7);
744 744
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 6b6307a3bd55..38232b37668b 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -164,11 +164,11 @@ static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
164 unsigned long ps, gpaddr; 164 unsigned long ps, gpaddr;
165 165
166 ps = itir_ps(itir); 166 ps = itir_ps(itir);
167 rr.val = ia64_get_rr(ifa);
167 168
168 gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) | 169 gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
169 (ifa & ((1UL << ps) - 1)); 170 (ifa & ((1UL << ps) - 1));
170 171
171 rr.val = ia64_get_rr(ifa);
172 head = (struct thash_data *)ia64_thash(ifa); 172 head = (struct thash_data *)ia64_thash(ifa);
173 head->etag = INVALID_TI_TAG; 173 head->etag = INVALID_TI_TAG;
174 ia64_mf(); 174 ia64_mf();
@@ -412,16 +412,14 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
412 412
413/* 413/*
414 * Purge overlap TCs and then insert the new entry to emulate itc ops. 414 * Purge overlap TCs and then insert the new entry to emulate itc ops.
415 * Notes: Only TC entry can purge and insert. 415 * Notes: Only TC entry can purge and insert.
416 * 1 indicates this is MMIO
417 */ 416 */
418int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, 417void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
419 u64 ifa, int type) 418 u64 ifa, int type)
420{ 419{
421 u64 ps; 420 u64 ps;
422 u64 phy_pte, io_mask, index; 421 u64 phy_pte, io_mask, index;
423 union ia64_rr vrr, mrr; 422 union ia64_rr vrr, mrr;
424 int ret = 0;
425 423
426 ps = itir_ps(itir); 424 ps = itir_ps(itir);
427 vrr.val = vcpu_get_rr(v, ifa); 425 vrr.val = vcpu_get_rr(v, ifa);
@@ -441,25 +439,19 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 		phy_pte &= ~_PAGE_MA_MASK;
 	}
 
-	if (pte & VTLB_PTE_IO)
-		ret = 1;
-
 	vtlb_purge(v, ifa, ps);
 	vhpt_purge(v, ifa, ps);
 
-	if (ps == mrr.ps) {
-		if (!(pte&VTLB_PTE_IO)) {
-			vhpt_insert(phy_pte, itir, ifa, pte);
-		} else {
-			vtlb_insert(v, pte, itir, ifa);
-			vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		}
-	} else if (ps > mrr.ps) {
+	if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
 		vtlb_insert(v, pte, itir, ifa);
 		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		if (!(pte&VTLB_PTE_IO))
-			vhpt_insert(phy_pte, itir, ifa, pte);
-	} else {
+	}
+	if (pte & VTLB_PTE_IO)
+		return;
+
+	if (ps >= mrr.ps)
+		vhpt_insert(phy_pte, itir, ifa, pte);
+	else {
 		u64 psr;
 		phy_pte &= ~PAGE_FLAGS_RV_MASK;
 		psr = ia64_clear_ic();
@@ -469,7 +461,6 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 	if (!(pte&VTLB_PTE_IO))
 		mark_pages_dirty(v, pte, ps);
 
-	return ret;
 }
 
 /*
@@ -509,7 +500,6 @@ void thash_purge_all(struct kvm_vcpu *v)
 	local_flush_tlb_all();
 }
 
-
 /*
  * Lookup the hash table and its collision chain to find an entry
  * covering this address rid:va or the entry.
@@ -517,7 +507,6 @@ void thash_purge_all(struct kvm_vcpu *v)
  * INPUT:
  *  in: TLB format for both VHPT & TLB.
  */
-
 struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
 {
 	struct thash_data *cch;
@@ -547,7 +536,6 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
 	return NULL;
 }
 
-
 /*
  * Initialize internal control data before service.
  */
@@ -573,6 +561,10 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
+
+	if (gpfn >= (KVM_P2M_SIZE >> 3))
+		panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);
+
 	return *(base + gpfn);
 }
 
@@ -589,7 +581,6 @@ u64 kvm_gpa_to_mpa(u64 gpa)
 	return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
 }
 
-
 /*
  * Fetch guest bundle code.
  * INPUT:
@@ -631,7 +622,6 @@ int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
 	return IA64_NO_FAULT;
 }
 
-
 void kvm_init_vhpt(struct kvm_vcpu *v)
 {
 	v->arch.vhpt.num = VHPT_NUM_ENTRIES;
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
index 4dcce3d0e04c..e63328818643 100644
--- a/arch/ia64/sn/kernel/sn2/prominfo_proc.c
+++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
@@ -225,7 +225,6 @@ static struct proc_dir_entry *sgi_prominfo_entry;
 int __init prominfo_init(void)
 {
 	struct proc_dir_entry **entp;
-	struct proc_dir_entry *p;
 	cnodeid_t cnodeid;
 	unsigned long nasid;
 	int size;
@@ -246,14 +245,10 @@ int __init prominfo_init(void)
 		sprintf(name, "node%d", cnodeid);
 		*entp = proc_mkdir(name, sgi_prominfo_entry);
 		nasid = cnodeid_to_nasid(cnodeid);
-		p = create_proc_read_entry("fit", 0, *entp, read_fit_entry,
+		create_proc_read_entry("fit", 0, *entp, read_fit_entry,
 			   (void *)nasid);
-		if (p)
-			p->owner = THIS_MODULE;
-		p = create_proc_read_entry("version", 0, *entp,
+		create_proc_read_entry("version", 0, *entp,
 			   read_version_entry, (void *)nasid);
-		if (p)
-			p->owner = THIS_MODULE;
 		entp++;
 	}
 
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 863f5017baae..8c130e8f00e1 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -10,7 +10,7 @@
  */
 
 #include <linux/module.h>
-#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
 #include <asm/dma.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/pcibus_provider_defs.h>
@@ -31,7 +31,7 @@
  * this function. Of course, SN only supports devices that have 32 or more
  * address bits when using the PMU.
  */
-int sn_dma_supported(struct device *dev, u64 mask)
+static int sn_dma_supported(struct device *dev, u64 mask)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 
@@ -39,7 +39,6 @@ int sn_dma_supported(struct device *dev, u64 mask)
 		return 0;
 	return 1;
 }
-EXPORT_SYMBOL(sn_dma_supported);
 
 /**
  * sn_dma_set_mask - set the DMA mask
@@ -75,8 +74,8 @@ EXPORT_SYMBOL(sn_dma_set_mask);
  * queue for a SCSI controller). See Documentation/DMA-API.txt for
  * more information.
  */
-void *sn_dma_alloc_coherent(struct device *dev, size_t size,
+static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 			    dma_addr_t * dma_handle, gfp_t flags)
 {
 	void *cpuaddr;
 	unsigned long phys_addr;
@@ -124,7 +123,6 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 
 	return cpuaddr;
 }
-EXPORT_SYMBOL(sn_dma_alloc_coherent);
 
 /**
  * sn_pci_free_coherent - free memory associated with coherent DMAable region
@@ -136,8 +134,8 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent);
  * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
  * any associated IOMMU mappings.
  */
-void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 			  dma_addr_t dma_handle)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -147,7 +145,6 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 	provider->dma_unmap(pdev, dma_handle, 0);
 	free_pages((unsigned long)cpu_addr, get_order(size));
 }
-EXPORT_SYMBOL(sn_dma_free_coherent);
 
 /**
  * sn_dma_map_single_attrs - map a single page for DMA
@@ -173,10 +170,12 @@ EXPORT_SYMBOL(sn_dma_free_coherent);
  * TODO: simplify our interface;
  *       figure out how to save dmamap handle so can use two step.
  */
-dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
-				   size_t size, int direction,
-				   struct dma_attrs *attrs)
+static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
+				  unsigned long offset, size_t size,
+				  enum dma_data_direction dir,
+				  struct dma_attrs *attrs)
 {
+	void *cpu_addr = page_address(page) + offset;
 	dma_addr_t dma_addr;
 	unsigned long phys_addr;
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -201,7 +200,6 @@ dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
 	}
 	return dma_addr;
 }
-EXPORT_SYMBOL(sn_dma_map_single_attrs);
 
 /**
  * sn_dma_unmap_single_attrs - unamp a DMA mapped page
@@ -215,21 +213,20 @@ EXPORT_SYMBOL(sn_dma_map_single_attrs);
  * by @dma_handle into the coherence domain. On SN, we're always cache
  * coherent, so we just need to free any ATEs associated with this mapping.
  */
-void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
-			       size_t size, int direction,
+static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+			      size_t size, enum dma_data_direction dir,
 			       struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
-	provider->dma_unmap(pdev, dma_addr, direction);
+	provider->dma_unmap(pdev, dma_addr, dir);
 }
-EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
 
 /**
- * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
+ * sn_dma_unmap_sg - unmap a DMA scatterlist
  * @dev: device to unmap
  * @sg: scatterlist to unmap
  * @nhwentries: number of scatterlist entries
@@ -238,9 +235,9 @@ EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
  *
  * Unmap a set of streaming mode DMA translations.
  */
-void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
-			   int nhwentries, int direction,
+static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+			    int nhwentries, enum dma_data_direction dir,
 			    struct dma_attrs *attrs)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -250,15 +247,14 @@ void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
 	BUG_ON(dev->bus != &pci_bus_type);
 
 	for_each_sg(sgl, sg, nhwentries, i) {
-		provider->dma_unmap(pdev, sg->dma_address, direction);
+		provider->dma_unmap(pdev, sg->dma_address, dir);
 		sg->dma_address = (dma_addr_t) NULL;
 		sg->dma_length = 0;
 	}
 }
-EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
 
 /**
- * sn_dma_map_sg_attrs - map a scatterlist for DMA
+ * sn_dma_map_sg - map a scatterlist for DMA
  * @dev: device to map for
  * @sg: scatterlist to map
  * @nhwentries: number of entries
@@ -272,8 +268,9 @@ EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
  *
  * Maps each entry of @sg for DMA.
  */
-int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
-			int nhwentries, int direction, struct dma_attrs *attrs)
+static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
+			 int nhwentries, enum dma_data_direction dir,
+			 struct dma_attrs *attrs)
 {
 	unsigned long phys_addr;
 	struct scatterlist *saved_sg = sgl, *sg;
@@ -310,8 +307,7 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 			 * Free any successfully allocated entries.
 			 */
 			if (i > 0)
-				sn_dma_unmap_sg_attrs(dev, saved_sg, i,
-						      direction, attrs);
+				sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
 			return 0;
 		}
 
@@ -320,41 +316,36 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 
 	return nhwentries;
 }
-EXPORT_SYMBOL(sn_dma_map_sg_attrs);
 
-void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-				size_t size, int direction)
+static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+				       size_t size, enum dma_data_direction dir)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
-EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
 
-void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-				   size_t size, int direction)
+static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+					  size_t size,
+					  enum dma_data_direction dir)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
-EXPORT_SYMBOL(sn_dma_sync_single_for_device);
 
-void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-			    int nelems, int direction)
+static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+				   int nelems, enum dma_data_direction dir)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
-EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
 
-void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-			       int nelems, int direction)
+static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+				      int nelems, enum dma_data_direction dir)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
-EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
 
-int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
-EXPORT_SYMBOL(sn_dma_mapping_error);
 
 u64 sn_dma_get_required_mask(struct device *dev)
 {
@@ -471,3 +462,23 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
  out:
 	return ret;
 }
+
+static struct dma_map_ops sn_dma_ops = {
+	.alloc_coherent = sn_dma_alloc_coherent,
+	.free_coherent = sn_dma_free_coherent,
+	.map_page = sn_dma_map_page,
+	.unmap_page = sn_dma_unmap_page,
+	.map_sg = sn_dma_map_sg,
+	.unmap_sg = sn_dma_unmap_sg,
+	.sync_single_for_cpu = sn_dma_sync_single_for_cpu,
+	.sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
+	.sync_single_for_device = sn_dma_sync_single_for_device,
+	.sync_sg_for_device = sn_dma_sync_sg_for_device,
+	.mapping_error = sn_dma_mapping_error,
+	.dma_supported = sn_dma_supported,
+};
+
+void sn_dma_init(void)
+{
+	dma_ops = &sn_dma_ops;
+}
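
The final hunk above replaces the individually exported sn_dma_* entry points with one static struct dma_map_ops table that sn_dma_init() installs in the global dma_ops pointer, so generic wrappers dispatch through the table instead of calling platform functions by name. The user-space C sketch below illustrates only that ops-table pattern; the names (demo_dma_ops, demo_dma_init, demo_dma_map_page) are hypothetical and are not part of the kernel API.

#include <stdio.h>
#include <stddef.h>

/*
 * Hypothetical ops table mirroring the shape of the dma_map_ops idea:
 * a static table of function pointers installed once, with a generic
 * wrapper that dispatches through it.
 */
struct demo_dma_ops {
	unsigned long (*map_page)(void *dev, void *page,
				  size_t offset, size_t size);
	void (*unmap_page)(void *dev, unsigned long dma_addr, size_t size);
};

/* Platform implementation, kept static like the sn_dma_* helpers above. */
static unsigned long demo_map_page(void *dev, void *page,
				   size_t offset, size_t size)
{
	(void)dev;
	(void)size;
	return (unsigned long)page + offset;	/* identity "mapping" for the demo */
}

static void demo_unmap_page(void *dev, unsigned long dma_addr, size_t size)
{
	(void)dev;
	(void)dma_addr;
	(void)size;	/* nothing to tear down in this sketch */
}

static struct demo_dma_ops demo_platform_ops = {
	.map_page = demo_map_page,
	.unmap_page = demo_unmap_page,
};

/* Global dispatch pointer, analogous in role to dma_ops. */
static struct demo_dma_ops *demo_dma_ops_ptr;

/* Analogous in role to sn_dma_init(): install the platform table once. */
static void demo_dma_init(void)
{
	demo_dma_ops_ptr = &demo_platform_ops;
}

/* Generic wrapper: callers never name the platform functions directly. */
static unsigned long demo_dma_map_page(void *dev, void *page,
				       size_t offset, size_t size)
{
	return demo_dma_ops_ptr->map_page(dev, page, offset, size);
}

int main(void)
{
	char buf[64];

	demo_dma_init();
	printf("mapped at %#lx\n", demo_dma_map_page(NULL, buf, 8, 16));
	demo_dma_ops_ptr->unmap_page(NULL, (unsigned long)buf + 8, 16);
	return 0;
}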