Diffstat (limited to 'arch/ia64')
57 files changed, 865 insertions(+), 836 deletions(-)
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile
index 5c0283830bd6..2f7caddf093e 100644
--- a/arch/ia64/dig/Makefile
+++ b/arch/ia64/dig/Makefile
@@ -7,8 +7,8 @@
 
 obj-y := setup.o
 ifeq ($(CONFIG_DMAR), y)
-obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o
+obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o
 else
 obj-$(CONFIG_IA64_GENERIC) += machvec.o
 endif
-obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o
+
diff --git a/arch/ia64/dig/dig_vtd_iommu.c b/arch/ia64/dig/dig_vtd_iommu.c
deleted file mode 100644
index 1c8a079017a3..000000000000
--- a/arch/ia64/dig/dig_vtd_iommu.c
+++ /dev/null
@@ -1,59 +0,0 @@
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/intel-iommu.h>
-
-void *
-vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flags)
-{
-	return intel_alloc_coherent(dev, size, dma_handle, flags);
-}
-EXPORT_SYMBOL_GPL(vtd_alloc_coherent);
-
-void
-vtd_free_coherent(struct device *dev, size_t size, void *vaddr,
-		  dma_addr_t dma_handle)
-{
-	intel_free_coherent(dev, size, vaddr, dma_handle);
-}
-EXPORT_SYMBOL_GPL(vtd_free_coherent);
-
-dma_addr_t
-vtd_map_single_attrs(struct device *dev, void *addr, size_t size,
-		     int dir, struct dma_attrs *attrs)
-{
-	return intel_map_single(dev, (phys_addr_t)addr, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_single_attrs);
-
-void
-vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-		       int dir, struct dma_attrs *attrs)
-{
-	intel_unmap_single(dev, iova, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs);
-
-int
-vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		 int dir, struct dma_attrs *attrs)
-{
-	return intel_map_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_sg_attrs);
-
-void
-vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-		   int nents, int dir, struct dma_attrs *attrs)
-{
-	intel_unmap_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs);
-
-int
-vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-EXPORT_SYMBOL_GPL(vtd_dma_mapping_error);
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 2769dbfd03bf..e4a80d82e3d8 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -13,49 +13,34 @@
  */
 
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/swiotlb.h>
-
 #include <asm/machvec.h>
 
+extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
 
-/* hwiommu declarations & definitions: */
-
-extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent	sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs	sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported		sba_dma_supported;
-extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
-
-#define hwiommu_alloc_coherent		sba_alloc_coherent
-#define hwiommu_free_coherent		sba_free_coherent
-#define hwiommu_map_single_attrs	sba_map_single_attrs
-#define hwiommu_unmap_single_attrs	sba_unmap_single_attrs
-#define hwiommu_map_sg_attrs		sba_map_sg_attrs
-#define hwiommu_unmap_sg_attrs		sba_unmap_sg_attrs
-#define hwiommu_dma_supported		sba_dma_supported
-#define hwiommu_dma_mapping_error	sba_dma_mapping_error
-#define hwiommu_sync_single_for_cpu	machvec_dma_sync_single
-#define hwiommu_sync_sg_for_cpu		machvec_dma_sync_sg
-#define hwiommu_sync_single_for_device	machvec_dma_sync_single
-#define hwiommu_sync_sg_for_device	machvec_dma_sync_sg
-
-
 /*
  * Note: we need to make the determination of whether or not to use
  * the sw I/O TLB based purely on the device structure.  Anything else
  * would be unreliable or would be too intrusive.
  */
-static inline int
-use_swiotlb (struct device *dev)
+static inline int use_swiotlb(struct device *dev)
 {
-	return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask);
+	return dev && dev->dma_mask &&
+		!sba_dma_ops.dma_supported(dev, *dev->dma_mask);
 }
 
+struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
+{
+	if (use_swiotlb(dev))
+		return &swiotlb_dma_ops;
+	return &sba_dma_ops;
+}
+EXPORT_SYMBOL(hwsw_dma_get_ops);
+
 void __init
 hwsw_init (void)
 {
@@ -71,125 +56,3 @@ hwsw_init (void)
 #endif
 	}
 }
-
-void *
-hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-	else
-		return hwiommu_alloc_coherent(dev, size, dma_handle, flags);
-}
-
-void
-hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
-{
-	if (use_swiotlb(dev))
-		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-	else
-		hwiommu_free_coherent(dev, size, vaddr, dma_handle);
-}
-
-dma_addr_t
-hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-		      struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
-	else
-		return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_single_attrs);
-
-void
-hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-			int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
-	else
-		return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_single_attrs);
-
-int
-hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		  int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
-	else
-		return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_sg_attrs);
-
-void
-hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		    int dir, struct dma_attrs *attrs)
-{
-	if (use_swiotlb(dev))
-		return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-	else
-		return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
-
-void
-hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_single_for_cpu(dev, addr, size, dir);
-	else
-		hwiommu_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir);
-	else
-		hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-void
-hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_single_for_device(dev, addr, size, dir);
-	else
-		hwiommu_sync_single_for_device(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
-	if (use_swiotlb(dev))
-		swiotlb_sync_sg_for_device(dev, sg, nelems, dir);
-	else
-		hwiommu_sync_sg_for_device(dev, sg, nelems, dir);
-}
-
-int
-hwsw_dma_supported (struct device *dev, u64 mask)
-{
-	if (hwiommu_dma_supported(dev, mask))
-		return 1;
-	return swiotlb_dma_supported(dev, mask);
-}
-
-int
-hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return hwiommu_dma_mapping_error(dev, dma_addr) ||
-	       swiotlb_dma_mapping_error(dev, dma_addr);
-}
-
-EXPORT_SYMBOL(hwsw_dma_mapping_error);
-EXPORT_SYMBOL(hwsw_dma_supported);
-EXPORT_SYMBOL(hwsw_alloc_coherent);
-EXPORT_SYMBOL(hwsw_free_coherent);
-EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_single_for_device);
-EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_sg_for_device);
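
What this file's change accomplishes: the hand-rolled hwsw_* wrappers, each repeating the same use_swiotlb() test, collapse into a single hwsw_dma_get_ops() that picks one of two shared dma_map_ops tables per device; every later DMA call then dispatches through the chosen table. Below is a minimal, self-contained sketch of that dispatch pattern in plain C. The struct layout and the hw_ops/sw_ops names are stand-ins invented for the demo, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for struct device and struct dma_map_ops (assumptions, not kernel code). */
struct device { uint64_t *dma_mask; };

struct dma_map_ops {
	int (*dma_supported)(struct device *dev, uint64_t mask);
};

static int hw_supported(struct device *dev, uint64_t mask)
{
	/* Hardware IOMMU only accepts fully 32-bit-capable masks. */
	return (mask & 0xFFFFFFFFULL) == 0xFFFFFFFFULL;
}

static int sw_supported(struct device *dev, uint64_t mask)
{
	return 1; /* software bounce buffering copes with any mask */
}

static struct dma_map_ops hw_ops = { .dma_supported = hw_supported };
static struct dma_map_ops sw_ops = { .dma_supported = sw_supported };

/* Mirrors the shape of hwsw_dma_get_ops(): choose the table once, per device. */
static struct dma_map_ops *get_ops(struct device *dev)
{
	if (dev && dev->dma_mask && !hw_ops.dma_supported(dev, *dev->dma_mask))
		return &sw_ops;
	return &hw_ops;
}

int main(void)
{
	uint64_t narrow = 0x00FFFFFF, wide = 0xFFFFFFFF;
	struct device a = { &narrow }, b = { &wide };

	printf("a -> %s\n", get_ops(&a) == &sw_ops ? "swiotlb" : "hw iommu");
	printf("b -> %s\n", get_ops(&b) == &sw_ops ? "swiotlb" : "hw iommu");
	return 0;
}
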
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 6d5e6c5630e3..56ceb68eb99d 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -36,6 +36,7 @@
 #include <linux/bitops.h>         /* hweight64() */
 #include <linux/crash_dump.h>
 #include <linux/iommu-helper.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/delay.h>		/* ia64_get_itc() */
 #include <asm/io.h>
@@ -908,11 +909,13 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
  *
  * See Documentation/PCI/PCI-DMA-mapping.txt
  */
-dma_addr_t
-sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
-		     struct dma_attrs *attrs)
+static dma_addr_t sba_map_page(struct device *dev, struct page *page,
+			       unsigned long poff, size_t size,
+			       enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
+	void *addr = page_address(page) + poff;
 	dma_addr_t iovp;
 	dma_addr_t offset;
 	u64 *pdir_start;
@@ -990,7 +993,14 @@ sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
 #endif
 	return SBA_IOVA(ioc, iovp, offset);
 }
-EXPORT_SYMBOL(sba_map_single_attrs);
+
+static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
+				       size_t size, enum dma_data_direction dir,
+				       struct dma_attrs *attrs)
+{
+	return sba_map_page(dev, virt_to_page(addr),
+			    (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
+}
 
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
@@ -1026,8 +1036,8 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
  *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
-void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
-			    int dir, struct dma_attrs *attrs)
+static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+			   enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1094,7 +1104,12 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
-EXPORT_SYMBOL(sba_unmap_single_attrs);
+
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			    enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	sba_unmap_page(dev, iova, size, dir, attrs);
+}
 
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1104,7 +1119,7 @@ EXPORT_SYMBOL(sba_unmap_single_attrs);
  *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
-void *
+static void *
 sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
 {
 	struct ioc *ioc;
@@ -1167,7 +1182,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
  *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
-void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
+			       dma_addr_t dma_handle)
 {
 	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
 	free_pages((unsigned long) vaddr, get_order(size));
@@ -1422,8 +1438,9 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
  *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
-int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
-		     int dir, struct dma_attrs *attrs)
+static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			    int nents, enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -1502,7 +1519,6 @@ int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
 
 	return filled;
 }
-EXPORT_SYMBOL(sba_map_sg_attrs);
 
 /**
  * sba_unmap_sg_attrs - unmap Scatter/Gather list
@@ -1514,8 +1530,9 @@ EXPORT_SYMBOL(sba_map_sg_attrs);
  *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
-void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
-			int nents, int dir, struct dma_attrs *attrs)
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			       int nents, enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
 	struct ioc *ioc;
@@ -1551,7 +1568,6 @@ void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
 #endif
 
 }
-EXPORT_SYMBOL(sba_unmap_sg_attrs);
 
 /**************************************************************
 *
@@ -2064,6 +2080,8 @@ static struct acpi_driver acpi_sba_ioc_driver = {
 	},
 };
 
+extern struct dma_map_ops swiotlb_dma_ops;
+
 static int __init
 sba_init(void)
 {
@@ -2077,6 +2095,7 @@ sba_init(void)
 	 * a successful kdump kernel boot is to use the swiotlb.
 	 */
 	if (is_kdump_kernel()) {
+		dma_ops = &swiotlb_dma_ops;
 		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
 			panic("Unable to initialize software I/O TLB:"
 			      " Try machvec=dig boot option");
@@ -2092,6 +2111,7 @@ sba_init(void)
 	 * If we didn't find something sba_iommu can claim, we
 	 * need to setup the swiotlb and switch to the dig machvec.
 	 */
+	dma_ops = &swiotlb_dma_ops;
 	if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
 		panic("Unable to find SBA IOMMU or initialize "
 		      "software I/O TLB: Try machvec=dig boot option");
@@ -2138,15 +2158,13 @@ nosbagart(char *str)
 	return 1;
 }
 
-int
-sba_dma_supported (struct device *dev, u64 mask)
+static int sba_dma_supported (struct device *dev, u64 mask)
 {
 	/* make sure it's at least 32bit capable */
 	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
 }
 
-int
-sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return 0;
 }
@@ -2176,7 +2194,22 @@ sba_page_override(char *str)
 
 __setup("sbapagesize=",sba_page_override);
 
-EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_dma_supported);
-EXPORT_SYMBOL(sba_alloc_coherent);
-EXPORT_SYMBOL(sba_free_coherent);
+struct dma_map_ops sba_dma_ops = {
+	.alloc_coherent		= sba_alloc_coherent,
+	.free_coherent		= sba_free_coherent,
+	.map_page		= sba_map_page,
+	.unmap_page		= sba_unmap_page,
+	.map_sg			= sba_map_sg_attrs,
+	.unmap_sg		= sba_unmap_sg_attrs,
+	.sync_single_for_cpu	= machvec_dma_sync_single,
+	.sync_sg_for_cpu	= machvec_dma_sync_sg,
+	.sync_single_for_device	= machvec_dma_sync_single,
+	.sync_sg_for_device	= machvec_dma_sync_sg,
+	.dma_supported		= sba_dma_supported,
+	.mapping_error		= sba_dma_mapping_error,
+};
+
+void sba_dma_init(void)
+{
+	dma_ops = &sba_dma_ops;
+}
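
A detail worth noting in the sba_iommu.c conversion: the legacy virtual-address entry point sba_map_single_attrs() survives only as a wrapper that splits the address into a page plus an intra-page offset (virt_to_page() and masking with ~PAGE_MASK) before calling the new page-based primitive sba_map_page(). A tiny standalone sketch of that split, assuming 4 KiB pages purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                     /* 4 KiB pages, for illustration */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	uintptr_t addr = 0x12345678;

	/* The same split the new sba_map_single_attrs() performs:
	 * page base via PAGE_MASK, intra-page offset via ~PAGE_MASK. */
	uintptr_t page_base = addr & (uintptr_t)PAGE_MASK;
	uintptr_t offset    = addr & (uintptr_t)~PAGE_MASK;

	printf("base=%#lx offset=%#lx\n",
	       (unsigned long)page_base, (unsigned long)offset);

	/* base + offset always reconstructs the original address. */
	return page_base + offset == addr ? 0 : 1;
}
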
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index a46f8395e9a5..af9405cd70e5 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -240,7 +240,7 @@ ia32_syscall_table:
 	data8 sys_ni_syscall
 	data8 sys_umask		/* 60 */
 	data8 sys_chroot
-	data8 sys_ustat
+	data8 compat_sys_ustat
 	data8 sys_dup2
 	data8 sys_getppid
 	data8 sys_getpgrp	/* 65 */
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 1f912d927585..36c0009dbece 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -11,99 +11,128 @@
 
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
-struct dma_mapping_ops {
-	int		(*mapping_error)(struct device *dev,
-					 dma_addr_t dma_addr);
-	void*		(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *hwdev, unsigned long ptr,
-				size_t size, int direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
-				size_t size, int direction);
-	void		(*sync_single_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void		(*sync_single_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, size_t size,
-				int direction);
-	void		(*sync_single_range_for_cpu)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void		(*sync_single_range_for_device)(struct device *hwdev,
-				dma_addr_t dma_handle, unsigned long offset,
-				size_t size, int direction);
-	void		(*sync_sg_for_cpu)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	void		(*sync_sg_for_device)(struct device *hwdev,
-				struct scatterlist *sg, int nelems,
-				int direction);
-	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
-				int nents, int direction);
-	void		(*unmap_sg)(struct device *hwdev,
-				struct scatterlist *sg, int nents,
-				int direction);
-	int		(*dma_supported_op)(struct device *hwdev, u64 mask);
-	int		is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
+extern struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
 
-#define dma_alloc_coherent(dev, size, handle, gfp)	\
-	platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
+extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
+				    enum dma_data_direction);
+extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
+				enum dma_data_direction);
 
-/* coherent mem. is cheap */
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		      gfp_t flag)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *daddr, gfp_t gfp)
 {
-	return dma_alloc_coherent(dev, size, dma_handle, flag);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->alloc_coherent(dev, size, daddr, gfp);
 }
-#define dma_free_coherent	platform_dma_free_coherent
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
-		     dma_addr_t dma_handle)
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *caddr, dma_addr_t daddr)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->free_coherent(dev, size, caddr, daddr);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev,
+					      void *caddr, size_t size,
+					      enum dma_data_direction dir,
+					      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_page(dev, virt_to_page(caddr),
+			     (unsigned long)caddr & ~PAGE_MASK, size,
+			     dir, attrs);
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
+					  size_t size,
+					  enum dma_data_direction dir,
+					  struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->unmap_page(dev, daddr, size, dir, attrs);
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+				   int nents, enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_sg(dev, sgl, nents, dir, attrs);
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev,
+				      struct scatterlist *sgl, int nents,
+				      enum dma_data_direction dir,
+				      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->unmap_sg(dev, sgl, nents, dir, attrs);
+}
+
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
+					   size_t size,
+					   enum dma_data_direction dir)
 {
-	dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_single_for_cpu(dev, daddr, size, dir);
 }
-#define dma_map_single_attrs	platform_dma_map_single_attrs
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size, int dir)
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sgl,
+				       int nents, enum dma_data_direction dir)
 {
-	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
 }
-#define dma_map_sg_attrs	platform_dma_map_sg_attrs
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
-			     int nents, int dir)
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t daddr,
+					      size_t size,
+					      enum dma_data_direction dir)
 {
-	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_single_for_device(dev, daddr, size, dir);
 }
-#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
-static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
-				    size_t size, int dir)
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+					  struct scatterlist *sgl,
+					  int nents,
+					  enum dma_data_direction dir)
 {
-	return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	ops->sync_sg_for_device(dev, sgl, nents, dir);
 }
-#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-				int nents, int dir)
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->mapping_error(dev, daddr);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      size_t offset, size_t size,
+				      enum dma_data_direction dir)
 {
-	return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->map_page(dev, page, offset, size, dir, NULL);
 }
-#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
-#define dma_sync_sg_for_cpu		platform_dma_sync_sg_for_cpu
-#define dma_sync_single_for_device	platform_dma_sync_single_for_device
-#define dma_sync_sg_for_device		platform_dma_sync_sg_for_device
-#define dma_mapping_error		platform_dma_mapping_error
 
-#define dma_map_page(dev, pg, off, size, dir)				\
-	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
-#define dma_unmap_page(dev, dma_addr, size, dir)			\
-	dma_unmap_single(dev, dma_addr, size, dir)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+				  size_t size, enum dma_data_direction dir)
+{
+	dma_unmap_single(dev, addr, size, dir);
+}
 
 /*
  * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
@@ -115,7 +144,11 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
 	dma_sync_single_for_device(dev, dma_handle, size, dir)
 
-#define dma_supported		platform_dma_supported
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_map_ops *ops = platform_dma_get_ops(dev);
+	return ops->dma_supported(dev, mask);
+}
 
 static inline int
 dma_set_mask (struct device *dev, u64 mask)
@@ -141,11 +174,4 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
 
 #define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */
 
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-	return dma_ops;
-}
-
-
-
 #endif /* _ASM_IA64_DMA_MAPPING_H */
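
After this header rewrite, every public dma_* entry point is a static inline that fetches the platform's dma_map_ops table and jumps through a function pointer, with the *_attrs variants as the real entry points and the plain calls reduced to NULL-attrs macros. Here is a hedged sketch of that caller-side shape in plain C; the ops struct, toy_map(), and get_ops() are invented stand-ins for the demo, not the kernel API:

#include <stdio.h>
#include <stdint.h>

/* Toy ops table; in the kernel this role is played by struct dma_map_ops. */
struct ops {
	long (*map)(void *dev, void *addr, unsigned long size, int dir,
		    void *attrs);
};

static long toy_map(void *dev, void *addr, unsigned long size, int dir,
		    void *attrs)
{
	printf("mapping %lu bytes at %p\n", size, addr);
	return (long)(uintptr_t)addr;  /* identity "bus address" for the demo */
}

static struct ops toy_ops = { .map = toy_map };

/* Plays the role of platform_dma_get_ops(dev). */
static struct ops *get_ops(void *dev) { return &toy_ops; }

/* The _attrs variant is the real entry point... */
static inline long map_single_attrs(void *dev, void *addr, unsigned long size,
				    int dir, void *attrs)
{
	return get_ops(dev)->map(dev, addr, size, dir, attrs);
}

/* ...and the common call is a macro that passes NULL attrs, exactly the
 * shape of the new dma_map_single() in this header. */
#define map_single(d, a, s, r) map_single_attrs(d, a, s, r, NULL)

int main(void)
{
	char buf[64];
	long handle = map_single(NULL, buf, sizeof(buf), 0);
	return handle == 0;
}
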
diff --git a/arch/ia64/include/asm/fpu.h b/arch/ia64/include/asm/fpu.h
index 3859558ff0a4..0c26157cffa5 100644
--- a/arch/ia64/include/asm/fpu.h
+++ b/arch/ia64/include/asm/fpu.h
@@ -6,8 +6,6 @@
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
-#include <asm/types.h>
-
 /* floating point status register: */
 #define FPSR_TRAP_VD	(1 << 0)	/* invalid op trap disabled */
 #define FPSR_TRAP_DD	(1 << 1)	/* denormal trap disabled */
diff --git a/arch/ia64/include/asm/gcc_intrin.h b/arch/ia64/include/asm/gcc_intrin.h
index 0f5b55921758..c2c5fd8fcac4 100644
--- a/arch/ia64/include/asm/gcc_intrin.h
+++ b/arch/ia64/include/asm/gcc_intrin.h
@@ -6,6 +6,7 @@
  * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
  */
 
+#include <linux/types.h>
 #include <linux/compiler.h>
 
 /* define this macro to get some asm stmts included in 'c' files */
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
index fbe2ad9234d0..111ed5222892 100644
--- a/arch/ia64/include/asm/intrinsics.h
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -10,6 +10,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/types.h>
 /* include compiler specific intrinsics */
 #include <asm/ia64regs.h>
 #ifdef __INTEL_COMPILER
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index bfa86b6af7cd..18a7e49abbc5 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -21,8 +21,7 @@
  *
  */
 
-#include <asm/types.h>
-
+#include <linux/types.h>
 #include <linux/ioctl.h>
 
 /* Select x86 specific features in <linux/kvm.h> */
@@ -166,7 +165,40 @@ struct saved_vpd {
 	unsigned long  vcpuid[5];
 	unsigned long  vpsr;
 	unsigned long  vpr;
-	unsigned long  vcr[128];
+	union {
+		unsigned long  vcr[128];
+		struct {
+			unsigned long dcr;
+			unsigned long itm;
+			unsigned long iva;
+			unsigned long rsv1[5];
+			unsigned long pta;
+			unsigned long rsv2[7];
+			unsigned long ipsr;
+			unsigned long isr;
+			unsigned long rsv3;
+			unsigned long iip;
+			unsigned long ifa;
+			unsigned long itir;
+			unsigned long iipa;
+			unsigned long ifs;
+			unsigned long iim;
+			unsigned long iha;
+			unsigned long rsv4[38];
+			unsigned long lid;
+			unsigned long ivr;
+			unsigned long tpr;
+			unsigned long eoi;
+			unsigned long irr[4];
+			unsigned long itv;
+			unsigned long pmv;
+			unsigned long cmcv;
+			unsigned long rsv5[5];
+			unsigned long lrr0;
+			unsigned long lrr1;
+			unsigned long rsv6[46];
+		};
+	};
 };
 
 struct kvm_regs {
@@ -214,4 +246,18 @@ struct kvm_sregs {
 struct kvm_fpu {
 };
 
+#define KVM_IA64_VCPU_STACK_SHIFT	16
+#define KVM_IA64_VCPU_STACK_SIZE	(1UL << KVM_IA64_VCPU_STACK_SHIFT)
+
+struct kvm_ia64_vcpu_stack {
+	unsigned char stack[KVM_IA64_VCPU_STACK_SIZE];
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
 #endif
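
The saved_vpd change is a classic overlay: wrapping vcr[128] in an anonymous union lets existing code keep indexing the flat array while new code names individual control registers, with the rsv1..rsv6 gaps padding the named view out to the same 128-slot layout. A standalone demonstration of the technique, using an invented 8-slot register block rather than the real VPD field offsets:

#include <assert.h>

struct regs {
	union {
		unsigned long cr[8];       /* flat view */
		struct {
			unsigned long dcr;    /* cr[0] */
			unsigned long itm;    /* cr[1] */
			unsigned long rsv[5]; /* cr[2..6], reserved padding */
			unsigned long last;   /* cr[7] */
		};
	};
};

int main(void)
{
	struct regs r = { { { 0 } } };

	r.cr[0] = 42;                 /* write through the array view   */
	assert(r.dcr == 42);          /* ...visible through the name    */
	r.last = 7;
	assert(r.cr[7] == 7);         /* and vice versa                 */

	/* Both views must stay the same size for the overlay to hold. */
	assert(sizeof(r) == 8 * sizeof(unsigned long));
	return 0;
}
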
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 348663661659..4542651e6acb 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -112,7 +112,11 @@
 #define VCPU_STRUCT_SHIFT	16
 #define VCPU_STRUCT_SIZE	(__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)
 
-#define KVM_STK_OFFSET		VCPU_STRUCT_SIZE
+/*
+ * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h
+ */
+#define KVM_STK_SHIFT		16
+#define KVM_STK_OFFSET		(__IA64_UL_CONST(1)<< KVM_STK_SHIFT)
 
 #define KVM_VM_STRUCT_SHIFT	19
 #define KVM_VM_STRUCT_SIZE	(__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
@@ -153,10 +157,10 @@ struct kvm_vm_data {
 	struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
 };
 
-#define VCPU_BASE(n)	KVM_VM_DATA_BASE + \
-				offsetof(struct kvm_vm_data, vcpu_data[n])
-#define VM_BASE		KVM_VM_DATA_BASE + \
-				offsetof(struct kvm_vm_data, kvm_vm_struct)
+#define VCPU_BASE(n)	(KVM_VM_DATA_BASE + \
+				offsetof(struct kvm_vm_data, vcpu_data[n]))
+#define KVM_VM_BASE	(KVM_VM_DATA_BASE + \
+				offsetof(struct kvm_vm_data, kvm_vm_struct))
 #define KVM_MEM_DIRTY_LOG_BASE	KVM_VM_DATA_BASE + \
 				offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
 
@@ -235,8 +239,6 @@ struct kvm_vm_data {
 
 struct kvm;
 struct kvm_vcpu;
-struct kvm_guest_debug{
-};
 
 struct kvm_mmio_req {
 	uint64_t addr;		/*  physical address */
@@ -462,6 +464,8 @@ struct kvm_arch {
 	unsigned long	metaphysical_rr4;
 	unsigned long	vmm_init_rr;
 
+	int		online_vcpus;
+
 	struct kvm_ioapic *vioapic;
 	struct kvm_vm_stat stat;
 	struct kvm_sal_data rdv_sal_data;
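
The VCPU_BASE/KVM_VM_BASE hunk is more than a rename: the old expansion KVM_VM_DATA_BASE + offsetof(...) was unparenthesized, so any use inside a larger expression could associate the wrong way. A minimal reproduction of that macro hazard with toy values:

#include <stdio.h>

#define BASE 0x1000

/* Unparenthesized, like the old VCPU_BASE(n). */
#define BAD(n)  BASE + (n) * 0x100
/* Fully parenthesized, like the fixed version. */
#define GOOD(n) (BASE + (n) * 0x100)

int main(void)
{
	/* Multiplying the "address" by 2:
	 * BAD(1) * 2  expands to  0x1000 + 0x100 * 2   = 0x1200 (wrong)
	 * GOOD(1) * 2 expands to (0x1000 + 0x100) * 2  = 0x2200 (right) */
	printf("bad  = %#x\n", BAD(1) * 2);
	printf("good = %#x\n", GOOD(1) * 2);
	return 0;
}
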
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index fe87b2121707..367d299d9938 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -11,7 +11,6 @@
 #define _ASM_IA64_MACHVEC_H
 
 #include <linux/types.h>
-#include <linux/swiotlb.h>
 
 /* forward declarations: */
 struct device;
@@ -45,24 +44,8 @@ typedef void ia64_mv_kernel_launch_event_t(void);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
-typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
-typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
-typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
-typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
-typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
-typedef int ia64_mv_dma_supported (struct device *, u64);
-
-typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
-typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
+typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
 
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
@@ -114,8 +97,6 @@ machvec_noop_bus (struct pci_bus *bus)
 
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *);
-extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
-extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
 extern void machvec_tlb_migrate_finish (struct mm_struct *);
 
 # if defined (CONFIG_IA64_HP_SIM)
@@ -148,19 +129,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  define platform_global_tlb_purge	ia64_mv.global_tlb_purge
 #  define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
 #  define platform_dma_init		ia64_mv.dma_init
-#  define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
-#  define platform_dma_free_coherent	ia64_mv.dma_free_coherent
-#  define platform_dma_map_single_attrs	ia64_mv.dma_map_single_attrs
-#  define platform_dma_unmap_single_attrs	ia64_mv.dma_unmap_single_attrs
-#  define platform_dma_map_sg_attrs	ia64_mv.dma_map_sg_attrs
-#  define platform_dma_unmap_sg_attrs	ia64_mv.dma_unmap_sg_attrs
-#  define platform_dma_sync_single_for_cpu	ia64_mv.dma_sync_single_for_cpu
-#  define platform_dma_sync_sg_for_cpu	ia64_mv.dma_sync_sg_for_cpu
-#  define platform_dma_sync_single_for_device	ia64_mv.dma_sync_single_for_device
-#  define platform_dma_sync_sg_for_device	ia64_mv.dma_sync_sg_for_device
-#  define platform_dma_mapping_error		ia64_mv.dma_mapping_error
-#  define platform_dma_supported		ia64_mv.dma_supported
 #  define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
+#  define platform_dma_get_ops		ia64_mv.dma_get_ops
 #  define platform_irq_to_vector	ia64_mv.irq_to_vector
 #  define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
 #  define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
@@ -203,19 +173,8 @@ struct ia64_machine_vector {
 	ia64_mv_global_tlb_purge_t *global_tlb_purge;
 	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
 	ia64_mv_dma_init *dma_init;
-	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
-	ia64_mv_dma_free_coherent *dma_free_coherent;
-	ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
-	ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
-	ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
-	ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
-	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
-	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
-	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
-	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
-	ia64_mv_dma_mapping_error *dma_mapping_error;
-	ia64_mv_dma_supported *dma_supported;
 	ia64_mv_dma_get_required_mask *dma_get_required_mask;
+	ia64_mv_dma_get_ops *dma_get_ops;
 	ia64_mv_irq_to_vector *irq_to_vector;
 	ia64_mv_local_vector_to_irq *local_vector_to_irq;
 	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -254,19 +213,8 @@ struct ia64_machine_vector {
 	platform_global_tlb_purge,		\
 	platform_tlb_migrate_finish,		\
 	platform_dma_init,			\
-	platform_dma_alloc_coherent,		\
-	platform_dma_free_coherent,		\
-	platform_dma_map_single_attrs,		\
-	platform_dma_unmap_single_attrs,	\
-	platform_dma_map_sg_attrs,		\
-	platform_dma_unmap_sg_attrs,		\
-	platform_dma_sync_single_for_cpu,	\
-	platform_dma_sync_sg_for_cpu,		\
-	platform_dma_sync_single_for_device,	\
-	platform_dma_sync_sg_for_device,	\
-	platform_dma_mapping_error,		\
-	platform_dma_supported,			\
 	platform_dma_get_required_mask,		\
+	platform_dma_get_ops,			\
 	platform_irq_to_vector,			\
 	platform_local_vector_to_irq,		\
 	platform_pci_get_legacy_mem,		\
@@ -302,6 +250,9 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 #  error Unknown configuration.  Update arch/ia64/include/asm/machvec.h.
 # endif /* CONFIG_IA64_GENERIC */
 
+extern void swiotlb_dma_init(void);
+extern struct dma_map_ops *dma_get_ops(struct device *);
+
 /*
  * Define default versions so we can extend machvec for new platforms without having
  * to update the machvec files for all existing platforms.
@@ -332,43 +283,10 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # define platform_kernel_launch_event	machvec_noop
 #endif
 #ifndef platform_dma_init
-# define platform_dma_init		swiotlb_init
-#endif
-#ifndef platform_dma_alloc_coherent
-# define platform_dma_alloc_coherent	swiotlb_alloc_coherent
-#endif
-#ifndef platform_dma_free_coherent
-# define platform_dma_free_coherent	swiotlb_free_coherent
-#endif
-#ifndef platform_dma_map_single_attrs
-# define platform_dma_map_single_attrs	swiotlb_map_single_attrs
-#endif
-#ifndef platform_dma_unmap_single_attrs
-# define platform_dma_unmap_single_attrs	swiotlb_unmap_single_attrs
-#endif
-#ifndef platform_dma_map_sg_attrs
-# define platform_dma_map_sg_attrs	swiotlb_map_sg_attrs
-#endif
-#ifndef platform_dma_unmap_sg_attrs
-# define platform_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs
-#endif
-#ifndef platform_dma_sync_single_for_cpu
-# define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
-#endif
-#ifndef platform_dma_sync_sg_for_cpu
-# define platform_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu
-#endif
-#ifndef platform_dma_sync_single_for_device
-# define platform_dma_sync_single_for_device	swiotlb_sync_single_for_device
-#endif
-#ifndef platform_dma_sync_sg_for_device
-# define platform_dma_sync_sg_for_device	swiotlb_sync_sg_for_device
-#endif
-#ifndef platform_dma_mapping_error
-# define platform_dma_mapping_error		swiotlb_dma_mapping_error
+# define platform_dma_init		swiotlb_dma_init
 #endif
-#ifndef platform_dma_supported
-# define platform_dma_supported		swiotlb_dma_supported
+#ifndef platform_dma_get_ops
+# define platform_dma_get_ops		dma_get_ops
 #endif
 #ifndef platform_dma_get_required_mask
 # define platform_dma_get_required_mask	ia64_dma_get_required_mask
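
The shrinking #ifndef block above is the machvec default mechanism at work: a platform header that wants its own hook defines the platform_* macro before this point, and anything left undefined falls back to the generic (now swiotlb-backed) version; the ops-table conversion is what lets fourteen per-operation DMA defaults shrink to two. A compact illustration of the pattern with invented hook names:

#include <stdio.h>

/* A platform header would do this *before* the defaults are seen: */
#define platform_greet custom_greet

static void custom_greet(void) { puts("custom platform hook"); }
/* Unused here because the override wins, but it is what the default
 * block would have selected. */
static void default_greet(void) { puts("generic default hook"); }
static void default_bye(void)   { puts("generic default bye"); }

/* The machvec-style default block: only fills in what's missing. */
#ifndef platform_greet
# define platform_greet default_greet
#endif
#ifndef platform_bye
# define platform_bye default_bye
#endif

int main(void)
{
	platform_greet();   /* overridden above */
	platform_bye();     /* falls back       */
	return 0;
}
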
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
index 3400b561e711..6ab1de5c45ef 100644
--- a/arch/ia64/include/asm/machvec_dig_vtd.h
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -2,14 +2,6 @@
 #define _ASM_IA64_MACHVEC_DIG_VTD_h
 
 extern ia64_mv_setup_t			dig_setup;
-extern ia64_mv_dma_alloc_coherent	vtd_alloc_coherent;
-extern ia64_mv_dma_free_coherent	vtd_free_coherent;
-extern ia64_mv_dma_map_single_attrs	vtd_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	vtd_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		vtd_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	vtd_unmap_sg_attrs;
-extern ia64_mv_dma_supported		iommu_dma_supported;
-extern ia64_mv_dma_mapping_error	vtd_dma_mapping_error;
 extern ia64_mv_dma_init			pci_iommu_alloc;
 
 /*
@@ -22,17 +14,5 @@ extern ia64_mv_dma_init			pci_iommu_alloc;
 #define platform_name				"dig_vtd"
 #define platform_setup				dig_setup
 #define platform_dma_init			pci_iommu_alloc
-#define platform_dma_alloc_coherent		vtd_alloc_coherent
-#define platform_dma_free_coherent		vtd_free_coherent
-#define platform_dma_map_single_attrs		vtd_map_single_attrs
-#define platform_dma_unmap_single_attrs		vtd_unmap_single_attrs
-#define platform_dma_map_sg_attrs		vtd_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		vtd_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
-#define platform_dma_supported			iommu_dma_supported
-#define platform_dma_mapping_error		vtd_dma_mapping_error
 
 #endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1.h b/arch/ia64/include/asm/machvec_hpzx1.h
index 2f57f5144b9f..3bd83d78a412 100644
--- a/arch/ia64/include/asm/machvec_hpzx1.h
+++ b/arch/ia64/include/asm/machvec_hpzx1.h
@@ -2,14 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_h
 
 extern ia64_mv_setup_t			dig_setup;
-extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent	sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs	sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs	sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs		sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs	sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported		sba_dma_supported;
-extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
+extern ia64_mv_dma_init			sba_dma_init;
 
 /*
  * This stuff has dual use!
@@ -20,18 +13,6 @@ extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
  */
 #define platform_name				"hpzx1"
 #define platform_setup				dig_setup
-#define platform_dma_init			machvec_noop
-#define platform_dma_alloc_coherent		sba_alloc_coherent
-#define platform_dma_free_coherent		sba_free_coherent
-#define platform_dma_map_single_attrs		sba_map_single_attrs
-#define platform_dma_unmap_single_attrs		sba_unmap_single_attrs
-#define platform_dma_map_sg_attrs		sba_map_sg_attrs
-#define platform_dma_unmap_sg_attrs		sba_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device	machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device		machvec_dma_sync_sg
-#define platform_dma_supported			sba_dma_supported
-#define platform_dma_mapping_error		sba_dma_mapping_error
+#define platform_dma_init			sba_dma_init
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h index a842cdda827b..1091ac39740c 100644 --- a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h +++ b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h | |||
@@ -2,18 +2,7 @@ | |||
2 | #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h | 2 | #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h |
3 | 3 | ||
4 | extern ia64_mv_setup_t dig_setup; | 4 | extern ia64_mv_setup_t dig_setup; |
5 | extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent; | 5 | extern ia64_mv_dma_get_ops hwsw_dma_get_ops; |
6 | extern ia64_mv_dma_free_coherent hwsw_free_coherent; | ||
7 | extern ia64_mv_dma_map_single_attrs hwsw_map_single_attrs; | ||
8 | extern ia64_mv_dma_unmap_single_attrs hwsw_unmap_single_attrs; | ||
9 | extern ia64_mv_dma_map_sg_attrs hwsw_map_sg_attrs; | ||
10 | extern ia64_mv_dma_unmap_sg_attrs hwsw_unmap_sg_attrs; | ||
11 | extern ia64_mv_dma_supported hwsw_dma_supported; | ||
12 | extern ia64_mv_dma_mapping_error hwsw_dma_mapping_error; | ||
13 | extern ia64_mv_dma_sync_single_for_cpu hwsw_sync_single_for_cpu; | ||
14 | extern ia64_mv_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu; | ||
15 | extern ia64_mv_dma_sync_single_for_device hwsw_sync_single_for_device; | ||
16 | extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device; | ||
17 | 6 | ||
18 | /* | 7 | /* |
19 | * This stuff has dual use! | 8 | * This stuff has dual use! |
@@ -23,20 +12,8 @@ extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device; | |||
23 | * the macros are used directly. | 12 | * the macros are used directly. |
24 | */ | 13 | */ |
25 | #define platform_name "hpzx1_swiotlb" | 14 | #define platform_name "hpzx1_swiotlb" |
26 | |||
27 | #define platform_setup dig_setup | 15 | #define platform_setup dig_setup |
28 | #define platform_dma_init machvec_noop | 16 | #define platform_dma_init machvec_noop |
29 | #define platform_dma_alloc_coherent hwsw_alloc_coherent | 17 | #define platform_dma_get_ops hwsw_dma_get_ops |
30 | #define platform_dma_free_coherent hwsw_free_coherent | ||
31 | #define platform_dma_map_single_attrs hwsw_map_single_attrs | ||
32 | #define platform_dma_unmap_single_attrs hwsw_unmap_single_attrs | ||
33 | #define platform_dma_map_sg_attrs hwsw_map_sg_attrs | ||
34 | #define platform_dma_unmap_sg_attrs hwsw_unmap_sg_attrs | ||
35 | #define platform_dma_supported hwsw_dma_supported | ||
36 | #define platform_dma_mapping_error hwsw_dma_mapping_error | ||
37 | #define platform_dma_sync_single_for_cpu hwsw_sync_single_for_cpu | ||
38 | #define platform_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu | ||
39 | #define platform_dma_sync_single_for_device hwsw_sync_single_for_device | ||
40 | #define platform_dma_sync_sg_for_device hwsw_sync_sg_for_device | ||
41 | 18 | ||
42 | #endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */ | 19 | #endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */ |
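hwsw_dma_get_ops replaces a hwsw_* trampoline per operation with one per-device decision made at lookup time. A sketch of the selection logic, assuming a use_swiotlb() predicate that tests whether the sba IOMMU can serve the device's DMA mask:

	/* Sketch: pick an ops table per device instead of trampolining
	 * every individual operation; use_swiotlb() is assumed to test
	 * whether the device's DMA mask rules out the hardware IOMMU. */
	struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
	{
		if (use_swiotlb(dev))
			return &swiotlb_dma_ops;
		return &sba_dma_ops;
	}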
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h index f1a6e0d6dfa5..f061a30aac42 100644 --- a/arch/ia64/include/asm/machvec_sn2.h +++ b/arch/ia64/include/asm/machvec_sn2.h | |||
@@ -55,19 +55,8 @@ extern ia64_mv_readb_t __sn_readb_relaxed; | |||
55 | extern ia64_mv_readw_t __sn_readw_relaxed; | 55 | extern ia64_mv_readw_t __sn_readw_relaxed; |
56 | extern ia64_mv_readl_t __sn_readl_relaxed; | 56 | extern ia64_mv_readl_t __sn_readl_relaxed; |
57 | extern ia64_mv_readq_t __sn_readq_relaxed; | 57 | extern ia64_mv_readq_t __sn_readq_relaxed; |
58 | extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent; | ||
59 | extern ia64_mv_dma_free_coherent sn_dma_free_coherent; | ||
60 | extern ia64_mv_dma_map_single_attrs sn_dma_map_single_attrs; | ||
61 | extern ia64_mv_dma_unmap_single_attrs sn_dma_unmap_single_attrs; | ||
62 | extern ia64_mv_dma_map_sg_attrs sn_dma_map_sg_attrs; | ||
63 | extern ia64_mv_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs; | ||
64 | extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu; | ||
65 | extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu; | ||
66 | extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device; | ||
67 | extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device; | ||
68 | extern ia64_mv_dma_mapping_error sn_dma_mapping_error; | ||
69 | extern ia64_mv_dma_supported sn_dma_supported; | ||
70 | extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask; | 58 | extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask; |
59 | extern ia64_mv_dma_init sn_dma_init; | ||
71 | extern ia64_mv_migrate_t sn_migrate; | 60 | extern ia64_mv_migrate_t sn_migrate; |
72 | extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event; | 61 | extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event; |
73 | extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq; | 62 | extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq; |
@@ -111,20 +100,8 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus; | |||
111 | #define platform_pci_get_legacy_mem sn_pci_get_legacy_mem | 100 | #define platform_pci_get_legacy_mem sn_pci_get_legacy_mem |
112 | #define platform_pci_legacy_read sn_pci_legacy_read | 101 | #define platform_pci_legacy_read sn_pci_legacy_read |
113 | #define platform_pci_legacy_write sn_pci_legacy_write | 102 | #define platform_pci_legacy_write sn_pci_legacy_write |
114 | #define platform_dma_init machvec_noop | ||
115 | #define platform_dma_alloc_coherent sn_dma_alloc_coherent | ||
116 | #define platform_dma_free_coherent sn_dma_free_coherent | ||
117 | #define platform_dma_map_single_attrs sn_dma_map_single_attrs | ||
118 | #define platform_dma_unmap_single_attrs sn_dma_unmap_single_attrs | ||
119 | #define platform_dma_map_sg_attrs sn_dma_map_sg_attrs | ||
120 | #define platform_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs | ||
121 | #define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu | ||
122 | #define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu | ||
123 | #define platform_dma_sync_single_for_device sn_dma_sync_single_for_device | ||
124 | #define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device | ||
125 | #define platform_dma_mapping_error sn_dma_mapping_error | ||
126 | #define platform_dma_supported sn_dma_supported | ||
127 | #define platform_dma_get_required_mask sn_dma_get_required_mask | 103 | #define platform_dma_get_required_mask sn_dma_get_required_mask |
104 | #define platform_dma_init sn_dma_init | ||
128 | #define platform_migrate sn_migrate | 105 | #define platform_migrate sn_migrate |
129 | #define platform_kernel_launch_event sn_kernel_launch_event | 106 | #define platform_kernel_launch_event sn_kernel_launch_event |
130 | #ifdef CONFIG_PCI_MSI | 107 | #ifdef CONFIG_PCI_MSI |
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h index 040bc87db930..7f2a456603cb 100644 --- a/arch/ia64/include/asm/mmu_context.h +++ b/arch/ia64/include/asm/mmu_context.h | |||
@@ -87,7 +87,7 @@ get_mmu_context (struct mm_struct *mm) | |||
87 | /* re-check, now that we've got the lock: */ | 87 | /* re-check, now that we've got the lock: */ |
88 | context = mm->context; | 88 | context = mm->context; |
89 | if (context == 0) { | 89 | if (context == 0) { |
90 | cpus_clear(mm->cpu_vm_mask); | 90 | cpumask_clear(mm_cpumask(mm)); |
91 | if (ia64_ctx.next >= ia64_ctx.limit) { | 91 | if (ia64_ctx.next >= ia64_ctx.limit) { |
92 | ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap, | 92 | ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap, |
93 | ia64_ctx.max_ctx, ia64_ctx.next); | 93 | ia64_ctx.max_ctx, ia64_ctx.next); |
@@ -166,8 +166,8 @@ activate_context (struct mm_struct *mm) | |||
166 | 166 | ||
167 | do { | 167 | do { |
168 | context = get_mmu_context(mm); | 168 | context = get_mmu_context(mm); |
169 | if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) | 169 | if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) |
170 | cpu_set(smp_processor_id(), mm->cpu_vm_mask); | 170 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); |
171 | reload_context(context); | 171 | reload_context(context); |
172 | /* | 172 | /* |
173 | * in the unlikely event of a TLB-flush by another thread, | 173 | * in the unlikely event of a TLB-flush by another thread, |
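The mmu_context.h hunks are part of the tree-wide cpumask conversion: the old accessors operated on a bare cpumask_t field, while the new cpumask_* functions take a struct cpumask pointer, with the mm_cpumask(mm) wrapper hiding the field access. The substitutions above, side by side:

	/* Old style: direct field access, value-based helpers. */
	cpus_clear(mm->cpu_vm_mask);
	if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		cpu_set(smp_processor_id(), mm->cpu_vm_mask);

	/* New style: pointer-based helpers via the mm_cpumask() wrapper. */
	cpumask_clear(mm_cpumask(mm));
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));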
diff --git a/arch/ia64/include/asm/msidef.h b/arch/ia64/include/asm/msidef.h new file mode 100644 index 000000000000..592c1047a0c5 --- /dev/null +++ b/arch/ia64/include/asm/msidef.h | |||
@@ -0,0 +1,42 @@ | |||
1 | #ifndef _IA64_MSI_DEF_H | ||
2 | #define _IA64_MSI_DEF_H | ||
3 | |||
4 | /* | ||
5 | * Shifts for APIC-based data | ||
6 | */ | ||
7 | |||
8 | #define MSI_DATA_VECTOR_SHIFT 0 | ||
9 | #define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT) | ||
10 | #define MSI_DATA_VECTOR_MASK 0xffffff00 | ||
11 | |||
12 | #define MSI_DATA_DELIVERY_MODE_SHIFT 8 | ||
13 | #define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) | ||
14 | #define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT) | ||
15 | |||
16 | #define MSI_DATA_LEVEL_SHIFT 14 | ||
17 | #define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT) | ||
18 | #define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT) | ||
19 | |||
20 | #define MSI_DATA_TRIGGER_SHIFT 15 | ||
21 | #define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT) | ||
22 | #define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT) | ||
23 | |||
24 | /* | ||
25 | * Shift/mask fields for APIC-based bus address | ||
26 | */ | ||
27 | |||
28 | #define MSI_ADDR_DEST_ID_SHIFT 4 | ||
29 | #define MSI_ADDR_HEADER 0xfee00000 | ||
30 | |||
31 | #define MSI_ADDR_DEST_ID_MASK 0xfff0000f | ||
32 | #define MSI_ADDR_DEST_ID_CPU(cpu) ((cpu) << MSI_ADDR_DEST_ID_SHIFT) | ||
33 | |||
34 | #define MSI_ADDR_DEST_MODE_SHIFT 2 | ||
35 | #define MSI_ADDR_DEST_MODE_PHYS (0 << MSI_ADDR_DEST_MODE_SHIFT) | ||
36 | #define MSI_ADDR_DEST_MODE_LOGIC (1 << MSI_ADDR_DEST_MODE_SHIFT) | ||
37 | |||
38 | #define MSI_ADDR_REDIRECTION_SHIFT 3 | ||
39 | #define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) | ||
40 | #define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) | ||
41 | |||
42 | #endif/* _IA64_MSI_DEF_H */ | ||
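The new header only defines field encodings; callers OR them together to build the 32-bit MSI address and data words, as the msi_ia64.c hunks below do. The wrapper functions here are hypothetical, but the field composition mirrors that code:

	#include <asm/msidef.h>

	/* Hypothetical helpers: compose the address/data pair for a
	 * fixed, edge-triggered MSI aimed at one physical CPU. */
	static u32 msi_addr(unsigned int dest_phys_id)
	{
		return MSI_ADDR_HEADER |
		       MSI_ADDR_DEST_MODE_PHYS |
		       MSI_ADDR_REDIRECTION_CPU |
		       MSI_ADDR_DEST_ID_CPU(dest_phys_id);
	}

	static u32 msi_data(u8 vector)
	{
		return MSI_DATA_TRIGGER_EDGE |
		       MSI_DATA_LEVEL_ASSERT |
		       MSI_DATA_DELIVERY_FIXED |
		       MSI_DATA_VECTOR(vector);
	}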
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h index 77f30b664b4e..30cf46534dd2 100644 --- a/arch/ia64/include/asm/percpu.h +++ b/arch/ia64/include/asm/percpu.h | |||
@@ -27,12 +27,12 @@ extern void *per_cpu_init(void); | |||
27 | 27 | ||
28 | #else /* ! SMP */ | 28 | #else /* ! SMP */ |
29 | 29 | ||
30 | #define PER_CPU_ATTRIBUTES __attribute__((__section__(".data.percpu"))) | ||
31 | |||
32 | #define per_cpu_init() (__phys_per_cpu_start) | 30 | #define per_cpu_init() (__phys_per_cpu_start) |
33 | 31 | ||
34 | #endif /* SMP */ | 32 | #endif /* SMP */ |
35 | 33 | ||
34 | #define PER_CPU_BASE_SECTION ".data.percpu" | ||
35 | |||
36 | /* | 36 | /* |
37 | * Be extremely careful when taking the address of this variable! Due to virtual | 37 | * Be extremely careful when taking the address of this variable! Due to virtual |
38 | * remapping, it is different from the canonical address returned by __get_cpu_var(var)! | 38 | * remapping, it is different from the canonical address returned by __get_cpu_var(var)! |
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h index 21c402365d0e..598408336251 100644 --- a/arch/ia64/include/asm/smp.h +++ b/arch/ia64/include/asm/smp.h | |||
@@ -126,7 +126,8 @@ extern void identify_siblings (struct cpuinfo_ia64 *); | |||
126 | extern int is_multithreading_enabled(void); | 126 | extern int is_multithreading_enabled(void); |
127 | 127 | ||
128 | extern void arch_send_call_function_single_ipi(int cpu); | 128 | extern void arch_send_call_function_single_ipi(int cpu); |
129 | extern void arch_send_call_function_ipi(cpumask_t mask); | 129 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); |
130 | #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask | ||
130 | 131 | ||
131 | #else /* CONFIG_SMP */ | 132 | #else /* CONFIG_SMP */ |
132 | 133 | ||
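The second define above looks redundant but is the standard self-define idiom: defining the name to itself lets generic code probe with the preprocessor whether the architecture supplies the mask-based IPI, and fall back if not. Roughly what the generic side can then do (a sketch; the exact fallback in kernel/smp.c of this era may differ in detail):

	/* Sketch of the generic-side probe this idiom enables. */
	#ifndef arch_send_call_function_ipi_mask
	#define arch_send_call_function_ipi_mask(maskp) \
		arch_send_call_function_ipi(*(maskp))
	#endif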
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h index d5ef0aa3e312..745421225ec6 100644 --- a/arch/ia64/include/asm/socket.h +++ b/arch/ia64/include/asm/socket.h | |||
@@ -63,4 +63,7 @@ | |||
63 | 63 | ||
64 | #define SO_MARK 36 | 64 | #define SO_MARK 36 |
65 | 65 | ||
66 | #define SO_TIMESTAMPING 37 | ||
67 | #define SCM_TIMESTAMPING SO_TIMESTAMPING | ||
68 | |||
66 | #endif /* _ASM_IA64_SOCKET_H */ | 69 | #endif /* _ASM_IA64_SOCKET_H */ |
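SO_TIMESTAMPING and SCM_TIMESTAMPING mirror the generic socket timestamping option added across all architectures in this release: the option selects which packet timestamps to generate, and the results come back as SCM_TIMESTAMPING control messages. A minimal userspace sketch, assuming the SOF_TIMESTAMPING_* flags from <linux/net_tstamp.h> and an already-open socket fd:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <linux/net_tstamp.h>

	/* Request raw hardware receive timestamps (sketch only; fd is
	 * assumed to be a socket on timestamping-capable hardware). */
	int flags = SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
		       &flags, sizeof(flags)) < 0)
		perror("SO_TIMESTAMPING");
	/* Timestamps then arrive as SCM_TIMESTAMPING cmsgs via recvmsg(). */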
diff --git a/arch/ia64/include/asm/swab.h b/arch/ia64/include/asm/swab.h index 6aa58b699eea..c89a8cb5d8a5 100644 --- a/arch/ia64/include/asm/swab.h +++ b/arch/ia64/include/asm/swab.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co. | 6 | * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <asm/types.h> | 9 | #include <linux/types.h> |
10 | #include <asm/intrinsics.h> | 10 | #include <asm/intrinsics.h> |
11 | #include <linux/compiler.h> | 11 | #include <linux/compiler.h> |
12 | 12 | ||
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h index 32f3af1641c5..7b4c8c70b2d1 100644 --- a/arch/ia64/include/asm/topology.h +++ b/arch/ia64/include/asm/topology.h | |||
@@ -44,11 +44,6 @@ | |||
44 | #define parent_node(nid) (nid) | 44 | #define parent_node(nid) (nid) |
45 | 45 | ||
46 | /* | 46 | /* |
47 | * Returns the number of the first CPU on Node 'node'. | ||
48 | */ | ||
49 | #define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node))) | ||
50 | |||
51 | /* | ||
52 | * Determines the node for a given pci bus | 47 | * Determines the node for a given pci bus |
53 | */ | 48 | */ |
54 | #define pcibus_to_node(bus) PCI_CONTROLLER(bus)->node | 49 | #define pcibus_to_node(bus) PCI_CONTROLLER(bus)->node |
@@ -84,7 +79,7 @@ void build_cpu_to_node_map(void); | |||
84 | .child = NULL, \ | 79 | .child = NULL, \ |
85 | .groups = NULL, \ | 80 | .groups = NULL, \ |
86 | .min_interval = 8, \ | 81 | .min_interval = 8, \ |
87 | .max_interval = 8*(min(num_online_cpus(), 32)), \ | 82 | .max_interval = 8*(min(num_online_cpus(), 32U)), \ |
88 | .busy_factor = 64, \ | 83 | .busy_factor = 64, \ |
89 | .imbalance_pct = 125, \ | 84 | .imbalance_pct = 125, \ |
90 | .cache_nice_tries = 2, \ | 85 | .cache_nice_tries = 2, \ |
@@ -117,11 +112,6 @@ void build_cpu_to_node_map(void); | |||
117 | 112 | ||
118 | extern void arch_fix_phys_package_id(int num, u32 slot); | 113 | extern void arch_fix_phys_package_id(int num, u32 slot); |
119 | 114 | ||
120 | #define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \ | ||
121 | CPU_MASK_ALL : \ | ||
122 | node_to_cpumask(pcibus_to_node(bus)) \ | ||
123 | ) | ||
124 | |||
125 | #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ | 115 | #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ |
126 | cpu_all_mask : \ | 116 | cpu_all_mask : \ |
127 | cpumask_of_node(pcibus_to_node(bus))) | 117 | cpumask_of_node(pcibus_to_node(bus))) |
diff --git a/arch/ia64/include/asm/uv/uv.h b/arch/ia64/include/asm/uv/uv.h new file mode 100644 index 000000000000..61b5bdfd980e --- /dev/null +++ b/arch/ia64/include/asm/uv/uv.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef _ASM_IA64_UV_UV_H | ||
2 | #define _ASM_IA64_UV_UV_H | ||
3 | |||
4 | #include <asm/system.h> | ||
5 | #include <asm/sn/simulator.h> | ||
6 | |||
7 | static inline int is_uv_system(void) | ||
8 | { | ||
9 | /* temporary support for running on hardware simulator */ | ||
10 | return IS_MEDUSA() || ia64_platform_is("uv"); | ||
11 | } | ||
12 | |||
13 | #endif /* _ASM_IA64_UV_UV_H */ | ||
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index dbc19e4d5ef7..5628e9a990a6 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile | |||
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds | |||
7 | obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ | 7 | obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ |
8 | irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \ | 8 | irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \ |
9 | salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ | 9 | salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ |
10 | unwind.o mca.o mca_asm.o topology.o | 10 | unwind.o mca.o mca_asm.o topology.o dma-mapping.o |
11 | 11 | ||
12 | obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o | 12 | obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o |
13 | obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o | 13 | obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o |
@@ -44,9 +44,7 @@ ifneq ($(CONFIG_IA64_ESI),) | |||
44 | obj-y += esi_stub.o # must be in kernel proper | 44 | obj-y += esi_stub.o # must be in kernel proper |
45 | endif | 45 | endif |
46 | obj-$(CONFIG_DMAR) += pci-dma.o | 46 | obj-$(CONFIG_DMAR) += pci-dma.o |
47 | ifeq ($(CONFIG_DMAR), y) | ||
48 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | 47 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o |
49 | endif | ||
50 | 48 | ||
51 | # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. | 49 | # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. |
52 | CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 | 50 | CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 |
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index d541671caf4a..5510317db37b 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
@@ -199,6 +199,10 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size) | |||
199 | return __va(phys_addr); | 199 | return __va(phys_addr); |
200 | } | 200 | } |
201 | 201 | ||
202 | void __init __acpi_unmap_table(char *map, unsigned long size) | ||
203 | { | ||
204 | } | ||
205 | |||
202 | /* -------------------------------------------------------------------------- | 206 | /* -------------------------------------------------------------------------- |
203 | Boot-time Table Parsing | 207 | Boot-time Table Parsing |
204 | -------------------------------------------------------------------------- */ | 208 | -------------------------------------------------------------------------- */ |
@@ -886,7 +890,7 @@ __init void prefill_possible_map(void) | |||
886 | possible, max((possible - available_cpus), 0)); | 890 | possible, max((possible - available_cpus), 0)); |
887 | 891 | ||
888 | for (i = 0; i < possible; i++) | 892 | for (i = 0; i < possible; i++) |
889 | cpu_set(i, cpu_possible_map); | 893 | set_cpu_possible(i, true); |
890 | } | 894 | } |
891 | 895 | ||
892 | int acpi_map_lsapic(acpi_handle handle, int *pcpu) | 896 | int acpi_map_lsapic(acpi_handle handle, int *pcpu) |
@@ -924,9 +928,9 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu) | |||
924 | buffer.length = ACPI_ALLOCATE_BUFFER; | 928 | buffer.length = ACPI_ALLOCATE_BUFFER; |
925 | buffer.pointer = NULL; | 929 | buffer.pointer = NULL; |
926 | 930 | ||
927 | cpus_complement(tmp_map, cpu_present_map); | 931 | cpumask_complement(&tmp_map, cpu_present_mask); |
928 | cpu = first_cpu(tmp_map); | 932 | cpu = cpumask_first(&tmp_map); |
929 | if (cpu >= NR_CPUS) | 933 | if (cpu >= nr_cpu_ids) |
930 | return -EINVAL; | 934 | return -EINVAL; |
931 | 935 | ||
932 | acpi_map_cpu2node(handle, cpu, physid); | 936 | acpi_map_cpu2node(handle, cpu, physid); |
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c new file mode 100644 index 000000000000..086a2aeb0404 --- /dev/null +++ b/arch/ia64/kernel/dma-mapping.c | |||
@@ -0,0 +1,13 @@ | |||
1 | #include <linux/dma-mapping.h> | ||
2 | |||
3 | /* Set this to 1 if there is a HW IOMMU in the system */ | ||
4 | int iommu_detected __read_mostly; | ||
5 | |||
6 | struct dma_map_ops *dma_ops; | ||
7 | EXPORT_SYMBOL(dma_ops); | ||
8 | |||
9 | struct dma_map_ops *dma_get_ops(struct device *dev) | ||
10 | { | ||
11 | return dma_ops; | ||
12 | } | ||
13 | EXPORT_SYMBOL(dma_get_ops); | ||
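With a single global table, dma_get_ops() is just the lookup above; the payoff is that every generic wrapper now indirects through one struct instead of a machvec macro per operation. A sketch of a consumer, shaped like the inline wrappers in the dma-mapping headers (the wrapper name is illustrative):

	/* Sketch: one indirection replaces per-operation dispatch.
	 * map_page's trailing argument is the dma_attrs pointer. */
	static inline dma_addr_t
	sketch_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir)
	{
		struct dma_map_ops *ops = dma_get_ops(dev);

		return ops->map_page(dev, page, offset, size, dir, NULL);
	}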
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index e13125058bed..166e0d839fa0 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c | |||
@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi) | |||
880 | if (iosapic_intr_info[irq].count == 0) { | 880 | if (iosapic_intr_info[irq].count == 0) { |
881 | #ifdef CONFIG_SMP | 881 | #ifdef CONFIG_SMP |
882 | /* Clear affinity */ | 882 | /* Clear affinity */ |
883 | cpus_setall(idesc->affinity); | 883 | cpumask_setall(idesc->affinity); |
884 | #endif | 884 | #endif |
885 | /* Clear the interrupt information */ | 885 | /* Clear the interrupt information */ |
886 | iosapic_intr_info[irq].dest = 0; | 886 | iosapic_intr_info[irq].dest = 0; |
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index a58f64ca9f0e..7429752ef5ad 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c | |||
@@ -80,7 +80,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
80 | seq_printf(p, "%10u ", kstat_irqs(i)); | 80 | seq_printf(p, "%10u ", kstat_irqs(i)); |
81 | #else | 81 | #else |
82 | for_each_online_cpu(j) { | 82 | for_each_online_cpu(j) { |
83 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | 83 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); |
84 | } | 84 | } |
85 | #endif | 85 | #endif |
86 | seq_printf(p, " %14s", irq_desc[i].chip->name); | 86 | seq_printf(p, " %14s", irq_desc[i].chip->name); |
@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 }; | |||
103 | void set_irq_affinity_info (unsigned int irq, int hwid, int redir) | 103 | void set_irq_affinity_info (unsigned int irq, int hwid, int redir) |
104 | { | 104 | { |
105 | if (irq < NR_IRQS) { | 105 | if (irq < NR_IRQS) { |
106 | cpumask_copy(&irq_desc[irq].affinity, | 106 | cpumask_copy(irq_desc[irq].affinity, |
107 | cpumask_of(cpu_logical_id(hwid))); | 107 | cpumask_of(cpu_logical_id(hwid))); |
108 | irq_redir[irq] = (char) (redir & 0xff); | 108 | irq_redir[irq] = (char) (redir & 0xff); |
109 | } | 109 | } |
@@ -148,7 +148,7 @@ static void migrate_irqs(void) | |||
148 | if (desc->status == IRQ_PER_CPU) | 148 | if (desc->status == IRQ_PER_CPU) |
149 | continue; | 149 | continue; |
150 | 150 | ||
151 | if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask) | 151 | if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask) |
152 | >= nr_cpu_ids) { | 152 | >= nr_cpu_ids) { |
153 | /* | 153 | /* |
154 | * Save it for phase 2 processing | 154 | * Save it for phase 2 processing |
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 28d3d483db92..acc4d19ae62a 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c | |||
@@ -493,14 +493,15 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) | |||
493 | saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); | 493 | saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); |
494 | ia64_srlz_d(); | 494 | ia64_srlz_d(); |
495 | while (vector != IA64_SPURIOUS_INT_VECTOR) { | 495 | while (vector != IA64_SPURIOUS_INT_VECTOR) { |
496 | int irq = local_vector_to_irq(vector); | ||
497 | struct irq_desc *desc = irq_to_desc(irq); | ||
498 | |||
496 | if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { | 499 | if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { |
497 | smp_local_flush_tlb(); | 500 | smp_local_flush_tlb(); |
498 | kstat_this_cpu.irqs[vector]++; | 501 | kstat_incr_irqs_this_cpu(irq, desc); |
499 | } else if (unlikely(IS_RESCHEDULE(vector))) | 502 | } else if (unlikely(IS_RESCHEDULE(vector))) { |
500 | kstat_this_cpu.irqs[vector]++; | 503 | kstat_incr_irqs_this_cpu(irq, desc); |
501 | else { | 504 | } else { |
502 | int irq = local_vector_to_irq(vector); | ||
503 | |||
504 | ia64_setreg(_IA64_REG_CR_TPR, vector); | 505 | ia64_setreg(_IA64_REG_CR_TPR, vector); |
505 | ia64_srlz_d(); | 506 | ia64_srlz_d(); |
506 | 507 | ||
@@ -543,22 +544,24 @@ void ia64_process_pending_intr(void) | |||
543 | 544 | ||
544 | vector = ia64_get_ivr(); | 545 | vector = ia64_get_ivr(); |
545 | 546 | ||
546 | irq_enter(); | 547 | irq_enter(); |
547 | saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); | 548 | saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); |
548 | ia64_srlz_d(); | 549 | ia64_srlz_d(); |
549 | 550 | ||
550 | /* | 551 | /* |
551 | * Perform normal interrupt style processing | 552 | * Perform normal interrupt style processing |
552 | */ | 553 | */ |
553 | while (vector != IA64_SPURIOUS_INT_VECTOR) { | 554 | while (vector != IA64_SPURIOUS_INT_VECTOR) { |
555 | int irq = local_vector_to_irq(vector); | ||
556 | struct irq_desc *desc = irq_to_desc(irq); | ||
557 | |||
554 | if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { | 558 | if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { |
555 | smp_local_flush_tlb(); | 559 | smp_local_flush_tlb(); |
556 | kstat_this_cpu.irqs[vector]++; | 560 | kstat_incr_irqs_this_cpu(irq, desc); |
557 | } else if (unlikely(IS_RESCHEDULE(vector))) | 561 | } else if (unlikely(IS_RESCHEDULE(vector))) { |
558 | kstat_this_cpu.irqs[vector]++; | 562 | kstat_incr_irqs_this_cpu(irq, desc); |
559 | else { | 563 | } else { |
560 | struct pt_regs *old_regs = set_irq_regs(NULL); | 564 | struct pt_regs *old_regs = set_irq_regs(NULL); |
561 | int irq = local_vector_to_irq(vector); | ||
562 | 565 | ||
563 | ia64_setreg(_IA64_REG_CR_TPR, vector); | 566 | ia64_setreg(_IA64_REG_CR_TPR, vector); |
564 | ia64_srlz_d(); | 567 | ia64_srlz_d(); |
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c index 7ccb228ceedc..d41a40ef80c0 100644 --- a/arch/ia64/kernel/machvec.c +++ b/arch/ia64/kernel/machvec.c | |||
@@ -1,5 +1,5 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | 2 | #include <linux/dma-mapping.h> | |
3 | #include <asm/machvec.h> | 3 | #include <asm/machvec.h> |
4 | #include <asm/system.h> | 4 | #include <asm/system.h> |
5 | 5 | ||
@@ -75,14 +75,16 @@ machvec_timer_interrupt (int irq, void *dev_id) | |||
75 | EXPORT_SYMBOL(machvec_timer_interrupt); | 75 | EXPORT_SYMBOL(machvec_timer_interrupt); |
76 | 76 | ||
77 | void | 77 | void |
78 | machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir) | 78 | machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size, |
79 | enum dma_data_direction dir) | ||
79 | { | 80 | { |
80 | mb(); | 81 | mb(); |
81 | } | 82 | } |
82 | EXPORT_SYMBOL(machvec_dma_sync_single); | 83 | EXPORT_SYMBOL(machvec_dma_sync_single); |
83 | 84 | ||
84 | void | 85 | void |
85 | machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir) | 86 | machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n, |
87 | enum dma_data_direction dir) | ||
86 | { | 88 | { |
87 | mb(); | 89 | mb(); |
88 | } | 90 | } |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index bab1de2d2f6a..8f33a8840422 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
@@ -1456,9 +1456,9 @@ ia64_mca_cmc_int_caller(int cmc_irq, void *arg) | |||
1456 | 1456 | ||
1457 | ia64_mca_cmc_int_handler(cmc_irq, arg); | 1457 | ia64_mca_cmc_int_handler(cmc_irq, arg); |
1458 | 1458 | ||
1459 | for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); | 1459 | cpuid = cpumask_next(cpuid+1, cpu_online_mask); |
1460 | 1460 | ||
1461 | if (cpuid < NR_CPUS) { | 1461 | if (cpuid < nr_cpu_ids) { |
1462 | platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); | 1462 | platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); |
1463 | } else { | 1463 | } else { |
1464 | /* If no log record, switch out of polling mode */ | 1464 | /* If no log record, switch out of polling mode */ |
@@ -1525,7 +1525,7 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg) | |||
1525 | 1525 | ||
1526 | ia64_mca_cpe_int_handler(cpe_irq, arg); | 1526 | ia64_mca_cpe_int_handler(cpe_irq, arg); |
1527 | 1527 | ||
1528 | for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); | 1528 | cpuid = cpumask_next(cpuid+1, cpu_online_mask); |
1529 | 1529 | ||
1530 | if (cpuid < NR_CPUS) { | 1530 | if (cpuid < NR_CPUS) { |
1531 | platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); | 1531 | platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); |
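One subtlety in the conversion above: cpumask_next(n, mask) returns the first set CPU strictly greater than n, while the old loop began scanning at cpuid + 1 inclusive. If that reading of cpumask_next() is right, passing cpuid + 1 starts the search one CPU later than the loop it replaces; the literal drop-in would be:

	/* Old loop: first online CPU >= cpuid + 1.
	 * cpumask_next(n, mask) already yields the first set CPU > n,
	 * so the exact equivalent passes cpuid, not cpuid + 1. */
	cpuid = cpumask_next(cpuid, cpu_online_mask);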
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 890339339035..2b15e233f7fe 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c | |||
@@ -7,44 +7,7 @@ | |||
7 | #include <linux/msi.h> | 7 | #include <linux/msi.h> |
8 | #include <linux/dmar.h> | 8 | #include <linux/dmar.h> |
9 | #include <asm/smp.h> | 9 | #include <asm/smp.h> |
10 | 10 | #include <asm/msidef.h> | |
11 | /* | ||
12 | * Shifts for APIC-based data | ||
13 | */ | ||
14 | |||
15 | #define MSI_DATA_VECTOR_SHIFT 0 | ||
16 | #define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT) | ||
17 | #define MSI_DATA_VECTOR_MASK 0xffffff00 | ||
18 | |||
19 | #define MSI_DATA_DELIVERY_SHIFT 8 | ||
20 | #define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT) | ||
21 | #define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_SHIFT) | ||
22 | |||
23 | #define MSI_DATA_LEVEL_SHIFT 14 | ||
24 | #define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT) | ||
25 | #define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT) | ||
26 | |||
27 | #define MSI_DATA_TRIGGER_SHIFT 15 | ||
28 | #define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT) | ||
29 | #define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT) | ||
30 | |||
31 | /* | ||
32 | * Shift/mask fields for APIC-based bus address | ||
33 | */ | ||
34 | |||
35 | #define MSI_TARGET_CPU_SHIFT 4 | ||
36 | #define MSI_ADDR_HEADER 0xfee00000 | ||
37 | |||
38 | #define MSI_ADDR_DESTID_MASK 0xfff0000f | ||
39 | #define MSI_ADDR_DESTID_CPU(cpu) ((cpu) << MSI_TARGET_CPU_SHIFT) | ||
40 | |||
41 | #define MSI_ADDR_DESTMODE_SHIFT 2 | ||
42 | #define MSI_ADDR_DESTMODE_PHYS (0 << MSI_ADDR_DESTMODE_SHIFT) | ||
43 | #define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT) | ||
44 | |||
45 | #define MSI_ADDR_REDIRECTION_SHIFT 3 | ||
46 | #define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) | ||
47 | #define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) | ||
48 | 11 | ||
49 | static struct irq_chip ia64_msi_chip; | 12 | static struct irq_chip ia64_msi_chip; |
50 | 13 | ||
@@ -65,8 +28,8 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, | |||
65 | read_msi_msg(irq, &msg); | 28 | read_msi_msg(irq, &msg); |
66 | 29 | ||
67 | addr = msg.address_lo; | 30 | addr = msg.address_lo; |
68 | addr &= MSI_ADDR_DESTID_MASK; | 31 | addr &= MSI_ADDR_DEST_ID_MASK; |
69 | addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); | 32 | addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); |
70 | msg.address_lo = addr; | 33 | msg.address_lo = addr; |
71 | 34 | ||
72 | data = msg.data; | 35 | data = msg.data; |
@@ -75,7 +38,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, | |||
75 | msg.data = data; | 38 | msg.data = data; |
76 | 39 | ||
77 | write_msi_msg(irq, &msg); | 40 | write_msi_msg(irq, &msg); |
78 | irq_desc[irq].affinity = cpumask_of_cpu(cpu); | 41 | cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); |
79 | } | 42 | } |
80 | #endif /* CONFIG_SMP */ | 43 | #endif /* CONFIG_SMP */ |
81 | 44 | ||
@@ -98,9 +61,9 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) | |||
98 | msg.address_hi = 0; | 61 | msg.address_hi = 0; |
99 | msg.address_lo = | 62 | msg.address_lo = |
100 | MSI_ADDR_HEADER | | 63 | MSI_ADDR_HEADER | |
101 | MSI_ADDR_DESTMODE_PHYS | | 64 | MSI_ADDR_DEST_MODE_PHYS | |
102 | MSI_ADDR_REDIRECTION_CPU | | 65 | MSI_ADDR_REDIRECTION_CPU | |
103 | MSI_ADDR_DESTID_CPU(dest_phys_id); | 66 | MSI_ADDR_DEST_ID_CPU(dest_phys_id); |
104 | 67 | ||
105 | msg.data = | 68 | msg.data = |
106 | MSI_DATA_TRIGGER_EDGE | | 69 | MSI_DATA_TRIGGER_EDGE | |
@@ -183,11 +146,11 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
183 | 146 | ||
184 | msg.data &= ~MSI_DATA_VECTOR_MASK; | 147 | msg.data &= ~MSI_DATA_VECTOR_MASK; |
185 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | 148 | msg.data |= MSI_DATA_VECTOR(cfg->vector); |
186 | msg.address_lo &= ~MSI_ADDR_DESTID_MASK; | 149 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; |
187 | msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); | 150 | msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); |
188 | 151 | ||
189 | dmar_msi_write(irq, &msg); | 152 | dmar_msi_write(irq, &msg); |
190 | irq_desc[irq].affinity = *mask; | 153 | cpumask_copy(irq_desc[irq].affinity, mask); |
191 | } | 154 | } |
192 | #endif /* CONFIG_SMP */ | 155 | #endif /* CONFIG_SMP */ |
193 | 156 | ||
@@ -215,9 +178,9 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) | |||
215 | msg->address_hi = 0; | 178 | msg->address_hi = 0; |
216 | msg->address_lo = | 179 | msg->address_lo = |
217 | MSI_ADDR_HEADER | | 180 | MSI_ADDR_HEADER | |
218 | MSI_ADDR_DESTMODE_PHYS | | 181 | MSI_ADDR_DEST_MODE_PHYS | |
219 | MSI_ADDR_REDIRECTION_CPU | | 182 | MSI_ADDR_REDIRECTION_CPU | |
220 | MSI_ADDR_DESTID_CPU(dest); | 183 | MSI_ADDR_DEST_ID_CPU(dest); |
221 | 184 | ||
222 | msg->data = | 185 | msg->data = |
223 | MSI_DATA_TRIGGER_EDGE | | 186 | MSI_DATA_TRIGGER_EDGE | |
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index e5c57f413ca2..a4f19c70aadd 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c | |||
@@ -1002,8 +1002,6 @@ create_palinfo_proc_entries(unsigned int cpu) | |||
1002 | *pdir = create_proc_read_entry( | 1002 | *pdir = create_proc_read_entry( |
1003 | palinfo_entries[j].name, 0, cpu_dir, | 1003 | palinfo_entries[j].name, 0, cpu_dir, |
1004 | palinfo_read_entry, (void *)f.value); | 1004 | palinfo_read_entry, (void *)f.value); |
1005 | if (*pdir) | ||
1006 | (*pdir)->owner = THIS_MODULE; | ||
1007 | pdir++; | 1005 | pdir++; |
1008 | } | 1006 | } |
1009 | } | 1007 | } |
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c index d0ada067a4af..e4cb443bb988 100644 --- a/arch/ia64/kernel/pci-dma.c +++ b/arch/ia64/kernel/pci-dma.c | |||
@@ -32,9 +32,6 @@ int force_iommu __read_mostly = 1; | |||
32 | int force_iommu __read_mostly; | 32 | int force_iommu __read_mostly; |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | /* Set this to 1 if there is a HW IOMMU in the system */ | ||
36 | int iommu_detected __read_mostly; | ||
37 | |||
38 | /* Dummy device used for NULL arguments (normally ISA). Better would | 35 | /* Dummy device used for NULL arguments (normally ISA). Better would |
39 | be probably a smaller DMA mask, but this is bug-to-bug compatible | 36 | be probably a smaller DMA mask, but this is bug-to-bug compatible |
40 | to i386. */ | 37 | to i386. */ |
@@ -44,18 +41,7 @@ struct device fallback_dev = { | |||
44 | .dma_mask = &fallback_dev.coherent_dma_mask, | 41 | .dma_mask = &fallback_dev.coherent_dma_mask, |
45 | }; | 42 | }; |
46 | 43 | ||
47 | void __init pci_iommu_alloc(void) | 44 | extern struct dma_map_ops intel_dma_ops; |
48 | { | ||
49 | /* | ||
50 | * The order of these functions is important for | ||
51 | * fall-back/fail-over reasons | ||
52 | */ | ||
53 | detect_intel_iommu(); | ||
54 | |||
55 | #ifdef CONFIG_SWIOTLB | ||
56 | pci_swiotlb_init(); | ||
57 | #endif | ||
58 | } | ||
59 | 45 | ||
60 | static int __init pci_iommu_init(void) | 46 | static int __init pci_iommu_init(void) |
61 | { | 47 | { |
@@ -79,15 +65,12 @@ iommu_dma_init(void) | |||
79 | return; | 65 | return; |
80 | } | 66 | } |
81 | 67 | ||
82 | struct dma_mapping_ops *dma_ops; | ||
83 | EXPORT_SYMBOL(dma_ops); | ||
84 | |||
85 | int iommu_dma_supported(struct device *dev, u64 mask) | 68 | int iommu_dma_supported(struct device *dev, u64 mask) |
86 | { | 69 | { |
87 | struct dma_mapping_ops *ops = get_dma_ops(dev); | 70 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
88 | 71 | ||
89 | if (ops->dma_supported_op) | 72 | if (ops->dma_supported) |
90 | return ops->dma_supported_op(dev, mask); | 73 | return ops->dma_supported(dev, mask); |
91 | 74 | ||
92 | /* Copied from i386. Doesn't make much sense, because it will | 75 | /* Copied from i386. Doesn't make much sense, because it will |
93 | only work for pci_alloc_coherent. | 76 | only work for pci_alloc_coherent. |
@@ -116,4 +99,25 @@ int iommu_dma_supported(struct device *dev, u64 mask) | |||
116 | } | 99 | } |
117 | EXPORT_SYMBOL(iommu_dma_supported); | 100 | EXPORT_SYMBOL(iommu_dma_supported); |
118 | 101 | ||
102 | void __init pci_iommu_alloc(void) | ||
103 | { | ||
104 | dma_ops = &intel_dma_ops; | ||
105 | |||
106 | dma_ops->sync_single_for_cpu = machvec_dma_sync_single; | ||
107 | dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg; | ||
108 | dma_ops->sync_single_for_device = machvec_dma_sync_single; | ||
109 | dma_ops->sync_sg_for_device = machvec_dma_sync_sg; | ||
110 | dma_ops->dma_supported = iommu_dma_supported; | ||
111 | |||
112 | /* | ||
113 | * The order of these functions is important for | ||
114 | * fall-back/fail-over reasons | ||
115 | */ | ||
116 | detect_intel_iommu(); | ||
117 | |||
118 | #ifdef CONFIG_SWIOTLB | ||
119 | pci_swiotlb_init(); | ||
120 | #endif | ||
121 | } | ||
122 | |||
119 | #endif | 123 | #endif |
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 16c50516dbc1..573f02c39a00 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c | |||
@@ -13,23 +13,37 @@ | |||
13 | int swiotlb __read_mostly; | 13 | int swiotlb __read_mostly; |
14 | EXPORT_SYMBOL(swiotlb); | 14 | EXPORT_SYMBOL(swiotlb); |
15 | 15 | ||
16 | struct dma_mapping_ops swiotlb_dma_ops = { | 16 | static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, |
17 | .mapping_error = swiotlb_dma_mapping_error, | 17 | dma_addr_t *dma_handle, gfp_t gfp) |
18 | .alloc_coherent = swiotlb_alloc_coherent, | 18 | { |
19 | if (dev->coherent_dma_mask != DMA_64BIT_MASK) | ||
20 | gfp |= GFP_DMA; | ||
21 | return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); | ||
22 | } | ||
23 | |||
24 | struct dma_map_ops swiotlb_dma_ops = { | ||
25 | .alloc_coherent = ia64_swiotlb_alloc_coherent, | ||
19 | .free_coherent = swiotlb_free_coherent, | 26 | .free_coherent = swiotlb_free_coherent, |
20 | .map_single = swiotlb_map_single, | 27 | .map_page = swiotlb_map_page, |
21 | .unmap_single = swiotlb_unmap_single, | 28 | .unmap_page = swiotlb_unmap_page, |
29 | .map_sg = swiotlb_map_sg_attrs, | ||
30 | .unmap_sg = swiotlb_unmap_sg_attrs, | ||
22 | .sync_single_for_cpu = swiotlb_sync_single_for_cpu, | 31 | .sync_single_for_cpu = swiotlb_sync_single_for_cpu, |
23 | .sync_single_for_device = swiotlb_sync_single_for_device, | 32 | .sync_single_for_device = swiotlb_sync_single_for_device, |
24 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, | 33 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, |
25 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, | 34 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, |
26 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | 35 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, |
27 | .sync_sg_for_device = swiotlb_sync_sg_for_device, | 36 | .sync_sg_for_device = swiotlb_sync_sg_for_device, |
28 | .map_sg = swiotlb_map_sg, | 37 | .dma_supported = swiotlb_dma_supported, |
29 | .unmap_sg = swiotlb_unmap_sg, | 38 | .mapping_error = swiotlb_dma_mapping_error, |
30 | .dma_supported_op = swiotlb_dma_supported, | ||
31 | }; | 39 | }; |
32 | 40 | ||
41 | void __init swiotlb_dma_init(void) | ||
42 | { | ||
43 | dma_ops = &swiotlb_dma_ops; | ||
44 | swiotlb_init(); | ||
45 | } | ||
46 | |||
33 | void __init pci_swiotlb_init(void) | 47 | void __init pci_swiotlb_init(void) |
34 | { | 48 | { |
35 | if (!iommu_detected) { | 49 | if (!iommu_detected) { |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 0e499757309b..8a06dc480594 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -2196,7 +2196,7 @@ pfmfs_delete_dentry(struct dentry *dentry) | |||
2196 | return 1; | 2196 | return 1; |
2197 | } | 2197 | } |
2198 | 2198 | ||
2199 | static struct dentry_operations pfmfs_dentry_operations = { | 2199 | static const struct dentry_operations pfmfs_dentry_operations = { |
2200 | .d_delete = pfmfs_delete_dentry, | 2200 | .d_delete = pfmfs_delete_dentry, |
2201 | }; | 2201 | }; |
2202 | 2202 | ||
@@ -5603,7 +5603,7 @@ pfm_interrupt_handler(int irq, void *arg) | |||
5603 | * /proc/perfmon interface, for debug only | 5603 | * /proc/perfmon interface, for debug only |
5604 | */ | 5604 | */ |
5605 | 5605 | ||
5606 | #define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1) | 5606 | #define PFM_PROC_SHOW_HEADER ((void *)nr_cpu_ids+1) |
5607 | 5607 | ||
5608 | static void * | 5608 | static void * |
5609 | pfm_proc_start(struct seq_file *m, loff_t *pos) | 5609 | pfm_proc_start(struct seq_file *m, loff_t *pos) |
@@ -5612,7 +5612,7 @@ pfm_proc_start(struct seq_file *m, loff_t *pos) | |||
5612 | return PFM_PROC_SHOW_HEADER; | 5612 | return PFM_PROC_SHOW_HEADER; |
5613 | } | 5613 | } |
5614 | 5614 | ||
5615 | while (*pos <= NR_CPUS) { | 5615 | while (*pos <= nr_cpu_ids) { |
5616 | if (cpu_online(*pos - 1)) { | 5616 | if (cpu_online(*pos - 1)) { |
5617 | return (void *)*pos; | 5617 | return (void *)*pos; |
5618 | } | 5618 | } |
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index ecb9eb78d687..7053c55b7649 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c | |||
@@ -317,7 +317,7 @@ retry: | |||
317 | } | 317 | } |
318 | 318 | ||
319 | n = data->cpu_check; | 319 | n = data->cpu_check; |
320 | for (i = 0; i < NR_CPUS; i++) { | 320 | for (i = 0; i < nr_cpu_ids; i++) { |
321 | if (cpu_isset(n, data->cpu_event)) { | 321 | if (cpu_isset(n, data->cpu_event)) { |
322 | if (!cpu_online(n)) { | 322 | if (!cpu_online(n)) { |
323 | cpu_clear(n, data->cpu_event); | 323 | cpu_clear(n, data->cpu_event); |
@@ -326,7 +326,7 @@ retry: | |||
326 | cpu = n; | 326 | cpu = n; |
327 | break; | 327 | break; |
328 | } | 328 | } |
329 | if (++n == NR_CPUS) | 329 | if (++n == nr_cpu_ids) |
330 | n = 0; | 330 | n = 0; |
331 | } | 331 | } |
332 | 332 | ||
@@ -337,7 +337,7 @@ retry: | |||
337 | 337 | ||
338 | /* for next read, start checking at next CPU */ | 338 | /* for next read, start checking at next CPU */ |
339 | data->cpu_check = cpu; | 339 | data->cpu_check = cpu; |
340 | if (++data->cpu_check == NR_CPUS) | 340 | if (++data->cpu_check == nr_cpu_ids) |
341 | data->cpu_check = 0; | 341 | data->cpu_check = 0; |
342 | 342 | ||
343 | snprintf(cmd, sizeof(cmd), "read %d\n", cpu); | 343 | snprintf(cmd, sizeof(cmd), "read %d\n", cpu); |
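NR_CPUS is the compile-time array bound; nr_cpu_ids is the runtime limit (highest possible CPU number plus one) fixed during boot. Substituting it, as in the loops above, stops polling code from walking CPU slots that cannot exist on the running machine. The idiom in miniature:

	/* Sketch: bound scans by what this boot can actually have. */
	int cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		if (cpu_online(cpu))
			poll_cpu(cpu);	/* hypothetical per-CPU work */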
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 4ed3e1c117e7..833b3ef92779 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -732,10 +732,10 @@ static void * | |||
732 | c_start (struct seq_file *m, loff_t *pos) | 732 | c_start (struct seq_file *m, loff_t *pos) |
733 | { | 733 | { |
734 | #ifdef CONFIG_SMP | 734 | #ifdef CONFIG_SMP |
735 | while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map)) | 735 | while (*pos < nr_cpu_ids && !cpu_online(*pos)) |
736 | ++*pos; | 736 | ++*pos; |
737 | #endif | 737 | #endif |
738 | return *pos < NR_CPUS ? cpu_data(*pos) : NULL; | 738 | return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL; |
739 | } | 739 | } |
740 | 740 | ||
741 | static void * | 741 | static void * |
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index da8f020d82c1..2ea4199d9c57 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c | |||
@@ -166,11 +166,11 @@ send_IPI_allbutself (int op) | |||
166 | * Called with preemption disabled. | 166 | * Called with preemption disabled. |
167 | */ | 167 | */ |
168 | static inline void | 168 | static inline void |
169 | send_IPI_mask(cpumask_t mask, int op) | 169 | send_IPI_mask(const struct cpumask *mask, int op) |
170 | { | 170 | { |
171 | unsigned int cpu; | 171 | unsigned int cpu; |
172 | 172 | ||
173 | for_each_cpu_mask(cpu, mask) { | 173 | for_each_cpu(cpu, mask) { |
174 | send_IPI_single(cpu, op); | 174 | send_IPI_single(cpu, op); |
175 | } | 175 | } |
176 | } | 176 | } |
@@ -316,7 +316,7 @@ void arch_send_call_function_single_ipi(int cpu) | |||
316 | send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); | 316 | send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); |
317 | } | 317 | } |
318 | 318 | ||
319 | void arch_send_call_function_ipi(cpumask_t mask) | 319 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
320 | { | 320 | { |
321 | send_IPI_mask(mask, IPI_CALL_FUNC); | 321 | send_IPI_mask(mask, IPI_CALL_FUNC); |
322 | } | 322 | } |
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 52290547c85b..7700e23034bb 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
@@ -581,14 +581,14 @@ smp_build_cpu_map (void) | |||
581 | 581 | ||
582 | ia64_cpu_to_sapicid[0] = boot_cpu_id; | 582 | ia64_cpu_to_sapicid[0] = boot_cpu_id; |
583 | cpus_clear(cpu_present_map); | 583 | cpus_clear(cpu_present_map); |
584 | cpu_set(0, cpu_present_map); | 584 | set_cpu_present(0, true); |
585 | cpu_set(0, cpu_possible_map); | 585 | set_cpu_possible(0, true); |
586 | for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) { | 586 | for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) { |
587 | sapicid = smp_boot_data.cpu_phys_id[i]; | 587 | sapicid = smp_boot_data.cpu_phys_id[i]; |
588 | if (sapicid == boot_cpu_id) | 588 | if (sapicid == boot_cpu_id) |
589 | continue; | 589 | continue; |
590 | cpu_set(cpu, cpu_present_map); | 590 | set_cpu_present(cpu, true); |
591 | cpu_set(cpu, cpu_possible_map); | 591 | set_cpu_possible(cpu, true); |
592 | ia64_cpu_to_sapicid[cpu] = sapicid; | 592 | ia64_cpu_to_sapicid[cpu] = sapicid; |
593 | cpu++; | 593 | cpu++; |
594 | } | 594 | } |
@@ -626,12 +626,9 @@ smp_prepare_cpus (unsigned int max_cpus) | |||
626 | */ | 626 | */ |
627 | if (!max_cpus) { | 627 | if (!max_cpus) { |
628 | printk(KERN_INFO "SMP mode deactivated.\n"); | 628 | printk(KERN_INFO "SMP mode deactivated.\n"); |
629 | cpus_clear(cpu_online_map); | 629 | init_cpu_online(cpumask_of(0)); |
630 | cpus_clear(cpu_present_map); | 630 | init_cpu_present(cpumask_of(0)); |
631 | cpus_clear(cpu_possible_map); | 631 | init_cpu_possible(cpumask_of(0)); |
632 | cpu_set(0, cpu_online_map); | ||
633 | cpu_set(0, cpu_present_map); | ||
634 | cpu_set(0, cpu_possible_map); | ||
635 | return; | 632 | return; |
636 | } | 633 | } |
637 | } | 634 | } |
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 794d168bc8a4..4a95e86b9ac2 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S | |||
@@ -243,16 +243,9 @@ SECTIONS | |||
243 | { *(.data.cacheline_aligned) } | 243 | { *(.data.cacheline_aligned) } |
244 | 244 | ||
245 | /* Per-cpu data: */ | 245 | /* Per-cpu data: */ |
246 | percpu : { } :percpu | ||
247 | . = ALIGN(PERCPU_PAGE_SIZE); | 246 | . = ALIGN(PERCPU_PAGE_SIZE); |
248 | __phys_per_cpu_start = .; | 247 | PERCPU_VADDR(PERCPU_ADDR, :percpu) |
249 | .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET) | 248 | __phys_per_cpu_start = __per_cpu_load; |
250 | { | ||
251 | __per_cpu_start = .; | ||
252 | *(.data.percpu) | ||
253 | *(.data.percpu.shared_aligned) | ||
254 | __per_cpu_end = .; | ||
255 | } | ||
256 | . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits | 249 | . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits |
257 | * into percpu page size | 250 | * into percpu page size |
258 | */ | 251 | */ |
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig index f833a0b4188d..0a2d6b86075a 100644 --- a/arch/ia64/kvm/Kconfig +++ b/arch/ia64/kvm/Kconfig | |||
@@ -4,6 +4,10 @@ | |||
4 | config HAVE_KVM | 4 | config HAVE_KVM |
5 | bool | 5 | bool |
6 | 6 | ||
7 | config HAVE_KVM_IRQCHIP | ||
8 | bool | ||
9 | default y | ||
10 | |||
7 | menuconfig VIRTUALIZATION | 11 | menuconfig VIRTUALIZATION |
8 | bool "Virtualization" | 12 | bool "Virtualization" |
9 | depends on HAVE_KVM || IA64 | 13 | depends on HAVE_KVM || IA64 |
diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h index c6786e8b1bf4..c0785a728271 100644 --- a/arch/ia64/kvm/irq.h +++ b/arch/ia64/kvm/irq.h | |||
@@ -23,6 +23,8 @@ | |||
23 | #ifndef __IRQ_H | 23 | #ifndef __IRQ_H |
24 | #define __IRQ_H | 24 | #define __IRQ_H |
25 | 25 | ||
26 | #include "lapic.h" | ||
27 | |||
26 | static inline int irqchip_in_kernel(struct kvm *kvm) | 28 | static inline int irqchip_in_kernel(struct kvm *kvm) |
27 | { | 29 | { |
28 | return 1; | 30 | return 1; |
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 0344c6664485..28af6a731bb8 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c | |||
@@ -182,7 +182,7 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
182 | switch (ext) { | 182 | switch (ext) { |
183 | case KVM_CAP_IRQCHIP: | 183 | case KVM_CAP_IRQCHIP: |
184 | case KVM_CAP_MP_STATE: | 184 | case KVM_CAP_MP_STATE: |
185 | 185 | case KVM_CAP_IRQ_INJECT_STATUS: | |
186 | r = 1; | 186 | r = 1; |
187 | break; | 187 | break; |
188 | case KVM_CAP_COALESCED_MMIO: | 188 | case KVM_CAP_COALESCED_MMIO: |
@@ -314,7 +314,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id, | |||
314 | union ia64_lid lid; | 314 | union ia64_lid lid; |
315 | int i; | 315 | int i; |
316 | 316 | ||
317 | for (i = 0; i < KVM_MAX_VCPUS; i++) { | 317 | for (i = 0; i < kvm->arch.online_vcpus; i++) { |
318 | if (kvm->vcpus[i]) { | 318 | if (kvm->vcpus[i]) { |
319 | lid.val = VCPU_LID(kvm->vcpus[i]); | 319 | lid.val = VCPU_LID(kvm->vcpus[i]); |
320 | if (lid.id == id && lid.eid == eid) | 320 | if (lid.id == id && lid.eid == eid) |
@@ -388,7 +388,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
388 | 388 | ||
389 | call_data.ptc_g_data = p->u.ptc_g_data; | 389 | call_data.ptc_g_data = p->u.ptc_g_data; |
390 | 390 | ||
391 | for (i = 0; i < KVM_MAX_VCPUS; i++) { | 391 | for (i = 0; i < kvm->arch.online_vcpus; i++) { |
392 | if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state == | 392 | if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state == |
393 | KVM_MP_STATE_UNINITIALIZED || | 393 | KVM_MP_STATE_UNINITIALIZED || |
394 | vcpu == kvm->vcpus[i]) | 394 | vcpu == kvm->vcpus[i]) |
@@ -788,6 +788,8 @@ struct kvm *kvm_arch_create_vm(void) | |||
788 | return ERR_PTR(-ENOMEM); | 788 | return ERR_PTR(-ENOMEM); |
789 | kvm_init_vm(kvm); | 789 | kvm_init_vm(kvm); |
790 | 790 | ||
791 | kvm->arch.online_vcpus = 0; | ||
792 | |||
791 | return kvm; | 793 | return kvm; |
792 | 794 | ||
793 | } | 795 | } |
@@ -919,7 +921,13 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
919 | r = kvm_ioapic_init(kvm); | 921 | r = kvm_ioapic_init(kvm); |
920 | if (r) | 922 | if (r) |
921 | goto out; | 923 | goto out; |
924 | r = kvm_setup_default_irq_routing(kvm); | ||
925 | if (r) { | ||
926 | kfree(kvm->arch.vioapic); | ||
927 | goto out; | ||
928 | } | ||
922 | break; | 929 | break; |
930 | case KVM_IRQ_LINE_STATUS: | ||
923 | case KVM_IRQ_LINE: { | 931 | case KVM_IRQ_LINE: { |
924 | struct kvm_irq_level irq_event; | 932 | struct kvm_irq_level irq_event; |
925 | 933 | ||
@@ -927,10 +935,17 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
927 | if (copy_from_user(&irq_event, argp, sizeof irq_event)) | 935 | if (copy_from_user(&irq_event, argp, sizeof irq_event)) |
928 | goto out; | 936 | goto out; |
929 | if (irqchip_in_kernel(kvm)) { | 937 | if (irqchip_in_kernel(kvm)) { |
938 | __s32 status; | ||
930 | mutex_lock(&kvm->lock); | 939 | mutex_lock(&kvm->lock); |
931 | kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, | 940 | status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, |
932 | irq_event.irq, irq_event.level); | 941 | irq_event.irq, irq_event.level); |
933 | mutex_unlock(&kvm->lock); | 942 | mutex_unlock(&kvm->lock); |
943 | if (ioctl == KVM_IRQ_LINE_STATUS) { | ||
944 | irq_event.status = status; | ||
945 | if (copy_to_user(argp, &irq_event, | ||
946 | sizeof irq_event)) | ||
947 | goto out; | ||
948 | } | ||
934 | r = 0; | 949 | r = 0; |
935 | } | 950 | } |
936 | break; | 951 | break; |
@@ -1149,7 +1164,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
1149 | 1164 | ||
1150 | /*Initialize itc offset for vcpus*/ | 1165 | /*Initialize itc offset for vcpus*/ |
1151 | itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC); | 1166 | itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC); |
1152 | for (i = 0; i < KVM_MAX_VCPUS; i++) { | 1167 | for (i = 0; i < kvm->arch.online_vcpus; i++) { |
1153 | v = (struct kvm_vcpu *)((char *)vcpu + | 1168 | v = (struct kvm_vcpu *)((char *)vcpu + |
1154 | sizeof(struct kvm_vcpu_data) * i); | 1169 | sizeof(struct kvm_vcpu_data) * i); |
1155 | v->arch.itc_offset = itc_offset; | 1170 | v->arch.itc_offset = itc_offset; |
@@ -1283,6 +1298,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
1283 | goto fail; | 1298 | goto fail; |
1284 | } | 1299 | } |
1285 | 1300 | ||
1301 | kvm->arch.online_vcpus++; | ||
1302 | |||
1286 | return vcpu; | 1303 | return vcpu; |
1287 | fail: | 1304 | fail: |
1288 | return ERR_PTR(r); | 1305 | return ERR_PTR(r); |
@@ -1303,8 +1320,8 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | |||
1303 | return -EINVAL; | 1320 | return -EINVAL; |
1304 | } | 1321 | } |
1305 | 1322 | ||
1306 | int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, | 1323 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
1307 | struct kvm_debug_guest *dbg) | 1324 | struct kvm_guest_debug *dbg) |
1308 | { | 1325 | { |
1309 | return -EINVAL; | 1326 | return -EINVAL; |
1310 | } | 1327 | } |
@@ -1421,6 +1438,23 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
1421 | return 0; | 1438 | return 0; |
1422 | } | 1439 | } |
1423 | 1440 | ||
1441 | int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu, | ||
1442 | struct kvm_ia64_vcpu_stack *stack) | ||
1443 | { | ||
1444 | memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack)); | ||
1445 | return 0; | ||
1446 | } | ||
1447 | |||
1448 | int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu, | ||
1449 | struct kvm_ia64_vcpu_stack *stack) | ||
1450 | { | ||
1451 | memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu), | ||
1452 | sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu)); | ||
1453 | |||
1454 | vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data; | ||
1455 | return 0; | ||
1456 | } | ||
1457 | |||
1424 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | 1458 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) |
1425 | { | 1459 | { |
1426 | 1460 | ||
@@ -1430,9 +1464,78 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | |||
1430 | 1464 | ||
1431 | 1465 | ||
1432 | long kvm_arch_vcpu_ioctl(struct file *filp, | 1466 | long kvm_arch_vcpu_ioctl(struct file *filp, |
1433 | unsigned int ioctl, unsigned long arg) | 1467 | unsigned int ioctl, unsigned long arg) |
1434 | { | 1468 | { |
1435 | return -EINVAL; | 1469 | struct kvm_vcpu *vcpu = filp->private_data; |
1470 | void __user *argp = (void __user *)arg; | ||
1471 | struct kvm_ia64_vcpu_stack *stack = NULL; | ||
1472 | long r; | ||
1473 | |||
1474 | switch (ioctl) { | ||
1475 | case KVM_IA64_VCPU_GET_STACK: { | ||
1476 | struct kvm_ia64_vcpu_stack __user *user_stack; | ||
1477 | void __user *first_p = argp; | ||
1478 | |||
1479 | r = -EFAULT; | ||
1480 | if (copy_from_user(&user_stack, first_p, sizeof(void *))) | ||
1481 | goto out; | ||
1482 | |||
1483 | if (!access_ok(VERIFY_WRITE, user_stack, | ||
1484 | sizeof(struct kvm_ia64_vcpu_stack))) { | ||
1485 | printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: " | ||
1486 | "Illegal user destination address for stack\n"); | ||
1487 | goto out; | ||
1488 | } | ||
1489 | stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); | ||
1490 | if (!stack) { | ||
1491 | r = -ENOMEM; | ||
1492 | goto out; | ||
1493 | } | ||
1494 | |||
1495 | r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack); | ||
1496 | if (r) | ||
1497 | goto out; | ||
1498 | |||
1499 | if (copy_to_user(user_stack, stack, | ||
1500 | sizeof(struct kvm_ia64_vcpu_stack))) | ||
1501 | goto out; | ||
1502 | |||
1503 | break; | ||
1504 | } | ||
1505 | case KVM_IA64_VCPU_SET_STACK: { | ||
1506 | struct kvm_ia64_vcpu_stack __user *user_stack; | ||
1507 | void __user *first_p = argp; | ||
1508 | |||
1509 | r = -EFAULT; | ||
1510 | if (copy_from_user(&user_stack, first_p, sizeof(void *))) | ||
1511 | goto out; | ||
1512 | |||
1513 | if (!access_ok(VERIFY_READ, user_stack, | ||
1514 | sizeof(struct kvm_ia64_vcpu_stack))) { | ||
1515 | printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: " | ||
1516 | "Illegal user address for stack\n"); | ||
1517 | goto out; | ||
1518 | } | ||
1519 | stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL); | ||
1520 | if (!stack) { | ||
1521 | r = -ENOMEM; | ||
1522 | goto out; | ||
1523 | } | ||
1524 | if (copy_from_user(stack, user_stack, | ||
1525 | sizeof(struct kvm_ia64_vcpu_stack))) | ||
1526 | goto out; | ||
1527 | |||
1528 | r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack); | ||
1529 | break; | ||
1530 | } | ||
1531 | |||
1532 | default: | ||
1533 | r = -EINVAL; | ||
1534 | } | ||
1535 | |||
1536 | out: | ||
1537 | kfree(stack); | ||
1538 | return r; | ||
1436 | } | 1539 | } |
1437 | 1540 | ||
1438 | int kvm_arch_set_memory_region(struct kvm *kvm, | 1541 | int kvm_arch_set_memory_region(struct kvm *kvm, |
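
Note the double indirection in both ioctl cases: the handler first copy_from_user()s a pointer-sized value out of @arg, then validates and copies the stack buffer that pointer names, so userspace passes the address of its buffer pointer, not the buffer itself. In the GET path, the r = -EFAULT / r = 0 bracketing around copy_to_user() makes a faulting copy report an error instead of leaking the earlier success code. A hedged userspace sketch (the ioctl names come from the case labels above; that they and struct kvm_ia64_vcpu_stack are exported through <linux/kvm.h> is an assumption):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int save_vcpu_stack(int vcpu_fd, struct kvm_ia64_vcpu_stack *buf)
	{
		struct kvm_ia64_vcpu_stack *p = buf;

		/* arg is &p: the handler reads the buffer pointer itself */
		return ioctl(vcpu_fd, KVM_IA64_VCPU_GET_STACK, &p);
	}

	static int restore_vcpu_stack(int vcpu_fd, struct kvm_ia64_vcpu_stack *buf)
	{
		struct kvm_ia64_vcpu_stack *p = buf;

		return ioctl(vcpu_fd, KVM_IA64_VCPU_SET_STACK, &p);
	}
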
@@ -1472,7 +1575,7 @@ void kvm_arch_flush_shadow(struct kvm *kvm) | |||
1472 | } | 1575 | } |
1473 | 1576 | ||
1474 | long kvm_arch_dev_ioctl(struct file *filp, | 1577 | long kvm_arch_dev_ioctl(struct file *filp, |
1475 | unsigned int ioctl, unsigned long arg) | 1578 | unsigned int ioctl, unsigned long arg) |
1476 | { | 1579 | { |
1477 | return -EINVAL; | 1580 | return -EINVAL; |
1478 | } | 1581 | } |
@@ -1737,7 +1840,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector, | |||
1737 | struct kvm_vcpu *lvcpu = kvm->vcpus[0]; | 1840 | struct kvm_vcpu *lvcpu = kvm->vcpus[0]; |
1738 | int i; | 1841 | int i; |
1739 | 1842 | ||
1740 | for (i = 1; i < KVM_MAX_VCPUS; i++) { | 1843 | for (i = 1; i < kvm->arch.online_vcpus; i++) { |
1741 | if (!kvm->vcpus[i]) | 1844 | if (!kvm->vcpus[i]) |
1742 | continue; | 1845 | continue; |
1743 | if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp) | 1846 | if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp) |
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c index cb7600bdff9d..a8ae52ed5635 100644 --- a/arch/ia64/kvm/kvm_fw.c +++ b/arch/ia64/kvm/kvm_fw.c | |||
@@ -227,6 +227,18 @@ static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu) | |||
227 | return result; | 227 | return result; |
228 | } | 228 | } |
229 | 229 | ||
230 | static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu) | ||
231 | { | ||
232 | |||
233 | struct ia64_pal_retval result = {0, 0, 0, 0}; | ||
234 | long in0, in1, in2, in3; | ||
235 | |||
236 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
237 | result.status = ia64_pal_register_info(in1, &result.v1, &result.v2); | ||
238 | |||
239 | return result; | ||
240 | } | ||
241 | |||
230 | static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu) | 242 | static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu) |
231 | { | 243 | { |
232 | 244 | ||
@@ -268,8 +280,12 @@ static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu) | |||
268 | static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu) | 280 | static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu) |
269 | { | 281 | { |
270 | struct ia64_pal_retval result; | 282 | struct ia64_pal_retval result; |
283 | unsigned long in0, in1, in2, in3; | ||
271 | 284 | ||
272 | INIT_PAL_STATUS_UNIMPLEMENTED(result); | 285 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); |
286 | |||
287 | result.status = ia64_pal_vm_info(in1, in2, | ||
288 | (pal_tc_info_u_t *)&result.v1, &result.v2); | ||
273 | 289 | ||
274 | return result; | 290 | return result; |
275 | } | 291 | } |
@@ -292,6 +308,108 @@ static void prepare_for_halt(struct kvm_vcpu *vcpu) | |||
292 | vcpu->arch.timer_fired = 0; | 308 | vcpu->arch.timer_fired = 0; |
293 | } | 309 | } |
294 | 310 | ||
311 | static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu) | ||
312 | { | ||
313 | long status; | ||
314 | unsigned long in0, in1, in2, in3, r9; | ||
315 | unsigned long pm_buffer[16]; | ||
316 | |||
317 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
318 | status = ia64_pal_perf_mon_info(pm_buffer, | ||
319 | (pal_perf_mon_info_u_t *) &r9); | ||
320 | if (status != 0) { | ||
321 | printk(KERN_DEBUG "PAL_PERF_MON_INFO failed, ret=%ld\n", status); | ||
322 | } else { | ||
323 | if (in1) | ||
324 | memcpy((void *)in1, pm_buffer, sizeof(pm_buffer)); | ||
325 | else { | ||
326 | status = PAL_STATUS_EINVAL; | ||
327 | printk(KERN_WARNING"Invalid parameters " | ||
328 | "for PAL call:0x%lx!\n", in0); | ||
329 | } | ||
330 | } | ||
331 | return (struct ia64_pal_retval){status, r9, 0, 0}; | ||
332 | } | ||
333 | |||
334 | static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu) | ||
335 | { | ||
336 | unsigned long in0, in1, in2, in3; | ||
337 | long status; | ||
338 | unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32) | ||
339 | | (1UL << 61) | (1UL << 60); | ||
340 | |||
341 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
342 | if (in1) { | ||
343 | memcpy((void *)in1, &res, sizeof(res)); | ||
344 | status = 0; | ||
345 | } else { | ||
346 | status = PAL_STATUS_EINVAL; | ||
347 | printk(KERN_WARNING"Invalid parameters " | ||
348 | "for PAL call:0x%lx!\n", in0); | ||
349 | } | ||
350 | |||
351 | return (struct ia64_pal_retval){status, 0, 0, 0}; | ||
352 | } | ||
353 | |||
354 | static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu) | ||
355 | { | ||
356 | unsigned long r9; | ||
357 | long status; | ||
358 | |||
359 | status = ia64_pal_mem_attrib(&r9); | ||
360 | |||
361 | return (struct ia64_pal_retval){status, r9, 0, 0}; | ||
362 | } | ||
363 | |||
364 | static void remote_pal_prefetch_visibility(void *v) | ||
365 | { | ||
366 | s64 trans_type = (s64)v; | ||
367 | ia64_pal_prefetch_visibility(trans_type); | ||
368 | } | ||
369 | |||
370 | static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu) | ||
371 | { | ||
372 | struct ia64_pal_retval result = {0, 0, 0, 0}; | ||
373 | unsigned long in0, in1, in2, in3; | ||
374 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
375 | result.status = ia64_pal_prefetch_visibility(in1); | ||
376 | if (result.status == 0) { | ||
377 | /* Must be performed on all remote processors | ||
378 | in the coherence domain. */ | ||
379 | smp_call_function(remote_pal_prefetch_visibility, | ||
380 | (void *)in1, 1); | ||
381 | /* No need to repeat on remote processors for the other vcpus. */ | ||
382 | result.status = 1; | ||
383 | } | ||
384 | return result; | ||
385 | } | ||
386 | |||
387 | static void remote_pal_mc_drain(void *v) | ||
388 | { | ||
389 | ia64_pal_mc_drain(); | ||
390 | } | ||
391 | |||
392 | static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu) | ||
393 | { | ||
394 | struct ia64_pal_retval result = {0, 0, 0, 0}; | ||
395 | unsigned long in0, in1, in2, in3; | ||
396 | |||
397 | kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3); | ||
398 | |||
399 | if (in1 == 0 && in2) { | ||
400 | char brand_info[128]; | ||
401 | result.status = ia64_pal_get_brand_info(brand_info); | ||
402 | if (result.status == PAL_STATUS_SUCCESS) | ||
403 | memcpy((void *)in2, brand_info, 128); | ||
404 | } else { | ||
405 | result.status = PAL_STATUS_REQUIRES_MEMORY; | ||
406 | printk(KERN_WARNING"Invalid parameters for " | ||
407 | "PAL call:0x%lx!\n", in0); | ||
408 | } | ||
409 | |||
410 | return result; | ||
411 | } | ||
412 | |||
295 | int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) | 413 | int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) |
296 | { | 414 | { |
297 | 415 | ||
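
Two of the new services deserve a gloss. The canned word pal_halt_info() hands back packs one power-management descriptor; a hedged decoding, following the pal_power_mgmt_info_u_t bitfield layout in asm/pal.h (the field positions here are recalled from that header, not taken from the hunk):

	/*
	 * bits  0..15  exit latency      = 1000 cycles
	 * bits 16..31  entry latency     = 1000 cycles
	 * bits 32..59  power consumption = 10 (index)
	 * bit  60      im: interrupts can be serviced in this state
	 * bit  61      co: cache and memory coherence is maintained
	 */
	unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
			  | (1UL << 61) | (1UL << 60);

So the guest sees a single coherent, interruptible halt state with symmetric 1000-cycle entry/exit latencies. In pal_prefetch_visibility(), forcing result.status to 1 after a successful call leans on the PAL convention, as the in-line comment implies, that a positive status means the call need not be repeated on other processors: smp_call_function() has already covered the whole coherence domain, so the other vcpus are told not to bother.
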
@@ -300,14 +418,22 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
300 | int ret = 1; | 418 | int ret = 1; |
301 | 419 | ||
302 | gr28 = kvm_get_pal_call_index(vcpu); | 420 | gr28 = kvm_get_pal_call_index(vcpu); |
303 | /*printk("pal_call index:%lx\n",gr28);*/ | ||
304 | switch (gr28) { | 421 | switch (gr28) { |
305 | case PAL_CACHE_FLUSH: | 422 | case PAL_CACHE_FLUSH: |
306 | result = pal_cache_flush(vcpu); | 423 | result = pal_cache_flush(vcpu); |
307 | break; | 424 | break; |
425 | case PAL_MEM_ATTRIB: | ||
426 | result = pal_mem_attrib(vcpu); | ||
427 | break; | ||
308 | case PAL_CACHE_SUMMARY: | 428 | case PAL_CACHE_SUMMARY: |
309 | result = pal_cache_summary(vcpu); | 429 | result = pal_cache_summary(vcpu); |
310 | break; | 430 | break; |
431 | case PAL_PERF_MON_INFO: | ||
432 | result = pal_perf_mon_info(vcpu); | ||
433 | break; | ||
434 | case PAL_HALT_INFO: | ||
435 | result = pal_halt_info(vcpu); | ||
436 | break; | ||
311 | case PAL_HALT_LIGHT: | 437 | case PAL_HALT_LIGHT: |
312 | { | 438 | { |
313 | INIT_PAL_STATUS_SUCCESS(result); | 439 | INIT_PAL_STATUS_SUCCESS(result); |
@@ -317,6 +443,16 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
317 | } | 443 | } |
318 | break; | 444 | break; |
319 | 445 | ||
446 | case PAL_PREFETCH_VISIBILITY: | ||
447 | result = pal_prefetch_visibility(vcpu); | ||
448 | break; | ||
449 | case PAL_MC_DRAIN: | ||
450 | result.status = ia64_pal_mc_drain(); | ||
451 | /* FIXME: all vcpus are likely to call PAL_MC_DRAIN, | ||
452 | which causes congestion. */ | ||
453 | smp_call_function(remote_pal_mc_drain, NULL, 1); | ||
454 | break; | ||
455 | |||
320 | case PAL_FREQ_RATIOS: | 456 | case PAL_FREQ_RATIOS: |
321 | result = pal_freq_ratios(vcpu); | 457 | result = pal_freq_ratios(vcpu); |
322 | break; | 458 | break; |
@@ -346,6 +482,9 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
346 | INIT_PAL_STATUS_SUCCESS(result); | 482 | INIT_PAL_STATUS_SUCCESS(result); |
347 | result.v1 = (1L << 32) | 1L; | 483 | result.v1 = (1L << 32) | 1L; |
348 | break; | 484 | break; |
485 | case PAL_REGISTER_INFO: | ||
486 | result = pal_register_info(vcpu); | ||
487 | break; | ||
349 | case PAL_VM_PAGE_SIZE: | 488 | case PAL_VM_PAGE_SIZE: |
350 | result.status = ia64_pal_vm_page_size(&result.v0, | 489 | result.status = ia64_pal_vm_page_size(&result.v0, |
351 | &result.v1); | 490 | &result.v1); |
@@ -365,12 +504,18 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
365 | result.status = ia64_pal_version( | 504 | result.status = ia64_pal_version( |
366 | (pal_version_u_t *)&result.v0, | 505 | (pal_version_u_t *)&result.v0, |
367 | (pal_version_u_t *)&result.v1); | 506 | (pal_version_u_t *)&result.v1); |
368 | |||
369 | break; | 507 | break; |
370 | case PAL_FIXED_ADDR: | 508 | case PAL_FIXED_ADDR: |
371 | result.status = PAL_STATUS_SUCCESS; | 509 | result.status = PAL_STATUS_SUCCESS; |
372 | result.v0 = vcpu->vcpu_id; | 510 | result.v0 = vcpu->vcpu_id; |
373 | break; | 511 | break; |
512 | case PAL_BRAND_INFO: | ||
513 | result = pal_get_brand_info(vcpu); | ||
514 | break; | ||
515 | case PAL_GET_PSTATE: | ||
516 | case PAL_CACHE_SHARED_INFO: | ||
517 | INIT_PAL_STATUS_UNIMPLEMENTED(result); | ||
518 | break; | ||
374 | default: | 519 | default: |
375 | INIT_PAL_STATUS_UNIMPLEMENTED(result); | 520 | INIT_PAL_STATUS_UNIMPLEMENTED(result); |
376 | printk(KERN_WARNING"kvm: Unsupported pal call," | 521 | printk(KERN_WARNING"kvm: Unsupported pal call," |
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c index 230eae482f32..b1dc80952d91 100644 --- a/arch/ia64/kvm/process.c +++ b/arch/ia64/kvm/process.c | |||
@@ -167,7 +167,6 @@ static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa) | |||
167 | return (rr1.val); | 167 | return (rr1.val); |
168 | } | 168 | } |
169 | 169 | ||
170 | |||
171 | /* | 170 | /* |
172 | * Set vIFA & vITIR & vIHA, when vPSR.ic =1 | 171 | * Set vIFA & vITIR & vIHA, when vPSR.ic =1 |
173 | * Parameter: | 172 | * Parameter: |
@@ -222,8 +221,6 @@ void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr) | |||
222 | inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR); | 221 | inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR); |
223 | } | 222 | } |
224 | 223 | ||
225 | |||
226 | |||
227 | /* | 224 | /* |
228 | * Data Nested TLB Fault | 225 | * Data Nested TLB Fault |
229 | * @ Data Nested TLB Vector | 226 | * @ Data Nested TLB Vector |
@@ -245,7 +242,6 @@ void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr) | |||
245 | inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR); | 242 | inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR); |
246 | } | 243 | } |
247 | 244 | ||
248 | |||
249 | /* | 245 | /* |
250 | * Data TLB Fault | 246 | * Data TLB Fault |
251 | * @ Data TLB vector | 247 | * @ Data TLB vector |
@@ -265,8 +261,6 @@ static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) | |||
265 | /* If vPSR.ic, IFA, ITIR, IHA*/ | 261 | /* If vPSR.ic, IFA, ITIR, IHA*/ |
266 | set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); | 262 | set_ifa_itir_iha(vcpu, vadr, 1, 1, 1); |
267 | inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR); | 263 | inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR); |
268 | |||
269 | |||
270 | } | 264 | } |
271 | 265 | ||
272 | /* | 266 | /* |
@@ -279,7 +273,6 @@ void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) | |||
279 | _vhpt_fault(vcpu, vadr); | 273 | _vhpt_fault(vcpu, vadr); |
280 | } | 274 | } |
281 | 275 | ||
282 | |||
283 | /* | 276 | /* |
284 | * VHPT Data Fault | 277 | * VHPT Data Fault |
285 | * @ VHPT Translation vector | 278 | * @ VHPT Translation vector |
@@ -290,8 +283,6 @@ void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr) | |||
290 | _vhpt_fault(vcpu, vadr); | 283 | _vhpt_fault(vcpu, vadr); |
291 | } | 284 | } |
292 | 285 | ||
293 | |||
294 | |||
295 | /* | 286 | /* |
296 | * Deal with: | 287 | * Deal with: |
297 | * General Exception vector | 288 | * General Exception vector |
@@ -301,7 +292,6 @@ void _general_exception(struct kvm_vcpu *vcpu) | |||
301 | inject_guest_interruption(vcpu, IA64_GENEX_VECTOR); | 292 | inject_guest_interruption(vcpu, IA64_GENEX_VECTOR); |
302 | } | 293 | } |
303 | 294 | ||
304 | |||
305 | /* | 295 | /* |
306 | * Illegal Operation Fault | 296 | * Illegal Operation Fault |
307 | * @ General Exception Vector | 297 | * @ General Exception Vector |
@@ -419,19 +409,16 @@ static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr) | |||
419 | inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR); | 409 | inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR); |
420 | } | 410 | } |
421 | 411 | ||
422 | |||
423 | void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) | 412 | void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) |
424 | { | 413 | { |
425 | __page_not_present(vcpu, vadr); | 414 | __page_not_present(vcpu, vadr); |
426 | } | 415 | } |
427 | 416 | ||
428 | |||
429 | void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) | 417 | void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr) |
430 | { | 418 | { |
431 | __page_not_present(vcpu, vadr); | 419 | __page_not_present(vcpu, vadr); |
432 | } | 420 | } |
433 | 421 | ||
434 | |||
435 | /* Deal with | 422 | /* Deal with |
436 | * Data access rights vector | 423 | * Data access rights vector |
437 | */ | 424 | */ |
@@ -563,22 +550,64 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim, | |||
563 | inject_guest_interruption(vcpu, vector); | 550 | inject_guest_interruption(vcpu, vector); |
564 | } | 551 | } |
565 | 552 | ||
553 | static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu, | ||
554 | unsigned long arg) | ||
555 | { | ||
556 | struct thash_data *data; | ||
557 | unsigned long gpa, poff; | ||
558 | |||
559 | if (!is_physical_mode(vcpu)) { | ||
560 | /* Depends on the caller to provide the DTR or DTC mapping. */ | ||
561 | data = vtlb_lookup(vcpu, arg, D_TLB); | ||
562 | if (data) | ||
563 | gpa = data->page_flags & _PAGE_PPN_MASK; | ||
564 | else { | ||
565 | data = vhpt_lookup(arg); | ||
566 | if (!data) | ||
567 | return 0; | ||
568 | gpa = data->gpaddr & _PAGE_PPN_MASK; | ||
569 | } | ||
570 | |||
571 | poff = arg & (PSIZE(data->ps) - 1); | ||
572 | arg = PAGEALIGN(gpa, data->ps) | poff; | ||
573 | } | ||
574 | arg = kvm_gpa_to_mpa(arg << 1 >> 1); | ||
575 | |||
576 | return (unsigned long)__va(arg); | ||
577 | } | ||
578 | |||
566 | static void set_pal_call_data(struct kvm_vcpu *vcpu) | 579 | static void set_pal_call_data(struct kvm_vcpu *vcpu) |
567 | { | 580 | { |
568 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | 581 | struct exit_ctl_data *p = &vcpu->arch.exit_data; |
582 | unsigned long gr28 = vcpu_get_gr(vcpu, 28); | ||
583 | unsigned long gr29 = vcpu_get_gr(vcpu, 29); | ||
584 | unsigned long gr30 = vcpu_get_gr(vcpu, 30); | ||
569 | 585 | ||
570 | /* FIXME: for the static and stacked conventions, firmware | 586 | /* FIXME: for the static and stacked conventions, firmware |
571 | * has put the parameters in gr28-gr31 before | 587 | * has put the parameters in gr28-gr31 before |
572 | * breaking to the vmm! */ | 588 | * breaking to the vmm! */ |
573 | 589 | ||
574 | p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28); | 590 | switch (gr28) { |
575 | p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29); | 591 | case PAL_PERF_MON_INFO: |
576 | p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); | 592 | case PAL_HALT_INFO: |
593 | p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29); | ||
594 | p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); | ||
595 | break; | ||
596 | case PAL_BRAND_INFO: | ||
597 | p->u.pal_data.gr29 = gr29; | ||
598 | p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30); | ||
599 | break; | ||
600 | default: | ||
601 | p->u.pal_data.gr29 = gr29; | ||
602 | p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30); | ||
603 | } | ||
604 | p->u.pal_data.gr28 = gr28; | ||
577 | p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31); | 605 | p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31); |
606 | |||
578 | p->exit_reason = EXIT_REASON_PAL_CALL; | 607 | p->exit_reason = EXIT_REASON_PAL_CALL; |
579 | } | 608 | } |
580 | 609 | ||
581 | static void set_pal_call_result(struct kvm_vcpu *vcpu) | 610 | static void get_pal_call_result(struct kvm_vcpu *vcpu) |
582 | { | 611 | { |
583 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | 612 | struct exit_ctl_data *p = &vcpu->arch.exit_data; |
584 | 613 | ||
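
kvm_trans_pal_call_args() is the glue that lets the host-side PAL emulation dereference guest buffer pointers: in virtual mode the guest VA is looked up in the software vTLB (or, failing that, the VHPT) to recover the guest-physical frame, the in-page offset is reattached, and the result goes through kvm_gpa_to_mpa() and __va() to become a host-virtual address. The arg << 1 >> 1 shift pair strips bit 63 before the lookup; on ia64 that bit selects uncacheable access in a physical address, so it must not reach the gpfn arithmetic. A self-contained model of the offset math (PSIZE and PAGEALIGN are reimplemented here for illustration; the addresses are made up):

	#include <stdio.h>

	#define PSIZE(ps)        (1UL << (ps))
	#define PAGEALIGN(a, ps) ((a) & ~(PSIZE(ps) - 1))

	int main(void)
	{
		unsigned long gva  = 0xe000000000123f40UL; /* guest virtual */
		unsigned long gpa  = 0x0000000087654000UL; /* frame from the vTLB */
		unsigned long ps   = 14;                   /* 16KB pages */
		unsigned long poff = gva & (PSIZE(ps) - 1);

		/* prints 0x87657f40: page frame | in-page offset */
		printf("translated arg = %#lx\n", PAGEALIGN(gpa, ps) | poff);
		return 0;
	}
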
@@ -606,7 +635,7 @@ static void set_sal_call_data(struct kvm_vcpu *vcpu) | |||
606 | p->exit_reason = EXIT_REASON_SAL_CALL; | 635 | p->exit_reason = EXIT_REASON_SAL_CALL; |
607 | } | 636 | } |
608 | 637 | ||
609 | static void set_sal_call_result(struct kvm_vcpu *vcpu) | 638 | static void get_sal_call_result(struct kvm_vcpu *vcpu) |
610 | { | 639 | { |
611 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | 640 | struct exit_ctl_data *p = &vcpu->arch.exit_data; |
612 | 641 | ||
@@ -629,13 +658,13 @@ void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs, | |||
629 | if (iim == DOMN_PAL_REQUEST) { | 658 | if (iim == DOMN_PAL_REQUEST) { |
630 | set_pal_call_data(v); | 659 | set_pal_call_data(v); |
631 | vmm_transition(v); | 660 | vmm_transition(v); |
632 | set_pal_call_result(v); | 661 | get_pal_call_result(v); |
633 | vcpu_increment_iip(v); | 662 | vcpu_increment_iip(v); |
634 | return; | 663 | return; |
635 | } else if (iim == DOMN_SAL_REQUEST) { | 664 | } else if (iim == DOMN_SAL_REQUEST) { |
636 | set_sal_call_data(v); | 665 | set_sal_call_data(v); |
637 | vmm_transition(v); | 666 | vmm_transition(v); |
638 | set_sal_call_result(v); | 667 | get_sal_call_result(v); |
639 | vcpu_increment_iip(v); | 668 | vcpu_increment_iip(v); |
640 | return; | 669 | return; |
641 | } | 670 | } |
@@ -703,7 +732,6 @@ void vhpi_detection(struct kvm_vcpu *vcpu) | |||
703 | } | 732 | } |
704 | } | 733 | } |
705 | 734 | ||
706 | |||
707 | void leave_hypervisor_tail(void) | 735 | void leave_hypervisor_tail(void) |
708 | { | 736 | { |
709 | struct kvm_vcpu *v = current_vcpu; | 737 | struct kvm_vcpu *v = current_vcpu; |
@@ -737,7 +765,6 @@ void leave_hypervisor_tail(void) | |||
737 | } | 765 | } |
738 | } | 766 | } |
739 | 767 | ||
740 | |||
741 | static inline void handle_lds(struct kvm_pt_regs *regs) | 768 | static inline void handle_lds(struct kvm_pt_regs *regs) |
742 | { | 769 | { |
743 | regs->cr_ipsr |= IA64_PSR_ED; | 770 | regs->cr_ipsr |= IA64_PSR_ED; |
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c index 6839a52f7a41..a18ee17b9192 100644 --- a/arch/ia64/kvm/vcpu.c +++ b/arch/ia64/kvm/vcpu.c | |||
@@ -112,7 +112,6 @@ void switch_to_physical_rid(struct kvm_vcpu *vcpu) | |||
112 | return; | 112 | return; |
113 | } | 113 | } |
114 | 114 | ||
115 | |||
116 | void switch_to_virtual_rid(struct kvm_vcpu *vcpu) | 115 | void switch_to_virtual_rid(struct kvm_vcpu *vcpu) |
117 | { | 116 | { |
118 | unsigned long psr; | 117 | unsigned long psr; |
@@ -166,8 +165,6 @@ void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, | |||
166 | return; | 165 | return; |
167 | } | 166 | } |
168 | 167 | ||
169 | |||
170 | |||
171 | /* | 168 | /* |
172 | * In physical mode, insert tc/tr for region 0 and 4 uses | 169 | * In physical mode, insert tc/tr for region 0 and 4 uses |
173 | * RID[0] and RID[4] which is for physical mode emulation. | 170 | * RID[0] and RID[4] which is for physical mode emulation. |
@@ -269,7 +266,6 @@ static inline unsigned long fph_index(struct kvm_pt_regs *regs, | |||
269 | return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR)); | 266 | return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR)); |
270 | } | 267 | } |
271 | 268 | ||
272 | |||
273 | /* | 269 | /* |
274 | * The inverse of the above: given bspstore and the number of | 270 | * The inverse of the above: given bspstore and the number of |
275 | * registers, calculate ar.bsp. | 271 | * registers, calculate ar.bsp. |
@@ -811,12 +807,15 @@ static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val); | |||
811 | static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val) | 807 | static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val) |
812 | { | 808 | { |
813 | struct kvm_vcpu *v; | 809 | struct kvm_vcpu *v; |
810 | struct kvm *kvm; | ||
814 | int i; | 811 | int i; |
815 | long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC); | 812 | long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC); |
816 | unsigned long vitv = VCPU(vcpu, itv); | 813 | unsigned long vitv = VCPU(vcpu, itv); |
817 | 814 | ||
815 | kvm = (struct kvm *)KVM_VM_BASE; | ||
816 | |||
818 | if (vcpu->vcpu_id == 0) { | 817 | if (vcpu->vcpu_id == 0) { |
819 | for (i = 0; i < KVM_MAX_VCPUS; i++) { | 818 | for (i = 0; i < kvm->arch.online_vcpus; i++) { |
820 | v = (struct kvm_vcpu *)((char *)vcpu + | 819 | v = (struct kvm_vcpu *)((char *)vcpu + |
821 | sizeof(struct kvm_vcpu_data) * i); | 820 | sizeof(struct kvm_vcpu_data) * i); |
822 | VMX(v, itc_offset) = itc_offset; | 821 | VMX(v, itc_offset) = itc_offset; |
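
Two fixed-layout properties of the ia64 VMM address map make this loop work. First, the struct kvm header is mapped at the constant address KVM_VM_BASE, hence the bare cast instead of a vcpu->kvm dereference, whose host pointer would presumably not be valid in the VMM's own address space; the cast is how the loop picks up the new arch.online_vcpus bound (the same bound applied to kvm_get_lowest_prio_vcpu() above). Second, vcpu slots sit at a fixed sizeof(struct kvm_vcpu_data) stride, so vcpu i is synthesized by pointer arithmetic from vcpu 0; the vcpu->vcpu_id == 0 guard is what makes vcpu a valid base. A hedged restatement of the stride math as a helper:

	/* Reach vcpu i from vcpu 0; valid only because the caller runs
	 * with vcpu_id == 0, i.e. at the base of the per-vcpu array. */
	static inline struct kvm_vcpu *vcpu_slot(struct kvm_vcpu *vcpu0, int i)
	{
		return (struct kvm_vcpu *)((char *)vcpu0 +
				sizeof(struct kvm_vcpu_data) * i);
	}
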
@@ -1039,8 +1038,6 @@ u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr) | |||
1039 | return key; | 1038 | return key; |
1040 | } | 1039 | } |
1041 | 1040 | ||
1042 | |||
1043 | |||
1044 | void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst) | 1041 | void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst) |
1045 | { | 1042 | { |
1046 | unsigned long thash, vadr; | 1043 | unsigned long thash, vadr; |
@@ -1050,7 +1047,6 @@ void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst) | |||
1050 | vcpu_set_gr(vcpu, inst.M46.r1, thash, 0); | 1047 | vcpu_set_gr(vcpu, inst.M46.r1, thash, 0); |
1051 | } | 1048 | } |
1052 | 1049 | ||
1053 | |||
1054 | void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst) | 1050 | void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst) |
1055 | { | 1051 | { |
1056 | unsigned long tag, vadr; | 1052 | unsigned long tag, vadr; |
@@ -1131,7 +1127,6 @@ int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr) | |||
1131 | return IA64_NO_FAULT; | 1127 | return IA64_NO_FAULT; |
1132 | } | 1128 | } |
1133 | 1129 | ||
1134 | |||
1135 | int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst) | 1130 | int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst) |
1136 | { | 1131 | { |
1137 | unsigned long r1, r3; | 1132 | unsigned long r1, r3; |
@@ -1154,7 +1149,6 @@ void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst) | |||
1154 | vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); | 1149 | vcpu_set_gr(vcpu, inst.M46.r1, r1, 0); |
1155 | } | 1150 | } |
1156 | 1151 | ||
1157 | |||
1158 | /************************************ | 1152 | /************************************ |
1159 | * Insert/Purge translation register/cache | 1153 | * Insert/Purge translation register/cache |
1160 | ************************************/ | 1154 | ************************************/ |
@@ -1385,7 +1379,6 @@ void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) | |||
1385 | vcpu_set_itc(vcpu, r2); | 1379 | vcpu_set_itc(vcpu, r2); |
1386 | } | 1380 | } |
1387 | 1381 | ||
1388 | |||
1389 | void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) | 1382 | void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) |
1390 | { | 1383 | { |
1391 | unsigned long r1; | 1384 | unsigned long r1; |
@@ -1393,8 +1386,9 @@ void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst) | |||
1393 | r1 = vcpu_get_itc(vcpu); | 1386 | r1 = vcpu_get_itc(vcpu); |
1394 | vcpu_set_gr(vcpu, inst.M31.r1, r1, 0); | 1387 | vcpu_set_gr(vcpu, inst.M31.r1, r1, 0); |
1395 | } | 1388 | } |
1389 | |||
1396 | /************************************************************************** | 1390 | /************************************************************************** |
1397 | struct kvm_vcpu*protection key register access routines | 1391 | struct kvm_vcpu protection key register access routines |
1398 | **************************************************************************/ | 1392 | **************************************************************************/ |
1399 | 1393 | ||
1400 | unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg) | 1394 | unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg) |
@@ -1407,20 +1401,6 @@ void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val) | |||
1407 | ia64_set_pkr(reg, val); | 1401 | ia64_set_pkr(reg, val); |
1408 | } | 1402 | } |
1409 | 1403 | ||
1410 | |||
1411 | unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa) | ||
1412 | { | ||
1413 | union ia64_rr rr, rr1; | ||
1414 | |||
1415 | rr.val = vcpu_get_rr(vcpu, ifa); | ||
1416 | rr1.val = 0; | ||
1417 | rr1.ps = rr.ps; | ||
1418 | rr1.rid = rr.rid; | ||
1419 | return (rr1.val); | ||
1420 | } | ||
1421 | |||
1422 | |||
1423 | |||
1424 | /******************************** | 1404 | /******************************** |
1425 | * Moves to privileged registers | 1405 | * Moves to privileged registers |
1426 | ********************************/ | 1406 | ********************************/ |
@@ -1464,8 +1444,6 @@ unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg, | |||
1464 | return (IA64_NO_FAULT); | 1444 | return (IA64_NO_FAULT); |
1465 | } | 1445 | } |
1466 | 1446 | ||
1467 | |||
1468 | |||
1469 | void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst) | 1447 | void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst) |
1470 | { | 1448 | { |
1471 | unsigned long r3, r2; | 1449 | unsigned long r3, r2; |
@@ -1510,8 +1488,6 @@ void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst) | |||
1510 | vcpu_set_pkr(vcpu, r3, r2); | 1488 | vcpu_set_pkr(vcpu, r3, r2); |
1511 | } | 1489 | } |
1512 | 1490 | ||
1513 | |||
1514 | |||
1515 | void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst) | 1491 | void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst) |
1516 | { | 1492 | { |
1517 | unsigned long r3, r1; | 1493 | unsigned long r3, r1; |
@@ -1557,7 +1533,6 @@ void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst) | |||
1557 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); | 1533 | vcpu_set_gr(vcpu, inst.M43.r1, r1, 0); |
1558 | } | 1534 | } |
1559 | 1535 | ||
1560 | |||
1561 | unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg) | 1536 | unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg) |
1562 | { | 1537 | { |
1563 | /* FIXME: This could get called as a result of a rsvd-reg fault */ | 1538 | /* FIXME: This could get called as a result of a rsvd-reg fault */ |
@@ -1609,7 +1584,6 @@ unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst) | |||
1609 | return 0; | 1584 | return 0; |
1610 | } | 1585 | } |
1611 | 1586 | ||
1612 | |||
1613 | unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst) | 1587 | unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst) |
1614 | { | 1588 | { |
1615 | unsigned long tgt = inst.M33.r1; | 1589 | unsigned long tgt = inst.M33.r1; |
@@ -1633,8 +1607,6 @@ unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst) | |||
1633 | return 0; | 1607 | return 0; |
1634 | } | 1608 | } |
1635 | 1609 | ||
1636 | |||
1637 | |||
1638 | void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val) | 1610 | void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val) |
1639 | { | 1611 | { |
1640 | 1612 | ||
@@ -1776,9 +1748,6 @@ void vcpu_bsw1(struct kvm_vcpu *vcpu) | |||
1776 | } | 1748 | } |
1777 | } | 1749 | } |
1778 | 1750 | ||
1779 | |||
1780 | |||
1781 | |||
1782 | void vcpu_rfi(struct kvm_vcpu *vcpu) | 1751 | void vcpu_rfi(struct kvm_vcpu *vcpu) |
1783 | { | 1752 | { |
1784 | unsigned long ifs, psr; | 1753 | unsigned long ifs, psr; |
@@ -1796,7 +1765,6 @@ void vcpu_rfi(struct kvm_vcpu *vcpu) | |||
1796 | regs->cr_iip = VCPU(vcpu, iip); | 1765 | regs->cr_iip = VCPU(vcpu, iip); |
1797 | } | 1766 | } |
1798 | 1767 | ||
1799 | |||
1800 | /* | 1768 | /* |
1801 | VPSR can't keep track of below bits of guest PSR | 1769 | VPSR can't keep track of below bits of guest PSR |
1802 | This function gets guest PSR | 1770 | This function gets guest PSR |
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h index b2f12a562bdf..042af92ced83 100644 --- a/arch/ia64/kvm/vcpu.h +++ b/arch/ia64/kvm/vcpu.h | |||
@@ -703,7 +703,7 @@ extern u64 guest_vhpt_lookup(u64 iha, u64 *pte); | |||
703 | extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps); | 703 | extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps); |
704 | extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps); | 704 | extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps); |
705 | extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va); | 705 | extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va); |
706 | extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, | 706 | extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, |
707 | u64 itir, u64 ifa, int type); | 707 | u64 itir, u64 ifa, int type); |
708 | extern void thash_purge_all(struct kvm_vcpu *v); | 708 | extern void thash_purge_all(struct kvm_vcpu *v); |
709 | extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v, | 709 | extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v, |
@@ -738,7 +738,7 @@ void kvm_init_vhpt(struct kvm_vcpu *v); | |||
738 | void thash_init(struct thash_cb *hcb, u64 sz); | 738 | void thash_init(struct thash_cb *hcb, u64 sz); |
739 | 739 | ||
740 | void panic_vm(struct kvm_vcpu *v, const char *fmt, ...); | 740 | void panic_vm(struct kvm_vcpu *v, const char *fmt, ...); |
741 | 741 | u64 kvm_gpa_to_mpa(u64 gpa); | |
742 | extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3, | 742 | extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3, |
743 | u64 arg4, u64 arg5, u64 arg6, u64 arg7); | 743 | u64 arg4, u64 arg5, u64 arg6, u64 arg7); |
744 | 744 | ||
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index 1de4dbda37e7..2c2501f13159 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c | |||
@@ -164,11 +164,11 @@ static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte) | |||
164 | unsigned long ps, gpaddr; | 164 | unsigned long ps, gpaddr; |
165 | 165 | ||
166 | ps = itir_ps(itir); | 166 | ps = itir_ps(itir); |
167 | rr.val = ia64_get_rr(ifa); | ||
167 | 168 | ||
168 | gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) | | 169 | gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) | |
169 | (ifa & ((1UL << ps) - 1)); | 170 | (ifa & ((1UL << ps) - 1)); |
170 | 171 | ||
171 | rr.val = ia64_get_rr(ifa); | ||
172 | head = (struct thash_data *)ia64_thash(ifa); | 172 | head = (struct thash_data *)ia64_thash(ifa); |
173 | head->etag = INVALID_TI_TAG; | 173 | head->etag = INVALID_TI_TAG; |
174 | ia64_mf(); | 174 | ia64_mf(); |
@@ -413,16 +413,14 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va) | |||
413 | 413 | ||
414 | /* | 414 | /* |
415 | * Purge overlap TCs and then insert the new entry to emulate itc ops. | 415 | * Purge overlap TCs and then insert the new entry to emulate itc ops. |
416 | * Notes: Only TC entry can purge and insert. | 416 | * Notes: Only TC entry can purge and insert. |
417 | * 1 indicates this is MMIO | ||
418 | */ | 417 | */ |
419 | int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, | 418 | void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, |
420 | u64 ifa, int type) | 419 | u64 ifa, int type) |
421 | { | 420 | { |
422 | u64 ps; | 421 | u64 ps; |
423 | u64 phy_pte, io_mask, index; | 422 | u64 phy_pte, io_mask, index; |
424 | union ia64_rr vrr, mrr; | 423 | union ia64_rr vrr, mrr; |
425 | int ret = 0; | ||
426 | 424 | ||
427 | ps = itir_ps(itir); | 425 | ps = itir_ps(itir); |
428 | vrr.val = vcpu_get_rr(v, ifa); | 426 | vrr.val = vcpu_get_rr(v, ifa); |
@@ -442,25 +440,19 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, | |||
442 | phy_pte &= ~_PAGE_MA_MASK; | 440 | phy_pte &= ~_PAGE_MA_MASK; |
443 | } | 441 | } |
444 | 442 | ||
445 | if (pte & VTLB_PTE_IO) | ||
446 | ret = 1; | ||
447 | |||
448 | vtlb_purge(v, ifa, ps); | 443 | vtlb_purge(v, ifa, ps); |
449 | vhpt_purge(v, ifa, ps); | 444 | vhpt_purge(v, ifa, ps); |
450 | 445 | ||
451 | if (ps == mrr.ps) { | 446 | if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) { |
452 | if (!(pte&VTLB_PTE_IO)) { | ||
453 | vhpt_insert(phy_pte, itir, ifa, pte); | ||
454 | } else { | ||
455 | vtlb_insert(v, pte, itir, ifa); | ||
456 | vcpu_quick_region_set(VMX(v, tc_regions), ifa); | ||
457 | } | ||
458 | } else if (ps > mrr.ps) { | ||
459 | vtlb_insert(v, pte, itir, ifa); | 447 | vtlb_insert(v, pte, itir, ifa); |
460 | vcpu_quick_region_set(VMX(v, tc_regions), ifa); | 448 | vcpu_quick_region_set(VMX(v, tc_regions), ifa); |
461 | if (!(pte&VTLB_PTE_IO)) | 449 | } |
462 | vhpt_insert(phy_pte, itir, ifa, pte); | 450 | if (pte & VTLB_PTE_IO) |
463 | } else { | 451 | return; |
452 | |||
453 | if (ps >= mrr.ps) | ||
454 | vhpt_insert(phy_pte, itir, ifa, pte); | ||
455 | else { | ||
464 | u64 psr; | 456 | u64 psr; |
465 | phy_pte &= ~PAGE_FLAGS_RV_MASK; | 457 | phy_pte &= ~PAGE_FLAGS_RV_MASK; |
466 | psr = ia64_clear_ic(); | 458 | psr = ia64_clear_ic(); |
@@ -471,7 +463,6 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, | |||
471 | if (!(pte&VTLB_PTE_IO)) | 463 | if (!(pte&VTLB_PTE_IO)) |
472 | mark_pages_dirty(v, pte, ps); | 464 | mark_pages_dirty(v, pte, ps); |
473 | 465 | ||
474 | return ret; | ||
475 | } | 466 | } |
476 | 467 | ||
477 | /* | 468 | /* |
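
The rewritten tail of thash_purge_and_insert() flattens the old three-way page-size comparison into bookkeeping first, hardware insertion second. A hedged restatement of the resulting policy (conditions exactly as in the hunk):

	/*
	 * ps != mrr.ps or VTLB_PTE_IO  ->  track in the software vtlb and
	 *                                  mark the region in tc_regions
	 * VTLB_PTE_IO                  ->  stop here: MMIO never enters
	 *                                  the hardware VHPT
	 * ps >= mrr.ps                 ->  insert into the hardware VHPT
	 * ps <  mrr.ps                 ->  install one machine-sized page
	 *                                  directly (the itc path below)
	 */

The early return for VTLB_PTE_IO is also what lets the function drop its int result: callers no longer learn "this was MMIO" from the return value, which is why the prototype in vcpu.h changes to void further down.
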
@@ -511,7 +502,6 @@ void thash_purge_all(struct kvm_vcpu *v) | |||
511 | local_flush_tlb_all(); | 502 | local_flush_tlb_all(); |
512 | } | 503 | } |
513 | 504 | ||
514 | |||
515 | /* | 505 | /* |
516 | * Lookup the hash table and its collision chain to find an entry | 506 | * Lookup the hash table and its collision chain to find an entry |
517 | * covering this address rid:va or the entry. | 507 | * covering this address rid:va or the entry. |
@@ -519,7 +509,6 @@ void thash_purge_all(struct kvm_vcpu *v) | |||
519 | * INPUT: | 509 | * INPUT: |
520 | * in: TLB format for both VHPT & TLB. | 510 | * in: TLB format for both VHPT & TLB. |
521 | */ | 511 | */ |
522 | |||
523 | struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data) | 512 | struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data) |
524 | { | 513 | { |
525 | struct thash_data *cch; | 514 | struct thash_data *cch; |
@@ -549,7 +538,6 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data) | |||
549 | return NULL; | 538 | return NULL; |
550 | } | 539 | } |
551 | 540 | ||
552 | |||
553 | /* | 541 | /* |
554 | * Initialize internal control data before service. | 542 | * Initialize internal control data before service. |
555 | */ | 543 | */ |
@@ -575,6 +563,10 @@ void thash_init(struct thash_cb *hcb, u64 sz) | |||
575 | u64 kvm_get_mpt_entry(u64 gpfn) | 563 | u64 kvm_get_mpt_entry(u64 gpfn) |
576 | { | 564 | { |
577 | u64 *base = (u64 *) KVM_P2M_BASE; | 565 | u64 *base = (u64 *) KVM_P2M_BASE; |
566 | |||
567 | if (gpfn >= (KVM_P2M_SIZE >> 3)) | ||
568 | panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn); | ||
569 | |||
578 | return *(base + gpfn); | 570 | return *(base + gpfn); |
579 | } | 571 | } |
580 | 572 | ||
@@ -591,7 +583,6 @@ u64 kvm_gpa_to_mpa(u64 gpa) | |||
591 | return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK); | 583 | return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK); |
592 | } | 584 | } |
593 | 585 | ||
594 | |||
595 | /* | 586 | /* |
596 | * Fetch guest bundle code. | 587 | * Fetch guest bundle code. |
597 | * INPUT: | 588 | * INPUT: |
@@ -633,7 +624,6 @@ int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle) | |||
633 | return IA64_NO_FAULT; | 624 | return IA64_NO_FAULT; |
634 | } | 625 | } |
635 | 626 | ||
636 | |||
637 | void kvm_init_vhpt(struct kvm_vcpu *v) | 627 | void kvm_init_vhpt(struct kvm_vcpu *v) |
638 | { | 628 | { |
639 | v->arch.vhpt.num = VHPT_NUM_ENTRIES; | 629 | v->arch.vhpt.num = VHPT_NUM_ENTRIES; |
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index bd9818a36b47..b9f3d7bbb338 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c | |||
@@ -309,7 +309,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, | |||
309 | 309 | ||
310 | preempt_disable(); | 310 | preempt_disable(); |
311 | #ifdef CONFIG_SMP | 311 | #ifdef CONFIG_SMP |
312 | if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) { | 312 | if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) { |
313 | platform_global_tlb_purge(mm, start, end, nbits); | 313 | platform_global_tlb_purge(mm, start, end, nbits); |
314 | preempt_enable(); | 314 | preempt_enable(); |
315 | return; | 315 | return; |
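
This hunk is part of the tree-wide cpumask rework: open-coded struct members and the value-style cpus_* helpers give way to the mm_cpumask() accessor and the pointer-taking cpumask_* API, so the mask's storage can later move out of line (cpumask_var_t) without touching callers. The same substitution recurs in the sn2 hunks below; the idiom, side by side:

	cpus_weight(mm->cpu_vm_mask)             /* old, struct-by-value  */
	cpumask_weight(mm_cpumask(mm))           /* new, via accessor     */

	for_each_cpu_mask(cpu, mm->cpu_vm_mask)  /* old                   */
	for_each_cpu(cpu, mm_cpumask(mm))        /* new                   */
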
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c index ca553b0429ce..81e428943d73 100644 --- a/arch/ia64/sn/kernel/msi_sn.c +++ b/arch/ia64/sn/kernel/msi_sn.c | |||
@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, | |||
205 | msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); | 205 | msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); |
206 | 206 | ||
207 | write_msi_msg(irq, &msg); | 207 | write_msi_msg(irq, &msg); |
208 | irq_desc[irq].affinity = *cpu_mask; | 208 | cpumask_copy(irq_desc[irq].affinity, cpu_mask); |
209 | } | 209 | } |
210 | #endif /* CONFIG_SMP */ | 210 | #endif /* CONFIG_SMP */ |
211 | 211 | ||
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 02c5b8a9fb60..12097776afc0 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c | |||
@@ -750,7 +750,7 @@ nasid_slice_to_cpuid(int nasid, int slice) | |||
750 | { | 750 | { |
751 | long cpu; | 751 | long cpu; |
752 | 752 | ||
753 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 753 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) |
754 | if (cpuid_to_nasid(cpu) == nasid && | 754 | if (cpuid_to_nasid(cpu) == nasid && |
755 | cpuid_to_slice(cpu) == slice) | 755 | cpuid_to_slice(cpu) == slice) |
756 | return cpu; | 756 | return cpu; |
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c index 4dcce3d0e04c..e63328818643 100644 --- a/arch/ia64/sn/kernel/sn2/prominfo_proc.c +++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c | |||
@@ -225,7 +225,6 @@ static struct proc_dir_entry *sgi_prominfo_entry; | |||
225 | int __init prominfo_init(void) | 225 | int __init prominfo_init(void) |
226 | { | 226 | { |
227 | struct proc_dir_entry **entp; | 227 | struct proc_dir_entry **entp; |
228 | struct proc_dir_entry *p; | ||
229 | cnodeid_t cnodeid; | 228 | cnodeid_t cnodeid; |
230 | unsigned long nasid; | 229 | unsigned long nasid; |
231 | int size; | 230 | int size; |
@@ -246,14 +245,10 @@ int __init prominfo_init(void) | |||
246 | sprintf(name, "node%d", cnodeid); | 245 | sprintf(name, "node%d", cnodeid); |
247 | *entp = proc_mkdir(name, sgi_prominfo_entry); | 246 | *entp = proc_mkdir(name, sgi_prominfo_entry); |
248 | nasid = cnodeid_to_nasid(cnodeid); | 247 | nasid = cnodeid_to_nasid(cnodeid); |
249 | p = create_proc_read_entry("fit", 0, *entp, read_fit_entry, | 248 | create_proc_read_entry("fit", 0, *entp, read_fit_entry, |
250 | (void *)nasid); | 249 | (void *)nasid); |
251 | if (p) | 250 | create_proc_read_entry("version", 0, *entp, |
252 | p->owner = THIS_MODULE; | ||
253 | p = create_proc_read_entry("version", 0, *entp, | ||
254 | read_version_entry, (void *)nasid); | 251 | read_version_entry, (void *)nasid); |
255 | if (p) | ||
256 | p->owner = THIS_MODULE; | ||
257 | entp++; | 252 | entp++; |
258 | } | 253 | } |
259 | 254 | ||
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index e585f9a2afb9..3c2f242d90cb 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c | |||
@@ -133,7 +133,7 @@ sn2_ipi_flush_all_tlb(struct mm_struct *mm) | |||
133 | unsigned long itc; | 133 | unsigned long itc; |
134 | 134 | ||
135 | itc = ia64_get_itc(); | 135 | itc = ia64_get_itc(); |
136 | smp_flush_tlb_cpumask(mm->cpu_vm_mask); | 136 | smp_flush_tlb_cpumask(*mm_cpumask(mm)); |
137 | itc = ia64_get_itc() - itc; | 137 | itc = ia64_get_itc() - itc; |
138 | __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc; | 138 | __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc; |
139 | __get_cpu_var(ptcstats).shub_ipi_flushes++; | 139 | __get_cpu_var(ptcstats).shub_ipi_flushes++; |
@@ -182,7 +182,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, | |||
182 | nodes_clear(nodes_flushed); | 182 | nodes_clear(nodes_flushed); |
183 | i = 0; | 183 | i = 0; |
184 | 184 | ||
185 | for_each_cpu_mask(cpu, mm->cpu_vm_mask) { | 185 | for_each_cpu(cpu, mm_cpumask(mm)) { |
186 | cnode = cpu_to_node(cpu); | 186 | cnode = cpu_to_node(cpu); |
187 | node_set(cnode, nodes_flushed); | 187 | node_set(cnode, nodes_flushed); |
188 | lcpu = cpu; | 188 | lcpu = cpu; |
@@ -461,7 +461,7 @@ bool sn_cpu_disable_allowed(int cpu) | |||
461 | 461 | ||
462 | static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) | 462 | static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) |
463 | { | 463 | { |
464 | if (*offset < NR_CPUS) | 464 | if (*offset < nr_cpu_ids) |
465 | return offset; | 465 | return offset; |
466 | return NULL; | 466 | return NULL; |
467 | } | 467 | } |
@@ -469,7 +469,7 @@ static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) | |||
469 | static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) | 469 | static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) |
470 | { | 470 | { |
471 | (*offset)++; | 471 | (*offset)++; |
472 | if (*offset < NR_CPUS) | 472 | if (*offset < nr_cpu_ids) |
473 | return offset; | 473 | return offset; |
474 | return NULL; | 474 | return NULL; |
475 | } | 475 | } |
@@ -491,7 +491,7 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data) | |||
491 | seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt); | 491 | seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt); |
492 | } | 492 | } |
493 | 493 | ||
494 | if (cpu < NR_CPUS && cpu_online(cpu)) { | 494 | if (cpu < nr_cpu_ids && cpu_online(cpu)) { |
495 | stat = &per_cpu(ptcstats, cpu); | 495 | stat = &per_cpu(ptcstats, cpu); |
496 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, | 496 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, |
497 | stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, | 497 | stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, |
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index be339477f906..45f3c2390428 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c | |||
@@ -612,7 +612,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info) | |||
612 | op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK; | 612 | op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK; |
613 | 613 | ||
614 | if (cpu != SN_HWPERF_ARG_ANY_CPU) { | 614 | if (cpu != SN_HWPERF_ARG_ANY_CPU) { |
615 | if (cpu >= NR_CPUS || !cpu_online(cpu)) { | 615 | if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { |
616 | r = -EINVAL; | 616 | r = -EINVAL; |
617 | goto out; | 617 | goto out; |
618 | } | 618 | } |
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 863f5017baae..8c130e8f00e1 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c | |||
@@ -10,7 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/dma-attrs.h> | 13 | #include <linux/dma-mapping.h> |
14 | #include <asm/dma.h> | 14 | #include <asm/dma.h> |
15 | #include <asm/sn/intr.h> | 15 | #include <asm/sn/intr.h> |
16 | #include <asm/sn/pcibus_provider_defs.h> | 16 | #include <asm/sn/pcibus_provider_defs.h> |
@@ -31,7 +31,7 @@ | |||
31 | * this function. Of course, SN only supports devices that have 32 or more | 31 | * this function. Of course, SN only supports devices that have 32 or more |
32 | * address bits when using the PMU. | 32 | * address bits when using the PMU. |
33 | */ | 33 | */ |
34 | int sn_dma_supported(struct device *dev, u64 mask) | 34 | static int sn_dma_supported(struct device *dev, u64 mask) |
35 | { | 35 | { |
36 | BUG_ON(dev->bus != &pci_bus_type); | 36 | BUG_ON(dev->bus != &pci_bus_type); |
37 | 37 | ||
@@ -39,7 +39,6 @@ int sn_dma_supported(struct device *dev, u64 mask) | |||
39 | return 0; | 39 | return 0; |
40 | return 1; | 40 | return 1; |
41 | } | 41 | } |
42 | EXPORT_SYMBOL(sn_dma_supported); | ||
43 | 42 | ||
44 | /** | 43 | /** |
45 | * sn_dma_set_mask - set the DMA mask | 44 | * sn_dma_set_mask - set the DMA mask |
@@ -75,8 +74,8 @@ EXPORT_SYMBOL(sn_dma_set_mask); | |||
75 | * queue for a SCSI controller). See Documentation/DMA-API.txt for | 74 | * queue for a SCSI controller). See Documentation/DMA-API.txt for |
76 | * more information. | 75 | * more information. |
77 | */ | 76 | */ |
78 | void *sn_dma_alloc_coherent(struct device *dev, size_t size, | 77 | static void *sn_dma_alloc_coherent(struct device *dev, size_t size, |
79 | dma_addr_t * dma_handle, gfp_t flags) | 78 | dma_addr_t * dma_handle, gfp_t flags) |
80 | { | 79 | { |
81 | void *cpuaddr; | 80 | void *cpuaddr; |
82 | unsigned long phys_addr; | 81 | unsigned long phys_addr; |
@@ -124,7 +123,6 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size, | |||
124 | 123 | ||
125 | return cpuaddr; | 124 | return cpuaddr; |
126 | } | 125 | } |
127 | EXPORT_SYMBOL(sn_dma_alloc_coherent); | ||
128 | 126 | ||
129 | /** | 127 | /** |
130 | * sn_pci_free_coherent - free memory associated with coherent DMAable region | 128 | * sn_pci_free_coherent - free memory associated with coherent DMAable region |
@@ -136,8 +134,8 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent); | |||
136 | * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping | 134 | * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping |
137 | * any associated IOMMU mappings. | 135 | * any associated IOMMU mappings. |
138 | */ | 136 | */ |
139 | void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | 137 | static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, |
140 | dma_addr_t dma_handle) | 138 | dma_addr_t dma_handle) |
141 | { | 139 | { |
142 | struct pci_dev *pdev = to_pci_dev(dev); | 140 | struct pci_dev *pdev = to_pci_dev(dev); |
143 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 141 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
@@ -147,7 +145,6 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | |||
147 | provider->dma_unmap(pdev, dma_handle, 0); | 145 | provider->dma_unmap(pdev, dma_handle, 0); |
148 | free_pages((unsigned long)cpu_addr, get_order(size)); | 146 | free_pages((unsigned long)cpu_addr, get_order(size)); |
149 | } | 147 | } |
150 | EXPORT_SYMBOL(sn_dma_free_coherent); | ||
151 | 148 | ||
152 | /** | 149 | /** |
153 | * sn_dma_map_single_attrs - map a single page for DMA | 150 | * sn_dma_map_single_attrs - map a single page for DMA |
@@ -173,10 +170,12 @@ EXPORT_SYMBOL(sn_dma_free_coherent); | |||
173 | * TODO: simplify our interface; | 170 | * TODO: simplify our interface; |
174 | * figure out how to save the dmamap handle so we can use the two-step interface. | 171 | * figure out how to save the dmamap handle so we can use the two-step interface. |
175 | */ | 172 | */ |
176 | dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr, | 173 | static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, |
177 | size_t size, int direction, | 174 | unsigned long offset, size_t size, |
178 | struct dma_attrs *attrs) | 175 | enum dma_data_direction dir, |
176 | struct dma_attrs *attrs) | ||
179 | { | 177 | { |
178 | void *cpu_addr = page_address(page) + offset; | ||
180 | dma_addr_t dma_addr; | 179 | dma_addr_t dma_addr; |
181 | unsigned long phys_addr; | 180 | unsigned long phys_addr; |
182 | struct pci_dev *pdev = to_pci_dev(dev); | 181 | struct pci_dev *pdev = to_pci_dev(dev); |
@@ -201,7 +200,6 @@ dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr, | |||
201 | } | 200 | } |
202 | return dma_addr; | 201 | return dma_addr; |
203 | } | 202 | } |
204 | EXPORT_SYMBOL(sn_dma_map_single_attrs); | ||
205 | 203 | ||
206 | /** | 204 | /** |
207 | * sn_dma_unmap_single_attrs - unmap a DMA mapped page | 205 | * sn_dma_unmap_single_attrs - unmap a DMA mapped page |
@@ -215,21 +213,20 @@ EXPORT_SYMBOL(sn_dma_map_single_attrs); | |||
215 | * by @dma_handle into the coherence domain. On SN, we're always cache | 213 | * by @dma_handle into the coherence domain. On SN, we're always cache |
216 | * coherent, so we just need to free any ATEs associated with this mapping. | 214 | * coherent, so we just need to free any ATEs associated with this mapping. |
217 | */ | 215 | */ |
218 | void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, | 216 | static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, |
219 | size_t size, int direction, | 217 | size_t size, enum dma_data_direction dir, |
220 | struct dma_attrs *attrs) | 218 | struct dma_attrs *attrs) |
221 | { | 219 | { |
222 | struct pci_dev *pdev = to_pci_dev(dev); | 220 | struct pci_dev *pdev = to_pci_dev(dev); |
223 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 221 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
224 | 222 | ||
225 | BUG_ON(dev->bus != &pci_bus_type); | 223 | BUG_ON(dev->bus != &pci_bus_type); |
226 | 224 | ||
227 | provider->dma_unmap(pdev, dma_addr, direction); | 225 | provider->dma_unmap(pdev, dma_addr, dir); |
228 | } | 226 | } |
229 | EXPORT_SYMBOL(sn_dma_unmap_single_attrs); | ||
230 | 227 | ||
231 | /** | 228 | /** |
232 | * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist | 229 | * sn_dma_unmap_sg - unmap a DMA scatterlist |
233 | * @dev: device to unmap | 230 | * @dev: device to unmap |
234 | * @sg: scatterlist to unmap | 231 | * @sg: scatterlist to unmap |
235 | * @nhwentries: number of scatterlist entries | 232 | * @nhwentries: number of scatterlist entries |
@@ -238,9 +235,9 @@ EXPORT_SYMBOL(sn_dma_unmap_single_attrs); | |||
238 | * | 235 | * |
239 | * Unmap a set of streaming mode DMA translations. | 236 | * Unmap a set of streaming mode DMA translations. |
240 | */ | 237 | */ |
241 | void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, | 238 | static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, |
242 | int nhwentries, int direction, | 239 | int nhwentries, enum dma_data_direction dir, |
243 | struct dma_attrs *attrs) | 240 | struct dma_attrs *attrs) |
244 | { | 241 | { |
245 | int i; | 242 | int i; |
246 | struct pci_dev *pdev = to_pci_dev(dev); | 243 | struct pci_dev *pdev = to_pci_dev(dev); |
@@ -250,15 +247,14 @@ void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
250 | BUG_ON(dev->bus != &pci_bus_type); | 247 | BUG_ON(dev->bus != &pci_bus_type); |
251 | 248 | ||
252 | for_each_sg(sgl, sg, nhwentries, i) { | 249 | for_each_sg(sgl, sg, nhwentries, i) { |
253 | provider->dma_unmap(pdev, sg->dma_address, direction); | 250 | provider->dma_unmap(pdev, sg->dma_address, dir); |
254 | sg->dma_address = (dma_addr_t) NULL; | 251 | sg->dma_address = (dma_addr_t) NULL; |
255 | sg->dma_length = 0; | 252 | sg->dma_length = 0; |
256 | } | 253 | } |
257 | } | 254 | } |
258 | EXPORT_SYMBOL(sn_dma_unmap_sg_attrs); | ||
259 | 255 | ||
260 | /** | 256 | /** |
261 | * sn_dma_map_sg_attrs - map a scatterlist for DMA | 257 | * sn_dma_map_sg - map a scatterlist for DMA |
262 | * @dev: device to map for | 258 | * @dev: device to map for |
263 | * @sg: scatterlist to map | 259 | * @sg: scatterlist to map |
264 | * @nhwentries: number of entries | 260 | * @nhwentries: number of entries |
@@ -272,8 +268,9 @@ EXPORT_SYMBOL(sn_dma_unmap_sg_attrs); | |||
272 | * | 268 | * |
273 | * Maps each entry of @sg for DMA. | 269 | * Maps each entry of @sg for DMA. |
274 | */ | 270 | */ |
275 | int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | 271 | static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, |
276 | int nhwentries, int direction, struct dma_attrs *attrs) | 272 | int nhwentries, enum dma_data_direction dir, |
273 | struct dma_attrs *attrs) | ||
277 | { | 274 | { |
278 | unsigned long phys_addr; | 275 | unsigned long phys_addr; |
279 | struct scatterlist *saved_sg = sgl, *sg; | 276 | struct scatterlist *saved_sg = sgl, *sg; |
@@ -310,8 +307,7 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
310 | * Free any successfully allocated entries. | 307 | * Free any successfully allocated entries. |
311 | */ | 308 | */ |
312 | if (i > 0) | 309 | if (i > 0) |
313 | sn_dma_unmap_sg_attrs(dev, saved_sg, i, | 310 | sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs); |
314 | direction, attrs); | ||
315 | return 0; | 311 | return 0; |
316 | } | 312 | } |
317 | 313 | ||
@@ -320,41 +316,36 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | |||
320 | 316 | ||
321 | return nhwentries; | 317 | return nhwentries; |
322 | } | 318 | } |
323 | EXPORT_SYMBOL(sn_dma_map_sg_attrs); | ||
324 | 319 | ||
325 | void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | 320 | static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, |
326 | size_t size, int direction) | 321 | size_t size, enum dma_data_direction dir) |
327 | { | 322 | { |
328 | BUG_ON(dev->bus != &pci_bus_type); | 323 | BUG_ON(dev->bus != &pci_bus_type); |
329 | } | 324 | } |
330 | EXPORT_SYMBOL(sn_dma_sync_single_for_cpu); | ||
331 | 325 | ||
332 | void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | 326 | static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, |
333 | size_t size, int direction) | 327 | size_t size, |
328 | enum dma_data_direction dir) | ||
334 | { | 329 | { |
335 | BUG_ON(dev->bus != &pci_bus_type); | 330 | BUG_ON(dev->bus != &pci_bus_type); |
336 | } | 331 | } |
337 | EXPORT_SYMBOL(sn_dma_sync_single_for_device); | ||
338 | 332 | ||
339 | void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | 333 | static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, |
340 | int nelems, int direction) | 334 | int nelems, enum dma_data_direction dir) |
341 | { | 335 | { |
342 | BUG_ON(dev->bus != &pci_bus_type); | 336 | BUG_ON(dev->bus != &pci_bus_type); |
343 | } | 337 | } |
344 | EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu); | ||
345 | 338 | ||
346 | void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | 339 | static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
347 | int nelems, int direction) | 340 | int nelems, enum dma_data_direction dir) |
348 | { | 341 | { |
349 | BUG_ON(dev->bus != &pci_bus_type); | 342 | BUG_ON(dev->bus != &pci_bus_type); |
350 | } | 343 | } |
351 | EXPORT_SYMBOL(sn_dma_sync_sg_for_device); | ||
352 | 344 | ||
353 | int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 345 | static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
354 | { | 346 | { |
355 | return 0; | 347 | return 0; |
356 | } | 348 | } |
357 | EXPORT_SYMBOL(sn_dma_mapping_error); | ||
358 | 349 | ||
359 | u64 sn_dma_get_required_mask(struct device *dev) | 350 | u64 sn_dma_get_required_mask(struct device *dev) |
360 | { | 351 | { |
@@ -471,3 +462,23 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) | |||
471 | out: | 462 | out: |
472 | return ret; | 463 | return ret; |
473 | } | 464 | } |
465 | |||
466 | static struct dma_map_ops sn_dma_ops = { | ||
467 | .alloc_coherent = sn_dma_alloc_coherent, | ||
468 | .free_coherent = sn_dma_free_coherent, | ||
469 | .map_page = sn_dma_map_page, | ||
470 | .unmap_page = sn_dma_unmap_page, | ||
471 | .map_sg = sn_dma_map_sg, | ||
472 | .unmap_sg = sn_dma_unmap_sg, | ||
473 | .sync_single_for_cpu = sn_dma_sync_single_for_cpu, | ||
474 | .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu, | ||
475 | .sync_single_for_device = sn_dma_sync_single_for_device, | ||
476 | .sync_sg_for_device = sn_dma_sync_sg_for_device, | ||
477 | .mapping_error = sn_dma_mapping_error, | ||
478 | .dma_supported = sn_dma_supported, | ||
479 | }; | ||
480 | |||
481 | void sn_dma_init(void) | ||
482 | { | ||
483 | dma_ops = &sn_dma_ops; | ||
484 | } | ||
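
With sn_dma_init() installing the ops table, the per-function EXPORT_SYMBOLs disappear and every mapping request funnels through the generic struct dma_map_ops dispatch. That is also why map_single becomes map_page: the common inline layer decomposes a kernel virtual address into page plus offset before calling down. A hedged sketch of roughly what that layer does once dma_ops points at sn_dma_ops (the virt_to_page() split follows the dma-mapping-common.h pattern of this series; treat it as an approximation of the generic header, not a quotation):

	static inline dma_addr_t dma_map_single_sketch(struct device *dev,
			void *cpu_addr, size_t size, enum dma_data_direction dir)
	{
		struct dma_map_ops *ops = dma_ops;  /* &sn_dma_ops after sn_dma_init() */

		/* split the linear address, then dispatch through the table */
		return ops->map_page(dev, virt_to_page(cpu_addr),
				     (unsigned long)cpu_addr & ~PAGE_MASK,
				     size, dir, NULL);
	}

Making the helpers static has a second payoff: their only remaining caller is the ops table above, so the compiler can see the whole call graph within the file.
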