author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>  2009-01-05 09:59:02 -0500
committer  Ingo Molnar <mingo@elte.hu>  2009-01-06 08:06:57 -0500
commit     160c1d8e40866edfeae7d68816b7005d70acf391 (patch)
tree       37dd78b2ea28a3953a46d401bd9657005eb444d7 /arch/ia64/sn/pci
parent     f0402a262e1a4c03fc66b83659823bdcaac3c41a (diff)
x86, ia64: convert to use generic dma_map_ops struct
This converts X86 and IA64 to use include/linux/dma-mapping.h. It's a bit large but pretty boring. The major change for X86 is converting 'int dir' to 'enum dma_data_direction dir' in DMA mapping operations. The major change for IA64 is using map_page and unmap_page instead of map_single and unmap_single.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
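For readers unfamiliar with the generic layer: the map_single/unmap_single callbacks can disappear because a single-buffer mapping is expressible through ->map_page. The sketch below is only an approximation of the generic wrapper (the real one lives in the common dma-mapping headers, not in this patch); get_dma_ops(), virt_to_page() and offset_in_page() are assumed to behave as in mainline.

/*
 * Sketch only: how a generic dma_map_single() can be layered on ->map_page.
 * Approximation of the common helper, not a copy of it.
 */
static inline dma_addr_t sketch_dma_map_single(struct device *dev, void *ptr,
					       size_t size,
					       enum dma_data_direction dir,
					       struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);	/* per-arch ops table */

	/* A "single" mapping is a page mapping at the buffer's page offset. */
	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, attrs);
}

With that wrapper in the common code, an architecture only has to supply ->map_page; the SN2 conversion below does exactly that.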
Diffstat (limited to 'arch/ia64/sn/pci')
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c | 58
1 file changed, 30 insertions(+), 28 deletions(-)
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index efdd69490009..9c788f9cedfd 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -10,7 +10,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/dma-attrs.h>
 #include <linux/dma-mapping.h>
 #include <asm/dma.h>
 #include <asm/sn/intr.h>
@@ -171,10 +170,12 @@ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr
  * TODO: simplify our interface;
  *       figure out how to save dmamap handle so can use two step.
  */
-static dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
-					   size_t size, int direction,
-					   struct dma_attrs *attrs)
+static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
+				  unsigned long offset, size_t size,
+				  enum dma_data_direction dir,
+				  struct dma_attrs *attrs)
 {
+	void *cpu_addr = page_address(page) + offset;
 	dma_addr_t dma_addr;
 	unsigned long phys_addr;
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -212,20 +213,20 @@ static dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
  * by @dma_handle into the coherence domain.  On SN, we're always cache
  * coherent, so we just need to free any ATEs associated with this mapping.
  */
-static void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
-				       size_t size, int direction,
-				       struct dma_attrs *attrs)
+static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+			      size_t size, enum dma_data_direction dir,
+			      struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
-	provider->dma_unmap(pdev, dma_addr, direction);
+	provider->dma_unmap(pdev, dma_addr, dir);
 }
 
 /**
- * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
+ * sn_dma_unmap_sg - unmap a DMA scatterlist
  * @dev: device to unmap
  * @sg: scatterlist to unmap
  * @nhwentries: number of scatterlist entries
@@ -234,9 +235,9 @@ static void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
  *
  * Unmap a set of streaming mode DMA translations.
  */
-static void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
-				  int nhwentries, int direction,
-				  struct dma_attrs *attrs)
+static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+			    int nhwentries, enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -246,14 +247,14 @@ static void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
 	BUG_ON(dev->bus != &pci_bus_type);
 
 	for_each_sg(sgl, sg, nhwentries, i) {
-		provider->dma_unmap(pdev, sg->dma_address, direction);
+		provider->dma_unmap(pdev, sg->dma_address, dir);
 		sg->dma_address = (dma_addr_t) NULL;
 		sg->dma_length = 0;
 	}
 }
 
 /**
- * sn_dma_map_sg_attrs - map a scatterlist for DMA
+ * sn_dma_map_sg - map a scatterlist for DMA
  * @dev: device to map for
  * @sg: scatterlist to map
  * @nhwentries: number of entries
@@ -267,8 +268,9 @@ static void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
  *
  * Maps each entry of @sg for DMA.
  */
-static int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
-			       int nhwentries, int direction, struct dma_attrs *attrs)
+static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
+			 int nhwentries, enum dma_data_direction dir,
+			 struct dma_attrs *attrs)
 {
 	unsigned long phys_addr;
 	struct scatterlist *saved_sg = sgl, *sg;
@@ -305,8 +307,7 @@ static int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 			 * Free any successfully allocated entries.
 			 */
 			if (i > 0)
-				sn_dma_unmap_sg_attrs(dev, saved_sg, i,
-						      direction, attrs);
+				sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
 			return 0;
 		}
 
@@ -317,25 +318,26 @@ static int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 }
 
 static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-				       size_t size, int direction)
+				       size_t size, enum dma_data_direction dir)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
 
 static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-					  size_t size, int direction)
+					  size_t size,
+					  enum dma_data_direction dir)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
 
 static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				   int nelems, int direction)
+				   int nelems, enum dma_data_direction dir)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
 
 static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-				       int nelems, int direction)
+				       int nelems, enum dma_data_direction dir)
 {
 	BUG_ON(dev->bus != &pci_bus_type);
 }
@@ -455,19 +457,19 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
 	return ret;
 }
 
-static struct dma_mapping_ops sn_dma_ops = {
+static struct dma_map_ops sn_dma_ops = {
 	.alloc_coherent = sn_dma_alloc_coherent,
 	.free_coherent = sn_dma_free_coherent,
-	.map_single_attrs = sn_dma_map_single_attrs,
-	.unmap_single_attrs = sn_dma_unmap_single_attrs,
-	.map_sg_attrs = sn_dma_map_sg_attrs,
-	.unmap_sg_attrs = sn_dma_unmap_sg_attrs,
+	.map_page = sn_dma_map_page,
+	.unmap_page = sn_dma_unmap_page,
+	.map_sg = sn_dma_map_sg,
+	.unmap_sg = sn_dma_unmap_sg,
 	.sync_single_for_cpu = sn_dma_sync_single_for_cpu,
 	.sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
 	.sync_single_for_device = sn_dma_sync_single_for_device,
 	.sync_sg_for_device = sn_dma_sync_sg_for_device,
 	.mapping_error = sn_dma_mapping_error,
-	.dma_supported_op = sn_dma_supported,
+	.dma_supported = sn_dma_supported,
 };
 
 void sn_dma_init(void)
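For reference when reading the ops-table hunk above, here is a reconstructed sketch of the shape of the generic struct dma_map_ops that sn_dma_ops now fills in. It is inferred from the callbacks assigned in this patch and is not the authoritative definition from include/linux/dma-mapping.h, which contains additional members and may differ in ordering.

/*
 * Reconstructed sketch of the generic ops table these callbacks populate.
 * Not the authoritative definition; field types are taken from the SN
 * callback signatures in the diff above.
 */
struct dma_map_ops_sketch {
	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   struct dma_attrs *attrs);
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir, struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, struct dma_attrs *attrs);
	/* alloc_coherent, free_coherent, the four sync_* hooks,
	 * mapping_error and dma_supported are also members. */
};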