author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>    2009-01-05 09:59:02 -0500
committer  Ingo Molnar <mingo@elte.hu>                        2009-01-06 08:06:57 -0500
commit     160c1d8e40866edfeae7d68816b7005d70acf391 (patch)
tree       37dd78b2ea28a3953a46d401bd9657005eb444d7 /arch/x86/kernel
parent     f0402a262e1a4c03fc66b83659823bdcaac3c41a (diff)
x86, ia64: convert to use generic dma_map_ops struct
This converts X86 and IA64 to use include/linux/dma-mapping.h.
It's a bit large but pretty boring. The major change for X86 is
converting 'int dir' to 'enum dma_data_direction dir' in DMA mapping
operations. The major change for IA64 is using map_page and
unmap_page instead of map_single and unmap_single.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
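
For reference, the generic operations table that both architectures now fill in is declared in include/linux/dma-mapping.h. Its shape at this point is roughly the following (an abridged, illustrative sketch: the sync_* hooks and some members are omitted, so this is not the verbatim declaration):

struct dma_map_ops {
        void *(*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void (*free_coherent)(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle);
        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                               unsigned long offset, size_t size,
                               enum dma_data_direction dir,
                               struct dma_attrs *attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir,
                           struct dma_attrs *attrs);
        int (*map_sg)(struct device *dev, struct scatterlist *sg,
                      int nents, enum dma_data_direction dir,
                      struct dma_attrs *attrs);
        void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
                         int nents, enum dma_data_direction dir,
                         struct dma_attrs *attrs);
        /* sync_single_* and sync_sg_* hooks elided */
        int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
        int (*dma_supported)(struct device *dev, u64 mask);
        int is_phys;
};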
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--   arch/x86/kernel/amd_iommu.c        |  8
-rw-r--r--   arch/x86/kernel/pci-calgary_64.c   | 15
-rw-r--r--   arch/x86/kernel/pci-dma.c          |  4
-rw-r--r--   arch/x86/kernel/pci-gart_64.c      | 14
-rw-r--r--   arch/x86/kernel/pci-nommu.c        |  5
-rw-r--r--   arch/x86/kernel/pci-swiotlb_64.c   |  6
6 files changed, 28 insertions, 24 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a5dedb690a9a..008e522b9536 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1394,7 +1394,8 @@ static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
  * lists).
  */
 static int map_sg(struct device *dev, struct scatterlist *sglist,
-                  int nelems, int dir)
+                  int nelems, enum dma_data_direction dir,
+                  struct dma_attrs *attrs)
 {
         unsigned long flags;
         struct amd_iommu *iommu;
@@ -1461,7 +1462,8 @@ unmap:
  * lists).
  */
 static void unmap_sg(struct device *dev, struct scatterlist *sglist,
-                     int nelems, int dir)
+                     int nelems, enum dma_data_direction dir,
+                     struct dma_attrs *attrs)
 {
         unsigned long flags;
         struct amd_iommu *iommu;
@@ -1648,7 +1650,7 @@ static void prealloc_protection_domains(void)
         }
 }
 
-static struct dma_mapping_ops amd_iommu_dma_ops = {
+static struct dma_map_ops amd_iommu_dma_ops = {
         .alloc_coherent = alloc_coherent,
         .free_coherent = free_coherent,
         .map_page = map_page,
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 756138b604e1..755c21e906f3 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -380,8 +380,9 @@ static inline struct iommu_table *find_iommu_table(struct device *dev)
         return tbl;
 }
 
-static void calgary_unmap_sg(struct device *dev,
-        struct scatterlist *sglist, int nelems, int direction)
+static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
+                             int nelems,enum dma_data_direction dir,
+                             struct dma_attrs *attrs)
 {
         struct iommu_table *tbl = find_iommu_table(dev);
         struct scatterlist *s;
@@ -404,7 +405,8 @@ static void calgary_unmap_sg(struct device *dev,
 }
 
 static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
-                          int nelems, int direction)
+                          int nelems, enum dma_data_direction dir,
+                          struct dma_attrs *attrs)
 {
         struct iommu_table *tbl = find_iommu_table(dev);
         struct scatterlist *s;
@@ -429,15 +431,14 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
                 s->dma_address = (entry << PAGE_SHIFT) | s->offset;
 
                 /* insert into HW table */
-                tce_build(tbl, entry, npages, vaddr & PAGE_MASK,
-                          direction);
+                tce_build(tbl, entry, npages, vaddr & PAGE_MASK, dir);
 
                 s->dma_length = s->length;
         }
 
         return nelems;
 error:
-        calgary_unmap_sg(dev, sg, nelems, direction);
+        calgary_unmap_sg(dev, sg, nelems, dir, NULL);
         for_each_sg(sg, s, nelems, i) {
                 sg->dma_address = bad_dma_address;
                 sg->dma_length = 0;
@@ -518,7 +519,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
         free_pages((unsigned long)vaddr, get_order(size));
 }
 
-static struct dma_mapping_ops calgary_dma_ops = {
+static struct dma_map_ops calgary_dma_ops = {
         .alloc_coherent = calgary_alloc_coherent,
         .free_coherent = calgary_free_coherent,
         .map_sg = calgary_map_sg,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 19a1044a0cd9..0d75c129b18a 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -12,7 +12,7 @@
 
 static int forbid_dac __read_mostly;
 
-struct dma_mapping_ops *dma_ops;
+struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 static int iommu_sac_force __read_mostly;
@@ -224,7 +224,7 @@ early_param("iommu", iommu_setup);
 
 int dma_supported(struct device *dev, u64 mask)
 {
-        struct dma_mapping_ops *ops = get_dma_ops(dev);
+        struct dma_map_ops *ops = get_dma_ops(dev);
 
 #ifdef CONFIG_PCI
         if (mask > 0xffffffff && forbid_dac > 0) {
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 9c557c0c928c..8cb3e45439cf 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -302,8 +302,8 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
 /*
  * Wrapper for pci_unmap_single working with scatterlists.
  */
-static void
-gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+                          enum dma_data_direction dir, struct dma_attrs *attrs)
 {
         struct scatterlist *s;
         int i;
@@ -333,7 +333,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                 addr = dma_map_area(dev, addr, s->length, dir, 0);
                 if (addr == bad_dma_address) {
                         if (i > 0)
-                                gart_unmap_sg(dev, sg, i, dir);
+                                gart_unmap_sg(dev, sg, i, dir, NULL);
                         nents = 0;
                         sg[0].dma_length = 0;
                         break;
@@ -404,8 +404,8 @@ dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
  * DMA map all entries in a scatterlist.
  * Merge chunks that have page aligned sizes into a continuous mapping.
  */
-static int
-gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+                       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
         struct scatterlist *s, *ps, *start_sg, *sgmap;
         int need = 0, nextneed, i, out, start;
@@ -472,7 +472,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 
 error:
         flush_gart();
-        gart_unmap_sg(dev, sg, out, dir);
+        gart_unmap_sg(dev, sg, out, dir, NULL);
 
         /* When it was forced or merged try again in a dumb way */
         if (force_iommu || iommu_merge) {
@@ -711,7 +711,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
         return -1;
 }
 
-static struct dma_mapping_ops gart_dma_ops = {
+static struct dma_map_ops gart_dma_ops = {
         .map_sg = gart_map_sg,
         .unmap_sg = gart_unmap_sg,
         .map_page = gart_map_page,
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index d42b69c90b40..fe50214db876 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -54,7 +54,8 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
  * the same here.
  */
 static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
-                        int nents, int direction)
+                        int nents, enum dma_data_direction dir,
+                        struct dma_attrs *attrs)
 {
         struct scatterlist *s;
         int i;
@@ -78,7 +79,7 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
         free_pages((unsigned long)vaddr, get_order(size));
 }
 
-struct dma_mapping_ops nommu_dma_ops = {
+struct dma_map_ops nommu_dma_ops = {
         .alloc_coherent = dma_generic_alloc_coherent,
         .free_coherent = nommu_free_coherent,
         .map_sg = nommu_map_sg,
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 3ae354c0fdef..3f0d9924dd1c 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -67,7 +67,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }
 
-struct dma_mapping_ops swiotlb_dma_ops = {
+struct dma_map_ops swiotlb_dma_ops = {
         .mapping_error = swiotlb_dma_mapping_error,
         .alloc_coherent = x86_swiotlb_alloc_coherent,
         .free_coherent = swiotlb_free_coherent,
@@ -77,8 +77,8 @@ struct dma_mapping_ops swiotlb_dma_ops = {
         .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
         .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
         .sync_sg_for_device = swiotlb_sync_sg_for_device,
-        .map_sg = swiotlb_map_sg,
-        .unmap_sg = swiotlb_unmap_sg,
+        .map_sg = swiotlb_map_sg_attrs,
+        .unmap_sg = swiotlb_unmap_sg_attrs,
         .map_page = swiotlb_map_page,
         .unmap_page = swiotlb_unmap_page,
         .dma_supported = NULL,
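
A note on the converted call sites above: internal callers such as the error paths of calgary_map_sg() and gart_map_sg() now pass NULL for the new attrs argument, and the swiotlb table points map_sg/unmap_sg at the *_attrs variants, which already take that parameter. Drivers calling the plain dma_map_sg() API are unaffected, since the arch-level wrapper dispatches through the ops table and supplies NULL itself. Roughly (a simplified sketch of the x86 wrapper, not the literal header contents):

static inline int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
                             int nents, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        /* Plain API: no attributes requested, so hand the ops a NULL dma_attrs. */
        return ops->map_sg(hwdev, sg, nents, dir, NULL);
}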