author:    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2006-11-11 01:25:17 -0500
committer: Paul Mackerras <paulus@samba.org>  2006-12-04 04:39:00 -0500
commit:    acfd946a1aaffdec346c2864f596d4d92125d1ad
tree:      c3947421aa5206039238fb7fb2bee2874ca831c1 /arch
parent:    c80d9133e99de1af607314107910a2a1645efb17
[POWERPC] Make cell use direct DMA ops
Now that the direct DMA ops support an offset, we use that instead of
defining our own.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
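
For background, the generic direct DMA ops form bus addresses by applying a
single platform-wide offset (dma_direct_offset). A minimal sketch of the idea,
with invented names and simplified signatures rather than the actual
arch/powerpc implementation:

    /* Sketch only: not the real dma_direct ops, whose functions take the
     * usual struct device / direction arguments.  The point is that the bus
     * address is the physical address combined with one platform-wide
     * offset, which is what the removed cell-specific ops did with their
     * private cell_dma_valid.
     */
    unsigned long dma_direct_offset;	/* 0 unless a platform sets it */

    static dma_addr_t sketch_direct_map_single(void *ptr, size_t size)
    {
    	/* the generic code may add or OR the offset in; for an offset
    	 * whose bits sit above all RAM the result is the same */
    	return virt_to_abs(ptr) + dma_direct_offset;
    }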
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c  |  79
1 file changed, 6 insertions(+), 73 deletions(-)
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index e3ea5311476e..6a97fe1319d0 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -46,8 +46,6 @@
 
 #include "iommu.h"
 
-static dma_addr_t cell_dma_valid = SPIDER_DMA_VALID;
-
 static inline unsigned long
 get_iopt_entry(unsigned long real_address, unsigned long ioid,
 	       unsigned long prot)
@@ -417,83 +415,18 @@ static int cell_map_iommu(void)
 	return 1;
 }
 
-static void *cell_alloc_coherent(struct device *hwdev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flag)
-{
-	void *ret;
-
-	ret = (void *)__get_free_pages(flag, get_order(size));
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = virt_to_abs(ret) | cell_dma_valid;
-	}
-	return ret;
-}
-
-static void cell_free_coherent(struct device *hwdev, size_t size,
-			 void *vaddr, dma_addr_t dma_handle)
-{
-	free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t cell_map_single(struct device *hwdev, void *ptr,
-		size_t size, enum dma_data_direction direction)
-{
-	return virt_to_abs(ptr) | cell_dma_valid;
-}
-
-static void cell_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
-		size_t size, enum dma_data_direction direction)
-{
-}
-
-static int cell_map_sg(struct device *hwdev, struct scatterlist *sg,
-		int nents, enum dma_data_direction direction)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		sg->dma_address = (page_to_phys(sg->page) + sg->offset)
-					| cell_dma_valid;
-		sg->dma_length = sg->length;
-	}
-
-	return nents;
-}
-
-static void cell_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-		int nents, enum dma_data_direction direction)
-{
-}
-
-static int cell_dma_supported(struct device *dev, u64 mask)
-{
-	return mask < 0x100000000ull;
-}
-
-static struct dma_mapping_ops cell_iommu_ops = {
-	.alloc_coherent = cell_alloc_coherent,
-	.free_coherent = cell_free_coherent,
-	.map_single = cell_map_single,
-	.unmap_single = cell_unmap_single,
-	.map_sg = cell_map_sg,
-	.unmap_sg = cell_unmap_sg,
-	.dma_supported = cell_dma_supported,
-};
-
 void cell_init_iommu(void)
 {
 	int setup_bus = 0;
 
-	/* If we have an Axon bridge, clear the DMA valid mask. This is fairly
-	 * hackish but will work well enough until we have proper iommu code.
-	 */
-	if (of_find_node_by_name(NULL, "axon"))
-		cell_dma_valid = 0;
-
 	if (of_find_node_by_path("/mambo")) {
 		pr_info("Not using iommu on systemsim\n");
 	} else {
+		/* If we don't have an Axon bridge, we assume we have a
+		 * spider which requires a DMA offset
+		 */
+		if (of_find_node_by_name(NULL, "axon") == NULL)
+			dma_direct_offset = SPIDER_DMA_VALID;
 
 		if (!(of_chosen &&
 		      get_property(of_chosen, "linux,iommu-off", NULL)))
@@ -509,5 +442,5 @@ void cell_init_iommu(void)
 		}
 	}
 
-	pci_dma_ops = &cell_iommu_ops;
+	pci_dma_ops = &dma_direct_ops;
 }
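
One detail worth noting: the removed cell ops OR'd cell_dma_valid into the bus
address, while a plain offset would normally be added. The two are
interchangeable as long as the offset's set bits sit above every physical RAM
address, which is presumably what lets the generic dma_direct_offset stand in
for the old mask. A small stand-alone illustration (the constant's value below
is made up for the demo, not the real SPIDER_DMA_VALID):

    #include <stdio.h>

    #define SPIDER_DMA_VALID_DEMO	0x1000000000000000ULL	/* made-up high value */

    int main(void)
    {
    	unsigned long long phys = 0x12345000ULL;	/* some RAM address */
    	unsigned long long or_style  = phys | SPIDER_DMA_VALID_DEMO;
    	unsigned long long add_style = phys + SPIDER_DMA_VALID_DEMO;

    	/* Both print the same bus address: OR and ADD agree because the
    	 * offset's set bits lie above every physical RAM address. */
    	printf("or:  %#llx\nadd: %#llx\n", or_style, add_style);
    	return 0;
    }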