author    David S. Miller <davem@davemloft.net>    2012-05-13 16:57:05 -0400
committer David S. Miller <davem@davemloft.net>    2012-05-13 16:57:05 -0400
commit    d894d964ff7ddf5a81a5b150fee46caf99619f26
tree      cb34a4346407f98e2d2d0ec272b3aeb6f7b84ab5    /arch/sparc/mm/iommu.c
parent    679bea5e438df70b5d4348fd2da4501aaeacebe0
sparc32: Convert mmu_* interfaces from btfixup to method ops.
This set of changes displays one major danger of btfixup: interface signatures are not always fully type-checked. As seen here, the iounit variant of the map_dma_area routine had an incorrect type for one of its arguments. It turns out to be harmless in this case, but just imagine trying to debug something involving this kind of problem. No thanks.

Signed-off-by: David S. Miller <davem@davemloft.net>
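As an aside, a minimal, self-contained sketch of the "method ops" pattern the patch moves to, using made-up names (demo_map_ops, demo_map_dma_area) rather than the kernel's real sparc32_dma_ops declaration, shows why the compiler can now catch the kind of signature mismatch described above:

/*
 * Toy illustration only: each member of a const ops struct is checked
 * against the prototype in the struct definition, so a function with a
 * mismatched argument type draws an incompatible-pointer-type
 * diagnostic at build time instead of slipping through, which is the
 * btfixup hazard the commit message describes.
 */
#include <stdio.h>

struct demo_map_ops {
	/* len is deliberately unsigned long here ... */
	void (*map_dma_area)(void *dev, unsigned long va, unsigned long len);
};

/* ... so an implementation declaring "int len" instead would not match
 * the member's prototype and the compiler would complain below. */
static void demo_map_dma_area(void *dev, unsigned long va, unsigned long len)
{
	(void)dev;
	printf("mapping 0x%lx, %lu bytes\n", va, len);
}

static const struct demo_map_ops demo_ops = {
	.map_dma_area = demo_map_dma_area,	/* type-checked at build time */
};

int main(void)
{
	demo_ops.map_dma_area(NULL, 0x1000, 4096);
	return 0;
}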
Diffstat (limited to 'arch/sparc/mm/iommu.c')
 arch/sparc/mm/iommu.c | 49 ++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 36 insertions(+), 13 deletions(-)
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 349ba83f1789..c64f81e370aa 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -426,29 +426,52 @@ static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len
 }
 #endif
 
+static const struct sparc32_dma_ops iommu_dma_noflush_ops = {
+	.get_scsi_one = iommu_get_scsi_one_noflush,
+	.get_scsi_sgl = iommu_get_scsi_sgl_noflush,
+	.release_scsi_one = iommu_release_scsi_one,
+	.release_scsi_sgl = iommu_release_scsi_sgl,
+#ifdef CONFIG_SBUS
+	.map_dma_area = iommu_map_dma_area,
+	.unmap_dma_area = iommu_unmap_dma_area,
+#endif
+};
+
+static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
+	.get_scsi_one = iommu_get_scsi_one_gflush,
+	.get_scsi_sgl = iommu_get_scsi_sgl_gflush,
+	.release_scsi_one = iommu_release_scsi_one,
+	.release_scsi_sgl = iommu_release_scsi_sgl,
+#ifdef CONFIG_SBUS
+	.map_dma_area = iommu_map_dma_area,
+	.unmap_dma_area = iommu_unmap_dma_area,
+#endif
+};
+
+static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
+	.get_scsi_one = iommu_get_scsi_one_pflush,
+	.get_scsi_sgl = iommu_get_scsi_sgl_pflush,
+	.release_scsi_one = iommu_release_scsi_one,
+	.release_scsi_sgl = iommu_release_scsi_sgl,
+#ifdef CONFIG_SBUS
+	.map_dma_area = iommu_map_dma_area,
+	.unmap_dma_area = iommu_unmap_dma_area,
+#endif
+};
+
 void __init ld_mmu_iommu(void)
 {
 	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
 
 	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
 		/* IO coherent chip */
-		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
-		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
+		sparc32_dma_ops = &iommu_dma_noflush_ops;
 	} else if (flush_page_for_dma_global) {
 		/* flush_page_for_dma flushes everything, no matter of what page is it */
-		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
+		sparc32_dma_ops = &iommu_dma_gflush_ops;
 	} else {
-		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
+		sparc32_dma_ops = &iommu_dma_pflush_ops;
 	}
-	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);
-
-#ifdef CONFIG_SBUS
-	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
-#endif
 
 	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
 		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
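For illustration, a hedged sketch of the caller side: once an init routine has picked an ops table, the way ld_mmu_iommu() does above, everything else dispatches through a single pointer. The struct layout, argument types, and the standalone main() are assumptions made for this example; only the field names echo the diff.

/*
 * Toy model of dispatch through a global ops pointer (a stand-in for
 * something like sparc32_dma_ops): select an implementation once at
 * init time, then call through the pointer everywhere else.
 */
#include <stdio.h>

struct demo_scsi_ops {
	unsigned int (*get_scsi_one)(void *dev, char *vaddr, unsigned long len);
	void (*release_scsi_one)(void *dev, unsigned int busa, unsigned long len);
};

static unsigned int noflush_get_scsi_one(void *dev, char *vaddr, unsigned long len)
{
	(void)dev; (void)vaddr;
	printf("map %lu bytes, no flush needed\n", len);
	return 0x10000;				/* pretend bus address */
}

static void noflush_release_scsi_one(void *dev, unsigned int busa, unsigned long len)
{
	(void)dev;
	printf("release bus addr 0x%x (%lu bytes)\n", busa, len);
}

static const struct demo_scsi_ops demo_noflush_ops = {
	.get_scsi_one     = noflush_get_scsi_one,
	.release_scsi_one = noflush_release_scsi_one,
};

/* Stand-in for the global ops pointer. */
static const struct demo_scsi_ops *demo_dma_ops;

int main(void)
{
	demo_dma_ops = &demo_noflush_ops;	/* one-time selection at init */

	unsigned int busa = demo_dma_ops->get_scsi_one(NULL, NULL, 512);
	demo_dma_ops->release_scsi_one(NULL, busa, 512);
	return 0;
}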