diff options
author | Christoph Hellwig <hch@lst.de> | 2019-02-13 02:01:09 -0500 |
---|---|---|
committer | Michael Ellerman <mpe@ellerman.id.au> | 2019-02-18 06:41:02 -0500 |
commit | ba767b5283c06e1a2fcdd1835c33e42b8fccd09c (patch) | |
tree | 74c4b10a7457272bb8c13ca5be09b2a61fb0926f | |
parent | cc9c156db500bda1487e25b451f9ff4d8dbee2ad (diff) |
powerpc/cell: use the generic iommu bypass code
This gets rid of a lot of clumsy code and finally allows us to mark
dma_iommu_ops const.
Includes fixes from Michael Ellerman.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r-- | arch/powerpc/include/asm/dma-mapping.h | 2 | ||||
-rw-r--r-- | arch/powerpc/include/asm/iommu.h | 6 | ||||
-rw-r--r-- | arch/powerpc/kernel/dma-iommu.c | 7 | ||||
-rw-r--r-- | arch/powerpc/platforms/cell/iommu.c | 140 |
4 files changed, 20 insertions(+), 135 deletions(-)
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index ff86b863eceb..1d80174db8a4 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h | |||
@@ -74,7 +74,7 @@ static inline unsigned long device_to_mask(struct device *dev) | |||
74 | * Available generic sets of operations | 74 | * Available generic sets of operations |
75 | */ | 75 | */ |
76 | #ifdef CONFIG_PPC64 | 76 | #ifdef CONFIG_PPC64 |
77 | extern struct dma_map_ops dma_iommu_ops; | 77 | extern const struct dma_map_ops dma_iommu_ops; |
78 | #endif | 78 | #endif |
79 | extern const struct dma_map_ops dma_nommu_ops; | 79 | extern const struct dma_map_ops dma_nommu_ops; |
80 | 80 | ||
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index bd069a6542ab..6f00a892ebdf 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h | |||
@@ -319,5 +319,11 @@ extern void iommu_release_ownership(struct iommu_table *tbl); | |||
319 | extern enum dma_data_direction iommu_tce_direction(unsigned long tce); | 319 | extern enum dma_data_direction iommu_tce_direction(unsigned long tce); |
320 | extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir); | 320 | extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir); |
321 | 321 | ||
322 | #ifdef CONFIG_PPC_CELL_NATIVE | ||
323 | extern bool iommu_fixed_is_weak; | ||
324 | #else | ||
325 | #define iommu_fixed_is_weak false | ||
326 | #endif | ||
327 | |||
322 | #endif /* __KERNEL__ */ | 328 | #endif /* __KERNEL__ */ |
323 | #endif /* _ASM_IOMMU_H */ | 329 | #endif /* _ASM_IOMMU_H */ |
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index fda92156b194..5a0b5e863b08 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c | |||
@@ -20,14 +20,15 @@ | |||
20 | */ | 20 | */ |
21 | static inline bool dma_iommu_alloc_bypass(struct device *dev) | 21 | static inline bool dma_iommu_alloc_bypass(struct device *dev) |
22 | { | 22 | { |
23 | return dev->archdata.iommu_bypass && | 23 | return dev->archdata.iommu_bypass && !iommu_fixed_is_weak && |
24 | dma_nommu_dma_supported(dev, dev->coherent_dma_mask); | 24 | dma_nommu_dma_supported(dev, dev->coherent_dma_mask); |
25 | } | 25 | } |
26 | 26 | ||
27 | static inline bool dma_iommu_map_bypass(struct device *dev, | 27 | static inline bool dma_iommu_map_bypass(struct device *dev, |
28 | unsigned long attrs) | 28 | unsigned long attrs) |
29 | { | 29 | { |
30 | return dev->archdata.iommu_bypass; | 30 | return dev->archdata.iommu_bypass && |
31 | (!iommu_fixed_is_weak || (attrs & DMA_ATTR_WEAK_ORDERING)); | ||
31 | } | 32 | } |
32 | 33 | ||
33 | /* Allocates a contiguous real buffer and creates mappings over it. | 34 | /* Allocates a contiguous real buffer and creates mappings over it. |
@@ -163,7 +164,7 @@ u64 dma_iommu_get_required_mask(struct device *dev) | |||
163 | return mask; | 164 | return mask; |
164 | } | 165 | } |
165 | 166 | ||
166 | struct dma_map_ops dma_iommu_ops = { | 167 | const struct dma_map_ops dma_iommu_ops = { |
167 | .alloc = dma_iommu_alloc_coherent, | 168 | .alloc = dma_iommu_alloc_coherent, |
168 | .free = dma_iommu_free_coherent, | 169 | .free = dma_iommu_free_coherent, |
169 | .mmap = dma_nommu_mmap_coherent, | 170 | .mmap = dma_nommu_mmap_coherent, |
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 4c609c0db5af..6663cd3e6bb6 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c | |||
@@ -546,7 +546,7 @@ static unsigned long cell_dma_nommu_offset; | |||
546 | static unsigned long dma_iommu_fixed_base; | 546 | static unsigned long dma_iommu_fixed_base; |
547 | 547 | ||
548 | /* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */ | 548 | /* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */ |
549 | static int iommu_fixed_is_weak; | 549 | bool iommu_fixed_is_weak; |
550 | 550 | ||
551 | static struct iommu_table *cell_get_iommu_table(struct device *dev) | 551 | static struct iommu_table *cell_get_iommu_table(struct device *dev) |
552 | { | 552 | { |
@@ -568,94 +568,6 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev) | |||
568 | return &window->table; | 568 | return &window->table; |
569 | } | 569 | } |
570 | 570 | ||
571 | /* A coherent allocation implies strong ordering */ | ||
572 | |||
573 | static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, | ||
574 | dma_addr_t *dma_handle, gfp_t flag, | ||
575 | unsigned long attrs) | ||
576 | { | ||
577 | if (iommu_fixed_is_weak) | ||
578 | return iommu_alloc_coherent(dev, cell_get_iommu_table(dev), | ||
579 | size, dma_handle, | ||
580 | device_to_mask(dev), flag, | ||
581 | dev_to_node(dev)); | ||
582 | else | ||
583 | return dma_nommu_ops.alloc(dev, size, dma_handle, flag, | ||
584 | attrs); | ||
585 | } | ||
586 | |||
587 | static void dma_fixed_free_coherent(struct device *dev, size_t size, | ||
588 | void *vaddr, dma_addr_t dma_handle, | ||
589 | unsigned long attrs) | ||
590 | { | ||
591 | if (iommu_fixed_is_weak) | ||
592 | iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, | ||
593 | dma_handle); | ||
594 | else | ||
595 | dma_nommu_ops.free(dev, size, vaddr, dma_handle, attrs); | ||
596 | } | ||
597 | |||
598 | static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, | ||
599 | unsigned long offset, size_t size, | ||
600 | enum dma_data_direction direction, | ||
601 | unsigned long attrs) | ||
602 | { | ||
603 | if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) | ||
604 | return dma_nommu_ops.map_page(dev, page, offset, size, | ||
605 | direction, attrs); | ||
606 | else | ||
607 | return iommu_map_page(dev, cell_get_iommu_table(dev), page, | ||
608 | offset, size, device_to_mask(dev), | ||
609 | direction, attrs); | ||
610 | } | ||
611 | |||
612 | static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr, | ||
613 | size_t size, enum dma_data_direction direction, | ||
614 | unsigned long attrs) | ||
615 | { | ||
616 | if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) | ||
617 | dma_nommu_ops.unmap_page(dev, dma_addr, size, direction, | ||
618 | attrs); | ||
619 | else | ||
620 | iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size, | ||
621 | direction, attrs); | ||
622 | } | ||
623 | |||
624 | static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg, | ||
625 | int nents, enum dma_data_direction direction, | ||
626 | unsigned long attrs) | ||
627 | { | ||
628 | if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) | ||
629 | return dma_nommu_ops.map_sg(dev, sg, nents, direction, attrs); | ||
630 | else | ||
631 | return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg, | ||
632 | nents, device_to_mask(dev), | ||
633 | direction, attrs); | ||
634 | } | ||
635 | |||
636 | static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
637 | int nents, enum dma_data_direction direction, | ||
638 | unsigned long attrs) | ||
639 | { | ||
640 | if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) | ||
641 | dma_nommu_ops.unmap_sg(dev, sg, nents, direction, attrs); | ||
642 | else | ||
643 | ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, | ||
644 | direction, attrs); | ||
645 | } | ||
646 | |||
647 | static int dma_suported_and_switch(struct device *dev, u64 dma_mask); | ||
648 | |||
649 | static const struct dma_map_ops dma_iommu_fixed_ops = { | ||
650 | .alloc = dma_fixed_alloc_coherent, | ||
651 | .free = dma_fixed_free_coherent, | ||
652 | .map_sg = dma_fixed_map_sg, | ||
653 | .unmap_sg = dma_fixed_unmap_sg, | ||
654 | .dma_supported = dma_suported_and_switch, | ||
655 | .map_page = dma_fixed_map_page, | ||
656 | .unmap_page = dma_fixed_unmap_page, | ||
657 | }; | ||
658 | |||
659 | static u64 cell_iommu_get_fixed_address(struct device *dev); | 571 | static u64 cell_iommu_get_fixed_address(struct device *dev); |
660 | 572 | ||
661 | static void cell_dma_dev_setup(struct device *dev) | 573 | static void cell_dma_dev_setup(struct device *dev) |
@@ -956,22 +868,10 @@ out: | |||
956 | return dev_addr; | 868 | return dev_addr; |
957 | } | 869 | } |
958 | 870 | ||
959 | static int dma_suported_and_switch(struct device *dev, u64 dma_mask) | 871 | static bool cell_pci_iommu_bypass_supported(struct pci_dev *pdev, u64 mask) |
960 | { | 872 | { |
961 | if (dma_mask == DMA_BIT_MASK(64) && | 873 | return mask == DMA_BIT_MASK(64) && |
962 | cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) { | 874 | cell_iommu_get_fixed_address(&pdev->dev) != OF_BAD_ADDR; |
963 | dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n"); | ||
964 | set_dma_ops(dev, &dma_iommu_fixed_ops); | ||
965 | return 1; | ||
966 | } | ||
967 | |||
968 | if (dma_iommu_dma_supported(dev, dma_mask)) { | ||
969 | dev_dbg(dev, "iommu: not 64-bit, using default ops\n"); | ||
970 | set_dma_ops(dev, &dma_iommu_ops); | ||
971 | return 1; | ||
972 | } | ||
973 | |||
974 | return 0; | ||
975 | } | 875 | } |
976 | 876 | ||
977 | static void insert_16M_pte(unsigned long addr, unsigned long *ptab, | 877 | static void insert_16M_pte(unsigned long addr, unsigned long *ptab, |
@@ -1125,9 +1025,8 @@ static int __init cell_iommu_fixed_mapping_init(void) | |||
1125 | cell_iommu_setup_window(iommu, np, dbase, dsize, 0); | 1025 | cell_iommu_setup_window(iommu, np, dbase, dsize, 0); |
1126 | } | 1026 | } |
1127 | 1027 | ||
1128 | dma_iommu_ops.dma_supported = dma_suported_and_switch; | 1028 | cell_pci_controller_ops.iommu_bypass_supported = |
1129 | set_pci_dma_ops(&dma_iommu_ops); | 1029 | cell_pci_iommu_bypass_supported; |
1130 | |||
1131 | return 0; | 1030 | return 0; |
1132 | } | 1031 | } |
1133 | 1032 | ||
@@ -1148,7 +1047,7 @@ static int __init setup_iommu_fixed(char *str) | |||
1148 | pciep = of_find_node_by_type(NULL, "pcie-endpoint"); | 1047 | pciep = of_find_node_by_type(NULL, "pcie-endpoint"); |
1149 | 1048 | ||
1150 | if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) | 1049 | if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) |
1151 | iommu_fixed_is_weak = DMA_ATTR_WEAK_ORDERING; | 1050 | iommu_fixed_is_weak = true; |
1152 | 1051 | ||
1153 | of_node_put(pciep); | 1052 | of_node_put(pciep); |
1154 | 1053 | ||
@@ -1156,26 +1055,6 @@ static int __init setup_iommu_fixed(char *str) | |||
1156 | } | 1055 | } |
1157 | __setup("iommu_fixed=", setup_iommu_fixed); | 1056 | __setup("iommu_fixed=", setup_iommu_fixed); |
1158 | 1057 | ||
1159 | static u64 cell_dma_get_required_mask(struct device *dev) | ||
1160 | { | ||
1161 | const struct dma_map_ops *dma_ops; | ||
1162 | |||
1163 | if (!dev->dma_mask) | ||
1164 | return 0; | ||
1165 | |||
1166 | if (!iommu_fixed_disabled && | ||
1167 | cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) | ||
1168 | return DMA_BIT_MASK(64); | ||
1169 | |||
1170 | dma_ops = get_dma_ops(dev); | ||
1171 | if (dma_ops->get_required_mask) | ||
1172 | return dma_ops->get_required_mask(dev); | ||
1173 | |||
1174 | WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops); | ||
1175 | |||
1176 | return DMA_BIT_MASK(64); | ||
1177 | } | ||
1178 | |||
1179 | static int __init cell_iommu_init(void) | 1058 | static int __init cell_iommu_init(void) |
1180 | { | 1059 | { |
1181 | struct device_node *np; | 1060 | struct device_node *np; |
@@ -1192,10 +1071,9 @@ static int __init cell_iommu_init(void) | |||
1192 | 1071 | ||
1193 | /* Setup various callbacks */ | 1072 | /* Setup various callbacks */ |
1194 | cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup; | 1073 | cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup; |
1195 | ppc_md.dma_get_required_mask = cell_dma_get_required_mask; | ||
1196 | 1074 | ||
1197 | if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0) | 1075 | if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0) |
1198 | goto bail; | 1076 | goto done; |
1199 | 1077 | ||
1200 | /* Create an iommu for each /axon node. */ | 1078 | /* Create an iommu for each /axon node. */ |
1201 | for_each_node_by_name(np, "axon") { | 1079 | for_each_node_by_name(np, "axon") { |
@@ -1212,7 +1090,7 @@ static int __init cell_iommu_init(void) | |||
1212 | continue; | 1090 | continue; |
1213 | cell_iommu_init_one(np, SPIDER_DMA_OFFSET); | 1091 | cell_iommu_init_one(np, SPIDER_DMA_OFFSET); |
1214 | } | 1092 | } |
1215 | 1093 | done: | |
1216 | /* Setup default PCI iommu ops */ | 1094 | /* Setup default PCI iommu ops */ |
1217 | set_pci_dma_ops(&dma_iommu_ops); | 1095 | set_pci_dma_ops(&dma_iommu_ops); |
1218 | 1096 | ||