author	Michael Ellerman <michael@ellerman.id.au>	2008-02-29 02:33:29 -0500
committer	Arnd Bergmann <arnd@arndb.de>	2008-03-03 02:03:15 -0500
commit	da40451bba23b51eaca4170a095891646ce72104 (patch)
tree	f4f6c9687539da120e30df1955aa3bce458a8be7
parent	225d49050f9b6506f2f9df6b40e591ee93939d11 (diff)
[POWERPC] Convert the cell IOMMU fixed mapping to 16M IOMMU pages
The only tricky part is that we need to adjust the PTE insertion loop to
cater for holes in the page table. The PTEs for each segment start on a
4K boundary, so with 16M pages we have 16 PTEs per segment and then a gap
to the next 4K page boundary.

It might be possible to allocate the PTEs for each segment separately,
saving the memory currently filling the gaps. However we'd need to check
that's OK with the hardware, and that it actually saves memory.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
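To make the hole layout concrete, here is a minimal user-space sketch (not
part of the patch) of the indexing that the new insert_16M_pte() performs.
It assumes IO_SEGMENT_SHIFT is 28 (256MB I/O segments) and that
IO_PAGENO_BITS(shift) expands to (IO_SEGMENT_SHIFT - shift), as in the cell
iommu.c of this series; under those assumptions a segment holds 16 of the
16M pages, and each segment's PTEs start a new 512-entry (4K) stride in the
page table.

	#include <stdio.h>

	#define IO_SEGMENT_SHIFT	28	/* assumed: 256MB I/O segments */
	#define PAGE_SHIFT_16M		24	/* 16M IOMMU pages */

	int main(void)
	{
		unsigned long addr, segment, offset, index;

		/* walk two segments' worth of 16M pages */
		for (addr = 0; addr < (2UL << IO_SEGMENT_SHIFT);
		     addr += 1UL << PAGE_SHIFT_16M) {
			segment = addr >> IO_SEGMENT_SHIFT;
			/* page number within the segment, 0..15, same arithmetic
			 * as insert_16M_pte() with IO_PAGENO_BITS(24) == 4 */
			offset = (addr >> PAGE_SHIFT_16M)
				- (segment << (IO_SEGMENT_SHIFT - PAGE_SHIFT_16M));
			/* each segment's PTEs start on a 4K boundary, i.e. at a
			 * stride of 4096 / sizeof(unsigned long) == 512 entries */
			index = segment * ((1 << 12) / sizeof(unsigned long)) + offset;
			printf("addr %#010lx -> segment %lu offset %2lu index %4lu\n",
			       addr, segment, offset, index);
		}
		return 0;
	}

The printed index jumps from 15 straight to 512 at the segment boundary;
the 496 skipped slots are exactly the gap the commit message describes.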
-rw-r--r--	arch/powerpc/platforms/cell/iommu.c	37
1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index b0e347e4933a..20ea0e118f24 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -882,38 +882,45 @@ static void cell_dma_dev_setup_fixed(struct device *dev)
 	dev_dbg(dev, "iommu: fixed addr = %lx\n", addr);
 }
 
+static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
+	unsigned long base_pte)
+{
+	unsigned long segment, offset;
+
+	segment = addr >> IO_SEGMENT_SHIFT;
+	offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
+	ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));
+
+	pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
+		addr, ptab, segment, offset);
+
+	ptab[offset] = base_pte | (__pa(addr) & IOPTE_RPN_Mask);
+}
+
 static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
 	struct device_node *np, unsigned long dbase, unsigned long dsize,
 	unsigned long fbase, unsigned long fsize)
 {
-	int i;
-	unsigned long base_pte, uaddr, *io_pte, *ptab;
+	unsigned long base_pte, uaddr, ioaddr, *ptab;
 
-	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize,
-		IOMMU_PAGE_SHIFT);
+	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);
 
 	dma_iommu_fixed_base = fbase;
 
-	/* convert from bytes into page table indices */
-	dbase = dbase >> IOMMU_PAGE_SHIFT;
-	dsize = dsize >> IOMMU_PAGE_SHIFT;
-	fbase = fbase >> IOMMU_PAGE_SHIFT;
-	fsize = fsize >> IOMMU_PAGE_SHIFT;
-
 	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
 
-	io_pte = ptab;
 	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
 		| (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);
 
-	uaddr = 0;
-	for (i = fbase; i < fbase + fsize; i++, uaddr += IOMMU_PAGE_SIZE) {
+	for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
 		/* Don't touch the dynamic region */
-		if (i >= dbase && i < (dbase + dsize)) {
+		ioaddr = uaddr + fbase;
+		if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
 			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
 			continue;
 		}
-		io_pte[i - fbase] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+
+		insert_16M_pte(uaddr, ptab, base_pte);
 	}
 
 	mb();
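For a rough sense of the memory filling those gaps (same assumed constants
as the sketch above): each 256MB segment needs 16 PTEs of 8 bytes, i.e.
128 bytes, but its PTEs consume a whole 4K page of the table, so roughly
97% of each ptab page is padding. A 4GB fixed window spans 16 segments and
therefore takes 64KB of page table where 2KB of PTEs would suffice, which
is the saving the per-segment allocation mentioned in the commit message
might recover.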