author     Michael Ellerman <michael@ellerman.id.au>   2008-02-29 02:33:27 -0500
committer  Arnd Bergmann <arnd@arndb.de>               2008-03-03 02:03:15 -0500
commit     225d49050f9b6506f2f9df6b40e591ee93939d11 (patch)
tree       f65a8146f67fec193842606705b95940e1923779 /arch
parent     3d3e6da17d6af42a3fd4891fb09d93dca002e590 (diff)
[POWERPC] Allow for different IOMMU page sizes in cell IOMMU code
Make some preliminary changes to cell_iommu_alloc_ptab() to allow it to take
the page size as a parameter rather than assuming IOMMU_PAGE_SIZE.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 31
1 file changed, 18 insertions(+), 13 deletions(-)
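
Before the diff itself, a quick illustration of what the parameterization buys: the
page-table sizing now depends on an explicit page shift rather than the compile-time
IOMMU_PAGE_SIZE. The standalone sketch below is not kernel code; the ptab_size_for()
helper, the example 1GB window and the printf output are illustrative additions. It
mirrors the sizing arithmetic the patch introduces, including the clamp that keeps each
segment's PTEs starting on a 4K boundary.

/* Standalone illustration of the page-table sizing math introduced by this
 * patch. Not the kernel code itself; constants and printing are only here to
 * show how ptab_size scales with the IOMMU page shift.
 */
#include <stdio.h>

#define IO_SEGMENT_SHIFT        28
#define IO_PAGENO_BITS(shift)   (IO_SEGMENT_SHIFT - (shift))

static unsigned long ptab_size_for(unsigned long size, unsigned long page_shift)
{
        unsigned long segments = size >> IO_SEGMENT_SHIFT;
        unsigned long pages_per_segment = 1ul << IO_PAGENO_BITS(page_shift);

        /* PTEs for each segment must start on a 4K boundary */
        if (pages_per_segment < (1 << 12) / sizeof(unsigned long))
                pages_per_segment = (1 << 12) / sizeof(unsigned long);

        return segments * pages_per_segment * sizeof(unsigned long);
}

int main(void)
{
        unsigned long size = 1ul << 30; /* a 1GB DMA window, for example */

        /* 4K IOMMU pages (shift 12) vs 64K IOMMU pages (shift 16) */
        printf("4K pages:  ptab_size = %lu bytes\n", ptab_size_for(size, 12));
        printf("64K pages: ptab_size = %lu bytes\n", ptab_size_for(size, 16));
        return 0;
}

With these example numbers, a 1GB window needs a 2MB page table at a 4K IOMMU page size
but only 128KB at 64K, which is the motivation for passing the shift down to
cell_iommu_alloc_ptab().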
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 7a861cb960d2..b0e347e4933a 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -113,7 +113,7 @@
 
 /* IOMMU sizing */
 #define IO_SEGMENT_SHIFT        28
-#define IO_PAGENO_BITS          (IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT)
+#define IO_PAGENO_BITS(shift)   (IO_SEGMENT_SHIFT - (shift))
 
 /* The high bit needs to be set on every DMA address */
 #define SPIDER_DMA_OFFSET       0x80000000ul
@@ -328,7 +328,7 @@ static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
 
 static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
                 unsigned long base, unsigned long size, unsigned long gap_base,
-                unsigned long gap_size)
+                unsigned long gap_size, unsigned long page_shift)
 {
         struct page *page;
         int i;
@@ -337,7 +337,10 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
 
         start_seg = base >> IO_SEGMENT_SHIFT;
         segments  = size >> IO_SEGMENT_SHIFT;
-        pages_per_segment = 1ull << IO_PAGENO_BITS;
+        pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
+        /* PTEs for each segment must start on a 4K bounday */
+        pages_per_segment = max(pages_per_segment,
+                                (1 << 12) / sizeof(unsigned long));
 
         ptab_size = segments * pages_per_segment * sizeof(unsigned long);
         pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
@@ -358,13 +361,12 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
         /* initialise the STEs */
         reg = IOSTE_V | ((n_pte_pages - 1) << 5);
 
-        if (IOMMU_PAGE_SIZE == 0x1000)
-                reg |= IOSTE_PS_4K;
-        else if (IOMMU_PAGE_SIZE == 0x10000)
-                reg |= IOSTE_PS_64K;
-        else {
-                extern void __unknown_page_size_error(void);
-                __unknown_page_size_error();
+        switch (page_shift) {
+        case 12: reg |= IOSTE_PS_4K;  break;
+        case 16: reg |= IOSTE_PS_64K; break;
+        case 20: reg |= IOSTE_PS_1M;  break;
+        case 24: reg |= IOSTE_PS_16M; break;
+        default: BUG();
         }
 
         gap_base = gap_base >> IO_SEGMENT_SHIFT;
@@ -429,7 +431,8 @@ static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
                         unsigned long base, unsigned long size)
 {
         cell_iommu_setup_stab(iommu, base, size, 0, 0);
-        iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0);
+        iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
+                                            IOMMU_PAGE_SHIFT);
         cell_iommu_enable_hardware(iommu);
 }
 
@@ -886,7 +889,8 @@ static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
         int i;
         unsigned long base_pte, uaddr, *io_pte, *ptab;
 
-        ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize);
+        ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize,
+                                     IOMMU_PAGE_SHIFT);
 
         dma_iommu_fixed_base = fbase;
 
@@ -1008,7 +1012,8 @@ static int __init cell_iommu_fixed_mapping_init(void)
                         dbase + dsize, fbase, fbase + fsize);
 
         cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
-        iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0);
+        iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
+                                            IOMMU_PAGE_SHIFT);
         cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
                                     fbase, fsize);
         cell_iommu_enable_hardware(iommu);
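
As a closing note (not part of the commit), the new switch maps the IOMMU page shift
directly to the segment-table entry's page-size field. A minimal user-space sketch of
that selection, with assert() standing in for BUG() and with placeholder IOSTE_PS_*
values (the real encodings are defined in arch/powerpc/platforms/cell/iommu.c), might
look like:

/* Minimal sketch of the page_shift -> STE page-size selection added above.
 * The IOSTE_PS_* values are placeholders for this example only, not the
 * real hardware encodings.
 */
#include <assert.h>
#include <stdio.h>

#define IOSTE_PS_4K     0x1ul   /* placeholder */
#define IOSTE_PS_64K    0x3ul   /* placeholder */
#define IOSTE_PS_1M     0x5ul   /* placeholder */
#define IOSTE_PS_16M    0x7ul   /* placeholder */

static unsigned long ioste_ps_bits(unsigned long page_shift)
{
        switch (page_shift) {
        case 12: return IOSTE_PS_4K;    /* 4K pages  */
        case 16: return IOSTE_PS_64K;   /* 64K pages */
        case 20: return IOSTE_PS_1M;    /* 1M pages  */
        case 24: return IOSTE_PS_16M;   /* 16M pages */
        default: assert(0 && "unsupported IOMMU page shift");
        }
        return 0;
}

int main(void)
{
        printf("shift 16 -> STE page-size bits 0x%lx\n", ioste_ps_bits(16));
        return 0;
}

All three callers still pass IOMMU_PAGE_SHIFT at this point, so behaviour is unchanged;
the commit message describes this as preliminary work so that later callers can request
a different page size.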