Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/platforms/cell/iommu.c	69
1 file changed, 43 insertions, 26 deletions
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 8e57e1af3785..187a723eafcd 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -306,50 +306,54 @@ static int cell_iommu_find_ioc(int nid, unsigned long *base)
 	return -ENODEV;
 }
 
-static void cell_iommu_setup_page_tables(struct cbe_iommu *iommu,
+static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
 		unsigned long dbase, unsigned long dsize,
 		unsigned long fbase, unsigned long fsize)
 {
 	struct page *page;
-	int i;
-	unsigned long reg, segments, pages_per_segment, ptab_size, stab_size,
-		      n_pte_pages, base;
-
-	base = dbase;
-	if (fsize != 0)
-		base = min(fbase, dbase);
+	unsigned long segments, stab_size;
 
 	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;
-	pages_per_segment = 1ull << IO_PAGENO_BITS;
 
-	pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n",
-			__FUNCTION__, iommu->nid, segments, pages_per_segment);
+	pr_debug("%s: iommu[%d]: segments: %lu\n",
+			__FUNCTION__, iommu->nid, segments);
 
 	/* set up the segment table */
 	stab_size = segments * sizeof(unsigned long);
 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
 	BUG_ON(!page);
 	iommu->stab = page_address(page);
-	clear_page(iommu->stab);
+	memset(iommu->stab, 0, stab_size);
+}
+
+static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
+		unsigned long base, unsigned long size, unsigned long gap_base,
+		unsigned long gap_size)
+{
+	struct page *page;
+	int i;
+	unsigned long reg, segments, pages_per_segment, ptab_size,
+		      n_pte_pages, start_seg, *ptab;
+
+	start_seg = base >> IO_SEGMENT_SHIFT;
+	segments = size >> IO_SEGMENT_SHIFT;
+	pages_per_segment = 1ull << IO_PAGENO_BITS;
 
-	/* ... and the page tables. Since these are contiguous, we can treat
-	 * the page tables as one array of ptes, like pSeries does.
-	 */
 	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
 	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
 			iommu->nid, ptab_size, get_order(ptab_size));
 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
 	BUG_ON(!page);
 
-	iommu->ptab = page_address(page);
-	memset(iommu->ptab, 0, ptab_size);
+	ptab = page_address(page);
+	memset(ptab, 0, ptab_size);
 
 	/* number of pages needed for a page table */
 	n_pte_pages = (pages_per_segment *
 		      sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;
 
 	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
-			__FUNCTION__, iommu->nid, iommu->stab, iommu->ptab,
+			__FUNCTION__, iommu->nid, iommu->stab, ptab,
 			n_pte_pages);
 
 	/* initialise the STEs */
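
To make the sizing arithmetic easier to follow, here is a minimal userspace sketch of the calculations the two new functions perform. The constant values (256MB I/O segments, 4K IOMMU pages) and the window size are illustrative assumptions, not the kernel's definitions:

#include <stdio.h>

#define IO_SEGMENT_SHIFT	28	/* assumed: 256MB per I/O segment */
#define IOMMU_PAGE_SHIFT	12	/* assumed: 4K IOMMU pages */
#define IO_PAGENO_BITS		(IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT)

int main(void)
{
	unsigned long size = 0x20000000;	/* hypothetical 512MB window */
	unsigned long segments = size >> IO_SEGMENT_SHIFT;
	unsigned long pages_per_segment = 1ull << IO_PAGENO_BITS;

	/* one STE per segment, one PTE per IOMMU page,
	 * each sizeof(unsigned long) (8 bytes on ppc64) */
	unsigned long stab_size = segments * sizeof(unsigned long);
	unsigned long ptab_size = segments * pages_per_segment *
					sizeof(unsigned long);

	printf("segments: %lu, pages per segment: %lu\n",
			segments, pages_per_segment);
	printf("stab: %lu bytes, ptab: %lu bytes\n", stab_size, ptab_size);
	return 0;
}

Note the asymmetry visible in the patch: the stab is sized from the end of the highest window, max(dbase + dsize, fbase + fsize), while each ptab covers only its own window's size.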
@@ -364,12 +368,21 @@ static void cell_iommu_setup_page_tables(struct cbe_iommu *iommu,
 		__unknown_page_size_error();
 	}
 
+	gap_base = gap_base >> IO_SEGMENT_SHIFT;
+	gap_size = gap_size >> IO_SEGMENT_SHIFT;
+
 	pr_debug("Setting up IOMMU stab:\n");
-	for (i = base >> IO_SEGMENT_SHIFT; i < segments; i++) {
-		iommu->stab[i] = reg |
-			(__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
+	for (i = start_seg; i < (start_seg + segments); i++) {
+		if (i >= gap_base && i < (gap_base + gap_size)) {
+			pr_debug("\toverlap at %d, skipping\n", i);
+			continue;
+		}
+		iommu->stab[i] = reg | (__pa(ptab) + n_pte_pages *
+					IOMMU_PAGE_SIZE * (i - start_seg));
 		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
 	}
+
+	return ptab;
 }
 
 static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
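
The gap parameters let a second ptab be laid over a range whose middle is already claimed: STEs inside [gap_base, gap_base + gap_size) are left untouched, so an earlier window's entries are not overwritten. A standalone model of the loop, with hypothetical window and gap addresses and the same assumed segment shift as above:

#include <stdio.h>

#define IO_SEGMENT_SHIFT	28	/* assumed: 256MB per I/O segment */

int main(void)
{
	/* hypothetical window covering 0x80000000-0xffffffff, with a
	 * pre-existing window (the gap) at 0xa0000000-0xbfffffff */
	unsigned long base = 0x80000000ul, size = 0x80000000ul;
	unsigned long gap_base = 0xa0000000ul, gap_size = 0x20000000ul;
	unsigned long start_seg = base >> IO_SEGMENT_SHIFT;
	unsigned long segments = size >> IO_SEGMENT_SHIFT;
	unsigned long i;

	gap_base = gap_base >> IO_SEGMENT_SHIFT;
	gap_size = gap_size >> IO_SEGMENT_SHIFT;

	for (i = start_seg; i < (start_seg + segments); i++) {
		if (i >= gap_base && i < (gap_base + gap_size)) {
			printf("segment %lu: overlaps the gap, skipped\n", i);
			continue;
		}
		printf("segment %lu -> ptab chunk %lu\n", i, i - start_seg);
	}
	return 0;
}

The chunk index i - start_seg keeps advancing across skipped segments, so the ptab pages under the gap are allocated but simply never referenced.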
@@ -416,7 +429,8 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
 static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
 			unsigned long base, unsigned long size)
 {
-	cell_iommu_setup_page_tables(iommu, base, size, 0, 0);
+	cell_iommu_setup_stab(iommu, base, size, 0, 0);
+	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0);
 	cell_iommu_enable_hardware(iommu);
 }
 
@@ -870,8 +884,10 @@ static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
 	struct device_node *np, unsigned long dbase, unsigned long dsize,
 	unsigned long fbase, unsigned long fsize)
 {
-	unsigned long base_pte, uaddr, *io_pte;
 	int i;
+	unsigned long base_pte, uaddr, *io_pte, *ptab;
+
+	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize);
 
 	dma_iommu_fixed_base = fbase;
 
@@ -883,7 +899,7 @@ static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
 
 	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
 
-	io_pte = iommu->ptab;
+	io_pte = ptab;
 	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
 		    | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);
 
@@ -894,7 +910,7 @@ static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
 			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
 			continue;
 		}
-		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+		io_pte[i - fbase] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
 	}
 
 	mb();
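
Because the fixed window's PTEs now live in their own allocation rather than in one big ptab indexed from I/O address zero, the slot must be rebased by fbase. A toy illustration with made-up page numbers (the real loop bounds lie outside this hunk):

#include <stdio.h>

int main(void)
{
	unsigned long fbase = 0x100;	/* hypothetical first page of window */
	unsigned long fsize = 4;	/* hypothetical window length in pages */
	unsigned long io_pte[4] = { 0 };
	unsigned long i;

	for (i = fbase; i < fbase + fsize; i++) {
		/* io_pte[i] would index far past this 4-entry table;
		 * subtracting fbase keeps the slot inside the window's
		 * own ptab */
		io_pte[i - fbase] = i;	/* stand-in for a real PTE value */
		printf("page 0x%lx -> slot %lu\n", i, i - fbase);
	}
	return 0;
}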
@@ -992,7 +1008,8 @@ static int __init cell_iommu_fixed_mapping_init(void)
 			"fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
 			dbase + dsize, fbase, fbase + fsize);
 
-		cell_iommu_setup_page_tables(iommu, dbase, dsize, fbase, fsize);
+		cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
+		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0);
 		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
 				fbase, fsize);
 		cell_iommu_enable_hardware(iommu);
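
Taken together, the fixed-mapping path now builds its translation structures in explicit steps: one segment table covering both windows, a ptab for the dynamic window assigned to iommu->ptab, and a separate ptab for the fixed window allocated inside cell_iommu_setup_fixed_ptab(), which passes the dynamic window as the gap so its segments are left alone. Only after all tables exist is the hardware enabled. Giving each window its own ptab, instead of one contiguous allocation spanning from the lowest window to the highest, lets the allocations be sized independently; it also plausibly opens the door to using a different IOMMU page size per window, though this patch itself does not do that.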