author	FUJITA Tomonori <tomof@acm.org>	2008-02-05 01:28:09 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 12:44:11 -0500
commit	383af9525bb27f927511874f6306247ec13f1c28 (patch)
tree	c2a88846ba944954c87aaeb9087fc5ff0f0f9d57 /arch/powerpc/kernel/iommu.c
parent	fb3475e9b6bfa666107512fbd6006c26014f04b8 (diff)
iommu sg: powerpc: remove DMA 4GB boundary protection
Previously, during initialization of the IOMMU tables, the last entry at each 4GB boundary was marked as used, because many adapters cannot handle DMA that crosses a 4GB boundary.

The IOMMU allocator no longer hands out memory areas that span an LLD's segment boundary, and the segment boundary of devices is set to 4GB by default, so the explicit 4GB boundary protection can be removed.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
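The guarantee described above is now enforced at allocation time rather than by pre-reserving table entries: when the allocator picks a range of TCE entries for a device, it rejects candidates that would span the device's segment boundary (4GB unless a driver sets a smaller one). The sketch below is a minimal, hypothetical illustration of that kind of boundary check, assuming 4KB IOMMU pages and a boundary expressed in pages; the function name crosses_boundary() is invented for this example and is not the kernel's helper.

	#include <stdio.h>

	/*
	 * Hypothetical illustration (not the kernel's exact helper): an
	 * allocation of "npages" IOMMU pages starting at table "index"
	 * stays within one boundary window when the offset of its first
	 * page inside the window, plus its length, does not exceed the
	 * window size.  "boundary_pages" is the boundary expressed in
	 * IOMMU pages and must be a power of two; "offset" is the table's
	 * starting page relative to the boundary.
	 */
	static int crosses_boundary(unsigned long index, unsigned long npages,
				    unsigned long offset,
				    unsigned long boundary_pages)
	{
		unsigned long start = (offset + index) & (boundary_pages - 1);

		return start + npages > boundary_pages;
	}

	int main(void)
	{
		/* 4GB boundary with 4KB IOMMU pages: 1M entries per window. */
		unsigned long boundary_pages = 0x100000000UL >> 12;

		/* Ends exactly at the boundary: allowed (prints 0). */
		printf("%d\n", crosses_boundary(boundary_pages - 16, 16, 0,
						boundary_pages));
		/* One page more would span it, so the allocator must skip
		 * past the boundary (prints 1). */
		printf("%d\n", crosses_boundary(boundary_pages - 16, 17, 0,
						boundary_pages));
		return 0;
	}

Because every candidate range is filtered this way, reserving the last entry before each 4GB boundary in the table bitmap becomes redundant, which is what the diff below removes.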
Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r--	arch/powerpc/kernel/iommu.c	21
1 file changed, 1 insertion(+), 20 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index c42219c0afda..8f1f4e539c4b 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -453,9 +453,6 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 {
 	unsigned long sz;
-	unsigned long start_index, end_index;
-	unsigned long entries_per_4g;
-	unsigned long index;
 	static int welcomed = 0;
 	struct page *page;
 
@@ -477,6 +474,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 
 #ifdef CONFIG_CRASH_DUMP
 	if (ppc_md.tce_get) {
+		unsigned long index;
 		unsigned long tceval;
 		unsigned long tcecount = 0;
 
@@ -507,23 +505,6 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
 #endif
 
-	/*
-	 * DMA cannot cross 4 GB boundary. Mark last entry of each 4
-	 * GB chunk as reserved.
-	 */
-	if (protect4gb) {
-		entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;
-
-		/* Mark the last bit before a 4GB boundary as used */
-		start_index = tbl->it_offset | (entries_per_4g - 1);
-		start_index -= tbl->it_offset;
-
-		end_index = tbl->it_size;
-
-		for (index = start_index; index < end_index - 1; index += entries_per_4g)
-			__set_bit(index, tbl->it_map);
-	}
-
 	if (!welcomed) {
 		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
 		       novmerge ? "disabled" : "enabled");