author		Jake Moilanen <moilanen@austin.ibm.com>	2007-03-29 09:44:02 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-04-12 13:55:13 -0400
commit		569975591c5530fdc9c7a3c45122e5e46f075a74 (patch)
tree		47908de09d2e7d55d82a3b9d2e47b34b2c7f8133 /arch/powerpc/kernel/iommu.c
parent		1f9209cfe06be715b82075e79c9aab3c5b714010 (diff)
[POWERPC] DMA 4GB boundary protection
There are many adapters that cannot handle DMA crossing any 4 GB
boundary, for instance the latest Emulex adapters.
This is normally not an issue, as firmware hands out DMA windows
below 4 GB. However, some of the new System p boxes have DMA windows
above 4 GB, and this presents a problem.
During initialization of the IOMMU tables, the last entry before each
4 GB boundary is marked as used, so no mapping can cross a boundary.
If a table ends exactly on a 4 GB boundary, that final entry is not
marked as used.
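As a rough illustration of that reservation arithmetic, here is a
minimal standalone C sketch; the 4 KB IOMMU page size and the table
geometry are assumed purely for the example, and this is not the
kernel code itself:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed: 4 KB IOMMU pages (IOMMU_PAGE_SHIFT == 12),
		 * so a 4 GB region covers 0x100000 table entries. */
		unsigned long entries_per_4g = 0x100000000ULL >> 12;

		/* Assumed example geometry for one IOMMU table. */
		unsigned long it_offset = 0x80000;   /* first entry number */
		unsigned long it_size = 0x300000;    /* number of entries */
		unsigned long start_index, index;

		/* Table-relative index of the last entry before the
		 * first 4 GB boundary that the table crosses. */
		start_index = (it_offset | (entries_per_4g - 1)) - it_offset;

		/* One reserved entry per 4 GB chunk; an entry landing
		 * exactly on the table's end is left unreserved. */
		for (index = start_index; index < it_size - 1; index += entries_per_4g)
			printf("reserve table entry %#lx\n", index);

		return 0;
	}

With these assumed values the sketch prints entries 0x7ffff, 0x17ffff
and 0x27ffff: the last entry before each of the three 4 GB boundaries
the table spans.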
A boot option, protect4gb=off, is provided to remove this 4 GB
protection; this exposes the potential issue for driver and hardware
development purposes.
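For example, appending protect4gb=off to the kernel command line
disables the reservation, so boundary-crossing mappings can be
exercised deliberately during bring-up and debugging.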
Signed-off-by: Jake Moilanen <moilanen@austin.ibm.com>
Acked-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r--	arch/powerpc/kernel/iommu.c	35
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index c50d7072f305..d2598e2e7bbe 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -47,6 +47,8 @@ static int novmerge = 0;
 static int novmerge = 1;
 #endif
 
+static int protect4gb = 1;
+
 static inline unsigned long iommu_num_pages(unsigned long vaddr,
 					    unsigned long slen)
 {
@@ -58,6 +60,16 @@ static inline unsigned long iommu_num_pages(unsigned long vaddr,
 	return npages;
 }
 
+static int __init setup_protect4gb(char *str)
+{
+	if (strcmp(str, "on") == 0)
+		protect4gb = 1;
+	else if (strcmp(str, "off") == 0)
+		protect4gb = 0;
+
+	return 1;
+}
+
 static int __init setup_iommu(char *str)
 {
 	if (!strcmp(str, "novmerge"))
@@ -67,6 +79,7 @@ static int __init setup_iommu(char *str)
 	return 1;
 }
 
+__setup("protect4gb=", setup_protect4gb);
 __setup("iommu=", setup_iommu);
 
 static unsigned long iommu_range_alloc(struct iommu_table *tbl,
@@ -439,6 +452,9 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 {
 	unsigned long sz;
+	unsigned long start_index, end_index;
+	unsigned long entries_per_4g;
+	unsigned long index;
 	static int welcomed = 0;
 	struct page *page;
 
@@ -460,7 +476,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 
 #ifdef CONFIG_CRASH_DUMP
 	if (ppc_md.tce_get) {
-		unsigned long index, tceval;
+		unsigned long tceval;
 		unsigned long tcecount = 0;
 
 		/*
@@ -490,6 +506,23 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
 #endif
 
+	/*
+	 * DMA cannot cross 4 GB boundary.  Mark last entry of each 4
+	 * GB chunk as reserved.
+	 */
+	if (protect4gb) {
+		entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;
+
+		/* Mark the last bit before a 4GB boundary as used */
+		start_index = tbl->it_offset | (entries_per_4g - 1);
+		start_index -= tbl->it_offset;
+
+		end_index = tbl->it_size;
+
+		for (index = start_index; index < end_index - 1; index += entries_per_4g)
+			__set_bit(index, tbl->it_map);
+	}
+
 	if (!welcomed) {
 		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
 		       novmerge ? "disabled" : "enabled");