Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r--  arch/powerpc/kernel/iommu.c  | 116
1 file changed, 54 insertions, 62 deletions
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 2d0c9ef555e9..8f1f4e539c4b 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -31,6 +31,7 @@
 #include <linux/string.h>
 #include <linux/dma-mapping.h>
 #include <linux/bitops.h>
+#include <linux/iommu-helper.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/iommu.h>
@@ -81,17 +82,19 @@ static int __init setup_iommu(char *str)
 __setup("protect4gb=", setup_protect4gb);
 __setup("iommu=", setup_iommu);
 
-static unsigned long iommu_range_alloc(struct iommu_table *tbl,
+static unsigned long iommu_range_alloc(struct device *dev,
+                                       struct iommu_table *tbl,
                                        unsigned long npages,
                                        unsigned long *handle,
                                        unsigned long mask,
                                        unsigned int align_order)
 {
-        unsigned long n, end, i, start;
+        unsigned long n, end, start;
         unsigned long limit;
         int largealloc = npages > 15;
         int pass = 0;
         unsigned long align_mask;
+        unsigned long boundary_size;
 
         align_mask = 0xffffffffffffffffl >> (64 - align_order);
 
@@ -136,14 +139,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                 start &= mask;
         }
 
-        n = find_next_zero_bit(tbl->it_map, limit, start);
-
-        /* Align allocation */
-        n = (n + align_mask) & ~align_mask;
-
-        end = n + npages;
-
-        if (unlikely(end >= limit)) {
+        if (dev)
+                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+                                      1 << IOMMU_PAGE_SHIFT);
+        else
+                boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
+        /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
+
+        n = iommu_area_alloc(tbl->it_map, limit, start, npages,
+                             tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
+                             align_mask);
+        if (n == -1) {
                 if (likely(pass < 2)) {
                         /* First failure, just rescan the half of the table.
                          * Second failure, rescan the other half of the table.
@@ -158,14 +164,7 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                 }
         }
 
-        for (i = n; i < end; i++)
-                if (test_bit(i, tbl->it_map)) {
-                        start = i+1;
-                        goto again;
-                }
-
-        for (i = n; i < end; i++)
-                __set_bit(i, tbl->it_map);
+        end = n + npages;
 
         /* Bump the hint to a new block for small allocs. */
         if (largealloc) {
@@ -184,16 +183,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
         return n;
 }
 
-static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
-                unsigned int npages, enum dma_data_direction direction,
-                unsigned long mask, unsigned int align_order)
+static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
+                              void *page, unsigned int npages,
+                              enum dma_data_direction direction,
+                              unsigned long mask, unsigned int align_order)
 {
         unsigned long entry, flags;
         dma_addr_t ret = DMA_ERROR_CODE;
 
         spin_lock_irqsave(&(tbl->it_lock), flags);
 
-        entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
+        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 
         if (unlikely(entry == DMA_ERROR_CODE)) {
                 spin_unlock_irqrestore(&(tbl->it_lock), flags);
@@ -224,7 +224,6 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                          unsigned int npages)
 {
         unsigned long entry, free_entry;
-        unsigned long i;
 
         entry = dma_addr >> IOMMU_PAGE_SHIFT;
         free_entry = entry - tbl->it_offset;
@@ -246,9 +245,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
         }
 
         ppc_md.tce_free(tbl, entry, npages);
-
-        for (i = 0; i < npages; i++)
-                __clear_bit(free_entry+i, tbl->it_map);
+        iommu_area_free(tbl->it_map, free_entry, npages);
 }
 
 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -270,15 +267,18 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
         spin_unlock_irqrestore(&(tbl->it_lock), flags);
 }
 
-int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                  int nelems, unsigned long mask,
                  enum dma_data_direction direction)
 {
+        struct iommu_table *tbl = dev->archdata.dma_data;
         dma_addr_t dma_next = 0, dma_addr;
         unsigned long flags;
         struct scatterlist *s, *outs, *segstart;
         int outcount, incount, i;
+        unsigned int align;
         unsigned long handle;
+        unsigned int max_seg_size;
 
         BUG_ON(direction == DMA_NONE);
 
@@ -297,6 +297,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
         spin_lock_irqsave(&(tbl->it_lock), flags);
 
+        max_seg_size = dma_get_max_seg_size(dev);
         for_each_sg(sglist, s, nelems, i) {
                 unsigned long vaddr, npages, entry, slen;
 
@@ -309,7 +310,12 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                 /* Allocate iommu entries for that segment */
                 vaddr = (unsigned long) sg_virt(s);
                 npages = iommu_num_pages(vaddr, slen);
-                entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
+                align = 0;
+                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
+                    (vaddr & ~PAGE_MASK) == 0)
+                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+                entry = iommu_range_alloc(dev, tbl, npages, &handle,
+                                          mask >> IOMMU_PAGE_SHIFT, align);
 
                 DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -338,7 +344,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                         /* We cannot merge if:
                          * - allocated dma_addr isn't contiguous to previous allocation
                          */
-                        if (novmerge || (dma_addr != dma_next)) {
+                        if (novmerge || (dma_addr != dma_next) ||
+                            (outs->dma_length + s->length > max_seg_size)) {
                                 /* Can't merge: create a new segment */
                                 segstart = s;
                                 outcount++;
@@ -446,9 +453,6 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 {
         unsigned long sz;
-        unsigned long start_index, end_index;
-        unsigned long entries_per_4g;
-        unsigned long index;
         static int welcomed = 0;
         struct page *page;
 
@@ -470,6 +474,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 
 #ifdef CONFIG_CRASH_DUMP
         if (ppc_md.tce_get) {
+                unsigned long index;
                 unsigned long tceval;
                 unsigned long tcecount = 0;
 
@@ -500,23 +505,6 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
         ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
 #endif
 
-        /*
-         * DMA cannot cross 4 GB boundary. Mark last entry of each 4
-         * GB chunk as reserved.
-         */
-        if (protect4gb) {
-                entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;
-
-                /* Mark the last bit before a 4GB boundary as used */
-                start_index = tbl->it_offset | (entries_per_4g - 1);
-                start_index -= tbl->it_offset;
-
-                end_index = tbl->it_size;
-
-                for (index = start_index; index < end_index - 1; index += entries_per_4g)
-                        __set_bit(index, tbl->it_map);
-        }
-
         if (!welcomed) {
                 printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                        novmerge ? "disabled" : "enabled");
@@ -526,16 +514,14 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
         return tbl;
 }
 
-void iommu_free_table(struct device_node *dn)
+void iommu_free_table(struct iommu_table *tbl, const char *node_name)
 {
-        struct pci_dn *pdn = dn->data;
-        struct iommu_table *tbl = pdn->iommu_table;
         unsigned long bitmap_sz, i;
         unsigned int order;
 
         if (!tbl || !tbl->it_map) {
                 printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
-                       dn->full_name);
+                       node_name);
                 return;
         }
 
@@ -544,7 +530,7 @@ void iommu_free_table(struct device_node *dn)
         for (i = 0; i < (tbl->it_size/64); i++) {
                 if (tbl->it_map[i] != 0) {
                         printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
-                               __FUNCTION__, dn->full_name);
+                               __FUNCTION__, node_name);
                         break;
                 }
         }
@@ -566,13 +552,13 @@ void iommu_free_table(struct device_node *dn)
  * need not be page aligned, the dma_addr_t returned will point to the same
  * byte within the page as vaddr.
  */
-dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-                            size_t size, unsigned long mask,
+dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
+                            void *vaddr, size_t size, unsigned long mask,
                             enum dma_data_direction direction)
 {
         dma_addr_t dma_handle = DMA_ERROR_CODE;
         unsigned long uaddr;
-        unsigned int npages;
+        unsigned int npages, align;
 
         BUG_ON(direction == DMA_NONE);
 
@@ -580,8 +566,13 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
         npages = iommu_num_pages(uaddr, size);
 
         if (tbl) {
-                dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
-                                         mask >> IOMMU_PAGE_SHIFT, 0);
+                align = 0;
+                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
+                    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
+                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+
+                dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
+                                         mask >> IOMMU_PAGE_SHIFT, align);
                 if (dma_handle == DMA_ERROR_CODE) {
                         if (printk_ratelimit()) {
                                 printk(KERN_INFO "iommu_alloc failed, "
@@ -612,8 +603,9 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
  * Returns the virtual address of the buffer and sets dma_handle
  * to the dma address (mapping) of the first page.
  */
-void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-                dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
+void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+                           size_t size, dma_addr_t *dma_handle,
+                           unsigned long mask, gfp_t flag, int node)
 {
         void *ret = NULL;
         dma_addr_t mapping;
@@ -647,7 +639,7 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
         /* Set up tces to cover the allocated range */
         nio_pages = size >> IOMMU_PAGE_SHIFT;
         io_order = get_iommu_order(size);
-        mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                               mask >> IOMMU_PAGE_SHIFT, io_order);
         if (mapping == DMA_ERROR_CODE) {
                 free_pages((unsigned long)ret, order);