path: root/arch/powerpc/kernel/iommu.c
author	Linas Vepstas <linas@austin.ibm.com>	2006-10-30 00:15:59 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-10-31 22:52:48 -0500
commit	5d2efba64b231a1733c4048d1708d77e07f26426 (patch)
tree	2893dd45b9c26cef6cddb5fef0c6f820c5eb534e /arch/powerpc/kernel/iommu.c
parent	dd6c89f686bdb2a5de72fab636fc839e5a0add6d (diff)
[POWERPC] Use 4kB iommu pages even on 64kB-page systems
The 10Gigabit ethernet device drivers appear to be able to chew up all
256MB of TCE mappings on pSeries systems, as evidenced by numerous
error messages:

    iommu_alloc failed, tbl c0000000010d5c48 vaddr c0000000d875eff0 npages 1

Some experimentation indicates that this is essentially because one
1500-byte ethernet MTU gets mapped as a 64K DMA region when the large
64K pages are enabled. Thus, it doesn't take much to exhaust all of the
available DMA mappings for a high-speed card.

This patch changes the iommu allocator to work with its own unique,
distinct page size. Although the patch is long, it's actually quite
simple: it just #defines a distinct IOMMU_PAGE_SIZE and then uses it in
all the places that matter.

As a side effect, it also dramatically improves network performance on
platforms with H-calls on iommu translation inserts/removes (since we
no longer call it 16 times for a 1500-byte packet when the iommu HW is
still 4k).

In the future, we might want to make IOMMU_PAGE_SIZE a variable in the
iommu_table instance, thus allowing support for different HW page sizes
in the iommu itself.

Signed-off-by: Linas Vepstas <linas@austin.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r--	arch/powerpc/kernel/iommu.c	77
1 file changed, 45 insertions(+), 32 deletions(-)
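[Editor's note] This view is limited to iommu.c, so the IOMMU_PAGE_* helpers and get_iommu_order() that the hunks below rely on are not shown here; the same commit defines them in include/asm-powerpc/iommu.h. A sketch from memory, not a quote of that hunk (ASM_CONST, _ALIGN_UP and __ilog2 are pre-existing powerpc helpers of that era):

	#define IOMMU_PAGE_SHIFT	12
	#define IOMMU_PAGE_SIZE		(ASM_CONST(1) << IOMMU_PAGE_SHIFT)
	#define IOMMU_PAGE_MASK		(~((1 << IOMMU_PAGE_SHIFT) - 1))
	#define IOMMU_PAGE_ALIGN(addr)	_ALIGN_UP(addr, IOMMU_PAGE_SIZE)

	/* Pure 2^n version of get_order, in IOMMU (4K) pages */
	static inline unsigned int get_iommu_order(unsigned long size)
	{
		return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
	}

The key point is that IOMMU_PAGE_SHIFT is fixed at 12 (4K), independent of the kernel's PAGE_SHIFT, which is 16 when 64K pages are configured.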
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index f88a2a675d90..ba6b7256084b 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -47,6 +47,17 @@ static int novmerge = 0;
 static int novmerge = 1;
 #endif
 
+static inline unsigned long iommu_num_pages(unsigned long vaddr,
+					    unsigned long slen)
+{
+	unsigned long npages;
+
+	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
+	npages >>= IOMMU_PAGE_SHIFT;
+
+	return npages;
+}
+
 static int __init setup_iommu(char *str)
 {
 	if (!strcmp(str, "novmerge"))
@@ -178,10 +189,10 @@ static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
 	}
 
 	entry += tbl->it_offset;	/* Offset into real TCE table */
-	ret = entry << PAGE_SHIFT;	/* Set the return dma address */
+	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */
 
 	/* Put the TCEs in the HW table */
-	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
+	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
 			 direction);
 
 
@@ -203,7 +214,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	unsigned long entry, free_entry;
 	unsigned long i;
 
-	entry = dma_addr >> PAGE_SHIFT;
+	entry = dma_addr >> IOMMU_PAGE_SHIFT;
 	free_entry = entry - tbl->it_offset;
 
 	if (((free_entry + npages) > tbl->it_size) ||
@@ -270,7 +281,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 	/* Init first segment length for backout at failure */
 	outs->dma_length = 0;
 
-	DBG("mapping %d elements:\n", nelems);
+	DBG("sg mapping %d elements:\n", nelems);
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
@@ -285,9 +296,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 	}
 	/* Allocate iommu entries for that segment */
 	vaddr = (unsigned long)page_address(s->page) + s->offset;
-	npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
-	npages >>= PAGE_SHIFT;
-	entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);
+	npages = iommu_num_pages(vaddr, slen);
+	entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
 
 	DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -301,14 +311,14 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 
 		/* Convert entry to a dma_addr_t */
 		entry += tbl->it_offset;
-		dma_addr = entry << PAGE_SHIFT;
-		dma_addr |= s->offset;
+		dma_addr = entry << IOMMU_PAGE_SHIFT;
+		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
 
-		DBG("  - %lx pages, entry: %lx, dma_addr: %lx\n",
+		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
 			    npages, entry, dma_addr);
 
 		/* Insert into HW table */
-		ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);
+		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
 
 		/* If we are in an open segment, try merging */
 		if (segstart != s) {
@@ -323,7 +333,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			DBG("    can't merge, new segment.\n");
 		} else {
 			outs->dma_length += s->length;
-			DBG("    merged, new len: %lx\n", outs->dma_length);
+			DBG("    merged, new len: %ux\n", outs->dma_length);
 		}
 	}
 
@@ -367,9 +377,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		if (s->dma_length != 0) {
 			unsigned long vaddr, npages;
 
-			vaddr = s->dma_address & PAGE_MASK;
-			npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
-				>> PAGE_SHIFT;
+			vaddr = s->dma_address & IOMMU_PAGE_MASK;
+			npages = iommu_num_pages(s->dma_address, s->dma_length);
 			__iommu_free(tbl, vaddr, npages);
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -398,8 +407,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
 		if (sglist->dma_length == 0)
 			break;
-		npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
-			  - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
+		npages = iommu_num_pages(dma_handle, sglist->dma_length);
 		__iommu_free(tbl, dma_handle, npages);
 		sglist++;
 	}
@@ -532,12 +540,11 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 	BUG_ON(direction == DMA_NONE);
 
 	uaddr = (unsigned long)vaddr;
-	npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
-	npages >>= PAGE_SHIFT;
+	npages = iommu_num_pages(uaddr, size);
 
 	if (tbl) {
 		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
-					 mask >> PAGE_SHIFT, 0);
+					 mask >> IOMMU_PAGE_SHIFT, 0);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit()) {
 				printk(KERN_INFO "iommu_alloc failed, "
@@ -545,7 +552,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 				       tbl, vaddr, npages);
 			}
 		} else
-			dma_handle |= (uaddr & ~PAGE_MASK);
+			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
 	}
 
 	return dma_handle;
@@ -554,11 +561,14 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction direction)
 {
+	unsigned int npages;
+
 	BUG_ON(direction == DMA_NONE);
 
-	if (tbl)
-		iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
-					(dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
+	if (tbl) {
+		npages = iommu_num_pages(dma_handle, size);
+		iommu_free(tbl, dma_handle, npages);
+	}
 }
 
 /* Allocates a contiguous real buffer and creates mappings over it.
564/* Allocates a contiguous real buffer and creates mappings over it. 574/* Allocates a contiguous real buffer and creates mappings over it.
@@ -570,11 +580,11 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
-	unsigned int npages, order;
+	unsigned int order;
+	unsigned int nio_pages, io_order;
 	struct page *page;
 
 	size = PAGE_ALIGN(size);
-	npages = size >> PAGE_SHIFT;
 	order = get_order(size);
 
 	/*
@@ -598,8 +608,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
-	mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
-			      mask >> PAGE_SHIFT, order);
+	nio_pages = size >> IOMMU_PAGE_SHIFT;
+	io_order = get_iommu_order(size);
+	mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+			      mask >> IOMMU_PAGE_SHIFT, io_order);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
@@ -611,12 +623,13 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
 void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 			 void *vaddr, dma_addr_t dma_handle)
 {
-	unsigned int npages;
-
 	if (tbl) {
+		unsigned int nio_pages;
+
+		size = PAGE_ALIGN(size);
+		nio_pages = size >> IOMMU_PAGE_SHIFT;
+		iommu_free(tbl, dma_handle, nio_pages);
 		size = PAGE_ALIGN(size);
-		npages = size >> PAGE_SHIFT;
-		iommu_free(tbl, dma_handle, npages);
 		free_pages((unsigned long)vaddr, get_order(size));
 	}
 }
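[Editor's note] For illustration only, not part of the commit: a minimal userspace sketch of the new iommu_num_pages() arithmetic, using the vaddr from the error log quoted in the commit message. It assumes a 64-bit host and mirrors the assumed IOMMU_PAGE_* definitions sketched above the diff.

	#include <stdio.h>

	#define IOMMU_PAGE_SHIFT	12
	#define IOMMU_PAGE_SIZE		(1UL << IOMMU_PAGE_SHIFT)
	#define IOMMU_PAGE_MASK		(~(IOMMU_PAGE_SIZE - 1))
	#define IOMMU_PAGE_ALIGN(a)	(((a) + IOMMU_PAGE_SIZE - 1) & IOMMU_PAGE_MASK)

	/* Same arithmetic as the iommu_num_pages() helper added by the patch */
	static unsigned long iommu_num_pages(unsigned long vaddr, unsigned long slen)
	{
		unsigned long npages;

		npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
		npages >>= IOMMU_PAGE_SHIFT;

		return npages;
	}

	int main(void)
	{
		/* 1500 bytes starting 0xff0 into a 4K page cross one
		 * boundary, so two 4K TCEs are consumed; prints 2. */
		printf("%lu\n", iommu_num_pages(0xc0000000d875eff0UL, 1500));

		/* A page-aligned 1500-byte buffer needs a single 4K TCE;
		 * with the old PAGE_SHIFT=16 math it occupied a whole 64K
		 * mapping (16 H-call TCE inserts on 4K iommu hardware). */
		printf("%lu\n", iommu_num_pages(0xc0000000d8750000UL, 1500));
		return 0;
	}

The returned dma_addr is then composed the same way as in the hunks above: entry << IOMMU_PAGE_SHIFT for the page part, OR'd with the sub-page offset masked by ~IOMMU_PAGE_MASK.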