 arch/sparc/include/asm/iommu_64.h |   2
 arch/sparc/kernel/iommu.c         |  78
 arch/sparc/kernel/ldc.c           |  60
 arch/sparc/kernel/pci_sun4v.c     |  64
 include/linux/iommu-common.h      |  38
 lib/iommu-common.c                | 190
 6 files changed, 202 insertions(+), 230 deletions(-)
diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h
index e3cd4493d81d..cd0d69fa7592 100644
--- a/arch/sparc/include/asm/iommu_64.h
+++ b/arch/sparc/include/asm/iommu_64.h
@@ -25,7 +25,7 @@ struct iommu_arena {
 };
 
 struct iommu {
-	struct iommu_table	tbl;
+	struct iommu_map_table	tbl;
 	spinlock_t		lock;
 	u32			dma_addr_mask;
 	iopte_t			*page_table;
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 9b16b341b6ae..5320689c06e9 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -13,15 +13,12 @@
 #include <linux/errno.h>
 #include <linux/iommu-helper.h>
 #include <linux/bitmap.h>
-#include <linux/hash.h>
 #include <linux/iommu-common.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
 #endif
 
-static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
-
 #include <asm/iommu.h>
 
 #include "iommu_common.h"
@@ -49,9 +46,9 @@ static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
 			       "i" (ASI_PHYS_BYPASS_EC_E))
 
 /* Must be invoked under the IOMMU lock. */
-static void iommu_flushall(struct iommu_table *iommu_table)
+static void iommu_flushall(struct iommu_map_table *iommu_map_table)
 {
-	struct iommu *iommu = container_of(iommu_table, struct iommu, tbl);
+	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
 	if (iommu->iommu_flushinv) {
 		iommu_write(iommu->iommu_flushinv, ~(u64)0);
 	} else {
@@ -92,23 +89,6 @@ static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 	iopte_val(*iopte) = val;
 }
 
-static struct iommu_tbl_ops iommu_sparc_ops = {
-	.reset	= iommu_flushall
-};
-
-static void setup_iommu_pool_hash(void)
-{
-	unsigned int i;
-	static bool do_once;
-
-	if (do_once)
-		return;
-	do_once = true;
-	for_each_possible_cpu(i)
-		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
-}
-
-
 int iommu_table_init(struct iommu *iommu, int tsbsize,
 		     u32 dma_offset, u32 dma_addr_mask,
 		     int numa_node)
@@ -121,7 +101,7 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
 	iommu->ctx_lowest_free = 1;
-	iommu->tbl.page_table_map_base = dma_offset;
+	iommu->tbl.table_map_base = dma_offset;
 	iommu->dma_addr_mask = dma_addr_mask;
 
 	/* Allocate and initialize the free area map.  */
@@ -131,12 +111,10 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
 	if (!iommu->tbl.map)
 		return -ENOMEM;
 	memset(iommu->tbl.map, 0, sz);
-	if (tlb_type != hypervisor)
-		iommu_sparc_ops.reset = NULL; /* not needed on on sun4v */
 
-	setup_iommu_pool_hash();
 	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
-			    &iommu_sparc_ops, false, 1);
+			    (tlb_type != hypervisor ? iommu_flushall : NULL),
+			    false, 1, false);
 
 	/* Allocate and initialize the dummy page which we
 	 * set inactive IO PTEs to point to.
@@ -182,7 +160,7 @@ static inline iopte_t *alloc_npages(struct device *dev,
 	unsigned long entry;
 
 	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
-				      __this_cpu_read(iommu_pool_hash));
+				      (unsigned long)(-1), 0);
 	if (unlikely(entry == DMA_ERROR_CODE))
 		return NULL;
 
@@ -249,7 +227,7 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
 		return NULL;
 	}
 
-	*dma_addrp = (iommu->tbl.page_table_map_base +
+	*dma_addrp = (iommu->tbl.table_map_base +
 		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = (void *) first_page;
 	npages = size >> IO_PAGE_SHIFT;
@@ -275,7 +253,7 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 	iommu = dev->archdata.iommu;
 
-	iommu_tbl_range_free(&iommu->tbl, dvma, npages, false, NULL);
+	iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
 
 	order = get_order(size);
 	if (order < 10)
@@ -315,7 +293,7 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
 	if (unlikely(!base))
 		goto bad;
 
-	bus_addr = (iommu->tbl.page_table_map_base +
+	bus_addr = (iommu->tbl.table_map_base +
 		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
@@ -426,7 +404,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 	base = iommu->page_table +
-		((bus_addr - iommu->tbl.page_table_map_base) >> IO_PAGE_SHIFT);
+		((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
 	bus_addr &= IO_PAGE_MASK;
 
 	spin_lock_irqsave(&iommu->lock, flags);
@@ -448,8 +426,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
 	iommu_free_ctx(iommu, ctx);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages,
-			     false, NULL);
+	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
 }
 
 static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -497,7 +474,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	max_seg_size = dma_get_max_seg_size(dev);
 	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
-	base_shift = iommu->tbl.page_table_map_base >> IO_PAGE_SHIFT;
+	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
 		unsigned long paddr, npages, entry, out_entry = 0, slen;
 		iopte_t *base;
@@ -511,8 +488,8 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 		/* Allocate iommu entries for that segment */
 		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
-		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, &handle,
-					      __this_cpu_read(iommu_pool_hash));
+		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
					      &handle, (unsigned long)(-1), 0);
 
 		/* Handle failure */
 		if (unlikely(entry == DMA_ERROR_CODE)) {
@@ -525,7 +502,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 		base = iommu->page_table + entry;
 
 		/* Convert entry to a dma_addr_t */
-		dma_addr = iommu->tbl.page_table_map_base +
+		dma_addr = iommu->tbl.table_map_base +
 			(entry << IO_PAGE_SHIFT);
 		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
@@ -586,7 +563,7 @@ iommu_map_failed:
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IO_PAGE_SIZE);
 
-			entry = (vaddr - iommu->tbl.page_table_map_base)
+			entry = (vaddr - iommu->tbl.table_map_base)
 				>> IO_PAGE_SHIFT;
 			base = iommu->page_table + entry;
 
@@ -594,7 +571,7 @@ iommu_map_failed:
 				iopte_make_dummy(iommu, base + j);
 
 			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
-					     false, NULL);
+					     DMA_ERROR_CODE);
 
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -610,19 +587,18 @@ iommu_map_failed:
 /* If contexts are being used, they are the same in all of the mappings
  * we make for a particular SG.
  */
-static unsigned long fetch_sg_ctx(struct iommu *iommu,
-				  struct scatterlist *sg)
+static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
 {
 	unsigned long ctx = 0;
 
 	if (iommu->iommu_ctxflush) {
 		iopte_t *base;
 		u32 bus_addr;
-		struct iommu_table *tbl = &iommu->tbl;
+		struct iommu_map_table *tbl = &iommu->tbl;
 
 		bus_addr = sg->dma_address & IO_PAGE_MASK;
 		base = iommu->page_table +
-			((bus_addr - tbl->page_table_map_base) >> IO_PAGE_SHIFT);
+			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
 
 		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 	}
@@ -659,7 +635,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			break;
 		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
 
-		entry = ((dma_handle - iommu->tbl.page_table_map_base)
+		entry = ((dma_handle - iommu->tbl.table_map_base)
 			 >> IO_PAGE_SHIFT);
 		base = iommu->page_table + entry;
 
@@ -671,8 +647,8 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		for (i = 0; i < npages; i++)
 			iopte_make_dummy(iommu, base + i);
 
-		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, false,
-				     NULL);
+		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+				     DMA_ERROR_CODE);
 		sg = sg_next(sg);
 	}
 
@@ -706,10 +682,10 @@ static void dma_4u_sync_single_for_cpu(struct device *dev,
 	if (iommu->iommu_ctxflush &&
 	    strbuf->strbuf_ctxflush) {
 		iopte_t *iopte;
-		struct iommu_table *tbl = &iommu->tbl;
+		struct iommu_map_table *tbl = &iommu->tbl;
 
 		iopte = iommu->page_table +
-			((bus_addr - tbl->page_table_map_base)>>IO_PAGE_SHIFT);
+			((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT);
 		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
 	}
 
@@ -742,10 +718,10 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 	if (iommu->iommu_ctxflush &&
 	    strbuf->strbuf_ctxflush) {
 		iopte_t *iopte;
-		struct iommu_table *tbl = &iommu->tbl;
+		struct iommu_map_table *tbl = &iommu->tbl;
 
 		iopte = iommu->page_table + ((sglist[0].dma_address -
-			tbl->page_table_map_base) >> IO_PAGE_SHIFT);
+			tbl->table_map_base) >> IO_PAGE_SHIFT);
 		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
 	}
 
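Note: the pattern above repeats throughout this patch. Callers stop passing a precomputed per-cpu pool hash to iommu_tbl_range_alloc() (the hash moves into lib/iommu-common.c) and instead pass a DMA mask plus an alignment order, while iommu_tbl_range_free() takes an explicit entry index, with DMA_ERROR_CODE meaning "derive the entry from the address". A minimal sketch of the resulting sun4u-style call pattern, using only signatures declared in include/linux/iommu-common.h below; example_map/example_unmap are illustrative names, not from the patch:

/* Sketch only: the new alloc/free calling convention, assuming a
 * struct iommu as in arch/sparc/kernel/iommu.c above.
 */
static dma_addr_t example_map(struct device *dev, struct iommu *iommu,
			      unsigned long npages)
{
	unsigned long entry;

	/* (unsigned long)-1 = no DMA mask constraint, 0 = no extra
	 * alignment; the per-cpu pool hash is now read inside
	 * iommu_tbl_range_alloc() itself.
	 */
	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);
	if (unlikely(entry == DMA_ERROR_CODE))
		return DMA_ERROR_CODE;
	return iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
}

static void example_unmap(struct iommu *iommu, dma_addr_t dvma,
			  unsigned long npages)
{
	/* DMA_ERROR_CODE tells the library to derive the entry from
	 * dvma via table_map_base and table_shift.
	 */
	iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
}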
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index d485697c37c0..d2ae0f70059e 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -15,7 +15,6 @@
 #include <linux/list.h>
 #include <linux/init.h>
 #include <linux/bitmap.h>
-#include <linux/hash.h>
 #include <linux/iommu-common.h>
 
 #include <asm/hypervisor.h>
@@ -32,7 +31,6 @@
 #define COOKIE_PGSZ_CODE	0xf000000000000000ULL
 #define COOKIE_PGSZ_CODE_SHIFT	60ULL
 
-static DEFINE_PER_CPU(unsigned int, ldc_pool_hash);
 
 static char version[] =
 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -108,7 +106,7 @@ struct ldc_iommu {
 	/* Protects ldc_unmap.  */
 	spinlock_t lock;
 	struct ldc_mtable_entry *page_table;
-	struct iommu_table iommu_table;
+	struct iommu_map_table iommu_map_table;
 };
 
 struct ldc_channel {
@@ -1015,18 +1013,9 @@ static unsigned long ldc_cookie_to_index(u64 cookie, void *arg)
 	return (cookie >> (13ULL + (szcode * 3ULL)));
 }
 
-struct ldc_demap_arg {
-	struct ldc_iommu *ldc_iommu;
-	u64 cookie;
-	unsigned long id;
-};
-
-static void ldc_demap(void *arg, unsigned long entry, unsigned long npages)
+static void ldc_demap(struct ldc_iommu *iommu, unsigned long id, u64 cookie,
+		      unsigned long entry, unsigned long npages)
 {
-	struct ldc_demap_arg *ldc_demap_arg = arg;
-	struct ldc_iommu *iommu = ldc_demap_arg->ldc_iommu;
-	unsigned long id = ldc_demap_arg->id;
-	u64 cookie = ldc_demap_arg->cookie;
 	struct ldc_mtable_entry *base;
 	unsigned long i, shift;
 
@@ -1043,36 +1032,17 @@ static void ldc_demap(void *arg, unsigned long entry, unsigned long npages)
 /* XXX Make this configurable... XXX */
 #define LDC_IOTABLE_SIZE	(8 * 1024)
 
-struct iommu_tbl_ops ldc_iommu_ops = {
-	.cookie_to_index = ldc_cookie_to_index,
-	.demap = ldc_demap,
-};
-
-static void setup_ldc_pool_hash(void)
-{
-	unsigned int i;
-	static bool do_once;
-
-	if (do_once)
-		return;
-	do_once = true;
-	for_each_possible_cpu(i)
-		per_cpu(ldc_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
-}
-
-
 static int ldc_iommu_init(const char *name, struct ldc_channel *lp)
 {
 	unsigned long sz, num_tsb_entries, tsbsize, order;
 	struct ldc_iommu *ldc_iommu = &lp->iommu;
-	struct iommu_table *iommu = &ldc_iommu->iommu_table;
+	struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
 	struct ldc_mtable_entry *table;
 	unsigned long hv_err;
 	int err;
 
 	num_tsb_entries = LDC_IOTABLE_SIZE;
 	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
-	setup_ldc_pool_hash();
 	spin_lock_init(&ldc_iommu->lock);
 
 	sz = num_tsb_entries / 8;
@@ -1083,7 +1053,9 @@ static int ldc_iommu_init(const char *name, struct ldc_channel *lp)
 		return -ENOMEM;
 	}
 	iommu_tbl_pool_init(iommu, num_tsb_entries, PAGE_SHIFT,
-			    &ldc_iommu_ops, false, 1);
+			    NULL, false /* no large pool */,
+			    1 /* npools */,
+			    true /* skip span boundary check */);
 
 	order = get_order(tsbsize);
 
@@ -1122,7 +1094,7 @@ out_free_map:
 static void ldc_iommu_release(struct ldc_channel *lp)
 {
 	struct ldc_iommu *ldc_iommu = &lp->iommu;
-	struct iommu_table *iommu = &ldc_iommu->iommu_table;
+	struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
 	unsigned long num_tsb_entries, tsbsize, order;
 
 	(void) sun4v_ldc_set_map_table(lp->id, 0, 0);
@@ -1979,8 +1951,8 @@ static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
 {
 	long entry;
 
-	entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_table, npages,
-				      NULL, __this_cpu_read(ldc_pool_hash));
+	entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
+				      npages, NULL, (unsigned long)-1, 0);
 	if (unlikely(entry < 0))
 		return NULL;
 
@@ -2191,17 +2163,13 @@ EXPORT_SYMBOL(ldc_map_single);
 static void free_npages(unsigned long id, struct ldc_iommu *iommu,
 			u64 cookie, u64 size)
 {
-	unsigned long npages;
-	struct ldc_demap_arg demap_arg;
-
-	demap_arg.ldc_iommu = iommu;
-	demap_arg.cookie = cookie;
-	demap_arg.id = id;
+	unsigned long npages, entry;
 
 	npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
-	iommu_tbl_range_free(&iommu->iommu_table, cookie, npages, true,
-			     &demap_arg);
 
+	entry = ldc_cookie_to_index(cookie, iommu);
+	ldc_demap(iommu, id, cookie, entry, npages);
+	iommu_tbl_range_free(&iommu->iommu_map_table, cookie, npages, entry);
 }
 
 void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
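Note: with the demap callback and struct ldc_demap_arg removed, the cookie-to-entry translation and the hypervisor demap become explicit in free_npages(). A sketch of that sequence in isolation (identifiers are the ldc.c ones above; ldc_cookie_to_index() still takes the iommu as its void *arg):

/* Sketch: the explicit unmap sequence now open-coded in free_npages(). */
static void example_ldc_free(struct ldc_iommu *iommu, unsigned long id,
			     u64 cookie, unsigned long npages)
{
	unsigned long entry;

	entry = ldc_cookie_to_index(cookie, iommu);  /* cookie -> table index */
	ldc_demap(iommu, id, cookie, entry, npages); /* tear down HV mappings */
	/* Pass the precomputed entry: LDC cookies encode a page-size
	 * code, so the default addr->entry mapping does not apply here.
	 */
	iommu_tbl_range_free(&iommu->iommu_map_table, cookie, npages, entry);
}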
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 9b76b9d639e1..d2fe57dad433 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -15,7 +15,6 @@
 #include <linux/export.h>
 #include <linux/log2.h>
 #include <linux/of_device.h>
-#include <linux/hash.h>
 #include <linux/iommu-common.h>
 
 #include <asm/iommu.h>
@@ -30,7 +29,6 @@
 
 #define DRIVER_NAME	"pci_sun4v"
 #define PFX		DRIVER_NAME ": "
-static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
 
 static unsigned long vpci_major = 1;
 static unsigned long vpci_minor = 1;
@@ -159,13 +157,12 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	iommu = dev->archdata.iommu;
 
 	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
-				      __this_cpu_read(iommu_pool_hash));
+				      (unsigned long)(-1), 0);
 
 	if (unlikely(entry == DMA_ERROR_CODE))
 		goto range_alloc_fail;
 
-	*dma_addrp = (iommu->tbl.page_table_map_base +
-		      (entry << IO_PAGE_SHIFT));
+	*dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
 	ret = (void *) first_page;
 	first_page = __pa(first_page);
 
@@ -190,7 +187,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 
 iommu_map_fail:
-	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, false, NULL);
+	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE);
 
 range_alloc_fail:
 	free_pages(first_page, order);
@@ -227,9 +224,9 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 	iommu = dev->archdata.iommu;
 	pbm = dev->archdata.host_controller;
 	devhandle = pbm->devhandle;
-	entry = ((dvma - iommu->tbl.page_table_map_base) >> IO_PAGE_SHIFT);
+	entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
 	dma_4v_iommu_demap(&devhandle, entry, npages);
-	iommu_tbl_range_free(&iommu->tbl, dvma, npages, false, NULL);
+	iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
 	order = get_order(size);
 	if (order < 10)
 		free_pages((unsigned long)cpu, order);
@@ -257,13 +254,12 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	npages >>= IO_PAGE_SHIFT;
 
 	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
-				      __this_cpu_read(iommu_pool_hash));
+				      (unsigned long)(-1), 0);
 
 	if (unlikely(entry == DMA_ERROR_CODE))
 		goto bad;
 
-	bus_addr = (iommu->tbl.page_table_map_base +
-		    (entry << IO_PAGE_SHIFT));
+	bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	prot = HV_PCI_MAP_ATTR_READ;
@@ -292,7 +288,7 @@ bad:
 	return DMA_ERROR_CODE;
 
 iommu_map_fail:
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, false, NULL);
+	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
 	return DMA_ERROR_CODE;
 }
 
@@ -319,9 +315,9 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 	bus_addr &= IO_PAGE_MASK;
-	entry = (bus_addr - iommu->tbl.page_table_map_base) >> IO_PAGE_SHIFT;
+	entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
 	dma_4v_iommu_demap(&devhandle, entry, npages);
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, false, NULL);
+	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -363,7 +359,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	max_seg_size = dma_get_max_seg_size(dev);
 	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
-	base_shift = iommu->tbl.page_table_map_base >> IO_PAGE_SHIFT;
+	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
 		unsigned long paddr, npages, entry, out_entry = 0, slen;
 
@@ -376,8 +372,8 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		/* Allocate iommu entries for that segment */
 		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
-		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, &handle,
-					      __this_cpu_read(iommu_pool_hash));
+		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+					      &handle, (unsigned long)(-1), 0);
 
 		/* Handle failure */
 		if (unlikely(entry == DMA_ERROR_CODE)) {
@@ -390,8 +386,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		iommu_batch_new_entry(entry);
 
 		/* Convert entry to a dma_addr_t */
-		dma_addr = iommu->tbl.page_table_map_base +
-			(entry << IO_PAGE_SHIFT);
+		dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
 		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
 		/* Insert into HW table */
@@ -456,7 +451,7 @@ iommu_map_failed:
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IO_PAGE_SIZE);
 			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
-					     false, NULL);
+					     DMA_ERROR_CODE);
 			/* XXX demap? XXX */
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -492,16 +487,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		dma_addr_t dma_handle = sg->dma_address;
 		unsigned int len = sg->dma_length;
 		unsigned long npages;
-		struct iommu_table *tbl = &iommu->tbl;
+		struct iommu_map_table *tbl = &iommu->tbl;
 		unsigned long shift = IO_PAGE_SHIFT;
 
 		if (!len)
 			break;
 		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
-		entry = ((dma_handle - tbl->page_table_map_base) >> shift);
+		entry = ((dma_handle - tbl->table_map_base) >> shift);
 		dma_4v_iommu_demap(&devhandle, entry, npages);
 		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
-				     false, NULL);
+				     DMA_ERROR_CODE);
 		sg = sg_next(sg);
 	}
 
@@ -517,8 +512,6 @@ static struct dma_map_ops sun4v_dma_ops = {
 	.unmap_sg			= dma_4v_unmap_sg,
 };
 
-static struct iommu_tbl_ops dma_4v_iommu_ops;
-
 static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
 {
 	struct property *prop;
@@ -533,7 +526,7 @@ static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
 }
 
 static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
-					    struct iommu_table *iommu)
+					    struct iommu_map_table *iommu)
 {
 	struct iommu_pool *pool;
 	unsigned long i, pool_nr, cnt = 0;
@@ -541,7 +534,7 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
 
 	devhandle = pbm->devhandle;
 	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
-		pool = &(iommu->arena_pool[pool_nr]);
+		pool = &(iommu->pools[pool_nr]);
 		for (i = pool->start; i <= pool->end; i++) {
 			unsigned long ret, io_attrs, ra;
 
@@ -587,8 +580,9 @@ static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 	dma_offset = vdma[0];
 
 	/* Setup initial software IOMMU state. */
+	spin_lock_init(&iommu->lock);
 	iommu->ctx_lowest_free = 1;
-	iommu->tbl.page_table_map_base = dma_offset;
+	iommu->tbl.table_map_base = dma_offset;
 	iommu->dma_addr_mask = dma_mask;
 
 	/* Allocate and initialize the free area map.  */
@@ -600,8 +594,9 @@ static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 		return -ENOMEM;
 	}
 	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
-			    &dma_4v_iommu_ops, false /* no large_pool */,
-			    0 /* default npools */);
+			    NULL, false /* no large_pool */,
+			    0 /* default npools */,
+			    false /* want span boundary checking */);
 	sz = probe_existing_entries(pbm, &iommu->tbl);
 	if (sz)
 		printk("%s: Imported %lu TSB entries from OBP\n",
@@ -1001,17 +996,8 @@ static struct platform_driver pci_sun4v_driver = {
 	.probe		= pci_sun4v_probe,
 };
 
-static void setup_iommu_pool_hash(void)
-{
-	unsigned int i;
-
-	for_each_possible_cpu(i)
-		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
-}
-
 static int __init pci_sun4v_init(void)
 {
-	setup_iommu_pool_hash();
 	return platform_driver_register(&pci_sun4v_driver);
 }
 
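Note: on sun4v the table is initialized with a NULL lazy_flush (flushing is not needed there, as the iommu.c hunk's deleted comment noted) and npools == 0, which iommu_tbl_pool_init() expands to the default IOMMU_NR_POOLS. Spelled out as a sketch, with iommu and num_tsb_entries as in pci_sun4v_iommu_init() above:

/* Sketch of the sun4v-side pool init call, per the hunk above. */
static void example_sun4v_pool_init(struct iommu *iommu,
				    unsigned long num_tsb_entries)
{
	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    NULL,	/* no lazy flush needed on sun4v */
			    false,	/* no large pool */
			    0,		/* 0 = default IOMMU_NR_POOLS pools */
			    false);	/* keep span boundary checking */
}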
diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h
index 6be5c863f329..bbced83b32ee 100644
--- a/include/linux/iommu-common.h
+++ b/include/linux/iommu-common.h
@@ -15,41 +15,37 @@ struct iommu_pool {
 	spinlock_t	lock;
 };
 
-struct iommu_table;
-
-struct iommu_tbl_ops {
-	unsigned long (*cookie_to_index)(u64, void *);
-	void (*demap)(void *, unsigned long, unsigned long);
-	void (*reset)(struct iommu_table *);
-};
-
-struct iommu_table {
-	unsigned long page_table_map_base;
-	unsigned long page_table_shift;
+struct iommu_map_table {
+	unsigned long		table_map_base;
+	unsigned long		table_shift;
 	unsigned long		nr_pools;
-	const struct iommu_tbl_ops *iommu_tbl_ops;
+	void			(*lazy_flush)(struct iommu_map_table *);
 	unsigned long		poolsize;
-	struct iommu_pool arena_pool[IOMMU_NR_POOLS];
+	struct iommu_pool	pools[IOMMU_NR_POOLS];
 	u32			flags;
 #define	IOMMU_HAS_LARGE_POOL	0x00000001
+#define	IOMMU_NO_SPAN_BOUND	0x00000002
+#define	IOMMU_NEED_FLUSH	0x00000004
 	struct iommu_pool	large_pool;
 	unsigned long		*map;
 };
 
-extern void iommu_tbl_pool_init(struct iommu_table *iommu,
+extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
 				unsigned long num_entries,
-				u32 page_table_shift,
-				const struct iommu_tbl_ops *iommu_tbl_ops,
-				bool large_pool, u32 npools);
+				u32 table_shift,
+				void (*lazy_flush)(struct iommu_map_table *),
+				bool large_pool, u32 npools,
+				bool skip_span_boundary_check);
 
 extern unsigned long iommu_tbl_range_alloc(struct device *dev,
-				struct iommu_table *iommu,
+				struct iommu_map_table *iommu,
 				unsigned long npages,
 				unsigned long *handle,
-				unsigned int pool_hash);
+				unsigned long mask,
+				unsigned int align_order);
 
-extern void iommu_tbl_range_free(struct iommu_table *iommu,
+extern void iommu_tbl_range_free(struct iommu_map_table *iommu,
 				 u64 dma_addr, unsigned long npages,
-				 bool do_demap, void *demap_arg);
+				 unsigned long entry);
 
 #endif
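Note: the header change boils the old three-method iommu_tbl_ops down to one optional lazy_flush hook plus two flag bits. A self-contained sketch of the whole API surface as declared above; my_tbl, my_flush and the constants are illustrative, not from the patch:

#include <linux/iommu-common.h>

static struct iommu_map_table my_tbl;		/* illustrative instance */

static void my_flush(struct iommu_map_table *tbl)
{
	/* a hardware IOTLB flush would go here */
}

static int example_setup(unsigned long num_entries, unsigned long *bitmap)
{
	my_tbl.table_map_base = 0x80000000UL;	/* example DVMA base */
	my_tbl.map = bitmap;			/* caller-allocated bitmap */
	iommu_tbl_pool_init(&my_tbl, num_entries, PAGE_SHIFT,
			    my_flush,	/* or NULL to disable lazy flushing */
			    false,	/* no large pool */
			    1,		/* single pool */
			    false);	/* keep span boundary checking */
	return 0;
}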
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index fac4f35250c9..a1a517cba7ec 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -9,37 +9,72 @@
 #include <linux/iommu-helper.h>
 #include <linux/iommu-common.h>
 #include <linux/dma-mapping.h>
+#include <linux/hash.h>
 
 #ifndef DMA_ERROR_CODE
 #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
 #endif
 
-#define IOMMU_LARGE_ALLOC	15
+unsigned long iommu_large_alloc = 15;
+
+static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
+
+static inline bool need_flush(struct iommu_map_table *iommu)
+{
+	return (iommu->lazy_flush != NULL &&
+		(iommu->flags & IOMMU_NEED_FLUSH) != 0);
+}
+
+static inline void set_flush(struct iommu_map_table *iommu)
+{
+	iommu->flags |= IOMMU_NEED_FLUSH;
+}
+
+static inline void clear_flush(struct iommu_map_table *iommu)
+{
+	iommu->flags &= ~IOMMU_NEED_FLUSH;
+}
+
+static void setup_iommu_pool_hash(void)
+{
+	unsigned int i;
+	static bool do_once;
+
+	if (do_once)
+		return;
+	do_once = true;
+	for_each_possible_cpu(i)
+		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
+}
 
 /*
- * Initialize iommu_pool entries for the iommu_table. `num_entries'
+ * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
  * is the number of table entries. If `large_pool' is set to true,
  * the top 1/4 of the table will be set aside for pool allocations
- * of more than IOMMU_LARGE_ALLOC pages.
+ * of more than iommu_large_alloc pages.
  */
-extern void iommu_tbl_pool_init(struct iommu_table *iommu,
+extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
 				unsigned long num_entries,
-				u32 page_table_shift,
-				const struct iommu_tbl_ops *iommu_tbl_ops,
-				bool large_pool, u32 npools)
+				u32 table_shift,
+				void (*lazy_flush)(struct iommu_map_table *),
+				bool large_pool, u32 npools,
+				bool skip_span_boundary_check)
 {
 	unsigned int start, i;
 	struct iommu_pool *p = &(iommu->large_pool);
 
+	setup_iommu_pool_hash();
 	if (npools == 0)
 		iommu->nr_pools = IOMMU_NR_POOLS;
 	else
 		iommu->nr_pools = npools;
 	BUG_ON(npools > IOMMU_NR_POOLS);
 
-	iommu->page_table_shift = page_table_shift;
-	iommu->iommu_tbl_ops = iommu_tbl_ops;
+	iommu->table_shift = table_shift;
+	iommu->lazy_flush = lazy_flush;
 	start = 0;
+	if (skip_span_boundary_check)
+		iommu->flags |= IOMMU_NO_SPAN_BOUND;
 	if (large_pool)
 		iommu->flags |= IOMMU_HAS_LARGE_POOL;
 
@@ -48,11 +83,11 @@ extern void iommu_tbl_pool_init(struct iommu_table *iommu,
 	else
 		iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
 	for (i = 0; i < iommu->nr_pools; i++) {
-		spin_lock_init(&(iommu->arena_pool[i].lock));
-		iommu->arena_pool[i].start = start;
-		iommu->arena_pool[i].hint = start;
+		spin_lock_init(&(iommu->pools[i].lock));
+		iommu->pools[i].start = start;
+		iommu->pools[i].hint = start;
 		start += iommu->poolsize; /* start for next pool */
-		iommu->arena_pool[i].end = start - 1;
+		iommu->pools[i].end = start - 1;
 	}
 	if (!large_pool)
 		return;
@@ -65,121 +100,136 @@ extern void iommu_tbl_pool_init(struct iommu_table *iommu,
 EXPORT_SYMBOL(iommu_tbl_pool_init);
 
 unsigned long iommu_tbl_range_alloc(struct device *dev,
-				struct iommu_table *iommu,
+				struct iommu_map_table *iommu,
 				unsigned long npages,
 				unsigned long *handle,
-				unsigned int pool_hash)
+				unsigned long mask,
+				unsigned int align_order)
 {
+	unsigned int pool_hash = __this_cpu_read(iommu_pool_hash);
 	unsigned long n, end, start, limit, boundary_size;
-	struct iommu_pool *arena;
+	struct iommu_pool *pool;
 	int pass = 0;
 	unsigned int pool_nr;
 	unsigned int npools = iommu->nr_pools;
 	unsigned long flags;
 	bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
-	bool largealloc = (large_pool && npages > IOMMU_LARGE_ALLOC);
+	bool largealloc = (large_pool && npages > iommu_large_alloc);
 	unsigned long shift;
+	unsigned long align_mask = 0;
+
+	if (align_order > 0)
+		align_mask = 0xffffffffffffffffl >> (64 - align_order);
 
 	/* Sanity check */
 	if (unlikely(npages == 0)) {
-		printk_ratelimited("npages == 0\n");
+		WARN_ON_ONCE(1);
 		return DMA_ERROR_CODE;
 	}
 
 	if (largealloc) {
-		arena = &(iommu->large_pool);
-		spin_lock_irqsave(&arena->lock, flags);
+		pool = &(iommu->large_pool);
 		pool_nr = 0; /* to keep compiler happy */
 	} else {
 		/* pick out pool_nr */
 		pool_nr = pool_hash & (npools - 1);
-		arena = &(iommu->arena_pool[pool_nr]);
-
-		/* find first available unlocked pool */
-		while (!spin_trylock_irqsave(&(arena->lock), flags)) {
-			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
-			arena = &(iommu->arena_pool[pool_nr]);
-		}
+		pool = &(iommu->pools[pool_nr]);
 	}
+	spin_lock_irqsave(&pool->lock, flags);
 
 again:
 	if (pass == 0 && handle && *handle &&
-	    (*handle >= arena->start) && (*handle < arena->end))
+	    (*handle >= pool->start) && (*handle < pool->end))
 		start = *handle;
 	else
-		start = arena->hint;
+		start = pool->hint;
 
-	limit = arena->end;
+	limit = pool->end;
 
 	/* The case below can happen if we have a small segment appended
 	 * to a large, or when the previous alloc was at the very end of
-	 * the available space. If so, go back to the beginning and flush.
+	 * the available space. If so, go back to the beginning. If a
+	 * flush is needed, it will get done based on the return value
+	 * from iommu_area_alloc() below.
 	 */
-	if (start >= limit) {
-		start = arena->start;
-		if (iommu->iommu_tbl_ops->reset != NULL)
-			iommu->iommu_tbl_ops->reset(iommu);
+	if (start >= limit)
+		start = pool->start;
+	shift = iommu->table_map_base >> iommu->table_shift;
+	if (limit + shift > mask) {
+		limit = mask - shift + 1;
+		/* If we're constrained on address range, first try
+		 * at the masked hint to avoid O(n) search complexity,
+		 * but on second pass, start at 0 in pool 0.
+		 */
+		if ((start & mask) >= limit || pass > 0) {
+			spin_unlock(&(pool->lock));
+			pool = &(iommu->pools[0]);
+			spin_lock(&(pool->lock));
+			start = pool->start;
+		} else {
+			start &= mask;
+		}
 	}
 
 	if (dev)
 		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				      1 << iommu->page_table_shift);
+				      1 << iommu->table_shift);
 	else
-		boundary_size = ALIGN(1ULL << 32, 1 << iommu->page_table_shift);
+		boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);
 
-	shift = iommu->page_table_map_base >> iommu->page_table_shift;
-	boundary_size = boundary_size >> iommu->page_table_shift;
+	boundary_size = boundary_size >> iommu->table_shift;
 	/*
-	 * if the iommu has a non-trivial cookie <-> index mapping, we set
+	 * if the skip_span_boundary_check had been set during init, we set
 	 * things up so that iommu_is_span_boundary() merely checks if the
 	 * (index + npages) < num_tsb_entries
 	 */
-	if (iommu->iommu_tbl_ops->cookie_to_index != NULL) {
+	if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
 		shift = 0;
 		boundary_size = iommu->poolsize * iommu->nr_pools;
 	}
 	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
-			     boundary_size, 0);
+			     boundary_size, align_mask);
 	if (n == -1) {
 		if (likely(pass == 0)) {
 			/* First failure, rescan from the beginning.  */
-			arena->hint = arena->start;
-			if (iommu->iommu_tbl_ops->reset != NULL)
-				iommu->iommu_tbl_ops->reset(iommu);
+			pool->hint = pool->start;
+			set_flush(iommu);
 			pass++;
 			goto again;
 		} else if (!largealloc && pass <= iommu->nr_pools) {
-			spin_unlock(&(arena->lock));
+			spin_unlock(&(pool->lock));
 			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
-			arena = &(iommu->arena_pool[pool_nr]);
-			while (!spin_trylock(&(arena->lock))) {
-				pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
-				arena = &(iommu->arena_pool[pool_nr]);
-			}
-			arena->hint = arena->start;
+			pool = &(iommu->pools[pool_nr]);
+			spin_lock(&(pool->lock));
+			pool->hint = pool->start;
+			set_flush(iommu);
 			pass++;
 			goto again;
 		} else {
 			/* give up */
-			spin_unlock_irqrestore(&(arena->lock), flags);
-			return DMA_ERROR_CODE;
+			n = DMA_ERROR_CODE;
+			goto bail;
 		}
 	}
+	if (n < pool->hint || need_flush(iommu)) {
+		clear_flush(iommu);
+		iommu->lazy_flush(iommu);
+	}
 
 	end = n + npages;
-
-	arena->hint = end;
+	pool->hint = end;
 
 	/* Update handle for SG allocations */
 	if (handle)
 		*handle = end;
-	spin_unlock_irqrestore(&(arena->lock), flags);
+bail:
+	spin_unlock_irqrestore(&(pool->lock), flags);
 
 	return n;
 }
 EXPORT_SYMBOL(iommu_tbl_range_alloc);
 
-static struct iommu_pool *get_pool(struct iommu_table *tbl,
+static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
 				   unsigned long entry)
 {
 	struct iommu_pool *p;
@@ -193,31 +243,27 @@ static struct iommu_pool *get_pool(struct iommu_table *tbl,
 		unsigned int pool_nr = entry / tbl->poolsize;
 
 		BUG_ON(pool_nr >= tbl->nr_pools);
-		p = &tbl->arena_pool[pool_nr];
+		p = &tbl->pools[pool_nr];
 	}
 	return p;
 }
 
-void iommu_tbl_range_free(struct iommu_table *iommu, u64 dma_addr,
-			  unsigned long npages, bool do_demap, void *demap_arg)
+/* Caller supplies the index of the entry into the iommu map table
+ * itself when the mapping from dma_addr to the entry is not the
+ * default addr->entry mapping below.
+ */
+void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
+			  unsigned long npages, unsigned long entry)
 {
-	unsigned long entry;
 	struct iommu_pool *pool;
 	unsigned long flags;
-	unsigned long shift = iommu->page_table_shift;
+	unsigned long shift = iommu->table_shift;
 
-	if (iommu->iommu_tbl_ops->cookie_to_index != NULL) {
-		entry = (*iommu->iommu_tbl_ops->cookie_to_index)(dma_addr,
-								 demap_arg);
-	} else {
-		entry = (dma_addr - iommu->page_table_map_base) >> shift;
-	}
+	if (entry == DMA_ERROR_CODE) /* use default addr->entry mapping */
+		entry = (dma_addr - iommu->table_map_base) >> shift;
 	pool = get_pool(iommu, entry);
 
 	spin_lock_irqsave(&(pool->lock), flags);
-	if (do_demap && iommu->iommu_tbl_ops->demap != NULL)
-		(*iommu->iommu_tbl_ops->demap)(demap_arg, entry, npages);
-
 	bitmap_clear(iommu->map, entry, npages);
 	spin_unlock_irqrestore(&(pool->lock), flags);
 }
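Note: the behavioral core of the rewrite is the deferred flush. Instead of invoking a reset hook eagerly whenever the search wraps, a wrap only sets IOMMU_NEED_FLUSH, and the flush runs when an allocation might actually reuse recently freed entries. A condensed sketch of that decision, extracted from iommu_tbl_range_alloc() above (need_flush()/clear_flush() are the static helpers added at the top of this file; as in the patch, this assumes lazy_flush was registered as non-NULL):

/* Sketch: the lazy-flush decision, condensed from the code above. */
static void example_lazy_flush(struct iommu_map_table *iommu,
			       struct iommu_pool *pool, unsigned long n)
{
	/* A wrap (pool->hint reset to pool->start) records a pending
	 * flush via set_flush(). It is paid here only when the new
	 * allocation landed below the previous hint, or when the
	 * IOMMU_NEED_FLUSH flag is still set from an earlier wrap.
	 */
	if (n < pool->hint || need_flush(iommu)) {
		clear_flush(iommu);
		iommu->lazy_flush(iommu);
	}
}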
