| author | David S. Miller <davem@davemloft.net> | 2015-04-18 15:35:09 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2015-04-18 15:35:09 -0400 |
| commit | ccb301862aa51ea7c10c10b440f3e8bbeac5b720 | |
| tree | 2567af96358ff14960a686df6e253244dcf3e99d /include/linux | |
| parent | c12f048ffdf3a5802239426dc290290929268dc9 | |
| parent | 2f0c0fdc085c0d415457a1c52344f72e12c4cec6 | |
Merge branch 'iommu-generic-allocator'
Sowmini Varadhan says:
====================
Generic IOMMU pooled allocator
Investigation of network performance on Sparc shows a high
degree of locking contention in the IOMMU allocator, and the
PowerPC code was observed to have a better locking model.
This patch series extracts the generic parts of the PowerPC
code so that they can be shared across multiple PCI devices
and architectures. (A minimal sketch of the per-pool locking
idea follows the changelog below.)
v10: resend of patch v9 without the RFC tag and with a new mail
Message-Id (the previous non-RFC attempt did not show up in the
patchwork queue?)
Full revision history below:
v2 changes:
- incorporate David Miller's editorial comments: sparc-specific
fields moved from iommu-common into sparc's iommu_64.h
- make the npools value an input parameter, for the case when
the iommu map size is not very large
- cookie_to_index mapping, and optimizations for the span-boundary
check, for use cases such as LDC.
v3: eliminate iommu_sparc, rearrange the ->demap indirection to
be invoked under the pool lock.
v4: David Miller review changes:
- s/IOMMU_ERROR_CODE/DMA_ERROR_CODE
- page_table_map_base and page_table_shift are unsigned long, not u32.
v5: removed the ->cookie_to_index and ->demap indirections from
iommu_tbl_ops. The caller needs to call these functions as needed,
before invoking the generic arena allocator functions.
Added the "skip_span_boundary" argument to iommu_tbl_pool_init() for
those callers, like LDC, which do not care about span boundary checks.
v6: removed iommu_tbl_ops, and instead pass ->flush_all as
an indirection to iommu_tbl_pool_init(); only invoke ->flush_all
when there is no large_pool, based on the assumption that large-pool
usage is infrequent.
v7: moved pool_hash initialization to lib/iommu-common.c and cleaned up
code duplication from sun4v/sun4u/ldc.
v8: Addresses BenH's comments, with one exception: I've left the
IOMMU_POOL_HASH as is, so that powerpc can tailor it to their
convenience. Discarded the trylock in favor of a simple spin_lock
to acquire the pool.
v9: Addresses the latest BenH comments: need_flush checks, and added
support for dma mask and align_order.
v10: resend without the RFC tag, with a new mail Message-Id.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
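
To make the locking model concrete, here is a minimal sketch of the
per-pool allocation idea. This is an illustration, not code from the
series: pick_pool(), alloc_sketch(), and the power-of-two nr_pools
assumption are mine, and the real allocator additionally handles the
large pool, lazy flushing, the dma mask, alignment, and retrying in
other pools when one is full.

```c
/*
 * Illustrative sketch only -- not the series' code. Assumes nr_pools
 * is a power of two and omits the large pool, lazy flush, dma mask,
 * alignment, and retry-in-the-next-pool logic of the real allocator.
 */
#include <linux/bitmap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/iommu-common.h>

static unsigned int pick_pool(struct iommu_map_table *tbl)
{
	/* spread concurrent callers across pools by CPU id */
	return raw_smp_processor_id() & (tbl->nr_pools - 1);
}

static unsigned long alloc_sketch(struct iommu_map_table *tbl,
				  unsigned long npages)
{
	struct iommu_pool *pool = &tbl->pools[pick_pool(tbl)];
	unsigned long flags, entry;

	/* contention is per pool instead of on one table-wide lock */
	spin_lock_irqsave(&pool->lock, flags);
	entry = bitmap_find_next_zero_area(tbl->map, pool->end,
					   pool->hint, npages, 0);
	if (entry < pool->end) {
		bitmap_set(tbl->map, entry, npages);
		pool->hint = entry + npages;	/* next search starts here */
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* a result >= pool->end means this pool had no free range */
	return entry;
}
```

Because each CPU hashes to a different pool most of the time, two CPUs
mapping DMA buffers concurrently usually take different spinlocks; this
is the contention reduction the series is after.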
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/iommu-common.h | 51 |
1 file changed, 51 insertions, 0 deletions
diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h
new file mode 100644
index 000000000000..bbced83b32ee
--- /dev/null
+++ b/include/linux/iommu-common.h
@@ -0,0 +1,51 @@
+#ifndef _LINUX_IOMMU_COMMON_H
+#define _LINUX_IOMMU_COMMON_H
+
+#include <linux/spinlock_types.h>
+#include <linux/device.h>
+#include <asm/page.h>
+
+#define IOMMU_POOL_HASHBITS	4
+#define IOMMU_NR_POOLS		(1 << IOMMU_POOL_HASHBITS)
+
+struct iommu_pool {
+	unsigned long	start;
+	unsigned long	end;
+	unsigned long	hint;
+	spinlock_t	lock;
+};
+
+struct iommu_map_table {
+	unsigned long		table_map_base;
+	unsigned long		table_shift;
+	unsigned long		nr_pools;
+	void			(*lazy_flush)(struct iommu_map_table *);
+	unsigned long		poolsize;
+	struct iommu_pool	pools[IOMMU_NR_POOLS];
+	u32			flags;
+#define IOMMU_HAS_LARGE_POOL	0x00000001
+#define IOMMU_NO_SPAN_BOUND	0x00000002
+#define IOMMU_NEED_FLUSH	0x00000004
+	struct iommu_pool	large_pool;
+	unsigned long		*map;
+};
+
+extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
+				unsigned long num_entries,
+				u32 table_shift,
+				void (*lazy_flush)(struct iommu_map_table *),
+				bool large_pool, u32 npools,
+				bool skip_span_boundary_check);
+
+extern unsigned long iommu_tbl_range_alloc(struct device *dev,
+					   struct iommu_map_table *iommu,
+					   unsigned long npages,
+					   unsigned long *handle,
+					   unsigned long mask,
+					   unsigned int align_order);
+
+extern void iommu_tbl_range_free(struct iommu_map_table *iommu,
+				 u64 dma_addr, unsigned long npages,
+				 unsigned long entry);
+
+#endif
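
For context, here is a hedged sketch of how a driver might drive this
API. The struct fields and the three functions come from the header
above, but everything else -- my_tbl, the bitmap sizing, the
table_map_base setup, the shift value, the npools==0 "use the default"
reading, and the error-code convention -- is my assumption; the sparc
sun4u/sun4v/LDC conversions elsewhere in this series are the
authoritative callers.

```c
/*
 * Hedged usage sketch. The API comes from the header above; the
 * setup values and the error-code convention are assumptions.
 */
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/iommu-common.h>

#define MY_TBL_SHIFT	13	/* assumed IOMMU page shift (8K pages) */
#define MY_ERROR_CODE	(~0UL)	/* assumed allocation-failure value */

static struct iommu_map_table my_tbl;	/* hypothetical table */

static int my_iommu_setup(unsigned long num_entries, unsigned long dma_base)
{
	/* the caller is assumed to provide the backing bitmap */
	my_tbl.map = kzalloc(BITS_TO_LONGS(num_entries) * sizeof(long),
			     GFP_KERNEL);
	if (!my_tbl.map)
		return -ENOMEM;
	my_tbl.table_map_base = dma_base;	/* start of the DMA window */

	iommu_tbl_pool_init(&my_tbl, num_entries, MY_TBL_SHIFT,
			    NULL,	/* no lazy_flush callback */
			    false,	/* no large pool */
			    0,		/* npools: assumed "use default" */
			    false);	/* keep span-boundary checks */
	return 0;
}

static u64 my_map(struct device *dev, unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, &my_tbl, npages,
				      NULL,	/* no allocation-hint handle */
				      ~0UL,	/* no dma mask restriction */
				      0);	/* no extra alignment */
	if (entry == MY_ERROR_CODE)
		return 0;	/* caller-defined failure value */

	/* the result is an entry index into the table's DMA window */
	return my_tbl.table_map_base + (entry << my_tbl.table_shift);
}

static void my_unmap(u64 dma_addr, unsigned long npages)
{
	/* passing the error code as 'entry' is assumed to ask the
	 * library to derive the entry from dma_addr */
	iommu_tbl_range_free(&my_tbl, dma_addr, npages, MY_ERROR_CODE);
}
```

The mask and align_order arguments are the v9 additions: they let a
caller keep allocations inside a device's addressable range and request
power-of-two alignment of the returned entry.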
