diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-22 02:21:34 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-22 02:21:34 -0400 |
commit | db4fd9c5d072a20ea6b7e40276a9822e04732610 (patch) | |
tree | af8e0845d9a21c568b34d34195858c1f462b53e6 /lib | |
parent | 8aaa51b63cc3c5f3b2e72d2f0e193d9c2e00fe46 (diff) | |
parent | 0edfad5959df7379c9e554fbe8ba264ae232d321 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Pull sparc fixes from David Miller:
1) ldc_alloc_exp_dring() can be called from softirq context, so use
GFP_ATOMIC. From Sowmini Varadhan.
2) Some minor warning/build fixups for the new iommu-common code on
certain archs and with certain debug options enabled. Also from
Sowmini Varadhan.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
sparc: Use GFP_ATOMIC in ldc_alloc_exp_dring() as it can be called in softirq context
sparc64: Use M7 PMC write on all chips T4 and onward.
iommu-common: rename iommu_pool_hash to iommu_hash_common
iommu-common: fix x86_64 compiler warnings
Diffstat (limited to 'lib')
-rw-r--r-- | lib/iommu-common.c | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/lib/iommu-common.c b/lib/iommu-common.c index a1a517cba7ec..df30632f0bef 100644 --- a/lib/iommu-common.c +++ b/lib/iommu-common.c | |||
@@ -15,9 +15,9 @@ | |||
15 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) | 15 | #define DMA_ERROR_CODE (~(dma_addr_t)0x0) |
16 | #endif | 16 | #endif |
17 | 17 | ||
18 | unsigned long iommu_large_alloc = 15; | 18 | static unsigned long iommu_large_alloc = 15; |
19 | 19 | ||
20 | static DEFINE_PER_CPU(unsigned int, iommu_pool_hash); | 20 | static DEFINE_PER_CPU(unsigned int, iommu_hash_common); |
21 | 21 | ||
22 | static inline bool need_flush(struct iommu_map_table *iommu) | 22 | static inline bool need_flush(struct iommu_map_table *iommu) |
23 | { | 23 | { |
@@ -44,7 +44,7 @@ static void setup_iommu_pool_hash(void) | |||
44 | return; | 44 | return; |
45 | do_once = true; | 45 | do_once = true; |
46 | for_each_possible_cpu(i) | 46 | for_each_possible_cpu(i) |
47 | per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS); | 47 | per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS); |
48 | } | 48 | } |
49 | 49 | ||
50 | /* | 50 | /* |
@@ -53,12 +53,12 @@ static void setup_iommu_pool_hash(void) | |||
53 | * the top 1/4 of the table will be set aside for pool allocations | 53 | * the top 1/4 of the table will be set aside for pool allocations |
54 | * of more than iommu_large_alloc pages. | 54 | * of more than iommu_large_alloc pages. |
55 | */ | 55 | */ |
56 | extern void iommu_tbl_pool_init(struct iommu_map_table *iommu, | 56 | void iommu_tbl_pool_init(struct iommu_map_table *iommu, |
57 | unsigned long num_entries, | 57 | unsigned long num_entries, |
58 | u32 table_shift, | 58 | u32 table_shift, |
59 | void (*lazy_flush)(struct iommu_map_table *), | 59 | void (*lazy_flush)(struct iommu_map_table *), |
60 | bool large_pool, u32 npools, | 60 | bool large_pool, u32 npools, |
61 | bool skip_span_boundary_check) | 61 | bool skip_span_boundary_check) |
62 | { | 62 | { |
63 | unsigned int start, i; | 63 | unsigned int start, i; |
64 | struct iommu_pool *p = &(iommu->large_pool); | 64 | struct iommu_pool *p = &(iommu->large_pool); |
@@ -106,7 +106,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, | |||
106 | unsigned long mask, | 106 | unsigned long mask, |
107 | unsigned int align_order) | 107 | unsigned int align_order) |
108 | { | 108 | { |
109 | unsigned int pool_hash = __this_cpu_read(iommu_pool_hash); | 109 | unsigned int pool_hash = __this_cpu_read(iommu_hash_common); |
110 | unsigned long n, end, start, limit, boundary_size; | 110 | unsigned long n, end, start, limit, boundary_size; |
111 | struct iommu_pool *pool; | 111 | struct iommu_pool *pool; |
112 | int pass = 0; | 112 | int pass = 0; |