 arch/sparc/kernel/ldc.c        |  2 +-
 arch/sparc/kernel/perf_event.c | 35 +++--------------------------------
 lib/iommu-common.c             | 20 ++++++++++----------
 3 files changed, 14 insertions(+), 43 deletions(-)
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index d2ae0f70059e..7d3ca30fcd15 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -2290,7 +2290,7 @@ void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
 	if (len & (8UL - 1))
 		return ERR_PTR(-EINVAL);
 
-	buf = kzalloc(len, GFP_KERNEL);
+	buf = kzalloc(len, GFP_ATOMIC);
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
 
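ldc_alloc_exp_dring() can be reached from contexts that must not sleep, and GFP_KERNEL allocations may block on memory reclaim; GFP_ATOMIC never sleeps, at the cost of being allowed to fail under pressure. A minimal sketch of the hazard the flag change avoids, with a hypothetical caller holding a spinlock (not the actual LDC call chain):

	/* Hypothetical caller, for illustration only. */
	static void *alloc_under_lock(spinlock_t *lock, size_t len)
	{
		void *buf;

		spin_lock(lock);
		/* GFP_KERNEL could sleep here while the lock is held;
		 * GFP_ATOMIC allocates without sleeping, so it is safe
		 * in this context but must tolerate a NULL return. */
		buf = kzalloc(len, GFP_ATOMIC);
		spin_unlock(lock);
		return buf;
	}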
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 86eebfa3b158..59cf917a77b5 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -737,25 +737,9 @@ static void sparc_vt_write_pmc(int idx, u64 val)
 {
 	u64 pcr;
 
-	/* There seems to be an internal latch on the overflow event
-	 * on SPARC-T4 that prevents it from triggering unless you
-	 * update the PIC exactly as we do here. The requirement
-	 * seems to be that you have to turn off event counting in the
-	 * PCR around the PIC update.
-	 *
-	 * For example, after the following sequence:
-	 *
-	 * 1) set PIC to -1
-	 * 2) enable event counting and overflow reporting in PCR
-	 * 3) overflow triggers, softint 15 handler invoked
-	 * 4) clear OV bit in PCR
-	 * 5) write PIC to -1
-	 *
-	 * a subsequent overflow event will not trigger. This
-	 * sequence works on SPARC-T3 and previous chips.
-	 */
 	pcr = pcr_ops->read_pcr(idx);
-	pcr_ops->write_pcr(idx, PCR_N4_PICNPT);
+	/* ensure ov and ntc are reset */
+	pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
 
 	pcr_ops->write_pic(idx, val & 0xffffffff);
 
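Read together with the removal of sparc_m7_write_pmc() below, the helper now reads as follows (assembled from this hunk plus the trailing write_pcr() call visible in the deleted, byte-identical M7 copy; no new logic):

	static void sparc_vt_write_pmc(int idx, u64 val)
	{
		u64 pcr;

		pcr = pcr_ops->read_pcr(idx);
		/* ensure ov and ntc are reset */
		pcr &= ~(PCR_N4_OV | PCR_N4_NTC);

		pcr_ops->write_pic(idx, val & 0xffffffff);

		pcr_ops->write_pcr(idx, pcr);
	}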
@@ -792,25 +776,12 @@ static const struct sparc_pmu niagara4_pmu = {
 	.num_pic_regs	= 4,
 };
 
-static void sparc_m7_write_pmc(int idx, u64 val)
-{
-	u64 pcr;
-
-	pcr = pcr_ops->read_pcr(idx);
-	/* ensure ov and ntc are reset */
-	pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
-
-	pcr_ops->write_pic(idx, val & 0xffffffff);
-
-	pcr_ops->write_pcr(idx, pcr);
-}
-
 static const struct sparc_pmu sparc_m7_pmu = {
 	.event_map	= niagara4_event_map,
 	.cache_map	= &niagara4_cache_map,
 	.max_events	= ARRAY_SIZE(niagara4_perfmon_event_map),
 	.read_pmc	= sparc_vt_read_pmc,
-	.write_pmc	= sparc_m7_write_pmc,
+	.write_pmc	= sparc_vt_write_pmc,
 	.upper_shift	= 5,
 	.lower_shift	= 5,
 	.event_mask	= 0x7ff,
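.write_pmc is a function pointer in struct sparc_pmu, so the M7 table only needs to point at the shared helper; no call sites change. A stand-alone sketch of the dispatch pattern (names here are hypothetical, not the kernel's):

	#include <stdio.h>
	#include <stdint.h>

	struct pmu_ops {
		void (*write_pmc)(int idx, uint64_t val);	/* per-chip hook */
	};

	static void vt_write(int idx, uint64_t val)
	{
		printf("pmc%d <- %llu\n", idx, (unsigned long long)val);
	}

	/* Two chip descriptions sharing one implementation, as above. */
	static const struct pmu_ops niagara4_ops = { .write_pmc = vt_write };
	static const struct pmu_ops m7_ops       = { .write_pmc = vt_write };

	int main(void)
	{
		m7_ops.write_pmc(0, 42);	/* dispatches through the shared hook */
		return 0;
	}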
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index a1a517cba7ec..df30632f0bef 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -15,9 +15,9 @@
 #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
 #endif
 
-unsigned long iommu_large_alloc = 15;
+static unsigned long iommu_large_alloc = 15;
 
-static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
+static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
 
 static inline bool need_flush(struct iommu_map_table *iommu)
 {
@@ -44,7 +44,7 @@ static void setup_iommu_pool_hash(void)
 		return;
 	do_once = true;
 	for_each_possible_cpu(i)
-		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
+		per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
 }
 
 /*
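setup_iommu_pool_hash() runs once and stores a hash of each CPU id in the per-CPU variable, so that iommu_tbl_range_alloc() can later spread concurrent allocators across pools. A user-space sketch of the spreading effect, assuming a multiplicative hash in the style of the kernel's hash_32() and an illustrative IOMMU_POOL_HASHBITS of 4:

	#include <stdio.h>
	#include <stdint.h>

	#define IOMMU_POOL_HASHBITS	4	/* assumed for illustration */
	#define NPOOLS			4	/* power of two, as the allocator expects */

	/* Multiplicative hash in the style of hash_32(); the multiplier is
	 * one historical kernel constant, used here only for illustration. */
	static uint32_t hash_32(uint32_t val, unsigned int bits)
	{
		return (val * 0x9e370001u) >> (32 - bits);
	}

	int main(void)
	{
		for (uint32_t cpu = 0; cpu < 8; cpu++)
			printf("cpu %u -> pool %u\n", cpu,
			       hash_32(cpu, IOMMU_POOL_HASHBITS) & (NPOOLS - 1));
		return 0;
	}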
@@ -53,12 +53,12 @@ static void setup_iommu_pool_hash(void)
  * the top 1/4 of the table will be set aside for pool allocations
  * of more than iommu_large_alloc pages.
  */
-extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
-				unsigned long num_entries,
-				u32 table_shift,
-				void (*lazy_flush)(struct iommu_map_table *),
-				bool large_pool, u32 npools,
-				bool skip_span_boundary_check)
+void iommu_tbl_pool_init(struct iommu_map_table *iommu,
+			 unsigned long num_entries,
+			 u32 table_shift,
+			 void (*lazy_flush)(struct iommu_map_table *),
+			 bool large_pool, u32 npools,
+			 bool skip_span_boundary_check)
 {
 	unsigned int start, i;
 	struct iommu_pool *p = &(iommu->large_pool);
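On a definition, extern is legal C but redundant: the declaration in the header already carries it, and this sort of redundancy (like the previously non-static iommu_large_alloc above) is what static-analysis builds tend to flag. The conventional header/source split, shown with hypothetical names:

	/* foo.h -- declaration: extern (implicit for functions) lives here */
	extern void foo_init(unsigned long num_entries);

	/* foo.c -- definition: no extern */
	void foo_init(unsigned long num_entries)
	{
		/* ... */
	}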
@@ -106,7 +106,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
 				unsigned long mask,
 				unsigned int align_order)
 {
-	unsigned int pool_hash = __this_cpu_read(iommu_pool_hash);
+	unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
 	unsigned long n, end, start, limit, boundary_size;
 	struct iommu_pool *pool;
 	int pass = 0;
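pool_hash is read once from the executing CPU's slot; further down in iommu_tbl_range_alloc(), outside this hunk, it is masked into a pool index. A toy model of that selection step, with the field layout assumed rather than copied from iommu-common.h:

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_POOLS 4	/* assumed power of two */

	struct pool { unsigned long hint; };

	static struct pool pools[NR_POOLS], large_pool;

	/* Large requests go to the dedicated pool; everything else is
	 * spread across pools by the per-CPU hash, as in the allocator. */
	static struct pool *pick_pool(bool largealloc, unsigned int pool_hash)
	{
		return largealloc ? &large_pool
				  : &pools[pool_hash & (NR_POOLS - 1)];
	}

	int main(void)
	{
		printf("hash 0x2f -> pool %td\n",
		       pick_pool(false, 0x2f) - pools);
		return 0;
	}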