author	Linus Torvalds <torvalds@linux-foundation.org>	2014-02-12 01:28:47 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-02-12 01:28:47 -0500
commit	45f7fdc2ffb9d5af4dab593843e89da70d1259e3 (patch)
tree	9f2b4f4a8970fd40f60fa58d2f2f0aa3021b8965 /arch
parent	bbb1955514877182e8d20a5f62c7f8c9fd330ec7 (diff)
parent	cd15b048445d0a54f7147c35a86c5a16ef231554 (diff)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Ben Herrenschmidt:
 "Here is some powerpc goodness for -rc2.  Arguably -rc1 material more
  than -rc2 but I was travelling (again !)

  It's mostly bug fixes including regressions, but there are a couple
  of new things that I decided to drop-in.  One is a straightforward
  patch from Michael to add a bunch of P8 cache events to perf.

  The other one is a patch by myself to add the direct DMA (iommu
  bypass) for PCIe on Power8 for 64-bit capable devices.  This has been
  around for a while, I had lost track of it.  However it's been in our
  internal kernels we use for testing P8 already and it affects only P8
  related code.  Since P8 is still unreleased the risk is pretty much
  nil at this point"

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/powernv: Add iommu DMA bypass support for IODA2
  powerpc: Fix endian issues in kexec and crash dump code
  powerpc/ppc32: Fix the bug in the init of non-base exception stack for UP
  powerpc/xmon: Don't signal we've entered until we're finished printing
  powerpc/xmon: Fix timeout loop in get_output_lock()
  powerpc/xmon: Don't loop forever in get_output_lock()
  powerpc/perf: Configure BHRB filter before enabling PMU interrupts
  crypto/nx/nx-842: Fix handling of vmalloc addresses
  powerpc/pseries: Select ARCH_RANDOM on pseries
  powerpc/perf: Add Power8 cache & TLB events
  powerpc/relocate fix relocate processing in LE mode
  powerpc: Fix kdump hang issue on p8 with relocation on exception enabled.
  powerpc/pseries: Disable relocation on exception while going down during crash.
  powerpc/eeh: Drop taken reference to driver on eeh_rmv_device
  powerpc: Fix build failure in sysdev/mpic.c for MPIC_WEIRD=y
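
As a rough illustration of the direct-DMA path described above: on powernv, whether a PCIe device gets the iommu bypass now depends solely on the DMA mask its driver requests (dma_set_mask() -> ppc_md.dma_set_mask -> pnv_pci_dma_set_mask() -> phb->dma_set_mask). A minimal, hypothetical driver probe fragment is sketched below; the driver and device are made up, only the dma_set_mask() call chain comes from this merge.

/* Hypothetical probe() fragment, not part of this merge. */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;

	/* On P8/IODA2 a full 64-bit mask selects the new bypass window
	 * (dma_direct_ops); otherwise the PHB quietly keeps the device on
	 * the 32-bit TCE table (dma_iommu_ops). */
	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		/* Generic fallback for platforms that reject 64-bit masks. */
		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			return rc;
	}
	pci_set_master(pdev);
	return 0;
}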
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h      |   1
-rw-r--r--  arch/powerpc/include/asm/iommu.h            |   1
-rw-r--r--  arch/powerpc/include/asm/sections.h         |  12
-rw-r--r--  arch/powerpc/kernel/dma.c                   |  10
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c            |   8
-rw-r--r--  arch/powerpc/kernel/iommu.c                 |  12
-rw-r--r--  arch/powerpc/kernel/irq.c                   |   5
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c         |  14
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c      |   6
-rw-r--r--  arch/powerpc/kernel/reloc_64.S              |   4
-rw-r--r--  arch/powerpc/kernel/setup_32.c              |   5
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c             |  14
-rw-r--r--  arch/powerpc/perf/core-book3s.c             |   5
-rw-r--r--  arch/powerpc/perf/power8-pmu.c              | 144
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c   |  84
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c        |  10
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h        |   6
-rw-r--r--  arch/powerpc/platforms/powernv/powernv.h    |   8
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c      |   9
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig      |   1
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c      |   3
-rw-r--r--  arch/powerpc/sysdev/mpic.c                  |  38
-rw-r--r--  arch/powerpc/xmon/xmon.c                    |  24
23 files changed, 379 insertions, 45 deletions
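
A recurring pattern in the kexec/crash-dump endian fixes below (machine_kexec.c and machine_kexec_64.c) is that values published to the next kernel through device-tree properties must be big-endian regardless of the running kernel's endianness, so each exported value is byte-swapped into a static variable that the struct property points at. A stand-alone sketch of that pattern follows; the property name and helper are hypothetical, only the cpu_to_be_ulong trick mirrors the patch.

#include <linux/kernel.h>
#include <linux/compiler.h>	/* __PASTE */
#include <linux/of.h>
#include <asm/byteorder.h>

/* Same trick as machine_kexec.c: pick cpu_to_be32 or cpu_to_be64 to match
 * the width of unsigned long on this build. */
#define cpu_to_be_ulong	__PASTE(cpu_to_be, BITS_PER_LONG)

static unsigned long example_value;	/* must outlive the property */

static struct property example_prop = {
	.name	= "linux,example-value",	/* hypothetical property name */
	.length	= sizeof(unsigned long),
	.value	= &example_value,
};

/* Convert to big-endian first, then attach the property to the node. */
static void export_example_value(struct device_node *node, unsigned long val)
{
	example_value = cpu_to_be_ulong(val);
	of_add_property(node, &example_prop);
}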
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index e27e9ad6818e..150866b2a3fe 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -134,6 +134,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
 }
 
 extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern int __dma_set_mask(struct device *dev, u64 dma_mask);
 
 #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
 
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index f7a8036579b5..42632c7a2a4e 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -77,6 +77,7 @@ struct iommu_table {
 #ifdef CONFIG_IOMMU_API
 	struct iommu_group *it_group;
 #endif
+	void (*set_bypass)(struct iommu_table *tbl, bool enable);
 };
 
 /* Pure 2^n version of get_order */
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 4ee06fe15de4..d0e784e0ff48 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,6 +8,7 @@
 
 #ifdef __powerpc64__
 
+extern char __start_interrupts[];
 extern char __end_interrupts[];
 
 extern char __prom_init_toc_start[];
@@ -21,6 +22,17 @@ static inline int in_kernel_text(unsigned long addr)
 	return 0;
 }
 
+static inline int overlaps_interrupt_vector_text(unsigned long start,
+					unsigned long end)
+{
+	unsigned long real_start, real_end;
+	real_start = __start_interrupts - _stext;
+	real_end = __end_interrupts - _stext;
+
+	return start < (unsigned long)__va(real_end) &&
+		(unsigned long)__va(real_start) < end;
+}
+
 static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
 {
 	return start < (unsigned long)__init_end &&
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 8032b97ccdcb..ee78f6e49d64 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -191,12 +191,10 @@ EXPORT_SYMBOL(dma_direct_ops);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
 
-int dma_set_mask(struct device *dev, u64 dma_mask)
+int __dma_set_mask(struct device *dev, u64 dma_mask)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-	if (ppc_md.dma_set_mask)
-		return ppc_md.dma_set_mask(dev, dma_mask);
 	if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
 		return dma_ops->set_dma_mask(dev, dma_mask);
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
@@ -204,6 +202,12 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
 	*dev->dma_mask = dma_mask;
 	return 0;
 }
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (ppc_md.dma_set_mask)
+		return ppc_md.dma_set_mask(dev, dma_mask);
+	return __dma_set_mask(dev, dma_mask);
+}
 EXPORT_SYMBOL(dma_set_mask);
 
 u64 dma_get_required_mask(struct device *dev)
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 7bb30dca4e19..fdc679d309ec 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -362,9 +362,13 @@ static void *eeh_rmv_device(void *data, void *userdata)
 	 */
 	if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
 		return NULL;
+
 	driver = eeh_pcid_get(dev);
-	if (driver && driver->err_handler)
-		return NULL;
+	if (driver) {
+		eeh_pcid_put(dev);
+		if (driver->err_handler)
+			return NULL;
+	}
 
 	/* Remove it from PCI subsystem */
 	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d773dd440a45..88e3ec6e1d96 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1088,6 +1088,14 @@ int iommu_take_ownership(struct iommu_table *tbl)
 	memset(tbl->it_map, 0xff, sz);
 	iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
 
+	/*
+	 * Disable iommu bypass, otherwise the user can DMA to all of
+	 * our physical memory via the bypass window instead of just
+	 * the pages that has been explicitly mapped into the iommu
+	 */
+	if (tbl->set_bypass)
+		tbl->set_bypass(tbl, false);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_take_ownership);
@@ -1102,6 +1110,10 @@ void iommu_release_ownership(struct iommu_table *tbl)
 	/* Restore bit#0 set by iommu_init_table() */
 	if (tbl->it_offset == 0)
 		set_bit(0, tbl->it_map);
+
+	/* The kernel owns the device now, we can restore the iommu bypass */
+	if (tbl->set_bypass)
+		tbl->set_bypass(tbl, true);
 }
 EXPORT_SYMBOL_GPL(iommu_release_ownership);
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9729b23bfb0a..1d0848bba049 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -559,8 +559,13 @@ void exc_lvl_ctx_init(void)
 #ifdef CONFIG_PPC64
 		cpu_nr = i;
 #else
+#ifdef CONFIG_SMP
 		cpu_nr = get_hard_smp_processor_id(i);
+#else
+		cpu_nr = 0;
 #endif
+#endif
+
 		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
 		tp = critirq_ctx[cpu_nr];
 		tp->cpu = cpu_nr;
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 75d4f7340da8..015ae55c1868 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -196,7 +196,9 @@ int overlaps_crashkernel(unsigned long start, unsigned long size)
 
 /* Values we need to export to the second kernel via the device tree. */
 static phys_addr_t kernel_end;
+static phys_addr_t crashk_base;
 static phys_addr_t crashk_size;
+static unsigned long long mem_limit;
 
 static struct property kernel_end_prop = {
 	.name = "linux,kernel-end",
@@ -207,7 +209,7 @@ static struct property kernel_end_prop = {
 static struct property crashk_base_prop = {
 	.name = "linux,crashkernel-base",
 	.length = sizeof(phys_addr_t),
-	.value = &crashk_res.start,
+	.value = &crashk_base
 };
 
 static struct property crashk_size_prop = {
@@ -219,9 +221,11 @@ static struct property crashk_size_prop = {
 static struct property memory_limit_prop = {
 	.name = "linux,memory-limit",
 	.length = sizeof(unsigned long long),
-	.value = &memory_limit,
+	.value = &mem_limit,
 };
 
+#define cpu_to_be_ulong	__PASTE(cpu_to_be, BITS_PER_LONG)
+
 static void __init export_crashk_values(struct device_node *node)
 {
 	struct property *prop;
@@ -237,8 +241,9 @@ static void __init export_crashk_values(struct device_node *node)
 		of_remove_property(node, prop);
 
 	if (crashk_res.start != 0) {
+		crashk_base = cpu_to_be_ulong(crashk_res.start),
 		of_add_property(node, &crashk_base_prop);
-		crashk_size = resource_size(&crashk_res);
+		crashk_size = cpu_to_be_ulong(resource_size(&crashk_res));
 		of_add_property(node, &crashk_size_prop);
 	}
 
@@ -246,6 +251,7 @@ static void __init export_crashk_values(struct device_node *node)
 	 * memory_limit is required by the kexec-tools to limit the
 	 * crash regions to the actual memory used.
 	 */
+	mem_limit = cpu_to_be_ulong(memory_limit);
 	of_update_property(node, &memory_limit_prop);
 }
 
@@ -264,7 +270,7 @@ static int __init kexec_setup(void)
 		of_remove_property(node, prop);
 
 	/* information needed by userspace when using default_machine_kexec */
-	kernel_end = __pa(_end);
+	kernel_end = cpu_to_be_ulong(__pa(_end));
 	of_add_property(node, &kernel_end_prop);
 
 	export_crashk_values(node);
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index be4e6d648f60..59d229a2a3e0 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -369,6 +369,7 @@ void default_machine_kexec(struct kimage *image)
 
 /* Values we need to export to the second kernel via the device tree. */
 static unsigned long htab_base;
+static unsigned long htab_size;
 
 static struct property htab_base_prop = {
 	.name = "linux,htab-base",
@@ -379,7 +380,7 @@ static struct property htab_base_prop = {
 static struct property htab_size_prop = {
 	.name = "linux,htab-size",
 	.length = sizeof(unsigned long),
-	.value = &htab_size_bytes,
+	.value = &htab_size,
 };
 
 static int __init export_htab_values(void)
@@ -403,8 +404,9 @@ static int __init export_htab_values(void)
 	if (prop)
 		of_remove_property(node, prop);
 
-	htab_base = __pa(htab_address);
+	htab_base = cpu_to_be64(__pa(htab_address));
 	of_add_property(node, &htab_base_prop);
+	htab_size = cpu_to_be64(htab_size_bytes);
 	of_add_property(node, &htab_size_prop);
 
 	of_node_put(node);
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index b47a0e1ab001..1482327cfeba 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -69,8 +69,8 @@ _GLOBAL(relocate)
  * R_PPC64_RELATIVE ones.
  */
 	mtctr	r8
-5:	lwz	r0,12(9)	/* ELF64_R_TYPE(reloc->r_info) */
-	cmpwi	r0,R_PPC64_RELATIVE
+5:	ld	r0,8(9)		/* ELF64_R_TYPE(reloc->r_info) */
+	cmpdi	r0,R_PPC64_RELATIVE
 	bne	6f
 	ld	r6,0(r9)	/* reloc->r_offset */
 	ld	r0,16(r9)	/* reloc->r_addend */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 2b0da27eaee4..04cc4fcca78b 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -247,7 +247,12 @@ static void __init exc_lvl_early_init(void)
 	/* interrupt stacks must be in lowmem, we get that for free on ppc32
 	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
 	for_each_possible_cpu(i) {
+#ifdef CONFIG_SMP
 		hw_cpu = get_hard_smp_processor_id(i);
+#else
+		hw_cpu = 0;
+#endif
+
 		critirq_ctx[hw_cpu] = (struct thread_info *)
 			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index de6881259aef..d766d6ee33fe 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -207,6 +207,20 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		if (overlaps_kernel_text(vaddr, vaddr + step))
 			tprot &= ~HPTE_R_N;
 
+		/*
+		 * If relocatable, check if it overlaps interrupt vectors that
+		 * are copied down to real 0. For relocatable kernel
+		 * (e.g. kdump case) we copy interrupt vectors down to real
+		 * address 0. Mark that region as executable. This is
+		 * because on p8 system with relocation on exception feature
+		 * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence
+		 * in order to execute the interrupt handlers in virtual
+		 * mode the vector region need to be marked as executable.
+		 */
+		if ((PHYSICAL_START > MEMORY_START) &&
+			overlaps_interrupt_vector_text(vaddr, vaddr + step))
+				tprot &= ~HPTE_R_N;
+
 		hash = hpt_hash(vpn, shift, ssize);
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 29b89e863d7c..67cf22083f4c 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1147,6 +1147,9 @@ static void power_pmu_enable(struct pmu *pmu)
 	mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
 
 	mb();
+	if (cpuhw->bhrb_users)
+		ppmu->config_bhrb(cpuhw->bhrb_filter);
+
 	write_mmcr0(cpuhw, mmcr0);
 
 	/*
@@ -1158,8 +1161,6 @@ static void power_pmu_enable(struct pmu *pmu)
 	}
 
  out:
-	if (cpuhw->bhrb_users)
-		ppmu->config_bhrb(cpuhw->bhrb_filter);
 
 	local_irq_restore(flags);
 }
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index a3f7abd2f13f..96cee20dcd34 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -25,6 +25,37 @@
 #define PM_BRU_FIN			0x10068
 #define PM_BR_MPRED_CMPL		0x400f6
 
+/* All L1 D cache load references counted at finish, gated by reject */
+#define PM_LD_REF_L1			0x100ee
+/* Load Missed L1 */
+#define PM_LD_MISS_L1			0x3e054
+/* Store Missed L1 */
+#define PM_ST_MISS_L1			0x300f0
+/* L1 cache data prefetches */
+#define PM_L1_PREF			0x0d8b8
+/* Instruction fetches from L1 */
+#define PM_INST_FROM_L1			0x04080
+/* Demand iCache Miss */
+#define PM_L1_ICACHE_MISS		0x200fd
+/* Instruction Demand sectors wriittent into IL1 */
+#define PM_L1_DEMAND_WRITE		0x0408c
+/* Instruction prefetch written into IL1 */
+#define PM_IC_PREF_WRITE		0x0408e
+/* The data cache was reloaded from local core's L3 due to a demand load */
+#define PM_DATA_FROM_L3			0x4c042
+/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+#define PM_DATA_FROM_L3MISS		0x300fe
+/* All successful D-side store dispatches for this thread */
+#define PM_L2_ST			0x17080
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+#define PM_L2_ST_MISS			0x17082
+/* Total HW L3 prefetches(Load+store) */
+#define PM_L3_PREF_ALL			0x4e052
+/* Data PTEG reload */
+#define PM_DTLB_MISS			0x300fc
+/* ITLB Reloaded */
+#define PM_ITLB_MISS			0x400fc
+
 
 /*
  * Raw event encoding for POWER8:
@@ -557,6 +588,8 @@ static int power8_generic_events[] = {
 	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_FIN,
 	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
+	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
+	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
 };
 
 static u64 power8_bhrb_filter_map(u64 branch_sample_type)
@@ -596,6 +629,116 @@ static void power8_config_bhrb(u64 pmu_bhrb_filter)
 	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
 }
 
+#define C(x)	PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+	[ C(L1D) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
+			[ C(RESULT_MISS) ] = PM_LD_MISS_L1,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = PM_ST_MISS_L1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(L1I) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
+			[ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
+			[ C(RESULT_MISS) ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(LL) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
+			[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = PM_L2_ST,
+			[ C(RESULT_MISS) ] = PM_L2_ST_MISS,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
+			[ C(RESULT_MISS) ] = 0,
+		},
+	},
+	[ C(DTLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = PM_DTLB_MISS,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+	},
+	[ C(ITLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS) ] = PM_ITLB_MISS,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+	},
+	[ C(BPU) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = PM_BRU_FIN,
+			[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+	},
+	[ C(NODE) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS) ] = -1,
+		},
+	},
+};
+
+#undef C
+
 static struct power_pmu power8_pmu = {
 	.name			= "POWER8",
 	.n_counter		= 6,
@@ -611,6 +754,7 @@ static struct power_pmu power8_pmu = {
 	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
 	.n_generic		= ARRAY_SIZE(power8_generic_events),
 	.generic_events		= power8_generic_events,
+	.cache_events		= &power8_cache_events,
 	.attr_groups		= power8_pmu_attr_groups,
 	.bhrb_nr		= 32,
 };
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 7d6dcc6d5fa9..3b2b4fb3585b 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -21,6 +21,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/msi.h>
+#include <linux/memblock.h>
 
 #include <asm/sections.h>
 #include <asm/io.h>
@@ -460,9 +461,39 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
 		return;
 
 	pe = &phb->ioda.pe_array[pdn->pe_number];
+	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
 	set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
 }
 
+static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
+				     struct pci_dev *pdev, u64 dma_mask)
+{
+	struct pci_dn *pdn = pci_get_pdn(pdev);
+	struct pnv_ioda_pe *pe;
+	uint64_t top;
+	bool bypass = false;
+
+	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+		return -ENODEV;;
+
+	pe = &phb->ioda.pe_array[pdn->pe_number];
+	if (pe->tce_bypass_enabled) {
+		top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
+		bypass = (dma_mask >= top);
+	}
+
+	if (bypass) {
+		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
+		set_dma_ops(&pdev->dev, &dma_direct_ops);
+		set_dma_offset(&pdev->dev, pe->tce_bypass_base);
+	} else {
+		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
+		set_dma_ops(&pdev->dev, &dma_iommu_ops);
+		set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+	}
+	return 0;
+}
+
 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
 {
 	struct pci_dev *dev;
@@ -657,6 +688,56 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
 		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
 }
 
+static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
+{
+	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
+					      tce32_table);
+	uint16_t window_id = (pe->pe_number << 1 ) + 1;
+	int64_t rc;
+
+	pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
+	if (enable) {
+		phys_addr_t top = memblock_end_of_DRAM();
+
+		top = roundup_pow_of_two(top);
+		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+						     pe->pe_number,
+						     window_id,
+						     pe->tce_bypass_base,
+						     top);
+	} else {
+		rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+						     pe->pe_number,
+						     window_id,
+						     pe->tce_bypass_base,
+						     0);
+
+		/*
+		 * We might want to reset the DMA ops of all devices on
+		 * this PE. However in theory, that shouldn't be necessary
+		 * as this is used for VFIO/KVM pass-through and the device
+		 * hasn't yet been returned to its kernel driver
+		 */
+	}
+	if (rc)
+		pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
+	else
+		pe->tce_bypass_enabled = enable;
+}
+
+static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
+					  struct pnv_ioda_pe *pe)
+{
+	/* TVE #1 is selected by PCI address bit 59 */
+	pe->tce_bypass_base = 1ull << 59;
+
+	/* Install set_bypass callback for VFIO */
+	pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;
+
+	/* Enable bypass by default */
+	pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
+}
+
 static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
 				       struct pnv_ioda_pe *pe)
 {
@@ -727,6 +808,8 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
 	else
 		pnv_ioda_setup_bus_dma(pe, pe->pbus);
 
+	/* Also create a bypass window */
+	pnv_pci_ioda2_setup_bypass_pe(phb, pe);
 	return;
 fail:
 	if (pe->tce32_seg >= 0)
@@ -1286,6 +1369,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
 
 	/* Setup TCEs */
 	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
+	phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;
 
 	/* Setup shutdown function for kexec */
 	phb->shutdown = pnv_pci_ioda_shutdown;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b555ebc57ef5..95633d79ef5d 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -634,6 +634,16 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
 		pnv_pci_dma_fallback_setup(hose, pdev);
 }
 
+int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+{
+	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+	struct pnv_phb *phb = hose->private_data;
+
+	if (phb && phb->dma_set_mask)
+		return phb->dma_set_mask(phb, pdev, dma_mask);
+	return __dma_set_mask(&pdev->dev, dma_mask);
+}
+
 void pnv_pci_shutdown(void)
 {
 	struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 13f1942a9a5f..cde169442775 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -54,7 +54,9 @@ struct pnv_ioda_pe {
 	struct iommu_table	tce32_table;
 	phys_addr_t		tce_inval_reg_phys;
 
-	/* XXX TODO: Add support for additional 64-bit iommus */
+	/* 64-bit TCE bypass region */
+	bool			tce_bypass_enabled;
+	uint64_t		tce_bypass_base;
 
 	/* MSIs. MVE index is identical for for 32 and 64 bit MSI
 	 * and -1 if not supported. (It's actually identical to the
@@ -113,6 +115,8 @@ struct pnv_phb {
 			 unsigned int hwirq, unsigned int virq,
 			 unsigned int is_64, struct msi_msg *msg);
 	void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
+	int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev,
+			    u64 dma_mask);
 	void (*fixup_phb)(struct pci_controller *hose);
 	u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
 	void (*shutdown)(struct pnv_phb *phb);
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index de6819be1f95..0051e108ef0f 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -7,12 +7,20 @@ extern void pnv_smp_init(void);
 static inline void pnv_smp_init(void) { }
 #endif
 
+struct pci_dev;
+
 #ifdef CONFIG_PCI
 extern void pnv_pci_init(void);
 extern void pnv_pci_shutdown(void);
+extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask);
 #else
 static inline void pnv_pci_init(void) { }
 static inline void pnv_pci_shutdown(void) { }
+
+static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+{
+	return -ENODEV;
+}
 #endif
 
 extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 21166f65c97c..110f4fbd319f 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -27,6 +27,7 @@
 #include <linux/interrupt.h>
 #include <linux/bug.h>
 #include <linux/cpuidle.h>
+#include <linux/pci.h>
 
 #include <asm/machdep.h>
 #include <asm/firmware.h>
@@ -141,6 +142,13 @@ static void pnv_progress(char *s, unsigned short hex)
 {
 }
 
+static int pnv_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (dev_is_pci(dev))
+		return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask);
+	return __dma_set_mask(dev, dma_mask);
+}
+
 static void pnv_shutdown(void)
 {
 	/* Let the PCI code clear up IODA tables */
@@ -238,6 +246,7 @@ define_machine(powernv) {
 	.machine_shutdown	= pnv_shutdown,
 	.power_save		= powernv_idle,
 	.calibrate_decr		= generic_calibrate_decr,
+	.dma_set_mask		= pnv_dma_set_mask,
 #ifdef CONFIG_KEXEC
 	.kexec_cpu_down		= pnv_kexec_cpu_down,
 #endif
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 37300f6ee244..80b1d57c306a 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -20,6 +20,7 @@ config PPC_PSERIES
 	select PPC_DOORBELL
 	select HAVE_CONTEXT_TRACKING
 	select HOTPLUG_CPU if SMP
+	select ARCH_RANDOM
 	default y
 
 config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 8e639d7cbda7..972df0ffd4dc 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -430,8 +430,7 @@ static void pSeries_machine_kexec(struct kimage *image)
 {
 	long rc;
 
-	if (firmware_has_feature(FW_FEATURE_SET_MODE) &&
-	    (image->type != KEXEC_TYPE_CRASH)) {
+	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
 		rc = pSeries_disable_reloc_on_exc();
 		if (rc != H_SUCCESS)
 			pr_warning("Warning: Failed to disable relocation on "
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0e166ed4cd16..8209744b2829 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -886,25 +886,25 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
 
 	/* Default: read HW settings */
 	if (flow_type == IRQ_TYPE_DEFAULT) {
-		switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
-			       MPIC_INFO(VECPRI_SENSE_MASK))) {
-			case MPIC_INFO(VECPRI_SENSE_EDGE) |
-			     MPIC_INFO(VECPRI_POLARITY_POSITIVE):
-				flow_type = IRQ_TYPE_EDGE_RISING;
-				break;
-			case MPIC_INFO(VECPRI_SENSE_EDGE) |
-			     MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
-				flow_type = IRQ_TYPE_EDGE_FALLING;
-				break;
-			case MPIC_INFO(VECPRI_SENSE_LEVEL) |
-			     MPIC_INFO(VECPRI_POLARITY_POSITIVE):
-				flow_type = IRQ_TYPE_LEVEL_HIGH;
-				break;
-			case MPIC_INFO(VECPRI_SENSE_LEVEL) |
-			     MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
-				flow_type = IRQ_TYPE_LEVEL_LOW;
-				break;
-		}
+		int vold_ps;
+
+		vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
+				  MPIC_INFO(VECPRI_SENSE_MASK));
+
+		if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
+				MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
+			flow_type = IRQ_TYPE_EDGE_RISING;
+		else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
+				     MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
+			flow_type = IRQ_TYPE_EDGE_FALLING;
+		else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
+				     MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
+			flow_type = IRQ_TYPE_LEVEL_HIGH;
+		else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
+				     MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
+			flow_type = IRQ_TYPE_LEVEL_LOW;
+		else
+			WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold);
 	}
 
 	/* Apply to irq desc */
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a90731b3d44a..b07909850f77 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -309,16 +309,23 @@ static void get_output_lock(void)
 
 	if (xmon_speaker == me)
 		return;
+
 	for (;;) {
-		if (xmon_speaker == 0) {
-			last_speaker = cmpxchg(&xmon_speaker, 0, me);
-			if (last_speaker == 0)
-				return;
-		}
-		timeout = 10000000;
+		last_speaker = cmpxchg(&xmon_speaker, 0, me);
+		if (last_speaker == 0)
+			return;
+
+		/*
+		 * Wait a full second for the lock, we might be on a slow
+		 * console, but check every 100us.
+		 */
+		timeout = 10000;
 		while (xmon_speaker == last_speaker) {
-			if (--timeout > 0)
+			if (--timeout > 0) {
+				udelay(100);
 				continue;
+			}
+
 			/* hostile takeover */
 			prev = cmpxchg(&xmon_speaker, last_speaker, me);
 			if (prev == last_speaker)
@@ -397,7 +404,6 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 	}
 
 	xmon_fault_jmp[cpu] = recurse_jmp;
-	cpumask_set_cpu(cpu, &cpus_in_xmon);
 
 	bp = NULL;
 	if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT))
@@ -419,6 +425,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 		release_output_lock();
 	}
 
+	cpumask_set_cpu(cpu, &cpus_in_xmon);
+
  waiting:
 	secondary = 1;
 	while (secondary && !xmon_gate) {