author      travis@sgi.com <travis@sgi.com>          2017-01-25 11:35:19 -0500
committer   Ingo Molnar <mingo@kernel.org>           2017-02-01 04:20:59 -0500
commit      abdf1df6bc0416ec19b841e92b497ca55b23454c (patch)
tree        cc4e8ffffc1e15f88a1f01f556d7a3745a02a5ce
parent      74862b03b46a852662c1a30c859b985261ff5d5c (diff)

x86/platform/UV: Add Support for UV4 Hubless NMIs

Merge new UV Hubless NMI support into existing UV NMI handler.

Signed-off-by: Mike Travis <travis@sgi.com>
Reviewed-by: Russ Anderson <rja@hpe.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Dimitri Sivanich <sivanich@hpe.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20170125163517.585269837@asylum.americas.sgi.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h   |   3
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c |   2
-rw-r--r--  arch/x86/platform/uv/uv_nmi.c      | 193
3 files changed, 176 insertions(+), 22 deletions(-)
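Note: the patch splits NMI setup into two boot-time paths, selected in uv_system_init(): hubbed systems keep the existing hub-MMR path, hubless UV4 systems take the new PCH-based path. A minimal standalone sketch of that dispatch follows; is_uv_system(), both setup routines, and main() are illustrative stubs assumed for demonstration, not kernel code.

        /* Standalone sketch of the boot-time dispatch added by this patch. */
        #include <stdbool.h>
        #include <stdio.h>

        static bool is_uv_system(void) { return false; }  /* pretend: hubless UV4 */

        static void uv_system_init_hub(void)
        {
                puts("hubbed: init hub, program NMI MMRs");
        }

        static void uv_nmi_setup_hubless(void)
        {
                puts("hubless: map PCH, enable GPP_D_0 NMI");
        }

        static void uv_system_init(void)
        {
                if (is_uv_system())
                        uv_system_init_hub();     /* existing hubbed path */
                else
                        uv_nmi_setup_hubless();   /* new UV4 hubless path */
        }

        int main(void)
        {
                uv_system_init();
                return 0;
        }

Built with any C99 compiler, the sketch prints which path a hubless boot would take.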
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 097b80c989c4..72e8300b1e8a 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -772,6 +772,7 @@ static inline int uv_num_possible_blades(void)
 
 /* Per Hub NMI support */
 extern void uv_nmi_setup(void);
+extern void uv_nmi_setup_hubless(void);
 
 /* BMC sets a bit this MMR non-zero before sending an NMI */
 #define UVH_NMI_MMR             UVH_SCRATCH5
@@ -799,6 +800,8 @@ struct uv_hub_nmi_s {
         atomic_t        read_mmr_count; /* count of MMR reads */
         atomic_t        nmi_count;      /* count of true UV NMIs */
         unsigned long   nmi_value;      /* last value read from NMI MMR */
+        bool            hub_present;    /* false means UV hubless system */
+        bool            pch_owner;      /* indicates this hub owns PCH */
 };
 
 struct uv_cpu_nmi_s {
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index d02cc7e65e4d..e9f8f8cdd570 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -1514,6 +1514,8 @@ void __init uv_system_init(void)
 
         if (is_uv_system())
                 uv_system_init_hub();
+        else
+                uv_nmi_setup_hubless();
 }
 
 apic_driver(apic_x2apic_uv_x);
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 8410e7d0a5b5..df7b092941fe 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -67,6 +67,18 @@ static struct uv_hub_nmi_s **uv_hub_nmi_list;
 DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
 EXPORT_PER_CPU_SYMBOL_GPL(uv_cpu_nmi);
 
+/* UV hubless values */
+#define NMI_CONTROL_PORT        0x70
+#define NMI_DUMMY_PORT          0x71
+#define GPI_NMI_STS_GPP_D_0     0x164
+#define GPI_NMI_ENA_GPP_D_0     0x174
+#define STS_GPP_D_0_MASK        0x1
+#define PAD_CFG_DW0_GPP_D_0     0x4c0
+#define GPIROUTNMI              (1ul << 17)
+#define PCH_PCR_GPIO_1_BASE     0xfdae0000ul
+#define PCH_PCR_GPIO_ADDRESS(offset)    (int *)((u64)(pch_base) | (u64)(offset))
+
+static u64 *pch_base;
 static unsigned long nmi_mmr;
 static unsigned long nmi_mmr_clear;
 static unsigned long nmi_mmr_pending;
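Note: PCH_PCR_GPIO_ADDRESS() composes register addresses with a bitwise OR rather than addition. That is safe here because PCH_PCR_GPIO_1_BASE (0xfdae0000) has its low 16 bits clear and every offset used in this patch (0x164, 0x174, 0x4c0) fits within them, so OR and ADD coincide. A small userspace check of that claim (pure arithmetic, no MMIO access):

        #include <stdint.h>
        #include <stdio.h>

        #define PCH_PCR_GPIO_1_BASE     0xfdae0000ul
        #define GPI_NMI_STS_GPP_D_0     0x164
        #define GPI_NMI_ENA_GPP_D_0     0x174
        #define PAD_CFG_DW0_GPP_D_0     0x4c0

        int main(void)
        {
                uint64_t base = PCH_PCR_GPIO_1_BASE;
                uint64_t offs[] = { GPI_NMI_STS_GPP_D_0, GPI_NMI_ENA_GPP_D_0,
                                    PAD_CFG_DW0_GPP_D_0 };

                /* show that base | offset == base + offset for each register */
                for (unsigned i = 0; i < sizeof(offs) / sizeof(offs[0]); i++)
                        printf("0x%llx | 0x%llx = 0x%llx (OR == ADD: %s)\n",
                               (unsigned long long)base,
                               (unsigned long long)offs[i],
                               (unsigned long long)(base | offs[i]),
                               (base | offs[i]) == (base + offs[i]) ? "yes" : "no");
                return 0;
        }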
@@ -144,6 +156,19 @@ module_param_named(wait_count, uv_nmi_wait_count, int, 0644);
 static int uv_nmi_retry_count = 500;
 module_param_named(retry_count, uv_nmi_retry_count, int, 0644);
 
+static bool uv_pch_intr_enable = true;
+static bool uv_pch_intr_now_enabled;
+module_param_named(pch_intr_enable, uv_pch_intr_enable, bool, 0644);
+
+static int uv_nmi_debug;
+module_param_named(debug, uv_nmi_debug, int, 0644);
+
+#define nmi_debug(fmt, ...)                             \
+        do {                                            \
+                if (uv_nmi_debug)                       \
+                        pr_info(fmt, ##__VA_ARGS__);    \
+        } while (0)
+
 /*
  * Valid NMI Actions:
  *  "dump"      - dump process stack for each cpu
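Note: nmi_debug() is gated by the new "debug" module parameter and wrapped in do { } while (0) so the macro expands to exactly one statement, which keeps an unbraced if/else around it legal. A userspace analogue, with pr_info() swapped for printf() purely for illustration (the ## pasting mirrors the kernel macro and is a GNU C extension):

        #include <stdio.h>

        static int uv_nmi_debug;        /* stands in for the 0644 module parameter */

        /* do/while(0) makes the macro behave as a single statement */
        #define nmi_debug(fmt, ...)                             \
                do {                                            \
                        if (uv_nmi_debug)                       \
                                printf(fmt, ##__VA_ARGS__);     \
                } while (0)

        int main(void)
        {
                uv_nmi_debug = 1;

                /* legal even without braces, thanks to the do/while wrapper */
                if (uv_nmi_debug)
                        nmi_debug("UV:NMI: debug is %d\n", uv_nmi_debug);
                else
                        puts("quiet");
                return 0;
        }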
@@ -192,6 +217,77 @@ static inline void uv_local_mmr_clear_nmi(void)
 }
 
 /*
+ * UV hubless NMI handler functions
+ */
+static inline void uv_reassert_nmi(void)
+{
+        /* (from arch/x86/include/asm/mach_traps.h) */
+        outb(0x8f, NMI_CONTROL_PORT);
+        inb(NMI_DUMMY_PORT);            /* dummy read */
+        outb(0x0f, NMI_CONTROL_PORT);
+        inb(NMI_DUMMY_PORT);            /* dummy read */
+}
+
+static void uv_init_hubless_pch_io(int offset, int mask, int data)
+{
+        int *addr = PCH_PCR_GPIO_ADDRESS(offset);
+        int readd = readl(addr);
+
+        if (mask) {                     /* OR in new data */
+                int writed = (readd & ~mask) | data;
+
+                nmi_debug("UV:PCH: %p = %x & %x | %x (%x)\n",
+                        addr, readd, ~mask, data, writed);
+                writel(writed, addr);
+        } else if (readd & data) {      /* clear status bit */
+                nmi_debug("UV:PCH: %p = %x\n", addr, data);
+                writel(data, addr);
+        }
+
+        (void)readl(addr);              /* flush write data */
+}
+
+static void uv_nmi_setup_hubless_intr(void)
+{
+        uv_pch_intr_now_enabled = uv_pch_intr_enable;
+
+        uv_init_hubless_pch_io(
+                PAD_CFG_DW0_GPP_D_0, GPIROUTNMI,
+                uv_pch_intr_now_enabled ? GPIROUTNMI : 0);
+
+        nmi_debug("UV:NMI: GPP_D_0 interrupt %s\n",
+                uv_pch_intr_now_enabled ? "enabled" : "disabled");
+}
+
+static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
+{
+        int *pstat = PCH_PCR_GPIO_ADDRESS(GPI_NMI_STS_GPP_D_0);
+        int status = *pstat;
+
+        hub_nmi->nmi_value = status;
+        atomic_inc(&hub_nmi->read_mmr_count);
+
+        if (!(status & STS_GPP_D_0_MASK))       /* Not a UV external NMI */
+                return 0;
+
+        *pstat = STS_GPP_D_0_MASK;      /* Is a UV NMI: clear GPP_D_0 status */
+        (void)*pstat;                   /* flush write */
+
+        return 1;
+}
+
+static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
+{
+        if (hub_nmi->hub_present)
+                return uv_nmi_test_mmr(hub_nmi);
+
+        if (hub_nmi->pch_owner)         /* Only PCH owner can check status */
+                return uv_nmi_test_hubless(hub_nmi);
+
+        return -1;
+}
+
+/*
  * If first cpu in on this hub, set hub_nmi "in_nmi" and "owner" values and
  * return true.  If first cpu in on the system, set global "in_nmi" flag.
  */
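Note: uv_test_nmi() establishes a three-way contract the rest of the handler relies on: a positive return means a UV NMI is pending, zero means the NMI is not ours, and a negative return means this node cannot check at all (a hubless node that does not own the PCH) and must wait as a slave. A standalone sketch of a caller branching on that contract, using stand-in types and stub detectors rather than the kernel structs:

        #include <stdio.h>

        /* stand-in for struct uv_hub_nmi_s: just the fields this logic needs */
        struct hub_nmi {
                int hub_present;        /* false means UV hubless system */
                int pch_owner;          /* only the PCH owner can check status */
        };

        static int test_mmr(void)     { return 1; }  /* pretend: hub MMR flag set */
        static int test_hubless(void) { return 1; }  /* pretend: GPP_D_0 flag set */

        /* mirrors uv_test_nmi(): >0 NMI, 0 not ours, <0 cannot check here */
        static int test_nmi(struct hub_nmi *h)
        {
                if (h->hub_present)
                        return test_mmr();
                if (h->pch_owner)
                        return test_hubless();
                return -1;
        }

        int main(void)
        {
                /* hubbed node, hubless PCH owner, hubless non-owner */
                struct hub_nmi nodes[] = { {1, 0}, {0, 1}, {0, 0} };

                for (unsigned i = 0; i < 3; i++) {
                        int r = test_nmi(&nodes[i]);

                        if (r > 0)
                                printf("node %u: UV NMI detected, become master\n", i);
                        else if (r == 0)
                                printf("node %u: not a UV NMI, unlock and exit\n", i);
                        else
                                printf("node %u: cannot check, wait as slave\n", i);
                }
                return 0;
        }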
@@ -214,6 +310,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 {
         int cpu = smp_processor_id();
         int nmi = 0;
+        int nmi_detected = 0;
 
         local64_inc(&uv_nmi_count);
         this_cpu_inc(uv_cpu_nmi.queries);
@@ -224,20 +321,26 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
                         break;
 
                 if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
+                        nmi_detected = uv_test_nmi(hub_nmi);
 
-                        /* check hub MMR NMI flag */
-                        if (uv_nmi_test_mmr(hub_nmi)) {
+                        /* check flag for UV external NMI */
+                        if (nmi_detected > 0) {
                                 uv_set_in_nmi(cpu, hub_nmi);
                                 nmi = 1;
                                 break;
                         }
 
-                        /* MMR NMI flag is clear */
+                        /* A non-PCH node in a hubless system waits for NMI */
+                        else if (nmi_detected < 0)
+                                goto slave_wait;
+
+                        /* MMR/PCH NMI flag is clear */
                         raw_spin_unlock(&hub_nmi->nmi_lock);
 
                 } else {
-                        /* wait a moment for the hub nmi locker to set flag */
-                        cpu_relax();
+
+                        /* Wait a moment for the HUB NMI locker to set flag */
+slave_wait:             cpu_relax();
                         udelay(uv_nmi_slave_delay);
 
                         /* re-check hub in_nmi flag */
@@ -246,13 +349,20 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
                                 break;
                 }
 
-                /* check if this BMC missed setting the MMR NMI flag */
+                /*
+                 * Check if this BMC missed setting the MMR NMI flag (or)
+                 * UV hubless system where only PCH owner can check flag
+                 */
                 if (!nmi) {
                         nmi = atomic_read(&uv_in_nmi);
                         if (nmi)
                                 uv_set_in_nmi(cpu, hub_nmi);
                 }
 
+                /* If we're holding the hub lock, release it now */
+                if (nmi_detected < 0)
+                        raw_spin_unlock(&hub_nmi->nmi_lock);
+
         } while (0);
 
         if (!nmi)
@@ -269,7 +379,10 @@ static inline void uv_clear_nmi(int cpu)
         if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
                 atomic_set(&hub_nmi->cpu_owner, -1);
                 atomic_set(&hub_nmi->in_nmi, 0);
-                uv_local_mmr_clear_nmi();
+                if (hub_nmi->hub_present)
+                        uv_local_mmr_clear_nmi();
+                else
+                        uv_reassert_nmi();
                 raw_spin_unlock(&hub_nmi->nmi_lock);
         }
 }
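Note: on hubless systems the NMI is re-armed through I/O port 0x70, the CMOS/RTC index port, whose bit 7 gates NMI delivery: uv_reassert_nmi() writes 0x8f (NMI masked, index 0x0f) then 0x0f (NMI unmasked, same index), with a dummy read of port 0x71 after each write. The bit arithmetic behind those two magic values, shown without any port I/O:

        #include <stdio.h>

        #define NMI_MASK_BIT    0x80    /* bit 7 of port 0x70 gates NMI */
        #define CMOS_INDEX      0x0f    /* low bits: CMOS register index */

        int main(void)
        {
                unsigned char disable = NMI_MASK_BIT | CMOS_INDEX;  /* 0x8f */
                unsigned char enable  = CMOS_INDEX;                 /* 0x0f */

                printf("outb(0x%02x) -> NMI masked   (bit7=%d)\n",
                       disable, !!(disable & NMI_MASK_BIT));
                printf("outb(0x%02x) -> NMI unmasked (bit7=%d)\n",
                       enable, !!(enable & NMI_MASK_BIT));
                return 0;
        }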
@@ -297,11 +410,12 @@ static void uv_nmi_cleanup_mask(void)
         }
 }
 
-/* Loop waiting as cpus enter nmi handler */
+/* Loop waiting as cpus enter NMI handler */
 static int uv_nmi_wait_cpus(int first)
 {
         int i, j, k, n = num_online_cpus();
         int last_k = 0, waiting = 0;
+        int cpu = smp_processor_id();
 
         if (first) {
                 cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
@@ -310,6 +424,12 @@ static int uv_nmi_wait_cpus(int first)
                 k = n - cpumask_weight(uv_nmi_cpu_mask);
         }
 
+        /* PCH NMI causes only one cpu to respond */
+        if (first && uv_pch_intr_now_enabled) {
+                cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
+                return n - k - 1;
+        }
+
         udelay(uv_nmi_initial_delay);
         for (i = 0; i < uv_nmi_retry_count; i++) {
                 int loop_delay = uv_nmi_loop_delay;
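Note: with the PCH interrupt routed, only one cpu ever receives the NMI, so the new early return in uv_nmi_wait_cpus() skips the polling loop entirely: of n online cpus with k already accounted for, the responding cpu removes itself from the wait mask, leaving n - k - 1 outstanding. A worked example with assumed values:

        #include <stdio.h>

        int main(void)
        {
                int n = 8;      /* num_online_cpus(), example value */
                int k = 0;      /* cpus already through the handler, first pass */

                /* master clears its own bit; n - k - 1 cpus remain outside */
                printf("cpus that will not enter the handler: %d\n", n - k - 1);
                return 0;       /* prints 7 for these values */
        }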
@@ -358,7 +478,7 @@ static void uv_nmi_wait(int master)
                         break;
 
                 /* if not all made it in, send IPI NMI to them */
-                pr_alert("UV: Sending NMI IPI to %d non-responding CPUs: %*pbl\n",
+                pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
                         cpumask_weight(uv_nmi_cpu_mask),
                         cpumask_pr_args(uv_nmi_cpu_mask));
 
@@ -538,7 +658,7 @@ static inline int uv_nmi_kdb_reason(void)
 #else /* !CONFIG_KGDB_KDB */
 static inline int uv_nmi_kdb_reason(void)
 {
-        /* Insure user is expecting to attach gdb remote */
+        /* Ensure user is expecting to attach gdb remote */
         if (uv_nmi_action_is("kgdb"))
                 return 0;
 
@@ -626,15 +746,18 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
         /* Pause as all cpus enter the NMI handler */
         uv_nmi_wait(master);
 
-        /* Dump state of each cpu */
-        if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump"))
+        /* Process actions other than "kdump": */
+        if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) {
                 uv_nmi_dump_state(cpu, regs, master);
-
-        /* Call KGDB/KDB if enabled */
-        else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb"))
+        } else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb")) {
                 uv_call_kgdb_kdb(cpu, regs, master);
+        } else {
+                if (master)
+                        pr_alert("UV: unknown NMI action: %s\n", uv_nmi_action);
+                uv_nmi_sync_exit(master);
+        }
 
-        /* Clear per_cpu "in nmi" flag */
+        /* Clear per_cpu "in_nmi" flag */
         this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);
 
         /* Clear MMR NMI flag on each hub */
@@ -648,6 +771,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
                 atomic_set(&uv_nmi_cpu, -1);
                 atomic_set(&uv_in_nmi, 0);
                 atomic_set(&uv_nmi_kexec_failed, 0);
+                atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
         }
 
         uv_nmi_touch_watchdogs();
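Note: uv_handle_nmi() now routes every action through a single if/else chain with an explicit fallback that names unknown action strings and releases the slave cpus via uv_nmi_sync_exit(), where previously an unrecognized action silently fell through. A standalone sketch of that dispatch shape (action names from the driver; the stub bodies are assumptions for illustration):

        #include <stdio.h>
        #include <string.h>

        static const char *uv_nmi_action = "bogus";     /* example unknown action */

        static int action_is(const char *s) { return strcmp(uv_nmi_action, s) == 0; }

        static void dump_state(void)    { puts("dump cpu state"); }
        static void call_kgdb_kdb(void) { puts("enter kdb/kgdb"); }
        static void sync_exit(void)     { puts("release slave cpus, sync exit"); }

        int main(void)
        {
                int master = 1;

                if (action_is("ips") || action_is("dump")) {
                        dump_state();
                } else if (action_is("kdb") || action_is("kgdb")) {
                        call_kgdb_kdb();
                } else {
                        /* new fallback: name the bad action, still exit cleanly */
                        if (master)
                                printf("UV: unknown NMI action: %s\n", uv_nmi_action);
                        sync_exit();
                }
                return 0;
        }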
@@ -697,28 +821,53 @@ void uv_nmi_init(void)
         apic_write(APIC_LVT1, value);
 }
 
-void uv_nmi_setup(void)
+/* Setup HUB NMI info */
+void __init uv_nmi_setup_common(bool hubbed)
 {
         int size = sizeof(void *) * (1 << NODES_SHIFT);
-        int cpu, nid;
+        int cpu;
 
-        /* Setup hub nmi info */
-        uv_nmi_setup_mmrs();
         uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
-        pr_info("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
+        nmi_debug("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
         BUG_ON(!uv_hub_nmi_list);
         size = sizeof(struct uv_hub_nmi_s);
         for_each_present_cpu(cpu) {
-                nid = cpu_to_node(cpu);
+                int nid = cpu_to_node(cpu);
                 if (uv_hub_nmi_list[nid] == NULL) {
                         uv_hub_nmi_list[nid] = kzalloc_node(size,
                                                             GFP_KERNEL, nid);
                         BUG_ON(!uv_hub_nmi_list[nid]);
                         raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
                         atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
+                        uv_hub_nmi_list[nid]->hub_present = hubbed;
+                        uv_hub_nmi_list[nid]->pch_owner = (nid == 0);
                 }
                 uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
         }
         BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
+}
+
+/* Setup for UV Hub systems */
+void __init uv_nmi_setup(void)
+{
+        uv_nmi_setup_mmrs();
+        uv_nmi_setup_common(true);
+        uv_register_nmi_notifier();
+        pr_info("UV: Hub NMI enabled\n");
+}
+
+/* Setup for UV Hubless systems */
+void __init uv_nmi_setup_hubless(void)
+{
+        uv_nmi_setup_common(false);
+        pch_base = xlate_dev_mem_ptr(PCH_PCR_GPIO_1_BASE);
+        nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
+                pch_base, PCH_PCR_GPIO_1_BASE);
+        uv_init_hubless_pch_io(GPI_NMI_ENA_GPP_D_0,
+                                STS_GPP_D_0_MASK, STS_GPP_D_0_MASK);
+        uv_nmi_setup_hubless_intr();
+        /* Ensure NMI enabled in Processor Interface Reg: */
+        uv_reassert_nmi();
         uv_register_nmi_notifier();
+        pr_info("UV: Hubless NMI enabled\n");
 }
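Note: taken together, uv_nmi_setup_hubless() performs a fixed bring-up sequence. The sketch below restates that order with print-only stubs; the step wording is a paraphrase of the patch, not kernel output, except for the final line:

        /* Condensed restatement of the hubless setup order; each stub
         * prints the step the real kernel function performs.
         */
        #include <stdio.h>

        static void setup_common(void)      { puts("1. alloc per-node hub_nmi, nid 0 owns PCH"); }
        static void map_pch(void)           { puts("2. map PCH GPIO base 0xfdae0000"); }
        static void enable_gpp_d_0(void)    { puts("3. set GPI_NMI_ENA_GPP_D_0 enable bit"); }
        static void route_nmi(void)         { puts("4. route pad GPP_D_0 to NMI (GPIROUTNMI)"); }
        static void reassert(void)          { puts("5. unmask NMI via port 0x70"); }
        static void register_notifier(void) { puts("6. register the UV NMI handler"); }

        int main(void)
        {
                setup_common();
                map_pch();
                enable_gpp_d_0();
                route_nmi();
                reassert();
                register_notifier();
                puts("UV: Hubless NMI enabled");
                return 0;
        }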