Diffstat (limited to 'arch/arc/kernel')
-rw-r--r--  arch/arc/kernel/entry-arcv2.S    |   4
-rw-r--r--  arch/arc/kernel/head.S           |  16
-rw-r--r--  arch/arc/kernel/intc-arcv2.c     |   2
-rw-r--r--  arch/arc/kernel/perf_event.c     | 241
-rw-r--r--  arch/arc/kernel/setup.c          | 144
-rw-r--r--  arch/arc/kernel/troubleshoot.c   |  30
6 files changed, 319 insertions(+), 118 deletions(-)
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index cc558a25b8fa..562089d62d9d 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -209,7 +209,9 @@ restore_regs:
 ;####### Return from Intr #######
 
 debug_marker_l1:
-	bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+	; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+	btst	r0, STATUS_DE_BIT		; Z flag set if bit clear
+	bnz	.Lintr_ret_to_delay_slot	; branch if STATUS_DE_BIT set
 
 .Lisr_ret_fast_path:
 	; Handle special case #1: (Entry via Exception, Return via IRQ)
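The replacement above is behaviour-preserving: bbit1.nt branches when the tested bit is 1, while btst sets the Z flag when the bit is clear, so bnz (branch if Z clear) fires on exactly the same condition. A C-level sketch of the test (illustrative; treating STATUS_DE_MASK as BIT(STATUS_DE_BIT) is an assumption):

	/* sketch: both instruction sequences implement this check on the
	 * saved STATUS32 in r0 -- "was the interrupted instruction sitting
	 * in a branch delay slot?" */
	if (status32 & STATUS_DE_MASK)		/* assumed BIT(STATUS_DE_BIT) */
		goto intr_ret_to_delay_slot;	/* special-cased return path */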
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 8b90d25a15cc..30e090625916 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -17,6 +17,7 @@
 #include <asm/entry.h>
 #include <asm/arcregs.h>
 #include <asm/cache.h>
+#include <asm/irqflags.h>
 
 .macro CPU_EARLY_SETUP
 
@@ -47,6 +48,15 @@
 	sr	r5, [ARC_REG_DC_CTRL]
 
 1:
+
+#ifdef CONFIG_ISA_ARCV2
+	; Unaligned access is disabled at reset, so re-enable early as
+	; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
+	; by default
+	lr	r5, [status32]
+	bset	r5, r5, STATUS_AD_BIT
+	kflag	r5
+#endif
 .endm
 
 	.section .init.text, "ax",@progbits
@@ -90,15 +100,13 @@ ENTRY(stext)
 	st.ab	0, [r5, 4]
 1:
 
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
 	; Uboot - kernel ABI
 	; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
-	; r1 = magic number (board identity, unused as of now
+	; r1 = magic number (always zero as of now)
 	; r2 = pointer to uboot provided cmdline or external DTB in mem
-	; These are handled later in setup_arch()
+	; These are handled later in handle_uboot_args()
 	st	r0, [@uboot_tag]
 	st	r2, [@uboot_arg]
-#endif
 
 	; setup "current" tsk and optionally cache it in dedicated r25
 	mov	r9, @init_task
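The ABI comments above are consumed on the C side by handle_uboot_args() (added to setup.c later in this patch). A sketch of the same contract expressed in C, using the tag names that patch introduces (the enum form itself is illustrative):

	/* U-boot -> kernel handoff, ABI revision 0 */
	enum {
		UBOOT_TAG_NONE    = 0,	/* r0: no U-boot interaction */
		UBOOT_TAG_CMDLINE = 1,	/* r0: r2 points at a cmdline string */
		UBOOT_TAG_DTB     = 2,	/* r0: r2 points at an external DTB */
	};
	/* r1 carries a magic number (always zero as of now); head.S stashes
	 * r0 into @uboot_tag and r2 into @uboot_arg for the C code above */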
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 067ea362fb3e..cf18b3e5a934 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -49,11 +49,13 @@ void arc_init_IRQ(void)
 
 	*(unsigned int *)&ictrl = 0;
 
+#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE
 	ictrl.save_nr_gpr_pairs = 6;	/* r0 to r11 (r12 saved manually) */
 	ictrl.save_blink = 1;
 	ictrl.save_lp_regs = 1;		/* LP_COUNT, LP_START, LP_END */
 	ictrl.save_u_to_u = 0;		/* user ctxt saved on kernel stack */
 	ictrl.save_idx_regs = 1;	/* JLI, LDI, EI */
+#endif
 
 	WRITE_AUX(AUX_IRQ_CTRL, ictrl);
 
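A hedged reading of the new guard: with CONFIG_ARC_IRQ_NO_AUTOSAVE enabled, ictrl keeps the all-zero value it was initialized with, so the interrupt unit auto-saves no register state on IRQ entry and the low-level entry code must spill and restore the register file in software. In that configuration the effective programming reduces to (sketch, using only code from this hunk):

	*(unsigned int *)&ictrl = 0;	/* no GPRs, blink, loop or
					 * code-density regs auto-saved */
	WRITE_AUX(AUX_IRQ_CTRL, ictrl);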
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 8aec462d90fb..861a8aea51f9 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -1,15 +1,10 @@
-/*
- * Linux performance counter support for ARC700 series
- *
- * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com)
- *
- * This code is inspired by the perf support of various other architectures.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Linux performance counter support for ARC CPUs.
+// This code is inspired by the perf support of various other architectures.
+//
+// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)
+
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -19,12 +14,31 @@
 #include <asm/arcregs.h>
 #include <asm/stacktrace.h>
 
+/* HW holds 8 symbols + one for null terminator */
+#define ARCPMU_EVENT_NAME_LEN	9
+
+enum arc_pmu_attr_groups {
+	ARCPMU_ATTR_GR_EVENTS,
+	ARCPMU_ATTR_GR_FORMATS,
+	ARCPMU_NR_ATTR_GR
+};
+
+struct arc_pmu_raw_event_entry {
+	char name[ARCPMU_EVENT_NAME_LEN];
+};
+
 struct arc_pmu {
 	struct pmu pmu;
 	unsigned int irq;
 	int n_counters;
+	int n_events;
 	u64 max_period;
 	int ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
+
+	struct arc_pmu_raw_event_entry *raw_entry;
+	struct attribute **attrs;
+	struct perf_pmu_events_attr *attr;
+	const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1];
 };
 
 struct arc_pmu_cpu {
@@ -49,6 +63,7 @@ static int callchain_trace(unsigned int addr, void *data)
 {
 	struct arc_callchain_trace *ctrl = data;
 	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
+
 	perf_callchain_store(entry, addr);
 
 	if (ctrl->depth++ < 3)
@@ -57,8 +72,8 @@ static int callchain_trace(unsigned int addr, void *data)
 	return -1;
 }
 
-void
-perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+			   struct pt_regs *regs)
 {
 	struct arc_callchain_trace ctrl = {
 		.depth = 0,
@@ -68,8 +83,8 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
 	arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
 }
 
-void
-perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+			 struct pt_regs *regs)
 {
 	/*
 	 * User stack can't be unwound trivially with kernel dwarf unwinder
@@ -82,10 +97,10 @@ static struct arc_pmu *arc_pmu;
 static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);
 
 /* read counter #idx; note that counter# != event# on ARC! */
-static uint64_t arc_pmu_read_counter(int idx)
+static u64 arc_pmu_read_counter(int idx)
 {
-	uint32_t tmp;
-	uint64_t result;
+	u32 tmp;
+	u64 result;
 
 	/*
 	 * ARC supports making 'snapshots' of the counters, so we don't
@@ -94,7 +109,7 @@ static uint64_t arc_pmu_read_counter(int idx)
 	write_aux_reg(ARC_REG_PCT_INDEX, idx);
 	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
 	write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
-	result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
+	result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
 	result |= read_aux_reg(ARC_REG_PCT_SNAPL);
 
 	return result;
@@ -103,9 +118,9 @@ static uint64_t arc_pmu_read_counter(int idx)
 static void arc_perf_event_update(struct perf_event *event,
 				  struct hw_perf_event *hwc, int idx)
 {
-	uint64_t prev_raw_count = local64_read(&hwc->prev_count);
-	uint64_t new_raw_count = arc_pmu_read_counter(idx);
-	int64_t delta = new_raw_count - prev_raw_count;
+	u64 prev_raw_count = local64_read(&hwc->prev_count);
+	u64 new_raw_count = arc_pmu_read_counter(idx);
+	s64 delta = new_raw_count - prev_raw_count;
 
 	/*
 	 * We aren't afraid of hwc->prev_count changing beneath our feet
@@ -155,7 +170,7 @@ static int arc_pmu_event_init(struct perf_event *event)
 	int ret;
 
 	if (!is_sampling_event(event)) {
-		hwc->sample_period  = arc_pmu->max_period;
+		hwc->sample_period = arc_pmu->max_period;
 		hwc->last_period = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
@@ -192,6 +207,18 @@ static int arc_pmu_event_init(struct perf_event *event)
 		pr_debug("init cache event with h/w %08x \'%s\'\n",
 			 (int)hwc->config, arc_pmu_ev_hw_map[ret]);
 		return 0;
+
+	case PERF_TYPE_RAW:
+		if (event->attr.config >= arc_pmu->n_events)
+			return -ENOENT;
+
+		hwc->config |= event->attr.config;
+		pr_debug("init raw event with idx %lld \'%s\'\n",
+			 event->attr.config,
+			 arc_pmu->raw_entry[event->attr.config].name);
+
+		return 0;
+
 	default:
 		return -ENOENT;
 	}
@@ -200,7 +227,7 @@ static int arc_pmu_event_init(struct perf_event *event)
 /* starts all counters */
 static void arc_pmu_enable(struct pmu *pmu)
 {
-	uint32_t tmp;
+	u32 tmp;
 	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
 	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
 }
@@ -208,7 +235,7 @@ static void arc_pmu_enable(struct pmu *pmu)
 /* stops all counters */
 static void arc_pmu_disable(struct pmu *pmu)
 {
-	uint32_t tmp;
+	u32 tmp;
 	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
 	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
 }
@@ -228,7 +255,7 @@ static int arc_pmu_event_set_period(struct perf_event *event)
 		local64_set(&hwc->period_left, left);
 		hwc->last_period = period;
 		overflow = 1;
-	} else	if (unlikely(left <= 0)) {
+	} else if (unlikely(left <= 0)) {
 		/* left underflowed by less than period. */
 		left += period;
 		local64_set(&hwc->period_left, left);
@@ -246,8 +273,8 @@ static int arc_pmu_event_set_period(struct perf_event *event)
 	write_aux_reg(ARC_REG_PCT_INDEX, idx);
 
 	/* Write value */
-	write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value);
-	write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32));
+	write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
+	write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));
 
 	perf_event_update_userpage(event);
 
@@ -277,7 +304,7 @@ static void arc_pmu_start(struct perf_event *event, int flags)
 	/* Enable interrupt for this counter */
 	if (is_sampling_event(event))
 		write_aux_reg(ARC_REG_PCT_INT_CTRL,
-			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
 
 	/* enable ARC pmu here */
 	write_aux_reg(ARC_REG_PCT_INDEX, idx);		/* counter # */
@@ -295,9 +322,9 @@ static void arc_pmu_stop(struct perf_event *event, int flags)
 		 * Reset interrupt flag by writing of 1. This is required
 		 * to make sure pending interrupt was not left.
 		 */
-		write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
 		write_aux_reg(ARC_REG_PCT_INT_CTRL,
-			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx));
+			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
 	}
 
 	if (!(event->hw.state & PERF_HES_STOPPED)) {
@@ -349,9 +376,10 @@ static int arc_pmu_add(struct perf_event *event, int flags)
 
 	if (is_sampling_event(event)) {
 		/* Mimic full counter overflow as other arches do */
-		write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period);
+		write_aux_reg(ARC_REG_PCT_INT_CNTL,
+			      lower_32_bits(arc_pmu->max_period));
 		write_aux_reg(ARC_REG_PCT_INT_CNTH,
-			      (arc_pmu->max_period >> 32));
+			      upper_32_bits(arc_pmu->max_period));
 	}
 
 	write_aux_reg(ARC_REG_PCT_CONFIG, 0);
@@ -392,7 +420,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 		idx = __ffs(active_ints);
 
 		/* Reset interrupt flag by writing of 1 */
-		write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
 
 		/*
 		 * On reset of "interrupt active" bit corresponding
@@ -400,7 +428,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 		 * Now we need to re-enable interrupt for the counter.
 		 */
 		write_aux_reg(ARC_REG_PCT_INT_CTRL,
-			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
 
 		event = pmu_cpu->act_counter[idx];
 		hwc = &event->hw;
@@ -414,7 +442,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 			arc_pmu_stop(event, 0);
 		}
 
-		active_ints &= ~(1U << idx);
+		active_ints &= ~BIT(idx);
 	} while (active_ints);
 
 done:
@@ -441,19 +469,108 @@ static void arc_cpu_pmu_irq_init(void *data)
 	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
 }
 
+/* Event field occupies the bottom 15 bits of our config field */
+PMU_FORMAT_ATTR(event, "config:0-14");
+static struct attribute *arc_pmu_format_attrs[] = {
+	&format_attr_event.attr,
+	NULL,
+};
+
+static struct attribute_group arc_pmu_format_attr_gr = {
+	.name = "format",
+	.attrs = arc_pmu_format_attrs,
+};
+
+static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *page)
+{
+	struct perf_pmu_events_attr *pmu_attr;
+
+	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
+}
+
+/*
+ * We don't add attrs here as we don't have pre-defined list of perf events.
+ * We will generate and add attrs dynamically in probe() after we read HW
+ * configuration.
+ */
+static struct attribute_group arc_pmu_events_attr_gr = {
+	.name = "events",
+};
+
+static void arc_pmu_add_raw_event_attr(int j, char *str)
+{
+	memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
+	arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
+	arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
+	arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
+	arc_pmu->attr[j].id = j;
+	arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
+}
+
+static int arc_pmu_raw_alloc(struct device *dev)
+{
+	arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+		sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
+	if (!arc_pmu->attr)
+		return -ENOMEM;
+
+	arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+		sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
+	if (!arc_pmu->attrs)
+		return -ENOMEM;
+
+	arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
+		sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
+	if (!arc_pmu->raw_entry)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static inline bool event_in_hw_event_map(int i, char *name)
+{
+	if (!arc_pmu_ev_hw_map[i])
+		return false;
+
+	if (!strlen(arc_pmu_ev_hw_map[i]))
+		return false;
+
+	if (strcmp(arc_pmu_ev_hw_map[i], name))
+		return false;
+
+	return true;
+}
+
+static void arc_pmu_map_hw_event(int j, char *str)
+{
+	int i;
+
+	/* See if HW condition has been mapped to a perf event_id */
+	for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
+		if (event_in_hw_event_map(i, str)) {
+			pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
+				 i, str, j);
+			arc_pmu->ev_hw_idx[i] = j;
+		}
+	}
+}
+
 static int arc_pmu_device_probe(struct platform_device *pdev)
 {
 	struct arc_reg_pct_build pct_bcr;
 	struct arc_reg_cc_build cc_bcr;
-	int i, j, has_interrupts;
+	int i, has_interrupts;
 	int counter_size;	/* in bits */
 
 	union cc_name {
 		struct {
-			uint32_t word0, word1;
+			u32 word0, word1;
 			char sentinel;
 		} indiv;
-		char str[9];
+		char str[ARCPMU_EVENT_NAME_LEN];
 	} cc_name;
 
 
@@ -463,15 +580,22 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 	BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
-	BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);
+	if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
+		return -EINVAL;
 
 	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
-	BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */
+	if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?"))
+		return -EINVAL;
 
 	arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
 	if (!arc_pmu)
 		return -ENOMEM;
 
+	arc_pmu->n_events = cc_bcr.c;
+
+	if (arc_pmu_raw_alloc(&pdev->dev))
+		return -ENOMEM;
+
 	has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;
 
 	arc_pmu->n_counters = pct_bcr.c;
@@ -481,30 +605,26 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
 	pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
 		arc_pmu->n_counters, counter_size, cc_bcr.c,
-		has_interrupts ? ", [overflow IRQ support]":"");
+		has_interrupts ? ", [overflow IRQ support]" : "");
 
-	cc_name.str[8] = 0;
+	cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
 	for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
 		arc_pmu->ev_hw_idx[i] = -1;
 
 	/* loop thru all available h/w condition indexes */
-	for (j = 0; j < cc_bcr.c; j++) {
-		write_aux_reg(ARC_REG_CC_INDEX, j);
+	for (i = 0; i < cc_bcr.c; i++) {
+		write_aux_reg(ARC_REG_CC_INDEX, i);
 		cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
 		cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
 
-		/* See if it has been mapped to a perf event_id */
-		for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
-			if (arc_pmu_ev_hw_map[i] &&
-			    !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
-			    strlen(arc_pmu_ev_hw_map[i])) {
-				pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
-					 i, cc_name.str, j);
-				arc_pmu->ev_hw_idx[i] = j;
-			}
-		}
+		arc_pmu_map_hw_event(i, cc_name.str);
+		arc_pmu_add_raw_event_attr(i, cc_name.str);
 	}
 
+	arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
+	arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
+	arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;
+
 	arc_pmu->pmu = (struct pmu) {
 		.pmu_enable	= arc_pmu_enable,
 		.pmu_disable	= arc_pmu_disable,
@@ -514,6 +634,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 		.start		= arc_pmu_start,
 		.stop		= arc_pmu_stop,
 		.read		= arc_pmu_read,
+		.attr_groups	= arc_pmu->attr_groups,
 	};
 
 	if (has_interrupts) {
@@ -535,17 +656,19 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 	} else
 		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 
-	return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
+	/*
+	 * perf parser doesn't really like '-' symbol in events name, so let's
+	 * use '_' in arc pct name as it goes to kernel PMU event prefix.
+	 */
+	return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
 }
 
-#ifdef CONFIG_OF
 static const struct of_device_id arc_pmu_match[] = {
 	{ .compatible = "snps,arc700-pct" },
 	{ .compatible = "snps,archs-pct" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, arc_pmu_match);
-#endif
 
 static struct platform_driver arc_pmu_driver = {
 	.driver = {
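Once registered, the PMU shows up as "arc_pct" with a dynamically generated events/ directory and the config:0-14 format attribute from above. A minimal user-space sketch of counting one raw condition through the generic interface; the condition index 0 used for .config is a placeholder, since real indexes come from the strings ("event=0x....") printed by /sys/bus/event_source/devices/arc_pct/events/<name>:

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdio.h>

	int main(void)
	{
		struct perf_event_attr pea;
		long long count;
		int fd;

		memset(&pea, 0, sizeof(pea));
		pea.type = PERF_TYPE_RAW;	/* arc_pct registers as the RAW PMU */
		pea.size = sizeof(pea);
		pea.config = 0;			/* raw condition index, bits 0-14 */
		pea.disabled = 1;

		/* count on the calling thread, any CPU */
		fd = syscall(__NR_perf_event_open, &pea, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		/* ... workload under measurement ... */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("count: %lld\n", count);
		return 0;
	}

A config value at or above n_events is rejected with -ENOENT by arc_pmu_event_init() above.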
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 2e018b8c2e19..7b2340996cf8 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -123,6 +123,7 @@ static void read_arc_build_cfg_regs(void)
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
 	const struct id_to_str *tbl;
 	struct bcr_isa_arcv2 isa;
+	struct bcr_actionpoint ap;
 
 	FIX_PTR(cpu);
 
@@ -195,20 +196,40 @@ static void read_arc_build_cfg_regs(void)
 		cpu->bpu.full = bpu.ft;
 		cpu->bpu.num_cache = 256 << bpu.bce;
 		cpu->bpu.num_pred = 2048 << bpu.pte;
+		cpu->bpu.ret_stk = 4 << bpu.rse;
 
 		if (cpu->core.family >= 0x54) {
-			unsigned int exec_ctrl;
 
-			READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
-			cpu->extn.dual_enb = !(exec_ctrl & 1);
+			struct bcr_uarch_build_arcv2 uarch;
 
-			/* dual issue always present for this core */
-			cpu->extn.dual = 1;
+			/*
+			 * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
+			 * dual issue only (HS4x). But next uarch rev (1:0)
+			 * allows it be configured for single issue (HS3x)
+			 * Ensure we fiddle with dual issue only on HS4x
+			 */
+			READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
+
+			if (uarch.prod == 4) {
+				unsigned int exec_ctrl;
+
+				/* dual issue hardware always present */
+				cpu->extn.dual = 1;
+
+				READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+
+				/* dual issue hardware enabled ? */
+				cpu->extn.dual_enb = !(exec_ctrl & 1);
+
+			}
 		}
 	}
 
-	READ_BCR(ARC_REG_AP_BCR, bcr);
-	cpu->extn.ap = bcr.ver ? 1 : 0;
+	READ_BCR(ARC_REG_AP_BCR, ap);
+	if (ap.ver) {
+		cpu->extn.ap_num = 2 << ap.num;
+		cpu->extn.ap_full = !ap.min;
+	}
 
 	READ_BCR(ARC_REG_SMART_BCR, bcr);
 	cpu->extn.smart = bcr.ver ? 1 : 0;
@@ -216,8 +237,6 @@ static void read_arc_build_cfg_regs(void)
 	READ_BCR(ARC_REG_RTT_BCR, bcr);
 	cpu->extn.rtt = bcr.ver ? 1 : 0;
 
-	cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
-
 	READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
 
 	/* some hacks for lack of feature BCR info in old ARC700 cores */
@@ -299,10 +318,10 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 
 	if (cpu->bpu.ver)
 		n += scnprintf(buf + n, len - n,
-			      "BPU\t\t: %s%s match, cache:%d, Predict Table:%d",
+			      "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
 			      IS_AVAIL1(cpu->bpu.full, "full"),
 			      IS_AVAIL1(!cpu->bpu.full, "partial"),
-			      cpu->bpu.num_cache, cpu->bpu.num_pred);
+			      cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
 
 	if (is_isa_arcv2()) {
 		struct bcr_lpb lpb;
308 struct bcr_lpb lpb; 327 struct bcr_lpb lpb;
@@ -336,11 +355,17 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
336 IS_AVAIL1(cpu->extn.fpu_sp, "SP "), 355 IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
337 IS_AVAIL1(cpu->extn.fpu_dp, "DP ")); 356 IS_AVAIL1(cpu->extn.fpu_dp, "DP "));
338 357
339 if (cpu->extn.debug) 358 if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
340 n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n", 359 n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
341 IS_AVAIL1(cpu->extn.ap, "ActionPoint "),
342 IS_AVAIL1(cpu->extn.smart, "smaRT "), 360 IS_AVAIL1(cpu->extn.smart, "smaRT "),
343 IS_AVAIL1(cpu->extn.rtt, "RTT ")); 361 IS_AVAIL1(cpu->extn.rtt, "RTT "));
362 if (cpu->extn.ap_num) {
363 n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
364 cpu->extn.ap_num,
365 cpu->extn.ap_full ? "full":"min");
366 }
367 n += scnprintf(buf + n, len - n, "\n");
368 }
344 369
345 if (cpu->dccm.sz || cpu->iccm.sz) 370 if (cpu->dccm.sz || cpu->iccm.sz)
346 n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n", 371 n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
@@ -453,43 +478,78 @@ void setup_processor(void)
 	arc_chk_core_config();
 }
 
-static inline int is_kernel(unsigned long addr)
+static inline bool uboot_arg_invalid(unsigned long addr)
 {
-	if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
-		return 1;
-	return 0;
+	/*
+	 * Check that it is a untranslated address (although MMU is not enabled
+	 * yet, it being a high address ensures this is not by fluke)
+	 */
+	if (addr < PAGE_OFFSET)
+		return true;
+
+	/* Check that address doesn't clobber resident kernel image */
+	return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
 }
 
-void __init setup_arch(char **cmdline_p)
+#define IGNORE_ARGS		"Ignore U-boot args: "
+
+/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
+#define UBOOT_TAG_NONE		0
+#define UBOOT_TAG_CMDLINE	1
+#define UBOOT_TAG_DTB		2
+
+void __init handle_uboot_args(void)
 {
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
-	/* make sure that uboot passed pointer to cmdline/dtb is valid */
-	if (uboot_tag && is_kernel((unsigned long)uboot_arg))
-		panic("Invalid uboot arg\n");
-
-	/* See if u-boot passed an external Device Tree blob */
-	machine_desc = setup_machine_fdt(uboot_arg);	/* uboot_tag == 2 */
-	if (!machine_desc)
-#endif
-	{
-		/* No, so try the embedded one */
+	bool use_embedded_dtb = true;
+	bool append_cmdline = false;
+
+	/* check that we know this tag */
+	if (uboot_tag != UBOOT_TAG_NONE &&
+	    uboot_tag != UBOOT_TAG_CMDLINE &&
+	    uboot_tag != UBOOT_TAG_DTB) {
+		pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
+		goto ignore_uboot_args;
+	}
+
+	if (uboot_tag != UBOOT_TAG_NONE &&
+	    uboot_arg_invalid((unsigned long)uboot_arg)) {
+		pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
+		goto ignore_uboot_args;
+	}
+
+	/* see if U-boot passed an external Device Tree blob */
+	if (uboot_tag == UBOOT_TAG_DTB) {
+		machine_desc = setup_machine_fdt((void *)uboot_arg);
+
+		/* external Device Tree blob is invalid - use embedded one */
+		use_embedded_dtb = !machine_desc;
+	}
+
+	if (uboot_tag == UBOOT_TAG_CMDLINE)
+		append_cmdline = true;
+
+ignore_uboot_args:
+
+	if (use_embedded_dtb) {
 		machine_desc = setup_machine_fdt(__dtb_start);
 		if (!machine_desc)
 			panic("Embedded DT invalid\n");
+	}
 
-		/*
-		 * If we are here, it is established that @uboot_arg didn't
-		 * point to DT blob. Instead if u-boot says it is cmdline,
-		 * append to embedded DT cmdline.
-		 * setup_machine_fdt() would have populated @boot_command_line
-		 */
-		if (uboot_tag == 1) {
-			/* Ensure a whitespace between the 2 cmdlines */
-			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
-			strlcat(boot_command_line, uboot_arg,
-				COMMAND_LINE_SIZE);
-		}
+	/*
+	 * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
+	 * append processing can only happen after.
+	 */
+	if (append_cmdline) {
+		/* Ensure a whitespace between the 2 cmdlines */
+		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+		strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
 	}
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+	handle_uboot_args();
 
 	/* Save unparsed command line copy for /proc/cmdline */
 	*cmdline_p = boot_command_line;
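For quick reference, the tag handling that handle_uboot_args() implements above collapses to this decision table (restating the hunk, not adding behaviour):

	/*
	 * uboot_tag        uboot_arg          result
	 * ---------        ---------          ------
	 * NONE (0)         ignored            embedded DTB, no cmdline append
	 * CMDLINE (1)      must be valid      embedded DTB + cmdline appended
	 * DTB (2)          must be valid      external DTB, embedded fallback
	 * anything else    not checked        warn, ignore U-boot args
	 */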
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index e8d9fb452346..215f515442e0 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -18,6 +18,8 @@
 #include <asm/arcregs.h>
 #include <asm/irqflags.h>
 
+#define ARC_PATH_MAX	256
+
 /*
  * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
  *	-Prints 3 regs per line and a CR.
@@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs)
 	print_reg_file(&(cregs->r13), 13);
 }
 
-static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+static void print_task_path_n_nm(struct task_struct *tsk)
 {
 	char *path_nm = NULL;
 	struct mm_struct *mm;
 	struct file *exe_file;
+	char buf[ARC_PATH_MAX];
 
 	mm = get_task_mm(tsk);
 	if (!mm)
@@ -72,7 +75,7 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
 	mmput(mm);
 
 	if (exe_file) {
-		path_nm = file_path(exe_file, buf, 255);
+		path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
 		fput(exe_file);
 	}
 
@@ -80,10 +83,9 @@ done:
 	pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
 }
 
-static void show_faulting_vma(unsigned long address, char *buf)
+static void show_faulting_vma(unsigned long address)
 {
 	struct vm_area_struct *vma;
-	char *nm = buf;
 	struct mm_struct *active_mm = current->active_mm;
 
 	/* can't use print_vma_addr() yet as it doesn't check for
@@ -96,8 +98,11 @@ static void show_faulting_vma(unsigned long address, char *buf)
 	 * if the container VMA is not found
 	 */
 	if (vma && (vma->vm_start <= address)) {
+		char buf[ARC_PATH_MAX];
+		char *nm = "?";
+
 		if (vma->vm_file) {
-			nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+			nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
 			if (IS_ERR(nm))
 				nm = "?";
 		}
@@ -173,13 +178,14 @@ void show_regs(struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
 	struct callee_regs *cregs;
-	char *buf;
 
-	buf = (char *)__get_free_page(GFP_KERNEL);
-	if (!buf)
-		return;
+	/*
+	 * generic code calls us with preemption disabled, but some calls
+	 * here could sleep, so re-enable to avoid lockdep splat
+	 */
+	preempt_enable();
 
-	print_task_path_n_nm(tsk, buf);
+	print_task_path_n_nm(tsk);
 	show_regs_print_info(KERN_INFO);
 
 	show_ecr_verbose(regs);
@@ -189,7 +195,7 @@ void show_regs(struct pt_regs *regs)
 		(void *)regs->blink, (void *)regs->ret);
 
 	if (user_mode(regs))
-		show_faulting_vma(regs->ret, buf);	/* faulting code, not data */
+		show_faulting_vma(regs->ret);	/* faulting code, not data */
 
 	pr_info("[STAT32]: 0x%08lx", regs->status32);
 
@@ -222,7 +228,7 @@ void show_regs(struct pt_regs *regs)
 	if (cregs)
 		show_callee_regs(cregs);
 
-	free_page((unsigned long)buf);
+	preempt_disable();
 }
 
 void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
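The show_regs() rework above replaces a GFP_KERNEL page allocation with on-stack ARC_PATH_MAX buffers in the two helpers, and since the path lookups can sleep while generic callers invoke show_regs() with preemption disabled, the sleeping region is explicitly bracketed. The resulting shape, condensed from the hunks:

	preempt_enable();		/* generic caller holds preemption off */
	print_task_path_n_nm(tsk);	/* may sleep in file_path() */
	/* ... ECR decode, register dump, show_faulting_vma() ... */
	preempt_disable();		/* restore the caller's state */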