Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile            |   8
-rw-r--r--  arch/mips/kernel/asm-offsets.c       |   4
-rw-r--r--  arch/mips/kernel/crash.c             |  71
-rw-r--r--  arch/mips/kernel/crash_dump.c        |  75
-rw-r--r--  arch/mips/kernel/irq-rm9000.c        | 106
-rw-r--r--  arch/mips/kernel/machine_kexec.c     |  33
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c     |   4
-rw-r--r--  arch/mips/kernel/mips_ksyms.c        |   2
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c | 124
-rw-r--r--  arch/mips/kernel/process.c           |   4
-rw-r--r--  arch/mips/kernel/relocate_kernel.S   | 107
-rw-r--r--  arch/mips/kernel/scall64-n32.S       |   6
-rw-r--r--  arch/mips/kernel/setup.c             |  56
-rw-r--r--  arch/mips/kernel/signal.c            |  13
-rw-r--r--  arch/mips/kernel/smp.c               |  17
-rw-r--r--  arch/mips/kernel/traps.c             |  25
16 files changed, 516 insertions, 139 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 8b28bc4e14ea..007c33d73715 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -16,7 +16,7 @@ CFLAGS_REMOVE_perf_event_mipsxx.o = -pg
 endif
 
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
-obj-$(CONFIG_CEVT_R4K_LIB)	+= cevt-r4k.o
+obj-$(CONFIG_CEVT_R4K)		+= cevt-r4k.o
 obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o
 obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o
@@ -25,7 +25,7 @@ obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)	+= csrc-bcm1480.o
 obj-$(CONFIG_CSRC_IOASIC)	+= csrc-ioasic.o
 obj-$(CONFIG_CSRC_POWERTV)	+= csrc-powertv.o
-obj-$(CONFIG_CSRC_R4K_LIB)	+= csrc-r4k.o
+obj-$(CONFIG_CSRC_R4K)		+= csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)	+= csrc-sb1250.o
 obj-$(CONFIG_SYNC_R4K)		+= sync-r4k.o
 
@@ -58,7 +58,6 @@ obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
 obj-$(CONFIG_I8259)		+= i8259.o
 obj-$(CONFIG_IRQ_CPU)		+= irq_cpu.o
 obj-$(CONFIG_IRQ_CPU_RM7K)	+= irq-rm7000.o
-obj-$(CONFIG_IRQ_CPU_RM9K)	+= irq-rm9000.o
 obj-$(CONFIG_MIPS_MSC)		+= irq-msc01.o
 obj-$(CONFIG_IRQ_TXX9)		+= irq_txx9.o
 obj-$(CONFIG_IRQ_GT641XX)	+= irq-gt641xx.o
@@ -80,7 +79,8 @@ obj-$(CONFIG_I8253) += i8253.o
 
 obj-$(CONFIG_GPIO_TXX9)		+= gpio_txx9.o
 
-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_SPINLOCK_TEST)	+= spinlock_test.o
 obj-$(CONFIG_MIPS_MACHINE)	+= mips_machine.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 0c4bce4882a6..9690998d4ef3 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -125,10 +125,6 @@ void output_thread_defines(void)
 	       thread.cp0_baduaddr);
 	OFFSET(THREAD_ECODE, task_struct, \
 	       thread.error_code);
-	OFFSET(THREAD_TRAMP, task_struct, \
-	       thread.irix_trampoline);
-	OFFSET(THREAD_OLDCTX, task_struct, \
-	       thread.irix_oldctx);
 	BLANK();
 }
 
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
new file mode 100644
index 000000000000..0f53c39324bb
--- /dev/null
+++ b/arch/mips/kernel/crash.c
@@ -0,0 +1,71 @@
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/reboot.h>
+#include <linux/kexec.h>
+#include <linux/bootmem.h>
+#include <linux/crash_dump.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+
+/* This keeps track of which CPU is the crashing one. */
+static int crashing_cpu = -1;
+static cpumask_t cpus_in_crash = CPU_MASK_NONE;
+
+#ifdef CONFIG_SMP
+static void crash_shutdown_secondary(void *ignore)
+{
+	struct pt_regs *regs;
+	int cpu = smp_processor_id();
+
+	regs = task_pt_regs(current);
+
+	if (!cpu_online(cpu))
+		return;
+
+	local_irq_disable();
+	if (!cpu_isset(cpu, cpus_in_crash))
+		crash_save_cpu(regs, cpu);
+	cpu_set(cpu, cpus_in_crash);
+
+	while (!atomic_read(&kexec_ready_to_reboot))
+		cpu_relax();
+	relocated_kexec_smp_wait(NULL);
+	/* NOTREACHED */
+}
+
+static void crash_kexec_prepare_cpus(void)
+{
+	unsigned int msecs;
+
+	unsigned int ncpus = num_online_cpus() - 1; /* Excluding the panic cpu */
+
+	dump_send_ipi(crash_shutdown_secondary);
+	smp_wmb();
+
+	/*
+	 * The crash CPU sends an IPI and waits for other CPUs to
+	 * respond. Delay of at least 10 seconds.
+	 */
+	pr_emerg("Sending IPI to other cpus...\n");
+	msecs = 10000;
+	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+		cpu_relax();
+		mdelay(1);
+	}
+}
+
+#else /* !defined(CONFIG_SMP) */
+static void crash_kexec_prepare_cpus(void) {}
+#endif /* !defined(CONFIG_SMP) */
+
+void default_machine_crash_shutdown(struct pt_regs *regs)
+{
+	local_irq_disable();
+	crashing_cpu = smp_processor_id();
+	crash_save_cpu(regs, crashing_cpu);
+	crash_kexec_prepare_cpus();
+	cpu_set(crashing_cpu, cpus_in_crash);
+}
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
new file mode 100644
index 000000000000..35bed0d2342c
--- /dev/null
+++ b/arch/mips/kernel/crash_dump.c
@@ -0,0 +1,75 @@
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
+#include <linux/crash_dump.h>
+#include <asm/uaccess.h>
+
+static int __init parse_savemaxmem(char *p)
+{
+	if (p)
+		saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;
+
+	return 1;
+}
+__setup("savemaxmem=", parse_savemaxmem);
+
+
+static void *kdump_buf_page;
+
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ *	space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ *	otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel.
+ *
+ * Calling copy_to_user() in atomic context is not desirable. Hence first
+ * copying the data to a pre-allocated kernel page and then copying to user
+ * space in non-atomic context.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+			 size_t csize, unsigned long offset, int userbuf)
+{
+	void *vaddr;
+
+	if (!csize)
+		return 0;
+
+	vaddr = kmap_atomic_pfn(pfn);
+
+	if (!userbuf) {
+		memcpy(buf, (vaddr + offset), csize);
+		kunmap_atomic(vaddr);
+	} else {
+		if (!kdump_buf_page) {
+			pr_warning("Kdump: Kdump buffer page not allocated\n");
+
+			return -EFAULT;
+		}
+		copy_page(kdump_buf_page, vaddr);
+		kunmap_atomic(vaddr);
+		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
+			return -EFAULT;
+	}
+
+	return csize;
+}
+
+static int __init kdump_buf_page_init(void)
+{
+	int ret = 0;
+
+	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!kdump_buf_page) {
+		pr_warning("Kdump: Failed to allocate kdump buffer page\n");
+		ret = -ENOMEM;
+	}
+
+	return ret;
+}
+arch_initcall(kdump_buf_page_init);
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
deleted file mode 100644
index 1282b9ae81c4..000000000000
--- a/arch/mips/kernel/irq-rm9000.c
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 2003 Ralf Baechle
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * Handler for RM9000 extended interrupts.  These are a non-standard
- * feature so we handle them separately from standard interrupts.
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-
-static inline void unmask_rm9k_irq(struct irq_data *d)
-{
-	set_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE));
-}
-
-static inline void mask_rm9k_irq(struct irq_data *d)
-{
-	clear_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE));
-}
-
-static inline void rm9k_cpu_irq_enable(struct irq_data *d)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	unmask_rm9k_irq(d);
-	local_irq_restore(flags);
-}
-
-/*
- * Performance counter interrupts are global on all processors.
- */
-static void local_rm9k_perfcounter_irq_startup(void *args)
-{
-	rm9k_cpu_irq_enable(args);
-}
-
-static unsigned int rm9k_perfcounter_irq_startup(struct irq_data *d)
-{
-	on_each_cpu(local_rm9k_perfcounter_irq_startup, d, 1);
-
-	return 0;
-}
-
-static void local_rm9k_perfcounter_irq_shutdown(void *args)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	mask_rm9k_irq(args);
-	local_irq_restore(flags);
-}
-
-static void rm9k_perfcounter_irq_shutdown(struct irq_data *d)
-{
-	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, d, 1);
-}
-
-static struct irq_chip rm9k_irq_controller = {
-	.name = "RM9000",
-	.irq_ack = mask_rm9k_irq,
-	.irq_mask = mask_rm9k_irq,
-	.irq_mask_ack = mask_rm9k_irq,
-	.irq_unmask = unmask_rm9k_irq,
-	.irq_eoi = unmask_rm9k_irq
-};
-
-static struct irq_chip rm9k_perfcounter_irq = {
-	.name = "RM9000",
-	.irq_startup = rm9k_perfcounter_irq_startup,
-	.irq_shutdown = rm9k_perfcounter_irq_shutdown,
-	.irq_ack = mask_rm9k_irq,
-	.irq_mask = mask_rm9k_irq,
-	.irq_mask_ack = mask_rm9k_irq,
-	.irq_unmask = unmask_rm9k_irq,
-};
-
-unsigned int rm9000_perfcount_irq;
-
-EXPORT_SYMBOL(rm9000_perfcount_irq);
-
-void __init rm9k_cpu_irq_init(void)
-{
-	int base = RM9K_CPU_IRQ_BASE;
-	int i;
-
-	clear_c0_intcontrol(0x0000f000);	/* Mask all */
-
-	for (i = base; i < base + 4; i++)
-		irq_set_chip_and_handler(i, &rm9k_irq_controller,
-					 handle_level_irq);
-
-	rm9000_perfcount_irq = base + 1;
-	irq_set_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
-				 handle_percpu_irq);
-}
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 85beb9b0b2d0..992e18474da5 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -5,7 +5,7 @@
  * This source code is licensed under the GNU General Public License,
  * Version 2. See the file COPYING for more details.
  */
-
+#include <linux/compiler.h>
 #include <linux/kexec.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
@@ -19,9 +19,19 @@ extern const size_t relocate_new_kernel_size;
 extern unsigned long kexec_start_address;
 extern unsigned long kexec_indirection_page;
 
+int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+void (*_machine_kexec_shutdown)(void) = NULL;
+void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+#ifdef CONFIG_SMP
+void (*relocated_kexec_smp_wait) (void *);
+atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+#endif
+
 int
 machine_kexec_prepare(struct kimage *kimage)
 {
+	if (_machine_kexec_prepare)
+		return _machine_kexec_prepare(kimage);
 	return 0;
 }
 
@@ -33,14 +43,20 @@ machine_kexec_cleanup(struct kimage *kimage)
 void
 machine_shutdown(void)
 {
+	if (_machine_kexec_shutdown)
+		_machine_kexec_shutdown();
 }
 
 void
 machine_crash_shutdown(struct pt_regs *regs)
 {
+	if (_machine_crash_shutdown)
+		_machine_crash_shutdown(regs);
+	else
+		default_machine_crash_shutdown(regs);
 }
 
-typedef void (*noretfun_t)(void) __attribute__((noreturn));
+typedef void (*noretfun_t)(void) __noreturn;
 
 void
 machine_kexec(struct kimage *image)
@@ -52,7 +68,9 @@ machine_kexec(struct kimage *image)
 	reboot_code_buffer =
 		(unsigned long)page_address(image->control_code_page);
 
-	kexec_start_address = image->start;
+	kexec_start_address =
+		(unsigned long) phys_to_virt(image->start);
+
 	kexec_indirection_page =
 		(unsigned long) phys_to_virt(image->head & PAGE_MASK);
 
@@ -63,7 +81,7 @@ machine_kexec(struct kimage *image)
 	 * The generic kexec code builds a page list with physical
 	 * addresses. they are directly accessible through KSEG0 (or
 	 * CKSEG0 or XPHYS if on 64bit system), hence the
-	 * pys_to_virt() call.
+	 * phys_to_virt() call.
 	 */
 	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
 	     ptr = (entry & IND_INDIRECTION) ?
@@ -81,5 +99,12 @@ machine_kexec(struct kimage *image)
81 printk("Will call new kernel at %08lx\n", image->start); 99 printk("Will call new kernel at %08lx\n", image->start);
82 printk("Bye ...\n"); 100 printk("Bye ...\n");
83 __flush_cache_all(); 101 __flush_cache_all();
102#ifdef CONFIG_SMP
103 /* All secondary cpus now may jump to kexec_wait cycle */
104 relocated_kexec_smp_wait = reboot_code_buffer +
105 (void *)(kexec_smp_wait - relocate_new_kernel);
106 smp_wmb();
107 atomic_set(&kexec_ready_to_reboot, 1);
108#endif
84 ((noretfun_t) reboot_code_buffer)(); 109 ((noretfun_t) reboot_code_buffer)();
85} 110}
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 33f63bab478a..fd814e08c945 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -50,8 +50,8 @@ static bool check_same_owner(struct task_struct *p)
 
 	rcu_read_lock();
 	pcred = __task_cred(p);
-	match = (cred->euid == pcred->euid ||
-		 cred->euid == pcred->uid);
+	match = (uid_eq(cred->euid, pcred->euid) ||
+		 uid_eq(cred->euid, pcred->uid));
 	rcu_read_unlock();
 	return match;
 }
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 2d9304c2b54c..df1e3e455f9a 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -11,7 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/export.h>
 #include <asm/checksum.h>
-#include <asm/pgtable.h>
+#include <linux/mm.h>
 #include <asm/uaccess.h>
 #include <asm/ftrace.h>
 
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index a9b995dcf691..b14c14d90fc2 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -840,6 +840,16 @@ static const struct mips_perf_event bmips5000_event_map
 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
 };
 
+static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
+	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
+	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
+	[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
+	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
+	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
+};
+
 /* 24K/34K/1004K cores can share the same cache event map. */
 static const struct mips_perf_event mipsxxcore_cache_map
 				[PERF_COUNT_HW_CACHE_MAX]
@@ -1092,6 +1102,100 @@ static const struct mips_perf_event octeon_cache_map
 },
 };
 
+static const struct mips_perf_event xlp_cache_map
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
+		[C(RESULT_MISS)]	= { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
+		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
+		[C(RESULT_MISS)]	= { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
+		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
+		[C(RESULT_MISS)]	= { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(DTLB)] = {
+	/*
+	 * Only general DTLB misses are counted; use the same event for
+	 * read and write.
+	 */
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+[C(BPU)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { 0x25, CNTR_ALL },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+	},
+},
+};
+
 #ifdef CONFIG_MIPS_MT_SMP
 static void check_and_calc_range(struct perf_event *event,
 				 const struct mips_perf_event *pev)
@@ -1444,6 +1548,20 @@ static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
 	return &raw_event;
 }
 
+static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
+{
+	unsigned int raw_id = config & 0xff;
+
+	/* Only 1-63 are defined */
+	if ((raw_id < 0x01) || (raw_id > 0x3f))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	raw_event.cntr_mask = CNTR_ALL;
+	raw_event.event_id = raw_id;
+
+	return &raw_event;
+}
+
 static int __init
 init_hw_perf_events(void)
 {
@@ -1522,6 +1640,12 @@ init_hw_perf_events(void)
 		mipspmu.general_event_map = &bmips5000_event_map;
 		mipspmu.cache_event_map = &bmips5000_cache_map;
 		break;
+	case CPU_XLP:
+		mipspmu.name = "xlp";
+		mipspmu.general_event_map = &xlp_event_map;
+		mipspmu.cache_event_map = &xlp_cache_map;
+		mipspmu.map_raw_event = xlp_pmu_map_raw_event;
+		break;
 	default:
 		pr_cont("Either hardware does not support performance "
 			"counters, or not yet implemented.\n");
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 38097652d62d..a11c6f9fdd5e 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -72,9 +72,7 @@ void __noreturn cpu_idle(void)
 		}
 	}
 #ifdef CONFIG_HOTPLUG_CPU
-		if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
-		    (system_state == SYSTEM_RUNNING ||
-		     system_state == SYSTEM_BOOTING))
+		if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map))
 			play_dead();
 #endif
 		rcu_idle_exit();
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 87481f916a61..e4142c5f7c2b 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -15,6 +15,11 @@
 #include <asm/addrspace.h>
 
 LEAF(relocate_new_kernel)
+	PTR_L	a0, arg0
+	PTR_L	a1, arg1
+	PTR_L	a2, arg2
+	PTR_L	a3, arg3
+
 	PTR_L	s0, kexec_indirection_page
 	PTR_L	s1, kexec_start_address
 
@@ -26,7 +31,6 @@ process_entry:
 	and	s3, s2, 0x1
 	beq	s3, zero, 1f
 	and	s4, s2, ~0x1	/* store destination addr in s4 */
-	move	a0, s4
 	b	process_entry
 
 1:
@@ -60,10 +64,111 @@ copy_word:
 	b	process_entry
 
 done:
+#ifdef CONFIG_SMP
+	/* kexec_flag reset is a signal to other CPUs that the kernel
+	   was moved to its location. Note - we need the relocated address
+	   of kexec_flag. */
+
+	bal	1f
+ 1:	move	t1,ra;
+	PTR_LA	t2,1b
+	PTR_LA	t0,kexec_flag
+	PTR_SUB	t0,t0,t2;
+	PTR_ADD	t0,t1,t0;
+	LONG_S	zero,(t0)
+#endif
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	/* We need to flush the I-cache before jumping to the new kernel.
+	 * Unfortunately, this code is cpu-specific.
+	 */
+	.set push
+	.set noreorder
+	syncw
+	syncw
+	synci	0($0)
+	.set pop
+#else
+	sync
+#endif
 	/* jump to kexec_start_address */
 	j	s1
 	END(relocate_new_kernel)
 
+#ifdef CONFIG_SMP
+/*
+ * Other CPUs should wait until the code is relocated and
+ * then start at the entry point.
+ */
+LEAF(kexec_smp_wait)
+	PTR_L	a0, s_arg0
+	PTR_L	a1, s_arg1
+	PTR_L	a2, s_arg2
+	PTR_L	a3, s_arg3
+	PTR_L	s1, kexec_start_address
+
+	/* Non-relocated addresses work for the args and kexec_start_address
+	 * (the old kernel is not overwritten). But we need the relocated
+	 * address of kexec_flag.
+	 */
+
+	bal	1f
+1:	move	t1,ra;
+	PTR_LA	t2,1b
+	PTR_LA	t0,kexec_flag
+	PTR_SUB	t0,t0,t2;
+	PTR_ADD	t0,t1,t0;
+
+1:	LONG_L	s0, (t0)
+	bne	s0, zero,1b
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	.set push
+	.set noreorder
+	synci	0($0)
+	.set pop
+#else
+	sync
+#endif
+	j	s1
+	END(kexec_smp_wait)
+#endif
+
+#ifdef __mips64
+	/* all PTR's must be aligned to 8 byte in 64-bit mode */
+	.align	3
+#endif
+
+/* All parameters to the new kernel are passed in registers a0-a3.
+ * kexec_args[0..3] are used to prepare register values.
+ */
+
+kexec_args:
+	EXPORT(kexec_args)
+arg0:	PTR	0x0
+arg1:	PTR	0x0
+arg2:	PTR	0x0
+arg3:	PTR	0x0
+	.size	kexec_args,PTRSIZE*4
+
+#ifdef CONFIG_SMP
+/*
+ * Secondary CPUs may have different kernel parameters in
+ * their registers a0-a3. secondary_kexec_args[0..3] are used
+ * to prepare register values.
+ */
+secondary_kexec_args:
+	EXPORT(secondary_kexec_args)
+s_arg0:	PTR	0x0
+s_arg1:	PTR	0x0
+s_arg2:	PTR	0x0
+s_arg3:	PTR	0x0
+	.size	secondary_kexec_args,PTRSIZE*4
+kexec_flag:
+	LONG	0x1
+
+#endif
+
 kexec_start_address:
 	EXPORT(kexec_start_address)
 	PTR	0x0
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 629719143763..ad3de9668da9 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -17,12 +17,6 @@
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 
-/* This duplicates the definition from <linux/sched.h> */
-#define PT_TRACESYS	0x00000002	/* tracing system calls */
-
-/* This duplicates the definition from <asm/signal.h> */
-#define SIGILL		4		/* Illegal instruction (ANSI). */
-
 #ifndef CONFIG_MIPS32_O32
 /* No O32, so define handle_sys here */
 #define handle_sysn32 handle_sys
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 290dc6a1d7a3..8c41187801ce 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -22,6 +22,7 @@
 #include <linux/console.h>
 #include <linux/pfn.h>
 #include <linux/debugfs.h>
+#include <linux/kexec.h>
 
 #include <asm/addrspace.h>
 #include <asm/bootinfo.h>
@@ -536,12 +537,64 @@ static void __init arch_mem_init(char **cmdline_p)
 	}
 
 	bootmem_init();
+#ifdef CONFIG_KEXEC
+	if (crashk_res.start != crashk_res.end)
+		reserve_bootmem(crashk_res.start,
+				crashk_res.end - crashk_res.start + 1,
+				BOOTMEM_DEFAULT);
+#endif
 	device_tree_init();
 	sparse_init();
 	plat_swiotlb_setup();
 	paging_init();
 }
 
+#ifdef CONFIG_KEXEC
+static inline unsigned long long get_total_mem(void)
+{
+	unsigned long long total;
+
+	total = max_pfn - min_low_pfn;
+	return total << PAGE_SHIFT;
+}
+
+static void __init mips_parse_crashkernel(void)
+{
+	unsigned long long total_mem;
+	unsigned long long crash_size, crash_base;
+	int ret;
+
+	total_mem = get_total_mem();
+	ret = parse_crashkernel(boot_command_line, total_mem,
+				&crash_size, &crash_base);
+	if (ret != 0 || crash_size <= 0)
+		return;
+
+	crashk_res.start = crash_base;
+	crashk_res.end = crash_base + crash_size - 1;
+}
+
+static void __init request_crashkernel(struct resource *res)
+{
+	int ret;
+
+	ret = request_resource(res, &crashk_res);
+	if (!ret)
+		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
+			(unsigned long)((crashk_res.end -
+					 crashk_res.start + 1) >> 20),
+			(unsigned long)(crashk_res.start >> 20));
+}
+#else /* !defined(CONFIG_KEXEC) */
+static void __init mips_parse_crashkernel(void)
+{
+}
+
+static void __init request_crashkernel(struct resource *res)
+{
+}
+#endif /* !defined(CONFIG_KEXEC) */
+
 static void __init resource_init(void)
 {
 	int i;
@@ -557,6 +610,8 @@ static void __init resource_init(void)
 	/*
 	 * Request address space for all standard RAM.
 	 */
+	mips_parse_crashkernel();
+
 	for (i = 0; i < boot_mem_map.nr_map; i++) {
 		struct resource *res;
 		unsigned long start, end;
@@ -593,6 +648,7 @@ static void __init resource_init(void)
 		 */
 		request_resource(res, &code_resource);
 		request_resource(res, &data_resource);
+		request_crashkernel(res);
 	}
 }
 
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 0e1a5b8ae817..b6aa77035019 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -568,17 +568,20 @@ static void do_signal(struct pt_regs *regs)
 	}
 
 	if (regs->regs[0]) {
-		if (regs->regs[2] == ERESTARTNOHAND ||
-		    regs->regs[2] == ERESTARTSYS ||
-		    regs->regs[2] == ERESTARTNOINTR) {
+		switch (regs->regs[2]) {
+		case ERESTARTNOHAND:
+		case ERESTARTSYS:
+		case ERESTARTNOINTR:
 			regs->regs[2] = regs->regs[0];
 			regs->regs[7] = regs->regs[26];
 			regs->cp0_epc -= 4;
-		}
-		if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
+			break;
+
+		case ERESTART_RESTARTBLOCK:
 			regs->regs[2] = current->thread.abi->restart;
 			regs->regs[7] = regs->regs[26];
 			regs->cp0_epc -= 4;
+			break;
 		}
 		regs->regs[0] = 0;	/* Don't deal with this again. */
 	}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9005bf9fb859..2e6374a589ec 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -386,3 +386,20 @@ void flush_tlb_one(unsigned long vaddr)
 
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(flush_tlb_one);
+
+#if defined(CONFIG_KEXEC)
+void (*dump_ipi_function_ptr)(void *) = NULL;
+void dump_send_ipi(void (*dump_ipi_callback)(void *))
+{
+	int i;
+	int cpu = smp_processor_id();
+
+	dump_ipi_function_ptr = dump_ipi_callback;
+	smp_mb();
+	for_each_online_cpu(i)
+		if (i != cpu)
+			mp_ops->send_ipi_single(i, SMP_DUMP);
+
+}
+EXPORT_SYMBOL(dump_send_ipi);
+#endif
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9be3df1fa8a4..cf7ac5483f53 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -13,6 +13,7 @@
  */
 #include <linux/bug.h>
 #include <linux/compiler.h>
+#include <linux/kexec.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -409,6 +410,9 @@ void __noreturn die(const char *str, struct pt_regs *regs)
 		panic("Fatal exception");
 	}
 
+	if (regs && kexec_should_crash(current))
+		crash_kexec(regs);
+
 	do_exit(sig);
 }
 
@@ -1021,6 +1025,24 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 
 		return;
 
+	case 3:
+		/*
+		 * Old (MIPS I and MIPS II) processors will set this code
+		 * for COP1X opcode instructions that replaced the original
+		 * COP3 space. We don't limit COP1 space instructions in
+		 * the emulator according to the CPU ISA, so we want to
+		 * treat COP1X instructions consistently regardless of which
+		 * code the CPU chose. Therefore we redirect this trap to
+		 * the FP emulator too.
+		 *
+		 * Then some newer FPU-less processors use this code
+		 * erroneously too, so they are covered by this choice
+		 * as well.
+		 */
+		if (raw_cpu_has_fpu)
+			break;
+		/* Fall through. */
+
 	case 1:
 		if (used_math())	/* Using the FPU again. */
 			own_fpu(1);
@@ -1044,9 +1066,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 	case 2:
 		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
 		return;
-
-	case 3:
-		break;
 	}
 
 	force_sig(SIGILL, current);