Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile             |   8
-rw-r--r--  arch/mips/kernel/asm-offsets.c        |   4
-rw-r--r--  arch/mips/kernel/cpu-probe.c          |   1
-rw-r--r--  arch/mips/kernel/crash.c              |  71
-rw-r--r--  arch/mips/kernel/crash_dump.c         |  75
-rw-r--r--  arch/mips/kernel/entry.S              |  13
-rw-r--r--  arch/mips/kernel/irq-rm9000.c         | 106
-rw-r--r--  arch/mips/kernel/linux32.c            |  23
-rw-r--r--  arch/mips/kernel/machine_kexec.c      |  33
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c      |   4
-rw-r--r--  arch/mips/kernel/mips_ksyms.c         |   4
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c  | 124
-rw-r--r--  arch/mips/kernel/process.c            |  68
-rw-r--r--  arch/mips/kernel/relocate_kernel.S    | 107
-rw-r--r--  arch/mips/kernel/scall64-n32.S        |  14
-rw-r--r--  arch/mips/kernel/scall64-o32.S        |   2
-rw-r--r--  arch/mips/kernel/setup.c              |  82
-rw-r--r--  arch/mips/kernel/signal.c             |  13
-rw-r--r--  arch/mips/kernel/smp-cmp.c            |   2
-rw-r--r--  arch/mips/kernel/smp.c                |  17
-rw-r--r--  arch/mips/kernel/syscall.c            |  57
-rw-r--r--  arch/mips/kernel/traps.c              |  25
22 files changed, 580 insertions(+), 273 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 8b28bc4e14ea..007c33d73715 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
| @@ -16,7 +16,7 @@ CFLAGS_REMOVE_perf_event_mipsxx.o = -pg | |||
| 16 | endif | 16 | endif |
| 17 | 17 | ||
| 18 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o | 18 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o |
| 19 | obj-$(CONFIG_CEVT_R4K_LIB) += cevt-r4k.o | 19 | obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o |
| 20 | obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o | 20 | obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o |
| 21 | obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o | 21 | obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o |
| 22 | obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o | 22 | obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o |
| @@ -25,7 +25,7 @@ obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o | |||
| 25 | obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o | 25 | obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o |
| 26 | obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o | 26 | obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o |
| 27 | obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o | 27 | obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o |
| 28 | obj-$(CONFIG_CSRC_R4K_LIB) += csrc-r4k.o | 28 | obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o |
| 29 | obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o | 29 | obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o |
| 30 | obj-$(CONFIG_SYNC_R4K) += sync-r4k.o | 30 | obj-$(CONFIG_SYNC_R4K) += sync-r4k.o |
| 31 | 31 | ||
| @@ -58,7 +58,6 @@ obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o | |||
| 58 | obj-$(CONFIG_I8259) += i8259.o | 58 | obj-$(CONFIG_I8259) += i8259.o |
| 59 | obj-$(CONFIG_IRQ_CPU) += irq_cpu.o | 59 | obj-$(CONFIG_IRQ_CPU) += irq_cpu.o |
| 60 | obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o | 60 | obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o |
| 61 | obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o | ||
| 62 | obj-$(CONFIG_MIPS_MSC) += irq-msc01.o | 61 | obj-$(CONFIG_MIPS_MSC) += irq-msc01.o |
| 63 | obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o | 62 | obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o |
| 64 | obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o | 63 | obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o |
| @@ -80,7 +79,8 @@ obj-$(CONFIG_I8253) += i8253.o | |||
| 80 | 79 | ||
| 81 | obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o | 80 | obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o |
| 82 | 81 | ||
| 83 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o | 82 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o |
| 83 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | ||
| 84 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | 84 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
| 85 | obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o | 85 | obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o |
| 86 | obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o | 86 | obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o |
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 0c4bce4882a6..9690998d4ef3 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
| @@ -125,10 +125,6 @@ void output_thread_defines(void) | |||
| 125 | thread.cp0_baduaddr); | 125 | thread.cp0_baduaddr); |
| 126 | OFFSET(THREAD_ECODE, task_struct, \ | 126 | OFFSET(THREAD_ECODE, task_struct, \ |
| 127 | thread.error_code); | 127 | thread.error_code); |
| 128 | OFFSET(THREAD_TRAMP, task_struct, \ | ||
| 129 | thread.irix_trampoline); | ||
| 130 | OFFSET(THREAD_OLDCTX, task_struct, \ | ||
| 131 | thread.irix_oldctx); | ||
| 132 | BLANK(); | 128 | BLANK(); |
| 133 | } | 129 | } |
| 134 | 130 | ||
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b1fb7af3c350..cce3782c96c9 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
| @@ -510,7 +510,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) | |||
| 510 | c->cputype = CPU_R3000A; | 510 | c->cputype = CPU_R3000A; |
| 511 | __cpu_name[cpu] = "R3000A"; | 511 | __cpu_name[cpu] = "R3000A"; |
| 512 | } | 512 | } |
| 513 | break; | ||
| 514 | } else { | 513 | } else { |
| 515 | c->cputype = CPU_R3000; | 514 | c->cputype = CPU_R3000; |
| 516 | __cpu_name[cpu] = "R3000"; | 515 | __cpu_name[cpu] = "R3000"; |
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
new file mode 100644
index 000000000000..0f53c39324bb
--- /dev/null
+++ b/arch/mips/kernel/crash.c
| @@ -0,0 +1,71 @@ | |||
| 1 | #include <linux/kernel.h> | ||
| 2 | #include <linux/smp.h> | ||
| 3 | #include <linux/reboot.h> | ||
| 4 | #include <linux/kexec.h> | ||
| 5 | #include <linux/bootmem.h> | ||
| 6 | #include <linux/crash_dump.h> | ||
| 7 | #include <linux/delay.h> | ||
| 8 | #include <linux/init.h> | ||
| 9 | #include <linux/irq.h> | ||
| 10 | #include <linux/types.h> | ||
| 11 | #include <linux/sched.h> | ||
| 12 | |||
| 13 | /* This keeps a track of which one is crashing cpu. */ | ||
| 14 | static int crashing_cpu = -1; | ||
| 15 | static cpumask_t cpus_in_crash = CPU_MASK_NONE; | ||
| 16 | |||
| 17 | #ifdef CONFIG_SMP | ||
| 18 | static void crash_shutdown_secondary(void *ignore) | ||
| 19 | { | ||
| 20 | struct pt_regs *regs; | ||
| 21 | int cpu = smp_processor_id(); | ||
| 22 | |||
| 23 | regs = task_pt_regs(current); | ||
| 24 | |||
| 25 | if (!cpu_online(cpu)) | ||
| 26 | return; | ||
| 27 | |||
| 28 | local_irq_disable(); | ||
| 29 | if (!cpu_isset(cpu, cpus_in_crash)) | ||
| 30 | crash_save_cpu(regs, cpu); | ||
| 31 | cpu_set(cpu, cpus_in_crash); | ||
| 32 | |||
| 33 | while (!atomic_read(&kexec_ready_to_reboot)) | ||
| 34 | cpu_relax(); | ||
| 35 | relocated_kexec_smp_wait(NULL); | ||
| 36 | /* NOTREACHED */ | ||
| 37 | } | ||
| 38 | |||
| 39 | static void crash_kexec_prepare_cpus(void) | ||
| 40 | { | ||
| 41 | unsigned int msecs; | ||
| 42 | |||
| 43 | unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ | ||
| 44 | |||
| 45 | dump_send_ipi(crash_shutdown_secondary); | ||
| 46 | smp_wmb(); | ||
| 47 | |||
| 48 | /* | ||
| 49 | * The crash CPU sends an IPI and wait for other CPUs to | ||
| 50 | * respond. Delay of at least 10 seconds. | ||
| 51 | */ | ||
| 52 | pr_emerg("Sending IPI to other cpus...\n"); | ||
| 53 | msecs = 10000; | ||
| 54 | while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) { | ||
| 55 | cpu_relax(); | ||
| 56 | mdelay(1); | ||
| 57 | } | ||
| 58 | } | ||
| 59 | |||
| 60 | #else /* !defined(CONFIG_SMP) */ | ||
| 61 | static void crash_kexec_prepare_cpus(void) {} | ||
| 62 | #endif /* !defined(CONFIG_SMP) */ | ||
| 63 | |||
| 64 | void default_machine_crash_shutdown(struct pt_regs *regs) | ||
| 65 | { | ||
| 66 | local_irq_disable(); | ||
| 67 | crashing_cpu = smp_processor_id(); | ||
| 68 | crash_save_cpu(regs, crashing_cpu); | ||
| 69 | crash_kexec_prepare_cpus(); | ||
| 70 | cpu_set(crashing_cpu, cpus_in_crash); | ||
| 71 | } | ||
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
new file mode 100644
index 000000000000..35bed0d2342c
--- /dev/null
+++ b/arch/mips/kernel/crash_dump.c
| @@ -0,0 +1,75 @@ | |||
| 1 | #include <linux/highmem.h> | ||
| 2 | #include <linux/bootmem.h> | ||
| 3 | #include <linux/crash_dump.h> | ||
| 4 | #include <asm/uaccess.h> | ||
| 5 | |||
| 6 | static int __init parse_savemaxmem(char *p) | ||
| 7 | { | ||
| 8 | if (p) | ||
| 9 | saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1; | ||
| 10 | |||
| 11 | return 1; | ||
| 12 | } | ||
| 13 | __setup("savemaxmem=", parse_savemaxmem); | ||
| 14 | |||
| 15 | |||
| 16 | static void *kdump_buf_page; | ||
| 17 | |||
| 18 | /** | ||
| 19 | * copy_oldmem_page - copy one page from "oldmem" | ||
| 20 | * @pfn: page frame number to be copied | ||
| 21 | * @buf: target memory address for the copy; this can be in kernel address | ||
| 22 | * space or user address space (see @userbuf) | ||
| 23 | * @csize: number of bytes to copy | ||
| 24 | * @offset: offset in bytes into the page (based on pfn) to begin the copy | ||
| 25 | * @userbuf: if set, @buf is in user address space, use copy_to_user(), | ||
| 26 | * otherwise @buf is in kernel address space, use memcpy(). | ||
| 27 | * | ||
| 28 | * Copy a page from "oldmem". For this page, there is no pte mapped | ||
| 29 | * in the current kernel. | ||
| 30 | * | ||
| 31 | * Calling copy_to_user() in atomic context is not desirable. Hence first | ||
| 32 | * copying the data to a pre-allocated kernel page and then copying to user | ||
| 33 | * space in non-atomic context. | ||
| 34 | */ | ||
| 35 | ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | ||
| 36 | size_t csize, unsigned long offset, int userbuf) | ||
| 37 | { | ||
| 38 | void *vaddr; | ||
| 39 | |||
| 40 | if (!csize) | ||
| 41 | return 0; | ||
| 42 | |||
| 43 | vaddr = kmap_atomic_pfn(pfn); | ||
| 44 | |||
| 45 | if (!userbuf) { | ||
| 46 | memcpy(buf, (vaddr + offset), csize); | ||
| 47 | kunmap_atomic(vaddr); | ||
| 48 | } else { | ||
| 49 | if (!kdump_buf_page) { | ||
| 50 | pr_warning("Kdump: Kdump buffer page not allocated\n"); | ||
| 51 | |||
| 52 | return -EFAULT; | ||
| 53 | } | ||
| 54 | copy_page(kdump_buf_page, vaddr); | ||
| 55 | kunmap_atomic(vaddr); | ||
| 56 | if (copy_to_user(buf, (kdump_buf_page + offset), csize)) | ||
| 57 | return -EFAULT; | ||
| 58 | } | ||
| 59 | |||
| 60 | return csize; | ||
| 61 | } | ||
| 62 | |||
| 63 | static int __init kdump_buf_page_init(void) | ||
| 64 | { | ||
| 65 | int ret = 0; | ||
| 66 | |||
| 67 | kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
| 68 | if (!kdump_buf_page) { | ||
| 69 | pr_warning("Kdump: Failed to allocate kdump buffer page\n"); | ||
| 70 | ret = -ENOMEM; | ||
| 71 | } | ||
| 72 | |||
| 73 | return ret; | ||
| 74 | } | ||
| 75 | arch_initcall(kdump_buf_page_init); | ||
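Note: copy_oldmem_page() above is the arch backend used by the generic
CONFIG_CRASH_DUMP code when the capture kernel reads the crashed kernel's
memory, typically through /proc/vmcore. As a hedged sketch (file paths are
illustrative; real setups normally run makedumpfile rather than a raw copy),
the capture environment can save the dump with a plain copy loop:

	/* Minimal user-space sketch: copy /proc/vmcore out of the capture kernel. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		static char buf[1 << 16];
		ssize_t n;
		int in = open("/proc/vmcore", O_RDONLY);
		int out = open("/var/crash/vmcore", O_WRONLY | O_CREAT | O_TRUNC, 0600);

		if (in < 0 || out < 0) {
			perror("open");
			return 1;
		}
		while ((n = read(in, buf, sizeof(buf))) > 0) {
			if (write(out, buf, n) != n) {	/* short write = give up */
				perror("write");
				return 1;
			}
		}
		return 0;
	}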
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index a6c133212003..e5786858cdb6 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
| @@ -36,6 +36,11 @@ FEXPORT(ret_from_exception) | |||
| 36 | FEXPORT(ret_from_irq) | 36 | FEXPORT(ret_from_irq) |
| 37 | LONG_S s0, TI_REGS($28) | 37 | LONG_S s0, TI_REGS($28) |
| 38 | FEXPORT(__ret_from_irq) | 38 | FEXPORT(__ret_from_irq) |
| 39 | /* | ||
| 40 | * We can be coming here from a syscall done in the kernel space, | ||
| 41 | * e.g. a failed kernel_execve(). | ||
| 42 | */ | ||
| 43 | resume_userspace_check: | ||
| 39 | LONG_L t0, PT_STATUS(sp) # returning to kernel mode? | 44 | LONG_L t0, PT_STATUS(sp) # returning to kernel mode? |
| 40 | andi t0, t0, KU_USER | 45 | andi t0, t0, KU_USER |
| 41 | beqz t0, resume_kernel | 46 | beqz t0, resume_kernel |
| @@ -65,6 +70,12 @@ need_resched: | |||
| 65 | b need_resched | 70 | b need_resched |
| 66 | #endif | 71 | #endif |
| 67 | 72 | ||
| 73 | FEXPORT(ret_from_kernel_thread) | ||
| 74 | jal schedule_tail # a0 = struct task_struct *prev | ||
| 75 | move a0, s1 | ||
| 76 | jal s0 | ||
| 77 | j syscall_exit | ||
| 78 | |||
| 68 | FEXPORT(ret_from_fork) | 79 | FEXPORT(ret_from_fork) |
| 69 | jal schedule_tail # a0 = struct task_struct *prev | 80 | jal schedule_tail # a0 = struct task_struct *prev |
| 70 | 81 | ||
| @@ -162,7 +173,7 @@ work_notifysig: # deal with pending signals and | |||
| 162 | move a0, sp | 173 | move a0, sp |
| 163 | li a1, 0 | 174 | li a1, 0 |
| 164 | jal do_notify_resume # a2 already loaded | 175 | jal do_notify_resume # a2 already loaded |
| 165 | j resume_userspace | 176 | j resume_userspace_check |
| 166 | 177 | ||
| 167 | FEXPORT(syscall_exit_partial) | 178 | FEXPORT(syscall_exit_partial) |
| 168 | local_irq_disable # make sure need_resched doesn't | 179 | local_irq_disable # make sure need_resched doesn't |
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
deleted file mode 100644
index 1282b9ae81c4..000000000000
--- a/arch/mips/kernel/irq-rm9000.c
+++ /dev/null
| @@ -1,106 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2003 Ralf Baechle | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms of the GNU General Public License as published by the | ||
| 6 | * Free Software Foundation; either version 2 of the License, or (at your | ||
| 7 | * option) any later version. | ||
| 8 | * | ||
| 9 | * Handler for RM9000 extended interrupts. These are a non-standard | ||
| 10 | * feature so we handle them separately from standard interrupts. | ||
| 11 | */ | ||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/interrupt.h> | ||
| 14 | #include <linux/irq.h> | ||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/module.h> | ||
| 17 | |||
| 18 | #include <asm/irq_cpu.h> | ||
| 19 | #include <asm/mipsregs.h> | ||
| 20 | |||
| 21 | static inline void unmask_rm9k_irq(struct irq_data *d) | ||
| 22 | { | ||
| 23 | set_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE)); | ||
| 24 | } | ||
| 25 | |||
| 26 | static inline void mask_rm9k_irq(struct irq_data *d) | ||
| 27 | { | ||
| 28 | clear_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE)); | ||
| 29 | } | ||
| 30 | |||
| 31 | static inline void rm9k_cpu_irq_enable(struct irq_data *d) | ||
| 32 | { | ||
| 33 | unsigned long flags; | ||
| 34 | |||
| 35 | local_irq_save(flags); | ||
| 36 | unmask_rm9k_irq(d); | ||
| 37 | local_irq_restore(flags); | ||
| 38 | } | ||
| 39 | |||
| 40 | /* | ||
| 41 | * Performance counter interrupts are global on all processors. | ||
| 42 | */ | ||
| 43 | static void local_rm9k_perfcounter_irq_startup(void *args) | ||
| 44 | { | ||
| 45 | rm9k_cpu_irq_enable(args); | ||
| 46 | } | ||
| 47 | |||
| 48 | static unsigned int rm9k_perfcounter_irq_startup(struct irq_data *d) | ||
| 49 | { | ||
| 50 | on_each_cpu(local_rm9k_perfcounter_irq_startup, d, 1); | ||
| 51 | |||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
| 55 | static void local_rm9k_perfcounter_irq_shutdown(void *args) | ||
| 56 | { | ||
| 57 | unsigned long flags; | ||
| 58 | |||
| 59 | local_irq_save(flags); | ||
| 60 | mask_rm9k_irq(args); | ||
| 61 | local_irq_restore(flags); | ||
| 62 | } | ||
| 63 | |||
| 64 | static void rm9k_perfcounter_irq_shutdown(struct irq_data *d) | ||
| 65 | { | ||
| 66 | on_each_cpu(local_rm9k_perfcounter_irq_shutdown, d, 1); | ||
| 67 | } | ||
| 68 | |||
| 69 | static struct irq_chip rm9k_irq_controller = { | ||
| 70 | .name = "RM9000", | ||
| 71 | .irq_ack = mask_rm9k_irq, | ||
| 72 | .irq_mask = mask_rm9k_irq, | ||
| 73 | .irq_mask_ack = mask_rm9k_irq, | ||
| 74 | .irq_unmask = unmask_rm9k_irq, | ||
| 75 | .irq_eoi = unmask_rm9k_irq | ||
| 76 | }; | ||
| 77 | |||
| 78 | static struct irq_chip rm9k_perfcounter_irq = { | ||
| 79 | .name = "RM9000", | ||
| 80 | .irq_startup = rm9k_perfcounter_irq_startup, | ||
| 81 | .irq_shutdown = rm9k_perfcounter_irq_shutdown, | ||
| 82 | .irq_ack = mask_rm9k_irq, | ||
| 83 | .irq_mask = mask_rm9k_irq, | ||
| 84 | .irq_mask_ack = mask_rm9k_irq, | ||
| 85 | .irq_unmask = unmask_rm9k_irq, | ||
| 86 | }; | ||
| 87 | |||
| 88 | unsigned int rm9000_perfcount_irq; | ||
| 89 | |||
| 90 | EXPORT_SYMBOL(rm9000_perfcount_irq); | ||
| 91 | |||
| 92 | void __init rm9k_cpu_irq_init(void) | ||
| 93 | { | ||
| 94 | int base = RM9K_CPU_IRQ_BASE; | ||
| 95 | int i; | ||
| 96 | |||
| 97 | clear_c0_intcontrol(0x0000f000); /* Mask all */ | ||
| 98 | |||
| 99 | for (i = base; i < base + 4; i++) | ||
| 100 | irq_set_chip_and_handler(i, &rm9k_irq_controller, | ||
| 101 | handle_level_irq); | ||
| 102 | |||
| 103 | rm9000_perfcount_irq = base + 1; | ||
| 104 | irq_set_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, | ||
| 105 | handle_percpu_irq); | ||
| 106 | } | ||
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 3a21acedf882..7adab86c632c 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
| @@ -3,7 +3,6 @@ | |||
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2000 Silicon Graphics, Inc. | 4 | * Copyright (C) 2000 Silicon Graphics, Inc. |
| 5 | * Written by Ulf Carlsson (ulfc@engr.sgi.com) | 5 | * Written by Ulf Carlsson (ulfc@engr.sgi.com) |
| 6 | * sys32_execve from ia64/ia32 code, Feb 2000, Kanoj Sarcar (kanoj@sgi.com) | ||
| 7 | */ | 6 | */ |
| 8 | #include <linux/compiler.h> | 7 | #include <linux/compiler.h> |
| 9 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
| @@ -77,26 +76,6 @@ out: | |||
| 77 | return error; | 76 | return error; |
| 78 | } | 77 | } |
| 79 | 78 | ||
| 80 | /* | ||
| 81 | * sys_execve() executes a new program. | ||
| 82 | */ | ||
| 83 | asmlinkage int sys32_execve(nabi_no_regargs struct pt_regs regs) | ||
| 84 | { | ||
| 85 | int error; | ||
| 86 | struct filename *filename; | ||
| 87 | |||
| 88 | filename = getname(compat_ptr(regs.regs[4])); | ||
| 89 | error = PTR_ERR(filename); | ||
| 90 | if (IS_ERR(filename)) | ||
| 91 | goto out; | ||
| 92 | error = compat_do_execve(filename->name, compat_ptr(regs.regs[5]), | ||
| 93 | compat_ptr(regs.regs[6]), ®s); | ||
| 94 | putname(filename); | ||
| 95 | |||
| 96 | out: | ||
| 97 | return error; | ||
| 98 | } | ||
| 99 | |||
| 100 | #define RLIM_INFINITY32 0x7fffffff | 79 | #define RLIM_INFINITY32 0x7fffffff |
| 101 | #define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x) | 80 | #define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x) |
| 102 | 81 | ||
| @@ -333,7 +312,7 @@ _sys32_clone(nabi_no_regargs struct pt_regs regs) | |||
| 333 | /* Use __dummy4 instead of getting it off the stack, so that | 312 | /* Use __dummy4 instead of getting it off the stack, so that |
| 334 | syscall() works. */ | 313 | syscall() works. */ |
| 335 | child_tidptr = (int __user *) __dummy4; | 314 | child_tidptr = (int __user *) __dummy4; |
| 336 | return do_fork(clone_flags, newsp, ®s, 0, | 315 | return do_fork(clone_flags, newsp, 0, |
| 337 | parent_tidptr, child_tidptr); | 316 | parent_tidptr, child_tidptr); |
| 338 | } | 317 | } |
| 339 | 318 | ||
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 85beb9b0b2d0..992e18474da5 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
| @@ -5,7 +5,7 @@ | |||
| 5 | * This source code is licensed under the GNU General Public License, | 5 | * This source code is licensed under the GNU General Public License, |
| 6 | * Version 2. See the file COPYING for more details. | 6 | * Version 2. See the file COPYING for more details. |
| 7 | */ | 7 | */ |
| 8 | 8 | #include <linux/compiler.h> | |
| 9 | #include <linux/kexec.h> | 9 | #include <linux/kexec.h> |
| 10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
| 11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
| @@ -19,9 +19,19 @@ extern const size_t relocate_new_kernel_size; | |||
| 19 | extern unsigned long kexec_start_address; | 19 | extern unsigned long kexec_start_address; |
| 20 | extern unsigned long kexec_indirection_page; | 20 | extern unsigned long kexec_indirection_page; |
| 21 | 21 | ||
| 22 | int (*_machine_kexec_prepare)(struct kimage *) = NULL; | ||
| 23 | void (*_machine_kexec_shutdown)(void) = NULL; | ||
| 24 | void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL; | ||
| 25 | #ifdef CONFIG_SMP | ||
| 26 | void (*relocated_kexec_smp_wait) (void *); | ||
| 27 | atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0); | ||
| 28 | #endif | ||
| 29 | |||
| 22 | int | 30 | int |
| 23 | machine_kexec_prepare(struct kimage *kimage) | 31 | machine_kexec_prepare(struct kimage *kimage) |
| 24 | { | 32 | { |
| 33 | if (_machine_kexec_prepare) | ||
| 34 | return _machine_kexec_prepare(kimage); | ||
| 25 | return 0; | 35 | return 0; |
| 26 | } | 36 | } |
| 27 | 37 | ||
| @@ -33,14 +43,20 @@ machine_kexec_cleanup(struct kimage *kimage) | |||
| 33 | void | 43 | void |
| 34 | machine_shutdown(void) | 44 | machine_shutdown(void) |
| 35 | { | 45 | { |
| 46 | if (_machine_kexec_shutdown) | ||
| 47 | _machine_kexec_shutdown(); | ||
| 36 | } | 48 | } |
| 37 | 49 | ||
| 38 | void | 50 | void |
| 39 | machine_crash_shutdown(struct pt_regs *regs) | 51 | machine_crash_shutdown(struct pt_regs *regs) |
| 40 | { | 52 | { |
| 53 | if (_machine_crash_shutdown) | ||
| 54 | _machine_crash_shutdown(regs); | ||
| 55 | else | ||
| 56 | default_machine_crash_shutdown(regs); | ||
| 41 | } | 57 | } |
| 42 | 58 | ||
| 43 | typedef void (*noretfun_t)(void) __attribute__((noreturn)); | 59 | typedef void (*noretfun_t)(void) __noreturn; |
| 44 | 60 | ||
| 45 | void | 61 | void |
| 46 | machine_kexec(struct kimage *image) | 62 | machine_kexec(struct kimage *image) |
| @@ -52,7 +68,9 @@ machine_kexec(struct kimage *image) | |||
| 52 | reboot_code_buffer = | 68 | reboot_code_buffer = |
| 53 | (unsigned long)page_address(image->control_code_page); | 69 | (unsigned long)page_address(image->control_code_page); |
| 54 | 70 | ||
| 55 | kexec_start_address = image->start; | 71 | kexec_start_address = |
| 72 | (unsigned long) phys_to_virt(image->start); | ||
| 73 | |||
| 56 | kexec_indirection_page = | 74 | kexec_indirection_page = |
| 57 | (unsigned long) phys_to_virt(image->head & PAGE_MASK); | 75 | (unsigned long) phys_to_virt(image->head & PAGE_MASK); |
| 58 | 76 | ||
| @@ -63,7 +81,7 @@ machine_kexec(struct kimage *image) | |||
| 63 | * The generic kexec code builds a page list with physical | 81 | * The generic kexec code builds a page list with physical |
| 64 | * addresses. they are directly accessible through KSEG0 (or | 82 | * addresses. they are directly accessible through KSEG0 (or |
| 65 | * CKSEG0 or XPHYS if on 64bit system), hence the | 83 | * CKSEG0 or XPHYS if on 64bit system), hence the |
| 66 | * pys_to_virt() call. | 84 | * phys_to_virt() call. |
| 67 | */ | 85 | */ |
| 68 | for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE); | 86 | for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE); |
| 69 | ptr = (entry & IND_INDIRECTION) ? | 87 | ptr = (entry & IND_INDIRECTION) ? |
| @@ -81,5 +99,12 @@ machine_kexec(struct kimage *image) | |||
| 81 | printk("Will call new kernel at %08lx\n", image->start); | 99 | printk("Will call new kernel at %08lx\n", image->start); |
| 82 | printk("Bye ...\n"); | 100 | printk("Bye ...\n"); |
| 83 | __flush_cache_all(); | 101 | __flush_cache_all(); |
| 102 | #ifdef CONFIG_SMP | ||
| 103 | /* All secondary cpus now may jump to kexec_wait cycle */ | ||
| 104 | relocated_kexec_smp_wait = reboot_code_buffer + | ||
| 105 | (void *)(kexec_smp_wait - relocate_new_kernel); | ||
| 106 | smp_wmb(); | ||
| 107 | atomic_set(&kexec_ready_to_reboot, 1); | ||
| 108 | #endif | ||
| 84 | ((noretfun_t) reboot_code_buffer)(); | 109 | ((noretfun_t) reboot_code_buffer)(); |
| 85 | } | 110 | } |
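The _machine_kexec_prepare, _machine_kexec_shutdown and _machine_crash_shutdown
pointers introduced above default to NULL and are intended to be filled in by
platform code; when _machine_crash_shutdown stays NULL, the new
default_machine_crash_shutdown() from crash.c is used. A hedged sketch of a
board file wiring the hooks up (the myboard_* names are hypothetical):

	#include <linux/init.h>
	#include <linux/kexec.h>

	static int myboard_kexec_prepare(struct kimage *image)
	{
		/* e.g. sanity-check segment addresses or stash a command line */
		return 0;
	}

	static void myboard_kexec_shutdown(void)
	{
		/* quiesce platform interrupt controllers / DMA before the jump */
	}

	static int __init myboard_kexec_init(void)
	{
		_machine_kexec_prepare  = myboard_kexec_prepare;
		_machine_kexec_shutdown = myboard_kexec_shutdown;
		/* _machine_crash_shutdown left NULL: fall back to the default */
		return 0;
	}
	arch_initcall(myboard_kexec_init);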
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 33f63bab478a..fd814e08c945 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
| @@ -50,8 +50,8 @@ static bool check_same_owner(struct task_struct *p) | |||
| 50 | 50 | ||
| 51 | rcu_read_lock(); | 51 | rcu_read_lock(); |
| 52 | pcred = __task_cred(p); | 52 | pcred = __task_cred(p); |
| 53 | match = (cred->euid == pcred->euid || | 53 | match = (uid_eq(cred->euid, pcred->euid) || |
| 54 | cred->euid == pcred->uid); | 54 | uid_eq(cred->euid, pcred->uid)); |
| 55 | rcu_read_unlock(); | 55 | rcu_read_unlock(); |
| 56 | return match; | 56 | return match; |
| 57 | } | 57 | } |
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 3fc1691110dc..df1e3e455f9a 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
| @@ -11,7 +11,7 @@ | |||
| 11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
| 12 | #include <linux/export.h> | 12 | #include <linux/export.h> |
| 13 | #include <asm/checksum.h> | 13 | #include <asm/checksum.h> |
| 14 | #include <asm/pgtable.h> | 14 | #include <linux/mm.h> |
| 15 | #include <asm/uaccess.h> | 15 | #include <asm/uaccess.h> |
| 16 | #include <asm/ftrace.h> | 16 | #include <asm/ftrace.h> |
| 17 | 17 | ||
| @@ -32,8 +32,6 @@ EXPORT_SYMBOL(memset); | |||
| 32 | EXPORT_SYMBOL(memcpy); | 32 | EXPORT_SYMBOL(memcpy); |
| 33 | EXPORT_SYMBOL(memmove); | 33 | EXPORT_SYMBOL(memmove); |
| 34 | 34 | ||
| 35 | EXPORT_SYMBOL(kernel_thread); | ||
| 36 | |||
| 37 | /* | 35 | /* |
| 38 | * Functions that operate on entire pages. Mostly used by memory management. | 36 | * Functions that operate on entire pages. Mostly used by memory management. |
| 39 | */ | 37 | */ |
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index a9b995dcf691..b14c14d90fc2 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
| @@ -840,6 +840,16 @@ static const struct mips_perf_event bmips5000_event_map | |||
| 840 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T }, | 840 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T }, |
| 841 | }; | 841 | }; |
| 842 | 842 | ||
| 843 | static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = { | ||
| 844 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL }, | ||
| 845 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */ | ||
| 846 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */ | ||
| 847 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */ | ||
| 848 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */ | ||
| 849 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */ | ||
| 850 | [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 851 | }; | ||
| 852 | |||
| 843 | /* 24K/34K/1004K cores can share the same cache event map. */ | 853 | /* 24K/34K/1004K cores can share the same cache event map. */ |
| 844 | static const struct mips_perf_event mipsxxcore_cache_map | 854 | static const struct mips_perf_event mipsxxcore_cache_map |
| 845 | [PERF_COUNT_HW_CACHE_MAX] | 855 | [PERF_COUNT_HW_CACHE_MAX] |
| @@ -1092,6 +1102,100 @@ static const struct mips_perf_event octeon_cache_map | |||
| 1092 | }, | 1102 | }, |
| 1093 | }; | 1103 | }; |
| 1094 | 1104 | ||
| 1105 | static const struct mips_perf_event xlp_cache_map | ||
| 1106 | [PERF_COUNT_HW_CACHE_MAX] | ||
| 1107 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 1108 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
| 1109 | [C(L1D)] = { | ||
| 1110 | [C(OP_READ)] = { | ||
| 1111 | [C(RESULT_ACCESS)] = { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */ | ||
| 1112 | [C(RESULT_MISS)] = { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */ | ||
| 1113 | }, | ||
| 1114 | [C(OP_WRITE)] = { | ||
| 1115 | [C(RESULT_ACCESS)] = { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */ | ||
| 1116 | [C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */ | ||
| 1117 | }, | ||
| 1118 | [C(OP_PREFETCH)] = { | ||
| 1119 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1120 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1121 | }, | ||
| 1122 | }, | ||
| 1123 | [C(L1I)] = { | ||
| 1124 | [C(OP_READ)] = { | ||
| 1125 | [C(RESULT_ACCESS)] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */ | ||
| 1126 | [C(RESULT_MISS)] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */ | ||
| 1127 | }, | ||
| 1128 | [C(OP_WRITE)] = { | ||
| 1129 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1130 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1131 | }, | ||
| 1132 | [C(OP_PREFETCH)] = { | ||
| 1133 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1134 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1135 | }, | ||
| 1136 | }, | ||
| 1137 | [C(LL)] = { | ||
| 1138 | [C(OP_READ)] = { | ||
| 1139 | [C(RESULT_ACCESS)] = { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */ | ||
| 1140 | [C(RESULT_MISS)] = { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */ | ||
| 1141 | }, | ||
| 1142 | [C(OP_WRITE)] = { | ||
| 1143 | [C(RESULT_ACCESS)] = { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */ | ||
| 1144 | [C(RESULT_MISS)] = { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */ | ||
| 1145 | }, | ||
| 1146 | [C(OP_PREFETCH)] = { | ||
| 1147 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1148 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1149 | }, | ||
| 1150 | }, | ||
| 1151 | [C(DTLB)] = { | ||
| 1152 | /* | ||
| 1153 | * Only general DTLB misses are counted use the same event for | ||
| 1154 | * read and write. | ||
| 1155 | */ | ||
| 1156 | [C(OP_READ)] = { | ||
| 1157 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1158 | [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */ | ||
| 1159 | }, | ||
| 1160 | [C(OP_WRITE)] = { | ||
| 1161 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1162 | [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */ | ||
| 1163 | }, | ||
| 1164 | [C(OP_PREFETCH)] = { | ||
| 1165 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1166 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1167 | }, | ||
| 1168 | }, | ||
| 1169 | [C(ITLB)] = { | ||
| 1170 | [C(OP_READ)] = { | ||
| 1171 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1172 | [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */ | ||
| 1173 | }, | ||
| 1174 | [C(OP_WRITE)] = { | ||
| 1175 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1176 | [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */ | ||
| 1177 | }, | ||
| 1178 | [C(OP_PREFETCH)] = { | ||
| 1179 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1180 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1181 | }, | ||
| 1182 | }, | ||
| 1183 | [C(BPU)] = { | ||
| 1184 | [C(OP_READ)] = { | ||
| 1185 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1186 | [C(RESULT_MISS)] = { 0x25, CNTR_ALL }, | ||
| 1187 | }, | ||
| 1188 | [C(OP_WRITE)] = { | ||
| 1189 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1190 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1191 | }, | ||
| 1192 | [C(OP_PREFETCH)] = { | ||
| 1193 | [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1194 | [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, | ||
| 1195 | }, | ||
| 1196 | }, | ||
| 1197 | }; | ||
| 1198 | |||
| 1095 | #ifdef CONFIG_MIPS_MT_SMP | 1199 | #ifdef CONFIG_MIPS_MT_SMP |
| 1096 | static void check_and_calc_range(struct perf_event *event, | 1200 | static void check_and_calc_range(struct perf_event *event, |
| 1097 | const struct mips_perf_event *pev) | 1201 | const struct mips_perf_event *pev) |
| @@ -1444,6 +1548,20 @@ static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config) | |||
| 1444 | return &raw_event; | 1548 | return &raw_event; |
| 1445 | } | 1549 | } |
| 1446 | 1550 | ||
| 1551 | static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config) | ||
| 1552 | { | ||
| 1553 | unsigned int raw_id = config & 0xff; | ||
| 1554 | |||
| 1555 | /* Only 1-63 are defined */ | ||
| 1556 | if ((raw_id < 0x01) || (raw_id > 0x3f)) | ||
| 1557 | return ERR_PTR(-EOPNOTSUPP); | ||
| 1558 | |||
| 1559 | raw_event.cntr_mask = CNTR_ALL; | ||
| 1560 | raw_event.event_id = raw_id; | ||
| 1561 | |||
| 1562 | return &raw_event; | ||
| 1563 | } | ||
| 1564 | |||
| 1447 | static int __init | 1565 | static int __init |
| 1448 | init_hw_perf_events(void) | 1566 | init_hw_perf_events(void) |
| 1449 | { | 1567 | { |
| @@ -1522,6 +1640,12 @@ init_hw_perf_events(void) | |||
| 1522 | mipspmu.general_event_map = &bmips5000_event_map; | 1640 | mipspmu.general_event_map = &bmips5000_event_map; |
| 1523 | mipspmu.cache_event_map = &bmips5000_cache_map; | 1641 | mipspmu.cache_event_map = &bmips5000_cache_map; |
| 1524 | break; | 1642 | break; |
| 1643 | case CPU_XLP: | ||
| 1644 | mipspmu.name = "xlp"; | ||
| 1645 | mipspmu.general_event_map = &xlp_event_map; | ||
| 1646 | mipspmu.cache_event_map = &xlp_cache_map; | ||
| 1647 | mipspmu.map_raw_event = xlp_pmu_map_raw_event; | ||
| 1648 | break; | ||
| 1525 | default: | 1649 | default: |
| 1526 | pr_cont("Either hardware does not support performance " | 1650 | pr_cont("Either hardware does not support performance " |
| 1527 | "counters, or not yet implemented.\n"); | 1651 | "counters, or not yet implemented.\n"); |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index e9a5fd7277f4..a11c6f9fdd5e 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
| @@ -72,9 +72,7 @@ void __noreturn cpu_idle(void) | |||
| 72 | } | 72 | } |
| 73 | } | 73 | } |
| 74 | #ifdef CONFIG_HOTPLUG_CPU | 74 | #ifdef CONFIG_HOTPLUG_CPU |
| 75 | if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) && | 75 | if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map)) |
| 76 | (system_state == SYSTEM_RUNNING || | ||
| 77 | system_state == SYSTEM_BOOTING)) | ||
| 78 | play_dead(); | 76 | play_dead(); |
| 79 | #endif | 77 | #endif |
| 80 | rcu_idle_exit(); | 78 | rcu_idle_exit(); |
| @@ -84,6 +82,7 @@ void __noreturn cpu_idle(void) | |||
| 84 | } | 82 | } |
| 85 | 83 | ||
| 86 | asmlinkage void ret_from_fork(void); | 84 | asmlinkage void ret_from_fork(void); |
| 85 | asmlinkage void ret_from_kernel_thread(void); | ||
| 87 | 86 | ||
| 88 | void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) | 87 | void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) |
| 89 | { | 88 | { |
| @@ -113,10 +112,10 @@ void flush_thread(void) | |||
| 113 | } | 112 | } |
| 114 | 113 | ||
| 115 | int copy_thread(unsigned long clone_flags, unsigned long usp, | 114 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
| 116 | unsigned long unused, struct task_struct *p, struct pt_regs *regs) | 115 | unsigned long arg, struct task_struct *p) |
| 117 | { | 116 | { |
| 118 | struct thread_info *ti = task_thread_info(p); | 117 | struct thread_info *ti = task_thread_info(p); |
| 119 | struct pt_regs *childregs; | 118 | struct pt_regs *childregs, *regs = current_pt_regs(); |
| 120 | unsigned long childksp; | 119 | unsigned long childksp; |
| 121 | p->set_child_tid = p->clear_child_tid = NULL; | 120 | p->set_child_tid = p->clear_child_tid = NULL; |
| 122 | 121 | ||
| @@ -136,19 +135,30 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
| 136 | childregs = (struct pt_regs *) childksp - 1; | 135 | childregs = (struct pt_regs *) childksp - 1; |
| 137 | /* Put the stack after the struct pt_regs. */ | 136 | /* Put the stack after the struct pt_regs. */ |
| 138 | childksp = (unsigned long) childregs; | 137 | childksp = (unsigned long) childregs; |
| 138 | p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); | ||
| 139 | if (unlikely(p->flags & PF_KTHREAD)) { | ||
| 140 | unsigned long status = p->thread.cp0_status; | ||
| 141 | memset(childregs, 0, sizeof(struct pt_regs)); | ||
| 142 | ti->addr_limit = KERNEL_DS; | ||
| 143 | p->thread.reg16 = usp; /* fn */ | ||
| 144 | p->thread.reg17 = arg; | ||
| 145 | p->thread.reg29 = childksp; | ||
| 146 | p->thread.reg31 = (unsigned long) ret_from_kernel_thread; | ||
| 147 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | ||
| 148 | status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) | | ||
| 149 | ((status & (ST0_KUC | ST0_IEC)) << 2); | ||
| 150 | #else | ||
| 151 | status |= ST0_EXL; | ||
| 152 | #endif | ||
| 153 | childregs->cp0_status = status; | ||
| 154 | return 0; | ||
| 155 | } | ||
| 139 | *childregs = *regs; | 156 | *childregs = *regs; |
| 140 | childregs->regs[7] = 0; /* Clear error flag */ | 157 | childregs->regs[7] = 0; /* Clear error flag */ |
| 141 | |||
| 142 | childregs->regs[2] = 0; /* Child gets zero as return value */ | 158 | childregs->regs[2] = 0; /* Child gets zero as return value */ |
| 159 | childregs->regs[29] = usp; | ||
| 160 | ti->addr_limit = USER_DS; | ||
| 143 | 161 | ||
| 144 | if (childregs->cp0_status & ST0_CU0) { | ||
| 145 | childregs->regs[28] = (unsigned long) ti; | ||
| 146 | childregs->regs[29] = childksp; | ||
| 147 | ti->addr_limit = KERNEL_DS; | ||
| 148 | } else { | ||
| 149 | childregs->regs[29] = usp; | ||
| 150 | ti->addr_limit = USER_DS; | ||
| 151 | } | ||
| 152 | p->thread.reg29 = (unsigned long) childregs; | 162 | p->thread.reg29 = (unsigned long) childregs; |
| 153 | p->thread.reg31 = (unsigned long) ret_from_fork; | 163 | p->thread.reg31 = (unsigned long) ret_from_fork; |
| 154 | 164 | ||
| @@ -156,7 +166,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
| 156 | * New tasks lose permission to use the fpu. This accelerates context | 166 | * New tasks lose permission to use the fpu. This accelerates context |
| 157 | * switching for most programs since they don't use the fpu. | 167 | * switching for most programs since they don't use the fpu. |
| 158 | */ | 168 | */ |
| 159 | p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); | ||
| 160 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); | 169 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); |
| 161 | 170 | ||
| 162 | #ifdef CONFIG_MIPS_MT_SMTC | 171 | #ifdef CONFIG_MIPS_MT_SMTC |
| @@ -222,35 +231,6 @@ int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr) | |||
| 222 | } | 231 | } |
| 223 | 232 | ||
| 224 | /* | 233 | /* |
| 225 | * Create a kernel thread | ||
| 226 | */ | ||
| 227 | static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *)) | ||
| 228 | { | ||
| 229 | do_exit(fn(arg)); | ||
| 230 | } | ||
| 231 | |||
| 232 | long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) | ||
| 233 | { | ||
| 234 | struct pt_regs regs; | ||
| 235 | |||
| 236 | memset(®s, 0, sizeof(regs)); | ||
| 237 | |||
| 238 | regs.regs[4] = (unsigned long) arg; | ||
| 239 | regs.regs[5] = (unsigned long) fn; | ||
| 240 | regs.cp0_epc = (unsigned long) kernel_thread_helper; | ||
| 241 | regs.cp0_status = read_c0_status(); | ||
| 242 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | ||
| 243 | regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) | | ||
| 244 | ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2); | ||
| 245 | #else | ||
| 246 | regs.cp0_status |= ST0_EXL; | ||
| 247 | #endif | ||
| 248 | |||
| 249 | /* Ok, create the new process.. */ | ||
| 250 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); | ||
| 251 | } | ||
| 252 | |||
| 253 | /* | ||
| 254 | * | 234 | * |
| 255 | */ | 235 | */ |
| 256 | struct mips_frame_info { | 236 | struct mips_frame_info { |
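With the MIPS-private kernel_thread()/kernel_thread_helper() removed, kernel
threads are created through the generic kthread API, which lands in the new
PF_KTHREAD branch of copy_thread() above (fn in reg16, arg in reg17, return to
ret_from_kernel_thread). A minimal sketch of that generic interface (the
worker below is purely illustrative):

	#include <linux/delay.h>
	#include <linux/err.h>
	#include <linux/kthread.h>

	static int my_worker(void *data)
	{
		while (!kthread_should_stop())
			msleep(100);		/* periodic work goes here */
		return 0;
	}

	static struct task_struct *worker;

	static int __init my_worker_init(void)
	{
		worker = kthread_run(my_worker, NULL, "my_worker");
		return IS_ERR(worker) ? PTR_ERR(worker) : 0;
	}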
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 87481f916a61..e4142c5f7c2b 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
| @@ -15,6 +15,11 @@ | |||
| 15 | #include <asm/addrspace.h> | 15 | #include <asm/addrspace.h> |
| 16 | 16 | ||
| 17 | LEAF(relocate_new_kernel) | 17 | LEAF(relocate_new_kernel) |
| 18 | PTR_L a0, arg0 | ||
| 19 | PTR_L a1, arg1 | ||
| 20 | PTR_L a2, arg2 | ||
| 21 | PTR_L a3, arg3 | ||
| 22 | |||
| 18 | PTR_L s0, kexec_indirection_page | 23 | PTR_L s0, kexec_indirection_page |
| 19 | PTR_L s1, kexec_start_address | 24 | PTR_L s1, kexec_start_address |
| 20 | 25 | ||
| @@ -26,7 +31,6 @@ process_entry: | |||
| 26 | and s3, s2, 0x1 | 31 | and s3, s2, 0x1 |
| 27 | beq s3, zero, 1f | 32 | beq s3, zero, 1f |
| 28 | and s4, s2, ~0x1 /* store destination addr in s4 */ | 33 | and s4, s2, ~0x1 /* store destination addr in s4 */ |
| 29 | move a0, s4 | ||
| 30 | b process_entry | 34 | b process_entry |
| 31 | 35 | ||
| 32 | 1: | 36 | 1: |
| @@ -60,10 +64,111 @@ copy_word: | |||
| 60 | b process_entry | 64 | b process_entry |
| 61 | 65 | ||
| 62 | done: | 66 | done: |
| 67 | #ifdef CONFIG_SMP | ||
| 68 | /* kexec_flag reset is signal to other CPUs what kernel | ||
| 69 | was moved to it's location. Note - we need relocated address | ||
| 70 | of kexec_flag. */ | ||
| 71 | |||
| 72 | bal 1f | ||
| 73 | 1: move t1,ra; | ||
| 74 | PTR_LA t2,1b | ||
| 75 | PTR_LA t0,kexec_flag | ||
| 76 | PTR_SUB t0,t0,t2; | ||
| 77 | PTR_ADD t0,t1,t0; | ||
| 78 | LONG_S zero,(t0) | ||
| 79 | #endif | ||
| 80 | |||
| 81 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
| 82 | /* We need to flush I-cache before jumping to new kernel. | ||
| 83 | * Unfortunatelly, this code is cpu-specific. | ||
| 84 | */ | ||
| 85 | .set push | ||
| 86 | .set noreorder | ||
| 87 | syncw | ||
| 88 | syncw | ||
| 89 | synci 0($0) | ||
| 90 | .set pop | ||
| 91 | #else | ||
| 92 | sync | ||
| 93 | #endif | ||
| 63 | /* jump to kexec_start_address */ | 94 | /* jump to kexec_start_address */ |
| 64 | j s1 | 95 | j s1 |
| 65 | END(relocate_new_kernel) | 96 | END(relocate_new_kernel) |
| 66 | 97 | ||
| 98 | #ifdef CONFIG_SMP | ||
| 99 | /* | ||
| 100 | * Other CPUs should wait until code is relocated and | ||
| 101 | * then start at entry (?) point. | ||
| 102 | */ | ||
| 103 | LEAF(kexec_smp_wait) | ||
| 104 | PTR_L a0, s_arg0 | ||
| 105 | PTR_L a1, s_arg1 | ||
| 106 | PTR_L a2, s_arg2 | ||
| 107 | PTR_L a3, s_arg3 | ||
| 108 | PTR_L s1, kexec_start_address | ||
| 109 | |||
| 110 | /* Non-relocated address works for args and kexec_start_address ( old | ||
| 111 | * kernel is not overwritten). But we need relocated address of | ||
| 112 | * kexec_flag. | ||
| 113 | */ | ||
| 114 | |||
| 115 | bal 1f | ||
| 116 | 1: move t1,ra; | ||
| 117 | PTR_LA t2,1b | ||
| 118 | PTR_LA t0,kexec_flag | ||
| 119 | PTR_SUB t0,t0,t2; | ||
| 120 | PTR_ADD t0,t1,t0; | ||
| 121 | |||
| 122 | 1: LONG_L s0, (t0) | ||
| 123 | bne s0, zero,1b | ||
| 124 | |||
| 125 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
| 126 | .set push | ||
| 127 | .set noreorder | ||
| 128 | synci 0($0) | ||
| 129 | .set pop | ||
| 130 | #else | ||
| 131 | sync | ||
| 132 | #endif | ||
| 133 | j s1 | ||
| 134 | END(kexec_smp_wait) | ||
| 135 | #endif | ||
| 136 | |||
| 137 | #ifdef __mips64 | ||
| 138 | /* all PTR's must be aligned to 8 byte in 64-bit mode */ | ||
| 139 | .align 3 | ||
| 140 | #endif | ||
| 141 | |||
| 142 | /* All parameters to new kernel are passed in registers a0-a3. | ||
| 143 | * kexec_args[0..3] are uses to prepare register values. | ||
| 144 | */ | ||
| 145 | |||
| 146 | kexec_args: | ||
| 147 | EXPORT(kexec_args) | ||
| 148 | arg0: PTR 0x0 | ||
| 149 | arg1: PTR 0x0 | ||
| 150 | arg2: PTR 0x0 | ||
| 151 | arg3: PTR 0x0 | ||
| 152 | .size kexec_args,PTRSIZE*4 | ||
| 153 | |||
| 154 | #ifdef CONFIG_SMP | ||
| 155 | /* | ||
| 156 | * Secondary CPUs may have different kernel parameters in | ||
| 157 | * their registers a0-a3. secondary_kexec_args[0..3] are used | ||
| 158 | * to prepare register values. | ||
| 159 | */ | ||
| 160 | secondary_kexec_args: | ||
| 161 | EXPORT(secondary_kexec_args) | ||
| 162 | s_arg0: PTR 0x0 | ||
| 163 | s_arg1: PTR 0x0 | ||
| 164 | s_arg2: PTR 0x0 | ||
| 165 | s_arg3: PTR 0x0 | ||
| 166 | .size secondary_kexec_args,PTRSIZE*4 | ||
| 167 | kexec_flag: | ||
| 168 | LONG 0x1 | ||
| 169 | |||
| 170 | #endif | ||
| 171 | |||
| 67 | kexec_start_address: | 172 | kexec_start_address: |
| 68 | EXPORT(kexec_start_address) | 173 | EXPORT(kexec_start_address) |
| 69 | PTR 0x0 | 174 | PTR 0x0 |
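kexec_args[] above is the hand-off mechanism to the new image: whatever the
old kernel stores there is loaded into a0-a3 at the start of
relocate_new_kernel, mirroring the usual MIPS firmware argc/argv/envp
convention. A hedged sketch of a prepare hook filling them in (extending the
hypothetical myboard_kexec_prepare() shown earlier; the command line is made
up):

	#include <linux/kexec.h>
	#include <linux/string.h>
	#include <asm/setup.h>

	static char kexec_argv_buf[COMMAND_LINE_SIZE];
	static char *kexec_argv[2];

	static int myboard_kexec_prepare(struct kimage *image)
	{
		/* Illustrative command line; real code would take it from the image. */
		strlcpy(kexec_argv_buf, "console=ttyS0,115200", sizeof(kexec_argv_buf));
		kexec_argv[0] = kexec_argv_buf;
		kexec_argv[1] = NULL;

		kexec_args[0] = 1;				/* argc */
		kexec_args[1] = (unsigned long)kexec_argv;	/* argv */
		kexec_args[2] = 0;				/* envp */
		kexec_args[3] = 0;
		return 0;
	}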
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f6ba8381ee01..ad3de9668da9 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
| @@ -17,12 +17,6 @@ | |||
| 17 | #include <asm/thread_info.h> | 17 | #include <asm/thread_info.h> |
| 18 | #include <asm/unistd.h> | 18 | #include <asm/unistd.h> |
| 19 | 19 | ||
| 20 | /* This duplicates the definition from <linux/sched.h> */ | ||
| 21 | #define PT_TRACESYS 0x00000002 /* tracing system calls */ | ||
| 22 | |||
| 23 | /* This duplicates the definition from <asm/signal.h> */ | ||
| 24 | #define SIGILL 4 /* Illegal instruction (ANSI). */ | ||
| 25 | |||
| 26 | #ifndef CONFIG_MIPS32_O32 | 20 | #ifndef CONFIG_MIPS32_O32 |
| 27 | /* No O32, so define handle_sys here */ | 21 | /* No O32, so define handle_sys here */ |
| 28 | #define handle_sysn32 handle_sys | 22 | #define handle_sysn32 handle_sys |
| @@ -167,7 +161,7 @@ EXPORT(sysn32_call_table) | |||
| 167 | PTR sys_getsockopt | 161 | PTR sys_getsockopt |
| 168 | PTR sys_clone /* 6055 */ | 162 | PTR sys_clone /* 6055 */ |
| 169 | PTR sys_fork | 163 | PTR sys_fork |
| 170 | PTR sys32_execve | 164 | PTR compat_sys_execve |
| 171 | PTR sys_exit | 165 | PTR sys_exit |
| 172 | PTR compat_sys_wait4 | 166 | PTR compat_sys_wait4 |
| 173 | PTR sys_kill /* 6060 */ | 167 | PTR sys_kill /* 6060 */ |
| @@ -397,14 +391,14 @@ EXPORT(sysn32_call_table) | |||
| 397 | PTR sys_timerfd_create | 391 | PTR sys_timerfd_create |
| 398 | PTR compat_sys_timerfd_gettime /* 6285 */ | 392 | PTR compat_sys_timerfd_gettime /* 6285 */ |
| 399 | PTR compat_sys_timerfd_settime | 393 | PTR compat_sys_timerfd_settime |
| 400 | PTR sys_signalfd4 | 394 | PTR compat_sys_signalfd4 |
| 401 | PTR sys_eventfd2 | 395 | PTR sys_eventfd2 |
| 402 | PTR sys_epoll_create1 | 396 | PTR sys_epoll_create1 |
| 403 | PTR sys_dup3 /* 6290 */ | 397 | PTR sys_dup3 /* 6290 */ |
| 404 | PTR sys_pipe2 | 398 | PTR sys_pipe2 |
| 405 | PTR sys_inotify_init1 | 399 | PTR sys_inotify_init1 |
| 406 | PTR sys_preadv | 400 | PTR compat_sys_preadv |
| 407 | PTR sys_pwritev | 401 | PTR compat_sys_pwritev |
| 408 | PTR compat_sys_rt_tgsigqueueinfo /* 6295 */ | 402 | PTR compat_sys_rt_tgsigqueueinfo /* 6295 */ |
| 409 | PTR sys_perf_event_open | 403 | PTR sys_perf_event_open |
| 410 | PTR sys_accept4 | 404 | PTR sys_accept4 |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 53c2d7245764..9601be6afa3d 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
| @@ -203,7 +203,7 @@ sys_call_table: | |||
| 203 | PTR sys_creat | 203 | PTR sys_creat |
| 204 | PTR sys_link | 204 | PTR sys_link |
| 205 | PTR sys_unlink /* 4010 */ | 205 | PTR sys_unlink /* 4010 */ |
| 206 | PTR sys32_execve | 206 | PTR compat_sys_execve |
| 207 | PTR sys_chdir | 207 | PTR sys_chdir |
| 208 | PTR compat_sys_time | 208 | PTR compat_sys_time |
| 209 | PTR sys_mknod | 209 | PTR sys_mknod |
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index a53f8ec37aac..8c41187801ce 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/console.h> | 22 | #include <linux/console.h> |
| 23 | #include <linux/pfn.h> | 23 | #include <linux/pfn.h> |
| 24 | #include <linux/debugfs.h> | 24 | #include <linux/debugfs.h> |
| 25 | #include <linux/kexec.h> | ||
| 25 | 26 | ||
| 26 | #include <asm/addrspace.h> | 27 | #include <asm/addrspace.h> |
| 27 | #include <asm/bootinfo.h> | 28 | #include <asm/bootinfo.h> |
| @@ -79,7 +80,7 @@ static struct resource data_resource = { .name = "Kernel data", }; | |||
| 79 | void __init add_memory_region(phys_t start, phys_t size, long type) | 80 | void __init add_memory_region(phys_t start, phys_t size, long type) |
| 80 | { | 81 | { |
| 81 | int x = boot_mem_map.nr_map; | 82 | int x = boot_mem_map.nr_map; |
| 82 | struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1; | 83 | int i; |
| 83 | 84 | ||
| 84 | /* Sanity check */ | 85 | /* Sanity check */ |
| 85 | if (start + size < start) { | 86 | if (start + size < start) { |
| @@ -88,15 +89,29 @@ void __init add_memory_region(phys_t start, phys_t size, long type) | |||
| 88 | } | 89 | } |
| 89 | 90 | ||
| 90 | /* | 91 | /* |
| 91 | * Try to merge with previous entry if any. This is far less than | 92 | * Try to merge with existing entry, if any. |
| 92 | * perfect but is sufficient for most real world cases. | ||
| 93 | */ | 93 | */ |
| 94 | if (x && prev->addr + prev->size == start && prev->type == type) { | 94 | for (i = 0; i < boot_mem_map.nr_map; i++) { |
| 95 | prev->size += size; | 95 | struct boot_mem_map_entry *entry = boot_mem_map.map + i; |
| 96 | unsigned long top; | ||
| 97 | |||
| 98 | if (entry->type != type) | ||
| 99 | continue; | ||
| 100 | |||
| 101 | if (start + size < entry->addr) | ||
| 102 | continue; /* no overlap */ | ||
| 103 | |||
| 104 | if (entry->addr + entry->size < start) | ||
| 105 | continue; /* no overlap */ | ||
| 106 | |||
| 107 | top = max(entry->addr + entry->size, start + size); | ||
| 108 | entry->addr = min(entry->addr, start); | ||
| 109 | entry->size = top - entry->addr; | ||
| 110 | |||
| 96 | return; | 111 | return; |
| 97 | } | 112 | } |
| 98 | 113 | ||
| 99 | if (x == BOOT_MEM_MAP_MAX) { | 114 | if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) { |
| 100 | pr_err("Ooops! Too many entries in the memory map!\n"); | 115 | pr_err("Ooops! Too many entries in the memory map!\n"); |
| 101 | return; | 116 | return; |
| 102 | } | 117 | } |
| @@ -522,12 +537,64 @@ static void __init arch_mem_init(char **cmdline_p) | |||
| 522 | } | 537 | } |
| 523 | 538 | ||
| 524 | bootmem_init(); | 539 | bootmem_init(); |
| 540 | #ifdef CONFIG_KEXEC | ||
| 541 | if (crashk_res.start != crashk_res.end) | ||
| 542 | reserve_bootmem(crashk_res.start, | ||
| 543 | crashk_res.end - crashk_res.start + 1, | ||
| 544 | BOOTMEM_DEFAULT); | ||
| 545 | #endif | ||
| 525 | device_tree_init(); | 546 | device_tree_init(); |
| 526 | sparse_init(); | 547 | sparse_init(); |
| 527 | plat_swiotlb_setup(); | 548 | plat_swiotlb_setup(); |
| 528 | paging_init(); | 549 | paging_init(); |
| 529 | } | 550 | } |
| 530 | 551 | ||
| 552 | #ifdef CONFIG_KEXEC | ||
| 553 | static inline unsigned long long get_total_mem(void) | ||
| 554 | { | ||
| 555 | unsigned long long total; | ||
| 556 | |||
| 557 | total = max_pfn - min_low_pfn; | ||
| 558 | return total << PAGE_SHIFT; | ||
| 559 | } | ||
| 560 | |||
| 561 | static void __init mips_parse_crashkernel(void) | ||
| 562 | { | ||
| 563 | unsigned long long total_mem; | ||
| 564 | unsigned long long crash_size, crash_base; | ||
| 565 | int ret; | ||
| 566 | |||
| 567 | total_mem = get_total_mem(); | ||
| 568 | ret = parse_crashkernel(boot_command_line, total_mem, | ||
| 569 | &crash_size, &crash_base); | ||
| 570 | if (ret != 0 || crash_size <= 0) | ||
| 571 | return; | ||
| 572 | |||
| 573 | crashk_res.start = crash_base; | ||
| 574 | crashk_res.end = crash_base + crash_size - 1; | ||
| 575 | } | ||
| 576 | |||
| 577 | static void __init request_crashkernel(struct resource *res) | ||
| 578 | { | ||
| 579 | int ret; | ||
| 580 | |||
| 581 | ret = request_resource(res, &crashk_res); | ||
| 582 | if (!ret) | ||
| 583 | pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n", | ||
| 584 | (unsigned long)((crashk_res.end - | ||
| 585 | crashk_res.start + 1) >> 20), | ||
| 586 | (unsigned long)(crashk_res.start >> 20)); | ||
| 587 | } | ||
| 588 | #else /* !defined(CONFIG_KEXEC) */ | ||
| 589 | static void __init mips_parse_crashkernel(void) | ||
| 590 | { | ||
| 591 | } | ||
| 592 | |||
| 593 | static void __init request_crashkernel(struct resource *res) | ||
| 594 | { | ||
| 595 | } | ||
| 596 | #endif /* !defined(CONFIG_KEXEC) */ | ||
| 597 | |||
| 531 | static void __init resource_init(void) | 598 | static void __init resource_init(void) |
| 532 | { | 599 | { |
| 533 | int i; | 600 | int i; |
| @@ -543,6 +610,8 @@ static void __init resource_init(void) | |||
| 543 | /* | 610 | /* |
| 544 | * Request address space for all standard RAM. | 611 | * Request address space for all standard RAM. |
| 545 | */ | 612 | */ |
| 613 | mips_parse_crashkernel(); | ||
| 614 | |||
| 546 | for (i = 0; i < boot_mem_map.nr_map; i++) { | 615 | for (i = 0; i < boot_mem_map.nr_map; i++) { |
| 547 | struct resource *res; | 616 | struct resource *res; |
| 548 | unsigned long start, end; | 617 | unsigned long start, end; |
| @@ -579,6 +648,7 @@ static void __init resource_init(void) | |||
| 579 | */ | 648 | */ |
| 580 | request_resource(res, &code_resource); | 649 | request_resource(res, &code_resource); |
| 581 | request_resource(res, &data_resource); | 650 | request_resource(res, &data_resource); |
| 651 | request_crashkernel(res); | ||
| 582 | } | 652 | } |
| 583 | } | 653 | } |
| 584 | 654 | ||
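Two notes on the setup.c changes: the rewritten add_memory_region() now merges
a new range with any existing entry of the same type that it overlaps or
touches, not just the immediately preceding one, and mips_parse_crashkernel()
picks up the standard crashkernel= boot argument (for example
crashkernel=32M@16M) to reserve memory for the capture kernel. A small
illustration of the merge behaviour with made-up addresses:

	/* Both calls describe RAM; the ranges overlap, so one entry results. */
	add_memory_region(0x00000000, 0x08000000, BOOT_MEM_RAM);  /*   0-128 MB */
	add_memory_region(0x04000000, 0x08000000, BOOT_MEM_RAM);  /*  64-192 MB */
	/* boot_mem_map now holds a single entry: addr 0x0, size 0x0c000000 */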
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 0e1a5b8ae817..b6aa77035019 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
| @@ -568,17 +568,20 @@ static void do_signal(struct pt_regs *regs) | |||
| 568 | } | 568 | } |
| 569 | 569 | ||
| 570 | if (regs->regs[0]) { | 570 | if (regs->regs[0]) { |
| 571 | if (regs->regs[2] == ERESTARTNOHAND || | 571 | switch (regs->regs[2]) { |
| 572 | regs->regs[2] == ERESTARTSYS || | 572 | case ERESTARTNOHAND: |
| 573 | regs->regs[2] == ERESTARTNOINTR) { | 573 | case ERESTARTSYS: |
| 574 | case ERESTARTNOINTR: | ||
| 574 | regs->regs[2] = regs->regs[0]; | 575 | regs->regs[2] = regs->regs[0]; |
| 575 | regs->regs[7] = regs->regs[26]; | 576 | regs->regs[7] = regs->regs[26]; |
| 576 | regs->cp0_epc -= 4; | 577 | regs->cp0_epc -= 4; |
| 577 | } | 578 | break; |
| 578 | if (regs->regs[2] == ERESTART_RESTARTBLOCK) { | 579 | |
| 580 | case ERESTART_RESTARTBLOCK: | ||
| 579 | regs->regs[2] = current->thread.abi->restart; | 581 | regs->regs[2] = current->thread.abi->restart; |
| 580 | regs->regs[7] = regs->regs[26]; | 582 | regs->regs[7] = regs->regs[26]; |
| 581 | regs->cp0_epc -= 4; | 583 | regs->cp0_epc -= 4; |
| 584 | break; | ||
| 582 | } | 585 | } |
| 583 | regs->regs[0] = 0; /* Don't deal with this again. */ | 586 | regs->regs[0] = 0; /* Don't deal with this again. */ |
| 584 | } | 587 | } |
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index afc379ca3753..06cd0c610f44 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
| @@ -97,7 +97,7 @@ static void cmp_init_secondary(void) | |||
| 97 | 97 | ||
| 98 | /* Enable per-cpu interrupts: platform specific */ | 98 | /* Enable per-cpu interrupts: platform specific */ |
| 99 | 99 | ||
| 100 | c->core = (read_c0_ebase() >> 1) & 0xff; | 100 | c->core = (read_c0_ebase() >> 1) & 0x1ff; |
| 101 | #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) | 101 | #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) |
| 102 | c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE; | 102 | c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE; |
| 103 | #endif | 103 | #endif |
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9005bf9fb859..2e6374a589ec 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
| @@ -386,3 +386,20 @@ void flush_tlb_one(unsigned long vaddr) | |||
| 386 | 386 | ||
| 387 | EXPORT_SYMBOL(flush_tlb_page); | 387 | EXPORT_SYMBOL(flush_tlb_page); |
| 388 | EXPORT_SYMBOL(flush_tlb_one); | 388 | EXPORT_SYMBOL(flush_tlb_one); |
| 389 | |||
| 390 | #if defined(CONFIG_KEXEC) | ||
| 391 | void (*dump_ipi_function_ptr)(void *) = NULL; | ||
| 392 | void dump_send_ipi(void (*dump_ipi_callback)(void *)) | ||
| 393 | { | ||
| 394 | int i; | ||
| 395 | int cpu = smp_processor_id(); | ||
| 396 | |||
| 397 | dump_ipi_function_ptr = dump_ipi_callback; | ||
| 398 | smp_mb(); | ||
| 399 | for_each_online_cpu(i) | ||
| 400 | if (i != cpu) | ||
| 401 | mp_ops->send_ipi_single(i, SMP_DUMP); | ||
| 402 | |||
| 403 | } | ||
| 404 | EXPORT_SYMBOL(dump_send_ipi); | ||
| 405 | #endif | ||
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 2bd561bc05ae..201cb76b4df9 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
| @@ -92,7 +92,7 @@ save_static_function(sys_fork); | |||
| 92 | static int __used noinline | 92 | static int __used noinline |
| 93 | _sys_fork(nabi_no_regargs struct pt_regs regs) | 93 | _sys_fork(nabi_no_regargs struct pt_regs regs) |
| 94 | { | 94 | { |
| 95 | return do_fork(SIGCHLD, regs.regs[29], ®s, 0, NULL, NULL); | 95 | return do_fork(SIGCHLD, regs.regs[29], 0, NULL, NULL); |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | save_static_function(sys_clone); | 98 | save_static_function(sys_clone); |
| @@ -123,32 +123,10 @@ _sys_clone(nabi_no_regargs struct pt_regs regs) | |||
| 123 | #else | 123 | #else |
| 124 | child_tidptr = (int __user *) regs.regs[8]; | 124 | child_tidptr = (int __user *) regs.regs[8]; |
| 125 | #endif | 125 | #endif |
| 126 | return do_fork(clone_flags, newsp, ®s, 0, | 126 | return do_fork(clone_flags, newsp, 0, |
| 127 | parent_tidptr, child_tidptr); | 127 | parent_tidptr, child_tidptr); |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | /* | ||
| 131 | * sys_execve() executes a new program. | ||
| 132 | */ | ||
| 133 | asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs) | ||
| 134 | { | ||
| 135 | int error; | ||
| 136 | struct filename *filename; | ||
| 137 | |||
| 138 | filename = getname((const char __user *) (long)regs.regs[4]); | ||
| 139 | error = PTR_ERR(filename); | ||
| 140 | if (IS_ERR(filename)) | ||
| 141 | goto out; | ||
| 142 | error = do_execve(filename->name, | ||
| 143 | (const char __user *const __user *) (long)regs.regs[5], | ||
| 144 | (const char __user *const __user *) (long)regs.regs[6], | ||
| 145 | ®s); | ||
| 146 | putname(filename); | ||
| 147 | |||
| 148 | out: | ||
| 149 | return error; | ||
| 150 | } | ||
| 151 | |||
| 152 | SYSCALL_DEFINE1(set_thread_area, unsigned long, addr) | 130 | SYSCALL_DEFINE1(set_thread_area, unsigned long, addr) |
| 153 | { | 131 | { |
| 154 | struct thread_info *ti = task_thread_info(current); | 132 | struct thread_info *ti = task_thread_info(current); |
| @@ -313,34 +291,3 @@ asmlinkage void bad_stack(void) | |||
| 313 | { | 291 | { |
| 314 | do_exit(SIGSEGV); | 292 | do_exit(SIGSEGV); |
| 315 | } | 293 | } |
| 316 | |||
| 317 | /* | ||
| 318 | * Do a system call from kernel instead of calling sys_execve so we | ||
| 319 | * end up with proper pt_regs. | ||
| 320 | */ | ||
| 321 | int kernel_execve(const char *filename, | ||
| 322 | const char *const argv[], | ||
| 323 | const char *const envp[]) | ||
| 324 | { | ||
| 325 | register unsigned long __a0 asm("$4") = (unsigned long) filename; | ||
| 326 | register unsigned long __a1 asm("$5") = (unsigned long) argv; | ||
| 327 | register unsigned long __a2 asm("$6") = (unsigned long) envp; | ||
| 328 | register unsigned long __a3 asm("$7"); | ||
| 329 | unsigned long __v0; | ||
| 330 | |||
| 331 | __asm__ volatile (" \n" | ||
| 332 | " .set noreorder \n" | ||
| 333 | " li $2, %5 # __NR_execve \n" | ||
| 334 | " syscall \n" | ||
| 335 | " move %0, $2 \n" | ||
| 336 | " .set reorder \n" | ||
| 337 | : "=&r" (__v0), "=r" (__a3) | ||
| 338 | : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve) | ||
| 339 | : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", | ||
| 340 | "memory"); | ||
| 341 | |||
| 342 | if (__a3 == 0) | ||
| 343 | return __v0; | ||
| 344 | |||
| 345 | return -__v0; | ||
| 346 | } | ||
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9be3df1fa8a4..cf7ac5483f53 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
| @@ -13,6 +13,7 @@ | |||
| 13 | */ | 13 | */ |
| 14 | #include <linux/bug.h> | 14 | #include <linux/bug.h> |
| 15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
| 16 | #include <linux/kexec.h> | ||
| 16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
| 17 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
| 18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
| @@ -409,6 +410,9 @@ void __noreturn die(const char *str, struct pt_regs *regs) | |||
| 409 | panic("Fatal exception"); | 410 | panic("Fatal exception"); |
| 410 | } | 411 | } |
| 411 | 412 | ||
| 413 | if (regs && kexec_should_crash(current)) | ||
| 414 | crash_kexec(regs); | ||
| 415 | |||
| 412 | do_exit(sig); | 416 | do_exit(sig); |
| 413 | } | 417 | } |
| 414 | 418 | ||
| @@ -1021,6 +1025,24 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
| 1021 | 1025 | ||
| 1022 | return; | 1026 | return; |
| 1023 | 1027 | ||
| 1028 | case 3: | ||
| 1029 | /* | ||
| 1030 | * Old (MIPS I and MIPS II) processors will set this code | ||
| 1031 | * for COP1X opcode instructions that replaced the original | ||
| 1032 | * COP3 space. We don't limit COP1 space instructions in | ||
| 1033 | * the emulator according to the CPU ISA, so we want to | ||
| 1034 | * treat COP1X instructions consistently regardless of which | ||
| 1035 | * code the CPU chose. Therefore we redirect this trap to | ||
| 1036 | * the FP emulator too. | ||
| 1037 | * | ||
| 1038 | * Then some newer FPU-less processors use this code | ||
| 1039 | * erroneously too, so they are covered by this choice | ||
| 1040 | * as well. | ||
| 1041 | */ | ||
| 1042 | if (raw_cpu_has_fpu) | ||
| 1043 | break; | ||
| 1044 | /* Fall through. */ | ||
| 1045 | |||
| 1024 | case 1: | 1046 | case 1: |
| 1025 | if (used_math()) /* Using the FPU again. */ | 1047 | if (used_math()) /* Using the FPU again. */ |
| 1026 | own_fpu(1); | 1048 | own_fpu(1); |
| @@ -1044,9 +1066,6 @@ asmlinkage void do_cpu(struct pt_regs *regs) | |||
| 1044 | case 2: | 1066 | case 2: |
| 1045 | raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); | 1067 | raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); |
| 1046 | return; | 1068 | return; |
| 1047 | |||
| 1048 | case 3: | ||
| 1049 | break; | ||
| 1050 | } | 1069 | } |
| 1051 | 1070 | ||
| 1052 | force_sig(SIGILL, current); | 1071 | force_sig(SIGILL, current); |
