 arch/mips/kernel/process.c | 43 +++++++++++++++++++++++++++++--------------
 arch/mips/kernel/traps.c   |  1 +
 arch/mips/mm/ioremap.c     | 37 +++++++++++++++++++++++------------
 3 files changed, 55 insertions(+), 26 deletions(-)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 8d85046adcc8..9670e70139fd 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -29,6 +29,7 @@
 #include <linux/kallsyms.h>
 #include <linux/random.h>
 #include <linux/prctl.h>
+#include <linux/nmi.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp)
 	return sp & ALMASK;
 }
 
-static void arch_dump_stack(void *info)
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
 {
-	struct pt_regs *regs;
+	nmi_cpu_backtrace(get_irq_regs());
+	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
 
-	regs = get_irq_regs();
+static void raise_backtrace(cpumask_t *mask)
+{
+	call_single_data_t *csd;
+	int cpu;
 
-	if (regs)
-		show_regs(regs);
+	for_each_cpu(cpu, mask) {
+		/*
+		 * If we previously sent an IPI to the target CPU & it hasn't
+		 * cleared its bit in the busy cpumask then it didn't handle
+		 * our previous IPI & it's not safe for us to reuse the
+		 * call_single_data_t.
+		 */
+		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+				cpu);
+			continue;
+		}
 
-	dump_stack();
+		csd = &per_cpu(backtrace_csd, cpu);
+		csd->func = handle_backtrace;
+		smp_call_function_single_async(cpu, csd);
+	}
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	long this_cpu = get_cpu();
-
-	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
-		dump_stack();
-
-	smp_call_function_many(mask, arch_dump_stack, NULL, 1);
-
-	put_cpu();
+	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
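
The interesting part of the process.c change is the fire-and-forget IPI: each target CPU owns a per-CPU call_single_data_t, and the backtrace_csd_busy mask ensures a csd is never reposted while still in flight, since reusing a pending csd is not allowed with smp_call_function_single_async(). Unlike the old smp_call_function_many() call, nothing ever spins waiting for a hung CPU to respond. A minimal sketch of the same pattern, using hypothetical demo_* names that are not part of this patch:

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(call_single_data_t, demo_csd);
static struct cpumask demo_csd_busy;

static void demo_handler(void *info)
{
	/* Runs on the target CPU in IPI context; mark our csd free again. */
	cpumask_clear_cpu(smp_processor_id(), &demo_csd_busy);
}

static void demo_kick(int cpu)
{
	call_single_data_t *csd;

	/* A csd still in flight must not be reused; skip this CPU instead. */
	if (cpumask_test_and_set_cpu(cpu, &demo_csd_busy))
		return;

	csd = &per_cpu(demo_csd, cpu);
	csd->func = demo_handler;
	smp_call_function_single_async(cpu, csd);
}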
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d67fa74622ee..8d505a21396e 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs)
 void show_regs(struct pt_regs *regs)
 {
 	__show_regs((struct pt_regs *)regs);
+	dump_stack();
 }
 
 void show_registers(struct pt_regs *regs)
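
This one-line traps.c change matters because the generic nmi_cpu_backtrace() used above prints a remote CPU's state through show_regs() whenever a pt_regs is available, and expects that to include a stack trace, as it does on other architectures. The relevant output path in lib/nmi_backtrace.c looks roughly like this (a simplified paraphrase, not the verbatim source):

#include <linux/sched/debug.h>	/* show_regs() */
#include <linux/printk.h>	/* dump_stack() */

/* Simplified paraphrase of nmi_cpu_backtrace()'s output path. */
static void print_cpu_state(struct pt_regs *regs)
{
	if (regs)
		show_regs(regs);	/* on MIPS this now dumps the stack too */
	else
		dump_stack();
}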
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 1986e09fb457..1601d90b087b 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -9,6 +9,7 @@
 #include <linux/export.h>
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
+#include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 	return error;
 }
 
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+			       void *arg)
+{
+	unsigned long i;
+
+	for (i = 0; i < nr_pages; i++) {
+		if (pfn_valid(start_pfn + i) &&
+		    !PageReserved(pfn_to_page(start_pfn + i)))
+			return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Generic mapping function (not visible outside):
  */
@@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 
 void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
 {
+	unsigned long offset, pfn, last_pfn;
 	struct vm_struct * area;
-	unsigned long offset;
 	phys_addr_t last_addr;
 	void * addr;
 
@@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
 		return (void __iomem *) CKSEG1ADDR(phys_addr);
 
 	/*
-	 * Don't allow anybody to remap normal RAM that we're using..
+	 * Don't allow anybody to remap RAM that may be allocated by the page
+	 * allocator, since that could lead to races & data clobbering.
 	 */
-	if (phys_addr < virt_to_phys(high_memory)) {
-		char *t_addr, *t_end;
-		struct page *page;
-
-		t_addr = __va(phys_addr);
-		t_end = t_addr + (size - 1);
-
-		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-			if(!PageReserved(page))
-				return NULL;
+	pfn = PFN_DOWN(phys_addr);
+	last_pfn = PFN_DOWN(last_addr);
+	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+				  __ioremap_check_ram) == 1) {
+		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+			  &phys_addr, &last_addr);
+		return NULL;
 	}
 
 	/*
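
The rewritten RAM check walks the requested range by PFN via walk_system_ram_range() instead of comparing phys_addr against high_memory, so it also covers highmem and ranges only partially backed by System RAM. From a caller's perspective, mapping a range that overlaps usable (non-reserved) RAM now fails cleanly with a one-time warning. A hypothetical caller, for illustration only (the demo_* name is not from this patch):

#include <linux/io.h>
#include <linux/errno.h>

static int demo_map_regs(phys_addr_t base, unsigned long len,
			 void __iomem **out)
{
	void __iomem *regs;

	regs = ioremap(base, len);
	if (!regs)	/* NULL e.g. if [base, base + len) overlaps usable RAM */
		return -ENOMEM;

	*out = regs;
	return 0;
}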