author		Linus Torvalds <torvalds@linux-foundation.org>	2017-07-02 14:53:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-02 14:53:44 -0400
commit		79c496816963aa0561868b43c2c950dfeb282639 (patch)
tree		e127b465e24c9ba27d996fcb7b70aa67f60ffb3e
parent		3a61a54cd72c93afa3b7246e3ed06f26ed37fde7 (diff)
parent		854236363370995a609a10b03e35fd3dc5e9e4a1 (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS fixes from Ralf Baechle:
 "Here's a final round of fixes for 4.12:

   - Fix misordered instructions in assembly code making kernel startup
     via UHB unreliable.

   - Fix special case of MADDF and MSUBF emulation.

   - Fix alignment issue in address calculation in pm-cps on 64 bit.

   - Fix IRQ tracing & lockdep when rescheduling.

   - Systems with MAARs require post-DMA cache flushes.

  The reordering fix and the MADDF/MSUBF fix have sat in linux-next for
  a number of days. The others haven't propagated from my pull tree to
  linux-next yet, but all have survived manual testing and Imagination's
  automated test system, and there are no pending bug reports"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: Avoid accidental raw backtrace
  MIPS: Perform post-DMA cache flushes on systems with MAARs
  MIPS: Fix IRQ tracing & lockdep when rescheduling
  MIPS: pm-cps: Drop manual cache-line alignment of ready_count
  MIPS: math-emu: Handle zero accumulator case in MADDF and MSUBF separately
  MIPS: head: Reorder instructions missing a delay slot
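For background on the MADDF/MSUBF item: maddf.fmt is a fused multiply-add that computes (x * y) + z with a single rounding, and the emulation bug concerned the special case where the accumulator z is zero, in which case the correct result is simply the rounded product. Below is a minimal userspace sketch of that expected behaviour, using C's standard fma() as a stand-in for the emulated instruction (illustration only, not kernel code):

/*
 * Illustration only: with a zero accumulator and a nonzero product,
 * fma(x, y, 0) is just the correctly rounded product x * y.
 * Build with: cc -O2 demo.c -lm
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double x = 1.0000000000000002;	/* 1 + 2^-52 */
	double y = 1.0000000000000002;
	double z = 0.0;

	double fused = fma(x, y, z);	/* one rounding of x*y + z */
	double prod  = x * y;		/* one rounding of x*y */

	printf("fma(x, y, 0) = %.17g\n", fused);
	printf("x * y        = %.17g\n", prod);
	printf("equal: %d\n", fused == prod);
	return 0;
}

The new zc == IEEE754_CLASS_ZERO checks in dp_maddf.c and sp_maddf.c below implement exactly this short-circuit: with a zero accumulator, the formatted product is returned before the addition path runs.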
-rw-r--r--	arch/mips/kernel/entry.S	3
-rw-r--r--	arch/mips/kernel/head.S	2
-rw-r--r--	arch/mips/kernel/pm-cps.c	9
-rw-r--r--	arch/mips/kernel/traps.c	2
-rw-r--r--	arch/mips/math-emu/dp_maddf.c	5
-rw-r--r--	arch/mips/math-emu/sp_maddf.c	5
-rw-r--r--	arch/mips/mm/dma-default.c	23
7 files changed, 33 insertions, 16 deletions
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 8d83fc2a96b7..38a302919e6b 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -11,6 +11,7 @@
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
 #include <asm/compiler.h>
+#include <asm/irqflags.h>
 #include <asm/regdef.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
@@ -119,6 +120,7 @@ work_pending:
 	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
 	beqz	t0, work_notifysig
 work_resched:
+	TRACE_IRQS_OFF
 	jal	schedule
 
 	local_irq_disable		# make sure need_resched and
@@ -155,6 +157,7 @@ syscall_exit_work:
 	beqz	t0, work_pending	# trace bit set?
 	local_irq_enable		# could let syscall_trace_leave()
 					# call schedule() instead
+	TRACE_IRQS_ON
 	move	a0, sp
 	jal	syscall_trace_leave
 	b	resume_userspace
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index cf052204eb0a..d1bb506adc10 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -106,8 +106,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
 	beq	t0, t1, dtb_found
 #endif
 	li	t1, -2
-	beq	a0, t1, dtb_found
 	move	t2, a1
+	beq	a0, t1, dtb_found
 
 	li	t2, 0
 dtb_found:
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 5f928c34c148..d99416094ba9 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -56,7 +56,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
  * state. Actually per-core rather than per-CPU.
  */
 static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
 
 /* Indicates online CPUs coupled with the current CPU */
 static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -642,7 +641,6 @@ static int cps_pm_online_cpu(unsigned int cpu)
 {
 	enum cps_pm_state state;
 	unsigned core = cpu_data[cpu].core;
-	unsigned dlinesz = cpu_data[cpu].dcache.linesz;
 	void *entry_fn, *core_rc;
 
 	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -662,16 +660,11 @@ static int cps_pm_online_cpu(unsigned int cpu)
 	}
 
 	if (!per_cpu(ready_count, core)) {
-		core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
 		if (!core_rc) {
 			pr_err("Failed allocate core %u ready_count\n", core);
 			return -ENOMEM;
 		}
-		per_cpu(ready_count_alloc, core) = core_rc;
-
-		/* Ensure ready_count is aligned to a cacheline boundary */
-		core_rc += dlinesz - 1;
-		core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
 		per_cpu(ready_count, core) = core_rc;
 	}
 
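The alignment arithmetic removed above is the "alignment issue in address calculation in pm-cps on 64 bit" from the pull message: dlinesz is a 32-bit unsigned int, so ~(dlinesz - 1) is computed in 32 bits and zero-extends when ANDed with a 64-bit address, clearing the address's upper bits. A self-contained C sketch of that pitfall, with made-up values (illustration only, not kernel code):

/*
 * Why masking a 64-bit address with ~(dlinesz - 1) is unsafe when
 * dlinesz is a 32-bit unsigned int: the complement is a 32-bit value,
 * zero-extended for the AND, which wipes the upper half of the address.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int dlinesz = 32;		/* example D-cache line size */
	uint64_t addr = 0xffffffff80123456ULL;	/* example kernel-style address */

	uint64_t broken  = addr & ~(dlinesz - 1);	    /* mask is 0x00000000ffffffe0 */
	uint64_t correct = addr & ~((uint64_t)dlinesz - 1); /* mask is 0xffffffffffffffe0 */

	printf("broken  = %#llx\n", (unsigned long long)broken);
	printf("correct = %#llx\n", (unsigned long long)correct);
	return 0;
}

Rather than fixing the mask in place, the patch drops the manual alignment entirely, relying on kmalloc()'s own alignment guarantees and letting the ready_count_alloc bookkeeping go away with it.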
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9681b5877140..38dfa27730ff 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -201,6 +201,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 {
 	struct pt_regs regs;
 	mm_segment_t old_fs = get_fs();
+
+	regs.cp0_status = KSU_KERNEL;
 	if (sp) {
 		regs.regs[29] = (unsigned long)sp;
 		regs.regs[31] = 0;
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
index 4a2d03c72959..caa62f20a888 100644
--- a/arch/mips/math-emu/dp_maddf.c
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -54,7 +54,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 		return ieee754dp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
 		DPDNORMZ;
-	/* QNAN is handled separately below */
+	/* QNAN and ZERO cases are handled separately below */
 	}
 
 	switch (CLPAIR(xc, yc)) {
@@ -210,6 +210,9 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 	}
 	assert(rm & (DP_HIDDEN_BIT << 3));
 
+	if (zc == IEEE754_CLASS_ZERO)
+		return ieee754dp_format(rs, re, rm);
+
 	/* And now the addition */
 	assert(zm & DP_HIDDEN_BIT);
 
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
index a8cd8b4f235e..c91d5e5d9b5f 100644
--- a/arch/mips/math-emu/sp_maddf.c
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -54,7 +54,7 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 		return ieee754sp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
 		SPDNORMZ;
-	/* QNAN is handled separately below */
+	/* QNAN and ZERO cases are handled separately below */
 	}
 
 	switch (CLPAIR(xc, yc)) {
@@ -203,6 +203,9 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 	}
 	assert(rm & (SP_HIDDEN_BIT << 3));
 
+	if (zc == IEEE754_CLASS_ZERO)
+		return ieee754sp_format(rs, re, rm);
+
 	/* And now the addition */
 
 	assert(zm & SP_HIDDEN_BIT);
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index fe8df14b6169..e08598c70b3e 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -68,12 +68,25 @@ static inline struct page *dma_addr_to_page(struct device *dev,
  * systems and only the R10000 and R12000 are used in such systems, the
  * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
  */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(struct device *dev)
 {
-	return !plat_device_is_coherent(dev) &&
-	       (boot_cpu_type() == CPU_R10000 ||
-		boot_cpu_type() == CPU_R12000 ||
-		boot_cpu_type() == CPU_BMIPS5000);
+	if (plat_device_is_coherent(dev))
+		return false;
+
+	switch (boot_cpu_type()) {
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_BMIPS5000:
+		return true;
+
+	default:
+		/*
+		 * Presence of MAARs suggests that the CPU supports
+		 * speculatively prefetching data, and therefore requires
+		 * the post-DMA flush/invalidate.
+		 */
+		return cpu_has_maar;
+	}
 }
 
 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
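The rewritten cpu_needs_post_dma_flush() above captures the MAAR change from the pull message: instead of only flushing on a fixed list of known speculating CPUs, any CPU advertising MAARs is now assumed to prefetch speculatively and gets the post-DMA flush by default. The following is a standalone restatement of that decision logic with stubbed inputs (the real kernel helpers plat_device_is_coherent(), boot_cpu_type() and cpu_has_maar are replaced by plain parameters; illustration only):

/*
 * Standalone model of the decision above: coherent devices never need a
 * post-DMA flush, the legacy CPU list always does, and for everything
 * else the presence of MAARs decides.
 */
#include <stdbool.h>
#include <stdio.h>

enum cpu_type { CPU_OTHER, CPU_R10000, CPU_R12000, CPU_BMIPS5000 };

static bool needs_post_dma_flush(bool device_coherent, enum cpu_type cpu,
				 bool has_maar)
{
	if (device_coherent)
		return false;		/* caches already consistent with DMA */

	switch (cpu) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
		return true;		/* known speculative-prefetch CPUs */
	default:
		/* MAARs imply speculative prefetching, so flush by default. */
		return has_maar;
	}
}

int main(void)
{
	printf("%d\n", needs_post_dma_flush(false, CPU_OTHER, true));   /* 1 */
	printf("%d\n", needs_post_dma_flush(false, CPU_OTHER, false));  /* 0 */
	printf("%d\n", needs_post_dma_flush(true,  CPU_R10000, false)); /* 0 */
	return 0;
}

Modelling it this way makes the precedence explicit: device coherence wins outright, the existing CPU list remains an unconditional "true", and cpu_has_maar only decides the remaining cases.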