author     Dmitry Torokhov <dmitry.torokhov@gmail.com>   2013-12-16 05:04:49 -0500
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>   2013-12-16 05:04:49 -0500
commit     348324c5b10bcba8d9daabdfb85a6927311be34f (patch)
tree       d06ca3a264407a14a1f36c1b798d6dc0dc1582d8 /arch/sh
parent     1e63bd9cc43db5400a1423a7ec8266b4e7c54bd0 (diff)
parent     319e2e3f63c348a9b66db4667efa73178e18b17d (diff)
Merge tag 'v3.13-rc4' into next
Synchronize with mainline to bring in the new keycode definitions and the new hwmon API.
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/Kconfig                       |  2
-rw-r--r--  arch/sh/boards/mach-ecovec24/setup.c  |  1
-rw-r--r--  arch/sh/include/asm/Kbuild            |  1
-rw-r--r--  arch/sh/include/asm/fpu.h             |  2
-rw-r--r--  arch/sh/include/asm/mmu_context.h     |  2
-rw-r--r--  arch/sh/include/asm/pgalloc.h         |  5
-rw-r--r--  arch/sh/include/asm/processor_32.h    | 10
-rw-r--r--  arch/sh/include/asm/processor_64.h    | 10
-rw-r--r--  arch/sh/include/asm/thread_info.h     |  2
-rw-r--r--  arch/sh/kernel/cpu/fpu.c              |  2
-rw-r--r--  arch/sh/kernel/entry-common.S         |  6
-rw-r--r--  arch/sh/kernel/irq.c                  | 57
-rw-r--r--  arch/sh/kernel/process_32.c           |  6
-rw-r--r--  arch/sh/kernel/process_64.c           |  4
-rw-r--r--  arch/sh/mm/init.c                     |  2
15 files changed, 59 insertions, 53 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 224f4bc9925e..9b0979f4df7a 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -1,5 +1,6 @@
 config SUPERH
 	def_bool y
+	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select EXPERT
 	select CLKDEV_LOOKUP
 	select HAVE_IDE if HAS_IOPORT
@@ -711,7 +712,6 @@ config CC_STACKPROTECTOR
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on SYS_SUPPORTS_SMP
-	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 23d7e45f9d14..5bc3a15465c7 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -15,6 +15,7 @@
 #include <linux/mmc/sh_mmcif.h>
 #include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mtd/physmap.h>
+#include <linux/mfd/tmio.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 280bea9e5e2b..231efbb68108 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -34,3 +34,4 @@ generic-y += termios.h
 generic-y += trace_clock.h
 generic-y += ucontext.h
 generic-y += xor.h
+generic-y += preempt.h
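
Note: a generic-y entry tells Kbuild to generate the header at build time instead of carrying a hand-written copy. The generated wrapper is, approximately, a one-line redirect (reconstruction, not quoted from the tree):

	/* arch/sh/include/generated/asm/preempt.h, generated by Kbuild;
	 * approximate contents of the emitted wrapper. */
	#include <asm-generic/preempt.h>
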
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 06c4281aab65..09fc2bc8a790 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -46,7 +46,7 @@ static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
 		save_fpu(tsk);
 		release_fpu(regs);
 	} else
-		tsk->fpu_counter = 0;
+		tsk->thread.fpu_counter = 0;
 }
 
 static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
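
Note: the rename reflects fpu_counter moving from the generic task_struct into sh's own thread_struct (the field is documented in the processor_32.h hunk below). A sketch of the whole function after the change; only the quoted lines above are certain, the surrounding shape is reconstruction:

	/* Sketch of __unlazy_fpu() after the rename; details outside the
	 * hunk above are assumptions, not quoted source. */
	static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
	{
		if (task_thread_info(tsk)->status & TS_USEDFPU) {
			/* FPU was touched: flush live state, drop ownership */
			save_fpu(tsk);
			release_fpu(regs);
		} else
			/* streak of FPU-using switches broken: back to lazy */
			tsk->thread.fpu_counter = 0;
	}
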
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 21c5088788da..b9d9489a5012 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -81,7 +81,7 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
 
 	/*
 	 * Fix version; Note that we avoid version #0
-	 * to distingush NO_CONTEXT.
+	 * to distinguish NO_CONTEXT.
 	 */
 	if (!asid)
 		asid = MMU_CONTEXT_FIRST_VERSION;
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 8c00785c60d5..a33673b3687d 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -47,7 +47,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	if (!pg)
 		return NULL;
 	page = virt_to_page(pg);
-	pgtable_page_ctor(page);
+	if (!pgtable_page_ctor(page)) {
+		quicklist_free(QUICK_PT, NULL, pg);
+		return NULL;
+	}
 	return page;
 }
 
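
Note: as of the v3.13 split page-table-lock work, pgtable_page_ctor() can fail (it may need to allocate the per-page ptlock), so callers must unwind. The same check-and-unwind pattern for a plain page-based pte allocator, as a minimal sketch, assuming GFP_KERNEL context; this is not sh code, which uses quicklists as shown above:

	static pgtable_t pte_alloc_one_sketch(struct mm_struct *mm, unsigned long addr)
	{
		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {	/* ptlock allocation may fail */
			__free_page(page);
			return NULL;
		}
		return page;
	}
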
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index e699a12cdcca..18e0377f72bb 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -111,6 +111,16 @@ struct thread_struct {
 
 	/* Extended processor state */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * in which the FPU is used. If this is over a threshold, the lazy
+	 * FPU saving becomes unlazy to save the trap. This is an unsigned
+	 * char so that after 256 times the counter wraps and the behavior
+	 * turns lazy again; this deals with bursty apps that only use the
+	 * FPU for a short time.
+	 */
+	unsigned char fpu_counter;
 };
 
 #define INIT_THREAD { \
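
Note: the wrap-around behavior the new comment describes can be seen with a tiny standalone program (illustrative only; the threshold of 5 matches the __switch_to() checks in the process_32.c hunks below):

	#include <stdio.h>

	int main(void)
	{
		unsigned char fpu_counter = 0;

		/* 260 consecutive FPU-using context switches */
		for (int i = 1; i <= 260; i++) {
			fpu_counter++;
			if (i == 6 || i == 255 || i == 256 || i == 260)
				printf("switch %3d: counter=%3u eager=%d\n",
				       i, fpu_counter, fpu_counter > 5);
		}
		/* counter wraps to 0 at switch 256, so behavior turns
		 * lazy again without any explicit reset */
		return 0;
	}
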
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 1cc7d3197143..eedd4f625d07 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -126,6 +126,16 @@ struct thread_struct {
 
 	/* floating point info */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * in which the FPU is used. If this is over a threshold, the lazy
+	 * FPU saving becomes unlazy to save the trap. This is an unsigned
+	 * char so that after 256 times the counter wraps and the behavior
+	 * turns lazy again; this deals with bursty apps that only use the
+	 * FPU for a short time.
+	 */
+	unsigned char fpu_counter;
 };
 
 #define INIT_MMAP \
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 45a93669289d..ad27ffa65e2e 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -41,8 +41,6 @@ struct thread_info {
 
 #endif
 
-#define PREEMPT_ACTIVE		0x10000000
-
 #if defined(CONFIG_4KSTACKS)
 #define THREAD_SHIFT	12
 #else
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
index f8f7af51c128..4e332244ea75 100644
--- a/arch/sh/kernel/cpu/fpu.c
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -44,7 +44,7 @@ void __fpu_state_restore(void)
 	restore_fpu(tsk);
 
 	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
+	tsk->thread.fpu_counter++;
 }
 
 void fpu_state_restore(struct pt_regs *regs)
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 9b6e4beeb296..ca46834294b7 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -108,7 +108,7 @@ need_resched:
 	and	#(0xf0>>1), r0		! interrupts off (exception path)?
 	cmp/eq	#(0xf0>>1), r0
 	bt	noresched
-	mov.l	3f, r0
+	mov.l	1f, r0
 	jsr	@r0			! call preempt_schedule_irq
 	 nop
 	bra	need_resched
@@ -119,9 +119,7 @@ noresched:
 	 nop
 
 	.align 2
-1:	.long	PREEMPT_ACTIVE
-2:	.long	schedule
-3:	.long	preempt_schedule_irq
+1:	.long	preempt_schedule_irq
 #endif
 
 ENTRY(resume_userspace)
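
Note: with PREEMPT_ACTIVE handling moved into core code, the return path no longer open-codes the schedule call and simply loops on preempt_schedule_irq(). An illustrative C rendering of the assembly above, not kernel source; interrupts_were_on() is a hypothetical stand-in for the SR-bit test the asm performs:

	/* Illustrative only: what the kernel-preemption return path does. */
	static void kernel_preempt_path(struct pt_regs *regs)
	{
		while (need_resched() && interrupts_were_on(regs))
			preempt_schedule_irq();	/* core helper for irq-return preemption */
	}
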
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 063af10ff3c1..0833736afa32 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -149,47 +149,32 @@ void irq_ctx_exit(int cpu)
 	hardirq_ctx[cpu] = NULL;
 }
 
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags;
 	struct thread_info *curctx;
 	union irq_ctx *irqctx;
 	u32 *isp;
 
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending()) {
-		curctx = current_thread_info();
-		irqctx = softirq_ctx[smp_processor_id()];
-		irqctx->tinfo.task = curctx->task;
-		irqctx->tinfo.previous_sp = current_stack_pointer;
-
-		/* build the stack frame on the softirq stack */
-		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
-
-		__asm__ __volatile__ (
-			"mov	r15, r9		\n"
-			"jsr	@%0		\n"
-			/* switch to the softirq stack */
-			" mov	%1, r15		\n"
-			/* restore the thread stack */
-			"mov	r9, r15		\n"
-			: /* no outputs */
-			: "r" (__do_softirq), "r" (isp)
-			: "memory", "r0", "r1", "r2", "r3", "r4",
-			  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
-		);
-
-		/*
-		 * Shouldn't happen, we returned above if in_interrupt():
-		 */
-		WARN_ON_ONCE(softirq_count());
-	}
-
-	local_irq_restore(flags);
+	curctx = current_thread_info();
+	irqctx = softirq_ctx[smp_processor_id()];
+	irqctx->tinfo.task = curctx->task;
+	irqctx->tinfo.previous_sp = current_stack_pointer;
+
+	/* build the stack frame on the softirq stack */
+	isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+
+	__asm__ __volatile__ (
+		"mov	r15, r9		\n"
+		"jsr	@%0		\n"
+		/* switch to the softirq stack */
+		" mov	%1, r15		\n"
+		/* restore the thread stack */
+		"mov	r9, r15		\n"
+		: /* no outputs */
+		: "r" (__do_softirq), "r" (isp)
+		: "memory", "r0", "r1", "r2", "r3", "r4",
+		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+	);
 }
 #else
 static inline void handle_one_irq(unsigned int irq)
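
Note: this hunk is part of the cross-arch consolidation that moved do_softirq() itself into generic code; the arch now only supplies the stack switch. The checks deleted here (in_interrupt(), irq save/restore, pending test) took over in kernel/softirq.c, roughly as sketched below; the exact body may differ from the tree:

	asmlinkage void do_softirq(void)
	{
		unsigned long flags;

		if (in_interrupt())
			return;

		local_irq_save(flags);
		if (local_softirq_pending())
			do_softirq_own_stack();	/* arch hook: just switch stacks */
		local_irq_restore(flags);
	}
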
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index ebd3933005b4..2885fc9d9dcd 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #endif
 		ti->addr_limit = KERNEL_DS;
 		ti->status &= ~TS_USEDFPU;
-		p->fpu_counter = 0;
+		p->thread.fpu_counter = 0;
 		return 0;
 	}
 	*childregs = *current_pt_regs();
@@ -189,7 +189,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	unlazy_fpu(prev, task_pt_regs(prev));
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		prefetch(next_t->xstate);
 
 #ifdef CONFIG_MMU
@@ -207,7 +207,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
 	 */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		__fpu_state_restore();
 
 	return prev;
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 174d124b419e..e2062e643341 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -374,7 +374,7 @@ asmlinkage void ret_from_kernel_thread(void);
 int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long arg, struct task_struct *p)
 {
-	struct pt_regs *childregs, *regs = current_pt_regs();
+	struct pt_regs *childregs;
 
 #ifdef CONFIG_SH_FPU
 	/* can't happen for a kernel thread */
@@ -393,7 +393,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->regs[2] = (unsigned long)arg;
-		childregs->regs[3] = (unsigned long)fn;
+		childregs->regs[3] = (unsigned long)usp;
 		childregs->sr = (1 << 30);	/* not user_mode */
 		childregs->sr |= SR_FD;		/* Invalidate FPU flag */
 		p->thread.pc = (unsigned long) ret_from_kernel_thread;
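
Note: the removed `fn` no longer named anything in scope in copy_thread(). For kernel threads there is no user stack, so the generic fork path of this era reuses the usp slot to carry the thread function, roughly as below (reconstructed from kernel/fork.c; treat details as assumptions):

	pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
	{
		/* the thread function travels in the stack/usp argument */
		return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
			       (unsigned long)fn, (unsigned long)arg, NULL, NULL);
	}
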
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 33890fd267cb..2d089fe2cba9 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -231,7 +231,7 @@ static void __init bootmem_init_one_node(unsigned int nid)
 	if (!p->node_spanned_pages)
 		return;
 
-	end_pfn = p->node_start_pfn + p->node_spanned_pages;
+	end_pfn = pgdat_end_pfn(p);
 
 	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
 
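
Note: pgdat_end_pfn() is the include/linux/mmzone.h helper that encapsulates the same arithmetic, so the open-coded expression can go. Its definition is essentially:

	static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
	{
		/* one past the last PFN the node spans */
		return pgdat->node_start_pfn + pgdat->node_spanned_pages;
	}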