path: root/arch/sh/kernel
author    Linus Torvalds <torvalds@linux-foundation.org>  2009-12-09 22:03:16 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-12-09 22:03:16 -0500
commit    3a43aaa31790c36b69ebf8a6396f37fade86b531 (patch)
tree      7c7f8da6219d546f2b44534cb7be1fb5591d6ac4 /arch/sh/kernel
parent    aed886ce777590eac87f7ce2897d9f8357754331 (diff)
parent    6a5a0b9139b19dd1a107870269a35bc9cf18d2dc (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (137 commits)
  sh: include empty zero page in romImage
  sh: Make associative cache writes fatal on all SH-4A parts.
  sh: Drop associative writes for SH-4 cache flushes.
  sh: Partial revert of copy/clear_user_highpage() optimizations.
  sh: Add default uImage rule for se7724, ap325rxa, and migor.
  sh: allow runtime pm without suspend/resume callbacks
  sh: mach-ecovec24: Remove un-defined settings for VPU
  sh: mach-ecovec24: LCDC drive ability become high
  sh: fix sh7724 VEU3F resource size
  serial: sh-sci: Fix too early port disabling.
  sh: pfc: pr_info() -> pr_debug() cleanups.
  sh: pfc: Convert from ctrl_xxx() to __raw_xxx() I/O routines.
  sh: Improve kfr2r09 serial port setup code
  sh: Break out SuperH PFC code
  sh: Move KEYSC header file
  sh: convert /proc/cpu/aligmnent, /proc/cpu/kernel_alignment to seq_file
  sh: Add CPG save/restore code for sh7724 R-standby
  sh: Add SDHI power control support to Ecovec
  mfd: Add power control platform data to SDHI driver
  sh: mach-ecovec24: modify address map
  ...
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/Makefile                    |   10
-rw-r--r--  arch/sh/kernel/asm-offsets.c               |   23
-rw-r--r--  arch/sh/kernel/cpu/Makefile                |    1
-rw-r--r--  arch/sh/kernel/cpu/init.c                  |   28
-rw-r--r--  arch/sh/kernel/cpu/sh2a/fpu.c              |   27
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S             |   33
-rw-r--r--  arch/sh/kernel/cpu/sh4/Makefile            |    8
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c               |   28
-rw-r--r--  arch/sh/kernel/cpu/sh4/perf_event.c        |  253
-rw-r--r--  arch/sh/kernel/cpu/sh4a/Makefile           |    1
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7724.c     |    2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/perf_event.c       |  269
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7724.c     |  264
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-shx3.c       |   45
-rw-r--r--  arch/sh/kernel/cpu/sh4a/smp-shx3.c         |   37
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S             |    2
-rw-r--r--  arch/sh/kernel/cpu/shmobile/cpuidle.c      |   42
-rw-r--r--  arch/sh/kernel/cpu/shmobile/pm.c           |  117
-rw-r--r--  arch/sh/kernel/cpu/shmobile/pm_runtime.c   |   17
-rw-r--r--  arch/sh/kernel/cpu/shmobile/sleep.S        |  344
-rw-r--r--  arch/sh/kernel/cpu/ubc.S                   |   59
-rw-r--r--  arch/sh/kernel/dma-nommu.c                 |   82
-rw-r--r--  arch/sh/kernel/dwarf.c                     |  222
-rw-r--r--  arch/sh/kernel/entry-common.S              |    2
-rw-r--r--  arch/sh/kernel/ftrace.c                    |  146
-rw-r--r--  arch/sh/kernel/gpio.c                      |  584
-rw-r--r--  arch/sh/kernel/head_32.S                   |    2
-rw-r--r--  arch/sh/kernel/idle.c                      |   78
-rw-r--r--  arch/sh/kernel/io_generic.c                |    4
-rw-r--r--  arch/sh/kernel/irq.c                       |   14
-rw-r--r--  arch/sh/kernel/irq_32.c                    |   57
-rw-r--r--  arch/sh/kernel/irq_64.c                    |   51
-rw-r--r--  arch/sh/kernel/machine_kexec.c             |    6
-rw-r--r--  arch/sh/kernel/machvec.c                   |    4
-rw-r--r--  arch/sh/kernel/module.c                    |    9
-rw-r--r--  arch/sh/kernel/perf_callchain.c            |   98
-rw-r--r--  arch/sh/kernel/perf_event.c                |  312
-rw-r--r--  arch/sh/kernel/process_32.c                |   42
-rw-r--r--  arch/sh/kernel/process_64.c                |    2
-rw-r--r--  arch/sh/kernel/return_address.c            |   54
-rw-r--r--  arch/sh/kernel/setup.c                     |    4
-rw-r--r--  arch/sh/kernel/sh_ksyms_32.c               |   67
-rw-r--r--  arch/sh/kernel/sh_ksyms_64.c               |   10
-rw-r--r--  arch/sh/kernel/signal_32.c                 |   24
-rw-r--r--  arch/sh/kernel/signal_64.c                 |   13
-rw-r--r--  arch/sh/kernel/smp.c                       |    4
-rw-r--r--  arch/sh/kernel/topology.c                  |   26
-rw-r--r--  arch/sh/kernel/traps.c                     |    8
-rw-r--r--  arch/sh/kernel/traps_32.c                  |   82
49 files changed, 2429 insertions(+), 1188 deletions(-)
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index a2d0a40f3848..0471a3eb25ed 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -9,8 +9,12 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
-obj-y	:= debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o	\
-	   machvec.o nmi_debug.o process_$(BITS).o ptrace_$(BITS).o	\
+CFLAGS_REMOVE_return_address.o = -pg
+
+obj-y	:= debugtraps.o dma-nommu.o dumpstack.o			\
+	   idle.o io.o io_generic.o irq.o				\
+	   irq_$(BITS).o machvec.o nmi_debug.o process_$(BITS).o	\
+	   ptrace_$(BITS).o return_address.o				\
 	   setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o		\
 	   syscalls_$(BITS).o time.o topology.o traps.o			\
 	   traps_$(BITS).o unwinder.o
@@ -28,13 +32,13 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_IO_TRAPPED)	+= io_trapped.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o
-obj-$(CONFIG_GENERIC_GPIO)	+= gpio.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_DUMP_CODE)	+= disassemble.o
 obj-$(CONFIG_HIBERNATION)	+= swsusp.o
 obj-$(CONFIG_DWARF_UNWINDER)	+= dwarf.o
+obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o perf_callchain.o
 
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)	+= localtimer.o
 
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c
index d218e808294e..08a2be775b6c 100644
--- a/arch/sh/kernel/asm-offsets.c
+++ b/arch/sh/kernel/asm-offsets.c
@@ -34,5 +34,28 @@ int main(void)
 	DEFINE(PBE_NEXT, offsetof(struct pbe, next));
 	DEFINE(SWSUSP_ARCH_REGS_SIZE, sizeof(struct swsusp_arch_regs));
 #endif
+
+	DEFINE(SH_SLEEP_MODE, offsetof(struct sh_sleep_data, mode));
+	DEFINE(SH_SLEEP_SF_PRE, offsetof(struct sh_sleep_data, sf_pre));
+	DEFINE(SH_SLEEP_SF_POST, offsetof(struct sh_sleep_data, sf_post));
+	DEFINE(SH_SLEEP_RESUME, offsetof(struct sh_sleep_data, resume));
+	DEFINE(SH_SLEEP_VBR, offsetof(struct sh_sleep_data, vbr));
+	DEFINE(SH_SLEEP_SPC, offsetof(struct sh_sleep_data, spc));
+	DEFINE(SH_SLEEP_SR, offsetof(struct sh_sleep_data, sr));
+	DEFINE(SH_SLEEP_SP, offsetof(struct sh_sleep_data, sp));
+	DEFINE(SH_SLEEP_BASE_ADDR, offsetof(struct sh_sleep_data, addr));
+	DEFINE(SH_SLEEP_BASE_DATA, offsetof(struct sh_sleep_data, data));
+	DEFINE(SH_SLEEP_REG_STBCR, offsetof(struct sh_sleep_regs, stbcr));
+	DEFINE(SH_SLEEP_REG_BAR, offsetof(struct sh_sleep_regs, bar));
+	DEFINE(SH_SLEEP_REG_PTEH, offsetof(struct sh_sleep_regs, pteh));
+	DEFINE(SH_SLEEP_REG_PTEL, offsetof(struct sh_sleep_regs, ptel));
+	DEFINE(SH_SLEEP_REG_TTB, offsetof(struct sh_sleep_regs, ttb));
+	DEFINE(SH_SLEEP_REG_TEA, offsetof(struct sh_sleep_regs, tea));
+	DEFINE(SH_SLEEP_REG_MMUCR, offsetof(struct sh_sleep_regs, mmucr));
+	DEFINE(SH_SLEEP_REG_PTEA, offsetof(struct sh_sleep_regs, ptea));
+	DEFINE(SH_SLEEP_REG_PASCR, offsetof(struct sh_sleep_regs, pascr));
+	DEFINE(SH_SLEEP_REG_IRMCR, offsetof(struct sh_sleep_regs, irmcr));
+	DEFINE(SH_SLEEP_REG_CCR, offsetof(struct sh_sleep_regs, ccr));
+	DEFINE(SH_SLEEP_REG_RAMCR, offsetof(struct sh_sleep_regs, ramcr));
 	return 0;
 }
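
A note on the asm-offsets.c hunk above: the file is compiled but never linked. Each DEFINE() emits its constant as a marker in the generated assembly, which Kbuild post-processes into asm-offsets.h, so the new shmobile sleep assembly can address struct sh_sleep_data and struct sh_sleep_regs fields by name. A minimal sketch of the mechanism as provided by include/linux/kbuild.h of this era:

    /* Emits "->SYM value" markers that a build-time script turns
     * into "#define SYM value" lines in asm-offsets.h. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))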
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index 3d6b9312dc47..d97c803719ec 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/
 
 # Common interfaces.
 
-obj-$(CONFIG_UBC_WAKEUP)	+= ubc.o
 obj-$(CONFIG_SH_ADC)		+= adc.o
 obj-$(CONFIG_SH_CLK_CPG)	+= clock-cpg.o
 
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index e932ebef4738..89b4b76c0d76 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -75,16 +75,11 @@ static void __init expmask_init(void)
 	/*
 	 * Future proofing.
 	 *
-	 * Disable support for slottable sleep instruction
-	 * and non-nop instructions in the rte delay slot.
+	 * Disable support for slottable sleep instruction, non-nop
+	 * instructions in the rte delay slot, and associative writes to
+	 * the memory-mapped cache array.
 	 */
-	expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP);
-
-	/*
-	 * Enable associative writes to the memory-mapped cache array
-	 * until the cache flush ops have been rewritten.
-	 */
-	expmask |= EXPMASK_MMCAW;
+	expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);
 
 	__raw_writel(expmask, EXPMASK);
 	ctrl_barrier();
@@ -311,12 +306,12 @@ asmlinkage void __init sh_cpu_init(void)
 	if (fpu_disabled) {
 		printk("FPU Disabled\n");
 		current_cpu_data.flags &= ~CPU_HAS_FPU;
-		disable_fpu();
 	}
 
 	/* FPU initialization */
+	disable_fpu();
 	if ((current_cpu_data.flags & CPU_HAS_FPU)) {
-		clear_thread_flag(TIF_USEDFPU);
+		current_thread_info()->status &= ~TS_USEDFPU;
 		clear_used_math();
 	}
 
@@ -338,17 +333,6 @@ asmlinkage void __init sh_cpu_init(void)
 	}
 #endif
 
-	/*
-	 * Some brain-damaged loaders decided it would be a good idea to put
-	 * the UBC to sleep. This causes some issues when it comes to things
-	 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So ..
-	 * we wake it up and hope that all is well.
-	 */
-#ifdef CONFIG_SUPERH32
-	if (raw_smp_processor_id() == 0)
-		ubc_wakeup();
-#endif
-
 	speculative_execution_init();
 	expmask_init();
 }
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
index 6df2fb98eb30..d395ce5740e7 100644
--- a/arch/sh/kernel/cpu/sh2a/fpu.c
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -25,14 +25,12 @@
 
 /*
  * Save FPU registers onto task structure.
- * Assume called with FPU enabled (SR.FD=0).
  */
 void
-save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+save_fpu(struct task_struct *tsk)
 {
 	unsigned long dummy;
 
-	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
 	enable_fpu();
 	asm volatile("sts.l	fpul, @-%0\n\t"
 		     "sts.l	fpscr, @-%0\n\t"
@@ -60,7 +58,6 @@ save_fpu(struct task_struct *tsk, struct pt_regs *regs)
 		     : "memory");
 
 	disable_fpu();
-	release_fpu(regs);
 }
 
 static void
@@ -598,31 +595,31 @@ BUILD_TRAP_HANDLER(fpu_error)
 	struct task_struct *tsk = current;
 	TRAP_HANDLER_DECL;
 
-	save_fpu(tsk, regs);
+	__unlazy_fpu(tsk, regs);
 	if (ieee_fpe_handler(regs)) {
 		tsk->thread.fpu.hard.fpscr &=
 			~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
 		grab_fpu(regs);
 		restore_fpu(tsk);
-		set_tsk_thread_flag(tsk, TIF_USEDFPU);
+		task_thread_info(tsk)->status |= TS_USEDFPU;
 		return;
 	}
 
 	force_sig(SIGFPE, tsk);
 }
 
-BUILD_TRAP_HANDLER(fpu_state_restore)
+void fpu_state_restore(struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
-	TRAP_HANDLER_DECL;
 
 	grab_fpu(regs);
-	if (!user_mode(regs)) {
+	if (unlikely(!user_mode(regs))) {
 		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+		BUG();
 		return;
 	}
 
-	if (used_math()) {
+	if (likely(used_math())) {
 		/* Using the FPU again. */
 		restore_fpu(tsk);
 	} else {
@@ -630,5 +627,13 @@ BUILD_TRAP_HANDLER(fpu_state_restore)
 		fpu_init();
 		set_used_math();
 	}
-	set_tsk_thread_flag(tsk, TIF_USEDFPU);
+	task_thread_info(tsk)->status |= TS_USEDFPU;
+	tsk->fpu_counter++;
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+	TRAP_HANDLER_DECL;
+
+	fpu_state_restore(regs);
 }
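
The fpu_error hunks above replace the unconditional save_fpu() with __unlazy_fpu(), which only saves state when the current task actually owns the FPU, and ownership tracking moves from the TIF_USEDFPU thread flag to a TS_USEDFPU bit in thread_info->status. The helper itself lives in a header that is not part of this diff; a plausible sketch of what it does, with the body assumed rather than quoted:

    static inline void __unlazy_fpu(struct task_struct *tsk,
                                    struct pt_regs *regs)
    {
            /* Save and release the FPU only if this task was using it */
            if (task_thread_info(tsk)->status & TS_USEDFPU) {
                    task_thread_info(tsk)->status &= ~TS_USEDFPU;
                    save_fpu(tsk);
                    release_fpu(regs);
            }
    }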
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index bb407ef0b91e..3f7e2a22c7c2 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -297,41 +297,8 @@ ENTRY(vbr_base)
 !
 	.balign 	256,0,256
 general_exception:
-#ifndef CONFIG_CPU_SUBTYPE_SHX3
 	bra	handle_exception
 	 sts	pr, k3		! save original pr value in k3
-#else
-	mov.l	1f, k4
-	mov.l	@k4, k4
-
-	! Is EXPEVT larger than 0x800?
-	mov	#0x8, k0
-	shll8	k0
-	cmp/hs	k0, k4
-	bf	0f
-
-	! then add 0x580 (k2 is 0xd80 or 0xda0)
-	mov	#0x58, k0
-	shll2	k0
-	shll2	k0
-	add	k0, k4
-0:
-	! Setup stack and save DSP context (k0 contains original r15 on return)
-	bsr	prepare_stack
-	 nop
-
-	! Save registers / Switch to bank 0
-	mov	k4, k2		! keep vector in k2
-	mov.l	1f, k4		! SR bits to clear in k4
-	bsr	save_regs	! needs original pr value in k3
-	 nop
-
-	bra	handle_exception_special
-	 nop
-
-	.align	2
-1:	.long	EXPEVT
-#endif
 
 ! prepare_stack()
 ! - roll back gRB
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
index 203b18347b83..3a1dbc709831 100644
--- a/arch/sh/kernel/cpu/sh4/Makefile
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -9,6 +9,11 @@ obj-$(CONFIG_HIBERNATION) += $(addprefix ../sh3/, swsusp.o)
 obj-$(CONFIG_SH_FPU)			+= fpu.o softfloat.o
 obj-$(CONFIG_SH_STORE_QUEUES)		+= sq.o
 
+# Perf events
+perf-$(CONFIG_CPU_SUBTYPE_SH7750)	:= perf_event.o
+perf-$(CONFIG_CPU_SUBTYPE_SH7750S)	:= perf_event.o
+perf-$(CONFIG_CPU_SUBTYPE_SH7091)	:= perf_event.o
+
 # CPU subtype setup
 obj-$(CONFIG_CPU_SUBTYPE_SH7750)	+= setup-sh7750.o
 obj-$(CONFIG_CPU_SUBTYPE_SH7750R)	+= setup-sh7750.o
@@ -27,4 +32,5 @@ endif
 # Additional clocks by subtype
 clock-$(CONFIG_CPU_SUBTYPE_SH4_202)	+= clock-sh4-202.o
 
 obj-y	+= $(clock-y)
+obj-$(CONFIG_PERF_EVENTS)		+= $(perf-y)
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index e3ea5411da6d..e97857aec8a0 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -41,13 +41,11 @@ static unsigned int fpu_exception_flags;
 
 /*
  * Save FPU registers onto task structure.
- * Assume called with FPU enabled (SR.FD=0).
  */
-void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+void save_fpu(struct task_struct *tsk)
 {
 	unsigned long dummy;
 
-	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
 	enable_fpu();
 	asm volatile ("sts.l	fpul, @-%0\n\t"
 		      "sts.l	fpscr, @-%0\n\t"
@@ -92,7 +90,6 @@ void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
 		      :"memory");
 
 	disable_fpu();
-	release_fpu(regs);
 }
 
 static void restore_fpu(struct task_struct *tsk)
@@ -285,7 +282,6 @@ static int ieee_fpe_handler(struct pt_regs *regs)
 			/* fcnvsd */
 			struct task_struct *tsk = current;
 
-			save_fpu(tsk, regs);
 			if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
 				/* FPU error */
 				denormal_to_double(&tsk->thread.fpu.hard,
@@ -462,7 +458,7 @@ BUILD_TRAP_HANDLER(fpu_error)
 	struct task_struct *tsk = current;
 	TRAP_HANDLER_DECL;
 
-	save_fpu(tsk, regs);
+	__unlazy_fpu(tsk, regs);
 	fpu_exception_flags = 0;
 	if (ieee_fpe_handler(regs)) {
 		tsk->thread.fpu.hard.fpscr &=
@@ -473,7 +469,7 @@ BUILD_TRAP_HANDLER(fpu_error)
 		tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10);
 		grab_fpu(regs);
 		restore_fpu(tsk);
-		set_tsk_thread_flag(tsk, TIF_USEDFPU);
+		task_thread_info(tsk)->status |= TS_USEDFPU;
 		if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) &
 		     (fpu_exception_flags >> 2)) == 0) {
 			return;
@@ -483,18 +479,18 @@ BUILD_TRAP_HANDLER(fpu_error)
 	force_sig(SIGFPE, tsk);
 }
 
-BUILD_TRAP_HANDLER(fpu_state_restore)
+void fpu_state_restore(struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
-	TRAP_HANDLER_DECL;
 
 	grab_fpu(regs);
-	if (!user_mode(regs)) {
+	if (unlikely(!user_mode(regs))) {
 		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+		BUG();
 		return;
 	}
 
-	if (used_math()) {
+	if (likely(used_math())) {
 		/* Using the FPU again. */
 		restore_fpu(tsk);
 	} else {
@@ -502,5 +498,13 @@ BUILD_TRAP_HANDLER(fpu_state_restore)
 		fpu_init();
 		set_used_math();
 	}
-	set_tsk_thread_flag(tsk, TIF_USEDFPU);
+	task_thread_info(tsk)->status |= TS_USEDFPU;
+	tsk->fpu_counter++;
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+	TRAP_HANDLER_DECL;
+
+	fpu_state_restore(regs);
 }
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c
new file mode 100644
index 000000000000..7f9ecc9c2d02
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/perf_event.c
@@ -0,0 +1,253 @@
+/*
+ * Performance events support for SH7750-style performance counters
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+#include <asm/processor.h>
+
+#define PM_CR_BASE	0xff000084	/* 16-bit */
+#define PM_CTR_BASE	0xff100004	/* 32-bit */
+
+#define PMCR(n)		(PM_CR_BASE + ((n) * 0x04))
+#define PMCTRH(n)	(PM_CTR_BASE + 0x00 + ((n) * 0x08))
+#define PMCTRL(n)	(PM_CTR_BASE + 0x04 + ((n) * 0x08))
+
+#define PMCR_PMM_MASK	0x0000003f
+
+#define PMCR_CLKF	0x00000100
+#define PMCR_PMCLR	0x00002000
+#define PMCR_PMST	0x00004000
+#define PMCR_PMEN	0x00008000
+
+static struct sh_pmu sh7750_pmu;
+
+/*
+ * There are a number of events supported by each counter (33 in total).
+ * Since we have 2 counters, each counter will take the event code as it
+ * corresponds to the PMCR PMM setting. Each counter can be configured
+ * independently.
+ *
+ *	Event Code	Description
+ *	----------	-----------
+ *
+ *	0x01		Operand read access
+ *	0x02		Operand write access
+ *	0x03		UTLB miss
+ *	0x04		Operand cache read miss
+ *	0x05		Operand cache write miss
+ *	0x06		Instruction fetch (w/ cache)
+ *	0x07		Instruction TLB miss
+ *	0x08		Instruction cache miss
+ *	0x09		All operand accesses
+ *	0x0a		All instruction accesses
+ *	0x0b		OC RAM operand access
+ *	0x0d		On-chip I/O space access
+ *	0x0e		Operand access (r/w)
+ *	0x0f		Operand cache miss (r/w)
+ *	0x10		Branch instruction
+ *	0x11		Branch taken
+ *	0x12		BSR/BSRF/JSR
+ *	0x13		Instruction execution
+ *	0x14		Instruction execution in parallel
+ *	0x15		FPU Instruction execution
+ *	0x16		Interrupt
+ *	0x17		NMI
+ *	0x18		trapa instruction execution
+ *	0x19		UBCA match
+ *	0x1a		UBCB match
+ *	0x21		Instruction cache fill
+ *	0x22		Operand cache fill
+ *	0x23		Elapsed time
+ *	0x24		Pipeline freeze by I-cache miss
+ *	0x25		Pipeline freeze by D-cache miss
+ *	0x27		Pipeline freeze by branch instruction
+ *	0x28		Pipeline freeze by CPU register
+ *	0x29		Pipeline freeze by FPU
+ */
+
+static const int sh7750_general_events[] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0023,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x000a,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0006,	/* I-cache */
+	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0008,	/* I-cache */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x0010,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= -1,
+	[PERF_COUNT_HW_BUS_CYCLES]		= -1,
+};
+
+#define C(x)	PERF_COUNT_HW_CACHE_##x
+
+static const int sh7750_cache_events
+			[PERF_COUNT_HW_CACHE_MAX]
+			[PERF_COUNT_HW_CACHE_OP_MAX]
+			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+	[ C(L1D) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0001,
+			[ C(RESULT_MISS)   ] = 0x0004,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0002,
+			[ C(RESULT_MISS)   ] = 0x0005,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+	},
+
+	[ C(L1I) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0006,
+			[ C(RESULT_MISS)   ] = 0x0008,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+	},
+
+	[ C(LL) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+	},
+
+	[ C(DTLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0x0003,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+	},
+
+	[ C(ITLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0x0007,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+	},
+
+	[ C(BPU) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+	},
+};
+
+static int sh7750_event_map(int event)
+{
+	return sh7750_general_events[event];
+}
+
+static u64 sh7750_pmu_read(int idx)
+{
+	return (u64)((u64)(__raw_readl(PMCTRH(idx)) & 0xffff) << 32) |
+			   __raw_readl(PMCTRL(idx));
+}
+
+static void sh7750_pmu_disable(struct hw_perf_event *hwc, int idx)
+{
+	unsigned int tmp;
+
+	tmp = __raw_readw(PMCR(idx));
+	tmp &= ~(PMCR_PMM_MASK | PMCR_PMEN);
+	__raw_writew(tmp, PMCR(idx));
+}
+
+static void sh7750_pmu_enable(struct hw_perf_event *hwc, int idx)
+{
+	__raw_writew(__raw_readw(PMCR(idx)) | PMCR_PMCLR, PMCR(idx));
+	__raw_writew(hwc->config | PMCR_PMEN | PMCR_PMST, PMCR(idx));
+}
+
+static void sh7750_pmu_disable_all(void)
+{
+	int i;
+
+	for (i = 0; i < sh7750_pmu.num_events; i++)
+		__raw_writew(__raw_readw(PMCR(i)) & ~PMCR_PMEN, PMCR(i));
+}
+
+static void sh7750_pmu_enable_all(void)
+{
+	int i;
+
+	for (i = 0; i < sh7750_pmu.num_events; i++)
+		__raw_writew(__raw_readw(PMCR(i)) | PMCR_PMEN, PMCR(i));
+}
+
+static struct sh_pmu sh7750_pmu = {
+	.name		= "SH7750",
+	.num_events	= 2,
+	.event_map	= sh7750_event_map,
+	.max_events	= ARRAY_SIZE(sh7750_general_events),
+	.raw_event_mask	= PMCR_PMM_MASK,
+	.cache_events	= &sh7750_cache_events,
+	.read		= sh7750_pmu_read,
+	.disable	= sh7750_pmu_disable,
+	.enable		= sh7750_pmu_enable,
+	.disable_all	= sh7750_pmu_disable_all,
+	.enable_all	= sh7750_pmu_enable_all,
+};
+
+static int __init sh7750_pmu_init(void)
+{
+	/*
+	 * Make sure this CPU actually has perf counters.
+	 */
+	if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
+		pr_notice("HW perf events unsupported, software events only.\n");
+		return -ENODEV;
+	}
+
+	return register_sh_pmu(&sh7750_pmu);
+}
+arch_initcall(sh7750_pmu_init);
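
One subtlety in sh7750_pmu_read() above: the SH7750 counters are 48 bits wide, split across PMCTRH (upper 16 bits) and PMCTRL (lower 32 bits), so two MMIO reads are stitched into a single u64. Reading each half once can return a torn value if the low word carries into the high word between the two reads; a common mitigation, shown here only as a sketch and not part of this patch, is to re-read until the high half is stable:

    static u64 sh7750_pmu_read_stable(int idx)
    {
            u32 hi, lo;

            do {
                    hi = __raw_readl(PMCTRH(idx)) & 0xffff;
                    lo = __raw_readl(PMCTRL(idx));
                    /* Retry if the high half changed under us */
            } while ((__raw_readl(PMCTRH(idx)) & 0xffff) != hi);

            return ((u64)hi << 32) | lo;
    }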
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 490d5dc9e372..33bab477d2e2 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -44,3 +44,4 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o
 obj-y				+= $(clock-y)
 obj-$(CONFIG_SMP)		+= $(smp-y)
 obj-$(CONFIG_GENERIC_GPIO)	+= $(pinmux-y)
+obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index dfe9192be63e..9db743802f06 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -152,7 +152,7 @@ struct clk div6_clks[] = {
 	SH_CLK_DIV6("fsia_clk", &div3_clk, FCLKACR, 0),
 	SH_CLK_DIV6("fsib_clk", &div3_clk, FCLKBCR, 0),
 	SH_CLK_DIV6("irda_clk", &div3_clk, IRDACLKCR, 0),
-	SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, 0),
+	SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, CLK_ENABLE_ON_INIT),
 };
 
 #define R_CLK (&r_clk)
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c
new file mode 100644
index 000000000000..eddc21973fa1
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/perf_event.c
@@ -0,0 +1,269 @@
+/*
+ * Performance events support for SH-4A performance counters
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+#include <asm/processor.h>
+
+#define PPC_CCBR(idx)	(0xff200800 + (sizeof(u32) * idx))
+#define PPC_PMCTR(idx)	(0xfc100000 + (sizeof(u32) * idx))
+
+#define CCBR_CIT_MASK	(0x7ff << 6)
+#define CCBR_DUC	(1 << 3)
+#define CCBR_CMDS	(1 << 1)
+#define CCBR_PPCE	(1 << 0)
+
+#define PPC_PMCAT	0xfc100080
+
+#define PMCAT_OVF3	(1 << 27)
+#define PMCAT_CNN3	(1 << 26)
+#define PMCAT_CLR3	(1 << 25)
+#define PMCAT_OVF2	(1 << 19)
+#define PMCAT_CLR2	(1 << 17)
+#define PMCAT_OVF1	(1 << 11)
+#define PMCAT_CNN1	(1 << 10)
+#define PMCAT_CLR1	(1 << 9)
+#define PMCAT_OVF0	(1 << 3)
+#define PMCAT_CLR0	(1 << 1)
+
+static struct sh_pmu sh4a_pmu;
+
+/*
+ * Supported raw event codes:
+ *
+ *	Event Code	Description
+ *	----------	-----------
+ *
+ *	0x0000		number of elapsed cycles
+ *	0x0200		number of elapsed cycles in privileged mode
+ *	0x0280		number of elapsed cycles while SR.BL is asserted
+ *	0x0202		instruction execution
+ *	0x0203		instruction execution in parallel
+ *	0x0204		number of unconditional branches
+ *	0x0208		number of exceptions
+ *	0x0209		number of interrupts
+ *	0x0220		UTLB miss caused by instruction fetch
+ *	0x0222		UTLB miss caused by operand access
+ *	0x02a0		number of ITLB misses
+ *	0x0028		number of accesses to instruction memories
+ *	0x0029		number of accesses to instruction cache
+ *	0x002a		instruction cache miss
+ *	0x022e		number of access to instruction X/Y memory
+ *	0x0030		number of reads to operand memories
+ *	0x0038		number of writes to operand memories
+ *	0x0031		number of operand cache read accesses
+ *	0x0039		number of operand cache write accesses
+ *	0x0032		operand cache read miss
+ *	0x003a		operand cache write miss
+ *	0x0236		number of reads to operand X/Y memory
+ *	0x023e		number of writes to operand X/Y memory
+ *	0x0237		number of reads to operand U memory
+ *	0x023f		number of writes to operand U memory
+ *	0x0337		number of U memory read buffer misses
+ *	0x02b4		number of wait cycles due to operand read access
+ *	0x02bc		number of wait cycles due to operand write access
+ *	0x0033		number of wait cycles due to operand cache read miss
+ *	0x003b		number of wait cycles due to operand cache write miss
+ */
+
+/*
+ * Special reserved bits used by hardware emulators, read values will
+ * vary, but writes must always be 0.
+ */
+#define PMCAT_EMU_CLR_MASK	((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))
+
+static const int sh4a_general_events[] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0000,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x0202,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0029,	/* I-cache */
+	[PERF_COUNT_HW_CACHE_MISSES]		= 0x002a,	/* I-cache */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x0204,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= -1,
+	[PERF_COUNT_HW_BUS_CYCLES]		= -1,
+};
+
+#define C(x)	PERF_COUNT_HW_CACHE_##x
+
+static const int sh4a_cache_events
+			[PERF_COUNT_HW_CACHE_MAX]
+			[PERF_COUNT_HW_CACHE_OP_MAX]
+			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+	[ C(L1D) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0031,
+			[ C(RESULT_MISS)   ] = 0x0032,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0039,
+			[ C(RESULT_MISS)   ] = 0x003a,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+	},
+
+	[ C(L1I) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0029,
+			[ C(RESULT_MISS)   ] = 0x002a,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+	},
+
+	[ C(LL) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0030,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0038,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+	},
+
+	[ C(DTLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0x0222,
+			[ C(RESULT_MISS)   ] = 0x0220,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0,
+		},
+	},
+
+	[ C(ITLB) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = 0,
+			[ C(RESULT_MISS)   ] = 0x02a0,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+	},
+
+	[ C(BPU) ] = {
+		[ C(OP_READ) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_WRITE) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+		[ C(OP_PREFETCH) ] = {
+			[ C(RESULT_ACCESS) ] = -1,
+			[ C(RESULT_MISS)   ] = -1,
+		},
+	},
+};
+
+static int sh4a_event_map(int event)
+{
+	return sh4a_general_events[event];
+}
+
+static u64 sh4a_pmu_read(int idx)
+{
+	return __raw_readl(PPC_PMCTR(idx));
+}
+
+static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx)
+{
+	unsigned int tmp;
+
+	tmp = __raw_readl(PPC_CCBR(idx));
+	tmp &= ~(CCBR_CIT_MASK | CCBR_DUC);
+	__raw_writel(tmp, PPC_CCBR(idx));
+}
+
+static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx)
+{
+	unsigned int tmp;
+
+	tmp = __raw_readl(PPC_PMCAT);
+	tmp &= ~PMCAT_EMU_CLR_MASK;
+	tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0;
+	__raw_writel(tmp, PPC_PMCAT);
+
+	tmp = __raw_readl(PPC_CCBR(idx));
+	tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE;
+	__raw_writel(tmp, PPC_CCBR(idx));
+
+	__raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx));
+}
+
+static void sh4a_pmu_disable_all(void)
+{
+	int i;
+
+	for (i = 0; i < sh4a_pmu.num_events; i++)
+		__raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i));
+}
+
+static void sh4a_pmu_enable_all(void)
+{
+	int i;
+
+	for (i = 0; i < sh4a_pmu.num_events; i++)
+		__raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i));
+}
+
+static struct sh_pmu sh4a_pmu = {
+	.name		= "SH-4A",
+	.num_events	= 2,
+	.event_map	= sh4a_event_map,
+	.max_events	= ARRAY_SIZE(sh4a_general_events),
+	.raw_event_mask	= 0x3ff,
+	.cache_events	= &sh4a_cache_events,
+	.read		= sh4a_pmu_read,
+	.disable	= sh4a_pmu_disable,
+	.enable		= sh4a_pmu_enable,
+	.disable_all	= sh4a_pmu_disable_all,
+	.enable_all	= sh4a_pmu_enable_all,
+};
+
+static int __init sh4a_pmu_init(void)
+{
+	/*
+	 * Make sure this CPU actually has perf counters.
+	 */
+	if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
+		pr_notice("HW perf events unsupported, software events only.\n");
+		return -ENODEV;
+	}
+
+	return register_sh_pmu(&sh4a_pmu);
+}
+arch_initcall(sh4a_pmu_init);
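
Once register_sh_pmu() succeeds, the raw event codes tabulated above become reachable through the generic perf ABI, e.g. "perf stat -e r202" for instruction execution. An illustrative userspace sketch (not part of the patch) that opens a counter for raw event 0x0202 on the current task via perf_event_open(2):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_insn_counter(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;
            attr.config = 0x0202;   /* clipped by .raw_event_mask (0x3ff) */

            /* current task, any CPU, no group leader, no flags */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }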
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index f3851fd757ec..845e89c936e7 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -20,6 +20,8 @@
 #include <linux/uio_driver.h>
 #include <linux/sh_timer.h>
 #include <linux/io.h>
+#include <linux/notifier.h>
+#include <asm/suspend.h>
 #include <asm/clock.h>
 #include <asm/mmzone.h>
 #include <cpu/sh7724.h>
@@ -202,7 +204,7 @@ static struct resource veu0_resources[] = {
 	[0] = {
 		.name	= "VEU3F0",
 		.start	= 0xfe920000,
-		.end	= 0xfe9200cb - 1,
+		.end	= 0xfe9200cb,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
@@ -234,7 +236,7 @@ static struct resource veu1_resources[] = {
 	[0] = {
 		.name	= "VEU3F1",
 		.start	= 0xfe924000,
-		.end	= 0xfe9240cb - 1,
+		.end	= 0xfe9240cb,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
@@ -523,6 +525,70 @@ static struct platform_device jpu_device = {
 	},
 };
 
+/* SPU2DSP0 */
+static struct uio_info spu0_platform_data = {
+	.name = "SPU2DSP0",
+	.version = "0",
+	.irq = 86,
+};
+
+static struct resource spu0_resources[] = {
+	[0] = {
+		.name	= "SPU2DSP0",
+		.start	= 0xFE200000,
+		.end	= 0xFE2FFFFF,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		/* place holder for contiguous memory */
+	},
+};
+
+static struct platform_device spu0_device = {
+	.name		= "uio_pdrv_genirq",
+	.id		= 4,
+	.dev = {
+		.platform_data	= &spu0_platform_data,
+	},
+	.resource	= spu0_resources,
+	.num_resources	= ARRAY_SIZE(spu0_resources),
+	.archdata = {
+		.hwblk_id = HWBLK_SPU,
+	},
+};
+
+/* SPU2DSP1 */
+static struct uio_info spu1_platform_data = {
+	.name = "SPU2DSP1",
+	.version = "0",
+	.irq = 87,
+};
+
+static struct resource spu1_resources[] = {
+	[0] = {
+		.name	= "SPU2DSP1",
+		.start	= 0xFE300000,
+		.end	= 0xFE3FFFFF,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		/* place holder for contiguous memory */
+	},
+};
+
+static struct platform_device spu1_device = {
+	.name		= "uio_pdrv_genirq",
+	.id		= 5,
+	.dev = {
+		.platform_data	= &spu1_platform_data,
+	},
+	.resource	= spu1_resources,
+	.num_resources	= ARRAY_SIZE(spu1_resources),
+	.archdata = {
+		.hwblk_id = HWBLK_SPU,
+	},
+};
+
 static struct platform_device *sh7724_devices[] __initdata = {
 	&cmt_device,
 	&tmu0_device,
@@ -539,6 +605,8 @@ static struct platform_device *sh7724_devices[] __initdata = {
 	&veu0_device,
 	&veu1_device,
 	&jpu_device,
+	&spu0_device,
+	&spu1_device,
 };
 
 static int __init sh7724_devices_setup(void)
@@ -547,6 +615,8 @@ static int __init sh7724_devices_setup(void)
 	platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
 	platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
 	platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20);
+	platform_resource_setup_memory(&spu0_device, "spu0", 2 << 20);
+	platform_resource_setup_memory(&spu1_device, "spu1", 2 << 20);
 
 	return platform_add_devices(sh7724_devices,
 				    ARRAY_SIZE(sh7724_devices));
@@ -827,3 +897,193 @@ void __init plat_irq_setup(void)
 {
 	register_intc_controller(&intc_desc);
 }
+
+static struct {
+	/* BSC */
+	unsigned long mmselr;
+	unsigned long cs0bcr;
+	unsigned long cs4bcr;
+	unsigned long cs5abcr;
+	unsigned long cs5bbcr;
+	unsigned long cs6abcr;
+	unsigned long cs6bbcr;
+	unsigned long cs4wcr;
+	unsigned long cs5awcr;
+	unsigned long cs5bwcr;
+	unsigned long cs6awcr;
+	unsigned long cs6bwcr;
+	/* INTC */
+	unsigned short ipra;
+	unsigned short iprb;
+	unsigned short iprc;
+	unsigned short iprd;
+	unsigned short ipre;
+	unsigned short iprf;
+	unsigned short iprg;
+	unsigned short iprh;
+	unsigned short ipri;
+	unsigned short iprj;
+	unsigned short iprk;
+	unsigned short iprl;
+	unsigned char imr0;
+	unsigned char imr1;
+	unsigned char imr2;
+	unsigned char imr3;
+	unsigned char imr4;
+	unsigned char imr5;
+	unsigned char imr6;
+	unsigned char imr7;
+	unsigned char imr8;
+	unsigned char imr9;
+	unsigned char imr10;
+	unsigned char imr11;
+	unsigned char imr12;
+	/* RWDT */
+	unsigned short rwtcnt;
+	unsigned short rwtcsr;
+	/* CPG */
+	unsigned long irdaclk;
+	unsigned long spuclk;
+} sh7724_rstandby_state;
+
+static int sh7724_pre_sleep_notifier_call(struct notifier_block *nb,
+					  unsigned long flags, void *unused)
+{
+	if (!(flags & SUSP_SH_RSTANDBY))
+		return NOTIFY_DONE;
+
+	/* BCR */
+	sh7724_rstandby_state.mmselr = __raw_readl(0xff800020); /* MMSELR */
+	sh7724_rstandby_state.mmselr |= 0xa5a50000;
+	sh7724_rstandby_state.cs0bcr = __raw_readl(0xfec10004); /* CS0BCR */
+	sh7724_rstandby_state.cs4bcr = __raw_readl(0xfec10010); /* CS4BCR */
+	sh7724_rstandby_state.cs5abcr = __raw_readl(0xfec10014); /* CS5ABCR */
+	sh7724_rstandby_state.cs5bbcr = __raw_readl(0xfec10018); /* CS5BBCR */
+	sh7724_rstandby_state.cs6abcr = __raw_readl(0xfec1001c); /* CS6ABCR */
+	sh7724_rstandby_state.cs6bbcr = __raw_readl(0xfec10020); /* CS6BBCR */
+	sh7724_rstandby_state.cs4wcr = __raw_readl(0xfec10030); /* CS4WCR */
+	sh7724_rstandby_state.cs5awcr = __raw_readl(0xfec10034); /* CS5AWCR */
+	sh7724_rstandby_state.cs5bwcr = __raw_readl(0xfec10038); /* CS5BWCR */
+	sh7724_rstandby_state.cs6awcr = __raw_readl(0xfec1003c); /* CS6AWCR */
+	sh7724_rstandby_state.cs6bwcr = __raw_readl(0xfec10040); /* CS6BWCR */
+
+	/* INTC */
+	sh7724_rstandby_state.ipra = __raw_readw(0xa4080000); /* IPRA */
+	sh7724_rstandby_state.iprb = __raw_readw(0xa4080004); /* IPRB */
+	sh7724_rstandby_state.iprc = __raw_readw(0xa4080008); /* IPRC */
+	sh7724_rstandby_state.iprd = __raw_readw(0xa408000c); /* IPRD */
+	sh7724_rstandby_state.ipre = __raw_readw(0xa4080010); /* IPRE */
+	sh7724_rstandby_state.iprf = __raw_readw(0xa4080014); /* IPRF */
+	sh7724_rstandby_state.iprg = __raw_readw(0xa4080018); /* IPRG */
+	sh7724_rstandby_state.iprh = __raw_readw(0xa408001c); /* IPRH */
+	sh7724_rstandby_state.ipri = __raw_readw(0xa4080020); /* IPRI */
+	sh7724_rstandby_state.iprj = __raw_readw(0xa4080024); /* IPRJ */
+	sh7724_rstandby_state.iprk = __raw_readw(0xa4080028); /* IPRK */
+	sh7724_rstandby_state.iprl = __raw_readw(0xa408002c); /* IPRL */
+	sh7724_rstandby_state.imr0 = __raw_readb(0xa4080080); /* IMR0 */
+	sh7724_rstandby_state.imr1 = __raw_readb(0xa4080084); /* IMR1 */
+	sh7724_rstandby_state.imr2 = __raw_readb(0xa4080088); /* IMR2 */
+	sh7724_rstandby_state.imr3 = __raw_readb(0xa408008c); /* IMR3 */
+	sh7724_rstandby_state.imr4 = __raw_readb(0xa4080090); /* IMR4 */
+	sh7724_rstandby_state.imr5 = __raw_readb(0xa4080094); /* IMR5 */
+	sh7724_rstandby_state.imr6 = __raw_readb(0xa4080098); /* IMR6 */
+	sh7724_rstandby_state.imr7 = __raw_readb(0xa408009c); /* IMR7 */
+	sh7724_rstandby_state.imr8 = __raw_readb(0xa40800a0); /* IMR8 */
+	sh7724_rstandby_state.imr9 = __raw_readb(0xa40800a4); /* IMR9 */
+	sh7724_rstandby_state.imr10 = __raw_readb(0xa40800a8); /* IMR10 */
+	sh7724_rstandby_state.imr11 = __raw_readb(0xa40800ac); /* IMR11 */
+	sh7724_rstandby_state.imr12 = __raw_readb(0xa40800b0); /* IMR12 */
+
+	/* RWDT */
+	sh7724_rstandby_state.rwtcnt = __raw_readb(0xa4520000); /* RWTCNT */
+	sh7724_rstandby_state.rwtcnt |= 0x5a00;
+	sh7724_rstandby_state.rwtcsr = __raw_readb(0xa4520004); /* RWTCSR */
+	sh7724_rstandby_state.rwtcsr |= 0xa500;
+	__raw_writew(sh7724_rstandby_state.rwtcsr & 0x07, 0xa4520004);
+
+	/* CPG */
+	sh7724_rstandby_state.irdaclk = __raw_readl(0xa4150018); /* IRDACLKCR */
+	sh7724_rstandby_state.spuclk = __raw_readl(0xa415003c); /* SPUCLKCR */
+
+	return NOTIFY_DONE;
+}
+
+static int sh7724_post_sleep_notifier_call(struct notifier_block *nb,
+					   unsigned long flags, void *unused)
+{
+	if (!(flags & SUSP_SH_RSTANDBY))
+		return NOTIFY_DONE;
+
+	/* BCR */
+	__raw_writel(sh7724_rstandby_state.mmselr, 0xff800020); /* MMSELR */
+	__raw_writel(sh7724_rstandby_state.cs0bcr, 0xfec10004); /* CS0BCR */
+	__raw_writel(sh7724_rstandby_state.cs4bcr, 0xfec10010); /* CS4BCR */
+	__raw_writel(sh7724_rstandby_state.cs5abcr, 0xfec10014); /* CS5ABCR */
+	__raw_writel(sh7724_rstandby_state.cs5bbcr, 0xfec10018); /* CS5BBCR */
+	__raw_writel(sh7724_rstandby_state.cs6abcr, 0xfec1001c); /* CS6ABCR */
+	__raw_writel(sh7724_rstandby_state.cs6bbcr, 0xfec10020); /* CS6BBCR */
+	__raw_writel(sh7724_rstandby_state.cs4wcr, 0xfec10030); /* CS4WCR */
+	__raw_writel(sh7724_rstandby_state.cs5awcr, 0xfec10034); /* CS5AWCR */
+	__raw_writel(sh7724_rstandby_state.cs5bwcr, 0xfec10038); /* CS5BWCR */
+	__raw_writel(sh7724_rstandby_state.cs6awcr, 0xfec1003c); /* CS6AWCR */
+	__raw_writel(sh7724_rstandby_state.cs6bwcr, 0xfec10040); /* CS6BWCR */
+
+	/* INTC */
+	__raw_writew(sh7724_rstandby_state.ipra, 0xa4080000); /* IPRA */
+	__raw_writew(sh7724_rstandby_state.iprb, 0xa4080004); /* IPRB */
+	__raw_writew(sh7724_rstandby_state.iprc, 0xa4080008); /* IPRC */
+	__raw_writew(sh7724_rstandby_state.iprd, 0xa408000c); /* IPRD */
+	__raw_writew(sh7724_rstandby_state.ipre, 0xa4080010); /* IPRE */
+	__raw_writew(sh7724_rstandby_state.iprf, 0xa4080014); /* IPRF */
+	__raw_writew(sh7724_rstandby_state.iprg, 0xa4080018); /* IPRG */
+	__raw_writew(sh7724_rstandby_state.iprh, 0xa408001c); /* IPRH */
+	__raw_writew(sh7724_rstandby_state.ipri, 0xa4080020); /* IPRI */
+	__raw_writew(sh7724_rstandby_state.iprj, 0xa4080024); /* IPRJ */
+	__raw_writew(sh7724_rstandby_state.iprk, 0xa4080028); /* IPRK */
+	__raw_writew(sh7724_rstandby_state.iprl, 0xa408002c); /* IPRL */
+	__raw_writeb(sh7724_rstandby_state.imr0, 0xa4080080); /* IMR0 */
+	__raw_writeb(sh7724_rstandby_state.imr1, 0xa4080084); /* IMR1 */
+	__raw_writeb(sh7724_rstandby_state.imr2, 0xa4080088); /* IMR2 */
+	__raw_writeb(sh7724_rstandby_state.imr3, 0xa408008c); /* IMR3 */
+	__raw_writeb(sh7724_rstandby_state.imr4, 0xa4080090); /* IMR4 */
+	__raw_writeb(sh7724_rstandby_state.imr5, 0xa4080094); /* IMR5 */
+	__raw_writeb(sh7724_rstandby_state.imr6, 0xa4080098); /* IMR6 */
+	__raw_writeb(sh7724_rstandby_state.imr7, 0xa408009c); /* IMR7 */
+	__raw_writeb(sh7724_rstandby_state.imr8, 0xa40800a0); /* IMR8 */
+	__raw_writeb(sh7724_rstandby_state.imr9, 0xa40800a4); /* IMR9 */
+	__raw_writeb(sh7724_rstandby_state.imr10, 0xa40800a8); /* IMR10 */
+	__raw_writeb(sh7724_rstandby_state.imr11, 0xa40800ac); /* IMR11 */
+	__raw_writeb(sh7724_rstandby_state.imr12, 0xa40800b0); /* IMR12 */
+
+	/* RWDT */
+	__raw_writew(sh7724_rstandby_state.rwtcnt, 0xa4520000); /* RWTCNT */
+	__raw_writew(sh7724_rstandby_state.rwtcsr, 0xa4520004); /* RWTCSR */
+
+	/* CPG */
+	__raw_writel(sh7724_rstandby_state.irdaclk, 0xa4150018); /* IRDACLKCR */
+	__raw_writel(sh7724_rstandby_state.spuclk, 0xa415003c); /* SPUCLKCR */
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block sh7724_pre_sleep_notifier = {
+	.notifier_call = sh7724_pre_sleep_notifier_call,
+	.priority = SH_MOBILE_PRE(SH_MOBILE_SLEEP_CPU),
+};
+
+static struct notifier_block sh7724_post_sleep_notifier = {
+	.notifier_call = sh7724_post_sleep_notifier_call,
+	.priority = SH_MOBILE_POST(SH_MOBILE_SLEEP_CPU),
+};
+
+static int __init sh7724_sleep_setup(void)
+{
+	atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list,
+				       &sh7724_pre_sleep_notifier);
+
+	atomic_notifier_chain_register(&sh_mobile_post_sleep_notifier_list,
+				       &sh7724_post_sleep_notifier);
+	return 0;
+}
+arch_initcall(sh7724_sleep_setup);
+
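
The pre/post notifier pair registered above is the general hook point introduced by this series: any CPU or board code that must save and restore register state around R-standby can attach to the same lists. A minimal sketch of a hypothetical additional user (the my_board_* names are illustrative, not from the patch):

    static int my_board_pre_sleep(struct notifier_block *nb,
                                  unsigned long flags, void *unused)
    {
            if (!(flags & SUSP_SH_RSTANDBY))
                    return NOTIFY_DONE;

            /* save board-specific registers here */
            return NOTIFY_DONE;
    }

    static struct notifier_block my_board_pre_sleep_notifier = {
            .notifier_call = my_board_pre_sleep,
            .priority      = SH_MOBILE_PRE(SH_MOBILE_SLEEP_CPU),
    };

    static int __init my_board_sleep_setup(void)
    {
            atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list,
                                           &my_board_pre_sleep_notifier);
            return 0;
    }
    arch_initcall(my_board_sleep_setup);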
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index e848443deeb9..c7ba9166e18a 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -15,6 +15,15 @@
 #include <linux/sh_timer.h>
 #include <asm/mmzone.h>
 
+/*
+ * This intentionally only registers SCIF ports 0, 1, and 3. SCIF 2
+ * INTEVT values overlap with the FPU EXPEVT ones, requiring special
+ * demuxing in the exception dispatch path.
+ *
+ * As this overlap is something that never should have made it in to
+ * silicon in the first place, we just refuse to deal with the port at
+ * all rather than adding infrastructure to hack around it.
+ */
 static struct plat_sci_port sci_platform_data[] = {
 	{
 		.mapbase	= 0xffc30000,
@@ -27,11 +36,6 @@ static struct plat_sci_port sci_platform_data[] = {
 		.type		= PORT_SCIF,
 		.irqs		= { 44, 45, 47, 46 },
 	}, {
-		.mapbase	= 0xffc50000,
-		.flags		= UPF_BOOT_AUTOCONF,
-		.type		= PORT_SCIF,
-		.irqs		= { 48, 49, 51, 50 },
-	}, {
 		.mapbase	= 0xffc60000,
 		.flags		= UPF_BOOT_AUTOCONF,
 		.type		= PORT_SCIF,
@@ -268,7 +272,11 @@ enum {
 	UNUSED = 0,
 
 	/* interrupt sources */
-	IRL, IRQ0, IRQ1, IRQ2, IRQ3,
+	IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+	IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+	IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+	IRL_HHLL, IRL_HHLH, IRL_HHHL,
+	IRQ0, IRQ1, IRQ2, IRQ3,
 	HUDII,
 	TMU0, TMU1, TMU2, TMU3, TMU4, TMU5,
 	PCII0, PCII1, PCII2, PCII3, PCII4,
@@ -291,7 +299,7 @@ enum {
 	INTICI4, INTICI5, INTICI6, INTICI7,
 
 	/* interrupt groups */
-	PCII56789, SCIF0, SCIF1, SCIF2, SCIF3,
+	IRL, PCII56789, SCIF0, SCIF1, SCIF2, SCIF3,
 	DMAC0, DMAC1,
 };
 
@@ -309,8 +317,6 @@ static struct intc_vect vectors[] __initdata = {
 	INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
 	INTC_VECT(SCIF1_ERI, 0x780), INTC_VECT(SCIF1_RXI, 0x7a0),
 	INTC_VECT(SCIF1_BRI, 0x7c0), INTC_VECT(SCIF1_TXI, 0x7e0),
-	INTC_VECT(SCIF2_ERI, 0x800), INTC_VECT(SCIF2_RXI, 0x820),
-	INTC_VECT(SCIF2_BRI, 0x840), INTC_VECT(SCIF2_TXI, 0x860),
 	INTC_VECT(SCIF3_ERI, 0x880), INTC_VECT(SCIF3_RXI, 0x8a0),
 	INTC_VECT(SCIF3_BRI, 0x8c0), INTC_VECT(SCIF3_TXI, 0x8e0),
 	INTC_VECT(DMAC0_DMINT0, 0x900), INTC_VECT(DMAC0_DMINT1, 0x920),
@@ -344,10 +350,13 @@ static struct intc_vect vectors[] __initdata = {
 };
 
 static struct intc_group groups[] __initdata = {
+	INTC_GROUP(IRL, IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+		   IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+		   IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+		   IRL_HHLL, IRL_HHLH, IRL_HHHL),
 	INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9),
 	INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
 	INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
-	INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
 	INTC_GROUP(SCIF3, SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI),
 	INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
 		   DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
@@ -419,14 +428,14 @@ static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups,
 
 /* External interrupt pins in IRL mode */
 static struct intc_vect vectors_irl[] __initdata = {
-	INTC_VECT(IRL, 0x200), INTC_VECT(IRL, 0x220),
-	INTC_VECT(IRL, 0x240), INTC_VECT(IRL, 0x260),
-	INTC_VECT(IRL, 0x280), INTC_VECT(IRL, 0x2a0),
-	INTC_VECT(IRL, 0x2c0), INTC_VECT(IRL, 0x2e0),
-	INTC_VECT(IRL, 0x300), INTC_VECT(IRL, 0x320),
-	INTC_VECT(IRL, 0x340), INTC_VECT(IRL, 0x360),
-	INTC_VECT(IRL, 0x380), INTC_VECT(IRL, 0x3a0),
-	INTC_VECT(IRL, 0x3c0),
+	INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
+	INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
+	INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
+	INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
+	INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
+	INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
+	INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
+	INTC_VECT(IRL_HHHL, 0x3c0),
 };
 
 static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups,
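
The fifteen IRL_* names introduced above follow a fixed encoding: the four external IRL pins form a level n from 0 (LLLL) to 14 (HHHL), and level n is delivered on INTEVT vector 0x200 + 0x20 * n; level 15 (all pins high) means no interrupt pending, which is why there is no IRL_HHHH. A one-line sanity check of the table, as a sketch rather than anything in the patch:

    /* IRL_LLLL -> 0x200, IRL_LHHH (n = 7) -> 0x2e0, IRL_HHHL (n = 14) -> 0x3c0 */
    static inline unsigned int irl_vector(unsigned int level)
    {
            return 0x200 + 0x20 * level;    /* valid for level 0..14 */
    }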
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 185ec3976a25..5863e0c4d02f 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -14,6 +14,13 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 
+#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
+#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))
+
+#define STBCR_MSTP	0x00000001
+#define STBCR_RESET	0x00000002
+#define STBCR_LTSLP	0x80000000
+
 static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
 {
 	unsigned int message = (unsigned int)(long)arg;
@@ -21,9 +28,9 @@ static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
 	unsigned int offs = 4 * cpu;
 	unsigned int x;
 
-	x = ctrl_inl(0xfe410070 + offs); /* C0INITICI..CnINTICI */
+	x = __raw_readl(0xfe410070 + offs); /* C0INITICI..CnINTICI */
 	x &= (1 << (message << 2));
-	ctrl_outl(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */
+	__raw_writel(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */
 
 	smp_message_recv(message);
 
@@ -37,6 +44,9 @@ void __init plat_smp_setup(void)
 
 	init_cpu_possible(cpumask_of(cpu));
 
+	/* Enable light sleep for the boot CPU */
+	__raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu));
+
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 
@@ -66,32 +76,23 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
 			  "IPI", (void *)(long)i);
 }
 
-#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
-#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))
-
-#define STBCR_MSTP	0x00000001
-#define STBCR_RESET	0x00000002
-#define STBCR_LTSLP	0x80000000
-
-#define STBCR_AP_VAL	(STBCR_RESET | STBCR_LTSLP)
-
 void plat_start_cpu(unsigned int cpu, unsigned long entry_point)
 {
-	ctrl_outl(entry_point, RESET_REG(cpu));
+	__raw_writel(entry_point, RESET_REG(cpu));
 
-	if (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP))
-		ctrl_outl(STBCR_MSTP, STBCR_REG(cpu));
+	if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
+		__raw_writel(STBCR_MSTP, STBCR_REG(cpu));
 
-	while (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP))
+	while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
 		cpu_relax();
 
 	/* Start up secondary processor by sending a reset */
-	ctrl_outl(STBCR_AP_VAL, STBCR_REG(cpu));
+	__raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu));
 }
 
 int plat_smp_processor_id(void)
 {
-	return ctrl_inl(0xff000048); /* CPIDR */
+	return __raw_readl(0xff000048); /* CPIDR */
 }
 
 void plat_send_ipi(unsigned int cpu, unsigned int message)
@@ -100,5 +101,5 @@ void plat_send_ipi(unsigned int cpu, unsigned int message)
 
 	BUG_ON(cpu >= 4);
 
-	ctrl_outl(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
+	__raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
 }
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index b0aacf675258..8f13f73cb2cb 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -933,7 +933,7 @@ ret_with_reschedule:

 	pta	restore_all, tr1

-	movi	(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
+	movi	_TIF_SIGPENDING, r8
 	and	r8, r7, r8
 	pta	work_notifysig, tr0
 	bne	r8, ZERO, tr0
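
This hunk (and the matching one in entry-common.S below) drops _TIF_RESTORE_SIGMASK from the work-pending test. That is only safe if TIF_RESTORE_SIGMASK is never set without TIF_SIGPENDING, which is how the generic set_restore_sigmask() helper behaved in this era. A standalone sketch of why the two tests then agree; the bit values and demo_set_restore_sigmask() are illustrative, not the kernel's:

    #include <stdio.h>

    #define TIF_SIGPENDING		(1UL << 1)	/* illustrative bit values */
    #define TIF_RESTORE_SIGMASK	(1UL << 2)

    /* models set_restore_sigmask(), which sets TIF_SIGPENDING alongside
     * TIF_RESTORE_SIGMASK */
    static unsigned long demo_set_restore_sigmask(unsigned long flags)
    {
    	return flags | TIF_RESTORE_SIGMASK | TIF_SIGPENDING;
    }

    int main(void)
    {
    	unsigned long flags = demo_set_restore_sigmask(0);

    	/* the old two-bit test and the new single-bit test agree */
    	printf("old: %d new: %d\n",
    	       !!(flags & (TIF_SIGPENDING | TIF_RESTORE_SIGMASK)),
    	       !!(flags & TIF_SIGPENDING));
    	return 0;
    }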
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 1c504bd972c3..83972aa319c2 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -87,25 +87,31 @@ void sh_mobile_setup_cpuidle(void)

 	dev->safe_state = state;

-	state = &dev->states[i++];
-	snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
-	strncpy(state->desc, "SuperH Sleep Mode [SF]", CPUIDLE_DESC_LEN);
-	state->exit_latency = 100;
-	state->target_residency = 1 * 2;
-	state->power_usage = 1;
-	state->flags = 0;
-	state->flags |= CPUIDLE_FLAG_TIME_VALID;
-	state->enter = cpuidle_sleep_enter;
+	if (sh_mobile_sleep_supported & SUSP_SH_SF) {
+		state = &dev->states[i++];
+		snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
+		strncpy(state->desc, "SuperH Sleep Mode [SF]",
+			CPUIDLE_DESC_LEN);
+		state->exit_latency = 100;
+		state->target_residency = 1 * 2;
+		state->power_usage = 1;
+		state->flags = 0;
+		state->flags |= CPUIDLE_FLAG_TIME_VALID;
+		state->enter = cpuidle_sleep_enter;
+	}

-	state = &dev->states[i++];
-	snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
-	strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", CPUIDLE_DESC_LEN);
-	state->exit_latency = 2300;
-	state->target_residency = 1 * 2;
-	state->power_usage = 1;
-	state->flags = 0;
-	state->flags |= CPUIDLE_FLAG_TIME_VALID;
-	state->enter = cpuidle_sleep_enter;
+	if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
+		state = &dev->states[i++];
+		snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
+		strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
+			CPUIDLE_DESC_LEN);
+		state->exit_latency = 2300;
+		state->target_residency = 1 * 2;
+		state->power_usage = 1;
+		state->flags = 0;
+		state->flags |= CPUIDLE_FLAG_TIME_VALID;
+		state->enter = cpuidle_sleep_enter;
+	}

 	dev->state_count = i;

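
After this rework, the deeper C-states are only registered when the platform has advertised the matching capability bit in sh_mobile_sleep_supported (see the pm.c hunk below, which populates it). A sketch of the resulting gating, counting how many states end up registered; demo_state_count() is hypothetical and the SUSP_* bit values here are placeholders:

    #define SUSP_SH_STANDBY	(1 << 1)	/* placeholder bit values */
    #define SUSP_SH_SF		(1 << 2)

    extern unsigned long sh_mobile_sleep_supported;

    static int demo_state_count(void)
    {
    	int i = 1;		/* C0 is always present */

    	if (sh_mobile_sleep_supported & SUSP_SH_SF)
    		i++;		/* C1: sleep with self-refresh */
    	if (sh_mobile_sleep_supported & SUSP_SH_STANDBY)
    		i++;		/* C2: standby with self-refresh */
    	return i;
    }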
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index ee3c2aaf66fb..ca029a44743c 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -15,6 +15,13 @@
 #include <linux/suspend.h>
 #include <asm/suspend.h>
 #include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Notifier lists for pre/post sleep notification
+ */
+ATOMIC_NOTIFIER_HEAD(sh_mobile_pre_sleep_notifier_list);
+ATOMIC_NOTIFIER_HEAD(sh_mobile_post_sleep_notifier_list);

 /*
  * Sleep modes available on SuperH Mobile:
@@ -26,30 +33,105 @@
 #define SUSP_MODE_SLEEP		(SUSP_SH_SLEEP)
 #define SUSP_MODE_SLEEP_SF	(SUSP_SH_SLEEP | SUSP_SH_SF)
 #define SUSP_MODE_STANDBY_SF	(SUSP_SH_STANDBY | SUSP_SH_SF)
+#define SUSP_MODE_RSTANDBY	(SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_SF)
+ /*
+  * U-standby mode is unsupported since it needs bootloader hacks
+  */

-/*
- * The following modes are not there yet:
- *
- * R-standby mode is unsupported, but will be added in the future
- * U-standby mode is low priority since it needs bootloader hacks
- */
-
-#define ILRAM_BASE 0xe5200000
-
-extern const unsigned char sh_mobile_standby[];
-extern const unsigned int sh_mobile_standby_size;
+#ifdef CONFIG_CPU_SUBTYPE_SH7724
+#define RAM_BASE 0xfd800000 /* RSMEM */
+#else
+#define RAM_BASE 0xe5200000 /* ILRAM */
+#endif

 void sh_mobile_call_standby(unsigned long mode)
 {
-	void *onchip_mem = (void *)ILRAM_BASE;
-	void (*standby_onchip_mem)(unsigned long, unsigned long) = onchip_mem;
+	void *onchip_mem = (void *)RAM_BASE;
+	struct sh_sleep_data *sdp = onchip_mem;
+	void (*standby_onchip_mem)(unsigned long, unsigned long);
+
+	/* code located directly after data structure */
+	standby_onchip_mem = (void *)(sdp + 1);
+
+	atomic_notifier_call_chain(&sh_mobile_pre_sleep_notifier_list,
+				   mode, NULL);
+
+	/* flush the caches if MMU flag is set */
+	if (mode & SUSP_SH_MMU)
+		flush_cache_all();

 	/* Let assembly snippet in on-chip memory handle the rest */
-	standby_onchip_mem(mode, ILRAM_BASE);
+	standby_onchip_mem(mode, RAM_BASE);
+
+	atomic_notifier_call_chain(&sh_mobile_post_sleep_notifier_list,
+				   mode, NULL);
+}
+
+extern char sh_mobile_sleep_enter_start;
+extern char sh_mobile_sleep_enter_end;
+
+extern char sh_mobile_sleep_resume_start;
+extern char sh_mobile_sleep_resume_end;
+
+unsigned long sh_mobile_sleep_supported = SUSP_SH_SLEEP;
+
+void sh_mobile_register_self_refresh(unsigned long flags,
+				     void *pre_start, void *pre_end,
+				     void *post_start, void *post_end)
+{
+	void *onchip_mem = (void *)RAM_BASE;
+	void *vp;
+	struct sh_sleep_data *sdp;
+	int n;
+
+	/* part 0: data area */
+	sdp = onchip_mem;
+	sdp->addr.stbcr = 0xa4150020; /* STBCR */
+	sdp->addr.bar = 0xa4150040; /* BAR */
+	sdp->addr.pteh = 0xff000000; /* PTEH */
+	sdp->addr.ptel = 0xff000004; /* PTEL */
+	sdp->addr.ttb = 0xff000008; /* TTB */
+	sdp->addr.tea = 0xff00000c; /* TEA */
+	sdp->addr.mmucr = 0xff000010; /* MMUCR */
+	sdp->addr.ptea = 0xff000034; /* PTEA */
+	sdp->addr.pascr = 0xff000070; /* PASCR */
+	sdp->addr.irmcr = 0xff000078; /* IRMCR */
+	sdp->addr.ccr = 0xff00001c; /* CCR */
+	sdp->addr.ramcr = 0xff000074; /* RAMCR */
+	vp = sdp + 1;
+
+	/* part 1: common code to enter sleep mode */
+	n = &sh_mobile_sleep_enter_end - &sh_mobile_sleep_enter_start;
+	memcpy(vp, &sh_mobile_sleep_enter_start, n);
+	vp += roundup(n, 4);
+
+	/* part 2: board specific code to enter self-refresh mode */
+	n = pre_end - pre_start;
+	memcpy(vp, pre_start, n);
+	sdp->sf_pre = (unsigned long)vp;
+	vp += roundup(n, 4);
+
+	/* part 3: board specific code to resume from self-refresh mode */
+	n = post_end - post_start;
+	memcpy(vp, post_start, n);
+	sdp->sf_post = (unsigned long)vp;
+	vp += roundup(n, 4);
+
+	/* part 4: common code to resume from sleep mode */
+	WARN_ON(vp > (onchip_mem + 0x600));
+	vp = onchip_mem + 0x600; /* located at interrupt vector */
+	n = &sh_mobile_sleep_resume_end - &sh_mobile_sleep_resume_start;
+	memcpy(vp, &sh_mobile_sleep_resume_start, n);
+	sdp->resume = (unsigned long)vp;
+
+	sh_mobile_sleep_supported |= flags;
 }

 static int sh_pm_enter(suspend_state_t state)
 {
+	if (!(sh_mobile_sleep_supported & SUSP_MODE_STANDBY_SF))
+		return -ENXIO;
+
 	local_irq_disable();
 	set_bl_bit();
 	sh_mobile_call_standby(SUSP_MODE_STANDBY_SF);
@@ -65,13 +147,6 @@ static struct platform_suspend_ops sh_pm_ops = {

 static int __init sh_pm_init(void)
 {
-	void *onchip_mem = (void *)ILRAM_BASE;
-
-	/* Copy the assembly snippet to the otherwise ununsed ILRAM */
-	memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size);
-	wmb();
-	ctrl_barrier();
-
 	suspend_set_ops(&sh_pm_ops);
 	sh_mobile_setup_cpuidle();
 	return 0;
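
With this rework the on-chip memory is laid out as a sh_sleep_data area followed by the copied code fragments, and boards hook into the sleep path through the two notifier chains declared above instead of patching the common code. A sketch of registering a pre-sleep callback; demo_pre_sleep(), demo_nb, and demo_init() are hypothetical names, and the extern declaration stands in for whatever header actually exports the chain:

    #include <linux/init.h>
    #include <linux/notifier.h>

    extern struct atomic_notifier_head sh_mobile_pre_sleep_notifier_list;

    /* hypothetical board hook, runs before the on-chip sleep code */
    static int demo_pre_sleep(struct notifier_block *nb,
    			  unsigned long mode, void *unused)
    {
    	/* quiesce board-specific hardware for the given sleep mode */
    	return NOTIFY_DONE;
    }

    static struct notifier_block demo_nb = {
    	.notifier_call = demo_pre_sleep,
    };

    static int __init demo_init(void)
    {
    	return atomic_notifier_chain_register(
    			&sh_mobile_pre_sleep_notifier_list, &demo_nb);
    }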
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
index 7c615b17e209..6dcb8166a64d 100644
--- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c
+++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
@@ -45,12 +45,14 @@ static int __platform_pm_runtime_resume(struct platform_device *pdev)

 	dev_dbg(d, "__platform_pm_runtime_resume() [%d]\n", hwblk);

-	if (d->driver && d->driver->pm && d->driver->pm->runtime_resume) {
+	if (d->driver) {
 		hwblk_enable(hwblk_info, hwblk);
 		ret = 0;

 		if (test_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags)) {
-			ret = d->driver->pm->runtime_resume(d);
+			if (d->driver->pm && d->driver->pm->runtime_resume)
+				ret = d->driver->pm->runtime_resume(d);
+
 			if (!ret)
 				clear_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags);
 			else
@@ -73,12 +75,15 @@ static int __platform_pm_runtime_suspend(struct platform_device *pdev)

 	dev_dbg(d, "__platform_pm_runtime_suspend() [%d]\n", hwblk);

-	if (d->driver && d->driver->pm && d->driver->pm->runtime_suspend) {
+	if (d->driver) {
 		BUG_ON(!test_bit(PDEV_ARCHDATA_FLAG_IDLE, &ad->flags));
+		ret = 0;

-		hwblk_enable(hwblk_info, hwblk);
-		ret = d->driver->pm->runtime_suspend(d);
-		hwblk_disable(hwblk_info, hwblk);
+		if (d->driver->pm && d->driver->pm->runtime_suspend) {
+			hwblk_enable(hwblk_info, hwblk);
+			ret = d->driver->pm->runtime_suspend(d);
+			hwblk_disable(hwblk_info, hwblk);
+		}

 		if (!ret) {
 			set_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags);
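
The fix here is "guard the optional callback, not the whole block": hwblk power state is now managed even for drivers that provide no dev_pm_ops, while the runtime callbacks stay optional. A condensed sketch of the reworked dispatch shape; demo_runtime_suspend() is hypothetical and the error handling is elided:

    #include <linux/device.h>

    static int demo_runtime_suspend(struct device *d)
    {
    	int ret = 0;

    	if (!d->driver)
    		return -ENODEV;	/* assumption: nothing bound, nothing to do */

    	/* power bookkeeping would happen here unconditionally ... */

    	/* ... and the driver callback only if it actually exists */
    	if (d->driver->pm && d->driver->pm->runtime_suspend)
    		ret = d->driver->pm->runtime_suspend(d);

    	return ret;
    }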
diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S
index a439e6c7824f..e9dd7fa0abd2 100644
--- a/arch/sh/kernel/cpu/shmobile/sleep.S
+++ b/arch/sh/kernel/cpu/shmobile/sleep.S
@@ -20,79 +20,103 @@
  * Kernel mode register usage, see entry.S:
  *	k0	scratch
  *	k1	scratch
- *	k4	scratch
  */
 #define k0	r0
 #define k1	r1
-#define k4	r4

-/* manage self-refresh and enter standby mode.
+/* manage self-refresh and enter standby mode. must be self-contained.
  * this code will be copied to on-chip memory and executed from there.
  */
+	.balign 4
+ENTRY(sh_mobile_sleep_enter_start)

-	.balign 4096,0,4096
-ENTRY(sh_mobile_standby)
+	/* save mode flags */
+	mov.l	r4, @(SH_SLEEP_MODE, r5)

 	/* save original vbr */
-	stc	vbr, r1
-	mova	saved_vbr, r0
-	mov.l	r1, @r0
+	stc	vbr, r0
+	mov.l	r0, @(SH_SLEEP_VBR, r5)

 	/* point vbr to our on-chip memory page */
 	ldc	r5, vbr

 	/* save return address */
-	mova	saved_spc, r0
-	sts	pr, r5
-	mov.l	r5, @r0
+	sts	pr, r0
+	mov.l	r0, @(SH_SLEEP_SPC, r5)

 	/* save sr */
-	mova	saved_sr, r0
-	stc	sr, r5
-	mov.l	r5, @r0
+	stc	sr, r0
+	mov.l	r0, @(SH_SLEEP_SR, r5)

-	/* save mode flags */
-	mova	saved_mode, r0
-	mov.l	r4, @r0
+	/* save sp */
+	mov.l	r15, @(SH_SLEEP_SP, r5)
+
+	/* save stbcr */
+	bsr	save_register
+	mov	#SH_SLEEP_REG_STBCR, r0
+
+	/* save mmu and cache context if needed */
+	mov.l	@(SH_SLEEP_MODE, r5), r0
+	tst	#SUSP_SH_MMU, r0
+	bt	skip_mmu_save_disable
+
+	/* save mmu state */
+	bsr	save_register
+	mov	#SH_SLEEP_REG_PTEH, r0
+
+	bsr	save_register
+	mov	#SH_SLEEP_REG_PTEL, r0
+
+	bsr	save_register
+	mov	#SH_SLEEP_REG_TTB, r0
+
+	bsr	save_register
+	mov	#SH_SLEEP_REG_TEA, r0
+
+	bsr	save_register
+	mov	#SH_SLEEP_REG_MMUCR, r0
+
+	bsr	save_register
+	mov	#SH_SLEEP_REG_PTEA, r0
+
+	bsr	save_register
+	mov	#SH_SLEEP_REG_PASCR, r0

-	/* put mode flags in r0 */
-	mov	r4, r0
+	bsr	save_register
+	mov	#SH_SLEEP_REG_IRMCR, r0

+	/* invalidate TLBs and disable the MMU */
+	bsr	get_register
+	mov	#SH_SLEEP_REG_MMUCR, r0
+	mov	#4, r1
+	mov.l	r1, @r0
+	icbi	@r0
+
+	/* save cache registers and disable caches */
+	bsr	save_register
+	mov	#SH_SLEEP_REG_CCR, r0
+
+	bsr	save_register
+	mov	#SH_SLEEP_REG_RAMCR, r0
+
+	bsr	get_register
+	mov	#SH_SLEEP_REG_CCR, r0
+	mov	#0, r1
+	mov.l	r1, @r0
+	icbi	@r0
+
+skip_mmu_save_disable:
+	/* call self-refresh entering code if needed */
+	mov.l	@(SH_SLEEP_MODE, r5), r0
 	tst	#SUSP_SH_SF, r0
 	bt	skip_set_sf
-#ifdef CONFIG_CPU_SUBTYPE_SH7724
-	/* DBSC: put memory in self-refresh mode */
-	mov.l	dben_reg, r4
-	mov.l	dben_data0, r1
-	mov.l	r1, @r4
-
-	mov.l	dbrfpdn0_reg, r4
-	mov.l	dbrfpdn0_data0, r1
-	mov.l	r1, @r4
-
-	mov.l	dbcmdcnt_reg, r4
-	mov.l	dbcmdcnt_data0, r1
-	mov.l	r1, @r4
-
-	mov.l	dbcmdcnt_reg, r4
-	mov.l	dbcmdcnt_data1, r1
-	mov.l	r1, @r4
-
-	mov.l	dbrfpdn0_reg, r4
-	mov.l	dbrfpdn0_data1, r1
-	mov.l	r1, @r4
-#else
-	/* SBSC: disable power down and put in self-refresh mode */
-	mov.l	1f, r4
-	mov.l	2f, r1
-	mov.l	@r4, r2
-	or	r1, r2
-	mov.l	3f, r3
-	and	r3, r2
-	mov.l	r2, @r4
-#endif
+
+	mov.l	@(SH_SLEEP_SF_PRE, r5), r0
+	jsr	@r0
+	nop

 skip_set_sf:
+	mov.l	@(SH_SLEEP_MODE, r5), r0
 	tst	#SUSP_SH_STANDBY, r0
 	bt	test_rstandby

@@ -104,6 +128,12 @@ test_rstandby:
 	tst	#SUSP_SH_RSTANDBY, r0
 	bt	test_ustandby

+	/* setup BAR register */
+	bsr	get_register
+	mov	#SH_SLEEP_REG_BAR, r0
+	mov.l	@(SH_SLEEP_RESUME, r5), r1
+	mov.l	r1, @r0
+
 	/* set mode to "r-standby mode" */
 	bra	do_sleep
 	mov	#0x20, r1
@@ -123,124 +153,136 @@ force_sleep:

 do_sleep:
 	/* setup and enter selected standby mode */
-	mov.l	5f, r4
-	mov.l	r1, @r4
+	bsr	get_register
+	mov	#SH_SLEEP_REG_STBCR, r0
+	mov.l	r1, @r0
 again:
 	sleep
 	bra	again
 	nop

-restore_jump_vbr:
+save_register:
+	add	#SH_SLEEP_BASE_ADDR, r0
+	mov.l	@(r0, r5), r1
+	add	#-SH_SLEEP_BASE_ADDR, r0
+	mov.l	@r1, r1
+	add	#SH_SLEEP_BASE_DATA, r0
+	mov.l	r1, @(r0, r5)
+	add	#-SH_SLEEP_BASE_DATA, r0
+	rts
+	nop
+
+get_register:
+	add	#SH_SLEEP_BASE_ADDR, r0
+	mov.l	@(r0, r5), r0
+	rts
+	nop
+ENTRY(sh_mobile_sleep_enter_end)
+
+	.balign 4
+ENTRY(sh_mobile_sleep_resume_start)
+
+	/* figure out start address */
+	bsr	0f
+	nop
+0:
+	sts	pr, k1
+	mov.l	1f, k0
+	and	k0, k1
+
+	/* store pointer to data area in VBR */
+	ldc	k1, vbr
+
+	/* setup sr with saved sr */
+	mov.l	@(SH_SLEEP_SR, k1), k0
+	ldc	k0, sr
+
+	/* now: user register set! */
+	stc	vbr, r5
+
 	/* setup spc with return address to c code */
-	mov.l	saved_spc, k0
-	ldc	k0, spc
+	mov.l	@(SH_SLEEP_SPC, r5), r0
+	ldc	r0, spc

 	/* restore vbr */
-	mov.l	saved_vbr, k0
-	ldc	k0, vbr
+	mov.l	@(SH_SLEEP_VBR, r5), r0
+	ldc	r0, vbr

 	/* setup ssr with saved sr */
-	mov.l	saved_sr, k0
-	ldc	k0, ssr
+	mov.l	@(SH_SLEEP_SR, r5), r0
+	ldc	r0, ssr

-	/* get mode flags */
-	mov.l	saved_mode, k0
+	/* restore sp */
+	mov.l	@(SH_SLEEP_SP, r5), r15

-done_sleep:
-	/* reset standby mode to sleep mode */
-	mov.l	5f, k4
-	mov	#0x00, k1
-	mov.l	k1, @k4
+	/* restore sleep mode register */
+	bsr	restore_register
+	mov	#SH_SLEEP_REG_STBCR, r0

-	tst	#SUSP_SH_SF, k0
+	/* call self-refresh resume code if needed */
+	mov.l	@(SH_SLEEP_MODE, r5), r0
+	tst	#SUSP_SH_SF, r0
 	bt	skip_restore_sf

-#ifdef CONFIG_CPU_SUBTYPE_SH7724
-	/* DBSC: put memory in auto-refresh mode */
-	mov.l	dbrfpdn0_reg, k4
-	mov.l	dbrfpdn0_data0, k1
-	mov.l	k1, @k4
-
-	nop /* sleep 140 ns */
-	nop
-	nop
-	nop
-
-	mov.l	dbcmdcnt_reg, k4
-	mov.l	dbcmdcnt_data0, k1
-	mov.l	k1, @k4
-
-	mov.l	dbcmdcnt_reg, k4
-	mov.l	dbcmdcnt_data1, k1
-	mov.l	k1, @k4
-
-	mov.l	dben_reg, k4
-	mov.l	dben_data1, k1
-	mov.l	k1, @k4
-
-	mov.l	dbrfpdn0_reg, k4
-	mov.l	dbrfpdn0_data2, k1
-	mov.l	k1, @k4
-#else
-	/* SBSC: set auto-refresh mode */
-	mov.l	1f, k4
-	mov.l	@k4, k0
-	mov.l	4f, k1
-	and	k1, k0
-	mov.l	k0, @k4
-	mov.l	6f, k4
-	mov.l	8f, k0
-	mov.l	@k4, k1
-	mov	#-1, k4
-	add	k4, k1
-	or	k1, k0
-	mov.l	7f, k1
-	mov.l	k0, @k1
-#endif
+	mov.l	@(SH_SLEEP_SF_POST, r5), r0
+	jsr	@r0
+	nop
+
 skip_restore_sf:
-	/* jump to vbr vector */
-	mov.l	saved_vbr, k0
-	mov.l	offset_vbr, k4
-	add	k4, k0
-	jmp	@k0
+	/* restore mmu and cache state if needed */
+	mov.l	@(SH_SLEEP_MODE, r5), r0
+	tst	#SUSP_SH_MMU, r0
+	bt	skip_restore_mmu
+
+	/* restore mmu state */
+	bsr	restore_register
+	mov	#SH_SLEEP_REG_PTEH, r0
+
+	bsr	restore_register
+	mov	#SH_SLEEP_REG_PTEL, r0
+
+	bsr	restore_register
+	mov	#SH_SLEEP_REG_TTB, r0
+
+	bsr	restore_register
+	mov	#SH_SLEEP_REG_TEA, r0
+
+	bsr	restore_register
+	mov	#SH_SLEEP_REG_PTEA, r0
+
+	bsr	restore_register
+	mov	#SH_SLEEP_REG_PASCR, r0
+
+	bsr	restore_register
+	mov	#SH_SLEEP_REG_IRMCR, r0
+
+	bsr	restore_register
+	mov	#SH_SLEEP_REG_MMUCR, r0
+	icbi	@r0
+
+	/* restore cache settings */
+	bsr	restore_register
+	mov	#SH_SLEEP_REG_RAMCR, r0
+	icbi	@r0
+
+	bsr	restore_register
+	mov	#SH_SLEEP_REG_CCR, r0
+	icbi	@r0
+
+skip_restore_mmu:
+	rte
 	nop

-	.balign 4
-saved_mode:	.long	0
-saved_spc:	.long	0
-saved_sr:	.long	0
-saved_vbr:	.long	0
-offset_vbr:	.long	0x600
-#ifdef CONFIG_CPU_SUBTYPE_SH7724
-dben_reg:	.long	0xfd000010 /* DBEN */
-dben_data0:	.long	0
-dben_data1:	.long	1
-dbrfpdn0_reg:	.long	0xfd000040 /* DBRFPDN0 */
-dbrfpdn0_data0:	.long	0
-dbrfpdn0_data1:	.long	1
-dbrfpdn0_data2:	.long	0x00010000
-dbcmdcnt_reg:	.long	0xfd000014 /* DBCMDCNT */
-dbcmdcnt_data0:	.long	2
-dbcmdcnt_data1:	.long	4
-#else
-1:	.long	0xfe400008 /* SDCR0 */
-2:	.long	0x00000400
-3:	.long	0xffff7fff
-4:	.long	0xfffffbff
-#endif
-5:	.long	0xa4150020 /* STBCR */
-6:	.long	0xfe40001c /* RTCOR */
-7:	.long	0xfe400018 /* RTCNT */
-8:	.long	0xa55a0000
-
-
-/* interrupt vector @ 0x600 */
-	.balign	0x400,0,0x400
-	.long	0xdeadbeef
-	.balign	0x200,0,0x200
-	bra	restore_jump_vbr
+restore_register:
+	add	#SH_SLEEP_BASE_DATA, r0
+	mov.l	@(r0, r5), r1
+	add	#-SH_SLEEP_BASE_DATA, r0
+	add	#SH_SLEEP_BASE_ADDR, r0
+	mov.l	@(r0, r5), r0
+	mov.l	r1, @r0
+	rts
 	nop
-sh_mobile_standby_end:

-ENTRY(sh_mobile_standby_size)
-	.long sh_mobile_standby_end - sh_mobile_standby
+	.balign 4
+1:	.long	~0x7ff
+ENTRY(sh_mobile_sleep_resume_end)
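
The save_register/get_register/restore_register helpers above all index two parallel tables inside the sh_sleep_data area: an address table at SH_SLEEP_BASE_ADDR and a value table at SH_SLEEP_BASE_DATA, with r5 holding the area base and r0 a SH_SLEEP_REG_* byte offset. A C model of save_register, purely for orientation; the real offsets come from asm-offsets and the values below are placeholders:

    #include <stdint.h>

    #define SH_SLEEP_BASE_ADDR	0x40	/* placeholder offsets */
    #define SH_SLEEP_BASE_DATA	0x80

    static void demo_save_register(uint8_t *base, int reg)
    {
    	/* fetch the register's MMIO address from the addr table ... */
    	uintptr_t addr = *(uintptr_t *)(base + SH_SLEEP_BASE_ADDR + reg);
    	/* ... read the register, and stash the value in the data table */
    	uint32_t val = *(volatile uint32_t *)addr;

    	*(uint32_t *)(base + SH_SLEEP_BASE_DATA + reg) = val;
    }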
diff --git a/arch/sh/kernel/cpu/ubc.S b/arch/sh/kernel/cpu/ubc.S
deleted file mode 100644
index 81923079fa12..000000000000
--- a/arch/sh/kernel/cpu/ubc.S
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * arch/sh/kernel/cpu/ubc.S
- *
- * Set of management routines for the User Break Controller (UBC)
- *
- * Copyright (C) 2002 Paul Mundt
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#include <linux/linkage.h>
-#include <asm/ubc.h>
-
-#define STBCR2		0xffc00010
-
-ENTRY(ubc_sleep)
-	mov	#0, r0
-
-	mov.l	1f, r1		! Zero out UBC_BBRA ..
-	mov.w	r0, @r1
-
-	mov.l	2f, r1		! .. same for BBRB ..
-	mov.w	r0, @r1
-
-	mov.l	3f, r1		! .. and again for BRCR.
-	mov.w	r0, @r1
-
-	mov.w	@r1, r0		! Dummy read BRCR
-
-	mov.l	4f, r1		! Set MSTP5 in STBCR2
-	mov.b	@r1, r0
-	or	#0x01, r0
-	mov.b	r0, @r1
-
-	mov.b	@r1, r0		! Two dummy reads ..
-	mov.b	@r1, r0
-
-	rts
-	nop
-
-ENTRY(ubc_wakeup)
-	mov.l	4f, r1		! Clear MSTP5
-	mov.b	@r1, r0
-	and	#0xfe, r0
-	mov.b	r0, @r1
-
-	mov.b	@r1, r0		! Two more dummy reads ..
-	mov.b	@r1, r0
-
-	rts
-	nop
-
-1:	.long	UBC_BBRA
-2:	.long	UBC_BBRB
-3:	.long	UBC_BRCR
-4:	.long	STBCR2
-
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
new file mode 100644
index 000000000000..3c55b87f8b63
--- /dev/null
+++ b/arch/sh/kernel/dma-nommu.c
@@ -0,0 +1,82 @@
+/*
+ * DMA mapping support for platforms lacking IOMMUs.
+ *
+ * Copyright (C) 2009  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir,
+				 struct dma_attrs *attrs)
+{
+	dma_addr_t addr = page_to_phys(page) + offset;
+
+	WARN_ON(size == 0);
+	dma_cache_sync(dev, page_address(page) + offset, size, dir);
+
+	return addr;
+}
+
+static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir,
+			struct dma_attrs *attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	WARN_ON(nents == 0 || sg[0].length == 0);
+
+	for_each_sg(sg, s, nents, i) {
+		BUG_ON(!sg_page(s));
+
+		dma_cache_sync(dev, sg_virt(s), s->length, dir);
+
+		s->dma_address = sg_phys(s);
+		s->dma_length = s->length;
+	}
+
+	return nents;
+}
+
+#ifdef CONFIG_DMA_NONCOHERENT
+static void nommu_sync_single(struct device *dev, dma_addr_t addr,
+			      size_t size, enum dma_data_direction dir)
+{
+	dma_cache_sync(dev, phys_to_virt(addr), size, dir);
+}
+
+static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
+			  int nelems, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nelems, i)
+		dma_cache_sync(dev, sg_virt(s), s->length, dir);
+}
+#endif
+
+struct dma_map_ops nommu_dma_ops = {
+	.alloc_coherent		= dma_generic_alloc_coherent,
+	.free_coherent		= dma_generic_free_coherent,
+	.map_page		= nommu_map_page,
+	.map_sg			= nommu_map_sg,
+#ifdef CONFIG_DMA_NONCOHERENT
+	.sync_single_for_device	= nommu_sync_single,
+	.sync_sg_for_device	= nommu_sync_sg,
+#endif
+	.is_phys		= 1,
+};
+
+void __init no_iommu_init(void)
+{
+	if (dma_ops)
+		return;
+	dma_ops = &nommu_dma_ops;
+}
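
With this file in place, drivers never call the nommu_* functions directly; they go through the generic DMA API, which (assuming SH dispatches through dma_map_ops after this series, as the file suggests) lands in nommu_map_page on IOMMU-less parts. A usage sketch; dev and page are assumed to exist in the caller's context:

    #include <linux/dma-mapping.h>

    static dma_addr_t demo_map(struct device *dev, struct page *page)
    {
    	/* syncs the cache and returns the physical address as the
    	 * DMA handle, per nommu_map_page() above */
    	return dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
    }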
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index d76a23170dbb..3576b709f052 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -20,6 +20,7 @@
 #include <linux/list.h>
 #include <linux/mempool.h>
 #include <linux/mm.h>
+#include <linux/elf.h>
 #include <linux/ftrace.h>
 #include <asm/dwarf.h>
 #include <asm/unwinder.h>
@@ -530,7 +531,18 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 }

 /**
- * dwarf_unwind_stack - recursively unwind the stack
+ * dwarf_free_frame - free the memory allocated for @frame
+ * @frame: the frame to free
+ */
+void dwarf_free_frame(struct dwarf_frame *frame)
+{
+	dwarf_frame_free_regs(frame);
+	mempool_free(frame, dwarf_frame_pool);
+}
+
+/**
+ * dwarf_unwind_stack - unwind the stack
+ *
  * @pc: address of the function to unwind
  * @prev: struct dwarf_frame of the previous stackframe on the callstack
  *
@@ -548,9 +560,9 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	unsigned long addr;

 	/*
-	 * If this is the first invocation of this recursive function we
-	 * need get the contents of a physical register to get the CFA
-	 * in order to begin the virtual unwinding of the stack.
+	 * If we're starting at the top of the stack we need get the
+	 * contents of a physical register to get the CFA in order to
+	 * begin the virtual unwinding of the stack.
 	 *
 	 * NOTE: the return address is guaranteed to be setup by the
 	 * time this function makes its first function call.
@@ -593,9 +605,8 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	fde = dwarf_lookup_fde(pc);
 	if (!fde) {
 		/*
-		 * This is our normal exit path - the one that stops the
-		 * recursion. There's two reasons why we might exit
-		 * here,
+		 * This is our normal exit path. There are two reasons
+		 * why we might exit here,
 		 *
 		 * a) pc has no asscociated DWARF frame info and so
 		 *    we don't know how to unwind this frame. This is
@@ -637,10 +648,10 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,

 	} else {
 		/*
-		 * Again, this is the first invocation of this
-		 * recurisve function. We need to physically
-		 * read the contents of a register in order to
-		 * get the Canonical Frame Address for this
+		 * Again, we're starting from the top of the
+		 * stack. We need to physically read
+		 * the contents of a register in order to get
+		 * the Canonical Frame Address for this
 		 * function.
 		 */
 		frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
@@ -670,13 +681,12 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	return frame;

 bail:
-	dwarf_frame_free_regs(frame);
-	mempool_free(frame, dwarf_frame_pool);
+	dwarf_free_frame(frame);
 	return NULL;
 }

 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
-			   unsigned char *end)
+			   unsigned char *end, struct module *mod)
 {
 	struct dwarf_cie *cie;
 	unsigned long flags;
@@ -772,6 +782,8 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 	cie->initial_instructions = p;
 	cie->instructions_end = end;

+	cie->mod = mod;
+
 	/* Add to list */
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
 	list_add_tail(&cie->link, &dwarf_cie_list);
@@ -782,7 +794,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,

 static int dwarf_parse_fde(void *entry, u32 entry_type,
 			   void *start, unsigned long len,
-			   unsigned char *end)
+			   unsigned char *end, struct module *mod)
 {
 	struct dwarf_fde *fde;
 	struct dwarf_cie *cie;
@@ -831,6 +843,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
 	fde->instructions = p;
 	fde->end = end;

+	fde->mod = mod;
+
 	/* Add to list. */
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
 	list_add_tail(&fde->link, &dwarf_fde_list);
@@ -854,10 +868,8 @@ static void dwarf_unwinder_dump(struct task_struct *task,
 	while (1) {
 		frame = dwarf_unwind_stack(return_addr, _frame);

-		if (_frame) {
-			dwarf_frame_free_regs(_frame);
-			mempool_free(_frame, dwarf_frame_pool);
-		}
+		if (_frame)
+			dwarf_free_frame(_frame);

 		_frame = frame;

@@ -867,6 +879,9 @@ static void dwarf_unwinder_dump(struct task_struct *task,
 		return_addr = frame->return_addr;
 		ops->address(data, return_addr, 1);
 	}
+
+	if (frame)
+		dwarf_free_frame(frame);
 }

 static struct unwinder dwarf_unwinder = {
@@ -896,48 +911,28 @@ static void dwarf_unwinder_cleanup(void)
 }

 /**
- * dwarf_unwinder_init - initialise the dwarf unwinder
+ * dwarf_parse_section - parse DWARF section
+ * @eh_frame_start: start address of the .eh_frame section
+ * @eh_frame_end: end address of the .eh_frame section
+ * @mod: the kernel module containing the .eh_frame section
  *
- * Build the data structures describing the .dwarf_frame section to
- * make it easier to lookup CIE and FDE entries. Because the
- * .eh_frame section is packed as tightly as possible it is not
- * easy to lookup the FDE for a given PC, so we build a list of FDE
- * and CIE entries that make it easier.
+ * Parse the information in a .eh_frame section.
  */
-static int __init dwarf_unwinder_init(void)
+static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
+			       struct module *mod)
 {
 	u32 entry_type;
 	void *p, *entry;
 	int count, err = 0;
-	unsigned long len;
+	unsigned long len = 0;
 	unsigned int c_entries, f_entries;
 	unsigned char *end;
-	INIT_LIST_HEAD(&dwarf_cie_list);
-	INIT_LIST_HEAD(&dwarf_fde_list);

 	c_entries = 0;
 	f_entries = 0;
-	entry = &__start_eh_frame;
-
-	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
-			sizeof(struct dwarf_frame), 0,
-			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
-
-	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
-			sizeof(struct dwarf_reg), 0,
-			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+	entry = eh_frame_start;

-	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
-					  mempool_alloc_slab,
-					  mempool_free_slab,
-					  dwarf_frame_cachep);
-
-	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
-					mempool_alloc_slab,
-					mempool_free_slab,
-					dwarf_reg_cachep);
-
-	while ((char *)entry < __stop_eh_frame) {
+	while ((char *)entry < eh_frame_end) {
 		p = entry;

 		count = dwarf_entry_len(p, &len);
@@ -949,6 +944,7 @@ static int __init dwarf_unwinder_init(void)
 			 * entry and move to the next one because 'len'
 			 * tells us where our next entry is.
 			 */
+			err = -EINVAL;
 			goto out;
 		} else
 			p += count;
@@ -960,13 +956,14 @@ static int __init dwarf_unwinder_init(void)
 		p += 4;

 		if (entry_type == DW_EH_FRAME_CIE) {
-			err = dwarf_parse_cie(entry, p, len, end);
+			err = dwarf_parse_cie(entry, p, len, end, mod);
 			if (err < 0)
 				goto out;
 			else
 				c_entries++;
 		} else {
-			err = dwarf_parse_fde(entry, entry_type, p, len, end);
+			err = dwarf_parse_fde(entry, entry_type, p, len,
+					      end, mod);
 			if (err < 0)
 				goto out;
 			else
@@ -979,6 +976,129 @@ static int __init dwarf_unwinder_init(void)
 	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
 	       c_entries, f_entries);

+	return 0;
+
+out:
+	return err;
+}
+
+#ifdef CONFIG_MODULES
+int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+			  struct module *me)
+{
+	unsigned int i, err;
+	unsigned long start, end;
+	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	start = end = 0;
+
+	for (i = 1; i < hdr->e_shnum; i++) {
+		/* Alloc bit cleared means "ignore it." */
+		if ((sechdrs[i].sh_flags & SHF_ALLOC)
+		    && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
+			start = sechdrs[i].sh_addr;
+			end = start + sechdrs[i].sh_size;
+			break;
+		}
+	}
+
+	/* Did we find the .eh_frame section? */
+	if (i != hdr->e_shnum) {
+		err = dwarf_parse_section((char *)start, (char *)end, me);
+		if (err) {
+			printk(KERN_WARNING "%s: failed to parse DWARF info\n",
+			       me->name);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * module_dwarf_cleanup - remove FDE/CIEs associated with @mod
+ * @mod: the module that is being unloaded
+ *
+ * Remove any FDEs and CIEs from the global lists that came from
+ * @mod's .eh_frame section because @mod is being unloaded.
+ */
+void module_dwarf_cleanup(struct module *mod)
+{
+	struct dwarf_fde *fde;
+	struct dwarf_cie *cie;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+again_cie:
+	list_for_each_entry(cie, &dwarf_cie_list, link) {
+		if (cie->mod == mod)
+			break;
+	}
+
+	if (&cie->link != &dwarf_cie_list) {
+		list_del(&cie->link);
+		kfree(cie);
+		goto again_cie;
+	}
+
+	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+	spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+again_fde:
+	list_for_each_entry(fde, &dwarf_fde_list, link) {
+		if (fde->mod == mod)
+			break;
+	}
+
+	if (&fde->link != &dwarf_fde_list) {
+		list_del(&fde->link);
+		kfree(fde);
+		goto again_fde;
+	}
+
+	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+#endif /* CONFIG_MODULES */
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .dwarf_frame section to
+ * make it easier to lookup CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to lookup the FDE for a given PC, so we build a list of FDE
+ * and CIE entries that make it easier.
+ */
+static int __init dwarf_unwinder_init(void)
+{
+	int err;
+	INIT_LIST_HEAD(&dwarf_cie_list);
+	INIT_LIST_HEAD(&dwarf_fde_list);
+
+	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+			sizeof(struct dwarf_frame), 0,
+			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+			sizeof(struct dwarf_reg), 0,
+			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+					  mempool_alloc_slab,
+					  mempool_free_slab,
+					  dwarf_frame_cachep);
+
+	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+					mempool_alloc_slab,
+					mempool_free_slab,
+					dwarf_reg_cachep);
+
+	err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
+	if (err)
+		goto out;
+
 	err = unwinder_register(&dwarf_unwinder);
 	if (err)
 		goto out;
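
module_dwarf_cleanup() above deletes matching entries with a restart-the-scan-after-each-delete loop. The same result can be had in a single pass with the _safe iterator, which caches the next node before the current one is freed; a sketch with a hypothetical entry type:

    #include <linux/list.h>
    #include <linux/module.h>
    #include <linux/slab.h>

    struct demo_entry {			/* hypothetical list element */
    	struct list_head link;
    	struct module *mod;
    };

    static void demo_cleanup(struct list_head *head, struct module *mod)
    {
    	struct demo_entry *e, *tmp;

    	/* safe against deletion of the current entry mid-walk */
    	list_for_each_entry_safe(e, tmp, head, link) {
    		if (e->mod == mod) {
    			list_del(&e->link);
    			kfree(e);
    		}
    	}
    }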
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 3eb84931d2aa..f0abd58c3a69 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -133,7 +133,7 @@ work_pending:
 	! r8: current_thread_info
 	! t:  result of "tst	#_TIF_NEED_RESCHED, r0"
 	bf/s	work_resched
-	 tst	#(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
+	 tst	#_TIF_SIGPENDING, r0
 work_notifysig:
 	bt/s	__restore_all
 	 mov	r15, r4
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 2c48e267256e..b6f41c109beb 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -62,6 +62,150 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	return ftrace_replaced_code;
 }

+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUS from exectuing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: An IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ *    and the new code into the "code" buffer.
+ * 2) Wait for any running NMIs to finish and set a flag that says
+ *    we are modifying code, it is done in an atomic operation.
+ * 3) Write the code
+ * 4) clear the flag.
+ * 5) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+#define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
+static atomic_t nmi_running = ATOMIC_INIT(0);
+static int mod_code_status;		/* holds return value of text write */
+static void *mod_code_ip;		/* holds the IP to write to */
+static void *mod_code_newcode;		/* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+	int r;
+
+	r = snprintf(buf, size, "%u %u",
+		     nmi_wait_count,
+		     atomic_read(&nmi_update_count));
+	return r;
+}
+
+static void clear_mod_flag(void)
+{
+	int old = atomic_read(&nmi_running);
+
+	for (;;) {
+		int new = old & ~MOD_CODE_WRITE_FLAG;
+
+		if (old == new)
+			break;
+
+		old = atomic_cmpxchg(&nmi_running, old, new);
+	}
+}
+
+static void ftrace_mod_code(void)
+{
+	/*
+	 * Yes, more than one CPU process can be writing to mod_code_status.
+	 * (and the code itself)
+	 * But if one were to fail, then they all should, and if one were
+	 * to succeed, then they all should.
+	 */
+	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+					     MCOUNT_INSN_SIZE);
+
+	/* if we fail, then kill any new writers */
+	if (mod_code_status)
+		clear_mod_flag();
+}
+
+void ftrace_nmi_enter(void)
+{
+	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+		smp_rmb();
+		ftrace_mod_code();
+		atomic_inc(&nmi_update_count);
+	}
+	/* Must have previous changes seen before executions */
+	smp_mb();
+}
+
+void ftrace_nmi_exit(void)
+{
+	/* Finish all executions before clearing nmi_running */
+	smp_mb();
+	atomic_dec(&nmi_running);
+}
+
+static void wait_for_nmi_and_set_mod_flag(void)
+{
+	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
+		return;
+
+	do {
+		cpu_relax();
+	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
+
+	nmi_wait_count++;
+}
+
+static void wait_for_nmi(void)
+{
+	if (!atomic_read(&nmi_running))
+		return;
+
+	do {
+		cpu_relax();
+	} while (atomic_read(&nmi_running));
+
+	nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+	mod_code_ip = (void *)ip;
+	mod_code_newcode = new_code;
+
+	/* The buffers need to be visible before we let NMIs write them */
+	smp_mb();
+
+	wait_for_nmi_and_set_mod_flag();
+
+	/* Make sure all running NMIs have finished before we write the code */
+	smp_mb();
+
+	ftrace_mod_code();
+
+	/* Make sure the write happens before clearing the bit */
+	smp_mb();
+
+	clear_mod_flag();
+	wait_for_nmi();
+
+	return mod_code_status;
+}
+
 static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		       unsigned char *new_code)
 {
@@ -86,7 +230,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		return -EINVAL;

 	/* replace the text with the new text */
-	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+	if (do_ftrace_mod_code(ip, new_code))
 		return -EPERM;

 	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
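
A detail worth calling out in the code above: nmi_running is a single atomic word that packs the writer's "modification in progress" flag into bit 31 and the count of in-flight NMIs into the low bits, so one cmpxchg can observe both. A standalone demonstration of the packing (plain integer arithmetic, no atomics):

    #include <stdio.h>

    #define MOD_CODE_WRITE_FLAG (1U << 31)

    int main(void)
    {
    	unsigned int nmi_running = 0;

    	nmi_running += 1;			/* NMI entry: count++ */
    	nmi_running |= MOD_CODE_WRITE_FLAG;	/* writer announces itself */

    	printf("nmis in flight: %u\n", nmi_running & ~MOD_CODE_WRITE_FLAG);
    	printf("write pending:  %u\n", !!(nmi_running & MOD_CODE_WRITE_FLAG));
    	return 0;
    }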
diff --git a/arch/sh/kernel/gpio.c b/arch/sh/kernel/gpio.c
deleted file mode 100644
index d22e5af699f9..000000000000
--- a/arch/sh/kernel/gpio.c
+++ /dev/null
@@ -1,584 +0,0 @@
-/*
- * Pinmuxed GPIO support for SuperH.
- *
- * Copyright (C) 2008 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/bitops.h>
-#include <linux/gpio.h>
-
-static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
-{
-	if (enum_id < r->begin)
-		return 0;
-
-	if (enum_id > r->end)
-		return 0;
-
-	return 1;
-}
-
-static unsigned long gpio_read_raw_reg(unsigned long reg,
-				       unsigned long reg_width)
-{
-	switch (reg_width) {
-	case 8:
-		return ctrl_inb(reg);
-	case 16:
-		return ctrl_inw(reg);
-	case 32:
-		return ctrl_inl(reg);
-	}
-
-	BUG();
-	return 0;
-}
-
-static void gpio_write_raw_reg(unsigned long reg,
-			       unsigned long reg_width,
-			       unsigned long data)
-{
-	switch (reg_width) {
-	case 8:
-		ctrl_outb(data, reg);
-		return;
-	case 16:
-		ctrl_outw(data, reg);
-		return;
-	case 32:
-		ctrl_outl(data, reg);
-		return;
-	}
-
-	BUG();
-}
-
-static void gpio_write_bit(struct pinmux_data_reg *dr,
-			   unsigned long in_pos, unsigned long value)
-{
-	unsigned long pos;
-
-	pos = dr->reg_width - (in_pos + 1);
-
-#ifdef DEBUG
-	pr_info("write_bit addr = %lx, value = %ld, pos = %ld, "
-		"r_width = %ld\n",
-		dr->reg, !!value, pos, dr->reg_width);
-#endif
-
-	if (value)
-		set_bit(pos, &dr->reg_shadow);
-	else
-		clear_bit(pos, &dr->reg_shadow);
-
-	gpio_write_raw_reg(dr->reg, dr->reg_width, dr->reg_shadow);
-}
-
-static int gpio_read_reg(unsigned long reg, unsigned long reg_width,
-			 unsigned long field_width, unsigned long in_pos)
-{
-	unsigned long data, mask, pos;
-
-	data = 0;
-	mask = (1 << field_width) - 1;
-	pos = reg_width - ((in_pos + 1) * field_width);
-
-#ifdef DEBUG
-	pr_info("read_reg: addr = %lx, pos = %ld, "
-		"r_width = %ld, f_width = %ld\n",
-		reg, pos, reg_width, field_width);
-#endif
-
-	data = gpio_read_raw_reg(reg, reg_width);
-	return (data >> pos) & mask;
-}
-
-static void gpio_write_reg(unsigned long reg, unsigned long reg_width,
-			   unsigned long field_width, unsigned long in_pos,
-			   unsigned long value)
-{
-	unsigned long mask, pos;
-
-	mask = (1 << field_width) - 1;
-	pos = reg_width - ((in_pos + 1) * field_width);
-
-#ifdef DEBUG
-	pr_info("write_reg addr = %lx, value = %ld, pos = %ld, "
-		"r_width = %ld, f_width = %ld\n",
-		reg, value, pos, reg_width, field_width);
-#endif
-
-	mask = ~(mask << pos);
-	value = value << pos;
-
-	switch (reg_width) {
-	case 8:
-		ctrl_outb((ctrl_inb(reg) & mask) | value, reg);
-		break;
-	case 16:
-		ctrl_outw((ctrl_inw(reg) & mask) | value, reg);
-		break;
-	case 32:
-		ctrl_outl((ctrl_inl(reg) & mask) | value, reg);
-		break;
-	}
-}
-
-static int setup_data_reg(struct pinmux_info *gpioc, unsigned gpio)
-{
-	struct pinmux_gpio *gpiop = &gpioc->gpios[gpio];
-	struct pinmux_data_reg *data_reg;
-	int k, n;
-
-	if (!enum_in_range(gpiop->enum_id, &gpioc->data))
-		return -1;
-
-	k = 0;
-	while (1) {
-		data_reg = gpioc->data_regs + k;
-
-		if (!data_reg->reg_width)
-			break;
-
-		for (n = 0; n < data_reg->reg_width; n++) {
-			if (data_reg->enum_ids[n] == gpiop->enum_id) {
-				gpiop->flags &= ~PINMUX_FLAG_DREG;
-				gpiop->flags |= (k << PINMUX_FLAG_DREG_SHIFT);
-				gpiop->flags &= ~PINMUX_FLAG_DBIT;
-				gpiop->flags |= (n << PINMUX_FLAG_DBIT_SHIFT);
-				return 0;
-			}
-		}
-		k++;
-	}
-
-	BUG();
-
-	return -1;
-}
-
-static void setup_data_regs(struct pinmux_info *gpioc)
-{
-	struct pinmux_data_reg *drp;
-	int k;
-
-	for (k = gpioc->first_gpio; k <= gpioc->last_gpio; k++)
-		setup_data_reg(gpioc, k);
-
-	k = 0;
-	while (1) {
-		drp = gpioc->data_regs + k;
-
-		if (!drp->reg_width)
-			break;
-
-		drp->reg_shadow = gpio_read_raw_reg(drp->reg, drp->reg_width);
-		k++;
-	}
-}
-
-static int get_data_reg(struct pinmux_info *gpioc, unsigned gpio,
-			struct pinmux_data_reg **drp, int *bitp)
-{
-	struct pinmux_gpio *gpiop = &gpioc->gpios[gpio];
-	int k, n;
-
-	if (!enum_in_range(gpiop->enum_id, &gpioc->data))
-		return -1;
-
-	k = (gpiop->flags & PINMUX_FLAG_DREG) >> PINMUX_FLAG_DREG_SHIFT;
-	n = (gpiop->flags & PINMUX_FLAG_DBIT) >> PINMUX_FLAG_DBIT_SHIFT;
-	*drp = gpioc->data_regs + k;
-	*bitp = n;
-	return 0;
-}
-
-static int get_config_reg(struct pinmux_info *gpioc, pinmux_enum_t enum_id,
-			  struct pinmux_cfg_reg **crp, int *indexp,
-			  unsigned long **cntp)
-{
-	struct pinmux_cfg_reg *config_reg;
-	unsigned long r_width, f_width;
-	int k, n;
-
-	k = 0;
-	while (1) {
-		config_reg = gpioc->cfg_regs + k;
-
-		r_width = config_reg->reg_width;
-		f_width = config_reg->field_width;
-
-		if (!r_width)
-			break;
-		for (n = 0; n < (r_width / f_width) * 1 << f_width; n++) {
-			if (config_reg->enum_ids[n] == enum_id) {
-				*crp = config_reg;
-				*indexp = n;
-				*cntp = &config_reg->cnt[n / (1 << f_width)];
-				return 0;
-			}
-		}
-		k++;
-	}
-
-	return -1;
-}
-
-static int get_gpio_enum_id(struct pinmux_info *gpioc, unsigned gpio,
-			    int pos, pinmux_enum_t *enum_idp)
-{
-	pinmux_enum_t enum_id = gpioc->gpios[gpio].enum_id;
-	pinmux_enum_t *data = gpioc->gpio_data;
-	int k;
-
-	if (!enum_in_range(enum_id, &gpioc->data)) {
-		if (!enum_in_range(enum_id, &gpioc->mark)) {
-			pr_err("non data/mark enum_id for gpio %d\n", gpio);
-			return -1;
-		}
-	}
-
-	if (pos) {
-		*enum_idp = data[pos + 1];
-		return pos + 1;
-	}
-
-	for (k = 0; k < gpioc->gpio_data_size; k++) {
-		if (data[k] == enum_id) {
-			*enum_idp = data[k + 1];
-			return k + 1;
-		}
-	}
-
-	pr_err("cannot locate data/mark enum_id for gpio %d\n", gpio);
-	return -1;
-}
-
-static void write_config_reg(struct pinmux_info *gpioc,
-			     struct pinmux_cfg_reg *crp,
-			     int index)
-{
-	unsigned long ncomb, pos, value;
-
-	ncomb = 1 << crp->field_width;
-	pos = index / ncomb;
-	value = index % ncomb;
-
-	gpio_write_reg(crp->reg, crp->reg_width, crp->field_width, pos, value);
-}
-
-static int check_config_reg(struct pinmux_info *gpioc,
-			    struct pinmux_cfg_reg *crp,
-			    int index)
-{
-	unsigned long ncomb, pos, value;
-
-	ncomb = 1 << crp->field_width;
-	pos = index / ncomb;
-	value = index % ncomb;
-
-	if (gpio_read_reg(crp->reg, crp->reg_width,
-			  crp->field_width, pos) == value)
-		return 0;
-
-	return -1;
-}
-
-enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };
-
-static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
-			      int pinmux_type, int cfg_mode)
-{
-	struct pinmux_cfg_reg *cr = NULL;
-	pinmux_enum_t enum_id;
-	struct pinmux_range *range;
-	int in_range, pos, index;
-	unsigned long *cntp;
-
-	switch (pinmux_type) {
-
-	case PINMUX_TYPE_FUNCTION:
-		range = NULL;
-		break;
-
-	case PINMUX_TYPE_OUTPUT:
-		range = &gpioc->output;
-		break;
-
-	case PINMUX_TYPE_INPUT:
-		range = &gpioc->input;
-		break;
-
-	case PINMUX_TYPE_INPUT_PULLUP:
-		range = &gpioc->input_pu;
-		break;
-
-	case PINMUX_TYPE_INPUT_PULLDOWN:
-		range = &gpioc->input_pd;
-		break;
-
-	default:
-		goto out_err;
-	}
-
-	pos = 0;
-	enum_id = 0;
-	index = 0;
-	while (1) {
-		pos = get_gpio_enum_id(gpioc, gpio, pos, &enum_id);
-		if (pos <= 0)
-			goto out_err;
-
-		if (!enum_id)
-			break;
-
-		in_range = enum_in_range(enum_id, &gpioc->function);
-		if (!in_range && range) {
-			in_range = enum_in_range(enum_id, range);
-
-			if (in_range && enum_id == range->force)
-				continue;
-		}
-
-		if (!in_range)
-			continue;
-
-		if (get_config_reg(gpioc, enum_id, &cr, &index, &cntp) != 0)
-			goto out_err;
-
-		switch (cfg_mode) {
-		case GPIO_CFG_DRYRUN:
-			if (!*cntp || !check_config_reg(gpioc, cr, index))
-				continue;
-			break;
-
-		case GPIO_CFG_REQ:
-			write_config_reg(gpioc, cr, index);
-			*cntp = *cntp + 1;
-			break;
-
-		case GPIO_CFG_FREE:
-			*cntp = *cntp - 1;
-			break;
-		}
-	}
-
-	return 0;
- out_err:
-	return -1;
-}
-
-static DEFINE_SPINLOCK(gpio_lock);
-
-static struct pinmux_info *chip_to_pinmux(struct gpio_chip *chip)
-{
-	return container_of(chip, struct pinmux_info, chip);
-}
-
-static int sh_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-	struct pinmux_info *gpioc = chip_to_pinmux(chip);
-	struct pinmux_data_reg *dummy;
-	unsigned long flags;
-	int i, ret, pinmux_type;
-
-	ret = -EINVAL;
-
-	if (!gpioc)
-		goto err_out;
-
-	spin_lock_irqsave(&gpio_lock, flags);
-
-	if ((gpioc->gpios[offset].flags & PINMUX_FLAG_TYPE) != PINMUX_TYPE_NONE)
-		goto err_unlock;
-
-	/* setup pin function here if no data is associated with pin */
-
-	if (get_data_reg(gpioc, offset, &dummy, &i) != 0)
-		pinmux_type = PINMUX_TYPE_FUNCTION;
-	else
-		pinmux_type = PINMUX_TYPE_GPIO;
-
-	if (pinmux_type == PINMUX_TYPE_FUNCTION) {
-		if (pinmux_config_gpio(gpioc, offset,
-				       pinmux_type,
-				       GPIO_CFG_DRYRUN) != 0)
-			goto err_unlock;
-
-		if (pinmux_config_gpio(gpioc, offset,
-				       pinmux_type,
-				       GPIO_CFG_REQ) != 0)
-			BUG();
-	}
-
-	gpioc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
-	gpioc->gpios[offset].flags |= pinmux_type;
-
-	ret = 0;
- err_unlock:
-	spin_unlock_irqrestore(&gpio_lock, flags);
- err_out:
-	return ret;
-}
-
-static void sh_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-	struct pinmux_info *gpioc = chip_to_pinmux(chip);
-	unsigned long flags;
-	int pinmux_type;
-
-	if (!gpioc)
-		return;
-
-	spin_lock_irqsave(&gpio_lock, flags);
-
-	pinmux_type = gpioc->gpios[offset].flags & PINMUX_FLAG_TYPE;
-	pinmux_config_gpio(gpioc, offset, pinmux_type, GPIO_CFG_FREE);
-	gpioc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
-	gpioc->gpios[offset].flags |= PINMUX_TYPE_NONE;
-
-	spin_unlock_irqrestore(&gpio_lock, flags);
-}
-
-static int pinmux_direction(struct pinmux_info *gpioc,
-			    unsigned gpio, int new_pinmux_type)
-{
-	int pinmux_type;
-	int ret = -EINVAL;
-
-	if (!gpioc)
-		goto err_out;
-
-	pinmux_type = gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE;
-
-	switch (pinmux_type) {
-	case PINMUX_TYPE_GPIO:
-		break;
-	case PINMUX_TYPE_OUTPUT:
-	case PINMUX_TYPE_INPUT:
-	case PINMUX_TYPE_INPUT_PULLUP:
-	case PINMUX_TYPE_INPUT_PULLDOWN:
-		pinmux_config_gpio(gpioc, gpio, pinmux_type, GPIO_CFG_FREE);
-		break;
-	default:
-		goto err_out;
-	}
-
-	if (pinmux_config_gpio(gpioc, gpio,
-			       new_pinmux_type,
-			       GPIO_CFG_DRYRUN) != 0)
-		goto err_out;
-
-	if (pinmux_config_gpio(gpioc, gpio,
-			       new_pinmux_type,
-			       GPIO_CFG_REQ) != 0)
-		BUG();
-
-	gpioc->gpios[gpio].flags &= ~PINMUX_FLAG_TYPE;
-	gpioc->gpios[gpio].flags |= new_pinmux_type;
-
-	ret = 0;
- err_out:
-	return ret;
-}
-
-static int sh_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-	struct pinmux_info *gpioc = chip_to_pinmux(chip);
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&gpio_lock, flags);
-	ret = pinmux_direction(gpioc, offset, PINMUX_TYPE_INPUT);
-	spin_unlock_irqrestore(&gpio_lock, flags);
-
-	return ret;
-}
-
-static void sh_gpio_set_value(struct pinmux_info *gpioc,
-			      unsigned gpio, int value)
-{
-	struct pinmux_data_reg *dr = NULL;
-	int bit = 0;
-
-	if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0)
-		BUG();
-	else
-		gpio_write_bit(dr, bit, value);
-}
-
-static int sh_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
-				    int value)
-{
-	struct pinmux_info *gpioc = chip_to_pinmux(chip);
-	unsigned long flags;
-	int ret;
-
-	sh_gpio_set_value(gpioc, offset, value);
-	spin_lock_irqsave(&gpio_lock, flags);
-	ret = pinmux_direction(gpioc, offset, PINMUX_TYPE_OUTPUT);
-	spin_unlock_irqrestore(&gpio_lock, flags);
-
-	return ret;
-}
-
-static int sh_gpio_get_value(struct pinmux_info *gpioc, unsigned gpio)
-{
-	struct pinmux_data_reg *dr = NULL;
-	int bit = 0;
-
-	if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0) {
-		BUG();
-		return 0;
-	}
-
-	return gpio_read_reg(dr->reg, dr->reg_width, 1, bit);
-}
-
-static int sh_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
-	return sh_gpio_get_value(chip_to_pinmux(chip), offset);
-}
-
-static void sh_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
-	sh_gpio_set_value(chip_to_pinmux(chip), offset, value);
-}
-
-int register_pinmux(struct pinmux_info *pip)
-{
-	struct gpio_chip *chip = &pip->chip;
-
-	pr_info("sh pinmux: %s handling gpio %d -> %d\n",
-		pip->name, pip->first_gpio, pip->last_gpio);
-
-	setup_data_regs(pip);
-
-	chip->request = sh_gpio_request;
-	chip->free = sh_gpio_free;
-	chip->direction_input = sh_gpio_direction_input;
-	chip->get = sh_gpio_get;
-	chip->direction_output = sh_gpio_direction_output;
-	chip->set = sh_gpio_set;
-
-	WARN_ON(pip->first_gpio != 0); /* needs testing */
-
-	chip->label = pip->name;
-	chip->owner = THIS_MODULE;
-	chip->base = pip->first_gpio;
-	chip->ngpio = (pip->last_gpio - pip->first_gpio) + 1;
-
-	return gpiochip_add(chip);
-}
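
One piece of the removed file worth keeping in mind when reading its successor: the pinmux register helpers number fields from the MSB end of the register, so gpio_read_reg()/gpio_write_reg() compute a bit position as reg_width - ((in_pos + 1) * field_width). A worked standalone check of that arithmetic; field_pos() is just an extraction of the expression:

    #include <stdio.h>

    static unsigned long field_pos(unsigned long reg_width,
    			       unsigned long field_width,
    			       unsigned long in_pos)
    {
    	/* field 0 occupies the top field_width bits of the register */
    	return reg_width - ((in_pos + 1) * field_width);
    }

    int main(void)
    {
    	/* field 0 of a 32-bit register with 4-bit fields sits at bit 28 */
    	printf("%lu\n", field_pos(32, 4, 0));	/* prints 28 */
    	return 0;
    }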
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index a78be74b8d3e..1151ecdffa71 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -33,7 +33,7 @@ ENTRY(empty_zero_page)
33 .long 1 /* LOADER_TYPE */ 33 .long 1 /* LOADER_TYPE */
34 .long 0x00000000 /* INITRD_START */ 34 .long 0x00000000 /* INITRD_START */
35 .long 0x00000000 /* INITRD_SIZE */ 35 .long 0x00000000 /* INITRD_SIZE */
36#ifdef CONFIG_32BIT 36#if defined(CONFIG_32BIT) && defined(CONFIG_PMB_FIXED)
37 .long 0x53453f00 + 32 /* "SE?" = 32 bit */ 37 .long 0x53453f00 + 32 /* "SE?" = 32 bit */
38#else 38#else
39 .long 0x53453f00 + 29 /* "SE?" = 29 bit */ 39 .long 0x53453f00 + 29 /* "SE?" = 29 bit */
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 27ff2dc093c7..aaff0037fcd7 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -21,7 +21,7 @@
 #include <asm/atomic.h>

 static int hlt_counter;
-void (*pm_idle)(void);
+void (*pm_idle)(void) = NULL;
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);

@@ -39,48 +39,92 @@ static int __init hlt_setup(char *__unused)
 }
 __setup("hlt", hlt_setup);
 
+static inline int hlt_works(void)
+{
+	return !hlt_counter;
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle(void)
+{
+	local_irq_enable();
+	while (!need_resched())
+		cpu_relax();
+}
+
 void default_idle(void)
 {
-	if (!hlt_counter) {
+	if (hlt_works()) {
 		clear_thread_flag(TIF_POLLING_NRFLAG);
 		smp_mb__after_clear_bit();
-		set_bl_bit();
-		stop_critical_timings();
 
-		while (!need_resched())
+		if (!need_resched()) {
+			local_irq_enable();
 			cpu_sleep();
+		} else
+			local_irq_enable();
 
-		start_critical_timings();
-		clear_bl_bit();
 		set_thread_flag(TIF_POLLING_NRFLAG);
 	} else
-		while (!need_resched())
-			cpu_relax();
+		poll_idle();
 }
 
+/*
+ * The idle thread. There's no useful work to be done, so just try to conserve
+ * power and have a low exit latency (ie sit in a loop waiting for somebody to
+ * say that they'd like to reschedule)
+ */
 void cpu_idle(void)
 {
+	unsigned int cpu = smp_processor_id();
+
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
+		tick_nohz_stop_sched_tick(1);
 
-		if (!idle)
-			idle = default_idle;
+		while (!need_resched() && cpu_online(cpu)) {
+			check_pgt_cache();
+			rmb();
 
-		tick_nohz_stop_sched_tick(1);
-		while (!need_resched())
-			idle();
-		tick_nohz_restart_sched_tick();
+			local_irq_disable();
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+			pm_idle();
+			/*
+			 * Sanity check to ensure that pm_idle() returns
+			 * with IRQs enabled
+			 */
+			WARN_ON(irqs_disabled());
+			start_critical_timings();
+		}
 
+		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
-		check_pgt_cache();
 	}
 }
 
+void __cpuinit select_idle_routine(void)
+{
+	/*
+	 * If a platform has set its own idle routine, leave it alone.
+	 */
+	if (pm_idle)
+		return;
+
+	if (hlt_works())
+		pm_idle = default_idle;
+	else
+		pm_idle = poll_idle;
+}
+
 static void do_nothing(void *unused)
 {
 }
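The rework above centralizes the IRQ discipline: cpu_idle() now disables interrupts around each pm_idle() call and WARN()s if the routine returns with them still off, while select_idle_routine() only fills in pm_idle when a platform hasn't. A hedged sketch of what a platform-supplied override must therefore look like (example_* names are hypothetical; a real platform would install it before select_idle_routine() runs):

	static void example_platform_idle(void)
	{
		/* entered with IRQs off; must return with them on */
		local_irq_enable();
		cpu_relax();
	}

	static int __init example_idle_setup(void)
	{
		pm_idle = example_platform_idle;  /* select_idle_routine() leaves it alone */
		return 0;
	}
	arch_initcall(example_idle_setup);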
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c
index b8fa6524760a..e1e1dbd19557 100644
--- a/arch/sh/kernel/io_generic.c
+++ b/arch/sh/kernel/io_generic.c
@@ -24,7 +24,7 @@
 #define dummy_read()
 #endif
 
-unsigned long generic_io_base;
+unsigned long generic_io_base = 0;
 
 u8 generic_inb(unsigned long port)
 {
@@ -147,8 +147,10 @@ void generic_outsl(unsigned long port, const void *src, unsigned long count)
 
 void __iomem *generic_ioport_map(unsigned long addr, unsigned int size)
 {
+#ifdef P1SEG
 	if (PXSEG(addr) >= P1SEG)
 		return (void __iomem *)addr;
+#endif
 
 	return (void __iomem *)(addr + generic_io_base);
 }
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index eac7da772fc2..e1913f28f418 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -37,7 +37,15 @@ void ack_bad_irq(unsigned int irq)
  */
 static int show_other_interrupts(struct seq_file *p, int prec)
 {
+	int j;
+
+	seq_printf(p, "%*s: ", prec, "NMI");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
+	seq_printf(p, "  Non-maskable interrupts\n");
+
 	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+
 	return 0;
 }
 
@@ -255,6 +263,12 @@ void __init init_IRQ(void)
 {
 	plat_irq_setup();
 
+	/*
+	 * Pin any of the legacy IRQ vectors that haven't already been
+	 * grabbed by the platform
+	 */
+	reserve_irq_legacy();
+
 	/* Perform the machine specific initialisation */
 	if (sh_mv.mv_init_irq)
 		sh_mv.mv_init_irq();
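With the show_other_interrupts() hunk above, /proc/interrupts on sh gains a per-CPU NMI row ahead of the ERR line. Going purely by the format strings, the tail of the file would render roughly like this on a hypothetical two-CPU box (counts illustrative):

	NMI:          0          0   Non-maskable interrupts
	ERR:          0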
diff --git a/arch/sh/kernel/irq_32.c b/arch/sh/kernel/irq_32.c
new file mode 100644
index 000000000000..e33ab15831f9
--- /dev/null
+++ b/arch/sh/kernel/irq_32.c
@@ -0,0 +1,57 @@
+/*
+ * SHcompact irqflags support
+ *
+ * Copyright (C) 2006 - 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/irqflags.h>
+#include <linux/module.h>
+
+void notrace raw_local_irq_restore(unsigned long flags)
+{
+	unsigned long __dummy0, __dummy1;
+
+	if (flags == RAW_IRQ_DISABLED) {
+		__asm__ __volatile__ (
+			"stc	sr, %0\n\t"
+			"or	#0xf0, %0\n\t"
+			"ldc	%0, sr\n\t"
+			: "=&z" (__dummy0)
+			: /* no inputs */
+			: "memory"
+		);
+	} else {
+		__asm__ __volatile__ (
+			"stc	sr, %0\n\t"
+			"and	%1, %0\n\t"
+#ifdef CONFIG_CPU_HAS_SR_RB
+			"stc	r6_bank, %1\n\t"
+			"or	%1, %0\n\t"
+#endif
+			"ldc	%0, sr\n\t"
+			: "=&r" (__dummy0), "=r" (__dummy1)
+			: "1" (~RAW_IRQ_DISABLED)
+			: "memory"
+		);
+	}
+}
+EXPORT_SYMBOL(raw_local_irq_restore);
+
+unsigned long notrace __raw_local_save_flags(void)
+{
+	unsigned long flags;
+
+	__asm__ __volatile__ (
+		"stc	sr, %0\n\t"
+		"and	#0xf0, %0\n\t"
+		: "=&z" (flags)
+		: /* no inputs */
+		: "memory"
+	);
+
+	return flags;
+}
+EXPORT_SYMBOL(__raw_local_save_flags);
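RAW_IRQ_DISABLED here is the SR.IMASK field fully set (0xf0), so __raw_local_save_flags() returns either 0xf0 (masked) or 0. A hedged sketch of how the usual save/restore pair composes these two primitives, simplified from this era's generic irqflags macros (the example_* names are mine):

	static inline unsigned long example_irq_save(void)
	{
		unsigned long flags = __raw_local_save_flags();

		raw_local_irq_restore(RAW_IRQ_DISABLED);	/* mask: SR |= 0xf0 */
		return flags;
	}

	static inline void example_irq_restore(unsigned long flags)
	{
		raw_local_irq_restore(flags);	/* 0xf0 keeps IRQs off, 0 re-enables */
	}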
diff --git a/arch/sh/kernel/irq_64.c b/arch/sh/kernel/irq_64.c
new file mode 100644
index 000000000000..32365ba0e039
--- /dev/null
+++ b/arch/sh/kernel/irq_64.c
@@ -0,0 +1,51 @@
+/*
+ * SHmedia irqflags support
+ *
+ * Copyright (C) 2006 - 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/irqflags.h>
+#include <linux/module.h>
+#include <cpu/registers.h>
+
+void notrace raw_local_irq_restore(unsigned long flags)
+{
+	unsigned long long __dummy;
+
+	if (flags == RAW_IRQ_DISABLED) {
+		__asm__ __volatile__ (
+			"getcon	" __SR ", %0\n\t"
+			"or	%0, %1, %0\n\t"
+			"putcon	%0, " __SR "\n\t"
+			: "=&r" (__dummy)
+			: "r" (RAW_IRQ_DISABLED)
+		);
+	} else {
+		__asm__ __volatile__ (
+			"getcon	" __SR ", %0\n\t"
+			"and	%0, %1, %0\n\t"
+			"putcon	%0, " __SR "\n\t"
+			: "=&r" (__dummy)
+			: "r" (~RAW_IRQ_DISABLED)
+		);
+	}
+}
+EXPORT_SYMBOL(raw_local_irq_restore);
+
+unsigned long notrace __raw_local_save_flags(void)
+{
+	unsigned long flags;
+
+	__asm__ __volatile__ (
+		"getcon	" __SR ", %0\n\t"
+		"and	%0, %1, %0"
+		: "=&r" (flags)
+		: "r" (RAW_IRQ_DISABLED)
+	);
+
+	return flags;
+}
+EXPORT_SYMBOL(__raw_local_save_flags);
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 7ea2704ea033..76f280223ebd 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -46,12 +46,6 @@ void machine_crash_shutdown(struct pt_regs *regs)
  */
 int machine_kexec_prepare(struct kimage *image)
 {
-	/* older versions of kexec-tools are passing
-	 * the zImage entry point as a virtual address.
-	 */
-	if (image->start != PHYSADDR(image->start))
-		return -EINVAL; /* upgrade your kexec-tools */
-
 	return 0;
 }
 
diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c
index cbce639b108a..1652340ba3f2 100644
--- a/arch/sh/kernel/machvec.c
+++ b/arch/sh/kernel/machvec.c
@@ -135,5 +135,9 @@ void __init sh_mv_setup(void)
 	if (!sh_mv.mv_nr_irqs)
 		sh_mv.mv_nr_irqs = NR_IRQS;
 
+#ifdef P2SEG
 	__set_io_port_base(P2SEG);
+#else
+	__set_io_port_base(0);
+#endif
 }
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
index c2efdcde266f..43adddfe4c04 100644
--- a/arch/sh/kernel/module.c
+++ b/arch/sh/kernel/module.c
@@ -32,6 +32,7 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <asm/unaligned.h>
+#include <asm/dwarf.h>
 
 void *module_alloc(unsigned long size)
 {
@@ -145,10 +146,16 @@ int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
-	return module_bug_finalize(hdr, sechdrs, me);
+	int ret = 0;
+
+	ret |= module_dwarf_finalize(hdr, sechdrs, me);
+	ret |= module_bug_finalize(hdr, sechdrs, me);
+
+	return ret;
 }
 
 void module_arch_cleanup(struct module *mod)
 {
 	module_bug_cleanup(mod);
+	module_dwarf_cleanup(mod);
 }
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
new file mode 100644
index 000000000000..24ea837eac5b
--- /dev/null
+++ b/arch/sh/kernel/perf_callchain.c
@@ -0,0 +1,98 @@
+/*
+ * Performance event callchain support - SuperH architecture code
+ *
+ * Copyright (C) 2009  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/perf_event.h>
+#include <linux/percpu.h>
+#include <asm/unwinder.h>
+#include <asm/ptrace.h>
+
+static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+	if (entry->nr < PERF_MAX_STACK_DEPTH)
+		entry->ip[entry->nr++] = ip;
+}
+
+static void callchain_warning(void *data, char *msg)
+{
+}
+
+static void
+callchain_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+}
+
+static int callchain_stack(void *data, char *name)
+{
+	return 0;
+}
+
+static void callchain_address(void *data, unsigned long addr, int reliable)
+{
+	struct perf_callchain_entry *entry = data;
+
+	if (reliable)
+		callchain_store(entry, addr);
+}
+
+static const struct stacktrace_ops callchain_ops = {
+	.warning	= callchain_warning,
+	.warning_symbol	= callchain_warning_symbol,
+	.stack		= callchain_stack,
+	.address	= callchain_address,
+};
+
+static void
+perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+	callchain_store(entry, PERF_CONTEXT_KERNEL);
+	callchain_store(entry, regs->pc);
+
+	unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
+}
+
+static void
+perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+	int is_user;
+
+	if (!regs)
+		return;
+
+	is_user = user_mode(regs);
+
+	if (!current || current->pid == 0)
+		return;
+
+	if (is_user && current->state != TASK_RUNNING)
+		return;
+
+	/*
+	 * Only the kernel side is implemented for now.
+	 */
+	if (!is_user)
+		perf_callchain_kernel(regs, entry);
+}
+
+/*
+ * No need for separate IRQ and NMI entries.
+ */
+static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
+
+	entry->nr = 0;
+
+	perf_do_callchain(regs, entry);
+
+	return entry;
+}
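callchain_store() above bounds each sample at PERF_MAX_STACK_DEPTH entries, with entry->ip[0] holding the PERF_CONTEXT_KERNEL marker and entry->ip[1] the trapping pc; the unwinder then appends only addresses it deems reliable. A hypothetical in-kernel consumer of the per-cpu entry returned by perf_callchain() might walk it like this (illustrative only; the real consumer is the perf core's sampling path):

	static void example_dump_callchain(struct perf_callchain_entry *entry)
	{
		u64 i;

		for (i = 0; i < entry->nr; i++)
			printk(KERN_DEBUG "  [%llu] %pS\n", i,
			       (void *)(unsigned long)entry->ip[i]);
	}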
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
new file mode 100644
index 000000000000..7ff0943e7a08
--- /dev/null
+++ b/arch/sh/kernel/perf_event.c
@@ -0,0 +1,312 @@
+/*
+ * Performance event support framework for SuperH hardware counters.
+ *
+ * Copyright (C) 2009  Paul Mundt
+ *
+ * Heavily based on the x86 and PowerPC implementations.
+ *
+ * x86:
+ *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
+ *  Copyright (C) 2009 Jaswinder Singh Rajput
+ *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
+ *
+ * ppc:
+ *  Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+#include <asm/processor.h>
+
+struct cpu_hw_events {
+	struct perf_event	*events[MAX_HWEVENTS];
+	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+};
+
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+
+static struct sh_pmu *sh_pmu __read_mostly;
+
+/* Number of perf_events counting hardware events */
+static atomic_t num_events;
+/* Used to avoid races in calling reserve/release_pmc_hardware */
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
+/*
+ * Stub these out for now, do something more profound later.
+ */
+int reserve_pmc_hardware(void)
+{
+	return 0;
+}
+
+void release_pmc_hardware(void)
+{
+}
+
+static inline int sh_pmu_initialized(void)
+{
+	return !!sh_pmu;
+}
+
+/*
+ * Release the PMU if this is the last perf_event.
+ */
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+	if (!atomic_add_unless(&num_events, -1, 1)) {
+		mutex_lock(&pmc_reserve_mutex);
+		if (atomic_dec_return(&num_events) == 0)
+			release_pmc_hardware();
+		mutex_unlock(&pmc_reserve_mutex);
+	}
+}
+
+static int hw_perf_cache_event(int config, int *evp)
+{
+	unsigned long type, op, result;
+	int ev;
+
+	if (!sh_pmu->cache_events)
+		return -EINVAL;
+
+	/* unpack config */
+	type = config & 0xff;
+	op = (config >> 8) & 0xff;
+	result = (config >> 16) & 0xff;
+
+	if (type >= PERF_COUNT_HW_CACHE_MAX ||
+	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
+	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+		return -EINVAL;
+
+	ev = (*sh_pmu->cache_events)[type][op][result];
+	if (ev == 0)
+		return -EOPNOTSUPP;
+	if (ev == -1)
+		return -EINVAL;
+	*evp = ev;
+	return 0;
+}
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+	struct hw_perf_event *hwc = &event->hw;
+	int config = -1;
+	int err;
+
+	if (!sh_pmu_initialized())
+		return -ENODEV;
+
+	/*
+	 * All of the on-chip counters are "limited", in that they have
+	 * no interrupts, and are therefore unable to do sampling without
+	 * further work and timer assistance.
+	 */
+	if (hwc->sample_period)
+		return -EINVAL;
+
+	/*
+	 * See if we need to reserve the counter.
+	 *
+	 * If no events are currently in use, then we have to take a
+	 * mutex to ensure that we don't race with another task doing
+	 * reserve_pmc_hardware or release_pmc_hardware.
+	 */
+	err = 0;
+	if (!atomic_inc_not_zero(&num_events)) {
+		mutex_lock(&pmc_reserve_mutex);
+		if (atomic_read(&num_events) == 0 &&
+		    reserve_pmc_hardware())
+			err = -EBUSY;
+		else
+			atomic_inc(&num_events);
+		mutex_unlock(&pmc_reserve_mutex);
+	}
+
+	if (err)
+		return err;
+
+	event->destroy = hw_perf_event_destroy;
+
+	switch (attr->type) {
+	case PERF_TYPE_RAW:
+		config = attr->config & sh_pmu->raw_event_mask;
+		break;
+	case PERF_TYPE_HW_CACHE:
+		err = hw_perf_cache_event(attr->config, &config);
+		if (err)
+			return err;
+		break;
+	case PERF_TYPE_HARDWARE:
+		if (attr->config >= sh_pmu->max_events)
+			return -EINVAL;
+
+		config = sh_pmu->event_map(attr->config);
+		break;
+	}
+
+	if (config == -1)
+		return -EINVAL;
+
+	hwc->config |= config;
+
+	return 0;
+}
+
+static void sh_perf_event_update(struct perf_event *event,
+				   struct hw_perf_event *hwc, int idx)
+{
+	u64 prev_raw_count, new_raw_count;
+	s64 delta;
+	int shift = 0;
+
+	/*
+	 * Depending on the counter configuration, they may or may not
+	 * be chained, in which case the previous counter value can be
+	 * updated underneath us if the lower-half overflows.
+	 *
+	 * Our tactic to handle this is to first atomically read and
+	 * exchange a new raw count - then add that new-prev delta
+	 * count to the generic counter atomically.
+	 *
+	 * As there is no interrupt associated with the overflow events,
+	 * this is the simplest approach for maintaining consistency.
+	 */
+again:
+	prev_raw_count = atomic64_read(&hwc->prev_count);
+	new_raw_count = sh_pmu->read(idx);
+
+	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+			     new_raw_count) != prev_raw_count)
+		goto again;
+
+	/*
+	 * Now we have the new raw value and have updated the prev
+	 * timestamp already. We can now calculate the elapsed delta
+	 * (counter-)time and add that to the generic counter.
+	 *
+	 * Careful, not all hw sign-extends above the physical width
+	 * of the count.
+	 */
+	delta = (new_raw_count << shift) - (prev_raw_count << shift);
+	delta >>= shift;
+
+	atomic64_add(delta, &event->count);
+}
+
+static void sh_pmu_disable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	clear_bit(idx, cpuc->active_mask);
+	sh_pmu->disable(hwc, idx);
+
+	barrier();
+
+	sh_perf_event_update(event, &event->hw, idx);
+
+	cpuc->events[idx] = NULL;
+	clear_bit(idx, cpuc->used_mask);
+
+	perf_event_update_userpage(event);
+}
+
+static int sh_pmu_enable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (test_and_set_bit(idx, cpuc->used_mask)) {
+		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
+		if (idx == sh_pmu->num_events)
+			return -EAGAIN;
+
+		set_bit(idx, cpuc->used_mask);
+		hwc->idx = idx;
+	}
+
+	sh_pmu->disable(hwc, idx);
+
+	cpuc->events[idx] = event;
+	set_bit(idx, cpuc->active_mask);
+
+	sh_pmu->enable(hwc, idx);
+
+	perf_event_update_userpage(event);
+
+	return 0;
+}
+
+static void sh_pmu_read(struct perf_event *event)
+{
+	sh_perf_event_update(event, &event->hw, event->hw.idx);
+}
+
+static const struct pmu pmu = {
+	.enable		= sh_pmu_enable,
+	.disable	= sh_pmu_disable,
+	.read		= sh_pmu_read,
+};
+
+const struct pmu *hw_perf_event_init(struct perf_event *event)
+{
+	int err = __hw_perf_event_init(event);
+	if (unlikely(err)) {
+		if (event->destroy)
+			event->destroy(event);
+		return ERR_PTR(err);
+	}
+
+	return &pmu;
+}
+
+void hw_perf_event_setup(int cpu)
+{
+	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
+
+	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
+}
+
+void hw_perf_enable(void)
+{
+	if (!sh_pmu_initialized())
+		return;
+
+	sh_pmu->enable_all();
+}
+
+void hw_perf_disable(void)
+{
+	if (!sh_pmu_initialized())
+		return;
+
+	sh_pmu->disable_all();
+}
+
+int register_sh_pmu(struct sh_pmu *pmu)
+{
+	if (sh_pmu)
+		return -EBUSY;
+	sh_pmu = pmu;
+
+	pr_info("Performance Events: %s support registered\n", pmu->name);
+
+	WARN_ON(pmu->num_events > MAX_HWEVENTS);
+
+	return 0;
+}
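The shift trick in sh_perf_event_update() is inherited from the x86 code: with shift = 0 it is a no-op here, but for hardware whose counters are narrower than 64 bits and don't sign-extend, left-shifting both raw values moves the counter's top bit to bit 63 so the final arithmetic right shift sign-extends a wrapped delta correctly. A standalone illustration for a hypothetical 48-bit counter (example_* name is mine):

	/* wrap-safe delta between two raw reads of a 'width'-bit counter */
	static s64 example_counter_delta(u64 prev_raw, u64 new_raw, int width)
	{
		int shift = 64 - width;		/* 16 for a 48-bit counter */

		return ((s64)(new_raw << shift) - (s64)(prev_raw << shift)) >> shift;
	}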
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 0673c4746be3..d8af889366a4 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -134,7 +134,10 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
134 regs.regs[5] = (unsigned long)fn; 134 regs.regs[5] = (unsigned long)fn;
135 135
136 regs.pc = (unsigned long)kernel_thread_helper; 136 regs.pc = (unsigned long)kernel_thread_helper;
137 regs.sr = (1 << 30); 137 regs.sr = SR_MD;
138#if defined(CONFIG_SH_FPU)
139 regs.sr |= SR_FD;
140#endif
138 141
139 /* Ok, create the new process.. */ 142 /* Ok, create the new process.. */
140 pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, 143 pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
@@ -142,6 +145,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 
 	return pid;
 }
+EXPORT_SYMBOL(kernel_thread);
 
 /*
  * Free current thread data structures etc..
@@ -186,6 +190,16 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 
 	return fpvalid;
 }
+EXPORT_SYMBOL(dump_fpu);
+
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+	unlazy_fpu(tsk, task_pt_regs(tsk));
+}
 
 asmlinkage void ret_from_fork(void);
 
@@ -195,16 +209,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 {
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs;
-#if defined(CONFIG_SH_FPU) || defined(CONFIG_SH_DSP)
+#if defined(CONFIG_SH_DSP)
 	struct task_struct *tsk = current;
 #endif
 
-#if defined(CONFIG_SH_FPU)
-	unlazy_fpu(tsk, regs);
-	p->thread.fpu = tsk->thread.fpu;
-	copy_to_stopped_child_used_math(p);
-#endif
-
 #if defined(CONFIG_SH_DSP)
 	if (is_dsp_enabled(tsk)) {
 		/* We can use the __save_dsp or just copy the struct:
@@ -224,6 +232,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	} else {
 		childregs->regs[15] = (unsigned long)childregs;
 		ti->addr_limit = KERNEL_DS;
+		ti->status &= ~TS_USEDFPU;
+		p->fpu_counter = 0;
 	}
 
 	if (clone_flags & CLONE_SETTLS)
@@ -288,9 +298,13 @@ static void ubc_set_tracing(int asid, unsigned long pc)
 __notrace_funcgraph struct task_struct *
 __switch_to(struct task_struct *prev, struct task_struct *next)
 {
-#if defined(CONFIG_SH_FPU)
+	struct thread_struct *next_t = &next->thread;
+
 	unlazy_fpu(prev, task_pt_regs(prev));
-#endif
+
+	/* we're going to use this soon, after a few expensive things */
+	if (next->fpu_counter > 5)
+		prefetch(&next_t->fpu.hard);
 
 #ifdef CONFIG_MMU
 	/*
@@ -321,6 +335,14 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 #endif
 	}
 
+	/*
+	 * If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	if (next->fpu_counter > 5)
+		fpu_state_restore(task_pt_regs(next));
+
 	return prev;
 }
 
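The fpu_counter logic above mirrors the x86 lazy-FPU heuristic: a task that has taken an FPU restore on several consecutive timeslices is assumed to need it again, so __switch_to() prefetches and preloads the state instead of waiting for the FPU-disabled trap. An illustrative sketch (not code from this merge; example_* names are mine) of the bookkeeping that makes the "> 5" test meaningful:

	/* bumped on each restore; reset whenever the FPU state is dropped */
	static inline void example_note_fpu_restore(struct task_struct *tsk)
	{
		tsk->fpu_counter++;	/* counts consecutive FPU timeslices */
	}

	static inline int example_should_preload_fpu(struct task_struct *next)
	{
		return next->fpu_counter > 5;
	}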
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 1192398ef582..359b8a2f4d2e 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -335,6 +335,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
 		       &regs, 0, NULL, NULL);
 }
+EXPORT_SYMBOL(kernel_thread);
 
 /*
  * Free current thread data structures etc..
@@ -417,6 +418,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 	return 0; /* Task didn't use the fpu at all. */
 #endif
 }
+EXPORT_SYMBOL(dump_fpu);
 
 asmlinkage void ret_from_fork(void);
 
diff --git a/arch/sh/kernel/return_address.c b/arch/sh/kernel/return_address.c
new file mode 100644
index 000000000000..df3ab5811074
--- /dev/null
+++ b/arch/sh/kernel/return_address.c
@@ -0,0 +1,54 @@
+/*
+ * arch/sh/kernel/return_address.c
+ *
+ * Copyright (C) 2009  Matt Fleming
+ * Copyright (C) 2009  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <asm/dwarf.h>
+
+#ifdef CONFIG_DWARF_UNWINDER
+
+void *return_address(unsigned int depth)
+{
+	struct dwarf_frame *frame;
+	unsigned long ra;
+	int i;
+
+	for (i = 0, frame = NULL, ra = 0; i <= depth; i++) {
+		struct dwarf_frame *tmp;
+
+		tmp = dwarf_unwind_stack(ra, frame);
+
+		if (frame)
+			dwarf_free_frame(frame);
+
+		frame = tmp;
+
+		if (!frame || !frame->return_addr)
+			break;
+
+		ra = frame->return_addr;
+	}
+
+	/* Failed to unwind the stack to the specified depth. */
+	WARN_ON(i != depth + 1);
+
+	if (frame)
+		dwarf_free_frame(frame);
+
+	return (void *)ra;
+}
+
+#else
+
+void *return_address(unsigned int depth)
+{
+	return NULL;
+}
+
+#endif
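Each dwarf_unwind_stack() call above peels one frame, so return_address(depth) is roughly the DWARF-based analogue of __builtin_return_address(depth). A hypothetical caller (illustrative; noinline keeps the queried frame from being folded away):

	static noinline void example_report(void)
	{
		/* depth 1: the caller of example_report()'s caller */
		printk(KERN_DEBUG "called from %pS\n", return_address(1));
	}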
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 99b4fb553bf1..5a947a2567e4 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -453,6 +453,10 @@ void __init setup_arch(char **cmdline_p)
 
 	paging_init();
 
+#ifdef CONFIG_PMB_ENABLE
+	pmb_init();
+#endif
+
 #ifdef CONFIG_SMP
 	plat_smp_setup();
 #endif
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 444cce3ae921..3896f26efa4a 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -1,37 +1,11 @@
 #include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-#include <linux/vmalloc.h>
-#include <linux/pci.h>
-#include <linux/irq.h>
-#include <asm/sections.h>
-#include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
 #include <asm/checksum.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
-#include <asm/ftrace.h>
-
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
-/* platform dependent support */
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(strlen);
-
-/* PCI exports */
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_alloc_consistent);
-EXPORT_SYMBOL(pci_free_consistent);
-#endif
+#include <asm/sections.h>
 
-/* mem exports */
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
@@ -40,6 +14,13 @@ EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__ndelay);
 EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_generic);
+EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(_ebss);
+EXPORT_SYMBOL(empty_zero_page);
 
 #define DECLARE_EXPORT(name)	\
 	extern void name(void);EXPORT_SYMBOL(name)
@@ -107,30 +88,6 @@ DECLARE_EXPORT(__sdivsi3_i4);
 DECLARE_EXPORT(__udivsi3_i4);
 DECLARE_EXPORT(__sdivsi3_i4i);
 DECLARE_EXPORT(__udivsi3_i4i);
-
-#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
-	defined(CONFIG_SH7705_CACHE_32KB))
-/* needed by some modules */
-EXPORT_SYMBOL(flush_cache_all);
-EXPORT_SYMBOL(flush_cache_range);
-EXPORT_SYMBOL(flush_dcache_page);
-#endif
-
 #ifdef CONFIG_MCOUNT
 DECLARE_EXPORT(mcount);
 #endif
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_generic);
-#ifdef CONFIG_IPV6
-EXPORT_SYMBOL(csum_ipv6_magic);
-#endif
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(_ebss);
-EXPORT_SYMBOL(empty_zero_page);
-
-#ifndef CONFIG_CACHE_OFF
-EXPORT_SYMBOL(__flush_purge_region);
-EXPORT_SYMBOL(__flush_wback_region);
-EXPORT_SYMBOL(__flush_invalidate_region);
-#endif
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index d008e17eb257..45afa5c51f67 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -24,16 +24,6 @@
 #include <asm/delay.h>
 #include <asm/irq.h>
 
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
-/* platform dependent support */
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(kernel_thread);
-
-#ifdef CONFIG_VT
-EXPORT_SYMBOL(screen_info);
-#endif
-
 EXPORT_SYMBOL(__put_user_asm_b);
 EXPORT_SYMBOL(__put_user_asm_w);
 EXPORT_SYMBOL(__put_user_asm_l);
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 3db37425210d..12815ce01ecd 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -67,7 +67,8 @@ sys_sigsuspend(old_sigset_t mask,
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
-	set_thread_flag(TIF_RESTORE_SIGMASK);
+	set_restore_sigmask();
+
 	return -ERESTARTNOHAND;
 }
 
@@ -590,7 +591,7 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
 	if (try_to_freeze())
 		goto no_signal;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
 		oldset = &current->saved_sigmask;
 	else
 		oldset = &current->blocked;
@@ -602,12 +603,13 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
 		/* Whee!  Actually deliver the signal.  */
 		if (handle_signal(signr, &ka, &info, oldset,
 				  regs, save_r0) == 0) {
-			/* a signal was successfully delivered; the saved
+			/*
+			 * A signal was successfully delivered; the saved
 			 * sigmask will have been stored in the signal frame,
 			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
+			 * clear the TS_RESTORE_SIGMASK flag
+			 */
+			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 
 			tracehook_signal_handler(signr, &info, &ka, regs,
 					test_thread_flag(TIF_SINGLESTEP));
@@ -631,10 +633,12 @@ no_signal:
 		}
 	}
 
-	/* if there's no signal to deliver, we just put the saved sigmask
-	 * back */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
+	/*
+	 * If there's no signal to deliver, we just put the saved sigmask
+	 * back.
+	 */
+	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
+		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
 	}
 }
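The TIF-to-TS conversion in this file (and in signal_64.c below) works because thread_info->status is only ever touched by the owning task, so plain read-modify-write suffices where the TIF_ flag needed atomic bitops. For reference, the x86-style helper this pairs with looks roughly like the following sketch; SH's actual definition lives in its asm/thread_info.h:

	static inline void set_restore_sigmask(void)
	{
		struct thread_info *ti = current_thread_info();

		ti->status |= TS_RESTORE_SIGMASK;
		set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
	}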
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 74793c80a57a..feb3dddd3192 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -101,7 +101,7 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset)
 	if (try_to_freeze())
 		goto no_signal;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
 		oldset = &current->saved_sigmask;
 	else if (!oldset)
 		oldset = &current->blocked;
@@ -115,11 +115,9 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset)
 			/*
 			 * If a signal was successfully delivered, the
 			 * saved sigmask is in its frame, and we can
-			 * clear the TIF_RESTORE_SIGMASK flag.
+			 * clear the TS_RESTORE_SIGMASK flag.
 			 */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
-
+			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 			tracehook_signal_handler(signr, &info, &ka, regs, 0);
 			return 1;
 		}
@@ -146,8 +144,8 @@ no_signal:
 	}
 
 	/* No signal to deliver -- put the saved sigmask back */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
+	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
+		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
 	}
 
@@ -176,6 +174,7 @@ sys_sigsuspend(old_sigset_t mask,
 	while (1) {
 		current->state = TASK_INTERRUPTIBLE;
 		schedule();
+		set_restore_sigmask();
 		regs->pc += 4;	/* because sys_sigreturn decrements the pc */
 		if (do_signal(regs, &saveset)) {
 			/* pc now points at signal handler. Need to decrement
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 160db1003cfb..983e0792d5f3 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -122,7 +122,9 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
 	stack_start.start_kernel_fn = start_secondary;
 
-	flush_cache_all();
+	flush_icache_range((unsigned long)&stack_start,
+			   (unsigned long)&stack_start + sizeof(stack_start));
+	wmb();
 
 	plat_start_cpu(cpu, (unsigned long)_stext);
 
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
index 0838942b7083..9b0b633b6c92 100644
--- a/arch/sh/kernel/topology.c
+++ b/arch/sh/kernel/topology.c
@@ -16,6 +16,32 @@
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
+cpumask_t cpu_core_map[NR_CPUS];
+
+static cpumask_t cpu_coregroup_map(unsigned int cpu)
+{
+	/*
+	 * Presently all SH-X3 SMP cores are multi-cores, so just keep it
+	 * simple until we have a method for determining topology..
+	 */
+	return cpu_possible_map;
+}
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+	return &cpu_core_map[cpu];
+}
+
+int arch_update_cpu_topology(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu)
+		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+
+	return 0;
+}
+
 static int __init topology_init(void)
 {
 	int i, ret;
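Until real topology detection exists, every CPU's coregroup above is simply cpu_possible_map, snapshotted into cpu_core_map[] by arch_update_cpu_topology(). A hypothetical debug dump of what the scheduler would see through cpu_coregroup_mask() (example_* name is mine):

	static void example_dump_coregroups(void)
	{
		char buf[64];
		unsigned int cpu;

		arch_update_cpu_topology();	/* refresh cpu_core_map[] */
		for_each_possible_cpu(cpu) {
			cpumask_scnprintf(buf, sizeof(buf), cpu_coregroup_mask(cpu));
			printk(KERN_DEBUG "CPU%u coregroup: %s\n", cpu, buf);
		}
	}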
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index a8396f36bd14..7b036339dc92 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -9,8 +9,8 @@
 #include <asm/unwinder.h>
 #include <asm/system.h>
 
-#ifdef CONFIG_BUG
-void handle_BUG(struct pt_regs *regs)
+#ifdef CONFIG_GENERIC_BUG
+static void handle_BUG(struct pt_regs *regs)
 {
 	const struct bug_entry *bug;
 	unsigned long bugaddr = regs->pc;
@@ -81,7 +81,7 @@ BUILD_TRAP_HANDLER(bug)
 		       SIGTRAP) == NOTIFY_STOP)
 		return;
 
-#ifdef CONFIG_BUG
+#ifdef CONFIG_GENERIC_BUG
 	if (__kernel_text_address(instruction_pointer(regs))) {
 		insn_size_t insn = *(insn_size_t *)instruction_pointer(regs);
 		if (insn == TRAPA_BUG_OPCODE)
@@ -95,9 +95,11 @@ BUILD_TRAP_HANDLER(bug)
 
 BUILD_TRAP_HANDLER(nmi)
 {
+	unsigned int cpu = smp_processor_id();
 	TRAP_HANDLER_DECL;
 
 	nmi_enter();
+	nmi_count(cpu)++;
 
 	switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) {
 	case NOTIFY_OK:
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 7a2ee3a6b8e7..3da5a125d884 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -25,6 +25,7 @@
 #include <linux/kexec.h>
 #include <linux/limits.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/sysfs.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -68,61 +69,49 @@ static const char *se_usermode_action[] = {
 	"signal+warn"
 };
 
-static int
-proc_alignment_read(char *page, char **start, off_t off, int count, int *eof,
-		    void *data)
+static int alignment_proc_show(struct seq_file *m, void *v)
 {
-	char *p = page;
-	int len;
-
-	p += sprintf(p, "User:\t\t%lu\n", se_user);
-	p += sprintf(p, "System:\t\t%lu\n", se_sys);
-	p += sprintf(p, "Half:\t\t%lu\n", se_half);
-	p += sprintf(p, "Word:\t\t%lu\n", se_word);
-	p += sprintf(p, "DWord:\t\t%lu\n", se_dword);
-	p += sprintf(p, "Multi:\t\t%lu\n", se_multi);
-	p += sprintf(p, "User faults:\t%i (%s)\n", se_usermode,
+	seq_printf(m, "User:\t\t%lu\n", se_user);
+	seq_printf(m, "System:\t\t%lu\n", se_sys);
+	seq_printf(m, "Half:\t\t%lu\n", se_half);
+	seq_printf(m, "Word:\t\t%lu\n", se_word);
+	seq_printf(m, "DWord:\t\t%lu\n", se_dword);
+	seq_printf(m, "Multi:\t\t%lu\n", se_multi);
+	seq_printf(m, "User faults:\t%i (%s)\n", se_usermode,
 			se_usermode_action[se_usermode]);
-	p += sprintf(p, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
+	seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
 			se_kernmode_warn ? "+warn" : "");
-
-	len = (p - page) - off;
-	if (len < 0)
-		len = 0;
-
-	*eof = (len <= count) ? 1 : 0;
-	*start = page + off;
-
-	return len;
+	return 0;
 }
 
-static int proc_alignment_write(struct file *file, const char __user *buffer,
-				unsigned long count, void *data)
+static int alignment_proc_open(struct inode *inode, struct file *file)
 {
-	char mode;
-
-	if (count > 0) {
-		if (get_user(mode, buffer))
-			return -EFAULT;
-		if (mode >= '0' && mode <= '5')
-			se_usermode = mode - '0';
-	}
-	return count;
+	return single_open(file, alignment_proc_show, NULL);
 }
 
-static int proc_alignment_kern_write(struct file *file, const char __user *buffer,
-				     unsigned long count, void *data)
+static ssize_t alignment_proc_write(struct file *file,
+		const char __user *buffer, size_t count, loff_t *pos)
 {
+	int *data = PDE(file->f_path.dentry->d_inode)->data;
 	char mode;
 
 	if (count > 0) {
 		if (get_user(mode, buffer))
 			return -EFAULT;
-		if (mode >= '0' && mode <= '1')
-			se_kernmode_warn = mode - '0';
+		if (mode >= '0' && mode <= '5')
+			*data = mode - '0';
 	}
 	return count;
 }
+
+static const struct file_operations alignment_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= alignment_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= alignment_proc_write,
+};
 #endif
 
 static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
@@ -945,14 +934,9 @@ void __init trap_init(void)
 	set_exception_table_evt(0x800, do_reserved_inst);
 	set_exception_table_evt(0x820, do_illegal_slot_inst);
 #elif defined(CONFIG_SH_FPU)
-#ifdef CONFIG_CPU_SUBTYPE_SHX3
-	set_exception_table_evt(0xd80, fpu_state_restore_trap_handler);
-	set_exception_table_evt(0xda0, fpu_state_restore_trap_handler);
-#else
 	set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
 	set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
 #endif
-#endif
 
 #ifdef CONFIG_CPU_SH2
 	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
@@ -1011,20 +995,16 @@ static int __init alignment_init(void)
 	if (!dir)
 		return -ENOMEM;
 
-	res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, dir);
+	res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir,
+			       &alignment_proc_fops, &se_usermode);
 	if (!res)
 		return -ENOMEM;
 
-	res->read_proc = proc_alignment_read;
-	res->write_proc = proc_alignment_write;
-
-	res = create_proc_entry("kernel_alignment", S_IWUSR | S_IRUGO, dir);
+	res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir,
+			       &alignment_proc_fops, &se_kernmode_warn);
 	if (!res)
 		return -ENOMEM;
 
-	res->read_proc = proc_alignment_read;
-	res->write_proc = proc_alignment_kern_write;
-
 	return 0;
 }
 
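The /proc/cpu/alignment conversion above is the standard seq_file single_open idiom, with proc_create_data() stashing a pointer to the variable each node controls so one write handler serves both files. The same pattern in miniature, for a hypothetical read-only counter node (all example_* names are mine):

	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static unsigned long example_count;

	static int example_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "Count:\t%lu\n", example_count);
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_show, NULL);
	}

	static const struct file_operations example_proc_fops = {
		.owner		= THIS_MODULE,
		.open		= example_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	/* registered with: proc_create("example", S_IRUGO, NULL, &example_proc_fops); */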