diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-09 22:03:16 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-09 22:03:16 -0500 |
| commit | 3a43aaa31790c36b69ebf8a6396f37fade86b531 (patch) | |
| tree | 7c7f8da6219d546f2b44534cb7be1fb5591d6ac4 /arch/sh/kernel/cpu | |
| parent | aed886ce777590eac87f7ce2897d9f8357754331 (diff) | |
| parent | 6a5a0b9139b19dd1a107870269a35bc9cf18d2dc (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (137 commits)
sh: include empty zero page in romImage
sh: Make associative cache writes fatal on all SH-4A parts.
sh: Drop associative writes for SH-4 cache flushes.
sh: Partial revert of copy/clear_user_highpage() optimizations.
sh: Add default uImage rule for se7724, ap325rxa, and migor.
sh: allow runtime pm without suspend/resume callbacks
sh: mach-ecovec24: Remove un-defined settings for VPU
sh: mach-ecovec24: LCDC drive ability become high
sh: fix sh7724 VEU3F resource size
serial: sh-sci: Fix too early port disabling.
sh: pfc: pr_info() -> pr_debug() cleanups.
sh: pfc: Convert from ctrl_xxx() to __raw_xxx() I/O routines.
sh: Improve kfr2r09 serial port setup code
sh: Break out SuperH PFC code
sh: Move KEYSC header file
sh: convert /proc/cpu/aligmnent, /proc/cpu/kernel_alignment to seq_file
sh: Add CPG save/restore code for sh7724 R-standby
sh: Add SDHI power control support to Ecovec
mfd: Add power control platform data to SDHI driver
sh: mach-ecovec24: modify address map
...
Diffstat (limited to 'arch/sh/kernel/cpu')
| -rw-r--r-- | arch/sh/kernel/cpu/Makefile | 1 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/init.c | 28 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/sh2a/fpu.c | 27 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/sh3/entry.S | 33 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/sh4/Makefile | 8 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/sh4/fpu.c | 28 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/sh4/perf_event.c | 253 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/sh4a/Makefile | 1 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 2 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/sh4a/perf_event.c | 269 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/sh4a/setup-sh7724.c | 264 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/sh4a/setup-shx3.c | 45 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/sh4a/smp-shx3.c | 37 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/sh5/entry.S | 2 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/shmobile/cpuidle.c | 42 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/shmobile/pm.c | 117 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/shmobile/pm_runtime.c | 17 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/shmobile/sleep.S | 344 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/ubc.S | 59 |
19 files changed, 1202 insertions, 375 deletions
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile index 3d6b9312dc47..d97c803719ec 100644 --- a/arch/sh/kernel/cpu/Makefile +++ b/arch/sh/kernel/cpu/Makefile | |||
| @@ -15,7 +15,6 @@ obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/ | |||
| 15 | 15 | ||
| 16 | # Common interfaces. | 16 | # Common interfaces. |
| 17 | 17 | ||
| 18 | obj-$(CONFIG_UBC_WAKEUP) += ubc.o | ||
| 19 | obj-$(CONFIG_SH_ADC) += adc.o | 18 | obj-$(CONFIG_SH_ADC) += adc.o |
| 20 | obj-$(CONFIG_SH_CLK_CPG) += clock-cpg.o | 19 | obj-$(CONFIG_SH_CLK_CPG) += clock-cpg.o |
| 21 | 20 | ||
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c index e932ebef4738..89b4b76c0d76 100644 --- a/arch/sh/kernel/cpu/init.c +++ b/arch/sh/kernel/cpu/init.c | |||
| @@ -75,16 +75,11 @@ static void __init expmask_init(void) | |||
| 75 | /* | 75 | /* |
| 76 | * Future proofing. | 76 | * Future proofing. |
| 77 | * | 77 | * |
| 78 | * Disable support for slottable sleep instruction | 78 | * Disable support for slottable sleep instruction, non-nop |
| 79 | * and non-nop instructions in the rte delay slot. | 79 | * instructions in the rte delay slot, and associative writes to |
| 80 | * the memory-mapped cache array. | ||
| 80 | */ | 81 | */ |
| 81 | expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP); | 82 | expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW); |
| 82 | |||
| 83 | /* | ||
| 84 | * Enable associative writes to the memory-mapped cache array | ||
| 85 | * until the cache flush ops have been rewritten. | ||
| 86 | */ | ||
| 87 | expmask |= EXPMASK_MMCAW; | ||
| 88 | 83 | ||
| 89 | __raw_writel(expmask, EXPMASK); | 84 | __raw_writel(expmask, EXPMASK); |
| 90 | ctrl_barrier(); | 85 | ctrl_barrier(); |
| @@ -311,12 +306,12 @@ asmlinkage void __init sh_cpu_init(void) | |||
| 311 | if (fpu_disabled) { | 306 | if (fpu_disabled) { |
| 312 | printk("FPU Disabled\n"); | 307 | printk("FPU Disabled\n"); |
| 313 | current_cpu_data.flags &= ~CPU_HAS_FPU; | 308 | current_cpu_data.flags &= ~CPU_HAS_FPU; |
| 314 | disable_fpu(); | ||
| 315 | } | 309 | } |
| 316 | 310 | ||
| 317 | /* FPU initialization */ | 311 | /* FPU initialization */ |
| 312 | disable_fpu(); | ||
| 318 | if ((current_cpu_data.flags & CPU_HAS_FPU)) { | 313 | if ((current_cpu_data.flags & CPU_HAS_FPU)) { |
| 319 | clear_thread_flag(TIF_USEDFPU); | 314 | current_thread_info()->status &= ~TS_USEDFPU; |
| 320 | clear_used_math(); | 315 | clear_used_math(); |
| 321 | } | 316 | } |
| 322 | 317 | ||
| @@ -338,17 +333,6 @@ asmlinkage void __init sh_cpu_init(void) | |||
| 338 | } | 333 | } |
| 339 | #endif | 334 | #endif |
| 340 | 335 | ||
| 341 | /* | ||
| 342 | * Some brain-damaged loaders decided it would be a good idea to put | ||
| 343 | * the UBC to sleep. This causes some issues when it comes to things | ||
| 344 | * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So .. | ||
| 345 | * we wake it up and hope that all is well. | ||
| 346 | */ | ||
| 347 | #ifdef CONFIG_SUPERH32 | ||
| 348 | if (raw_smp_processor_id() == 0) | ||
| 349 | ubc_wakeup(); | ||
| 350 | #endif | ||
| 351 | |||
| 352 | speculative_execution_init(); | 336 | speculative_execution_init(); |
| 353 | expmask_init(); | 337 | expmask_init(); |
| 354 | } | 338 | } |
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c index 6df2fb98eb30..d395ce5740e7 100644 --- a/arch/sh/kernel/cpu/sh2a/fpu.c +++ b/arch/sh/kernel/cpu/sh2a/fpu.c | |||
| @@ -25,14 +25,12 @@ | |||
| 25 | 25 | ||
| 26 | /* | 26 | /* |
| 27 | * Save FPU registers onto task structure. | 27 | * Save FPU registers onto task structure. |
| 28 | * Assume called with FPU enabled (SR.FD=0). | ||
| 29 | */ | 28 | */ |
| 30 | void | 29 | void |
| 31 | save_fpu(struct task_struct *tsk, struct pt_regs *regs) | 30 | save_fpu(struct task_struct *tsk) |
| 32 | { | 31 | { |
| 33 | unsigned long dummy; | 32 | unsigned long dummy; |
| 34 | 33 | ||
| 35 | clear_tsk_thread_flag(tsk, TIF_USEDFPU); | ||
| 36 | enable_fpu(); | 34 | enable_fpu(); |
| 37 | asm volatile("sts.l fpul, @-%0\n\t" | 35 | asm volatile("sts.l fpul, @-%0\n\t" |
| 38 | "sts.l fpscr, @-%0\n\t" | 36 | "sts.l fpscr, @-%0\n\t" |
| @@ -60,7 +58,6 @@ save_fpu(struct task_struct *tsk, struct pt_regs *regs) | |||
| 60 | : "memory"); | 58 | : "memory"); |
| 61 | 59 | ||
| 62 | disable_fpu(); | 60 | disable_fpu(); |
| 63 | release_fpu(regs); | ||
| 64 | } | 61 | } |
| 65 | 62 | ||
| 66 | static void | 63 | static void |
| @@ -598,31 +595,31 @@ BUILD_TRAP_HANDLER(fpu_error) | |||
| 598 | struct task_struct *tsk = current; | 595 | struct task_struct *tsk = current; |
| 599 | TRAP_HANDLER_DECL; | 596 | TRAP_HANDLER_DECL; |
| 600 | 597 | ||
| 601 | save_fpu(tsk, regs); | 598 | __unlazy_fpu(tsk, regs); |
| 602 | if (ieee_fpe_handler(regs)) { | 599 | if (ieee_fpe_handler(regs)) { |
| 603 | tsk->thread.fpu.hard.fpscr &= | 600 | tsk->thread.fpu.hard.fpscr &= |
| 604 | ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); | 601 | ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); |
| 605 | grab_fpu(regs); | 602 | grab_fpu(regs); |
| 606 | restore_fpu(tsk); | 603 | restore_fpu(tsk); |
| 607 | set_tsk_thread_flag(tsk, TIF_USEDFPU); | 604 | task_thread_info(tsk)->status |= TS_USEDFPU; |
| 608 | return; | 605 | return; |
| 609 | } | 606 | } |
| 610 | 607 | ||
| 611 | force_sig(SIGFPE, tsk); | 608 | force_sig(SIGFPE, tsk); |
| 612 | } | 609 | } |
| 613 | 610 | ||
| 614 | BUILD_TRAP_HANDLER(fpu_state_restore) | 611 | void fpu_state_restore(struct pt_regs *regs) |
| 615 | { | 612 | { |
| 616 | struct task_struct *tsk = current; | 613 | struct task_struct *tsk = current; |
| 617 | TRAP_HANDLER_DECL; | ||
| 618 | 614 | ||
| 619 | grab_fpu(regs); | 615 | grab_fpu(regs); |
| 620 | if (!user_mode(regs)) { | 616 | if (unlikely(!user_mode(regs))) { |
| 621 | printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); | 617 | printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); |
| 618 | BUG(); | ||
| 622 | return; | 619 | return; |
| 623 | } | 620 | } |
| 624 | 621 | ||
| 625 | if (used_math()) { | 622 | if (likely(used_math())) { |
| 626 | /* Using the FPU again. */ | 623 | /* Using the FPU again. */ |
| 627 | restore_fpu(tsk); | 624 | restore_fpu(tsk); |
| 628 | } else { | 625 | } else { |
| @@ -630,5 +627,13 @@ BUILD_TRAP_HANDLER(fpu_state_restore) | |||
| 630 | fpu_init(); | 627 | fpu_init(); |
| 631 | set_used_math(); | 628 | set_used_math(); |
| 632 | } | 629 | } |
| 633 | set_tsk_thread_flag(tsk, TIF_USEDFPU); | 630 | task_thread_info(tsk)->status |= TS_USEDFPU; |
| 631 | tsk->fpu_counter++; | ||
| 632 | } | ||
| 633 | |||
| 634 | BUILD_TRAP_HANDLER(fpu_state_restore) | ||
| 635 | { | ||
| 636 | TRAP_HANDLER_DECL; | ||
| 637 | |||
| 638 | fpu_state_restore(regs); | ||
| 634 | } | 639 | } |
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index bb407ef0b91e..3f7e2a22c7c2 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S | |||
| @@ -297,41 +297,8 @@ ENTRY(vbr_base) | |||
| 297 | ! | 297 | ! |
| 298 | .balign 256,0,256 | 298 | .balign 256,0,256 |
| 299 | general_exception: | 299 | general_exception: |
| 300 | #ifndef CONFIG_CPU_SUBTYPE_SHX3 | ||
| 301 | bra handle_exception | 300 | bra handle_exception |
| 302 | sts pr, k3 ! save original pr value in k3 | 301 | sts pr, k3 ! save original pr value in k3 |
| 303 | #else | ||
| 304 | mov.l 1f, k4 | ||
| 305 | mov.l @k4, k4 | ||
| 306 | |||
| 307 | ! Is EXPEVT larger than 0x800? | ||
| 308 | mov #0x8, k0 | ||
| 309 | shll8 k0 | ||
| 310 | cmp/hs k0, k4 | ||
| 311 | bf 0f | ||
| 312 | |||
| 313 | ! then add 0x580 (k2 is 0xd80 or 0xda0) | ||
| 314 | mov #0x58, k0 | ||
| 315 | shll2 k0 | ||
| 316 | shll2 k0 | ||
| 317 | add k0, k4 | ||
| 318 | 0: | ||
| 319 | ! Setup stack and save DSP context (k0 contains original r15 on return) | ||
| 320 | bsr prepare_stack | ||
| 321 | nop | ||
| 322 | |||
| 323 | ! Save registers / Switch to bank 0 | ||
| 324 | mov k4, k2 ! keep vector in k2 | ||
| 325 | mov.l 1f, k4 ! SR bits to clear in k4 | ||
| 326 | bsr save_regs ! needs original pr value in k3 | ||
| 327 | nop | ||
| 328 | |||
| 329 | bra handle_exception_special | ||
| 330 | nop | ||
| 331 | |||
| 332 | .align 2 | ||
| 333 | 1: .long EXPEVT | ||
| 334 | #endif | ||
| 335 | 302 | ||
| 336 | ! prepare_stack() | 303 | ! prepare_stack() |
| 337 | ! - roll back gRB | 304 | ! - roll back gRB |
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile index 203b18347b83..3a1dbc709831 100644 --- a/arch/sh/kernel/cpu/sh4/Makefile +++ b/arch/sh/kernel/cpu/sh4/Makefile | |||
| @@ -9,6 +9,11 @@ obj-$(CONFIG_HIBERNATION) += $(addprefix ../sh3/, swsusp.o) | |||
| 9 | obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o | 9 | obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o |
| 10 | obj-$(CONFIG_SH_STORE_QUEUES) += sq.o | 10 | obj-$(CONFIG_SH_STORE_QUEUES) += sq.o |
| 11 | 11 | ||
| 12 | # Perf events | ||
| 13 | perf-$(CONFIG_CPU_SUBTYPE_SH7750) := perf_event.o | ||
| 14 | perf-$(CONFIG_CPU_SUBTYPE_SH7750S) := perf_event.o | ||
| 15 | perf-$(CONFIG_CPU_SUBTYPE_SH7091) := perf_event.o | ||
| 16 | |||
| 12 | # CPU subtype setup | 17 | # CPU subtype setup |
| 13 | obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o | 18 | obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o |
| 14 | obj-$(CONFIG_CPU_SUBTYPE_SH7750R) += setup-sh7750.o | 19 | obj-$(CONFIG_CPU_SUBTYPE_SH7750R) += setup-sh7750.o |
| @@ -27,4 +32,5 @@ endif | |||
| 27 | # Additional clocks by subtype | 32 | # Additional clocks by subtype |
| 28 | clock-$(CONFIG_CPU_SUBTYPE_SH4_202) += clock-sh4-202.o | 33 | clock-$(CONFIG_CPU_SUBTYPE_SH4_202) += clock-sh4-202.o |
| 29 | 34 | ||
| 30 | obj-y += $(clock-y) | 35 | obj-y += $(clock-y) |
| 36 | obj-$(CONFIG_PERF_EVENTS) += $(perf-y) | ||
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c index e3ea5411da6d..e97857aec8a0 100644 --- a/arch/sh/kernel/cpu/sh4/fpu.c +++ b/arch/sh/kernel/cpu/sh4/fpu.c | |||
| @@ -41,13 +41,11 @@ static unsigned int fpu_exception_flags; | |||
| 41 | 41 | ||
| 42 | /* | 42 | /* |
| 43 | * Save FPU registers onto task structure. | 43 | * Save FPU registers onto task structure. |
| 44 | * Assume called with FPU enabled (SR.FD=0). | ||
| 45 | */ | 44 | */ |
| 46 | void save_fpu(struct task_struct *tsk, struct pt_regs *regs) | 45 | void save_fpu(struct task_struct *tsk) |
| 47 | { | 46 | { |
| 48 | unsigned long dummy; | 47 | unsigned long dummy; |
| 49 | 48 | ||
| 50 | clear_tsk_thread_flag(tsk, TIF_USEDFPU); | ||
| 51 | enable_fpu(); | 49 | enable_fpu(); |
| 52 | asm volatile ("sts.l fpul, @-%0\n\t" | 50 | asm volatile ("sts.l fpul, @-%0\n\t" |
| 53 | "sts.l fpscr, @-%0\n\t" | 51 | "sts.l fpscr, @-%0\n\t" |
| @@ -92,7 +90,6 @@ void save_fpu(struct task_struct *tsk, struct pt_regs *regs) | |||
| 92 | :"memory"); | 90 | :"memory"); |
| 93 | 91 | ||
| 94 | disable_fpu(); | 92 | disable_fpu(); |
| 95 | release_fpu(regs); | ||
| 96 | } | 93 | } |
| 97 | 94 | ||
| 98 | static void restore_fpu(struct task_struct *tsk) | 95 | static void restore_fpu(struct task_struct *tsk) |
| @@ -285,7 +282,6 @@ static int ieee_fpe_handler(struct pt_regs *regs) | |||
| 285 | /* fcnvsd */ | 282 | /* fcnvsd */ |
| 286 | struct task_struct *tsk = current; | 283 | struct task_struct *tsk = current; |
| 287 | 284 | ||
| 288 | save_fpu(tsk, regs); | ||
| 289 | if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR)) | 285 | if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR)) |
| 290 | /* FPU error */ | 286 | /* FPU error */ |
| 291 | denormal_to_double(&tsk->thread.fpu.hard, | 287 | denormal_to_double(&tsk->thread.fpu.hard, |
| @@ -462,7 +458,7 @@ BUILD_TRAP_HANDLER(fpu_error) | |||
| 462 | struct task_struct *tsk = current; | 458 | struct task_struct *tsk = current; |
| 463 | TRAP_HANDLER_DECL; | 459 | TRAP_HANDLER_DECL; |
| 464 | 460 | ||
| 465 | save_fpu(tsk, regs); | 461 | __unlazy_fpu(tsk, regs); |
| 466 | fpu_exception_flags = 0; | 462 | fpu_exception_flags = 0; |
| 467 | if (ieee_fpe_handler(regs)) { | 463 | if (ieee_fpe_handler(regs)) { |
| 468 | tsk->thread.fpu.hard.fpscr &= | 464 | tsk->thread.fpu.hard.fpscr &= |
| @@ -473,7 +469,7 @@ BUILD_TRAP_HANDLER(fpu_error) | |||
| 473 | tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10); | 469 | tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10); |
| 474 | grab_fpu(regs); | 470 | grab_fpu(regs); |
| 475 | restore_fpu(tsk); | 471 | restore_fpu(tsk); |
| 476 | set_tsk_thread_flag(tsk, TIF_USEDFPU); | 472 | task_thread_info(tsk)->status |= TS_USEDFPU; |
| 477 | if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) & | 473 | if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) & |
| 478 | (fpu_exception_flags >> 2)) == 0) { | 474 | (fpu_exception_flags >> 2)) == 0) { |
| 479 | return; | 475 | return; |
| @@ -483,18 +479,18 @@ BUILD_TRAP_HANDLER(fpu_error) | |||
| 483 | force_sig(SIGFPE, tsk); | 479 | force_sig(SIGFPE, tsk); |
| 484 | } | 480 | } |
| 485 | 481 | ||
| 486 | BUILD_TRAP_HANDLER(fpu_state_restore) | 482 | void fpu_state_restore(struct pt_regs *regs) |
| 487 | { | 483 | { |
| 488 | struct task_struct *tsk = current; | 484 | struct task_struct *tsk = current; |
| 489 | TRAP_HANDLER_DECL; | ||
| 490 | 485 | ||
| 491 | grab_fpu(regs); | 486 | grab_fpu(regs); |
| 492 | if (!user_mode(regs)) { | 487 | if (unlikely(!user_mode(regs))) { |
| 493 | printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); | 488 | printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); |
| 489 | BUG(); | ||
| 494 | return; | 490 | return; |
| 495 | } | 491 | } |
| 496 | 492 | ||
| 497 | if (used_math()) { | 493 | if (likely(used_math())) { |
| 498 | /* Using the FPU again. */ | 494 | /* Using the FPU again. */ |
| 499 | restore_fpu(tsk); | 495 | restore_fpu(tsk); |
| 500 | } else { | 496 | } else { |
| @@ -502,5 +498,13 @@ BUILD_TRAP_HANDLER(fpu_state_restore) | |||
| 502 | fpu_init(); | 498 | fpu_init(); |
| 503 | set_used_math(); | 499 | set_used_math(); |
| 504 | } | 500 | } |
| 505 | set_tsk_thread_flag(tsk, TIF_USEDFPU); | 501 | task_thread_info(tsk)->status |= TS_USEDFPU; |
| 502 | tsk->fpu_counter++; | ||
| 503 | } | ||
| 504 | |||
| 505 | BUILD_TRAP_HANDLER(fpu_state_restore) | ||
| 506 | { | ||
| 507 | TRAP_HANDLER_DECL; | ||
| 508 | |||
| 509 | fpu_state_restore(regs); | ||
| 506 | } | 510 | } |
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c new file mode 100644 index 000000000000..7f9ecc9c2d02 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/perf_event.c | |||
| @@ -0,0 +1,253 @@ | |||
| 1 | /* | ||
| 2 | * Performance events support for SH7750-style performance counters | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009 Paul Mundt | ||
| 5 | * | ||
| 6 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 7 | * License. See the file "COPYING" in the main directory of this archive | ||
| 8 | * for more details. | ||
| 9 | */ | ||
| 10 | #include <linux/kernel.h> | ||
| 11 | #include <linux/init.h> | ||
| 12 | #include <linux/io.h> | ||
| 13 | #include <linux/irq.h> | ||
| 14 | #include <linux/perf_event.h> | ||
| 15 | #include <asm/processor.h> | ||
| 16 | |||
| 17 | #define PM_CR_BASE 0xff000084 /* 16-bit */ | ||
| 18 | #define PM_CTR_BASE 0xff100004 /* 32-bit */ | ||
| 19 | |||
| 20 | #define PMCR(n) (PM_CR_BASE + ((n) * 0x04)) | ||
| 21 | #define PMCTRH(n) (PM_CTR_BASE + 0x00 + ((n) * 0x08)) | ||
| 22 | #define PMCTRL(n) (PM_CTR_BASE + 0x04 + ((n) * 0x08)) | ||
| 23 | |||
| 24 | #define PMCR_PMM_MASK 0x0000003f | ||
| 25 | |||
| 26 | #define PMCR_CLKF 0x00000100 | ||
| 27 | #define PMCR_PMCLR 0x00002000 | ||
| 28 | #define PMCR_PMST 0x00004000 | ||
| 29 | #define PMCR_PMEN 0x00008000 | ||
| 30 | |||
| 31 | static struct sh_pmu sh7750_pmu; | ||
| 32 | |||
| 33 | /* | ||
| 34 | * There are a number of events supported by each counter (33 in total). | ||
| 35 | * Since we have 2 counters, each counter will take the event code as it | ||
| 36 | * corresponds to the PMCR PMM setting. Each counter can be configured | ||
| 37 | * independently. | ||
| 38 | * | ||
| 39 | * Event Code Description | ||
| 40 | * ---------- ----------- | ||
| 41 | * | ||
| 42 | * 0x01 Operand read access | ||
| 43 | * 0x02 Operand write access | ||
| 44 | * 0x03 UTLB miss | ||
| 45 | * 0x04 Operand cache read miss | ||
| 46 | * 0x05 Operand cache write miss | ||
| 47 | * 0x06 Instruction fetch (w/ cache) | ||
| 48 | * 0x07 Instruction TLB miss | ||
| 49 | * 0x08 Instruction cache miss | ||
| 50 | * 0x09 All operand accesses | ||
| 51 | * 0x0a All instruction accesses | ||
| 52 | * 0x0b OC RAM operand access | ||
| 53 | * 0x0d On-chip I/O space access | ||
| 54 | * 0x0e Operand access (r/w) | ||
| 55 | * 0x0f Operand cache miss (r/w) | ||
| 56 | * 0x10 Branch instruction | ||
| 57 | * 0x11 Branch taken | ||
| 58 | * 0x12 BSR/BSRF/JSR | ||
| 59 | * 0x13 Instruction execution | ||
| 60 | * 0x14 Instruction execution in parallel | ||
| 61 | * 0x15 FPU Instruction execution | ||
| 62 | * 0x16 Interrupt | ||
| 63 | * 0x17 NMI | ||
| 64 | * 0x18 trapa instruction execution | ||
| 65 | * 0x19 UBCA match | ||
| 66 | * 0x1a UBCB match | ||
| 67 | * 0x21 Instruction cache fill | ||
| 68 | * 0x22 Operand cache fill | ||
| 69 | * 0x23 Elapsed time | ||
| 70 | * 0x24 Pipeline freeze by I-cache miss | ||
| 71 | * 0x25 Pipeline freeze by D-cache miss | ||
| 72 | * 0x27 Pipeline freeze by branch instruction | ||
| 73 | * 0x28 Pipeline freeze by CPU register | ||
| 74 | * 0x29 Pipeline freeze by FPU | ||
| 75 | */ | ||
| 76 | |||
| 77 | static const int sh7750_general_events[] = { | ||
| 78 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0023, | ||
| 79 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x000a, | ||
| 80 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0006, /* I-cache */ | ||
| 81 | [PERF_COUNT_HW_CACHE_MISSES] = 0x0008, /* I-cache */ | ||
| 82 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0010, | ||
| 83 | [PERF_COUNT_HW_BRANCH_MISSES] = -1, | ||
| 84 | [PERF_COUNT_HW_BUS_CYCLES] = -1, | ||
| 85 | }; | ||
| 86 | |||
| 87 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
| 88 | |||
| 89 | static const int sh7750_cache_events | ||
| 90 | [PERF_COUNT_HW_CACHE_MAX] | ||
| 91 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 92 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
| 93 | { | ||
| 94 | [ C(L1D) ] = { | ||
| 95 | [ C(OP_READ) ] = { | ||
| 96 | [ C(RESULT_ACCESS) ] = 0x0001, | ||
| 97 | [ C(RESULT_MISS) ] = 0x0004, | ||
| 98 | }, | ||
| 99 | [ C(OP_WRITE) ] = { | ||
| 100 | [ C(RESULT_ACCESS) ] = 0x0002, | ||
| 101 | [ C(RESULT_MISS) ] = 0x0005, | ||
| 102 | }, | ||
| 103 | [ C(OP_PREFETCH) ] = { | ||
| 104 | [ C(RESULT_ACCESS) ] = 0, | ||
| 105 | [ C(RESULT_MISS) ] = 0, | ||
| 106 | }, | ||
| 107 | }, | ||
| 108 | |||
| 109 | [ C(L1I) ] = { | ||
| 110 | [ C(OP_READ) ] = { | ||
| 111 | [ C(RESULT_ACCESS) ] = 0x0006, | ||
| 112 | [ C(RESULT_MISS) ] = 0x0008, | ||
| 113 | }, | ||
| 114 | [ C(OP_WRITE) ] = { | ||
| 115 | [ C(RESULT_ACCESS) ] = -1, | ||
| 116 | [ C(RESULT_MISS) ] = -1, | ||
| 117 | }, | ||
| 118 | [ C(OP_PREFETCH) ] = { | ||
| 119 | [ C(RESULT_ACCESS) ] = 0, | ||
| 120 | [ C(RESULT_MISS) ] = 0, | ||
| 121 | }, | ||
| 122 | }, | ||
| 123 | |||
| 124 | [ C(LL) ] = { | ||
| 125 | [ C(OP_READ) ] = { | ||
| 126 | [ C(RESULT_ACCESS) ] = 0, | ||
| 127 | [ C(RESULT_MISS) ] = 0, | ||
| 128 | }, | ||
| 129 | [ C(OP_WRITE) ] = { | ||
| 130 | [ C(RESULT_ACCESS) ] = 0, | ||
| 131 | [ C(RESULT_MISS) ] = 0, | ||
| 132 | }, | ||
| 133 | [ C(OP_PREFETCH) ] = { | ||
| 134 | [ C(RESULT_ACCESS) ] = 0, | ||
| 135 | [ C(RESULT_MISS) ] = 0, | ||
| 136 | }, | ||
| 137 | }, | ||
| 138 | |||
| 139 | [ C(DTLB) ] = { | ||
| 140 | [ C(OP_READ) ] = { | ||
| 141 | [ C(RESULT_ACCESS) ] = 0, | ||
| 142 | [ C(RESULT_MISS) ] = 0x0003, | ||
| 143 | }, | ||
| 144 | [ C(OP_WRITE) ] = { | ||
| 145 | [ C(RESULT_ACCESS) ] = 0, | ||
| 146 | [ C(RESULT_MISS) ] = 0, | ||
| 147 | }, | ||
| 148 | [ C(OP_PREFETCH) ] = { | ||
| 149 | [ C(RESULT_ACCESS) ] = 0, | ||
| 150 | [ C(RESULT_MISS) ] = 0, | ||
| 151 | }, | ||
| 152 | }, | ||
| 153 | |||
| 154 | [ C(ITLB) ] = { | ||
| 155 | [ C(OP_READ) ] = { | ||
| 156 | [ C(RESULT_ACCESS) ] = 0, | ||
| 157 | [ C(RESULT_MISS) ] = 0x0007, | ||
| 158 | }, | ||
| 159 | [ C(OP_WRITE) ] = { | ||
| 160 | [ C(RESULT_ACCESS) ] = -1, | ||
| 161 | [ C(RESULT_MISS) ] = -1, | ||
| 162 | }, | ||
| 163 | [ C(OP_PREFETCH) ] = { | ||
| 164 | [ C(RESULT_ACCESS) ] = -1, | ||
| 165 | [ C(RESULT_MISS) ] = -1, | ||
| 166 | }, | ||
| 167 | }, | ||
| 168 | |||
| 169 | [ C(BPU) ] = { | ||
| 170 | [ C(OP_READ) ] = { | ||
| 171 | [ C(RESULT_ACCESS) ] = -1, | ||
| 172 | [ C(RESULT_MISS) ] = -1, | ||
| 173 | }, | ||
| 174 | [ C(OP_WRITE) ] = { | ||
| 175 | [ C(RESULT_ACCESS) ] = -1, | ||
| 176 | [ C(RESULT_MISS) ] = -1, | ||
| 177 | }, | ||
| 178 | [ C(OP_PREFETCH) ] = { | ||
| 179 | [ C(RESULT_ACCESS) ] = -1, | ||
| 180 | [ C(RESULT_MISS) ] = -1, | ||
| 181 | }, | ||
| 182 | }, | ||
| 183 | }; | ||
| 184 | |||
| 185 | static int sh7750_event_map(int event) | ||
| 186 | { | ||
| 187 | return sh7750_general_events[event]; | ||
| 188 | } | ||
| 189 | |||
| 190 | static u64 sh7750_pmu_read(int idx) | ||
| 191 | { | ||
| 192 | return (u64)((u64)(__raw_readl(PMCTRH(idx)) & 0xffff) << 32) | | ||
| 193 | __raw_readl(PMCTRL(idx)); | ||
| 194 | } | ||
| 195 | |||
| 196 | static void sh7750_pmu_disable(struct hw_perf_event *hwc, int idx) | ||
| 197 | { | ||
| 198 | unsigned int tmp; | ||
| 199 | |||
| 200 | tmp = __raw_readw(PMCR(idx)); | ||
| 201 | tmp &= ~(PMCR_PMM_MASK | PMCR_PMEN); | ||
| 202 | __raw_writew(tmp, PMCR(idx)); | ||
| 203 | } | ||
| 204 | |||
| 205 | static void sh7750_pmu_enable(struct hw_perf_event *hwc, int idx) | ||
| 206 | { | ||
| 207 | __raw_writew(__raw_readw(PMCR(idx)) | PMCR_PMCLR, PMCR(idx)); | ||
| 208 | __raw_writew(hwc->config | PMCR_PMEN | PMCR_PMST, PMCR(idx)); | ||
| 209 | } | ||
| 210 | |||
| 211 | static void sh7750_pmu_disable_all(void) | ||
| 212 | { | ||
| 213 | int i; | ||
| 214 | |||
| 215 | for (i = 0; i < sh7750_pmu.num_events; i++) | ||
| 216 | __raw_writew(__raw_readw(PMCR(i)) & ~PMCR_PMEN, PMCR(i)); | ||
| 217 | } | ||
| 218 | |||
| 219 | static void sh7750_pmu_enable_all(void) | ||
| 220 | { | ||
| 221 | int i; | ||
| 222 | |||
| 223 | for (i = 0; i < sh7750_pmu.num_events; i++) | ||
| 224 | __raw_writew(__raw_readw(PMCR(i)) | PMCR_PMEN, PMCR(i)); | ||
| 225 | } | ||
| 226 | |||
| 227 | static struct sh_pmu sh7750_pmu = { | ||
| 228 | .name = "SH7750", | ||
| 229 | .num_events = 2, | ||
| 230 | .event_map = sh7750_event_map, | ||
| 231 | .max_events = ARRAY_SIZE(sh7750_general_events), | ||
| 232 | .raw_event_mask = PMCR_PMM_MASK, | ||
| 233 | .cache_events = &sh7750_cache_events, | ||
| 234 | .read = sh7750_pmu_read, | ||
| 235 | .disable = sh7750_pmu_disable, | ||
| 236 | .enable = sh7750_pmu_enable, | ||
| 237 | .disable_all = sh7750_pmu_disable_all, | ||
| 238 | .enable_all = sh7750_pmu_enable_all, | ||
| 239 | }; | ||
| 240 | |||
| 241 | static int __init sh7750_pmu_init(void) | ||
| 242 | { | ||
| 243 | /* | ||
| 244 | * Make sure this CPU actually has perf counters. | ||
| 245 | */ | ||
| 246 | if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) { | ||
| 247 | pr_notice("HW perf events unsupported, software events only.\n"); | ||
| 248 | return -ENODEV; | ||
| 249 | } | ||
| 250 | |||
| 251 | return register_sh_pmu(&sh7750_pmu); | ||
| 252 | } | ||
| 253 | arch_initcall(sh7750_pmu_init); | ||
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index 490d5dc9e372..33bab477d2e2 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile | |||
| @@ -44,3 +44,4 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o | |||
| 44 | obj-y += $(clock-y) | 44 | obj-y += $(clock-y) |
| 45 | obj-$(CONFIG_SMP) += $(smp-y) | 45 | obj-$(CONFIG_SMP) += $(smp-y) |
| 46 | obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) | 46 | obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) |
| 47 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o | ||
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index dfe9192be63e..9db743802f06 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c | |||
| @@ -152,7 +152,7 @@ struct clk div6_clks[] = { | |||
| 152 | SH_CLK_DIV6("fsia_clk", &div3_clk, FCLKACR, 0), | 152 | SH_CLK_DIV6("fsia_clk", &div3_clk, FCLKACR, 0), |
| 153 | SH_CLK_DIV6("fsib_clk", &div3_clk, FCLKBCR, 0), | 153 | SH_CLK_DIV6("fsib_clk", &div3_clk, FCLKBCR, 0), |
| 154 | SH_CLK_DIV6("irda_clk", &div3_clk, IRDACLKCR, 0), | 154 | SH_CLK_DIV6("irda_clk", &div3_clk, IRDACLKCR, 0), |
| 155 | SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, 0), | 155 | SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, CLK_ENABLE_ON_INIT), |
| 156 | }; | 156 | }; |
| 157 | 157 | ||
| 158 | #define R_CLK (&r_clk) | 158 | #define R_CLK (&r_clk) |
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c new file mode 100644 index 000000000000..eddc21973fa1 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/perf_event.c | |||
| @@ -0,0 +1,269 @@ | |||
| 1 | /* | ||
| 2 | * Performance events support for SH-4A performance counters | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009 Paul Mundt | ||
| 5 | * | ||
| 6 | * This file is subject to the terms and conditions of the GNU General Public | ||
| 7 | * License. See the file "COPYING" in the main directory of this archive | ||
| 8 | * for more details. | ||
| 9 | */ | ||
| 10 | #include <linux/kernel.h> | ||
| 11 | #include <linux/init.h> | ||
| 12 | #include <linux/io.h> | ||
| 13 | #include <linux/irq.h> | ||
| 14 | #include <linux/perf_event.h> | ||
| 15 | #include <asm/processor.h> | ||
| 16 | |||
| 17 | #define PPC_CCBR(idx) (0xff200800 + (sizeof(u32) * idx)) | ||
| 18 | #define PPC_PMCTR(idx) (0xfc100000 + (sizeof(u32) * idx)) | ||
| 19 | |||
| 20 | #define CCBR_CIT_MASK (0x7ff << 6) | ||
| 21 | #define CCBR_DUC (1 << 3) | ||
| 22 | #define CCBR_CMDS (1 << 1) | ||
| 23 | #define CCBR_PPCE (1 << 0) | ||
| 24 | |||
| 25 | #define PPC_PMCAT 0xfc100080 | ||
| 26 | |||
| 27 | #define PMCAT_OVF3 (1 << 27) | ||
| 28 | #define PMCAT_CNN3 (1 << 26) | ||
| 29 | #define PMCAT_CLR3 (1 << 25) | ||
| 30 | #define PMCAT_OVF2 (1 << 19) | ||
| 31 | #define PMCAT_CLR2 (1 << 17) | ||
| 32 | #define PMCAT_OVF1 (1 << 11) | ||
| 33 | #define PMCAT_CNN1 (1 << 10) | ||
| 34 | #define PMCAT_CLR1 (1 << 9) | ||
| 35 | #define PMCAT_OVF0 (1 << 3) | ||
| 36 | #define PMCAT_CLR0 (1 << 1) | ||
| 37 | |||
| 38 | static struct sh_pmu sh4a_pmu; | ||
| 39 | |||
| 40 | /* | ||
| 41 | * Supported raw event codes: | ||
| 42 | * | ||
| 43 | * Event Code Description | ||
| 44 | * ---------- ----------- | ||
| 45 | * | ||
| 46 | * 0x0000 number of elapsed cycles | ||
| 47 | * 0x0200 number of elapsed cycles in privileged mode | ||
| 48 | * 0x0280 number of elapsed cycles while SR.BL is asserted | ||
| 49 | * 0x0202 instruction execution | ||
| 50 | * 0x0203 instruction execution in parallel | ||
| 51 | * 0x0204 number of unconditional branches | ||
| 52 | * 0x0208 number of exceptions | ||
| 53 | * 0x0209 number of interrupts | ||
| 54 | * 0x0220 UTLB miss caused by instruction fetch | ||
| 55 | * 0x0222 UTLB miss caused by operand access | ||
| 56 | * 0x02a0 number of ITLB misses | ||
| 57 | * 0x0028 number of accesses to instruction memories | ||
| 58 | * 0x0029 number of accesses to instruction cache | ||
| 59 | * 0x002a instruction cache miss | ||
| 60 | * 0x022e number of access to instruction X/Y memory | ||
| 61 | * 0x0030 number of reads to operand memories | ||
| 62 | * 0x0038 number of writes to operand memories | ||
| 63 | * 0x0031 number of operand cache read accesses | ||
| 64 | * 0x0039 number of operand cache write accesses | ||
| 65 | * 0x0032 operand cache read miss | ||
| 66 | * 0x003a operand cache write miss | ||
| 67 | * 0x0236 number of reads to operand X/Y memory | ||
| 68 | * 0x023e number of writes to operand X/Y memory | ||
| 69 | * 0x0237 number of reads to operand U memory | ||
| 70 | * 0x023f number of writes to operand U memory | ||
| 71 | * 0x0337 number of U memory read buffer misses | ||
| 72 | * 0x02b4 number of wait cycles due to operand read access | ||
| 73 | * 0x02bc number of wait cycles due to operand write access | ||
| 74 | * 0x0033 number of wait cycles due to operand cache read miss | ||
| 75 | * 0x003b number of wait cycles due to operand cache write miss | ||
| 76 | */ | ||
| 77 | |||
| 78 | /* | ||
| 79 | * Special reserved bits used by hardware emulators, read values will | ||
| 80 | * vary, but writes must always be 0. | ||
| 81 | */ | ||
| 82 | #define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0)) | ||
| 83 | |||
| 84 | static const int sh4a_general_events[] = { | ||
| 85 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0000, | ||
| 86 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x0202, | ||
| 87 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0029, /* I-cache */ | ||
| 88 | [PERF_COUNT_HW_CACHE_MISSES] = 0x002a, /* I-cache */ | ||
| 89 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0204, | ||
| 90 | [PERF_COUNT_HW_BRANCH_MISSES] = -1, | ||
| 91 | [PERF_COUNT_HW_BUS_CYCLES] = -1, | ||
| 92 | }; | ||
| 93 | |||
| 94 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
| 95 | |||
| 96 | static const int sh4a_cache_events | ||
| 97 | [PERF_COUNT_HW_CACHE_MAX] | ||
| 98 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 99 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
| 100 | { | ||
| 101 | [ C(L1D) ] = { | ||
| 102 | [ C(OP_READ) ] = { | ||
| 103 | [ C(RESULT_ACCESS) ] = 0x0031, | ||
| 104 | [ C(RESULT_MISS) ] = 0x0032, | ||
| 105 | }, | ||
| 106 | [ C(OP_WRITE) ] = { | ||
| 107 | [ C(RESULT_ACCESS) ] = 0x0039, | ||
| 108 | [ C(RESULT_MISS) ] = 0x003a, | ||
| 109 | }, | ||
| 110 | [ C(OP_PREFETCH) ] = { | ||
| 111 | [ C(RESULT_ACCESS) ] = 0, | ||
| 112 | [ C(RESULT_MISS) ] = 0, | ||
| 113 | }, | ||
| 114 | }, | ||
| 115 | |||
| 116 | [ C(L1I) ] = { | ||
| 117 | [ C(OP_READ) ] = { | ||
| 118 | [ C(RESULT_ACCESS) ] = 0x0029, | ||
| 119 | [ C(RESULT_MISS) ] = 0x002a, | ||
| 120 | }, | ||
| 121 | [ C(OP_WRITE) ] = { | ||
| 122 | [ C(RESULT_ACCESS) ] = -1, | ||
| 123 | [ C(RESULT_MISS) ] = -1, | ||
| 124 | }, | ||
| 125 | [ C(OP_PREFETCH) ] = { | ||
| 126 | [ C(RESULT_ACCESS) ] = 0, | ||
| 127 | [ C(RESULT_MISS) ] = 0, | ||
| 128 | }, | ||
| 129 | }, | ||
| 130 | |||
| 131 | [ C(LL) ] = { | ||
| 132 | [ C(OP_READ) ] = { | ||
| 133 | [ C(RESULT_ACCESS) ] = 0x0030, | ||
| 134 | [ C(RESULT_MISS) ] = 0, | ||
| 135 | }, | ||
| 136 | [ C(OP_WRITE) ] = { | ||
| 137 | [ C(RESULT_ACCESS) ] = 0x0038, | ||
| 138 | [ C(RESULT_MISS) ] = 0, | ||
| 139 | }, | ||
| 140 | [ C(OP_PREFETCH) ] = { | ||
| 141 | [ C(RESULT_ACCESS) ] = 0, | ||
| 142 | [ C(RESULT_MISS) ] = 0, | ||
| 143 | }, | ||
| 144 | }, | ||
| 145 | |||
| 146 | [ C(DTLB) ] = { | ||
| 147 | [ C(OP_READ) ] = { | ||
| 148 | [ C(RESULT_ACCESS) ] = 0x0222, | ||
| 149 | [ C(RESULT_MISS) ] = 0x0220, | ||
| 150 | }, | ||
| 151 | [ C(OP_WRITE) ] = { | ||
| 152 | [ C(RESULT_ACCESS) ] = 0, | ||
| 153 | [ C(RESULT_MISS) ] = 0, | ||
| 154 | }, | ||
| 155 | [ C(OP_PREFETCH) ] = { | ||
| 156 | [ C(RESULT_ACCESS) ] = 0, | ||
| 157 | [ C(RESULT_MISS) ] = 0, | ||
| 158 | }, | ||
| 159 | }, | ||
| 160 | |||
| 161 | [ C(ITLB) ] = { | ||
| 162 | [ C(OP_READ) ] = { | ||
| 163 | [ C(RESULT_ACCESS) ] = 0, | ||
| 164 | [ C(RESULT_MISS) ] = 0x02a0, | ||
| 165 | }, | ||
| 166 | [ C(OP_WRITE) ] = { | ||
| 167 | [ C(RESULT_ACCESS) ] = -1, | ||
| 168 | [ C(RESULT_MISS) ] = -1, | ||
| 169 | }, | ||
| 170 | [ C(OP_PREFETCH) ] = { | ||
| 171 | [ C(RESULT_ACCESS) ] = -1, | ||
| 172 | [ C(RESULT_MISS) ] = -1, | ||
| 173 | }, | ||
| 174 | }, | ||
| 175 | |||
| 176 | [ C(BPU) ] = { | ||
| 177 | [ C(OP_READ) ] = { | ||
| 178 | [ C(RESULT_ACCESS) ] = -1, | ||
| 179 | [ C(RESULT_MISS) ] = -1, | ||
| 180 | }, | ||
| 181 | [ C(OP_WRITE) ] = { | ||
| 182 | [ C(RESULT_ACCESS) ] = -1, | ||
| 183 | [ C(RESULT_MISS) ] = -1, | ||
| 184 | }, | ||
| 185 | [ C(OP_PREFETCH) ] = { | ||
| 186 | [ C(RESULT_ACCESS) ] = -1, | ||
| 187 | [ C(RESULT_MISS) ] = -1, | ||
| 188 | }, | ||
| 189 | }, | ||
| 190 | }; | ||
| 191 | |||
| 192 | static int sh4a_event_map(int event) | ||
| 193 | { | ||
| 194 | return sh4a_general_events[event]; | ||
| 195 | } | ||
| 196 | |||
| 197 | static u64 sh4a_pmu_read(int idx) | ||
| 198 | { | ||
| 199 | return __raw_readl(PPC_PMCTR(idx)); | ||
| 200 | } | ||
| 201 | |||
| 202 | static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx) | ||
| 203 | { | ||
| 204 | unsigned int tmp; | ||
| 205 | |||
| 206 | tmp = __raw_readl(PPC_CCBR(idx)); | ||
| 207 | tmp &= ~(CCBR_CIT_MASK | CCBR_DUC); | ||
| 208 | __raw_writel(tmp, PPC_CCBR(idx)); | ||
| 209 | } | ||
| 210 | |||
| 211 | static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx) | ||
| 212 | { | ||
| 213 | unsigned int tmp; | ||
| 214 | |||
| 215 | tmp = __raw_readl(PPC_PMCAT); | ||
| 216 | tmp &= ~PMCAT_EMU_CLR_MASK; | ||
| 217 | tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0; | ||
| 218 | __raw_writel(tmp, PPC_PMCAT); | ||
| 219 | |||
| 220 | tmp = __raw_readl(PPC_CCBR(idx)); | ||
| 221 | tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE; | ||
| 222 | __raw_writel(tmp, PPC_CCBR(idx)); | ||
| 223 | |||
| 224 | __raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx)); | ||
| 225 | } | ||
| 226 | |||
| 227 | static void sh4a_pmu_disable_all(void) | ||
| 228 | { | ||
| 229 | int i; | ||
| 230 | |||
| 231 | for (i = 0; i < sh4a_pmu.num_events; i++) | ||
| 232 | __raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i)); | ||
| 233 | } | ||
| 234 | |||
| 235 | static void sh4a_pmu_enable_all(void) | ||
| 236 | { | ||
| 237 | int i; | ||
| 238 | |||
| 239 | for (i = 0; i < sh4a_pmu.num_events; i++) | ||
| 240 | __raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i)); | ||
| 241 | } | ||
| 242 | |||
| 243 | static struct sh_pmu sh4a_pmu = { | ||
| 244 | .name = "SH-4A", | ||
| 245 | .num_events = 2, | ||
| 246 | .event_map = sh4a_event_map, | ||
| 247 | .max_events = ARRAY_SIZE(sh4a_general_events), | ||
| 248 | .raw_event_mask = 0x3ff, | ||
| 249 | .cache_events = &sh4a_cache_events, | ||
| 250 | .read = sh4a_pmu_read, | ||
| 251 | .disable = sh4a_pmu_disable, | ||
| 252 | .enable = sh4a_pmu_enable, | ||
| 253 | .disable_all = sh4a_pmu_disable_all, | ||
| 254 | .enable_all = sh4a_pmu_enable_all, | ||
| 255 | }; | ||
| 256 | |||
| 257 | static int __init sh4a_pmu_init(void) | ||
| 258 | { | ||
| 259 | /* | ||
| 260 | * Make sure this CPU actually has perf counters. | ||
| 261 | */ | ||
| 262 | if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) { | ||
| 263 | pr_notice("HW perf events unsupported, software events only.\n"); | ||
| 264 | return -ENODEV; | ||
| 265 | } | ||
| 266 | |||
| 267 | return register_sh_pmu(&sh4a_pmu); | ||
| 268 | } | ||
| 269 | arch_initcall(sh4a_pmu_init); | ||
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index f3851fd757ec..845e89c936e7 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c | |||
| @@ -20,6 +20,8 @@ | |||
| 20 | #include <linux/uio_driver.h> | 20 | #include <linux/uio_driver.h> |
| 21 | #include <linux/sh_timer.h> | 21 | #include <linux/sh_timer.h> |
| 22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
| 23 | #include <linux/notifier.h> | ||
| 24 | #include <asm/suspend.h> | ||
| 23 | #include <asm/clock.h> | 25 | #include <asm/clock.h> |
| 24 | #include <asm/mmzone.h> | 26 | #include <asm/mmzone.h> |
| 25 | #include <cpu/sh7724.h> | 27 | #include <cpu/sh7724.h> |
| @@ -202,7 +204,7 @@ static struct resource veu0_resources[] = { | |||
| 202 | [0] = { | 204 | [0] = { |
| 203 | .name = "VEU3F0", | 205 | .name = "VEU3F0", |
| 204 | .start = 0xfe920000, | 206 | .start = 0xfe920000, |
| 205 | .end = 0xfe9200cb - 1, | 207 | .end = 0xfe9200cb, |
| 206 | .flags = IORESOURCE_MEM, | 208 | .flags = IORESOURCE_MEM, |
| 207 | }, | 209 | }, |
| 208 | [1] = { | 210 | [1] = { |
| @@ -234,7 +236,7 @@ static struct resource veu1_resources[] = { | |||
| 234 | [0] = { | 236 | [0] = { |
| 235 | .name = "VEU3F1", | 237 | .name = "VEU3F1", |
| 236 | .start = 0xfe924000, | 238 | .start = 0xfe924000, |
| 237 | .end = 0xfe9240cb - 1, | 239 | .end = 0xfe9240cb, |
| 238 | .flags = IORESOURCE_MEM, | 240 | .flags = IORESOURCE_MEM, |
| 239 | }, | 241 | }, |
| 240 | [1] = { | 242 | [1] = { |
| @@ -523,6 +525,70 @@ static struct platform_device jpu_device = { | |||
| 523 | }, | 525 | }, |
| 524 | }; | 526 | }; |
| 525 | 527 | ||
| 528 | /* SPU2DSP0 */ | ||
| 529 | static struct uio_info spu0_platform_data = { | ||
| 530 | .name = "SPU2DSP0", | ||
| 531 | .version = "0", | ||
| 532 | .irq = 86, | ||
| 533 | }; | ||
| 534 | |||
| 535 | static struct resource spu0_resources[] = { | ||
| 536 | [0] = { | ||
| 537 | .name = "SPU2DSP0", | ||
| 538 | .start = 0xFE200000, | ||
| 539 | .end = 0xFE2FFFFF, | ||
| 540 | .flags = IORESOURCE_MEM, | ||
| 541 | }, | ||
| 542 | [1] = { | ||
| 543 | /* place holder for contiguous memory */ | ||
| 544 | }, | ||
| 545 | }; | ||
| 546 | |||
| 547 | static struct platform_device spu0_device = { | ||
| 548 | .name = "uio_pdrv_genirq", | ||
| 549 | .id = 4, | ||
| 550 | .dev = { | ||
| 551 | .platform_data = &spu0_platform_data, | ||
| 552 | }, | ||
| 553 | .resource = spu0_resources, | ||
| 554 | .num_resources = ARRAY_SIZE(spu0_resources), | ||
| 555 | .archdata = { | ||
| 556 | .hwblk_id = HWBLK_SPU, | ||
| 557 | }, | ||
| 558 | }; | ||
| 559 | |||
| 560 | /* SPU2DSP1 */ | ||
| 561 | static struct uio_info spu1_platform_data = { | ||
| 562 | .name = "SPU2DSP1", | ||
| 563 | .version = "0", | ||
| 564 | .irq = 87, | ||
| 565 | }; | ||
| 566 | |||
| 567 | static struct resource spu1_resources[] = { | ||
| 568 | [0] = { | ||
| 569 | .name = "SPU2DSP1", | ||
| 570 | .start = 0xFE300000, | ||
| 571 | .end = 0xFE3FFFFF, | ||
| 572 | .flags = IORESOURCE_MEM, | ||
| 573 | }, | ||
| 574 | [1] = { | ||
| 575 | /* place holder for contiguous memory */ | ||
| 576 | }, | ||
| 577 | }; | ||
| 578 | |||
| 579 | static struct platform_device spu1_device = { | ||
| 580 | .name = "uio_pdrv_genirq", | ||
| 581 | .id = 5, | ||
| 582 | .dev = { | ||
| 583 | .platform_data = &spu1_platform_data, | ||
| 584 | }, | ||
| 585 | .resource = spu1_resources, | ||
| 586 | .num_resources = ARRAY_SIZE(spu1_resources), | ||
| 587 | .archdata = { | ||
| 588 | .hwblk_id = HWBLK_SPU, | ||
| 589 | }, | ||
| 590 | }; | ||
| 591 | |||
| 526 | static struct platform_device *sh7724_devices[] __initdata = { | 592 | static struct platform_device *sh7724_devices[] __initdata = { |
| 527 | &cmt_device, | 593 | &cmt_device, |
| 528 | &tmu0_device, | 594 | &tmu0_device, |
| @@ -539,6 +605,8 @@ static struct platform_device *sh7724_devices[] __initdata = { | |||
| 539 | &veu0_device, | 605 | &veu0_device, |
| 540 | &veu1_device, | 606 | &veu1_device, |
| 541 | &jpu_device, | 607 | &jpu_device, |
| 608 | &spu0_device, | ||
| 609 | &spu1_device, | ||
| 542 | }; | 610 | }; |
| 543 | 611 | ||
| 544 | static int __init sh7724_devices_setup(void) | 612 | static int __init sh7724_devices_setup(void) |
| @@ -547,6 +615,8 @@ static int __init sh7724_devices_setup(void) | |||
| 547 | platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20); | 615 | platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20); |
| 548 | platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20); | 616 | platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20); |
| 549 | platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20); | 617 | platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20); |
| 618 | platform_resource_setup_memory(&spu0_device, "spu0", 2 << 20); | ||
| 619 | platform_resource_setup_memory(&spu1_device, "spu1", 2 << 20); | ||
| 550 | 620 | ||
| 551 | return platform_add_devices(sh7724_devices, | 621 | return platform_add_devices(sh7724_devices, |
| 552 | ARRAY_SIZE(sh7724_devices)); | 622 | ARRAY_SIZE(sh7724_devices)); |
| @@ -827,3 +897,193 @@ void __init plat_irq_setup(void) | |||
| 827 | { | 897 | { |
| 828 | register_intc_controller(&intc_desc); | 898 | register_intc_controller(&intc_desc); |
| 829 | } | 899 | } |
| 900 | |||
| 901 | static struct { | ||
| 902 | /* BSC */ | ||
| 903 | unsigned long mmselr; | ||
| 904 | unsigned long cs0bcr; | ||
| 905 | unsigned long cs4bcr; | ||
| 906 | unsigned long cs5abcr; | ||
| 907 | unsigned long cs5bbcr; | ||
| 908 | unsigned long cs6abcr; | ||
| 909 | unsigned long cs6bbcr; | ||
| 910 | unsigned long cs4wcr; | ||
| 911 | unsigned long cs5awcr; | ||
| 912 | unsigned long cs5bwcr; | ||
| 913 | unsigned long cs6awcr; | ||
| 914 | unsigned long cs6bwcr; | ||
| 915 | /* INTC */ | ||
| 916 | unsigned short ipra; | ||
| 917 | unsigned short iprb; | ||
| 918 | unsigned short iprc; | ||
| 919 | unsigned short iprd; | ||
| 920 | unsigned short ipre; | ||
| 921 | unsigned short iprf; | ||
| 922 | unsigned short iprg; | ||
| 923 | unsigned short iprh; | ||
| 924 | unsigned short ipri; | ||
| 925 | unsigned short iprj; | ||
| 926 | unsigned short iprk; | ||
| 927 | unsigned short iprl; | ||
| 928 | unsigned char imr0; | ||
| 929 | unsigned char imr1; | ||
| 930 | unsigned char imr2; | ||
| 931 | unsigned char imr3; | ||
| 932 | unsigned char imr4; | ||
| 933 | unsigned char imr5; | ||
| 934 | unsigned char imr6; | ||
| 935 | unsigned char imr7; | ||
| 936 | unsigned char imr8; | ||
| 937 | unsigned char imr9; | ||
| 938 | unsigned char imr10; | ||
| 939 | unsigned char imr11; | ||
| 940 | unsigned char imr12; | ||
| 941 | /* RWDT */ | ||
| 942 | unsigned short rwtcnt; | ||
| 943 | unsigned short rwtcsr; | ||
| 944 | /* CPG */ | ||
| 945 | unsigned long irdaclk; | ||
| 946 | unsigned long spuclk; | ||
| 947 | } sh7724_rstandby_state; | ||
| 948 | |||
| 949 | static int sh7724_pre_sleep_notifier_call(struct notifier_block *nb, | ||
| 950 | unsigned long flags, void *unused) | ||
| 951 | { | ||
| 952 | if (!(flags & SUSP_SH_RSTANDBY)) | ||
| 953 | return NOTIFY_DONE; | ||
| 954 | |||
| 955 | /* BCR */ | ||
| 956 | sh7724_rstandby_state.mmselr = __raw_readl(0xff800020); /* MMSELR */ | ||
| 957 | sh7724_rstandby_state.mmselr |= 0xa5a50000; | ||
| 958 | sh7724_rstandby_state.cs0bcr = __raw_readl(0xfec10004); /* CS0BCR */ | ||
| 959 | sh7724_rstandby_state.cs4bcr = __raw_readl(0xfec10010); /* CS4BCR */ | ||
| 960 | sh7724_rstandby_state.cs5abcr = __raw_readl(0xfec10014); /* CS5ABCR */ | ||
| 961 | sh7724_rstandby_state.cs5bbcr = __raw_readl(0xfec10018); /* CS5BBCR */ | ||
| 962 | sh7724_rstandby_state.cs6abcr = __raw_readl(0xfec1001c); /* CS6ABCR */ | ||
| 963 | sh7724_rstandby_state.cs6bbcr = __raw_readl(0xfec10020); /* CS6BBCR */ | ||
| 964 | sh7724_rstandby_state.cs4wcr = __raw_readl(0xfec10030); /* CS4WCR */ | ||
| 965 | sh7724_rstandby_state.cs5awcr = __raw_readl(0xfec10034); /* CS5AWCR */ | ||
| 966 | sh7724_rstandby_state.cs5bwcr = __raw_readl(0xfec10038); /* CS5BWCR */ | ||
| 967 | sh7724_rstandby_state.cs6awcr = __raw_readl(0xfec1003c); /* CS6AWCR */ | ||
| 968 | sh7724_rstandby_state.cs6bwcr = __raw_readl(0xfec10040); /* CS6BWCR */ | ||
| 969 | |||
| 970 | /* INTC */ | ||
| 971 | sh7724_rstandby_state.ipra = __raw_readw(0xa4080000); /* IPRA */ | ||
| 972 | sh7724_rstandby_state.iprb = __raw_readw(0xa4080004); /* IPRB */ | ||
| 973 | sh7724_rstandby_state.iprc = __raw_readw(0xa4080008); /* IPRC */ | ||
| 974 | sh7724_rstandby_state.iprd = __raw_readw(0xa408000c); /* IPRD */ | ||
| 975 | sh7724_rstandby_state.ipre = __raw_readw(0xa4080010); /* IPRE */ | ||
| 976 | sh7724_rstandby_state.iprf = __raw_readw(0xa4080014); /* IPRF */ | ||
| 977 | sh7724_rstandby_state.iprg = __raw_readw(0xa4080018); /* IPRG */ | ||
| 978 | sh7724_rstandby_state.iprh = __raw_readw(0xa408001c); /* IPRH */ | ||
| 979 | sh7724_rstandby_state.ipri = __raw_readw(0xa4080020); /* IPRI */ | ||
| 980 | sh7724_rstandby_state.iprj = __raw_readw(0xa4080024); /* IPRJ */ | ||
| 981 | sh7724_rstandby_state.iprk = __raw_readw(0xa4080028); /* IPRK */ | ||
| 982 | sh7724_rstandby_state.iprl = __raw_readw(0xa408002c); /* IPRL */ | ||
| 983 | sh7724_rstandby_state.imr0 = __raw_readb(0xa4080080); /* IMR0 */ | ||
| 984 | sh7724_rstandby_state.imr1 = __raw_readb(0xa4080084); /* IMR1 */ | ||
| 985 | sh7724_rstandby_state.imr2 = __raw_readb(0xa4080088); /* IMR2 */ | ||
| 986 | sh7724_rstandby_state.imr3 = __raw_readb(0xa408008c); /* IMR3 */ | ||
| 987 | sh7724_rstandby_state.imr4 = __raw_readb(0xa4080090); /* IMR4 */ | ||
| 988 | sh7724_rstandby_state.imr5 = __raw_readb(0xa4080094); /* IMR5 */ | ||
| 989 | sh7724_rstandby_state.imr6 = __raw_readb(0xa4080098); /* IMR6 */ | ||
| 990 | sh7724_rstandby_state.imr7 = __raw_readb(0xa408009c); /* IMR7 */ | ||
| 991 | sh7724_rstandby_state.imr8 = __raw_readb(0xa40800a0); /* IMR8 */ | ||
| 992 | sh7724_rstandby_state.imr9 = __raw_readb(0xa40800a4); /* IMR9 */ | ||
| 993 | sh7724_rstandby_state.imr10 = __raw_readb(0xa40800a8); /* IMR10 */ | ||
| 994 | sh7724_rstandby_state.imr11 = __raw_readb(0xa40800ac); /* IMR11 */ | ||
| 995 | sh7724_rstandby_state.imr12 = __raw_readb(0xa40800b0); /* IMR12 */ | ||
| 996 | |||
| 997 | /* RWDT */ | ||
| 998 | sh7724_rstandby_state.rwtcnt = __raw_readb(0xa4520000); /* RWTCNT */ | ||
| 999 | sh7724_rstandby_state.rwtcnt |= 0x5a00; | ||
| 1000 | sh7724_rstandby_state.rwtcsr = __raw_readb(0xa4520004); /* RWTCSR */ | ||
| 1001 | sh7724_rstandby_state.rwtcsr |= 0xa500; | ||
| 1002 | __raw_writew(sh7724_rstandby_state.rwtcsr & 0x07, 0xa4520004); | ||
| 1003 | |||
| 1004 | /* CPG */ | ||
| 1005 | sh7724_rstandby_state.irdaclk = __raw_readl(0xa4150018); /* IRDACLKCR */ | ||
| 1006 | sh7724_rstandby_state.spuclk = __raw_readl(0xa415003c); /* SPUCLKCR */ | ||
| 1007 | |||
| 1008 | return NOTIFY_DONE; | ||
| 1009 | } | ||
| 1010 | |||
| 1011 | static int sh7724_post_sleep_notifier_call(struct notifier_block *nb, | ||
| 1012 | unsigned long flags, void *unused) | ||
| 1013 | { | ||
| 1014 | if (!(flags & SUSP_SH_RSTANDBY)) | ||
| 1015 | return NOTIFY_DONE; | ||
| 1016 | |||
| 1017 | /* BCR */ | ||
| 1018 | __raw_writel(sh7724_rstandby_state.mmselr, 0xff800020); /* MMSELR */ | ||
| 1019 | __raw_writel(sh7724_rstandby_state.cs0bcr, 0xfec10004); /* CS0BCR */ | ||
| 1020 | __raw_writel(sh7724_rstandby_state.cs4bcr, 0xfec10010); /* CS4BCR */ | ||
| 1021 | __raw_writel(sh7724_rstandby_state.cs5abcr, 0xfec10014); /* CS5ABCR */ | ||
| 1022 | __raw_writel(sh7724_rstandby_state.cs5bbcr, 0xfec10018); /* CS5BBCR */ | ||
| 1023 | __raw_writel(sh7724_rstandby_state.cs6abcr, 0xfec1001c); /* CS6ABCR */ | ||
| 1024 | __raw_writel(sh7724_rstandby_state.cs6bbcr, 0xfec10020); /* CS6BBCR */ | ||
| 1025 | __raw_writel(sh7724_rstandby_state.cs4wcr, 0xfec10030); /* CS4WCR */ | ||
| 1026 | __raw_writel(sh7724_rstandby_state.cs5awcr, 0xfec10034); /* CS5AWCR */ | ||
| 1027 | __raw_writel(sh7724_rstandby_state.cs5bwcr, 0xfec10038); /* CS5BWCR */ | ||
| 1028 | __raw_writel(sh7724_rstandby_state.cs6awcr, 0xfec1003c); /* CS6AWCR */ | ||
| 1029 | __raw_writel(sh7724_rstandby_state.cs6bwcr, 0xfec10040); /* CS6BWCR */ | ||
| 1030 | |||
| 1031 | /* INTC */ | ||
| 1032 | __raw_writew(sh7724_rstandby_state.ipra, 0xa4080000); /* IPRA */ | ||
| 1033 | __raw_writew(sh7724_rstandby_state.iprb, 0xa4080004); /* IPRB */ | ||
| 1034 | __raw_writew(sh7724_rstandby_state.iprc, 0xa4080008); /* IPRC */ | ||
| 1035 | __raw_writew(sh7724_rstandby_state.iprd, 0xa408000c); /* IPRD */ | ||
| 1036 | __raw_writew(sh7724_rstandby_state.ipre, 0xa4080010); /* IPRE */ | ||
| 1037 | __raw_writew(sh7724_rstandby_state.iprf, 0xa4080014); /* IPRF */ | ||
| 1038 | __raw_writew(sh7724_rstandby_state.iprg, 0xa4080018); /* IPRG */ | ||
| 1039 | __raw_writew(sh7724_rstandby_state.iprh, 0xa408001c); /* IPRH */ | ||
| 1040 | __raw_writew(sh7724_rstandby_state.ipri, 0xa4080020); /* IPRI */ | ||
| 1041 | __raw_writew(sh7724_rstandby_state.iprj, 0xa4080024); /* IPRJ */ | ||
| 1042 | __raw_writew(sh7724_rstandby_state.iprk, 0xa4080028); /* IPRK */ | ||
| 1043 | __raw_writew(sh7724_rstandby_state.iprl, 0xa408002c); /* IPRL */ | ||
| 1044 | __raw_writeb(sh7724_rstandby_state.imr0, 0xa4080080); /* IMR0 */ | ||
| 1045 | __raw_writeb(sh7724_rstandby_state.imr1, 0xa4080084); /* IMR1 */ | ||
| 1046 | __raw_writeb(sh7724_rstandby_state.imr2, 0xa4080088); /* IMR2 */ | ||
| 1047 | __raw_writeb(sh7724_rstandby_state.imr3, 0xa408008c); /* IMR3 */ | ||
| 1048 | __raw_writeb(sh7724_rstandby_state.imr4, 0xa4080090); /* IMR4 */ | ||
| 1049 | __raw_writeb(sh7724_rstandby_state.imr5, 0xa4080094); /* IMR5 */ | ||
| 1050 | __raw_writeb(sh7724_rstandby_state.imr6, 0xa4080098); /* IMR6 */ | ||
| 1051 | __raw_writeb(sh7724_rstandby_state.imr7, 0xa408009c); /* IMR7 */ | ||
| 1052 | __raw_writeb(sh7724_rstandby_state.imr8, 0xa40800a0); /* IMR8 */ | ||
| 1053 | __raw_writeb(sh7724_rstandby_state.imr9, 0xa40800a4); /* IMR9 */ | ||
| 1054 | __raw_writeb(sh7724_rstandby_state.imr10, 0xa40800a8); /* IMR10 */ | ||
| 1055 | __raw_writeb(sh7724_rstandby_state.imr11, 0xa40800ac); /* IMR11 */ | ||
| 1056 | __raw_writeb(sh7724_rstandby_state.imr12, 0xa40800b0); /* IMR12 */ | ||
| 1057 | |||
| 1058 | /* RWDT */ | ||
| 1059 | __raw_writew(sh7724_rstandby_state.rwtcnt, 0xa4520000); /* RWTCNT */ | ||
| 1060 | __raw_writew(sh7724_rstandby_state.rwtcsr, 0xa4520004); /* RWTCSR */ | ||
| 1061 | |||
| 1062 | /* CPG */ | ||
| 1063 | __raw_writel(sh7724_rstandby_state.irdaclk, 0xa4150018); /* IRDACLKCR */ | ||
| 1064 | __raw_writel(sh7724_rstandby_state.spuclk, 0xa415003c); /* SPUCLKCR */ | ||
| 1065 | |||
| 1066 | return NOTIFY_DONE; | ||
| 1067 | } | ||
| 1068 | |||
| 1069 | static struct notifier_block sh7724_pre_sleep_notifier = { | ||
| 1070 | .notifier_call = sh7724_pre_sleep_notifier_call, | ||
| 1071 | .priority = SH_MOBILE_PRE(SH_MOBILE_SLEEP_CPU), | ||
| 1072 | }; | ||
| 1073 | |||
| 1074 | static struct notifier_block sh7724_post_sleep_notifier = { | ||
| 1075 | .notifier_call = sh7724_post_sleep_notifier_call, | ||
| 1076 | .priority = SH_MOBILE_POST(SH_MOBILE_SLEEP_CPU), | ||
| 1077 | }; | ||
| 1078 | |||
| 1079 | static int __init sh7724_sleep_setup(void) | ||
| 1080 | { | ||
| 1081 | atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list, | ||
| 1082 | &sh7724_pre_sleep_notifier); | ||
| 1083 | |||
| 1084 | atomic_notifier_chain_register(&sh_mobile_post_sleep_notifier_list, | ||
| 1085 | &sh7724_post_sleep_notifier); | ||
| 1086 | return 0; | ||
| 1087 | } | ||
| 1088 | arch_initcall(sh7724_sleep_setup); | ||
| 1089 | |||
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c index e848443deeb9..c7ba9166e18a 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c | |||
| @@ -15,6 +15,15 @@ | |||
| 15 | #include <linux/sh_timer.h> | 15 | #include <linux/sh_timer.h> |
| 16 | #include <asm/mmzone.h> | 16 | #include <asm/mmzone.h> |
| 17 | 17 | ||
| 18 | /* | ||
| 19 | * This intentionally only registers SCIF ports 0, 1, and 3. SCIF 2 | ||
| 20 | * INTEVT values overlap with the FPU EXPEVT ones, requiring special | ||
| 21 | * demuxing in the exception dispatch path. | ||
| 22 | * | ||
| 23 | * As this overlap is something that never should have made it in to | ||
| 24 | * silicon in the first place, we just refuse to deal with the port at | ||
| 25 | * all rather than adding infrastructure to hack around it. | ||
| 26 | */ | ||
| 18 | static struct plat_sci_port sci_platform_data[] = { | 27 | static struct plat_sci_port sci_platform_data[] = { |
| 19 | { | 28 | { |
| 20 | .mapbase = 0xffc30000, | 29 | .mapbase = 0xffc30000, |
| @@ -27,11 +36,6 @@ static struct plat_sci_port sci_platform_data[] = { | |||
| 27 | .type = PORT_SCIF, | 36 | .type = PORT_SCIF, |
| 28 | .irqs = { 44, 45, 47, 46 }, | 37 | .irqs = { 44, 45, 47, 46 }, |
| 29 | }, { | 38 | }, { |
| 30 | .mapbase = 0xffc50000, | ||
| 31 | .flags = UPF_BOOT_AUTOCONF, | ||
| 32 | .type = PORT_SCIF, | ||
| 33 | .irqs = { 48, 49, 51, 50 }, | ||
| 34 | }, { | ||
| 35 | .mapbase = 0xffc60000, | 39 | .mapbase = 0xffc60000, |
| 36 | .flags = UPF_BOOT_AUTOCONF, | 40 | .flags = UPF_BOOT_AUTOCONF, |
| 37 | .type = PORT_SCIF, | 41 | .type = PORT_SCIF, |
| @@ -268,7 +272,11 @@ enum { | |||
| 268 | UNUSED = 0, | 272 | UNUSED = 0, |
| 269 | 273 | ||
| 270 | /* interrupt sources */ | 274 | /* interrupt sources */ |
| 271 | IRL, IRQ0, IRQ1, IRQ2, IRQ3, | 275 | IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, |
| 276 | IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, | ||
| 277 | IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, | ||
| 278 | IRL_HHLL, IRL_HHLH, IRL_HHHL, | ||
| 279 | IRQ0, IRQ1, IRQ2, IRQ3, | ||
| 272 | HUDII, | 280 | HUDII, |
| 273 | TMU0, TMU1, TMU2, TMU3, TMU4, TMU5, | 281 | TMU0, TMU1, TMU2, TMU3, TMU4, TMU5, |
| 274 | PCII0, PCII1, PCII2, PCII3, PCII4, | 282 | PCII0, PCII1, PCII2, PCII3, PCII4, |
| @@ -291,7 +299,7 @@ enum { | |||
| 291 | INTICI4, INTICI5, INTICI6, INTICI7, | 299 | INTICI4, INTICI5, INTICI6, INTICI7, |
| 292 | 300 | ||
| 293 | /* interrupt groups */ | 301 | /* interrupt groups */ |
| 294 | PCII56789, SCIF0, SCIF1, SCIF2, SCIF3, | 302 | IRL, PCII56789, SCIF0, SCIF1, SCIF2, SCIF3, |
| 295 | DMAC0, DMAC1, | 303 | DMAC0, DMAC1, |
| 296 | }; | 304 | }; |
| 297 | 305 | ||
| @@ -309,8 +317,6 @@ static struct intc_vect vectors[] __initdata = { | |||
| 309 | INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760), | 317 | INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760), |
| 310 | INTC_VECT(SCIF1_ERI, 0x780), INTC_VECT(SCIF1_RXI, 0x7a0), | 318 | INTC_VECT(SCIF1_ERI, 0x780), INTC_VECT(SCIF1_RXI, 0x7a0), |
| 311 | INTC_VECT(SCIF1_BRI, 0x7c0), INTC_VECT(SCIF1_TXI, 0x7e0), | 319 | INTC_VECT(SCIF1_BRI, 0x7c0), INTC_VECT(SCIF1_TXI, 0x7e0), |
| 312 | INTC_VECT(SCIF2_ERI, 0x800), INTC_VECT(SCIF2_RXI, 0x820), | ||
| 313 | INTC_VECT(SCIF2_BRI, 0x840), INTC_VECT(SCIF2_TXI, 0x860), | ||
| 314 | INTC_VECT(SCIF3_ERI, 0x880), INTC_VECT(SCIF3_RXI, 0x8a0), | 320 | INTC_VECT(SCIF3_ERI, 0x880), INTC_VECT(SCIF3_RXI, 0x8a0), |
| 315 | INTC_VECT(SCIF3_BRI, 0x8c0), INTC_VECT(SCIF3_TXI, 0x8e0), | 321 | INTC_VECT(SCIF3_BRI, 0x8c0), INTC_VECT(SCIF3_TXI, 0x8e0), |
| 316 | INTC_VECT(DMAC0_DMINT0, 0x900), INTC_VECT(DMAC0_DMINT1, 0x920), | 322 | INTC_VECT(DMAC0_DMINT0, 0x900), INTC_VECT(DMAC0_DMINT1, 0x920), |
| @@ -344,10 +350,13 @@ static struct intc_vect vectors[] __initdata = { | |||
| 344 | }; | 350 | }; |
| 345 | 351 | ||
| 346 | static struct intc_group groups[] __initdata = { | 352 | static struct intc_group groups[] __initdata = { |
| 353 | INTC_GROUP(IRL, IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, | ||
| 354 | IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, | ||
| 355 | IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, | ||
| 356 | IRL_HHLL, IRL_HHLH, IRL_HHHL), | ||
| 347 | INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9), | 357 | INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9), |
| 348 | INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), | 358 | INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), |
| 349 | INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), | 359 | INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), |
| 350 | INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI), | ||
| 351 | INTC_GROUP(SCIF3, SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI), | 360 | INTC_GROUP(SCIF3, SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI), |
| 352 | INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, | 361 | INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, |
| 353 | DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE), | 362 | DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE), |
| @@ -419,14 +428,14 @@ static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups, | |||
| 419 | 428 | ||
| 420 | /* External interrupt pins in IRL mode */ | 429 | /* External interrupt pins in IRL mode */ |
| 421 | static struct intc_vect vectors_irl[] __initdata = { | 430 | static struct intc_vect vectors_irl[] __initdata = { |
| 422 | INTC_VECT(IRL, 0x200), INTC_VECT(IRL, 0x220), | 431 | INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220), |
| 423 | INTC_VECT(IRL, 0x240), INTC_VECT(IRL, 0x260), | 432 | INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260), |
| 424 | INTC_VECT(IRL, 0x280), INTC_VECT(IRL, 0x2a0), | 433 | INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0), |
| 425 | INTC_VECT(IRL, 0x2c0), INTC_VECT(IRL, 0x2e0), | 434 | INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0), |
| 426 | INTC_VECT(IRL, 0x300), INTC_VECT(IRL, 0x320), | 435 | INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320), |
| 427 | INTC_VECT(IRL, 0x340), INTC_VECT(IRL, 0x360), | 436 | INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360), |
| 428 | INTC_VECT(IRL, 0x380), INTC_VECT(IRL, 0x3a0), | 437 | INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0), |
| 429 | INTC_VECT(IRL, 0x3c0), | 438 | INTC_VECT(IRL_HHHL, 0x3c0), |
| 430 | }; | 439 | }; |
| 431 | 440 | ||
| 432 | static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups, | 441 | static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups, |
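The IRL-mode vector table above replaces the single catch-all IRL entry with one vector per encoded level: the fifteen 4-bit encodings LLLL (0x0) through HHHL (0xe) each get their own INTEVT slot, stepping by 0x20 from 0x200 to 0x3c0, and are then folded back into one IRL group for masking purposes. A minimal sketch of that mapping follows; it is not part of the patch, the helper name is illustrative, and the "0xf means nothing pending" convention is an assumption about IRL mode:

    /*
     * Illustrative only: maps a 4-bit IRL level to the INTEVT vector
     * listed in vectors_irl[] above.
     */
    static inline unsigned int irl_to_intevt(unsigned int irl)
    {
            if (irl == 0xf)
                    return 0;                  /* assumed: nothing pending */
            return 0x200 + irl * 0x20;         /* 0x200 (LLLL) .. 0x3c0 (HHHL) */
    }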
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c index 185ec3976a25..5863e0c4d02f 100644 --- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c | |||
| @@ -14,6 +14,13 @@ | |||
| 14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
| 15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
| 16 | 16 | ||
| 17 | #define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12)) | ||
| 18 | #define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12)) | ||
| 19 | |||
| 20 | #define STBCR_MSTP 0x00000001 | ||
| 21 | #define STBCR_RESET 0x00000002 | ||
| 22 | #define STBCR_LTSLP 0x80000000 | ||
| 23 | |||
| 17 | static irqreturn_t ipi_interrupt_handler(int irq, void *arg) | 24 | static irqreturn_t ipi_interrupt_handler(int irq, void *arg) |
| 18 | { | 25 | { |
| 19 | unsigned int message = (unsigned int)(long)arg; | 26 | unsigned int message = (unsigned int)(long)arg; |
| @@ -21,9 +28,9 @@ static irqreturn_t ipi_interrupt_handler(int irq, void *arg) | |||
| 21 | unsigned int offs = 4 * cpu; | 28 | unsigned int offs = 4 * cpu; |
| 22 | unsigned int x; | 29 | unsigned int x; |
| 23 | 30 | ||
| 24 | x = ctrl_inl(0xfe410070 + offs); /* C0INITICI..CnINTICI */ | 31 | x = __raw_readl(0xfe410070 + offs); /* C0INITICI..CnINTICI */ |
| 25 | x &= (1 << (message << 2)); | 32 | x &= (1 << (message << 2)); |
| 26 | ctrl_outl(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */ | 33 | __raw_writel(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */ |
| 27 | 34 | ||
| 28 | smp_message_recv(message); | 35 | smp_message_recv(message); |
| 29 | 36 | ||
| @@ -37,6 +44,9 @@ void __init plat_smp_setup(void) | |||
| 37 | 44 | ||
| 38 | init_cpu_possible(cpumask_of(cpu)); | 45 | init_cpu_possible(cpumask_of(cpu)); |
| 39 | 46 | ||
| 47 | /* Enable light sleep for the boot CPU */ | ||
| 48 | __raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu)); | ||
| 49 | |||
| 40 | __cpu_number_map[0] = 0; | 50 | __cpu_number_map[0] = 0; |
| 41 | __cpu_logical_map[0] = 0; | 51 | __cpu_logical_map[0] = 0; |
| 42 | 52 | ||
| @@ -66,32 +76,23 @@ void __init plat_prepare_cpus(unsigned int max_cpus) | |||
| 66 | "IPI", (void *)(long)i); | 76 | "IPI", (void *)(long)i); |
| 67 | } | 77 | } |
| 68 | 78 | ||
| 69 | #define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12)) | ||
| 70 | #define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12)) | ||
| 71 | |||
| 72 | #define STBCR_MSTP 0x00000001 | ||
| 73 | #define STBCR_RESET 0x00000002 | ||
| 74 | #define STBCR_LTSLP 0x80000000 | ||
| 75 | |||
| 76 | #define STBCR_AP_VAL (STBCR_RESET | STBCR_LTSLP) | ||
| 77 | |||
| 78 | void plat_start_cpu(unsigned int cpu, unsigned long entry_point) | 79 | void plat_start_cpu(unsigned int cpu, unsigned long entry_point) |
| 79 | { | 80 | { |
| 80 | ctrl_outl(entry_point, RESET_REG(cpu)); | 81 | __raw_writel(entry_point, RESET_REG(cpu)); |
| 81 | 82 | ||
| 82 | if (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP)) | 83 | if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) |
| 83 | ctrl_outl(STBCR_MSTP, STBCR_REG(cpu)); | 84 | __raw_writel(STBCR_MSTP, STBCR_REG(cpu)); |
| 84 | 85 | ||
| 85 | while (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP)) | 86 | while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) |
| 86 | cpu_relax(); | 87 | cpu_relax(); |
| 87 | 88 | ||
| 88 | /* Start up secondary processor by sending a reset */ | 89 | /* Start up secondary processor by sending a reset */ |
| 89 | ctrl_outl(STBCR_AP_VAL, STBCR_REG(cpu)); | 90 | __raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu)); |
| 90 | } | 91 | } |
| 91 | 92 | ||
| 92 | int plat_smp_processor_id(void) | 93 | int plat_smp_processor_id(void) |
| 93 | { | 94 | { |
| 94 | return ctrl_inl(0xff000048); /* CPIDR */ | 95 | return __raw_readl(0xff000048); /* CPIDR */ |
| 95 | } | 96 | } |
| 96 | 97 | ||
| 97 | void plat_send_ipi(unsigned int cpu, unsigned int message) | 98 | void plat_send_ipi(unsigned int cpu, unsigned int message) |
| @@ -100,5 +101,5 @@ void plat_send_ipi(unsigned int cpu, unsigned int message) | |||
| 100 | 101 | ||
| 101 | BUG_ON(cpu >= 4); | 102 | BUG_ON(cpu >= 4); |
| 102 | 103 | ||
| 103 | ctrl_outl(1 << (message << 2), addr); /* C0INTICI..CnINTICI */ | 104 | __raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */ |
| 104 | } | 105 | } |
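Besides converting the SMP glue from ctrl_inl()/ctrl_outl() to __raw_readl()/__raw_writel(), this hunk hoists the STBCR/RESET register definitions to the top of the file so that plat_smp_setup() can enable light sleep on the boot CPU. That enable is a plain read-modify-write of the per-CPU STBCR; a minimal sketch of the same pattern, with an illustrative helper name:

    #include <linux/io.h>

    #define STBCR_REG(phys_id)      (0xfe400004 | ((phys_id) << 12))
    #define STBCR_LTSLP             0x80000000

    /* Illustrative helper mirroring the read-modify-write in plat_smp_setup(). */
    static inline void shx3_enable_light_sleep(unsigned int cpu)
    {
            __raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP,
                         STBCR_REG(cpu));
    }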
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S index b0aacf675258..8f13f73cb2cb 100644 --- a/arch/sh/kernel/cpu/sh5/entry.S +++ b/arch/sh/kernel/cpu/sh5/entry.S | |||
| @@ -933,7 +933,7 @@ ret_with_reschedule: | |||
| 933 | 933 | ||
| 934 | pta restore_all, tr1 | 934 | pta restore_all, tr1 |
| 935 | 935 | ||
| 936 | movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8 | 936 | movi _TIF_SIGPENDING, r8 |
| 937 | and r8, r7, r8 | 937 | and r8, r7, r8 |
| 938 | pta work_notifysig, tr0 | 938 | pta work_notifysig, tr0 |
| 939 | bne r8, ZERO, tr0 | 939 | bne r8, ZERO, tr0 |
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c index 1c504bd972c3..83972aa319c2 100644 --- a/arch/sh/kernel/cpu/shmobile/cpuidle.c +++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c | |||
| @@ -87,25 +87,31 @@ void sh_mobile_setup_cpuidle(void) | |||
| 87 | 87 | ||
| 88 | dev->safe_state = state; | 88 | dev->safe_state = state; |
| 89 | 89 | ||
| 90 | state = &dev->states[i++]; | 90 | if (sh_mobile_sleep_supported & SUSP_SH_SF) { |
| 91 | snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); | 91 | state = &dev->states[i++]; |
| 92 | strncpy(state->desc, "SuperH Sleep Mode [SF]", CPUIDLE_DESC_LEN); | 92 | snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); |
| 93 | state->exit_latency = 100; | 93 | strncpy(state->desc, "SuperH Sleep Mode [SF]", |
| 94 | state->target_residency = 1 * 2; | 94 | CPUIDLE_DESC_LEN); |
| 95 | state->power_usage = 1; | 95 | state->exit_latency = 100; |
| 96 | state->flags = 0; | 96 | state->target_residency = 1 * 2; |
| 97 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | 97 | state->power_usage = 1; |
| 98 | state->enter = cpuidle_sleep_enter; | 98 | state->flags = 0; |
| 99 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | ||
| 100 | state->enter = cpuidle_sleep_enter; | ||
| 101 | } | ||
| 99 | 102 | ||
| 100 | state = &dev->states[i++]; | 103 | if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) { |
| 101 | snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); | 104 | state = &dev->states[i++]; |
| 102 | strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", CPUIDLE_DESC_LEN); | 105 | snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); |
| 103 | state->exit_latency = 2300; | 106 | strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", |
| 104 | state->target_residency = 1 * 2; | 107 | CPUIDLE_DESC_LEN); |
| 105 | state->power_usage = 1; | 108 | state->exit_latency = 2300; |
| 106 | state->flags = 0; | 109 | state->target_residency = 1 * 2; |
| 107 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | 110 | state->power_usage = 1; |
| 108 | state->enter = cpuidle_sleep_enter; | 111 | state->flags = 0; |
| 112 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | ||
| 113 | state->enter = cpuidle_sleep_enter; | ||
| 114 | } | ||
| 109 | 115 | ||
| 110 | dev->state_count = i; | 116 | dev->state_count = i; |
| 111 | 117 | ||
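With this change the C1 ("SuperH Sleep Mode [SF]") and C2 ("SuperH Mobile Standby Mode [SF]") entries are only registered when the board has provided matching self-refresh support, so dev->state_count now depends on sh_mobile_sleep_supported instead of always being three. A rough illustration of the resulting count, assuming the C0 state registered earlier in this function remains unconditional:

    #include <asm/suspend.h>        /* SUSP_SH_SF, SUSP_SH_STANDBY */

    /* Illustrative only: mirrors the flag checks in sh_mobile_setup_cpuidle(). */
    static int nr_cpuidle_states(unsigned long supported)
    {
            int n = 1;                       /* C0: plain SuperH sleep */

            if (supported & SUSP_SH_SF)
                    n++;                     /* C1: sleep with self-refresh */
            if (supported & SUSP_SH_STANDBY)
                    n++;                     /* C2: standby with self-refresh */
            return n;
    }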
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c index ee3c2aaf66fb..ca029a44743c 100644 --- a/arch/sh/kernel/cpu/shmobile/pm.c +++ b/arch/sh/kernel/cpu/shmobile/pm.c | |||
| @@ -15,6 +15,13 @@ | |||
| 15 | #include <linux/suspend.h> | 15 | #include <linux/suspend.h> |
| 16 | #include <asm/suspend.h> | 16 | #include <asm/suspend.h> |
| 17 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
| 18 | #include <asm/cacheflush.h> | ||
| 19 | |||
| 20 | /* | ||
| 21 | * Notifier lists for pre/post sleep notification | ||
| 22 | */ | ||
| 23 | ATOMIC_NOTIFIER_HEAD(sh_mobile_pre_sleep_notifier_list); | ||
| 24 | ATOMIC_NOTIFIER_HEAD(sh_mobile_post_sleep_notifier_list); | ||
| 18 | 25 | ||
| 19 | /* | 26 | /* |
| 20 | * Sleep modes available on SuperH Mobile: | 27 | * Sleep modes available on SuperH Mobile: |
| @@ -26,30 +33,105 @@ | |||
| 26 | #define SUSP_MODE_SLEEP (SUSP_SH_SLEEP) | 33 | #define SUSP_MODE_SLEEP (SUSP_SH_SLEEP) |
| 27 | #define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF) | 34 | #define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF) |
| 28 | #define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF) | 35 | #define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF) |
| 36 | #define SUSP_MODE_RSTANDBY (SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_SF) | ||
| 37 | /* | ||
| 38 | * U-standby mode is unsupported since it needs bootloader hacks | ||
| 39 | */ | ||
| 29 | 40 | ||
| 30 | /* | 41 | #ifdef CONFIG_CPU_SUBTYPE_SH7724 |
| 31 | * The following modes are not there yet: | 42 | #define RAM_BASE 0xfd800000 /* RSMEM */ |
| 32 | * | 43 | #else |
| 33 | * R-standby mode is unsupported, but will be added in the future | 44 | #define RAM_BASE 0xe5200000 /* ILRAM */ |
| 34 | * U-standby mode is low priority since it needs bootloader hacks | 45 | #endif |
| 35 | */ | ||
| 36 | |||
| 37 | #define ILRAM_BASE 0xe5200000 | ||
| 38 | |||
| 39 | extern const unsigned char sh_mobile_standby[]; | ||
| 40 | extern const unsigned int sh_mobile_standby_size; | ||
| 41 | 46 | ||
| 42 | void sh_mobile_call_standby(unsigned long mode) | 47 | void sh_mobile_call_standby(unsigned long mode) |
| 43 | { | 48 | { |
| 44 | void *onchip_mem = (void *)ILRAM_BASE; | 49 | void *onchip_mem = (void *)RAM_BASE; |
| 45 | void (*standby_onchip_mem)(unsigned long, unsigned long) = onchip_mem; | 50 | struct sh_sleep_data *sdp = onchip_mem; |
| 51 | void (*standby_onchip_mem)(unsigned long, unsigned long); | ||
| 52 | |||
| 53 | /* code located directly after data structure */ | ||
| 54 | standby_onchip_mem = (void *)(sdp + 1); | ||
| 55 | |||
| 56 | atomic_notifier_call_chain(&sh_mobile_pre_sleep_notifier_list, | ||
| 57 | mode, NULL); | ||
| 58 | |||
| 59 | /* flush the caches if MMU flag is set */ | ||
| 60 | if (mode & SUSP_SH_MMU) | ||
| 61 | flush_cache_all(); | ||
| 46 | 62 | ||
| 47 | /* Let assembly snippet in on-chip memory handle the rest */ | 63 | /* Let assembly snippet in on-chip memory handle the rest */ |
| 48 | standby_onchip_mem(mode, ILRAM_BASE); | 64 | standby_onchip_mem(mode, RAM_BASE); |
| 65 | |||
| 66 | atomic_notifier_call_chain(&sh_mobile_post_sleep_notifier_list, | ||
| 67 | mode, NULL); | ||
| 68 | } | ||
| 69 | |||
| 70 | extern char sh_mobile_sleep_enter_start; | ||
| 71 | extern char sh_mobile_sleep_enter_end; | ||
| 72 | |||
| 73 | extern char sh_mobile_sleep_resume_start; | ||
| 74 | extern char sh_mobile_sleep_resume_end; | ||
| 75 | |||
| 76 | unsigned long sh_mobile_sleep_supported = SUSP_SH_SLEEP; | ||
| 77 | |||
| 78 | void sh_mobile_register_self_refresh(unsigned long flags, | ||
| 79 | void *pre_start, void *pre_end, | ||
| 80 | void *post_start, void *post_end) | ||
| 81 | { | ||
| 82 | void *onchip_mem = (void *)RAM_BASE; | ||
| 83 | void *vp; | ||
| 84 | struct sh_sleep_data *sdp; | ||
| 85 | int n; | ||
| 86 | |||
| 87 | /* part 0: data area */ | ||
| 88 | sdp = onchip_mem; | ||
| 89 | sdp->addr.stbcr = 0xa4150020; /* STBCR */ | ||
| 90 | sdp->addr.bar = 0xa4150040; /* BAR */ | ||
| 91 | sdp->addr.pteh = 0xff000000; /* PTEH */ | ||
| 92 | sdp->addr.ptel = 0xff000004; /* PTEL */ | ||
| 93 | sdp->addr.ttb = 0xff000008; /* TTB */ | ||
| 94 | sdp->addr.tea = 0xff00000c; /* TEA */ | ||
| 95 | sdp->addr.mmucr = 0xff000010; /* MMUCR */ | ||
| 96 | sdp->addr.ptea = 0xff000034; /* PTEA */ | ||
| 97 | sdp->addr.pascr = 0xff000070; /* PASCR */ | ||
| 98 | sdp->addr.irmcr = 0xff000078; /* IRMCR */ | ||
| 99 | sdp->addr.ccr = 0xff00001c; /* CCR */ | ||
| 100 | sdp->addr.ramcr = 0xff000074; /* RAMCR */ | ||
| 101 | vp = sdp + 1; | ||
| 102 | |||
| 103 | /* part 1: common code to enter sleep mode */ | ||
| 104 | n = &sh_mobile_sleep_enter_end - &sh_mobile_sleep_enter_start; | ||
| 105 | memcpy(vp, &sh_mobile_sleep_enter_start, n); | ||
| 106 | vp += roundup(n, 4); | ||
| 107 | |||
| 108 | /* part 2: board specific code to enter self-refresh mode */ | ||
| 109 | n = pre_end - pre_start; | ||
| 110 | memcpy(vp, pre_start, n); | ||
| 111 | sdp->sf_pre = (unsigned long)vp; | ||
| 112 | vp += roundup(n, 4); | ||
| 113 | |||
| 114 | /* part 3: board specific code to resume from self-refresh mode */ | ||
| 115 | n = post_end - post_start; | ||
| 116 | memcpy(vp, post_start, n); | ||
| 117 | sdp->sf_post = (unsigned long)vp; | ||
| 118 | vp += roundup(n, 4); | ||
| 119 | |||
| 120 | /* part 4: common code to resume from sleep mode */ | ||
| 121 | WARN_ON(vp > (onchip_mem + 0x600)); | ||
| 122 | vp = onchip_mem + 0x600; /* located at interrupt vector */ | ||
| 123 | n = &sh_mobile_sleep_resume_end - &sh_mobile_sleep_resume_start; | ||
| 124 | memcpy(vp, &sh_mobile_sleep_resume_start, n); | ||
| 125 | sdp->resume = (unsigned long)vp; | ||
| 126 | |||
| 127 | sh_mobile_sleep_supported |= flags; | ||
| 49 | } | 128 | } |
| 50 | 129 | ||
| 51 | static int sh_pm_enter(suspend_state_t state) | 130 | static int sh_pm_enter(suspend_state_t state) |
| 52 | { | 131 | { |
| 132 | if (!(sh_mobile_sleep_supported & SUSP_MODE_STANDBY_SF)) | ||
| 133 | return -ENXIO; | ||
| 134 | |||
| 53 | local_irq_disable(); | 135 | local_irq_disable(); |
| 54 | set_bl_bit(); | 136 | set_bl_bit(); |
| 55 | sh_mobile_call_standby(SUSP_MODE_STANDBY_SF); | 137 | sh_mobile_call_standby(SUSP_MODE_STANDBY_SF); |
| @@ -65,13 +147,6 @@ static struct platform_suspend_ops sh_pm_ops = { | |||
| 65 | 147 | ||
| 66 | static int __init sh_pm_init(void) | 148 | static int __init sh_pm_init(void) |
| 67 | { | 149 | { |
| 68 | void *onchip_mem = (void *)ILRAM_BASE; | ||
| 69 | |||
| 70 | /* Copy the assembly snippet to the otherwise unused ILRAM */ | ||
| 71 | memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size); | ||
| 72 | wmb(); | ||
| 73 | ctrl_barrier(); | ||
| 74 | |||
| 75 | suspend_set_ops(&sh_pm_ops); | 150 | suspend_set_ops(&sh_pm_ops); |
| 76 | sh_mobile_setup_cpuidle(); | 151 | sh_mobile_setup_cpuidle(); |
| 77 | return 0; | 152 | return 0; |
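sh_mobile_register_self_refresh() now assembles the on-chip memory image at runtime: a data area of register addresses, the common sleep-entry code, the board's self-refresh enter/leave snippets, and the resume stub placed at the 0x600 interrupt vector. It also records the supported modes in sh_mobile_sleep_supported, which sh_pm_enter() checks before allowing suspend. A hedged sketch of the board-side call, assuming the prototype is exported via asm/suspend.h; the board_sdram_* symbols are made-up placeholders for small position-independent assembly snippets:

    #include <linux/init.h>
    #include <asm/suspend.h>

    extern char board_sdram_enter_start, board_sdram_enter_end;
    extern char board_sdram_leave_start, board_sdram_leave_end;

    /* Hypothetical board setup code: register SDRAM self-refresh snippets
     * and thereby enable the standby-with-self-refresh suspend mode. */
    static void __init board_register_self_refresh(void)
    {
            sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
                                            &board_sdram_enter_start,
                                            &board_sdram_enter_end,
                                            &board_sdram_leave_start,
                                            &board_sdram_leave_end);
    }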
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c index 7c615b17e209..6dcb8166a64d 100644 --- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c +++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c | |||
| @@ -45,12 +45,14 @@ static int __platform_pm_runtime_resume(struct platform_device *pdev) | |||
| 45 | 45 | ||
| 46 | dev_dbg(d, "__platform_pm_runtime_resume() [%d]\n", hwblk); | 46 | dev_dbg(d, "__platform_pm_runtime_resume() [%d]\n", hwblk); |
| 47 | 47 | ||
| 48 | if (d->driver && d->driver->pm && d->driver->pm->runtime_resume) { | 48 | if (d->driver) { |
| 49 | hwblk_enable(hwblk_info, hwblk); | 49 | hwblk_enable(hwblk_info, hwblk); |
| 50 | ret = 0; | 50 | ret = 0; |
| 51 | 51 | ||
| 52 | if (test_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags)) { | 52 | if (test_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags)) { |
| 53 | ret = d->driver->pm->runtime_resume(d); | 53 | if (d->driver->pm && d->driver->pm->runtime_resume) |
| 54 | ret = d->driver->pm->runtime_resume(d); | ||
| 55 | |||
| 54 | if (!ret) | 56 | if (!ret) |
| 55 | clear_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags); | 57 | clear_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags); |
| 56 | else | 58 | else |
| @@ -73,12 +75,15 @@ static int __platform_pm_runtime_suspend(struct platform_device *pdev) | |||
| 73 | 75 | ||
| 74 | dev_dbg(d, "__platform_pm_runtime_suspend() [%d]\n", hwblk); | 76 | dev_dbg(d, "__platform_pm_runtime_suspend() [%d]\n", hwblk); |
| 75 | 77 | ||
| 76 | if (d->driver && d->driver->pm && d->driver->pm->runtime_suspend) { | 78 | if (d->driver) { |
| 77 | BUG_ON(!test_bit(PDEV_ARCHDATA_FLAG_IDLE, &ad->flags)); | 79 | BUG_ON(!test_bit(PDEV_ARCHDATA_FLAG_IDLE, &ad->flags)); |
| 80 | ret = 0; | ||
| 78 | 81 | ||
| 79 | hwblk_enable(hwblk_info, hwblk); | 82 | if (d->driver->pm && d->driver->pm->runtime_suspend) { |
| 80 | ret = d->driver->pm->runtime_suspend(d); | 83 | hwblk_enable(hwblk_info, hwblk); |
| 81 | hwblk_disable(hwblk_info, hwblk); | 84 | ret = d->driver->pm->runtime_suspend(d); |
| 85 | hwblk_disable(hwblk_info, hwblk); | ||
| 86 | } | ||
| 82 | 87 | ||
| 83 | if (!ret) { | 88 | if (!ret) { |
| 84 | set_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags); | 89 | set_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags); |
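These hunks decouple the hwblk clock handling from the optional dev_pm_ops callbacks: any bound driver now gets its hardware block re-enabled on runtime resume and has its suspended state tracked, while runtime_suspend/runtime_resume are invoked only if the driver actually provides them (the "allow runtime pm without suspend/resume callbacks" item from the merge log). A hedged sketch of the driver-side consequence, with hypothetical foo_* names:

    #include <linux/platform_device.h>

    /* Hypothetical minimal driver with no dev_pm_ops at all. With the
     * change above it still participates in runtime PM: its hardware
     * block is re-enabled on runtime resume and the suspended flag is
     * maintained; callbacks are only needed for extra save/restore work. */
    static int foo_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver foo_driver = {
            .probe  = foo_probe,
            .driver = {
                    .name = "foo",           /* hypothetical device name */
            },
    };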
diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S index a439e6c7824f..e9dd7fa0abd2 100644 --- a/arch/sh/kernel/cpu/shmobile/sleep.S +++ b/arch/sh/kernel/cpu/shmobile/sleep.S | |||
| @@ -20,79 +20,103 @@ | |||
| 20 | * Kernel mode register usage, see entry.S: | 20 | * Kernel mode register usage, see entry.S: |
| 21 | * k0 scratch | 21 | * k0 scratch |
| 22 | * k1 scratch | 22 | * k1 scratch |
| 23 | * k4 scratch | ||
| 24 | */ | 23 | */ |
| 25 | #define k0 r0 | 24 | #define k0 r0 |
| 26 | #define k1 r1 | 25 | #define k1 r1 |
| 27 | #define k4 r4 | ||
| 28 | 26 | ||
| 29 | /* manage self-refresh and enter standby mode. | 27 | /* manage self-refresh and enter standby mode. must be self-contained. |
| 30 | * this code will be copied to on-chip memory and executed from there. | 28 | * this code will be copied to on-chip memory and executed from there. |
| 31 | */ | 29 | */ |
| 30 | .balign 4 | ||
| 31 | ENTRY(sh_mobile_sleep_enter_start) | ||
| 32 | 32 | ||
| 33 | .balign 4096,0,4096 | 33 | /* save mode flags */ |
| 34 | ENTRY(sh_mobile_standby) | 34 | mov.l r4, @(SH_SLEEP_MODE, r5) |
| 35 | 35 | ||
| 36 | /* save original vbr */ | 36 | /* save original vbr */ |
| 37 | stc vbr, r1 | 37 | stc vbr, r0 |
| 38 | mova saved_vbr, r0 | 38 | mov.l r0, @(SH_SLEEP_VBR, r5) |
| 39 | mov.l r1, @r0 | ||
| 40 | 39 | ||
| 41 | /* point vbr to our on-chip memory page */ | 40 | /* point vbr to our on-chip memory page */ |
| 42 | ldc r5, vbr | 41 | ldc r5, vbr |
| 43 | 42 | ||
| 44 | /* save return address */ | 43 | /* save return address */ |
| 45 | mova saved_spc, r0 | 44 | sts pr, r0 |
| 46 | sts pr, r5 | 45 | mov.l r0, @(SH_SLEEP_SPC, r5) |
| 47 | mov.l r5, @r0 | ||
| 48 | 46 | ||
| 49 | /* save sr */ | 47 | /* save sr */ |
| 50 | mova saved_sr, r0 | 48 | stc sr, r0 |
| 51 | stc sr, r5 | 49 | mov.l r0, @(SH_SLEEP_SR, r5) |
| 52 | mov.l r5, @r0 | ||
| 53 | 50 | ||
| 54 | /* save mode flags */ | 51 | /* save sp */ |
| 55 | mova saved_mode, r0 | 52 | mov.l r15, @(SH_SLEEP_SP, r5) |
| 56 | mov.l r4, @r0 | 53 | |
| 54 | /* save stbcr */ | ||
| 55 | bsr save_register | ||
| 56 | mov #SH_SLEEP_REG_STBCR, r0 | ||
| 57 | |||
| 58 | /* save mmu and cache context if needed */ | ||
| 59 | mov.l @(SH_SLEEP_MODE, r5), r0 | ||
| 60 | tst #SUSP_SH_MMU, r0 | ||
| 61 | bt skip_mmu_save_disable | ||
| 62 | |||
| 63 | /* save mmu state */ | ||
| 64 | bsr save_register | ||
| 65 | mov #SH_SLEEP_REG_PTEH, r0 | ||
| 66 | |||
| 67 | bsr save_register | ||
| 68 | mov #SH_SLEEP_REG_PTEL, r0 | ||
| 69 | |||
| 70 | bsr save_register | ||
| 71 | mov #SH_SLEEP_REG_TTB, r0 | ||
| 72 | |||
| 73 | bsr save_register | ||
| 74 | mov #SH_SLEEP_REG_TEA, r0 | ||
| 75 | |||
| 76 | bsr save_register | ||
| 77 | mov #SH_SLEEP_REG_MMUCR, r0 | ||
| 78 | |||
| 79 | bsr save_register | ||
| 80 | mov #SH_SLEEP_REG_PTEA, r0 | ||
| 81 | |||
| 82 | bsr save_register | ||
| 83 | mov #SH_SLEEP_REG_PASCR, r0 | ||
| 57 | 84 | ||
| 58 | /* put mode flags in r0 */ | 85 | bsr save_register |
| 59 | mov r4, r0 | 86 | mov #SH_SLEEP_REG_IRMCR, r0 |
| 60 | 87 | ||
| 88 | /* invalidate TLBs and disable the MMU */ | ||
| 89 | bsr get_register | ||
| 90 | mov #SH_SLEEP_REG_MMUCR, r0 | ||
| 91 | mov #4, r1 | ||
| 92 | mov.l r1, @r0 | ||
| 93 | icbi @r0 | ||
| 94 | |||
| 95 | /* save cache registers and disable caches */ | ||
| 96 | bsr save_register | ||
| 97 | mov #SH_SLEEP_REG_CCR, r0 | ||
| 98 | |||
| 99 | bsr save_register | ||
| 100 | mov #SH_SLEEP_REG_RAMCR, r0 | ||
| 101 | |||
| 102 | bsr get_register | ||
| 103 | mov #SH_SLEEP_REG_CCR, r0 | ||
| 104 | mov #0, r1 | ||
| 105 | mov.l r1, @r0 | ||
| 106 | icbi @r0 | ||
| 107 | |||
| 108 | skip_mmu_save_disable: | ||
| 109 | /* call self-refresh entering code if needed */ | ||
| 110 | mov.l @(SH_SLEEP_MODE, r5), r0 | ||
| 61 | tst #SUSP_SH_SF, r0 | 111 | tst #SUSP_SH_SF, r0 |
| 62 | bt skip_set_sf | 112 | bt skip_set_sf |
| 63 | #ifdef CONFIG_CPU_SUBTYPE_SH7724 | 113 | |
| 64 | /* DBSC: put memory in self-refresh mode */ | 114 | mov.l @(SH_SLEEP_SF_PRE, r5), r0 |
| 65 | mov.l dben_reg, r4 | 115 | jsr @r0 |
| 66 | mov.l dben_data0, r1 | 116 | nop |
| 67 | mov.l r1, @r4 | ||
| 68 | |||
| 69 | mov.l dbrfpdn0_reg, r4 | ||
| 70 | mov.l dbrfpdn0_data0, r1 | ||
| 71 | mov.l r1, @r4 | ||
| 72 | |||
| 73 | mov.l dbcmdcnt_reg, r4 | ||
| 74 | mov.l dbcmdcnt_data0, r1 | ||
| 75 | mov.l r1, @r4 | ||
| 76 | |||
| 77 | mov.l dbcmdcnt_reg, r4 | ||
| 78 | mov.l dbcmdcnt_data1, r1 | ||
| 79 | mov.l r1, @r4 | ||
| 80 | |||
| 81 | mov.l dbrfpdn0_reg, r4 | ||
| 82 | mov.l dbrfpdn0_data1, r1 | ||
| 83 | mov.l r1, @r4 | ||
| 84 | #else | ||
| 85 | /* SBSC: disable power down and put in self-refresh mode */ | ||
| 86 | mov.l 1f, r4 | ||
| 87 | mov.l 2f, r1 | ||
| 88 | mov.l @r4, r2 | ||
| 89 | or r1, r2 | ||
| 90 | mov.l 3f, r3 | ||
| 91 | and r3, r2 | ||
| 92 | mov.l r2, @r4 | ||
| 93 | #endif | ||
| 94 | 117 | ||
| 95 | skip_set_sf: | 118 | skip_set_sf: |
| 119 | mov.l @(SH_SLEEP_MODE, r5), r0 | ||
| 96 | tst #SUSP_SH_STANDBY, r0 | 120 | tst #SUSP_SH_STANDBY, r0 |
| 97 | bt test_rstandby | 121 | bt test_rstandby |
| 98 | 122 | ||
| @@ -104,6 +128,12 @@ test_rstandby: | |||
| 104 | tst #SUSP_SH_RSTANDBY, r0 | 128 | tst #SUSP_SH_RSTANDBY, r0 |
| 105 | bt test_ustandby | 129 | bt test_ustandby |
| 106 | 130 | ||
| 131 | /* setup BAR register */ | ||
| 132 | bsr get_register | ||
| 133 | mov #SH_SLEEP_REG_BAR, r0 | ||
| 134 | mov.l @(SH_SLEEP_RESUME, r5), r1 | ||
| 135 | mov.l r1, @r0 | ||
| 136 | |||
| 107 | /* set mode to "r-standby mode" */ | 137 | /* set mode to "r-standby mode" */ |
| 108 | bra do_sleep | 138 | bra do_sleep |
| 109 | mov #0x20, r1 | 139 | mov #0x20, r1 |
| @@ -123,124 +153,136 @@ force_sleep: | |||
| 123 | 153 | ||
| 124 | do_sleep: | 154 | do_sleep: |
| 125 | /* setup and enter selected standby mode */ | 155 | /* setup and enter selected standby mode */ |
| 126 | mov.l 5f, r4 | 156 | bsr get_register |
| 127 | mov.l r1, @r4 | 157 | mov #SH_SLEEP_REG_STBCR, r0 |
| 158 | mov.l r1, @r0 | ||
| 128 | again: | 159 | again: |
| 129 | sleep | 160 | sleep |
| 130 | bra again | 161 | bra again |
| 131 | nop | 162 | nop |
| 132 | 163 | ||
| 133 | restore_jump_vbr: | 164 | save_register: |
| 165 | add #SH_SLEEP_BASE_ADDR, r0 | ||
| 166 | mov.l @(r0, r5), r1 | ||
| 167 | add #-SH_SLEEP_BASE_ADDR, r0 | ||
| 168 | mov.l @r1, r1 | ||
| 169 | add #SH_SLEEP_BASE_DATA, r0 | ||
| 170 | mov.l r1, @(r0, r5) | ||
| 171 | add #-SH_SLEEP_BASE_DATA, r0 | ||
| 172 | rts | ||
| 173 | nop | ||
| 174 | |||
| 175 | get_register: | ||
| 176 | add #SH_SLEEP_BASE_ADDR, r0 | ||
| 177 | mov.l @(r0, r5), r0 | ||
| 178 | rts | ||
| 179 | nop | ||
| 180 | ENTRY(sh_mobile_sleep_enter_end) | ||
| 181 | |||
| 182 | .balign 4 | ||
| 183 | ENTRY(sh_mobile_sleep_resume_start) | ||
| 184 | |||
| 185 | /* figure out start address */ | ||
| 186 | bsr 0f | ||
| 187 | nop | ||
| 188 | 0: | ||
| 189 | sts pr, k1 | ||
| 190 | mov.l 1f, k0 | ||
| 191 | and k0, k1 | ||
| 192 | |||
| 193 | /* store pointer to data area in VBR */ | ||
| 194 | ldc k1, vbr | ||
| 195 | |||
| 196 | /* setup sr with saved sr */ | ||
| 197 | mov.l @(SH_SLEEP_SR, k1), k0 | ||
| 198 | ldc k0, sr | ||
| 199 | |||
| 200 | /* now: user register set! */ | ||
| 201 | stc vbr, r5 | ||
| 202 | |||
| 134 | /* setup spc with return address to c code */ | 203 | /* setup spc with return address to c code */ |
| 135 | mov.l saved_spc, k0 | 204 | mov.l @(SH_SLEEP_SPC, r5), r0 |
| 136 | ldc k0, spc | 205 | ldc r0, spc |
| 137 | 206 | ||
| 138 | /* restore vbr */ | 207 | /* restore vbr */ |
| 139 | mov.l saved_vbr, k0 | 208 | mov.l @(SH_SLEEP_VBR, r5), r0 |
| 140 | ldc k0, vbr | 209 | ldc r0, vbr |
| 141 | 210 | ||
| 142 | /* setup ssr with saved sr */ | 211 | /* setup ssr with saved sr */ |
| 143 | mov.l saved_sr, k0 | 212 | mov.l @(SH_SLEEP_SR, r5), r0 |
| 144 | ldc k0, ssr | 213 | ldc r0, ssr |
| 145 | 214 | ||
| 146 | /* get mode flags */ | 215 | /* restore sp */ |
| 147 | mov.l saved_mode, k0 | 216 | mov.l @(SH_SLEEP_SP, r5), r15 |
| 148 | 217 | ||
| 149 | done_sleep: | 218 | /* restore sleep mode register */ |
| 150 | /* reset standby mode to sleep mode */ | 219 | bsr restore_register |
| 151 | mov.l 5f, k4 | 220 | mov #SH_SLEEP_REG_STBCR, r0 |
| 152 | mov #0x00, k1 | ||
| 153 | mov.l k1, @k4 | ||
| 154 | 221 | ||
| 155 | tst #SUSP_SH_SF, k0 | 222 | /* call self-refresh resume code if needed */ |
| 223 | mov.l @(SH_SLEEP_MODE, r5), r0 | ||
| 224 | tst #SUSP_SH_SF, r0 | ||
| 156 | bt skip_restore_sf | 225 | bt skip_restore_sf |
| 157 | 226 | ||
| 158 | #ifdef CONFIG_CPU_SUBTYPE_SH7724 | 227 | mov.l @(SH_SLEEP_SF_POST, r5), r0 |
| 159 | /* DBSC: put memory in auto-refresh mode */ | 228 | jsr @r0 |
| 160 | mov.l dbrfpdn0_reg, k4 | 229 | nop |
| 161 | mov.l dbrfpdn0_data0, k1 | 230 | |
| 162 | mov.l k1, @k4 | ||
| 163 | |||
| 164 | nop /* sleep 140 ns */ | ||
| 165 | nop | ||
| 166 | nop | ||
| 167 | nop | ||
| 168 | |||
| 169 | mov.l dbcmdcnt_reg, k4 | ||
| 170 | mov.l dbcmdcnt_data0, k1 | ||
| 171 | mov.l k1, @k4 | ||
| 172 | |||
| 173 | mov.l dbcmdcnt_reg, k4 | ||
| 174 | mov.l dbcmdcnt_data1, k1 | ||
| 175 | mov.l k1, @k4 | ||
| 176 | |||
| 177 | mov.l dben_reg, k4 | ||
| 178 | mov.l dben_data1, k1 | ||
| 179 | mov.l k1, @k4 | ||
| 180 | |||
| 181 | mov.l dbrfpdn0_reg, k4 | ||
| 182 | mov.l dbrfpdn0_data2, k1 | ||
| 183 | mov.l k1, @k4 | ||
| 184 | #else | ||
| 185 | /* SBSC: set auto-refresh mode */ | ||
| 186 | mov.l 1f, k4 | ||
| 187 | mov.l @k4, k0 | ||
| 188 | mov.l 4f, k1 | ||
| 189 | and k1, k0 | ||
| 190 | mov.l k0, @k4 | ||
| 191 | mov.l 6f, k4 | ||
| 192 | mov.l 8f, k0 | ||
| 193 | mov.l @k4, k1 | ||
| 194 | mov #-1, k4 | ||
| 195 | add k4, k1 | ||
| 196 | or k1, k0 | ||
| 197 | mov.l 7f, k1 | ||
| 198 | mov.l k0, @k1 | ||
| 199 | #endif | ||
| 200 | skip_restore_sf: | 231 | skip_restore_sf: |
| 201 | /* jump to vbr vector */ | 232 | /* restore mmu and cache state if needed */ |
| 202 | mov.l saved_vbr, k0 | 233 | mov.l @(SH_SLEEP_MODE, r5), r0 |
| 203 | mov.l offset_vbr, k4 | 234 | tst #SUSP_SH_MMU, r0 |
| 204 | add k4, k0 | 235 | bt skip_restore_mmu |
| 205 | jmp @k0 | 236 | |
| 237 | /* restore mmu state */ | ||
| 238 | bsr restore_register | ||
| 239 | mov #SH_SLEEP_REG_PTEH, r0 | ||
| 240 | |||
| 241 | bsr restore_register | ||
| 242 | mov #SH_SLEEP_REG_PTEL, r0 | ||
| 243 | |||
| 244 | bsr restore_register | ||
| 245 | mov #SH_SLEEP_REG_TTB, r0 | ||
| 246 | |||
| 247 | bsr restore_register | ||
| 248 | mov #SH_SLEEP_REG_TEA, r0 | ||
| 249 | |||
| 250 | bsr restore_register | ||
| 251 | mov #SH_SLEEP_REG_PTEA, r0 | ||
| 252 | |||
| 253 | bsr restore_register | ||
| 254 | mov #SH_SLEEP_REG_PASCR, r0 | ||
| 255 | |||
| 256 | bsr restore_register | ||
| 257 | mov #SH_SLEEP_REG_IRMCR, r0 | ||
| 258 | |||
| 259 | bsr restore_register | ||
| 260 | mov #SH_SLEEP_REG_MMUCR, r0 | ||
| 261 | icbi @r0 | ||
| 262 | |||
| 263 | /* restore cache settings */ | ||
| 264 | bsr restore_register | ||
| 265 | mov #SH_SLEEP_REG_RAMCR, r0 | ||
| 266 | icbi @r0 | ||
| 267 | |||
| 268 | bsr restore_register | ||
| 269 | mov #SH_SLEEP_REG_CCR, r0 | ||
| 270 | icbi @r0 | ||
| 271 | |||
| 272 | skip_restore_mmu: | ||
| 273 | rte | ||
| 206 | nop | 274 | nop |
| 207 | 275 | ||
| 208 | .balign 4 | 276 | restore_register: |
| 209 | saved_mode: .long 0 | 277 | add #SH_SLEEP_BASE_DATA, r0 |
| 210 | saved_spc: .long 0 | 278 | mov.l @(r0, r5), r1 |
| 211 | saved_sr: .long 0 | 279 | add #-SH_SLEEP_BASE_DATA, r0 |
| 212 | saved_vbr: .long 0 | 280 | add #SH_SLEEP_BASE_ADDR, r0 |
| 213 | offset_vbr: .long 0x600 | 281 | mov.l @(r0, r5), r0 |
| 214 | #ifdef CONFIG_CPU_SUBTYPE_SH7724 | 282 | mov.l r1, @r0 |
| 215 | dben_reg: .long 0xfd000010 /* DBEN */ | 283 | rts |
| 216 | dben_data0: .long 0 | ||
| 217 | dben_data1: .long 1 | ||
| 218 | dbrfpdn0_reg: .long 0xfd000040 /* DBRFPDN0 */ | ||
| 219 | dbrfpdn0_data0: .long 0 | ||
| 220 | dbrfpdn0_data1: .long 1 | ||
| 221 | dbrfpdn0_data2: .long 0x00010000 | ||
| 222 | dbcmdcnt_reg: .long 0xfd000014 /* DBCMDCNT */ | ||
| 223 | dbcmdcnt_data0: .long 2 | ||
| 224 | dbcmdcnt_data1: .long 4 | ||
| 225 | #else | ||
| 226 | 1: .long 0xfe400008 /* SDCR0 */ | ||
| 227 | 2: .long 0x00000400 | ||
| 228 | 3: .long 0xffff7fff | ||
| 229 | 4: .long 0xfffffbff | ||
| 230 | #endif | ||
| 231 | 5: .long 0xa4150020 /* STBCR */ | ||
| 232 | 6: .long 0xfe40001c /* RTCOR */ | ||
| 233 | 7: .long 0xfe400018 /* RTCNT */ | ||
| 234 | 8: .long 0xa55a0000 | ||
| 235 | |||
| 236 | |||
| 237 | /* interrupt vector @ 0x600 */ | ||
| 238 | .balign 0x400,0,0x400 | ||
| 239 | .long 0xdeadbeef | ||
| 240 | .balign 0x200,0,0x200 | ||
| 241 | bra restore_jump_vbr | ||
| 242 | nop | 284 | nop |
| 243 | sh_mobile_standby_end: | ||
| 244 | 285 | ||
| 245 | ENTRY(sh_mobile_standby_size) | 286 | .balign 4 |
| 246 | .long sh_mobile_standby_end - sh_mobile_standby | 287 | 1: .long ~0x7ff |
| 288 | ENTRY(sh_mobile_sleep_resume_end) | ||
diff --git a/arch/sh/kernel/cpu/ubc.S b/arch/sh/kernel/cpu/ubc.S deleted file mode 100644 index 81923079fa12..000000000000 --- a/arch/sh/kernel/cpu/ubc.S +++ /dev/null | |||
| @@ -1,59 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * arch/sh/kernel/cpu/ubc.S | ||
| 3 | * | ||
| 4 | * Set of management routines for the User Break Controller (UBC) | ||
| 5 | * | ||
| 6 | * Copyright (C) 2002 Paul Mundt | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms of the GNU General Public License as published by the | ||
| 10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
| 11 | * option) any later version. | ||
| 12 | */ | ||
| 13 | #include <linux/linkage.h> | ||
| 14 | #include <asm/ubc.h> | ||
| 15 | |||
| 16 | #define STBCR2 0xffc00010 | ||
| 17 | |||
| 18 | ENTRY(ubc_sleep) | ||
| 19 | mov #0, r0 | ||
| 20 | |||
| 21 | mov.l 1f, r1 ! Zero out UBC_BBRA .. | ||
| 22 | mov.w r0, @r1 | ||
| 23 | |||
| 24 | mov.l 2f, r1 ! .. same for BBRB .. | ||
| 25 | mov.w r0, @r1 | ||
| 26 | |||
| 27 | mov.l 3f, r1 ! .. and again for BRCR. | ||
| 28 | mov.w r0, @r1 | ||
| 29 | |||
| 30 | mov.w @r1, r0 ! Dummy read BRCR | ||
| 31 | |||
| 32 | mov.l 4f, r1 ! Set MSTP5 in STBCR2 | ||
| 33 | mov.b @r1, r0 | ||
| 34 | or #0x01, r0 | ||
| 35 | mov.b r0, @r1 | ||
| 36 | |||
| 37 | mov.b @r1, r0 ! Two dummy reads .. | ||
| 38 | mov.b @r1, r0 | ||
| 39 | |||
| 40 | rts | ||
| 41 | nop | ||
| 42 | |||
| 43 | ENTRY(ubc_wakeup) | ||
| 44 | mov.l 4f, r1 ! Clear MSTP5 | ||
| 45 | mov.b @r1, r0 | ||
| 46 | and #0xfe, r0 | ||
| 47 | mov.b r0, @r1 | ||
| 48 | |||
| 49 | mov.b @r1, r0 ! Two more dummy reads .. | ||
| 50 | mov.b @r1, r0 | ||
| 51 | |||
| 52 | rts | ||
| 53 | nop | ||
| 54 | |||
| 55 | 1: .long UBC_BBRA | ||
| 56 | 2: .long UBC_BBRB | ||
| 57 | 3: .long UBC_BRCR | ||
| 58 | 4: .long STBCR2 | ||
| 59 | |||
