146 files changed, 4741 insertions, 2405 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 88cdeb9f72d..0031a6979f3 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -16,7 +16,9 @@ config SUPERH
     select HAVE_IOREMAP_PROT if MMU
     select HAVE_ARCH_TRACEHOOK
     select HAVE_DMA_API_DEBUG
+    select HAVE_DMA_ATTRS
     select HAVE_PERF_EVENTS
+    select PERF_USE_VMALLOC
     select HAVE_KERNEL_GZIP
     select HAVE_KERNEL_BZIP2
     select HAVE_KERNEL_LZMA
@@ -37,6 +39,7 @@ config SUPERH32
     select HAVE_FTRACE_MCOUNT_RECORD
     select HAVE_DYNAMIC_FTRACE
     select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+    select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
     select HAVE_FUNCTION_GRAPH_TRACER
     select HAVE_ARCH_KGDB
     select ARCH_HIBERNATION_POSSIBLE if MMU
@@ -170,6 +173,12 @@ config ARCH_HAS_CPU_IDLE_WAIT
 config IO_TRAPPED
     bool
 
+config DMA_COHERENT
+    bool
+
+config DMA_NONCOHERENT
+    def_bool !DMA_COHERENT
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -220,6 +229,7 @@ config CPU_SHX2
 
 config CPU_SHX3
     bool
+    select DMA_COHERENT
 
 config ARCH_SHMOBILE
     bool
@@ -761,17 +771,6 @@ config ENTRY_OFFSET
     default "0x00010000" if PAGE_SIZE_64KB
     default "0x00000000"
 
-config UBC_WAKEUP
-    bool "Wakeup UBC on startup"
-    depends on CPU_SH4 && !CPU_SH4A
-    help
-      Selecting this option will wakeup the User Break Controller (UBC) on
-      startup. Although the UBC is left in an awake state when the processor
-      comes up, some boot loaders misbehave by putting the UBC to sleep in a
-      power saving state, which causes issues with things like ptrace().
-
-      If unsure, say N.
-
 choice
     prompt "Kernel command line"
     optional
@@ -818,7 +817,13 @@ config MAPLE
       Dreamcast with a serial line terminal or a remote network
       connection.
 
-source "arch/sh/drivers/pci/Kconfig"
+config PCI
+    bool "PCI support"
+    depends on SYS_SUPPORTS_PCI
+    help
+      Find out whether you have a PCI motherboard. PCI is the name of a
+      bus system, i.e. the way the CPU talks to the other stuff inside
+      your box. If you have PCI, say Y, otherwise N.
 
 source "drivers/pci/pcie/Kconfig"
 
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 66e40aabc60..ac17c5ac550 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -78,6 +78,9 @@ defaultimage-$(CONFIG_SUPERH32) := zImage
 defaultimage-$(CONFIG_SH_SH7785LCR) := uImage
 defaultimage-$(CONFIG_SH_RSK) := uImage
 defaultimage-$(CONFIG_SH_URQUELL) := uImage
+defaultimage-$(CONFIG_SH_MIGOR) := uImage
+defaultimage-$(CONFIG_SH_AP325RXA) := uImage
+defaultimage-$(CONFIG_SH_7724_SOLUTION_ENGINE) := uImage
 defaultimage-$(CONFIG_SH_7206_SOLUTION_ENGINE) := vmlinux
 defaultimage-$(CONFIG_SH_7619_SOLUTION_ENGINE) := vmlinux
 
@@ -136,6 +139,7 @@ machdir-$(CONFIG_SH_7751_SYSTEMH) += mach-systemh
 machdir-$(CONFIG_SH_EDOSK7705) += mach-edosk7705
 machdir-$(CONFIG_SH_HIGHLANDER) += mach-highlander
 machdir-$(CONFIG_SH_MIGOR) += mach-migor
+machdir-$(CONFIG_SH_AP325RXA) += mach-ap325rxa
 machdir-$(CONFIG_SH_KFR2R09) += mach-kfr2r09
 machdir-$(CONFIG_SH_ECOVEC) += mach-ecovec24
 machdir-$(CONFIG_SH_SDK7780) += mach-sdk7780
diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile
index 7baa2109023..ce0f2638178 100644
--- a/arch/sh/boards/Makefile
+++ b/arch/sh/boards/Makefile
@@ -1,7 +1,6 @@
 #
 # Specific board support, not covered by a mach group.
 #
-obj-$(CONFIG_SH_AP325RXA) += board-ap325rxa.o
 obj-$(CONFIG_SH_MAGIC_PANEL_R2) += board-magicpanelr2.o
 obj-$(CONFIG_SH_SH7785LCR) += board-sh7785lcr.o
 obj-$(CONFIG_SH_URQUELL) += board-urquell.o
diff --git a/arch/sh/boards/mach-ap325rxa/Makefile b/arch/sh/boards/mach-ap325rxa/Makefile
new file mode 100644
index 00000000000..4cf1774d261
--- /dev/null
+++ b/arch/sh/boards/mach-ap325rxa/Makefile
@@ -0,0 +1,2 @@
+obj-y := setup.o sdram.o
+
diff --git a/arch/sh/boards/mach-ap325rxa/sdram.S b/arch/sh/boards/mach-ap325rxa/sdram.S
new file mode 100644
index 00000000000..db24fbed4fc
--- /dev/null
+++ b/arch/sh/boards/mach-ap325rxa/sdram.S
@@ -0,0 +1,69 @@
+/*
+ * AP325RXA sdram self/auto-refresh setup code
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/suspend.h>
+#include <asm/romimage-macros.h>
+
+/* code to enter and leave self-refresh. must be self-contained.
+ * this code will be copied to on-chip memory and executed from there.
+ */
+    .balign 4
+ENTRY(ap325rxa_sdram_enter_start)
+
+    /* SBSC: disable power down and put in self-refresh mode */
+    mov.l   1f, r4
+    mov.l   2f, r1
+    mov.l   @r4, r2
+    or      r1, r2
+    mov.l   3f, r3
+    and     r3, r2
+    mov.l   r2, @r4
+
+    rts
+     nop
+
+    .balign 4
+1:  .long   0xfe400008 /* SDCR0 */
+2:  .long   0x00000400
+3:  .long   0xffff7fff
+ENTRY(ap325rxa_sdram_enter_end)
+
+    .balign 4
+ENTRY(ap325rxa_sdram_leave_start)
+
+    /* SBSC: set auto-refresh mode */
+    mov.l   1f, r4
+    mov.l   @r4, r0
+    mov.l   4f, r1
+    and     r1, r0
+    mov.l   r0, @r4
+    mov.l   6f, r4
+    mov.l   8f, r0
+    mov.l   @r4, r1
+    mov     #-1, r4
+    add     r4, r1
+    or      r1, r0
+    mov.l   7f, r1
+    mov.l   r0, @r1
+
+    rts
+     nop
+
+    .balign 4
+1:  .long   0xfe400008 /* SDCR0 */
+4:  .long   0xfffffbff
+6:  .long   0xfe40001c /* RTCOR */
+7:  .long   0xfe400018 /* RTCNT */
+8:  .long   0xa55a0000
+ENTRY(ap325rxa_sdram_leave_end)
diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 2d080732a96..cf9dc12dfeb 100644
--- a/arch/sh/boards/board-ap325rxa.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -20,8 +20,6 @@
 #include <linux/i2c.h>
 #include <linux/smsc911x.h>
 #include <linux/gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_gpio.h>
 #include <media/ov772x.h>
 #include <media/soc_camera.h>
 #include <media/soc_camera_platform.h>
@@ -29,6 +27,7 @@
 #include <video/sh_mobile_lcdc.h>
 #include <asm/io.h>
 #include <asm/clock.h>
+#include <asm/suspend.h>
 #include <cpu/sh7723.h>
 
 static struct smsc911x_platform_config smsc911x_config = {
@@ -409,17 +408,49 @@ static struct platform_device ceu_device = {
     },
 };
 
-struct spi_gpio_platform_data sdcard_cn3_platform_data = {
-    .sck = GPIO_PTD0,
-    .mosi = GPIO_PTD1,
-    .miso = GPIO_PTD2,
-    .num_chipselect = 1,
+static struct resource sdhi0_cn3_resources[] = {
+    [0] = {
+        .name = "SDHI0",
+        .start = 0x04ce0000,
+        .end = 0x04ce01ff,
+        .flags = IORESOURCE_MEM,
+    },
+    [1] = {
+        .start = 101,
+        .flags = IORESOURCE_IRQ,
+    },
 };
 
-static struct platform_device sdcard_cn3_device = {
-    .name = "spi_gpio",
-    .dev = {
-        .platform_data = &sdcard_cn3_platform_data,
+static struct platform_device sdhi0_cn3_device = {
+    .name = "sh_mobile_sdhi",
+    .id = 0, /* "sdhi0" clock */
+    .num_resources = ARRAY_SIZE(sdhi0_cn3_resources),
+    .resource = sdhi0_cn3_resources,
+    .archdata = {
+        .hwblk_id = HWBLK_SDHI0,
+    },
+};
+
+static struct resource sdhi1_cn7_resources[] = {
+    [0] = {
+        .name = "SDHI1",
+        .start = 0x04cf0000,
+        .end = 0x04cf01ff,
+        .flags = IORESOURCE_MEM,
+    },
+    [1] = {
+        .start = 24,
+        .flags = IORESOURCE_IRQ,
+    },
+};
+
+static struct platform_device sdhi1_cn7_device = {
+    .name = "sh_mobile_sdhi",
+    .id = 1, /* "sdhi1" clock */
+    .num_resources = ARRAY_SIZE(sdhi1_cn7_resources),
+    .resource = sdhi1_cn7_resources,
+    .archdata = {
+        .hwblk_id = HWBLK_SDHI1,
     },
 };
 
@@ -470,22 +501,26 @@ static struct platform_device *ap325rxa_devices[] __initdata = {
     &lcdc_device,
     &ceu_device,
     &nand_flash_device,
-    &sdcard_cn3_device,
+    &sdhi0_cn3_device,
+    &sdhi1_cn7_device,
     &ap325rxa_camera[0],
     &ap325rxa_camera[1],
 };
 
-static struct spi_board_info ap325rxa_spi_devices[] = {
-    {
-        .modalias = "mmc_spi",
-        .max_speed_hz = 5000000,
-        .chip_select = 0,
-        .controller_data = (void *) GPIO_PTD5,
-    },
-};
+extern char ap325rxa_sdram_enter_start;
+extern char ap325rxa_sdram_enter_end;
+extern char ap325rxa_sdram_leave_start;
+extern char ap325rxa_sdram_leave_end;
 
 static int __init ap325rxa_devices_setup(void)
 {
+    /* register board specific self-refresh code */
+    sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
+                                    &ap325rxa_sdram_enter_start,
+                                    &ap325rxa_sdram_enter_end,
+                                    &ap325rxa_sdram_leave_start,
+                                    &ap325rxa_sdram_leave_end);
+
     /* LD3 and LD4 LEDs */
     gpio_request(GPIO_PTX5, NULL); /* RUN */
     gpio_direction_output(GPIO_PTX5, 1);
@@ -578,12 +613,28 @@ static int __init ap325rxa_devices_setup(void)
 
     platform_resource_setup_memory(&ceu_device, "ceu", 4 << 20);
 
+    /* SDHI0 - CN3 - SD CARD */
+    gpio_request(GPIO_FN_SDHI0CD_PTD, NULL);
+    gpio_request(GPIO_FN_SDHI0WP_PTD, NULL);
+    gpio_request(GPIO_FN_SDHI0D3_PTD, NULL);
+    gpio_request(GPIO_FN_SDHI0D2_PTD, NULL);
+    gpio_request(GPIO_FN_SDHI0D1_PTD, NULL);
+    gpio_request(GPIO_FN_SDHI0D0_PTD, NULL);
+    gpio_request(GPIO_FN_SDHI0CMD_PTD, NULL);
+    gpio_request(GPIO_FN_SDHI0CLK_PTD, NULL);
+
+    /* SDHI1 - CN7 - MICRO SD CARD */
+    gpio_request(GPIO_FN_SDHI1CD, NULL);
+    gpio_request(GPIO_FN_SDHI1D3, NULL);
+    gpio_request(GPIO_FN_SDHI1D2, NULL);
+    gpio_request(GPIO_FN_SDHI1D1, NULL);
+    gpio_request(GPIO_FN_SDHI1D0, NULL);
+    gpio_request(GPIO_FN_SDHI1CMD, NULL);
+    gpio_request(GPIO_FN_SDHI1CLK, NULL);
+
     i2c_register_board_info(0, ap325rxa_i2c_devices,
                             ARRAY_SIZE(ap325rxa_i2c_devices));
 
-    spi_register_board_info(ap325rxa_spi_devices,
-                            ARRAY_SIZE(ap325rxa_spi_devices));
-
     return platform_add_devices(ap325rxa_devices,
                                 ARRAY_SIZE(ap325rxa_devices));
 }
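The AP325RXA conversion above (and the Migo-R and KFR2R09 ones further down) all have the same shape: the mmc_spi-over-spi_gpio card slot is dropped and the on-chip SDHI controller is registered instead as a "sh_mobile_sdhi" platform device with one MEM resource, one IRQ resource and a hwblk id for the runtime PM code. A minimal sketch of that shape, using the SDHI0 values from the hunks above (base address, IRQ number and HWBLK_* id are board/SoC specific):

    static struct resource sdhi_resources[] = {
        [0] = {
            .name  = "SDHI0",
            .start = 0x04ce0000,        /* register window */
            .end   = 0x04ce01ff,
            .flags = IORESOURCE_MEM,
        },
        [1] = {
            .start = 101,               /* SDHI0 interrupt */
            .flags = IORESOURCE_IRQ,
        },
    };

    static struct platform_device sdhi_device = {
        .name          = "sh_mobile_sdhi",
        .id            = 0,             /* matches the "sdhi0" clock */
        .num_resources = ARRAY_SIZE(sdhi_resources),
        .resource      = sdhi_resources,
        .archdata      = {
            .hwblk_id = HWBLK_SDHI0,    /* per-SoC hardware block id */
        },
    };

The matching pin-function GPIOs (SDHIxCD/WP/CMD/CLK/D0-D3) still have to be claimed in the board's devices_setup() before the device is added, as done at the end of this file.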
diff --git a/arch/sh/boards/mach-ecovec24/Makefile b/arch/sh/boards/mach-ecovec24/Makefile
index 51f85215165..e69bc82208f 100644
--- a/arch/sh/boards/mach-ecovec24/Makefile
+++ b/arch/sh/boards/mach-ecovec24/Makefile
@@ -6,4 +6,4 @@
 # for more details.
 #
 
-obj-y := setup.o
\ No newline at end of file
+obj-y := setup.o sdram.o
\ No newline at end of file
diff --git a/arch/sh/boards/mach-ecovec24/sdram.S b/arch/sh/boards/mach-ecovec24/sdram.S
new file mode 100644
index 00000000000..83344004440
--- /dev/null
+++ b/arch/sh/boards/mach-ecovec24/sdram.S
@@ -0,0 +1,52 @@
+/*
+ * Ecovec24 sdram self/auto-refresh setup code
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/suspend.h>
+#include <asm/romimage-macros.h>
+
+/* code to enter and leave self-refresh. must be self-contained.
+ * this code will be copied to on-chip memory and executed from there.
+ */
+    .balign 4
+ENTRY(ecovec24_sdram_enter_start)
+
+    /* DBSC: put memory in self-refresh mode */
+
+    ED  0xFD000010, 0x00000000 /* DBEN */
+    ED  0xFD000040, 0x00000000 /* DBRFPDN0 */
+    ED  0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+    ED  0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+    ED  0xFD000040, 0x00000001 /* DBRFPDN0 */
+
+    rts
+     nop
+
+ENTRY(ecovec24_sdram_enter_end)
+
+    .balign 4
+ENTRY(ecovec24_sdram_leave_start)
+
+    /* DBSC: put memory in auto-refresh mode */
+
+    ED  0xFD000040, 0x00000000 /* DBRFPDN0 */
+    WAIT 1
+    ED  0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+    ED  0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+    ED  0xFD000010, 0x00000001 /* DBEN */
+    ED  0xFD000040, 0x00010000 /* DBRFPDN0 */
+
+    rts
+     nop
+
+ENTRY(ecovec24_sdram_leave_end)
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 3b1ceb46fa5..826e62326d5 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -20,12 +20,14 @@
 #include <linux/i2c.h>
 #include <linux/i2c/tsc2007.h>
 #include <linux/input.h>
+#include <linux/input/sh_keysc.h>
+#include <linux/mfd/sh_mobile_sdhi.h>
 #include <video/sh_mobile_lcdc.h>
 #include <media/sh_mobile_ceu.h>
 #include <asm/heartbeat.h>
 #include <asm/sh_eth.h>
-#include <asm/sh_keysc.h>
 #include <asm/clock.h>
+#include <asm/suspend.h>
 #include <cpu/sh7724.h>
 
 /*
@@ -147,6 +149,9 @@ static struct platform_device sh_eth_device = {
     },
     .num_resources = ARRAY_SIZE(sh_eth_resources),
     .resource = sh_eth_resources,
+    .archdata = {
+        .hwblk_id = HWBLK_ETHER,
+    },
 };
 
 /* USB0 host */
@@ -185,30 +190,18 @@ static struct platform_device usb0_host_device = {
     .resource = usb0_host_resources,
 };
 
-/*
- * USB1
- *
- * CN5 can use both host/function,
- * and we can determine it by checking PTB[3]
- *
- * This time only USB1 host is supported.
- */
+/* USB1 host/function */
 void usb1_port_power(int port, int power)
 {
-    if (!gpio_get_value(GPIO_PTB3)) {
-        printk(KERN_ERR "USB1 function is not supported\n");
-        return;
-    }
-
     gpio_set_value(GPIO_PTB5, power);
 }
 
-static struct r8a66597_platdata usb1_host_data = {
+static struct r8a66597_platdata usb1_common_data = {
     .on_chip = 1,
     .port_power = usb1_port_power,
 };
 
-static struct resource usb1_host_resources[] = {
+static struct resource usb1_common_resources[] = {
     [0] = {
         .start = 0xa4d90000,
         .end = 0xa4d90124 - 1,
@@ -221,16 +214,16 @@ static struct resource usb1_host_resources[] = {
     },
 };
 
-static struct platform_device usb1_host_device = {
-    .name = "r8a66597_hcd",
+static struct platform_device usb1_common_device = {
+    /* .name will be added in arch_setup */
     .id = 1,
     .dev = {
         .dma_mask = NULL, /* not use dma */
         .coherent_dma_mask = 0xffffffff,
-        .platform_data = &usb1_host_data,
+        .platform_data = &usb1_common_data,
     },
-    .num_resources = ARRAY_SIZE(usb1_host_resources),
-    .resource = usb1_host_resources,
+    .num_resources = ARRAY_SIZE(usb1_common_resources),
+    .resource = usb1_common_resources,
 };
 
 /* LCDC */
@@ -428,16 +421,90 @@ static struct i2c_board_info ts_i2c_clients = {
     .irq = IRQ0,
 };
 
+/* SHDI0 */
+static void sdhi0_set_pwr(struct platform_device *pdev, int state)
+{
+    gpio_set_value(GPIO_PTB6, state);
+}
+
+static struct sh_mobile_sdhi_info sdhi0_info = {
+    .set_pwr = sdhi0_set_pwr,
+};
+
+static struct resource sdhi0_resources[] = {
+    [0] = {
+        .name = "SDHI0",
+        .start = 0x04ce0000,
+        .end = 0x04ce01ff,
+        .flags = IORESOURCE_MEM,
+    },
+    [1] = {
+        .start = 101,
+        .flags = IORESOURCE_IRQ,
+    },
+};
+
+static struct platform_device sdhi0_device = {
+    .name = "sh_mobile_sdhi",
+    .num_resources = ARRAY_SIZE(sdhi0_resources),
+    .resource = sdhi0_resources,
+    .id = 0,
+    .dev = {
+        .platform_data = &sdhi0_info,
+    },
+    .archdata = {
+        .hwblk_id = HWBLK_SDHI0,
+    },
+};
+
+/* SHDI1 */
+static void sdhi1_set_pwr(struct platform_device *pdev, int state)
+{
+    gpio_set_value(GPIO_PTB7, state);
+}
+
+static struct sh_mobile_sdhi_info sdhi1_info = {
+    .set_pwr = sdhi1_set_pwr,
+};
+
+static struct resource sdhi1_resources[] = {
+    [0] = {
+        .name = "SDHI1",
+        .start = 0x04cf0000,
+        .end = 0x04cf01ff,
+        .flags = IORESOURCE_MEM,
+    },
+    [1] = {
+        .start = 24,
+        .flags = IORESOURCE_IRQ,
+    },
+};
+
+static struct platform_device sdhi1_device = {
+    .name = "sh_mobile_sdhi",
+    .num_resources = ARRAY_SIZE(sdhi1_resources),
+    .resource = sdhi1_resources,
+    .id = 1,
+    .dev = {
+        .platform_data = &sdhi1_info,
+    },
+    .archdata = {
+        .hwblk_id = HWBLK_SDHI1,
+    },
+};
+
 static struct platform_device *ecovec_devices[] __initdata = {
     &heartbeat_device,
     &nor_flash_device,
     &sh_eth_device,
     &usb0_host_device,
-    &usb1_host_device, /* USB1 host support */
+    &usb1_common_device,
     &lcdc_device,
     &ceu0_device,
     &ceu1_device,
     &keysc_device,
+    &sdhi0_device,
+    &sdhi1_device,
 };
 
 #define EEPROM_ADDR 0x50
@@ -466,12 +533,9 @@ static u8 mac_read(struct i2c_adapter *a, u8 command)
     return buf;
 }
 
-#define MAC_LEN 6
-static void __init sh_eth_init(void)
+static void __init sh_eth_init(struct sh_eth_plat_data *pd)
 {
     struct i2c_adapter *a = i2c_get_adapter(1);
-    struct clk *eth_clk;
-    u8 mac[MAC_LEN];
     int i;
 
     if (!a) {
@@ -479,39 +543,30 @@ static void __init sh_eth_init(void)
         return;
     }
 
-    eth_clk = clk_get(NULL, "eth0");
-    if (!eth_clk) {
-        pr_err("can not get eth0 clk\n");
-        return;
-    }
-
     /* read MAC address frome EEPROM */
-    for (i = 0; i < MAC_LEN; i++) {
-        mac[i] = mac_read(a, 0x10 + i);
+    for (i = 0; i < sizeof(pd->mac_addr); i++) {
+        pd->mac_addr[i] = mac_read(a, 0x10 + i);
         msleep(10);
     }
-
-    /* clock enable */
-    clk_enable(eth_clk);
-
-    /* reset sh-eth */
-    ctrl_outl(0x1, SH_ETH_ADDR + 0x0);
-
-    /* set MAC addr */
-    ctrl_outl((mac[0] << 24) |
-              (mac[1] << 16) |
-              (mac[2] << 8) |
-              (mac[3] << 0), SH_ETH_MAHR);
-    ctrl_outl((mac[4] << 8) |
-              (mac[5] << 0), SH_ETH_MALR);
-
-    clk_put(eth_clk);
 }
 
 #define PORT_HIZA 0xA4050158
 #define IODRIVEA  0xA405018A
+
+extern char ecovec24_sdram_enter_start;
+extern char ecovec24_sdram_enter_end;
+extern char ecovec24_sdram_leave_start;
+extern char ecovec24_sdram_leave_end;
+
 static int __init arch_setup(void)
 {
+    /* register board specific self-refresh code */
+    sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
+                                    &ecovec24_sdram_enter_start,
+                                    &ecovec24_sdram_enter_end,
+                                    &ecovec24_sdram_leave_start,
+                                    &ecovec24_sdram_leave_end);
+
     /* enable STATUS0, STATUS2 and PDSTATUS */
     gpio_request(GPIO_FN_STATUS0, NULL);
     gpio_request(GPIO_FN_STATUS2, NULL);
@@ -561,6 +616,14 @@ static int __init arch_setup(void)
     ctrl_outw(0x0600, 0xa40501d4);
     ctrl_outw(0x0600, 0xa4050192);
 
+    if (gpio_get_value(GPIO_PTB3)) {
+        printk(KERN_INFO "USB1 function is selected\n");
+        usb1_common_device.name = "r8a66597_udc";
+    } else {
+        printk(KERN_INFO "USB1 host is selected\n");
+        usb1_common_device.name = "r8a66597_hcd";
+    }
+
     /* enable LCDC */
     gpio_request(GPIO_FN_LCDD23, NULL);
     gpio_request(GPIO_FN_LCDD22, NULL);
@@ -603,8 +666,8 @@ static int __init arch_setup(void)
     gpio_direction_output(GPIO_PTR1, 0);
     gpio_direction_output(GPIO_PTA2, 0);
 
-    /* I/O buffer drive ability is low */
-    ctrl_outw((ctrl_inw(IODRIVEA) & ~0x00c0) | 0x0040 , IODRIVEA);
+    /* I/O buffer drive ability is high */
+    ctrl_outw((ctrl_inw(IODRIVEA) & ~0x00c0) | 0x0080 , IODRIVEA);
 
     if (gpio_get_value(GPIO_PTE6)) {
         /* DVI */
@@ -710,6 +773,33 @@ static int __init arch_setup(void)
     gpio_direction_input(GPIO_PTR5);
     gpio_direction_input(GPIO_PTR6);
 
+    /* enable SDHI0 (needs DS2.4 set to ON) */
+    gpio_request(GPIO_FN_SDHI0CD, NULL);
+    gpio_request(GPIO_FN_SDHI0WP, NULL);
+    gpio_request(GPIO_FN_SDHI0CMD, NULL);
+    gpio_request(GPIO_FN_SDHI0CLK, NULL);
+    gpio_request(GPIO_FN_SDHI0D3, NULL);
+    gpio_request(GPIO_FN_SDHI0D2, NULL);
+    gpio_request(GPIO_FN_SDHI0D1, NULL);
+    gpio_request(GPIO_FN_SDHI0D0, NULL);
+    gpio_request(GPIO_PTB6, NULL);
+    gpio_direction_output(GPIO_PTB6, 0);
+
+    /* enable SDHI1 (needs DS2.6,7 set to ON,OFF) */
+    gpio_request(GPIO_FN_SDHI1CD, NULL);
+    gpio_request(GPIO_FN_SDHI1WP, NULL);
+    gpio_request(GPIO_FN_SDHI1CMD, NULL);
+    gpio_request(GPIO_FN_SDHI1CLK, NULL);
+    gpio_request(GPIO_FN_SDHI1D3, NULL);
+    gpio_request(GPIO_FN_SDHI1D2, NULL);
+    gpio_request(GPIO_FN_SDHI1D1, NULL);
+    gpio_request(GPIO_FN_SDHI1D0, NULL);
+    gpio_request(GPIO_PTB7, NULL);
+    gpio_direction_output(GPIO_PTB7, 0);
+
+    /* I/O buffer drive ability is high for SDHI1 */
+    ctrl_outw((ctrl_inw(IODRIVEA) & ~0x3000) | 0x2000 , IODRIVEA);
+
     /* enable I2C device */
     i2c_register_board_info(1, i2c1_devices,
                             ARRAY_SIZE(i2c1_devices));
@@ -721,12 +811,11 @@ arch_initcall(arch_setup);
 
 static int __init devices_setup(void)
 {
-    sh_eth_init();
+    sh_eth_init(&sh_eth_plat);
     return 0;
 }
 device_initcall(devices_setup);
 
-
 static struct sh_machine_vector mv_ecovec __initmv = {
     .mv_name = "R0P7724 (EcoVec)",
 };
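CN5 on EcoVec can act as either USB host or USB function, so instead of hard-wiring the hcd driver the board code now leaves usb1_common_device.name empty and fills it in from the PTB3 strap before the device list is registered. The pattern, reduced to its core (a sketch built from the hunks above):

    /* pick the driver that binds to the shared r8a66597 device at boot */
    if (gpio_get_value(GPIO_PTB3))
        usb1_common_device.name = "r8a66597_udc";   /* function/gadget */
    else
        usb1_common_device.name = "r8a66597_hcd";   /* host */

The name only has to be set before platform_add_devices() runs for ecovec_devices[]; the resources and platform data stay the same in both modes.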
diff --git a/arch/sh/boards/mach-highlander/setup.c b/arch/sh/boards/mach-highlander/setup.c
index 566e69d8d72..f663c14d888 100644
--- a/arch/sh/boards/mach-highlander/setup.c
+++ b/arch/sh/boards/mach-highlander/setup.c
@@ -384,7 +384,7 @@ static unsigned char irl2irq[HL_NR_IRL];
 
 static int highlander_irq_demux(int irq)
 {
-    if (irq >= HL_NR_IRL || !irl2irq[irq])
+    if (irq >= HL_NR_IRL || irq < 0 || !irl2irq[irq])
         return irq;
 
     return irl2irq[irq];
diff --git a/arch/sh/boards/mach-kfr2r09/Makefile b/arch/sh/boards/mach-kfr2r09/Makefile
index 5d5867826e3..4e577a3bf65 100644
--- a/arch/sh/boards/mach-kfr2r09/Makefile
+++ b/arch/sh/boards/mach-kfr2r09/Makefile
@@ -1,2 +1,2 @@
-obj-y := setup.o
+obj-y := setup.o sdram.o
 obj-$(CONFIG_FB_SH_MOBILE_LCDC) += lcd_wqvga.o
diff --git a/arch/sh/boards/mach-kfr2r09/sdram.S b/arch/sh/boards/mach-kfr2r09/sdram.S
new file mode 100644
index 00000000000..0c9f55bec2f
--- /dev/null
+++ b/arch/sh/boards/mach-kfr2r09/sdram.S
@@ -0,0 +1,80 @@
+/*
+ * KFR2R09 sdram self/auto-refresh setup code
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/suspend.h>
+#include <asm/romimage-macros.h>
+
+/* code to enter and leave self-refresh. must be self-contained.
+ * this code will be copied to on-chip memory and executed from there.
+ */
+    .balign 4
+ENTRY(kfr2r09_sdram_enter_start)
+
+    /* DBSC: put memory in self-refresh mode */
+
+    ED  0xFD000010, 0x00000000 /* DBEN */
+    ED  0xFD000040, 0x00000000 /* DBRFPDN0 */
+    ED  0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+    ED  0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+    ED  0xFD000040, 0x00000001 /* DBRFPDN0 */
+
+    rts
+     nop
+
+ENTRY(kfr2r09_sdram_enter_end)
+
+    .balign 4
+ENTRY(kfr2r09_sdram_leave_start)
+
+    /* DBSC: put memory in auto-refresh mode */
+
+    mov.l   @(SH_SLEEP_MODE, r5), r0
+    tst     #SUSP_SH_RSTANDBY, r0
+    bf      resume_rstandby
+
+    ED  0xFD000040, 0x00000000 /* DBRFPDN0 */
+    WAIT 1
+    ED  0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
+    ED  0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+    ED  0xFD000010, 0x00000001 /* DBEN */
+    ED  0xFD000040, 0x00010000 /* DBRFPDN0 */
+
+    rts
+     nop
+
+resume_rstandby:
+
+    /* DBSC: re-initialize and put in auto-refresh */
+
+    ED  0xFD000108, 0x40000301 /* DBPDCNT0 */
+    ED  0xFD000020, 0x011B0002 /* DBCONF */
+    ED  0xFD000030, 0x03060E02 /* DBTR0 */
+    ED  0xFD000034, 0x01020102 /* DBTR1 */
+    ED  0xFD000038, 0x01090406 /* DBTR2 */
+    ED  0xFD000008, 0x00000004 /* DBKIND */
+    ED  0xFD000040, 0x00000001 /* DBRFPDN0 */
+    ED  0xFD000040, 0x00000000 /* DBRFPDN0 */
+    ED  0xFD000018, 0x00000001 /* DBCKECNT */
+    WAIT 1
+    ED  0xFD000010, 0x00000001 /* DBEN */
+    ED  0xFD000044, 0x000004AF /* DBRFPDN1 */
+    ED  0xFD000048, 0x20CF0037 /* DBRFPDN2 */
+    ED  0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
+    ED  0xFD000108, 0x40000300 /* DBPDCNT0 */
+    ED  0xFD000040, 0x00010000 /* DBRFPDN0 */
+
+    rts
+     nop
+
+ENTRY(kfr2r09_sdram_leave_end)
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index c08d33fe210..87438d6603d 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -16,13 +16,16 @@
 #include <linux/clk.h>
 #include <linux/gpio.h>
 #include <linux/input.h>
+#include <linux/input/sh_keysc.h>
 #include <linux/i2c.h>
 #include <linux/usb/r8a66597.h>
+#include <media/soc_camera.h>
+#include <media/sh_mobile_ceu.h>
 #include <video/sh_mobile_lcdc.h>
+#include <asm/suspend.h>
 #include <asm/clock.h>
 #include <asm/machvec.h>
 #include <asm/io.h>
-#include <asm/sh_keysc.h>
 #include <cpu/sh7724.h>
 #include <mach/kfr2r09.h>
 
@@ -212,11 +215,154 @@ static struct platform_device kfr2r09_usb0_gadget_device = {
     .resource = kfr2r09_usb0_gadget_resources,
 };
 
+static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
+    .flags = SH_CEU_FLAG_USE_8BIT_BUS,
+};
+
+static struct resource kfr2r09_ceu_resources[] = {
+    [0] = {
+        .name = "CEU",
+        .start = 0xfe910000,
+        .end = 0xfe91009f,
+        .flags = IORESOURCE_MEM,
+    },
+    [1] = {
+        .start = 52,
+        .end = 52,
+        .flags = IORESOURCE_IRQ,
+    },
+    [2] = {
+        /* place holder for contiguous memory */
+    },
+};
+
+static struct platform_device kfr2r09_ceu_device = {
+    .name = "sh_mobile_ceu",
+    .id = 0, /* "ceu0" clock */
+    .num_resources = ARRAY_SIZE(kfr2r09_ceu_resources),
+    .resource = kfr2r09_ceu_resources,
+    .dev = {
+        .platform_data = &sh_mobile_ceu_info,
+    },
+    .archdata = {
+        .hwblk_id = HWBLK_CEU0,
+    },
+};
+
+static struct i2c_board_info kfr2r09_i2c_camera = {
+    I2C_BOARD_INFO("rj54n1cb0c", 0x50),
+};
+
+static struct clk *camera_clk;
+
+#define DRVCRB 0xA405018C
+static int camera_power(struct device *dev, int mode)
+{
+    int ret;
+
+    if (mode) {
+        long rate;
+
+        camera_clk = clk_get(NULL, "video_clk");
+        if (IS_ERR(camera_clk))
+            return PTR_ERR(camera_clk);
+
+        /* set VIO_CKO clock to 25MHz */
+        rate = clk_round_rate(camera_clk, 25000000);
+        ret = clk_set_rate(camera_clk, rate);
+        if (ret < 0)
+            goto eclkrate;
+
+        /* set DRVCRB
+         *
+         * use 1.8 V for VccQ_VIO
+         * use 2.85V for VccQ_SR
+         */
+        ctrl_outw((ctrl_inw(DRVCRB) & ~0x0003) | 0x0001, DRVCRB);
+
+        /* reset clear */
+        ret = gpio_request(GPIO_PTB4, NULL);
+        if (ret < 0)
+            goto eptb4;
+        ret = gpio_request(GPIO_PTB7, NULL);
+        if (ret < 0)
+            goto eptb7;
+
+        ret = gpio_direction_output(GPIO_PTB4, 1);
+        if (!ret)
+            ret = gpio_direction_output(GPIO_PTB7, 1);
+        if (ret < 0)
+            goto egpioout;
+        msleep(1);
+
+        ret = clk_enable(camera_clk); /* start VIO_CKO */
+        if (ret < 0)
+            goto eclkon;
+
+        return 0;
+    }
+
+    ret = 0;
+
+    clk_disable(camera_clk);
+eclkon:
+    gpio_set_value(GPIO_PTB7, 0);
+egpioout:
+    gpio_set_value(GPIO_PTB4, 0);
+    gpio_free(GPIO_PTB7);
+eptb7:
+    gpio_free(GPIO_PTB4);
+eptb4:
+eclkrate:
+    clk_put(camera_clk);
+    return ret;
+}
+
+static struct soc_camera_link rj54n1_link = {
+    .power = camera_power,
+    .board_info = &kfr2r09_i2c_camera,
+    .i2c_adapter_id = 1,
+    .module_name = "rj54n1cb0c",
+};
+
+static struct platform_device kfr2r09_camera = {
+    .name = "soc-camera-pdrv",
+    .id = 0,
+    .dev = {
+        .platform_data = &rj54n1_link,
+    },
+};
+
+static struct resource kfr2r09_sh_sdhi0_resources[] = {
+    [0] = {
+        .name = "SDHI0",
+        .start = 0x04ce0000,
+        .end = 0x04ce01ff,
+        .flags = IORESOURCE_MEM,
+    },
+    [1] = {
+        .start = 101,
+        .flags = IORESOURCE_IRQ,
+    },
+};
+
+static struct platform_device kfr2r09_sh_sdhi0_device = {
+    .name = "sh_mobile_sdhi",
+    .num_resources = ARRAY_SIZE(kfr2r09_sh_sdhi0_resources),
+    .resource = kfr2r09_sh_sdhi0_resources,
+    .archdata = {
+        .hwblk_id = HWBLK_SDHI0,
+    },
+};
+
 static struct platform_device *kfr2r09_devices[] __initdata = {
     &kfr2r09_nor_flash_device,
     &kfr2r09_nand_flash_device,
     &kfr2r09_sh_keysc_device,
     &kfr2r09_sh_lcdc_device,
+    &kfr2r09_ceu_device,
+    &kfr2r09_camera,
+    &kfr2r09_sh_sdhi0_device,
 };
 
 #define BSC_CS0BCR 0xfec10004
@@ -268,11 +414,59 @@ static int kfr2r09_usb0_gadget_i2c_setup(void)
 
     return 0;
 }
+
+static int kfr2r09_serial_i2c_setup(void)
+{
+    struct i2c_adapter *a;
+    struct i2c_msg msg;
+    unsigned char buf[2];
+    int ret;
+
+    a = i2c_get_adapter(0);
+    if (!a)
+        return -ENODEV;
+
+    /* set bit 6 (the 7th bit) of chip at 0x09, register 0x13 */
+    buf[0] = 0x13;
+    msg.addr = 0x09;
+    msg.buf = buf;
+    msg.len = 1;
+    msg.flags = 0;
+    ret = i2c_transfer(a, &msg, 1);
+    if (ret != 1)
+        return -ENODEV;
+
+    buf[0] = 0;
+    msg.addr = 0x09;
+    msg.buf = buf;
+    msg.len = 1;
+    msg.flags = I2C_M_RD;
+    ret = i2c_transfer(a, &msg, 1);
+    if (ret != 1)
+        return -ENODEV;
+
+    buf[1] = buf[0] | (1 << 6);
+    buf[0] = 0x13;
+    msg.addr = 0x09;
+    msg.buf = buf;
+    msg.len = 2;
+    msg.flags = 0;
+    ret = i2c_transfer(a, &msg, 1);
+    if (ret != 1)
+        return -ENODEV;
+
+    return 0;
+}
 #else
 static int kfr2r09_usb0_gadget_i2c_setup(void)
 {
     return -ENODEV;
 }
+
+static int kfr2r09_serial_i2c_setup(void)
+{
+    return -ENODEV;
+}
 #endif
 
 static int kfr2r09_usb0_gadget_setup(void)
@@ -299,11 +493,27 @@ static int kfr2r09_usb0_gadget_setup(void)
     return 0;
 }
 
+extern char kfr2r09_sdram_enter_start;
+extern char kfr2r09_sdram_enter_end;
+extern char kfr2r09_sdram_leave_start;
+extern char kfr2r09_sdram_leave_end;
+
 static int __init kfr2r09_devices_setup(void)
 {
+    /* register board specific self-refresh code */
+    sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF |
+                                    SUSP_SH_RSTANDBY,
+                                    &kfr2r09_sdram_enter_start,
+                                    &kfr2r09_sdram_enter_end,
+                                    &kfr2r09_sdram_leave_start,
+                                    &kfr2r09_sdram_leave_end);
+
     /* enable SCIF1 serial port for YC401 console support */
     gpio_request(GPIO_FN_SCIF1_RXD, NULL);
     gpio_request(GPIO_FN_SCIF1_TXD, NULL);
+    kfr2r09_serial_i2c_setup(); /* ECONTMSK(bit6=L10ONEN) set 1 */
+    gpio_request(GPIO_PTG3, NULL); /* HPON_ON */
+    gpio_direction_output(GPIO_PTG3, 1); /* HPON_ON = H */
 
     /* setup NOR flash at CS0 */
     ctrl_outl(0x36db0400, BSC_CS0BCR);
@@ -361,6 +571,32 @@
     if (kfr2r09_usb0_gadget_setup() == 0)
         platform_device_register(&kfr2r09_usb0_gadget_device);
 
+    /* CEU */
+    gpio_request(GPIO_FN_VIO_CKO, NULL);
+    gpio_request(GPIO_FN_VIO0_CLK, NULL);
+    gpio_request(GPIO_FN_VIO0_VD, NULL);
+    gpio_request(GPIO_FN_VIO0_HD, NULL);
+    gpio_request(GPIO_FN_VIO0_FLD, NULL);
+    gpio_request(GPIO_FN_VIO0_D7, NULL);
+    gpio_request(GPIO_FN_VIO0_D6, NULL);
+    gpio_request(GPIO_FN_VIO0_D5, NULL);
+    gpio_request(GPIO_FN_VIO0_D4, NULL);
+    gpio_request(GPIO_FN_VIO0_D3, NULL);
+    gpio_request(GPIO_FN_VIO0_D2, NULL);
+    gpio_request(GPIO_FN_VIO0_D1, NULL);
+    gpio_request(GPIO_FN_VIO0_D0, NULL);
+
+    platform_resource_setup_memory(&kfr2r09_ceu_device, "ceu", 4 << 20);
+
+    /* SDHI0 connected to yc304 */
+    gpio_request(GPIO_FN_SDHI0CD, NULL);
+    gpio_request(GPIO_FN_SDHI0D3, NULL);
+    gpio_request(GPIO_FN_SDHI0D2, NULL);
+    gpio_request(GPIO_FN_SDHI0D1, NULL);
+    gpio_request(GPIO_FN_SDHI0D0, NULL);
+    gpio_request(GPIO_FN_SDHI0CMD, NULL);
+    gpio_request(GPIO_FN_SDHI0CLK, NULL);
+
     return platform_add_devices(kfr2r09_devices,
                                 ARRAY_SIZE(kfr2r09_devices));
 }
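kfr2r09_serial_i2c_setup() above performs a plain read-modify-write of register 0x13 on the chip at I2C address 0x09 (set bit 6, ECONTMSK/L10ONEN) using three raw i2c_transfer() calls: write the register index, read the current value back, then write the index plus the new value. A condensed sketch of that idiom as a generic helper (the helper name and signature are illustrative, not part of the patch):

    static int i2c_set_bit(struct i2c_adapter *a, u16 addr, u8 reg, u8 bit)
    {
        unsigned char buf[2];
        struct i2c_msg msg = { .addr = addr, .buf = buf };
        int ret;

        buf[0] = reg;               /* 1. select the register */
        msg.len = 1;
        msg.flags = 0;
        ret = i2c_transfer(a, &msg, 1);
        if (ret != 1)
            return -ENODEV;

        msg.len = 1;                /* 2. read the current value into buf[0] */
        msg.flags = I2C_M_RD;
        ret = i2c_transfer(a, &msg, 1);
        if (ret != 1)
            return -ENODEV;

        buf[1] = buf[0] | (1 << bit);   /* 3. write it back with the bit set */
        buf[0] = reg;
        msg.len = 2;
        msg.flags = 0;
        ret = i2c_transfer(a, &msg, 1);
        return ret == 1 ? 0 : -ENODEV;
    }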
diff --git a/arch/sh/boards/mach-migor/Makefile b/arch/sh/boards/mach-migor/Makefile
index 5f231dd25c0..4601a89e5ac 100644
--- a/arch/sh/boards/mach-migor/Makefile
+++ b/arch/sh/boards/mach-migor/Makefile
@@ -1,2 +1,2 @@
-obj-y := setup.o
+obj-y := setup.o sdram.o
 obj-$(CONFIG_SH_MIGOR_QVGA) += lcd_qvga.o
diff --git a/arch/sh/boards/mach-migor/sdram.S b/arch/sh/boards/mach-migor/sdram.S
new file mode 100644
index 00000000000..614aa3a1398
--- /dev/null
+++ b/arch/sh/boards/mach-migor/sdram.S
@@ -0,0 +1,69 @@
+/*
+ * Migo-R sdram self/auto-refresh setup code
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/suspend.h>
+#include <asm/romimage-macros.h>
+
+/* code to enter and leave self-refresh. must be self-contained.
+ * this code will be copied to on-chip memory and executed from there.
+ */
+    .balign 4
+ENTRY(migor_sdram_enter_start)
+
+    /* SBSC: disable power down and put in self-refresh mode */
+    mov.l   1f, r4
+    mov.l   2f, r1
+    mov.l   @r4, r2
+    or      r1, r2
+    mov.l   3f, r3
+    and     r3, r2
+    mov.l   r2, @r4
+
+    rts
+     nop
+
+    .balign 4
+1:  .long   0xfe400008 /* SDCR0 */
+2:  .long   0x00000400
+3:  .long   0xffff7fff
+ENTRY(migor_sdram_enter_end)
+
+    .balign 4
+ENTRY(migor_sdram_leave_start)
+
+    /* SBSC: set auto-refresh mode */
+    mov.l   1f, r4
+    mov.l   @r4, r0
+    mov.l   4f, r1
+    and     r1, r0
+    mov.l   r0, @r4
+    mov.l   6f, r4
+    mov.l   8f, r0
+    mov.l   @r4, r1
+    mov     #-1, r4
+    add     r4, r1
+    or      r1, r0
+    mov.l   7f, r1
+    mov.l   r0, @r1
+
+    rts
+     nop
+
+    .balign 4
+1:  .long   0xfe400008 /* SDCR0 */
+4:  .long   0xfffffbff
+6:  .long   0xfe40001c /* RTCOR */
+7:  .long   0xfe400018 /* RTCNT */
+8:  .long   0xa55a0000
+ENTRY(migor_sdram_leave_end)
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 6ed1fd32369..9099b6da995 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/input.h>
+#include <linux/input/sh_keysc.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/nand.h>
 #include <linux/i2c.h>
@@ -18,8 +19,6 @@
 #include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_gpio.h>
 #include <video/sh_mobile_lcdc.h>
 #include <media/sh_mobile_ceu.h>
 #include <media/ov772x.h>
@@ -27,7 +26,7 @@
 #include <asm/clock.h>
 #include <asm/machvec.h>
 #include <asm/io.h>
-#include <asm/sh_keysc.h>
+#include <asm/suspend.h>
 #include <mach/migor.h>
 #include <cpu/sh7722.h>
 
@@ -390,17 +389,25 @@ static struct platform_device migor_ceu_device = {
     },
 };
 
-struct spi_gpio_platform_data sdcard_cn9_platform_data = {
-    .sck = GPIO_PTD0,
-    .mosi = GPIO_PTD1,
-    .miso = GPIO_PTD2,
-    .num_chipselect = 1,
+static struct resource sdhi_cn9_resources[] = {
+    [0] = {
+        .name = "SDHI",
+        .start = 0x04ce0000,
+        .end = 0x04ce01ff,
+        .flags = IORESOURCE_MEM,
+    },
+    [1] = {
+        .start = 101,
+        .flags = IORESOURCE_IRQ,
+    },
 };
 
-static struct platform_device sdcard_cn9_device = {
-    .name = "spi_gpio",
-    .dev = {
-        .platform_data = &sdcard_cn9_platform_data,
+static struct platform_device sdhi_cn9_device = {
+    .name = "sh_mobile_sdhi",
+    .num_resources = ARRAY_SIZE(sdhi_cn9_resources),
+    .resource = sdhi_cn9_resources,
+    .archdata = {
+        .hwblk_id = HWBLK_SDHI,
     },
 };
 
@@ -467,23 +474,24 @@ static struct platform_device *migor_devices[] __initdata = {
     &migor_ceu_device,
     &migor_nor_flash_device,
     &migor_nand_flash_device,
-    &sdcard_cn9_device,
+    &sdhi_cn9_device,
     &migor_camera[0],
     &migor_camera[1],
 };
 
-static struct spi_board_info migor_spi_devices[] = {
-    {
-        .modalias = "mmc_spi",
-        .max_speed_hz = 5000000,
-        .chip_select = 0,
-        .controller_data = (void *) GPIO_PTD5,
-    },
-};
+extern char migor_sdram_enter_start;
+extern char migor_sdram_enter_end;
+extern char migor_sdram_leave_start;
+extern char migor_sdram_leave_end;
 
 static int __init migor_devices_setup(void)
 {
-
+    /* register board specific self-refresh code */
+    sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
+                                    &migor_sdram_enter_start,
+                                    &migor_sdram_enter_end,
+                                    &migor_sdram_leave_start,
+                                    &migor_sdram_leave_end);
 #ifdef CONFIG_PM
     /* Let D11 LED show STATUS0 */
     gpio_request(GPIO_FN_STATUS0, NULL);
@@ -525,6 +533,16 @@ static int __init migor_devices_setup(void)
     gpio_request(GPIO_PTA1, NULL);
     gpio_direction_input(GPIO_PTA1);
 
+    /* SDHI */
+    gpio_request(GPIO_FN_SDHICD, NULL);
+    gpio_request(GPIO_FN_SDHIWP, NULL);
+    gpio_request(GPIO_FN_SDHID3, NULL);
+    gpio_request(GPIO_FN_SDHID2, NULL);
+    gpio_request(GPIO_FN_SDHID1, NULL);
+    gpio_request(GPIO_FN_SDHID0, NULL);
+    gpio_request(GPIO_FN_SDHICMD, NULL);
+    gpio_request(GPIO_FN_SDHICLK, NULL);
+
     /* Touch Panel */
     gpio_request(GPIO_FN_IRQ6, NULL);
 
@@ -612,9 +630,6 @@ static int __init migor_devices_setup(void)
     i2c_register_board_info(0, migor_i2c_devices,
                             ARRAY_SIZE(migor_i2c_devices));
 
-    spi_register_board_info(migor_spi_devices,
-                            ARRAY_SIZE(migor_spi_devices));
-
     return platform_add_devices(migor_devices, ARRAY_SIZE(migor_devices));
 }
 arch_initcall(migor_devices_setup);
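Each converted board now ships an sdram.S whose enter/leave snippets are bracketed by ENTRY() markers and, per the comment in the assembly, must be self-contained because they are copied to on-chip memory and run from there. The registration side looks the same on every board; a sketch using the Migo-R symbols from above (the wrapper function is hypothetical, the real boards make this call from their devices_setup()/arch_setup() routines):

    /* start/end markers emitted by ENTRY() in mach-migor/sdram.S */
    extern char migor_sdram_enter_start, migor_sdram_enter_end;
    extern char migor_sdram_leave_start, migor_sdram_leave_end;

    static int __init board_pm_setup(void)
    {
        /* supported sleep modes + the self-contained code blocks */
        sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
                                        &migor_sdram_enter_start,
                                        &migor_sdram_enter_end,
                                        &migor_sdram_leave_start,
                                        &migor_sdram_leave_end);
        return 0;
    }

KFR2R09 additionally passes SUSP_SH_RSTANDBY, which is why its leave path re-initializes the DBSC instead of only re-enabling auto-refresh.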
diff --git a/arch/sh/boards/mach-r2d/irq.c b/arch/sh/boards/mach-r2d/irq.c
index c70fecedcac..78d7b27c80d 100644
--- a/arch/sh/boards/mach-r2d/irq.c
+++ b/arch/sh/boards/mach-r2d/irq.c
@@ -116,7 +116,7 @@ static unsigned char irl2irq[R2D_NR_IRL];
 
 int rts7751r2d_irq_demux(int irq)
 {
-    if (irq >= R2D_NR_IRL || !irl2irq[irq])
+    if (irq >= R2D_NR_IRL || irq < 0 || !irl2irq[irq])
         return irq;
 
     return irl2irq[irq];
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c index 02d21a3e2a8..4eb31acfafe 100644 --- a/arch/sh/boards/mach-se/7722/irq.c +++ b/arch/sh/boards/mach-se/7722/irq.c | |||
@@ -16,15 +16,17 @@ | |||
16 | #include <asm/io.h> | 16 | #include <asm/io.h> |
17 | #include <mach-se/mach/se7722.h> | 17 | #include <mach-se/mach/se7722.h> |
18 | 18 | ||
19 | unsigned int se7722_fpga_irq[SE7722_FPGA_IRQ_NR] = { 0, }; | ||
20 | |||
19 | static void disable_se7722_irq(unsigned int irq) | 21 | static void disable_se7722_irq(unsigned int irq) |
20 | { | 22 | { |
21 | unsigned int bit = irq - SE7722_FPGA_IRQ_BASE; | 23 | unsigned int bit = (unsigned int)get_irq_chip_data(irq); |
22 | ctrl_outw(ctrl_inw(IRQ01_MASK) | 1 << bit, IRQ01_MASK); | 24 | ctrl_outw(ctrl_inw(IRQ01_MASK) | 1 << bit, IRQ01_MASK); |
23 | } | 25 | } |
24 | 26 | ||
25 | static void enable_se7722_irq(unsigned int irq) | 27 | static void enable_se7722_irq(unsigned int irq) |
26 | { | 28 | { |
27 | unsigned int bit = irq - SE7722_FPGA_IRQ_BASE; | 29 | unsigned int bit = (unsigned int)get_irq_chip_data(irq); |
28 | ctrl_outw(ctrl_inw(IRQ01_MASK) & ~(1 << bit), IRQ01_MASK); | 30 | ctrl_outw(ctrl_inw(IRQ01_MASK) & ~(1 << bit), IRQ01_MASK); |
29 | } | 31 | } |
30 | 32 | ||
@@ -38,18 +40,15 @@ static struct irq_chip se7722_irq_chip __read_mostly = { | |||
38 | static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc) | 40 | static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc) |
39 | { | 41 | { |
40 | unsigned short intv = ctrl_inw(IRQ01_STS); | 42 | unsigned short intv = ctrl_inw(IRQ01_STS); |
41 | struct irq_desc *ext_desc; | 43 | unsigned int ext_irq = 0; |
42 | unsigned int ext_irq = SE7722_FPGA_IRQ_BASE; | ||
43 | 44 | ||
44 | intv &= (1 << SE7722_FPGA_IRQ_NR) - 1; | 45 | intv &= (1 << SE7722_FPGA_IRQ_NR) - 1; |
45 | 46 | ||
46 | while (intv) { | 47 | for (; intv; intv >>= 1, ext_irq++) { |
47 | if (intv & 1) { | 48 | if (!(intv & 1)) |
48 | ext_desc = irq_desc + ext_irq; | 49 | continue; |
49 | handle_level_irq(ext_irq, ext_desc); | 50 | |
50 | } | 51 | generic_handle_irq(se7722_fpga_irq[ext_irq]); |
51 | intv >>= 1; | ||
52 | ext_irq++; | ||
53 | } | 52 | } |
54 | } | 53 | } |
55 | 54 | ||
@@ -63,11 +62,18 @@ void __init init_se7722_IRQ(void) | |||
63 | ctrl_outw(0, IRQ01_MASK); /* disable all irqs */ | 62 | ctrl_outw(0, IRQ01_MASK); /* disable all irqs */ |
64 | ctrl_outw(0x2000, 0xb03fffec); /* mrshpc irq enable */ | 63 | ctrl_outw(0x2000, 0xb03fffec); /* mrshpc irq enable */ |
65 | 64 | ||
66 | for (i = 0; i < SE7722_FPGA_IRQ_NR; i++) | 65 | for (i = 0; i < SE7722_FPGA_IRQ_NR; i++) { |
67 | set_irq_chip_and_handler_name(SE7722_FPGA_IRQ_BASE + i, | 66 | se7722_fpga_irq[i] = create_irq(); |
67 | if (se7722_fpga_irq[i] < 0) | ||
68 | return; | ||
69 | |||
70 | set_irq_chip_and_handler_name(se7722_fpga_irq[i], | ||
68 | &se7722_irq_chip, | 71 | &se7722_irq_chip, |
69 | handle_level_irq, "level"); | 72 | handle_level_irq, "level"); |
70 | 73 | ||
74 | set_irq_chip_data(se7722_fpga_irq[i], (void *)i); | ||
75 | } | ||
76 | |||
71 | set_irq_chained_handler(IRQ0_IRQ, se7722_irq_demux); | 77 | set_irq_chained_handler(IRQ0_IRQ, se7722_irq_demux); |
72 | set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); | 78 | set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW); |
73 | 79 | ||
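
The se7722 rework above drops the fixed SE7722_FPGA_IRQ_BASE: each FPGA source now gets a vector from create_irq(), its bit position is stored with set_irq_chip_data(), and the demux walks the latched status word and dispatches through the se7722_fpga_irq[] table. A small user-space sketch of that status-word walk (table contents and bit width are invented for the example):

#include <stdio.h>

#define FPGA_IRQ_NR 5                       /* illustrative */

static unsigned int fpga_irq[FPGA_IRQ_NR];  /* dynamically assigned vectors */

static void handle(unsigned int irq)
{
    printf("dispatch virtual irq %u\n", irq);
}

/* Walk a latched status word and dispatch every set bit through the
 * translation table, mirroring the rewritten se7722_irq_demux() loop. */
static void demux(unsigned short intv)
{
    unsigned int bit = 0;

    intv &= (1 << FPGA_IRQ_NR) - 1;

    for (; intv; intv >>= 1, bit++) {
        if (!(intv & 1))
            continue;

        handle(fpga_irq[bit]);
    }
}

int main(void)
{
    for (unsigned int i = 0; i < FPGA_IRQ_NR; i++)
        fpga_irq[i] = 100 + i;              /* stand-in for create_irq() */

    demux(0x0005);                          /* bits 0 and 2 pending */
    return 0;
}
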
diff --git a/arch/sh/boards/mach-se/7722/setup.c b/arch/sh/boards/mach-se/7722/setup.c index 36374078e52..b1cb9425b60 100644 --- a/arch/sh/boards/mach-se/7722/setup.c +++ b/arch/sh/boards/mach-se/7722/setup.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/ata_platform.h> | 15 | #include <linux/ata_platform.h> |
16 | #include <linux/input.h> | 16 | #include <linux/input.h> |
17 | #include <linux/input/sh_keysc.h> | ||
17 | #include <linux/smc91x.h> | 18 | #include <linux/smc91x.h> |
18 | #include <mach-se/mach/se7722.h> | 19 | #include <mach-se/mach/se7722.h> |
19 | #include <mach-se/mach/mrshpc.h> | 20 | #include <mach-se/mach/mrshpc.h> |
@@ -21,7 +22,6 @@ | |||
21 | #include <asm/clock.h> | 22 | #include <asm/clock.h> |
22 | #include <asm/io.h> | 23 | #include <asm/io.h> |
23 | #include <asm/heartbeat.h> | 24 | #include <asm/heartbeat.h> |
24 | #include <asm/sh_keysc.h> | ||
25 | #include <cpu/sh7722.h> | 25 | #include <cpu/sh7722.h> |
26 | 26 | ||
27 | /* Heartbeat */ | 27 | /* Heartbeat */ |
@@ -60,8 +60,7 @@ static struct resource smc91x_eth_resources[] = { | |||
60 | .flags = IORESOURCE_MEM, | 60 | .flags = IORESOURCE_MEM, |
61 | }, | 61 | }, |
62 | [1] = { | 62 | [1] = { |
63 | .start = SMC_IRQ, | 63 | /* Filled in later */ |
64 | .end = SMC_IRQ, | ||
65 | .flags = IORESOURCE_IRQ, | 64 | .flags = IORESOURCE_IRQ, |
66 | }, | 65 | }, |
67 | }; | 66 | }; |
@@ -90,8 +89,7 @@ static struct resource cf_ide_resources[] = { | |||
90 | .flags = IORESOURCE_IO, | 89 | .flags = IORESOURCE_IO, |
91 | }, | 90 | }, |
92 | [2] = { | 91 | [2] = { |
93 | .start = MRSHPC_IRQ0, | 92 | /* Filled in later */ |
94 | .end = MRSHPC_IRQ0, | ||
95 | .flags = IORESOURCE_IRQ, | 93 | .flags = IORESOURCE_IRQ, |
96 | }, | 94 | }, |
97 | }; | 95 | }; |
@@ -153,6 +151,14 @@ static struct platform_device *se7722_devices[] __initdata = { | |||
153 | static int __init se7722_devices_setup(void) | 151 | static int __init se7722_devices_setup(void) |
154 | { | 152 | { |
155 | mrshpc_setup_windows(); | 153 | mrshpc_setup_windows(); |
154 | |||
155 | /* Wire-up dynamic vectors */ | ||
156 | cf_ide_resources[2].start = cf_ide_resources[2].end = | ||
157 | se7722_fpga_irq[SE7722_FPGA_IRQ_MRSHPC0]; | ||
158 | |||
159 | smc91x_eth_resources[1].start = smc91x_eth_resources[1].end = | ||
160 | se7722_fpga_irq[SE7722_FPGA_IRQ_SMC]; | ||
161 | |||
156 | return platform_add_devices(se7722_devices, ARRAY_SIZE(se7722_devices)); | 162 | return platform_add_devices(se7722_devices, ARRAY_SIZE(se7722_devices)); |
157 | } | 163 | } |
158 | device_initcall(se7722_devices_setup); | 164 | device_initcall(se7722_devices_setup); |
@@ -193,6 +199,5 @@ static void __init se7722_setup(char **cmdline_p) | |||
193 | static struct sh_machine_vector mv_se7722 __initmv = { | 199 | static struct sh_machine_vector mv_se7722 __initmv = { |
194 | .mv_name = "Solution Engine 7722" , | 200 | .mv_name = "Solution Engine 7722" , |
195 | .mv_setup = se7722_setup , | 201 | .mv_setup = se7722_setup , |
196 | .mv_nr_irqs = SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_NR, | ||
197 | .mv_init_irq = init_se7722_IRQ, | 202 | .mv_init_irq = init_se7722_IRQ, |
198 | }; | 203 | }; |
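
Because the FPGA vectors are now allocated at runtime, the board can no longer hard-code SMC_IRQ and MRSHPC_IRQ0 in its platform resources; the IRQ resources are left blank and filled in during se7722_devices_setup(). A trivial sketch of that late binding (struct resource is reduced to the fields the example needs, and the vector number is invented):

#include <stdio.h>

/* Minimal stand-in; the real definition lives in <linux/ioport.h>. */
struct resource {
    unsigned long start;
    unsigned long end;
    unsigned long flags;
};
#define IORESOURCE_IRQ 0x400

static struct resource cf_ide_resources[] = {
    [0] = { .start = 0x1000, .end = 0x100f },   /* illustrative I/O window */
    [1] = { .flags = IORESOURCE_IRQ },          /* filled in later */
};

int main(void)
{
    unsigned int fpga_irq_mrshpc0 = 113;        /* pretend create_irq() result */

    /* Late-bind the IRQ resource once the vector is known. */
    cf_ide_resources[1].start = cf_ide_resources[1].end = fpga_irq_mrshpc0;

    printf("IRQ resource: %lu\n", cf_ide_resources[1].start);
    return 0;
}
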
diff --git a/arch/sh/boards/mach-se/7724/Makefile b/arch/sh/boards/mach-se/7724/Makefile index 349cbd6ce82..a08b36830f0 100644 --- a/arch/sh/boards/mach-se/7724/Makefile +++ b/arch/sh/boards/mach-se/7724/Makefile | |||
@@ -7,4 +7,4 @@ | |||
7 | # | 7 | # |
8 | # | 8 | # |
9 | 9 | ||
10 | obj-y := setup.o irq.o \ No newline at end of file | 10 | obj-y := setup.o irq.o sdram.o |
diff --git a/arch/sh/boards/mach-se/7724/sdram.S b/arch/sh/boards/mach-se/7724/sdram.S new file mode 100644 index 00000000000..9040167d502 --- /dev/null +++ b/arch/sh/boards/mach-se/7724/sdram.S | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * MS7724SE sdram self/auto-refresh setup code | ||
3 | * | ||
4 | * Copyright (C) 2009 Magnus Damm | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #include <linux/sys.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/linkage.h> | ||
14 | #include <asm/asm-offsets.h> | ||
15 | #include <asm/suspend.h> | ||
16 | #include <asm/romimage-macros.h> | ||
17 | |||
18 | /* code to enter and leave self-refresh. must be self-contained. | ||
19 | * this code will be copied to on-chip memory and executed from there. | ||
20 | */ | ||
21 | .balign 4 | ||
22 | ENTRY(ms7724se_sdram_enter_start) | ||
23 | |||
24 | /* DBSC: put memory in self-refresh mode */ | ||
25 | |||
26 | ED 0xFD000010, 0x00000000 /* DBEN */ | ||
27 | ED 0xFD000040, 0x00000000 /* DBRFPDN0 */ | ||
28 | ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */ | ||
29 | ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */ | ||
30 | ED 0xFD000040, 0x00000001 /* DBRFPDN0 */ | ||
31 | |||
32 | rts | ||
33 | nop | ||
34 | |||
35 | ENTRY(ms7724se_sdram_enter_end) | ||
36 | |||
37 | .balign 4 | ||
38 | ENTRY(ms7724se_sdram_leave_start) | ||
39 | |||
40 | /* DBSC: put memory in auto-refresh mode */ | ||
41 | |||
42 | ED 0xFD000040, 0x00000000 /* DBRFPDN0 */ | ||
43 | WAIT 1 | ||
44 | ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */ | ||
45 | ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */ | ||
46 | ED 0xFD000010, 0x00000001 /* DBEN */ | ||
47 | ED 0xFD000040, 0x00010000 /* DBRFPDN0 */ | ||
48 | |||
49 | rts | ||
50 | nop | ||
51 | |||
52 | ENTRY(ms7724se_sdram_leave_end) | ||
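
The new sdram.S holds the position-independent sequences that park the SDRAM controller in self-refresh before standby and bring it back afterwards. Assuming the ED macro from romimage-macros simply stores the 32-bit value at the given register address, the enter path can be modelled in C as a fixed list of DBSC register writes (the model only logs the accesses):

#include <stdint.h>
#include <stdio.h>

/* Model of one register write; on the real hardware this is assumed to
 * be a 32-bit store to a memory-mapped DBSC register, which is what the
 * ED macro is taken to expand to. Here we only log the access. */
static void ed(uint32_t addr, uint32_t data)
{
    printf("write 0x%08x -> 0x%08x\n", data, addr);
}

/* Sequence mirroring ms7724se_sdram_enter_start: put SDRAM into
 * self-refresh before the system enters standby. Register names follow
 * the comments in the assembly above. */
static void sdram_enter_self_refresh(void)
{
    ed(0xFD000010, 0x00000000);     /* DBEN: stop normal operation */
    ed(0xFD000040, 0x00000000);     /* DBRFPDN0 */
    ed(0xFD000014, 0x00000002);     /* DBCMDCNT: precharge all (PALL) */
    ed(0xFD000014, 0x00000004);     /* DBCMDCNT: refresh (REF) */
    ed(0xFD000040, 0x00000001);     /* DBRFPDN0: enter self-refresh */
}

int main(void)
{
    sdram_enter_self_refresh();
    return 0;
}
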
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 0894bba9fad..4b0f0c0dc2b 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/smc91x.h> | 19 | #include <linux/smc91x.h> |
20 | #include <linux/gpio.h> | 20 | #include <linux/gpio.h> |
21 | #include <linux/input.h> | 21 | #include <linux/input.h> |
22 | #include <linux/input/sh_keysc.h> | ||
22 | #include <linux/usb/r8a66597.h> | 23 | #include <linux/usb/r8a66597.h> |
23 | #include <video/sh_mobile_lcdc.h> | 24 | #include <video/sh_mobile_lcdc.h> |
24 | #include <media/sh_mobile_ceu.h> | 25 | #include <media/sh_mobile_ceu.h> |
@@ -27,7 +28,7 @@ | |||
27 | #include <asm/heartbeat.h> | 28 | #include <asm/heartbeat.h> |
28 | #include <asm/sh_eth.h> | 29 | #include <asm/sh_eth.h> |
29 | #include <asm/clock.h> | 30 | #include <asm/clock.h> |
30 | #include <asm/sh_keysc.h> | 31 | #include <asm/suspend.h> |
31 | #include <cpu/sh7724.h> | 32 | #include <cpu/sh7724.h> |
32 | #include <mach-se/mach/se7724.h> | 33 | #include <mach-se/mach/se7724.h> |
33 | 34 | ||
@@ -451,6 +452,52 @@ static struct platform_device sh7724_usb1_gadget_device = { | |||
451 | .resource = sh7724_usb1_gadget_resources, | 452 | .resource = sh7724_usb1_gadget_resources, |
452 | }; | 453 | }; |
453 | 454 | ||
455 | static struct resource sdhi0_cn7_resources[] = { | ||
456 | [0] = { | ||
457 | .name = "SDHI0", | ||
458 | .start = 0x04ce0000, | ||
459 | .end = 0x04ce01ff, | ||
460 | .flags = IORESOURCE_MEM, | ||
461 | }, | ||
462 | [1] = { | ||
463 | .start = 101, | ||
464 | .flags = IORESOURCE_IRQ, | ||
465 | }, | ||
466 | }; | ||
467 | |||
468 | static struct platform_device sdhi0_cn7_device = { | ||
469 | .name = "sh_mobile_sdhi", | ||
470 | .id = 0, | ||
471 | .num_resources = ARRAY_SIZE(sdhi0_cn7_resources), | ||
472 | .resource = sdhi0_cn7_resources, | ||
473 | .archdata = { | ||
474 | .hwblk_id = HWBLK_SDHI0, | ||
475 | }, | ||
476 | }; | ||
477 | |||
478 | static struct resource sdhi1_cn8_resources[] = { | ||
479 | [0] = { | ||
480 | .name = "SDHI1", | ||
481 | .start = 0x04cf0000, | ||
482 | .end = 0x04cf01ff, | ||
483 | .flags = IORESOURCE_MEM, | ||
484 | }, | ||
485 | [1] = { | ||
486 | .start = 24, | ||
487 | .flags = IORESOURCE_IRQ, | ||
488 | }, | ||
489 | }; | ||
490 | |||
491 | static struct platform_device sdhi1_cn8_device = { | ||
492 | .name = "sh_mobile_sdhi", | ||
493 | .id = 1, | ||
494 | .num_resources = ARRAY_SIZE(sdhi1_cn8_resources), | ||
495 | .resource = sdhi1_cn8_resources, | ||
496 | .archdata = { | ||
497 | .hwblk_id = HWBLK_SDHI1, | ||
498 | }, | ||
499 | }; | ||
500 | |||
454 | static struct platform_device *ms7724se_devices[] __initdata = { | 501 | static struct platform_device *ms7724se_devices[] __initdata = { |
455 | &heartbeat_device, | 502 | &heartbeat_device, |
456 | &smc91x_eth_device, | 503 | &smc91x_eth_device, |
@@ -463,6 +510,8 @@ static struct platform_device *ms7724se_devices[] __initdata = { | |||
463 | &sh7724_usb0_host_device, | 510 | &sh7724_usb0_host_device, |
464 | &sh7724_usb1_gadget_device, | 511 | &sh7724_usb1_gadget_device, |
465 | &fsi_device, | 512 | &fsi_device, |
513 | &sdhi0_cn7_device, | ||
514 | &sdhi1_cn8_device, | ||
466 | }; | 515 | }; |
467 | 516 | ||
468 | #define EEPROM_OP 0xBA206000 | 517 | #define EEPROM_OP 0xBA206000 |
@@ -487,7 +536,7 @@ static int __init sh_eth_is_eeprom_ready(void) | |||
487 | static void __init sh_eth_init(void) | 536 | static void __init sh_eth_init(void) |
488 | { | 537 | { |
489 | int i; | 538 | int i; |
490 | u16 mac[3]; | 539 | u16 mac; |
491 | 540 | ||
492 | /* check EEPROM status */ | 541 | /* check EEPROM status */ |
493 | if (!sh_eth_is_eeprom_ready()) | 542 | if (!sh_eth_is_eeprom_ready()) |
@@ -501,16 +550,10 @@ static void __init sh_eth_init(void) | |||
501 | if (!sh_eth_is_eeprom_ready()) | 550 | if (!sh_eth_is_eeprom_ready()) |
502 | return; | 551 | return; |
503 | 552 | ||
504 | mac[i] = ctrl_inw(EEPROM_DATA); | 553 | mac = ctrl_inw(EEPROM_DATA); |
505 | mac[i] = ((mac[i] & 0xFF) << 8) | (mac[i] >> 8); /* swap */ | 554 | sh_eth_plat.mac_addr[i << 1] = mac & 0xff; |
555 | sh_eth_plat.mac_addr[(i << 1) + 1] = mac >> 8; | ||
506 | } | 556 | } |
507 | |||
508 | /* reset sh-eth */ | ||
509 | ctrl_outl(0x1, SH_ETH_ADDR + 0x0); | ||
510 | |||
511 | /* set MAC addr */ | ||
512 | ctrl_outl(((mac[0] << 16) | (mac[1])), SH_ETH_MAHR); | ||
513 | ctrl_outl((mac[2]), SH_ETH_MALR); | ||
514 | } | 557 | } |
515 | 558 | ||
516 | #define SW4140 0xBA201000 | 559 | #define SW4140 0xBA201000 |
@@ -527,11 +570,22 @@ static void __init sh_eth_init(void) | |||
527 | #define SW41_G 0x4000 | 570 | #define SW41_G 0x4000 |
528 | #define SW41_H 0x8000 | 571 | #define SW41_H 0x8000 |
529 | 572 | ||
573 | extern char ms7724se_sdram_enter_start; | ||
574 | extern char ms7724se_sdram_enter_end; | ||
575 | extern char ms7724se_sdram_leave_start; | ||
576 | extern char ms7724se_sdram_leave_end; | ||
577 | |||
530 | static int __init devices_setup(void) | 578 | static int __init devices_setup(void) |
531 | { | 579 | { |
532 | u16 sw = ctrl_inw(SW4140); /* select camera, monitor */ | 580 | u16 sw = ctrl_inw(SW4140); /* select camera, monitor */ |
533 | struct clk *fsia_clk; | 581 | struct clk *fsia_clk; |
534 | 582 | ||
583 | /* register board specific self-refresh code */ | ||
584 | sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF, | ||
585 | &ms7724se_sdram_enter_start, | ||
586 | &ms7724se_sdram_enter_end, | ||
587 | &ms7724se_sdram_leave_start, | ||
588 | &ms7724se_sdram_leave_end); | ||
535 | /* Reset Release */ | 589 | /* Reset Release */ |
536 | ctrl_outw(ctrl_inw(FPGA_OUT) & | 590 | ctrl_outw(ctrl_inw(FPGA_OUT) & |
537 | ~((1 << 1) | /* LAN */ | 591 | ~((1 << 1) | /* LAN */ |
@@ -701,6 +755,26 @@ static int __init devices_setup(void) | |||
701 | clk_set_rate(&fsimcka_clk, 11000); | 755 | clk_set_rate(&fsimcka_clk, 11000); |
702 | clk_put(fsia_clk); | 756 | clk_put(fsia_clk); |
703 | 757 | ||
758 | /* SDHI0 connected to cn7 */ | ||
759 | gpio_request(GPIO_FN_SDHI0CD, NULL); | ||
760 | gpio_request(GPIO_FN_SDHI0WP, NULL); | ||
761 | gpio_request(GPIO_FN_SDHI0D3, NULL); | ||
762 | gpio_request(GPIO_FN_SDHI0D2, NULL); | ||
763 | gpio_request(GPIO_FN_SDHI0D1, NULL); | ||
764 | gpio_request(GPIO_FN_SDHI0D0, NULL); | ||
765 | gpio_request(GPIO_FN_SDHI0CMD, NULL); | ||
766 | gpio_request(GPIO_FN_SDHI0CLK, NULL); | ||
767 | |||
768 | /* SDHI1 connected to cn8 */ | ||
769 | gpio_request(GPIO_FN_SDHI1CD, NULL); | ||
770 | gpio_request(GPIO_FN_SDHI1WP, NULL); | ||
771 | gpio_request(GPIO_FN_SDHI1D3, NULL); | ||
772 | gpio_request(GPIO_FN_SDHI1D2, NULL); | ||
773 | gpio_request(GPIO_FN_SDHI1D1, NULL); | ||
774 | gpio_request(GPIO_FN_SDHI1D0, NULL); | ||
775 | gpio_request(GPIO_FN_SDHI1CMD, NULL); | ||
776 | gpio_request(GPIO_FN_SDHI1CLK, NULL); | ||
777 | |||
704 | /* | 778 | /* |
705 | * enable SH-Eth | 779 | * enable SH-Eth |
706 | * | 780 | * |
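
In the se7724 setup above, sh_eth_init() no longer resets the controller or writes MAHR/MALR itself; it unpacks each 16-bit EEPROM word into two bytes of sh_eth_plat.mac_addr and leaves register programming to the driver. A stand-alone sketch of the byte unpacking (the EEPROM contents are invented):

#include <stdint.h>
#include <stdio.h>

/* Pretend EEPROM: three 16-bit words holding a MAC address with the low
 * byte first in each word, matching how the patched loop stores
 * "mac & 0xff" followed by "mac >> 8". Values are invented. */
static const uint16_t eeprom[3] = { 0x1200, 0x5634, 0x9A78 };

int main(void)
{
    uint8_t mac_addr[6];

    for (int i = 0; i < 3; i++) {
        uint16_t mac = eeprom[i];           /* ctrl_inw(EEPROM_DATA) stand-in */

        mac_addr[i << 1]       = mac & 0xff;
        mac_addr[(i << 1) + 1] = mac >> 8;
    }

    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           mac_addr[0], mac_addr[1], mac_addr[2],
           mac_addr[3], mac_addr[4], mac_addr[5]);
    return 0;
}
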
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c index fd56a71ca9d..b51b1fc4baa 100644 --- a/arch/sh/boot/compressed/misc.c +++ b/arch/sh/boot/compressed/misc.c | |||
@@ -131,7 +131,7 @@ void decompress_kernel(void) | |||
131 | #ifdef CONFIG_SUPERH64 | 131 | #ifdef CONFIG_SUPERH64 |
132 | output_addr = (CONFIG_MEMORY_START + 0x2000); | 132 | output_addr = (CONFIG_MEMORY_START + 0x2000); |
133 | #else | 133 | #else |
134 | output_addr = PHYSADDR((unsigned long)&_text+PAGE_SIZE); | 134 | output_addr = __pa((unsigned long)&_text+PAGE_SIZE); |
135 | #ifdef CONFIG_29BIT | 135 | #ifdef CONFIG_29BIT |
136 | output_addr |= P2SEG; | 136 | output_addr |= P2SEG; |
137 | #endif | 137 | #endif |
diff --git a/arch/sh/boot/romimage/Makefile b/arch/sh/boot/romimage/Makefile index 5806eee84f6..f473a24a2d9 100644 --- a/arch/sh/boot/romimage/Makefile +++ b/arch/sh/boot/romimage/Makefile | |||
@@ -4,16 +4,22 @@ | |||
4 | # create an image suitable for burning to flash from zImage | 4 | # create an image suitable for burning to flash from zImage |
5 | # | 5 | # |
6 | 6 | ||
7 | targets := vmlinux head.o | 7 | targets := vmlinux head.o zeropage.bin piggy.o |
8 | 8 | ||
9 | OBJECTS = $(obj)/head.o | 9 | OBJECTS = $(obj)/head.o |
10 | LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext 0 -e romstart | 10 | LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext 0 -e romstart \ |
11 | -T $(obj)/../../kernel/vmlinux.lds | ||
11 | 12 | ||
12 | $(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE | 13 | $(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE |
13 | $(call if_changed,ld) | 14 | $(call if_changed,ld) |
14 | @: | 15 | @: |
15 | 16 | ||
17 | OBJCOPYFLAGS += -j .empty_zero_page | ||
18 | |||
19 | $(obj)/zeropage.bin: vmlinux FORCE | ||
20 | $(call if_changed,objcopy) | ||
21 | |||
16 | LDFLAGS_piggy.o := -r --format binary --oformat $(ld-bfd) -T | 22 | LDFLAGS_piggy.o := -r --format binary --oformat $(ld-bfd) -T |
17 | 23 | ||
18 | $(obj)/piggy.o: $(obj)/vmlinux.scr arch/sh/boot/zImage FORCE | 24 | $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/zeropage.bin arch/sh/boot/zImage FORCE |
19 | $(call if_changed,ld) | 25 | $(call if_changed,ld) |
diff --git a/arch/sh/boot/romimage/head.S b/arch/sh/boot/romimage/head.S index 219bc626dd7..93e779a405e 100644 --- a/arch/sh/boot/romimage/head.S +++ b/arch/sh/boot/romimage/head.S | |||
@@ -5,6 +5,44 @@ | |||
5 | */ | 5 | */ |
6 | 6 | ||
7 | .text | 7 | .text |
8 | #include <asm/page.h> | ||
9 | |||
8 | .global romstart | 10 | .global romstart |
9 | romstart: | 11 | romstart: |
12 | /* include board specific setup code */ | ||
10 | #include <mach/romimage.h> | 13 | #include <mach/romimage.h> |
14 | |||
15 | /* copy the empty_zero_page contents to where vmlinux expects it */ | ||
16 | mova empty_zero_page_src, r0 | ||
17 | mov.l empty_zero_page_dst, r1 | ||
18 | mov #(PAGE_SHIFT - 4), r4 | ||
19 | mov #1, r3 | ||
20 | shld r4, r3 /* r3 = PAGE_SIZE / 16 */ | ||
21 | |||
22 | 1: | ||
23 | mov.l @r0, r4 | ||
24 | mov.l @(4, r0), r5 | ||
25 | mov.l @(8, r0), r6 | ||
26 | mov.l @(12, r0), r7 | ||
27 | add #16,r0 | ||
28 | mov.l r4, @r1 | ||
29 | mov.l r5, @(4, r1) | ||
30 | mov.l r6, @(8, r1) | ||
31 | mov.l r7, @(12, r1) | ||
32 | dt r3 | ||
33 | add #16,r1 | ||
34 | bf 1b | ||
35 | |||
36 | /* jump to the zImage entry point located after the zero page data */ | ||
37 | mov #PAGE_SHIFT, r4 | ||
38 | mov #1, r1 | ||
39 | shld r4, r1 | ||
40 | mova empty_zero_page_src, r0 | ||
41 | add r1, r0 | ||
42 | jmp @r0 | ||
43 | nop | ||
44 | |||
45 | .align 2 | ||
46 | empty_zero_page_dst: | ||
47 | .long _text | ||
48 | empty_zero_page_src: | ||
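
The extended romstart copies the empty_zero_page payload to the address vmlinux links it at, sixteen bytes per loop iteration, and then jumps to the zImage entry point that sits exactly one page after the copied data. A C model of the copy and the jump-target arithmetic (PAGE_SHIFT and the buffers are placeholders; the real code runs from ROM with no C runtime):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT 12                       /* typical 4K pages, assumed */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stand-ins for the linker-provided locations. */
static uint8_t zero_page_src[PAGE_SIZE];    /* empty_zero_page_src */
static uint8_t text_dst[PAGE_SIZE];         /* empty_zero_page_dst (_text) */

int main(void)
{
    /* Copy PAGE_SIZE / 16 chunks of 16 bytes, as the mov.l loop in
     * head.S does. */
    for (unsigned long i = 0; i < PAGE_SIZE / 16; i++)
        memcpy(text_dst + i * 16, zero_page_src + i * 16, 16);

    /* The zImage entry point is placed one page after the zero page
     * data; head.S computes it as empty_zero_page_src + (1 << PAGE_SHIFT). */
    uint8_t *zimage_entry = zero_page_src + PAGE_SIZE;

    printf("would jump to %p\n", (void *)zimage_entry);
    return 0;
}
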
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c index 347ee11351e..1ee631d3725 100644 --- a/arch/sh/drivers/dma/dma-sysfs.c +++ b/arch/sh/drivers/dma/dma-sysfs.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/sysdev.h> | 14 | #include <linux/sysdev.h> |
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/module.h> | ||
17 | #include <linux/err.h> | 16 | #include <linux/err.h> |
18 | #include <linux/string.h> | 17 | #include <linux/string.h> |
19 | #include <asm/dma.h> | 18 | #include <asm/dma.h> |
@@ -21,7 +20,6 @@ | |||
21 | static struct sysdev_class dma_sysclass = { | 20 | static struct sysdev_class dma_sysclass = { |
22 | .name = "dma", | 21 | .name = "dma", |
23 | }; | 22 | }; |
24 | EXPORT_SYMBOL(dma_sysclass); | ||
25 | 23 | ||
26 | static ssize_t dma_show_devices(struct sys_device *dev, | 24 | static ssize_t dma_show_devices(struct sys_device *dev, |
27 | struct sysdev_attribute *attr, char *buf) | 25 | struct sysdev_attribute *attr, char *buf) |
diff --git a/arch/sh/drivers/pci/Kconfig b/arch/sh/drivers/pci/Kconfig deleted file mode 100644 index e8db585a663..00000000000 --- a/arch/sh/drivers/pci/Kconfig +++ /dev/null | |||
@@ -1,19 +0,0 @@ | |||
1 | config PCI | ||
2 | bool "PCI support" | ||
3 | depends on SYS_SUPPORTS_PCI | ||
4 | help | ||
5 | Find out whether you have a PCI motherboard. PCI is the name of a | ||
6 | bus system, i.e. the way the CPU talks to the other stuff inside | ||
7 | your box. If you have PCI, say Y, otherwise N. | ||
8 | |||
9 | config SH_PCIDMA_NONCOHERENT | ||
10 | bool "Cache and PCI noncoherent" | ||
11 | depends on PCI | ||
12 | default y | ||
13 | help | ||
14 | Enable this option if your platform does not have a CPU cache which | ||
15 | remains coherent with PCI DMA. It is safest to say 'Y', although you | ||
16 | will see better performance if you can say 'N', because the PCI DMA | ||
17 | code will not have to flush the CPU's caches. If you have a PCI host | ||
18 | bridge integrated with your SH CPU, refer carefully to the chip specs | ||
19 | to see if you can say 'N' here. Otherwise, leave it as 'Y'. | ||
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h index 80d40813e05..99d6b3ecbe2 100644 --- a/arch/sh/include/asm/addrspace.h +++ b/arch/sh/include/asm/addrspace.h | |||
@@ -28,9 +28,6 @@ | |||
28 | /* Returns the privileged segment base of a given address */ | 28 | /* Returns the privileged segment base of a given address */ |
29 | #define PXSEG(a) (((unsigned long)(a)) & 0xe0000000) | 29 | #define PXSEG(a) (((unsigned long)(a)) & 0xe0000000) |
30 | 30 | ||
31 | /* Returns the physical address of a PnSEG (n=1,2) address */ | ||
32 | #define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff) | ||
33 | |||
34 | #if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED) | 31 | #if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED) |
35 | /* | 32 | /* |
36 | * Map an address to a certain privileged segment | 33 | * Map an address to a certain privileged segment |
@@ -60,5 +57,11 @@ | |||
60 | #define P3_ADDR_MAX P4SEG | 57 | #define P3_ADDR_MAX P4SEG |
61 | #endif | 58 | #endif |
62 | 59 | ||
60 | #ifndef __ASSEMBLY__ | ||
61 | #ifdef CONFIG_PMB | ||
62 | extern int __in_29bit_mode(void); | ||
63 | #endif /* CONFIG_PMB */ | ||
64 | #endif /* __ASSEMBLY__ */ | ||
65 | |||
63 | #endif /* __KERNEL__ */ | 66 | #endif /* __KERNEL__ */ |
64 | #endif /* __ASM_SH_ADDRSPACE_H */ | 67 | #endif /* __ASM_SH_ADDRSPACE_H */ |
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index e8e78137c6f..b16388d7195 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h | |||
@@ -78,11 +78,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) | |||
78 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | 78 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) |
79 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | 79 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
80 | 80 | ||
81 | /* Atomic operations are already serializing on SH */ | 81 | #define smp_mb__before_atomic_dec() smp_mb() |
82 | #define smp_mb__before_atomic_dec() barrier() | 82 | #define smp_mb__after_atomic_dec() smp_mb() |
83 | #define smp_mb__after_atomic_dec() barrier() | 83 | #define smp_mb__before_atomic_inc() smp_mb() |
84 | #define smp_mb__before_atomic_inc() barrier() | 84 | #define smp_mb__after_atomic_inc() smp_mb() |
85 | #define smp_mb__after_atomic_inc() barrier() | ||
86 | 85 | ||
87 | #include <asm-generic/atomic-long.h> | 86 | #include <asm-generic/atomic-long.h> |
88 | #include <asm-generic/atomic64.h> | 87 | #include <asm-generic/atomic64.h> |
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index ebe595b7ab1..98511e4d28c 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h | |||
@@ -26,8 +26,8 @@ | |||
26 | /* | 26 | /* |
27 | * clear_bit() doesn't provide any barrier for the compiler. | 27 | * clear_bit() doesn't provide any barrier for the compiler. |
28 | */ | 28 | */ |
29 | #define smp_mb__before_clear_bit() barrier() | 29 | #define smp_mb__before_clear_bit() smp_mb() |
30 | #define smp_mb__after_clear_bit() barrier() | 30 | #define smp_mb__after_clear_bit() smp_mb() |
31 | 31 | ||
32 | #ifdef CONFIG_SUPERH32 | 32 | #ifdef CONFIG_SUPERH32 |
33 | static inline unsigned long ffz(unsigned long word) | 33 | static inline unsigned long ffz(unsigned long word) |
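
Both barrier hunks above replace the compiler-only barrier() with smp_mb(): on non-SMP builds smp_mb() still reduces to a compiler barrier, but on SMP kernels it expands to the architecture's hardware fence, which is what callers of the smp_mb__before_*/__after_* helpers rely on. A stand-alone C11 illustration of the difference between the two kinds of barrier (macro names are local to the example, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* Compiler-only barrier: stops the compiler reordering accesses across
 * it, but emits no fence instruction (the role of the old barrier()). */
#define compiler_barrier()  __asm__ __volatile__("" ::: "memory")

/* Full memory barrier: also orders the accesses in hardware, which is
 * what smp_mb() provides on an SMP kernel. */
#define full_mb()           atomic_thread_fence(memory_order_seq_cst)

static atomic_int refcount = 2;
static int object_state;

int main(void)
{
    /* Publish a final store, then drop a reference: the fence keeps the
     * store visible before the decrement on other CPUs. */
    object_state = 1;
    full_mb();                      /* smp_mb__before_atomic_dec() role */
    atomic_fetch_sub(&refcount, 1);

    compiler_barrier();             /* ordering for the compiler only */
    printf("state=%d refcount=%d\n", object_state, atomic_load(&refcount));
    return 0;
}
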
diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h index 46260fcbdf4..02a19a1c033 100644 --- a/arch/sh/include/asm/bugs.h +++ b/arch/sh/include/asm/bugs.h | |||
@@ -14,11 +14,15 @@ | |||
14 | 14 | ||
15 | #include <asm/processor.h> | 15 | #include <asm/processor.h> |
16 | 16 | ||
17 | extern void select_idle_routine(void); | ||
18 | |||
17 | static void __init check_bugs(void) | 19 | static void __init check_bugs(void) |
18 | { | 20 | { |
19 | extern unsigned long loops_per_jiffy; | 21 | extern unsigned long loops_per_jiffy; |
20 | char *p = &init_utsname()->machine[2]; /* "sh" */ | 22 | char *p = &init_utsname()->machine[2]; /* "sh" */ |
21 | 23 | ||
24 | select_idle_routine(); | ||
25 | |||
22 | current_cpu_data.loops_per_jiffy = loops_per_jiffy; | 26 | current_cpu_data.loops_per_jiffy = loops_per_jiffy; |
23 | 27 | ||
24 | switch (current_cpu_data.family) { | 28 | switch (current_cpu_data.family) { |
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 69d56dd4c96..87ced133a36 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h | |||
@@ -1,219 +1,108 @@ | |||
1 | #ifndef __ASM_SH_DMA_MAPPING_H | 1 | #ifndef __ASM_SH_DMA_MAPPING_H |
2 | #define __ASM_SH_DMA_MAPPING_H | 2 | #define __ASM_SH_DMA_MAPPING_H |
3 | 3 | ||
4 | #include <linux/mm.h> | 4 | extern struct dma_map_ops *dma_ops; |
5 | #include <linux/scatterlist.h> | 5 | extern void no_iommu_init(void); |
6 | #include <linux/dma-debug.h> | 6 | |
7 | #include <asm/cacheflush.h> | 7 | static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
8 | #include <asm/io.h> | 8 | { |
9 | return dma_ops; | ||
10 | } | ||
11 | |||
9 | #include <asm-generic/dma-coherent.h> | 12 | #include <asm-generic/dma-coherent.h> |
13 | #include <asm-generic/dma-mapping-common.h> | ||
14 | |||
15 | static inline int dma_supported(struct device *dev, u64 mask) | ||
16 | { | ||
17 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
10 | 18 | ||
11 | extern struct bus_type pci_bus_type; | 19 | if (ops->dma_supported) |
20 | return ops->dma_supported(dev, mask); | ||
12 | 21 | ||
13 | #define dma_supported(dev, mask) (1) | 22 | return 1; |
23 | } | ||
14 | 24 | ||
15 | static inline int dma_set_mask(struct device *dev, u64 mask) | 25 | static inline int dma_set_mask(struct device *dev, u64 mask) |
16 | { | 26 | { |
27 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
28 | |||
17 | if (!dev->dma_mask || !dma_supported(dev, mask)) | 29 | if (!dev->dma_mask || !dma_supported(dev, mask)) |
18 | return -EIO; | 30 | return -EIO; |
31 | if (ops->set_dma_mask) | ||
32 | return ops->set_dma_mask(dev, mask); | ||
19 | 33 | ||
20 | *dev->dma_mask = mask; | 34 | *dev->dma_mask = mask; |
21 | 35 | ||
22 | return 0; | 36 | return 0; |
23 | } | 37 | } |
24 | 38 | ||
25 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
26 | dma_addr_t *dma_handle, gfp_t flag); | ||
27 | |||
28 | void dma_free_coherent(struct device *dev, size_t size, | ||
29 | void *vaddr, dma_addr_t dma_handle); | ||
30 | |||
31 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 39 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
32 | enum dma_data_direction dir); | 40 | enum dma_data_direction dir); |
33 | 41 | ||
34 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 42 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
35 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 43 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
36 | #define dma_is_consistent(d, h) (1) | ||
37 | |||
38 | static inline dma_addr_t dma_map_single(struct device *dev, | ||
39 | void *ptr, size_t size, | ||
40 | enum dma_data_direction dir) | ||
41 | { | ||
42 | dma_addr_t addr = virt_to_phys(ptr); | ||
43 | |||
44 | #if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) | ||
45 | if (dev->bus == &pci_bus_type) | ||
46 | return addr; | ||
47 | #endif | ||
48 | dma_cache_sync(dev, ptr, size, dir); | ||
49 | |||
50 | debug_dma_map_page(dev, virt_to_page(ptr), | ||
51 | (unsigned long)ptr & ~PAGE_MASK, size, | ||
52 | dir, addr, true); | ||
53 | |||
54 | return addr; | ||
55 | } | ||
56 | |||
57 | static inline void dma_unmap_single(struct device *dev, dma_addr_t addr, | ||
58 | size_t size, enum dma_data_direction dir) | ||
59 | { | ||
60 | debug_dma_unmap_page(dev, addr, size, dir, true); | ||
61 | } | ||
62 | 44 | ||
63 | static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, | 45 | #ifdef CONFIG_DMA_COHERENT |
64 | int nents, enum dma_data_direction dir) | 46 | #define dma_is_consistent(d, h) (1) |
65 | { | 47 | #else |
66 | int i; | 48 | #define dma_is_consistent(d, h) (0) |
67 | |||
68 | for (i = 0; i < nents; i++) { | ||
69 | #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) | ||
70 | dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); | ||
71 | #endif | 49 | #endif |
72 | sg[i].dma_address = sg_phys(&sg[i]); | ||
73 | sg[i].dma_length = sg[i].length; | ||
74 | } | ||
75 | 50 | ||
76 | debug_dma_map_sg(dev, sg, nents, i, dir); | 51 | static inline int dma_get_cache_alignment(void) |
77 | |||
78 | return nents; | ||
79 | } | ||
80 | |||
81 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
82 | int nents, enum dma_data_direction dir) | ||
83 | { | ||
84 | debug_dma_unmap_sg(dev, sg, nents, dir); | ||
85 | } | ||
86 | |||
87 | static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
88 | unsigned long offset, size_t size, | ||
89 | enum dma_data_direction dir) | ||
90 | { | ||
91 | return dma_map_single(dev, page_address(page) + offset, size, dir); | ||
92 | } | ||
93 | |||
94 | static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
95 | size_t size, enum dma_data_direction dir) | ||
96 | { | ||
97 | dma_unmap_single(dev, dma_address, size, dir); | ||
98 | } | ||
99 | |||
100 | static inline void __dma_sync_single(struct device *dev, dma_addr_t dma_handle, | ||
101 | size_t size, enum dma_data_direction dir) | ||
102 | { | 52 | { |
103 | #if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) | 53 | /* |
104 | if (dev->bus == &pci_bus_type) | 54 | * Each processor family will define its own L1_CACHE_SHIFT, |
105 | return; | 55 | * L1_CACHE_BYTES wraps to this, so this is always safe. |
106 | #endif | 56 | */ |
107 | dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir); | 57 | return L1_CACHE_BYTES; |
108 | } | 58 | } |
109 | 59 | ||
110 | static inline void dma_sync_single_range(struct device *dev, | 60 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
111 | dma_addr_t dma_handle, | ||
112 | unsigned long offset, size_t size, | ||
113 | enum dma_data_direction dir) | ||
114 | { | 61 | { |
115 | #if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) | 62 | struct dma_map_ops *ops = get_dma_ops(dev); |
116 | if (dev->bus == &pci_bus_type) | ||
117 | return; | ||
118 | #endif | ||
119 | dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir); | ||
120 | } | ||
121 | 63 | ||
122 | static inline void __dma_sync_sg(struct device *dev, struct scatterlist *sg, | 64 | if (ops->mapping_error) |
123 | int nelems, enum dma_data_direction dir) | 65 | return ops->mapping_error(dev, dma_addr); |
124 | { | ||
125 | int i; | ||
126 | 66 | ||
127 | for (i = 0; i < nelems; i++) { | 67 | return dma_addr == 0; |
128 | #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) | ||
129 | dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); | ||
130 | #endif | ||
131 | sg[i].dma_address = sg_phys(&sg[i]); | ||
132 | sg[i].dma_length = sg[i].length; | ||
133 | } | ||
134 | } | 68 | } |
135 | 69 | ||
136 | static inline void dma_sync_single_for_cpu(struct device *dev, | 70 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, |
137 | dma_addr_t dma_handle, size_t size, | 71 | dma_addr_t *dma_handle, gfp_t gfp) |
138 | enum dma_data_direction dir) | ||
139 | { | 72 | { |
140 | __dma_sync_single(dev, dma_handle, size, dir); | 73 | struct dma_map_ops *ops = get_dma_ops(dev); |
141 | debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir); | 74 | void *memory; |
142 | } | ||
143 | 75 | ||
144 | static inline void dma_sync_single_for_device(struct device *dev, | 76 | if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) |
145 | dma_addr_t dma_handle, | 77 | return memory; |
146 | size_t size, | 78 | if (!ops->alloc_coherent) |
147 | enum dma_data_direction dir) | 79 | return NULL; |
148 | { | ||
149 | __dma_sync_single(dev, dma_handle, size, dir); | ||
150 | debug_dma_sync_single_for_device(dev, dma_handle, size, dir); | ||
151 | } | ||
152 | 80 | ||
153 | static inline void dma_sync_single_range_for_cpu(struct device *dev, | 81 | memory = ops->alloc_coherent(dev, size, dma_handle, gfp); |
154 | dma_addr_t dma_handle, | 82 | debug_dma_alloc_coherent(dev, size, *dma_handle, memory); |
155 | unsigned long offset, | ||
156 | size_t size, | ||
157 | enum dma_data_direction direction) | ||
158 | { | ||
159 | dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); | ||
160 | debug_dma_sync_single_range_for_cpu(dev, dma_handle, | ||
161 | offset, size, direction); | ||
162 | } | ||
163 | 83 | ||
164 | static inline void dma_sync_single_range_for_device(struct device *dev, | 84 | return memory; |
165 | dma_addr_t dma_handle, | ||
166 | unsigned long offset, | ||
167 | size_t size, | ||
168 | enum dma_data_direction direction) | ||
169 | { | ||
170 | dma_sync_single_for_device(dev, dma_handle+offset, size, direction); | ||
171 | debug_dma_sync_single_range_for_device(dev, dma_handle, | ||
172 | offset, size, direction); | ||
173 | } | 85 | } |
174 | 86 | ||
175 | 87 | static inline void dma_free_coherent(struct device *dev, size_t size, | |
176 | static inline void dma_sync_sg_for_cpu(struct device *dev, | 88 | void *vaddr, dma_addr_t dma_handle) |
177 | struct scatterlist *sg, int nelems, | ||
178 | enum dma_data_direction dir) | ||
179 | { | 89 | { |
180 | __dma_sync_sg(dev, sg, nelems, dir); | 90 | struct dma_map_ops *ops = get_dma_ops(dev); |
181 | debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); | ||
182 | } | ||
183 | 91 | ||
184 | static inline void dma_sync_sg_for_device(struct device *dev, | 92 | WARN_ON(irqs_disabled()); /* for portability */ |
185 | struct scatterlist *sg, int nelems, | ||
186 | enum dma_data_direction dir) | ||
187 | { | ||
188 | __dma_sync_sg(dev, sg, nelems, dir); | ||
189 | debug_dma_sync_sg_for_device(dev, sg, nelems, dir); | ||
190 | } | ||
191 | 93 | ||
192 | static inline int dma_get_cache_alignment(void) | 94 | if (dma_release_from_coherent(dev, get_order(size), vaddr)) |
193 | { | 95 | return; |
194 | /* | ||
195 | * Each processor family will define its own L1_CACHE_SHIFT, | ||
196 | * L1_CACHE_BYTES wraps to this, so this is always safe. | ||
197 | */ | ||
198 | return L1_CACHE_BYTES; | ||
199 | } | ||
200 | 96 | ||
201 | static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | 97 | debug_dma_free_coherent(dev, size, vaddr, dma_handle); |
202 | { | 98 | if (ops->free_coherent) |
203 | return dma_addr == 0; | 99 | ops->free_coherent(dev, size, vaddr, dma_handle); |
204 | } | 100 | } |
205 | 101 | ||
206 | #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY | 102 | /* arch/sh/mm/consistent.c */ |
207 | 103 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, | |
208 | extern int | 104 | dma_addr_t *dma_addr, gfp_t flag); |
209 | dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | 105 | extern void dma_generic_free_coherent(struct device *dev, size_t size, |
210 | dma_addr_t device_addr, size_t size, int flags); | 106 | void *vaddr, dma_addr_t dma_handle); |
211 | |||
212 | extern void | ||
213 | dma_release_declared_memory(struct device *dev); | ||
214 | |||
215 | extern void * | ||
216 | dma_mark_declared_memory_occupied(struct device *dev, | ||
217 | dma_addr_t device_addr, size_t size); | ||
218 | 107 | ||
219 | #endif /* __ASM_SH_DMA_MAPPING_H */ | 108 | #endif /* __ASM_SH_DMA_MAPPING_H */ |
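
The rewritten dma-mapping.h routes everything through a per-platform struct dma_map_ops and keeps only thin inline wrappers: dma_supported() defaults to 1, dma_set_mask() stores the mask when no op is provided, and dma_alloc_coherent() fails cleanly if no backend registered an alloc_coherent hook. A reduced user-space model of that dispatch pattern (the structures and helpers below are local to the example, not the kernel's definitions):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long dma_addr_t;

struct device { unsigned long long *dma_mask; };

/* Cut-down op table mirroring the shape of struct dma_map_ops. */
struct dma_map_ops {
    void *(*alloc_coherent)(struct device *dev, size_t size,
                            dma_addr_t *handle);
    int   (*dma_supported)(struct device *dev, unsigned long long mask);
};

static struct dma_map_ops *dma_ops;

static struct dma_map_ops *get_dma_ops(struct device *dev)
{
    (void)dev;
    return dma_ops;                 /* one global op table, as on sh */
}

static int dma_supported(struct device *dev, unsigned long long mask)
{
    struct dma_map_ops *ops = get_dma_ops(dev);

    if (ops->dma_supported)
        return ops->dma_supported(dev, mask);
    return 1;                       /* default: everything is supported */
}

static void *dma_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *handle)
{
    struct dma_map_ops *ops = get_dma_ops(dev);

    if (!ops->alloc_coherent)
        return NULL;                /* no backend registered */
    return ops->alloc_coherent(dev, size, handle);
}

/* Trivial "nommu"-style backend for the example. */
static void *demo_alloc(struct device *dev, size_t size, dma_addr_t *handle)
{
    void *p = malloc(size);
    (void)dev;
    *handle = (dma_addr_t)p;        /* identity "bus address" */
    return p;
}

static struct dma_map_ops demo_ops = { .alloc_coherent = demo_alloc };

int main(void)
{
    struct device dev = { 0 };
    dma_addr_t handle;

    dma_ops = &demo_ops;
    void *buf = dma_alloc_coherent(&dev, 64, &handle);
    printf("supported=%d buf=%p handle=%#lx\n",
           dma_supported(&dev, ~0ULL), buf, handle);
    free(buf);
    return 0;
}
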
diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index ced6795891a..bdccbbfdc0b 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h | |||
@@ -194,6 +194,12 @@ | |||
194 | #define DWARF_ARCH_RA_REG 17 | 194 | #define DWARF_ARCH_RA_REG 17 |
195 | 195 | ||
196 | #ifndef __ASSEMBLY__ | 196 | #ifndef __ASSEMBLY__ |
197 | |||
198 | #include <linux/compiler.h> | ||
199 | #include <linux/bug.h> | ||
200 | #include <linux/list.h> | ||
201 | #include <linux/module.h> | ||
202 | |||
197 | /* | 203 | /* |
198 | * Read either the frame pointer (r14) or the stack pointer (r15). | 204 | * Read either the frame pointer (r14) or the stack pointer (r15). |
199 | * NOTE: this MUST be inlined. | 205 | * NOTE: this MUST be inlined. |
@@ -241,6 +247,12 @@ struct dwarf_cie { | |||
241 | 247 | ||
242 | unsigned long flags; | 248 | unsigned long flags; |
243 | #define DWARF_CIE_Z_AUGMENTATION (1 << 0) | 249 | #define DWARF_CIE_Z_AUGMENTATION (1 << 0) |
250 | |||
251 | /* | ||
252 | * 'mod' will be non-NULL if this CIE came from a module's | ||
253 | * .eh_frame section. | ||
254 | */ | ||
255 | struct module *mod; | ||
244 | }; | 256 | }; |
245 | 257 | ||
246 | /** | 258 | /** |
@@ -255,6 +267,12 @@ struct dwarf_fde { | |||
255 | unsigned char *instructions; | 267 | unsigned char *instructions; |
256 | unsigned char *end; | 268 | unsigned char *end; |
257 | struct list_head link; | 269 | struct list_head link; |
270 | |||
271 | /* | ||
272 | * 'mod' will be non-NULL if this FDE came from a module's | ||
273 | * .eh_frame section. | ||
274 | */ | ||
275 | struct module *mod; | ||
258 | }; | 276 | }; |
259 | 277 | ||
260 | /** | 278 | /** |
@@ -364,6 +382,12 @@ static inline unsigned int DW_CFA_operand(unsigned long insn) | |||
364 | 382 | ||
365 | extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, | 383 | extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, |
366 | struct dwarf_frame *); | 384 | struct dwarf_frame *); |
385 | extern void dwarf_free_frame(struct dwarf_frame *); | ||
386 | |||
387 | extern int module_dwarf_finalize(const Elf_Ehdr *, const Elf_Shdr *, | ||
388 | struct module *); | ||
389 | extern void module_dwarf_cleanup(struct module *); | ||
390 | |||
367 | #endif /* !__ASSEMBLY__ */ | 391 | #endif /* !__ASSEMBLY__ */ |
368 | 392 | ||
369 | #define CFI_STARTPROC .cfi_startproc | 393 | #define CFI_STARTPROC .cfi_startproc |
@@ -391,6 +415,10 @@ extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, | |||
391 | static inline void dwarf_unwinder_init(void) | 415 | static inline void dwarf_unwinder_init(void) |
392 | { | 416 | { |
393 | } | 417 | } |
418 | |||
419 | #define module_dwarf_finalize(hdr, sechdrs, me) (0) | ||
420 | #define module_dwarf_cleanup(mod) do { } while (0) | ||
421 | |||
394 | #endif | 422 | #endif |
395 | 423 | ||
396 | #endif /* CONFIG_DWARF_UNWINDER */ | 424 | #endif /* CONFIG_DWARF_UNWINDER */ |
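
The new mod back-pointers in dwarf_cie and dwarf_fde exist so that entries parsed from a module's .eh_frame section can be found again and discarded by module_dwarf_cleanup() when the module is unloaded. A generic sketch of that tag-by-owner-and-filter pattern (plain singly linked list, no locking, purely illustrative):

#include <stdio.h>
#include <stdlib.h>

struct module { const char *name; };

/* Simplified unwind-table entry: just an owner tag plus a link. */
struct fde {
    struct module *mod;             /* NULL for entries from the core kernel */
    struct fde *next;
};

static struct fde *fde_list;

static void add_fde(struct module *mod)
{
    struct fde *f = malloc(sizeof(*f));
    f->mod = mod;
    f->next = fde_list;
    fde_list = f;
}

/* Drop every entry owned by 'mod', as module_dwarf_cleanup() must. */
static void cleanup(struct module *mod)
{
    struct fde **p = &fde_list;

    while (*p) {
        if ((*p)->mod == mod) {
            struct fde *dead = *p;
            *p = dead->next;
            free(dead);
        } else {
            p = &(*p)->next;
        }
    }
}

int main(void)
{
    struct module m = { "example" };

    add_fde(NULL);                  /* core kernel entry */
    add_fde(&m);                    /* module entry */
    cleanup(&m);

    for (struct fde *f = fde_list; f; f = f->next)
        printf("remaining entry, mod=%p\n", (void *)f->mod);
    return 0;
}
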
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h index 721fcc4d5e9..5ac1e40a511 100644 --- a/arch/sh/include/asm/fixmap.h +++ b/arch/sh/include/asm/fixmap.h | |||
@@ -14,9 +14,9 @@ | |||
14 | #define _ASM_FIXMAP_H | 14 | #define _ASM_FIXMAP_H |
15 | 15 | ||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/threads.h> | ||
17 | #include <asm/page.h> | 18 | #include <asm/page.h> |
18 | #ifdef CONFIG_HIGHMEM | 19 | #ifdef CONFIG_HIGHMEM |
19 | #include <linux/threads.h> | ||
20 | #include <asm/kmap_types.h> | 20 | #include <asm/kmap_types.h> |
21 | #endif | 21 | #endif |
22 | 22 | ||
@@ -46,9 +46,15 @@ | |||
46 | * fix-mapped? | 46 | * fix-mapped? |
47 | */ | 47 | */ |
48 | enum fixed_addresses { | 48 | enum fixed_addresses { |
49 | #define FIX_N_COLOURS 16 | 49 | /* |
50 | * The FIX_CMAP entries are used by kmap_coherent() to get virtual | ||
51 | * addresses which are of a known color, and so their values are | ||
52 | * important. __fix_to_virt(FIX_CMAP_END - n) must give an address | ||
53 | * which is the same color as a page (n<<PAGE_SHIFT). | ||
54 | */ | ||
55 | #define FIX_N_COLOURS 8 | ||
50 | FIX_CMAP_BEGIN, | 56 | FIX_CMAP_BEGIN, |
51 | FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS, | 57 | FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1, |
52 | FIX_UNCACHED, | 58 | FIX_UNCACHED, |
53 | #ifdef CONFIG_HIGHMEM | 59 | #ifdef CONFIG_HIGHMEM |
54 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ | 60 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ |
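
The comment added to fixmap.h states the contract: __fix_to_virt(FIX_CMAP_END - n) must yield an address with the same cache colour as page n, and the slot count now scales with NR_CPUS. A quick stand-alone check of that property under assumed values (FIXADDR_TOP, PAGE_SHIFT and the __fix_to_virt() formula below are typical choices, not taken from this patch):

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define FIX_N_COLOURS   8
#define NR_CPUS         2

#define FIX_CMAP_BEGIN  0
#define FIX_CMAP_END    (FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1)

#define FIXADDR_TOP     0xdffff000UL    /* assumed: P4SEG - PAGE_SIZE */
#define __fix_to_virt(x) (FIXADDR_TOP - ((unsigned long)(x) << PAGE_SHIFT))

int main(void)
{
    for (unsigned long n = 0; n < FIX_N_COLOURS; n++) {
        unsigned long virt = __fix_to_virt(FIX_CMAP_END - n);
        unsigned long vcol = (virt / PAGE_SIZE) % FIX_N_COLOURS;
        unsigned long pcol = n % FIX_N_COLOURS;

        printf("n=%lu virt=%#lx colour=%lu page-colour=%lu %s\n",
               n, virt, vcol, pcol, vcol == pcol ? "ok" : "MISMATCH");
    }
    return 0;
}
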
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h index 1d3aee04b5c..fb6bbb9b1cc 100644 --- a/arch/sh/include/asm/fpu.h +++ b/arch/sh/include/asm/fpu.h | |||
@@ -18,16 +18,15 @@ static inline void grab_fpu(struct pt_regs *regs) | |||
18 | 18 | ||
19 | struct task_struct; | 19 | struct task_struct; |
20 | 20 | ||
21 | extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs); | 21 | extern void save_fpu(struct task_struct *__tsk); |
22 | void fpu_state_restore(struct pt_regs *regs); | ||
22 | #else | 23 | #else |
23 | 24 | ||
25 | #define save_fpu(tsk) do { } while (0) | ||
24 | #define release_fpu(regs) do { } while (0) | 26 | #define release_fpu(regs) do { } while (0) |
25 | #define grab_fpu(regs) do { } while (0) | 27 | #define grab_fpu(regs) do { } while (0) |
28 | #define fpu_state_restore(regs) do { } while (0) | ||
26 | 29 | ||
27 | static inline void save_fpu(struct task_struct *tsk, struct pt_regs *regs) | ||
28 | { | ||
29 | clear_tsk_thread_flag(tsk, TIF_USEDFPU); | ||
30 | } | ||
31 | #endif | 30 | #endif |
32 | 31 | ||
33 | struct user_regset; | 32 | struct user_regset; |
@@ -39,19 +38,28 @@ extern int fpregs_get(struct task_struct *target, | |||
39 | unsigned int pos, unsigned int count, | 38 | unsigned int pos, unsigned int count, |
40 | void *kbuf, void __user *ubuf); | 39 | void *kbuf, void __user *ubuf); |
41 | 40 | ||
41 | static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs) | ||
42 | { | ||
43 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | ||
44 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | ||
45 | save_fpu(tsk); | ||
46 | release_fpu(regs); | ||
47 | } else | ||
48 | tsk->fpu_counter = 0; | ||
49 | } | ||
50 | |||
42 | static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs) | 51 | static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs) |
43 | { | 52 | { |
44 | preempt_disable(); | 53 | preempt_disable(); |
45 | if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) | 54 | __unlazy_fpu(tsk, regs); |
46 | save_fpu(tsk, regs); | ||
47 | preempt_enable(); | 55 | preempt_enable(); |
48 | } | 56 | } |
49 | 57 | ||
50 | static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs) | 58 | static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs) |
51 | { | 59 | { |
52 | preempt_disable(); | 60 | preempt_disable(); |
53 | if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { | 61 | if (task_thread_info(tsk)->status & TS_USEDFPU) { |
54 | clear_tsk_thread_flag(tsk, TIF_USEDFPU); | 62 | task_thread_info(tsk)->status &= ~TS_USEDFPU; |
55 | release_fpu(regs); | 63 | release_fpu(regs); |
56 | } | 64 | } |
57 | preempt_enable(); | 65 | preempt_enable(); |
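
The FPU hunk moves the lazy-FPU flag from a TIF_ bit to TS_USEDFPU in thread_info->status and adds __unlazy_fpu(), which saves and releases the FPU only when the outgoing task actually used it and otherwise just resets its fpu_counter. A stand-alone model of that decision (structures and the flag value are invented for the example):

#include <stdio.h>

#define TS_USEDFPU 0x0002           /* invented value for the example */

struct thread_info { unsigned int status; };
struct task_struct { struct thread_info ti; int fpu_counter; };

static void save_fpu(struct task_struct *tsk) { printf("save FPU of %p\n", (void *)tsk); }
static void release_fpu(void)                 { printf("release FPU\n"); }

/* Mirror of the new __unlazy_fpu() logic: only pay for an FPU save when
 * the task really used the FPU since it was last scheduled. */
static void unlazy_fpu(struct task_struct *tsk)
{
    if (tsk->ti.status & TS_USEDFPU) {
        tsk->ti.status &= ~TS_USEDFPU;
        save_fpu(tsk);
        release_fpu();
    } else {
        tsk->fpu_counter = 0;
    }
}

int main(void)
{
    struct task_struct used = { .ti = { TS_USEDFPU }, .fpu_counter = 3 };
    struct task_struct idle = { .ti = { 0 },          .fpu_counter = 3 };

    unlazy_fpu(&used);              /* triggers a save */
    unlazy_fpu(&idle);              /* just clears the counter */
    printf("idle fpu_counter=%d\n", idle.fpu_counter);
    return 0;
}
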
diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 12f3a31f20a..13e9966464c 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h | |||
@@ -35,4 +35,21 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) | |||
35 | #endif /* __ASSEMBLY__ */ | 35 | #endif /* __ASSEMBLY__ */ |
36 | #endif /* CONFIG_FUNCTION_TRACER */ | 36 | #endif /* CONFIG_FUNCTION_TRACER */ |
37 | 37 | ||
38 | #ifndef __ASSEMBLY__ | ||
39 | |||
40 | /* arch/sh/kernel/return_address.c */ | ||
41 | extern void *return_address(unsigned int); | ||
42 | |||
43 | #define HAVE_ARCH_CALLER_ADDR | ||
44 | |||
45 | #define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) | ||
46 | #define CALLER_ADDR1 ((unsigned long)return_address(1)) | ||
47 | #define CALLER_ADDR2 ((unsigned long)return_address(2)) | ||
48 | #define CALLER_ADDR3 ((unsigned long)return_address(3)) | ||
49 | #define CALLER_ADDR4 ((unsigned long)return_address(4)) | ||
50 | #define CALLER_ADDR5 ((unsigned long)return_address(5)) | ||
51 | #define CALLER_ADDR6 ((unsigned long)return_address(6)) | ||
52 | |||
53 | #endif /* __ASSEMBLY__ */ | ||
54 | |||
38 | #endif /* __ASM_SH_FTRACE_H */ | 55 | #endif /* __ASM_SH_FTRACE_H */ |
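
The ftrace.h addition keeps CALLER_ADDR0 on the compiler builtin but sends the deeper CALLER_ADDRn levels through an arch return_address() helper, since __builtin_return_address() with a non-zero argument generally cannot be relied on here. A minimal illustration of the level-0 case only (deeper levels need a real unwinder):

#include <stdio.h>

#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))

/* Print the address this function will return to, i.e. its caller. */
static __attribute__((noinline)) void show_caller(void)
{
    printf("called from %#lx\n", CALLER_ADDR0);
}

int main(void)
{
    show_caller();
    return 0;
}
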
diff --git a/arch/sh/include/asm/gpio.h b/arch/sh/include/asm/gpio.h index 61f93da2c62..f8d9a731e90 100644 --- a/arch/sh/include/asm/gpio.h +++ b/arch/sh/include/asm/gpio.h | |||
@@ -20,7 +20,7 @@ | |||
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #define ARCH_NR_GPIOS 512 | 22 | #define ARCH_NR_GPIOS 512 |
23 | #include <asm-generic/gpio.h> | 23 | #include <linux/sh_pfc.h> |
24 | 24 | ||
25 | #ifdef CONFIG_GPIOLIB | 25 | #ifdef CONFIG_GPIOLIB |
26 | 26 | ||
@@ -53,84 +53,4 @@ static inline int irq_to_gpio(unsigned int irq) | |||
53 | 53 | ||
54 | #endif /* CONFIG_GPIOLIB */ | 54 | #endif /* CONFIG_GPIOLIB */ |
55 | 55 | ||
56 | typedef unsigned short pinmux_enum_t; | ||
57 | typedef unsigned short pinmux_flag_t; | ||
58 | |||
59 | #define PINMUX_TYPE_NONE 0 | ||
60 | #define PINMUX_TYPE_FUNCTION 1 | ||
61 | #define PINMUX_TYPE_GPIO 2 | ||
62 | #define PINMUX_TYPE_OUTPUT 3 | ||
63 | #define PINMUX_TYPE_INPUT 4 | ||
64 | #define PINMUX_TYPE_INPUT_PULLUP 5 | ||
65 | #define PINMUX_TYPE_INPUT_PULLDOWN 6 | ||
66 | |||
67 | #define PINMUX_FLAG_TYPE (0x7) | ||
68 | #define PINMUX_FLAG_WANT_PULLUP (1 << 3) | ||
69 | #define PINMUX_FLAG_WANT_PULLDOWN (1 << 4) | ||
70 | |||
71 | #define PINMUX_FLAG_DBIT_SHIFT 5 | ||
72 | #define PINMUX_FLAG_DBIT (0x1f << PINMUX_FLAG_DBIT_SHIFT) | ||
73 | #define PINMUX_FLAG_DREG_SHIFT 10 | ||
74 | #define PINMUX_FLAG_DREG (0x3f << PINMUX_FLAG_DREG_SHIFT) | ||
75 | |||
76 | struct pinmux_gpio { | ||
77 | pinmux_enum_t enum_id; | ||
78 | pinmux_flag_t flags; | ||
79 | }; | ||
80 | |||
81 | #define PINMUX_GPIO(gpio, data_or_mark) [gpio] = { data_or_mark } | ||
82 | #define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0 | ||
83 | |||
84 | struct pinmux_cfg_reg { | ||
85 | unsigned long reg, reg_width, field_width; | ||
86 | unsigned long *cnt; | ||
87 | pinmux_enum_t *enum_ids; | ||
88 | }; | ||
89 | |||
90 | #define PINMUX_CFG_REG(name, r, r_width, f_width) \ | ||
91 | .reg = r, .reg_width = r_width, .field_width = f_width, \ | ||
92 | .cnt = (unsigned long [r_width / f_width]) {}, \ | ||
93 | .enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)]) \ | ||
94 | |||
95 | struct pinmux_data_reg { | ||
96 | unsigned long reg, reg_width, reg_shadow; | ||
97 | pinmux_enum_t *enum_ids; | ||
98 | }; | ||
99 | |||
100 | #define PINMUX_DATA_REG(name, r, r_width) \ | ||
101 | .reg = r, .reg_width = r_width, \ | ||
102 | .enum_ids = (pinmux_enum_t [r_width]) \ | ||
103 | |||
104 | struct pinmux_range { | ||
105 | pinmux_enum_t begin; | ||
106 | pinmux_enum_t end; | ||
107 | pinmux_enum_t force; | ||
108 | }; | ||
109 | |||
110 | struct pinmux_info { | ||
111 | char *name; | ||
112 | pinmux_enum_t reserved_id; | ||
113 | struct pinmux_range data; | ||
114 | struct pinmux_range input; | ||
115 | struct pinmux_range input_pd; | ||
116 | struct pinmux_range input_pu; | ||
117 | struct pinmux_range output; | ||
118 | struct pinmux_range mark; | ||
119 | struct pinmux_range function; | ||
120 | |||
121 | unsigned first_gpio, last_gpio; | ||
122 | |||
123 | struct pinmux_gpio *gpios; | ||
124 | struct pinmux_cfg_reg *cfg_regs; | ||
125 | struct pinmux_data_reg *data_regs; | ||
126 | |||
127 | pinmux_enum_t *gpio_data; | ||
128 | unsigned int gpio_data_size; | ||
129 | |||
130 | unsigned long *gpio_in_use; | ||
131 | struct gpio_chip chip; | ||
132 | }; | ||
133 | |||
134 | int register_pinmux(struct pinmux_info *pip); | ||
135 | |||
136 | #endif /* __ASM_SH_GPIO_H */ | 56 | #endif /* __ASM_SH_GPIO_H */ |
diff --git a/arch/sh/include/asm/hardirq.h b/arch/sh/include/asm/hardirq.h index a5be4afa790..48b191313a9 100644 --- a/arch/sh/include/asm/hardirq.h +++ b/arch/sh/include/asm/hardirq.h | |||
@@ -1,9 +1,16 @@ | |||
1 | #ifndef __ASM_SH_HARDIRQ_H | 1 | #ifndef __ASM_SH_HARDIRQ_H |
2 | #define __ASM_SH_HARDIRQ_H | 2 | #define __ASM_SH_HARDIRQ_H |
3 | 3 | ||
4 | extern void ack_bad_irq(unsigned int irq); | 4 | #include <linux/threads.h> |
5 | #define ack_bad_irq ack_bad_irq | 5 | #include <linux/irq.h> |
6 | |||
7 | typedef struct { | ||
8 | unsigned int __softirq_pending; | ||
9 | unsigned int __nmi_count; /* arch dependent */ | ||
10 | } ____cacheline_aligned irq_cpustat_t; | ||
6 | 11 | ||
7 | #include <asm-generic/hardirq.h> | 12 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ |
13 | |||
14 | extern void ack_bad_irq(unsigned int irq); | ||
8 | 15 | ||
9 | #endif /* __ASM_SH_HARDIRQ_H */ | 16 | #endif /* __ASM_SH_HARDIRQ_H */ |
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 5be45ea4dfe..512cd3e9d0c 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h | |||
@@ -90,15 +90,11 @@ | |||
90 | #define ctrl_outl __raw_writel | 90 | #define ctrl_outl __raw_writel |
91 | #define ctrl_outq __raw_writeq | 91 | #define ctrl_outq __raw_writeq |
92 | 92 | ||
93 | extern unsigned long generic_io_base; | ||
94 | |||
93 | static inline void ctrl_delay(void) | 95 | static inline void ctrl_delay(void) |
94 | { | 96 | { |
95 | #ifdef CONFIG_CPU_SH4 | 97 | __raw_readw(generic_io_base); |
96 | __raw_readw(CCN_PVR); | ||
97 | #elif defined(P2SEG) | ||
98 | __raw_readw(P2SEG); | ||
99 | #else | ||
100 | #error "Need a dummy address for delay" | ||
101 | #endif | ||
102 | } | 98 | } |
103 | 99 | ||
104 | #define __BUILD_MEMORY_STRING(bwlq, type) \ | 100 | #define __BUILD_MEMORY_STRING(bwlq, type) \ |
@@ -186,8 +182,6 @@ __BUILD_MEMORY_STRING(q, u64) | |||
186 | 182 | ||
187 | #define IO_SPACE_LIMIT 0xffffffff | 183 | #define IO_SPACE_LIMIT 0xffffffff |
188 | 184 | ||
189 | extern unsigned long generic_io_base; | ||
190 | |||
191 | /* | 185 | /* |
192 | * This function provides a method for the generic case where a | 186 | * This function provides a method for the generic case where a |
193 | * board-specific ioport_map simply needs to return the port + some | 187 | * board-specific ioport_map simply needs to return the port + some |
@@ -246,7 +240,7 @@ void __iounmap(void __iomem *addr); | |||
246 | static inline void __iomem * | 240 | static inline void __iomem * |
247 | __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) | 241 | __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) |
248 | { | 242 | { |
249 | #if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) | 243 | #if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB) |
250 | unsigned long last_addr = offset + size - 1; | 244 | unsigned long last_addr = offset + size - 1; |
251 | #endif | 245 | #endif |
252 | void __iomem *ret; | 246 | void __iomem *ret; |
@@ -255,7 +249,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) | |||
255 | if (ret) | 249 | if (ret) |
256 | return ret; | 250 | return ret; |
257 | 251 | ||
258 | #if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) | 252 | #if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB) |
259 | /* | 253 | /* |
260 | * For P1 and P2 space this is trivial, as everything is already | 254 | * For P1 and P2 space this is trivial, as everything is already |
261 | * mapped. Uncached access for P1 addresses are done through P2. | 255 | * mapped. Uncached access for P1 addresses are done through P2. |
diff --git a/arch/sh/include/asm/irqflags.h b/arch/sh/include/asm/irqflags.h index 46e71da5be6..a741153b41c 100644 --- a/arch/sh/include/asm/irqflags.h +++ b/arch/sh/include/asm/irqflags.h | |||
@@ -1,34 +1,9 @@ | |||
1 | #ifndef __ASM_SH_IRQFLAGS_H | 1 | #ifndef __ASM_SH_IRQFLAGS_H |
2 | #define __ASM_SH_IRQFLAGS_H | 2 | #define __ASM_SH_IRQFLAGS_H |
3 | 3 | ||
4 | #ifdef CONFIG_SUPERH32 | 4 | #define RAW_IRQ_DISABLED 0xf0 |
5 | #include "irqflags_32.h" | 5 | #define RAW_IRQ_ENABLED 0x00 |
6 | #else | ||
7 | #include "irqflags_64.h" | ||
8 | #endif | ||
9 | 6 | ||
10 | #define raw_local_save_flags(flags) \ | 7 | #include <asm-generic/irqflags.h> |
11 | do { (flags) = __raw_local_save_flags(); } while (0) | ||
12 | |||
13 | static inline int raw_irqs_disabled_flags(unsigned long flags) | ||
14 | { | ||
15 | return (flags != 0); | ||
16 | } | ||
17 | |||
18 | static inline int raw_irqs_disabled(void) | ||
19 | { | ||
20 | unsigned long flags = __raw_local_save_flags(); | ||
21 | |||
22 | return raw_irqs_disabled_flags(flags); | ||
23 | } | ||
24 | |||
25 | #define raw_local_irq_save(flags) \ | ||
26 | do { (flags) = __raw_local_irq_save(); } while (0) | ||
27 | |||
28 | static inline void raw_local_irq_restore(unsigned long flags) | ||
29 | { | ||
30 | if ((flags & 0xf0) != 0xf0) | ||
31 | raw_local_irq_enable(); | ||
32 | } | ||
33 | 8 | ||
34 | #endif /* __ASM_SH_IRQFLAGS_H */ | 9 | #endif /* __ASM_SH_IRQFLAGS_H */ |
diff --git a/arch/sh/include/asm/irqflags_32.h b/arch/sh/include/asm/irqflags_32.h deleted file mode 100644 index 60218f54134..00000000000 --- a/arch/sh/include/asm/irqflags_32.h +++ /dev/null | |||
@@ -1,99 +0,0 @@ | |||
1 | #ifndef __ASM_SH_IRQFLAGS_32_H | ||
2 | #define __ASM_SH_IRQFLAGS_32_H | ||
3 | |||
4 | static inline void raw_local_irq_enable(void) | ||
5 | { | ||
6 | unsigned long __dummy0, __dummy1; | ||
7 | |||
8 | __asm__ __volatile__ ( | ||
9 | "stc sr, %0\n\t" | ||
10 | "and %1, %0\n\t" | ||
11 | #ifdef CONFIG_CPU_HAS_SR_RB | ||
12 | "stc r6_bank, %1\n\t" | ||
13 | "or %1, %0\n\t" | ||
14 | #endif | ||
15 | "ldc %0, sr\n\t" | ||
16 | : "=&r" (__dummy0), "=r" (__dummy1) | ||
17 | : "1" (~0x000000f0) | ||
18 | : "memory" | ||
19 | ); | ||
20 | } | ||
21 | |||
22 | static inline void raw_local_irq_disable(void) | ||
23 | { | ||
24 | unsigned long flags; | ||
25 | |||
26 | __asm__ __volatile__ ( | ||
27 | "stc sr, %0\n\t" | ||
28 | "or #0xf0, %0\n\t" | ||
29 | "ldc %0, sr\n\t" | ||
30 | : "=&z" (flags) | ||
31 | : /* no inputs */ | ||
32 | : "memory" | ||
33 | ); | ||
34 | } | ||
35 | |||
36 | static inline void set_bl_bit(void) | ||
37 | { | ||
38 | unsigned long __dummy0, __dummy1; | ||
39 | |||
40 | __asm__ __volatile__ ( | ||
41 | "stc sr, %0\n\t" | ||
42 | "or %2, %0\n\t" | ||
43 | "and %3, %0\n\t" | ||
44 | "ldc %0, sr\n\t" | ||
45 | : "=&r" (__dummy0), "=r" (__dummy1) | ||
46 | : "r" (0x10000000), "r" (0xffffff0f) | ||
47 | : "memory" | ||
48 | ); | ||
49 | } | ||
50 | |||
51 | static inline void clear_bl_bit(void) | ||
52 | { | ||
53 | unsigned long __dummy0, __dummy1; | ||
54 | |||
55 | __asm__ __volatile__ ( | ||
56 | "stc sr, %0\n\t" | ||
57 | "and %2, %0\n\t" | ||
58 | "ldc %0, sr\n\t" | ||
59 | : "=&r" (__dummy0), "=r" (__dummy1) | ||
60 | : "1" (~0x10000000) | ||
61 | : "memory" | ||
62 | ); | ||
63 | } | ||
64 | |||
65 | static inline unsigned long __raw_local_save_flags(void) | ||
66 | { | ||
67 | unsigned long flags; | ||
68 | |||
69 | __asm__ __volatile__ ( | ||
70 | "stc sr, %0\n\t" | ||
71 | "and #0xf0, %0\n\t" | ||
72 | : "=&z" (flags) | ||
73 | : /* no inputs */ | ||
74 | : "memory" | ||
75 | ); | ||
76 | |||
77 | return flags; | ||
78 | } | ||
79 | |||
80 | static inline unsigned long __raw_local_irq_save(void) | ||
81 | { | ||
82 | unsigned long flags, __dummy; | ||
83 | |||
84 | __asm__ __volatile__ ( | ||
85 | "stc sr, %1\n\t" | ||
86 | "mov %1, %0\n\t" | ||
87 | "or #0xf0, %0\n\t" | ||
88 | "ldc %0, sr\n\t" | ||
89 | "mov %1, %0\n\t" | ||
90 | "and #0xf0, %0\n\t" | ||
91 | : "=&z" (flags), "=&r" (__dummy) | ||
92 | : /* no inputs */ | ||
93 | : "memory" | ||
94 | ); | ||
95 | |||
96 | return flags; | ||
97 | } | ||
98 | |||
99 | #endif /* __ASM_SH_IRQFLAGS_32_H */ | ||
diff --git a/arch/sh/include/asm/irqflags_64.h b/arch/sh/include/asm/irqflags_64.h deleted file mode 100644 index 88f65222c1d..00000000000 --- a/arch/sh/include/asm/irqflags_64.h +++ /dev/null | |||
@@ -1,85 +0,0 @@ | |||
1 | #ifndef __ASM_SH_IRQFLAGS_64_H | ||
2 | #define __ASM_SH_IRQFLAGS_64_H | ||
3 | |||
4 | #include <cpu/registers.h> | ||
5 | |||
6 | #define SR_MASK_LL 0x00000000000000f0LL | ||
7 | #define SR_BL_LL 0x0000000010000000LL | ||
8 | |||
9 | static inline void raw_local_irq_enable(void) | ||
10 | { | ||
11 | unsigned long long __dummy0, __dummy1 = ~SR_MASK_LL; | ||
12 | |||
13 | __asm__ __volatile__("getcon " __SR ", %0\n\t" | ||
14 | "and %0, %1, %0\n\t" | ||
15 | "putcon %0, " __SR "\n\t" | ||
16 | : "=&r" (__dummy0) | ||
17 | : "r" (__dummy1)); | ||
18 | } | ||
19 | |||
20 | static inline void raw_local_irq_disable(void) | ||
21 | { | ||
22 | unsigned long long __dummy0, __dummy1 = SR_MASK_LL; | ||
23 | |||
24 | __asm__ __volatile__("getcon " __SR ", %0\n\t" | ||
25 | "or %0, %1, %0\n\t" | ||
26 | "putcon %0, " __SR "\n\t" | ||
27 | : "=&r" (__dummy0) | ||
28 | : "r" (__dummy1)); | ||
29 | } | ||
30 | |||
31 | static inline void set_bl_bit(void) | ||
32 | { | ||
33 | unsigned long long __dummy0, __dummy1 = SR_BL_LL; | ||
34 | |||
35 | __asm__ __volatile__("getcon " __SR ", %0\n\t" | ||
36 | "or %0, %1, %0\n\t" | ||
37 | "putcon %0, " __SR "\n\t" | ||
38 | : "=&r" (__dummy0) | ||
39 | : "r" (__dummy1)); | ||
40 | |||
41 | } | ||
42 | |||
43 | static inline void clear_bl_bit(void) | ||
44 | { | ||
45 | unsigned long long __dummy0, __dummy1 = ~SR_BL_LL; | ||
46 | |||
47 | __asm__ __volatile__("getcon " __SR ", %0\n\t" | ||
48 | "and %0, %1, %0\n\t" | ||
49 | "putcon %0, " __SR "\n\t" | ||
50 | : "=&r" (__dummy0) | ||
51 | : "r" (__dummy1)); | ||
52 | } | ||
53 | |||
54 | static inline unsigned long __raw_local_save_flags(void) | ||
55 | { | ||
56 | unsigned long long __dummy = SR_MASK_LL; | ||
57 | unsigned long flags; | ||
58 | |||
59 | __asm__ __volatile__ ( | ||
60 | "getcon " __SR ", %0\n\t" | ||
61 | "and %0, %1, %0" | ||
62 | : "=&r" (flags) | ||
63 | : "r" (__dummy)); | ||
64 | |||
65 | return flags; | ||
66 | } | ||
67 | |||
68 | static inline unsigned long __raw_local_irq_save(void) | ||
69 | { | ||
70 | unsigned long long __dummy0, __dummy1 = SR_MASK_LL; | ||
71 | unsigned long flags; | ||
72 | |||
73 | __asm__ __volatile__ ( | ||
74 | "getcon " __SR ", %1\n\t" | ||
75 | "or %1, r63, %0\n\t" | ||
76 | "or %1, %2, %1\n\t" | ||
77 | "putcon %1, " __SR "\n\t" | ||
78 | "and %0, %2, %0" | ||
79 | : "=&r" (flags), "=&r" (__dummy0) | ||
80 | : "r" (__dummy1)); | ||
81 | |||
82 | return flags; | ||
83 | } | ||
84 | |||
85 | #endif /* __ASM_SH_IRQFLAGS_64_H */ | ||
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index f5963037c9d..c7426ad9926 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h | |||
@@ -7,12 +7,16 @@ | |||
7 | #define PMB_PASCR 0xff000070 | 7 | #define PMB_PASCR 0xff000070 |
8 | #define PMB_IRMCR 0xff000078 | 8 | #define PMB_IRMCR 0xff000078 |
9 | 9 | ||
10 | #define PASCR_SE 0x80000000 | ||
11 | |||
10 | #define PMB_ADDR 0xf6100000 | 12 | #define PMB_ADDR 0xf6100000 |
11 | #define PMB_DATA 0xf7100000 | 13 | #define PMB_DATA 0xf7100000 |
12 | #define PMB_ENTRY_MAX 16 | 14 | #define PMB_ENTRY_MAX 16 |
13 | #define PMB_E_MASK 0x0000000f | 15 | #define PMB_E_MASK 0x0000000f |
14 | #define PMB_E_SHIFT 8 | 16 | #define PMB_E_SHIFT 8 |
15 | 17 | ||
18 | #define PMB_PFN_MASK 0xff000000 | ||
19 | |||
16 | #define PMB_SZ_16M 0x00000000 | 20 | #define PMB_SZ_16M 0x00000000 |
17 | #define PMB_SZ_64M 0x00000010 | 21 | #define PMB_SZ_64M 0x00000010 |
18 | #define PMB_SZ_128M 0x00000080 | 22 | #define PMB_SZ_128M 0x00000080 |
@@ -62,17 +66,10 @@ struct pmb_entry { | |||
62 | }; | 66 | }; |
63 | 67 | ||
64 | /* arch/sh/mm/pmb.c */ | 68 | /* arch/sh/mm/pmb.c */ |
65 | int __set_pmb_entry(unsigned long vpn, unsigned long ppn, | ||
66 | unsigned long flags, int *entry); | ||
67 | int set_pmb_entry(struct pmb_entry *pmbe); | ||
68 | void clear_pmb_entry(struct pmb_entry *pmbe); | ||
69 | struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, | ||
70 | unsigned long flags); | ||
71 | void pmb_free(struct pmb_entry *pmbe); | ||
72 | long pmb_remap(unsigned long virt, unsigned long phys, | 69 | long pmb_remap(unsigned long virt, unsigned long phys, |
73 | unsigned long size, unsigned long flags); | 70 | unsigned long size, unsigned long flags); |
74 | void pmb_unmap(unsigned long addr); | 71 | void pmb_unmap(unsigned long addr); |
72 | int pmb_init(void); | ||
75 | #endif /* __ASSEMBLY__ */ | 73 | #endif /* __ASSEMBLY__ */ |
76 | 74 | ||
77 | #endif /* __MMU_H */ | 75 | #endif /* __MMU_H */ |
78 | |||
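A hedged sketch of how a caller is expected to use the surviving PMB interface; the addresses, the size, the PMB_C flag and the error handling below are illustrative assumptions, not values taken from this patch.

	static int __init example_pmb_window(void)
	{
		long mapped;

		/* map a 64 MiB physical window into a fixed virtual area */
		mapped = pmb_remap(0xa8000000UL,	/* virtual (assumed) */
				   0x48000000UL,	/* physical (assumed) */
				   64 << 20,		/* size */
				   PMB_C);		/* cacheable (assumed flag) */
		if (mapped <= 0)
			return -ENOMEM;

		/* ... use the window ... */

		pmb_unmap(0xa8000000UL);
		return 0;
	}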
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index 4163950cd1c..67f3999b544 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h | |||
@@ -3,8 +3,6 @@ | |||
3 | 3 | ||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | #include <linux/dma-mapping.h> | ||
7 | |||
8 | /* Can be used to override the logic in pci_scan_bus for skipping | 6 | /* Can be used to override the logic in pci_scan_bus for skipping |
9 | already-configured bus numbers - to be used for buggy BIOSes | 7 | already-configured bus numbers - to be used for buggy BIOSes |
10 | or architectures with incomplete PCI setup by the loader */ | 8 | or architectures with incomplete PCI setup by the loader */ |
@@ -54,30 +52,18 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) | |||
54 | * address space. The networking and block device layers use | 52 | * address space. The networking and block device layers use |
55 | * this boolean for bounce buffer decisions. | 53 | * this boolean for bounce buffer decisions. |
56 | */ | 54 | */ |
57 | #define PCI_DMA_BUS_IS_PHYS (1) | 55 | #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys) |
58 | |||
59 | #include <linux/types.h> | ||
60 | #include <linux/slab.h> | ||
61 | #include <asm/scatterlist.h> | ||
62 | #include <linux/string.h> | ||
63 | #include <asm/io.h> | ||
64 | 56 | ||
65 | /* pci_unmap_{single,page} being a nop depends upon the | 57 | /* pci_unmap_{single,page} being a nop depends upon the |
66 | * configuration. | 58 | * configuration. |
67 | */ | 59 | */ |
68 | #ifdef CONFIG_SH_PCIDMA_NONCOHERENT | 60 | #ifdef CONFIG_DMA_NONCOHERENT |
69 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | 61 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME; |
70 | dma_addr_t ADDR_NAME; | 62 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME; |
71 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ | 63 | #define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) |
72 | __u32 LEN_NAME; | 64 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) |
73 | #define pci_unmap_addr(PTR, ADDR_NAME) \ | 65 | #define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) |
74 | ((PTR)->ADDR_NAME) | 66 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) |
75 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ | ||
76 | (((PTR)->ADDR_NAME) = (VAL)) | ||
77 | #define pci_unmap_len(PTR, LEN_NAME) \ | ||
78 | ((PTR)->LEN_NAME) | ||
79 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
80 | (((PTR)->LEN_NAME) = (VAL)) | ||
81 | #else | 67 | #else |
82 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) | 68 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) |
83 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) | 69 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) |
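The usage pattern behind these macros is the usual one from the DMA mapping documentation; a compact sketch, with the driver structure and function names invented for illustration:

	struct example_rx_desc {
		struct sk_buff *skb;
		DECLARE_PCI_UNMAP_ADDR(mapping)
		DECLARE_PCI_UNMAP_LEN(len)
	};

	static void example_map_rx(struct pci_dev *pdev,
				   struct example_rx_desc *rd,
				   void *buf, size_t size)
	{
		dma_addr_t handle = pci_map_single(pdev, buf, size,
						   PCI_DMA_FROMDEVICE);

		pci_unmap_addr_set(rd, mapping, handle);	/* compiles away if coherent */
		pci_unmap_len_set(rd, len, size);
	}

	static void example_unmap_rx(struct pci_dev *pdev,
				     struct example_rx_desc *rd)
	{
		pci_unmap_single(pdev, pci_unmap_addr(rd, mapping),
				 pci_unmap_len(rd, len), PCI_DMA_FROMDEVICE);
	}

With CONFIG_DMA_NONCOHERENT unset, the DECLARE/set/get macros collapse to nothing, so coherent parts pay no space cost in per-buffer descriptors.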
diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h index 11a302297ab..3d0c9f36d15 100644 --- a/arch/sh/include/asm/perf_event.h +++ b/arch/sh/include/asm/perf_event.h | |||
@@ -1,8 +1,35 @@ | |||
1 | #ifndef __ASM_SH_PERF_EVENT_H | 1 | #ifndef __ASM_SH_PERF_EVENT_H |
2 | #define __ASM_SH_PERF_EVENT_H | 2 | #define __ASM_SH_PERF_EVENT_H |
3 | 3 | ||
4 | /* SH only supports software events through this interface. */ | 4 | struct hw_perf_event; |
5 | static inline void set_perf_event_pending(void) {} | 5 | |
6 | #define MAX_HWEVENTS 2 | ||
7 | |||
8 | struct sh_pmu { | ||
9 | const char *name; | ||
10 | unsigned int num_events; | ||
11 | void (*disable_all)(void); | ||
12 | void (*enable_all)(void); | ||
13 | void (*enable)(struct hw_perf_event *, int); | ||
14 | void (*disable)(struct hw_perf_event *, int); | ||
15 | u64 (*read)(int); | ||
16 | int (*event_map)(int); | ||
17 | unsigned int max_events; | ||
18 | unsigned long raw_event_mask; | ||
19 | const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] | ||
20 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
21 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
22 | }; | ||
23 | |||
24 | /* arch/sh/kernel/perf_event.c */ | ||
25 | extern int register_sh_pmu(struct sh_pmu *); | ||
26 | extern int reserve_pmc_hardware(void); | ||
27 | extern void release_pmc_hardware(void); | ||
28 | |||
29 | static inline void set_perf_event_pending(void) | ||
30 | { | ||
31 | /* Nothing to see here, move along. */ | ||
32 | } | ||
6 | 33 | ||
7 | #define PERF_EVENT_INDEX_OFFSET 0 | 34 | #define PERF_EVENT_INDEX_OFFSET 0 |
8 | 35 | ||
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 4f3efa7d5a6..ba3046e4f06 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h | |||
@@ -75,13 +75,31 @@ static inline unsigned long long neff_sign_extend(unsigned long val) | |||
75 | #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) | 75 | #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) |
76 | #define FIRST_USER_ADDRESS 0 | 76 | #define FIRST_USER_ADDRESS 0 |
77 | 77 | ||
78 | #ifdef CONFIG_32BIT | 78 | #define PHYS_ADDR_MASK29 0x1fffffff |
79 | #define PHYS_ADDR_MASK 0xffffffff | 79 | #define PHYS_ADDR_MASK32 0xffffffff |
80 | |||
81 | #ifdef CONFIG_PMB | ||
82 | static inline unsigned long phys_addr_mask(void) | ||
83 | { | ||
84 | /* Is the MMU in 29bit mode? */ | ||
85 | if (__in_29bit_mode()) | ||
86 | return PHYS_ADDR_MASK29; | ||
87 | |||
88 | return PHYS_ADDR_MASK32; | ||
89 | } | ||
90 | #elif defined(CONFIG_32BIT) | ||
91 | static inline unsigned long phys_addr_mask(void) | ||
92 | { | ||
93 | return PHYS_ADDR_MASK32; | ||
94 | } | ||
80 | #else | 95 | #else |
81 | #define PHYS_ADDR_MASK 0x1fffffff | 96 | static inline unsigned long phys_addr_mask(void) |
97 | { | ||
98 | return PHYS_ADDR_MASK29; | ||
99 | } | ||
82 | #endif | 100 | #endif |
83 | 101 | ||
84 | #define PTE_PHYS_MASK (PHYS_ADDR_MASK & PAGE_MASK) | 102 | #define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK) |
85 | #define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT) | 103 | #define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT) |
86 | 104 | ||
87 | #ifdef CONFIG_SUPERH32 | 105 | #ifdef CONFIG_SUPERH32 |
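Worked example of the effect (assuming 4 KiB pages, so PAGE_MASK is 0xfffff000): in 29-bit mode PTE_PHYS_MASK becomes 0x1fffffff & 0xfffff000 = 0x1ffff000, while in 32-bit or PMB mode it becomes 0xffffffff & 0xfffff000 = 0xfffff000. The split between the PFN and the flag bits of a PTE is therefore decided at run time by phys_addr_mask() instead of by the old compile-time CONFIG_32BIT #ifdef.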
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h index c0d359ce337..b3543551620 100644 --- a/arch/sh/include/asm/pgtable_32.h +++ b/arch/sh/include/asm/pgtable_32.h | |||
@@ -108,7 +108,7 @@ static inline unsigned long copy_ptea_attributes(unsigned long x) | |||
108 | #define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE) | 108 | #define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE) |
109 | #endif | 109 | #endif |
110 | 110 | ||
111 | #define _PAGE_FLAGS_HARDWARE_MASK (PHYS_ADDR_MASK & ~(_PAGE_CLEAR_FLAGS)) | 111 | #define _PAGE_FLAGS_HARDWARE_MASK (phys_addr_mask() & ~(_PAGE_CLEAR_FLAGS)) |
112 | 112 | ||
113 | /* Hardware flags, page size encoding */ | 113 | /* Hardware flags, page size encoding */ |
114 | #if !defined(CONFIG_MMU) | 114 | #if !defined(CONFIG_MMU) |
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 9a8714945dc..1f3d6fab660 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h | |||
@@ -56,6 +56,7 @@ asmlinkage void __init sh_cpu_init(void); | |||
56 | #define SR_DSP 0x00001000 | 56 | #define SR_DSP 0x00001000 |
57 | #define SR_IMASK 0x000000f0 | 57 | #define SR_IMASK 0x000000f0 |
58 | #define SR_FD 0x00008000 | 58 | #define SR_FD 0x00008000 |
59 | #define SR_MD 0x40000000 | ||
59 | 60 | ||
60 | /* | 61 | /* |
61 | * DSP structure and data | 62 | * DSP structure and data |
@@ -136,7 +137,7 @@ struct mm_struct; | |||
136 | extern void release_thread(struct task_struct *); | 137 | extern void release_thread(struct task_struct *); |
137 | 138 | ||
138 | /* Prepare to copy thread state - unlazy all lazy status */ | 139 | /* Prepare to copy thread state - unlazy all lazy status */ |
139 | #define prepare_to_copy(tsk) do { } while (0) | 140 | void prepare_to_copy(struct task_struct *tsk); |
140 | 141 | ||
141 | /* | 142 | /* |
142 | * create a kernel thread without removing it from tasklists | 143 | * create a kernel thread without removing it from tasklists |
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h index 327cc2e4c97..e38d1d4c7f6 100644 --- a/arch/sh/include/asm/scatterlist.h +++ b/arch/sh/include/asm/scatterlist.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef __ASM_SH_SCATTERLIST_H | 1 | #ifndef __ASM_SH_SCATTERLIST_H |
2 | #define __ASM_SH_SCATTERLIST_H | 2 | #define __ASM_SH_SCATTERLIST_H |
3 | 3 | ||
4 | #define ISA_DMA_THRESHOLD PHYS_ADDR_MASK | 4 | #define ISA_DMA_THRESHOLD phys_addr_mask() |
5 | 5 | ||
6 | #include <asm-generic/scatterlist.h> | 6 | #include <asm-generic/scatterlist.h> |
7 | 7 | ||
diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index 5c8ea28ff7a..fe9c2a1ad04 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_SH_SUSPEND_H | 2 | #define _ASM_SH_SUSPEND_H |
3 | 3 | ||
4 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
5 | #include <linux/notifier.h> | ||
5 | static inline int arch_prepare_suspend(void) { return 0; } | 6 | static inline int arch_prepare_suspend(void) { return 0; } |
6 | 7 | ||
7 | #include <asm/ptrace.h> | 8 | #include <asm/ptrace.h> |
@@ -19,6 +20,69 @@ void sh_mobile_setup_cpuidle(void); | |||
19 | static inline void sh_mobile_setup_cpuidle(void) {} | 20 | static inline void sh_mobile_setup_cpuidle(void) {} |
20 | #endif | 21 | #endif |
21 | 22 | ||
23 | /* notifier chains for pre/post sleep hooks */ | ||
24 | extern struct atomic_notifier_head sh_mobile_pre_sleep_notifier_list; | ||
25 | extern struct atomic_notifier_head sh_mobile_post_sleep_notifier_list; | ||
26 | |||
27 | /* priority levels for notifiers */ | ||
28 | #define SH_MOBILE_SLEEP_BOARD 0 | ||
29 | #define SH_MOBILE_SLEEP_CPU 1 | ||
30 | #define SH_MOBILE_PRE(x) (x) | ||
31 | #define SH_MOBILE_POST(x) (-(x)) | ||
32 | |||
33 | /* board code registration function for self-refresh assembly snippets */ | ||
34 | void sh_mobile_register_self_refresh(unsigned long flags, | ||
35 | void *pre_start, void *pre_end, | ||
36 | void *post_start, void *post_end); | ||
37 | |||
38 | /* register structure for address/data information */ | ||
39 | struct sh_sleep_regs { | ||
40 | unsigned long stbcr; | ||
41 | unsigned long bar; | ||
42 | |||
43 | /* MMU */ | ||
44 | unsigned long pteh; | ||
45 | unsigned long ptel; | ||
46 | unsigned long ttb; | ||
47 | unsigned long tea; | ||
48 | unsigned long mmucr; | ||
49 | unsigned long ptea; | ||
50 | unsigned long pascr; | ||
51 | unsigned long irmcr; | ||
52 | |||
53 | /* Cache */ | ||
54 | unsigned long ccr; | ||
55 | unsigned long ramcr; | ||
56 | }; | ||
57 | |||
58 | /* data area for low-level sleep code */ | ||
59 | struct sh_sleep_data { | ||
60 | /* current sleep mode (SUSP_SH_...) */ | ||
61 | unsigned long mode; | ||
62 | |||
63 | /* addresses of board specific self-refresh snippets */ | ||
64 | unsigned long sf_pre; | ||
65 | unsigned long sf_post; | ||
66 | |||
67 | /* address of resume code */ | ||
68 | unsigned long resume; | ||
69 | |||
70 | /* register state saved and restored by the assembly code */ | ||
71 | unsigned long vbr; | ||
72 | unsigned long spc; | ||
73 | unsigned long sr; | ||
74 | unsigned long sp; | ||
75 | |||
76 | /* structure for keeping register addresses */ | ||
77 | struct sh_sleep_regs addr; | ||
78 | |||
79 | /* structure for saving/restoring register state */ | ||
80 | struct sh_sleep_regs data; | ||
81 | }; | ||
82 | |||
83 | /* a bitmap of supported sleep modes (SUSP_SH..) */ | ||
84 | extern unsigned long sh_mobile_sleep_supported; | ||
85 | |||
22 | #endif | 86 | #endif |
23 | 87 | ||
24 | /* flags passed to assembly suspend code */ | 88 | /* flags passed to assembly suspend code */ |
@@ -27,5 +91,6 @@ static inline void sh_mobile_setup_cpuidle(void) {} | |||
27 | #define SUSP_SH_RSTANDBY (1 << 2) /* SH-Mobile R-standby mode */ | 91 | #define SUSP_SH_RSTANDBY (1 << 2) /* SH-Mobile R-standby mode */ |
28 | #define SUSP_SH_USTANDBY (1 << 3) /* SH-Mobile U-standby mode */ | 92 | #define SUSP_SH_USTANDBY (1 << 3) /* SH-Mobile U-standby mode */ |
29 | #define SUSP_SH_SF (1 << 4) /* Enable self-refresh */ | 93 | #define SUSP_SH_SF (1 << 4) /* Enable self-refresh */ |
94 | #define SUSP_SH_MMU (1 << 5) /* Save/restore MMU and cache */ | ||
30 | 95 | ||
31 | #endif /* _ASM_SH_SUSPEND_H */ | 96 | #endif /* _ASM_SH_SUSPEND_H */ |
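A hedged sketch of how board code is expected to hook the new pre-sleep notifier chain; the callback and its contents are invented for illustration.

	#include <linux/init.h>
	#include <linux/notifier.h>
	#include <asm/suspend.h>

	static int example_board_pre_sleep(struct notifier_block *nb,
					   unsigned long mode, void *unused)
	{
		/* e.g. quiesce board peripherals before SUSP_SH_SF self-refresh */
		return NOTIFY_DONE;
	}

	static struct notifier_block example_board_pre_sleep_nb = {
		.notifier_call	= example_board_pre_sleep,
		.priority	= SH_MOBILE_PRE(SH_MOBILE_SLEEP_BOARD),
	};

	static int __init example_board_sleep_init(void)
	{
		atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list,
					       &example_board_pre_sleep_nb);
		return 0;
	}
	late_initcall(example_board_sleep_init);

The SH_MOBILE_PRE()/SH_MOBILE_POST() priority helpers order board hooks around the CPU-level ones on the shared chains.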
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index b5c5acdc8c0..c15415b4b16 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h | |||
@@ -171,10 +171,6 @@ BUILD_TRAP_HANDLER(fpu_error); | |||
171 | BUILD_TRAP_HANDLER(fpu_state_restore); | 171 | BUILD_TRAP_HANDLER(fpu_state_restore); |
172 | BUILD_TRAP_HANDLER(nmi); | 172 | BUILD_TRAP_HANDLER(nmi); |
173 | 173 | ||
174 | #ifdef CONFIG_BUG | ||
175 | extern void handle_BUG(struct pt_regs *); | ||
176 | #endif | ||
177 | |||
178 | #define arch_align_stack(x) (x) | 174 | #define arch_align_stack(x) (x) |
179 | 175 | ||
180 | struct mem_access { | 176 | struct mem_access { |
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h index 607d413f616..06814f5b59c 100644 --- a/arch/sh/include/asm/system_32.h +++ b/arch/sh/include/asm/system_32.h | |||
@@ -232,4 +232,33 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, | |||
232 | unsigned long r6, unsigned long r7, | 232 | unsigned long r6, unsigned long r7, |
233 | struct pt_regs __regs); | 233 | struct pt_regs __regs); |
234 | 234 | ||
235 | static inline void set_bl_bit(void) | ||
236 | { | ||
237 | unsigned long __dummy0, __dummy1; | ||
238 | |||
239 | __asm__ __volatile__ ( | ||
240 | "stc sr, %0\n\t" | ||
241 | "or %2, %0\n\t" | ||
242 | "and %3, %0\n\t" | ||
243 | "ldc %0, sr\n\t" | ||
244 | : "=&r" (__dummy0), "=r" (__dummy1) | ||
245 | : "r" (0x10000000), "r" (0xffffff0f) | ||
246 | : "memory" | ||
247 | ); | ||
248 | } | ||
249 | |||
250 | static inline void clear_bl_bit(void) | ||
251 | { | ||
252 | unsigned long __dummy0, __dummy1; | ||
253 | |||
254 | __asm__ __volatile__ ( | ||
255 | "stc sr, %0\n\t" | ||
256 | "and %2, %0\n\t" | ||
257 | "ldc %0, sr\n\t" | ||
258 | : "=&r" (__dummy0), "=r" (__dummy1) | ||
259 | : "1" (~0x10000000) | ||
260 | : "memory" | ||
261 | ); | ||
262 | } | ||
263 | |||
235 | #endif /* __ASM_SH_SYSTEM_32_H */ | 264 | #endif /* __ASM_SH_SYSTEM_32_H */ |
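The relocated helpers keep their usual role: SR.BL blocks exceptions and interrupts around a short sequence that must not be re-entered. A minimal sketch of the intended usage (the surrounding function is invented):

	static void example_blocked_sequence(void)
	{
		set_bl_bit();
		/* ... e.g. poke standby registers or issue a sleep instruction ... */
		clear_bl_bit();
	}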
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h index 8e4a03e7966..ab1dd917ea8 100644 --- a/arch/sh/include/asm/system_64.h +++ b/arch/sh/include/asm/system_64.h | |||
@@ -12,6 +12,7 @@ | |||
12 | * License. See the file "COPYING" in the main directory of this archive | 12 | * License. See the file "COPYING" in the main directory of this archive |
13 | * for more details. | 13 | * for more details. |
14 | */ | 14 | */ |
15 | #include <cpu/registers.h> | ||
15 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
16 | 17 | ||
17 | /* | 18 | /* |
@@ -47,4 +48,29 @@ static inline reg_size_t register_align(void *val) | |||
47 | return (unsigned long long)(signed long long)(signed long)val; | 48 | return (unsigned long long)(signed long long)(signed long)val; |
48 | } | 49 | } |
49 | 50 | ||
51 | #define SR_BL_LL 0x0000000010000000LL | ||
52 | |||
53 | static inline void set_bl_bit(void) | ||
54 | { | ||
55 | unsigned long long __dummy0, __dummy1 = SR_BL_LL; | ||
56 | |||
57 | __asm__ __volatile__("getcon " __SR ", %0\n\t" | ||
58 | "or %0, %1, %0\n\t" | ||
59 | "putcon %0, " __SR "\n\t" | ||
60 | : "=&r" (__dummy0) | ||
61 | : "r" (__dummy1)); | ||
62 | |||
63 | } | ||
64 | |||
65 | static inline void clear_bl_bit(void) | ||
66 | { | ||
67 | unsigned long long __dummy0, __dummy1 = ~SR_BL_LL; | ||
68 | |||
69 | __asm__ __volatile__("getcon " __SR ", %0\n\t" | ||
70 | "and %0, %1, %0\n\t" | ||
71 | "putcon %0, " __SR "\n\t" | ||
72 | : "=&r" (__dummy0) | ||
73 | : "r" (__dummy1)); | ||
74 | } | ||
75 | |||
50 | #endif /* __ASM_SH_SYSTEM_64_H */ | 76 | #endif /* __ASM_SH_SYSTEM_64_H */ |
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index bdeb9d46d17..1f3d927e226 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h | |||
@@ -19,6 +19,7 @@ struct thread_info { | |||
19 | struct task_struct *task; /* main task structure */ | 19 | struct task_struct *task; /* main task structure */ |
20 | struct exec_domain *exec_domain; /* execution domain */ | 20 | struct exec_domain *exec_domain; /* execution domain */ |
21 | unsigned long flags; /* low level flags */ | 21 | unsigned long flags; /* low level flags */ |
22 | __u32 status; /* thread synchronous flags */ | ||
22 | __u32 cpu; | 23 | __u32 cpu; |
23 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | 24 | int preempt_count; /* 0 => preemptable, <0 => BUG */ |
24 | mm_segment_t addr_limit; /* thread address space */ | 25 | mm_segment_t addr_limit; /* thread address space */ |
@@ -50,6 +51,7 @@ struct thread_info { | |||
50 | .task = &tsk, \ | 51 | .task = &tsk, \ |
51 | .exec_domain = &default_exec_domain, \ | 52 | .exec_domain = &default_exec_domain, \ |
52 | .flags = 0, \ | 53 | .flags = 0, \ |
54 | .status = 0, \ | ||
53 | .cpu = 0, \ | 55 | .cpu = 0, \ |
54 | .preempt_count = INIT_PREEMPT_COUNT, \ | 56 | .preempt_count = INIT_PREEMPT_COUNT, \ |
55 | .addr_limit = KERNEL_DS, \ | 57 | .addr_limit = KERNEL_DS, \ |
@@ -111,13 +113,11 @@ extern void free_thread_info(struct thread_info *ti); | |||
111 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ | 113 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ |
112 | #define TIF_SIGPENDING 1 /* signal pending */ | 114 | #define TIF_SIGPENDING 1 /* signal pending */ |
113 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | 115 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ |
114 | #define TIF_RESTORE_SIGMASK 3 /* restore signal mask in do_signal() */ | ||
115 | #define TIF_SINGLESTEP 4 /* singlestepping active */ | 116 | #define TIF_SINGLESTEP 4 /* singlestepping active */ |
116 | #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ | 117 | #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ |
117 | #define TIF_SECCOMP 6 /* secure computing */ | 118 | #define TIF_SECCOMP 6 /* secure computing */ |
118 | #define TIF_NOTIFY_RESUME 7 /* callback before returning to user */ | 119 | #define TIF_NOTIFY_RESUME 7 /* callback before returning to user */ |
119 | #define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ | 120 | #define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ |
120 | #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ | ||
121 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ | 121 | #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ |
122 | #define TIF_MEMDIE 18 | 122 | #define TIF_MEMDIE 18 |
123 | #define TIF_FREEZE 19 /* Freezing for suspend */ | 123 | #define TIF_FREEZE 19 /* Freezing for suspend */ |
@@ -125,13 +125,11 @@ extern void free_thread_info(struct thread_info *ti); | |||
125 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 125 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
126 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 126 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
127 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | 127 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
128 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) | ||
129 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | 128 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
130 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | 129 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
131 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | 130 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
132 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 131 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
133 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) | 132 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) |
134 | #define _TIF_USEDFPU (1 << TIF_USEDFPU) | ||
135 | #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) | 133 | #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) |
136 | #define _TIF_FREEZE (1 << TIF_FREEZE) | 134 | #define _TIF_FREEZE (1 << TIF_FREEZE) |
137 | 135 | ||
@@ -149,13 +147,33 @@ extern void free_thread_info(struct thread_info *ti); | |||
149 | /* work to do on any return to u-space */ | 147 | /* work to do on any return to u-space */ |
150 | #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ | 148 | #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ |
151 | _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ | 149 | _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ |
152 | _TIF_SINGLESTEP | _TIF_RESTORE_SIGMASK | \ | 150 | _TIF_SINGLESTEP | _TIF_NOTIFY_RESUME | \ |
153 | _TIF_NOTIFY_RESUME | _TIF_SYSCALL_TRACEPOINT) | 151 | _TIF_SYSCALL_TRACEPOINT) |
154 | 152 | ||
155 | /* work to do on interrupt/exception return */ | 153 | /* work to do on interrupt/exception return */ |
156 | #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ | 154 | #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ |
157 | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)) | 155 | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)) |
158 | 156 | ||
157 | /* | ||
158 | * Thread-synchronous status. | ||
159 | * | ||
160 | * This is different from the flags in that nobody else | ||
161 | * ever touches our thread-synchronous status, so we don't | ||
162 | * have to worry about atomic accesses. | ||
163 | */ | ||
164 | #define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */ | ||
165 | #define TS_USEDFPU 0x0002 /* FPU used by this task this quantum */ | ||
166 | |||
167 | #ifndef __ASSEMBLY__ | ||
168 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
169 | static inline void set_restore_sigmask(void) | ||
170 | { | ||
171 | struct thread_info *ti = current_thread_info(); | ||
172 | ti->status |= TS_RESTORE_SIGMASK; | ||
173 | set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); | ||
174 | } | ||
175 | #endif /* !__ASSEMBLY__ */ | ||
176 | |||
159 | #endif /* __KERNEL__ */ | 177 | #endif /* __KERNEL__ */ |
160 | 178 | ||
161 | #endif /* __ASM_SH_THREAD_INFO_H */ | 179 | #endif /* __ASM_SH_THREAD_INFO_H */ |
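A minimal sketch, mirroring how other architectures consume the bit, of the do_signal()-side counterpart to set_restore_sigmask(); the helper name is invented and the real signal code is not part of this hunk.

	static inline void example_restore_saved_sigmask(void)
	{
		struct thread_info *ti = current_thread_info();

		if (ti->status & TS_RESTORE_SIGMASK) {
			ti->status &= ~TS_RESTORE_SIGMASK;
			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
		}
	}

Because ti->status is only ever touched by the owning task, the flag can be set and cleared with plain loads and stores instead of the atomic bitops that TIF_RESTORE_SIGMASK required.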
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h index 65e7bd2f224..37cdadd975a 100644 --- a/arch/sh/include/asm/topology.h +++ b/arch/sh/include/asm/topology.h | |||
@@ -40,6 +40,14 @@ | |||
40 | 40 | ||
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | #define mc_capable() (1) | ||
44 | |||
45 | const struct cpumask *cpu_coregroup_mask(unsigned int cpu); | ||
46 | |||
47 | extern cpumask_t cpu_core_map[NR_CPUS]; | ||
48 | |||
49 | #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) | ||
50 | |||
43 | #include <asm-generic/topology.h> | 51 | #include <asm-generic/topology.h> |
44 | 52 | ||
45 | #endif /* _ASM_SH_TOPOLOGY_H */ | 53 | #endif /* _ASM_SH_TOPOLOGY_H */ |
diff --git a/arch/sh/include/asm/ubc.h b/arch/sh/include/asm/ubc.h index 4ca4b771737..9bf96168443 100644 --- a/arch/sh/include/asm/ubc.h +++ b/arch/sh/include/asm/ubc.h | |||
@@ -60,16 +60,5 @@ | |||
60 | #define BRCR_UBDE (1 << 0) | 60 | #define BRCR_UBDE (1 << 0) |
61 | #endif | 61 | #endif |
62 | 62 | ||
63 | #ifndef __ASSEMBLY__ | ||
64 | /* arch/sh/kernel/cpu/ubc.S */ | ||
65 | extern void ubc_sleep(void); | ||
66 | |||
67 | #ifdef CONFIG_UBC_WAKEUP | ||
68 | extern void ubc_wakeup(void); | ||
69 | #else | ||
70 | #define ubc_wakeup() do { } while (0) | ||
71 | #endif | ||
72 | #endif | ||
73 | |||
74 | #endif /* __KERNEL__ */ | 63 | #endif /* __KERNEL__ */ |
75 | #endif /* __ASM_SH_UBC_H */ | 64 | #endif /* __ASM_SH_UBC_H */ |
diff --git a/arch/sh/include/asm/watchdog.h b/arch/sh/include/asm/watchdog.h index 2fe7cee9e43..19dfff5c851 100644 --- a/arch/sh/include/asm/watchdog.h +++ b/arch/sh/include/asm/watchdog.h | |||
@@ -2,6 +2,8 @@ | |||
2 | * include/asm-sh/watchdog.h | 2 | * include/asm-sh/watchdog.h |
3 | * | 3 | * |
4 | * Copyright (C) 2002, 2003 Paul Mundt | 4 | * Copyright (C) 2002, 2003 Paul Mundt |
5 | * Copyright (C) 2009 Siemens AG | ||
6 | * Copyright (C) 2009 Valentin Sitdikov | ||
5 | * | 7 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the | 9 | * under the terms of the GNU General Public License as published by the |
@@ -61,6 +63,61 @@ | |||
61 | #define WTCSR_CKS_2048 0x06 | 63 | #define WTCSR_CKS_2048 0x06 |
62 | #define WTCSR_CKS_4096 0x07 | 64 | #define WTCSR_CKS_4096 0x07 |
63 | 65 | ||
66 | #if defined(CONFIG_CPU_SUBTYPE_SH7785) || defined(CONFIG_CPU_SUBTYPE_SH7780) | ||
67 | /** | ||
68 | * sh_wdt_read_cnt - Read from Counter | ||
69 | * Reads back the WTCNT value. | ||
70 | */ | ||
71 | static inline __u32 sh_wdt_read_cnt(void) | ||
72 | { | ||
73 | return ctrl_inl(WTCNT_R); | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * sh_wdt_write_cnt - Write to Counter | ||
78 | * @val: Value to write | ||
79 | * | ||
80 | * Writes the given value @val to the lower byte of the timer counter. | ||
81 | * The upper byte is set manually on each write. | ||
82 | */ | ||
83 | static inline void sh_wdt_write_cnt(__u32 val) | ||
84 | { | ||
85 | ctrl_outl((WTCNT_HIGH << 24) | (__u32)val, WTCNT); | ||
86 | } | ||
87 | |||
88 | /** | ||
90 | * sh_wdt_write_bst - Write to Boot Status Register | ||
91 | * @val: Value to write | ||
92 | * | ||
93 | * Writes the given value @val to the lower byte of the boot status | ||
94 | * register (WTBST). The upper byte carries the write-enable prefix. | ||
94 | */ | ||
95 | static inline void sh_wdt_write_bst(__u32 val) | ||
96 | { | ||
97 | ctrl_outl((WTBST_HIGH << 24) | (__u32)val, WTBST); | ||
98 | } | ||
99 | /** | ||
100 | * sh_wdt_read_csr - Read from Control/Status Register | ||
101 | * | ||
102 | * Reads back the WTCSR value. | ||
103 | */ | ||
104 | static inline __u32 sh_wdt_read_csr(void) | ||
105 | { | ||
106 | return ctrl_inl(WTCSR_R); | ||
107 | } | ||
108 | |||
109 | /** | ||
110 | * sh_wdt_write_csr - Write to Control/Status Register | ||
111 | * @val: Value to write | ||
112 | * | ||
113 | * Writes the given value @val to the lower byte of the control/status | ||
114 | * register. The upper byte is set manually on each write. | ||
115 | */ | ||
116 | static inline void sh_wdt_write_csr(__u32 val) | ||
117 | { | ||
118 | ctrl_outl((WTCSR_HIGH << 24) | (__u32)val, WTCSR); | ||
119 | } | ||
120 | #else | ||
64 | /** | 121 | /** |
65 | * sh_wdt_read_cnt - Read from Counter | 122 | * sh_wdt_read_cnt - Read from Counter |
66 | * Reads back the WTCNT value. | 123 | * Reads back the WTCNT value. |
@@ -103,6 +160,6 @@ static inline void sh_wdt_write_csr(__u8 val) | |||
103 | { | 160 | { |
104 | ctrl_outw((WTCSR_HIGH << 8) | (__u16)val, WTCSR); | 161 | ctrl_outw((WTCSR_HIGH << 8) | (__u16)val, WTCSR); |
105 | } | 162 | } |
106 | 163 | #endif /* CONFIG_CPU_SUBTYPE_SH7785 || CONFIG_CPU_SUBTYPE_SH7780 */ | |
107 | #endif /* __KERNEL__ */ | 164 | #endif /* __KERNEL__ */ |
108 | #endif /* __ASM_SH_WATCHDOG_H */ | 165 | #endif /* __ASM_SH_WATCHDOG_H */ |
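A hedged sketch of a watchdog refresh on the SH7780/SH7785 variants using the new 32-bit accessor; restarting the counter from zero mirrors what the generic SH watchdog driver does, but the value is an assumption here.

	static void example_wdt_keepalive(void)
	{
		sh_wdt_write_cnt(0);	/* restart the count from zero */
	}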
diff --git a/arch/sh/include/cpu-sh4/cpu/watchdog.h b/arch/sh/include/cpu-sh4/cpu/watchdog.h index 259f6a0ce23..7672301d0c7 100644 --- a/arch/sh/include/cpu-sh4/cpu/watchdog.h +++ b/arch/sh/include/cpu-sh4/cpu/watchdog.h | |||
@@ -2,6 +2,8 @@ | |||
2 | * include/asm-sh/cpu-sh4/watchdog.h | 2 | * include/asm-sh/cpu-sh4/watchdog.h |
3 | * | 3 | * |
4 | * Copyright (C) 2002, 2003 Paul Mundt | 4 | * Copyright (C) 2002, 2003 Paul Mundt |
5 | * Copyright (C) 2009 Siemens AG | ||
6 | * Copyright (C) 2009 Sitdikov Valentin | ||
5 | * | 7 | * |
6 | * This file is subject to the terms and conditions of the GNU General Public | 8 | * This file is subject to the terms and conditions of the GNU General Public |
7 | * License. See the file "COPYING" in the main directory of this archive | 9 | * License. See the file "COPYING" in the main directory of this archive |
@@ -10,9 +12,20 @@ | |||
10 | #ifndef __ASM_CPU_SH4_WATCHDOG_H | 12 | #ifndef __ASM_CPU_SH4_WATCHDOG_H |
11 | #define __ASM_CPU_SH4_WATCHDOG_H | 13 | #define __ASM_CPU_SH4_WATCHDOG_H |
12 | 14 | ||
15 | #if defined(CONFIG_CPU_SUBTYPE_SH7785) || defined(CONFIG_CPU_SUBTYPE_SH7780) | ||
16 | /* Prefix definition */ | ||
17 | #define WTBST_HIGH 0x55 | ||
18 | /* Register definitions */ | ||
19 | #define WTCNT_R 0xffcc0010 /*WDTCNT*/ | ||
20 | #define WTCSR 0xffcc0004 /*WDTCSR*/ | ||
21 | #define WTCNT 0xffcc0000 /*WDTST*/ | ||
22 | #define WTST WTCNT | ||
23 | #define WTBST 0xffcc0008 /*WDTBST*/ | ||
24 | #else | ||
13 | /* Register definitions */ | 25 | /* Register definitions */ |
14 | #define WTCNT 0xffc00008 | 26 | #define WTCNT 0xffc00008 |
15 | #define WTCSR 0xffc0000c | 27 | #define WTCSR 0xffc0000c |
28 | #endif | ||
16 | 29 | ||
17 | /* Bit definitions */ | 30 | /* Bit definitions */ |
18 | #define WTCSR_TME 0x80 | 31 | #define WTCSR_TME 0x80 |
diff --git a/arch/sh/include/mach-ecovec24/mach/partner-jet-setup.txt b/arch/sh/include/mach-ecovec24/mach/partner-jet-setup.txt index 8b8e4fa1fee..cc737b80733 100644 --- a/arch/sh/include/mach-ecovec24/mach/partner-jet-setup.txt +++ b/arch/sh/include/mach-ecovec24/mach/partner-jet-setup.txt | |||
@@ -22,13 +22,12 @@ ED 0xff000010, 0x00000004 | |||
22 | LIST "setup clocks" | 22 | LIST "setup clocks" |
23 | ED 0xa4150024, 0x00004000 | 23 | ED 0xa4150024, 0x00004000 |
24 | ED 0xa4150000, 0x8E003508 | 24 | ED 0xa4150000, 0x8E003508 |
25 | ED 0xa4150004, 0x00000000 | ||
26 | 25 | ||
27 | WAIT 1 | 26 | WAIT 1 |
28 | 27 | ||
29 | LIST "BSC" | 28 | LIST "BSC" |
30 | ED 0xff800020, 0xa5a50000 | 29 | ED 0xff800020, 0xa5a50000 |
31 | ED 0xfec10000, 0x00000013 | 30 | ED 0xfec10000, 0x00001013 |
32 | ED 0xfec10004, 0x11110400 | 31 | ED 0xfec10004, 0x11110400 |
33 | ED 0xfec10024, 0x00000440 | 32 | ED 0xfec10024, 0x00000440 |
34 | 33 | ||
diff --git a/arch/sh/include/mach-se/mach/se7722.h b/arch/sh/include/mach-se/mach/se7722.h index e971d9a82f4..16505bfb8a9 100644 --- a/arch/sh/include/mach-se/mach/se7722.h +++ b/arch/sh/include/mach-se/mach/se7722.h | |||
@@ -92,18 +92,11 @@ | |||
92 | #define SE7722_FPGA_IRQ_MRSHPC1 3 /* IRQ1 */ | 92 | #define SE7722_FPGA_IRQ_MRSHPC1 3 /* IRQ1 */ |
93 | #define SE7722_FPGA_IRQ_MRSHPC2 4 /* IRQ1 */ | 93 | #define SE7722_FPGA_IRQ_MRSHPC2 4 /* IRQ1 */ |
94 | #define SE7722_FPGA_IRQ_MRSHPC3 5 /* IRQ1 */ | 94 | #define SE7722_FPGA_IRQ_MRSHPC3 5 /* IRQ1 */ |
95 | |||
96 | #define SE7722_FPGA_IRQ_NR 6 | 95 | #define SE7722_FPGA_IRQ_NR 6 |
97 | #define SE7722_FPGA_IRQ_BASE 110 | ||
98 | |||
99 | #define MRSHPC_IRQ3 (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_MRSHPC3) | ||
100 | #define MRSHPC_IRQ2 (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_MRSHPC2) | ||
101 | #define MRSHPC_IRQ1 (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_MRSHPC1) | ||
102 | #define MRSHPC_IRQ0 (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_MRSHPC0) | ||
103 | #define SMC_IRQ (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_SMC) | ||
104 | #define USB_IRQ (SE7722_FPGA_IRQ_BASE + SE7722_FPGA_IRQ_USB) | ||
105 | 96 | ||
106 | /* arch/sh/boards/se/7722/irq.c */ | 97 | /* arch/sh/boards/se/7722/irq.c */ |
98 | extern unsigned int se7722_fpga_irq[]; | ||
99 | |||
107 | void init_se7722_IRQ(void); | 100 | void init_se7722_IRQ(void); |
108 | 101 | ||
109 | #define __IO_PREFIX se7722 | 102 | #define __IO_PREFIX se7722 |
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile index a2d0a40f384..0471a3eb25e 100644 --- a/arch/sh/kernel/Makefile +++ b/arch/sh/kernel/Makefile | |||
@@ -9,8 +9,12 @@ ifdef CONFIG_FUNCTION_TRACER | |||
9 | CFLAGS_REMOVE_ftrace.o = -pg | 9 | CFLAGS_REMOVE_ftrace.o = -pg |
10 | endif | 10 | endif |
11 | 11 | ||
12 | obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \ | 12 | CFLAGS_REMOVE_return_address.o = -pg |
13 | machvec.o nmi_debug.o process_$(BITS).o ptrace_$(BITS).o \ | 13 | |
14 | obj-y := debugtraps.o dma-nommu.o dumpstack.o \ | ||
15 | idle.o io.o io_generic.o irq.o \ | ||
16 | irq_$(BITS).o machvec.o nmi_debug.o process_$(BITS).o \ | ||
17 | ptrace_$(BITS).o return_address.o \ | ||
14 | setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o \ | 18 | setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o \ |
15 | syscalls_$(BITS).o time.o topology.o traps.o \ | 19 | syscalls_$(BITS).o time.o topology.o traps.o \ |
16 | traps_$(BITS).o unwinder.o | 20 | traps_$(BITS).o unwinder.o |
@@ -28,13 +32,13 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | |||
28 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 32 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
29 | obj-$(CONFIG_IO_TRAPPED) += io_trapped.o | 33 | obj-$(CONFIG_IO_TRAPPED) += io_trapped.o |
30 | obj-$(CONFIG_KPROBES) += kprobes.o | 34 | obj-$(CONFIG_KPROBES) += kprobes.o |
31 | obj-$(CONFIG_GENERIC_GPIO) += gpio.o | ||
32 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 35 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
33 | obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o | 36 | obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o |
34 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 37 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
35 | obj-$(CONFIG_DUMP_CODE) += disassemble.o | 38 | obj-$(CONFIG_DUMP_CODE) += disassemble.o |
36 | obj-$(CONFIG_HIBERNATION) += swsusp.o | 39 | obj-$(CONFIG_HIBERNATION) += swsusp.o |
37 | obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o | 40 | obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o |
41 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o | ||
38 | 42 | ||
39 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o | 43 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o |
40 | 44 | ||
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c index d218e808294..08a2be775b6 100644 --- a/arch/sh/kernel/asm-offsets.c +++ b/arch/sh/kernel/asm-offsets.c | |||
@@ -34,5 +34,28 @@ int main(void) | |||
34 | DEFINE(PBE_NEXT, offsetof(struct pbe, next)); | 34 | DEFINE(PBE_NEXT, offsetof(struct pbe, next)); |
35 | DEFINE(SWSUSP_ARCH_REGS_SIZE, sizeof(struct swsusp_arch_regs)); | 35 | DEFINE(SWSUSP_ARCH_REGS_SIZE, sizeof(struct swsusp_arch_regs)); |
36 | #endif | 36 | #endif |
37 | |||
38 | DEFINE(SH_SLEEP_MODE, offsetof(struct sh_sleep_data, mode)); | ||
39 | DEFINE(SH_SLEEP_SF_PRE, offsetof(struct sh_sleep_data, sf_pre)); | ||
40 | DEFINE(SH_SLEEP_SF_POST, offsetof(struct sh_sleep_data, sf_post)); | ||
41 | DEFINE(SH_SLEEP_RESUME, offsetof(struct sh_sleep_data, resume)); | ||
42 | DEFINE(SH_SLEEP_VBR, offsetof(struct sh_sleep_data, vbr)); | ||
43 | DEFINE(SH_SLEEP_SPC, offsetof(struct sh_sleep_data, spc)); | ||
44 | DEFINE(SH_SLEEP_SR, offsetof(struct sh_sleep_data, sr)); | ||
45 | DEFINE(SH_SLEEP_SP, offsetof(struct sh_sleep_data, sp)); | ||
46 | DEFINE(SH_SLEEP_BASE_ADDR, offsetof(struct sh_sleep_data, addr)); | ||
47 | DEFINE(SH_SLEEP_BASE_DATA, offsetof(struct sh_sleep_data, data)); | ||
48 | DEFINE(SH_SLEEP_REG_STBCR, offsetof(struct sh_sleep_regs, stbcr)); | ||
49 | DEFINE(SH_SLEEP_REG_BAR, offsetof(struct sh_sleep_regs, bar)); | ||
50 | DEFINE(SH_SLEEP_REG_PTEH, offsetof(struct sh_sleep_regs, pteh)); | ||
51 | DEFINE(SH_SLEEP_REG_PTEL, offsetof(struct sh_sleep_regs, ptel)); | ||
52 | DEFINE(SH_SLEEP_REG_TTB, offsetof(struct sh_sleep_regs, ttb)); | ||
53 | DEFINE(SH_SLEEP_REG_TEA, offsetof(struct sh_sleep_regs, tea)); | ||
54 | DEFINE(SH_SLEEP_REG_MMUCR, offsetof(struct sh_sleep_regs, mmucr)); | ||
55 | DEFINE(SH_SLEEP_REG_PTEA, offsetof(struct sh_sleep_regs, ptea)); | ||
56 | DEFINE(SH_SLEEP_REG_PASCR, offsetof(struct sh_sleep_regs, pascr)); | ||
57 | DEFINE(SH_SLEEP_REG_IRMCR, offsetof(struct sh_sleep_regs, irmcr)); | ||
58 | DEFINE(SH_SLEEP_REG_CCR, offsetof(struct sh_sleep_regs, ccr)); | ||
59 | DEFINE(SH_SLEEP_REG_RAMCR, offsetof(struct sh_sleep_regs, ramcr)); | ||
37 | return 0; | 60 | return 0; |
38 | } | 61 | } |
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile index 3d6b9312dc4..d97c803719e 100644 --- a/arch/sh/kernel/cpu/Makefile +++ b/arch/sh/kernel/cpu/Makefile | |||
@@ -15,7 +15,6 @@ obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/ | |||
15 | 15 | ||
16 | # Common interfaces. | 16 | # Common interfaces. |
17 | 17 | ||
18 | obj-$(CONFIG_UBC_WAKEUP) += ubc.o | ||
19 | obj-$(CONFIG_SH_ADC) += adc.o | 18 | obj-$(CONFIG_SH_ADC) += adc.o |
20 | obj-$(CONFIG_SH_CLK_CPG) += clock-cpg.o | 19 | obj-$(CONFIG_SH_CLK_CPG) += clock-cpg.o |
21 | 20 | ||
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c index e932ebef473..89b4b76c0d7 100644 --- a/arch/sh/kernel/cpu/init.c +++ b/arch/sh/kernel/cpu/init.c | |||
@@ -75,16 +75,11 @@ static void __init expmask_init(void) | |||
75 | /* | 75 | /* |
76 | * Future proofing. | 76 | * Future proofing. |
77 | * | 77 | * |
78 | * Disable support for slottable sleep instruction | 78 | * Disable support for slottable sleep instruction, non-nop |
79 | * and non-nop instructions in the rte delay slot. | 79 | * instructions in the rte delay slot, and associative writes to |
80 | * the memory-mapped cache array. | ||
80 | */ | 81 | */ |
81 | expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP); | 82 | expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW); |
82 | |||
83 | /* | ||
84 | * Enable associative writes to the memory-mapped cache array | ||
85 | * until the cache flush ops have been rewritten. | ||
86 | */ | ||
87 | expmask |= EXPMASK_MMCAW; | ||
88 | 83 | ||
89 | __raw_writel(expmask, EXPMASK); | 84 | __raw_writel(expmask, EXPMASK); |
90 | ctrl_barrier(); | 85 | ctrl_barrier(); |
@@ -311,12 +306,12 @@ asmlinkage void __init sh_cpu_init(void) | |||
311 | if (fpu_disabled) { | 306 | if (fpu_disabled) { |
312 | printk("FPU Disabled\n"); | 307 | printk("FPU Disabled\n"); |
313 | current_cpu_data.flags &= ~CPU_HAS_FPU; | 308 | current_cpu_data.flags &= ~CPU_HAS_FPU; |
314 | disable_fpu(); | ||
315 | } | 309 | } |
316 | 310 | ||
317 | /* FPU initialization */ | 311 | /* FPU initialization */ |
312 | disable_fpu(); | ||
318 | if ((current_cpu_data.flags & CPU_HAS_FPU)) { | 313 | if ((current_cpu_data.flags & CPU_HAS_FPU)) { |
319 | clear_thread_flag(TIF_USEDFPU); | 314 | current_thread_info()->status &= ~TS_USEDFPU; |
320 | clear_used_math(); | 315 | clear_used_math(); |
321 | } | 316 | } |
322 | 317 | ||
@@ -338,17 +333,6 @@ asmlinkage void __init sh_cpu_init(void) | |||
338 | } | 333 | } |
339 | #endif | 334 | #endif |
340 | 335 | ||
341 | /* | ||
342 | * Some brain-damaged loaders decided it would be a good idea to put | ||
343 | * the UBC to sleep. This causes some issues when it comes to things | ||
344 | * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So .. | ||
345 | * we wake it up and hope that all is well. | ||
346 | */ | ||
347 | #ifdef CONFIG_SUPERH32 | ||
348 | if (raw_smp_processor_id() == 0) | ||
349 | ubc_wakeup(); | ||
350 | #endif | ||
351 | |||
352 | speculative_execution_init(); | 336 | speculative_execution_init(); |
353 | expmask_init(); | 337 | expmask_init(); |
354 | } | 338 | } |
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c index 6df2fb98eb3..d395ce5740e 100644 --- a/arch/sh/kernel/cpu/sh2a/fpu.c +++ b/arch/sh/kernel/cpu/sh2a/fpu.c | |||
@@ -25,14 +25,12 @@ | |||
25 | 25 | ||
26 | /* | 26 | /* |
27 | * Save FPU registers onto task structure. | 27 | * Save FPU registers onto task structure. |
28 | * Assume called with FPU enabled (SR.FD=0). | ||
29 | */ | 28 | */ |
30 | void | 29 | void |
31 | save_fpu(struct task_struct *tsk, struct pt_regs *regs) | 30 | save_fpu(struct task_struct *tsk) |
32 | { | 31 | { |
33 | unsigned long dummy; | 32 | unsigned long dummy; |
34 | 33 | ||
35 | clear_tsk_thread_flag(tsk, TIF_USEDFPU); | ||
36 | enable_fpu(); | 34 | enable_fpu(); |
37 | asm volatile("sts.l fpul, @-%0\n\t" | 35 | asm volatile("sts.l fpul, @-%0\n\t" |
38 | "sts.l fpscr, @-%0\n\t" | 36 | "sts.l fpscr, @-%0\n\t" |
@@ -60,7 +58,6 @@ save_fpu(struct task_struct *tsk, struct pt_regs *regs) | |||
60 | : "memory"); | 58 | : "memory"); |
61 | 59 | ||
62 | disable_fpu(); | 60 | disable_fpu(); |
63 | release_fpu(regs); | ||
64 | } | 61 | } |
65 | 62 | ||
66 | static void | 63 | static void |
@@ -598,31 +595,31 @@ BUILD_TRAP_HANDLER(fpu_error) | |||
598 | struct task_struct *tsk = current; | 595 | struct task_struct *tsk = current; |
599 | TRAP_HANDLER_DECL; | 596 | TRAP_HANDLER_DECL; |
600 | 597 | ||
601 | save_fpu(tsk, regs); | 598 | __unlazy_fpu(tsk, regs); |
602 | if (ieee_fpe_handler(regs)) { | 599 | if (ieee_fpe_handler(regs)) { |
603 | tsk->thread.fpu.hard.fpscr &= | 600 | tsk->thread.fpu.hard.fpscr &= |
604 | ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); | 601 | ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); |
605 | grab_fpu(regs); | 602 | grab_fpu(regs); |
606 | restore_fpu(tsk); | 603 | restore_fpu(tsk); |
607 | set_tsk_thread_flag(tsk, TIF_USEDFPU); | 604 | task_thread_info(tsk)->status |= TS_USEDFPU; |
608 | return; | 605 | return; |
609 | } | 606 | } |
610 | 607 | ||
611 | force_sig(SIGFPE, tsk); | 608 | force_sig(SIGFPE, tsk); |
612 | } | 609 | } |
613 | 610 | ||
614 | BUILD_TRAP_HANDLER(fpu_state_restore) | 611 | void fpu_state_restore(struct pt_regs *regs) |
615 | { | 612 | { |
616 | struct task_struct *tsk = current; | 613 | struct task_struct *tsk = current; |
617 | TRAP_HANDLER_DECL; | ||
618 | 614 | ||
619 | grab_fpu(regs); | 615 | grab_fpu(regs); |
620 | if (!user_mode(regs)) { | 616 | if (unlikely(!user_mode(regs))) { |
621 | printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); | 617 | printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); |
618 | BUG(); | ||
622 | return; | 619 | return; |
623 | } | 620 | } |
624 | 621 | ||
625 | if (used_math()) { | 622 | if (likely(used_math())) { |
626 | /* Using the FPU again. */ | 623 | /* Using the FPU again. */ |
627 | restore_fpu(tsk); | 624 | restore_fpu(tsk); |
628 | } else { | 625 | } else { |
@@ -630,5 +627,13 @@ BUILD_TRAP_HANDLER(fpu_state_restore) | |||
630 | fpu_init(); | 627 | fpu_init(); |
631 | set_used_math(); | 628 | set_used_math(); |
632 | } | 629 | } |
633 | set_tsk_thread_flag(tsk, TIF_USEDFPU); | 630 | task_thread_info(tsk)->status |= TS_USEDFPU; |
631 | tsk->fpu_counter++; | ||
632 | } | ||
633 | |||
634 | BUILD_TRAP_HANDLER(fpu_state_restore) | ||
635 | { | ||
636 | TRAP_HANDLER_DECL; | ||
637 | |||
638 | fpu_state_restore(regs); | ||
634 | } | 639 | } |
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index bb407ef0b91..3f7e2a22c7c 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S | |||
@@ -297,41 +297,8 @@ ENTRY(vbr_base) | |||
297 | ! | 297 | ! |
298 | .balign 256,0,256 | 298 | .balign 256,0,256 |
299 | general_exception: | 299 | general_exception: |
300 | #ifndef CONFIG_CPU_SUBTYPE_SHX3 | ||
301 | bra handle_exception | 300 | bra handle_exception |
302 | sts pr, k3 ! save original pr value in k3 | 301 | sts pr, k3 ! save original pr value in k3 |
303 | #else | ||
304 | mov.l 1f, k4 | ||
305 | mov.l @k4, k4 | ||
306 | |||
307 | ! Is EXPEVT larger than 0x800? | ||
308 | mov #0x8, k0 | ||
309 | shll8 k0 | ||
310 | cmp/hs k0, k4 | ||
311 | bf 0f | ||
312 | |||
313 | ! then add 0x580 (k2 is 0xd80 or 0xda0) | ||
314 | mov #0x58, k0 | ||
315 | shll2 k0 | ||
316 | shll2 k0 | ||
317 | add k0, k4 | ||
318 | 0: | ||
319 | ! Setup stack and save DSP context (k0 contains original r15 on return) | ||
320 | bsr prepare_stack | ||
321 | nop | ||
322 | |||
323 | ! Save registers / Switch to bank 0 | ||
324 | mov k4, k2 ! keep vector in k2 | ||
325 | mov.l 1f, k4 ! SR bits to clear in k4 | ||
326 | bsr save_regs ! needs original pr value in k3 | ||
327 | nop | ||
328 | |||
329 | bra handle_exception_special | ||
330 | nop | ||
331 | |||
332 | .align 2 | ||
333 | 1: .long EXPEVT | ||
334 | #endif | ||
335 | 302 | ||
336 | ! prepare_stack() | 303 | ! prepare_stack() |
337 | ! - roll back gRB | 304 | ! - roll back gRB |
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile index 203b18347b8..3a1dbc70983 100644 --- a/arch/sh/kernel/cpu/sh4/Makefile +++ b/arch/sh/kernel/cpu/sh4/Makefile | |||
@@ -9,6 +9,11 @@ obj-$(CONFIG_HIBERNATION) += $(addprefix ../sh3/, swsusp.o) | |||
9 | obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o | 9 | obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o |
10 | obj-$(CONFIG_SH_STORE_QUEUES) += sq.o | 10 | obj-$(CONFIG_SH_STORE_QUEUES) += sq.o |
11 | 11 | ||
12 | # Perf events | ||
13 | perf-$(CONFIG_CPU_SUBTYPE_SH7750) := perf_event.o | ||
14 | perf-$(CONFIG_CPU_SUBTYPE_SH7750S) := perf_event.o | ||
15 | perf-$(CONFIG_CPU_SUBTYPE_SH7091) := perf_event.o | ||
16 | |||
12 | # CPU subtype setup | 17 | # CPU subtype setup |
13 | obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o | 18 | obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o |
14 | obj-$(CONFIG_CPU_SUBTYPE_SH7750R) += setup-sh7750.o | 19 | obj-$(CONFIG_CPU_SUBTYPE_SH7750R) += setup-sh7750.o |
@@ -27,4 +32,5 @@ endif | |||
27 | # Additional clocks by subtype | 32 | # Additional clocks by subtype |
28 | clock-$(CONFIG_CPU_SUBTYPE_SH4_202) += clock-sh4-202.o | 33 | clock-$(CONFIG_CPU_SUBTYPE_SH4_202) += clock-sh4-202.o |
29 | 34 | ||
30 | obj-y += $(clock-y) | 35 | obj-y += $(clock-y) |
36 | obj-$(CONFIG_PERF_EVENTS) += $(perf-y) | ||
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c index e3ea5411da6..e97857aec8a 100644 --- a/arch/sh/kernel/cpu/sh4/fpu.c +++ b/arch/sh/kernel/cpu/sh4/fpu.c | |||
@@ -41,13 +41,11 @@ static unsigned int fpu_exception_flags; | |||
41 | 41 | ||
42 | /* | 42 | /* |
43 | * Save FPU registers onto task structure. | 43 | * Save FPU registers onto task structure. |
44 | * Assume called with FPU enabled (SR.FD=0). | ||
45 | */ | 44 | */ |
46 | void save_fpu(struct task_struct *tsk, struct pt_regs *regs) | 45 | void save_fpu(struct task_struct *tsk) |
47 | { | 46 | { |
48 | unsigned long dummy; | 47 | unsigned long dummy; |
49 | 48 | ||
50 | clear_tsk_thread_flag(tsk, TIF_USEDFPU); | ||
51 | enable_fpu(); | 49 | enable_fpu(); |
52 | asm volatile ("sts.l fpul, @-%0\n\t" | 50 | asm volatile ("sts.l fpul, @-%0\n\t" |
53 | "sts.l fpscr, @-%0\n\t" | 51 | "sts.l fpscr, @-%0\n\t" |
@@ -92,7 +90,6 @@ void save_fpu(struct task_struct *tsk, struct pt_regs *regs) | |||
92 | :"memory"); | 90 | :"memory"); |
93 | 91 | ||
94 | disable_fpu(); | 92 | disable_fpu(); |
95 | release_fpu(regs); | ||
96 | } | 93 | } |
97 | 94 | ||
98 | static void restore_fpu(struct task_struct *tsk) | 95 | static void restore_fpu(struct task_struct *tsk) |
@@ -285,7 +282,6 @@ static int ieee_fpe_handler(struct pt_regs *regs) | |||
285 | /* fcnvsd */ | 282 | /* fcnvsd */ |
286 | struct task_struct *tsk = current; | 283 | struct task_struct *tsk = current; |
287 | 284 | ||
288 | save_fpu(tsk, regs); | ||
289 | if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR)) | 285 | if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR)) |
290 | /* FPU error */ | 286 | /* FPU error */ |
291 | denormal_to_double(&tsk->thread.fpu.hard, | 287 | denormal_to_double(&tsk->thread.fpu.hard, |
@@ -462,7 +458,7 @@ BUILD_TRAP_HANDLER(fpu_error) | |||
462 | struct task_struct *tsk = current; | 458 | struct task_struct *tsk = current; |
463 | TRAP_HANDLER_DECL; | 459 | TRAP_HANDLER_DECL; |
464 | 460 | ||
465 | save_fpu(tsk, regs); | 461 | __unlazy_fpu(tsk, regs); |
466 | fpu_exception_flags = 0; | 462 | fpu_exception_flags = 0; |
467 | if (ieee_fpe_handler(regs)) { | 463 | if (ieee_fpe_handler(regs)) { |
468 | tsk->thread.fpu.hard.fpscr &= | 464 | tsk->thread.fpu.hard.fpscr &= |
@@ -473,7 +469,7 @@ BUILD_TRAP_HANDLER(fpu_error) | |||
473 | tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10); | 469 | tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10); |
474 | grab_fpu(regs); | 470 | grab_fpu(regs); |
475 | restore_fpu(tsk); | 471 | restore_fpu(tsk); |
476 | set_tsk_thread_flag(tsk, TIF_USEDFPU); | 472 | task_thread_info(tsk)->status |= TS_USEDFPU; |
477 | if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) & | 473 | if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) & |
478 | (fpu_exception_flags >> 2)) == 0) { | 474 | (fpu_exception_flags >> 2)) == 0) { |
479 | return; | 475 | return; |
@@ -483,18 +479,18 @@ BUILD_TRAP_HANDLER(fpu_error) | |||
483 | force_sig(SIGFPE, tsk); | 479 | force_sig(SIGFPE, tsk); |
484 | } | 480 | } |
485 | 481 | ||
486 | BUILD_TRAP_HANDLER(fpu_state_restore) | 482 | void fpu_state_restore(struct pt_regs *regs) |
487 | { | 483 | { |
488 | struct task_struct *tsk = current; | 484 | struct task_struct *tsk = current; |
489 | TRAP_HANDLER_DECL; | ||
490 | 485 | ||
491 | grab_fpu(regs); | 486 | grab_fpu(regs); |
492 | if (!user_mode(regs)) { | 487 | if (unlikely(!user_mode(regs))) { |
493 | printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); | 488 | printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); |
489 | BUG(); | ||
494 | return; | 490 | return; |
495 | } | 491 | } |
496 | 492 | ||
497 | if (used_math()) { | 493 | if (likely(used_math())) { |
498 | /* Using the FPU again. */ | 494 | /* Using the FPU again. */ |
499 | restore_fpu(tsk); | 495 | restore_fpu(tsk); |
500 | } else { | 496 | } else { |
@@ -502,5 +498,13 @@ BUILD_TRAP_HANDLER(fpu_state_restore) | |||
502 | fpu_init(); | 498 | fpu_init(); |
503 | set_used_math(); | 499 | set_used_math(); |
504 | } | 500 | } |
505 | set_tsk_thread_flag(tsk, TIF_USEDFPU); | 501 | task_thread_info(tsk)->status |= TS_USEDFPU; |
502 | tsk->fpu_counter++; | ||
503 | } | ||
504 | |||
505 | BUILD_TRAP_HANDLER(fpu_state_restore) | ||
506 | { | ||
507 | TRAP_HANDLER_DECL; | ||
508 | |||
509 | fpu_state_restore(regs); | ||
506 | } | 510 | } |
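For reference, a sketch of what the __unlazy_fpu() helper used above is assumed to look like with the new TS_USEDFPU status bit; the real definition lives in asm/fpu.h and is not part of this hunk.

	static inline void example_unlazy_fpu(struct task_struct *tsk,
					      struct pt_regs *regs)
	{
		if (task_thread_info(tsk)->status & TS_USEDFPU) {
			task_thread_info(tsk)->status &= ~TS_USEDFPU;
			save_fpu(tsk);		/* new single-argument form */
			release_fpu(regs);	/* set SR.FD again */
		}
	}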
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c new file mode 100644 index 00000000000..7f9ecc9c2d0 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/perf_event.c | |||
@@ -0,0 +1,253 @@ | |||
1 | /* | ||
2 | * Performance events support for SH7750-style performance counters | ||
3 | * | ||
4 | * Copyright (C) 2009 Paul Mundt | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/io.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/perf_event.h> | ||
15 | #include <asm/processor.h> | ||
16 | |||
17 | #define PM_CR_BASE 0xff000084 /* 16-bit */ | ||
18 | #define PM_CTR_BASE 0xff100004 /* 32-bit */ | ||
19 | |||
20 | #define PMCR(n) (PM_CR_BASE + ((n) * 0x04)) | ||
21 | #define PMCTRH(n) (PM_CTR_BASE + 0x00 + ((n) * 0x08)) | ||
22 | #define PMCTRL(n) (PM_CTR_BASE + 0x04 + ((n) * 0x08)) | ||
23 | |||
24 | #define PMCR_PMM_MASK 0x0000003f | ||
25 | |||
26 | #define PMCR_CLKF 0x00000100 | ||
27 | #define PMCR_PMCLR 0x00002000 | ||
28 | #define PMCR_PMST 0x00004000 | ||
29 | #define PMCR_PMEN 0x00008000 | ||
30 | |||
31 | static struct sh_pmu sh7750_pmu; | ||
32 | |||
33 | /* | ||
34 | * There are a number of events supported by each counter (33 in total). | ||
35 | * Since we have 2 counters, each counter will take the event code as it | ||
36 | * corresponds to the PMCR PMM setting. Each counter can be configured | ||
37 | * independently. | ||
38 | * | ||
39 | * Event Code Description | ||
40 | * ---------- ----------- | ||
41 | * | ||
42 | * 0x01 Operand read access | ||
43 | * 0x02 Operand write access | ||
44 | * 0x03 UTLB miss | ||
45 | * 0x04 Operand cache read miss | ||
46 | * 0x05 Operand cache write miss | ||
47 | * 0x06 Instruction fetch (w/ cache) | ||
48 | * 0x07 Instruction TLB miss | ||
49 | * 0x08 Instruction cache miss | ||
50 | * 0x09 All operand accesses | ||
51 | * 0x0a All instruction accesses | ||
52 | * 0x0b OC RAM operand access | ||
53 | * 0x0d On-chip I/O space access | ||
54 | * 0x0e Operand access (r/w) | ||
55 | * 0x0f Operand cache miss (r/w) | ||
56 | * 0x10 Branch instruction | ||
57 | * 0x11 Branch taken | ||
58 | * 0x12 BSR/BSRF/JSR | ||
59 | * 0x13 Instruction execution | ||
60 | * 0x14 Instruction execution in parallel | ||
61 | * 0x15 FPU Instruction execution | ||
62 | * 0x16 Interrupt | ||
63 | * 0x17 NMI | ||
64 | * 0x18 trapa instruction execution | ||
65 | * 0x19 UBCA match | ||
66 | * 0x1a UBCB match | ||
67 | * 0x21 Instruction cache fill | ||
68 | * 0x22 Operand cache fill | ||
69 | * 0x23 Elapsed time | ||
70 | * 0x24 Pipeline freeze by I-cache miss | ||
71 | * 0x25 Pipeline freeze by D-cache miss | ||
72 | * 0x27 Pipeline freeze by branch instruction | ||
73 | * 0x28 Pipeline freeze by CPU register | ||
74 | * 0x29 Pipeline freeze by FPU | ||
75 | */ | ||
76 | |||
77 | static const int sh7750_general_events[] = { | ||
78 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0023, | ||
79 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x000a, | ||
80 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0006, /* I-cache */ | ||
81 | [PERF_COUNT_HW_CACHE_MISSES] = 0x0008, /* I-cache */ | ||
82 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0010, | ||
83 | [PERF_COUNT_HW_BRANCH_MISSES] = -1, | ||
84 | [PERF_COUNT_HW_BUS_CYCLES] = -1, | ||
85 | }; | ||
86 | |||
87 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
88 | |||
89 | static const int sh7750_cache_events | ||
90 | [PERF_COUNT_HW_CACHE_MAX] | ||
91 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
92 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
93 | { | ||
94 | [ C(L1D) ] = { | ||
95 | [ C(OP_READ) ] = { | ||
96 | [ C(RESULT_ACCESS) ] = 0x0001, | ||
97 | [ C(RESULT_MISS) ] = 0x0004, | ||
98 | }, | ||
99 | [ C(OP_WRITE) ] = { | ||
100 | [ C(RESULT_ACCESS) ] = 0x0002, | ||
101 | [ C(RESULT_MISS) ] = 0x0005, | ||
102 | }, | ||
103 | [ C(OP_PREFETCH) ] = { | ||
104 | [ C(RESULT_ACCESS) ] = 0, | ||
105 | [ C(RESULT_MISS) ] = 0, | ||
106 | }, | ||
107 | }, | ||
108 | |||
109 | [ C(L1I) ] = { | ||
110 | [ C(OP_READ) ] = { | ||
111 | [ C(RESULT_ACCESS) ] = 0x0006, | ||
112 | [ C(RESULT_MISS) ] = 0x0008, | ||
113 | }, | ||
114 | [ C(OP_WRITE) ] = { | ||
115 | [ C(RESULT_ACCESS) ] = -1, | ||
116 | [ C(RESULT_MISS) ] = -1, | ||
117 | }, | ||
118 | [ C(OP_PREFETCH) ] = { | ||
119 | [ C(RESULT_ACCESS) ] = 0, | ||
120 | [ C(RESULT_MISS) ] = 0, | ||
121 | }, | ||
122 | }, | ||
123 | |||
124 | [ C(LL) ] = { | ||
125 | [ C(OP_READ) ] = { | ||
126 | [ C(RESULT_ACCESS) ] = 0, | ||
127 | [ C(RESULT_MISS) ] = 0, | ||
128 | }, | ||
129 | [ C(OP_WRITE) ] = { | ||
130 | [ C(RESULT_ACCESS) ] = 0, | ||
131 | [ C(RESULT_MISS) ] = 0, | ||
132 | }, | ||
133 | [ C(OP_PREFETCH) ] = { | ||
134 | [ C(RESULT_ACCESS) ] = 0, | ||
135 | [ C(RESULT_MISS) ] = 0, | ||
136 | }, | ||
137 | }, | ||
138 | |||
139 | [ C(DTLB) ] = { | ||
140 | [ C(OP_READ) ] = { | ||
141 | [ C(RESULT_ACCESS) ] = 0, | ||
142 | [ C(RESULT_MISS) ] = 0x0003, | ||
143 | }, | ||
144 | [ C(OP_WRITE) ] = { | ||
145 | [ C(RESULT_ACCESS) ] = 0, | ||
146 | [ C(RESULT_MISS) ] = 0, | ||
147 | }, | ||
148 | [ C(OP_PREFETCH) ] = { | ||
149 | [ C(RESULT_ACCESS) ] = 0, | ||
150 | [ C(RESULT_MISS) ] = 0, | ||
151 | }, | ||
152 | }, | ||
153 | |||
154 | [ C(ITLB) ] = { | ||
155 | [ C(OP_READ) ] = { | ||
156 | [ C(RESULT_ACCESS) ] = 0, | ||
157 | [ C(RESULT_MISS) ] = 0x0007, | ||
158 | }, | ||
159 | [ C(OP_WRITE) ] = { | ||
160 | [ C(RESULT_ACCESS) ] = -1, | ||
161 | [ C(RESULT_MISS) ] = -1, | ||
162 | }, | ||
163 | [ C(OP_PREFETCH) ] = { | ||
164 | [ C(RESULT_ACCESS) ] = -1, | ||
165 | [ C(RESULT_MISS) ] = -1, | ||
166 | }, | ||
167 | }, | ||
168 | |||
169 | [ C(BPU) ] = { | ||
170 | [ C(OP_READ) ] = { | ||
171 | [ C(RESULT_ACCESS) ] = -1, | ||
172 | [ C(RESULT_MISS) ] = -1, | ||
173 | }, | ||
174 | [ C(OP_WRITE) ] = { | ||
175 | [ C(RESULT_ACCESS) ] = -1, | ||
176 | [ C(RESULT_MISS) ] = -1, | ||
177 | }, | ||
178 | [ C(OP_PREFETCH) ] = { | ||
179 | [ C(RESULT_ACCESS) ] = -1, | ||
180 | [ C(RESULT_MISS) ] = -1, | ||
181 | }, | ||
182 | }, | ||
183 | }; | ||
184 | |||
185 | static int sh7750_event_map(int event) | ||
186 | { | ||
187 | return sh7750_general_events[event]; | ||
188 | } | ||
189 | |||
190 | static u64 sh7750_pmu_read(int idx) | ||
191 | { | ||
192 | return (u64)((u64)(__raw_readl(PMCTRH(idx)) & 0xffff) << 32) | | ||
193 | __raw_readl(PMCTRL(idx)); | ||
194 | } | ||
195 | |||
196 | static void sh7750_pmu_disable(struct hw_perf_event *hwc, int idx) | ||
197 | { | ||
198 | unsigned int tmp; | ||
199 | |||
200 | tmp = __raw_readw(PMCR(idx)); | ||
201 | tmp &= ~(PMCR_PMM_MASK | PMCR_PMEN); | ||
202 | __raw_writew(tmp, PMCR(idx)); | ||
203 | } | ||
204 | |||
205 | static void sh7750_pmu_enable(struct hw_perf_event *hwc, int idx) | ||
206 | { | ||
207 | __raw_writew(__raw_readw(PMCR(idx)) | PMCR_PMCLR, PMCR(idx)); | ||
208 | __raw_writew(hwc->config | PMCR_PMEN | PMCR_PMST, PMCR(idx)); | ||
209 | } | ||
210 | |||
211 | static void sh7750_pmu_disable_all(void) | ||
212 | { | ||
213 | int i; | ||
214 | |||
215 | for (i = 0; i < sh7750_pmu.num_events; i++) | ||
216 | __raw_writew(__raw_readw(PMCR(i)) & ~PMCR_PMEN, PMCR(i)); | ||
217 | } | ||
218 | |||
219 | static void sh7750_pmu_enable_all(void) | ||
220 | { | ||
221 | int i; | ||
222 | |||
223 | for (i = 0; i < sh7750_pmu.num_events; i++) | ||
224 | __raw_writew(__raw_readw(PMCR(i)) | PMCR_PMEN, PMCR(i)); | ||
225 | } | ||
226 | |||
227 | static struct sh_pmu sh7750_pmu = { | ||
228 | .name = "SH7750", | ||
229 | .num_events = 2, | ||
230 | .event_map = sh7750_event_map, | ||
231 | .max_events = ARRAY_SIZE(sh7750_general_events), | ||
232 | .raw_event_mask = PMCR_PMM_MASK, | ||
233 | .cache_events = &sh7750_cache_events, | ||
234 | .read = sh7750_pmu_read, | ||
235 | .disable = sh7750_pmu_disable, | ||
236 | .enable = sh7750_pmu_enable, | ||
237 | .disable_all = sh7750_pmu_disable_all, | ||
238 | .enable_all = sh7750_pmu_enable_all, | ||
239 | }; | ||
240 | |||
241 | static int __init sh7750_pmu_init(void) | ||
242 | { | ||
243 | /* | ||
244 | * Make sure this CPU actually has perf counters. | ||
245 | */ | ||
246 | if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) { | ||
247 | pr_notice("HW perf events unsupported, software events only.\n"); | ||
248 | return -ENODEV; | ||
249 | } | ||
250 | |||
251 | return register_sh_pmu(&sh7750_pmu); | ||
252 | } | ||
253 | arch_initcall(sh7750_pmu_init); | ||
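
With this PMU driver registered, the raw event codes listed in the comment above can be requested directly from userspace through the perf syscall. The following is a minimal sketch, not part of the patch, assuming a toolchain that exposes linux/perf_event.h and the __NR_perf_event_open syscall number; it counts the SH7750 "elapsed time" event (raw code 0x23) around a workload.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    int main(void)
    {
            struct perf_event_attr attr;
            long long count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;
            attr.config = 0x23;             /* "elapsed time" from the table above */
            attr.disabled = 1;

            /* measure the calling task on any CPU */
            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... workload under test ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("raw 0x23: %lld\n", count);

            close(fd);
            return 0;
    }
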
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index 490d5dc9e37..33bab477d2e 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile | |||
@@ -44,3 +44,4 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o | |||
44 | obj-y += $(clock-y) | 44 | obj-y += $(clock-y) |
45 | obj-$(CONFIG_SMP) += $(smp-y) | 45 | obj-$(CONFIG_SMP) += $(smp-y) |
46 | obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) | 46 | obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) |
47 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o | ||
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index dfe9192be63..9db743802f0 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c | |||
@@ -152,7 +152,7 @@ struct clk div6_clks[] = { | |||
152 | SH_CLK_DIV6("fsia_clk", &div3_clk, FCLKACR, 0), | 152 | SH_CLK_DIV6("fsia_clk", &div3_clk, FCLKACR, 0), |
153 | SH_CLK_DIV6("fsib_clk", &div3_clk, FCLKBCR, 0), | 153 | SH_CLK_DIV6("fsib_clk", &div3_clk, FCLKBCR, 0), |
154 | SH_CLK_DIV6("irda_clk", &div3_clk, IRDACLKCR, 0), | 154 | SH_CLK_DIV6("irda_clk", &div3_clk, IRDACLKCR, 0), |
155 | SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, 0), | 155 | SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, CLK_ENABLE_ON_INIT), |
156 | }; | 156 | }; |
157 | 157 | ||
158 | #define R_CLK (&r_clk) | 158 | #define R_CLK (&r_clk) |
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c new file mode 100644 index 00000000000..eddc21973fa --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/perf_event.c | |||
@@ -0,0 +1,269 @@ | |||
1 | /* | ||
2 | * Performance events support for SH-4A performance counters | ||
3 | * | ||
4 | * Copyright (C) 2009 Paul Mundt | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/io.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/perf_event.h> | ||
15 | #include <asm/processor.h> | ||
16 | |||
17 | #define PPC_CCBR(idx) (0xff200800 + (sizeof(u32) * idx)) | ||
18 | #define PPC_PMCTR(idx) (0xfc100000 + (sizeof(u32) * idx)) | ||
19 | |||
20 | #define CCBR_CIT_MASK (0x7ff << 6) | ||
21 | #define CCBR_DUC (1 << 3) | ||
22 | #define CCBR_CMDS (1 << 1) | ||
23 | #define CCBR_PPCE (1 << 0) | ||
24 | |||
25 | #define PPC_PMCAT 0xfc100080 | ||
26 | |||
27 | #define PMCAT_OVF3 (1 << 27) | ||
28 | #define PMCAT_CNN3 (1 << 26) | ||
29 | #define PMCAT_CLR3 (1 << 25) | ||
30 | #define PMCAT_OVF2 (1 << 19) | ||
31 | #define PMCAT_CLR2 (1 << 17) | ||
32 | #define PMCAT_OVF1 (1 << 11) | ||
33 | #define PMCAT_CNN1 (1 << 10) | ||
34 | #define PMCAT_CLR1 (1 << 9) | ||
35 | #define PMCAT_OVF0 (1 << 3) | ||
36 | #define PMCAT_CLR0 (1 << 1) | ||
37 | |||
38 | static struct sh_pmu sh4a_pmu; | ||
39 | |||
40 | /* | ||
41 | * Supported raw event codes: | ||
42 | * | ||
43 | * Event Code Description | ||
44 | * ---------- ----------- | ||
45 | * | ||
46 | * 0x0000 number of elapsed cycles | ||
47 | * 0x0200 number of elapsed cycles in privileged mode | ||
48 | * 0x0280 number of elapsed cycles while SR.BL is asserted | ||
49 | * 0x0202 instruction execution | ||
50 | * 0x0203 instruction execution in parallel | ||
51 | * 0x0204 number of unconditional branches | ||
52 | * 0x0208 number of exceptions | ||
53 | * 0x0209 number of interrupts | ||
54 | * 0x0220 UTLB miss caused by instruction fetch | ||
55 | * 0x0222 UTLB miss caused by operand access | ||
56 | * 0x02a0 number of ITLB misses | ||
57 | * 0x0028 number of accesses to instruction memories | ||
58 | * 0x0029 number of accesses to instruction cache | ||
59 | * 0x002a instruction cache miss | ||
60 | * 0x022e number of accesses to instruction X/Y memory | ||
61 | * 0x0030 number of reads to operand memories | ||
62 | * 0x0038 number of writes to operand memories | ||
63 | * 0x0031 number of operand cache read accesses | ||
64 | * 0x0039 number of operand cache write accesses | ||
65 | * 0x0032 operand cache read miss | ||
66 | * 0x003a operand cache write miss | ||
67 | * 0x0236 number of reads to operand X/Y memory | ||
68 | * 0x023e number of writes to operand X/Y memory | ||
69 | * 0x0237 number of reads to operand U memory | ||
70 | * 0x023f number of writes to operand U memory | ||
71 | * 0x0337 number of U memory read buffer misses | ||
72 | * 0x02b4 number of wait cycles due to operand read access | ||
73 | * 0x02bc number of wait cycles due to operand write access | ||
74 | * 0x0033 number of wait cycles due to operand cache read miss | ||
75 | * 0x003b number of wait cycles due to operand cache write miss | ||
76 | */ | ||
77 | |||
78 | /* | ||
79 | * Special reserved bits used by hardware emulators; their read values | ||
80 | * will vary, but writes must always be 0. | ||
81 | */ | ||
82 | #define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0)) | ||
83 | |||
84 | static const int sh4a_general_events[] = { | ||
85 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0000, | ||
86 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x0202, | ||
87 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0029, /* I-cache */ | ||
88 | [PERF_COUNT_HW_CACHE_MISSES] = 0x002a, /* I-cache */ | ||
89 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0204, | ||
90 | [PERF_COUNT_HW_BRANCH_MISSES] = -1, | ||
91 | [PERF_COUNT_HW_BUS_CYCLES] = -1, | ||
92 | }; | ||
93 | |||
94 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
95 | |||
96 | static const int sh4a_cache_events | ||
97 | [PERF_COUNT_HW_CACHE_MAX] | ||
98 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
99 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
100 | { | ||
101 | [ C(L1D) ] = { | ||
102 | [ C(OP_READ) ] = { | ||
103 | [ C(RESULT_ACCESS) ] = 0x0031, | ||
104 | [ C(RESULT_MISS) ] = 0x0032, | ||
105 | }, | ||
106 | [ C(OP_WRITE) ] = { | ||
107 | [ C(RESULT_ACCESS) ] = 0x0039, | ||
108 | [ C(RESULT_MISS) ] = 0x003a, | ||
109 | }, | ||
110 | [ C(OP_PREFETCH) ] = { | ||
111 | [ C(RESULT_ACCESS) ] = 0, | ||
112 | [ C(RESULT_MISS) ] = 0, | ||
113 | }, | ||
114 | }, | ||
115 | |||
116 | [ C(L1I) ] = { | ||
117 | [ C(OP_READ) ] = { | ||
118 | [ C(RESULT_ACCESS) ] = 0x0029, | ||
119 | [ C(RESULT_MISS) ] = 0x002a, | ||
120 | }, | ||
121 | [ C(OP_WRITE) ] = { | ||
122 | [ C(RESULT_ACCESS) ] = -1, | ||
123 | [ C(RESULT_MISS) ] = -1, | ||
124 | }, | ||
125 | [ C(OP_PREFETCH) ] = { | ||
126 | [ C(RESULT_ACCESS) ] = 0, | ||
127 | [ C(RESULT_MISS) ] = 0, | ||
128 | }, | ||
129 | }, | ||
130 | |||
131 | [ C(LL) ] = { | ||
132 | [ C(OP_READ) ] = { | ||
133 | [ C(RESULT_ACCESS) ] = 0x0030, | ||
134 | [ C(RESULT_MISS) ] = 0, | ||
135 | }, | ||
136 | [ C(OP_WRITE) ] = { | ||
137 | [ C(RESULT_ACCESS) ] = 0x0038, | ||
138 | [ C(RESULT_MISS) ] = 0, | ||
139 | }, | ||
140 | [ C(OP_PREFETCH) ] = { | ||
141 | [ C(RESULT_ACCESS) ] = 0, | ||
142 | [ C(RESULT_MISS) ] = 0, | ||
143 | }, | ||
144 | }, | ||
145 | |||
146 | [ C(DTLB) ] = { | ||
147 | [ C(OP_READ) ] = { | ||
148 | [ C(RESULT_ACCESS) ] = 0x0222, | ||
149 | [ C(RESULT_MISS) ] = 0x0220, | ||
150 | }, | ||
151 | [ C(OP_WRITE) ] = { | ||
152 | [ C(RESULT_ACCESS) ] = 0, | ||
153 | [ C(RESULT_MISS) ] = 0, | ||
154 | }, | ||
155 | [ C(OP_PREFETCH) ] = { | ||
156 | [ C(RESULT_ACCESS) ] = 0, | ||
157 | [ C(RESULT_MISS) ] = 0, | ||
158 | }, | ||
159 | }, | ||
160 | |||
161 | [ C(ITLB) ] = { | ||
162 | [ C(OP_READ) ] = { | ||
163 | [ C(RESULT_ACCESS) ] = 0, | ||
164 | [ C(RESULT_MISS) ] = 0x02a0, | ||
165 | }, | ||
166 | [ C(OP_WRITE) ] = { | ||
167 | [ C(RESULT_ACCESS) ] = -1, | ||
168 | [ C(RESULT_MISS) ] = -1, | ||
169 | }, | ||
170 | [ C(OP_PREFETCH) ] = { | ||
171 | [ C(RESULT_ACCESS) ] = -1, | ||
172 | [ C(RESULT_MISS) ] = -1, | ||
173 | }, | ||
174 | }, | ||
175 | |||
176 | [ C(BPU) ] = { | ||
177 | [ C(OP_READ) ] = { | ||
178 | [ C(RESULT_ACCESS) ] = -1, | ||
179 | [ C(RESULT_MISS) ] = -1, | ||
180 | }, | ||
181 | [ C(OP_WRITE) ] = { | ||
182 | [ C(RESULT_ACCESS) ] = -1, | ||
183 | [ C(RESULT_MISS) ] = -1, | ||
184 | }, | ||
185 | [ C(OP_PREFETCH) ] = { | ||
186 | [ C(RESULT_ACCESS) ] = -1, | ||
187 | [ C(RESULT_MISS) ] = -1, | ||
188 | }, | ||
189 | }, | ||
190 | }; | ||
191 | |||
192 | static int sh4a_event_map(int event) | ||
193 | { | ||
194 | return sh4a_general_events[event]; | ||
195 | } | ||
196 | |||
197 | static u64 sh4a_pmu_read(int idx) | ||
198 | { | ||
199 | return __raw_readl(PPC_PMCTR(idx)); | ||
200 | } | ||
201 | |||
202 | static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx) | ||
203 | { | ||
204 | unsigned int tmp; | ||
205 | |||
206 | tmp = __raw_readl(PPC_CCBR(idx)); | ||
207 | tmp &= ~(CCBR_CIT_MASK | CCBR_DUC); | ||
208 | __raw_writel(tmp, PPC_CCBR(idx)); | ||
209 | } | ||
210 | |||
211 | static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx) | ||
212 | { | ||
213 | unsigned int tmp; | ||
214 | |||
215 | tmp = __raw_readl(PPC_PMCAT); | ||
216 | tmp &= ~PMCAT_EMU_CLR_MASK; | ||
217 | tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0; | ||
218 | __raw_writel(tmp, PPC_PMCAT); | ||
219 | |||
220 | tmp = __raw_readl(PPC_CCBR(idx)); | ||
221 | tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE; | ||
222 | __raw_writel(tmp, PPC_CCBR(idx)); | ||
223 | |||
224 | __raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx)); | ||
225 | } | ||
226 | |||
227 | static void sh4a_pmu_disable_all(void) | ||
228 | { | ||
229 | int i; | ||
230 | |||
231 | for (i = 0; i < sh4a_pmu.num_events; i++) | ||
232 | __raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i)); | ||
233 | } | ||
234 | |||
235 | static void sh4a_pmu_enable_all(void) | ||
236 | { | ||
237 | int i; | ||
238 | |||
239 | for (i = 0; i < sh4a_pmu.num_events; i++) | ||
240 | __raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i)); | ||
241 | } | ||
242 | |||
243 | static struct sh_pmu sh4a_pmu = { | ||
244 | .name = "SH-4A", | ||
245 | .num_events = 2, | ||
246 | .event_map = sh4a_event_map, | ||
247 | .max_events = ARRAY_SIZE(sh4a_general_events), | ||
248 | .raw_event_mask = 0x3ff, | ||
249 | .cache_events = &sh4a_cache_events, | ||
250 | .read = sh4a_pmu_read, | ||
251 | .disable = sh4a_pmu_disable, | ||
252 | .enable = sh4a_pmu_enable, | ||
253 | .disable_all = sh4a_pmu_disable_all, | ||
254 | .enable_all = sh4a_pmu_enable_all, | ||
255 | }; | ||
256 | |||
257 | static int __init sh4a_pmu_init(void) | ||
258 | { | ||
259 | /* | ||
260 | * Make sure this CPU actually has perf counters. | ||
261 | */ | ||
262 | if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) { | ||
263 | pr_notice("HW perf events unsupported, software events only.\n"); | ||
264 | return -ENODEV; | ||
265 | } | ||
266 | |||
267 | return register_sh_pmu(&sh4a_pmu); | ||
268 | } | ||
269 | arch_initcall(sh4a_pmu_init); | ||
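
The generic cache events above are selected by the common SH perf code, which is not part of this diff, using the standard perf cache-event encoding where attr.config packs the cache type, operation, and result into one value. The sketch below is illustrative only (it is not the in-tree implementation); it shows the lookup one would expect against sh4a_cache_events, with -1 entries treated as combinations the hardware cannot count.

    /* Illustrative sketch only, not the in-tree implementation. */
    static int sh4a_cache_event_to_code(u64 config)
    {
            unsigned int type   = (config >>  0) & 0xff;    /* e.g. C(L1D)         */
            unsigned int op     = (config >>  8) & 0xff;    /* e.g. C(OP_READ)     */
            unsigned int result = (config >> 16) & 0xff;    /* e.g. C(RESULT_MISS) */
            int code;

            if (type >= PERF_COUNT_HW_CACHE_MAX ||
                op >= PERF_COUNT_HW_CACHE_OP_MAX ||
                result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                    return -EINVAL;

            code = sh4a_cache_events[type][op][result];
            if (code == -1)         /* combination not countable on this CPU */
                    return -EINVAL;

            return code;            /* L1D read miss -> 0x0032, for example */
    }
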
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index f3851fd757e..845e89c936e 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c | |||
@@ -20,6 +20,8 @@ | |||
20 | #include <linux/uio_driver.h> | 20 | #include <linux/uio_driver.h> |
21 | #include <linux/sh_timer.h> | 21 | #include <linux/sh_timer.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/notifier.h> | ||
24 | #include <asm/suspend.h> | ||
23 | #include <asm/clock.h> | 25 | #include <asm/clock.h> |
24 | #include <asm/mmzone.h> | 26 | #include <asm/mmzone.h> |
25 | #include <cpu/sh7724.h> | 27 | #include <cpu/sh7724.h> |
@@ -202,7 +204,7 @@ static struct resource veu0_resources[] = { | |||
202 | [0] = { | 204 | [0] = { |
203 | .name = "VEU3F0", | 205 | .name = "VEU3F0", |
204 | .start = 0xfe920000, | 206 | .start = 0xfe920000, |
205 | .end = 0xfe9200cb - 1, | 207 | .end = 0xfe9200cb, |
206 | .flags = IORESOURCE_MEM, | 208 | .flags = IORESOURCE_MEM, |
207 | }, | 209 | }, |
208 | [1] = { | 210 | [1] = { |
@@ -234,7 +236,7 @@ static struct resource veu1_resources[] = { | |||
234 | [0] = { | 236 | [0] = { |
235 | .name = "VEU3F1", | 237 | .name = "VEU3F1", |
236 | .start = 0xfe924000, | 238 | .start = 0xfe924000, |
237 | .end = 0xfe9240cb - 1, | 239 | .end = 0xfe9240cb, |
238 | .flags = IORESOURCE_MEM, | 240 | .flags = IORESOURCE_MEM, |
239 | }, | 241 | }, |
240 | [1] = { | 242 | [1] = { |
@@ -523,6 +525,70 @@ static struct platform_device jpu_device = { | |||
523 | }, | 525 | }, |
524 | }; | 526 | }; |
525 | 527 | ||
528 | /* SPU2DSP0 */ | ||
529 | static struct uio_info spu0_platform_data = { | ||
530 | .name = "SPU2DSP0", | ||
531 | .version = "0", | ||
532 | .irq = 86, | ||
533 | }; | ||
534 | |||
535 | static struct resource spu0_resources[] = { | ||
536 | [0] = { | ||
537 | .name = "SPU2DSP0", | ||
538 | .start = 0xFE200000, | ||
539 | .end = 0xFE2FFFFF, | ||
540 | .flags = IORESOURCE_MEM, | ||
541 | }, | ||
542 | [1] = { | ||
543 | /* placeholder for contiguous memory */ | ||
544 | }, | ||
545 | }; | ||
546 | |||
547 | static struct platform_device spu0_device = { | ||
548 | .name = "uio_pdrv_genirq", | ||
549 | .id = 4, | ||
550 | .dev = { | ||
551 | .platform_data = &spu0_platform_data, | ||
552 | }, | ||
553 | .resource = spu0_resources, | ||
554 | .num_resources = ARRAY_SIZE(spu0_resources), | ||
555 | .archdata = { | ||
556 | .hwblk_id = HWBLK_SPU, | ||
557 | }, | ||
558 | }; | ||
559 | |||
560 | /* SPU2DSP1 */ | ||
561 | static struct uio_info spu1_platform_data = { | ||
562 | .name = "SPU2DSP1", | ||
563 | .version = "0", | ||
564 | .irq = 87, | ||
565 | }; | ||
566 | |||
567 | static struct resource spu1_resources[] = { | ||
568 | [0] = { | ||
569 | .name = "SPU2DSP1", | ||
570 | .start = 0xFE300000, | ||
571 | .end = 0xFE3FFFFF, | ||
572 | .flags = IORESOURCE_MEM, | ||
573 | }, | ||
574 | [1] = { | ||
575 | /* placeholder for contiguous memory */ | ||
576 | }, | ||
577 | }; | ||
578 | |||
579 | static struct platform_device spu1_device = { | ||
580 | .name = "uio_pdrv_genirq", | ||
581 | .id = 5, | ||
582 | .dev = { | ||
583 | .platform_data = &spu1_platform_data, | ||
584 | }, | ||
585 | .resource = spu1_resources, | ||
586 | .num_resources = ARRAY_SIZE(spu1_resources), | ||
587 | .archdata = { | ||
588 | .hwblk_id = HWBLK_SPU, | ||
589 | }, | ||
590 | }; | ||
591 | |||
526 | static struct platform_device *sh7724_devices[] __initdata = { | 592 | static struct platform_device *sh7724_devices[] __initdata = { |
527 | &cmt_device, | 593 | &cmt_device, |
528 | &tmu0_device, | 594 | &tmu0_device, |
@@ -539,6 +605,8 @@ static struct platform_device *sh7724_devices[] __initdata = { | |||
539 | &veu0_device, | 605 | &veu0_device, |
540 | &veu1_device, | 606 | &veu1_device, |
541 | &jpu_device, | 607 | &jpu_device, |
608 | &spu0_device, | ||
609 | &spu1_device, | ||
542 | }; | 610 | }; |
543 | 611 | ||
544 | static int __init sh7724_devices_setup(void) | 612 | static int __init sh7724_devices_setup(void) |
@@ -547,6 +615,8 @@ static int __init sh7724_devices_setup(void) | |||
547 | platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20); | 615 | platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20); |
548 | platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20); | 616 | platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20); |
549 | platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20); | 617 | platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20); |
618 | platform_resource_setup_memory(&spu0_device, "spu0", 2 << 20); | ||
619 | platform_resource_setup_memory(&spu1_device, "spu1", 2 << 20); | ||
550 | 620 | ||
551 | return platform_add_devices(sh7724_devices, | 621 | return platform_add_devices(sh7724_devices, |
552 | ARRAY_SIZE(sh7724_devices)); | 622 | ARRAY_SIZE(sh7724_devices)); |
@@ -827,3 +897,193 @@ void __init plat_irq_setup(void) | |||
827 | { | 897 | { |
828 | register_intc_controller(&intc_desc); | 898 | register_intc_controller(&intc_desc); |
829 | } | 899 | } |
900 | |||
901 | static struct { | ||
902 | /* BSC */ | ||
903 | unsigned long mmselr; | ||
904 | unsigned long cs0bcr; | ||
905 | unsigned long cs4bcr; | ||
906 | unsigned long cs5abcr; | ||
907 | unsigned long cs5bbcr; | ||
908 | unsigned long cs6abcr; | ||
909 | unsigned long cs6bbcr; | ||
910 | unsigned long cs4wcr; | ||
911 | unsigned long cs5awcr; | ||
912 | unsigned long cs5bwcr; | ||
913 | unsigned long cs6awcr; | ||
914 | unsigned long cs6bwcr; | ||
915 | /* INTC */ | ||
916 | unsigned short ipra; | ||
917 | unsigned short iprb; | ||
918 | unsigned short iprc; | ||
919 | unsigned short iprd; | ||
920 | unsigned short ipre; | ||
921 | unsigned short iprf; | ||
922 | unsigned short iprg; | ||
923 | unsigned short iprh; | ||
924 | unsigned short ipri; | ||
925 | unsigned short iprj; | ||
926 | unsigned short iprk; | ||
927 | unsigned short iprl; | ||
928 | unsigned char imr0; | ||
929 | unsigned char imr1; | ||
930 | unsigned char imr2; | ||
931 | unsigned char imr3; | ||
932 | unsigned char imr4; | ||
933 | unsigned char imr5; | ||
934 | unsigned char imr6; | ||
935 | unsigned char imr7; | ||
936 | unsigned char imr8; | ||
937 | unsigned char imr9; | ||
938 | unsigned char imr10; | ||
939 | unsigned char imr11; | ||
940 | unsigned char imr12; | ||
941 | /* RWDT */ | ||
942 | unsigned short rwtcnt; | ||
943 | unsigned short rwtcsr; | ||
944 | /* CPG */ | ||
945 | unsigned long irdaclk; | ||
946 | unsigned long spuclk; | ||
947 | } sh7724_rstandby_state; | ||
948 | |||
949 | static int sh7724_pre_sleep_notifier_call(struct notifier_block *nb, | ||
950 | unsigned long flags, void *unused) | ||
951 | { | ||
952 | if (!(flags & SUSP_SH_RSTANDBY)) | ||
953 | return NOTIFY_DONE; | ||
954 | |||
955 | /* BCR */ | ||
956 | sh7724_rstandby_state.mmselr = __raw_readl(0xff800020); /* MMSELR */ | ||
957 | sh7724_rstandby_state.mmselr |= 0xa5a50000; | ||
958 | sh7724_rstandby_state.cs0bcr = __raw_readl(0xfec10004); /* CS0BCR */ | ||
959 | sh7724_rstandby_state.cs4bcr = __raw_readl(0xfec10010); /* CS4BCR */ | ||
960 | sh7724_rstandby_state.cs5abcr = __raw_readl(0xfec10014); /* CS5ABCR */ | ||
961 | sh7724_rstandby_state.cs5bbcr = __raw_readl(0xfec10018); /* CS5BBCR */ | ||
962 | sh7724_rstandby_state.cs6abcr = __raw_readl(0xfec1001c); /* CS6ABCR */ | ||
963 | sh7724_rstandby_state.cs6bbcr = __raw_readl(0xfec10020); /* CS6BBCR */ | ||
964 | sh7724_rstandby_state.cs4wcr = __raw_readl(0xfec10030); /* CS4WCR */ | ||
965 | sh7724_rstandby_state.cs5awcr = __raw_readl(0xfec10034); /* CS5AWCR */ | ||
966 | sh7724_rstandby_state.cs5bwcr = __raw_readl(0xfec10038); /* CS5BWCR */ | ||
967 | sh7724_rstandby_state.cs6awcr = __raw_readl(0xfec1003c); /* CS6AWCR */ | ||
968 | sh7724_rstandby_state.cs6bwcr = __raw_readl(0xfec10040); /* CS6BWCR */ | ||
969 | |||
970 | /* INTC */ | ||
971 | sh7724_rstandby_state.ipra = __raw_readw(0xa4080000); /* IPRA */ | ||
972 | sh7724_rstandby_state.iprb = __raw_readw(0xa4080004); /* IPRB */ | ||
973 | sh7724_rstandby_state.iprc = __raw_readw(0xa4080008); /* IPRC */ | ||
974 | sh7724_rstandby_state.iprd = __raw_readw(0xa408000c); /* IPRD */ | ||
975 | sh7724_rstandby_state.ipre = __raw_readw(0xa4080010); /* IPRE */ | ||
976 | sh7724_rstandby_state.iprf = __raw_readw(0xa4080014); /* IPRF */ | ||
977 | sh7724_rstandby_state.iprg = __raw_readw(0xa4080018); /* IPRG */ | ||
978 | sh7724_rstandby_state.iprh = __raw_readw(0xa408001c); /* IPRH */ | ||
979 | sh7724_rstandby_state.ipri = __raw_readw(0xa4080020); /* IPRI */ | ||
980 | sh7724_rstandby_state.iprj = __raw_readw(0xa4080024); /* IPRJ */ | ||
981 | sh7724_rstandby_state.iprk = __raw_readw(0xa4080028); /* IPRK */ | ||
982 | sh7724_rstandby_state.iprl = __raw_readw(0xa408002c); /* IPRL */ | ||
983 | sh7724_rstandby_state.imr0 = __raw_readb(0xa4080080); /* IMR0 */ | ||
984 | sh7724_rstandby_state.imr1 = __raw_readb(0xa4080084); /* IMR1 */ | ||
985 | sh7724_rstandby_state.imr2 = __raw_readb(0xa4080088); /* IMR2 */ | ||
986 | sh7724_rstandby_state.imr3 = __raw_readb(0xa408008c); /* IMR3 */ | ||
987 | sh7724_rstandby_state.imr4 = __raw_readb(0xa4080090); /* IMR4 */ | ||
988 | sh7724_rstandby_state.imr5 = __raw_readb(0xa4080094); /* IMR5 */ | ||
989 | sh7724_rstandby_state.imr6 = __raw_readb(0xa4080098); /* IMR6 */ | ||
990 | sh7724_rstandby_state.imr7 = __raw_readb(0xa408009c); /* IMR7 */ | ||
991 | sh7724_rstandby_state.imr8 = __raw_readb(0xa40800a0); /* IMR8 */ | ||
992 | sh7724_rstandby_state.imr9 = __raw_readb(0xa40800a4); /* IMR9 */ | ||
993 | sh7724_rstandby_state.imr10 = __raw_readb(0xa40800a8); /* IMR10 */ | ||
994 | sh7724_rstandby_state.imr11 = __raw_readb(0xa40800ac); /* IMR11 */ | ||
995 | sh7724_rstandby_state.imr12 = __raw_readb(0xa40800b0); /* IMR12 */ | ||
996 | |||
997 | /* RWDT */ | ||
998 | sh7724_rstandby_state.rwtcnt = __raw_readb(0xa4520000); /* RWTCNT */ | ||
999 | sh7724_rstandby_state.rwtcnt |= 0x5a00; | ||
1000 | sh7724_rstandby_state.rwtcsr = __raw_readb(0xa4520004); /* RWTCSR */ | ||
1001 | sh7724_rstandby_state.rwtcsr |= 0xa500; | ||
1002 | __raw_writew(sh7724_rstandby_state.rwtcsr & 0x07, 0xa4520004); | ||
1003 | |||
1004 | /* CPG */ | ||
1005 | sh7724_rstandby_state.irdaclk = __raw_readl(0xa4150018); /* IRDACLKCR */ | ||
1006 | sh7724_rstandby_state.spuclk = __raw_readl(0xa415003c); /* SPUCLKCR */ | ||
1007 | |||
1008 | return NOTIFY_DONE; | ||
1009 | } | ||
1010 | |||
1011 | static int sh7724_post_sleep_notifier_call(struct notifier_block *nb, | ||
1012 | unsigned long flags, void *unused) | ||
1013 | { | ||
1014 | if (!(flags & SUSP_SH_RSTANDBY)) | ||
1015 | return NOTIFY_DONE; | ||
1016 | |||
1017 | /* BCR */ | ||
1018 | __raw_writel(sh7724_rstandby_state.mmselr, 0xff800020); /* MMSELR */ | ||
1019 | __raw_writel(sh7724_rstandby_state.cs0bcr, 0xfec10004); /* CS0BCR */ | ||
1020 | __raw_writel(sh7724_rstandby_state.cs4bcr, 0xfec10010); /* CS4BCR */ | ||
1021 | __raw_writel(sh7724_rstandby_state.cs5abcr, 0xfec10014); /* CS5ABCR */ | ||
1022 | __raw_writel(sh7724_rstandby_state.cs5bbcr, 0xfec10018); /* CS5BBCR */ | ||
1023 | __raw_writel(sh7724_rstandby_state.cs6abcr, 0xfec1001c); /* CS6ABCR */ | ||
1024 | __raw_writel(sh7724_rstandby_state.cs6bbcr, 0xfec10020); /* CS6BBCR */ | ||
1025 | __raw_writel(sh7724_rstandby_state.cs4wcr, 0xfec10030); /* CS4WCR */ | ||
1026 | __raw_writel(sh7724_rstandby_state.cs5awcr, 0xfec10034); /* CS5AWCR */ | ||
1027 | __raw_writel(sh7724_rstandby_state.cs5bwcr, 0xfec10038); /* CS5BWCR */ | ||
1028 | __raw_writel(sh7724_rstandby_state.cs6awcr, 0xfec1003c); /* CS6AWCR */ | ||
1029 | __raw_writel(sh7724_rstandby_state.cs6bwcr, 0xfec10040); /* CS6BWCR */ | ||
1030 | |||
1031 | /* INTC */ | ||
1032 | __raw_writew(sh7724_rstandby_state.ipra, 0xa4080000); /* IPRA */ | ||
1033 | __raw_writew(sh7724_rstandby_state.iprb, 0xa4080004); /* IPRB */ | ||
1034 | __raw_writew(sh7724_rstandby_state.iprc, 0xa4080008); /* IPRC */ | ||
1035 | __raw_writew(sh7724_rstandby_state.iprd, 0xa408000c); /* IPRD */ | ||
1036 | __raw_writew(sh7724_rstandby_state.ipre, 0xa4080010); /* IPRE */ | ||
1037 | __raw_writew(sh7724_rstandby_state.iprf, 0xa4080014); /* IPRF */ | ||
1038 | __raw_writew(sh7724_rstandby_state.iprg, 0xa4080018); /* IPRG */ | ||
1039 | __raw_writew(sh7724_rstandby_state.iprh, 0xa408001c); /* IPRH */ | ||
1040 | __raw_writew(sh7724_rstandby_state.ipri, 0xa4080020); /* IPRI */ | ||
1041 | __raw_writew(sh7724_rstandby_state.iprj, 0xa4080024); /* IPRJ */ | ||
1042 | __raw_writew(sh7724_rstandby_state.iprk, 0xa4080028); /* IPRK */ | ||
1043 | __raw_writew(sh7724_rstandby_state.iprl, 0xa408002c); /* IPRL */ | ||
1044 | __raw_writeb(sh7724_rstandby_state.imr0, 0xa4080080); /* IMR0 */ | ||
1045 | __raw_writeb(sh7724_rstandby_state.imr1, 0xa4080084); /* IMR1 */ | ||
1046 | __raw_writeb(sh7724_rstandby_state.imr2, 0xa4080088); /* IMR2 */ | ||
1047 | __raw_writeb(sh7724_rstandby_state.imr3, 0xa408008c); /* IMR3 */ | ||
1048 | __raw_writeb(sh7724_rstandby_state.imr4, 0xa4080090); /* IMR4 */ | ||
1049 | __raw_writeb(sh7724_rstandby_state.imr5, 0xa4080094); /* IMR5 */ | ||
1050 | __raw_writeb(sh7724_rstandby_state.imr6, 0xa4080098); /* IMR6 */ | ||
1051 | __raw_writeb(sh7724_rstandby_state.imr7, 0xa408009c); /* IMR7 */ | ||
1052 | __raw_writeb(sh7724_rstandby_state.imr8, 0xa40800a0); /* IMR8 */ | ||
1053 | __raw_writeb(sh7724_rstandby_state.imr9, 0xa40800a4); /* IMR9 */ | ||
1054 | __raw_writeb(sh7724_rstandby_state.imr10, 0xa40800a8); /* IMR10 */ | ||
1055 | __raw_writeb(sh7724_rstandby_state.imr11, 0xa40800ac); /* IMR11 */ | ||
1056 | __raw_writeb(sh7724_rstandby_state.imr12, 0xa40800b0); /* IMR12 */ | ||
1057 | |||
1058 | /* RWDT */ | ||
1059 | __raw_writew(sh7724_rstandby_state.rwtcnt, 0xa4520000); /* RWTCNT */ | ||
1060 | __raw_writew(sh7724_rstandby_state.rwtcsr, 0xa4520004); /* RWTCSR */ | ||
1061 | |||
1062 | /* CPG */ | ||
1063 | __raw_writel(sh7724_rstandby_state.irdaclk, 0xa4150018); /* IRDACLKCR */ | ||
1064 | __raw_writel(sh7724_rstandby_state.spuclk, 0xa415003c); /* SPUCLKCR */ | ||
1065 | |||
1066 | return NOTIFY_DONE; | ||
1067 | } | ||
1068 | |||
1069 | static struct notifier_block sh7724_pre_sleep_notifier = { | ||
1070 | .notifier_call = sh7724_pre_sleep_notifier_call, | ||
1071 | .priority = SH_MOBILE_PRE(SH_MOBILE_SLEEP_CPU), | ||
1072 | }; | ||
1073 | |||
1074 | static struct notifier_block sh7724_post_sleep_notifier = { | ||
1075 | .notifier_call = sh7724_post_sleep_notifier_call, | ||
1076 | .priority = SH_MOBILE_POST(SH_MOBILE_SLEEP_CPU), | ||
1077 | }; | ||
1078 | |||
1079 | static int __init sh7724_sleep_setup(void) | ||
1080 | { | ||
1081 | atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list, | ||
1082 | &sh7724_pre_sleep_notifier); | ||
1083 | |||
1084 | atomic_notifier_chain_register(&sh_mobile_post_sleep_notifier_list, | ||
1085 | &sh7724_post_sleep_notifier); | ||
1086 | return 0; | ||
1087 | } | ||
1088 | arch_initcall(sh7724_sleep_setup); | ||
1089 | |||
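
Other code can hook the same pre/post sleep notifier chains that the SH7724 code above registers on. Below is a hypothetical sketch of a board file saving one extra register before R-standby; all myboard_* names and the register address are made up for illustration, and a matching post-sleep notifier would restore the saved value.

    #include <linux/init.h>
    #include <linux/io.h>
    #include <linux/notifier.h>
    #include <asm/suspend.h>

    static unsigned long myboard_saved_reg;

    static int myboard_pre_sleep(struct notifier_block *nb,
                                 unsigned long flags, void *unused)
    {
            if (flags & SUSP_SH_RSTANDBY)
                    myboard_saved_reg = __raw_readl(0xa4050100); /* made-up address */

            return NOTIFY_DONE;
    }

    static struct notifier_block myboard_pre_sleep_notifier = {
            .notifier_call = myboard_pre_sleep,
            /* same priority level as the CPU code above, for illustration */
            .priority = SH_MOBILE_PRE(SH_MOBILE_SLEEP_CPU),
    };

    static int __init myboard_sleep_setup(void)
    {
            atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list,
                                           &myboard_pre_sleep_notifier);
            return 0;
    }
    arch_initcall(myboard_sleep_setup);
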
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c index e848443deeb..c7ba9166e18 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c | |||
@@ -15,6 +15,15 @@ | |||
15 | #include <linux/sh_timer.h> | 15 | #include <linux/sh_timer.h> |
16 | #include <asm/mmzone.h> | 16 | #include <asm/mmzone.h> |
17 | 17 | ||
18 | /* | ||
19 | * This intentionally only registers SCIF ports 0, 1, and 3. SCIF 2 | ||
20 | * INTEVT values overlap with the FPU EXPEVT ones, requiring special | ||
21 | * demuxing in the exception dispatch path. | ||
22 | * | ||
23 | * As this overlap is something that never should have made it into | ||
24 | * silicon in the first place, we just refuse to deal with the port at | ||
25 | * all rather than adding infrastructure to hack around it. | ||
26 | */ | ||
18 | static struct plat_sci_port sci_platform_data[] = { | 27 | static struct plat_sci_port sci_platform_data[] = { |
19 | { | 28 | { |
20 | .mapbase = 0xffc30000, | 29 | .mapbase = 0xffc30000, |
@@ -27,11 +36,6 @@ static struct plat_sci_port sci_platform_data[] = { | |||
27 | .type = PORT_SCIF, | 36 | .type = PORT_SCIF, |
28 | .irqs = { 44, 45, 47, 46 }, | 37 | .irqs = { 44, 45, 47, 46 }, |
29 | }, { | 38 | }, { |
30 | .mapbase = 0xffc50000, | ||
31 | .flags = UPF_BOOT_AUTOCONF, | ||
32 | .type = PORT_SCIF, | ||
33 | .irqs = { 48, 49, 51, 50 }, | ||
34 | }, { | ||
35 | .mapbase = 0xffc60000, | 39 | .mapbase = 0xffc60000, |
36 | .flags = UPF_BOOT_AUTOCONF, | 40 | .flags = UPF_BOOT_AUTOCONF, |
37 | .type = PORT_SCIF, | 41 | .type = PORT_SCIF, |
@@ -268,7 +272,11 @@ enum { | |||
268 | UNUSED = 0, | 272 | UNUSED = 0, |
269 | 273 | ||
270 | /* interrupt sources */ | 274 | /* interrupt sources */ |
271 | IRL, IRQ0, IRQ1, IRQ2, IRQ3, | 275 | IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, |
276 | IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, | ||
277 | IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, | ||
278 | IRL_HHLL, IRL_HHLH, IRL_HHHL, | ||
279 | IRQ0, IRQ1, IRQ2, IRQ3, | ||
272 | HUDII, | 280 | HUDII, |
273 | TMU0, TMU1, TMU2, TMU3, TMU4, TMU5, | 281 | TMU0, TMU1, TMU2, TMU3, TMU4, TMU5, |
274 | PCII0, PCII1, PCII2, PCII3, PCII4, | 282 | PCII0, PCII1, PCII2, PCII3, PCII4, |
@@ -291,7 +299,7 @@ enum { | |||
291 | INTICI4, INTICI5, INTICI6, INTICI7, | 299 | INTICI4, INTICI5, INTICI6, INTICI7, |
292 | 300 | ||
293 | /* interrupt groups */ | 301 | /* interrupt groups */ |
294 | PCII56789, SCIF0, SCIF1, SCIF2, SCIF3, | 302 | IRL, PCII56789, SCIF0, SCIF1, SCIF2, SCIF3, |
295 | DMAC0, DMAC1, | 303 | DMAC0, DMAC1, |
296 | }; | 304 | }; |
297 | 305 | ||
@@ -309,8 +317,6 @@ static struct intc_vect vectors[] __initdata = { | |||
309 | INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760), | 317 | INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760), |
310 | INTC_VECT(SCIF1_ERI, 0x780), INTC_VECT(SCIF1_RXI, 0x7a0), | 318 | INTC_VECT(SCIF1_ERI, 0x780), INTC_VECT(SCIF1_RXI, 0x7a0), |
311 | INTC_VECT(SCIF1_BRI, 0x7c0), INTC_VECT(SCIF1_TXI, 0x7e0), | 319 | INTC_VECT(SCIF1_BRI, 0x7c0), INTC_VECT(SCIF1_TXI, 0x7e0), |
312 | INTC_VECT(SCIF2_ERI, 0x800), INTC_VECT(SCIF2_RXI, 0x820), | ||
313 | INTC_VECT(SCIF2_BRI, 0x840), INTC_VECT(SCIF2_TXI, 0x860), | ||
314 | INTC_VECT(SCIF3_ERI, 0x880), INTC_VECT(SCIF3_RXI, 0x8a0), | 320 | INTC_VECT(SCIF3_ERI, 0x880), INTC_VECT(SCIF3_RXI, 0x8a0), |
315 | INTC_VECT(SCIF3_BRI, 0x8c0), INTC_VECT(SCIF3_TXI, 0x8e0), | 321 | INTC_VECT(SCIF3_BRI, 0x8c0), INTC_VECT(SCIF3_TXI, 0x8e0), |
316 | INTC_VECT(DMAC0_DMINT0, 0x900), INTC_VECT(DMAC0_DMINT1, 0x920), | 322 | INTC_VECT(DMAC0_DMINT0, 0x900), INTC_VECT(DMAC0_DMINT1, 0x920), |
@@ -344,10 +350,13 @@ static struct intc_vect vectors[] __initdata = { | |||
344 | }; | 350 | }; |
345 | 351 | ||
346 | static struct intc_group groups[] __initdata = { | 352 | static struct intc_group groups[] __initdata = { |
353 | INTC_GROUP(IRL, IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, | ||
354 | IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, | ||
355 | IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, | ||
356 | IRL_HHLL, IRL_HHLH, IRL_HHHL), | ||
347 | INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9), | 357 | INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9), |
348 | INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), | 358 | INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), |
349 | INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), | 359 | INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), |
350 | INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI), | ||
351 | INTC_GROUP(SCIF3, SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI), | 360 | INTC_GROUP(SCIF3, SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI), |
352 | INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, | 361 | INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, |
353 | DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE), | 362 | DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE), |
@@ -419,14 +428,14 @@ static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups, | |||
419 | 428 | ||
420 | /* External interrupt pins in IRL mode */ | 429 | /* External interrupt pins in IRL mode */ |
421 | static struct intc_vect vectors_irl[] __initdata = { | 430 | static struct intc_vect vectors_irl[] __initdata = { |
422 | INTC_VECT(IRL, 0x200), INTC_VECT(IRL, 0x220), | 431 | INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220), |
423 | INTC_VECT(IRL, 0x240), INTC_VECT(IRL, 0x260), | 432 | INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260), |
424 | INTC_VECT(IRL, 0x280), INTC_VECT(IRL, 0x2a0), | 433 | INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0), |
425 | INTC_VECT(IRL, 0x2c0), INTC_VECT(IRL, 0x2e0), | 434 | INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0), |
426 | INTC_VECT(IRL, 0x300), INTC_VECT(IRL, 0x320), | 435 | INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320), |
427 | INTC_VECT(IRL, 0x340), INTC_VECT(IRL, 0x360), | 436 | INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360), |
428 | INTC_VECT(IRL, 0x380), INTC_VECT(IRL, 0x3a0), | 437 | INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0), |
429 | INTC_VECT(IRL, 0x3c0), | 438 | INTC_VECT(IRL_HHHL, 0x3c0), |
430 | }; | 439 | }; |
431 | 440 | ||
432 | static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups, | 441 | static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups, |
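
The fifteen IRL vectors above follow a simple linear encoding of the 4-bit value on the IRL[3:0] pins; only the all-high pattern (HHHH) is absent, since it signals "no interrupt pending". A small illustration, not part of the patch:

    /* Illustration only: INTEVT for an IRL-mode external interrupt. */
    #define IRL_TO_INTEVT(irl)      (0x200 + 0x20 * (irl))  /* irl = 0x0 (LLLL) .. 0xe (HHHL) */
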
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c index 185ec3976a2..5863e0c4d02 100644 --- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c | |||
@@ -14,6 +14,13 @@ | |||
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | 16 | ||
17 | #define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12)) | ||
18 | #define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12)) | ||
19 | |||
20 | #define STBCR_MSTP 0x00000001 | ||
21 | #define STBCR_RESET 0x00000002 | ||
22 | #define STBCR_LTSLP 0x80000000 | ||
23 | |||
17 | static irqreturn_t ipi_interrupt_handler(int irq, void *arg) | 24 | static irqreturn_t ipi_interrupt_handler(int irq, void *arg) |
18 | { | 25 | { |
19 | unsigned int message = (unsigned int)(long)arg; | 26 | unsigned int message = (unsigned int)(long)arg; |
@@ -21,9 +28,9 @@ static irqreturn_t ipi_interrupt_handler(int irq, void *arg) | |||
21 | unsigned int offs = 4 * cpu; | 28 | unsigned int offs = 4 * cpu; |
22 | unsigned int x; | 29 | unsigned int x; |
23 | 30 | ||
24 | x = ctrl_inl(0xfe410070 + offs); /* C0INITICI..CnINTICI */ | 31 | x = __raw_readl(0xfe410070 + offs); /* C0INITICI..CnINTICI */ |
25 | x &= (1 << (message << 2)); | 32 | x &= (1 << (message << 2)); |
26 | ctrl_outl(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */ | 33 | __raw_writel(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */ |
27 | 34 | ||
28 | smp_message_recv(message); | 35 | smp_message_recv(message); |
29 | 36 | ||
@@ -37,6 +44,9 @@ void __init plat_smp_setup(void) | |||
37 | 44 | ||
38 | init_cpu_possible(cpumask_of(cpu)); | 45 | init_cpu_possible(cpumask_of(cpu)); |
39 | 46 | ||
47 | /* Enable light sleep for the boot CPU */ | ||
48 | __raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu)); | ||
49 | |||
40 | __cpu_number_map[0] = 0; | 50 | __cpu_number_map[0] = 0; |
41 | __cpu_logical_map[0] = 0; | 51 | __cpu_logical_map[0] = 0; |
42 | 52 | ||
@@ -66,32 +76,23 @@ void __init plat_prepare_cpus(unsigned int max_cpus) | |||
66 | "IPI", (void *)(long)i); | 76 | "IPI", (void *)(long)i); |
67 | } | 77 | } |
68 | 78 | ||
69 | #define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12)) | ||
70 | #define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12)) | ||
71 | |||
72 | #define STBCR_MSTP 0x00000001 | ||
73 | #define STBCR_RESET 0x00000002 | ||
74 | #define STBCR_LTSLP 0x80000000 | ||
75 | |||
76 | #define STBCR_AP_VAL (STBCR_RESET | STBCR_LTSLP) | ||
77 | |||
78 | void plat_start_cpu(unsigned int cpu, unsigned long entry_point) | 79 | void plat_start_cpu(unsigned int cpu, unsigned long entry_point) |
79 | { | 80 | { |
80 | ctrl_outl(entry_point, RESET_REG(cpu)); | 81 | __raw_writel(entry_point, RESET_REG(cpu)); |
81 | 82 | ||
82 | if (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP)) | 83 | if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) |
83 | ctrl_outl(STBCR_MSTP, STBCR_REG(cpu)); | 84 | __raw_writel(STBCR_MSTP, STBCR_REG(cpu)); |
84 | 85 | ||
85 | while (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP)) | 86 | while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) |
86 | cpu_relax(); | 87 | cpu_relax(); |
87 | 88 | ||
88 | /* Start up secondary processor by sending a reset */ | 89 | /* Start up secondary processor by sending a reset */ |
89 | ctrl_outl(STBCR_AP_VAL, STBCR_REG(cpu)); | 90 | __raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu)); |
90 | } | 91 | } |
91 | 92 | ||
92 | int plat_smp_processor_id(void) | 93 | int plat_smp_processor_id(void) |
93 | { | 94 | { |
94 | return ctrl_inl(0xff000048); /* CPIDR */ | 95 | return __raw_readl(0xff000048); /* CPIDR */ |
95 | } | 96 | } |
96 | 97 | ||
97 | void plat_send_ipi(unsigned int cpu, unsigned int message) | 98 | void plat_send_ipi(unsigned int cpu, unsigned int message) |
@@ -100,5 +101,5 @@ void plat_send_ipi(unsigned int cpu, unsigned int message) | |||
100 | 101 | ||
101 | BUG_ON(cpu >= 4); | 102 | BUG_ON(cpu >= 4); |
102 | 103 | ||
103 | ctrl_outl(1 << (message << 2), addr); /* C0INTICI..CnINTICI */ | 104 | __raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */ |
104 | } | 105 | } |
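
For reference, the register macros and the IPI encoding above expand as follows (worked out here for illustration, not part of the patch):

    STBCR_REG(0) = 0xfe400004 | (0 << 12) = 0xfe400004   /* boot CPU */
    STBCR_REG(1) = 0xfe400004 | (1 << 12) = 0xfe401004
    RESET_REG(3) = 0xfe400008 | (3 << 12) = 0xfe403008
    plat_send_ipi(cpu, 2) writes 1 << (2 << 2) = 0x100, i.e. bit 8 of C<cpu>INTICI
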
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S index b0aacf67525..8f13f73cb2c 100644 --- a/arch/sh/kernel/cpu/sh5/entry.S +++ b/arch/sh/kernel/cpu/sh5/entry.S | |||
@@ -933,7 +933,7 @@ ret_with_reschedule: | |||
933 | 933 | ||
934 | pta restore_all, tr1 | 934 | pta restore_all, tr1 |
935 | 935 | ||
936 | movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8 | 936 | movi _TIF_SIGPENDING, r8 |
937 | and r8, r7, r8 | 937 | and r8, r7, r8 |
938 | pta work_notifysig, tr0 | 938 | pta work_notifysig, tr0 |
939 | bne r8, ZERO, tr0 | 939 | bne r8, ZERO, tr0 |
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c index 1c504bd972c..83972aa319c 100644 --- a/arch/sh/kernel/cpu/shmobile/cpuidle.c +++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c | |||
@@ -87,25 +87,31 @@ void sh_mobile_setup_cpuidle(void) | |||
87 | 87 | ||
88 | dev->safe_state = state; | 88 | dev->safe_state = state; |
89 | 89 | ||
90 | state = &dev->states[i++]; | 90 | if (sh_mobile_sleep_supported & SUSP_SH_SF) { |
91 | snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); | 91 | state = &dev->states[i++]; |
92 | strncpy(state->desc, "SuperH Sleep Mode [SF]", CPUIDLE_DESC_LEN); | 92 | snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); |
93 | state->exit_latency = 100; | 93 | strncpy(state->desc, "SuperH Sleep Mode [SF]", |
94 | state->target_residency = 1 * 2; | 94 | CPUIDLE_DESC_LEN); |
95 | state->power_usage = 1; | 95 | state->exit_latency = 100; |
96 | state->flags = 0; | 96 | state->target_residency = 1 * 2; |
97 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | 97 | state->power_usage = 1; |
98 | state->enter = cpuidle_sleep_enter; | 98 | state->flags = 0; |
99 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | ||
100 | state->enter = cpuidle_sleep_enter; | ||
101 | } | ||
99 | 102 | ||
100 | state = &dev->states[i++]; | 103 | if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) { |
101 | snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); | 104 | state = &dev->states[i++]; |
102 | strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", CPUIDLE_DESC_LEN); | 105 | snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); |
103 | state->exit_latency = 2300; | 106 | strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", |
104 | state->target_residency = 1 * 2; | 107 | CPUIDLE_DESC_LEN); |
105 | state->power_usage = 1; | 108 | state->exit_latency = 2300; |
106 | state->flags = 0; | 109 | state->target_residency = 1 * 2; |
107 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | 110 | state->power_usage = 1; |
108 | state->enter = cpuidle_sleep_enter; | 111 | state->flags = 0; |
112 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | ||
113 | state->enter = cpuidle_sleep_enter; | ||
114 | } | ||
109 | 115 | ||
110 | dev->state_count = i; | 116 | dev->state_count = i; |
111 | 117 | ||
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c index ee3c2aaf66f..ca029a44743 100644 --- a/arch/sh/kernel/cpu/shmobile/pm.c +++ b/arch/sh/kernel/cpu/shmobile/pm.c | |||
@@ -15,6 +15,13 @@ | |||
15 | #include <linux/suspend.h> | 15 | #include <linux/suspend.h> |
16 | #include <asm/suspend.h> | 16 | #include <asm/suspend.h> |
17 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
18 | #include <asm/cacheflush.h> | ||
19 | |||
20 | /* | ||
21 | * Notifier lists for pre/post sleep notification | ||
22 | */ | ||
23 | ATOMIC_NOTIFIER_HEAD(sh_mobile_pre_sleep_notifier_list); | ||
24 | ATOMIC_NOTIFIER_HEAD(sh_mobile_post_sleep_notifier_list); | ||
18 | 25 | ||
19 | /* | 26 | /* |
20 | * Sleep modes available on SuperH Mobile: | 27 | * Sleep modes available on SuperH Mobile: |
@@ -26,30 +33,105 @@ | |||
26 | #define SUSP_MODE_SLEEP (SUSP_SH_SLEEP) | 33 | #define SUSP_MODE_SLEEP (SUSP_SH_SLEEP) |
27 | #define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF) | 34 | #define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF) |
28 | #define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF) | 35 | #define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF) |
36 | #define SUSP_MODE_RSTANDBY (SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_SF) | ||
37 | /* | ||
38 | * U-standby mode is unsupported since it needs bootloader hacks | ||
39 | */ | ||
29 | 40 | ||
30 | /* | 41 | #ifdef CONFIG_CPU_SUBTYPE_SH7724 |
31 | * The following modes are not there yet: | 42 | #define RAM_BASE 0xfd800000 /* RSMEM */ |
32 | * | 43 | #else |
33 | * R-standby mode is unsupported, but will be added in the future | 44 | #define RAM_BASE 0xe5200000 /* ILRAM */ |
34 | * U-standby mode is low priority since it needs bootloader hacks | 45 | #endif |
35 | */ | ||
36 | |||
37 | #define ILRAM_BASE 0xe5200000 | ||
38 | |||
39 | extern const unsigned char sh_mobile_standby[]; | ||
40 | extern const unsigned int sh_mobile_standby_size; | ||
41 | 46 | ||
42 | void sh_mobile_call_standby(unsigned long mode) | 47 | void sh_mobile_call_standby(unsigned long mode) |
43 | { | 48 | { |
44 | void *onchip_mem = (void *)ILRAM_BASE; | 49 | void *onchip_mem = (void *)RAM_BASE; |
45 | void (*standby_onchip_mem)(unsigned long, unsigned long) = onchip_mem; | 50 | struct sh_sleep_data *sdp = onchip_mem; |
51 | void (*standby_onchip_mem)(unsigned long, unsigned long); | ||
52 | |||
53 | /* code located directly after data structure */ | ||
54 | standby_onchip_mem = (void *)(sdp + 1); | ||
55 | |||
56 | atomic_notifier_call_chain(&sh_mobile_pre_sleep_notifier_list, | ||
57 | mode, NULL); | ||
58 | |||
59 | /* flush the caches if MMU flag is set */ | ||
60 | if (mode & SUSP_SH_MMU) | ||
61 | flush_cache_all(); | ||
46 | 62 | ||
47 | /* Let assembly snippet in on-chip memory handle the rest */ | 63 | /* Let assembly snippet in on-chip memory handle the rest */ |
48 | standby_onchip_mem(mode, ILRAM_BASE); | 64 | standby_onchip_mem(mode, RAM_BASE); |
65 | |||
66 | atomic_notifier_call_chain(&sh_mobile_post_sleep_notifier_list, | ||
67 | mode, NULL); | ||
68 | } | ||
69 | |||
70 | extern char sh_mobile_sleep_enter_start; | ||
71 | extern char sh_mobile_sleep_enter_end; | ||
72 | |||
73 | extern char sh_mobile_sleep_resume_start; | ||
74 | extern char sh_mobile_sleep_resume_end; | ||
75 | |||
76 | unsigned long sh_mobile_sleep_supported = SUSP_SH_SLEEP; | ||
77 | |||
78 | void sh_mobile_register_self_refresh(unsigned long flags, | ||
79 | void *pre_start, void *pre_end, | ||
80 | void *post_start, void *post_end) | ||
81 | { | ||
82 | void *onchip_mem = (void *)RAM_BASE; | ||
83 | void *vp; | ||
84 | struct sh_sleep_data *sdp; | ||
85 | int n; | ||
86 | |||
87 | /* part 0: data area */ | ||
88 | sdp = onchip_mem; | ||
89 | sdp->addr.stbcr = 0xa4150020; /* STBCR */ | ||
90 | sdp->addr.bar = 0xa4150040; /* BAR */ | ||
91 | sdp->addr.pteh = 0xff000000; /* PTEH */ | ||
92 | sdp->addr.ptel = 0xff000004; /* PTEL */ | ||
93 | sdp->addr.ttb = 0xff000008; /* TTB */ | ||
94 | sdp->addr.tea = 0xff00000c; /* TEA */ | ||
95 | sdp->addr.mmucr = 0xff000010; /* MMUCR */ | ||
96 | sdp->addr.ptea = 0xff000034; /* PTEA */ | ||
97 | sdp->addr.pascr = 0xff000070; /* PASCR */ | ||
98 | sdp->addr.irmcr = 0xff000078; /* IRMCR */ | ||
99 | sdp->addr.ccr = 0xff00001c; /* CCR */ | ||
100 | sdp->addr.ramcr = 0xff000074; /* RAMCR */ | ||
101 | vp = sdp + 1; | ||
102 | |||
103 | /* part 1: common code to enter sleep mode */ | ||
104 | n = &sh_mobile_sleep_enter_end - &sh_mobile_sleep_enter_start; | ||
105 | memcpy(vp, &sh_mobile_sleep_enter_start, n); | ||
106 | vp += roundup(n, 4); | ||
107 | |||
108 | /* part 2: board specific code to enter self-refresh mode */ | ||
109 | n = pre_end - pre_start; | ||
110 | memcpy(vp, pre_start, n); | ||
111 | sdp->sf_pre = (unsigned long)vp; | ||
112 | vp += roundup(n, 4); | ||
113 | |||
114 | /* part 3: board specific code to resume from self-refresh mode */ | ||
115 | n = post_end - post_start; | ||
116 | memcpy(vp, post_start, n); | ||
117 | sdp->sf_post = (unsigned long)vp; | ||
118 | vp += roundup(n, 4); | ||
119 | |||
120 | /* part 4: common code to resume from sleep mode */ | ||
121 | WARN_ON(vp > (onchip_mem + 0x600)); | ||
122 | vp = onchip_mem + 0x600; /* located at interrupt vector */ | ||
123 | n = &sh_mobile_sleep_resume_end - &sh_mobile_sleep_resume_start; | ||
124 | memcpy(vp, &sh_mobile_sleep_resume_start, n); | ||
125 | sdp->resume = (unsigned long)vp; | ||
126 | |||
127 | sh_mobile_sleep_supported |= flags; | ||
49 | } | 128 | } |
50 | 129 | ||
51 | static int sh_pm_enter(suspend_state_t state) | 130 | static int sh_pm_enter(suspend_state_t state) |
52 | { | 131 | { |
132 | if (!(sh_mobile_sleep_supported & SUSP_MODE_STANDBY_SF)) | ||
133 | return -ENXIO; | ||
134 | |||
53 | local_irq_disable(); | 135 | local_irq_disable(); |
54 | set_bl_bit(); | 136 | set_bl_bit(); |
55 | sh_mobile_call_standby(SUSP_MODE_STANDBY_SF); | 137 | sh_mobile_call_standby(SUSP_MODE_STANDBY_SF); |
@@ -65,13 +147,6 @@ static struct platform_suspend_ops sh_pm_ops = { | |||
65 | 147 | ||
66 | static int __init sh_pm_init(void) | 148 | static int __init sh_pm_init(void) |
67 | { | 149 | { |
68 | void *onchip_mem = (void *)ILRAM_BASE; | ||
69 | |||
70 | /* Copy the assembly snippet to the otherwise ununsed ILRAM */ | ||
71 | memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size); | ||
72 | wmb(); | ||
73 | ctrl_barrier(); | ||
74 | |||
75 | suspend_set_ops(&sh_pm_ops); | 150 | suspend_set_ops(&sh_pm_ops); |
76 | sh_mobile_setup_cpuidle(); | 151 | sh_mobile_setup_cpuidle(); |
77 | return 0; | 152 | return 0; |
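
The new sh_mobile_register_self_refresh() interface above is meant to be fed board- or memory-controller-specific self-refresh enter/leave snippets. A hypothetical caller might look like the sketch below, assuming the function's declaration lives in asm/suspend.h alongside the other helpers; the myboard_sdram_* symbols stand in for assembly snippets that would be defined elsewhere and are not real names.

    #include <linux/init.h>
    #include <asm/suspend.h>

    extern char myboard_sdram_enter_start, myboard_sdram_enter_end;
    extern char myboard_sdram_leave_start, myboard_sdram_leave_end;

    static int __init myboard_self_refresh_init(void)
    {
            /* advertise standby-with-self-refresh support to the PM core */
            sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
                                            &myboard_sdram_enter_start,
                                            &myboard_sdram_enter_end,
                                            &myboard_sdram_leave_start,
                                            &myboard_sdram_leave_end);
            return 0;
    }
    arch_initcall(myboard_self_refresh_init);
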
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c index 7c615b17e20..6dcb8166a64 100644 --- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c +++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c | |||
@@ -45,12 +45,14 @@ static int __platform_pm_runtime_resume(struct platform_device *pdev) | |||
45 | 45 | ||
46 | dev_dbg(d, "__platform_pm_runtime_resume() [%d]\n", hwblk); | 46 | dev_dbg(d, "__platform_pm_runtime_resume() [%d]\n", hwblk); |
47 | 47 | ||
48 | if (d->driver && d->driver->pm && d->driver->pm->runtime_resume) { | 48 | if (d->driver) { |
49 | hwblk_enable(hwblk_info, hwblk); | 49 | hwblk_enable(hwblk_info, hwblk); |
50 | ret = 0; | 50 | ret = 0; |
51 | 51 | ||
52 | if (test_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags)) { | 52 | if (test_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags)) { |
53 | ret = d->driver->pm->runtime_resume(d); | 53 | if (d->driver->pm && d->driver->pm->runtime_resume) |
54 | ret = d->driver->pm->runtime_resume(d); | ||
55 | |||
54 | if (!ret) | 56 | if (!ret) |
55 | clear_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags); | 57 | clear_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags); |
56 | else | 58 | else |
@@ -73,12 +75,15 @@ static int __platform_pm_runtime_suspend(struct platform_device *pdev) | |||
73 | 75 | ||
74 | dev_dbg(d, "__platform_pm_runtime_suspend() [%d]\n", hwblk); | 76 | dev_dbg(d, "__platform_pm_runtime_suspend() [%d]\n", hwblk); |
75 | 77 | ||
76 | if (d->driver && d->driver->pm && d->driver->pm->runtime_suspend) { | 78 | if (d->driver) { |
77 | BUG_ON(!test_bit(PDEV_ARCHDATA_FLAG_IDLE, &ad->flags)); | 79 | BUG_ON(!test_bit(PDEV_ARCHDATA_FLAG_IDLE, &ad->flags)); |
80 | ret = 0; | ||
78 | 81 | ||
79 | hwblk_enable(hwblk_info, hwblk); | 82 | if (d->driver->pm && d->driver->pm->runtime_suspend) { |
80 | ret = d->driver->pm->runtime_suspend(d); | 83 | hwblk_enable(hwblk_info, hwblk); |
81 | hwblk_disable(hwblk_info, hwblk); | 84 | ret = d->driver->pm->runtime_suspend(d); |
85 | hwblk_disable(hwblk_info, hwblk); | ||
86 | } | ||
82 | 87 | ||
83 | if (!ret) { | 88 | if (!ret) { |
84 | set_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags); | 89 | set_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags); |
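
With the change above, a platform driver without runtime PM callbacks still gets its hardware block clock-gated, while one that provides dev_pm_ops callbacks is called around the hwblk enable/disable. A sketch of the driver side, for illustration only (the mydrv_* names are made up):

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int mydrv_runtime_suspend(struct device *dev)
    {
            /* save context; the hardware block clock is about to be stopped */
            return 0;
    }

    static int mydrv_runtime_resume(struct device *dev)
    {
            /* restore context now that the hardware block is clocked again */
            return 0;
    }

    static const struct dev_pm_ops mydrv_pm_ops = {
            .runtime_suspend = mydrv_runtime_suspend,
            .runtime_resume  = mydrv_runtime_resume,
    };

    static struct platform_driver mydrv_driver = {
            .driver = {
                    .name   = "mydrv",
                    .pm     = &mydrv_pm_ops,
            },
    };
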
diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S index a439e6c7824..e9dd7fa0abd 100644 --- a/arch/sh/kernel/cpu/shmobile/sleep.S +++ b/arch/sh/kernel/cpu/shmobile/sleep.S | |||
@@ -20,79 +20,103 @@ | |||
20 | * Kernel mode register usage, see entry.S: | 20 | * Kernel mode register usage, see entry.S: |
21 | * k0 scratch | 21 | * k0 scratch |
22 | * k1 scratch | 22 | * k1 scratch |
23 | * k4 scratch | ||
24 | */ | 23 | */ |
25 | #define k0 r0 | 24 | #define k0 r0 |
26 | #define k1 r1 | 25 | #define k1 r1 |
27 | #define k4 r4 | ||
28 | 26 | ||
29 | /* manage self-refresh and enter standby mode. | 27 | /* manage self-refresh and enter standby mode. must be self-contained. |
30 | * this code will be copied to on-chip memory and executed from there. | 28 | * this code will be copied to on-chip memory and executed from there. |
31 | */ | 29 | */ |
30 | .balign 4 | ||
31 | ENTRY(sh_mobile_sleep_enter_start) | ||
32 | 32 | ||
33 | .balign 4096,0,4096 | 33 | /* save mode flags */ |
34 | ENTRY(sh_mobile_standby) | 34 | mov.l r4, @(SH_SLEEP_MODE, r5) |
35 | 35 | ||
36 | /* save original vbr */ | 36 | /* save original vbr */ |
37 | stc vbr, r1 | 37 | stc vbr, r0 |
38 | mova saved_vbr, r0 | 38 | mov.l r0, @(SH_SLEEP_VBR, r5) |
39 | mov.l r1, @r0 | ||
40 | 39 | ||
41 | /* point vbr to our on-chip memory page */ | 40 | /* point vbr to our on-chip memory page */ |
42 | ldc r5, vbr | 41 | ldc r5, vbr |
43 | 42 | ||
44 | /* save return address */ | 43 | /* save return address */ |
45 | mova saved_spc, r0 | 44 | sts pr, r0 |
46 | sts pr, r5 | 45 | mov.l r0, @(SH_SLEEP_SPC, r5) |
47 | mov.l r5, @r0 | ||
48 | 46 | ||
49 | /* save sr */ | 47 | /* save sr */ |
50 | mova saved_sr, r0 | 48 | stc sr, r0 |
51 | stc sr, r5 | 49 | mov.l r0, @(SH_SLEEP_SR, r5) |
52 | mov.l r5, @r0 | ||
53 | 50 | ||
54 | /* save mode flags */ | 51 | /* save sp */ |
55 | mova saved_mode, r0 | 52 | mov.l r15, @(SH_SLEEP_SP, r5) |
56 | mov.l r4, @r0 | 53 | |
54 | /* save stbcr */ | ||
55 | bsr save_register | ||
56 | mov #SH_SLEEP_REG_STBCR, r0 | ||
57 | |||
58 | /* save mmu and cache context if needed */ | ||
59 | mov.l @(SH_SLEEP_MODE, r5), r0 | ||
60 | tst #SUSP_SH_MMU, r0 | ||
61 | bt skip_mmu_save_disable | ||
62 | |||
63 | /* save mmu state */ | ||
64 | bsr save_register | ||
65 | mov #SH_SLEEP_REG_PTEH, r0 | ||
66 | |||
67 | bsr save_register | ||
68 | mov #SH_SLEEP_REG_PTEL, r0 | ||
69 | |||
70 | bsr save_register | ||
71 | mov #SH_SLEEP_REG_TTB, r0 | ||
72 | |||
73 | bsr save_register | ||
74 | mov #SH_SLEEP_REG_TEA, r0 | ||
75 | |||
76 | bsr save_register | ||
77 | mov #SH_SLEEP_REG_MMUCR, r0 | ||
78 | |||
79 | bsr save_register | ||
80 | mov #SH_SLEEP_REG_PTEA, r0 | ||
81 | |||
82 | bsr save_register | ||
83 | mov #SH_SLEEP_REG_PASCR, r0 | ||
57 | 84 | ||
58 | /* put mode flags in r0 */ | 85 | bsr save_register |
59 | mov r4, r0 | 86 | mov #SH_SLEEP_REG_IRMCR, r0 |
60 | 87 | ||
88 | /* invalidate TLBs and disable the MMU */ | ||
89 | bsr get_register | ||
90 | mov #SH_SLEEP_REG_MMUCR, r0 | ||
91 | mov #4, r1 | ||
92 | mov.l r1, @r0 | ||
93 | icbi @r0 | ||
94 | |||
95 | /* save cache registers and disable caches */ | ||
96 | bsr save_register | ||
97 | mov #SH_SLEEP_REG_CCR, r0 | ||
98 | |||
99 | bsr save_register | ||
100 | mov #SH_SLEEP_REG_RAMCR, r0 | ||
101 | |||
102 | bsr get_register | ||
103 | mov #SH_SLEEP_REG_CCR, r0 | ||
104 | mov #0, r1 | ||
105 | mov.l r1, @r0 | ||
106 | icbi @r0 | ||
107 | |||
108 | skip_mmu_save_disable: | ||
109 | /* call self-refresh entering code if needed */ | ||
110 | mov.l @(SH_SLEEP_MODE, r5), r0 | ||
61 | tst #SUSP_SH_SF, r0 | 111 | tst #SUSP_SH_SF, r0 |
62 | bt skip_set_sf | 112 | bt skip_set_sf |
63 | #ifdef CONFIG_CPU_SUBTYPE_SH7724 | 113 | |
64 | /* DBSC: put memory in self-refresh mode */ | 114 | mov.l @(SH_SLEEP_SF_PRE, r5), r0 |
65 | mov.l dben_reg, r4 | 115 | jsr @r0 |
66 | mov.l dben_data0, r1 | 116 | nop |
67 | mov.l r1, @r4 | ||
68 | |||
69 | mov.l dbrfpdn0_reg, r4 | ||
70 | mov.l dbrfpdn0_data0, r1 | ||
71 | mov.l r1, @r4 | ||
72 | |||
73 | mov.l dbcmdcnt_reg, r4 | ||
74 | mov.l dbcmdcnt_data0, r1 | ||
75 | mov.l r1, @r4 | ||
76 | |||
77 | mov.l dbcmdcnt_reg, r4 | ||
78 | mov.l dbcmdcnt_data1, r1 | ||
79 | mov.l r1, @r4 | ||
80 | |||
81 | mov.l dbrfpdn0_reg, r4 | ||
82 | mov.l dbrfpdn0_data1, r1 | ||
83 | mov.l r1, @r4 | ||
84 | #else | ||
85 | /* SBSC: disable power down and put in self-refresh mode */ | ||
86 | mov.l 1f, r4 | ||
87 | mov.l 2f, r1 | ||
88 | mov.l @r4, r2 | ||
89 | or r1, r2 | ||
90 | mov.l 3f, r3 | ||
91 | and r3, r2 | ||
92 | mov.l r2, @r4 | ||
93 | #endif | ||
94 | 117 | ||
95 | skip_set_sf: | 118 | skip_set_sf: |
119 | mov.l @(SH_SLEEP_MODE, r5), r0 | ||
96 | tst #SUSP_SH_STANDBY, r0 | 120 | tst #SUSP_SH_STANDBY, r0 |
97 | bt test_rstandby | 121 | bt test_rstandby |
98 | 122 | ||
@@ -104,6 +128,12 @@ test_rstandby: | |||
104 | tst #SUSP_SH_RSTANDBY, r0 | 128 | tst #SUSP_SH_RSTANDBY, r0 |
105 | bt test_ustandby | 129 | bt test_ustandby |
106 | 130 | ||
131 | /* setup BAR register */ | ||
132 | bsr get_register | ||
133 | mov #SH_SLEEP_REG_BAR, r0 | ||
134 | mov.l @(SH_SLEEP_RESUME, r5), r1 | ||
135 | mov.l r1, @r0 | ||
136 | |||
107 | /* set mode to "r-standby mode" */ | 137 | /* set mode to "r-standby mode" */ |
108 | bra do_sleep | 138 | bra do_sleep |
109 | mov #0x20, r1 | 139 | mov #0x20, r1 |
@@ -123,124 +153,136 @@ force_sleep: | |||
123 | 153 | ||
124 | do_sleep: | 154 | do_sleep: |
125 | /* setup and enter selected standby mode */ | 155 | /* setup and enter selected standby mode */ |
126 | mov.l 5f, r4 | 156 | bsr get_register |
127 | mov.l r1, @r4 | 157 | mov #SH_SLEEP_REG_STBCR, r0 |
158 | mov.l r1, @r0 | ||
128 | again: | 159 | again: |
129 | sleep | 160 | sleep |
130 | bra again | 161 | bra again |
131 | nop | 162 | nop |
132 | 163 | ||
133 | restore_jump_vbr: | 164 | save_register: |
165 | add #SH_SLEEP_BASE_ADDR, r0 | ||
166 | mov.l @(r0, r5), r1 | ||
167 | add #-SH_SLEEP_BASE_ADDR, r0 | ||
168 | mov.l @r1, r1 | ||
169 | add #SH_SLEEP_BASE_DATA, r0 | ||
170 | mov.l r1, @(r0, r5) | ||
171 | add #-SH_SLEEP_BASE_DATA, r0 | ||
172 | rts | ||
173 | nop | ||
174 | |||
175 | get_register: | ||
176 | add #SH_SLEEP_BASE_ADDR, r0 | ||
177 | mov.l @(r0, r5), r0 | ||
178 | rts | ||
179 | nop | ||
180 | ENTRY(sh_mobile_sleep_enter_end) | ||
181 | |||
182 | .balign 4 | ||
183 | ENTRY(sh_mobile_sleep_resume_start) | ||
184 | |||
185 | /* figure out start address */ | ||
186 | bsr 0f | ||
187 | nop | ||
188 | 0: | ||
189 | sts pr, k1 | ||
190 | mov.l 1f, k0 | ||
191 | and k0, k1 | ||
192 | |||
193 | /* store pointer to data area in VBR */ | ||
194 | ldc k1, vbr | ||
195 | |||
196 | /* setup sr with saved sr */ | ||
197 | mov.l @(SH_SLEEP_SR, k1), k0 | ||
198 | ldc k0, sr | ||
199 | |||
200 | /* now: user register set! */ | ||
201 | stc vbr, r5 | ||
202 | |||
134 | /* setup spc with return address to c code */ | 203 | /* setup spc with return address to c code */ |
135 | mov.l saved_spc, k0 | 204 | mov.l @(SH_SLEEP_SPC, r5), r0 |
136 | ldc k0, spc | 205 | ldc r0, spc |
137 | 206 | ||
138 | /* restore vbr */ | 207 | /* restore vbr */ |
139 | mov.l saved_vbr, k0 | 208 | mov.l @(SH_SLEEP_VBR, r5), r0 |
140 | ldc k0, vbr | 209 | ldc r0, vbr |
141 | 210 | ||
142 | /* setup ssr with saved sr */ | 211 | /* setup ssr with saved sr */ |
143 | mov.l saved_sr, k0 | 212 | mov.l @(SH_SLEEP_SR, r5), r0 |
144 | ldc k0, ssr | 213 | ldc r0, ssr |
145 | 214 | ||
146 | /* get mode flags */ | 215 | /* restore sp */ |
147 | mov.l saved_mode, k0 | 216 | mov.l @(SH_SLEEP_SP, r5), r15 |
148 | 217 | ||
149 | done_sleep: | 218 | /* restore sleep mode register */ |
150 | /* reset standby mode to sleep mode */ | 219 | bsr restore_register |
151 | mov.l 5f, k4 | 220 | mov #SH_SLEEP_REG_STBCR, r0 |
152 | mov #0x00, k1 | ||
153 | mov.l k1, @k4 | ||
154 | 221 | ||
155 | tst #SUSP_SH_SF, k0 | 222 | /* call self-refresh resume code if needed */ |
223 | mov.l @(SH_SLEEP_MODE, r5), r0 | ||
224 | tst #SUSP_SH_SF, r0 | ||
156 | bt skip_restore_sf | 225 | bt skip_restore_sf |
157 | 226 | ||
158 | #ifdef CONFIG_CPU_SUBTYPE_SH7724 | 227 | mov.l @(SH_SLEEP_SF_POST, r5), r0 |
159 | /* DBSC: put memory in auto-refresh mode */ | 228 | jsr @r0 |
160 | mov.l dbrfpdn0_reg, k4 | 229 | nop |
161 | mov.l dbrfpdn0_data0, k1 | 230 | |
162 | mov.l k1, @k4 | ||
163 | |||
164 | nop /* sleep 140 ns */ | ||
165 | nop | ||
166 | nop | ||
167 | nop | ||
168 | |||
169 | mov.l dbcmdcnt_reg, k4 | ||
170 | mov.l dbcmdcnt_data0, k1 | ||
171 | mov.l k1, @k4 | ||
172 | |||
173 | mov.l dbcmdcnt_reg, k4 | ||
174 | mov.l dbcmdcnt_data1, k1 | ||
175 | mov.l k1, @k4 | ||
176 | |||
177 | mov.l dben_reg, k4 | ||
178 | mov.l dben_data1, k1 | ||
179 | mov.l k1, @k4 | ||
180 | |||
181 | mov.l dbrfpdn0_reg, k4 | ||
182 | mov.l dbrfpdn0_data2, k1 | ||
183 | mov.l k1, @k4 | ||
184 | #else | ||
185 | /* SBSC: set auto-refresh mode */ | ||
186 | mov.l 1f, k4 | ||
187 | mov.l @k4, k0 | ||
188 | mov.l 4f, k1 | ||
189 | and k1, k0 | ||
190 | mov.l k0, @k4 | ||
191 | mov.l 6f, k4 | ||
192 | mov.l 8f, k0 | ||
193 | mov.l @k4, k1 | ||
194 | mov #-1, k4 | ||
195 | add k4, k1 | ||
196 | or k1, k0 | ||
197 | mov.l 7f, k1 | ||
198 | mov.l k0, @k1 | ||
199 | #endif | ||
200 | skip_restore_sf: | 231 | skip_restore_sf: |
201 | /* jump to vbr vector */ | 232 | /* restore mmu and cache state if needed */ |
202 | mov.l saved_vbr, k0 | 233 | mov.l @(SH_SLEEP_MODE, r5), r0 |
203 | mov.l offset_vbr, k4 | 234 | tst #SUSP_SH_MMU, r0 |
204 | add k4, k0 | 235 | bt skip_restore_mmu |
205 | jmp @k0 | 236 | |
237 | /* restore mmu state */ | ||
238 | bsr restore_register | ||
239 | mov #SH_SLEEP_REG_PTEH, r0 | ||
240 | |||
241 | bsr restore_register | ||
242 | mov #SH_SLEEP_REG_PTEL, r0 | ||
243 | |||
244 | bsr restore_register | ||
245 | mov #SH_SLEEP_REG_TTB, r0 | ||
246 | |||
247 | bsr restore_register | ||
248 | mov #SH_SLEEP_REG_TEA, r0 | ||
249 | |||
250 | bsr restore_register | ||
251 | mov #SH_SLEEP_REG_PTEA, r0 | ||
252 | |||
253 | bsr restore_register | ||
254 | mov #SH_SLEEP_REG_PASCR, r0 | ||
255 | |||
256 | bsr restore_register | ||
257 | mov #SH_SLEEP_REG_IRMCR, r0 | ||
258 | |||
259 | bsr restore_register | ||
260 | mov #SH_SLEEP_REG_MMUCR, r0 | ||
261 | icbi @r0 | ||
262 | |||
263 | /* restore cache settings */ | ||
264 | bsr restore_register | ||
265 | mov #SH_SLEEP_REG_RAMCR, r0 | ||
266 | icbi @r0 | ||
267 | |||
268 | bsr restore_register | ||
269 | mov #SH_SLEEP_REG_CCR, r0 | ||
270 | icbi @r0 | ||
271 | |||
272 | skip_restore_mmu: | ||
273 | rte | ||
206 | nop | 274 | nop |
207 | 275 | ||
208 | .balign 4 | 276 | restore_register: |
209 | saved_mode: .long 0 | 277 | add #SH_SLEEP_BASE_DATA, r0 |
210 | saved_spc: .long 0 | 278 | mov.l @(r0, r5), r1 |
211 | saved_sr: .long 0 | 279 | add #-SH_SLEEP_BASE_DATA, r0 |
212 | saved_vbr: .long 0 | 280 | add #SH_SLEEP_BASE_ADDR, r0 |
213 | offset_vbr: .long 0x600 | 281 | mov.l @(r0, r5), r0 |
214 | #ifdef CONFIG_CPU_SUBTYPE_SH7724 | 282 | mov.l r1, @r0 |
215 | dben_reg: .long 0xfd000010 /* DBEN */ | 283 | rts |
216 | dben_data0: .long 0 | ||
217 | dben_data1: .long 1 | ||
218 | dbrfpdn0_reg: .long 0xfd000040 /* DBRFPDN0 */ | ||
219 | dbrfpdn0_data0: .long 0 | ||
220 | dbrfpdn0_data1: .long 1 | ||
221 | dbrfpdn0_data2: .long 0x00010000 | ||
222 | dbcmdcnt_reg: .long 0xfd000014 /* DBCMDCNT */ | ||
223 | dbcmdcnt_data0: .long 2 | ||
224 | dbcmdcnt_data1: .long 4 | ||
225 | #else | ||
226 | 1: .long 0xfe400008 /* SDCR0 */ | ||
227 | 2: .long 0x00000400 | ||
228 | 3: .long 0xffff7fff | ||
229 | 4: .long 0xfffffbff | ||
230 | #endif | ||
231 | 5: .long 0xa4150020 /* STBCR */ | ||
232 | 6: .long 0xfe40001c /* RTCOR */ | ||
233 | 7: .long 0xfe400018 /* RTCNT */ | ||
234 | 8: .long 0xa55a0000 | ||
235 | |||
236 | |||
237 | /* interrupt vector @ 0x600 */ | ||
238 | .balign 0x400,0,0x400 | ||
239 | .long 0xdeadbeef | ||
240 | .balign 0x200,0,0x200 | ||
241 | bra restore_jump_vbr | ||
242 | nop | 284 | nop |
243 | sh_mobile_standby_end: | ||
244 | 285 | ||
245 | ENTRY(sh_mobile_standby_size) | 286 | .balign 4 |
246 | .long sh_mobile_standby_end - sh_mobile_standby | 287 | 1: .long ~0x7ff |
288 | ENTRY(sh_mobile_sleep_resume_end) | ||
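The rewritten sleep/resume code above drops the fixed saved_* words and the board-specific SDRAM register pokes in favour of a data area whose base pointer travels in VBR: each SH_SLEEP_REG_* index selects a hardware register address in one table and a saved-value slot in another, and the self-refresh hooks are reached through SH_SLEEP_SF_PRE/SH_SLEEP_SF_POST pointers kept in the same area. A rough user-space C model of that convention follows; the struct layout, names and values are illustrative stand-ins, not the real asm-offsets.

/*
 * Rough user-space C model of the sleep data area convention used by the
 * new code: a base pointer (held in VBR in the assembly) gives access to
 * two parallel tables, one of hardware register addresses and one of
 * saved values, indexed by a small register id. Layout, names and the
 * fake register value below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_SLEEP_REGS 16

struct sleep_data {
        uint32_t mode;                          /* SUSP_SH_* flags */
        volatile uint32_t *addr[NR_SLEEP_REGS]; /* register addresses */
        uint32_t data[NR_SLEEP_REGS];           /* saved contents */
};

/* mirrors save_register: read the register and stash its value */
static void model_save_register(struct sleep_data *sd, int reg)
{
        sd->data[reg] = *sd->addr[reg];
}

/* mirrors restore_register: write the saved value back */
static void model_restore_register(struct sleep_data *sd, int reg)
{
        *sd->addr[reg] = sd->data[reg];
}

int main(void)
{
        static uint32_t fake_stbcr = 0x12345678;
        struct sleep_data sd = { .addr = { [0] = &fake_stbcr } };

        model_save_register(&sd, 0);    /* suspend path */
        fake_stbcr = 0;                 /* contents lost across the sleep state */
        model_restore_register(&sd, 0); /* resume path */

        printf("register restored to %#x\n", (unsigned)fake_stbcr);
        return 0;
}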
diff --git a/arch/sh/kernel/cpu/ubc.S b/arch/sh/kernel/cpu/ubc.S deleted file mode 100644 index 81923079fa1..00000000000 --- a/arch/sh/kernel/cpu/ubc.S +++ /dev/null | |||
@@ -1,59 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/cpu/ubc.S | ||
3 | * | ||
4 | * Set of management routines for the User Break Controller (UBC) | ||
5 | * | ||
6 | * Copyright (C) 2002 Paul Mundt | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | #include <linux/linkage.h> | ||
14 | #include <asm/ubc.h> | ||
15 | |||
16 | #define STBCR2 0xffc00010 | ||
17 | |||
18 | ENTRY(ubc_sleep) | ||
19 | mov #0, r0 | ||
20 | |||
21 | mov.l 1f, r1 ! Zero out UBC_BBRA .. | ||
22 | mov.w r0, @r1 | ||
23 | |||
24 | mov.l 2f, r1 ! .. same for BBRB .. | ||
25 | mov.w r0, @r1 | ||
26 | |||
27 | mov.l 3f, r1 ! .. and again for BRCR. | ||
28 | mov.w r0, @r1 | ||
29 | |||
30 | mov.w @r1, r0 ! Dummy read BRCR | ||
31 | |||
32 | mov.l 4f, r1 ! Set MSTP5 in STBCR2 | ||
33 | mov.b @r1, r0 | ||
34 | or #0x01, r0 | ||
35 | mov.b r0, @r1 | ||
36 | |||
37 | mov.b @r1, r0 ! Two dummy reads .. | ||
38 | mov.b @r1, r0 | ||
39 | |||
40 | rts | ||
41 | nop | ||
42 | |||
43 | ENTRY(ubc_wakeup) | ||
44 | mov.l 4f, r1 ! Clear MSTP5 | ||
45 | mov.b @r1, r0 | ||
46 | and #0xfe, r0 | ||
47 | mov.b r0, @r1 | ||
48 | |||
49 | mov.b @r1, r0 ! Two more dummy reads .. | ||
50 | mov.b @r1, r0 | ||
51 | |||
52 | rts | ||
53 | nop | ||
54 | |||
55 | 1: .long UBC_BBRA | ||
56 | 2: .long UBC_BBRB | ||
57 | 3: .long UBC_BRCR | ||
58 | 4: .long STBCR2 | ||
59 | |||
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c new file mode 100644 index 00000000000..3c55b87f8b6 --- /dev/null +++ b/arch/sh/kernel/dma-nommu.c | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * DMA mapping support for platforms lacking IOMMUs. | ||
3 | * | ||
4 | * Copyright (C) 2009 Paul Mundt | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/dma-mapping.h> | ||
11 | #include <linux/io.h> | ||
12 | |||
13 | static dma_addr_t nommu_map_page(struct device *dev, struct page *page, | ||
14 | unsigned long offset, size_t size, | ||
15 | enum dma_data_direction dir, | ||
16 | struct dma_attrs *attrs) | ||
17 | { | ||
18 | dma_addr_t addr = page_to_phys(page) + offset; | ||
19 | |||
20 | WARN_ON(size == 0); | ||
21 | dma_cache_sync(dev, page_address(page) + offset, size, dir); | ||
22 | |||
23 | return addr; | ||
24 | } | ||
25 | |||
26 | static int nommu_map_sg(struct device *dev, struct scatterlist *sg, | ||
27 | int nents, enum dma_data_direction dir, | ||
28 | struct dma_attrs *attrs) | ||
29 | { | ||
30 | struct scatterlist *s; | ||
31 | int i; | ||
32 | |||
33 | WARN_ON(nents == 0 || sg[0].length == 0); | ||
34 | |||
35 | for_each_sg(sg, s, nents, i) { | ||
36 | BUG_ON(!sg_page(s)); | ||
37 | |||
38 | dma_cache_sync(dev, sg_virt(s), s->length, dir); | ||
39 | |||
40 | s->dma_address = sg_phys(s); | ||
41 | s->dma_length = s->length; | ||
42 | } | ||
43 | |||
44 | return nents; | ||
45 | } | ||
46 | |||
47 | #ifdef CONFIG_DMA_NONCOHERENT | ||
48 | static void nommu_sync_single(struct device *dev, dma_addr_t addr, | ||
49 | size_t size, enum dma_data_direction dir) | ||
50 | { | ||
51 | dma_cache_sync(dev, phys_to_virt(addr), size, dir); | ||
52 | } | ||
53 | |||
54 | static void nommu_sync_sg(struct device *dev, struct scatterlist *sg, | ||
55 | int nelems, enum dma_data_direction dir) | ||
56 | { | ||
57 | struct scatterlist *s; | ||
58 | int i; | ||
59 | |||
60 | for_each_sg(sg, s, nelems, i) | ||
61 | dma_cache_sync(dev, sg_virt(s), s->length, dir); | ||
62 | } | ||
63 | #endif | ||
64 | |||
65 | struct dma_map_ops nommu_dma_ops = { | ||
66 | .alloc_coherent = dma_generic_alloc_coherent, | ||
67 | .free_coherent = dma_generic_free_coherent, | ||
68 | .map_page = nommu_map_page, | ||
69 | .map_sg = nommu_map_sg, | ||
70 | #ifdef CONFIG_DMA_NONCOHERENT | ||
71 | .sync_single_for_device = nommu_sync_single, | ||
72 | .sync_sg_for_device = nommu_sync_sg, | ||
73 | #endif | ||
74 | .is_phys = 1, | ||
75 | }; | ||
76 | |||
77 | void __init no_iommu_init(void) | ||
78 | { | ||
79 | if (dma_ops) | ||
80 | return; | ||
81 | dma_ops = &nommu_dma_ops; | ||
82 | } | ||
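The new dma-nommu.c wires the generic dma_map_ops interface up for parts without an IOMMU: a DMA address is simply the physical address, and on DMA_NONCOHERENT parts the map and sync hooks push the buffer out of the CPU cache first. The sketch below models the nommu_map_sg() loop in plain user-space C under those assumptions; the scatterlist struct and the cache-sync helper are stand-ins for the kernel types, and the "physical" address is just the pointer value in this toy.

/*
 * Simplified model of nommu_map_sg() for a non-IOMMU platform: no address
 * translation, so dma_address == physical address, and on a non-coherent
 * CPU the only real work is flushing the buffer out of the cache.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct scatterlist {
        void     *vaddr;        /* stands in for sg_virt() */
        uintptr_t phys;         /* stands in for sg_phys() */
        size_t    length;
        uintptr_t dma_address;
        size_t    dma_length;
};

/* stand-in for dma_cache_sync(): pretend to write back/invalidate */
static void fake_cache_sync(void *vaddr, size_t len)
{
        printf("cache sync %zu bytes at %p\n", len, vaddr);
}

static int model_map_sg(struct scatterlist *sg, int nents)
{
        for (int i = 0; i < nents; i++) {
                fake_cache_sync(sg[i].vaddr, sg[i].length);
                sg[i].dma_address = sg[i].phys; /* 1:1 mapping */
                sg[i].dma_length  = sg[i].length;
        }
        return nents;
}

int main(void)
{
        static char buf[64];
        struct scatterlist sg = { buf, (uintptr_t)buf, sizeof(buf) };

        model_map_sg(&sg, 1);
        printf("dma_address=%#lx len=%zu\n",
               (unsigned long)sg.dma_address, sg.dma_length);
        return 0;
}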
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index d76a23170db..3576b709f05 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/list.h> | 20 | #include <linux/list.h> |
21 | #include <linux/mempool.h> | 21 | #include <linux/mempool.h> |
22 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
23 | #include <linux/elf.h> | ||
23 | #include <linux/ftrace.h> | 24 | #include <linux/ftrace.h> |
24 | #include <asm/dwarf.h> | 25 | #include <asm/dwarf.h> |
25 | #include <asm/unwinder.h> | 26 | #include <asm/unwinder.h> |
@@ -530,7 +531,18 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, | |||
530 | } | 531 | } |
531 | 532 | ||
532 | /** | 533 | /** |
533 | * dwarf_unwind_stack - recursively unwind the stack | 534 | * dwarf_free_frame - free the memory allocated for @frame |
535 | * @frame: the frame to free | ||
536 | */ | ||
537 | void dwarf_free_frame(struct dwarf_frame *frame) | ||
538 | { | ||
539 | dwarf_frame_free_regs(frame); | ||
540 | mempool_free(frame, dwarf_frame_pool); | ||
541 | } | ||
542 | |||
543 | /** | ||
544 | * dwarf_unwind_stack - unwind the stack | ||
545 | * | ||
534 | * @pc: address of the function to unwind | 546 | * @pc: address of the function to unwind |
535 | * @prev: struct dwarf_frame of the previous stackframe on the callstack | 547 | * @prev: struct dwarf_frame of the previous stackframe on the callstack |
536 | * | 548 | * |
@@ -548,9 +560,9 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc, | |||
548 | unsigned long addr; | 560 | unsigned long addr; |
549 | 561 | ||
550 | /* | 562 | /* |
551 | * If this is the first invocation of this recursive function we | 563 | * If we're starting at the top of the stack we need get the |
552 | * need get the contents of a physical register to get the CFA | 564 | * contents of a physical register to get the CFA in order to |
553 | * in order to begin the virtual unwinding of the stack. | 565 | * begin the virtual unwinding of the stack. |
554 | * | 566 | * |
555 | * NOTE: the return address is guaranteed to be setup by the | 567 | * NOTE: the return address is guaranteed to be setup by the |
556 | * time this function makes its first function call. | 568 | * time this function makes its first function call. |
@@ -593,9 +605,8 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc, | |||
593 | fde = dwarf_lookup_fde(pc); | 605 | fde = dwarf_lookup_fde(pc); |
594 | if (!fde) { | 606 | if (!fde) { |
595 | /* | 607 | /* |
596 | * This is our normal exit path - the one that stops the | 608 | * This is our normal exit path. There are two reasons |
597 | * recursion. There's two reasons why we might exit | 609 | * why we might exit here, |
598 | * here, | ||
599 | * | 610 | * |
600 | * a) pc has no associated DWARF frame info and so | 611 | * a) pc has no associated DWARF frame info and so
601 | * we don't know how to unwind this frame. This is | 612 | * we don't know how to unwind this frame. This is |
@@ -637,10 +648,10 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc, | |||
637 | 648 | ||
638 | } else { | 649 | } else { |
639 | /* | 650 | /* |
640 | * Again, this is the first invocation of this | 651 | * Again, we're starting from the top of the |
641 | * recurisve function. We need to physically | 652 | * stack. We need to physically read |
642 | * read the contents of a register in order to | 653 | * the contents of a register in order to get |
643 | * get the Canonical Frame Address for this | 654 | * the Canonical Frame Address for this |
644 | * function. | 655 | * function. |
645 | */ | 656 | */ |
646 | frame->cfa = dwarf_read_arch_reg(frame->cfa_register); | 657 | frame->cfa = dwarf_read_arch_reg(frame->cfa_register); |
@@ -670,13 +681,12 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc, | |||
670 | return frame; | 681 | return frame; |
671 | 682 | ||
672 | bail: | 683 | bail: |
673 | dwarf_frame_free_regs(frame); | 684 | dwarf_free_frame(frame); |
674 | mempool_free(frame, dwarf_frame_pool); | ||
675 | return NULL; | 685 | return NULL; |
676 | } | 686 | } |
677 | 687 | ||
678 | static int dwarf_parse_cie(void *entry, void *p, unsigned long len, | 688 | static int dwarf_parse_cie(void *entry, void *p, unsigned long len, |
679 | unsigned char *end) | 689 | unsigned char *end, struct module *mod) |
680 | { | 690 | { |
681 | struct dwarf_cie *cie; | 691 | struct dwarf_cie *cie; |
682 | unsigned long flags; | 692 | unsigned long flags; |
@@ -772,6 +782,8 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len, | |||
772 | cie->initial_instructions = p; | 782 | cie->initial_instructions = p; |
773 | cie->instructions_end = end; | 783 | cie->instructions_end = end; |
774 | 784 | ||
785 | cie->mod = mod; | ||
786 | |||
775 | /* Add to list */ | 787 | /* Add to list */ |
776 | spin_lock_irqsave(&dwarf_cie_lock, flags); | 788 | spin_lock_irqsave(&dwarf_cie_lock, flags); |
777 | list_add_tail(&cie->link, &dwarf_cie_list); | 789 | list_add_tail(&cie->link, &dwarf_cie_list); |
@@ -782,7 +794,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len, | |||
782 | 794 | ||
783 | static int dwarf_parse_fde(void *entry, u32 entry_type, | 795 | static int dwarf_parse_fde(void *entry, u32 entry_type, |
784 | void *start, unsigned long len, | 796 | void *start, unsigned long len, |
785 | unsigned char *end) | 797 | unsigned char *end, struct module *mod) |
786 | { | 798 | { |
787 | struct dwarf_fde *fde; | 799 | struct dwarf_fde *fde; |
788 | struct dwarf_cie *cie; | 800 | struct dwarf_cie *cie; |
@@ -831,6 +843,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type, | |||
831 | fde->instructions = p; | 843 | fde->instructions = p; |
832 | fde->end = end; | 844 | fde->end = end; |
833 | 845 | ||
846 | fde->mod = mod; | ||
847 | |||
834 | /* Add to list. */ | 848 | /* Add to list. */ |
835 | spin_lock_irqsave(&dwarf_fde_lock, flags); | 849 | spin_lock_irqsave(&dwarf_fde_lock, flags); |
836 | list_add_tail(&fde->link, &dwarf_fde_list); | 850 | list_add_tail(&fde->link, &dwarf_fde_list); |
@@ -854,10 +868,8 @@ static void dwarf_unwinder_dump(struct task_struct *task, | |||
854 | while (1) { | 868 | while (1) { |
855 | frame = dwarf_unwind_stack(return_addr, _frame); | 869 | frame = dwarf_unwind_stack(return_addr, _frame); |
856 | 870 | ||
857 | if (_frame) { | 871 | if (_frame) |
858 | dwarf_frame_free_regs(_frame); | 872 | dwarf_free_frame(_frame); |
859 | mempool_free(_frame, dwarf_frame_pool); | ||
860 | } | ||
861 | 873 | ||
862 | _frame = frame; | 874 | _frame = frame; |
863 | 875 | ||
@@ -867,6 +879,9 @@ static void dwarf_unwinder_dump(struct task_struct *task, | |||
867 | return_addr = frame->return_addr; | 879 | return_addr = frame->return_addr; |
868 | ops->address(data, return_addr, 1); | 880 | ops->address(data, return_addr, 1); |
869 | } | 881 | } |
882 | |||
883 | if (frame) | ||
884 | dwarf_free_frame(frame); | ||
870 | } | 885 | } |
871 | 886 | ||
872 | static struct unwinder dwarf_unwinder = { | 887 | static struct unwinder dwarf_unwinder = { |
@@ -896,48 +911,28 @@ static void dwarf_unwinder_cleanup(void) | |||
896 | } | 911 | } |
897 | 912 | ||
898 | /** | 913 | /** |
899 | * dwarf_unwinder_init - initialise the dwarf unwinder | 914 | * dwarf_parse_section - parse DWARF section |
915 | * @eh_frame_start: start address of the .eh_frame section | ||
916 | * @eh_frame_end: end address of the .eh_frame section | ||
917 | * @mod: the kernel module containing the .eh_frame section | ||
900 | * | 918 | * |
901 | * Build the data structures describing the .dwarf_frame section to | 919 | * Parse the information in a .eh_frame section. |
902 | * make it easier to lookup CIE and FDE entries. Because the | ||
903 | * .eh_frame section is packed as tightly as possible it is not | ||
904 | * easy to lookup the FDE for a given PC, so we build a list of FDE | ||
905 | * and CIE entries that make it easier. | ||
906 | */ | 920 | */ |
907 | static int __init dwarf_unwinder_init(void) | 921 | static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end, |
922 | struct module *mod) | ||
908 | { | 923 | { |
909 | u32 entry_type; | 924 | u32 entry_type; |
910 | void *p, *entry; | 925 | void *p, *entry; |
911 | int count, err = 0; | 926 | int count, err = 0; |
912 | unsigned long len; | 927 | unsigned long len = 0; |
913 | unsigned int c_entries, f_entries; | 928 | unsigned int c_entries, f_entries; |
914 | unsigned char *end; | 929 | unsigned char *end; |
915 | INIT_LIST_HEAD(&dwarf_cie_list); | ||
916 | INIT_LIST_HEAD(&dwarf_fde_list); | ||
917 | 930 | ||
918 | c_entries = 0; | 931 | c_entries = 0; |
919 | f_entries = 0; | 932 | f_entries = 0; |
920 | entry = &__start_eh_frame; | 933 | entry = eh_frame_start; |
921 | |||
922 | dwarf_frame_cachep = kmem_cache_create("dwarf_frames", | ||
923 | sizeof(struct dwarf_frame), 0, | ||
924 | SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); | ||
925 | |||
926 | dwarf_reg_cachep = kmem_cache_create("dwarf_regs", | ||
927 | sizeof(struct dwarf_reg), 0, | ||
928 | SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); | ||
929 | 934 | ||
930 | dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ, | 935 | while ((char *)entry < eh_frame_end) { |
931 | mempool_alloc_slab, | ||
932 | mempool_free_slab, | ||
933 | dwarf_frame_cachep); | ||
934 | |||
935 | dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ, | ||
936 | mempool_alloc_slab, | ||
937 | mempool_free_slab, | ||
938 | dwarf_reg_cachep); | ||
939 | |||
940 | while ((char *)entry < __stop_eh_frame) { | ||
941 | p = entry; | 936 | p = entry; |
942 | 937 | ||
943 | count = dwarf_entry_len(p, &len); | 938 | count = dwarf_entry_len(p, &len); |
@@ -949,6 +944,7 @@ static int __init dwarf_unwinder_init(void) | |||
949 | * entry and move to the next one because 'len' | 944 | * entry and move to the next one because 'len' |
950 | * tells us where our next entry is. | 945 | * tells us where our next entry is. |
951 | */ | 946 | */ |
947 | err = -EINVAL; | ||
952 | goto out; | 948 | goto out; |
953 | } else | 949 | } else |
954 | p += count; | 950 | p += count; |
@@ -960,13 +956,14 @@ static int __init dwarf_unwinder_init(void) | |||
960 | p += 4; | 956 | p += 4; |
961 | 957 | ||
962 | if (entry_type == DW_EH_FRAME_CIE) { | 958 | if (entry_type == DW_EH_FRAME_CIE) { |
963 | err = dwarf_parse_cie(entry, p, len, end); | 959 | err = dwarf_parse_cie(entry, p, len, end, mod); |
964 | if (err < 0) | 960 | if (err < 0) |
965 | goto out; | 961 | goto out; |
966 | else | 962 | else |
967 | c_entries++; | 963 | c_entries++; |
968 | } else { | 964 | } else { |
969 | err = dwarf_parse_fde(entry, entry_type, p, len, end); | 965 | err = dwarf_parse_fde(entry, entry_type, p, len, |
966 | end, mod); | ||
970 | if (err < 0) | 967 | if (err < 0) |
971 | goto out; | 968 | goto out; |
972 | else | 969 | else |
@@ -979,6 +976,129 @@ static int __init dwarf_unwinder_init(void) | |||
979 | printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n", | 976 | printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n", |
980 | c_entries, f_entries); | 977 | c_entries, f_entries); |
981 | 978 | ||
979 | return 0; | ||
980 | |||
981 | out: | ||
982 | return err; | ||
983 | } | ||
984 | |||
985 | #ifdef CONFIG_MODULES | ||
986 | int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, | ||
987 | struct module *me) | ||
988 | { | ||
989 | unsigned int i, err; | ||
990 | unsigned long start, end; | ||
991 | char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; | ||
992 | |||
993 | start = end = 0; | ||
994 | |||
995 | for (i = 1; i < hdr->e_shnum; i++) { | ||
996 | /* Alloc bit cleared means "ignore it." */ | ||
997 | if ((sechdrs[i].sh_flags & SHF_ALLOC) | ||
998 | && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) { | ||
999 | start = sechdrs[i].sh_addr; | ||
1000 | end = start + sechdrs[i].sh_size; | ||
1001 | break; | ||
1002 | } | ||
1003 | } | ||
1004 | |||
1005 | /* Did we find the .eh_frame section? */ | ||
1006 | if (i != hdr->e_shnum) { | ||
1007 | err = dwarf_parse_section((char *)start, (char *)end, me); | ||
1008 | if (err) { | ||
1009 | printk(KERN_WARNING "%s: failed to parse DWARF info\n", | ||
1010 | me->name); | ||
1011 | return err; | ||
1012 | } | ||
1013 | } | ||
1014 | |||
1015 | return 0; | ||
1016 | } | ||
1017 | |||
1018 | /** | ||
1019 | * module_dwarf_cleanup - remove FDE/CIEs associated with @mod | ||
1020 | * @mod: the module that is being unloaded | ||
1021 | * | ||
1022 | * Remove any FDEs and CIEs from the global lists that came from | ||
1023 | * @mod's .eh_frame section because @mod is being unloaded. | ||
1024 | */ | ||
1025 | void module_dwarf_cleanup(struct module *mod) | ||
1026 | { | ||
1027 | struct dwarf_fde *fde; | ||
1028 | struct dwarf_cie *cie; | ||
1029 | unsigned long flags; | ||
1030 | |||
1031 | spin_lock_irqsave(&dwarf_cie_lock, flags); | ||
1032 | |||
1033 | again_cie: | ||
1034 | list_for_each_entry(cie, &dwarf_cie_list, link) { | ||
1035 | if (cie->mod == mod) | ||
1036 | break; | ||
1037 | } | ||
1038 | |||
1039 | if (&cie->link != &dwarf_cie_list) { | ||
1040 | list_del(&cie->link); | ||
1041 | kfree(cie); | ||
1042 | goto again_cie; | ||
1043 | } | ||
1044 | |||
1045 | spin_unlock_irqrestore(&dwarf_cie_lock, flags); | ||
1046 | |||
1047 | spin_lock_irqsave(&dwarf_fde_lock, flags); | ||
1048 | |||
1049 | again_fde: | ||
1050 | list_for_each_entry(fde, &dwarf_fde_list, link) { | ||
1051 | if (fde->mod == mod) | ||
1052 | break; | ||
1053 | } | ||
1054 | |||
1055 | if (&fde->link != &dwarf_fde_list) { | ||
1056 | list_del(&fde->link); | ||
1057 | kfree(fde); | ||
1058 | goto again_fde; | ||
1059 | } | ||
1060 | |||
1061 | spin_unlock_irqrestore(&dwarf_fde_lock, flags); | ||
1062 | } | ||
1063 | #endif /* CONFIG_MODULES */ | ||
1064 | |||
1065 | /** | ||
1066 | * dwarf_unwinder_init - initialise the dwarf unwinder | ||
1067 | * | ||
1068 | * Build the data structures describing the .dwarf_frame section to | ||
1069 | * make it easier to lookup CIE and FDE entries. Because the | ||
1070 | * .eh_frame section is packed as tightly as possible it is not | ||
1071 | * easy to lookup the FDE for a given PC, so we build a list of FDE | ||
1072 | * and CIE entries that make it easier. | ||
1073 | */ | ||
1074 | static int __init dwarf_unwinder_init(void) | ||
1075 | { | ||
1076 | int err; | ||
1077 | INIT_LIST_HEAD(&dwarf_cie_list); | ||
1078 | INIT_LIST_HEAD(&dwarf_fde_list); | ||
1079 | |||
1080 | dwarf_frame_cachep = kmem_cache_create("dwarf_frames", | ||
1081 | sizeof(struct dwarf_frame), 0, | ||
1082 | SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); | ||
1083 | |||
1084 | dwarf_reg_cachep = kmem_cache_create("dwarf_regs", | ||
1085 | sizeof(struct dwarf_reg), 0, | ||
1086 | SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); | ||
1087 | |||
1088 | dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ, | ||
1089 | mempool_alloc_slab, | ||
1090 | mempool_free_slab, | ||
1091 | dwarf_frame_cachep); | ||
1092 | |||
1093 | dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ, | ||
1094 | mempool_alloc_slab, | ||
1095 | mempool_free_slab, | ||
1096 | dwarf_reg_cachep); | ||
1097 | |||
1098 | err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL); | ||
1099 | if (err) | ||
1100 | goto out; | ||
1101 | |||
982 | err = unwinder_register(&dwarf_unwinder); | 1102 | err = unwinder_register(&dwarf_unwinder); |
983 | if (err) | 1103 | if (err) |
984 | goto out; | 1104 | goto out; |
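The dwarf.c changes above split the .eh_frame walker out of dwarf_unwinder_init() into dwarf_parse_section() so that modules can register their own frame data at load time, tag every CIE and FDE with the owning module, and let module_dwarf_cleanup() drop them again on unload (the unwinder dump also now frees its final frame). The snippet below is a minimal user-space model of that ownership/cleanup idea; it uses a pointer-to-pointer list walk instead of the kernel's restart-after-delete loop, and all names and types are stand-ins.

/*
 * Minimal model of module_dwarf_cleanup(): every entry carries a pointer
 * to the "module" that registered it, and unloading that module unlinks
 * and frees all of its entries.
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
        struct entry *next;
        const void   *owner;    /* plays the role of cie->mod / fde->mod */
        int           id;
};

static struct entry *cie_list;

static void add_entry(const void *owner, int id)
{
        struct entry *e = malloc(sizeof(*e));

        e->owner = owner;
        e->id = id;
        e->next = cie_list;
        cie_list = e;
}

/* drop every entry whose owner matches @mod */
static void cleanup_owner(const void *mod)
{
        struct entry **pp = &cie_list;

        while (*pp) {
                if ((*pp)->owner == mod) {
                        struct entry *dead = *pp;
                        *pp = dead->next;
                        free(dead);
                } else {
                        pp = &(*pp)->next;
                }
        }
}

int main(void)
{
        int mod_a = 0, mod_b = 0;       /* fake module identities */

        add_entry(&mod_a, 1);
        add_entry(&mod_b, 2);
        add_entry(&mod_a, 3);

        cleanup_owner(&mod_a);          /* "module unload" */

        for (struct entry *e = cie_list; e; e = e->next)
                printf("remaining entry %d\n", e->id);
        return 0;
}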
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index 3eb84931d2a..f0abd58c3a6 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S | |||
@@ -133,7 +133,7 @@ work_pending: | |||
133 | ! r8: current_thread_info | 133 | ! r8: current_thread_info |
134 | ! t: result of "tst #_TIF_NEED_RESCHED, r0" | 134 | ! t: result of "tst #_TIF_NEED_RESCHED, r0" |
135 | bf/s work_resched | 135 | bf/s work_resched |
136 | tst #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0 | 136 | tst #_TIF_SIGPENDING, r0 |
137 | work_notifysig: | 137 | work_notifysig: |
138 | bt/s __restore_all | 138 | bt/s __restore_all |
139 | mov r15, r4 | 139 | mov r15, r4 |
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 2c48e267256..b6f41c109be 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c | |||
@@ -62,6 +62,150 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) | |||
62 | return ftrace_replaced_code; | 62 | return ftrace_replaced_code; |
63 | } | 63 | } |
64 | 64 | ||
65 | /* | ||
66 | * Modifying code must take extra care. On an SMP machine, if | ||
67 | * the code being modified is also being executed on another CPU | ||
68 | * that CPU will have undefined results and possibly take a GPF. | ||
69 | * We use kstop_machine to stop other CPUs from executing code. | ||
70 | * But this does not stop NMIs from happening. We still need | ||
71 | * to protect against that. We separate out the modification of | ||
72 | * the code to take care of this. | ||
73 | * | ||
74 | * Two buffers are added: An IP buffer and a "code" buffer. | ||
75 | * | ||
76 | * 1) Put the instruction pointer into the IP buffer | ||
77 | * and the new code into the "code" buffer. | ||
78 | * 2) Wait for any running NMIs to finish and set a flag that says | ||
79 | * we are modifying code; this is done in an atomic operation. | ||
80 | * 3) Write the code | ||
81 | * 4) clear the flag. | ||
82 | * 5) Wait for any running NMIs to finish. | ||
83 | * | ||
84 | * If an NMI is executed, the first thing it does is to call | ||
85 | * "ftrace_nmi_enter". This will check if the flag is set to write | ||
86 | * and if it is, it will write what is in the IP and "code" buffers. | ||
87 | * | ||
88 | * The trick is, it does not matter if everyone is writing the same | ||
89 | * content to the code location. Also, if a CPU is executing code | ||
90 | * it is OK to write to that code location if the contents being written | ||
91 | * are the same as what exists. | ||
92 | */ | ||
93 | #define MOD_CODE_WRITE_FLAG (1 << 31) /* set when NMI should do the write */ | ||
94 | static atomic_t nmi_running = ATOMIC_INIT(0); | ||
95 | static int mod_code_status; /* holds return value of text write */ | ||
96 | static void *mod_code_ip; /* holds the IP to write to */ | ||
97 | static void *mod_code_newcode; /* holds the text to write to the IP */ | ||
98 | |||
99 | static unsigned nmi_wait_count; | ||
100 | static atomic_t nmi_update_count = ATOMIC_INIT(0); | ||
101 | |||
102 | int ftrace_arch_read_dyn_info(char *buf, int size) | ||
103 | { | ||
104 | int r; | ||
105 | |||
106 | r = snprintf(buf, size, "%u %u", | ||
107 | nmi_wait_count, | ||
108 | atomic_read(&nmi_update_count)); | ||
109 | return r; | ||
110 | } | ||
111 | |||
112 | static void clear_mod_flag(void) | ||
113 | { | ||
114 | int old = atomic_read(&nmi_running); | ||
115 | |||
116 | for (;;) { | ||
117 | int new = old & ~MOD_CODE_WRITE_FLAG; | ||
118 | |||
119 | if (old == new) | ||
120 | break; | ||
121 | |||
122 | old = atomic_cmpxchg(&nmi_running, old, new); | ||
123 | } | ||
124 | } | ||
125 | |||
126 | static void ftrace_mod_code(void) | ||
127 | { | ||
128 | /* | ||
129 | * Yes, more than one CPU process can be writing to mod_code_status. | ||
130 | * (and the code itself) | ||
131 | * But if one were to fail, then they all should, and if one were | ||
132 | * to succeed, then they all should. | ||
133 | */ | ||
134 | mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode, | ||
135 | MCOUNT_INSN_SIZE); | ||
136 | |||
137 | /* if we fail, then kill any new writers */ | ||
138 | if (mod_code_status) | ||
139 | clear_mod_flag(); | ||
140 | } | ||
141 | |||
142 | void ftrace_nmi_enter(void) | ||
143 | { | ||
144 | if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { | ||
145 | smp_rmb(); | ||
146 | ftrace_mod_code(); | ||
147 | atomic_inc(&nmi_update_count); | ||
148 | } | ||
149 | /* Must have previous changes seen before executions */ | ||
150 | smp_mb(); | ||
151 | } | ||
152 | |||
153 | void ftrace_nmi_exit(void) | ||
154 | { | ||
155 | /* Finish all executions before clearing nmi_running */ | ||
156 | smp_mb(); | ||
157 | atomic_dec(&nmi_running); | ||
158 | } | ||
159 | |||
160 | static void wait_for_nmi_and_set_mod_flag(void) | ||
161 | { | ||
162 | if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG)) | ||
163 | return; | ||
164 | |||
165 | do { | ||
166 | cpu_relax(); | ||
167 | } while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG)); | ||
168 | |||
169 | nmi_wait_count++; | ||
170 | } | ||
171 | |||
172 | static void wait_for_nmi(void) | ||
173 | { | ||
174 | if (!atomic_read(&nmi_running)) | ||
175 | return; | ||
176 | |||
177 | do { | ||
178 | cpu_relax(); | ||
179 | } while (atomic_read(&nmi_running)); | ||
180 | |||
181 | nmi_wait_count++; | ||
182 | } | ||
183 | |||
184 | static int | ||
185 | do_ftrace_mod_code(unsigned long ip, void *new_code) | ||
186 | { | ||
187 | mod_code_ip = (void *)ip; | ||
188 | mod_code_newcode = new_code; | ||
189 | |||
190 | /* The buffers need to be visible before we let NMIs write them */ | ||
191 | smp_mb(); | ||
192 | |||
193 | wait_for_nmi_and_set_mod_flag(); | ||
194 | |||
195 | /* Make sure all running NMIs have finished before we write the code */ | ||
196 | smp_mb(); | ||
197 | |||
198 | ftrace_mod_code(); | ||
199 | |||
200 | /* Make sure the write happens before clearing the bit */ | ||
201 | smp_mb(); | ||
202 | |||
203 | clear_mod_flag(); | ||
204 | wait_for_nmi(); | ||
205 | |||
206 | return mod_code_status; | ||
207 | } | ||
208 | |||
65 | static int ftrace_modify_code(unsigned long ip, unsigned char *old_code, | 209 | static int ftrace_modify_code(unsigned long ip, unsigned char *old_code, |
66 | unsigned char *new_code) | 210 | unsigned char *new_code) |
67 | { | 211 | { |
@@ -86,7 +230,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned char *old_code, | |||
86 | return -EINVAL; | 230 | return -EINVAL; |
87 | 231 | ||
88 | /* replace the text with the new text */ | 232 | /* replace the text with the new text */ |
89 | if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) | 233 | if (do_ftrace_mod_code(ip, new_code)) |
90 | return -EPERM; | 234 | return -EPERM; |
91 | 235 | ||
92 | flush_icache_range(ip, ip + MCOUNT_INSN_SIZE); | 236 | flush_icache_range(ip, ip + MCOUNT_INSN_SIZE); |
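The comment block added to ftrace.c describes the NMI-safe text patching handshake (taken over from the x86 implementation): the patcher waits for in-flight NMIs, raises a flag bit in nmi_running, performs the write, clears the flag and waits again, while any NMI that observes the flag performs the same write itself before running traced code. The following user-space sketch models just that handshake with C11 atomics; the instruction rewrite is reduced to setting a variable, and it is a single-threaded illustration rather than a faithful SMP test.

/*
 * Model of the nmi_running protocol: bit 31 marks a pending write, the
 * low bits count NMIs currently in progress.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MOD_CODE_WRITE_FLAG (1u << 31)

static atomic_uint nmi_running;
static int patched;             /* stands in for the patched instruction */

static void do_the_write(void)
{
        patched = 1;
}

/* what ftrace_nmi_enter() does: if a write is pending, do it ourselves */
static void model_nmi_enter(void)
{
        if ((atomic_fetch_add(&nmi_running, 1) + 1) & MOD_CODE_WRITE_FLAG)
                do_the_write();
}

static void model_nmi_exit(void)
{
        atomic_fetch_sub(&nmi_running, 1);
}

/* what do_ftrace_mod_code() does on the patching CPU */
static void model_mod_code(void)
{
        unsigned int expected = 0;

        /* wait for NMIs to drain, then announce the pending write */
        while (!atomic_compare_exchange_weak(&nmi_running, &expected,
                                             MOD_CODE_WRITE_FLAG))
                expected = 0;

        do_the_write();

        /* clear the flag and wait for any NMIs still in flight */
        atomic_fetch_and(&nmi_running, ~MOD_CODE_WRITE_FLAG);
        while (atomic_load(&nmi_running) != 0)
                ;
}

int main(void)
{
        model_nmi_enter();      /* an "NMI" before patching: nothing pending */
        model_nmi_exit();

        model_mod_code();       /* patch from the normal context */
        printf("patched = %d\n", patched);
        return 0;
}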
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S index a78be74b8d3..1151ecdffa7 100644 --- a/arch/sh/kernel/head_32.S +++ b/arch/sh/kernel/head_32.S | |||
@@ -33,7 +33,7 @@ ENTRY(empty_zero_page) | |||
33 | .long 1 /* LOADER_TYPE */ | 33 | .long 1 /* LOADER_TYPE */ |
34 | .long 0x00000000 /* INITRD_START */ | 34 | .long 0x00000000 /* INITRD_START */ |
35 | .long 0x00000000 /* INITRD_SIZE */ | 35 | .long 0x00000000 /* INITRD_SIZE */ |
36 | #ifdef CONFIG_32BIT | 36 | #if defined(CONFIG_32BIT) && defined(CONFIG_PMB_FIXED) |
37 | .long 0x53453f00 + 32 /* "SE?" = 32 bit */ | 37 | .long 0x53453f00 + 32 /* "SE?" = 32 bit */ |
38 | #else | 38 | #else |
39 | .long 0x53453f00 + 29 /* "SE?" = 29 bit */ | 39 | .long 0x53453f00 + 29 /* "SE?" = 29 bit */ |
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index 27ff2dc093c..aaff0037fcd 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <asm/atomic.h> | 21 | #include <asm/atomic.h> |
22 | 22 | ||
23 | static int hlt_counter; | 23 | static int hlt_counter; |
24 | void (*pm_idle)(void); | 24 | void (*pm_idle)(void) = NULL; |
25 | void (*pm_power_off)(void); | 25 | void (*pm_power_off)(void); |
26 | EXPORT_SYMBOL(pm_power_off); | 26 | EXPORT_SYMBOL(pm_power_off); |
27 | 27 | ||
@@ -39,48 +39,92 @@ static int __init hlt_setup(char *__unused) | |||
39 | } | 39 | } |
40 | __setup("hlt", hlt_setup); | 40 | __setup("hlt", hlt_setup); |
41 | 41 | ||
42 | static inline int hlt_works(void) | ||
43 | { | ||
44 | return !hlt_counter; | ||
45 | } | ||
46 | |||
47 | /* | ||
48 | * On SMP it's slightly faster (but much more power-consuming!) | ||
49 | * to poll the ->work.need_resched flag instead of waiting for the | ||
50 | * cross-CPU IPI to arrive. Use this option with caution. | ||
51 | */ | ||
52 | static void poll_idle(void) | ||
53 | { | ||
54 | local_irq_enable(); | ||
55 | while (!need_resched()) | ||
56 | cpu_relax(); | ||
57 | } | ||
58 | |||
42 | void default_idle(void) | 59 | void default_idle(void) |
43 | { | 60 | { |
44 | if (!hlt_counter) { | 61 | if (hlt_works()) { |
45 | clear_thread_flag(TIF_POLLING_NRFLAG); | 62 | clear_thread_flag(TIF_POLLING_NRFLAG); |
46 | smp_mb__after_clear_bit(); | 63 | smp_mb__after_clear_bit(); |
47 | set_bl_bit(); | ||
48 | stop_critical_timings(); | ||
49 | 64 | ||
50 | while (!need_resched()) | 65 | if (!need_resched()) { |
66 | local_irq_enable(); | ||
51 | cpu_sleep(); | 67 | cpu_sleep(); |
68 | } else | ||
69 | local_irq_enable(); | ||
52 | 70 | ||
53 | start_critical_timings(); | ||
54 | clear_bl_bit(); | ||
55 | set_thread_flag(TIF_POLLING_NRFLAG); | 71 | set_thread_flag(TIF_POLLING_NRFLAG); |
56 | } else | 72 | } else |
57 | while (!need_resched()) | 73 | poll_idle(); |
58 | cpu_relax(); | ||
59 | } | 74 | } |
60 | 75 | ||
76 | /* | ||
77 | * The idle thread. There's no useful work to be done, so just try to conserve | ||
78 | * power and have a low exit latency (ie sit in a loop waiting for somebody to | ||
79 | * say that they'd like to reschedule) | ||
80 | */ | ||
61 | void cpu_idle(void) | 81 | void cpu_idle(void) |
62 | { | 82 | { |
83 | unsigned int cpu = smp_processor_id(); | ||
84 | |||
63 | set_thread_flag(TIF_POLLING_NRFLAG); | 85 | set_thread_flag(TIF_POLLING_NRFLAG); |
64 | 86 | ||
65 | /* endless idle loop with no priority at all */ | 87 | /* endless idle loop with no priority at all */ |
66 | while (1) { | 88 | while (1) { |
67 | void (*idle)(void) = pm_idle; | 89 | tick_nohz_stop_sched_tick(1); |
68 | 90 | ||
69 | if (!idle) | 91 | while (!need_resched() && cpu_online(cpu)) { |
70 | idle = default_idle; | 92 | check_pgt_cache(); |
93 | rmb(); | ||
71 | 94 | ||
72 | tick_nohz_stop_sched_tick(1); | 95 | local_irq_disable(); |
73 | while (!need_resched()) | 96 | /* Don't trace irqs off for idle */ |
74 | idle(); | 97 | stop_critical_timings(); |
75 | tick_nohz_restart_sched_tick(); | 98 | pm_idle(); |
99 | /* | ||
100 | * Sanity check to ensure that pm_idle() returns | ||
101 | * with IRQs enabled | ||
102 | */ | ||
103 | WARN_ON(irqs_disabled()); | ||
104 | start_critical_timings(); | ||
105 | } | ||
76 | 106 | ||
107 | tick_nohz_restart_sched_tick(); | ||
77 | preempt_enable_no_resched(); | 108 | preempt_enable_no_resched(); |
78 | schedule(); | 109 | schedule(); |
79 | preempt_disable(); | 110 | preempt_disable(); |
80 | check_pgt_cache(); | ||
81 | } | 111 | } |
82 | } | 112 | } |
83 | 113 | ||
114 | void __cpuinit select_idle_routine(void) | ||
115 | { | ||
116 | /* | ||
117 | * If a platform has set its own idle routine, leave it alone. | ||
118 | */ | ||
119 | if (pm_idle) | ||
120 | return; | ||
121 | |||
122 | if (hlt_works()) | ||
123 | pm_idle = default_idle; | ||
124 | else | ||
125 | pm_idle = poll_idle; | ||
126 | } | ||
127 | |||
84 | static void do_nothing(void *unused) | 128 | static void do_nothing(void *unused) |
85 | { | 129 | { |
86 | } | 130 | } |
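The reworked idle.c makes pm_idle the single hook the idle loop calls, with select_idle_routine() filling it in at boot: a platform-provided routine is left alone, otherwise default_idle (cpu_sleep based) is chosen when "hlt" is allowed and poll_idle when it has been disabled. A tiny model of that selection logic is below; the print statements stand in for the real sleep and poll bodies.

/* Model of select_idle_routine() and the hlt_counter gate. */
#include <stdio.h>

static int hlt_counter;                 /* bumped when "hlt" is disabled */
static void (*pm_idle)(void);

static int hlt_works(void) { return !hlt_counter; }

static void default_idle(void) { puts("cpu_sleep until interrupt"); }
static void poll_idle(void)    { puts("busy-poll need_resched"); }

static void select_idle_routine(void)
{
        if (pm_idle)                    /* platform already chose */
                return;
        pm_idle = hlt_works() ? default_idle : poll_idle;
}

int main(void)
{
        select_idle_routine();
        pm_idle();                      /* -> default_idle */

        pm_idle = NULL;
        hlt_counter++;                  /* pretend "hlt" was disabled */
        select_idle_routine();
        pm_idle();                      /* -> poll_idle */
        return 0;
}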
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c index b8fa6524760..e1e1dbd1955 100644 --- a/arch/sh/kernel/io_generic.c +++ b/arch/sh/kernel/io_generic.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #define dummy_read() | 24 | #define dummy_read() |
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | unsigned long generic_io_base; | 27 | unsigned long generic_io_base = 0; |
28 | 28 | ||
29 | u8 generic_inb(unsigned long port) | 29 | u8 generic_inb(unsigned long port) |
30 | { | 30 | { |
@@ -147,8 +147,10 @@ void generic_outsl(unsigned long port, const void *src, unsigned long count) | |||
147 | 147 | ||
148 | void __iomem *generic_ioport_map(unsigned long addr, unsigned int size) | 148 | void __iomem *generic_ioport_map(unsigned long addr, unsigned int size) |
149 | { | 149 | { |
150 | #ifdef P1SEG | ||
150 | if (PXSEG(addr) >= P1SEG) | 151 | if (PXSEG(addr) >= P1SEG) |
151 | return (void __iomem *)addr; | 152 | return (void __iomem *)addr; |
153 | #endif | ||
152 | 154 | ||
153 | return (void __iomem *)(addr + generic_io_base); | 155 | return (void __iomem *)(addr + generic_io_base); |
154 | } | 156 | } |
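generic_ioport_map() now only takes the P1SEG shortcut when the platform actually defines P1SEG; any other port number is offset by generic_io_base. A simplified sketch of that decision follows; it compares the raw address instead of masking the segment bits the way PXSEG() does, and the example base value is made up.

/*
 * Sketch of the generic_ioport_map() decision: addresses already inside a
 * directly mapped segment pass through, bus-relative port numbers get the
 * platform I/O base added. The real code masks the segment bits first.
 */
#include <stdio.h>

#define P1SEG 0x80000000UL

static unsigned long generic_io_base = 0xb8000000UL;    /* example base */

static void *model_ioport_map(unsigned long addr)
{
        if (addr >= P1SEG)                      /* already a CPU address */
                return (void *)addr;

        return (void *)(addr + generic_io_base);        /* port number */
}

int main(void)
{
        printf("port 0x300       -> %p\n", model_ioport_map(0x300));
        printf("addr P1SEG+0x1000 -> %p\n", model_ioport_map(P1SEG + 0x1000));
        return 0;
}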
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index eac7da772fc..e1913f28f41 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c | |||
@@ -37,7 +37,15 @@ void ack_bad_irq(unsigned int irq) | |||
37 | */ | 37 | */ |
38 | static int show_other_interrupts(struct seq_file *p, int prec) | 38 | static int show_other_interrupts(struct seq_file *p, int prec) |
39 | { | 39 | { |
40 | int j; | ||
41 | |||
42 | seq_printf(p, "%*s: ", prec, "NMI"); | ||
43 | for_each_online_cpu(j) | ||
44 | seq_printf(p, "%10u ", irq_stat[j].__nmi_count); | ||
45 | seq_printf(p, " Non-maskable interrupts\n"); | ||
46 | |||
40 | seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); | 47 | seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); |
48 | |||
41 | return 0; | 49 | return 0; |
42 | } | 50 | } |
43 | 51 | ||
@@ -255,6 +263,12 @@ void __init init_IRQ(void) | |||
255 | { | 263 | { |
256 | plat_irq_setup(); | 264 | plat_irq_setup(); |
257 | 265 | ||
266 | /* | ||
267 | * Pin any of the legacy IRQ vectors that haven't already been | ||
268 | * grabbed by the platform | ||
269 | */ | ||
270 | reserve_irq_legacy(); | ||
271 | |||
258 | /* Perform the machine specific initialisation */ | 272 | /* Perform the machine specific initialisation */ |
259 | if (sh_mv.mv_init_irq) | 273 | if (sh_mv.mv_init_irq) |
260 | sh_mv.mv_init_irq(); | 274 | sh_mv.mv_init_irq(); |
diff --git a/arch/sh/kernel/irq_32.c b/arch/sh/kernel/irq_32.c new file mode 100644 index 00000000000..e33ab15831f --- /dev/null +++ b/arch/sh/kernel/irq_32.c | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * SHcompact irqflags support | ||
3 | * | ||
4 | * Copyright (C) 2006 - 2009 Paul Mundt | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/irqflags.h> | ||
11 | #include <linux/module.h> | ||
12 | |||
13 | void notrace raw_local_irq_restore(unsigned long flags) | ||
14 | { | ||
15 | unsigned long __dummy0, __dummy1; | ||
16 | |||
17 | if (flags == RAW_IRQ_DISABLED) { | ||
18 | __asm__ __volatile__ ( | ||
19 | "stc sr, %0\n\t" | ||
20 | "or #0xf0, %0\n\t" | ||
21 | "ldc %0, sr\n\t" | ||
22 | : "=&z" (__dummy0) | ||
23 | : /* no inputs */ | ||
24 | : "memory" | ||
25 | ); | ||
26 | } else { | ||
27 | __asm__ __volatile__ ( | ||
28 | "stc sr, %0\n\t" | ||
29 | "and %1, %0\n\t" | ||
30 | #ifdef CONFIG_CPU_HAS_SR_RB | ||
31 | "stc r6_bank, %1\n\t" | ||
32 | "or %1, %0\n\t" | ||
33 | #endif | ||
34 | "ldc %0, sr\n\t" | ||
35 | : "=&r" (__dummy0), "=r" (__dummy1) | ||
36 | : "1" (~RAW_IRQ_DISABLED) | ||
37 | : "memory" | ||
38 | ); | ||
39 | } | ||
40 | } | ||
41 | EXPORT_SYMBOL(raw_local_irq_restore); | ||
42 | |||
43 | unsigned long notrace __raw_local_save_flags(void) | ||
44 | { | ||
45 | unsigned long flags; | ||
46 | |||
47 | __asm__ __volatile__ ( | ||
48 | "stc sr, %0\n\t" | ||
49 | "and #0xf0, %0\n\t" | ||
50 | : "=&z" (flags) | ||
51 | : /* no inputs */ | ||
52 | : "memory" | ||
53 | ); | ||
54 | |||
55 | return flags; | ||
56 | } | ||
57 | EXPORT_SYMBOL(__raw_local_save_flags); | ||
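The new irq_32.c moves the SHcompact irqflags helpers out of line: the saved "flags" value is just the four IMASK bits of SR (0xf0), RAW_IRQ_DISABLED masks every level, and restore either ORs that mask back in or clears it (re-merging the banked bits on SR.RB parts). The model below treats SR as an ordinary variable and skips the register-bank detail; it only illustrates the flag encoding.

/* Model of the SR.IMASK handling behind local_irq_save/restore. */
#include <stdio.h>

#define RAW_IRQ_DISABLED 0xf0u

static unsigned int sr = 0x40000000;    /* pretend status register */

static unsigned int model_save_flags(void)
{
        return sr & 0xf0;               /* IMASK field only */
}

static void model_irq_restore(unsigned int flags)
{
        if (flags == RAW_IRQ_DISABLED)
                sr |= 0xf0;             /* mask all interrupt levels */
        else
                sr &= ~0xf0;            /* accept interrupts again */
}

int main(void)
{
        unsigned int flags = model_save_flags();        /* local_irq_save */
        model_irq_restore(RAW_IRQ_DISABLED);            /* ...then disable */
        printf("sr with IRQs masked:  %#x\n", sr);
        model_irq_restore(flags);                       /* local_irq_restore */
        printf("sr with IRQs enabled: %#x\n", sr);
        return 0;
}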
diff --git a/arch/sh/kernel/irq_64.c b/arch/sh/kernel/irq_64.c new file mode 100644 index 00000000000..32365ba0e03 --- /dev/null +++ b/arch/sh/kernel/irq_64.c | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * SHmedia irqflags support | ||
3 | * | ||
4 | * Copyright (C) 2006 - 2009 Paul Mundt | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/irqflags.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <cpu/registers.h> | ||
13 | |||
14 | void notrace raw_local_irq_restore(unsigned long flags) | ||
15 | { | ||
16 | unsigned long long __dummy; | ||
17 | |||
18 | if (flags == RAW_IRQ_DISABLED) { | ||
19 | __asm__ __volatile__ ( | ||
20 | "getcon " __SR ", %0\n\t" | ||
21 | "or %0, %1, %0\n\t" | ||
22 | "putcon %0, " __SR "\n\t" | ||
23 | : "=&r" (__dummy) | ||
24 | : "r" (RAW_IRQ_DISABLED) | ||
25 | ); | ||
26 | } else { | ||
27 | __asm__ __volatile__ ( | ||
28 | "getcon " __SR ", %0\n\t" | ||
29 | "and %0, %1, %0\n\t" | ||
30 | "putcon %0, " __SR "\n\t" | ||
31 | : "=&r" (__dummy) | ||
32 | : "r" (~RAW_IRQ_DISABLED) | ||
33 | ); | ||
34 | } | ||
35 | } | ||
36 | EXPORT_SYMBOL(raw_local_irq_restore); | ||
37 | |||
38 | unsigned long notrace __raw_local_save_flags(void) | ||
39 | { | ||
40 | unsigned long flags; | ||
41 | |||
42 | __asm__ __volatile__ ( | ||
43 | "getcon " __SR ", %0\n\t" | ||
44 | "and %0, %1, %0" | ||
45 | : "=&r" (flags) | ||
46 | : "r" (RAW_IRQ_DISABLED) | ||
47 | ); | ||
48 | |||
49 | return flags; | ||
50 | } | ||
51 | EXPORT_SYMBOL(__raw_local_save_flags); | ||
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index 7ea2704ea03..76f280223eb 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c | |||
@@ -46,12 +46,6 @@ void machine_crash_shutdown(struct pt_regs *regs) | |||
46 | */ | 46 | */ |
47 | int machine_kexec_prepare(struct kimage *image) | 47 | int machine_kexec_prepare(struct kimage *image) |
48 | { | 48 | { |
49 | /* older versions of kexec-tools are passing | ||
50 | * the zImage entry point as a virtual address. | ||
51 | */ | ||
52 | if (image->start != PHYSADDR(image->start)) | ||
53 | return -EINVAL; /* upgrade your kexec-tools */ | ||
54 | |||
55 | return 0; | 49 | return 0; |
56 | } | 50 | } |
57 | 51 | ||
diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c index cbce639b108..1652340ba3f 100644 --- a/arch/sh/kernel/machvec.c +++ b/arch/sh/kernel/machvec.c | |||
@@ -135,5 +135,9 @@ void __init sh_mv_setup(void) | |||
135 | if (!sh_mv.mv_nr_irqs) | 135 | if (!sh_mv.mv_nr_irqs) |
136 | sh_mv.mv_nr_irqs = NR_IRQS; | 136 | sh_mv.mv_nr_irqs = NR_IRQS; |
137 | 137 | ||
138 | #ifdef P2SEG | ||
138 | __set_io_port_base(P2SEG); | 139 | __set_io_port_base(P2SEG); |
140 | #else | ||
141 | __set_io_port_base(0); | ||
142 | #endif | ||
139 | } | 143 | } |
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c index c2efdcde266..43adddfe4c0 100644 --- a/arch/sh/kernel/module.c +++ b/arch/sh/kernel/module.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/string.h> | 32 | #include <linux/string.h> |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <asm/unaligned.h> | 34 | #include <asm/unaligned.h> |
35 | #include <asm/dwarf.h> | ||
35 | 36 | ||
36 | void *module_alloc(unsigned long size) | 37 | void *module_alloc(unsigned long size) |
37 | { | 38 | { |
@@ -145,10 +146,16 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
145 | const Elf_Shdr *sechdrs, | 146 | const Elf_Shdr *sechdrs, |
146 | struct module *me) | 147 | struct module *me) |
147 | { | 148 | { |
148 | return module_bug_finalize(hdr, sechdrs, me); | 149 | int ret = 0; |
150 | |||
151 | ret |= module_dwarf_finalize(hdr, sechdrs, me); | ||
152 | ret |= module_bug_finalize(hdr, sechdrs, me); | ||
153 | |||
154 | return ret; | ||
149 | } | 155 | } |
150 | 156 | ||
151 | void module_arch_cleanup(struct module *mod) | 157 | void module_arch_cleanup(struct module *mod) |
152 | { | 158 | { |
153 | module_bug_cleanup(mod); | 159 | module_bug_cleanup(mod); |
160 | module_dwarf_cleanup(mod); | ||
154 | } | 161 | } |
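module_finalize() now runs both the bug-table and the new DWARF finalizers and ORs their results, and module_arch_cleanup() tears both down on unload. A trivial model of that pairing, with stub functions standing in for the real finalizers:

/* Model of the finalize/cleanup pairing; any nonzero OR means failure. */
#include <stdio.h>

static int bug_finalize(void)   { puts("parse __bug_table"); return 0; }
static int dwarf_finalize(void) { puts("parse .eh_frame");   return 0; }
static void bug_cleanup(void)   { puts("drop bug entries");  }
static void dwarf_cleanup(void) { puts("drop CIEs/FDEs");    }

static int model_module_finalize(void)
{
        int ret = 0;

        ret |= dwarf_finalize();
        ret |= bug_finalize();
        return ret;
}

static void model_module_cleanup(void)
{
        bug_cleanup();
        dwarf_cleanup();
}

int main(void)
{
        if (model_module_finalize() == 0)
                model_module_cleanup(); /* pretend the module is unloaded */
        return 0;
}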
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c new file mode 100644 index 00000000000..24ea837eac5 --- /dev/null +++ b/arch/sh/kernel/perf_callchain.c | |||
@@ -0,0 +1,98 @@ | |||
1 | /* | ||
2 | * Performance event callchain support - SuperH architecture code | ||
3 | * | ||
4 | * Copyright (C) 2009 Paul Mundt | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/perf_event.h> | ||
13 | #include <linux/percpu.h> | ||
14 | #include <asm/unwinder.h> | ||
15 | #include <asm/ptrace.h> | ||
16 | |||
17 | static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
18 | { | ||
19 | if (entry->nr < PERF_MAX_STACK_DEPTH) | ||
20 | entry->ip[entry->nr++] = ip; | ||
21 | } | ||
22 | |||
23 | static void callchain_warning(void *data, char *msg) | ||
24 | { | ||
25 | } | ||
26 | |||
27 | static void | ||
28 | callchain_warning_symbol(void *data, char *msg, unsigned long symbol) | ||
29 | { | ||
30 | } | ||
31 | |||
32 | static int callchain_stack(void *data, char *name) | ||
33 | { | ||
34 | return 0; | ||
35 | } | ||
36 | |||
37 | static void callchain_address(void *data, unsigned long addr, int reliable) | ||
38 | { | ||
39 | struct perf_callchain_entry *entry = data; | ||
40 | |||
41 | if (reliable) | ||
42 | callchain_store(entry, addr); | ||
43 | } | ||
44 | |||
45 | static const struct stacktrace_ops callchain_ops = { | ||
46 | .warning = callchain_warning, | ||
47 | .warning_symbol = callchain_warning_symbol, | ||
48 | .stack = callchain_stack, | ||
49 | .address = callchain_address, | ||
50 | }; | ||
51 | |||
52 | static void | ||
53 | perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) | ||
54 | { | ||
55 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
56 | callchain_store(entry, regs->pc); | ||
57 | |||
58 | unwind_stack(NULL, regs, NULL, &callchain_ops, entry); | ||
59 | } | ||
60 | |||
61 | static void | ||
62 | perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry) | ||
63 | { | ||
64 | int is_user; | ||
65 | |||
66 | if (!regs) | ||
67 | return; | ||
68 | |||
69 | is_user = user_mode(regs); | ||
70 | |||
71 | if (!current || current->pid == 0) | ||
72 | return; | ||
73 | |||
74 | if (is_user && current->state != TASK_RUNNING) | ||
75 | return; | ||
76 | |||
77 | /* | ||
78 | * Only the kernel side is implemented for now. | ||
79 | */ | ||
80 | if (!is_user) | ||
81 | perf_callchain_kernel(regs, entry); | ||
82 | } | ||
83 | |||
84 | /* | ||
85 | * No need for separate IRQ and NMI entries. | ||
86 | */ | ||
87 | static DEFINE_PER_CPU(struct perf_callchain_entry, callchain); | ||
88 | |||
89 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
90 | { | ||
91 | struct perf_callchain_entry *entry = &__get_cpu_var(callchain); | ||
92 | |||
93 | entry->nr = 0; | ||
94 | |||
95 | perf_do_callchain(regs, entry); | ||
96 | |||
97 | return entry; | ||
98 | } | ||
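perf_callchain.c reuses the generic unwinder: unwind_stack() hands every return address to the ops->address callback, which appends reliable entries to the per-cpu perf_callchain_entry until PERF_MAX_STACK_DEPTH is reached. The user-space model below fakes the unwinder with a static array and uses a small depth limit; the addresses and the limit are illustrative only.

/* Model of callchain collection via an address callback. */
#include <stdint.h>
#include <stdio.h>

#define MAX_STACK_DEPTH 4       /* stand-in for PERF_MAX_STACK_DEPTH */

struct callchain_entry {
        unsigned int nr;
        uint64_t ip[MAX_STACK_DEPTH];
};

static void callchain_store(struct callchain_entry *entry, uint64_t ip)
{
        if (entry->nr < MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}

/* plays the role of the ops->address callback fed by the unwinder */
static void callchain_address(void *data, unsigned long addr, int reliable)
{
        if (reliable)
                callchain_store(data, addr);
}

int main(void)
{
        /* fake "unwound" return addresses, one of them unreliable */
        unsigned long frames[] = { 0x8c0010a0, 0x8c004200, 0x0, 0x8c00f000 };
        struct callchain_entry entry = { .nr = 0 };

        for (unsigned int i = 0; i < sizeof(frames) / sizeof(frames[0]); i++)
                callchain_address(&entry, frames[i], frames[i] != 0);

        for (unsigned int i = 0; i < entry.nr; i++)
                printf("frame %u: %#llx\n", i, (unsigned long long)entry.ip[i]);
        return 0;
}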
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c new file mode 100644 index 00000000000..7ff0943e7a0 --- /dev/null +++ b/arch/sh/kernel/perf_event.c | |||
@@ -0,0 +1,312 @@ | |||
1 | /* | ||
2 | * Performance event support framework for SuperH hardware counters. | ||
3 | * | ||
4 | * Copyright (C) 2009 Paul Mundt | ||
5 | * | ||
6 | * Heavily based on the x86 and PowerPC implementations. | ||
7 | * | ||
8 | * x86: | ||
9 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> | ||
10 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar | ||
11 | * Copyright (C) 2009 Jaswinder Singh Rajput | ||
12 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter | ||
13 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | ||
14 | * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> | ||
15 | * | ||
16 | * ppc: | ||
17 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | ||
18 | * | ||
19 | * This file is subject to the terms and conditions of the GNU General Public | ||
20 | * License. See the file "COPYING" in the main directory of this archive | ||
21 | * for more details. | ||
22 | */ | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/irq.h> | ||
27 | #include <linux/perf_event.h> | ||
28 | #include <asm/processor.h> | ||
29 | |||
30 | struct cpu_hw_events { | ||
31 | struct perf_event *events[MAX_HWEVENTS]; | ||
32 | unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; | ||
33 | unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; | ||
34 | }; | ||
35 | |||
36 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | ||
37 | |||
38 | static struct sh_pmu *sh_pmu __read_mostly; | ||
39 | |||
40 | /* Number of perf_events counting hardware events */ | ||
41 | static atomic_t num_events; | ||
42 | /* Used to avoid races in calling reserve/release_pmc_hardware */ | ||
43 | static DEFINE_MUTEX(pmc_reserve_mutex); | ||
44 | |||
45 | /* | ||
46 | * Stub these out for now, do something more profound later. | ||
47 | */ | ||
48 | int reserve_pmc_hardware(void) | ||
49 | { | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | void release_pmc_hardware(void) | ||
54 | { | ||
55 | } | ||
56 | |||
57 | static inline int sh_pmu_initialized(void) | ||
58 | { | ||
59 | return !!sh_pmu; | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * Release the PMU if this is the last perf_event. | ||
64 | */ | ||
65 | static void hw_perf_event_destroy(struct perf_event *event) | ||
66 | { | ||
67 | if (!atomic_add_unless(&num_events, -1, 1)) { | ||
68 | mutex_lock(&pmc_reserve_mutex); | ||
69 | if (atomic_dec_return(&num_events) == 0) | ||
70 | release_pmc_hardware(); | ||
71 | mutex_unlock(&pmc_reserve_mutex); | ||
72 | } | ||
73 | } | ||
74 | |||
75 | static int hw_perf_cache_event(int config, int *evp) | ||
76 | { | ||
77 | unsigned long type, op, result; | ||
78 | int ev; | ||
79 | |||
80 | if (!sh_pmu->cache_events) | ||
81 | return -EINVAL; | ||
82 | |||
83 | /* unpack config */ | ||
84 | type = config & 0xff; | ||
85 | op = (config >> 8) & 0xff; | ||
86 | result = (config >> 16) & 0xff; | ||
87 | |||
88 | if (type >= PERF_COUNT_HW_CACHE_MAX || | ||
89 | op >= PERF_COUNT_HW_CACHE_OP_MAX || | ||
90 | result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | ||
91 | return -EINVAL; | ||
92 | |||
93 | ev = (*sh_pmu->cache_events)[type][op][result]; | ||
94 | if (ev == 0) | ||
95 | return -EOPNOTSUPP; | ||
96 | if (ev == -1) | ||
97 | return -EINVAL; | ||
98 | *evp = ev; | ||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | static int __hw_perf_event_init(struct perf_event *event) | ||
103 | { | ||
104 | struct perf_event_attr *attr = &event->attr; | ||
105 | struct hw_perf_event *hwc = &event->hw; | ||
106 | int config = -1; | ||
107 | int err; | ||
108 | |||
109 | if (!sh_pmu_initialized()) | ||
110 | return -ENODEV; | ||
111 | |||
112 | /* | ||
113 | * All of the on-chip counters are "limited", in that they have | ||
114 | * no interrupts, and are therefore unable to do sampling without | ||
115 | * further work and timer assistance. | ||
116 | */ | ||
117 | if (hwc->sample_period) | ||
118 | return -EINVAL; | ||
119 | |||
120 | /* | ||
121 | * See if we need to reserve the counter. | ||
122 | * | ||
123 | * If no events are currently in use, then we have to take a | ||
124 | * mutex to ensure that we don't race with another task doing | ||
125 | * reserve_pmc_hardware or release_pmc_hardware. | ||
126 | */ | ||
127 | err = 0; | ||
128 | if (!atomic_inc_not_zero(&num_events)) { | ||
129 | mutex_lock(&pmc_reserve_mutex); | ||
130 | if (atomic_read(&num_events) == 0 && | ||
131 | reserve_pmc_hardware()) | ||
132 | err = -EBUSY; | ||
133 | else | ||
134 | atomic_inc(&num_events); | ||
135 | mutex_unlock(&pmc_reserve_mutex); | ||
136 | } | ||
137 | |||
138 | if (err) | ||
139 | return err; | ||
140 | |||
141 | event->destroy = hw_perf_event_destroy; | ||
142 | |||
143 | switch (attr->type) { | ||
144 | case PERF_TYPE_RAW: | ||
145 | config = attr->config & sh_pmu->raw_event_mask; | ||
146 | break; | ||
147 | case PERF_TYPE_HW_CACHE: | ||
148 | err = hw_perf_cache_event(attr->config, &config); | ||
149 | if (err) | ||
150 | return err; | ||
151 | break; | ||
152 | case PERF_TYPE_HARDWARE: | ||
153 | if (attr->config >= sh_pmu->max_events) | ||
154 | return -EINVAL; | ||
155 | |||
156 | config = sh_pmu->event_map(attr->config); | ||
157 | break; | ||
158 | } | ||
159 | |||
160 | if (config == -1) | ||
161 | return -EINVAL; | ||
162 | |||
163 | hwc->config |= config; | ||
164 | |||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | static void sh_perf_event_update(struct perf_event *event, | ||
169 | struct hw_perf_event *hwc, int idx) | ||
170 | { | ||
171 | u64 prev_raw_count, new_raw_count; | ||
172 | s64 delta; | ||
173 | int shift = 0; | ||
174 | |||
175 | /* | ||
176 | * Depending on the counter configuration, they may or may not | ||
177 | * be chained, in which case the previous counter value can be | ||
178 | * updated underneath us if the lower-half overflows. | ||
179 | * | ||
180 | * Our tactic to handle this is to first atomically read and | ||
181 | * exchange a new raw count - then add that new-prev delta | ||
182 | * count to the generic counter atomically. | ||
183 | * | ||
184 | * As there is no interrupt associated with the overflow events, | ||
185 | * this is the simplest approach for maintaining consistency. | ||
186 | */ | ||
187 | again: | ||
188 | prev_raw_count = atomic64_read(&hwc->prev_count); | ||
189 | new_raw_count = sh_pmu->read(idx); | ||
190 | |||
191 | if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, | ||
192 | new_raw_count) != prev_raw_count) | ||
193 | goto again; | ||
194 | |||
195 | /* | ||
196 | * Now we have the new raw value and have updated the prev | ||
197 | * timestamp already. We can now calculate the elapsed delta | ||
198 | * (counter-)time and add that to the generic counter. | ||
199 | * | ||
200 | * Careful, not all hw sign-extends above the physical width | ||
201 | * of the count. | ||
202 | */ | ||
203 | delta = (new_raw_count << shift) - (prev_raw_count << shift); | ||
204 | delta >>= shift; | ||
205 | |||
206 | atomic64_add(delta, &event->count); | ||
207 | } | ||
208 | |||
209 | static void sh_pmu_disable(struct perf_event *event) | ||
210 | { | ||
211 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
212 | struct hw_perf_event *hwc = &event->hw; | ||
213 | int idx = hwc->idx; | ||
214 | |||
215 | clear_bit(idx, cpuc->active_mask); | ||
216 | sh_pmu->disable(hwc, idx); | ||
217 | |||
218 | barrier(); | ||
219 | |||
220 | sh_perf_event_update(event, &event->hw, idx); | ||
221 | |||
222 | cpuc->events[idx] = NULL; | ||
223 | clear_bit(idx, cpuc->used_mask); | ||
224 | |||
225 | perf_event_update_userpage(event); | ||
226 | } | ||
227 | |||
228 | static int sh_pmu_enable(struct perf_event *event) | ||
229 | { | ||
230 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
231 | struct hw_perf_event *hwc = &event->hw; | ||
232 | int idx = hwc->idx; | ||
233 | |||
234 | if (test_and_set_bit(idx, cpuc->used_mask)) { | ||
235 | idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events); | ||
236 | if (idx == sh_pmu->num_events) | ||
237 | return -EAGAIN; | ||
238 | |||
239 | set_bit(idx, cpuc->used_mask); | ||
240 | hwc->idx = idx; | ||
241 | } | ||
242 | |||
243 | sh_pmu->disable(hwc, idx); | ||
244 | |||
245 | cpuc->events[idx] = event; | ||
246 | set_bit(idx, cpuc->active_mask); | ||
247 | |||
248 | sh_pmu->enable(hwc, idx); | ||
249 | |||
250 | perf_event_update_userpage(event); | ||
251 | |||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | static void sh_pmu_read(struct perf_event *event) | ||
256 | { | ||
257 | sh_perf_event_update(event, &event->hw, event->hw.idx); | ||
258 | } | ||
259 | |||
260 | static const struct pmu pmu = { | ||
261 | .enable = sh_pmu_enable, | ||
262 | .disable = sh_pmu_disable, | ||
263 | .read = sh_pmu_read, | ||
264 | }; | ||
265 | |||
266 | const struct pmu *hw_perf_event_init(struct perf_event *event) | ||
267 | { | ||
268 | int err = __hw_perf_event_init(event); | ||
269 | if (unlikely(err)) { | ||
270 | if (event->destroy) | ||
271 | event->destroy(event); | ||
272 | return ERR_PTR(err); | ||
273 | } | ||
274 | |||
275 | return &pmu; | ||
276 | } | ||
277 | |||
278 | void hw_perf_event_setup(int cpu) | ||
279 | { | ||
280 | struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); | ||
281 | |||
282 | memset(cpuhw, 0, sizeof(struct cpu_hw_events)); | ||
283 | } | ||
284 | |||
285 | void hw_perf_enable(void) | ||
286 | { | ||
287 | if (!sh_pmu_initialized()) | ||
288 | return; | ||
289 | |||
290 | sh_pmu->enable_all(); | ||
291 | } | ||
292 | |||
293 | void hw_perf_disable(void) | ||
294 | { | ||
295 | if (!sh_pmu_initialized()) | ||
296 | return; | ||
297 | |||
298 | sh_pmu->disable_all(); | ||
299 | } | ||
300 | |||
301 | int register_sh_pmu(struct sh_pmu *pmu) | ||
302 | { | ||
303 | if (sh_pmu) | ||
304 | return -EBUSY; | ||
305 | sh_pmu = pmu; | ||
306 | |||
307 | pr_info("Performance Events: %s support registered\n", pmu->name); | ||
308 | |||
309 | WARN_ON(pmu->num_events > MAX_HWEVENTS); | ||
310 | |||
311 | return 0; | ||
312 | } | ||
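The update path in sh_perf_event_update() has no overflow interrupt to lean on, so it publishes each raw counter snapshot with an atomic cmpxchg and only then accounts the delta. A minimal stand-alone C sketch of the same accumulation loop, with read_hw_counter() as a hypothetical stand-in for sh_pmu->read():

	#include <stdatomic.h>
	#include <stdint.h>

	/* Hypothetical free-running hardware counter read. */
	extern uint64_t read_hw_counter(void);

	/* Accumulate new counts into *total without losing concurrent updates. */
	static void counter_update(_Atomic uint64_t *prev, _Atomic uint64_t *total)
	{
		uint64_t prev_raw, new_raw;

		do {
			prev_raw = atomic_load(prev);
			new_raw  = read_hw_counter();
			/* Publish the new snapshot; retry if another reader raced us. */
		} while (!atomic_compare_exchange_weak(prev, &prev_raw, new_raw));

		atomic_fetch_add(total, new_raw - prev_raw);
	}

The kernel version additionally shifts both snapshots up to the counter width before subtracting, so counters that do not sign-extend above their physical width still produce a correct delta.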
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 0673c4746be..d8af889366a 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c | |||
@@ -134,7 +134,10 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |||
134 | regs.regs[5] = (unsigned long)fn; | 134 | regs.regs[5] = (unsigned long)fn; |
135 | 135 | ||
136 | regs.pc = (unsigned long)kernel_thread_helper; | 136 | regs.pc = (unsigned long)kernel_thread_helper; |
137 | regs.sr = (1 << 30); | 137 | regs.sr = SR_MD; |
138 | #if defined(CONFIG_SH_FPU) | ||
139 | regs.sr |= SR_FD; | ||
140 | #endif | ||
138 | 141 | ||
139 | /* Ok, create the new process.. */ | 142 | /* Ok, create the new process.. */ |
140 | pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, | 143 | pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, |
@@ -142,6 +145,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |||
142 | 145 | ||
143 | return pid; | 146 | return pid; |
144 | } | 147 | } |
148 | EXPORT_SYMBOL(kernel_thread); | ||
145 | 149 | ||
146 | /* | 150 | /* |
147 | * Free current thread data structures etc.. | 151 | * Free current thread data structures etc.. |
@@ -186,6 +190,16 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) | |||
186 | 190 | ||
187 | return fpvalid; | 191 | return fpvalid; |
188 | } | 192 | } |
193 | EXPORT_SYMBOL(dump_fpu); | ||
194 | |||
195 | /* | ||
196 | * This gets called before we allocate a new thread and copy | ||
197 | * the current task into it. | ||
198 | */ | ||
199 | void prepare_to_copy(struct task_struct *tsk) | ||
200 | { | ||
201 | unlazy_fpu(tsk, task_pt_regs(tsk)); | ||
202 | } | ||
189 | 203 | ||
190 | asmlinkage void ret_from_fork(void); | 204 | asmlinkage void ret_from_fork(void); |
191 | 205 | ||
@@ -195,16 +209,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
195 | { | 209 | { |
196 | struct thread_info *ti = task_thread_info(p); | 210 | struct thread_info *ti = task_thread_info(p); |
197 | struct pt_regs *childregs; | 211 | struct pt_regs *childregs; |
198 | #if defined(CONFIG_SH_FPU) || defined(CONFIG_SH_DSP) | 212 | #if defined(CONFIG_SH_DSP) |
199 | struct task_struct *tsk = current; | 213 | struct task_struct *tsk = current; |
200 | #endif | 214 | #endif |
201 | 215 | ||
202 | #if defined(CONFIG_SH_FPU) | ||
203 | unlazy_fpu(tsk, regs); | ||
204 | p->thread.fpu = tsk->thread.fpu; | ||
205 | copy_to_stopped_child_used_math(p); | ||
206 | #endif | ||
207 | |||
208 | #if defined(CONFIG_SH_DSP) | 216 | #if defined(CONFIG_SH_DSP) |
209 | if (is_dsp_enabled(tsk)) { | 217 | if (is_dsp_enabled(tsk)) { |
210 | /* We can use the __save_dsp or just copy the struct: | 218 | /* We can use the __save_dsp or just copy the struct: |
@@ -224,6 +232,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
224 | } else { | 232 | } else { |
225 | childregs->regs[15] = (unsigned long)childregs; | 233 | childregs->regs[15] = (unsigned long)childregs; |
226 | ti->addr_limit = KERNEL_DS; | 234 | ti->addr_limit = KERNEL_DS; |
235 | ti->status &= ~TS_USEDFPU; | ||
236 | p->fpu_counter = 0; | ||
227 | } | 237 | } |
228 | 238 | ||
229 | if (clone_flags & CLONE_SETTLS) | 239 | if (clone_flags & CLONE_SETTLS) |
@@ -288,9 +298,13 @@ static void ubc_set_tracing(int asid, unsigned long pc) | |||
288 | __notrace_funcgraph struct task_struct * | 298 | __notrace_funcgraph struct task_struct * |
289 | __switch_to(struct task_struct *prev, struct task_struct *next) | 299 | __switch_to(struct task_struct *prev, struct task_struct *next) |
290 | { | 300 | { |
291 | #if defined(CONFIG_SH_FPU) | 301 | struct thread_struct *next_t = &next->thread; |
302 | |||
292 | unlazy_fpu(prev, task_pt_regs(prev)); | 303 | unlazy_fpu(prev, task_pt_regs(prev)); |
293 | #endif | 304 | |
305 | /* we're going to use this soon, after a few expensive things */ | ||
306 | if (next->fpu_counter > 5) | ||
307 | prefetch(&next_t->fpu.hard); | ||
294 | 308 | ||
295 | #ifdef CONFIG_MMU | 309 | #ifdef CONFIG_MMU |
296 | /* | 310 | /* |
@@ -321,6 +335,14 @@ __switch_to(struct task_struct *prev, struct task_struct *next) | |||
321 | #endif | 335 | #endif |
322 | } | 336 | } |
323 | 337 | ||
338 | /* | ||
339 | * If the task has used fpu the last 5 timeslices, just do a full | ||
340 | * restore of the math state immediately to avoid the trap; the | ||
341 | * chances of needing FPU soon are obviously high now | ||
342 | */ | ||
343 | if (next->fpu_counter > 5) | ||
344 | fpu_state_restore(task_pt_regs(next)); | ||
345 | |||
324 | return prev; | 346 | return prev; |
325 | } | 347 | } |
326 | 348 | ||
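The fpu_counter test added to __switch_to() is the usual lazy-FPU trade-off: a task that has used the FPU in each of its recent timeslices gets its state prefetched and restored eagerly at context switch, while everyone else keeps SR.FD set and pays a one-off restore trap on first FPU use. A compact sketch of the decision, using the names from the diff (the threshold of 5 is the value chosen here, not a hardware constraint):

	/* Sketch only: eager vs. lazy FPU restore at context switch. */
	unlazy_fpu(prev, task_pt_regs(prev));		/* save prev's live FPU state */

	if (next->fpu_counter > 5) {
		prefetch(&next->thread.fpu.hard);	/* warm the cache early */
		fpu_state_restore(task_pt_regs(next));	/* restore now, skip the trap */
	}
	/* otherwise the first FPU instruction traps and restores lazily */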
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index 1192398ef58..359b8a2f4d2 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c | |||
@@ -335,6 +335,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |||
335 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, | 335 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, |
336 | &regs, 0, NULL, NULL); | 336 | &regs, 0, NULL, NULL); |
337 | } | 337 | } |
338 | EXPORT_SYMBOL(kernel_thread); | ||
338 | 339 | ||
339 | /* | 340 | /* |
340 | * Free current thread data structures etc.. | 341 | * Free current thread data structures etc.. |
@@ -417,6 +418,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) | |||
417 | return 0; /* Task didn't use the fpu at all. */ | 418 | return 0; /* Task didn't use the fpu at all. */ |
418 | #endif | 419 | #endif |
419 | } | 420 | } |
421 | EXPORT_SYMBOL(dump_fpu); | ||
420 | 422 | ||
421 | asmlinkage void ret_from_fork(void); | 423 | asmlinkage void ret_from_fork(void); |
422 | 424 | ||
diff --git a/arch/sh/kernel/return_address.c b/arch/sh/kernel/return_address.c new file mode 100644 index 00000000000..df3ab581107 --- /dev/null +++ b/arch/sh/kernel/return_address.c | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/return_address.c | ||
3 | * | ||
4 | * Copyright (C) 2009 Matt Fleming | ||
5 | * Copyright (C) 2009 Paul Mundt | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <asm/dwarf.h> | ||
13 | |||
14 | #ifdef CONFIG_DWARF_UNWINDER | ||
15 | |||
16 | void *return_address(unsigned int depth) | ||
17 | { | ||
18 | struct dwarf_frame *frame; | ||
19 | unsigned long ra; | ||
20 | int i; | ||
21 | |||
22 | for (i = 0, frame = NULL, ra = 0; i <= depth; i++) { | ||
23 | struct dwarf_frame *tmp; | ||
24 | |||
25 | tmp = dwarf_unwind_stack(ra, frame); | ||
26 | |||
27 | if (frame) | ||
28 | dwarf_free_frame(frame); | ||
29 | |||
30 | frame = tmp; | ||
31 | |||
32 | if (!frame || !frame->return_addr) | ||
33 | break; | ||
34 | |||
35 | ra = frame->return_addr; | ||
36 | } | ||
37 | |||
38 | /* Failed to unwind the stack to the specified depth. */ | ||
39 | WARN_ON(i != depth + 1); | ||
40 | |||
41 | if (frame) | ||
42 | dwarf_free_frame(frame); | ||
43 | |||
44 | return (void *)ra; | ||
45 | } | ||
46 | |||
47 | #else | ||
48 | |||
49 | void *return_address(unsigned int depth) | ||
50 | { | ||
51 | return NULL; | ||
52 | } | ||
53 | |||
54 | #endif | ||
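return_address() walks one DWARF frame per level, freeing each intermediate frame as it goes, and returns the saved return address at the requested depth (or NULL when the DWARF unwinder is not built in). A hedged usage sketch; the caller below is hypothetical and the exact depth needed depends on the call chain:

	#include <linux/kernel.h>

	extern void *return_address(unsigned int depth);

	static void debug_who_called_us(void)
	{
		/* Ask for an address a couple of frames up the stack. */
		void *ra = return_address(2);

		pr_debug("reached via %pS\n", ra);
	}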
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 99b4fb553bf..5a947a2567e 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c | |||
@@ -453,6 +453,10 @@ void __init setup_arch(char **cmdline_p) | |||
453 | 453 | ||
454 | paging_init(); | 454 | paging_init(); |
455 | 455 | ||
456 | #ifdef CONFIG_PMB_ENABLE | ||
457 | pmb_init(); | ||
458 | #endif | ||
459 | |||
456 | #ifdef CONFIG_SMP | 460 | #ifdef CONFIG_SMP |
457 | plat_smp_setup(); | 461 | plat_smp_setup(); |
458 | #endif | 462 | #endif |
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index 444cce3ae92..3896f26efa4 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c | |||
@@ -1,37 +1,11 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | #include <linux/smp.h> | 2 | #include <linux/string.h> |
3 | #include <linux/user.h> | 3 | #include <linux/uaccess.h> |
4 | #include <linux/elfcore.h> | 4 | #include <linux/delay.h> |
5 | #include <linux/sched.h> | 5 | #include <linux/mm.h> |
6 | #include <linux/in6.h> | ||
7 | #include <linux/interrupt.h> | ||
8 | #include <linux/vmalloc.h> | ||
9 | #include <linux/pci.h> | ||
10 | #include <linux/irq.h> | ||
11 | #include <asm/sections.h> | ||
12 | #include <asm/processor.h> | ||
13 | #include <asm/uaccess.h> | ||
14 | #include <asm/checksum.h> | 6 | #include <asm/checksum.h> |
15 | #include <asm/io.h> | 7 | #include <asm/sections.h> |
16 | #include <asm/delay.h> | ||
17 | #include <asm/tlbflush.h> | ||
18 | #include <asm/cacheflush.h> | ||
19 | #include <asm/ftrace.h> | ||
20 | |||
21 | extern int dump_fpu(struct pt_regs *, elf_fpregset_t *); | ||
22 | |||
23 | /* platform dependent support */ | ||
24 | EXPORT_SYMBOL(dump_fpu); | ||
25 | EXPORT_SYMBOL(kernel_thread); | ||
26 | EXPORT_SYMBOL(strlen); | ||
27 | |||
28 | /* PCI exports */ | ||
29 | #ifdef CONFIG_PCI | ||
30 | EXPORT_SYMBOL(pci_alloc_consistent); | ||
31 | EXPORT_SYMBOL(pci_free_consistent); | ||
32 | #endif | ||
33 | 8 | ||
34 | /* mem exports */ | ||
35 | EXPORT_SYMBOL(memchr); | 9 | EXPORT_SYMBOL(memchr); |
36 | EXPORT_SYMBOL(memcpy); | 10 | EXPORT_SYMBOL(memcpy); |
37 | EXPORT_SYMBOL(memset); | 11 | EXPORT_SYMBOL(memset); |
@@ -40,6 +14,13 @@ EXPORT_SYMBOL(__copy_user); | |||
40 | EXPORT_SYMBOL(__udelay); | 14 | EXPORT_SYMBOL(__udelay); |
41 | EXPORT_SYMBOL(__ndelay); | 15 | EXPORT_SYMBOL(__ndelay); |
42 | EXPORT_SYMBOL(__const_udelay); | 16 | EXPORT_SYMBOL(__const_udelay); |
17 | EXPORT_SYMBOL(strlen); | ||
18 | EXPORT_SYMBOL(csum_partial); | ||
19 | EXPORT_SYMBOL(csum_partial_copy_generic); | ||
20 | EXPORT_SYMBOL(copy_page); | ||
21 | EXPORT_SYMBOL(__clear_user); | ||
22 | EXPORT_SYMBOL(_ebss); | ||
23 | EXPORT_SYMBOL(empty_zero_page); | ||
43 | 24 | ||
44 | #define DECLARE_EXPORT(name) \ | 25 | #define DECLARE_EXPORT(name) \ |
45 | extern void name(void);EXPORT_SYMBOL(name) | 26 | extern void name(void);EXPORT_SYMBOL(name) |
@@ -107,30 +88,6 @@ DECLARE_EXPORT(__sdivsi3_i4); | |||
107 | DECLARE_EXPORT(__udivsi3_i4); | 88 | DECLARE_EXPORT(__udivsi3_i4); |
108 | DECLARE_EXPORT(__sdivsi3_i4i); | 89 | DECLARE_EXPORT(__sdivsi3_i4i); |
109 | DECLARE_EXPORT(__udivsi3_i4i); | 90 | DECLARE_EXPORT(__udivsi3_i4i); |
110 | |||
111 | #if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \ | ||
112 | defined(CONFIG_SH7705_CACHE_32KB)) | ||
113 | /* needed by some modules */ | ||
114 | EXPORT_SYMBOL(flush_cache_all); | ||
115 | EXPORT_SYMBOL(flush_cache_range); | ||
116 | EXPORT_SYMBOL(flush_dcache_page); | ||
117 | #endif | ||
118 | |||
119 | #ifdef CONFIG_MCOUNT | 91 | #ifdef CONFIG_MCOUNT |
120 | DECLARE_EXPORT(mcount); | 92 | DECLARE_EXPORT(mcount); |
121 | #endif | 93 | #endif |
122 | EXPORT_SYMBOL(csum_partial); | ||
123 | EXPORT_SYMBOL(csum_partial_copy_generic); | ||
124 | #ifdef CONFIG_IPV6 | ||
125 | EXPORT_SYMBOL(csum_ipv6_magic); | ||
126 | #endif | ||
127 | EXPORT_SYMBOL(copy_page); | ||
128 | EXPORT_SYMBOL(__clear_user); | ||
129 | EXPORT_SYMBOL(_ebss); | ||
130 | EXPORT_SYMBOL(empty_zero_page); | ||
131 | |||
132 | #ifndef CONFIG_CACHE_OFF | ||
133 | EXPORT_SYMBOL(__flush_purge_region); | ||
134 | EXPORT_SYMBOL(__flush_wback_region); | ||
135 | EXPORT_SYMBOL(__flush_invalidate_region); | ||
136 | #endif | ||
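The DECLARE_EXPORT() macro retained above exists because the libgcc arithmetic helpers are implemented in assembly and have no C prototype; it fabricates a dummy one so EXPORT_SYMBOL() has something to reference. Illustration only, with a made-up helper name:

	#define DECLARE_EXPORT(name) \
		extern void name(void); EXPORT_SYMBOL(name)

	/* e.g. for an assembly-only routine that modules may call: */
	DECLARE_EXPORT(__hypothetical_asm_helper);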
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c index d008e17eb25..45afa5c51f6 100644 --- a/arch/sh/kernel/sh_ksyms_64.c +++ b/arch/sh/kernel/sh_ksyms_64.c | |||
@@ -24,16 +24,6 @@ | |||
24 | #include <asm/delay.h> | 24 | #include <asm/delay.h> |
25 | #include <asm/irq.h> | 25 | #include <asm/irq.h> |
26 | 26 | ||
27 | extern int dump_fpu(struct pt_regs *, elf_fpregset_t *); | ||
28 | |||
29 | /* platform dependent support */ | ||
30 | EXPORT_SYMBOL(dump_fpu); | ||
31 | EXPORT_SYMBOL(kernel_thread); | ||
32 | |||
33 | #ifdef CONFIG_VT | ||
34 | EXPORT_SYMBOL(screen_info); | ||
35 | #endif | ||
36 | |||
37 | EXPORT_SYMBOL(__put_user_asm_b); | 27 | EXPORT_SYMBOL(__put_user_asm_b); |
38 | EXPORT_SYMBOL(__put_user_asm_w); | 28 | EXPORT_SYMBOL(__put_user_asm_w); |
39 | EXPORT_SYMBOL(__put_user_asm_l); | 29 | EXPORT_SYMBOL(__put_user_asm_l); |
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 3db37425210..12815ce01ec 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c | |||
@@ -67,7 +67,8 @@ sys_sigsuspend(old_sigset_t mask, | |||
67 | 67 | ||
68 | current->state = TASK_INTERRUPTIBLE; | 68 | current->state = TASK_INTERRUPTIBLE; |
69 | schedule(); | 69 | schedule(); |
70 | set_thread_flag(TIF_RESTORE_SIGMASK); | 70 | set_restore_sigmask(); |
71 | |||
71 | return -ERESTARTNOHAND; | 72 | return -ERESTARTNOHAND; |
72 | } | 73 | } |
73 | 74 | ||
@@ -590,7 +591,7 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0) | |||
590 | if (try_to_freeze()) | 591 | if (try_to_freeze()) |
591 | goto no_signal; | 592 | goto no_signal; |
592 | 593 | ||
593 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 594 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) |
594 | oldset = &current->saved_sigmask; | 595 | oldset = &current->saved_sigmask; |
595 | else | 596 | else |
596 | oldset = &current->blocked; | 597 | oldset = &current->blocked; |
@@ -602,12 +603,13 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0) | |||
602 | /* Whee! Actually deliver the signal. */ | 603 | /* Whee! Actually deliver the signal. */ |
603 | if (handle_signal(signr, &ka, &info, oldset, | 604 | if (handle_signal(signr, &ka, &info, oldset, |
604 | regs, save_r0) == 0) { | 605 | regs, save_r0) == 0) { |
605 | /* a signal was successfully delivered; the saved | 606 | /* |
607 | * A signal was successfully delivered; the saved | ||
606 | * sigmask will have been stored in the signal frame, | 608 | * sigmask will have been stored in the signal frame, |
607 | * and will be restored by sigreturn, so we can simply | 609 | * and will be restored by sigreturn, so we can simply |
608 | * clear the TIF_RESTORE_SIGMASK flag */ | 610 | * clear the TS_RESTORE_SIGMASK flag |
609 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 611 | */ |
610 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 612 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; |
611 | 613 | ||
612 | tracehook_signal_handler(signr, &info, &ka, regs, | 614 | tracehook_signal_handler(signr, &info, &ka, regs, |
613 | test_thread_flag(TIF_SINGLESTEP)); | 615 | test_thread_flag(TIF_SINGLESTEP)); |
@@ -631,10 +633,12 @@ no_signal: | |||
631 | } | 633 | } |
632 | } | 634 | } |
633 | 635 | ||
634 | /* if there's no signal to deliver, we just put the saved sigmask | 636 | /* |
635 | * back */ | 637 | * If there's no signal to deliver, we just put the saved sigmask |
636 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { | 638 | * back. |
637 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 639 | */ |
640 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) { | ||
641 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | ||
638 | sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); | 642 | sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); |
639 | } | 643 | } |
640 | } | 644 | } |
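The signal paths above stop using the atomic TIF_RESTORE_SIGMASK thread flag and instead track the state in thread_info->status, which only the owning task touches, so plain read-modify-write is enough. The set_restore_sigmask() helper called from sys_sigsuspend() is assumed to follow the x86 shape of the time, roughly:

	/* Assumed definition (sketch, modelled on the contemporary x86 helper). */
	static inline void set_restore_sigmask(void)
	{
		struct thread_info *ti = current_thread_info();

		ti->status |= TS_RESTORE_SIGMASK;
		set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
	}

Clearing is open-coded in do_signal() as a simple mask of TS_RESTORE_SIGMASK, as the hunks above show.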
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 74793c80a57..feb3dddd319 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c | |||
@@ -101,7 +101,7 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
101 | if (try_to_freeze()) | 101 | if (try_to_freeze()) |
102 | goto no_signal; | 102 | goto no_signal; |
103 | 103 | ||
104 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 104 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) |
105 | oldset = &current->saved_sigmask; | 105 | oldset = &current->saved_sigmask; |
106 | else if (!oldset) | 106 | else if (!oldset) |
107 | oldset = &current->blocked; | 107 | oldset = &current->blocked; |
@@ -115,11 +115,9 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
115 | /* | 115 | /* |
116 | * If a signal was successfully delivered, the | 116 | * If a signal was successfully delivered, the |
117 | * saved sigmask is in its frame, and we can | 117 | * saved sigmask is in its frame, and we can |
118 | * clear the TIF_RESTORE_SIGMASK flag. | 118 | * clear the TS_RESTORE_SIGMASK flag. |
119 | */ | 119 | */ |
120 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 120 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; |
121 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
122 | |||
123 | tracehook_signal_handler(signr, &info, &ka, regs, 0); | 121 | tracehook_signal_handler(signr, &info, &ka, regs, 0); |
124 | return 1; | 122 | return 1; |
125 | } | 123 | } |
@@ -146,8 +144,8 @@ no_signal: | |||
146 | } | 144 | } |
147 | 145 | ||
148 | /* No signal to deliver -- put the saved sigmask back */ | 146 | /* No signal to deliver -- put the saved sigmask back */ |
149 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { | 147 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) { |
150 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 148 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; |
151 | sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); | 149 | sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); |
152 | } | 150 | } |
153 | 151 | ||
@@ -176,6 +174,7 @@ sys_sigsuspend(old_sigset_t mask, | |||
176 | while (1) { | 174 | while (1) { |
177 | current->state = TASK_INTERRUPTIBLE; | 175 | current->state = TASK_INTERRUPTIBLE; |
178 | schedule(); | 176 | schedule(); |
177 | set_restore_sigmask(); | ||
179 | regs->pc += 4; /* because sys_sigreturn decrements the pc */ | 178 | regs->pc += 4; /* because sys_sigreturn decrements the pc */ |
180 | if (do_signal(regs, &saveset)) { | 179 | if (do_signal(regs, &saveset)) { |
181 | /* pc now points at signal handler. Need to decrement | 180 | /* pc now points at signal handler. Need to decrement |
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 160db1003cf..983e0792d5f 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c | |||
@@ -122,7 +122,9 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
122 | stack_start.bss_start = 0; /* don't clear bss for secondary cpus */ | 122 | stack_start.bss_start = 0; /* don't clear bss for secondary cpus */ |
123 | stack_start.start_kernel_fn = start_secondary; | 123 | stack_start.start_kernel_fn = start_secondary; |
124 | 124 | ||
125 | flush_cache_all(); | 125 | flush_icache_range((unsigned long)&stack_start, |
126 | (unsigned long)&stack_start + sizeof(stack_start)); | ||
127 | wmb(); | ||
126 | 128 | ||
127 | plat_start_cpu(cpu, (unsigned long)_stext); | 129 | plat_start_cpu(cpu, (unsigned long)_stext); |
128 | 130 | ||
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c index 0838942b708..9b0b633b6c9 100644 --- a/arch/sh/kernel/topology.c +++ b/arch/sh/kernel/topology.c | |||
@@ -16,6 +16,32 @@ | |||
16 | 16 | ||
17 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | 17 | static DEFINE_PER_CPU(struct cpu, cpu_devices); |
18 | 18 | ||
19 | cpumask_t cpu_core_map[NR_CPUS]; | ||
20 | |||
21 | static cpumask_t cpu_coregroup_map(unsigned int cpu) | ||
22 | { | ||
23 | /* | ||
24 | * Presently all SH-X3 SMP cores are multi-cores, so just keep it | ||
25 | * simple until we have a method for determining topology.. | ||
26 | */ | ||
27 | return cpu_possible_map; | ||
28 | } | ||
29 | |||
30 | const struct cpumask *cpu_coregroup_mask(unsigned int cpu) | ||
31 | { | ||
32 | return &cpu_core_map[cpu]; | ||
33 | } | ||
34 | |||
35 | int arch_update_cpu_topology(void) | ||
36 | { | ||
37 | unsigned int cpu; | ||
38 | |||
39 | for_each_possible_cpu(cpu) | ||
40 | cpu_core_map[cpu] = cpu_coregroup_map(cpu); | ||
41 | |||
42 | return 0; | ||
43 | } | ||
44 | |||
19 | static int __init topology_init(void) | 45 | static int __init topology_init(void) |
20 | { | 46 | { |
21 | int i, ret; | 47 | int i, ret; |
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index a8396f36bd1..7b036339dc9 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c | |||
@@ -9,8 +9,8 @@ | |||
9 | #include <asm/unwinder.h> | 9 | #include <asm/unwinder.h> |
10 | #include <asm/system.h> | 10 | #include <asm/system.h> |
11 | 11 | ||
12 | #ifdef CONFIG_BUG | 12 | #ifdef CONFIG_GENERIC_BUG |
13 | void handle_BUG(struct pt_regs *regs) | 13 | static void handle_BUG(struct pt_regs *regs) |
14 | { | 14 | { |
15 | const struct bug_entry *bug; | 15 | const struct bug_entry *bug; |
16 | unsigned long bugaddr = regs->pc; | 16 | unsigned long bugaddr = regs->pc; |
@@ -81,7 +81,7 @@ BUILD_TRAP_HANDLER(bug) | |||
81 | SIGTRAP) == NOTIFY_STOP) | 81 | SIGTRAP) == NOTIFY_STOP) |
82 | return; | 82 | return; |
83 | 83 | ||
84 | #ifdef CONFIG_BUG | 84 | #ifdef CONFIG_GENERIC_BUG |
85 | if (__kernel_text_address(instruction_pointer(regs))) { | 85 | if (__kernel_text_address(instruction_pointer(regs))) { |
86 | insn_size_t insn = *(insn_size_t *)instruction_pointer(regs); | 86 | insn_size_t insn = *(insn_size_t *)instruction_pointer(regs); |
87 | if (insn == TRAPA_BUG_OPCODE) | 87 | if (insn == TRAPA_BUG_OPCODE) |
@@ -95,9 +95,11 @@ BUILD_TRAP_HANDLER(bug) | |||
95 | 95 | ||
96 | BUILD_TRAP_HANDLER(nmi) | 96 | BUILD_TRAP_HANDLER(nmi) |
97 | { | 97 | { |
98 | unsigned int cpu = smp_processor_id(); | ||
98 | TRAP_HANDLER_DECL; | 99 | TRAP_HANDLER_DECL; |
99 | 100 | ||
100 | nmi_enter(); | 101 | nmi_enter(); |
102 | nmi_count(cpu)++; | ||
101 | 103 | ||
102 | switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) { | 104 | switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) { |
103 | case NOTIFY_OK: | 105 | case NOTIFY_OK: |
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 7a2ee3a6b8e..3da5a125d88 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/kexec.h> | 25 | #include <linux/kexec.h> |
26 | #include <linux/limits.h> | 26 | #include <linux/limits.h> |
27 | #include <linux/proc_fs.h> | 27 | #include <linux/proc_fs.h> |
28 | #include <linux/seq_file.h> | ||
28 | #include <linux/sysfs.h> | 29 | #include <linux/sysfs.h> |
29 | #include <asm/system.h> | 30 | #include <asm/system.h> |
30 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
@@ -68,61 +69,49 @@ static const char *se_usermode_action[] = { | |||
68 | "signal+warn" | 69 | "signal+warn" |
69 | }; | 70 | }; |
70 | 71 | ||
71 | static int | 72 | static int alignment_proc_show(struct seq_file *m, void *v) |
72 | proc_alignment_read(char *page, char **start, off_t off, int count, int *eof, | ||
73 | void *data) | ||
74 | { | 73 | { |
75 | char *p = page; | 74 | seq_printf(m, "User:\t\t%lu\n", se_user); |
76 | int len; | 75 | seq_printf(m, "System:\t\t%lu\n", se_sys); |
77 | 76 | seq_printf(m, "Half:\t\t%lu\n", se_half); | |
78 | p += sprintf(p, "User:\t\t%lu\n", se_user); | 77 | seq_printf(m, "Word:\t\t%lu\n", se_word); |
79 | p += sprintf(p, "System:\t\t%lu\n", se_sys); | 78 | seq_printf(m, "DWord:\t\t%lu\n", se_dword); |
80 | p += sprintf(p, "Half:\t\t%lu\n", se_half); | 79 | seq_printf(m, "Multi:\t\t%lu\n", se_multi); |
81 | p += sprintf(p, "Word:\t\t%lu\n", se_word); | 80 | seq_printf(m, "User faults:\t%i (%s)\n", se_usermode, |
82 | p += sprintf(p, "DWord:\t\t%lu\n", se_dword); | ||
83 | p += sprintf(p, "Multi:\t\t%lu\n", se_multi); | ||
84 | p += sprintf(p, "User faults:\t%i (%s)\n", se_usermode, | ||
85 | se_usermode_action[se_usermode]); | 81 | se_usermode_action[se_usermode]); |
86 | p += sprintf(p, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn, | 82 | seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn, |
87 | se_kernmode_warn ? "+warn" : ""); | 83 | se_kernmode_warn ? "+warn" : ""); |
88 | 84 | return 0; | |
89 | len = (p - page) - off; | ||
90 | if (len < 0) | ||
91 | len = 0; | ||
92 | |||
93 | *eof = (len <= count) ? 1 : 0; | ||
94 | *start = page + off; | ||
95 | |||
96 | return len; | ||
97 | } | 85 | } |
98 | 86 | ||
99 | static int proc_alignment_write(struct file *file, const char __user *buffer, | 87 | static int alignment_proc_open(struct inode *inode, struct file *file) |
100 | unsigned long count, void *data) | ||
101 | { | 88 | { |
102 | char mode; | 89 | return single_open(file, alignment_proc_show, NULL); |
103 | |||
104 | if (count > 0) { | ||
105 | if (get_user(mode, buffer)) | ||
106 | return -EFAULT; | ||
107 | if (mode >= '0' && mode <= '5') | ||
108 | se_usermode = mode - '0'; | ||
109 | } | ||
110 | return count; | ||
111 | } | 90 | } |
112 | 91 | ||
113 | static int proc_alignment_kern_write(struct file *file, const char __user *buffer, | 92 | static ssize_t alignment_proc_write(struct file *file, |
114 | unsigned long count, void *data) | 93 | const char __user *buffer, size_t count, loff_t *pos) |
115 | { | 94 | { |
95 | int *data = PDE(file->f_path.dentry->d_inode)->data; | ||
116 | char mode; | 96 | char mode; |
117 | 97 | ||
118 | if (count > 0) { | 98 | if (count > 0) { |
119 | if (get_user(mode, buffer)) | 99 | if (get_user(mode, buffer)) |
120 | return -EFAULT; | 100 | return -EFAULT; |
121 | if (mode >= '0' && mode <= '1') | 101 | if (mode >= '0' && mode <= '5') |
122 | se_kernmode_warn = mode - '0'; | 102 | *data = mode - '0'; |
123 | } | 103 | } |
124 | return count; | 104 | return count; |
125 | } | 105 | } |
106 | |||
107 | static const struct file_operations alignment_proc_fops = { | ||
108 | .owner = THIS_MODULE, | ||
109 | .open = alignment_proc_open, | ||
110 | .read = seq_read, | ||
111 | .llseek = seq_lseek, | ||
112 | .release = single_release, | ||
113 | .write = alignment_proc_write, | ||
114 | }; | ||
126 | #endif | 115 | #endif |
127 | 116 | ||
128 | static void dump_mem(const char *str, unsigned long bottom, unsigned long top) | 117 | static void dump_mem(const char *str, unsigned long bottom, unsigned long top) |
@@ -945,14 +934,9 @@ void __init trap_init(void) | |||
945 | set_exception_table_evt(0x800, do_reserved_inst); | 934 | set_exception_table_evt(0x800, do_reserved_inst); |
946 | set_exception_table_evt(0x820, do_illegal_slot_inst); | 935 | set_exception_table_evt(0x820, do_illegal_slot_inst); |
947 | #elif defined(CONFIG_SH_FPU) | 936 | #elif defined(CONFIG_SH_FPU) |
948 | #ifdef CONFIG_CPU_SUBTYPE_SHX3 | ||
949 | set_exception_table_evt(0xd80, fpu_state_restore_trap_handler); | ||
950 | set_exception_table_evt(0xda0, fpu_state_restore_trap_handler); | ||
951 | #else | ||
952 | set_exception_table_evt(0x800, fpu_state_restore_trap_handler); | 937 | set_exception_table_evt(0x800, fpu_state_restore_trap_handler); |
953 | set_exception_table_evt(0x820, fpu_state_restore_trap_handler); | 938 | set_exception_table_evt(0x820, fpu_state_restore_trap_handler); |
954 | #endif | 939 | #endif |
955 | #endif | ||
956 | 940 | ||
957 | #ifdef CONFIG_CPU_SH2 | 941 | #ifdef CONFIG_CPU_SH2 |
958 | set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler); | 942 | set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler); |
@@ -1011,20 +995,16 @@ static int __init alignment_init(void) | |||
1011 | if (!dir) | 995 | if (!dir) |
1012 | return -ENOMEM; | 996 | return -ENOMEM; |
1013 | 997 | ||
1014 | res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, dir); | 998 | res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir, |
999 | &alignment_proc_fops, &se_usermode); | ||
1015 | if (!res) | 1000 | if (!res) |
1016 | return -ENOMEM; | 1001 | return -ENOMEM; |
1017 | 1002 | ||
1018 | res->read_proc = proc_alignment_read; | 1003 | res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir, |
1019 | res->write_proc = proc_alignment_write; | 1004 | &alignment_proc_fops, &se_kernmode_warn); |
1020 | |||
1021 | res = create_proc_entry("kernel_alignment", S_IWUSR | S_IRUGO, dir); | ||
1022 | if (!res) | 1005 | if (!res) |
1023 | return -ENOMEM; | 1006 | return -ENOMEM; |
1024 | 1007 | ||
1025 | res->read_proc = proc_alignment_read; | ||
1026 | res->write_proc = proc_alignment_kern_write; | ||
1027 | |||
1028 | return 0; | 1008 | return 0; |
1029 | } | 1009 | } |
1030 | 1010 | ||
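The /proc alignment entries are converted from the old read_proc/write_proc callbacks to the seq_file single_open() pattern, with proc_create_data() attaching a pointer to the variable each entry controls; the shared write handler recovers it through PDE(). A minimal hedged sketch of the same pattern for a hypothetical integer knob:

	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int knob_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "%d\n", *(int *)m->private);
		return 0;
	}

	static int knob_open(struct inode *inode, struct file *file)
	{
		/* Hand the per-entry data pointer through to the seq_file. */
		return single_open(file, knob_show, PDE(inode)->data);
	}

	static const struct file_operations knob_fops = {
		.owner   = THIS_MODULE,
		.open    = knob_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

	/* Registered from an __init path with something like:
	 *	proc_create_data("knob", S_IWUSR | S_IRUGO, parent, &knob_fops, &some_int);
	 */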
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index a969b47c546..dab4d212981 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for SuperH-specific library files.. | 2 | # Makefile for SuperH-specific library files.. |
3 | # | 3 | # |
4 | 4 | ||
5 | lib-y = delay.o memset.o memmove.o memchr.o \ | 5 | lib-y = delay.o memmove.o memchr.o \ |
6 | checksum.o strlen.o div64.o div64-generic.o | 6 | checksum.o strlen.o div64.o div64-generic.o |
7 | 7 | ||
8 | # Extracted from libgcc | 8 | # Extracted from libgcc |
@@ -23,8 +23,11 @@ obj-y += io.o | |||
23 | memcpy-y := memcpy.o | 23 | memcpy-y := memcpy.o |
24 | memcpy-$(CONFIG_CPU_SH4) := memcpy-sh4.o | 24 | memcpy-$(CONFIG_CPU_SH4) := memcpy-sh4.o |
25 | 25 | ||
26 | memset-y := memset.o | ||
27 | memset-$(CONFIG_CPU_SH4) := memset-sh4.o | ||
28 | |||
26 | lib-$(CONFIG_MMU) += copy_page.o __clear_user.o | 29 | lib-$(CONFIG_MMU) += copy_page.o __clear_user.o |
27 | lib-$(CONFIG_MCOUNT) += mcount.o | 30 | lib-$(CONFIG_MCOUNT) += mcount.o |
28 | lib-y += $(memcpy-y) $(udivsi3-y) | 31 | lib-y += $(memcpy-y) $(memset-y) $(udivsi3-y) |
29 | 32 | ||
30 | EXTRA_CFLAGS += -Werror | 33 | EXTRA_CFLAGS += -Werror |
diff --git a/arch/sh/lib/memset-sh4.S b/arch/sh/lib/memset-sh4.S new file mode 100644 index 00000000000..1a6e32cc4e4 --- /dev/null +++ b/arch/sh/lib/memset-sh4.S | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | * "memset" implementation for SH4 | ||
3 | * | ||
4 | * Copyright (C) 1999 Niibe Yutaka | ||
5 | * Copyright (c) 2009 STMicroelectronics Limited | ||
6 | * Author: Stuart Menefy <stuart.menefy:st.com> | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * void *memset(void *s, int c, size_t n); | ||
11 | */ | ||
12 | |||
13 | #include <linux/linkage.h> | ||
14 | |||
15 | ENTRY(memset) | ||
16 | mov #12,r0 | ||
17 | add r6,r4 | ||
18 | cmp/gt r6,r0 | ||
19 | bt/s 40f ! if it's too small, set a byte at once | ||
20 | mov r4,r0 | ||
21 | and #3,r0 | ||
22 | cmp/eq #0,r0 | ||
23 | bt/s 2f ! It's aligned | ||
24 | sub r0,r6 | ||
25 | 1: | ||
26 | dt r0 | ||
27 | bf/s 1b | ||
28 | mov.b r5,@-r4 | ||
29 | 2: ! make VVVV | ||
30 | extu.b r5,r5 | ||
31 | swap.b r5,r0 ! V0 | ||
32 | or r0,r5 ! VV | ||
33 | swap.w r5,r0 ! VV00 | ||
34 | or r0,r5 ! VVVV | ||
35 | |||
36 | ! Check if enough bytes need to be copied to be worth the big loop | ||
37 | mov #0x40, r0 ! (MT) | ||
38 | cmp/gt r6,r0 ! (MT) 64 > len => slow loop | ||
39 | |||
40 | bt/s 22f | ||
41 | mov r6,r0 | ||
42 | |||
43 | ! align the dst to the cache block size if necessary | ||
44 | mov r4, r3 | ||
45 | mov #~(0x1f), r1 | ||
46 | |||
47 | and r3, r1 | ||
48 | cmp/eq r3, r1 | ||
49 | |||
50 | bt/s 11f ! dst is already aligned | ||
51 | sub r1, r3 ! r3-r1 -> r3 | ||
52 | shlr2 r3 ! number of loops | ||
53 | |||
54 | 10: mov.l r5,@-r4 | ||
55 | dt r3 | ||
56 | bf/s 10b | ||
57 | add #-4, r6 | ||
58 | |||
59 | 11: ! dst is 32byte aligned | ||
60 | mov r6,r2 | ||
61 | mov #-5,r0 | ||
62 | shld r0,r2 ! number of loops | ||
63 | |||
64 | add #-32, r4 | ||
65 | mov r5, r0 | ||
66 | 12: | ||
67 | movca.l r0,@r4 | ||
68 | mov.l r5,@(4, r4) | ||
69 | mov.l r5,@(8, r4) | ||
70 | mov.l r5,@(12,r4) | ||
71 | mov.l r5,@(16,r4) | ||
72 | mov.l r5,@(20,r4) | ||
73 | add #-0x20, r6 | ||
74 | mov.l r5,@(24,r4) | ||
75 | dt r2 | ||
76 | mov.l r5,@(28,r4) | ||
77 | bf/s 12b | ||
78 | add #-32, r4 | ||
79 | |||
80 | add #32, r4 | ||
81 | mov #8, r0 | ||
82 | cmp/ge r0, r6 | ||
83 | bf 40f | ||
84 | |||
85 | mov r6,r0 | ||
86 | 22: | ||
87 | shlr2 r0 | ||
88 | shlr r0 ! r0 = r6 >> 3 | ||
89 | 3: | ||
90 | dt r0 | ||
91 | mov.l r5,@-r4 ! set 8-byte at once | ||
92 | bf/s 3b | ||
93 | mov.l r5,@-r4 | ||
94 | ! | ||
95 | mov #7,r0 | ||
96 | and r0,r6 | ||
97 | |||
98 | ! fill bytes (length may be zero) | ||
99 | 40: tst r6,r6 | ||
100 | bt 5f | ||
101 | 4: | ||
102 | dt r6 | ||
103 | bf/s 4b | ||
104 | mov.b r5,@-r4 | ||
105 | 5: | ||
106 | rts | ||
107 | mov r4,r0 | ||
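The new SH-4 memset fills backwards from s + n: byte stores until the pointer is word-aligned, the fill byte replicated into a 32-bit pattern with swap.b/swap.w, a 32-byte unrolled loop that uses movca.l so each cache block is allocated without being read from memory, then word and byte tails. A portable C sketch of the same control flow (illustration only; the movca.l and cache-block alignment tricks have no C equivalent):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	void *memset_sketch(void *s, int c, size_t n)
	{
		unsigned char *p = (unsigned char *)s + n;	/* fill backwards, like the asm */
		uint32_t v = (uint8_t)c;

		v |= v << 8;
		v |= v << 16;				/* replicate the byte: VVVV */

		while (n && ((uintptr_t)p & 3)) {	/* byte stores until aligned */
			*--p = (unsigned char)c;
			n--;
		}
		while (n >= 4) {			/* word-at-a-time main loop */
			p -= 4;
			memcpy(p, &v, 4);		/* plain store; avoids aliasing issues */
			n -= 4;
		}
		while (n--)				/* trailing bytes */
			*--p = (unsigned char)c;

		return s;
	}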
diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c index ac2d7abd256..d6c15cae091 100644 --- a/arch/sh/math-emu/math.c +++ b/arch/sh/math-emu/math.c | |||
@@ -558,7 +558,7 @@ static int ieee_fpe_handler(struct pt_regs *regs) | |||
558 | (finsn >> 8) & 0xf); | 558 | (finsn >> 8) & 0xf); |
559 | tsk->thread.fpu.hard.fpscr &= | 559 | tsk->thread.fpu.hard.fpscr &= |
560 | ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); | 560 | ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); |
561 | set_tsk_thread_flag(tsk, TIF_USEDFPU); | 561 | task_thread_info(tsk)->status |= TS_USEDFPU; |
562 | } else { | 562 | } else { |
563 | info.si_signo = SIGFPE; | 563 | info.si_signo = SIGFPE; |
564 | info.si_errno = 0; | 564 | info.si_errno = 0; |
@@ -619,10 +619,10 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs) | |||
619 | struct task_struct *tsk = current; | 619 | struct task_struct *tsk = current; |
620 | struct sh_fpu_soft_struct *fpu = &(tsk->thread.fpu.soft); | 620 | struct sh_fpu_soft_struct *fpu = &(tsk->thread.fpu.soft); |
621 | 621 | ||
622 | if (!test_tsk_thread_flag(tsk, TIF_USEDFPU)) { | 622 | if (!(task_thread_info(tsk)->status & TS_USEDFPU)) { |
623 | /* initialize once. */ | 623 | /* initialize once. */ |
624 | fpu_init(fpu); | 624 | fpu_init(fpu); |
625 | set_tsk_thread_flag(tsk, TIF_USEDFPU); | 625 | task_thread_info(tsk)->status |= TS_USEDFPU; |
626 | } | 626 | } |
627 | 627 | ||
628 | return fpu_emulate(inst, fpu, regs); | 628 | return fpu_emulate(inst, fpu, regs); |
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 7f7b52f9beb..0e7ba8e891c 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig | |||
@@ -82,8 +82,7 @@ config 32BIT | |||
82 | 82 | ||
83 | config PMB_ENABLE | 83 | config PMB_ENABLE |
84 | bool "Support 32-bit physical addressing through PMB" | 84 | bool "Support 32-bit physical addressing through PMB" |
85 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) | 85 | depends on MMU && EXPERIMENTAL && CPU_SH4A |
86 | select 32BIT | ||
87 | default y | 86 | default y |
88 | help | 87 | help |
89 | If you say Y here, physical addressing will be extended to | 88 | If you say Y here, physical addressing will be extended to |
@@ -97,8 +96,7 @@ choice | |||
97 | 96 | ||
98 | config PMB | 97 | config PMB |
99 | bool "PMB" | 98 | bool "PMB" |
100 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) | 99 | depends on MMU && EXPERIMENTAL && CPU_SH4A |
101 | select 32BIT | ||
102 | help | 100 | help |
103 | If you say Y here, physical addressing will be extended to | 101 | If you say Y here, physical addressing will be extended to |
104 | 32-bits through the SH-4A PMB. If this is not set, legacy | 102 | 32-bits through the SH-4A PMB. If this is not set, legacy |
@@ -106,9 +104,7 @@ config PMB | |||
106 | 104 | ||
107 | config PMB_FIXED | 105 | config PMB_FIXED |
108 | bool "fixed PMB" | 106 | bool "fixed PMB" |
109 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || \ | 107 | depends on MMU && EXPERIMENTAL && CPU_SH4A |
110 | CPU_SUBTYPE_SH7780 || \ | ||
111 | CPU_SUBTYPE_SH7785) | ||
112 | select 32BIT | 108 | select 32BIT |
113 | help | 109 | help |
114 | If this option is enabled, fixed PMB mappings are inherited | 110 | If this option is enabled, fixed PMB mappings are inherited |
@@ -258,6 +254,15 @@ endchoice | |||
258 | 254 | ||
259 | source "mm/Kconfig" | 255 | source "mm/Kconfig" |
260 | 256 | ||
257 | config SCHED_MC | ||
258 | bool "Multi-core scheduler support" | ||
259 | depends on SMP | ||
260 | default y | ||
261 | help | ||
262 | Multi-core scheduler support improves the CPU scheduler's decision | ||
263 | making when dealing with multi-core CPU chips at a cost of slightly | ||
264 | increased overhead in some places. If unsure say N here. | ||
265 | |||
261 | endmenu | 266 | endmenu |
262 | 267 | ||
263 | menu "Cache configuration" | 268 | menu "Cache configuration" |
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile index 3759bf85329..8a70535fa7c 100644 --- a/arch/sh/mm/Makefile +++ b/arch/sh/mm/Makefile | |||
@@ -33,8 +33,7 @@ obj-y += $(tlb-y) | |||
33 | endif | 33 | endif |
34 | 34 | ||
35 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 35 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
36 | obj-$(CONFIG_PMB) += pmb.o | 36 | obj-$(CONFIG_PMB_ENABLE) += pmb.o |
37 | obj-$(CONFIG_PMB_FIXED) += pmb-fixed.o | ||
38 | obj-$(CONFIG_NUMA) += numa.o | 37 | obj-$(CONFIG_NUMA) += numa.o |
39 | 38 | ||
40 | # Special flags for fault_64.o. This puts restrictions on the number of | 39 | # Special flags for fault_64.o. This puts restrictions on the number of |
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index b7f235c74d6..f36a08bf3d5 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * arch/sh/mm/cache-sh4.c | 2 | * arch/sh/mm/cache-sh4.c |
3 | * | 3 | * |
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | 4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka |
5 | * Copyright (C) 2001 - 2007 Paul Mundt | 5 | * Copyright (C) 2001 - 2009 Paul Mundt |
6 | * Copyright (C) 2003 Richard Curnow | 6 | * Copyright (C) 2003 Richard Curnow |
7 | * Copyright (c) 2007 STMicroelectronics (R&D) Ltd. | 7 | * Copyright (c) 2007 STMicroelectronics (R&D) Ltd. |
8 | * | 8 | * |
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/highmem.h> | ||
19 | #include <asm/pgtable.h> | ||
18 | #include <asm/mmu_context.h> | 20 | #include <asm/mmu_context.h> |
19 | #include <asm/cacheflush.h> | 21 | #include <asm/cacheflush.h> |
20 | 22 | ||
@@ -23,21 +25,12 @@ | |||
23 | * flushing. Anything exceeding this will simply flush the dcache in its | 25 | * flushing. Anything exceeding this will simply flush the dcache in its |
24 | * entirety. | 26 | * entirety. |
25 | */ | 27 | */ |
26 | #define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */ | ||
27 | #define MAX_ICACHE_PAGES 32 | 28 | #define MAX_ICACHE_PAGES 32 |
28 | 29 | ||
29 | static void __flush_cache_one(unsigned long addr, unsigned long phys, | 30 | static void __flush_cache_one(unsigned long addr, unsigned long phys, |
30 | unsigned long exec_offset); | 31 | unsigned long exec_offset); |
31 | 32 | ||
32 | /* | 33 | /* |
33 | * This is initialised here to ensure that it is not placed in the BSS. If | ||
34 | * that were to happen, note that cache_init gets called before the BSS is | ||
35 | * cleared, so this would get nulled out which would be hopeless. | ||
36 | */ | ||
37 | static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) = | ||
38 | (void (*)(unsigned long, unsigned long))0xdeadbeef; | ||
39 | |||
40 | /* | ||
41 | * Write back the range of D-cache, and purge the I-cache. | 34 | * Write back the range of D-cache, and purge the I-cache. |
42 | * | 35 | * |
43 | * Called from kernel/module.c:sys_init_module and routine for a.out format, | 36 | * Called from kernel/module.c:sys_init_module and routine for a.out format, |
@@ -97,15 +90,15 @@ static inline void flush_cache_one(unsigned long start, unsigned long phys) | |||
97 | unsigned long flags, exec_offset = 0; | 90 | unsigned long flags, exec_offset = 0; |
98 | 91 | ||
99 | /* | 92 | /* |
100 | * All types of SH-4 require PC to be in P2 to operate on the I-cache. | 93 | * All types of SH-4 require PC to be uncached to operate on the I-cache. |
101 | * Some types of SH-4 require PC to be in P2 to operate on the D-cache. | 94 | * Some types of SH-4 require PC to be uncached to operate on the D-cache. |
102 | */ | 95 | */ |
103 | if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) || | 96 | if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) || |
104 | (start < CACHE_OC_ADDRESS_ARRAY)) | 97 | (start < CACHE_OC_ADDRESS_ARRAY)) |
105 | exec_offset = 0x20000000; | 98 | exec_offset = cached_to_uncached; |
106 | 99 | ||
107 | local_irq_save(flags); | 100 | local_irq_save(flags); |
108 | __flush_cache_one(start | SH_CACHE_ASSOC, P1SEGADDR(phys), exec_offset); | 101 | __flush_cache_one(start, phys, exec_offset); |
109 | local_irq_restore(flags); | 102 | local_irq_restore(flags); |
110 | } | 103 | } |
111 | 104 | ||
@@ -124,7 +117,7 @@ static void sh4_flush_dcache_page(void *arg) | |||
124 | else | 117 | else |
125 | #endif | 118 | #endif |
126 | { | 119 | { |
127 | unsigned long phys = PHYSADDR(page_address(page)); | 120 | unsigned long phys = page_to_phys(page); |
128 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY; | 121 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY; |
129 | int i, n; | 122 | int i, n; |
130 | 123 | ||
@@ -159,10 +152,27 @@ static void __uses_jump_to_uncached flush_icache_all(void) | |||
159 | local_irq_restore(flags); | 152 | local_irq_restore(flags); |
160 | } | 153 | } |
161 | 154 | ||
162 | static inline void flush_dcache_all(void) | 155 | static void flush_dcache_all(void) |
163 | { | 156 | { |
164 | (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size); | 157 | unsigned long addr, end_addr, entry_offset; |
165 | wmb(); | 158 | |
159 | end_addr = CACHE_OC_ADDRESS_ARRAY + | ||
160 | (current_cpu_data.dcache.sets << | ||
161 | current_cpu_data.dcache.entry_shift) * | ||
162 | current_cpu_data.dcache.ways; | ||
163 | |||
164 | entry_offset = 1 << current_cpu_data.dcache.entry_shift; | ||
165 | |||
166 | for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) { | ||
167 | __raw_writel(0, addr); addr += entry_offset; | ||
168 | __raw_writel(0, addr); addr += entry_offset; | ||
169 | __raw_writel(0, addr); addr += entry_offset; | ||
170 | __raw_writel(0, addr); addr += entry_offset; | ||
171 | __raw_writel(0, addr); addr += entry_offset; | ||
172 | __raw_writel(0, addr); addr += entry_offset; | ||
173 | __raw_writel(0, addr); addr += entry_offset; | ||
174 | __raw_writel(0, addr); addr += entry_offset; | ||
175 | } | ||
166 | } | 176 | } |
167 | 177 | ||
168 | static void sh4_flush_cache_all(void *unused) | 178 | static void sh4_flush_cache_all(void *unused) |
@@ -171,89 +181,13 @@ static void sh4_flush_cache_all(void *unused) | |||
171 | flush_icache_all(); | 181 | flush_icache_all(); |
172 | } | 182 | } |
173 | 183 | ||
174 | static void __flush_cache_mm(struct mm_struct *mm, unsigned long start, | ||
175 | unsigned long end) | ||
176 | { | ||
177 | unsigned long d = 0, p = start & PAGE_MASK; | ||
178 | unsigned long alias_mask = boot_cpu_data.dcache.alias_mask; | ||
179 | unsigned long n_aliases = boot_cpu_data.dcache.n_aliases; | ||
180 | unsigned long select_bit; | ||
181 | unsigned long all_aliases_mask; | ||
182 | unsigned long addr_offset; | ||
183 | pgd_t *dir; | ||
184 | pmd_t *pmd; | ||
185 | pud_t *pud; | ||
186 | pte_t *pte; | ||
187 | int i; | ||
188 | |||
189 | dir = pgd_offset(mm, p); | ||
190 | pud = pud_offset(dir, p); | ||
191 | pmd = pmd_offset(pud, p); | ||
192 | end = PAGE_ALIGN(end); | ||
193 | |||
194 | all_aliases_mask = (1 << n_aliases) - 1; | ||
195 | |||
196 | do { | ||
197 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) { | ||
198 | p &= PMD_MASK; | ||
199 | p += PMD_SIZE; | ||
200 | pmd++; | ||
201 | |||
202 | continue; | ||
203 | } | ||
204 | |||
205 | pte = pte_offset_kernel(pmd, p); | ||
206 | |||
207 | do { | ||
208 | unsigned long phys; | ||
209 | pte_t entry = *pte; | ||
210 | |||
211 | if (!(pte_val(entry) & _PAGE_PRESENT)) { | ||
212 | pte++; | ||
213 | p += PAGE_SIZE; | ||
214 | continue; | ||
215 | } | ||
216 | |||
217 | phys = pte_val(entry) & PTE_PHYS_MASK; | ||
218 | |||
219 | if ((p ^ phys) & alias_mask) { | ||
220 | d |= 1 << ((p & alias_mask) >> PAGE_SHIFT); | ||
221 | d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT); | ||
222 | |||
223 | if (d == all_aliases_mask) | ||
224 | goto loop_exit; | ||
225 | } | ||
226 | |||
227 | pte++; | ||
228 | p += PAGE_SIZE; | ||
229 | } while (p < end && ((unsigned long)pte & ~PAGE_MASK)); | ||
230 | pmd++; | ||
231 | } while (p < end); | ||
232 | |||
233 | loop_exit: | ||
234 | addr_offset = 0; | ||
235 | select_bit = 1; | ||
236 | |||
237 | for (i = 0; i < n_aliases; i++) { | ||
238 | if (d & select_bit) { | ||
239 | (*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE); | ||
240 | wmb(); | ||
241 | } | ||
242 | |||
243 | select_bit <<= 1; | ||
244 | addr_offset += PAGE_SIZE; | ||
245 | } | ||
246 | } | ||
247 | |||
248 | /* | 184 | /* |
249 | * Note : (RPC) since the caches are physically tagged, the only point | 185 | * Note : (RPC) since the caches are physically tagged, the only point |
250 | * of flush_cache_mm for SH-4 is to get rid of aliases from the | 186 | * of flush_cache_mm for SH-4 is to get rid of aliases from the |
251 | * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that | 187 | * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that |
252 | * lines can stay resident so long as the virtual address they were | 188 | * lines can stay resident so long as the virtual address they were |
253 | * accessed with (hence cache set) is in accord with the physical | 189 | * accessed with (hence cache set) is in accord with the physical |
254 | * address (i.e. tag). It's no different here. So I reckon we don't | 190 | * address (i.e. tag). It's no different here. |
255 | * need to flush the I-cache, since aliases don't matter for that. We | ||
256 | * should try that. | ||
257 | * | 191 | * |
258 | * Caller takes mm->mmap_sem. | 192 | * Caller takes mm->mmap_sem. |
259 | */ | 193 | */ |
@@ -264,33 +198,7 @@ static void sh4_flush_cache_mm(void *arg) | |||
264 | if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) | 198 | if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) |
265 | return; | 199 | return; |
266 | 200 | ||
267 | /* | 201 | flush_dcache_all(); |
268 | * If cache is only 4k-per-way, there are never any 'aliases'. Since | ||
269 | * the cache is physically tagged, the data can just be left in there. | ||
270 | */ | ||
271 | if (boot_cpu_data.dcache.n_aliases == 0) | ||
272 | return; | ||
273 | |||
274 | /* | ||
275 | * Don't bother groveling around the dcache for the VMA ranges | ||
276 | * if there are too many PTEs to make it worthwhile. | ||
277 | */ | ||
278 | if (mm->nr_ptes >= MAX_DCACHE_PAGES) | ||
279 | flush_dcache_all(); | ||
280 | else { | ||
281 | struct vm_area_struct *vma; | ||
282 | |||
283 | /* | ||
284 | * In this case there are reasonably sized ranges to flush, | ||
285 | * iterate through the VMA list and take care of any aliases. | ||
286 | */ | ||
287 | for (vma = mm->mmap; vma; vma = vma->vm_next) | ||
288 | __flush_cache_mm(mm, vma->vm_start, vma->vm_end); | ||
289 | } | ||
290 | |||
291 | /* Only touch the icache if one of the VMAs has VM_EXEC set. */ | ||
292 | if (mm->exec_vm) | ||
293 | flush_icache_all(); | ||
294 | } | 202 | } |
295 | 203 | ||
296 | /* | 204 | /* |
@@ -303,44 +211,63 @@ static void sh4_flush_cache_page(void *args) | |||
303 | { | 211 | { |
304 | struct flusher_data *data = args; | 212 | struct flusher_data *data = args; |
305 | struct vm_area_struct *vma; | 213 | struct vm_area_struct *vma; |
214 | struct page *page; | ||
306 | unsigned long address, pfn, phys; | 215 | unsigned long address, pfn, phys; |
307 | unsigned int alias_mask; | 216 | int map_coherent = 0; |
217 | pgd_t *pgd; | ||
218 | pud_t *pud; | ||
219 | pmd_t *pmd; | ||
220 | pte_t *pte; | ||
221 | void *vaddr; | ||
308 | 222 | ||
309 | vma = data->vma; | 223 | vma = data->vma; |
310 | address = data->addr1; | 224 | address = data->addr1 & PAGE_MASK; |
311 | pfn = data->addr2; | 225 | pfn = data->addr2; |
312 | phys = pfn << PAGE_SHIFT; | 226 | phys = pfn << PAGE_SHIFT; |
227 | page = pfn_to_page(pfn); | ||
313 | 228 | ||
314 | if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) | 229 | if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) |
315 | return; | 230 | return; |
316 | 231 | ||
317 | alias_mask = boot_cpu_data.dcache.alias_mask; | 232 | pgd = pgd_offset(vma->vm_mm, address); |
318 | 233 | pud = pud_offset(pgd, address); | |
319 | /* We only need to flush D-cache when we have alias */ | 234 | pmd = pmd_offset(pud, address); |
320 | if ((address^phys) & alias_mask) { | 235 | pte = pte_offset_kernel(pmd, address); |
321 | /* Loop 4K of the D-cache */ | 236 | |
322 | flush_cache_one( | 237 | /* If the page isn't present, there is nothing to do here. */ |
323 | CACHE_OC_ADDRESS_ARRAY | (address & alias_mask), | 238 | if (!(pte_val(*pte) & _PAGE_PRESENT)) |
324 | phys); | 239 | return; |
325 | /* Loop another 4K of the D-cache */ | ||
326 | flush_cache_one( | ||
327 | CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask), | ||
328 | phys); | ||
329 | } | ||
330 | 240 | ||
331 | alias_mask = boot_cpu_data.icache.alias_mask; | 241 | if ((vma->vm_mm == current->active_mm)) |
332 | if (vma->vm_flags & VM_EXEC) { | 242 | vaddr = NULL; |
243 | else { | ||
333 | /* | 244 | /* |
334 | * Evict entries from the portion of the cache from which code | 245 | * Use kmap_coherent or kmap_atomic to do flushes for |
335 | * may have been executed at this address (virtual). There's | 246 | * another ASID than the current one. |
336 | * no need to evict from the portion corresponding to the | ||
337 | * physical address as for the D-cache, because we know the | ||
338 | * kernel has never executed the code through its identity | ||
339 | * translation. | ||
340 | */ | 247 | */ |
341 | flush_cache_one( | 248 | map_coherent = (current_cpu_data.dcache.n_aliases && |
342 | CACHE_IC_ADDRESS_ARRAY | (address & alias_mask), | 249 | !test_bit(PG_dcache_dirty, &page->flags) && |
343 | phys); | 250 | page_mapped(page)); |
251 | if (map_coherent) | ||
252 | vaddr = kmap_coherent(page, address); | ||
253 | else | ||
254 | vaddr = kmap_atomic(page, KM_USER0); | ||
255 | |||
256 | address = (unsigned long)vaddr; | ||
257 | } | ||
258 | |||
259 | if (pages_do_alias(address, phys)) | ||
260 | flush_cache_one(CACHE_OC_ADDRESS_ARRAY | | ||
261 | (address & shm_align_mask), phys); | ||
262 | |||
263 | if (vma->vm_flags & VM_EXEC) | ||
264 | flush_icache_all(); | ||
265 | |||
266 | if (vaddr) { | ||
267 | if (map_coherent) | ||
268 | kunmap_coherent(vaddr); | ||
269 | else | ||
270 | kunmap_atomic(vaddr, KM_USER0); | ||
344 | } | 271 | } |
345 | } | 272 | } |
346 | 273 | ||
@@ -373,24 +300,10 @@ static void sh4_flush_cache_range(void *args) | |||
373 | if (boot_cpu_data.dcache.n_aliases == 0) | 300 | if (boot_cpu_data.dcache.n_aliases == 0) |
374 | return; | 301 | return; |
375 | 302 | ||
376 | /* | 303 | flush_dcache_all(); |
377 | * Don't bother with the lookup and alias check if we have a | ||
378 | * wide range to cover, just blow away the dcache in its | ||
379 | * entirety instead. -- PFM. | ||
380 | */ | ||
381 | if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES) | ||
382 | flush_dcache_all(); | ||
383 | else | ||
384 | __flush_cache_mm(vma->vm_mm, start, end); | ||
385 | 304 | ||
386 | if (vma->vm_flags & VM_EXEC) { | 305 | if (vma->vm_flags & VM_EXEC) |
387 | /* | ||
388 | * TODO: Is this required??? Need to look at how I-cache | ||
389 | * coherency is assured when new programs are loaded to see if | ||
390 | * this matters. | ||
391 | */ | ||
392 | flush_icache_all(); | 306 | flush_icache_all(); |
393 | } | ||
394 | } | 307 | } |
395 | 308 | ||
396 | /** | 309 | /** |
@@ -464,245 +377,6 @@ static void __flush_cache_one(unsigned long addr, unsigned long phys, | |||
464 | } while (--way_count != 0); | 377 | } while (--way_count != 0); |
465 | } | 378 | } |
466 | 379 | ||
467 | /* | ||
468 | * Break the 1, 2 and 4 way variants of this out into separate functions to | ||
469 | * avoid nearly all the overhead of having the conditional stuff in the function | ||
470 | * bodies (+ the 1 and 2 way cases avoid saving any registers too). | ||
471 | * | ||
472 | * We want to eliminate unnecessary bus transactions, so this code uses | ||
473 | * a non-obvious technique. | ||
474 | * | ||
475 | * Loop over a cache way sized block of, one cache line at a time. For each | ||
476 | * line, use movca.a to cause the current cache line contents to be written | ||
477 | * back, but without reading anything from main memory. However this has the | ||
478 | * side effect that the cache is now caching that memory location. So follow | ||
479 | * this with a cache invalidate to mark the cache line invalid. And do all | ||
480 | * this with interrupts disabled, to avoid the cache line being accidentally | ||
481 | * evicted while it is holding garbage. | ||
482 | * | ||
483 | * This also breaks in a number of circumstances: | ||
484 | * - if there are modifications to the region of memory just above | ||
485 | * empty_zero_page (for example because a breakpoint has been placed | ||
486 | * there), then these can be lost. | ||
487 | * | ||
488 | * This is because the memory address which the cache temporarily | ||
489 | * caches in the above description is empty_zero_page. So the | ||
490 | * movca.l hits the cache (it is assumed that it misses, or at least | ||
491 | * isn't dirty), modifies the line and then invalidates it, losing the | ||
492 | * required change. | ||
493 | * | ||
494 | * - If caches are disabled or configured in write-through mode, then | ||
495 | * the movca.l writes garbage directly into memory. | ||
496 | */ | ||
497 | static void __flush_dcache_segment_writethrough(unsigned long start, | ||
498 | unsigned long extent_per_way) | ||
499 | { | ||
500 | unsigned long addr; | ||
501 | int i; | ||
502 | |||
503 | addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask); | ||
504 | |||
505 | while (extent_per_way) { | ||
506 | for (i = 0; i < cpu_data->dcache.ways; i++) | ||
507 | __raw_writel(0, addr + cpu_data->dcache.way_incr * i); | ||
508 | |||
509 | addr += cpu_data->dcache.linesz; | ||
510 | extent_per_way -= cpu_data->dcache.linesz; | ||
511 | } | ||
512 | } | ||
513 | |||
514 | static void __flush_dcache_segment_1way(unsigned long start, | ||
515 | unsigned long extent_per_way) | ||
516 | { | ||
517 | unsigned long orig_sr, sr_with_bl; | ||
518 | unsigned long base_addr; | ||
519 | unsigned long way_incr, linesz, way_size; | ||
520 | struct cache_info *dcache; | ||
521 | register unsigned long a0, a0e; | ||
522 | |||
523 | asm volatile("stc sr, %0" : "=r" (orig_sr)); | ||
524 | sr_with_bl = orig_sr | (1<<28); | ||
525 | base_addr = ((unsigned long)&empty_zero_page[0]); | ||
526 | |||
527 | /* | ||
528 | * The previous code aligned base_addr to 16k, i.e. the way_size of all | ||
529 | * existing SH-4 D-caches. Whilst I don't see a need to have this | ||
530 | * aligned to any better than the cache line size (which it will be | ||
531 | * anyway by construction), let's align it to at least the way_size of | ||
532 | * any existing or conceivable SH-4 D-cache. -- RPC | ||
533 | */ | ||
534 | base_addr = ((base_addr >> 16) << 16); | ||
535 | base_addr |= start; | ||
536 | |||
537 | dcache = &boot_cpu_data.dcache; | ||
538 | linesz = dcache->linesz; | ||
539 | way_incr = dcache->way_incr; | ||
540 | way_size = dcache->way_size; | ||
541 | |||
542 | a0 = base_addr; | ||
543 | a0e = base_addr + extent_per_way; | ||
544 | do { | ||
545 | asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); | ||
546 | asm volatile("movca.l r0, @%0\n\t" | ||
547 | "ocbi @%0" : : "r" (a0)); | ||
548 | a0 += linesz; | ||
549 | asm volatile("movca.l r0, @%0\n\t" | ||
550 | "ocbi @%0" : : "r" (a0)); | ||
551 | a0 += linesz; | ||
552 | asm volatile("movca.l r0, @%0\n\t" | ||
553 | "ocbi @%0" : : "r" (a0)); | ||
554 | a0 += linesz; | ||
555 | asm volatile("movca.l r0, @%0\n\t" | ||
556 | "ocbi @%0" : : "r" (a0)); | ||
557 | asm volatile("ldc %0, sr" : : "r" (orig_sr)); | ||
558 | a0 += linesz; | ||
559 | } while (a0 < a0e); | ||
560 | } | ||
561 | |||
562 | static void __flush_dcache_segment_2way(unsigned long start, | ||
563 | unsigned long extent_per_way) | ||
564 | { | ||
565 | unsigned long orig_sr, sr_with_bl; | ||
566 | unsigned long base_addr; | ||
567 | unsigned long way_incr, linesz, way_size; | ||
568 | struct cache_info *dcache; | ||
569 | register unsigned long a0, a1, a0e; | ||
570 | |||
571 | asm volatile("stc sr, %0" : "=r" (orig_sr)); | ||
572 | sr_with_bl = orig_sr | (1<<28); | ||
573 | base_addr = ((unsigned long)&empty_zero_page[0]); | ||
574 | |||
575 | /* See comment under 1-way above */ | ||
576 | base_addr = ((base_addr >> 16) << 16); | ||
577 | base_addr |= start; | ||
578 | |||
579 | dcache = &boot_cpu_data.dcache; | ||
580 | linesz = dcache->linesz; | ||
581 | way_incr = dcache->way_incr; | ||
582 | way_size = dcache->way_size; | ||
583 | |||
584 | a0 = base_addr; | ||
585 | a1 = a0 + way_incr; | ||
586 | a0e = base_addr + extent_per_way; | ||
587 | do { | ||
588 | asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); | ||
589 | asm volatile("movca.l r0, @%0\n\t" | ||
590 | "movca.l r0, @%1\n\t" | ||
591 | "ocbi @%0\n\t" | ||
592 | "ocbi @%1" : : | ||
593 | "r" (a0), "r" (a1)); | ||
594 | a0 += linesz; | ||
595 | a1 += linesz; | ||
596 | asm volatile("movca.l r0, @%0\n\t" | ||
597 | "movca.l r0, @%1\n\t" | ||
598 | "ocbi @%0\n\t" | ||
599 | "ocbi @%1" : : | ||
600 | "r" (a0), "r" (a1)); | ||
601 | a0 += linesz; | ||
602 | a1 += linesz; | ||
603 | asm volatile("movca.l r0, @%0\n\t" | ||
604 | "movca.l r0, @%1\n\t" | ||
605 | "ocbi @%0\n\t" | ||
606 | "ocbi @%1" : : | ||
607 | "r" (a0), "r" (a1)); | ||
608 | a0 += linesz; | ||
609 | a1 += linesz; | ||
610 | asm volatile("movca.l r0, @%0\n\t" | ||
611 | "movca.l r0, @%1\n\t" | ||
612 | "ocbi @%0\n\t" | ||
613 | "ocbi @%1" : : | ||
614 | "r" (a0), "r" (a1)); | ||
615 | asm volatile("ldc %0, sr" : : "r" (orig_sr)); | ||
616 | a0 += linesz; | ||
617 | a1 += linesz; | ||
618 | } while (a0 < a0e); | ||
619 | } | ||
620 | |||
621 | static void __flush_dcache_segment_4way(unsigned long start, | ||
622 | unsigned long extent_per_way) | ||
623 | { | ||
624 | unsigned long orig_sr, sr_with_bl; | ||
625 | unsigned long base_addr; | ||
626 | unsigned long way_incr, linesz, way_size; | ||
627 | struct cache_info *dcache; | ||
628 | register unsigned long a0, a1, a2, a3, a0e; | ||
629 | |||
630 | asm volatile("stc sr, %0" : "=r" (orig_sr)); | ||
631 | sr_with_bl = orig_sr | (1<<28); | ||
632 | base_addr = ((unsigned long)&empty_zero_page[0]); | ||
633 | |||
634 | /* See comment under 1-way above */ | ||
635 | base_addr = ((base_addr >> 16) << 16); | ||
636 | base_addr |= start; | ||
637 | |||
638 | dcache = &boot_cpu_data.dcache; | ||
639 | linesz = dcache->linesz; | ||
640 | way_incr = dcache->way_incr; | ||
641 | way_size = dcache->way_size; | ||
642 | |||
643 | a0 = base_addr; | ||
644 | a1 = a0 + way_incr; | ||
645 | a2 = a1 + way_incr; | ||
646 | a3 = a2 + way_incr; | ||
647 | a0e = base_addr + extent_per_way; | ||
648 | do { | ||
649 | asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); | ||
650 | asm volatile("movca.l r0, @%0\n\t" | ||
651 | "movca.l r0, @%1\n\t" | ||
652 | "movca.l r0, @%2\n\t" | ||
653 | "movca.l r0, @%3\n\t" | ||
654 | "ocbi @%0\n\t" | ||
655 | "ocbi @%1\n\t" | ||
656 | "ocbi @%2\n\t" | ||
657 | "ocbi @%3\n\t" : : | ||
658 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
659 | a0 += linesz; | ||
660 | a1 += linesz; | ||
661 | a2 += linesz; | ||
662 | a3 += linesz; | ||
663 | asm volatile("movca.l r0, @%0\n\t" | ||
664 | "movca.l r0, @%1\n\t" | ||
665 | "movca.l r0, @%2\n\t" | ||
666 | "movca.l r0, @%3\n\t" | ||
667 | "ocbi @%0\n\t" | ||
668 | "ocbi @%1\n\t" | ||
669 | "ocbi @%2\n\t" | ||
670 | "ocbi @%3\n\t" : : | ||
671 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
672 | a0 += linesz; | ||
673 | a1 += linesz; | ||
674 | a2 += linesz; | ||
675 | a3 += linesz; | ||
676 | asm volatile("movca.l r0, @%0\n\t" | ||
677 | "movca.l r0, @%1\n\t" | ||
678 | "movca.l r0, @%2\n\t" | ||
679 | "movca.l r0, @%3\n\t" | ||
680 | "ocbi @%0\n\t" | ||
681 | "ocbi @%1\n\t" | ||
682 | "ocbi @%2\n\t" | ||
683 | "ocbi @%3\n\t" : : | ||
684 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
685 | a0 += linesz; | ||
686 | a1 += linesz; | ||
687 | a2 += linesz; | ||
688 | a3 += linesz; | ||
689 | asm volatile("movca.l r0, @%0\n\t" | ||
690 | "movca.l r0, @%1\n\t" | ||
691 | "movca.l r0, @%2\n\t" | ||
692 | "movca.l r0, @%3\n\t" | ||
693 | "ocbi @%0\n\t" | ||
694 | "ocbi @%1\n\t" | ||
695 | "ocbi @%2\n\t" | ||
696 | "ocbi @%3\n\t" : : | ||
697 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
698 | asm volatile("ldc %0, sr" : : "r" (orig_sr)); | ||
699 | a0 += linesz; | ||
700 | a1 += linesz; | ||
701 | a2 += linesz; | ||
702 | a3 += linesz; | ||
703 | } while (a0 < a0e); | ||
704 | } | ||
705 | |||
706 | extern void __weak sh4__flush_region_init(void); | 380 | extern void __weak sh4__flush_region_init(void); |
707 | 381 | ||
708 | /* | 382 | /* |
@@ -710,32 +384,11 @@ extern void __weak sh4__flush_region_init(void); | |||
710 | */ | 384 | */ |
711 | void __init sh4_cache_init(void) | 385 | void __init sh4_cache_init(void) |
712 | { | 386 | { |
713 | unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT); | ||
714 | |||
715 | printk("PVR=%08x CVR=%08x PRR=%08x\n", | 387 | printk("PVR=%08x CVR=%08x PRR=%08x\n", |
716 | ctrl_inl(CCN_PVR), | 388 | ctrl_inl(CCN_PVR), |
717 | ctrl_inl(CCN_CVR), | 389 | ctrl_inl(CCN_CVR), |
718 | ctrl_inl(CCN_PRR)); | 390 | ctrl_inl(CCN_PRR)); |
719 | 391 | ||
720 | if (wt_enabled) | ||
721 | __flush_dcache_segment_fn = __flush_dcache_segment_writethrough; | ||
722 | else { | ||
723 | switch (boot_cpu_data.dcache.ways) { | ||
724 | case 1: | ||
725 | __flush_dcache_segment_fn = __flush_dcache_segment_1way; | ||
726 | break; | ||
727 | case 2: | ||
728 | __flush_dcache_segment_fn = __flush_dcache_segment_2way; | ||
729 | break; | ||
730 | case 4: | ||
731 | __flush_dcache_segment_fn = __flush_dcache_segment_4way; | ||
732 | break; | ||
733 | default: | ||
734 | panic("unknown number of cache ways\n"); | ||
735 | break; | ||
736 | } | ||
737 | } | ||
738 | |||
739 | local_flush_icache_range = sh4_flush_icache_range; | 392 | local_flush_icache_range = sh4_flush_icache_range; |
740 | local_flush_dcache_page = sh4_flush_dcache_page; | 393 | local_flush_dcache_page = sh4_flush_dcache_page; |
741 | local_flush_cache_all = sh4_flush_cache_all; | 394 | local_flush_cache_all = sh4_flush_cache_all; |
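
The rewritten page-flush path at the top of this file (the hunk just before sh4_flush_cache_range()) only touches the operand-cache address array when pages_do_alias() reports that the user-space virtual address and the physical address of the page sit on different cache colours. A rough standalone model of that colour test follows; it is not the kernel's actual pages_do_alias()/shm_align_mask definition, and it assumes a hypothetical write-back D-cache with 16 KiB per way and 4 KiB pages, so two colour bits decide which sets a page can land in.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical geometry: 16 KiB per cache way, 4 KiB pages -> 4 colours. */
#define WAY_SIZE     (16 * 1024)
#define PAGE_BYTES   (4 * 1024)
#define COLOUR_MASK  (WAY_SIZE - PAGE_BYTES)   /* selects the colour bits */

/* Two mappings of the same page alias iff their colour bits differ. */
static int pages_alias_model(uint32_t vaddr, uint32_t paddr)
{
	return ((vaddr ^ paddr) & COLOUR_MASK) != 0;
}

int main(void)
{
	/* Same colour: the kernel's identity mapping already covers it. */
	printf("%d\n", pages_alias_model(0x10001000, 0x0c001000)); /* 0 */
	/* Different colour: the line seen through the user mapping must
	 * also be written back, which is what the CACHE_OC_ADDRESS_ARRAY
	 * write in the hunk above does. */
	printf("%d\n", pages_alias_model(0x10002000, 0x0c001000)); /* 1 */
	return 0;
}
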
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c index 467ff8e260f..eb4cc4ec795 100644 --- a/arch/sh/mm/cache-sh5.c +++ b/arch/sh/mm/cache-sh5.c | |||
@@ -563,7 +563,7 @@ static void sh5_flush_cache_page(void *args) | |||
563 | 563 | ||
564 | static void sh5_flush_dcache_page(void *page) | 564 | static void sh5_flush_dcache_page(void *page) |
565 | { | 565 | { |
566 | sh64_dcache_purge_phy_page(page_to_phys(page)); | 566 | sh64_dcache_purge_phy_page(page_to_phys((struct page *)page)); |
567 | wmb(); | 567 | wmb(); |
568 | } | 568 | } |
569 | 569 | ||
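
The added cast is needed because every local_flush_* hook is registered behind the uniform void (*)(void *) callback type used by the cacheop_on_each_cpu() dispatch in arch/sh/mm/cache.c below, so each handler recovers its real argument type itself. A minimal sketch of that convention, with illustrative names and a single-CPU stand-in for the per-CPU runner:

#include <stdio.h>

struct page_stub { unsigned long pfn; };

/* One callback type so a single dispatcher can run any cache op. */
typedef void (*cacheop_fn)(void *args);

static void flush_dcache_page_op(void *arg)
{
	struct page_stub *page = arg;     /* recover the real type */
	printf("flushing pfn %lu\n", page->pfn);
}

static void run_cacheop(cacheop_fn fn, void *args)
{
	fn(args);                         /* stand-in for on-each-CPU dispatch */
}

int main(void)
{
	struct page_stub p = { .pfn = 42 };
	run_cacheop(flush_dcache_page_op, &p);
	return 0;
}
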
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c index 2601935eb58..f527fb70fce 100644 --- a/arch/sh/mm/cache-sh7705.c +++ b/arch/sh/mm/cache-sh7705.c | |||
@@ -141,7 +141,7 @@ static void sh7705_flush_dcache_page(void *arg) | |||
141 | if (mapping && !mapping_mapped(mapping)) | 141 | if (mapping && !mapping_mapped(mapping)) |
142 | set_bit(PG_dcache_dirty, &page->flags); | 142 | set_bit(PG_dcache_dirty, &page->flags); |
143 | else | 143 | else |
144 | __flush_dcache_page(PHYSADDR(page_address(page))); | 144 | __flush_dcache_page(__pa(page_address(page))); |
145 | } | 145 | } |
146 | 146 | ||
147 | static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args) | 147 | static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args) |
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c index a2dc7f9ecc5..e9415d3ea94 100644 --- a/arch/sh/mm/cache.c +++ b/arch/sh/mm/cache.c | |||
@@ -27,8 +27,11 @@ void (*local_flush_icache_page)(void *args) = cache_noop; | |||
27 | void (*local_flush_cache_sigtramp)(void *args) = cache_noop; | 27 | void (*local_flush_cache_sigtramp)(void *args) = cache_noop; |
28 | 28 | ||
29 | void (*__flush_wback_region)(void *start, int size); | 29 | void (*__flush_wback_region)(void *start, int size); |
30 | EXPORT_SYMBOL(__flush_wback_region); | ||
30 | void (*__flush_purge_region)(void *start, int size); | 31 | void (*__flush_purge_region)(void *start, int size); |
32 | EXPORT_SYMBOL(__flush_purge_region); | ||
31 | void (*__flush_invalidate_region)(void *start, int size); | 33 | void (*__flush_invalidate_region)(void *start, int size); |
34 | EXPORT_SYMBOL(__flush_invalidate_region); | ||
32 | 35 | ||
33 | static inline void noop__flush_region(void *start, int size) | 36 | static inline void noop__flush_region(void *start, int size) |
34 | { | 37 | { |
@@ -161,14 +164,21 @@ void flush_cache_all(void) | |||
161 | { | 164 | { |
162 | cacheop_on_each_cpu(local_flush_cache_all, NULL, 1); | 165 | cacheop_on_each_cpu(local_flush_cache_all, NULL, 1); |
163 | } | 166 | } |
167 | EXPORT_SYMBOL(flush_cache_all); | ||
164 | 168 | ||
165 | void flush_cache_mm(struct mm_struct *mm) | 169 | void flush_cache_mm(struct mm_struct *mm) |
166 | { | 170 | { |
171 | if (boot_cpu_data.dcache.n_aliases == 0) | ||
172 | return; | ||
173 | |||
167 | cacheop_on_each_cpu(local_flush_cache_mm, mm, 1); | 174 | cacheop_on_each_cpu(local_flush_cache_mm, mm, 1); |
168 | } | 175 | } |
169 | 176 | ||
170 | void flush_cache_dup_mm(struct mm_struct *mm) | 177 | void flush_cache_dup_mm(struct mm_struct *mm) |
171 | { | 178 | { |
179 | if (boot_cpu_data.dcache.n_aliases == 0) | ||
180 | return; | ||
181 | |||
172 | cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1); | 182 | cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1); |
173 | } | 183 | } |
174 | 184 | ||
@@ -195,11 +205,13 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | |||
195 | 205 | ||
196 | cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1); | 206 | cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1); |
197 | } | 207 | } |
208 | EXPORT_SYMBOL(flush_cache_range); | ||
198 | 209 | ||
199 | void flush_dcache_page(struct page *page) | 210 | void flush_dcache_page(struct page *page) |
200 | { | 211 | { |
201 | cacheop_on_each_cpu(local_flush_dcache_page, page, 1); | 212 | cacheop_on_each_cpu(local_flush_dcache_page, page, 1); |
202 | } | 213 | } |
214 | EXPORT_SYMBOL(flush_dcache_page); | ||
203 | 215 | ||
204 | void flush_icache_range(unsigned long start, unsigned long end) | 216 | void flush_icache_range(unsigned long start, unsigned long end) |
205 | { | 217 | { |
@@ -265,7 +277,11 @@ static void __init emit_cache_params(void) | |||
265 | 277 | ||
266 | void __init cpu_cache_init(void) | 278 | void __init cpu_cache_init(void) |
267 | { | 279 | { |
268 | unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE); | 280 | unsigned int cache_disabled = 0; |
281 | |||
282 | #ifdef CCR | ||
283 | cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE); | ||
284 | #endif | ||
269 | 285 | ||
270 | compute_alias(&boot_cpu_data.icache); | 286 | compute_alias(&boot_cpu_data.icache); |
271 | compute_alias(&boot_cpu_data.dcache); | 287 | compute_alias(&boot_cpu_data.dcache); |
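
With __flush_wback_region(), __flush_purge_region(), __flush_invalidate_region(), flush_cache_all(), flush_cache_range() and flush_dcache_page() now exported, loadable modules can call the SMP-aware cache helpers directly. A minimal sketch of a hypothetical module doing so; the actual device hand-off is omitted, and the prototypes are assumed to be visible through <asm/cacheflush.h>:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

static void *buf;

static int __init purge_demo_init(void)
{
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memset(buf, 0, PAGE_SIZE);

	/*
	 * Write the cached copy back and invalidate it so a bus master
	 * (or an uncached alias) sees current data; callable from a
	 * module only because of the EXPORT_SYMBOL() added above.
	 */
	__flush_purge_region(buf, PAGE_SIZE);

	return 0;
}

static void __exit purge_demo_exit(void)
{
	kfree(buf);
}

module_init(purge_demo_init);
module_exit(purge_demo_exit);
MODULE_LICENSE("GPL");
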
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index e098ec158dd..902967e3f84 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c | |||
@@ -15,11 +15,15 @@ | |||
15 | #include <linux/dma-mapping.h> | 15 | #include <linux/dma-mapping.h> |
16 | #include <linux/dma-debug.h> | 16 | #include <linux/dma-debug.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/module.h> | ||
18 | #include <asm/cacheflush.h> | 19 | #include <asm/cacheflush.h> |
19 | #include <asm/addrspace.h> | 20 | #include <asm/addrspace.h> |
20 | 21 | ||
21 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 | 22 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 |
22 | 23 | ||
24 | struct dma_map_ops *dma_ops; | ||
25 | EXPORT_SYMBOL(dma_ops); | ||
26 | |||
23 | static int __init dma_init(void) | 27 | static int __init dma_init(void) |
24 | { | 28 | { |
25 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | 29 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); |
@@ -27,15 +31,12 @@ static int __init dma_init(void) | |||
27 | } | 31 | } |
28 | fs_initcall(dma_init); | 32 | fs_initcall(dma_init); |
29 | 33 | ||
30 | void *dma_alloc_coherent(struct device *dev, size_t size, | 34 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
31 | dma_addr_t *dma_handle, gfp_t gfp) | 35 | dma_addr_t *dma_handle, gfp_t gfp) |
32 | { | 36 | { |
33 | void *ret, *ret_nocache; | 37 | void *ret, *ret_nocache; |
34 | int order = get_order(size); | 38 | int order = get_order(size); |
35 | 39 | ||
36 | if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) | ||
37 | return ret; | ||
38 | |||
39 | ret = (void *)__get_free_pages(gfp, order); | 40 | ret = (void *)__get_free_pages(gfp, order); |
40 | if (!ret) | 41 | if (!ret) |
41 | return NULL; | 42 | return NULL; |
@@ -57,35 +58,26 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
57 | 58 | ||
58 | *dma_handle = virt_to_phys(ret); | 59 | *dma_handle = virt_to_phys(ret); |
59 | 60 | ||
60 | debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache); | ||
61 | |||
62 | return ret_nocache; | 61 | return ret_nocache; |
63 | } | 62 | } |
64 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
65 | 63 | ||
66 | void dma_free_coherent(struct device *dev, size_t size, | 64 | void dma_generic_free_coherent(struct device *dev, size_t size, |
67 | void *vaddr, dma_addr_t dma_handle) | 65 | void *vaddr, dma_addr_t dma_handle) |
68 | { | 66 | { |
69 | int order = get_order(size); | 67 | int order = get_order(size); |
70 | unsigned long pfn = dma_handle >> PAGE_SHIFT; | 68 | unsigned long pfn = dma_handle >> PAGE_SHIFT; |
71 | int k; | 69 | int k; |
72 | 70 | ||
73 | WARN_ON(irqs_disabled()); /* for portability */ | ||
74 | |||
75 | if (dma_release_from_coherent(dev, order, vaddr)) | ||
76 | return; | ||
77 | |||
78 | debug_dma_free_coherent(dev, size, vaddr, dma_handle); | ||
79 | for (k = 0; k < (1 << order); k++) | 71 | for (k = 0; k < (1 << order); k++) |
80 | __free_pages(pfn_to_page(pfn + k), 0); | 72 | __free_pages(pfn_to_page(pfn + k), 0); |
73 | |||
81 | iounmap(vaddr); | 74 | iounmap(vaddr); |
82 | } | 75 | } |
83 | EXPORT_SYMBOL(dma_free_coherent); | ||
84 | 76 | ||
85 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 77 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
86 | enum dma_data_direction direction) | 78 | enum dma_data_direction direction) |
87 | { | 79 | { |
88 | #ifdef CONFIG_CPU_SH5 | 80 | #if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB) |
89 | void *p1addr = vaddr; | 81 | void *p1addr = vaddr; |
90 | #else | 82 | #else |
91 | void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr); | 83 | void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr); |
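
dma_alloc_coherent() and dma_free_coherent() are no longer defined in this file; the renamed dma_generic_alloc_coherent()/dma_generic_free_coherent() pair and the newly exported dma_ops pointer suggest the allocator is now reached through a struct dma_map_ops dispatch instead. From a driver's point of view the generic DMA API stays the same; a minimal usage sketch (the device pointer and the 4 KiB size are placeholders):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Allocate a small coherent buffer for a device, then release it again. */
static int coherent_buf_demo(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;
	size_t size = 4096;

	cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/*
	 * cpu_addr can be used by the CPU without explicit flushing (on
	 * this platform it is an uncached alias; note the iounmap() in
	 * the free path above), while dma_handle is the address that
	 * gets programmed into the device.
	 */

	dma_free_coherent(dev, size, cpu_addr, dma_handle);
	return 0;
}
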
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 8173e38afd3..432acd07e76 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/pagemap.h> | 15 | #include <linux/pagemap.h> |
16 | #include <linux/percpu.h> | 16 | #include <linux/percpu.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/dma-mapping.h> | ||
18 | #include <asm/mmu_context.h> | 19 | #include <asm/mmu_context.h> |
19 | #include <asm/tlb.h> | 20 | #include <asm/tlb.h> |
20 | #include <asm/cacheflush.h> | 21 | #include <asm/cacheflush.h> |
@@ -186,11 +187,21 @@ void __init paging_init(void) | |||
186 | set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start)); | 187 | set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start)); |
187 | } | 188 | } |
188 | 189 | ||
190 | /* | ||
191 | * Early initialization for any I/O MMUs we might have. | ||
192 | */ | ||
193 | static void __init iommu_init(void) | ||
194 | { | ||
195 | no_iommu_init(); | ||
196 | } | ||
197 | |||
189 | void __init mem_init(void) | 198 | void __init mem_init(void) |
190 | { | 199 | { |
191 | int codesize, datasize, initsize; | 200 | int codesize, datasize, initsize; |
192 | int nid; | 201 | int nid; |
193 | 202 | ||
203 | iommu_init(); | ||
204 | |||
194 | num_physpages = 0; | 205 | num_physpages = 0; |
195 | high_memory = NULL; | 206 | high_memory = NULL; |
196 | 207 | ||
@@ -323,4 +334,12 @@ int memory_add_physaddr_to_nid(u64 addr) | |||
323 | } | 334 | } |
324 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); | 335 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); |
325 | #endif | 336 | #endif |
337 | |||
326 | #endif /* CONFIG_MEMORY_HOTPLUG */ | 338 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
339 | |||
340 | #ifdef CONFIG_PMB | ||
341 | int __in_29bit_mode(void) | ||
342 | { | ||
343 | return !(ctrl_inl(PMB_PASCR) & PASCR_SE); | ||
344 | } | ||
345 | #endif /* CONFIG_PMB */ | ||
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c index 16e01b5fed0..15d74ea4209 100644 --- a/arch/sh/mm/kmap.c +++ b/arch/sh/mm/kmap.c | |||
@@ -39,7 +39,9 @@ void *kmap_coherent(struct page *page, unsigned long addr) | |||
39 | pagefault_disable(); | 39 | pagefault_disable(); |
40 | 40 | ||
41 | idx = FIX_CMAP_END - | 41 | idx = FIX_CMAP_END - |
42 | ((addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT); | 42 | (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) + |
43 | (FIX_N_COLOURS * smp_processor_id())); | ||
44 | |||
43 | vaddr = __fix_to_virt(idx); | 45 | vaddr = __fix_to_virt(idx); |
44 | 46 | ||
45 | BUG_ON(!pte_none(*(kmap_coherent_pte - idx))); | 47 | BUG_ON(!pte_none(*(kmap_coherent_pte - idx))); |
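
The new index calculation gives each CPU its own block of FIX_N_COLOURS fixmap slots and then picks the slot matching the page colour of addr, instead of deriving the slot from dcache.alias_mask. Worked through as a standalone function, with purely illustrative values standing in for the constants from asm/fixmap.h:

#include <stdio.h>

/* Illustrative values; the real ones come from asm/fixmap.h. */
#define FIX_CMAP_END   128
#define FIX_N_COLOURS  8
#define PAGE_SHIFT     12

static unsigned int cmap_idx(unsigned long addr, unsigned int cpu)
{
	return FIX_CMAP_END -
	       (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
	        (FIX_N_COLOURS * cpu));
}

int main(void)
{
	/* Same address colour on two CPUs -> two distinct fixmap slots. */
	printf("%u %u\n", cmap_idx(0x10003000, 0), cmap_idx(0x10003000, 1));
	return 0;
}
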
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c index 9b784fdb947..6c524446c0f 100644 --- a/arch/sh/mm/numa.c +++ b/arch/sh/mm/numa.c | |||
@@ -60,7 +60,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) | |||
60 | unsigned long bootmem_paddr; | 60 | unsigned long bootmem_paddr; |
61 | 61 | ||
62 | /* Don't allow bogus node assignment */ | 62 | /* Don't allow bogus node assignment */ |
63 | BUG_ON(nid > MAX_NUMNODES || nid == 0); | 63 | BUG_ON(nid > MAX_NUMNODES || nid <= 0); |
64 | 64 | ||
65 | start_pfn = start >> PAGE_SHIFT; | 65 | start_pfn = start >> PAGE_SHIFT; |
66 | end_pfn = end >> PAGE_SHIFT; | 66 | end_pfn = end >> PAGE_SHIFT; |
diff --git a/arch/sh/mm/pmb-fixed.c b/arch/sh/mm/pmb-fixed.c deleted file mode 100644 index 43c8eac4d8a..00000000000 --- a/arch/sh/mm/pmb-fixed.c +++ /dev/null | |||
@@ -1,45 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/fixed_pmb.c | ||
3 | * | ||
4 | * Copyright (C) 2009 Renesas Solutions Corp. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/io.h> | ||
13 | #include <asm/mmu.h> | ||
14 | #include <asm/mmu_context.h> | ||
15 | |||
16 | static int __uses_jump_to_uncached fixed_pmb_init(void) | ||
17 | { | ||
18 | int i; | ||
19 | unsigned long addr, data; | ||
20 | |||
21 | jump_to_uncached(); | ||
22 | |||
23 | for (i = 0; i < PMB_ENTRY_MAX; i++) { | ||
24 | addr = PMB_DATA + (i << PMB_E_SHIFT); | ||
25 | data = ctrl_inl(addr); | ||
26 | if (!(data & PMB_V)) | ||
27 | continue; | ||
28 | |||
29 | if (data & PMB_C) { | ||
30 | #if defined(CONFIG_CACHE_WRITETHROUGH) | ||
31 | data |= PMB_WT; | ||
32 | #elif defined(CONFIG_CACHE_WRITEBACK) | ||
33 | data &= ~PMB_WT; | ||
34 | #else | ||
35 | data &= ~(PMB_C | PMB_WT); | ||
36 | #endif | ||
37 | } | ||
38 | ctrl_outl(data, addr); | ||
39 | } | ||
40 | |||
41 | back_to_cached(); | ||
42 | |||
43 | return 0; | ||
44 | } | ||
45 | arch_initcall(fixed_pmb_init); | ||
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index aade3110211..280f6a16603 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c | |||
@@ -35,29 +35,9 @@ | |||
35 | 35 | ||
36 | static void __pmb_unmap(struct pmb_entry *); | 36 | static void __pmb_unmap(struct pmb_entry *); |
37 | 37 | ||
38 | static struct kmem_cache *pmb_cache; | 38 | static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; |
39 | static unsigned long pmb_map; | 39 | static unsigned long pmb_map; |
40 | 40 | ||
41 | static struct pmb_entry pmb_init_map[] = { | ||
42 | /* vpn ppn flags (ub/sz/c/wt) */ | ||
43 | |||
44 | /* P1 Section Mappings */ | ||
45 | { 0x80000000, 0x00000000, PMB_SZ_64M | PMB_C, }, | ||
46 | { 0x84000000, 0x04000000, PMB_SZ_64M | PMB_C, }, | ||
47 | { 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, }, | ||
48 | { 0x90000000, 0x10000000, PMB_SZ_64M | PMB_C, }, | ||
49 | { 0x94000000, 0x14000000, PMB_SZ_64M | PMB_C, }, | ||
50 | { 0x98000000, 0x18000000, PMB_SZ_64M | PMB_C, }, | ||
51 | |||
52 | /* P2 Section Mappings */ | ||
53 | { 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
54 | { 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
55 | { 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, }, | ||
56 | { 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
57 | { 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
58 | { 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
59 | }; | ||
60 | |||
61 | static inline unsigned long mk_pmb_entry(unsigned int entry) | 41 | static inline unsigned long mk_pmb_entry(unsigned int entry) |
62 | { | 42 | { |
63 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; | 43 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; |
@@ -73,81 +53,68 @@ static inline unsigned long mk_pmb_data(unsigned int entry) | |||
73 | return mk_pmb_entry(entry) | PMB_DATA; | 53 | return mk_pmb_entry(entry) | PMB_DATA; |
74 | } | 54 | } |
75 | 55 | ||
76 | static DEFINE_SPINLOCK(pmb_list_lock); | 56 | static int pmb_alloc_entry(void) |
77 | static struct pmb_entry *pmb_list; | ||
78 | |||
79 | static inline void pmb_list_add(struct pmb_entry *pmbe) | ||
80 | { | 57 | { |
81 | struct pmb_entry **p, *tmp; | 58 | unsigned int pos; |
82 | 59 | ||
83 | p = &pmb_list; | 60 | repeat: |
84 | while ((tmp = *p) != NULL) | 61 | pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); |
85 | p = &tmp->next; | ||
86 | 62 | ||
87 | pmbe->next = tmp; | 63 | if (unlikely(pos > NR_PMB_ENTRIES)) |
88 | *p = pmbe; | 64 | return -ENOSPC; |
89 | } | ||
90 | 65 | ||
91 | static inline void pmb_list_del(struct pmb_entry *pmbe) | 66 | if (test_and_set_bit(pos, &pmb_map)) |
92 | { | 67 | goto repeat; |
93 | struct pmb_entry **p, *tmp; | ||
94 | 68 | ||
95 | for (p = &pmb_list; (tmp = *p); p = &tmp->next) | 69 | return pos; |
96 | if (tmp == pmbe) { | ||
97 | *p = tmp->next; | ||
98 | return; | ||
99 | } | ||
100 | } | 70 | } |
101 | 71 | ||
102 | struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, | 72 | static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, |
103 | unsigned long flags) | 73 | unsigned long flags, int entry) |
104 | { | 74 | { |
105 | struct pmb_entry *pmbe; | 75 | struct pmb_entry *pmbe; |
76 | int pos; | ||
77 | |||
78 | if (entry == PMB_NO_ENTRY) { | ||
79 | pos = pmb_alloc_entry(); | ||
80 | if (pos < 0) | ||
81 | return ERR_PTR(pos); | ||
82 | } else { | ||
83 | if (test_bit(entry, &pmb_map)) | ||
84 | return ERR_PTR(-ENOSPC); | ||
85 | pos = entry; | ||
86 | } | ||
106 | 87 | ||
107 | pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL); | 88 | pmbe = &pmb_entry_list[pos]; |
108 | if (!pmbe) | 89 | if (!pmbe) |
109 | return ERR_PTR(-ENOMEM); | 90 | return ERR_PTR(-ENOMEM); |
110 | 91 | ||
111 | pmbe->vpn = vpn; | 92 | pmbe->vpn = vpn; |
112 | pmbe->ppn = ppn; | 93 | pmbe->ppn = ppn; |
113 | pmbe->flags = flags; | 94 | pmbe->flags = flags; |
114 | 95 | pmbe->entry = pos; | |
115 | spin_lock_irq(&pmb_list_lock); | ||
116 | pmb_list_add(pmbe); | ||
117 | spin_unlock_irq(&pmb_list_lock); | ||
118 | 96 | ||
119 | return pmbe; | 97 | return pmbe; |
120 | } | 98 | } |
121 | 99 | ||
122 | void pmb_free(struct pmb_entry *pmbe) | 100 | static void pmb_free(struct pmb_entry *pmbe) |
123 | { | 101 | { |
124 | spin_lock_irq(&pmb_list_lock); | 102 | int pos = pmbe->entry; |
125 | pmb_list_del(pmbe); | ||
126 | spin_unlock_irq(&pmb_list_lock); | ||
127 | 103 | ||
128 | kmem_cache_free(pmb_cache, pmbe); | 104 | pmbe->vpn = 0; |
105 | pmbe->ppn = 0; | ||
106 | pmbe->flags = 0; | ||
107 | pmbe->entry = 0; | ||
108 | |||
109 | clear_bit(pos, &pmb_map); | ||
129 | } | 110 | } |
130 | 111 | ||
131 | /* | 112 | /* |
132 | * Must be in P2 for __set_pmb_entry() | 113 | * Must be in P2 for __set_pmb_entry() |
133 | */ | 114 | */ |
134 | int __set_pmb_entry(unsigned long vpn, unsigned long ppn, | 115 | static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, |
135 | unsigned long flags, int *entry) | 116 | unsigned long flags, int pos) |
136 | { | 117 | { |
137 | unsigned int pos = *entry; | ||
138 | |||
139 | if (unlikely(pos == PMB_NO_ENTRY)) | ||
140 | pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); | ||
141 | |||
142 | repeat: | ||
143 | if (unlikely(pos > NR_PMB_ENTRIES)) | ||
144 | return -ENOSPC; | ||
145 | |||
146 | if (test_and_set_bit(pos, &pmb_map)) { | ||
147 | pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); | ||
148 | goto repeat; | ||
149 | } | ||
150 | |||
151 | ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos)); | 118 | ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos)); |
152 | 119 | ||
153 | #ifdef CONFIG_CACHE_WRITETHROUGH | 120 | #ifdef CONFIG_CACHE_WRITETHROUGH |
@@ -161,35 +128,21 @@ repeat: | |||
161 | #endif | 128 | #endif |
162 | 129 | ||
163 | ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); | 130 | ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); |
164 | |||
165 | *entry = pos; | ||
166 | |||
167 | return 0; | ||
168 | } | 131 | } |
169 | 132 | ||
170 | int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) | 133 | static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) |
171 | { | 134 | { |
172 | int ret; | ||
173 | |||
174 | jump_to_uncached(); | 135 | jump_to_uncached(); |
175 | ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry); | 136 | __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry); |
176 | back_to_cached(); | 137 | back_to_cached(); |
177 | |||
178 | return ret; | ||
179 | } | 138 | } |
180 | 139 | ||
181 | void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) | 140 | static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) |
182 | { | 141 | { |
183 | unsigned int entry = pmbe->entry; | 142 | unsigned int entry = pmbe->entry; |
184 | unsigned long addr; | 143 | unsigned long addr; |
185 | 144 | ||
186 | /* | 145 | if (unlikely(entry >= NR_PMB_ENTRIES)) |
187 | * Don't allow clearing of wired init entries, P1 or P2 access | ||
188 | * without a corresponding mapping in the PMB will lead to reset | ||
189 | * by the TLB. | ||
190 | */ | ||
191 | if (unlikely(entry < ARRAY_SIZE(pmb_init_map) || | ||
192 | entry >= NR_PMB_ENTRIES)) | ||
193 | return; | 146 | return; |
194 | 147 | ||
195 | jump_to_uncached(); | 148 | jump_to_uncached(); |
@@ -202,8 +155,6 @@ void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) | |||
202 | ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); | 155 | ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); |
203 | 156 | ||
204 | back_to_cached(); | 157 | back_to_cached(); |
205 | |||
206 | clear_bit(entry, &pmb_map); | ||
207 | } | 158 | } |
208 | 159 | ||
209 | 160 | ||
@@ -239,23 +190,17 @@ long pmb_remap(unsigned long vaddr, unsigned long phys, | |||
239 | 190 | ||
240 | again: | 191 | again: |
241 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { | 192 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { |
242 | int ret; | ||
243 | |||
244 | if (size < pmb_sizes[i].size) | 193 | if (size < pmb_sizes[i].size) |
245 | continue; | 194 | continue; |
246 | 195 | ||
247 | pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag); | 196 | pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag, |
197 | PMB_NO_ENTRY); | ||
248 | if (IS_ERR(pmbe)) { | 198 | if (IS_ERR(pmbe)) { |
249 | err = PTR_ERR(pmbe); | 199 | err = PTR_ERR(pmbe); |
250 | goto out; | 200 | goto out; |
251 | } | 201 | } |
252 | 202 | ||
253 | ret = set_pmb_entry(pmbe); | 203 | set_pmb_entry(pmbe); |
254 | if (ret != 0) { | ||
255 | pmb_free(pmbe); | ||
256 | err = -EBUSY; | ||
257 | goto out; | ||
258 | } | ||
259 | 204 | ||
260 | phys += pmb_sizes[i].size; | 205 | phys += pmb_sizes[i].size; |
261 | vaddr += pmb_sizes[i].size; | 206 | vaddr += pmb_sizes[i].size; |
@@ -292,11 +237,16 @@ out: | |||
292 | 237 | ||
293 | void pmb_unmap(unsigned long addr) | 238 | void pmb_unmap(unsigned long addr) |
294 | { | 239 | { |
295 | struct pmb_entry **p, *pmbe; | 240 | struct pmb_entry *pmbe = NULL; |
241 | int i; | ||
296 | 242 | ||
297 | for (p = &pmb_list; (pmbe = *p); p = &pmbe->next) | 243 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
298 | if (pmbe->vpn == addr) | 244 | if (test_bit(i, &pmb_map)) { |
299 | break; | 245 | pmbe = &pmb_entry_list[i]; |
246 | if (pmbe->vpn == addr) | ||
247 | break; | ||
248 | } | ||
249 | } | ||
300 | 250 | ||
301 | if (unlikely(!pmbe)) | 251 | if (unlikely(!pmbe)) |
302 | return; | 252 | return; |
@@ -306,13 +256,22 @@ void pmb_unmap(unsigned long addr) | |||
306 | 256 | ||
307 | static void __pmb_unmap(struct pmb_entry *pmbe) | 257 | static void __pmb_unmap(struct pmb_entry *pmbe) |
308 | { | 258 | { |
309 | WARN_ON(!test_bit(pmbe->entry, &pmb_map)); | 259 | BUG_ON(!test_bit(pmbe->entry, &pmb_map)); |
310 | 260 | ||
311 | do { | 261 | do { |
312 | struct pmb_entry *pmblink = pmbe; | 262 | struct pmb_entry *pmblink = pmbe; |
313 | 263 | ||
314 | if (pmbe->entry != PMB_NO_ENTRY) | 264 | /* |
315 | clear_pmb_entry(pmbe); | 265 | * We may be called before this pmb_entry has been |
266 | * entered into the PMB table via set_pmb_entry(), but | ||
267 | * that's OK because we've allocated a unique slot for | ||
268 | * this entry in pmb_alloc() (even if we haven't filled | ||
269 | * it yet). | ||
270 | * | ||
271 | * Therefore, calling clear_pmb_entry() is safe as no | ||
272 | * other mapping can be using that slot. | ||
273 | */ | ||
274 | clear_pmb_entry(pmbe); | ||
316 | 275 | ||
317 | pmbe = pmblink->link; | 276 | pmbe = pmblink->link; |
318 | 277 | ||
@@ -320,42 +279,34 @@ static void __pmb_unmap(struct pmb_entry *pmbe) | |||
320 | } while (pmbe); | 279 | } while (pmbe); |
321 | } | 280 | } |
322 | 281 | ||
323 | static void pmb_cache_ctor(void *pmb) | 282 | #ifdef CONFIG_PMB |
283 | int __uses_jump_to_uncached pmb_init(void) | ||
324 | { | 284 | { |
325 | struct pmb_entry *pmbe = pmb; | 285 | unsigned int i; |
326 | 286 | long size, ret; | |
327 | memset(pmb, 0, sizeof(struct pmb_entry)); | ||
328 | |||
329 | pmbe->entry = PMB_NO_ENTRY; | ||
330 | } | ||
331 | |||
332 | static int __uses_jump_to_uncached pmb_init(void) | ||
333 | { | ||
334 | unsigned int nr_entries = ARRAY_SIZE(pmb_init_map); | ||
335 | unsigned int entry, i; | ||
336 | |||
337 | BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES)); | ||
338 | |||
339 | pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0, | ||
340 | SLAB_PANIC, pmb_cache_ctor); | ||
341 | 287 | ||
342 | jump_to_uncached(); | 288 | jump_to_uncached(); |
343 | 289 | ||
344 | /* | 290 | /* |
345 | * Ordering is important, P2 must be mapped in the PMB before we | 291 | * Insert PMB entries for the P1 and P2 areas so that, after |
346 | * can set PMB.SE, and P1 must be mapped before we jump back to | 292 | * we've switched the MMU to 32-bit mode, the semantics of P1 |
347 | * P1 space. | 293 | * and P2 are the same as in 29-bit mode, e.g. |
294 | * | ||
295 | * P1 - provides a cached window onto physical memory | ||
296 | * P2 - provides an uncached window onto physical memory | ||
348 | */ | 297 | */ |
349 | for (entry = 0; entry < nr_entries; entry++) { | 298 | size = __MEMORY_START + __MEMORY_SIZE; |
350 | struct pmb_entry *pmbe = pmb_init_map + entry; | ||
351 | 299 | ||
352 | __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry); | 300 | ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C); |
353 | } | 301 | BUG_ON(ret != size); |
302 | |||
303 | ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB); | ||
304 | BUG_ON(ret != size); | ||
354 | 305 | ||
355 | ctrl_outl(0, PMB_IRMCR); | 306 | ctrl_outl(0, PMB_IRMCR); |
356 | 307 | ||
357 | /* PMB.SE and UB[7] */ | 308 | /* PMB.SE and UB[7] */ |
358 | ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR); | 309 | ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR); |
359 | 310 | ||
360 | /* Flush out the TLB */ | 311 | /* Flush out the TLB */ |
361 | i = ctrl_inl(MMUCR); | 312 | i = ctrl_inl(MMUCR); |
@@ -366,7 +317,53 @@ static int __uses_jump_to_uncached pmb_init(void) | |||
366 | 317 | ||
367 | return 0; | 318 | return 0; |
368 | } | 319 | } |
369 | arch_initcall(pmb_init); | 320 | #else |
321 | int __uses_jump_to_uncached pmb_init(void) | ||
322 | { | ||
323 | int i; | ||
324 | unsigned long addr, data; | ||
325 | |||
326 | jump_to_uncached(); | ||
327 | |||
328 | for (i = 0; i < PMB_ENTRY_MAX; i++) { | ||
329 | struct pmb_entry *pmbe; | ||
330 | unsigned long vpn, ppn, flags; | ||
331 | |||
332 | addr = PMB_DATA + (i << PMB_E_SHIFT); | ||
333 | data = ctrl_inl(addr); | ||
334 | if (!(data & PMB_V)) | ||
335 | continue; | ||
336 | |||
337 | if (data & PMB_C) { | ||
338 | #if defined(CONFIG_CACHE_WRITETHROUGH) | ||
339 | data |= PMB_WT; | ||
340 | #elif defined(CONFIG_CACHE_WRITEBACK) | ||
341 | data &= ~PMB_WT; | ||
342 | #else | ||
343 | data &= ~(PMB_C | PMB_WT); | ||
344 | #endif | ||
345 | } | ||
346 | ctrl_outl(data, addr); | ||
347 | |||
348 | ppn = data & PMB_PFN_MASK; | ||
349 | |||
350 | flags = data & (PMB_C | PMB_WT | PMB_UB); | ||
351 | flags |= data & PMB_SZ_MASK; | ||
352 | |||
353 | addr = PMB_ADDR + (i << PMB_E_SHIFT); | ||
354 | data = ctrl_inl(addr); | ||
355 | |||
356 | vpn = data & PMB_PFN_MASK; | ||
357 | |||
358 | pmbe = pmb_alloc(vpn, ppn, flags, i); | ||
359 | WARN_ON(IS_ERR(pmbe)); | ||
360 | } | ||
361 | |||
362 | back_to_cached(); | ||
363 | |||
364 | return 0; | ||
365 | } | ||
366 | #endif /* CONFIG_PMB */ | ||
370 | 367 | ||
371 | static int pmb_seq_show(struct seq_file *file, void *iter) | 368 | static int pmb_seq_show(struct seq_file *file, void *iter) |
372 | { | 369 | { |
@@ -434,15 +431,18 @@ postcore_initcall(pmb_debugfs_init); | |||
434 | static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) | 431 | static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) |
435 | { | 432 | { |
436 | static pm_message_t prev_state; | 433 | static pm_message_t prev_state; |
434 | int i; | ||
437 | 435 | ||
438 | /* Restore the PMB after a resume from hibernation */ | 436 | /* Restore the PMB after a resume from hibernation */ |
439 | if (state.event == PM_EVENT_ON && | 437 | if (state.event == PM_EVENT_ON && |
440 | prev_state.event == PM_EVENT_FREEZE) { | 438 | prev_state.event == PM_EVENT_FREEZE) { |
441 | struct pmb_entry *pmbe; | 439 | struct pmb_entry *pmbe; |
442 | spin_lock_irq(&pmb_list_lock); | 440 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
443 | for (pmbe = pmb_list; pmbe; pmbe = pmbe->next) | 441 | if (test_bit(i, &pmb_map)) { |
444 | set_pmb_entry(pmbe); | 442 | pmbe = &pmb_entry_list[i]; |
445 | spin_unlock_irq(&pmb_list_lock); | 443 | set_pmb_entry(pmbe); |
444 | } | ||
445 | } | ||
446 | } | 446 | } |
447 | prev_state = state; | 447 | prev_state = state; |
448 | return 0; | 448 | return 0; |
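
The PMB rewrite above replaces the kmem_cache plus linked-list bookkeeping with a fixed pmb_entry_list[] indexed by a bitmap: pmb_alloc_entry() finds a free slot with find_first_zero_bit() and claims it with test_and_set_bit(), retrying if another caller wins the race, and pmb_free() simply clears the bit. A simplified single-word userspace model of that allocate/free scheme, using GCC atomic builtins in place of the kernel bitops and an illustrative table size:

#include <stdio.h>

#define NR_ENTRIES 16
static unsigned long slot_map;                 /* one bit per table slot */

static int alloc_slot(void)
{
	for (;;) {
		int pos = __builtin_ffsl(~slot_map) - 1;   /* first zero bit */
		if (pos < 0 || pos >= NR_ENTRIES)
			return -1;                         /* table full */
		/* claim it atomically; retry if someone else got it first */
		if (!(__atomic_fetch_or(&slot_map, 1UL << pos,
					__ATOMIC_SEQ_CST) & (1UL << pos)))
			return pos;
	}
}

static void free_slot(int pos)
{
	__atomic_fetch_and(&slot_map, ~(1UL << pos), __ATOMIC_SEQ_CST);
}

int main(void)
{
	int a = alloc_slot(), b = alloc_slot();
	printf("%d %d\n", a, b);        /* 0 1 */
	free_slot(a);
	printf("%d\n", alloc_slot());   /* 0 again */
	return 0;
}

One detail the model makes explicit: find_first_zero_bit() returns the bitmap size when nothing is free, so the rejection test here is pos >= NR_ENTRIES, whereas the hunk above checks pos > NR_PMB_ENTRIES, which is slightly looser.
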
diff --git a/arch/sh/oprofile/Makefile b/arch/sh/oprofile/Makefile index 8e6eec91c14..4886c5c1786 100644 --- a/arch/sh/oprofile/Makefile +++ b/arch/sh/oprofile/Makefile | |||
@@ -7,7 +7,3 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ | |||
7 | timer_int.o ) | 7 | timer_int.o ) |
8 | 8 | ||
9 | oprofile-y := $(DRIVER_OBJS) common.o backtrace.o | 9 | oprofile-y := $(DRIVER_OBJS) common.o backtrace.o |
10 | |||
11 | oprofile-$(CONFIG_CPU_SUBTYPE_SH7750S) += op_model_sh7750.o | ||
12 | oprofile-$(CONFIG_CPU_SUBTYPE_SH7750) += op_model_sh7750.o | ||
13 | oprofile-$(CONFIG_CPU_SUBTYPE_SH7091) += op_model_sh7750.o | ||
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c index 44f4e31c6d6..ac604937f3e 100644 --- a/arch/sh/oprofile/common.c +++ b/arch/sh/oprofile/common.c | |||
@@ -20,9 +20,6 @@ | |||
20 | #include <asm/processor.h> | 20 | #include <asm/processor.h> |
21 | #include "op_impl.h" | 21 | #include "op_impl.h" |
22 | 22 | ||
23 | extern struct op_sh_model op_model_sh7750_ops __weak; | ||
24 | extern struct op_sh_model op_model_sh4a_ops __weak; | ||
25 | |||
26 | static struct op_sh_model *model; | 23 | static struct op_sh_model *model; |
27 | 24 | ||
28 | static struct op_counter_config ctr[20]; | 25 | static struct op_counter_config ctr[20]; |
@@ -94,33 +91,14 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) | |||
94 | */ | 91 | */ |
95 | ops->backtrace = sh_backtrace; | 92 | ops->backtrace = sh_backtrace; |
96 | 93 | ||
97 | switch (current_cpu_data.type) { | 94 | /* |
98 | /* SH-4 types */ | 95 | * XXX |
99 | case CPU_SH7750: | 96 | * |
100 | case CPU_SH7750S: | 97 | * All of the SH7750/SH-4A counters have been converted to perf, |
101 | lmodel = &op_model_sh7750_ops; | 98 | * this infrastructure hook is left for other users until they've |
102 | break; | 99 | * had a chance to convert over, at which point all of this |
103 | 100 | * will be deleted. | |
104 | /* SH-4A types */ | 101 | */ |
105 | case CPU_SH7763: | ||
106 | case CPU_SH7770: | ||
107 | case CPU_SH7780: | ||
108 | case CPU_SH7781: | ||
109 | case CPU_SH7785: | ||
110 | case CPU_SH7786: | ||
111 | case CPU_SH7723: | ||
112 | case CPU_SH7724: | ||
113 | case CPU_SHX3: | ||
114 | lmodel = &op_model_sh4a_ops; | ||
115 | break; | ||
116 | |||
117 | /* SH4AL-DSP types */ | ||
118 | case CPU_SH7343: | ||
119 | case CPU_SH7722: | ||
120 | case CPU_SH7366: | ||
121 | lmodel = &op_model_sh4a_ops; | ||
122 | break; | ||
123 | } | ||
124 | 102 | ||
125 | if (!lmodel) | 103 | if (!lmodel) |
126 | return -ENODEV; | 104 | return -ENODEV; |
diff --git a/arch/sh/oprofile/op_impl.h b/arch/sh/oprofile/op_impl.h index 4d509975eba..1244479ceb2 100644 --- a/arch/sh/oprofile/op_impl.h +++ b/arch/sh/oprofile/op_impl.h | |||
@@ -6,7 +6,7 @@ struct op_counter_config { | |||
6 | unsigned long enabled; | 6 | unsigned long enabled; |
7 | unsigned long event; | 7 | unsigned long event; |
8 | 8 | ||
9 | unsigned long long count; | 9 | unsigned long count; |
10 | 10 | ||
11 | /* Dummy values for userspace tool compliance */ | 11 | /* Dummy values for userspace tool compliance */ |
12 | unsigned long kernel; | 12 | unsigned long kernel; |
diff --git a/arch/sh/oprofile/op_model_sh7750.c b/arch/sh/oprofile/op_model_sh7750.c deleted file mode 100644 index c892c7c30c2..00000000000 --- a/arch/sh/oprofile/op_model_sh7750.c +++ /dev/null | |||
@@ -1,255 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/oprofile/op_model_sh7750.c | ||
3 | * | ||
4 | * OProfile support for SH7750/SH7750S Performance Counters | ||
5 | * | ||
6 | * Copyright (C) 2003 - 2008 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/oprofile.h> | ||
14 | #include <linux/profile.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/fs.h> | ||
20 | #include "op_impl.h" | ||
21 | |||
22 | #define PM_CR_BASE 0xff000084 /* 16-bit */ | ||
23 | #define PM_CTR_BASE 0xff100004 /* 32-bit */ | ||
24 | |||
25 | #define PMCR(n) (PM_CR_BASE + ((n) * 0x04)) | ||
26 | #define PMCTRH(n) (PM_CTR_BASE + 0x00 + ((n) * 0x08)) | ||
27 | #define PMCTRL(n) (PM_CTR_BASE + 0x04 + ((n) * 0x08)) | ||
28 | |||
29 | #define PMCR_PMM_MASK 0x0000003f | ||
30 | |||
31 | #define PMCR_CLKF 0x00000100 | ||
32 | #define PMCR_PMCLR 0x00002000 | ||
33 | #define PMCR_PMST 0x00004000 | ||
34 | #define PMCR_PMEN 0x00008000 | ||
35 | |||
36 | struct op_sh_model op_model_sh7750_ops; | ||
37 | |||
38 | #define NR_CNTRS 2 | ||
39 | |||
40 | static struct sh7750_ppc_register_config { | ||
41 | unsigned int ctrl; | ||
42 | unsigned long cnt_hi; | ||
43 | unsigned long cnt_lo; | ||
44 | } regcache[NR_CNTRS]; | ||
45 | |||
46 | /* | ||
47 | * There are a number of events supported by each counter (33 in total). | ||
48 | * Since we have 2 counters, each counter will take the event code as it | ||
49 | * corresponds to the PMCR PMM setting. Each counter can be configured | ||
50 | * independently. | ||
51 | * | ||
52 | * Event Code Description | ||
53 | * ---------- ----------- | ||
54 | * | ||
55 | * 0x01 Operand read access | ||
56 | * 0x02 Operand write access | ||
57 | * 0x03 UTLB miss | ||
58 | * 0x04 Operand cache read miss | ||
59 | * 0x05 Operand cache write miss | ||
60 | * 0x06 Instruction fetch (w/ cache) | ||
61 | * 0x07 Instruction TLB miss | ||
62 | * 0x08 Instruction cache miss | ||
63 | * 0x09 All operand accesses | ||
64 | * 0x0a All instruction accesses | ||
65 | * 0x0b OC RAM operand access | ||
66 | * 0x0d On-chip I/O space access | ||
67 | * 0x0e Operand access (r/w) | ||
68 | * 0x0f Operand cache miss (r/w) | ||
69 | * 0x10 Branch instruction | ||
70 | * 0x11 Branch taken | ||
71 | * 0x12 BSR/BSRF/JSR | ||
72 | * 0x13 Instruction execution | ||
73 | * 0x14 Instruction execution in parallel | ||
74 | * 0x15 FPU Instruction execution | ||
75 | * 0x16 Interrupt | ||
76 | * 0x17 NMI | ||
77 | * 0x18 trapa instruction execution | ||
78 | * 0x19 UBCA match | ||
79 | * 0x1a UBCB match | ||
80 | * 0x21 Instruction cache fill | ||
81 | * 0x22 Operand cache fill | ||
82 | * 0x23 Elapsed time | ||
83 | * 0x24 Pipeline freeze by I-cache miss | ||
84 | * 0x25 Pipeline freeze by D-cache miss | ||
85 | * 0x27 Pipeline freeze by branch instruction | ||
86 | * 0x28 Pipeline freeze by CPU register | ||
87 | * 0x29 Pipeline freeze by FPU | ||
88 | * | ||
89 | * Unfortunately we don't have a native exception or interrupt for counter | ||
90 | * overflow (although since these counters can run for 16.3 days without | ||
91 | * overflowing, it's not really necessary). | ||
92 | * | ||
93 | * OProfile on the other hand likes to have samples taken periodically, so | ||
94 | * for now we just piggyback the timer interrupt to get the expected | ||
95 | * behavior. | ||
96 | */ | ||
97 | |||
98 | static int sh7750_timer_notify(struct pt_regs *regs) | ||
99 | { | ||
100 | oprofile_add_sample(regs, 0); | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static u64 sh7750_read_counter(int counter) | ||
105 | { | ||
106 | return (u64)((u64)(__raw_readl(PMCTRH(counter)) & 0xffff) << 32) | | ||
107 | __raw_readl(PMCTRL(counter)); | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * Files will be in a path like: | ||
112 | * | ||
113 | * /<oprofilefs mount point>/<counter number>/<file> | ||
114 | * | ||
115 | * So when dealing with <file>, we look to the parent dentry for the counter | ||
116 | * number. | ||
117 | */ | ||
118 | static inline int to_counter(struct file *file) | ||
119 | { | ||
120 | const unsigned char *name = file->f_path.dentry->d_parent->d_name.name; | ||
121 | |||
122 | return (int)simple_strtol(name, NULL, 10); | ||
123 | } | ||
124 | |||
125 | /* | ||
126 | * XXX: We have 48-bit counters, so we're probably going to want something | ||
127 | * more along the lines of oprofilefs_ullong_to_user().. Truncating to | ||
128 | * unsigned long works fine for now though, as long as we don't attempt to | ||
129 | * profile for too horribly long. | ||
130 | */ | ||
131 | static ssize_t sh7750_read_count(struct file *file, char __user *buf, | ||
132 | size_t count, loff_t *ppos) | ||
133 | { | ||
134 | int counter = to_counter(file); | ||
135 | u64 val = sh7750_read_counter(counter); | ||
136 | |||
137 | return oprofilefs_ulong_to_user((unsigned long)val, buf, count, ppos); | ||
138 | } | ||
139 | |||
140 | static ssize_t sh7750_write_count(struct file *file, const char __user *buf, | ||
141 | size_t count, loff_t *ppos) | ||
142 | { | ||
143 | int counter = to_counter(file); | ||
144 | unsigned long val; | ||
145 | |||
146 | if (oprofilefs_ulong_from_user(&val, buf, count)) | ||
147 | return -EFAULT; | ||
148 | |||
149 | /* | ||
150 | * Any write will clear the counter, although only 0 should be | ||
151 | * written for this purpose, as we do not support setting the | ||
152 | * counter to an arbitrary value. | ||
153 | */ | ||
154 | WARN_ON(val != 0); | ||
155 | |||
156 | __raw_writew(__raw_readw(PMCR(counter)) | PMCR_PMCLR, PMCR(counter)); | ||
157 | |||
158 | return count; | ||
159 | } | ||
160 | |||
161 | static const struct file_operations count_fops = { | ||
162 | .read = sh7750_read_count, | ||
163 | .write = sh7750_write_count, | ||
164 | }; | ||
165 | |||
166 | static int sh7750_ppc_create_files(struct super_block *sb, struct dentry *dir) | ||
167 | { | ||
168 | return oprofilefs_create_file(sb, dir, "count", &count_fops); | ||
169 | } | ||
170 | |||
171 | static void sh7750_ppc_reg_setup(struct op_counter_config *ctr) | ||
172 | { | ||
173 | unsigned int counters = op_model_sh7750_ops.num_counters; | ||
174 | int i; | ||
175 | |||
176 | for (i = 0; i < counters; i++) { | ||
177 | regcache[i].ctrl = 0; | ||
178 | regcache[i].cnt_hi = 0; | ||
179 | regcache[i].cnt_lo = 0; | ||
180 | |||
181 | if (!ctr[i].enabled) | ||
182 | continue; | ||
183 | |||
184 | regcache[i].ctrl |= ctr[i].event | PMCR_PMEN | PMCR_PMST; | ||
185 | regcache[i].cnt_hi = (unsigned long)((ctr->count >> 32) & 0xffff); | ||
186 | regcache[i].cnt_lo = (unsigned long)(ctr->count & 0xffffffff); | ||
187 | } | ||
188 | } | ||
189 | |||
190 | static void sh7750_ppc_cpu_setup(void *args) | ||
191 | { | ||
192 | unsigned int counters = op_model_sh7750_ops.num_counters; | ||
193 | int i; | ||
194 | |||
195 | for (i = 0; i < counters; i++) { | ||
196 | __raw_writew(0, PMCR(i)); | ||
197 | __raw_writel(regcache[i].cnt_hi, PMCTRH(i)); | ||
198 | __raw_writel(regcache[i].cnt_lo, PMCTRL(i)); | ||
199 | } | ||
200 | } | ||
201 | |||
202 | static void sh7750_ppc_cpu_start(void *args) | ||
203 | { | ||
204 | unsigned int counters = op_model_sh7750_ops.num_counters; | ||
205 | int i; | ||
206 | |||
207 | for (i = 0; i < counters; i++) | ||
208 | __raw_writew(regcache[i].ctrl, PMCR(i)); | ||
209 | } | ||
210 | |||
211 | static void sh7750_ppc_cpu_stop(void *args) | ||
212 | { | ||
213 | unsigned int counters = op_model_sh7750_ops.num_counters; | ||
214 | int i; | ||
215 | |||
216 | /* Disable the counters */ | ||
217 | for (i = 0; i < counters; i++) | ||
218 | __raw_writew(__raw_readw(PMCR(i)) & ~PMCR_PMEN, PMCR(i)); | ||
219 | } | ||
220 | |||
221 | static inline void sh7750_ppc_reset(void) | ||
222 | { | ||
223 | unsigned int counters = op_model_sh7750_ops.num_counters; | ||
224 | int i; | ||
225 | |||
226 | /* Clear the counters */ | ||
227 | for (i = 0; i < counters; i++) | ||
228 | __raw_writew(__raw_readw(PMCR(i)) | PMCR_PMCLR, PMCR(i)); | ||
229 | } | ||
230 | |||
231 | static int sh7750_ppc_init(void) | ||
232 | { | ||
233 | sh7750_ppc_reset(); | ||
234 | |||
235 | return register_timer_hook(sh7750_timer_notify); | ||
236 | } | ||
237 | |||
238 | static void sh7750_ppc_exit(void) | ||
239 | { | ||
240 | unregister_timer_hook(sh7750_timer_notify); | ||
241 | |||
242 | sh7750_ppc_reset(); | ||
243 | } | ||
244 | |||
245 | struct op_sh_model op_model_sh7750_ops = { | ||
246 | .cpu_type = "sh/sh7750", | ||
247 | .num_counters = NR_CNTRS, | ||
248 | .reg_setup = sh7750_ppc_reg_setup, | ||
249 | .cpu_setup = sh7750_ppc_cpu_setup, | ||
250 | .cpu_start = sh7750_ppc_cpu_start, | ||
251 | .cpu_stop = sh7750_ppc_cpu_stop, | ||
252 | .init = sh7750_ppc_init, | ||
253 | .exit = sh7750_ppc_exit, | ||
254 | .create_files = sh7750_ppc_create_files, | ||
255 | }; | ||
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index a762283d2a2..e789e6c9a42 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c | |||
@@ -214,7 +214,7 @@ static void gdrom_spicommand(void *spi_string, int buflen) | |||
214 | gdrom_getsense(NULL); | 214 | gdrom_getsense(NULL); |
215 | return; | 215 | return; |
216 | } | 216 | } |
217 | outsw(PHYSADDR(GDROM_DATA_REG), cmd, 6); | 217 | outsw(GDROM_DATA_REG, cmd, 6); |
218 | } | 218 | } |
219 | 219 | ||
220 | 220 | ||
@@ -298,7 +298,7 @@ static int gdrom_readtoc_cmd(struct gdromtoc *toc, int session) | |||
298 | err = -EINVAL; | 298 | err = -EINVAL; |
299 | goto cleanup_readtoc; | 299 | goto cleanup_readtoc; |
300 | } | 300 | } |
301 | insw(PHYSADDR(GDROM_DATA_REG), toc, tocsize/2); | 301 | insw(GDROM_DATA_REG, toc, tocsize/2); |
302 | if (gd.status & 0x01) | 302 | if (gd.status & 0x01) |
303 | err = -EINVAL; | 303 | err = -EINVAL; |
304 | 304 | ||
@@ -449,7 +449,7 @@ static int gdrom_getsense(short *bufstring) | |||
449 | GDROM_DEFAULT_TIMEOUT); | 449 | GDROM_DEFAULT_TIMEOUT); |
450 | if (gd.pending) | 450 | if (gd.pending) |
451 | goto cleanup_sense; | 451 | goto cleanup_sense; |
452 | insw(PHYSADDR(GDROM_DATA_REG), &sense, sense_command->buflen/2); | 452 | insw(GDROM_DATA_REG, &sense, sense_command->buflen/2); |
453 | if (sense[1] & 40) { | 453 | if (sense[1] & 40) { |
454 | printk(KERN_INFO "GDROM: Drive not ready - command aborted\n"); | 454 | printk(KERN_INFO "GDROM: Drive not ready - command aborted\n"); |
455 | goto cleanup_sense; | 455 | goto cleanup_sense; |
@@ -586,7 +586,7 @@ static void gdrom_readdisk_dma(struct work_struct *work) | |||
586 | spin_unlock(&gdrom_lock); | 586 | spin_unlock(&gdrom_lock); |
587 | block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET; | 587 | block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET; |
588 | block_cnt = blk_rq_sectors(req)/GD_TO_BLK; | 588 | block_cnt = blk_rq_sectors(req)/GD_TO_BLK; |
589 | ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG); | 589 | ctrl_outl(virt_to_phys(req->buffer), GDROM_DMA_STARTADDR_REG); |
590 | ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG); | 590 | ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG); |
591 | ctrl_outl(1, GDROM_DMA_DIRECTION_REG); | 591 | ctrl_outl(1, GDROM_DMA_DIRECTION_REG); |
592 | ctrl_outl(1, GDROM_DMA_ENABLE_REG); | 592 | ctrl_outl(1, GDROM_DMA_ENABLE_REG); |
@@ -615,7 +615,7 @@ static void gdrom_readdisk_dma(struct work_struct *work) | |||
615 | cpu_relax(); | 615 | cpu_relax(); |
616 | gd.pending = 1; | 616 | gd.pending = 1; |
617 | gd.transfer = 1; | 617 | gd.transfer = 1; |
618 | outsw(PHYSADDR(GDROM_DATA_REG), &read_command->cmd, 6); | 618 | outsw(GDROM_DATA_REG, &read_command->cmd, 6); |
619 | timeout = jiffies + HZ / 2; | 619 | timeout = jiffies + HZ / 2; |
620 | /* Wait for any pending DMA to finish */ | 620 | /* Wait for any pending DMA to finish */ |
621 | while (ctrl_inb(GDROM_DMA_STATUS_REG) && | 621 | while (ctrl_inb(GDROM_DMA_STATUS_REG) && |
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c index 887af79b7bf..076111fc72d 100644 --- a/drivers/input/keyboard/sh_keysc.c +++ b/drivers/input/keyboard/sh_keysc.c | |||
@@ -18,9 +18,9 @@ | |||
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/input.h> | 20 | #include <linux/input.h> |
21 | #include <linux/input/sh_keysc.h> | ||
21 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
22 | #include <linux/io.h> | 23 | #include <linux/io.h> |
23 | #include <asm/sh_keysc.h> | ||
24 | 24 | ||
25 | #define KYCR1_OFFS 0x00 | 25 | #define KYCR1_OFFS 0x00 |
26 | #define KYCR2_OFFS 0x04 | 26 | #define KYCR2_OFFS 0x04 |
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 08f2d07bf56..a296e717e86 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig | |||
@@ -35,6 +35,14 @@ config MFD_ASIC3 | |||
35 | This driver supports the ASIC3 multifunction chip found on many | 35 | This driver supports the ASIC3 multifunction chip found on many |
36 | PDAs (mainly iPAQ and HTC based ones) | 36 | PDAs (mainly iPAQ and HTC based ones) |
37 | 37 | ||
38 | config MFD_SH_MOBILE_SDHI | ||
39 | bool "Support for SuperH Mobile SDHI" | ||
40 | depends on SUPERH | ||
41 | select MFD_CORE | ||
42 | ---help--- | ||
43 | This driver supports the SDHI hardware block found in many | ||
44 | SuperH Mobile SoCs. | ||
45 | |||
38 | config MFD_DM355EVM_MSP | 46 | config MFD_DM355EVM_MSP |
39 | bool "DaVinci DM355 EVM microcontroller" | 47 | bool "DaVinci DM355 EVM microcontroller" |
40 | depends on I2C && MACH_DAVINCI_DM355_EVM | 48 | depends on I2C && MACH_DAVINCI_DM355_EVM |
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index af0fc903cec..11350c1d930 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_MFD_SM501) += sm501.o | 5 | obj-$(CONFIG_MFD_SM501) += sm501.o |
6 | obj-$(CONFIG_MFD_ASIC3) += asic3.o | 6 | obj-$(CONFIG_MFD_ASIC3) += asic3.o |
7 | obj-$(CONFIG_MFD_SH_MOBILE_SDHI) += sh_mobile_sdhi.o | ||
7 | 8 | ||
8 | obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o | 9 | obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o |
9 | obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o | 10 | obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o |
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c new file mode 100644 index 00000000000..03efae8041a --- /dev/null +++ b/drivers/mfd/sh_mobile_sdhi.c | |||
@@ -0,0 +1,156 @@ | |||
1 | /* | ||
2 | * SuperH Mobile SDHI | ||
3 | * | ||
4 | * Copyright (C) 2009 Magnus Damm | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Based on "Compaq ASIC3 support": | ||
11 | * | ||
12 | * Copyright 2001 Compaq Computer Corporation. | ||
13 | * Copyright 2004-2005 Phil Blundell | ||
14 | * Copyright 2007-2008 OpenedHand Ltd. | ||
15 | * | ||
16 | * Authors: Phil Blundell <pb@handhelds.org>, | ||
17 | * Samuel Ortiz <sameo@openedhand.com> | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/clk.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | |||
25 | #include <linux/mfd/core.h> | ||
26 | #include <linux/mfd/tmio.h> | ||
27 | #include <linux/mfd/sh_mobile_sdhi.h> | ||
28 | |||
29 | struct sh_mobile_sdhi { | ||
30 | struct clk *clk; | ||
31 | struct tmio_mmc_data mmc_data; | ||
32 | struct mfd_cell cell_mmc; | ||
33 | }; | ||
34 | |||
35 | static struct resource sh_mobile_sdhi_resources[] = { | ||
36 | { | ||
37 | .start = 0x000, | ||
38 | .end = 0x1ff, | ||
39 | .flags = IORESOURCE_MEM, | ||
40 | }, | ||
41 | { | ||
42 | .start = 0, | ||
43 | .end = 0, | ||
44 | .flags = IORESOURCE_IRQ, | ||
45 | }, | ||
46 | }; | ||
47 | |||
48 | static struct mfd_cell sh_mobile_sdhi_cell = { | ||
49 | .name = "tmio-mmc", | ||
50 | .num_resources = ARRAY_SIZE(sh_mobile_sdhi_resources), | ||
51 | .resources = sh_mobile_sdhi_resources, | ||
52 | }; | ||
53 | |||
54 | static void sh_mobile_sdhi_set_pwr(struct platform_device *tmio, int state) | ||
55 | { | ||
56 | struct platform_device *pdev = to_platform_device(tmio->dev.parent); | ||
57 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | ||
58 | |||
59 | if (p && p->set_pwr) | ||
60 | p->set_pwr(pdev, state); | ||
61 | } | ||
62 | |||
63 | static int __init sh_mobile_sdhi_probe(struct platform_device *pdev) | ||
64 | { | ||
65 | struct sh_mobile_sdhi *priv; | ||
66 | struct resource *mem; | ||
67 | char clk_name[8]; | ||
68 | int ret, irq; | ||
69 | |||
70 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
71 | if (!mem) | ||
72 | dev_err(&pdev->dev, "missing MEM resource\n"); | ||
73 | |||
74 | irq = platform_get_irq(pdev, 0); | ||
75 | if (irq < 0) | ||
76 | dev_err(&pdev->dev, "missing IRQ resource\n"); | ||
77 | |||
78 | if (!mem || (irq < 0)) | ||
79 | return -EINVAL; | ||
80 | |||
81 | priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); | ||
82 | if (priv == NULL) { | ||
83 | dev_err(&pdev->dev, "kzalloc failed\n"); | ||
84 | return -ENOMEM; | ||
85 | } | ||
86 | |||
87 | snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id); | ||
88 | priv->clk = clk_get(&pdev->dev, clk_name); | ||
89 | if (IS_ERR(priv->clk)) { | ||
90 | dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); | ||
91 | ret = PTR_ERR(priv->clk); | ||
92 | kfree(priv); | ||
93 | return ret; | ||
94 | } | ||
95 | |||
96 | clk_enable(priv->clk); | ||
97 | |||
98 | /* FIXME: silly const unsigned int hclk */ | ||
99 | *(unsigned int *)&priv->mmc_data.hclk = clk_get_rate(priv->clk); | ||
100 | priv->mmc_data.set_pwr = sh_mobile_sdhi_set_pwr; | ||
101 | |||
102 | memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc)); | ||
103 | priv->cell_mmc.driver_data = &priv->mmc_data; | ||
104 | priv->cell_mmc.platform_data = &priv->cell_mmc; | ||
105 | priv->cell_mmc.data_size = sizeof(priv->cell_mmc); | ||
106 | |||
107 | platform_set_drvdata(pdev, priv); | ||
108 | |||
109 | ret = mfd_add_devices(&pdev->dev, pdev->id, | ||
110 | &priv->cell_mmc, 1, mem, irq); | ||
111 | if (ret) { | ||
112 | clk_disable(priv->clk); | ||
113 | clk_put(priv->clk); | ||
114 | kfree(priv); | ||
115 | } | ||
116 | |||
117 | return ret; | ||
118 | } | ||
119 | |||
120 | static int sh_mobile_sdhi_remove(struct platform_device *pdev) | ||
121 | { | ||
122 | struct sh_mobile_sdhi *priv = platform_get_drvdata(pdev); | ||
123 | |||
124 | mfd_remove_devices(&pdev->dev); | ||
125 | clk_disable(priv->clk); | ||
126 | clk_put(priv->clk); | ||
127 | kfree(priv); | ||
128 | |||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static struct platform_driver sh_mobile_sdhi_driver = { | ||
133 | .driver = { | ||
134 | .name = "sh_mobile_sdhi", | ||
135 | .owner = THIS_MODULE, | ||
136 | }, | ||
137 | .probe = sh_mobile_sdhi_probe, | ||
138 | .remove = __devexit_p(sh_mobile_sdhi_remove), | ||
139 | }; | ||
140 | |||
141 | static int __init sh_mobile_sdhi_init(void) | ||
142 | { | ||
143 | return platform_driver_register(&sh_mobile_sdhi_driver); | ||
144 | } | ||
145 | |||
146 | static void __exit sh_mobile_sdhi_exit(void) | ||
147 | { | ||
148 | platform_driver_unregister(&sh_mobile_sdhi_driver); | ||
149 | } | ||
150 | |||
151 | module_init(sh_mobile_sdhi_init); | ||
152 | module_exit(sh_mobile_sdhi_exit); | ||
153 | |||
154 | MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); | ||
155 | MODULE_AUTHOR("Magnus Damm"); | ||
156 | MODULE_LICENSE("GPL v2"); | ||
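The new cell driver binds by platform device name ("sh_mobile_sdhi"), expects one MEM and one IRQ resource, requests a clock named after the device id ("sdhi0", "sdhi1", ...) and takes an optional sh_mobile_sdhi_info with a set_pwr hook. A board-side sketch of wiring it up; the base address, IRQ vector and power callback body are invented for illustration:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/mfd/sh_mobile_sdhi.h>

/* Hypothetical power hook -- a real board would toggle a GPIO or regulator. */
static void example_sdhi_set_pwr(struct platform_device *pdev, int state)
{
}

static struct sh_mobile_sdhi_info example_sdhi_info = {
        .set_pwr        = example_sdhi_set_pwr,
};

static struct resource example_sdhi_resources[] = {
        [0] = {
                .start  = 0x04ce0000,           /* hypothetical SDHI base */
                .end    = 0x04ce01ff,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
                .start  = 100,                  /* hypothetical IRQ vector */
                .flags  = IORESOURCE_IRQ,
        },
};

static struct platform_device example_sdhi_device = {
        .name           = "sh_mobile_sdhi",
        .id             = 0,                    /* driver requests clock "sdhi0" */
        .num_resources  = ARRAY_SIZE(example_sdhi_resources),
        .resource       = example_sdhi_resources,
        .dev            = {
                .platform_data = &example_sdhi_info,
        },
};

/* Registered from board setup code with
 * platform_device_register(&example_sdhi_device).
 */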
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 432ae8358c8..e04b751680d 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
@@ -329,7 +329,7 @@ config MMC_SDRICOH_CS | |||
329 | 329 | ||
330 | config MMC_TMIO | 330 | config MMC_TMIO |
331 | tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" | 331 | tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" |
332 | depends on MFD_TMIO || MFD_ASIC3 | 332 | depends on MFD_TMIO || MFD_ASIC3 || SUPERH |
333 | help | 333 | help |
334 | This provides support for the SD/MMC cell found in TC6393XB, | 334 | This provides support for the SD/MMC cell found in TC6393XB, |
335 | T7L66XB and also HTC ASIC3 | 335 | T7L66XB and also HTC ASIC3 |
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c index d490628b64d..1e73c8f42e3 100644 --- a/drivers/rtc/rtc-ds1302.c +++ b/drivers/rtc/rtc-ds1302.c | |||
@@ -201,7 +201,7 @@ static struct platform_driver ds1302_platform_driver = { | |||
201 | .name = DRV_NAME, | 201 | .name = DRV_NAME, |
202 | .owner = THIS_MODULE, | 202 | .owner = THIS_MODULE, |
203 | }, | 203 | }, |
204 | .remove = __exit_p(ds1302_rtc_remove), | 204 | .remove = __devexit_p(ds1302_rtc_remove), |
205 | }; | 205 | }; |
206 | 206 | ||
207 | static int __init ds1302_rtc_init(void) | 207 | static int __init ds1302_rtc_init(void) |
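Switching from __exit_p() to __devexit_p() follows the convention for removable devices: the remove handler is only discarded (and its pointer NULLed) when hot unplug can never happen, instead of unconditionally for built-in drivers as with __exit/__exit_p(). A minimal sketch of that convention with invented names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __devexit example_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver example_driver = {
        .driver = {
                .name   = "example",
                .owner  = THIS_MODULE,
        },
        .remove = __devexit_p(example_remove),
};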
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 50943ff78f4..9ff47db0b2c 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
@@ -996,7 +996,7 @@ config SERIAL_IP22_ZILOG_CONSOLE | |||
996 | 996 | ||
997 | config SERIAL_SH_SCI | 997 | config SERIAL_SH_SCI |
998 | tristate "SuperH SCI(F) serial port support" | 998 | tristate "SuperH SCI(F) serial port support" |
999 | depends on SUPERH || H8300 | 999 | depends on HAVE_CLK && (SUPERH || H8300) |
1000 | select SERIAL_CORE | 1000 | select SERIAL_CORE |
1001 | 1001 | ||
1002 | config SERIAL_SH_SCI_NR_UARTS | 1002 | config SERIAL_SH_SCI_NR_UARTS |
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 6498bd1fb6d..ff38dbdb5c6 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c | |||
@@ -50,7 +50,6 @@ | |||
50 | #include <linux/list.h> | 50 | #include <linux/list.h> |
51 | 51 | ||
52 | #ifdef CONFIG_SUPERH | 52 | #ifdef CONFIG_SUPERH |
53 | #include <asm/clock.h> | ||
54 | #include <asm/sh_bios.h> | 53 | #include <asm/sh_bios.h> |
55 | #endif | 54 | #endif |
56 | 55 | ||
@@ -79,22 +78,18 @@ struct sci_port { | |||
79 | struct timer_list break_timer; | 78 | struct timer_list break_timer; |
80 | int break_flag; | 79 | int break_flag; |
81 | 80 | ||
82 | #ifdef CONFIG_HAVE_CLK | ||
83 | /* Interface clock */ | 81 | /* Interface clock */ |
84 | struct clk *iclk; | 82 | struct clk *iclk; |
85 | /* Data clock */ | 83 | /* Data clock */ |
86 | struct clk *dclk; | 84 | struct clk *dclk; |
87 | #endif | 85 | |
88 | struct list_head node; | 86 | struct list_head node; |
89 | }; | 87 | }; |
90 | 88 | ||
91 | struct sh_sci_priv { | 89 | struct sh_sci_priv { |
92 | spinlock_t lock; | 90 | spinlock_t lock; |
93 | struct list_head ports; | 91 | struct list_head ports; |
94 | |||
95 | #ifdef CONFIG_HAVE_CLK | ||
96 | struct notifier_block clk_nb; | 92 | struct notifier_block clk_nb; |
97 | #endif | ||
98 | }; | 93 | }; |
99 | 94 | ||
100 | /* Function prototypes */ | 95 | /* Function prototypes */ |
@@ -156,32 +151,6 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c) | |||
156 | } | 151 | } |
157 | #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ | 152 | #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ |
158 | 153 | ||
159 | #if defined(__H8300S__) | ||
160 | enum { sci_disable, sci_enable }; | ||
161 | |||
162 | static void h8300_sci_config(struct uart_port *port, unsigned int ctrl) | ||
163 | { | ||
164 | volatile unsigned char *mstpcrl = (volatile unsigned char *)MSTPCRL; | ||
165 | int ch = (port->mapbase - SMR0) >> 3; | ||
166 | unsigned char mask = 1 << (ch+1); | ||
167 | |||
168 | if (ctrl == sci_disable) | ||
169 | *mstpcrl |= mask; | ||
170 | else | ||
171 | *mstpcrl &= ~mask; | ||
172 | } | ||
173 | |||
174 | static void h8300_sci_enable(struct uart_port *port) | ||
175 | { | ||
176 | h8300_sci_config(port, sci_enable); | ||
177 | } | ||
178 | |||
179 | static void h8300_sci_disable(struct uart_port *port) | ||
180 | { | ||
181 | h8300_sci_config(port, sci_disable); | ||
182 | } | ||
183 | #endif | ||
184 | |||
185 | #if defined(__H8300H__) || defined(__H8300S__) | 154 | #if defined(__H8300H__) || defined(__H8300S__) |
186 | static void sci_init_pins(struct uart_port *port, unsigned int cflag) | 155 | static void sci_init_pins(struct uart_port *port, unsigned int cflag) |
187 | { | 156 | { |
@@ -733,7 +702,6 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) | |||
733 | return ret; | 702 | return ret; |
734 | } | 703 | } |
735 | 704 | ||
736 | #ifdef CONFIG_HAVE_CLK | ||
737 | /* | 705 | /* |
738 | * Here we define a transistion notifier so that we can update all of our | 706 | * Here we define a transistion notifier so that we can update all of our |
739 | * ports' baud rate when the peripheral clock changes. | 707 | * ports' baud rate when the peripheral clock changes. |
@@ -751,7 +719,6 @@ static int sci_notifier(struct notifier_block *self, | |||
751 | spin_lock_irqsave(&priv->lock, flags); | 719 | spin_lock_irqsave(&priv->lock, flags); |
752 | list_for_each_entry(sci_port, &priv->ports, node) | 720 | list_for_each_entry(sci_port, &priv->ports, node) |
753 | sci_port->port.uartclk = clk_get_rate(sci_port->dclk); | 721 | sci_port->port.uartclk = clk_get_rate(sci_port->dclk); |
754 | |||
755 | spin_unlock_irqrestore(&priv->lock, flags); | 722 | spin_unlock_irqrestore(&priv->lock, flags); |
756 | } | 723 | } |
757 | 724 | ||
@@ -778,7 +745,6 @@ static void sci_clk_disable(struct uart_port *port) | |||
778 | 745 | ||
779 | clk_disable(sci_port->dclk); | 746 | clk_disable(sci_port->dclk); |
780 | } | 747 | } |
781 | #endif | ||
782 | 748 | ||
783 | static int sci_request_irq(struct sci_port *port) | 749 | static int sci_request_irq(struct sci_port *port) |
784 | { | 750 | { |
@@ -833,8 +799,8 @@ static void sci_free_irq(struct sci_port *port) | |||
833 | 799 | ||
834 | static unsigned int sci_tx_empty(struct uart_port *port) | 800 | static unsigned int sci_tx_empty(struct uart_port *port) |
835 | { | 801 | { |
836 | /* Can't detect */ | 802 | unsigned short status = sci_in(port, SCxSR); |
837 | return TIOCSER_TEMT; | 803 | return status & SCxSR_TEND(port) ? TIOCSER_TEMT : 0; |
838 | } | 804 | } |
839 | 805 | ||
840 | static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) | 806 | static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) |
@@ -1077,21 +1043,10 @@ static void __devinit sci_init_single(struct platform_device *dev, | |||
1077 | sci_port->port.iotype = UPIO_MEM; | 1043 | sci_port->port.iotype = UPIO_MEM; |
1078 | sci_port->port.line = index; | 1044 | sci_port->port.line = index; |
1079 | sci_port->port.fifosize = 1; | 1045 | sci_port->port.fifosize = 1; |
1080 | |||
1081 | #if defined(__H8300H__) || defined(__H8300S__) | ||
1082 | #ifdef __H8300S__ | ||
1083 | sci_port->enable = h8300_sci_enable; | ||
1084 | sci_port->disable = h8300_sci_disable; | ||
1085 | #endif | ||
1086 | sci_port->port.uartclk = CONFIG_CPU_CLOCK; | ||
1087 | #elif defined(CONFIG_HAVE_CLK) | ||
1088 | sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL; | 1046 | sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL; |
1089 | sci_port->dclk = clk_get(&dev->dev, "peripheral_clk"); | 1047 | sci_port->dclk = clk_get(&dev->dev, "peripheral_clk"); |
1090 | sci_port->enable = sci_clk_enable; | 1048 | sci_port->enable = sci_clk_enable; |
1091 | sci_port->disable = sci_clk_disable; | 1049 | sci_port->disable = sci_clk_disable; |
1092 | #else | ||
1093 | #error "Need a valid uartclk" | ||
1094 | #endif | ||
1095 | 1050 | ||
1096 | sci_port->break_timer.data = (unsigned long)sci_port; | 1051 | sci_port->break_timer.data = (unsigned long)sci_port; |
1097 | sci_port->break_timer.function = sci_break_timer; | 1052 | sci_port->break_timer.function = sci_break_timer; |
@@ -1106,7 +1061,6 @@ static void __devinit sci_init_single(struct platform_device *dev, | |||
1106 | sci_port->type = sci_port->port.type = p->type; | 1061 | sci_port->type = sci_port->port.type = p->type; |
1107 | 1062 | ||
1108 | memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs)); | 1063 | memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs)); |
1109 | |||
1110 | } | 1064 | } |
1111 | 1065 | ||
1112 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE | 1066 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE |
@@ -1239,14 +1193,11 @@ static int sci_remove(struct platform_device *dev) | |||
1239 | struct sci_port *p; | 1193 | struct sci_port *p; |
1240 | unsigned long flags; | 1194 | unsigned long flags; |
1241 | 1195 | ||
1242 | #ifdef CONFIG_HAVE_CLK | ||
1243 | cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER); | 1196 | cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER); |
1244 | #endif | ||
1245 | 1197 | ||
1246 | spin_lock_irqsave(&priv->lock, flags); | 1198 | spin_lock_irqsave(&priv->lock, flags); |
1247 | list_for_each_entry(p, &priv->ports, node) | 1199 | list_for_each_entry(p, &priv->ports, node) |
1248 | uart_remove_one_port(&sci_uart_driver, &p->port); | 1200 | uart_remove_one_port(&sci_uart_driver, &p->port); |
1249 | |||
1250 | spin_unlock_irqrestore(&priv->lock, flags); | 1201 | spin_unlock_irqrestore(&priv->lock, flags); |
1251 | 1202 | ||
1252 | kfree(priv); | 1203 | kfree(priv); |
@@ -1307,10 +1258,8 @@ static int __devinit sci_probe(struct platform_device *dev) | |||
1307 | spin_lock_init(&priv->lock); | 1258 | spin_lock_init(&priv->lock); |
1308 | platform_set_drvdata(dev, priv); | 1259 | platform_set_drvdata(dev, priv); |
1309 | 1260 | ||
1310 | #ifdef CONFIG_HAVE_CLK | ||
1311 | priv->clk_nb.notifier_call = sci_notifier; | 1261 | priv->clk_nb.notifier_call = sci_notifier; |
1312 | cpufreq_register_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER); | 1262 | cpufreq_register_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER); |
1313 | #endif | ||
1314 | 1263 | ||
1315 | if (dev->id != -1) { | 1264 | if (dev->id != -1) { |
1316 | ret = sci_probe_single(dev, dev->id, p, &sci_ports[dev->id]); | 1265 | ret = sci_probe_single(dev, dev->id, p, &sci_ports[dev->id]); |
@@ -1370,7 +1319,7 @@ static struct dev_pm_ops sci_dev_pm_ops = { | |||
1370 | 1319 | ||
1371 | static struct platform_driver sci_driver = { | 1320 | static struct platform_driver sci_driver = { |
1372 | .probe = sci_probe, | 1321 | .probe = sci_probe, |
1373 | .remove = __devexit_p(sci_remove), | 1322 | .remove = sci_remove, |
1374 | .driver = { | 1323 | .driver = { |
1375 | .name = "sh-sci", | 1324 | .name = "sh-sci", |
1376 | .owner = THIS_MODULE, | 1325 | .owner = THIS_MODULE, |
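With HAVE_CLK now guaranteed by Kconfig, the clock handling and the cpufreq transition notifier no longer need to be conditional. The notifier pattern itself is generic: re-read the clock rate after every completed frequency transition. A sketch with invented names:

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/notifier.h>

static struct clk *example_clk;         /* obtained elsewhere via clk_get() */
static unsigned long example_rate;

static int example_cpufreq_notifier(struct notifier_block *nb,
                                    unsigned long event, void *data)
{
        /* refresh the cached rate once the new frequency is in effect */
        if (event == CPUFREQ_POSTCHANGE)
                example_rate = clk_get_rate(example_clk);

        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_cpufreq_notifier,
};

/* cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 * ...
 * cpufreq_unregister_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */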
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h index 3e2fcf93b42..a32094eeb42 100644 --- a/drivers/serial/sh-sci.h +++ b/drivers/serial/sh-sci.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #include <linux/serial_core.h> | 1 | #include <linux/serial_core.h> |
2 | #include <asm/io.h> | 2 | #include <linux/io.h> |
3 | #include <linux/gpio.h> | 3 | #include <linux/gpio.h> |
4 | 4 | ||
5 | #if defined(CONFIG_H83007) || defined(CONFIG_H83068) | 5 | #if defined(CONFIG_H83007) || defined(CONFIG_H83068) |
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile index 6a025cefe6d..4956bf1f213 100644 --- a/drivers/sh/Makefile +++ b/drivers/sh/Makefile | |||
@@ -3,4 +3,5 @@ | |||
3 | # | 3 | # |
4 | obj-$(CONFIG_SUPERHYWAY) += superhyway/ | 4 | obj-$(CONFIG_SUPERHYWAY) += superhyway/ |
5 | obj-$(CONFIG_MAPLE) += maple/ | 5 | obj-$(CONFIG_MAPLE) += maple/ |
6 | obj-$(CONFIG_GENERIC_GPIO) += pfc.o | ||
6 | obj-y += intc.o | 7 | obj-y += intc.o |
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c index 559b5fe9dc0..a7e5c2e9986 100644 --- a/drivers/sh/intc.c +++ b/drivers/sh/intc.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * Shared interrupt handling code for IPR and INTC2 types of IRQs. | 2 | * Shared interrupt handling code for IPR and INTC2 types of IRQs. |
3 | * | 3 | * |
4 | * Copyright (C) 2007, 2008 Magnus Damm | 4 | * Copyright (C) 2007, 2008 Magnus Damm |
5 | * Copyright (C) 2009 Paul Mundt | ||
5 | * | 6 | * |
6 | * Based on intc2.c and ipr.c | 7 | * Based on intc2.c and ipr.c |
7 | * | 8 | * |
@@ -24,6 +25,7 @@ | |||
24 | #include <linux/sysdev.h> | 25 | #include <linux/sysdev.h> |
25 | #include <linux/list.h> | 26 | #include <linux/list.h> |
26 | #include <linux/topology.h> | 27 | #include <linux/topology.h> |
28 | #include <linux/bitmap.h> | ||
27 | 29 | ||
28 | #define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ | 30 | #define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ |
29 | ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \ | 31 | ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \ |
@@ -59,6 +61,20 @@ struct intc_desc_int { | |||
59 | 61 | ||
60 | static LIST_HEAD(intc_list); | 62 | static LIST_HEAD(intc_list); |
61 | 63 | ||
64 | /* | ||
65 | * The intc_irq_map provides a global map of bound IRQ vectors for a | ||
66 | * given platform. Allocation of IRQs is either static through the CPU | ||
67 | * vector map, or dynamic in the case of board mux vectors or MSI. | ||
68 | * | ||
69 | * As this is a central point for all IRQ controllers on the system, | ||
70 | * each of the available sources are mapped out here. This combined with | ||
71 | * sparseirq makes it quite trivial to keep the vector map tightly packed | ||
72 | * when dynamically creating IRQs, as well as tying in to otherwise | ||
73 | * unused irq_desc positions in the sparse array. | ||
74 | */ | ||
75 | static DECLARE_BITMAP(intc_irq_map, NR_IRQS); | ||
76 | static DEFINE_SPINLOCK(vector_lock); | ||
77 | |||
62 | #ifdef CONFIG_SMP | 78 | #ifdef CONFIG_SMP |
63 | #define IS_SMP(x) x.smp | 79 | #define IS_SMP(x) x.smp |
64 | #define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c)) | 80 | #define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c)) |
@@ -70,9 +86,7 @@ static LIST_HEAD(intc_list); | |||
70 | #endif | 86 | #endif |
71 | 87 | ||
72 | static unsigned int intc_prio_level[NR_IRQS]; /* for now */ | 88 | static unsigned int intc_prio_level[NR_IRQS]; /* for now */ |
73 | #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) | ||
74 | static unsigned long ack_handle[NR_IRQS]; | 89 | static unsigned long ack_handle[NR_IRQS]; |
75 | #endif | ||
76 | 90 | ||
77 | static inline struct intc_desc_int *get_intc_desc(unsigned int irq) | 91 | static inline struct intc_desc_int *get_intc_desc(unsigned int irq) |
78 | { | 92 | { |
@@ -250,7 +264,6 @@ static int intc_set_wake(unsigned int irq, unsigned int on) | |||
250 | return 0; /* allow wakeup, but setup hardware in intc_suspend() */ | 264 | return 0; /* allow wakeup, but setup hardware in intc_suspend() */ |
251 | } | 265 | } |
252 | 266 | ||
253 | #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) | ||
254 | static void intc_mask_ack(unsigned int irq) | 267 | static void intc_mask_ack(unsigned int irq) |
255 | { | 268 | { |
256 | struct intc_desc_int *d = get_intc_desc(irq); | 269 | struct intc_desc_int *d = get_intc_desc(irq); |
@@ -282,7 +295,6 @@ static void intc_mask_ack(unsigned int irq) | |||
282 | } | 295 | } |
283 | } | 296 | } |
284 | } | 297 | } |
285 | #endif | ||
286 | 298 | ||
287 | static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp, | 299 | static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp, |
288 | unsigned int nr_hp, | 300 | unsigned int nr_hp, |
@@ -501,7 +513,6 @@ static unsigned int __init intc_prio_data(struct intc_desc *desc, | |||
501 | return 0; | 513 | return 0; |
502 | } | 514 | } |
503 | 515 | ||
504 | #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) | ||
505 | static unsigned int __init intc_ack_data(struct intc_desc *desc, | 516 | static unsigned int __init intc_ack_data(struct intc_desc *desc, |
506 | struct intc_desc_int *d, | 517 | struct intc_desc_int *d, |
507 | intc_enum enum_id) | 518 | intc_enum enum_id) |
@@ -533,7 +544,6 @@ static unsigned int __init intc_ack_data(struct intc_desc *desc, | |||
533 | 544 | ||
534 | return 0; | 545 | return 0; |
535 | } | 546 | } |
536 | #endif | ||
537 | 547 | ||
538 | static unsigned int __init intc_sense_data(struct intc_desc *desc, | 548 | static unsigned int __init intc_sense_data(struct intc_desc *desc, |
539 | struct intc_desc_int *d, | 549 | struct intc_desc_int *d, |
@@ -572,6 +582,11 @@ static void __init intc_register_irq(struct intc_desc *desc, | |||
572 | struct intc_handle_int *hp; | 582 | struct intc_handle_int *hp; |
573 | unsigned int data[2], primary; | 583 | unsigned int data[2], primary; |
574 | 584 | ||
585 | /* | ||
586 | * Register the IRQ position with the global IRQ map | ||
587 | */ | ||
588 | set_bit(irq, intc_irq_map); | ||
589 | |||
575 | /* Prefer single interrupt source bitmap over other combinations: | 590 | /* Prefer single interrupt source bitmap over other combinations: |
576 | * 1. bitmap, single interrupt source | 591 | * 1. bitmap, single interrupt source |
577 | * 2. priority, single interrupt source | 592 | * 2. priority, single interrupt source |
@@ -641,10 +656,8 @@ static void __init intc_register_irq(struct intc_desc *desc, | |||
641 | /* irq should be disabled by default */ | 656 | /* irq should be disabled by default */ |
642 | d->chip.mask(irq); | 657 | d->chip.mask(irq); |
643 | 658 | ||
644 | #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) | ||
645 | if (desc->ack_regs) | 659 | if (desc->ack_regs) |
646 | ack_handle[irq] = intc_ack_data(desc, d, enum_id); | 660 | ack_handle[irq] = intc_ack_data(desc, d, enum_id); |
647 | #endif | ||
648 | } | 661 | } |
649 | 662 | ||
650 | static unsigned int __init save_reg(struct intc_desc_int *d, | 663 | static unsigned int __init save_reg(struct intc_desc_int *d, |
@@ -681,10 +694,8 @@ void __init register_intc_controller(struct intc_desc *desc) | |||
681 | d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0; | 694 | d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0; |
682 | d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0; | 695 | d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0; |
683 | d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0; | 696 | d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0; |
684 | |||
685 | #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) | ||
686 | d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0; | 697 | d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0; |
687 | #endif | 698 | |
688 | d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT); | 699 | d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT); |
689 | #ifdef CONFIG_SMP | 700 | #ifdef CONFIG_SMP |
690 | d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT); | 701 | d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT); |
@@ -727,14 +738,12 @@ void __init register_intc_controller(struct intc_desc *desc) | |||
727 | d->chip.set_type = intc_set_sense; | 738 | d->chip.set_type = intc_set_sense; |
728 | d->chip.set_wake = intc_set_wake; | 739 | d->chip.set_wake = intc_set_wake; |
729 | 740 | ||
730 | #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) | ||
731 | if (desc->ack_regs) { | 741 | if (desc->ack_regs) { |
732 | for (i = 0; i < desc->nr_ack_regs; i++) | 742 | for (i = 0; i < desc->nr_ack_regs; i++) |
733 | k += save_reg(d, k, desc->ack_regs[i].set_reg, 0); | 743 | k += save_reg(d, k, desc->ack_regs[i].set_reg, 0); |
734 | 744 | ||
735 | d->chip.mask_ack = intc_mask_ack; | 745 | d->chip.mask_ack = intc_mask_ack; |
736 | } | 746 | } |
737 | #endif | ||
738 | 747 | ||
739 | BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ | 748 | BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ |
740 | 749 | ||
@@ -856,5 +865,91 @@ static int __init register_intc_sysdevs(void) | |||
856 | 865 | ||
857 | return error; | 866 | return error; |
858 | } | 867 | } |
859 | |||
860 | device_initcall(register_intc_sysdevs); | 868 | device_initcall(register_intc_sysdevs); |
869 | |||
870 | /* | ||
871 | * Dynamic IRQ allocation and deallocation | ||
872 | */ | ||
873 | static unsigned int create_irq_on_node(unsigned int irq_want, int node) | ||
874 | { | ||
875 | unsigned int irq = 0, new; | ||
876 | unsigned long flags; | ||
877 | struct irq_desc *desc; | ||
878 | |||
879 | spin_lock_irqsave(&vector_lock, flags); | ||
880 | |||
881 | /* | ||
882 | * First try the wanted IRQ, then scan. | ||
883 | */ | ||
884 | if (test_and_set_bit(irq_want, intc_irq_map)) { | ||
885 | new = find_first_zero_bit(intc_irq_map, nr_irqs); | ||
886 | if (unlikely(new == nr_irqs)) | ||
887 | goto out_unlock; | ||
888 | |||
889 | desc = irq_to_desc_alloc_node(new, node); | ||
890 | if (unlikely(!desc)) { | ||
891 | pr_info("can't get irq_desc for %d\n", new); | ||
892 | goto out_unlock; | ||
893 | } | ||
894 | |||
895 | desc = move_irq_desc(desc, node); | ||
896 | __set_bit(new, intc_irq_map); | ||
897 | irq = new; | ||
898 | } | ||
899 | |||
900 | out_unlock: | ||
901 | spin_unlock_irqrestore(&vector_lock, flags); | ||
902 | |||
903 | if (irq > 0) | ||
904 | dynamic_irq_init(irq); | ||
905 | |||
906 | return irq; | ||
907 | } | ||
908 | |||
909 | int create_irq(void) | ||
910 | { | ||
911 | int nid = cpu_to_node(smp_processor_id()); | ||
912 | int irq; | ||
913 | |||
914 | irq = create_irq_on_node(NR_IRQS_LEGACY, nid); | ||
915 | if (irq == 0) | ||
916 | irq = -1; | ||
917 | |||
918 | return irq; | ||
919 | } | ||
920 | |||
921 | void destroy_irq(unsigned int irq) | ||
922 | { | ||
923 | unsigned long flags; | ||
924 | |||
925 | dynamic_irq_cleanup(irq); | ||
926 | |||
927 | spin_lock_irqsave(&vector_lock, flags); | ||
928 | __clear_bit(irq, intc_irq_map); | ||
929 | spin_unlock_irqrestore(&vector_lock, flags); | ||
930 | } | ||
931 | |||
932 | int reserve_irq_vector(unsigned int irq) | ||
933 | { | ||
934 | unsigned long flags; | ||
935 | int ret = 0; | ||
936 | |||
937 | spin_lock_irqsave(&vector_lock, flags); | ||
938 | if (test_and_set_bit(irq, intc_irq_map)) | ||
939 | ret = -EBUSY; | ||
940 | spin_unlock_irqrestore(&vector_lock, flags); | ||
941 | |||
942 | return ret; | ||
943 | } | ||
944 | |||
945 | void reserve_irq_legacy(void) | ||
946 | { | ||
947 | unsigned long flags; | ||
948 | int i, j; | ||
949 | |||
950 | spin_lock_irqsave(&vector_lock, flags); | ||
951 | j = find_first_bit(intc_irq_map, nr_irqs); | ||
952 | for (i = 0; i < j; i++) | ||
953 | __set_bit(i, intc_irq_map); | ||
954 | spin_unlock_irqrestore(&vector_lock, flags); | ||
955 | } | ||
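The dynamic allocation entry points added above are meant for consumers such as board mux vectors and MSI. A usage sketch, assuming the create_irq()/destroy_irq() prototypes are visible to the caller; the handler and names are invented:

#include <linux/errno.h>
#include <linux/interrupt.h>

static irqreturn_t example_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int example_setup(void)
{
        int irq = create_irq();         /* pick a free vector from intc_irq_map */

        if (irq < 0)
                return -ENOSPC;

        if (request_irq(irq, example_handler, IRQF_DISABLED, "example", NULL)) {
                destroy_irq(irq);
                return -EBUSY;
        }

        return irq;
}

static void example_teardown(int irq)
{
        free_irq(irq, NULL);
        destroy_irq(irq);               /* return the vector to the map */
}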
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index 93c20e135ee..4e8f57d4131 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c | |||
@@ -106,7 +106,7 @@ static void maple_dma_reset(void) | |||
106 | * max delay is 11 | 106 | * max delay is 11 |
107 | */ | 107 | */ |
108 | ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED); | 108 | ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED); |
109 | ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR); | 109 | ctrl_outl(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR); |
110 | ctrl_outl(1, MAPLE_ENABLE); | 110 | ctrl_outl(1, MAPLE_ENABLE); |
111 | } | 111 | } |
112 | 112 | ||
@@ -258,7 +258,7 @@ static void maple_build_block(struct mapleq *mq) | |||
258 | maple_lastptr = maple_sendptr; | 258 | maple_lastptr = maple_sendptr; |
259 | 259 | ||
260 | *maple_sendptr++ = (port << 16) | len | 0x80000000; | 260 | *maple_sendptr++ = (port << 16) | len | 0x80000000; |
261 | *maple_sendptr++ = PHYSADDR(mq->recvbuf->buf); | 261 | *maple_sendptr++ = virt_to_phys(mq->recvbuf->buf); |
262 | *maple_sendptr++ = | 262 | *maple_sendptr++ = |
263 | mq->command | (to << 8) | (from << 16) | (len << 24); | 263 | mq->command | (to << 8) | (from << 16) | (len << 24); |
264 | while (len-- > 0) | 264 | while (len-- > 0) |
diff --git a/arch/sh/kernel/gpio.c b/drivers/sh/pfc.c index d22e5af699f..841ed5030c8 100644 --- a/arch/sh/kernel/gpio.c +++ b/drivers/sh/pfc.c | |||
@@ -7,7 +7,6 @@ | |||
7 | * License. See the file "COPYING" in the main directory of this archive | 7 | * License. See the file "COPYING" in the main directory of this archive |
8 | * for more details. | 8 | * for more details. |
9 | */ | 9 | */ |
10 | |||
11 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
12 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
13 | #include <linux/list.h> | 12 | #include <linux/list.h> |
@@ -35,11 +34,11 @@ static unsigned long gpio_read_raw_reg(unsigned long reg, | |||
35 | { | 34 | { |
36 | switch (reg_width) { | 35 | switch (reg_width) { |
37 | case 8: | 36 | case 8: |
38 | return ctrl_inb(reg); | 37 | return __raw_readb(reg); |
39 | case 16: | 38 | case 16: |
40 | return ctrl_inw(reg); | 39 | return __raw_readw(reg); |
41 | case 32: | 40 | case 32: |
42 | return ctrl_inl(reg); | 41 | return __raw_readl(reg); |
43 | } | 42 | } |
44 | 43 | ||
45 | BUG(); | 44 | BUG(); |
@@ -52,13 +51,13 @@ static void gpio_write_raw_reg(unsigned long reg, | |||
52 | { | 51 | { |
53 | switch (reg_width) { | 52 | switch (reg_width) { |
54 | case 8: | 53 | case 8: |
55 | ctrl_outb(data, reg); | 54 | __raw_writeb(data, reg); |
56 | return; | 55 | return; |
57 | case 16: | 56 | case 16: |
58 | ctrl_outw(data, reg); | 57 | __raw_writew(data, reg); |
59 | return; | 58 | return; |
60 | case 32: | 59 | case 32: |
61 | ctrl_outl(data, reg); | 60 | __raw_writel(data, reg); |
62 | return; | 61 | return; |
63 | } | 62 | } |
64 | 63 | ||
@@ -72,11 +71,9 @@ static void gpio_write_bit(struct pinmux_data_reg *dr, | |||
72 | 71 | ||
73 | pos = dr->reg_width - (in_pos + 1); | 72 | pos = dr->reg_width - (in_pos + 1); |
74 | 73 | ||
75 | #ifdef DEBUG | 74 | pr_debug("write_bit addr = %lx, value = %ld, pos = %ld, " |
76 | pr_info("write_bit addr = %lx, value = %ld, pos = %ld, " | 75 | "r_width = %ld\n", |
77 | "r_width = %ld\n", | 76 | dr->reg, !!value, pos, dr->reg_width); |
78 | dr->reg, !!value, pos, dr->reg_width); | ||
79 | #endif | ||
80 | 77 | ||
81 | if (value) | 78 | if (value) |
82 | set_bit(pos, &dr->reg_shadow); | 79 | set_bit(pos, &dr->reg_shadow); |
@@ -95,11 +92,9 @@ static int gpio_read_reg(unsigned long reg, unsigned long reg_width, | |||
95 | mask = (1 << field_width) - 1; | 92 | mask = (1 << field_width) - 1; |
96 | pos = reg_width - ((in_pos + 1) * field_width); | 93 | pos = reg_width - ((in_pos + 1) * field_width); |
97 | 94 | ||
98 | #ifdef DEBUG | 95 | pr_debug("read_reg: addr = %lx, pos = %ld, " |
99 | pr_info("read_reg: addr = %lx, pos = %ld, " | 96 | "r_width = %ld, f_width = %ld\n", |
100 | "r_width = %ld, f_width = %ld\n", | 97 | reg, pos, reg_width, field_width); |
101 | reg, pos, reg_width, field_width); | ||
102 | #endif | ||
103 | 98 | ||
104 | data = gpio_read_raw_reg(reg, reg_width); | 99 | data = gpio_read_raw_reg(reg, reg_width); |
105 | return (data >> pos) & mask; | 100 | return (data >> pos) & mask; |
@@ -114,24 +109,22 @@ static void gpio_write_reg(unsigned long reg, unsigned long reg_width, | |||
114 | mask = (1 << field_width) - 1; | 109 | mask = (1 << field_width) - 1; |
115 | pos = reg_width - ((in_pos + 1) * field_width); | 110 | pos = reg_width - ((in_pos + 1) * field_width); |
116 | 111 | ||
117 | #ifdef DEBUG | 112 | pr_debug("write_reg addr = %lx, value = %ld, pos = %ld, " |
118 | pr_info("write_reg addr = %lx, value = %ld, pos = %ld, " | 113 | "r_width = %ld, f_width = %ld\n", |
119 | "r_width = %ld, f_width = %ld\n", | 114 | reg, value, pos, reg_width, field_width); |
120 | reg, value, pos, reg_width, field_width); | ||
121 | #endif | ||
122 | 115 | ||
123 | mask = ~(mask << pos); | 116 | mask = ~(mask << pos); |
124 | value = value << pos; | 117 | value = value << pos; |
125 | 118 | ||
126 | switch (reg_width) { | 119 | switch (reg_width) { |
127 | case 8: | 120 | case 8: |
128 | ctrl_outb((ctrl_inb(reg) & mask) | value, reg); | 121 | __raw_writeb((__raw_readb(reg) & mask) | value, reg); |
129 | break; | 122 | break; |
130 | case 16: | 123 | case 16: |
131 | ctrl_outw((ctrl_inw(reg) & mask) | value, reg); | 124 | __raw_writew((__raw_readw(reg) & mask) | value, reg); |
132 | break; | 125 | break; |
133 | case 32: | 126 | case 32: |
134 | ctrl_outl((ctrl_inl(reg) & mask) | value, reg); | 127 | __raw_writel((__raw_readl(reg) & mask) | value, reg); |
135 | break; | 128 | break; |
136 | } | 129 | } |
137 | } | 130 | } |
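pr_debug() already compiles to nothing unless DEBUG is defined (or dynamic debug is enabled), so wrapping the calls in #ifdef DEBUG was redundant. A minimal sketch:

#define DEBUG                   /* or enable CONFIG_DYNAMIC_DEBUG */
#include <linux/kernel.h>

static void example_trace(unsigned long reg, unsigned long value)
{
        /* emitted only when DEBUG/dynamic debug is active */
        pr_debug("write addr = %lx, value = %lu\n", reg, value);
}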
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 3ad5157f989..b4b5de930cf 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c | |||
@@ -281,18 +281,34 @@ static void sh_mobile_lcdc_deferred_io(struct fb_info *info, | |||
281 | struct list_head *pagelist) | 281 | struct list_head *pagelist) |
282 | { | 282 | { |
283 | struct sh_mobile_lcdc_chan *ch = info->par; | 283 | struct sh_mobile_lcdc_chan *ch = info->par; |
284 | unsigned int nr_pages; | ||
285 | 284 | ||
286 | /* enable clocks before accessing hardware */ | 285 | /* enable clocks before accessing hardware */ |
287 | sh_mobile_lcdc_clk_on(ch->lcdc); | 286 | sh_mobile_lcdc_clk_on(ch->lcdc); |
288 | 287 | ||
289 | nr_pages = sh_mobile_lcdc_sginit(info, pagelist); | 288 | /* |
290 | dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE); | 289 | * It's possible to get here without anything on the pagelist via |
291 | 290 | * sh_mobile_lcdc_deferred_io_touch() or via a userspace fsync() | |
292 | /* trigger panel update */ | 291 | * invocation. In the former case, the acceleration routines are |
293 | lcdc_write_chan(ch, LDSM2R, 1); | 292 | * stepped in to when using the framebuffer console causing the |
294 | 293 | * workqueue to be scheduled without any dirty pages on the list. | |
295 | dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE); | 294 | * |
295 | * Despite this, a panel update is still needed given that the | ||
296 | * acceleration routines have their own methods for writing in | ||
297 | * that still need to be updated. | ||
298 | * | ||
299 | * The fsync() and empty pagelist case could be optimized for, | ||
300 | * but we don't bother, as any application exhibiting such | ||
301 | * behaviour is fundamentally broken anyways. | ||
302 | */ | ||
303 | if (!list_empty(pagelist)) { | ||
304 | unsigned int nr_pages = sh_mobile_lcdc_sginit(info, pagelist); | ||
305 | |||
306 | /* trigger panel update */ | ||
307 | dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE); | ||
308 | lcdc_write_chan(ch, LDSM2R, 1); | ||
309 | dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE); | ||
310 | } else | ||
311 | lcdc_write_chan(ch, LDSM2R, 1); | ||
296 | } | 312 | } |
297 | 313 | ||
298 | static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info) | 314 | static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info) |
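For context, the function reworked above is a standard fbdev deferred-I/O callback: dirty pages are queued and the callback runs from a workqueue after a delay. A sketch of how such a handler is typically hooked up (delay value and names are illustrative):

#include <linux/fb.h>
#include <linux/jiffies.h>
#include <linux/list.h>

static void example_deferred_io(struct fb_info *info,
                                struct list_head *pagelist)
{
        /* map any dirty pages and trigger a panel update, as above */
}

static struct fb_deferred_io example_defio = {
        .delay          = HZ / 20,      /* illustrative flush interval */
        .deferred_io    = example_deferred_io,
};

/* in probe: info->fbdefio = &example_defio; fb_deferred_io_init(info); */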
diff --git a/arch/sh/include/asm/sh_keysc.h b/include/linux/input/sh_keysc.h index 4a65b1e40ea..c211b5cf08e 100644 --- a/arch/sh/include/asm/sh_keysc.h +++ b/include/linux/input/sh_keysc.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_KEYSC_H__ | 1 | #ifndef __SH_KEYSC_H__ |
2 | #define __ASM_KEYSC_H__ | 2 | #define __SH_KEYSC_H__ |
3 | 3 | ||
4 | #define SH_KEYSC_MAXKEYS 30 | 4 | #define SH_KEYSC_MAXKEYS 30 |
5 | 5 | ||
@@ -11,4 +11,4 @@ struct sh_keysc_info { | |||
11 | int keycodes[SH_KEYSC_MAXKEYS]; | 11 | int keycodes[SH_KEYSC_MAXKEYS]; |
12 | }; | 12 | }; |
13 | 13 | ||
14 | #endif /* __ASM_KEYSC_H__ */ | 14 | #endif /* __SH_KEYSC_H__ */ |
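With the header now living under include/linux/input/, board code picks it up as <linux/input/sh_keysc.h>. A partial board-data sketch; only the keycodes[] member is visible in this hunk, so the remaining sh_keysc_info fields are omitted and the key choices are illustrative:

#include <linux/input.h>
#include <linux/input/sh_keysc.h>

static struct sh_keysc_info example_keysc_info = {
        .keycodes = {
                KEY_A, KEY_B, KEY_C,
                /* up to SH_KEYSC_MAXKEYS (30) entries, rest default to 0 */
        },
        /* real boards also fill in the fields not shown in this diff */
};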
diff --git a/include/linux/mfd/sh_mobile_sdhi.h b/include/linux/mfd/sh_mobile_sdhi.h new file mode 100644 index 00000000000..3bcd7163485 --- /dev/null +++ b/include/linux/mfd/sh_mobile_sdhi.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef __SH_MOBILE_SDHI_H__ | ||
2 | #define __SH_MOBILE_SDHI_H__ | ||
3 | |||
4 | struct sh_mobile_sdhi_info { | ||
5 | void (*set_pwr)(struct platform_device *pdev, int state); | ||
6 | }; | ||
7 | |||
8 | #endif /* __SH_MOBILE_SDHI_H__ */ | ||
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h index 68e212ff9dd..4ef246f1465 100644 --- a/include/linux/sh_intc.h +++ b/include/linux/sh_intc.h | |||
@@ -57,10 +57,8 @@ struct intc_desc { | |||
57 | struct intc_sense_reg *sense_regs; | 57 | struct intc_sense_reg *sense_regs; |
58 | unsigned int nr_sense_regs; | 58 | unsigned int nr_sense_regs; |
59 | char *name; | 59 | char *name; |
60 | #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) | ||
61 | struct intc_mask_reg *ack_regs; | 60 | struct intc_mask_reg *ack_regs; |
62 | unsigned int nr_ack_regs; | 61 | unsigned int nr_ack_regs; |
63 | #endif | ||
64 | }; | 62 | }; |
65 | 63 | ||
66 | #define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a) | 64 | #define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a) |
@@ -73,7 +71,6 @@ struct intc_desc symbol __initdata = { \ | |||
73 | chipname, \ | 71 | chipname, \ |
74 | } | 72 | } |
75 | 73 | ||
76 | #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) | ||
77 | #define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups, \ | 74 | #define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups, \ |
78 | mask_regs, prio_regs, sense_regs, ack_regs) \ | 75 | mask_regs, prio_regs, sense_regs, ack_regs) \ |
79 | struct intc_desc symbol __initdata = { \ | 76 | struct intc_desc symbol __initdata = { \ |
@@ -83,9 +80,11 @@ struct intc_desc symbol __initdata = { \ | |||
83 | chipname, \ | 80 | chipname, \ |
84 | _INTC_ARRAY(ack_regs), \ | 81 | _INTC_ARRAY(ack_regs), \ |
85 | } | 82 | } |
86 | #endif | ||
87 | 83 | ||
88 | void __init register_intc_controller(struct intc_desc *desc); | 84 | void __init register_intc_controller(struct intc_desc *desc); |
89 | int intc_set_priority(unsigned int irq, unsigned int prio); | 85 | int intc_set_priority(unsigned int irq, unsigned int prio); |
90 | 86 | ||
87 | int reserve_irq_vector(unsigned int irq); | ||
88 | void reserve_irq_legacy(void); | ||
89 | |||
91 | #endif /* __SH_INTC_H */ | 90 | #endif /* __SH_INTC_H */ |
diff --git a/include/linux/sh_pfc.h b/include/linux/sh_pfc.h new file mode 100644 index 00000000000..07c08af9f8f --- /dev/null +++ b/include/linux/sh_pfc.h | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * SuperH Pin Function Controller Support | ||
3 | * | ||
4 | * Copyright (c) 2008 Magnus Damm | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #ifndef __SH_PFC_H | ||
12 | #define __SH_PFC_H | ||
13 | |||
14 | #include <asm-generic/gpio.h> | ||
15 | |||
16 | typedef unsigned short pinmux_enum_t; | ||
17 | typedef unsigned short pinmux_flag_t; | ||
18 | |||
19 | #define PINMUX_TYPE_NONE 0 | ||
20 | #define PINMUX_TYPE_FUNCTION 1 | ||
21 | #define PINMUX_TYPE_GPIO 2 | ||
22 | #define PINMUX_TYPE_OUTPUT 3 | ||
23 | #define PINMUX_TYPE_INPUT 4 | ||
24 | #define PINMUX_TYPE_INPUT_PULLUP 5 | ||
25 | #define PINMUX_TYPE_INPUT_PULLDOWN 6 | ||
26 | |||
27 | #define PINMUX_FLAG_TYPE (0x7) | ||
28 | #define PINMUX_FLAG_WANT_PULLUP (1 << 3) | ||
29 | #define PINMUX_FLAG_WANT_PULLDOWN (1 << 4) | ||
30 | |||
31 | #define PINMUX_FLAG_DBIT_SHIFT 5 | ||
32 | #define PINMUX_FLAG_DBIT (0x1f << PINMUX_FLAG_DBIT_SHIFT) | ||
33 | #define PINMUX_FLAG_DREG_SHIFT 10 | ||
34 | #define PINMUX_FLAG_DREG (0x3f << PINMUX_FLAG_DREG_SHIFT) | ||
35 | |||
36 | struct pinmux_gpio { | ||
37 | pinmux_enum_t enum_id; | ||
38 | pinmux_flag_t flags; | ||
39 | }; | ||
40 | |||
41 | #define PINMUX_GPIO(gpio, data_or_mark) [gpio] = { data_or_mark } | ||
42 | #define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0 | ||
43 | |||
44 | struct pinmux_cfg_reg { | ||
45 | unsigned long reg, reg_width, field_width; | ||
46 | unsigned long *cnt; | ||
47 | pinmux_enum_t *enum_ids; | ||
48 | }; | ||
49 | |||
50 | #define PINMUX_CFG_REG(name, r, r_width, f_width) \ | ||
51 | .reg = r, .reg_width = r_width, .field_width = f_width, \ | ||
52 | .cnt = (unsigned long [r_width / f_width]) {}, \ | ||
53 | .enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)]) \ | ||
54 | |||
55 | struct pinmux_data_reg { | ||
56 | unsigned long reg, reg_width, reg_shadow; | ||
57 | pinmux_enum_t *enum_ids; | ||
58 | }; | ||
59 | |||
60 | #define PINMUX_DATA_REG(name, r, r_width) \ | ||
61 | .reg = r, .reg_width = r_width, \ | ||
62 | .enum_ids = (pinmux_enum_t [r_width]) \ | ||
63 | |||
64 | struct pinmux_range { | ||
65 | pinmux_enum_t begin; | ||
66 | pinmux_enum_t end; | ||
67 | pinmux_enum_t force; | ||
68 | }; | ||
69 | |||
70 | struct pinmux_info { | ||
71 | char *name; | ||
72 | pinmux_enum_t reserved_id; | ||
73 | struct pinmux_range data; | ||
74 | struct pinmux_range input; | ||
75 | struct pinmux_range input_pd; | ||
76 | struct pinmux_range input_pu; | ||
77 | struct pinmux_range output; | ||
78 | struct pinmux_range mark; | ||
79 | struct pinmux_range function; | ||
80 | |||
81 | unsigned first_gpio, last_gpio; | ||
82 | |||
83 | struct pinmux_gpio *gpios; | ||
84 | struct pinmux_cfg_reg *cfg_regs; | ||
85 | struct pinmux_data_reg *data_regs; | ||
86 | |||
87 | pinmux_enum_t *gpio_data; | ||
88 | unsigned int gpio_data_size; | ||
89 | |||
90 | unsigned long *gpio_in_use; | ||
91 | struct gpio_chip chip; | ||
92 | }; | ||
93 | |||
94 | int register_pinmux(struct pinmux_info *pip); | ||
95 | |||
96 | #endif /* __SH_PFC_H */ | ||
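The header provides the building blocks a CPU-specific pin function table is made of. A heavily abbreviated, structural sketch follows; the enum values and register address are invented, and a real table also supplies cfg_regs, gpio_data and the input/output ranges before calling register_pinmux():

#include <linux/init.h>
#include <linux/sh_pfc.h>

enum {
        PINMUX_RESERVED = 0,
        GPIO_PTA0_DATA,                 /* invented data enum for one pin */
};

static struct pinmux_gpio example_gpios[] = {
        PINMUX_GPIO(0, GPIO_PTA0_DATA), /* gpio 0 backed by pin PTA0 */
};

static struct pinmux_data_reg example_data_regs[] = {
        { PINMUX_DATA_REG("PADR", 0xa4050120, 8) { GPIO_PTA0_DATA } },
        { },
};

static struct pinmux_info example_pinmux_info = {
        .name           = "example_pfc",
        .reserved_id    = PINMUX_RESERVED,
        .first_gpio     = 0,
        .last_gpio      = 0,
        .gpios          = example_gpios,
        .data_regs      = example_data_regs,
        /* incomplete: cfg_regs, gpio_data and the enum ranges are omitted */
};

static int __init example_pinmux_setup(void)
{
        return register_pinmux(&example_pinmux_info);
}
arch_initcall(example_pinmux_setup);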