diff options
69 files changed, 1418 insertions, 806 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index 4145511cea5a..796dd54add3a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -9252,6 +9252,16 @@ W: http://www.st.com/spear | |||
9252 | S: Maintained | 9252 | S: Maintained |
9253 | F: drivers/pinctrl/spear/ | 9253 | F: drivers/pinctrl/spear/ |
9254 | 9254 | ||
9255 | PISTACHIO SOC SUPPORT | ||
9256 | M: James Hartley <james.hartley@imgtec.com> | ||
9257 | M: Ionela Voinescu <ionela.voinescu@imgtec.com> | ||
9258 | L: linux-mips@linux-mips.org | ||
9259 | S: Maintained | ||
9260 | F: arch/mips/pistachio/ | ||
9261 | F: arch/mips/include/asm/mach-pistachio/ | ||
9262 | F: arch/mips/boot/dts/pistachio/ | ||
9263 | F: arch/mips/configs/pistachio*_defconfig | ||
9264 | |||
9255 | PKTCDVD DRIVER | 9265 | PKTCDVD DRIVER |
9256 | M: Jiri Kosina <jikos@kernel.org> | 9266 | M: Jiri Kosina <jikos@kernel.org> |
9257 | S: Maintained | 9267 | S: Maintained |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 29867139851e..26388562e300 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -64,6 +64,7 @@ config MIPS | |||
64 | select GENERIC_TIME_VSYSCALL | 64 | select GENERIC_TIME_VSYSCALL |
65 | select ARCH_CLOCKSOURCE_DATA | 65 | select ARCH_CLOCKSOURCE_DATA |
66 | select HANDLE_DOMAIN_IRQ | 66 | select HANDLE_DOMAIN_IRQ |
67 | select HAVE_EXIT_THREAD | ||
67 | 68 | ||
68 | menu "Machine selection" | 69 | menu "Machine selection" |
69 | 70 | ||
@@ -384,7 +385,7 @@ config MACH_PISTACHIO | |||
384 | select CLKSRC_MIPS_GIC | 385 | select CLKSRC_MIPS_GIC |
385 | select COMMON_CLK | 386 | select COMMON_CLK |
386 | select CSRC_R4K | 387 | select CSRC_R4K |
387 | select DMA_MAYBE_COHERENT | 388 | select DMA_NONCOHERENT |
388 | select GPIOLIB | 389 | select GPIOLIB |
389 | select IRQ_MIPS_CPU | 390 | select IRQ_MIPS_CPU |
390 | select LIBFDT | 391 | select LIBFDT |
@@ -880,7 +881,6 @@ config CAVIUM_OCTEON_SOC | |||
880 | select SYS_SUPPORTS_HOTPLUG_CPU if CPU_BIG_ENDIAN | 881 | select SYS_SUPPORTS_HOTPLUG_CPU if CPU_BIG_ENDIAN |
881 | select SYS_HAS_EARLY_PRINTK | 882 | select SYS_HAS_EARLY_PRINTK |
882 | select SYS_HAS_CPU_CAVIUM_OCTEON | 883 | select SYS_HAS_CPU_CAVIUM_OCTEON |
883 | select SWAP_IO_SPACE | ||
884 | select HW_HAS_PCI | 884 | select HW_HAS_PCI |
885 | select ZONE_DMA32 | 885 | select ZONE_DMA32 |
886 | select HOLES_IN_ZONE | 886 | select HOLES_IN_ZONE |
@@ -1111,16 +1111,6 @@ config NEED_DMA_MAP_STATE | |||
1111 | config SYS_HAS_EARLY_PRINTK | 1111 | config SYS_HAS_EARLY_PRINTK |
1112 | bool | 1112 | bool |
1113 | 1113 | ||
1114 | config HOTPLUG_CPU | ||
1115 | bool "Support for hot-pluggable CPUs" | ||
1116 | depends on SMP && SYS_SUPPORTS_HOTPLUG_CPU | ||
1117 | help | ||
1118 | Say Y here to allow turning CPUs off and on. CPUs can be | ||
1119 | controlled through /sys/devices/system/cpu. | ||
1120 | (Note: power management support will enable this option | ||
1121 | automatically on SMP systems. ) | ||
1122 | Say N if you want to disable CPU hotplug. | ||
1123 | |||
1124 | config SYS_SUPPORTS_HOTPLUG_CPU | 1114 | config SYS_SUPPORTS_HOTPLUG_CPU |
1125 | bool | 1115 | bool |
1126 | 1116 | ||
@@ -1406,7 +1396,6 @@ config CPU_LOONGSON1B | |||
1406 | bool "Loongson 1B" | 1396 | bool "Loongson 1B" |
1407 | depends on SYS_HAS_CPU_LOONGSON1B | 1397 | depends on SYS_HAS_CPU_LOONGSON1B |
1408 | select CPU_LOONGSON1 | 1398 | select CPU_LOONGSON1 |
1409 | select ARCH_WANT_OPTIONAL_GPIOLIB | ||
1410 | select LEDS_GPIO_REGISTER | 1399 | select LEDS_GPIO_REGISTER |
1411 | help | 1400 | help |
1412 | The Loongson 1B is a 32-bit SoC, which implements the MIPS32 | 1401 | The Loongson 1B is a 32-bit SoC, which implements the MIPS32 |
@@ -2636,6 +2625,16 @@ config SMP | |||
2636 | 2625 | ||
2637 | If you don't know what to do here, say N. | 2626 | If you don't know what to do here, say N. |
2638 | 2627 | ||
2628 | config HOTPLUG_CPU | ||
2629 | bool "Support for hot-pluggable CPUs" | ||
2630 | depends on SMP && SYS_SUPPORTS_HOTPLUG_CPU | ||
2631 | help | ||
2632 | Say Y here to allow turning CPUs off and on. CPUs can be | ||
2633 | controlled through /sys/devices/system/cpu. | ||
2634 | (Note: power management support will enable this option | ||
2635 | automatically on SMP systems. ) | ||
2636 | Say N if you want to disable CPU hotplug. | ||
2637 | |||
2639 | config SMP_UP | 2638 | config SMP_UP |
2640 | bool | 2639 | bool |
2641 | 2640 | ||
@@ -2887,10 +2886,10 @@ choice | |||
2887 | the documented boot protocol using a device tree. | 2886 | the documented boot protocol using a device tree. |
2888 | 2887 | ||
2889 | config MIPS_RAW_APPENDED_DTB | 2888 | config MIPS_RAW_APPENDED_DTB |
2890 | bool "vmlinux.bin" | 2889 | bool "vmlinux.bin or vmlinuz.bin" |
2891 | help | 2890 | help |
2892 | With this option, the boot code will look for a device tree binary | 2891 | With this option, the boot code will look for a device tree binary |
2893 | DTB) appended to raw vmlinux.bin (without decompressor). | 2892 | DTB) appended to raw vmlinux.bin or vmlinuz.bin. |
2894 | (e.g. cat vmlinux.bin <filename>.dtb > vmlinux_w_dtb). | 2893 | (e.g. cat vmlinux.bin <filename>.dtb > vmlinux_w_dtb). |
2895 | 2894 | ||
2896 | This is meant as a backward compatibility convenience for those | 2895 | This is meant as a backward compatibility convenience for those |
@@ -2902,24 +2901,6 @@ choice | |||
2902 | look like a DTB header after a reboot if no actual DTB is appended | 2901 | look like a DTB header after a reboot if no actual DTB is appended |
2903 | to vmlinux.bin. Do not leave this option active in a production kernel | 2902 | to vmlinux.bin. Do not leave this option active in a production kernel |
2904 | if you don't intend to always append a DTB. | 2903 | if you don't intend to always append a DTB. |
2905 | |||
2906 | config MIPS_ZBOOT_APPENDED_DTB | ||
2907 | bool "vmlinuz.bin" | ||
2908 | depends on SYS_SUPPORTS_ZBOOT | ||
2909 | help | ||
2910 | With this option, the boot code will look for a device tree binary | ||
2911 | DTB) appended to raw vmlinuz.bin (with decompressor). | ||
2912 | (e.g. cat vmlinuz.bin <filename>.dtb > vmlinuz_w_dtb). | ||
2913 | |||
2914 | This is meant as a backward compatibility convenience for those | ||
2915 | systems with a bootloader that can't be upgraded to accommodate | ||
2916 | the documented boot protocol using a device tree. | ||
2917 | |||
2918 | Beware that there is very little in terms of protection against | ||
2919 | this option being confused by leftover garbage in memory that might | ||
2920 | look like a DTB header after a reboot if no actual DTB is appended | ||
2921 | to vmlinuz.bin. Do not leave this option active in a production kernel | ||
2922 | if you don't intend to always append a DTB. | ||
2923 | endchoice | 2904 | endchoice |
2924 | 2905 | ||
2925 | choice | 2906 | choice |
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 3a0019deb7f7..f206dafbb0a3 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c | |||
@@ -203,8 +203,8 @@ void __init plat_mem_setup(void) | |||
203 | fdt_start = fw_getenvl("fdt_start"); | 203 | fdt_start = fw_getenvl("fdt_start"); |
204 | if (fdt_start) | 204 | if (fdt_start) |
205 | __dt_setup_arch((void *)KSEG0ADDR(fdt_start)); | 205 | __dt_setup_arch((void *)KSEG0ADDR(fdt_start)); |
206 | else if (fw_arg0 == -2) | 206 | else if (fw_passed_dtb) |
207 | __dt_setup_arch((void *)KSEG0ADDR(fw_arg1)); | 207 | __dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb)); |
208 | 208 | ||
209 | if (mips_machtype != ATH79_MACH_GENERIC_OF) { | 209 | if (mips_machtype != ATH79_MACH_GENERIC_OF) { |
210 | ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE, | 210 | ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE, |
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c index f146d1219bde..6776042679dd 100644 --- a/arch/mips/bmips/setup.c +++ b/arch/mips/bmips/setup.c | |||
@@ -162,8 +162,8 @@ void __init plat_mem_setup(void) | |||
162 | /* intended to somewhat resemble ARM; see Documentation/arm/Booting */ | 162 | /* intended to somewhat resemble ARM; see Documentation/arm/Booting */ |
163 | if (fw_arg0 == 0 && fw_arg1 == 0xffffffff) | 163 | if (fw_arg0 == 0 && fw_arg1 == 0xffffffff) |
164 | dtb = phys_to_virt(fw_arg2); | 164 | dtb = phys_to_virt(fw_arg2); |
165 | else if (fw_arg0 == -2) /* UHI interface */ | 165 | else if (fw_passed_dtb) /* UHI interface */ |
166 | dtb = (void *)fw_arg1; | 166 | dtb = (void *)fw_passed_dtb; |
167 | else if (__dtb_start != __dtb_end) | 167 | else if (__dtb_start != __dtb_end) |
168 | dtb = (void *)__dtb_start; | 168 | dtb = (void *)__dtb_start; |
169 | else | 169 | else |
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c index 080cd53bac36..fdf99e9dd4c3 100644 --- a/arch/mips/boot/compressed/decompress.c +++ b/arch/mips/boot/compressed/decompress.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | #include <linux/libfdt.h> | ||
17 | 18 | ||
18 | #include <asm/addrspace.h> | 19 | #include <asm/addrspace.h> |
19 | 20 | ||
@@ -36,6 +37,8 @@ extern void puthex(unsigned long long val); | |||
36 | #define puthex(val) do {} while (0) | 37 | #define puthex(val) do {} while (0) |
37 | #endif | 38 | #endif |
38 | 39 | ||
40 | extern char __appended_dtb[]; | ||
41 | |||
39 | void error(char *x) | 42 | void error(char *x) |
40 | { | 43 | { |
41 | puts("\n\n"); | 44 | puts("\n\n"); |
@@ -114,6 +117,20 @@ void decompress_kernel(unsigned long boot_heap_start) | |||
114 | __decompress((char *)zimage_start, zimage_size, 0, 0, | 117 | __decompress((char *)zimage_start, zimage_size, 0, 0, |
115 | (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error); | 118 | (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error); |
116 | 119 | ||
120 | if (IS_ENABLED(CONFIG_MIPS_RAW_APPENDED_DTB) && | ||
121 | fdt_magic((void *)&__appended_dtb) == FDT_MAGIC) { | ||
122 | unsigned int image_size, dtb_size; | ||
123 | |||
124 | dtb_size = fdt_totalsize((void *)&__appended_dtb); | ||
125 | |||
126 | /* last four bytes is always image size in little endian */ | ||
127 | image_size = le32_to_cpup((void *)&__image_end - 4); | ||
128 | |||
129 | /* copy dtb to where the booted kernel will expect it */ | ||
130 | memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size, | ||
131 | __appended_dtb, dtb_size); | ||
132 | } | ||
133 | |||
117 | /* FIXME: should we flush cache here? */ | 134 | /* FIXME: should we flush cache here? */ |
118 | puts("Now, booting the kernel...\n"); | 135 | puts("Now, booting the kernel...\n"); |
119 | } | 136 | } |
diff --git a/arch/mips/boot/compressed/head.S b/arch/mips/boot/compressed/head.S index c580e853b9fb..409cb483a9ff 100644 --- a/arch/mips/boot/compressed/head.S +++ b/arch/mips/boot/compressed/head.S | |||
@@ -25,22 +25,6 @@ start: | |||
25 | move s2, a2 | 25 | move s2, a2 |
26 | move s3, a3 | 26 | move s3, a3 |
27 | 27 | ||
28 | #ifdef CONFIG_MIPS_ZBOOT_APPENDED_DTB | ||
29 | PTR_LA t0, __appended_dtb | ||
30 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
31 | li t1, 0xd00dfeed | ||
32 | #else | ||
33 | li t1, 0xedfe0dd0 | ||
34 | #endif | ||
35 | lw t2, (t0) | ||
36 | bne t1, t2, not_found | ||
37 | nop | ||
38 | |||
39 | move s1, t0 | ||
40 | PTR_LI s0, -2 | ||
41 | not_found: | ||
42 | #endif | ||
43 | |||
44 | /* Clear BSS */ | 28 | /* Clear BSS */ |
45 | PTR_LA a0, _edata | 29 | PTR_LA a0, _edata |
46 | PTR_LA a2, _end | 30 | PTR_LA a2, _end |
diff --git a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts index d6bc994f736f..b134798a0fd7 100644 --- a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts +++ b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | /include/ "octeon_3xxx.dtsi" | 11 | /include/ "octeon_3xxx.dtsi" |
12 | #include <dt-bindings/gpio/gpio.h> | ||
12 | 13 | ||
13 | / { | 14 | / { |
14 | model = "dlink,dsr-1000n"; | 15 | model = "dlink,dsr-1000n"; |
@@ -63,12 +64,27 @@ | |||
63 | 64 | ||
64 | usb1 { | 65 | usb1 { |
65 | label = "usb1"; | 66 | label = "usb1"; |
66 | gpios = <&gpio 9 1>; /* Active low */ | 67 | gpios = <&gpio 9 GPIO_ACTIVE_LOW>; |
67 | }; | 68 | }; |
68 | 69 | ||
69 | usb2 { | 70 | usb2 { |
70 | label = "usb2"; | 71 | label = "usb2"; |
71 | gpios = <&gpio 10 1>; /* Active low */ | 72 | gpios = <&gpio 10 GPIO_ACTIVE_LOW>; |
73 | }; | ||
74 | |||
75 | wps { | ||
76 | label = "wps"; | ||
77 | gpios = <&gpio 11 GPIO_ACTIVE_LOW>; | ||
78 | }; | ||
79 | |||
80 | wireless1 { | ||
81 | label = "5g"; | ||
82 | gpios = <&gpio 17 GPIO_ACTIVE_LOW>; | ||
83 | }; | ||
84 | |||
85 | wireless2 { | ||
86 | label = "2.4g"; | ||
87 | gpios = <&gpio 18 GPIO_ACTIVE_LOW>; | ||
72 | }; | 88 | }; |
73 | }; | 89 | }; |
74 | 90 | ||
diff --git a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts index de61f02d3ef6..ca6b4467bcd3 100644 --- a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts +++ b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts | |||
@@ -388,16 +388,4 @@ | |||
388 | usbn = &usbn; | 388 | usbn = &usbn; |
389 | led0 = &led0; | 389 | led0 = &led0; |
390 | }; | 390 | }; |
391 | |||
392 | dsr1000n-leds { | ||
393 | compatible = "gpio-leds"; | ||
394 | usb1 { | ||
395 | label = "usb1"; | ||
396 | gpios = <&gpio 9 1>; /* Active low */ | ||
397 | }; | ||
398 | usb2 { | ||
399 | label = "usb2"; | ||
400 | gpios = <&gpio 10 1>; /* Active low */ | ||
401 | }; | ||
402 | }; | ||
403 | }; | 391 | }; |
diff --git a/arch/mips/boot/tools/relocs_64.c b/arch/mips/boot/tools/relocs_64.c index b671b5e2dcd8..06066e6ac2f9 100644 --- a/arch/mips/boot/tools/relocs_64.c +++ b/arch/mips/boot/tools/relocs_64.c | |||
@@ -9,17 +9,20 @@ | |||
9 | 9 | ||
10 | typedef uint8_t Elf64_Byte; | 10 | typedef uint8_t Elf64_Byte; |
11 | 11 | ||
12 | typedef struct { | 12 | typedef union { |
13 | Elf64_Word r_sym; /* Symbol index. */ | 13 | struct { |
14 | Elf64_Byte r_ssym; /* Special symbol. */ | 14 | Elf64_Word r_sym; /* Symbol index. */ |
15 | Elf64_Byte r_type3; /* Third relocation. */ | 15 | Elf64_Byte r_ssym; /* Special symbol. */ |
16 | Elf64_Byte r_type2; /* Second relocation. */ | 16 | Elf64_Byte r_type3; /* Third relocation. */ |
17 | Elf64_Byte r_type; /* First relocation. */ | 17 | Elf64_Byte r_type2; /* Second relocation. */ |
18 | Elf64_Byte r_type; /* First relocation. */ | ||
19 | } fields; | ||
20 | Elf64_Xword unused; | ||
18 | } Elf64_Mips_Rela; | 21 | } Elf64_Mips_Rela; |
19 | 22 | ||
20 | #define ELF_CLASS ELFCLASS64 | 23 | #define ELF_CLASS ELFCLASS64 |
21 | #define ELF_R_SYM(val) (((Elf64_Mips_Rela *)(&val))->r_sym) | 24 | #define ELF_R_SYM(val) (((Elf64_Mips_Rela *)(&val))->fields.r_sym) |
22 | #define ELF_R_TYPE(val) (((Elf64_Mips_Rela *)(&val))->r_type) | 25 | #define ELF_R_TYPE(val) (((Elf64_Mips_Rela *)(&val))->fields.r_type) |
23 | #define ELF_ST_TYPE(o) ELF64_ST_TYPE(o) | 26 | #define ELF_ST_TYPE(o) ELF64_ST_TYPE(o) |
24 | #define ELF_ST_BIND(o) ELF64_ST_BIND(o) | 27 | #define ELF_ST_BIND(o) ELF64_ST_BIND(o) |
25 | #define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o) | 28 | #define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o) |
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c index 504ed61a47cd..b65a6c1ac016 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c +++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c | |||
@@ -668,7 +668,7 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, | |||
668 | /* | 668 | /* |
669 | * Round size up to mult of minimum alignment bytes We need | 669 | * Round size up to mult of minimum alignment bytes We need |
670 | * the actual size allocated to allow for blocks to be | 670 | * the actual size allocated to allow for blocks to be |
671 | * coallesced when they are freed. The alloc routine does the | 671 | * coalesced when they are freed. The alloc routine does the |
672 | * same rounding up on all allocations. | 672 | * same rounding up on all allocations. |
673 | */ | 673 | */ |
674 | size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE); | 674 | size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE); |
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c index 36e30d65ba05..ff49fc04500c 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c | |||
@@ -186,15 +186,6 @@ int cvmx_helper_board_get_mii_address(int ipd_port) | |||
186 | return 7 - ipd_port; | 186 | return 7 - ipd_port; |
187 | else | 187 | else |
188 | return -1; | 188 | return -1; |
189 | case CVMX_BOARD_TYPE_CUST_DSR1000N: | ||
190 | /* | ||
191 | * Port 2 connects to Broadcom PHY (B5081). Other ports (0-1) | ||
192 | * connect to a switch (BCM53115). | ||
193 | */ | ||
194 | if (ipd_port == 2) | ||
195 | return 8; | ||
196 | else | ||
197 | return -1; | ||
198 | case CVMX_BOARD_TYPE_KONTRON_S1901: | 189 | case CVMX_BOARD_TYPE_KONTRON_S1901: |
199 | if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT) | 190 | if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT) |
200 | return 1; | 191 | return 1; |
@@ -289,18 +280,6 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port) | |||
289 | return result; | 280 | return result; |
290 | } | 281 | } |
291 | break; | 282 | break; |
292 | case CVMX_BOARD_TYPE_CUST_DSR1000N: | ||
293 | if (ipd_port == 0 || ipd_port == 1) { | ||
294 | /* Ports 0 and 1 connect to a switch (BCM53115). */ | ||
295 | result.s.link_up = 1; | ||
296 | result.s.full_duplex = 1; | ||
297 | result.s.speed = 1000; | ||
298 | return result; | ||
299 | } else { | ||
300 | /* Port 2 uses a Broadcom PHY (B5081). */ | ||
301 | is_broadcom_phy = 1; | ||
302 | } | ||
303 | break; | ||
304 | } | 283 | } |
305 | 284 | ||
306 | phy_addr = cvmx_helper_board_get_mii_address(ipd_port); | 285 | phy_addr = cvmx_helper_board_get_mii_address(ipd_port); |
@@ -765,7 +744,6 @@ enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(vo | |||
765 | case CVMX_BOARD_TYPE_LANAI2_G: | 744 | case CVMX_BOARD_TYPE_LANAI2_G: |
766 | case CVMX_BOARD_TYPE_NIC10E_66: | 745 | case CVMX_BOARD_TYPE_NIC10E_66: |
767 | case CVMX_BOARD_TYPE_UBNT_E100: | 746 | case CVMX_BOARD_TYPE_UBNT_E100: |
768 | case CVMX_BOARD_TYPE_CUST_DSR1000N: | ||
769 | return USB_CLOCK_TYPE_CRYSTAL_12; | 747 | return USB_CLOCK_TYPE_CRYSTAL_12; |
770 | case CVMX_BOARD_TYPE_NIC10E: | 748 | case CVMX_BOARD_TYPE_NIC10E: |
771 | return USB_CLOCK_TYPE_REF_12; | 749 | return USB_CLOCK_TYPE_REF_12; |
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 368eb490354c..5a9b87b7993e 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c | |||
@@ -1260,7 +1260,7 @@ static int octeon_irq_gpio_map(struct irq_domain *d, | |||
1260 | 1260 | ||
1261 | line = (hw + gpiod->base_hwirq) >> 6; | 1261 | line = (hw + gpiod->base_hwirq) >> 6; |
1262 | bit = (hw + gpiod->base_hwirq) & 63; | 1262 | bit = (hw + gpiod->base_hwirq) & 63; |
1263 | if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) || | 1263 | if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) || |
1264 | octeon_irq_ciu_to_irq[line][bit] != 0) | 1264 | octeon_irq_ciu_to_irq[line][bit] != 0) |
1265 | return -EINVAL; | 1265 | return -EINVAL; |
1266 | 1266 | ||
@@ -1542,10 +1542,6 @@ static int __init octeon_irq_init_ciu( | |||
1542 | goto err; | 1542 | goto err; |
1543 | } | 1543 | } |
1544 | 1544 | ||
1545 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); | ||
1546 | if (r) | ||
1547 | goto err; | ||
1548 | |||
1549 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); | 1545 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); |
1550 | if (r) | 1546 | if (r) |
1551 | goto err; | 1547 | goto err; |
@@ -1559,10 +1555,6 @@ static int __init octeon_irq_init_ciu( | |||
1559 | goto err; | 1555 | goto err; |
1560 | } | 1556 | } |
1561 | 1557 | ||
1562 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); | ||
1563 | if (r) | ||
1564 | goto err; | ||
1565 | |||
1566 | /* Enable the CIU lines */ | 1558 | /* Enable the CIU lines */ |
1567 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | 1559 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
1568 | if (octeon_irq_use_ip4) | 1560 | if (octeon_irq_use_ip4) |
@@ -2077,10 +2069,6 @@ static int __init octeon_irq_init_ciu2( | |||
2077 | goto err; | 2069 | goto err; |
2078 | } | 2070 | } |
2079 | 2071 | ||
2080 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); | ||
2081 | if (r) | ||
2082 | goto err; | ||
2083 | |||
2084 | for (i = 0; i < 4; i++) { | 2072 | for (i = 0; i < 4; i++) { |
2085 | r = octeon_irq_force_ciu_mapping( | 2073 | r = octeon_irq_force_ciu_mapping( |
2086 | ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); | 2074 | ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); |
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c index 7aeafedff94e..b31fbc9d6eae 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c | |||
@@ -3,33 +3,27 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2004-2011 Cavium Networks | 6 | * Copyright (C) 2004-2016 Cavium Networks |
7 | * Copyright (C) 2008 Wind River Systems | 7 | * Copyright (C) 2008 Wind River Systems |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/delay.h> | ||
11 | #include <linux/init.h> | 10 | #include <linux/init.h> |
12 | #include <linux/irq.h> | 11 | #include <linux/delay.h> |
13 | #include <linux/i2c.h> | ||
14 | #include <linux/usb.h> | ||
15 | #include <linux/dma-mapping.h> | ||
16 | #include <linux/etherdevice.h> | 12 | #include <linux/etherdevice.h> |
17 | #include <linux/module.h> | ||
18 | #include <linux/mutex.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/of_platform.h> | 13 | #include <linux/of_platform.h> |
22 | #include <linux/of_fdt.h> | 14 | #include <linux/of_fdt.h> |
23 | #include <linux/libfdt.h> | 15 | #include <linux/libfdt.h> |
16 | #include <linux/usb/ehci_def.h> | ||
24 | #include <linux/usb/ehci_pdriver.h> | 17 | #include <linux/usb/ehci_pdriver.h> |
25 | #include <linux/usb/ohci_pdriver.h> | 18 | #include <linux/usb/ohci_pdriver.h> |
26 | 19 | ||
27 | #include <asm/octeon/octeon.h> | 20 | #include <asm/octeon/octeon.h> |
28 | #include <asm/octeon/cvmx-rnm-defs.h> | ||
29 | #include <asm/octeon/cvmx-helper.h> | ||
30 | #include <asm/octeon/cvmx-helper-board.h> | 21 | #include <asm/octeon/cvmx-helper-board.h> |
31 | #include <asm/octeon/cvmx-uctlx-defs.h> | 22 | #include <asm/octeon/cvmx-uctlx-defs.h> |
32 | 23 | ||
24 | #define CVMX_UAHCX_EHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000010ull)) | ||
25 | #define CVMX_UAHCX_OHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000408ull)) | ||
26 | |||
33 | /* Octeon Random Number Generator. */ | 27 | /* Octeon Random Number Generator. */ |
34 | static int __init octeon_rng_device_init(void) | 28 | static int __init octeon_rng_device_init(void) |
35 | { | 29 | { |
@@ -78,12 +72,36 @@ static DEFINE_MUTEX(octeon2_usb_clocks_mutex); | |||
78 | 72 | ||
79 | static int octeon2_usb_clock_start_cnt; | 73 | static int octeon2_usb_clock_start_cnt; |
80 | 74 | ||
75 | static int __init octeon2_usb_reset(void) | ||
76 | { | ||
77 | union cvmx_uctlx_clk_rst_ctl clk_rst_ctl; | ||
78 | u32 ucmd; | ||
79 | |||
80 | if (!OCTEON_IS_OCTEON2()) | ||
81 | return 0; | ||
82 | |||
83 | clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0)); | ||
84 | if (clk_rst_ctl.s.hrst) { | ||
85 | ucmd = cvmx_read64_uint32(CVMX_UAHCX_EHCI_USBCMD); | ||
86 | ucmd &= ~CMD_RUN; | ||
87 | cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd); | ||
88 | mdelay(2); | ||
89 | ucmd |= CMD_RESET; | ||
90 | cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd); | ||
91 | ucmd = cvmx_read64_uint32(CVMX_UAHCX_OHCI_USBCMD); | ||
92 | ucmd |= CMD_RUN; | ||
93 | cvmx_write64_uint32(CVMX_UAHCX_OHCI_USBCMD, ucmd); | ||
94 | } | ||
95 | |||
96 | return 0; | ||
97 | } | ||
98 | arch_initcall(octeon2_usb_reset); | ||
99 | |||
81 | static void octeon2_usb_clocks_start(struct device *dev) | 100 | static void octeon2_usb_clocks_start(struct device *dev) |
82 | { | 101 | { |
83 | u64 div; | 102 | u64 div; |
84 | union cvmx_uctlx_if_ena if_ena; | 103 | union cvmx_uctlx_if_ena if_ena; |
85 | union cvmx_uctlx_clk_rst_ctl clk_rst_ctl; | 104 | union cvmx_uctlx_clk_rst_ctl clk_rst_ctl; |
86 | union cvmx_uctlx_uphy_ctl_status uphy_ctl_status; | ||
87 | union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status; | 105 | union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status; |
88 | int i; | 106 | int i; |
89 | unsigned long io_clk_64_to_ns; | 107 | unsigned long io_clk_64_to_ns; |
@@ -131,6 +149,17 @@ static void octeon2_usb_clocks_start(struct device *dev) | |||
131 | if_ena.s.en = 1; | 149 | if_ena.s.en = 1; |
132 | cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64); | 150 | cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64); |
133 | 151 | ||
152 | for (i = 0; i <= 1; i++) { | ||
153 | port_ctl_status.u64 = | ||
154 | cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0)); | ||
155 | /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */ | ||
156 | port_ctl_status.s.txvreftune = 15; | ||
157 | port_ctl_status.s.txrisetune = 1; | ||
158 | port_ctl_status.s.txpreemphasistune = 1; | ||
159 | cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0), | ||
160 | port_ctl_status.u64); | ||
161 | } | ||
162 | |||
134 | /* Step 3: Configure the reference clock, PHY, and HCLK */ | 163 | /* Step 3: Configure the reference clock, PHY, and HCLK */ |
135 | clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0)); | 164 | clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0)); |
136 | 165 | ||
@@ -218,29 +247,10 @@ static void octeon2_usb_clocks_start(struct device *dev) | |||
218 | clk_rst_ctl.s.p_por = 0; | 247 | clk_rst_ctl.s.p_por = 0; |
219 | cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); | 248 | cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); |
220 | 249 | ||
221 | /* Step 5: Wait 1 ms for the PHY clock to start. */ | 250 | /* Step 5: Wait 3 ms for the PHY clock to start. */ |
222 | mdelay(1); | 251 | mdelay(3); |
223 | 252 | ||
224 | /* | 253 | /* Steps 6..9 for ATE only, are skipped. */ |
225 | * Step 6: Program the reset input from automatic test | ||
226 | * equipment field in the UPHY CSR | ||
227 | */ | ||
228 | uphy_ctl_status.u64 = cvmx_read_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0)); | ||
229 | uphy_ctl_status.s.ate_reset = 1; | ||
230 | cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64); | ||
231 | |||
232 | /* Step 7: Wait for at least 10ns. */ | ||
233 | ndelay(10); | ||
234 | |||
235 | /* Step 8: Clear the ATE_RESET field in the UPHY CSR. */ | ||
236 | uphy_ctl_status.s.ate_reset = 0; | ||
237 | cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64); | ||
238 | |||
239 | /* | ||
240 | * Step 9: Wait for at least 20ns for UPHY to output PHY clock | ||
241 | * signals and OHCI_CLK48 | ||
242 | */ | ||
243 | ndelay(20); | ||
244 | 254 | ||
245 | /* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */ | 255 | /* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */ |
246 | /* 10a */ | 256 | /* 10a */ |
@@ -261,6 +271,20 @@ static void octeon2_usb_clocks_start(struct device *dev) | |||
261 | clk_rst_ctl.s.p_prst = 1; | 271 | clk_rst_ctl.s.p_prst = 1; |
262 | cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); | 272 | cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); |
263 | 273 | ||
274 | /* Step 11b */ | ||
275 | udelay(1); | ||
276 | |||
277 | /* Step 11c */ | ||
278 | clk_rst_ctl.s.p_prst = 0; | ||
279 | cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); | ||
280 | |||
281 | /* Step 11d */ | ||
282 | mdelay(1); | ||
283 | |||
284 | /* Step 11e */ | ||
285 | clk_rst_ctl.s.p_prst = 1; | ||
286 | cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); | ||
287 | |||
264 | /* Step 12: Wait 1 uS. */ | 288 | /* Step 12: Wait 1 uS. */ |
265 | udelay(1); | 289 | udelay(1); |
266 | 290 | ||
@@ -269,21 +293,9 @@ static void octeon2_usb_clocks_start(struct device *dev) | |||
269 | cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); | 293 | cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); |
270 | 294 | ||
271 | end_clock: | 295 | end_clock: |
272 | /* Now we can set some other registers. */ | ||
273 | |||
274 | for (i = 0; i <= 1; i++) { | ||
275 | port_ctl_status.u64 = | ||
276 | cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0)); | ||
277 | /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */ | ||
278 | port_ctl_status.s.txvreftune = 15; | ||
279 | port_ctl_status.s.txrisetune = 1; | ||
280 | port_ctl_status.s.txpreemphasistune = 1; | ||
281 | cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0), | ||
282 | port_ctl_status.u64); | ||
283 | } | ||
284 | |||
285 | /* Set uSOF cycle period to 60,000 bits. */ | 296 | /* Set uSOF cycle period to 60,000 bits. */ |
286 | cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull); | 297 | cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull); |
298 | |||
287 | exit: | 299 | exit: |
288 | mutex_unlock(&octeon2_usb_clocks_mutex); | 300 | mutex_unlock(&octeon2_usb_clocks_mutex); |
289 | } | 301 | } |
@@ -311,7 +323,11 @@ static struct usb_ehci_pdata octeon_ehci_pdata = { | |||
311 | #ifdef __BIG_ENDIAN | 323 | #ifdef __BIG_ENDIAN |
312 | .big_endian_mmio = 1, | 324 | .big_endian_mmio = 1, |
313 | #endif | 325 | #endif |
314 | .dma_mask_64 = 1, | 326 | /* |
327 | * We can DMA from anywhere. But the descriptors must be in | ||
328 | * the lower 4GB. | ||
329 | */ | ||
330 | .dma_mask_64 = 0, | ||
315 | .power_on = octeon_ehci_power_on, | 331 | .power_on = octeon_ehci_power_on, |
316 | .power_off = octeon_ehci_power_off, | 332 | .power_off = octeon_ehci_power_off, |
317 | }; | 333 | }; |
@@ -689,6 +705,10 @@ int __init octeon_prune_device_tree(void) | |||
689 | if (fdt_check_header(initial_boot_params)) | 705 | if (fdt_check_header(initial_boot_params)) |
690 | panic("Corrupt Device Tree."); | 706 | panic("Corrupt Device Tree."); |
691 | 707 | ||
708 | WARN(octeon_bootinfo->board_type == CVMX_BOARD_TYPE_CUST_DSR1000N, | ||
709 | "Built-in DTB booting is deprecated on %s. Please switch to use appended DTB.", | ||
710 | cvmx_board_type_to_string(octeon_bootinfo->board_type)); | ||
711 | |||
692 | aliases = fdt_path_offset(initial_boot_params, "/aliases"); | 712 | aliases = fdt_path_offset(initial_boot_params, "/aliases"); |
693 | if (aliases < 0) { | 713 | if (aliases < 0) { |
694 | pr_err("Error: No /aliases node in device tree."); | 714 | pr_err("Error: No /aliases node in device tree."); |
@@ -1032,13 +1052,6 @@ end_led: | |||
1032 | } | 1052 | } |
1033 | } | 1053 | } |
1034 | 1054 | ||
1035 | if (octeon_bootinfo->board_type != CVMX_BOARD_TYPE_CUST_DSR1000N) { | ||
1036 | int dsr1000n_leds = fdt_path_offset(initial_boot_params, | ||
1037 | "/dsr1000n-leds"); | ||
1038 | if (dsr1000n_leds >= 0) | ||
1039 | fdt_nop_node(initial_boot_params, dsr1000n_leds); | ||
1040 | } | ||
1041 | |||
1042 | return 0; | 1055 | return 0; |
1043 | } | 1056 | } |
1044 | 1057 | ||
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 64f852b063a8..cb16fcc5f8f0 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
@@ -40,9 +40,27 @@ | |||
40 | 40 | ||
41 | #include <asm/octeon/octeon.h> | 41 | #include <asm/octeon/octeon.h> |
42 | #include <asm/octeon/pci-octeon.h> | 42 | #include <asm/octeon/pci-octeon.h> |
43 | #include <asm/octeon/cvmx-mio-defs.h> | ||
44 | #include <asm/octeon/cvmx-rst-defs.h> | 43 | #include <asm/octeon/cvmx-rst-defs.h> |
45 | 44 | ||
45 | /* | ||
46 | * TRUE for devices having registers with little-endian byte | ||
47 | * order, FALSE for registers with native-endian byte order. | ||
48 | * PCI mandates little-endian, USB and SATA are configuraable, | ||
49 | * but we chose little-endian for these. | ||
50 | */ | ||
51 | const bool octeon_should_swizzle_table[256] = { | ||
52 | [0x00] = true, /* bootbus/CF */ | ||
53 | [0x1b] = true, /* PCI mmio window */ | ||
54 | [0x1c] = true, /* PCI mmio window */ | ||
55 | [0x1d] = true, /* PCI mmio window */ | ||
56 | [0x1e] = true, /* PCI mmio window */ | ||
57 | [0x68] = true, /* OCTEON III USB */ | ||
58 | [0x69] = true, /* OCTEON III USB */ | ||
59 | [0x6c] = true, /* OCTEON III SATA */ | ||
60 | [0x6f] = true, /* OCTEON II USB */ | ||
61 | }; | ||
62 | EXPORT_SYMBOL(octeon_should_swizzle_table); | ||
63 | |||
46 | #ifdef CONFIG_PCI | 64 | #ifdef CONFIG_PCI |
47 | extern void pci_console_init(const char *arg); | 65 | extern void pci_console_init(const char *arg); |
48 | #endif | 66 | #endif |
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 33aab89259f3..4d457d602d3b 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c | |||
@@ -271,6 +271,7 @@ static int octeon_cpu_disable(void) | |||
271 | return -ENOTSUPP; | 271 | return -ENOTSUPP; |
272 | 272 | ||
273 | set_cpu_online(cpu, false); | 273 | set_cpu_online(cpu, false); |
274 | calculate_cpu_foreign_map(); | ||
274 | cpumask_clear_cpu(cpu, &cpu_callin_map); | 275 | cpumask_clear_cpu(cpu, &cpu_callin_map); |
275 | octeon_fixup_irqs(); | 276 | octeon_fixup_irqs(); |
276 | 277 | ||
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c index 9a8c2fe8d334..c136a18c7221 100644 --- a/arch/mips/cobalt/setup.c +++ b/arch/mips/cobalt/setup.c | |||
@@ -42,8 +42,8 @@ const char *get_system_type(void) | |||
42 | 42 | ||
43 | /* | 43 | /* |
44 | * Cobalt doesn't have PS/2 keyboard/mouse interfaces, | 44 | * Cobalt doesn't have PS/2 keyboard/mouse interfaces, |
45 | * keyboard conntroller is never used. | 45 | * keyboard controller is never used. |
46 | * Also PCI-ISA bridge DMA contoroller is never used. | 46 | * Also PCI-ISA bridge DMA controller is never used. |
47 | */ | 47 | */ |
48 | static struct resource cobalt_reserved_resources[] = { | 48 | static struct resource cobalt_reserved_resources[] = { |
49 | { /* dma1 */ | 49 | { /* dma1 */ |
diff --git a/arch/mips/configs/ath25_defconfig b/arch/mips/configs/ath25_defconfig new file mode 100644 index 000000000000..2c829950be17 --- /dev/null +++ b/arch/mips/configs/ath25_defconfig | |||
@@ -0,0 +1,119 @@ | |||
1 | CONFIG_ATH25=y | ||
2 | # CONFIG_COMPACTION is not set | ||
3 | CONFIG_HZ_100=y | ||
4 | # CONFIG_SECCOMP is not set | ||
5 | # CONFIG_LOCALVERSION_AUTO is not set | ||
6 | CONFIG_SYSVIPC=y | ||
7 | # CONFIG_CROSS_MEMORY_ATTACH is not set | ||
8 | # CONFIG_FHANDLE is not set | ||
9 | CONFIG_HIGH_RES_TIMERS=y | ||
10 | CONFIG_BLK_DEV_INITRD=y | ||
11 | # CONFIG_RD_GZIP is not set | ||
12 | # CONFIG_RD_BZIP2 is not set | ||
13 | # CONFIG_RD_XZ is not set | ||
14 | # CONFIG_RD_LZO is not set | ||
15 | # CONFIG_RD_LZ4 is not set | ||
16 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | ||
17 | # CONFIG_AIO is not set | ||
18 | CONFIG_EMBEDDED=y | ||
19 | # CONFIG_VM_EVENT_COUNTERS is not set | ||
20 | # CONFIG_SLUB_DEBUG is not set | ||
21 | # CONFIG_COMPAT_BRK is not set | ||
22 | CONFIG_MODULES=y | ||
23 | CONFIG_MODULE_UNLOAD=y | ||
24 | # CONFIG_BLK_DEV_BSG is not set | ||
25 | # CONFIG_IOSCHED_CFQ is not set | ||
26 | # CONFIG_SUSPEND is not set | ||
27 | CONFIG_NET=y | ||
28 | CONFIG_PACKET=y | ||
29 | CONFIG_UNIX=y | ||
30 | CONFIG_INET=y | ||
31 | CONFIG_IP_MULTICAST=y | ||
32 | CONFIG_IP_ADVANCED_ROUTER=y | ||
33 | # CONFIG_INET_XFRM_MODE_TRANSPORT is not set | ||
34 | # CONFIG_INET_XFRM_MODE_TUNNEL is not set | ||
35 | # CONFIG_INET_XFRM_MODE_BEET is not set | ||
36 | # CONFIG_IPV6 is not set | ||
37 | CONFIG_CFG80211=m | ||
38 | CONFIG_MAC80211=m | ||
39 | CONFIG_MAC80211_DEBUGFS=y | ||
40 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
41 | # CONFIG_FIRMWARE_IN_KERNEL is not set | ||
42 | CONFIG_MTD=y | ||
43 | CONFIG_MTD_REDBOOT_PARTS=y | ||
44 | CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2 | ||
45 | CONFIG_MTD_CMDLINE_PARTS=y | ||
46 | CONFIG_MTD_BLOCK=y | ||
47 | CONFIG_MTD_CFI=y | ||
48 | CONFIG_MTD_CFI_ADV_OPTIONS=y | ||
49 | CONFIG_MTD_CFI_GEOMETRY=y | ||
50 | # CONFIG_MTD_MAP_BANK_WIDTH_1 is not set | ||
51 | # CONFIG_MTD_MAP_BANK_WIDTH_4 is not set | ||
52 | # CONFIG_MTD_CFI_I2 is not set | ||
53 | CONFIG_MTD_CFI_AMDSTD=y | ||
54 | CONFIG_MTD_COMPLEX_MAPPINGS=y | ||
55 | CONFIG_MTD_PHYSMAP=y | ||
56 | CONFIG_NETDEVICES=y | ||
57 | # CONFIG_ETHERNET is not set | ||
58 | # CONFIG_WLAN_VENDOR_ADMTEK is not set | ||
59 | CONFIG_ATH5K=m | ||
60 | # CONFIG_WLAN_VENDOR_ATMEL is not set | ||
61 | # CONFIG_WLAN_VENDOR_BROADCOM is not set | ||
62 | # CONFIG_WLAN_VENDOR_CISCO is not set | ||
63 | # CONFIG_WLAN_VENDOR_INTEL is not set | ||
64 | # CONFIG_WLAN_VENDOR_INTERSIL is not set | ||
65 | # CONFIG_WLAN_VENDOR_MARVELL is not set | ||
66 | # CONFIG_WLAN_VENDOR_MEDIATEK is not set | ||
67 | # CONFIG_WLAN_VENDOR_RALINK is not set | ||
68 | # CONFIG_WLAN_VENDOR_REALTEK is not set | ||
69 | # CONFIG_WLAN_VENDOR_RSI is not set | ||
70 | # CONFIG_WLAN_VENDOR_ST is not set | ||
71 | # CONFIG_WLAN_VENDOR_TI is not set | ||
72 | # CONFIG_WLAN_VENDOR_ZYDAS is not set | ||
73 | CONFIG_INPUT=m | ||
74 | # CONFIG_INPUT_KEYBOARD is not set | ||
75 | # CONFIG_INPUT_MOUSE is not set | ||
76 | # CONFIG_SERIO is not set | ||
77 | # CONFIG_VT is not set | ||
78 | # CONFIG_LEGACY_PTYS is not set | ||
79 | # CONFIG_DEVKMEM is not set | ||
80 | CONFIG_SERIAL_8250=y | ||
81 | CONFIG_SERIAL_8250_CONSOLE=y | ||
82 | # CONFIG_SERIAL_8250_PCI is not set | ||
83 | CONFIG_SERIAL_8250_NR_UARTS=1 | ||
84 | CONFIG_SERIAL_8250_RUNTIME_UARTS=1 | ||
85 | # CONFIG_HW_RANDOM is not set | ||
86 | # CONFIG_HWMON is not set | ||
87 | # CONFIG_VGA_ARB is not set | ||
88 | CONFIG_USB=m | ||
89 | CONFIG_USB_EHCI_HCD=m | ||
90 | CONFIG_LEDS_CLASS=y | ||
91 | # CONFIG_IOMMU_SUPPORT is not set | ||
92 | # CONFIG_DNOTIFY is not set | ||
93 | # CONFIG_PROC_PAGE_MONITOR is not set | ||
94 | CONFIG_TMPFS=y | ||
95 | CONFIG_TMPFS_XATTR=y | ||
96 | CONFIG_JFFS2_FS=y | ||
97 | CONFIG_JFFS2_SUMMARY=y | ||
98 | CONFIG_JFFS2_FS_XATTR=y | ||
99 | # CONFIG_JFFS2_FS_POSIX_ACL is not set | ||
100 | # CONFIG_JFFS2_FS_SECURITY is not set | ||
101 | CONFIG_JFFS2_COMPRESSION_OPTIONS=y | ||
102 | # CONFIG_JFFS2_ZLIB is not set | ||
103 | CONFIG_SQUASHFS=y | ||
104 | CONFIG_SQUASHFS_FILE_DIRECT=y | ||
105 | CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y | ||
106 | # CONFIG_SQUASHFS_ZLIB is not set | ||
107 | CONFIG_SQUASHFS_XZ=y | ||
108 | CONFIG_PRINTK_TIME=y | ||
109 | # CONFIG_ENABLE_MUST_CHECK is not set | ||
110 | CONFIG_STRIP_ASM_SYMS=y | ||
111 | CONFIG_DEBUG_FS=y | ||
112 | # CONFIG_SCHED_DEBUG is not set | ||
113 | # CONFIG_FTRACE is not set | ||
114 | # CONFIG_XZ_DEC_X86 is not set | ||
115 | # CONFIG_XZ_DEC_POWERPC is not set | ||
116 | # CONFIG_XZ_DEC_IA64 is not set | ||
117 | # CONFIG_XZ_DEC_ARM is not set | ||
118 | # CONFIG_XZ_DEC_ARMTHUMB is not set | ||
119 | # CONFIG_XZ_DEC_SPARC is not set | ||
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index dcac308cec39..d470d08362c0 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig | |||
@@ -59,6 +59,8 @@ CONFIG_EEPROM_AT25=y | |||
59 | CONFIG_BLK_DEV_SD=y | 59 | CONFIG_BLK_DEV_SD=y |
60 | CONFIG_ATA=y | 60 | CONFIG_ATA=y |
61 | CONFIG_SATA_AHCI=y | 61 | CONFIG_SATA_AHCI=y |
62 | CONFIG_SATA_AHCI_PLATFORM=y | ||
63 | CONFIG_AHCI_OCTEON=y | ||
62 | CONFIG_PATA_OCTEON_CF=y | 64 | CONFIG_PATA_OCTEON_CF=y |
63 | CONFIG_SATA_SIL=y | 65 | CONFIG_SATA_SIL=y |
64 | CONFIG_NETDEVICES=y | 66 | CONFIG_NETDEVICES=y |
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h index 9f67033961a6..ee9f5f2d18fc 100644 --- a/arch/mips/include/asm/bootinfo.h +++ b/arch/mips/include/asm/bootinfo.h | |||
@@ -127,6 +127,10 @@ extern char arcs_cmdline[COMMAND_LINE_SIZE]; | |||
127 | */ | 127 | */ |
128 | extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; | 128 | extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; |
129 | 129 | ||
130 | #ifdef CONFIG_USE_OF | ||
131 | extern unsigned long fw_passed_dtb; | ||
132 | #endif | ||
133 | |||
130 | /* | 134 | /* |
131 | * Platform memory detection hook called by setup_arch | 135 | * Platform memory detection hook called by setup_arch |
132 | */ | 136 | */ |
diff --git a/arch/mips/include/asm/dsemul.h b/arch/mips/include/asm/dsemul.h new file mode 100644 index 000000000000..a6e067801f23 --- /dev/null +++ b/arch/mips/include/asm/dsemul.h | |||
@@ -0,0 +1,92 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2016 Imagination Technologies | ||
3 | * Author: Paul Burton <paul.burton@imgtec.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License as published by the | ||
7 | * Free Software Foundation; either version 2 of the License, or (at your | ||
8 | * option) any later version. | ||
9 | */ | ||
10 | |||
11 | #ifndef __MIPS_ASM_DSEMUL_H__ | ||
12 | #define __MIPS_ASM_DSEMUL_H__ | ||
13 | |||
14 | #include <asm/break.h> | ||
15 | #include <asm/inst.h> | ||
16 | |||
17 | /* Break instruction with special math emu break code set */ | ||
18 | #define BREAK_MATH(micromips) (((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16)) | ||
19 | |||
20 | /* When used as a frame index, indicates the lack of a frame */ | ||
21 | #define BD_EMUFRAME_NONE ((int)BIT(31)) | ||
22 | |||
23 | struct mm_struct; | ||
24 | struct pt_regs; | ||
25 | struct task_struct; | ||
26 | |||
27 | /** | ||
28 | * mips_dsemul() - 'Emulate' an instruction from a branch delay slot | ||
29 | * @regs: User thread register context. | ||
30 | * @ir: The instruction to be 'emulated'. | ||
31 | * @branch_pc: The PC of the branch instruction. | ||
32 | * @cont_pc: The PC to continue at following 'emulation'. | ||
33 | * | ||
34 | * Emulate or execute an arbitrary MIPS instruction within the context of | ||
35 | * the current user thread. This is used primarily to handle instructions | ||
36 | * in the delay slots of emulated branch instructions, for example FP | ||
37 | * branch instructions on systems without an FPU. | ||
38 | * | ||
39 | * Return: Zero on success, negative if ir is a NOP, signal number on failure. | ||
40 | */ | ||
41 | extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir, | ||
42 | unsigned long branch_pc, unsigned long cont_pc); | ||
43 | |||
44 | /** | ||
45 | * do_dsemulret() - Return from a delay slot 'emulation' frame | ||
46 | * @xcp: User thread register context. | ||
47 | * | ||
48 | * Call in response to the BRK_MEMU break instruction used to return to | ||
49 | * the kernel from branch delay slot 'emulation' frames following a call | ||
50 | * to mips_dsemul(). Restores the user thread PC to the value that was | ||
51 | * passed as the cpc parameter to mips_dsemul(). | ||
52 | * | ||
53 | * Return: True if an emulation frame was returned from, else false. | ||
54 | */ | ||
55 | extern bool do_dsemulret(struct pt_regs *xcp); | ||
56 | |||
57 | /** | ||
58 | * dsemul_thread_cleanup() - Cleanup thread 'emulation' frame | ||
59 | * @tsk: The task structure associated with the thread | ||
60 | * | ||
61 | * If the thread @tsk has a branch delay slot 'emulation' frame | ||
62 | * allocated to it then free that frame. | ||
63 | * | ||
64 | * Return: True if a frame was freed, else false. | ||
65 | */ | ||
66 | extern bool dsemul_thread_cleanup(struct task_struct *tsk); | ||
67 | |||
68 | /** | ||
69 | * dsemul_thread_rollback() - Rollback from an 'emulation' frame | ||
70 | * @regs: User thread register context. | ||
71 | * | ||
72 | * If the current thread, whose register context is represented by @regs, | ||
73 | * is executing within a delay slot 'emulation' frame then exit that | ||
74 | * frame. The PC will be rolled back to the branch if the instruction | ||
75 | * that was being 'emulated' has not yet executed, or advanced to the | ||
76 | * continuation PC if it has. | ||
77 | * | ||
78 | * Return: True if a frame was exited, else false. | ||
79 | */ | ||
80 | extern bool dsemul_thread_rollback(struct pt_regs *regs); | ||
81 | |||
82 | /** | ||
83 | * dsemul_mm_cleanup() - Cleanup per-mm delay slot 'emulation' state | ||
84 | * @mm: The struct mm_struct to cleanup state for. | ||
85 | * | ||
86 | * Cleanup state for the given @mm, ensuring that any memory allocated | ||
87 | * for delay slot 'emulation' book-keeping is freed. This is to be called | ||
88 | * before @mm is freed in order to avoid memory leaks. | ||
89 | */ | ||
90 | extern void dsemul_mm_cleanup(struct mm_struct *mm); | ||
91 | |||
92 | #endif /* __MIPS_ASM_DSEMUL_H__ */ | ||
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index f5f45717968e..2b3dc2973670 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h | |||
@@ -458,6 +458,7 @@ extern const char *__elf_platform; | |||
458 | #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) | 458 | #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) |
459 | #endif | 459 | #endif |
460 | 460 | ||
461 | /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ | ||
461 | #define ARCH_DLINFO \ | 462 | #define ARCH_DLINFO \ |
462 | do { \ | 463 | do { \ |
463 | NEW_AUX_ENT(AT_SYSINFO_EHDR, \ | 464 | NEW_AUX_ENT(AT_SYSINFO_EHDR, \ |
@@ -498,4 +499,7 @@ extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr, | |||
498 | extern void mips_set_personality_nan(struct arch_elf_state *state); | 499 | extern void mips_set_personality_nan(struct arch_elf_state *state); |
499 | extern void mips_set_personality_fp(struct arch_elf_state *state); | 500 | extern void mips_set_personality_fp(struct arch_elf_state *state); |
500 | 501 | ||
502 | #define elf_read_implies_exec(ex, stk) mips_elf_read_implies_exec(&(ex), stk) | ||
503 | extern int mips_elf_read_implies_exec(void *elf_ex, int exstack); | ||
504 | |||
501 | #endif /* _ASM_ELF_H */ | 505 | #endif /* _ASM_ELF_H */ |
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h index 3225c3c0724b..355dc25172e7 100644 --- a/arch/mips/include/asm/fpu_emulator.h +++ b/arch/mips/include/asm/fpu_emulator.h | |||
@@ -24,7 +24,7 @@ | |||
24 | #define _ASM_FPU_EMULATOR_H | 24 | #define _ASM_FPU_EMULATOR_H |
25 | 25 | ||
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | #include <asm/break.h> | 27 | #include <asm/dsemul.h> |
28 | #include <asm/thread_info.h> | 28 | #include <asm/thread_info.h> |
29 | #include <asm/inst.h> | 29 | #include <asm/inst.h> |
30 | #include <asm/local.h> | 30 | #include <asm/local.h> |
@@ -60,27 +60,16 @@ do { \ | |||
60 | #define MIPS_FPU_EMU_INC_STATS(M) do { } while (0) | 60 | #define MIPS_FPU_EMU_INC_STATS(M) do { } while (0) |
61 | #endif /* CONFIG_DEBUG_FS */ | 61 | #endif /* CONFIG_DEBUG_FS */ |
62 | 62 | ||
63 | extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir, | ||
64 | unsigned long cpc); | ||
65 | extern int do_dsemulret(struct pt_regs *xcp); | ||
66 | extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, | 63 | extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, |
67 | struct mips_fpu_struct *ctx, int has_fpu, | 64 | struct mips_fpu_struct *ctx, int has_fpu, |
68 | void *__user *fault_addr); | 65 | void *__user *fault_addr); |
69 | int process_fpemu_return(int sig, void __user *fault_addr, | 66 | int process_fpemu_return(int sig, void __user *fault_addr, |
70 | unsigned long fcr31); | 67 | unsigned long fcr31); |
68 | int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | ||
69 | unsigned long *contpc); | ||
71 | int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | 70 | int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, |
72 | unsigned long *contpc); | 71 | unsigned long *contpc); |
73 | 72 | ||
74 | /* | ||
75 | * Instruction inserted following the badinst to further tag the sequence | ||
76 | */ | ||
77 | #define BD_COOKIE 0x0000bd36 /* tne $0, $0 with baggage */ | ||
78 | |||
79 | /* | ||
80 | * Break instruction with special math emu break code set | ||
81 | */ | ||
82 | #define BREAK_MATH(micromips) (((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16)) | ||
83 | |||
84 | #define SIGNALLING_NAN 0x7ff800007ff80000LL | 73 | #define SIGNALLING_NAN 0x7ff800007ff80000LL |
85 | 74 | ||
86 | static inline void fpu_emulator_init_fpu(void) | 75 | static inline void fpu_emulator_init_fpu(void) |
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h index cceae32a0732..64b86b9d30fe 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/irq.h +++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h | |||
@@ -42,8 +42,6 @@ enum octeon_irq { | |||
42 | OCTEON_IRQ_TIMER1, | 42 | OCTEON_IRQ_TIMER1, |
43 | OCTEON_IRQ_TIMER2, | 43 | OCTEON_IRQ_TIMER2, |
44 | OCTEON_IRQ_TIMER3, | 44 | OCTEON_IRQ_TIMER3, |
45 | OCTEON_IRQ_USB0, | ||
46 | OCTEON_IRQ_USB1, | ||
47 | #ifndef CONFIG_PCI_MSI | 45 | #ifndef CONFIG_PCI_MSI |
48 | OCTEON_IRQ_LAST = 127 | 46 | OCTEON_IRQ_LAST = 127 |
49 | #endif | 47 | #endif |
diff --git a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h index 374eefafb320..0cf5ac1f7245 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h +++ b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h | |||
@@ -12,6 +12,14 @@ | |||
12 | 12 | ||
13 | #ifdef __BIG_ENDIAN | 13 | #ifdef __BIG_ENDIAN |
14 | 14 | ||
15 | static inline bool __should_swizzle_bits(volatile void *a) | ||
16 | { | ||
17 | extern const bool octeon_should_swizzle_table[]; | ||
18 | |||
19 | unsigned long did = ((unsigned long)a >> 40) & 0xff; | ||
20 | return octeon_should_swizzle_table[did]; | ||
21 | } | ||
22 | |||
15 | # define __swizzle_addr_b(port) (port) | 23 | # define __swizzle_addr_b(port) (port) |
16 | # define __swizzle_addr_w(port) (port) | 24 | # define __swizzle_addr_w(port) (port) |
17 | # define __swizzle_addr_l(port) (port) | 25 | # define __swizzle_addr_l(port) (port) |
@@ -19,6 +27,8 @@ | |||
19 | 27 | ||
20 | #else /* __LITTLE_ENDIAN */ | 28 | #else /* __LITTLE_ENDIAN */ |
21 | 29 | ||
30 | #define __should_swizzle_bits(a) false | ||
31 | |||
22 | static inline bool __should_swizzle_addr(unsigned long p) | 32 | static inline bool __should_swizzle_addr(unsigned long p) |
23 | { | 33 | { |
24 | /* boot bus? */ | 34 | /* boot bus? */ |
@@ -35,40 +45,14 @@ static inline bool __should_swizzle_addr(unsigned long p) | |||
35 | 45 | ||
36 | #endif /* __BIG_ENDIAN */ | 46 | #endif /* __BIG_ENDIAN */ |
37 | 47 | ||
38 | /* | ||
39 | * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware; | ||
40 | * less sane hardware forces software to fiddle with this... | ||
41 | * | ||
42 | * Regardless, if the host bus endianness mismatches that of PCI/ISA, then | ||
43 | * you can't have the numerical value of data and byte addresses within | ||
44 | * multibyte quantities both preserved at the same time. Hence two | ||
45 | * variations of functions: non-prefixed ones that preserve the value | ||
46 | * and prefixed ones that preserve byte addresses. The latters are | ||
47 | * typically used for moving raw data between a peripheral and memory (cf. | ||
48 | * string I/O functions), hence the "__mem_" prefix. | ||
49 | */ | ||
50 | #if defined(CONFIG_SWAP_IO_SPACE) | ||
51 | 48 | ||
52 | # define ioswabb(a, x) (x) | 49 | # define ioswabb(a, x) (x) |
53 | # define __mem_ioswabb(a, x) (x) | 50 | # define __mem_ioswabb(a, x) (x) |
54 | # define ioswabw(a, x) le16_to_cpu(x) | 51 | # define ioswabw(a, x) (__should_swizzle_bits(a) ? le16_to_cpu(x) : x) |
55 | # define __mem_ioswabw(a, x) (x) | 52 | # define __mem_ioswabw(a, x) (x) |
56 | # define ioswabl(a, x) le32_to_cpu(x) | 53 | # define ioswabl(a, x) (__should_swizzle_bits(a) ? le32_to_cpu(x) : x) |
57 | # define __mem_ioswabl(a, x) (x) | 54 | # define __mem_ioswabl(a, x) (x) |
58 | # define ioswabq(a, x) le64_to_cpu(x) | 55 | # define ioswabq(a, x) (__should_swizzle_bits(a) ? le64_to_cpu(x) : x) |
59 | # define __mem_ioswabq(a, x) (x) | 56 | # define __mem_ioswabq(a, x) (x) |
60 | 57 | ||
61 | #else | ||
62 | |||
63 | # define ioswabb(a, x) (x) | ||
64 | # define __mem_ioswabb(a, x) (x) | ||
65 | # define ioswabw(a, x) (x) | ||
66 | # define __mem_ioswabw(a, x) cpu_to_le16(x) | ||
67 | # define ioswabl(a, x) (x) | ||
68 | # define __mem_ioswabl(a, x) cpu_to_le32(x) | ||
69 | # define ioswabq(a, x) (x) | ||
70 | # define __mem_ioswabq(a, x) cpu_to_le32(x) | ||
71 | |||
72 | #endif | ||
73 | |||
74 | #endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */ | 58 | #endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */ |
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h index 1afa1f986df8..f6ba08d77931 100644 --- a/arch/mips/include/asm/mmu.h +++ b/arch/mips/include/asm/mmu.h | |||
@@ -2,11 +2,20 @@ | |||
2 | #define __ASM_MMU_H | 2 | #define __ASM_MMU_H |
3 | 3 | ||
4 | #include <linux/atomic.h> | 4 | #include <linux/atomic.h> |
5 | #include <linux/spinlock.h> | ||
6 | #include <linux/wait.h> | ||
5 | 7 | ||
6 | typedef struct { | 8 | typedef struct { |
7 | unsigned long asid[NR_CPUS]; | 9 | unsigned long asid[NR_CPUS]; |
8 | void *vdso; | 10 | void *vdso; |
9 | atomic_t fp_mode_switching; | 11 | atomic_t fp_mode_switching; |
12 | |||
13 | /* lock to be held whilst modifying fp_bd_emupage_allocmap */ | ||
14 | spinlock_t bd_emupage_lock; | ||
15 | /* bitmap tracking allocation of fp_bd_emupage */ | ||
16 | unsigned long *bd_emupage_allocmap; | ||
17 | /* wait queue for threads requiring an emuframe */ | ||
18 | wait_queue_head_t bd_emupage_queue; | ||
10 | } mm_context_t; | 19 | } mm_context_t; |
11 | 20 | ||
12 | #endif /* __ASM_MMU_H */ | 21 | #endif /* __ASM_MMU_H */ |
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index fc57e135cb0a..ddd57ade1aa8 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/smp.h> | 16 | #include <linux/smp.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
19 | #include <asm/dsemul.h> | ||
19 | #include <asm/hazards.h> | 20 | #include <asm/hazards.h> |
20 | #include <asm/tlbflush.h> | 21 | #include <asm/tlbflush.h> |
21 | #include <asm-generic/mm_hooks.h> | 22 | #include <asm-generic/mm_hooks.h> |
@@ -128,6 +129,10 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |||
128 | 129 | ||
129 | atomic_set(&mm->context.fp_mode_switching, 0); | 130 | atomic_set(&mm->context.fp_mode_switching, 0); |
130 | 131 | ||
132 | mm->context.bd_emupage_allocmap = NULL; | ||
133 | spin_lock_init(&mm->context.bd_emupage_lock); | ||
134 | init_waitqueue_head(&mm->context.bd_emupage_queue); | ||
135 | |||
131 | return 0; | 136 | return 0; |
132 | } | 137 | } |
133 | 138 | ||
@@ -162,6 +167,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
162 | */ | 167 | */ |
163 | static inline void destroy_context(struct mm_struct *mm) | 168 | static inline void destroy_context(struct mm_struct *mm) |
164 | { | 169 | { |
170 | dsemul_mm_cleanup(mm); | ||
165 | } | 171 | } |
166 | 172 | ||
167 | #define deactivate_mm(tsk, mm) do { } while (0) | 173 | #define deactivate_mm(tsk, mm) do { } while (0) |
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h index ddf496cb2a2a..8967b475ab10 100644 --- a/arch/mips/include/asm/msa.h +++ b/arch/mips/include/asm/msa.h | |||
@@ -168,6 +168,7 @@ static inline unsigned int read_msa_##name(void) \ | |||
168 | unsigned int reg; \ | 168 | unsigned int reg; \ |
169 | __asm__ __volatile__( \ | 169 | __asm__ __volatile__( \ |
170 | " .set push\n" \ | 170 | " .set push\n" \ |
171 | " .set fp=64\n" \ | ||
171 | " .set msa\n" \ | 172 | " .set msa\n" \ |
172 | " cfcmsa %0, $" #cs "\n" \ | 173 | " cfcmsa %0, $" #cs "\n" \ |
173 | " .set pop\n" \ | 174 | " .set pop\n" \ |
@@ -179,6 +180,7 @@ static inline void write_msa_##name(unsigned int val) \ | |||
179 | { \ | 180 | { \ |
180 | __asm__ __volatile__( \ | 181 | __asm__ __volatile__( \ |
181 | " .set push\n" \ | 182 | " .set push\n" \ |
183 | " .set fp=64\n" \ | ||
182 | " .set msa\n" \ | 184 | " .set msa\n" \ |
183 | " ctcmsa $" #cs ", %0\n" \ | 185 | " ctcmsa $" #cs ", %0\n" \ |
184 | " .set pop\n" \ | 186 | " .set pop\n" \ |
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 21ed7150fec3..ea0cd9773914 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h | |||
@@ -162,16 +162,34 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
162 | /* | 162 | /* |
163 | * __pa()/__va() should be used only during mem init. | 163 | * __pa()/__va() should be used only during mem init. |
164 | */ | 164 | */ |
165 | #ifdef CONFIG_64BIT | 165 | static inline unsigned long ___pa(unsigned long x) |
166 | #define __pa(x) \ | 166 | { |
167 | ({ \ | 167 | if (config_enabled(CONFIG_64BIT)) { |
168 | unsigned long __x = (unsigned long)(x); \ | 168 | /* |
169 | __x < CKSEG0 ? XPHYSADDR(__x) : CPHYSADDR(__x); \ | 169 | * For MIPS64 the virtual address may either be in one of |
170 | }) | 170 | * the compatibility segements ckseg0 or ckseg1, or it may |
171 | #else | 171 | * be in xkphys. |
172 | #define __pa(x) \ | 172 | */ |
173 | ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) | 173 | return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x); |
174 | #endif | 174 | } |
175 | |||
176 | if (!config_enabled(CONFIG_EVA)) { | ||
177 | /* | ||
178 | * We're using the standard MIPS32 legacy memory map, ie. | ||
179 | * the address x is going to be in kseg0 or kseg1. We can | ||
180 | * handle either case by masking out the desired bits using | ||
181 | * CPHYSADDR. | ||
182 | */ | ||
183 | return CPHYSADDR(x); | ||
184 | } | ||
185 | |||
186 | /* | ||
187 | * EVA is in use so the memory map could be anything, making it not | ||
188 | * safe to just mask out bits. | ||
189 | */ | ||
190 | return x - PAGE_OFFSET + PHYS_OFFSET; | ||
191 | } | ||
192 | #define __pa(x) ___pa((unsigned long)(x)) | ||
175 | #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) | 193 | #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) |
176 | #include <asm/io.h> | 194 | #include <asm/io.h> |
177 | 195 | ||
@@ -229,8 +247,10 @@ extern int __virt_addr_valid(const volatile void *kaddr); | |||
229 | #define virt_addr_valid(kaddr) \ | 247 | #define virt_addr_valid(kaddr) \ |
230 | __virt_addr_valid((const volatile void *) (kaddr)) | 248 | __virt_addr_valid((const volatile void *) (kaddr)) |
231 | 249 | ||
232 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ | 250 | #define VM_DATA_DEFAULT_FLAGS \ |
233 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | 251 | (VM_READ | VM_WRITE | \ |
252 | ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ | ||
253 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
234 | 254 | ||
235 | #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) | 255 | #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) |
236 | #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) | 256 | #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) |
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 7e78b6208d7d..0d36c87acbe2 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h | |||
@@ -11,12 +11,14 @@ | |||
11 | #ifndef _ASM_PROCESSOR_H | 11 | #ifndef _ASM_PROCESSOR_H |
12 | #define _ASM_PROCESSOR_H | 12 | #define _ASM_PROCESSOR_H |
13 | 13 | ||
14 | #include <linux/atomic.h> | ||
14 | #include <linux/cpumask.h> | 15 | #include <linux/cpumask.h> |
15 | #include <linux/threads.h> | 16 | #include <linux/threads.h> |
16 | 17 | ||
17 | #include <asm/cachectl.h> | 18 | #include <asm/cachectl.h> |
18 | #include <asm/cpu.h> | 19 | #include <asm/cpu.h> |
19 | #include <asm/cpu-info.h> | 20 | #include <asm/cpu-info.h> |
21 | #include <asm/dsemul.h> | ||
20 | #include <asm/mipsregs.h> | 22 | #include <asm/mipsregs.h> |
21 | #include <asm/prefetch.h> | 23 | #include <asm/prefetch.h> |
22 | 24 | ||
@@ -78,7 +80,11 @@ extern unsigned int vced_count, vcei_count; | |||
78 | 80 | ||
79 | #endif | 81 | #endif |
80 | 82 | ||
81 | #define STACK_TOP (TASK_SIZE & PAGE_MASK) | 83 | /* |
84 | * One page above the stack is used for branch delay slot "emulation". | ||
85 | * See dsemul.c for details. | ||
86 | */ | ||
87 | #define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE) | ||
82 | 88 | ||
83 | /* | 89 | /* |
84 | * This decides where the kernel will search for a free chunk of vm | 90 | * This decides where the kernel will search for a free chunk of vm |
@@ -256,6 +262,12 @@ struct thread_struct { | |||
256 | 262 | ||
257 | /* Saved fpu/fpu emulator stuff. */ | 263 | /* Saved fpu/fpu emulator stuff. */ |
258 | struct mips_fpu_struct fpu FPU_ALIGN; | 264 | struct mips_fpu_struct fpu FPU_ALIGN; |
265 | /* Assigned branch delay slot 'emulation' frame */ | ||
266 | atomic_t bd_emu_frame; | ||
267 | /* PC of the branch from a branch delay slot 'emulation' */ | ||
268 | unsigned long bd_emu_branch_pc; | ||
269 | /* PC to continue from following a branch delay slot 'emulation' */ | ||
270 | unsigned long bd_emu_cont_pc; | ||
259 | #ifdef CONFIG_MIPS_MT_FPAFF | 271 | #ifdef CONFIG_MIPS_MT_FPAFF |
260 | /* Emulated instruction count */ | 272 | /* Emulated instruction count */ |
261 | unsigned long emulated_fp; | 273 | unsigned long emulated_fp; |
@@ -323,6 +335,10 @@ struct thread_struct { | |||
323 | * FPU affinity state (null if not FPAFF) \ | 335 | * FPU affinity state (null if not FPAFF) \ |
324 | */ \ | 336 | */ \ |
325 | FPAFF_INIT \ | 337 | FPAFF_INIT \ |
338 | /* Delay slot emulation */ \ | ||
339 | .bd_emu_frame = ATOMIC_INIT(BD_EMUFRAME_NONE), \ | ||
340 | .bd_emu_branch_pc = 0, \ | ||
341 | .bd_emu_cont_pc = 0, \ | ||
326 | /* \ | 342 | /* \ |
327 | * Saved DSP stuff \ | 343 | * Saved DSP stuff \ |
328 | */ \ | 344 | */ \ |
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 38902bf97adc..667ca3c467b7 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h | |||
@@ -210,7 +210,11 @@ static inline void protected_writeback_dcache_line(unsigned long addr) | |||
210 | 210 | ||
211 | static inline void protected_writeback_scache_line(unsigned long addr) | 211 | static inline void protected_writeback_scache_line(unsigned long addr) |
212 | { | 212 | { |
213 | #ifdef CONFIG_EVA | ||
214 | protected_cachee_op(Hit_Writeback_Inv_SD, addr); | ||
215 | #else | ||
213 | protected_cache_op(Hit_Writeback_Inv_SD, addr); | 216 | protected_cache_op(Hit_Writeback_Inv_SD, addr); |
217 | #endif | ||
214 | } | 218 | } |
215 | 219 | ||
216 | /* | 220 | /* |
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h index 82eae1583bcf..23d6b8015c79 100644 --- a/arch/mips/include/asm/signal.h +++ b/arch/mips/include/asm/signal.h | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | #include <uapi/asm/signal.h> | 12 | #include <uapi/asm/signal.h> |
13 | 13 | ||
14 | #ifdef CONFIG_MIPS32_COMPAT | 14 | #ifdef CONFIG_MIPS32_O32 |
15 | extern struct mips_abi mips_abi_32; | 15 | extern struct mips_abi mips_abi_32; |
16 | 16 | ||
17 | #define sig_uses_siginfo(ka, abi) \ | 17 | #define sig_uses_siginfo(ka, abi) \ |
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 03722d4326a1..8bc6c70a4030 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h | |||
@@ -23,7 +23,7 @@ | |||
23 | extern int smp_num_siblings; | 23 | extern int smp_num_siblings; |
24 | extern cpumask_t cpu_sibling_map[]; | 24 | extern cpumask_t cpu_sibling_map[]; |
25 | extern cpumask_t cpu_core_map[]; | 25 | extern cpumask_t cpu_core_map[]; |
26 | extern cpumask_t cpu_foreign_map; | 26 | extern cpumask_t cpu_foreign_map[]; |
27 | 27 | ||
28 | #define raw_smp_processor_id() (current_thread_info()->cpu) | 28 | #define raw_smp_processor_id() (current_thread_info()->cpu) |
29 | 29 | ||
@@ -53,6 +53,8 @@ extern cpumask_t cpu_coherent_mask; | |||
53 | 53 | ||
54 | extern void asmlinkage smp_bootstrap(void); | 54 | extern void asmlinkage smp_bootstrap(void); |
55 | 55 | ||
56 | extern void calculate_cpu_foreign_map(void); | ||
57 | |||
56 | /* | 58 | /* |
57 | * this function sends a 'reschedule' IPI to another CPU. | 59 | * this function sends a 'reschedule' IPI to another CPU. |
58 | * it goes straight through and wastes no time serializing | 60 | * it goes straight through and wastes no time serializing |
diff --git a/arch/mips/include/uapi/asm/auxvec.h b/arch/mips/include/uapi/asm/auxvec.h index c9c7195272c4..45ba259a3618 100644 --- a/arch/mips/include/uapi/asm/auxvec.h +++ b/arch/mips/include/uapi/asm/auxvec.h | |||
@@ -14,4 +14,6 @@ | |||
14 | /* Location of VDSO image. */ | 14 | /* Location of VDSO image. */ |
15 | #define AT_SYSINFO_EHDR 33 | 15 | #define AT_SYSINFO_EHDR 33 |
16 | 16 | ||
17 | #define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ | ||
18 | |||
17 | #endif /* __ASM_AUXVEC_H */ | 19 | #endif /* __ASM_AUXVEC_H */ |
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index e6053d07072f..4a603a3ea657 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile | |||
@@ -71,7 +71,7 @@ obj-$(CONFIG_32BIT) += scall32-o32.o | |||
71 | obj-$(CONFIG_64BIT) += scall64-64.o | 71 | obj-$(CONFIG_64BIT) += scall64-64.o |
72 | obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o | 72 | obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o |
73 | obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o | 73 | obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o |
74 | obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o | 74 | obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o signal_o32.o |
75 | 75 | ||
76 | obj-$(CONFIG_KGDB) += kgdb.o | 76 | obj-$(CONFIG_KGDB) += kgdb.o |
77 | obj-$(CONFIG_PROC_FS) += proc.o | 77 | obj-$(CONFIG_PROC_FS) += proc.o |
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index e4c21bbf9422..804d2a2a19fe 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c | |||
@@ -276,12 +276,7 @@ int r4k_clockevent_init(void) | |||
276 | CLOCK_EVT_FEAT_C3STOP | | 276 | CLOCK_EVT_FEAT_C3STOP | |
277 | CLOCK_EVT_FEAT_PERCPU; | 277 | CLOCK_EVT_FEAT_PERCPU; |
278 | 278 | ||
279 | clockevent_set_clock(cd, mips_hpt_frequency); | ||
280 | |||
281 | /* Calculate the min / max delta */ | ||
282 | cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); | ||
283 | min_delta = calculate_min_delta(); | 279 | min_delta = calculate_min_delta(); |
284 | cd->min_delta_ns = clockevent_delta2ns(min_delta, cd); | ||
285 | 280 | ||
286 | cd->rating = 300; | 281 | cd->rating = 300; |
287 | cd->irq = irq; | 282 | cd->irq = irq; |
@@ -289,7 +284,7 @@ int r4k_clockevent_init(void) | |||
289 | cd->set_next_event = mips_next_event; | 284 | cd->set_next_event = mips_next_event; |
290 | cd->event_handler = mips_event_handler; | 285 | cd->event_handler = mips_event_handler; |
291 | 286 | ||
292 | clockevents_register_device(cd); | 287 | clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff); |
293 | 288 | ||
294 | if (cp0_timer_irq_installed) | 289 | if (cp0_timer_irq_installed) |
295 | return 0; | 290 | return 0; |
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c index 1f910563fdf6..d76275da54cb 100644 --- a/arch/mips/kernel/csrc-r4k.c +++ b/arch/mips/kernel/csrc-r4k.c | |||
@@ -23,7 +23,7 @@ static struct clocksource clocksource_mips = { | |||
23 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 23 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
24 | }; | 24 | }; |
25 | 25 | ||
26 | static u64 notrace r4k_read_sched_clock(void) | 26 | static u64 __maybe_unused notrace r4k_read_sched_clock(void) |
27 | { | 27 | { |
28 | return read_c0_count(); | 28 | return read_c0_count(); |
29 | } | 29 | } |
@@ -82,7 +82,9 @@ int __init init_r4k_clocksource(void) | |||
82 | 82 | ||
83 | clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); | 83 | clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); |
84 | 84 | ||
85 | #ifndef CONFIG_CPU_FREQ | ||
85 | sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); | 86 | sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); |
87 | #endif | ||
86 | 88 | ||
87 | return 0; | 89 | return 0; |
88 | } | 90 | } |
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index e6eb7f1f7723..6430bff21fff 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c | |||
@@ -8,9 +8,12 @@ | |||
8 | * option) any later version. | 8 | * option) any later version. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/binfmts.h> | ||
11 | #include <linux/elf.h> | 12 | #include <linux/elf.h> |
13 | #include <linux/export.h> | ||
12 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
13 | 15 | ||
16 | #include <asm/cpu-features.h> | ||
14 | #include <asm/cpu-info.h> | 17 | #include <asm/cpu-info.h> |
15 | 18 | ||
16 | /* Whether to accept legacy-NaN and 2008-NaN user binaries. */ | 19 | /* Whether to accept legacy-NaN and 2008-NaN user binaries. */ |
@@ -326,3 +329,19 @@ void mips_set_personality_nan(struct arch_elf_state *state) | |||
326 | BUG(); | 329 | BUG(); |
327 | } | 330 | } |
328 | } | 331 | } |
332 | |||
333 | int mips_elf_read_implies_exec(void *elf_ex, int exstack) | ||
334 | { | ||
335 | if (exstack != EXSTACK_DISABLE_X) { | ||
336 | /* The binary doesn't request a non-executable stack */ | ||
337 | return 1; | ||
338 | } | ||
339 | |||
340 | if (!cpu_has_rixi) { | ||
341 | /* The CPU doesn't support non-executable memory */ | ||
342 | return 1; | ||
343 | } | ||
344 | |||
345 | return 0; | ||
346 | } | ||
347 | EXPORT_SYMBOL(mips_elf_read_implies_exec); | ||
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index 56e8fede3fd8..cf052204eb0a 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S | |||
@@ -93,21 +93,24 @@ NESTED(kernel_entry, 16, sp) # kernel entry point | |||
93 | jr t0 | 93 | jr t0 |
94 | 0: | 94 | 0: |
95 | 95 | ||
96 | #ifdef CONFIG_USE_OF | ||
96 | #ifdef CONFIG_MIPS_RAW_APPENDED_DTB | 97 | #ifdef CONFIG_MIPS_RAW_APPENDED_DTB |
97 | PTR_LA t0, __appended_dtb | 98 | PTR_LA t2, __appended_dtb |
98 | 99 | ||
99 | #ifdef CONFIG_CPU_BIG_ENDIAN | 100 | #ifdef CONFIG_CPU_BIG_ENDIAN |
100 | li t1, 0xd00dfeed | 101 | li t1, 0xd00dfeed |
101 | #else | 102 | #else |
102 | li t1, 0xedfe0dd0 | 103 | li t1, 0xedfe0dd0 |
103 | #endif | 104 | #endif |
104 | lw t2, (t0) | 105 | lw t0, (t2) |
105 | bne t1, t2, not_found | 106 | beq t0, t1, dtb_found |
106 | nop | 107 | #endif |
108 | li t1, -2 | ||
109 | beq a0, t1, dtb_found | ||
110 | move t2, a1 | ||
107 | 111 | ||
108 | move a1, t0 | 112 | li t2, 0 |
109 | PTR_LI a0, -2 | 113 | dtb_found: |
110 | not_found: | ||
111 | #endif | 114 | #endif |
112 | PTR_LA t0, __bss_start # clear .bss | 115 | PTR_LA t0, __bss_start # clear .bss |
113 | LONG_S zero, (t0) | 116 | LONG_S zero, (t0) |
@@ -122,6 +125,10 @@ not_found: | |||
122 | LONG_S a2, fw_arg2 | 125 | LONG_S a2, fw_arg2 |
123 | LONG_S a3, fw_arg3 | 126 | LONG_S a3, fw_arg3 |
124 | 127 | ||
128 | #ifdef CONFIG_USE_OF | ||
129 | LONG_S t2, fw_passed_dtb | ||
130 | #endif | ||
131 | |||
125 | MTC0 zero, CP0_CONTEXT # clear context register | 132 | MTC0 zero, CP0_CONTEXT # clear context register |
126 | PTR_LA $28, init_thread_union | 133 | PTR_LA $28, init_thread_union |
127 | /* Set the SP after an empty pt_regs. */ | 134 | /* Set the SP after an empty pt_regs. */ |
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c index 43fbadc78d0a..c3372cac6db2 100644 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c | |||
@@ -283,7 +283,7 @@ static int jr_func(struct pt_regs *regs, u32 ir) | |||
283 | err = mipsr6_emul(regs, nir); | 283 | err = mipsr6_emul(regs, nir); |
284 | if (err > 0) { | 284 | if (err > 0) { |
285 | regs->cp0_epc = nepc; | 285 | regs->cp0_epc = nepc; |
286 | err = mips_dsemul(regs, nir, cepc); | 286 | err = mips_dsemul(regs, nir, epc, cepc); |
287 | if (err == SIGILL) | 287 | if (err == SIGILL) |
288 | err = SIGEMT; | 288 | err = SIGEMT; |
289 | MIPS_R2_STATS(dsemul); | 289 | MIPS_R2_STATS(dsemul); |
@@ -1033,7 +1033,7 @@ repeat: | |||
1033 | if (nir) { | 1033 | if (nir) { |
1034 | err = mipsr6_emul(regs, nir); | 1034 | err = mipsr6_emul(regs, nir); |
1035 | if (err > 0) { | 1035 | if (err > 0) { |
1036 | err = mips_dsemul(regs, nir, cpc); | 1036 | err = mips_dsemul(regs, nir, epc, cpc); |
1037 | if (err == SIGILL) | 1037 | if (err == SIGILL) |
1038 | err = SIGEMT; | 1038 | err = SIGEMT; |
1039 | MIPS_R2_STATS(dsemul); | 1039 | MIPS_R2_STATS(dsemul); |
@@ -1082,7 +1082,7 @@ repeat: | |||
1082 | if (nir) { | 1082 | if (nir) { |
1083 | err = mipsr6_emul(regs, nir); | 1083 | err = mipsr6_emul(regs, nir); |
1084 | if (err > 0) { | 1084 | if (err > 0) { |
1085 | err = mips_dsemul(regs, nir, cpc); | 1085 | err = mips_dsemul(regs, nir, epc, cpc); |
1086 | if (err == SIGILL) | 1086 | if (err == SIGILL) |
1087 | err = SIGEMT; | 1087 | err = SIGEMT; |
1088 | MIPS_R2_STATS(dsemul); | 1088 | MIPS_R2_STATS(dsemul); |
@@ -1149,7 +1149,7 @@ repeat: | |||
1149 | if (nir) { | 1149 | if (nir) { |
1150 | err = mipsr6_emul(regs, nir); | 1150 | err = mipsr6_emul(regs, nir); |
1151 | if (err > 0) { | 1151 | if (err > 0) { |
1152 | err = mips_dsemul(regs, nir, cpc); | 1152 | err = mips_dsemul(regs, nir, epc, cpc); |
1153 | if (err == SIGILL) | 1153 | if (err == SIGILL) |
1154 | err = SIGEMT; | 1154 | err = SIGEMT; |
1155 | MIPS_R2_STATS(dsemul); | 1155 | MIPS_R2_STATS(dsemul); |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 813ed7829c61..7429ad09fbe3 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/asm.h> | 30 | #include <asm/asm.h> |
31 | #include <asm/bootinfo.h> | 31 | #include <asm/bootinfo.h> |
32 | #include <asm/cpu.h> | 32 | #include <asm/cpu.h> |
33 | #include <asm/dsemul.h> | ||
33 | #include <asm/dsp.h> | 34 | #include <asm/dsp.h> |
34 | #include <asm/fpu.h> | 35 | #include <asm/fpu.h> |
35 | #include <asm/msa.h> | 36 | #include <asm/msa.h> |
@@ -68,11 +69,22 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) | |||
68 | lose_fpu(0); | 69 | lose_fpu(0); |
69 | clear_thread_flag(TIF_MSA_CTX_LIVE); | 70 | clear_thread_flag(TIF_MSA_CTX_LIVE); |
70 | clear_used_math(); | 71 | clear_used_math(); |
72 | atomic_set(¤t->thread.bd_emu_frame, BD_EMUFRAME_NONE); | ||
71 | init_dsp(); | 73 | init_dsp(); |
72 | regs->cp0_epc = pc; | 74 | regs->cp0_epc = pc; |
73 | regs->regs[29] = sp; | 75 | regs->regs[29] = sp; |
74 | } | 76 | } |
75 | 77 | ||
78 | void exit_thread(struct task_struct *tsk) | ||
79 | { | ||
80 | /* | ||
81 | * User threads may have allocated a delay slot emulation frame. | ||
82 | * If so, clean up that allocation. | ||
83 | */ | ||
84 | if (!(current->flags & PF_KTHREAD)) | ||
85 | dsemul_thread_cleanup(tsk); | ||
86 | } | ||
87 | |||
76 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) | 88 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
77 | { | 89 | { |
78 | /* | 90 | /* |
@@ -159,6 +171,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
159 | clear_tsk_thread_flag(p, TIF_FPUBOUND); | 171 | clear_tsk_thread_flag(p, TIF_FPUBOUND); |
160 | #endif /* CONFIG_MIPS_MT_FPAFF */ | 172 | #endif /* CONFIG_MIPS_MT_FPAFF */ |
161 | 173 | ||
174 | atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE); | ||
175 | |||
162 | if (clone_flags & CLONE_SETTLS) | 176 | if (clone_flags & CLONE_SETTLS) |
163 | ti->tp_value = regs->regs[7]; | 177 | ti->tp_value = regs->regs[7]; |
164 | 178 | ||
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 9c0b387d6427..51d3988933f8 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -348,7 +348,7 @@ EXPORT(sysn32_call_table) | |||
348 | PTR sys_ni_syscall /* available, was setaltroot */ | 348 | PTR sys_ni_syscall /* available, was setaltroot */ |
349 | PTR sys_add_key | 349 | PTR sys_add_key |
350 | PTR sys_request_key | 350 | PTR sys_request_key |
351 | PTR sys_keyctl /* 6245 */ | 351 | PTR compat_sys_keyctl /* 6245 */ |
352 | PTR sys_set_thread_area | 352 | PTR sys_set_thread_area |
353 | PTR sys_inotify_init | 353 | PTR sys_inotify_init |
354 | PTR sys_inotify_add_watch | 354 | PTR sys_inotify_add_watch |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index f4f28b1580de..6efa7136748f 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -504,7 +504,7 @@ EXPORT(sys32_call_table) | |||
504 | PTR sys_ni_syscall /* available, was setaltroot */ | 504 | PTR sys_ni_syscall /* available, was setaltroot */ |
505 | PTR sys_add_key /* 4280 */ | 505 | PTR sys_add_key /* 4280 */ |
506 | PTR sys_request_key | 506 | PTR sys_request_key |
507 | PTR sys_keyctl | 507 | PTR compat_sys_keyctl |
508 | PTR sys_set_thread_area | 508 | PTR sys_set_thread_area |
509 | PTR sys_inotify_init | 509 | PTR sys_inotify_init |
510 | PTR sys_inotify_add_watch /* 4285 */ | 510 | PTR sys_inotify_add_watch /* 4285 */ |
diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c index 87bc74a5a518..2703f218202e 100644 --- a/arch/mips/kernel/segment.c +++ b/arch/mips/kernel/segment.c | |||
@@ -26,17 +26,20 @@ static void build_segment_config(char *str, unsigned int cfg) | |||
26 | 26 | ||
27 | /* | 27 | /* |
28 | * Access modes MK, MSK and MUSK are mapped segments. Therefore | 28 | * Access modes MK, MSK and MUSK are mapped segments. Therefore |
29 | * there is no direct physical address mapping. | 29 | * there is no direct physical address mapping unless it becomes |
30 | * unmapped uncached at error level due to EU. | ||
30 | */ | 31 | */ |
31 | if ((am == 0) || (am > 3)) { | 32 | if ((am == 0) || (am > 3) || (cfg & MIPS_SEGCFG_EU)) |
32 | str += sprintf(str, " %03lx", | 33 | str += sprintf(str, " %03lx", |
33 | ((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT)); | 34 | ((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT)); |
35 | else | ||
36 | str += sprintf(str, " UND"); | ||
37 | |||
38 | if ((am == 0) || (am > 3)) | ||
34 | str += sprintf(str, " %01ld", | 39 | str += sprintf(str, " %01ld", |
35 | ((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT)); | 40 | ((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT)); |
36 | } else { | 41 | else |
37 | str += sprintf(str, " UND"); | ||
38 | str += sprintf(str, " U"); | 42 | str += sprintf(str, " U"); |
39 | } | ||
40 | 43 | ||
41 | /* Exception configuration. */ | 44 | /* Exception configuration. */ |
42 | str += sprintf(str, " %01ld\n", | 45 | str += sprintf(str, " %01ld\n", |
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index ef408a03e818..36cf8d65c47d 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c | |||
@@ -875,6 +875,10 @@ void __init setup_arch(char **cmdline_p) | |||
875 | unsigned long kernelsp[NR_CPUS]; | 875 | unsigned long kernelsp[NR_CPUS]; |
876 | unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; | 876 | unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; |
877 | 877 | ||
878 | #ifdef CONFIG_USE_OF | ||
879 | unsigned long fw_passed_dtb; | ||
880 | #endif | ||
881 | |||
878 | #ifdef CONFIG_DEBUG_FS | 882 | #ifdef CONFIG_DEBUG_FS |
879 | struct dentry *mips_debugfs_dir; | 883 | struct dentry *mips_debugfs_dir; |
880 | static int __init debugfs_mips(void) | 884 | static int __init debugfs_mips(void) |
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 1975cd2f7de6..9e224469c788 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -772,6 +772,14 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) | |||
772 | struct mips_abi *abi = current->thread.abi; | 772 | struct mips_abi *abi = current->thread.abi; |
773 | void *vdso = current->mm->context.vdso; | 773 | void *vdso = current->mm->context.vdso; |
774 | 774 | ||
775 | /* | ||
776 | * If we were emulating a delay slot instruction, exit that frame such | ||
777 | * that addresses in the sigframe are as expected for userland and we | ||
778 | * don't have a problem if we reuse the thread's frame for an | ||
779 | * instruction within the signal handler. | ||
780 | */ | ||
781 | dsemul_thread_rollback(regs); | ||
782 | |||
775 | if (regs->regs[0]) { | 783 | if (regs->regs[0]) { |
776 | switch(regs->regs[2]) { | 784 | switch(regs->regs[2]) { |
777 | case ERESTART_RESTARTBLOCK: | 785 | case ERESTART_RESTARTBLOCK: |
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 78c8349d151c..97b7c51b8251 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c | |||
@@ -6,129 +6,26 @@ | |||
6 | * Copyright (C) 1991, 1992 Linus Torvalds | 6 | * Copyright (C) 1991, 1992 Linus Torvalds |
7 | * Copyright (C) 1994 - 2000, 2006 Ralf Baechle | 7 | * Copyright (C) 1994 - 2000, 2006 Ralf Baechle |
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
9 | * Copyright (C) 2016, Imagination Technologies Ltd. | ||
9 | */ | 10 | */ |
10 | #include <linux/cache.h> | 11 | #include <linux/compiler.h> |
11 | #include <linux/compat.h> | 12 | #include <linux/errno.h> |
12 | #include <linux/sched.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/smp.h> | ||
15 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
16 | #include <linux/signal.h> | 14 | #include <linux/signal.h> |
17 | #include <linux/syscalls.h> | 15 | #include <linux/syscalls.h> |
18 | #include <linux/errno.h> | ||
19 | #include <linux/wait.h> | ||
20 | #include <linux/ptrace.h> | ||
21 | #include <linux/suspend.h> | ||
22 | #include <linux/compiler.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | 16 | ||
25 | #include <asm/abi.h> | 17 | #include <asm/compat.h> |
26 | #include <asm/asm.h> | ||
27 | #include <asm/compat-signal.h> | 18 | #include <asm/compat-signal.h> |
28 | #include <linux/bitops.h> | 19 | #include <asm/uaccess.h> |
29 | #include <asm/cacheflush.h> | 20 | #include <asm/unistd.h> |
30 | #include <asm/sim.h> | ||
31 | #include <asm/ucontext.h> | ||
32 | #include <asm/fpu.h> | ||
33 | #include <asm/war.h> | ||
34 | #include <asm/dsp.h> | ||
35 | 21 | ||
36 | #include "signal-common.h" | 22 | #include "signal-common.h" |
37 | 23 | ||
38 | /* | ||
39 | * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... | ||
40 | */ | ||
41 | #define __NR_O32_restart_syscall 4253 | ||
42 | |||
43 | /* 32-bit compatibility types */ | 24 | /* 32-bit compatibility types */ |
44 | 25 | ||
45 | typedef unsigned int __sighandler32_t; | 26 | typedef unsigned int __sighandler32_t; |
46 | typedef void (*vfptr_t)(void); | 27 | typedef void (*vfptr_t)(void); |
47 | 28 | ||
48 | struct ucontext32 { | ||
49 | u32 uc_flags; | ||
50 | s32 uc_link; | ||
51 | compat_stack_t uc_stack; | ||
52 | struct sigcontext32 uc_mcontext; | ||
53 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ | ||
54 | }; | ||
55 | |||
56 | struct sigframe32 { | ||
57 | u32 sf_ass[4]; /* argument save space for o32 */ | ||
58 | u32 sf_pad[2]; /* Was: signal trampoline */ | ||
59 | struct sigcontext32 sf_sc; | ||
60 | compat_sigset_t sf_mask; | ||
61 | }; | ||
62 | |||
63 | struct rt_sigframe32 { | ||
64 | u32 rs_ass[4]; /* argument save space for o32 */ | ||
65 | u32 rs_pad[2]; /* Was: signal trampoline */ | ||
66 | compat_siginfo_t rs_info; | ||
67 | struct ucontext32 rs_uc; | ||
68 | }; | ||
69 | |||
70 | static int setup_sigcontext32(struct pt_regs *regs, | ||
71 | struct sigcontext32 __user *sc) | ||
72 | { | ||
73 | int err = 0; | ||
74 | int i; | ||
75 | |||
76 | err |= __put_user(regs->cp0_epc, &sc->sc_pc); | ||
77 | |||
78 | err |= __put_user(0, &sc->sc_regs[0]); | ||
79 | for (i = 1; i < 32; i++) | ||
80 | err |= __put_user(regs->regs[i], &sc->sc_regs[i]); | ||
81 | |||
82 | err |= __put_user(regs->hi, &sc->sc_mdhi); | ||
83 | err |= __put_user(regs->lo, &sc->sc_mdlo); | ||
84 | if (cpu_has_dsp) { | ||
85 | err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); | ||
86 | err |= __put_user(mfhi1(), &sc->sc_hi1); | ||
87 | err |= __put_user(mflo1(), &sc->sc_lo1); | ||
88 | err |= __put_user(mfhi2(), &sc->sc_hi2); | ||
89 | err |= __put_user(mflo2(), &sc->sc_lo2); | ||
90 | err |= __put_user(mfhi3(), &sc->sc_hi3); | ||
91 | err |= __put_user(mflo3(), &sc->sc_lo3); | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * Save FPU state to signal context. Signal handler | ||
96 | * will "inherit" current FPU state. | ||
97 | */ | ||
98 | err |= protected_save_fp_context(sc); | ||
99 | |||
100 | return err; | ||
101 | } | ||
102 | |||
103 | static int restore_sigcontext32(struct pt_regs *regs, | ||
104 | struct sigcontext32 __user *sc) | ||
105 | { | ||
106 | int err = 0; | ||
107 | s32 treg; | ||
108 | int i; | ||
109 | |||
110 | /* Always make any pending restarted system calls return -EINTR */ | ||
111 | current->restart_block.fn = do_no_restart_syscall; | ||
112 | |||
113 | err |= __get_user(regs->cp0_epc, &sc->sc_pc); | ||
114 | err |= __get_user(regs->hi, &sc->sc_mdhi); | ||
115 | err |= __get_user(regs->lo, &sc->sc_mdlo); | ||
116 | if (cpu_has_dsp) { | ||
117 | err |= __get_user(treg, &sc->sc_hi1); mthi1(treg); | ||
118 | err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg); | ||
119 | err |= __get_user(treg, &sc->sc_hi2); mthi2(treg); | ||
120 | err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg); | ||
121 | err |= __get_user(treg, &sc->sc_hi3); mthi3(treg); | ||
122 | err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); | ||
123 | err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); | ||
124 | } | ||
125 | |||
126 | for (i = 1; i < 32; i++) | ||
127 | err |= __get_user(regs->regs[i], &sc->sc_regs[i]); | ||
128 | |||
129 | return err ?: protected_restore_fp_context(sc); | ||
130 | } | ||
131 | |||
132 | /* | 29 | /* |
133 | * Atomically swap in the new signal mask, and wait for a signal. | 30 | * Atomically swap in the new signal mask, and wait for a signal. |
134 | */ | 31 | */ |
@@ -247,176 +144,3 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) | |||
247 | 144 | ||
248 | return 0; | 145 | return 0; |
249 | } | 146 | } |
250 | |||
251 | asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) | ||
252 | { | ||
253 | struct sigframe32 __user *frame; | ||
254 | sigset_t blocked; | ||
255 | int sig; | ||
256 | |||
257 | frame = (struct sigframe32 __user *) regs.regs[29]; | ||
258 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
259 | goto badframe; | ||
260 | if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask)) | ||
261 | goto badframe; | ||
262 | |||
263 | set_current_blocked(&blocked); | ||
264 | |||
265 | sig = restore_sigcontext32(®s, &frame->sf_sc); | ||
266 | if (sig < 0) | ||
267 | goto badframe; | ||
268 | else if (sig) | ||
269 | force_sig(sig, current); | ||
270 | |||
271 | /* | ||
272 | * Don't let your children do this ... | ||
273 | */ | ||
274 | __asm__ __volatile__( | ||
275 | "move\t$29, %0\n\t" | ||
276 | "j\tsyscall_exit" | ||
277 | :/* no outputs */ | ||
278 | :"r" (®s)); | ||
279 | /* Unreached */ | ||
280 | |||
281 | badframe: | ||
282 | force_sig(SIGSEGV, current); | ||
283 | } | ||
284 | |||
285 | asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | ||
286 | { | ||
287 | struct rt_sigframe32 __user *frame; | ||
288 | sigset_t set; | ||
289 | int sig; | ||
290 | |||
291 | frame = (struct rt_sigframe32 __user *) regs.regs[29]; | ||
292 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
293 | goto badframe; | ||
294 | if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) | ||
295 | goto badframe; | ||
296 | |||
297 | set_current_blocked(&set); | ||
298 | |||
299 | sig = restore_sigcontext32(®s, &frame->rs_uc.uc_mcontext); | ||
300 | if (sig < 0) | ||
301 | goto badframe; | ||
302 | else if (sig) | ||
303 | force_sig(sig, current); | ||
304 | |||
305 | if (compat_restore_altstack(&frame->rs_uc.uc_stack)) | ||
306 | goto badframe; | ||
307 | |||
308 | /* | ||
309 | * Don't let your children do this ... | ||
310 | */ | ||
311 | __asm__ __volatile__( | ||
312 | "move\t$29, %0\n\t" | ||
313 | "j\tsyscall_exit" | ||
314 | :/* no outputs */ | ||
315 | :"r" (®s)); | ||
316 | /* Unreached */ | ||
317 | |||
318 | badframe: | ||
319 | force_sig(SIGSEGV, current); | ||
320 | } | ||
321 | |||
322 | static int setup_frame_32(void *sig_return, struct ksignal *ksig, | ||
323 | struct pt_regs *regs, sigset_t *set) | ||
324 | { | ||
325 | struct sigframe32 __user *frame; | ||
326 | int err = 0; | ||
327 | |||
328 | frame = get_sigframe(ksig, regs, sizeof(*frame)); | ||
329 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | ||
330 | return -EFAULT; | ||
331 | |||
332 | err |= setup_sigcontext32(regs, &frame->sf_sc); | ||
333 | err |= __copy_conv_sigset_to_user(&frame->sf_mask, set); | ||
334 | |||
335 | if (err) | ||
336 | return -EFAULT; | ||
337 | |||
338 | /* | ||
339 | * Arguments to signal handler: | ||
340 | * | ||
341 | * a0 = signal number | ||
342 | * a1 = 0 (should be cause) | ||
343 | * a2 = pointer to struct sigcontext | ||
344 | * | ||
345 | * $25 and c0_epc point to the signal handler, $29 points to the | ||
346 | * struct sigframe. | ||
347 | */ | ||
348 | regs->regs[ 4] = ksig->sig; | ||
349 | regs->regs[ 5] = 0; | ||
350 | regs->regs[ 6] = (unsigned long) &frame->sf_sc; | ||
351 | regs->regs[29] = (unsigned long) frame; | ||
352 | regs->regs[31] = (unsigned long) sig_return; | ||
353 | regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; | ||
354 | |||
355 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | ||
356 | current->comm, current->pid, | ||
357 | frame, regs->cp0_epc, regs->regs[31]); | ||
358 | |||
359 | return 0; | ||
360 | } | ||
361 | |||
362 | static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig, | ||
363 | struct pt_regs *regs, sigset_t *set) | ||
364 | { | ||
365 | struct rt_sigframe32 __user *frame; | ||
366 | int err = 0; | ||
367 | |||
368 | frame = get_sigframe(ksig, regs, sizeof(*frame)); | ||
369 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | ||
370 | return -EFAULT; | ||
371 | |||
372 | /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ | ||
373 | err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info); | ||
374 | |||
375 | /* Create the ucontext. */ | ||
376 | err |= __put_user(0, &frame->rs_uc.uc_flags); | ||
377 | err |= __put_user(0, &frame->rs_uc.uc_link); | ||
378 | err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]); | ||
379 | err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext); | ||
380 | err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); | ||
381 | |||
382 | if (err) | ||
383 | return -EFAULT; | ||
384 | |||
385 | /* | ||
386 | * Arguments to signal handler: | ||
387 | * | ||
388 | * a0 = signal number | ||
389 | * a1 = 0 (should be cause) | ||
390 | * a2 = pointer to ucontext | ||
391 | * | ||
392 | * $25 and c0_epc point to the signal handler, $29 points to | ||
393 | * the struct rt_sigframe32. | ||
394 | */ | ||
395 | regs->regs[ 4] = ksig->sig; | ||
396 | regs->regs[ 5] = (unsigned long) &frame->rs_info; | ||
397 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; | ||
398 | regs->regs[29] = (unsigned long) frame; | ||
399 | regs->regs[31] = (unsigned long) sig_return; | ||
400 | regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; | ||
401 | |||
402 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | ||
403 | current->comm, current->pid, | ||
404 | frame, regs->cp0_epc, regs->regs[31]); | ||
405 | |||
406 | return 0; | ||
407 | } | ||
408 | |||
409 | /* | ||
410 | * o32 compatibility on 64-bit kernels, without DSP ASE | ||
411 | */ | ||
412 | struct mips_abi mips_abi_32 = { | ||
413 | .setup_frame = setup_frame_32, | ||
414 | .setup_rt_frame = setup_rt_frame_32, | ||
415 | .restart = __NR_O32_restart_syscall, | ||
416 | |||
417 | .off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs), | ||
418 | .off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr), | ||
419 | .off_sc_used_math = offsetof(struct sigcontext32, sc_used_math), | ||
420 | |||
421 | .vdso = &vdso_image_o32, | ||
422 | }; | ||
diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c new file mode 100644 index 000000000000..5e169fc5ca5c --- /dev/null +++ b/arch/mips/kernel/signal_o32.c | |||
@@ -0,0 +1,285 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
7 | * Copyright (C) 1994 - 2000, 2006 Ralf Baechle | ||
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | ||
9 | * Copyright (C) 2016, Imagination Technologies Ltd. | ||
10 | */ | ||
11 | #include <linux/compiler.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/signal.h> | ||
14 | #include <linux/uaccess.h> | ||
15 | |||
16 | #include <asm/abi.h> | ||
17 | #include <asm/compat-signal.h> | ||
18 | #include <asm/dsp.h> | ||
19 | #include <asm/sim.h> | ||
20 | #include <asm/unistd.h> | ||
21 | |||
22 | #include "signal-common.h" | ||
23 | |||
/*
 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
 */
#define __NR_O32_restart_syscall	4253

/*
 * Classic (non-RT) o32 signal frame, pushed on the user stack by
 * setup_frame_32() and unwound by sys32_sigreturn().
 *
 * NOTE: these frames live on the user stack and are therefore part of
 * the o32 user ABI — field order and sizes must not change.
 */
struct sigframe32 {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];		/* Was: signal trampoline */
	struct sigcontext32 sf_sc;	/* saved register/FPU context */
	compat_sigset_t sf_mask;	/* blocked-signal mask to restore */
};

/* 32-bit view of struct ucontext for o32 tasks on a 64-bit kernel. */
struct ucontext32 {
	u32 uc_flags;
	s32 uc_link;
	compat_stack_t uc_stack;	/* alternate signal stack */
	struct sigcontext32 uc_mcontext;
	compat_sigset_t uc_sigmask;	/* mask last for extensibility */
};

/*
 * RT o32 signal frame, pushed by setup_rt_frame_32() and unwound by
 * sys32_rt_sigreturn().  Same ABI caveat as struct sigframe32 above.
 */
struct rt_sigframe32 {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];		/* Was: signal trampoline */
	compat_siginfo_t rs_info;	/* compat siginfo for the handler */
	struct ucontext32 rs_uc;	/* ucontext (mcontext, mask, altstack) */
};
50 | |||
/*
 * Dump the CPU state from @regs into the 32-bit sigcontext @sc on the
 * user stack: epc, $1..$31, hi/lo, the extra DSP accumulators and mask
 * when the CPU has the DSP ASE, and finally the FPU context.
 *
 * Returns 0 on success; non-zero if any user access faulted or if
 * protected_save_fp_context() failed.
 */
static int setup_sigcontext32(struct pt_regs *regs,
			      struct sigcontext32 __user *sc)
{
	int err = 0;
	int i;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	/* Store 0 for $0 rather than whatever the pt_regs slot holds */
	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		/* DSP ASE: control mask plus the three extra accumulators */
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
	}

	/*
	 * Save FPU state to signal context. Signal handler
	 * will "inherit" current FPU state.
	 */
	err |= protected_save_fp_context(sc);

	return err;
}
83 | |||
/*
 * Restore CPU state from the 32-bit sigcontext @sc saved on the user
 * stack: epc, hi/lo, the DSP accumulators/mask (when present) and
 * $1..$31, then the FPU context.
 *
 * Returns 0 on success; non-zero if a user access faulted, otherwise
 * the result of protected_restore_fp_context() — the sigreturn callers
 * treat a positive return as a signal number to deliver.
 */
static int restore_sigcontext32(struct pt_regs *regs,
				struct sigcontext32 __user *sc)
{
	int err = 0;
	s32 treg;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		/*
		 * NOTE(review): the mthi/mtlo/wrdsp writes execute even if
		 * the preceding __get_user faulted (treg then unchanged);
		 * err is checked by the caller, which raises SIGSEGV on
		 * failure anyway.
		 */
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	/* $0 is not restored; it was saved as a constant 0 */
	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	return err ?: protected_restore_fp_context(sc);
}
112 | |||
113 | static int setup_frame_32(void *sig_return, struct ksignal *ksig, | ||
114 | struct pt_regs *regs, sigset_t *set) | ||
115 | { | ||
116 | struct sigframe32 __user *frame; | ||
117 | int err = 0; | ||
118 | |||
119 | frame = get_sigframe(ksig, regs, sizeof(*frame)); | ||
120 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | ||
121 | return -EFAULT; | ||
122 | |||
123 | err |= setup_sigcontext32(regs, &frame->sf_sc); | ||
124 | err |= __copy_conv_sigset_to_user(&frame->sf_mask, set); | ||
125 | |||
126 | if (err) | ||
127 | return -EFAULT; | ||
128 | |||
129 | /* | ||
130 | * Arguments to signal handler: | ||
131 | * | ||
132 | * a0 = signal number | ||
133 | * a1 = 0 (should be cause) | ||
134 | * a2 = pointer to struct sigcontext | ||
135 | * | ||
136 | * $25 and c0_epc point to the signal handler, $29 points to the | ||
137 | * struct sigframe. | ||
138 | */ | ||
139 | regs->regs[ 4] = ksig->sig; | ||
140 | regs->regs[ 5] = 0; | ||
141 | regs->regs[ 6] = (unsigned long) &frame->sf_sc; | ||
142 | regs->regs[29] = (unsigned long) frame; | ||
143 | regs->regs[31] = (unsigned long) sig_return; | ||
144 | regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; | ||
145 | |||
146 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | ||
147 | current->comm, current->pid, | ||
148 | frame, regs->cp0_epc, regs->regs[31]); | ||
149 | |||
150 | return 0; | ||
151 | } | ||
152 | |||
/*
 * rt_sigreturn for o32 tasks: tear down the RT signal frame that
 * setup_rt_frame_32() pushed at the user stack pointer ($29).
 * Restores the blocked mask, the register context and the alternate
 * signal stack, then switches $29 to the saved pt_regs and jumps to
 * syscall_exit — it never returns normally.
 */
asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct rt_sigframe32 __user *frame;
	sigset_t set;
	int sig;

	/* The frame was pushed at the user stack pointer */
	frame = (struct rt_sigframe32 __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
		goto badframe;

	set_current_blocked(&set);

	/* A positive return is a signal (e.g. from the FP restore path) */
	sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	if (compat_restore_altstack(&frame->rs_uc.uc_stack))
		goto badframe;

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
189 | |||
190 | static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig, | ||
191 | struct pt_regs *regs, sigset_t *set) | ||
192 | { | ||
193 | struct rt_sigframe32 __user *frame; | ||
194 | int err = 0; | ||
195 | |||
196 | frame = get_sigframe(ksig, regs, sizeof(*frame)); | ||
197 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | ||
198 | return -EFAULT; | ||
199 | |||
200 | /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ | ||
201 | err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info); | ||
202 | |||
203 | /* Create the ucontext. */ | ||
204 | err |= __put_user(0, &frame->rs_uc.uc_flags); | ||
205 | err |= __put_user(0, &frame->rs_uc.uc_link); | ||
206 | err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]); | ||
207 | err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext); | ||
208 | err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); | ||
209 | |||
210 | if (err) | ||
211 | return -EFAULT; | ||
212 | |||
213 | /* | ||
214 | * Arguments to signal handler: | ||
215 | * | ||
216 | * a0 = signal number | ||
217 | * a1 = 0 (should be cause) | ||
218 | * a2 = pointer to ucontext | ||
219 | * | ||
220 | * $25 and c0_epc point to the signal handler, $29 points to | ||
221 | * the struct rt_sigframe32. | ||
222 | */ | ||
223 | regs->regs[ 4] = ksig->sig; | ||
224 | regs->regs[ 5] = (unsigned long) &frame->rs_info; | ||
225 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; | ||
226 | regs->regs[29] = (unsigned long) frame; | ||
227 | regs->regs[31] = (unsigned long) sig_return; | ||
228 | regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; | ||
229 | |||
230 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | ||
231 | current->comm, current->pid, | ||
232 | frame, regs->cp0_epc, regs->regs[31]); | ||
233 | |||
234 | return 0; | ||
235 | } | ||
236 | |||
/*
 * o32 compatibility on 64-bit kernels, without DSP ASE
 */
struct mips_abi mips_abi_32 = {
	/* Signal frame builders for classic and RT signals */
	.setup_frame = setup_frame_32,
	.setup_rt_frame = setup_rt_frame_32,
	/* o32 restart_syscall number (see __NR_O32_restart_syscall) */
	.restart = __NR_O32_restart_syscall,

	/* Offsets into the o32 sigcontext used by the shared FP save/restore code */
	.off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
	.off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
	.off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),

	/* VDSO image providing the o32 signal return trampolines */
	.vdso = &vdso_image_o32,
};
251 | |||
252 | |||
/*
 * sigreturn for o32 tasks: tear down the classic signal frame that
 * setup_frame_32() pushed at the user stack pointer ($29).  Restores
 * the blocked mask and the register context, then switches $29 to the
 * saved pt_regs and jumps to syscall_exit — it never returns normally.
 */
asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct sigframe32 __user *frame;
	sigset_t blocked;
	int sig;

	/* The frame was pushed at the user stack pointer */
	frame = (struct sigframe32 __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
		goto badframe;

	set_current_blocked(&blocked);

	/* A positive return is a signal (e.g. from the FP restore path) */
	sig = restore_sigcontext32(&regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index e02addc0307f..6d0f1321e084 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c | |||
@@ -363,6 +363,7 @@ static int bmips_cpu_disable(void) | |||
363 | pr_info("SMP: CPU%d is offline\n", cpu); | 363 | pr_info("SMP: CPU%d is offline\n", cpu); |
364 | 364 | ||
365 | set_cpu_online(cpu, false); | 365 | set_cpu_online(cpu, false); |
366 | calculate_cpu_foreign_map(); | ||
366 | cpumask_clear_cpu(cpu, &cpu_callin_map); | 367 | cpumask_clear_cpu(cpu, &cpu_callin_map); |
367 | clear_c0_status(IE_IRQ5); | 368 | clear_c0_status(IE_IRQ5); |
368 | 369 | ||
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 05b3201271b4..e9d9fc6c754c 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c | |||
@@ -206,7 +206,7 @@ err_out: | |||
206 | } | 206 | } |
207 | } | 207 | } |
208 | 208 | ||
209 | static void boot_core(unsigned core) | 209 | static void boot_core(unsigned int core, unsigned int vpe_id) |
210 | { | 210 | { |
211 | u32 access, stat, seq_state; | 211 | u32 access, stat, seq_state; |
212 | unsigned timeout; | 212 | unsigned timeout; |
@@ -233,8 +233,9 @@ static void boot_core(unsigned core) | |||
233 | mips_cpc_lock_other(core); | 233 | mips_cpc_lock_other(core); |
234 | 234 | ||
235 | if (mips_cm_revision() >= CM_REV_CM3) { | 235 | if (mips_cm_revision() >= CM_REV_CM3) { |
236 | /* Run VP0 following the reset */ | 236 | /* Run only the requested VP following the reset */ |
237 | write_cpc_co_vp_run(0x1); | 237 | write_cpc_co_vp_stop(0xf); |
238 | write_cpc_co_vp_run(1 << vpe_id); | ||
238 | 239 | ||
239 | /* | 240 | /* |
240 | * Ensure that the VP_RUN register is written before the | 241 | * Ensure that the VP_RUN register is written before the |
@@ -306,7 +307,7 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle) | |||
306 | 307 | ||
307 | if (!test_bit(core, core_power)) { | 308 | if (!test_bit(core, core_power)) { |
308 | /* Boot a VPE on a powered down core */ | 309 | /* Boot a VPE on a powered down core */ |
309 | boot_core(core); | 310 | boot_core(core, vpe_id); |
310 | goto out; | 311 | goto out; |
311 | } | 312 | } |
312 | 313 | ||
@@ -397,6 +398,7 @@ static int cps_cpu_disable(void) | |||
397 | atomic_sub(1 << cpu_vpe_id(¤t_cpu_data), &core_cfg->vpe_mask); | 398 | atomic_sub(1 << cpu_vpe_id(¤t_cpu_data), &core_cfg->vpe_mask); |
398 | smp_mb__after_atomic(); | 399 | smp_mb__after_atomic(); |
399 | set_cpu_online(cpu, false); | 400 | set_cpu_online(cpu, false); |
401 | calculate_cpu_foreign_map(); | ||
400 | cpumask_clear_cpu(cpu, &cpu_callin_map); | 402 | cpumask_clear_cpu(cpu, &cpu_callin_map); |
401 | 403 | ||
402 | return 0; | 404 | return 0; |
@@ -411,14 +413,16 @@ static enum { | |||
411 | 413 | ||
412 | void play_dead(void) | 414 | void play_dead(void) |
413 | { | 415 | { |
414 | unsigned cpu, core; | 416 | unsigned int cpu, core, vpe_id; |
415 | 417 | ||
416 | local_irq_disable(); | 418 | local_irq_disable(); |
417 | idle_task_exit(); | 419 | idle_task_exit(); |
418 | cpu = smp_processor_id(); | 420 | cpu = smp_processor_id(); |
419 | cpu_death = CPU_DEATH_POWER; | 421 | cpu_death = CPU_DEATH_POWER; |
420 | 422 | ||
421 | if (cpu_has_mipsmt) { | 423 | pr_debug("CPU%d going offline\n", cpu); |
424 | |||
425 | if (cpu_has_mipsmt || cpu_has_vp) { | ||
422 | core = cpu_data[cpu].core; | 426 | core = cpu_data[cpu].core; |
423 | 427 | ||
424 | /* Look for another online VPE within the core */ | 428 | /* Look for another online VPE within the core */ |
@@ -439,10 +443,21 @@ void play_dead(void) | |||
439 | complete(&cpu_death_chosen); | 443 | complete(&cpu_death_chosen); |
440 | 444 | ||
441 | if (cpu_death == CPU_DEATH_HALT) { | 445 | if (cpu_death == CPU_DEATH_HALT) { |
442 | /* Halt this TC */ | 446 | vpe_id = cpu_vpe_id(&cpu_data[cpu]); |
443 | write_c0_tchalt(TCHALT_H); | 447 | |
444 | instruction_hazard(); | 448 | pr_debug("Halting core %d VP%d\n", core, vpe_id); |
449 | if (cpu_has_mipsmt) { | ||
450 | /* Halt this TC */ | ||
451 | write_c0_tchalt(TCHALT_H); | ||
452 | instruction_hazard(); | ||
453 | } else if (cpu_has_vp) { | ||
454 | write_cpc_cl_vp_stop(1 << vpe_id); | ||
455 | |||
456 | /* Ensure that the VP_STOP register is written */ | ||
457 | wmb(); | ||
458 | } | ||
445 | } else { | 459 | } else { |
460 | pr_debug("Gating power to core %d\n", core); | ||
446 | /* Power down the core */ | 461 | /* Power down the core */ |
447 | cps_pm_enter_state(CPS_PM_POWER_GATED); | 462 | cps_pm_enter_state(CPS_PM_POWER_GATED); |
448 | } | 463 | } |
@@ -469,6 +484,7 @@ static void wait_for_sibling_halt(void *ptr_cpu) | |||
469 | static void cps_cpu_die(unsigned int cpu) | 484 | static void cps_cpu_die(unsigned int cpu) |
470 | { | 485 | { |
471 | unsigned core = cpu_data[cpu].core; | 486 | unsigned core = cpu_data[cpu].core; |
487 | unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]); | ||
472 | unsigned stat; | 488 | unsigned stat; |
473 | int err; | 489 | int err; |
474 | 490 | ||
@@ -497,10 +513,12 @@ static void cps_cpu_die(unsigned int cpu) | |||
497 | * in which case the CPC will refuse to power down the core. | 513 | * in which case the CPC will refuse to power down the core. |
498 | */ | 514 | */ |
499 | do { | 515 | do { |
516 | mips_cm_lock_other(core, vpe_id); | ||
500 | mips_cpc_lock_other(core); | 517 | mips_cpc_lock_other(core); |
501 | stat = read_cpc_co_stat_conf(); | 518 | stat = read_cpc_co_stat_conf(); |
502 | stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK; | 519 | stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK; |
503 | mips_cpc_unlock_other(); | 520 | mips_cpc_unlock_other(); |
521 | mips_cm_unlock_other(); | ||
504 | } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 && | 522 | } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 && |
505 | stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 && | 523 | stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 && |
506 | stat != CPC_Cx_STAT_CONF_SEQSTATE_U2); | 524 | stat != CPC_Cx_STAT_CONF_SEQSTATE_U2); |
@@ -517,6 +535,12 @@ static void cps_cpu_die(unsigned int cpu) | |||
517 | (void *)(unsigned long)cpu, 1); | 535 | (void *)(unsigned long)cpu, 1); |
518 | if (err) | 536 | if (err) |
519 | panic("Failed to call remote sibling CPU\n"); | 537 | panic("Failed to call remote sibling CPU\n"); |
538 | } else if (cpu_has_vp) { | ||
539 | do { | ||
540 | mips_cm_lock_other(core, vpe_id); | ||
541 | stat = read_cpc_co_vp_running(); | ||
542 | mips_cm_unlock_other(); | ||
543 | } while (stat & (1 << vpe_id)); | ||
520 | } | 544 | } |
521 | } | 545 | } |
522 | 546 | ||
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index f9d01e953acb..f95f094f36e4 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(cpu_core_map); | |||
72 | * A logcal cpu mask containing only one VPE per core to | 72 | * A logcal cpu mask containing only one VPE per core to |
73 | * reduce the number of IPIs on large MT systems. | 73 | * reduce the number of IPIs on large MT systems. |
74 | */ | 74 | */ |
75 | cpumask_t cpu_foreign_map __read_mostly; | 75 | cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly; |
76 | EXPORT_SYMBOL(cpu_foreign_map); | 76 | EXPORT_SYMBOL(cpu_foreign_map); |
77 | 77 | ||
78 | /* representing cpus for which sibling maps can be computed */ | 78 | /* representing cpus for which sibling maps can be computed */ |
@@ -124,7 +124,7 @@ static inline void set_cpu_core_map(int cpu) | |||
124 | * Calculate a new cpu_foreign_map mask whenever a | 124 | * Calculate a new cpu_foreign_map mask whenever a |
125 | * new cpu appears or disappears. | 125 | * new cpu appears or disappears. |
126 | */ | 126 | */ |
127 | static inline void calculate_cpu_foreign_map(void) | 127 | void calculate_cpu_foreign_map(void) |
128 | { | 128 | { |
129 | int i, k, core_present; | 129 | int i, k, core_present; |
130 | cpumask_t temp_foreign_map; | 130 | cpumask_t temp_foreign_map; |
@@ -141,7 +141,9 @@ static inline void calculate_cpu_foreign_map(void) | |||
141 | cpumask_set_cpu(i, &temp_foreign_map); | 141 | cpumask_set_cpu(i, &temp_foreign_map); |
142 | } | 142 | } |
143 | 143 | ||
144 | cpumask_copy(&cpu_foreign_map, &temp_foreign_map); | 144 | for_each_online_cpu(i) |
145 | cpumask_andnot(&cpu_foreign_map[i], | ||
146 | &temp_foreign_map, &cpu_sibling_map[i]); | ||
145 | } | 147 | } |
146 | 148 | ||
147 | struct plat_smp_ops *mp_ops; | 149 | struct plat_smp_ops *mp_ops; |
@@ -344,16 +346,9 @@ asmlinkage void start_secondary(void) | |||
344 | static void stop_this_cpu(void *dummy) | 346 | static void stop_this_cpu(void *dummy) |
345 | { | 347 | { |
346 | /* | 348 | /* |
347 | * Remove this CPU. Be a bit slow here and | 349 | * Remove this CPU: |
348 | * set the bits for every online CPU so we don't miss | ||
349 | * any IPI whilst taking this VPE down. | ||
350 | */ | 350 | */ |
351 | 351 | ||
352 | cpumask_copy(&cpu_foreign_map, cpu_online_mask); | ||
353 | |||
354 | /* Make it visible to every other CPU */ | ||
355 | smp_mb(); | ||
356 | |||
357 | set_cpu_online(smp_processor_id(), false); | 352 | set_cpu_online(smp_processor_id(), false); |
358 | calculate_cpu_foreign_map(); | 353 | calculate_cpu_foreign_map(); |
359 | local_irq_disable(); | 354 | local_irq_disable(); |
@@ -512,10 +507,17 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l | |||
512 | smp_on_other_tlbs(flush_tlb_range_ipi, &fd); | 507 | smp_on_other_tlbs(flush_tlb_range_ipi, &fd); |
513 | } else { | 508 | } else { |
514 | unsigned int cpu; | 509 | unsigned int cpu; |
510 | int exec = vma->vm_flags & VM_EXEC; | ||
515 | 511 | ||
516 | for_each_online_cpu(cpu) { | 512 | for_each_online_cpu(cpu) { |
513 | /* | ||
514 | * flush_cache_range() will only fully flush icache if | ||
515 | * the VMA is executable, otherwise we must invalidate | ||
516 | * ASID without it appearing to has_valid_asid() as if | ||
517 | * mm has been completely unused by that CPU. | ||
518 | */ | ||
517 | if (cpu != smp_processor_id() && cpu_context(cpu, mm)) | 519 | if (cpu != smp_processor_id() && cpu_context(cpu, mm)) |
518 | cpu_context(cpu, mm) = 0; | 520 | cpu_context(cpu, mm) = !exec; |
519 | } | 521 | } |
520 | } | 522 | } |
521 | local_flush_tlb_range(vma, start, end); | 523 | local_flush_tlb_range(vma, start, end); |
@@ -560,8 +562,14 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
560 | unsigned int cpu; | 562 | unsigned int cpu; |
561 | 563 | ||
562 | for_each_online_cpu(cpu) { | 564 | for_each_online_cpu(cpu) { |
565 | /* | ||
566 | * flush_cache_page() only does partial flushes, so | ||
567 | * invalidate ASID without it appearing to | ||
568 | * has_valid_asid() as if mm has been completely unused | ||
569 | * by that CPU. | ||
570 | */ | ||
563 | if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) | 571 | if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) |
564 | cpu_context(cpu, vma->vm_mm) = 0; | 572 | cpu_context(cpu, vma->vm_mm) = 1; |
565 | } | 573 | } |
566 | } | 574 | } |
567 | local_flush_tlb_page(vma, page); | 575 | local_flush_tlb_page(vma, page); |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 6fb4704bd156..3de85be2486a 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -704,6 +704,7 @@ asmlinkage void do_ov(struct pt_regs *regs) | |||
704 | int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) | 704 | int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) |
705 | { | 705 | { |
706 | struct siginfo si = { 0 }; | 706 | struct siginfo si = { 0 }; |
707 | struct vm_area_struct *vma; | ||
707 | 708 | ||
708 | switch (sig) { | 709 | switch (sig) { |
709 | case 0: | 710 | case 0: |
@@ -744,7 +745,8 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) | |||
744 | si.si_addr = fault_addr; | 745 | si.si_addr = fault_addr; |
745 | si.si_signo = sig; | 746 | si.si_signo = sig; |
746 | down_read(¤t->mm->mmap_sem); | 747 | down_read(¤t->mm->mmap_sem); |
747 | if (find_vma(current->mm, (unsigned long)fault_addr)) | 748 | vma = find_vma(current->mm, (unsigned long)fault_addr); |
749 | if (vma && (vma->vm_start <= (unsigned long)fault_addr)) | ||
748 | si.si_code = SEGV_ACCERR; | 750 | si.si_code = SEGV_ACCERR; |
749 | else | 751 | else |
750 | si.si_code = SEGV_MAPERR; | 752 | si.si_code = SEGV_MAPERR; |
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 54e1663ce639..9abe447a4b48 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c | |||
@@ -107,6 +107,16 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
107 | if (down_write_killable(&mm->mmap_sem)) | 107 | if (down_write_killable(&mm->mmap_sem)) |
108 | return -EINTR; | 108 | return -EINTR; |
109 | 109 | ||
110 | /* Map delay slot emulation page */ | ||
111 | base = mmap_region(NULL, STACK_TOP, PAGE_SIZE, | ||
112 | VM_READ|VM_WRITE|VM_EXEC| | ||
113 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, | ||
114 | 0); | ||
115 | if (IS_ERR_VALUE(base)) { | ||
116 | ret = base; | ||
117 | goto out; | ||
118 | } | ||
119 | |||
110 | /* | 120 | /* |
111 | * Determine total area size. This includes the VDSO data itself, the | 121 | * Determine total area size. This includes the VDSO data itself, the |
112 | * data page, and the GIC user page if present. Always create a mapping | 122 | * data page, and the GIC user page if present. Always create a mapping |
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index ff17669e30a3..8ac0e5994ed2 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c | |||
@@ -66,7 +66,7 @@ int gic_present; | |||
66 | #endif | 66 | #endif |
67 | 67 | ||
68 | static int exin_avail; | 68 | static int exin_avail; |
69 | static struct resource ltq_eiu_irq[MAX_EIU]; | 69 | static u32 ltq_eiu_irq[MAX_EIU]; |
70 | static void __iomem *ltq_icu_membase[MAX_IM]; | 70 | static void __iomem *ltq_icu_membase[MAX_IM]; |
71 | static void __iomem *ltq_eiu_membase; | 71 | static void __iomem *ltq_eiu_membase; |
72 | static struct irq_domain *ltq_domain; | 72 | static struct irq_domain *ltq_domain; |
@@ -75,7 +75,7 @@ static int ltq_perfcount_irq; | |||
75 | int ltq_eiu_get_irq(int exin) | 75 | int ltq_eiu_get_irq(int exin) |
76 | { | 76 | { |
77 | if (exin < exin_avail) | 77 | if (exin < exin_avail) |
78 | return ltq_eiu_irq[exin].start; | 78 | return ltq_eiu_irq[exin]; |
79 | return -1; | 79 | return -1; |
80 | } | 80 | } |
81 | 81 | ||
@@ -125,8 +125,8 @@ static int ltq_eiu_settype(struct irq_data *d, unsigned int type) | |||
125 | { | 125 | { |
126 | int i; | 126 | int i; |
127 | 127 | ||
128 | for (i = 0; i < MAX_EIU; i++) { | 128 | for (i = 0; i < exin_avail; i++) { |
129 | if (d->hwirq == ltq_eiu_irq[i].start) { | 129 | if (d->hwirq == ltq_eiu_irq[i]) { |
130 | int val = 0; | 130 | int val = 0; |
131 | int edge = 0; | 131 | int edge = 0; |
132 | 132 | ||
@@ -173,8 +173,8 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d) | |||
173 | int i; | 173 | int i; |
174 | 174 | ||
175 | ltq_enable_irq(d); | 175 | ltq_enable_irq(d); |
176 | for (i = 0; i < MAX_EIU; i++) { | 176 | for (i = 0; i < exin_avail; i++) { |
177 | if (d->hwirq == ltq_eiu_irq[i].start) { | 177 | if (d->hwirq == ltq_eiu_irq[i]) { |
178 | /* by default we are low level triggered */ | 178 | /* by default we are low level triggered */ |
179 | ltq_eiu_settype(d, IRQF_TRIGGER_LOW); | 179 | ltq_eiu_settype(d, IRQF_TRIGGER_LOW); |
180 | /* clear all pending */ | 180 | /* clear all pending */ |
@@ -195,8 +195,8 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d) | |||
195 | int i; | 195 | int i; |
196 | 196 | ||
197 | ltq_disable_irq(d); | 197 | ltq_disable_irq(d); |
198 | for (i = 0; i < MAX_EIU; i++) { | 198 | for (i = 0; i < exin_avail; i++) { |
199 | if (d->hwirq == ltq_eiu_irq[i].start) { | 199 | if (d->hwirq == ltq_eiu_irq[i]) { |
200 | /* disable */ | 200 | /* disable */ |
201 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i), | 201 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i), |
202 | LTQ_EIU_EXIN_INEN); | 202 | LTQ_EIU_EXIN_INEN); |
@@ -206,7 +206,7 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d) | |||
206 | } | 206 | } |
207 | 207 | ||
208 | static struct irq_chip ltq_irq_type = { | 208 | static struct irq_chip ltq_irq_type = { |
209 | "icu", | 209 | .name = "icu", |
210 | .irq_enable = ltq_enable_irq, | 210 | .irq_enable = ltq_enable_irq, |
211 | .irq_disable = ltq_disable_irq, | 211 | .irq_disable = ltq_disable_irq, |
212 | .irq_unmask = ltq_enable_irq, | 212 | .irq_unmask = ltq_enable_irq, |
@@ -216,7 +216,7 @@ static struct irq_chip ltq_irq_type = { | |||
216 | }; | 216 | }; |
217 | 217 | ||
218 | static struct irq_chip ltq_eiu_type = { | 218 | static struct irq_chip ltq_eiu_type = { |
219 | "eiu", | 219 | .name = "eiu", |
220 | .irq_startup = ltq_startup_eiu_irq, | 220 | .irq_startup = ltq_startup_eiu_irq, |
221 | .irq_shutdown = ltq_shutdown_eiu_irq, | 221 | .irq_shutdown = ltq_shutdown_eiu_irq, |
222 | .irq_enable = ltq_enable_irq, | 222 | .irq_enable = ltq_enable_irq, |
@@ -341,10 +341,10 @@ static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) | |||
341 | return 0; | 341 | return 0; |
342 | 342 | ||
343 | for (i = 0; i < exin_avail; i++) | 343 | for (i = 0; i < exin_avail; i++) |
344 | if (hw == ltq_eiu_irq[i].start) | 344 | if (hw == ltq_eiu_irq[i]) |
345 | chip = <q_eiu_type; | 345 | chip = <q_eiu_type; |
346 | 346 | ||
347 | irq_set_chip_and_handler(hw, chip, handle_level_irq); | 347 | irq_set_chip_and_handler(irq, chip, handle_level_irq); |
348 | 348 | ||
349 | return 0; | 349 | return 0; |
350 | } | 350 | } |
@@ -439,14 +439,15 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) | |||
439 | eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); | 439 | eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); |
440 | if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { | 440 | if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { |
441 | /* find out how many external irq sources we have */ | 441 | /* find out how many external irq sources we have */ |
442 | exin_avail = of_irq_count(eiu_node); | 442 | exin_avail = of_property_count_u32_elems(eiu_node, |
443 | "lantiq,eiu-irqs"); | ||
443 | 444 | ||
444 | if (exin_avail > MAX_EIU) | 445 | if (exin_avail > MAX_EIU) |
445 | exin_avail = MAX_EIU; | 446 | exin_avail = MAX_EIU; |
446 | 447 | ||
447 | ret = of_irq_to_resource_table(eiu_node, | 448 | ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs", |
448 | ltq_eiu_irq, exin_avail); | 449 | ltq_eiu_irq, exin_avail); |
449 | if (ret != exin_avail) | 450 | if (ret) |
450 | panic("failed to load external irq resources"); | 451 | panic("failed to load external irq resources"); |
451 | 452 | ||
452 | if (!request_mem_region(res.start, resource_size(&res), | 453 | if (!request_mem_region(res.start, resource_size(&res), |
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c index 5f693ac77a0d..4cbb000e778e 100644 --- a/arch/mips/lantiq/prom.c +++ b/arch/mips/lantiq/prom.c | |||
@@ -74,8 +74,8 @@ void __init plat_mem_setup(void) | |||
74 | 74 | ||
75 | set_io_port_base((unsigned long) KSEG1); | 75 | set_io_port_base((unsigned long) KSEG1); |
76 | 76 | ||
77 | if (fw_arg0 == -2) /* UHI interface */ | 77 | if (fw_passed_dtb) /* UHI interface */ |
78 | dtb = (void *)fw_arg1; | 78 | dtb = (void *)fw_passed_dtb; |
79 | else if (__dtb_start != __dtb_end) | 79 | else if (__dtb_start != __dtb_end) |
80 | dtb = (void *)__dtb_start; | 80 | dtb = (void *)__dtb_start; |
81 | else | 81 | else |
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c index 249039af66c4..4788bea62a6a 100644 --- a/arch/mips/loongson64/loongson-3/hpet.c +++ b/arch/mips/loongson64/loongson-3/hpet.c | |||
@@ -13,8 +13,8 @@ | |||
13 | #define SMBUS_PCI_REG64 0x64 | 13 | #define SMBUS_PCI_REG64 0x64 |
14 | #define SMBUS_PCI_REGB4 0xb4 | 14 | #define SMBUS_PCI_REGB4 0xb4 |
15 | 15 | ||
16 | #define HPET_MIN_CYCLES 64 | 16 | #define HPET_MIN_CYCLES 16 |
17 | #define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1)) | 17 | #define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12) |
18 | 18 | ||
19 | static DEFINE_SPINLOCK(hpet_lock); | 19 | static DEFINE_SPINLOCK(hpet_lock); |
20 | DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device); | 20 | DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device); |
@@ -157,14 +157,14 @@ static int hpet_tick_resume(struct clock_event_device *evt) | |||
157 | static int hpet_next_event(unsigned long delta, | 157 | static int hpet_next_event(unsigned long delta, |
158 | struct clock_event_device *evt) | 158 | struct clock_event_device *evt) |
159 | { | 159 | { |
160 | unsigned int cnt; | 160 | u32 cnt; |
161 | int res; | 161 | s32 res; |
162 | 162 | ||
163 | cnt = hpet_read(HPET_COUNTER); | 163 | cnt = hpet_read(HPET_COUNTER); |
164 | cnt += delta; | 164 | cnt += (u32) delta; |
165 | hpet_write(HPET_T0_CMP, cnt); | 165 | hpet_write(HPET_T0_CMP, cnt); |
166 | 166 | ||
167 | res = (int)(cnt - hpet_read(HPET_COUNTER)); | 167 | res = (s32)(cnt - hpet_read(HPET_COUNTER)); |
168 | 168 | ||
169 | return res < HPET_MIN_CYCLES ? -ETIME : 0; | 169 | return res < HPET_MIN_CYCLES ? -ETIME : 0; |
170 | } | 170 | } |
@@ -230,7 +230,7 @@ void __init setup_hpet_timer(void) | |||
230 | 230 | ||
231 | cd = &per_cpu(hpet_clockevent_device, cpu); | 231 | cd = &per_cpu(hpet_clockevent_device, cpu); |
232 | cd->name = "hpet"; | 232 | cd->name = "hpet"; |
233 | cd->rating = 320; | 233 | cd->rating = 100; |
234 | cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; | 234 | cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; |
235 | cd->set_state_shutdown = hpet_set_state_shutdown; | 235 | cd->set_state_shutdown = hpet_set_state_shutdown; |
236 | cd->set_state_periodic = hpet_set_state_periodic; | 236 | cd->set_state_periodic = hpet_set_state_periodic; |
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c index e59759af63d9..2fec6f753a35 100644 --- a/arch/mips/loongson64/loongson-3/smp.c +++ b/arch/mips/loongson64/loongson-3/smp.c | |||
@@ -417,6 +417,7 @@ static int loongson3_cpu_disable(void) | |||
417 | return -EBUSY; | 417 | return -EBUSY; |
418 | 418 | ||
419 | set_cpu_online(cpu, false); | 419 | set_cpu_online(cpu, false); |
420 | calculate_cpu_foreign_map(); | ||
420 | cpumask_clear_cpu(cpu, &cpu_callin_map); | 421 | cpumask_clear_cpu(cpu, &cpu_callin_map); |
421 | local_irq_save(flags); | 422 | local_irq_save(flags); |
422 | fixup_irqs(); | 423 | fixup_irqs(); |
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 92d15e68abb6..36775d20b0e7 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c | |||
@@ -434,8 +434,8 @@ static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr) | |||
434 | * a single subroutine should be used across both | 434 | * a single subroutine should be used across both |
435 | * modules. | 435 | * modules. |
436 | */ | 436 | */ |
437 | static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | 437 | int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, |
438 | unsigned long *contpc) | 438 | unsigned long *contpc) |
439 | { | 439 | { |
440 | union mips_instruction insn = (union mips_instruction)dec_insn.insn; | 440 | union mips_instruction insn = (union mips_instruction)dec_insn.insn; |
441 | unsigned int fcr31; | 441 | unsigned int fcr31; |
@@ -1268,7 +1268,7 @@ branch_common: | |||
1268 | * instruction in the dslot. | 1268 | * instruction in the dslot. |
1269 | */ | 1269 | */ |
1270 | sig = mips_dsemul(xcp, ir, | 1270 | sig = mips_dsemul(xcp, ir, |
1271 | contpc); | 1271 | bcpc, contpc); |
1272 | if (sig < 0) | 1272 | if (sig < 0) |
1273 | break; | 1273 | break; |
1274 | if (sig) | 1274 | if (sig) |
@@ -1323,7 +1323,7 @@ branch_common: | |||
1323 | * Single step the non-cp1 | 1323 | * Single step the non-cp1 |
1324 | * instruction in the dslot | 1324 | * instruction in the dslot |
1325 | */ | 1325 | */ |
1326 | sig = mips_dsemul(xcp, ir, contpc); | 1326 | sig = mips_dsemul(xcp, ir, bcpc, contpc); |
1327 | if (sig < 0) | 1327 | if (sig < 0) |
1328 | break; | 1328 | break; |
1329 | if (sig) | 1329 | if (sig) |
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c index 47074887e64c..72a4642eee2c 100644 --- a/arch/mips/math-emu/dsemul.c +++ b/arch/mips/math-emu/dsemul.c | |||
@@ -1,3 +1,6 @@ | |||
1 | #include <linux/err.h> | ||
2 | #include <linux/slab.h> | ||
3 | |||
1 | #include <asm/branch.h> | 4 | #include <asm/branch.h> |
2 | #include <asm/cacheflush.h> | 5 | #include <asm/cacheflush.h> |
3 | #include <asm/fpu_emulator.h> | 6 | #include <asm/fpu_emulator.h> |
@@ -5,43 +8,211 @@ | |||
5 | #include <asm/mipsregs.h> | 8 | #include <asm/mipsregs.h> |
6 | #include <asm/uaccess.h> | 9 | #include <asm/uaccess.h> |
7 | 10 | ||
8 | #include "ieee754.h" | 11 | /** |
9 | 12 | * struct emuframe - The 'emulation' frame structure | |
10 | /* | 13 | * @emul: The instruction to 'emulate'. |
11 | * Emulate the arbitrary instruction ir at xcp->cp0_epc. Required when | 14 | * @badinst: A break instruction to cause a return to the kernel. |
12 | * we have to emulate the instruction in a COP1 branch delay slot. Do | ||
13 | * not change cp0_epc due to the instruction | ||
14 | * | 15 | * |
15 | * According to the spec: | 16 | * This structure defines the frames placed within the delay slot emulation |
16 | * 1) it shouldn't be a branch :-) | 17 | * page in response to a call to mips_dsemul(). Each thread may be allocated |
17 | * 2) it can be a COP instruction :-( | 18 | * only one frame at any given time. The kernel stores within it the |
18 | * 3) if we are tring to run a protected memory space we must take | 19 | * instruction to be 'emulated' followed by a break instruction, then |
19 | * special care on memory access instructions :-( | 20 | * executes the frame in user mode. The break causes a trap to the kernel |
20 | */ | 21 | * which leads to do_dsemulret() being called unless the instruction in |
21 | 22 | * @emul causes a trap itself, is a branch, or a signal is delivered to | |
22 | /* | 23 | * the thread. In these cases the allocated frame will either be reused by |
23 | * "Trampoline" return routine to catch exception following | 24 | * a subsequent delay slot 'emulation', or be freed during signal delivery or |
24 | * execution of delay-slot instruction execution. | 25 | * upon thread exit. |
26 | * | ||
27 | * This approach is used because: | ||
28 | * | ||
29 | * - Actually emulating all instructions isn't feasible. We would need to | ||
30 | * be able to handle instructions from all revisions of the MIPS ISA, | ||
31 | * all ASEs & all vendor instruction set extensions. This would be a | ||
32 | * whole lot of work & continual maintenance burden as new instructions | ||
33 | * are introduced, and in the case of some vendor extensions may not | ||
34 | * even be possible. Thus we need to take the approach of actually | ||
35 | * executing the instruction. | ||
36 | * | ||
37 | * - We must execute the instruction within user context. If we were to | ||
38 | * execute the instruction in kernel mode then it would have access to | ||
39 | * kernel resources without very careful checks, leaving us with a | ||
40 | * high potential for security or stability issues to arise. | ||
41 | * | ||
42 | * - We used to place the frame on the users stack, but this requires | ||
43 | * that the stack be executable. This is bad for security so the | ||
44 | * per-process page is now used instead. | ||
45 | * | ||
46 | * - The instruction in @emul may be something entirely invalid for a | ||
47 | * delay slot. The user may (intentionally or otherwise) place a branch | ||
48 | * in a delay slot, or a kernel mode instruction, or something else | ||
49 | * which generates an exception. Thus we can't rely upon the break in | ||
50 | * @badinst always being hit. For this reason we track the index of the | ||
51 | * frame allocated to each thread, allowing us to clean it up at later | ||
52 | * points such as signal delivery or thread exit. | ||
53 | * | ||
54 | * - The user may generate a fake struct emuframe if they wish, invoking | ||
55 | * the BRK_MEMU break instruction themselves. We must therefore not | ||
56 | * trust that BRK_MEMU means there's actually a valid frame allocated | ||
57 | * to the thread, and must not allow the user to do anything they | ||
58 | * couldn't already. | ||
25 | */ | 59 | */ |
26 | |||
27 | struct emuframe { | 60 | struct emuframe { |
28 | mips_instruction emul; | 61 | mips_instruction emul; |
29 | mips_instruction badinst; | 62 | mips_instruction badinst; |
30 | mips_instruction cookie; | ||
31 | unsigned long epc; | ||
32 | }; | 63 | }; |
33 | 64 | ||
34 | /* | 65 | static const int emupage_frame_count = PAGE_SIZE / sizeof(struct emuframe); |
35 | * Set up an emulation frame for instruction IR, from a delay slot of | 66 | |
36 | * a branch jumping to CPC. Return 0 if successful, -1 if no emulation | 67 | static inline __user struct emuframe *dsemul_page(void) |
37 | * required, otherwise a signal number causing a frame setup failure. | 68 | { |
38 | */ | 69 | return (__user struct emuframe *)STACK_TOP; |
39 | int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc) | 70 | } |
71 | |||
72 | static int alloc_emuframe(void) | ||
73 | { | ||
74 | mm_context_t *mm_ctx = ¤t->mm->context; | ||
75 | int idx; | ||
76 | |||
77 | retry: | ||
78 | spin_lock(&mm_ctx->bd_emupage_lock); | ||
79 | |||
80 | /* Ensure we have an allocation bitmap */ | ||
81 | if (!mm_ctx->bd_emupage_allocmap) { | ||
82 | mm_ctx->bd_emupage_allocmap = | ||
83 | kcalloc(BITS_TO_LONGS(emupage_frame_count), | ||
84 | sizeof(unsigned long), | ||
85 | GFP_ATOMIC); | ||
86 | |||
87 | if (!mm_ctx->bd_emupage_allocmap) { | ||
88 | idx = BD_EMUFRAME_NONE; | ||
89 | goto out_unlock; | ||
90 | } | ||
91 | } | ||
92 | |||
93 | /* Attempt to allocate a single bit/frame */ | ||
94 | idx = bitmap_find_free_region(mm_ctx->bd_emupage_allocmap, | ||
95 | emupage_frame_count, 0); | ||
96 | if (idx < 0) { | ||
97 | /* | ||
98 | * Failed to allocate a frame. We'll wait until one becomes | ||
99 | * available. We unlock the page so that other threads actually | ||
100 | * get the opportunity to free their frames, which means | ||
101 | * technically the result of bitmap_full may be incorrect. | ||
102 | * However the worst case is that we repeat all this and end up | ||
103 | * back here again. | ||
104 | */ | ||
105 | spin_unlock(&mm_ctx->bd_emupage_lock); | ||
106 | if (!wait_event_killable(mm_ctx->bd_emupage_queue, | ||
107 | !bitmap_full(mm_ctx->bd_emupage_allocmap, | ||
108 | emupage_frame_count))) | ||
109 | goto retry; | ||
110 | |||
111 | /* Received a fatal signal - just give in */ | ||
112 | return BD_EMUFRAME_NONE; | ||
113 | } | ||
114 | |||
115 | /* Success! */ | ||
116 | pr_debug("allocate emuframe %d to %d\n", idx, current->pid); | ||
117 | out_unlock: | ||
118 | spin_unlock(&mm_ctx->bd_emupage_lock); | ||
119 | return idx; | ||
120 | } | ||
121 | |||
122 | static void free_emuframe(int idx, struct mm_struct *mm) | ||
123 | { | ||
124 | mm_context_t *mm_ctx = &mm->context; | ||
125 | |||
126 | spin_lock(&mm_ctx->bd_emupage_lock); | ||
127 | |||
128 | pr_debug("free emuframe %d from %d\n", idx, current->pid); | ||
129 | bitmap_clear(mm_ctx->bd_emupage_allocmap, idx, 1); | ||
130 | |||
131 | /* If some thread is waiting for a frame, now's its chance */ | ||
132 | wake_up(&mm_ctx->bd_emupage_queue); | ||
133 | |||
134 | spin_unlock(&mm_ctx->bd_emupage_lock); | ||
135 | } | ||
136 | |||
137 | static bool within_emuframe(struct pt_regs *regs) | ||
138 | { | ||
139 | unsigned long base = (unsigned long)dsemul_page(); | ||
140 | |||
141 | if (regs->cp0_epc < base) | ||
142 | return false; | ||
143 | if (regs->cp0_epc >= (base + PAGE_SIZE)) | ||
144 | return false; | ||
145 | |||
146 | return true; | ||
147 | } | ||
148 | |||
149 | bool dsemul_thread_cleanup(struct task_struct *tsk) | ||
150 | { | ||
151 | int fr_idx; | ||
152 | |||
153 | /* Clear any allocated frame, retrieving its index */ | ||
154 | fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE); | ||
155 | |||
156 | /* If no frame was allocated, we're done */ | ||
157 | if (fr_idx == BD_EMUFRAME_NONE) | ||
158 | return false; | ||
159 | |||
160 | task_lock(tsk); | ||
161 | |||
162 | /* Free the frame that this thread had allocated */ | ||
163 | if (tsk->mm) | ||
164 | free_emuframe(fr_idx, tsk->mm); | ||
165 | |||
166 | task_unlock(tsk); | ||
167 | return true; | ||
168 | } | ||
169 | |||
170 | bool dsemul_thread_rollback(struct pt_regs *regs) | ||
171 | { | ||
172 | struct emuframe __user *fr; | ||
173 | int fr_idx; | ||
174 | |||
175 | /* Do nothing if we're not executing from a frame */ | ||
176 | if (!within_emuframe(regs)) | ||
177 | return false; | ||
178 | |||
179 | /* Find the frame being executed */ | ||
180 | fr_idx = atomic_read(¤t->thread.bd_emu_frame); | ||
181 | if (fr_idx == BD_EMUFRAME_NONE) | ||
182 | return false; | ||
183 | fr = &dsemul_page()[fr_idx]; | ||
184 | |||
185 | /* | ||
186 | * If the PC is at the emul instruction, roll back to the branch. If | ||
187 | * PC is at the badinst (break) instruction, we've already emulated the | ||
188 | * instruction so progress to the continue PC. If it's anything else | ||
189 | * then something is amiss & the user has branched into some other area | ||
190 | * of the emupage - we'll free the allocated frame anyway. | ||
191 | */ | ||
192 | if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->emul) | ||
193 | regs->cp0_epc = current->thread.bd_emu_branch_pc; | ||
194 | else if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->badinst) | ||
195 | regs->cp0_epc = current->thread.bd_emu_cont_pc; | ||
196 | |||
197 | atomic_set(¤t->thread.bd_emu_frame, BD_EMUFRAME_NONE); | ||
198 | free_emuframe(fr_idx, current->mm); | ||
199 | return true; | ||
200 | } | ||
201 | |||
202 | void dsemul_mm_cleanup(struct mm_struct *mm) | ||
203 | { | ||
204 | mm_context_t *mm_ctx = &mm->context; | ||
205 | |||
206 | kfree(mm_ctx->bd_emupage_allocmap); | ||
207 | } | ||
208 | |||
209 | int mips_dsemul(struct pt_regs *regs, mips_instruction ir, | ||
210 | unsigned long branch_pc, unsigned long cont_pc) | ||
40 | { | 211 | { |
41 | int isa16 = get_isa16_mode(regs->cp0_epc); | 212 | int isa16 = get_isa16_mode(regs->cp0_epc); |
42 | mips_instruction break_math; | 213 | mips_instruction break_math; |
43 | struct emuframe __user *fr; | 214 | struct emuframe __user *fr; |
44 | int err; | 215 | int err, fr_idx; |
45 | 216 | ||
46 | /* NOP is easy */ | 217 | /* NOP is easy */ |
47 | if (ir == 0) | 218 | if (ir == 0) |
@@ -68,30 +239,20 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc) | |||
68 | } | 239 | } |
69 | } | 240 | } |
70 | 241 | ||
71 | pr_debug("dsemul %lx %lx\n", regs->cp0_epc, cpc); | 242 | pr_debug("dsemul 0x%08lx cont at 0x%08lx\n", regs->cp0_epc, cont_pc); |
72 | 243 | ||
73 | /* | 244 | /* Allocate a frame if we don't already have one */ |
74 | * The strategy is to push the instruction onto the user stack | 245 | fr_idx = atomic_read(¤t->thread.bd_emu_frame); |
75 | * and put a trap after it which we can catch and jump to | 246 | if (fr_idx == BD_EMUFRAME_NONE) |
76 | * the required address any alternative apart from full | 247 | fr_idx = alloc_emuframe(); |
77 | * instruction emulation!!. | 248 | if (fr_idx == BD_EMUFRAME_NONE) |
78 | * | ||
79 | * Algorithmics used a system call instruction, and | ||
80 | * borrowed that vector. MIPS/Linux version is a bit | ||
81 | * more heavyweight in the interests of portability and | ||
82 | * multiprocessor support. For Linux we use a BREAK 514 | ||
83 | * instruction causing a breakpoint exception. | ||
84 | */ | ||
85 | break_math = BREAK_MATH(isa16); | ||
86 | |||
87 | /* Ensure that the two instructions are in the same cache line */ | ||
88 | fr = (struct emuframe __user *) | ||
89 | ((regs->regs[29] - sizeof(struct emuframe)) & ~0x7); | ||
90 | |||
91 | /* Verify that the stack pointer is not completely insane */ | ||
92 | if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe)))) | ||
93 | return SIGBUS; | 249 | return SIGBUS; |
250 | fr = &dsemul_page()[fr_idx]; | ||
251 | |||
252 | /* Retrieve the appropriately encoded break instruction */ | ||
253 | break_math = BREAK_MATH(isa16); | ||
94 | 254 | ||
255 | /* Write the instructions to the frame */ | ||
95 | if (isa16) { | 256 | if (isa16) { |
96 | err = __put_user(ir >> 16, | 257 | err = __put_user(ir >> 16, |
97 | (u16 __user *)(&fr->emul)); | 258 | (u16 __user *)(&fr->emul)); |
@@ -106,84 +267,36 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc) | |||
106 | err |= __put_user(break_math, &fr->badinst); | 267 | err |= __put_user(break_math, &fr->badinst); |
107 | } | 268 | } |
108 | 269 | ||
109 | err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie); | ||
110 | err |= __put_user(cpc, &fr->epc); | ||
111 | |||
112 | if (unlikely(err)) { | 270 | if (unlikely(err)) { |
113 | MIPS_FPU_EMU_INC_STATS(errors); | 271 | MIPS_FPU_EMU_INC_STATS(errors); |
272 | free_emuframe(fr_idx, current->mm); | ||
114 | return SIGBUS; | 273 | return SIGBUS; |
115 | } | 274 | } |
116 | 275 | ||
276 | /* Record the PC of the branch, PC to continue from & frame index */ | ||
277 | current->thread.bd_emu_branch_pc = branch_pc; | ||
278 | current->thread.bd_emu_cont_pc = cont_pc; | ||
279 | atomic_set(¤t->thread.bd_emu_frame, fr_idx); | ||
280 | |||
281 | /* Change user register context to execute the frame */ | ||
117 | regs->cp0_epc = (unsigned long)&fr->emul | isa16; | 282 | regs->cp0_epc = (unsigned long)&fr->emul | isa16; |
118 | 283 | ||
284 | /* Ensure the icache observes our newly written frame */ | ||
119 | flush_cache_sigtramp((unsigned long)&fr->emul); | 285 | flush_cache_sigtramp((unsigned long)&fr->emul); |
120 | 286 | ||
121 | return 0; | 287 | return 0; |
122 | } | 288 | } |
123 | 289 | ||
124 | int do_dsemulret(struct pt_regs *xcp) | 290 | bool do_dsemulret(struct pt_regs *xcp) |
125 | { | 291 | { |
126 | int isa16 = get_isa16_mode(xcp->cp0_epc); | 292 | /* Cleanup the allocated frame, returning if there wasn't one */ |
127 | struct emuframe __user *fr; | 293 | if (!dsemul_thread_cleanup(current)) { |
128 | unsigned long epc; | ||
129 | u32 insn, cookie; | ||
130 | int err = 0; | ||
131 | u16 instr[2]; | ||
132 | |||
133 | fr = (struct emuframe __user *) | ||
134 | (msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction)); | ||
135 | |||
136 | /* | ||
137 | * If we can't even access the area, something is very wrong, but we'll | ||
138 | * leave that to the default handling | ||
139 | */ | ||
140 | if (!access_ok(VERIFY_READ, fr, sizeof(struct emuframe))) | ||
141 | return 0; | ||
142 | |||
143 | /* | ||
144 | * Do some sanity checking on the stackframe: | ||
145 | * | ||
146 | * - Is the instruction pointed to by the EPC an BREAK_MATH? | ||
147 | * - Is the following memory word the BD_COOKIE? | ||
148 | */ | ||
149 | if (isa16) { | ||
150 | err = __get_user(instr[0], | ||
151 | (u16 __user *)(&fr->badinst)); | ||
152 | err |= __get_user(instr[1], | ||
153 | (u16 __user *)((long)(&fr->badinst) + 2)); | ||
154 | insn = (instr[0] << 16) | instr[1]; | ||
155 | } else { | ||
156 | err = __get_user(insn, &fr->badinst); | ||
157 | } | ||
158 | err |= __get_user(cookie, &fr->cookie); | ||
159 | |||
160 | if (unlikely(err || | ||
161 | insn != BREAK_MATH(isa16) || cookie != BD_COOKIE)) { | ||
162 | MIPS_FPU_EMU_INC_STATS(errors); | 294 | MIPS_FPU_EMU_INC_STATS(errors); |
163 | return 0; | 295 | return false; |
164 | } | ||
165 | |||
166 | /* | ||
167 | * At this point, we are satisfied that it's a BD emulation trap. Yes, | ||
168 | * a user might have deliberately put two malformed and useless | ||
169 | * instructions in a row in his program, in which case he's in for a | ||
170 | * nasty surprise - the next instruction will be treated as a | ||
171 | * continuation address! Alas, this seems to be the only way that we | ||
172 | * can handle signals, recursion, and longjmps() in the context of | ||
173 | * emulating the branch delay instruction. | ||
174 | */ | ||
175 | |||
176 | pr_debug("dsemulret\n"); | ||
177 | |||
178 | if (__get_user(epc, &fr->epc)) { /* Saved EPC */ | ||
179 | /* This is not a good situation to be in */ | ||
180 | force_sig(SIGBUS, current); | ||
181 | |||
182 | return 0; | ||
183 | } | 296 | } |
184 | 297 | ||
185 | /* Set EPC to return to post-branch instruction */ | 298 | /* Set EPC to return to post-branch instruction */ |
186 | xcp->cp0_epc = epc; | 299 | xcp->cp0_epc = current->thread.bd_emu_cont_pc; |
187 | MIPS_FPU_EMU_INC_STATS(ds_emul); | 300 | pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc); |
188 | return 1; | 301 | return true; |
189 | } | 302 | } |
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 7a9c345e87e5..cd72805b64a7 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
@@ -40,6 +40,51 @@ | |||
40 | #include <asm/mips-cm.h> | 40 | #include <asm/mips-cm.h> |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * Bits describing what cache ops an SMP callback function may perform. | ||
44 | * | ||
45 | * R4K_HIT - Virtual user or kernel address based cache operations. The | ||
46 | * active_mm must be checked before using user addresses, falling | ||
47 | * back to kmap. | ||
48 | * R4K_INDEX - Index based cache operations. | ||
49 | */ | ||
50 | |||
51 | #define R4K_HIT BIT(0) | ||
52 | #define R4K_INDEX BIT(1) | ||
53 | |||
54 | /** | ||
55 | * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core. | ||
56 | * @type: Type of cache operations (R4K_HIT or R4K_INDEX). | ||
57 | * | ||
58 | * Decides whether a cache op needs to be performed on every core in the system. | ||
59 | * This may change depending on the @type of cache operation, as well as the set | ||
60 | * of online CPUs, so preemption should be disabled by the caller to prevent CPU | ||
61 | * hotplug from changing the result. | ||
62 | * | ||
63 | * Returns: 1 if the cache operation @type should be done on every core in | ||
64 | * the system. | ||
65 | * 0 if the cache operation @type is globalized and only needs to | ||
66 | * be performed on a single CPU. | ||
67 | */ | ||
68 | static inline bool r4k_op_needs_ipi(unsigned int type) | ||
69 | { | ||
70 | /* The MIPS Coherence Manager (CM) globalizes address-based cache ops */ | ||
71 | if (type == R4K_HIT && mips_cm_present()) | ||
72 | return false; | ||
73 | |||
74 | /* | ||
75 | * Hardware doesn't globalize the required cache ops, so SMP calls may | ||
76 | * be needed, but only if there are foreign CPUs (non-siblings with | ||
77 | * separate caches). | ||
78 | */ | ||
79 | /* cpu_foreign_map[] undeclared when !CONFIG_SMP */ | ||
80 | #ifdef CONFIG_SMP | ||
81 | return !cpumask_empty(&cpu_foreign_map[0]); | ||
82 | #else | ||
83 | return false; | ||
84 | #endif | ||
85 | } | ||
86 | |||
87 | /* | ||
43 | * Special Variant of smp_call_function for use by cache functions: | 88 | * Special Variant of smp_call_function for use by cache functions: |
44 | * | 89 | * |
45 | * o No return value | 90 | * o No return value |
@@ -48,30 +93,17 @@ | |||
48 | * primary cache. | 93 | * primary cache. |
49 | * o doesn't disable interrupts on the local CPU | 94 | * o doesn't disable interrupts on the local CPU |
50 | */ | 95 | */ |
51 | static inline void r4k_on_each_cpu(void (*func) (void *info), void *info) | 96 | static inline void r4k_on_each_cpu(unsigned int type, |
97 | void (*func)(void *info), void *info) | ||
52 | { | 98 | { |
53 | preempt_disable(); | 99 | preempt_disable(); |
54 | 100 | if (r4k_op_needs_ipi(type)) | |
55 | /* | 101 | smp_call_function_many(&cpu_foreign_map[smp_processor_id()], |
56 | * The Coherent Manager propagates address-based cache ops to other | 102 | func, info, 1); |
57 | * cores but not index-based ops. However, r4k_on_each_cpu is used | ||
58 | * in both cases so there is no easy way to tell what kind of op is | ||
59 | * executed to the other cores. The best we can probably do is | ||
60 | * to restrict that call when a CM is not present because both | ||
61 | * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops. | ||
62 | */ | ||
63 | if (!mips_cm_present()) | ||
64 | smp_call_function_many(&cpu_foreign_map, func, info, 1); | ||
65 | func(info); | 103 | func(info); |
66 | preempt_enable(); | 104 | preempt_enable(); |
67 | } | 105 | } |
68 | 106 | ||
69 | #if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS) | ||
70 | #define cpu_has_safe_index_cacheops 0 | ||
71 | #else | ||
72 | #define cpu_has_safe_index_cacheops 1 | ||
73 | #endif | ||
74 | |||
75 | /* | 107 | /* |
76 | * Must die. | 108 | * Must die. |
77 | */ | 109 | */ |
@@ -462,22 +494,44 @@ static inline void local_r4k___flush_cache_all(void * args) | |||
462 | 494 | ||
463 | static void r4k___flush_cache_all(void) | 495 | static void r4k___flush_cache_all(void) |
464 | { | 496 | { |
465 | r4k_on_each_cpu(local_r4k___flush_cache_all, NULL); | 497 | r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL); |
466 | } | 498 | } |
467 | 499 | ||
468 | static inline int has_valid_asid(const struct mm_struct *mm) | 500 | /** |
501 | * has_valid_asid() - Determine if an mm already has an ASID. | ||
502 | * @mm: Memory map. | ||
503 | * @type: R4K_HIT or R4K_INDEX, type of cache op. | ||
504 | * | ||
505 | * Determines whether @mm already has an ASID on any of the CPUs which cache ops | ||
506 | * of type @type within an r4k_on_each_cpu() call will affect. If | ||
507 | * r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the | ||
508 | * scope of the operation is confined to sibling CPUs, otherwise all online CPUs | ||
509 | * will need to be checked. | ||
510 | * | ||
511 | * Must be called in non-preemptive context. | ||
512 | * | ||
513 | * Returns: 1 if the CPUs affected by @type cache ops have an ASID for @mm. | ||
514 | * 0 otherwise. | ||
515 | */ | ||
516 | static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type) | ||
469 | { | 517 | { |
470 | #ifdef CONFIG_MIPS_MT_SMP | 518 | unsigned int i; |
471 | int i; | 519 | const cpumask_t *mask = cpu_present_mask; |
472 | 520 | ||
473 | for_each_online_cpu(i) | 521 | /* cpu_sibling_map[] undeclared when !CONFIG_SMP */ |
522 | #ifdef CONFIG_SMP | ||
523 | /* | ||
524 | * If r4k_on_each_cpu does SMP calls, it does them to a single VPE in | ||
525 | * each foreign core, so we only need to worry about siblings. | ||
526 | * Otherwise we need to worry about all present CPUs. | ||
527 | */ | ||
528 | if (r4k_op_needs_ipi(type)) | ||
529 | mask = &cpu_sibling_map[smp_processor_id()]; | ||
530 | #endif | ||
531 | for_each_cpu(i, mask) | ||
474 | if (cpu_context(i, mm)) | 532 | if (cpu_context(i, mm)) |
475 | return 1; | 533 | return 1; |
476 | |||
477 | return 0; | 534 | return 0; |
478 | #else | ||
479 | return cpu_context(smp_processor_id(), mm); | ||
480 | #endif | ||
481 | } | 535 | } |
482 | 536 | ||
483 | static void r4k__flush_cache_vmap(void) | 537 | static void r4k__flush_cache_vmap(void) |
@@ -490,12 +544,16 @@ static void r4k__flush_cache_vunmap(void) | |||
490 | r4k_blast_dcache(); | 544 | r4k_blast_dcache(); |
491 | } | 545 | } |
492 | 546 | ||
547 | /* | ||
548 | * Note: flush_tlb_range() assumes flush_cache_range() sufficiently flushes | ||
549 | * whole caches when vma is executable. | ||
550 | */ | ||
493 | static inline void local_r4k_flush_cache_range(void * args) | 551 | static inline void local_r4k_flush_cache_range(void * args) |
494 | { | 552 | { |
495 | struct vm_area_struct *vma = args; | 553 | struct vm_area_struct *vma = args; |
496 | int exec = vma->vm_flags & VM_EXEC; | 554 | int exec = vma->vm_flags & VM_EXEC; |
497 | 555 | ||
498 | if (!(has_valid_asid(vma->vm_mm))) | 556 | if (!has_valid_asid(vma->vm_mm, R4K_INDEX)) |
499 | return; | 557 | return; |
500 | 558 | ||
501 | /* | 559 | /* |
@@ -516,14 +574,14 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma, | |||
516 | int exec = vma->vm_flags & VM_EXEC; | 574 | int exec = vma->vm_flags & VM_EXEC; |
517 | 575 | ||
518 | if (cpu_has_dc_aliases || exec) | 576 | if (cpu_has_dc_aliases || exec) |
519 | r4k_on_each_cpu(local_r4k_flush_cache_range, vma); | 577 | r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma); |
520 | } | 578 | } |
521 | 579 | ||
522 | static inline void local_r4k_flush_cache_mm(void * args) | 580 | static inline void local_r4k_flush_cache_mm(void * args) |
523 | { | 581 | { |
524 | struct mm_struct *mm = args; | 582 | struct mm_struct *mm = args; |
525 | 583 | ||
526 | if (!has_valid_asid(mm)) | 584 | if (!has_valid_asid(mm, R4K_INDEX)) |
527 | return; | 585 | return; |
528 | 586 | ||
529 | /* | 587 | /* |
@@ -548,7 +606,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm) | |||
548 | if (!cpu_has_dc_aliases) | 606 | if (!cpu_has_dc_aliases) |
549 | return; | 607 | return; |
550 | 608 | ||
551 | r4k_on_each_cpu(local_r4k_flush_cache_mm, mm); | 609 | r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm); |
552 | } | 610 | } |
553 | 611 | ||
554 | struct flush_cache_page_args { | 612 | struct flush_cache_page_args { |
@@ -573,10 +631,10 @@ static inline void local_r4k_flush_cache_page(void *args) | |||
573 | void *vaddr; | 631 | void *vaddr; |
574 | 632 | ||
575 | /* | 633 | /* |
576 | * If ownes no valid ASID yet, cannot possibly have gotten | 634 | * If owns no valid ASID yet, cannot possibly have gotten |
577 | * this page into the cache. | 635 | * this page into the cache. |
578 | */ | 636 | */ |
579 | if (!has_valid_asid(mm)) | 637 | if (!has_valid_asid(mm, R4K_HIT)) |
580 | return; | 638 | return; |
581 | 639 | ||
582 | addr &= PAGE_MASK; | 640 | addr &= PAGE_MASK; |
@@ -643,7 +701,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma, | |||
643 | args.addr = addr; | 701 | args.addr = addr; |
644 | args.pfn = pfn; | 702 | args.pfn = pfn; |
645 | 703 | ||
646 | r4k_on_each_cpu(local_r4k_flush_cache_page, &args); | 704 | r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_page, &args); |
647 | } | 705 | } |
648 | 706 | ||
649 | static inline void local_r4k_flush_data_cache_page(void * addr) | 707 | static inline void local_r4k_flush_data_cache_page(void * addr) |
@@ -656,18 +714,23 @@ static void r4k_flush_data_cache_page(unsigned long addr) | |||
656 | if (in_atomic()) | 714 | if (in_atomic()) |
657 | local_r4k_flush_data_cache_page((void *)addr); | 715 | local_r4k_flush_data_cache_page((void *)addr); |
658 | else | 716 | else |
659 | r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr); | 717 | r4k_on_each_cpu(R4K_HIT, local_r4k_flush_data_cache_page, |
718 | (void *) addr); | ||
660 | } | 719 | } |
661 | 720 | ||
662 | struct flush_icache_range_args { | 721 | struct flush_icache_range_args { |
663 | unsigned long start; | 722 | unsigned long start; |
664 | unsigned long end; | 723 | unsigned long end; |
724 | unsigned int type; | ||
665 | }; | 725 | }; |
666 | 726 | ||
667 | static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end) | 727 | static inline void __local_r4k_flush_icache_range(unsigned long start, |
728 | unsigned long end, | ||
729 | unsigned int type) | ||
668 | { | 730 | { |
669 | if (!cpu_has_ic_fills_f_dc) { | 731 | if (!cpu_has_ic_fills_f_dc) { |
670 | if (end - start >= dcache_size) { | 732 | if (type == R4K_INDEX || |
733 | (type & R4K_INDEX && end - start >= dcache_size)) { | ||
671 | r4k_blast_dcache(); | 734 | r4k_blast_dcache(); |
672 | } else { | 735 | } else { |
673 | R4600_HIT_CACHEOP_WAR_IMPL; | 736 | R4600_HIT_CACHEOP_WAR_IMPL; |
@@ -675,7 +738,8 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo | |||
675 | } | 738 | } |
676 | } | 739 | } |
677 | 740 | ||
678 | if (end - start > icache_size) | 741 | if (type == R4K_INDEX || |
742 | (type & R4K_INDEX && end - start > icache_size)) | ||
679 | r4k_blast_icache(); | 743 | r4k_blast_icache(); |
680 | else { | 744 | else { |
681 | switch (boot_cpu_type()) { | 745 | switch (boot_cpu_type()) { |
@@ -701,23 +765,52 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo | |||
701 | #endif | 765 | #endif |
702 | } | 766 | } |
703 | 767 | ||
768 | static inline void local_r4k_flush_icache_range(unsigned long start, | ||
769 | unsigned long end) | ||
770 | { | ||
771 | __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX); | ||
772 | } | ||
773 | |||
704 | static inline void local_r4k_flush_icache_range_ipi(void *args) | 774 | static inline void local_r4k_flush_icache_range_ipi(void *args) |
705 | { | 775 | { |
706 | struct flush_icache_range_args *fir_args = args; | 776 | struct flush_icache_range_args *fir_args = args; |
707 | unsigned long start = fir_args->start; | 777 | unsigned long start = fir_args->start; |
708 | unsigned long end = fir_args->end; | 778 | unsigned long end = fir_args->end; |
779 | unsigned int type = fir_args->type; | ||
709 | 780 | ||
710 | local_r4k_flush_icache_range(start, end); | 781 | __local_r4k_flush_icache_range(start, end, type); |
711 | } | 782 | } |
712 | 783 | ||
713 | static void r4k_flush_icache_range(unsigned long start, unsigned long end) | 784 | static void r4k_flush_icache_range(unsigned long start, unsigned long end) |
714 | { | 785 | { |
715 | struct flush_icache_range_args args; | 786 | struct flush_icache_range_args args; |
787 | unsigned long size, cache_size; | ||
716 | 788 | ||
717 | args.start = start; | 789 | args.start = start; |
718 | args.end = end; | 790 | args.end = end; |
791 | args.type = R4K_HIT | R4K_INDEX; | ||
719 | 792 | ||
720 | r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args); | 793 | /* |
794 | * Indexed cache ops require an SMP call. | ||
795 | * Consider if that can or should be avoided. | ||
796 | */ | ||
797 | preempt_disable(); | ||
798 | if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) { | ||
799 | /* | ||
800 | * If address-based cache ops don't require an SMP call, then | ||
801 | * use them exclusively for small flushes. | ||
802 | */ | ||
803 | size = start - end; | ||
804 | cache_size = icache_size; | ||
805 | if (!cpu_has_ic_fills_f_dc) { | ||
806 | size *= 2; | ||
807 | cache_size += dcache_size; | ||
808 | } | ||
809 | if (size <= cache_size) | ||
810 | args.type &= ~R4K_INDEX; | ||
811 | } | ||
812 | r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args); | ||
813 | preempt_enable(); | ||
721 | instruction_hazard(); | 814 | instruction_hazard(); |
722 | } | 815 | } |
723 | 816 | ||
@@ -744,7 +837,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) | |||
744 | * subset property so we have to flush the primary caches | 837 | * subset property so we have to flush the primary caches |
745 | * explicitly | 838 | * explicitly |
746 | */ | 839 | */ |
747 | if (cpu_has_safe_index_cacheops && size >= dcache_size) { | 840 | if (size >= dcache_size) { |
748 | r4k_blast_dcache(); | 841 | r4k_blast_dcache(); |
749 | } else { | 842 | } else { |
750 | R4600_HIT_CACHEOP_WAR_IMPL; | 843 | R4600_HIT_CACHEOP_WAR_IMPL; |
@@ -781,7 +874,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) | |||
781 | return; | 874 | return; |
782 | } | 875 | } |
783 | 876 | ||
784 | if (cpu_has_safe_index_cacheops && size >= dcache_size) { | 877 | if (size >= dcache_size) { |
785 | r4k_blast_dcache(); | 878 | r4k_blast_dcache(); |
786 | } else { | 879 | } else { |
787 | R4600_HIT_CACHEOP_WAR_IMPL; | 880 | R4600_HIT_CACHEOP_WAR_IMPL; |
@@ -794,25 +887,76 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) | |||
794 | } | 887 | } |
795 | #endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */ | 888 | #endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */ |
796 | 889 | ||
890 | struct flush_cache_sigtramp_args { | ||
891 | struct mm_struct *mm; | ||
892 | struct page *page; | ||
893 | unsigned long addr; | ||
894 | }; | ||
895 | |||
797 | /* | 896 | /* |
798 | * While we're protected against bad userland addresses we don't care | 897 | * While we're protected against bad userland addresses we don't care |
799 | * very much about what happens in that case. Usually a segmentation | 898 | * very much about what happens in that case. Usually a segmentation |
800 | * fault will dump the process later on anyway ... | 899 | * fault will dump the process later on anyway ... |
801 | */ | 900 | */ |
802 | static void local_r4k_flush_cache_sigtramp(void * arg) | 901 | static void local_r4k_flush_cache_sigtramp(void *args) |
803 | { | 902 | { |
903 | struct flush_cache_sigtramp_args *fcs_args = args; | ||
904 | unsigned long addr = fcs_args->addr; | ||
905 | struct page *page = fcs_args->page; | ||
906 | struct mm_struct *mm = fcs_args->mm; | ||
907 | int map_coherent = 0; | ||
908 | void *vaddr; | ||
909 | |||
804 | unsigned long ic_lsize = cpu_icache_line_size(); | 910 | unsigned long ic_lsize = cpu_icache_line_size(); |
805 | unsigned long dc_lsize = cpu_dcache_line_size(); | 911 | unsigned long dc_lsize = cpu_dcache_line_size(); |
806 | unsigned long sc_lsize = cpu_scache_line_size(); | 912 | unsigned long sc_lsize = cpu_scache_line_size(); |
807 | unsigned long addr = (unsigned long) arg; | 913 | |
914 | /* | ||
915 | * If owns no valid ASID yet, cannot possibly have gotten | ||
916 | * this page into the cache. | ||
917 | */ | ||
918 | if (!has_valid_asid(mm, R4K_HIT)) | ||
919 | return; | ||
920 | |||
921 | if (mm == current->active_mm) { | ||
922 | vaddr = NULL; | ||
923 | } else { | ||
924 | /* | ||
925 | * Use kmap_coherent or kmap_atomic to do flushes for | ||
926 | * another ASID than the current one. | ||
927 | */ | ||
928 | map_coherent = (cpu_has_dc_aliases && | ||
929 | page_mapcount(page) && | ||
930 | !Page_dcache_dirty(page)); | ||
931 | if (map_coherent) | ||
932 | vaddr = kmap_coherent(page, addr); | ||
933 | else | ||
934 | vaddr = kmap_atomic(page); | ||
935 | addr = (unsigned long)vaddr + (addr & ~PAGE_MASK); | ||
936 | } | ||
808 | 937 | ||
809 | R4600_HIT_CACHEOP_WAR_IMPL; | 938 | R4600_HIT_CACHEOP_WAR_IMPL; |
810 | if (dc_lsize) | 939 | if (!cpu_has_ic_fills_f_dc) { |
811 | protected_writeback_dcache_line(addr & ~(dc_lsize - 1)); | 940 | if (dc_lsize) |
812 | if (!cpu_icache_snoops_remote_store && scache_size) | 941 | vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1)) |
813 | protected_writeback_scache_line(addr & ~(sc_lsize - 1)); | 942 | : protected_writeback_dcache_line( |
943 | addr & ~(dc_lsize - 1)); | ||
944 | if (!cpu_icache_snoops_remote_store && scache_size) | ||
945 | vaddr ? flush_scache_line(addr & ~(sc_lsize - 1)) | ||
946 | : protected_writeback_scache_line( | ||
947 | addr & ~(sc_lsize - 1)); | ||
948 | } | ||
814 | if (ic_lsize) | 949 | if (ic_lsize) |
815 | protected_flush_icache_line(addr & ~(ic_lsize - 1)); | 950 | vaddr ? flush_icache_line(addr & ~(ic_lsize - 1)) |
951 | : protected_flush_icache_line(addr & ~(ic_lsize - 1)); | ||
952 | |||
953 | if (vaddr) { | ||
954 | if (map_coherent) | ||
955 | kunmap_coherent(); | ||
956 | else | ||
957 | kunmap_atomic(vaddr); | ||
958 | } | ||
959 | |||
816 | if (MIPS4K_ICACHE_REFILL_WAR) { | 960 | if (MIPS4K_ICACHE_REFILL_WAR) { |
817 | __asm__ __volatile__ ( | 961 | __asm__ __volatile__ ( |
818 | ".set push\n\t" | 962 | ".set push\n\t" |
@@ -837,7 +981,23 @@ static void local_r4k_flush_cache_sigtramp(void * arg) | |||
837 | 981 | ||
838 | static void r4k_flush_cache_sigtramp(unsigned long addr) | 982 | static void r4k_flush_cache_sigtramp(unsigned long addr) |
839 | { | 983 | { |
840 | r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr); | 984 | struct flush_cache_sigtramp_args args; |
985 | int npages; | ||
986 | |||
987 | down_read(¤t->mm->mmap_sem); | ||
988 | |||
989 | npages = get_user_pages_fast(addr, 1, 0, &args.page); | ||
990 | if (npages < 1) | ||
991 | goto out; | ||
992 | |||
993 | args.mm = current->mm; | ||
994 | args.addr = addr; | ||
995 | |||
996 | r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_sigtramp, &args); | ||
997 | |||
998 | put_page(args.page); | ||
999 | out: | ||
1000 | up_read(¤t->mm->mmap_sem); | ||
841 | } | 1001 | } |
842 | 1002 | ||
843 | static void r4k_flush_icache_all(void) | 1003 | static void r4k_flush_icache_all(void) |
@@ -851,6 +1011,15 @@ struct flush_kernel_vmap_range_args { | |||
851 | int size; | 1011 | int size; |
852 | }; | 1012 | }; |
853 | 1013 | ||
1014 | static inline void local_r4k_flush_kernel_vmap_range_index(void *args) | ||
1015 | { | ||
1016 | /* | ||
1017 | * Aliases only affect the primary caches so don't bother with | ||
1018 | * S-caches or T-caches. | ||
1019 | */ | ||
1020 | r4k_blast_dcache(); | ||
1021 | } | ||
1022 | |||
854 | static inline void local_r4k_flush_kernel_vmap_range(void *args) | 1023 | static inline void local_r4k_flush_kernel_vmap_range(void *args) |
855 | { | 1024 | { |
856 | struct flush_kernel_vmap_range_args *vmra = args; | 1025 | struct flush_kernel_vmap_range_args *vmra = args; |
@@ -861,12 +1030,8 @@ static inline void local_r4k_flush_kernel_vmap_range(void *args) | |||
861 | * Aliases only affect the primary caches so don't bother with | 1030 | * Aliases only affect the primary caches so don't bother with |
862 | * S-caches or T-caches. | 1031 | * S-caches or T-caches. |
863 | */ | 1032 | */ |
864 | if (cpu_has_safe_index_cacheops && size >= dcache_size) | 1033 | R4600_HIT_CACHEOP_WAR_IMPL; |
865 | r4k_blast_dcache(); | 1034 | blast_dcache_range(vaddr, vaddr + size); |
866 | else { | ||
867 | R4600_HIT_CACHEOP_WAR_IMPL; | ||
868 | blast_dcache_range(vaddr, vaddr + size); | ||
869 | } | ||
870 | } | 1035 | } |
871 | 1036 | ||
872 | static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size) | 1037 | static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size) |
@@ -876,7 +1041,12 @@ static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size) | |||
876 | args.vaddr = (unsigned long) vaddr; | 1041 | args.vaddr = (unsigned long) vaddr; |
877 | args.size = size; | 1042 | args.size = size; |
878 | 1043 | ||
879 | r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args); | 1044 | if (size >= dcache_size) |
1045 | r4k_on_each_cpu(R4K_INDEX, | ||
1046 | local_r4k_flush_kernel_vmap_range_index, NULL); | ||
1047 | else | ||
1048 | r4k_on_each_cpu(R4K_HIT, local_r4k_flush_kernel_vmap_range, | ||
1049 | &args); | ||
880 | } | 1050 | } |
881 | 1051 | ||
882 | static inline void rm7k_erratum31(void) | 1052 | static inline void rm7k_erratum31(void) |
diff --git a/arch/mips/mm/sc-debugfs.c b/arch/mips/mm/sc-debugfs.c index 5eefe3281b24..01f1154cdb0c 100644 --- a/arch/mips/mm/sc-debugfs.c +++ b/arch/mips/mm/sc-debugfs.c | |||
@@ -73,8 +73,8 @@ static int __init sc_debugfs_init(void) | |||
73 | 73 | ||
74 | file = debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir, | 74 | file = debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir, |
75 | NULL, &sc_prefetch_fops); | 75 | NULL, &sc_prefetch_fops); |
76 | if (IS_ERR(file)) | 76 | if (!file) |
77 | return PTR_ERR(file); | 77 | return -ENOMEM; |
78 | 78 | ||
79 | return 0; | 79 | return 0; |
80 | } | 80 | } |
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c index 9ac1efcfbcc7..78f900c59276 100644 --- a/arch/mips/mm/sc-rm7k.c +++ b/arch/mips/mm/sc-rm7k.c | |||
@@ -161,7 +161,7 @@ static void rm7k_tc_disable(void) | |||
161 | local_irq_save(flags); | 161 | local_irq_save(flags); |
162 | blast_rm7k_tcache(); | 162 | blast_rm7k_tcache(); |
163 | clear_c0_config(RM7K_CONF_TE); | 163 | clear_c0_config(RM7K_CONF_TE); |
164 | local_irq_save(flags); | 164 | local_irq_restore(flags); |
165 | } | 165 | } |
166 | 166 | ||
167 | static void rm7k_sc_disable(void) | 167 | static void rm7k_sc_disable(void) |
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index ff49b29c2d16..55ce39606cb8 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -888,7 +888,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, | |||
888 | } | 888 | } |
889 | } | 889 | } |
890 | if (!did_vmalloc_branch) { | 890 | if (!did_vmalloc_branch) { |
891 | if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) { | 891 | if (single_insn_swpd) { |
892 | uasm_il_b(p, r, label_vmalloc_done); | 892 | uasm_il_b(p, r, label_vmalloc_done); |
893 | uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); | 893 | uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); |
894 | } else { | 894 | } else { |
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index cec524167822..763d3f1edb8a 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c | |||
@@ -65,7 +65,7 @@ static struct insn insn_table[] = { | |||
65 | #ifndef CONFIG_CPU_MIPSR6 | 65 | #ifndef CONFIG_CPU_MIPSR6 |
66 | { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 66 | { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
67 | #else | 67 | #else |
68 | { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 }, | 68 | { insn_cache, M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 }, |
69 | #endif | 69 | #endif |
70 | { insn_cfc1, M(cop1_op, cfc_op, 0, 0, 0, 0), RT | RD }, | 70 | { insn_cfc1, M(cop1_op, cfc_op, 0, 0, 0, 0), RT | RD }, |
71 | { insn_cfcmsa, M(msa_op, 0, msa_cfc_op, 0, 0, msa_elm_op), RD | RE }, | 71 | { insn_cfcmsa, M(msa_op, 0, msa_cfc_op, 0, 0, msa_elm_op), RD | RE }, |
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 3e0282d301d6..a82970442b8a 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c | |||
@@ -378,11 +378,7 @@ UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label)); | |||
378 | int ISAFUNC(uasm_in_compat_space_p)(long addr) | 378 | int ISAFUNC(uasm_in_compat_space_p)(long addr) |
379 | { | 379 | { |
380 | /* Is this address in 32bit compat space? */ | 380 | /* Is this address in 32bit compat space? */ |
381 | #ifdef CONFIG_64BIT | 381 | return addr == (int)addr; |
382 | return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L); | ||
383 | #else | ||
384 | return 1; | ||
385 | #endif | ||
386 | } | 382 | } |
387 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p)); | 383 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p)); |
388 | 384 | ||
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index d1b7bd09253a..39e7b472f0d8 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c | |||
@@ -1199,7 +1199,7 @@ void bpf_jit_compile(struct bpf_prog *fp) | |||
1199 | 1199 | ||
1200 | memset(&ctx, 0, sizeof(ctx)); | 1200 | memset(&ctx, 0, sizeof(ctx)); |
1201 | 1201 | ||
1202 | ctx.offsets = kcalloc(fp->len, sizeof(*ctx.offsets), GFP_KERNEL); | 1202 | ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); |
1203 | if (ctx.offsets == NULL) | 1203 | if (ctx.offsets == NULL) |
1204 | return; | 1204 | return; |
1205 | 1205 | ||
diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c index 77ecf32ef3dc..51599710472b 100644 --- a/arch/mips/pic32/pic32mzda/init.c +++ b/arch/mips/pic32/pic32mzda/init.c | |||
@@ -33,8 +33,8 @@ static ulong get_fdtaddr(void) | |||
33 | { | 33 | { |
34 | ulong ftaddr = 0; | 34 | ulong ftaddr = 0; |
35 | 35 | ||
36 | if ((fw_arg0 == -2) && fw_arg1 && !fw_arg2 && !fw_arg3) | 36 | if (fw_passed_dtb && !fw_arg2 && !fw_arg3) |
37 | return (ulong)fw_arg1; | 37 | return (ulong)fw_passed_dtb; |
38 | 38 | ||
39 | if (__dtb_start < __dtb_end) | 39 | if (__dtb_start < __dtb_end) |
40 | ftaddr = (ulong)__dtb_start; | 40 | ftaddr = (ulong)__dtb_start; |
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c index c50a670e60d2..1c91cad7988f 100644 --- a/arch/mips/pistachio/init.c +++ b/arch/mips/pistachio/init.c | |||
@@ -59,29 +59,6 @@ const char *get_system_type(void) | |||
59 | return sys_type; | 59 | return sys_type; |
60 | } | 60 | } |
61 | 61 | ||
62 | static void __init plat_setup_iocoherency(void) | ||
63 | { | ||
64 | /* | ||
65 | * Kernel has been configured with software coherency | ||
66 | * but we might choose to turn it off and use hardware | ||
67 | * coherency instead. | ||
68 | */ | ||
69 | if (mips_cm_numiocu() != 0) { | ||
70 | /* Nothing special needs to be done to enable coherency */ | ||
71 | pr_info("CMP IOCU detected\n"); | ||
72 | hw_coherentio = 1; | ||
73 | if (coherentio == 0) | ||
74 | pr_info("Hardware DMA cache coherency disabled\n"); | ||
75 | else | ||
76 | pr_info("Hardware DMA cache coherency enabled\n"); | ||
77 | } else { | ||
78 | if (coherentio == 1) | ||
79 | pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n"); | ||
80 | else | ||
81 | pr_info("Software DMA cache coherency enabled\n"); | ||
82 | } | ||
83 | } | ||
84 | |||
85 | void __init *plat_get_fdt(void) | 62 | void __init *plat_get_fdt(void) |
86 | { | 63 | { |
87 | if (fw_arg0 != -2) | 64 | if (fw_arg0 != -2) |
@@ -92,8 +69,6 @@ void __init *plat_get_fdt(void) | |||
92 | void __init plat_mem_setup(void) | 69 | void __init plat_mem_setup(void) |
93 | { | 70 | { |
94 | __dt_setup_arch(plat_get_fdt()); | 71 | __dt_setup_arch(plat_get_fdt()); |
95 | |||
96 | plat_setup_iocoherency(); | ||
97 | } | 72 | } |
98 | 73 | ||
99 | #define DEFAULT_CPC_BASE_ADDR 0x1bde0000 | 74 | #define DEFAULT_CPC_BASE_ADDR 0x1bde0000 |
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index d40edda0ca3b..3c7c9bf57bf3 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c | |||
@@ -175,7 +175,7 @@ static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = { | |||
175 | }; | 175 | }; |
176 | 176 | ||
177 | static struct rt2880_pmx_func spis_grp_mt7628[] = { | 177 | static struct rt2880_pmx_func spis_grp_mt7628[] = { |
178 | FUNC("pwm", 3, 14, 4), | 178 | FUNC("pwm_uart2", 3, 14, 4), |
179 | FUNC("util", 2, 14, 4), | 179 | FUNC("util", 2, 14, 4), |
180 | FUNC("gpio", 1, 14, 4), | 180 | FUNC("gpio", 1, 14, 4), |
181 | FUNC("spis", 0, 14, 4), | 181 | FUNC("spis", 0, 14, 4), |
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c index 180e027b1c8a..796e22037bc4 100644 --- a/drivers/ssb/driver_gpio.c +++ b/drivers/ssb/driver_gpio.c | |||
@@ -23,7 +23,7 @@ | |||
23 | **************************************************/ | 23 | **************************************************/ |
24 | 24 | ||
25 | #if IS_ENABLED(CONFIG_SSB_EMBEDDED) | 25 | #if IS_ENABLED(CONFIG_SSB_EMBEDDED) |
26 | static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) | 26 | static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned int gpio) |
27 | { | 27 | { |
28 | struct ssb_bus *bus = gpiochip_get_data(chip); | 28 | struct ssb_bus *bus = gpiochip_get_data(chip); |
29 | 29 | ||
@@ -38,14 +38,14 @@ static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) | |||
38 | * ChipCommon | 38 | * ChipCommon |
39 | **************************************************/ | 39 | **************************************************/ |
40 | 40 | ||
41 | static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio) | 41 | static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned int gpio) |
42 | { | 42 | { |
43 | struct ssb_bus *bus = gpiochip_get_data(chip); | 43 | struct ssb_bus *bus = gpiochip_get_data(chip); |
44 | 44 | ||
45 | return !!ssb_chipco_gpio_in(&bus->chipco, 1 << gpio); | 45 | return !!ssb_chipco_gpio_in(&bus->chipco, 1 << gpio); |
46 | } | 46 | } |
47 | 47 | ||
48 | static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio, | 48 | static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned int gpio, |
49 | int value) | 49 | int value) |
50 | { | 50 | { |
51 | struct ssb_bus *bus = gpiochip_get_data(chip); | 51 | struct ssb_bus *bus = gpiochip_get_data(chip); |
@@ -54,7 +54,7 @@ static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio, | |||
54 | } | 54 | } |
55 | 55 | ||
56 | static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip, | 56 | static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip, |
57 | unsigned gpio) | 57 | unsigned int gpio) |
58 | { | 58 | { |
59 | struct ssb_bus *bus = gpiochip_get_data(chip); | 59 | struct ssb_bus *bus = gpiochip_get_data(chip); |
60 | 60 | ||
@@ -63,7 +63,7 @@ static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip, | |||
63 | } | 63 | } |
64 | 64 | ||
65 | static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip, | 65 | static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip, |
66 | unsigned gpio, int value) | 66 | unsigned int gpio, int value) |
67 | { | 67 | { |
68 | struct ssb_bus *bus = gpiochip_get_data(chip); | 68 | struct ssb_bus *bus = gpiochip_get_data(chip); |
69 | 69 | ||
@@ -72,7 +72,7 @@ static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip, | |||
72 | return 0; | 72 | return 0; |
73 | } | 73 | } |
74 | 74 | ||
75 | static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio) | 75 | static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned int gpio) |
76 | { | 76 | { |
77 | struct ssb_bus *bus = gpiochip_get_data(chip); | 77 | struct ssb_bus *bus = gpiochip_get_data(chip); |
78 | 78 | ||
@@ -85,7 +85,7 @@ static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio) | |||
85 | return 0; | 85 | return 0; |
86 | } | 86 | } |
87 | 87 | ||
88 | static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio) | 88 | static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned int gpio) |
89 | { | 89 | { |
90 | struct ssb_bus *bus = gpiochip_get_data(chip); | 90 | struct ssb_bus *bus = gpiochip_get_data(chip); |
91 | 91 | ||
@@ -256,14 +256,14 @@ static int ssb_gpio_chipco_init(struct ssb_bus *bus) | |||
256 | 256 | ||
257 | #ifdef CONFIG_SSB_DRIVER_EXTIF | 257 | #ifdef CONFIG_SSB_DRIVER_EXTIF |
258 | 258 | ||
259 | static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio) | 259 | static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned int gpio) |
260 | { | 260 | { |
261 | struct ssb_bus *bus = gpiochip_get_data(chip); | 261 | struct ssb_bus *bus = gpiochip_get_data(chip); |
262 | 262 | ||
263 | return !!ssb_extif_gpio_in(&bus->extif, 1 << gpio); | 263 | return !!ssb_extif_gpio_in(&bus->extif, 1 << gpio); |
264 | } | 264 | } |
265 | 265 | ||
266 | static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio, | 266 | static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned int gpio, |
267 | int value) | 267 | int value) |
268 | { | 268 | { |
269 | struct ssb_bus *bus = gpiochip_get_data(chip); | 269 | struct ssb_bus *bus = gpiochip_get_data(chip); |
@@ -272,7 +272,7 @@ static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio, | |||
272 | } | 272 | } |
273 | 273 | ||
274 | static int ssb_gpio_extif_direction_input(struct gpio_chip *chip, | 274 | static int ssb_gpio_extif_direction_input(struct gpio_chip *chip, |
275 | unsigned gpio) | 275 | unsigned int gpio) |
276 | { | 276 | { |
277 | struct ssb_bus *bus = gpiochip_get_data(chip); | 277 | struct ssb_bus *bus = gpiochip_get_data(chip); |
278 | 278 | ||
@@ -281,7 +281,7 @@ static int ssb_gpio_extif_direction_input(struct gpio_chip *chip, | |||
281 | } | 281 | } |
282 | 282 | ||
283 | static int ssb_gpio_extif_direction_output(struct gpio_chip *chip, | 283 | static int ssb_gpio_extif_direction_output(struct gpio_chip *chip, |
284 | unsigned gpio, int value) | 284 | unsigned int gpio, int value) |
285 | { | 285 | { |
286 | struct ssb_bus *bus = gpiochip_get_data(chip); | 286 | struct ssb_bus *bus = gpiochip_get_data(chip); |
287 | 287 | ||