diff options
author | Olof Johansson <olof@lixom.net> | 2013-07-12 13:59:39 -0400 |
---|---|---|
committer | Olof Johansson <olof@lixom.net> | 2013-07-12 13:59:39 -0400 |
commit | f4b96f5e4ff8d86699c851c10245e102809b0331 (patch) | |
tree | f766102263bed71738431cabb4d4f6f086005cd8 /arch/arm | |
parent | 9d8812df35be58a5da0c44182c1e4ba2507cc6a7 (diff) | |
parent | c24a6ae18abde53b048372b066b93b71b1b91154 (diff) |
Merge tag 'omap-for-v3.11/fixes-for-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into fixes
Omap fixes and minor defconfig updates that would be good to
get in before -rc1.
* tag 'omap-for-v3.11/fixes-for-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap:
ARM: OMAP2+: omap2plus_defconfig: Enable appended DTB support
ARM: OMAP2+: Enable TI_EDMA in omap2plus_defconfig
ARM: OMAP2+: omap2plus_defconfig: enable DRA752 thermal support by default
ARM: OMAP2+: omap2plus_defconfig: enable TI bandgap driver
ARM: OMAP2+: devices: remove duplicated include from devices.c
ARM: OMAP3: igep0020: Set DSS pins in correct mux mode.
ARM: OMAP2+: N900: enable N900-specific drivers even if device tree is enabled
ARM: OMAP2+: Cocci spatch "ptr_ret.spatch"
ARM: OMAP2+: Remove obsolete Makefile line
ARM: OMAP5: Enable Cortex A15 errata 798181
ARM: scu: provide inline dummy functions when SCU is not present
ARM: OMAP4: sleep: build OMAP4 specific functions only for OMAP4
ARM: OMAP2+: timer: initialize before using oh_name
Signed-off-by: Olof Johansson <olof@lixom.net>
Add/move/change conflicts in arch/arm/mach-omap2/Kconfig resolved.
Diffstat (limited to 'arch/arm')
134 files changed, 2140 insertions, 2711 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index de7049bdea85..531cdda016f9 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -175,6 +175,9 @@ config ARCH_HAS_CPUFREQ | |||
175 | and that the relevant menu configurations are displayed for | 175 | and that the relevant menu configurations are displayed for |
176 | it. | 176 | it. |
177 | 177 | ||
178 | config ARCH_HAS_BANDGAP | ||
179 | bool | ||
180 | |||
178 | config GENERIC_HWEIGHT | 181 | config GENERIC_HWEIGHT |
179 | bool | 182 | bool |
180 | default y | 183 | default y |
@@ -1450,7 +1453,7 @@ config SMP | |||
1450 | depends on CPU_V6K || CPU_V7 | 1453 | depends on CPU_V6K || CPU_V7 |
1451 | depends on GENERIC_CLOCKEVENTS | 1454 | depends on GENERIC_CLOCKEVENTS |
1452 | depends on HAVE_SMP | 1455 | depends on HAVE_SMP |
1453 | depends on MMU | 1456 | depends on MMU || ARM_MPU |
1454 | select USE_GENERIC_SMP_HELPERS | 1457 | select USE_GENERIC_SMP_HELPERS |
1455 | help | 1458 | help |
1456 | This enables support for systems with more than one CPU. If you have | 1459 | This enables support for systems with more than one CPU. If you have |
@@ -1471,7 +1474,7 @@ config SMP | |||
1471 | 1474 | ||
1472 | config SMP_ON_UP | 1475 | config SMP_ON_UP |
1473 | bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)" | 1476 | bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)" |
1474 | depends on SMP && !XIP_KERNEL | 1477 | depends on SMP && !XIP_KERNEL && MMU |
1475 | default y | 1478 | default y |
1476 | help | 1479 | help |
1477 | SMP kernels contain instructions which fail on non-SMP processors. | 1480 | SMP kernels contain instructions which fail on non-SMP processors. |
@@ -1744,6 +1747,14 @@ config HW_PERF_EVENTS | |||
1744 | Enable hardware performance counter support for perf events. If | 1747 | Enable hardware performance counter support for perf events. If |
1745 | disabled, perf events will use software events only. | 1748 | disabled, perf events will use software events only. |
1746 | 1749 | ||
1750 | config SYS_SUPPORTS_HUGETLBFS | ||
1751 | def_bool y | ||
1752 | depends on ARM_LPAE | ||
1753 | |||
1754 | config HAVE_ARCH_TRANSPARENT_HUGEPAGE | ||
1755 | def_bool y | ||
1756 | depends on ARM_LPAE | ||
1757 | |||
1747 | source "mm/Kconfig" | 1758 | source "mm/Kconfig" |
1748 | 1759 | ||
1749 | config FORCE_MAX_ZONEORDER | 1760 | config FORCE_MAX_ZONEORDER |
diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu index c859495da480..aed66d5df7f1 100644 --- a/arch/arm/Kconfig-nommu +++ b/arch/arm/Kconfig-nommu | |||
@@ -50,3 +50,15 @@ config REMAP_VECTORS_TO_RAM | |||
50 | Otherwise, say 'y' here. In this case, the kernel will require | 50 | Otherwise, say 'y' here. In this case, the kernel will require |
51 | external support to redirect the hardware exception vectors to | 51 | external support to redirect the hardware exception vectors to |
52 | the writable versions located at DRAM_BASE. | 52 | the writable versions located at DRAM_BASE. |
53 | |||
54 | config ARM_MPU | ||
55 | bool 'Use the ARM v7 PMSA Compliant MPU' | ||
56 | depends on CPU_V7 | ||
57 | default y | ||
58 | help | ||
59 | Some ARM systems without an MMU have instead a Memory Protection | ||
60 | Unit (MPU) that defines the type and permissions for regions of | ||
61 | memory. | ||
62 | |||
63 | If your CPU has an MPU then you should choose 'y' here unless you | ||
64 | know that you do not want to use the MPU. | ||
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index ff4920b1f6c5..e401a766c0bd 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug | |||
@@ -560,6 +560,13 @@ choice | |||
560 | of the tiles using the RS1 memory map, including all new A-class | 560 | of the tiles using the RS1 memory map, including all new A-class |
561 | core tiles, FPGA-based SMMs and software models. | 561 | core tiles, FPGA-based SMMs and software models. |
562 | 562 | ||
563 | config DEBUG_VEXPRESS_UART0_CRX | ||
564 | bool "Use PL011 UART0 at 0xb0090000 (Cortex-R compliant tiles)" | ||
565 | depends on ARCH_VEXPRESS && !MMU | ||
566 | help | ||
567 | This option selects UART0 at 0xb0090000. This is appropriate for | ||
568 | Cortex-R series tiles and SMMs, such as Cortex-R5 and Cortex-R7 | ||
569 | |||
563 | config DEBUG_VT8500_UART0 | 570 | config DEBUG_VT8500_UART0 |
564 | bool "Use UART0 on VIA/Wondermedia SoCs" | 571 | bool "Use UART0 on VIA/Wondermedia SoCs" |
565 | depends on ARCH_VT8500 | 572 | depends on ARCH_VT8500 |
@@ -789,7 +796,8 @@ config DEBUG_LL_INCLUDE | |||
789 | default "debug/u300.S" if DEBUG_U300_UART | 796 | default "debug/u300.S" if DEBUG_U300_UART |
790 | default "debug/ux500.S" if DEBUG_UX500_UART | 797 | default "debug/ux500.S" if DEBUG_UX500_UART |
791 | default "debug/vexpress.S" if DEBUG_VEXPRESS_UART0_DETECT || \ | 798 | default "debug/vexpress.S" if DEBUG_VEXPRESS_UART0_DETECT || \ |
792 | DEBUG_VEXPRESS_UART0_CA9 || DEBUG_VEXPRESS_UART0_RS1 | 799 | DEBUG_VEXPRESS_UART0_CA9 || DEBUG_VEXPRESS_UART0_RS1 || \ |
800 | DEBUG_VEXPRESS_UART0_CRX | ||
793 | default "debug/vt8500.S" if DEBUG_VT8500_UART0 | 801 | default "debug/vt8500.S" if DEBUG_VT8500_UART0 |
794 | default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1 | 802 | default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1 |
795 | default "mach/debug-macro.S" | 803 | default "mach/debug-macro.S" |
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 72caf82a8280..c0ac0f5e5e5c 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile | |||
@@ -59,38 +59,44 @@ comma = , | |||
59 | # Note that GCC does not numerically define an architecture version | 59 | # Note that GCC does not numerically define an architecture version |
60 | # macro, but instead defines a whole series of macros which makes | 60 | # macro, but instead defines a whole series of macros which makes |
61 | # testing for a specific architecture or later rather impossible. | 61 | # testing for a specific architecture or later rather impossible. |
62 | arch-$(CONFIG_CPU_32v7M) :=-D__LINUX_ARM_ARCH__=7 -march=armv7-m -Wa,-march=armv7-m | 62 | arch-$(CONFIG_CPU_32v7M) =-D__LINUX_ARM_ARCH__=7 -march=armv7-m -Wa,-march=armv7-m |
63 | arch-$(CONFIG_CPU_32v7) :=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a) | 63 | arch-$(CONFIG_CPU_32v7) =-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a) |
64 | arch-$(CONFIG_CPU_32v6) :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6) | 64 | arch-$(CONFIG_CPU_32v6) =-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6) |
65 | # Only override the compiler option if ARMv6. The ARMv6K extensions are | 65 | # Only override the compiler option if ARMv6. The ARMv6K extensions are |
66 | # always available in ARMv7 | 66 | # always available in ARMv7 |
67 | ifeq ($(CONFIG_CPU_32v6),y) | 67 | ifeq ($(CONFIG_CPU_32v6),y) |
68 | arch-$(CONFIG_CPU_32v6K) :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k) | 68 | arch-$(CONFIG_CPU_32v6K) =-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k) |
69 | endif | 69 | endif |
70 | arch-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t) | 70 | arch-$(CONFIG_CPU_32v5) =-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t) |
71 | arch-$(CONFIG_CPU_32v4T) :=-D__LINUX_ARM_ARCH__=4 -march=armv4t | 71 | arch-$(CONFIG_CPU_32v4T) =-D__LINUX_ARM_ARCH__=4 -march=armv4t |
72 | arch-$(CONFIG_CPU_32v4) :=-D__LINUX_ARM_ARCH__=4 -march=armv4 | 72 | arch-$(CONFIG_CPU_32v4) =-D__LINUX_ARM_ARCH__=4 -march=armv4 |
73 | arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3 | 73 | arch-$(CONFIG_CPU_32v3) =-D__LINUX_ARM_ARCH__=3 -march=armv3 |
74 | |||
75 | # Evaluate arch cc-option calls now | ||
76 | arch-y := $(arch-y) | ||
74 | 77 | ||
75 | # This selects how we optimise for the processor. | 78 | # This selects how we optimise for the processor. |
76 | tune-$(CONFIG_CPU_ARM7TDMI) :=-mtune=arm7tdmi | 79 | tune-$(CONFIG_CPU_ARM7TDMI) =-mtune=arm7tdmi |
77 | tune-$(CONFIG_CPU_ARM720T) :=-mtune=arm7tdmi | 80 | tune-$(CONFIG_CPU_ARM720T) =-mtune=arm7tdmi |
78 | tune-$(CONFIG_CPU_ARM740T) :=-mtune=arm7tdmi | 81 | tune-$(CONFIG_CPU_ARM740T) =-mtune=arm7tdmi |
79 | tune-$(CONFIG_CPU_ARM9TDMI) :=-mtune=arm9tdmi | 82 | tune-$(CONFIG_CPU_ARM9TDMI) =-mtune=arm9tdmi |
80 | tune-$(CONFIG_CPU_ARM940T) :=-mtune=arm9tdmi | 83 | tune-$(CONFIG_CPU_ARM940T) =-mtune=arm9tdmi |
81 | tune-$(CONFIG_CPU_ARM946E) :=$(call cc-option,-mtune=arm9e,-mtune=arm9tdmi) | 84 | tune-$(CONFIG_CPU_ARM946E) =$(call cc-option,-mtune=arm9e,-mtune=arm9tdmi) |
82 | tune-$(CONFIG_CPU_ARM920T) :=-mtune=arm9tdmi | 85 | tune-$(CONFIG_CPU_ARM920T) =-mtune=arm9tdmi |
83 | tune-$(CONFIG_CPU_ARM922T) :=-mtune=arm9tdmi | 86 | tune-$(CONFIG_CPU_ARM922T) =-mtune=arm9tdmi |
84 | tune-$(CONFIG_CPU_ARM925T) :=-mtune=arm9tdmi | 87 | tune-$(CONFIG_CPU_ARM925T) =-mtune=arm9tdmi |
85 | tune-$(CONFIG_CPU_ARM926T) :=-mtune=arm9tdmi | 88 | tune-$(CONFIG_CPU_ARM926T) =-mtune=arm9tdmi |
86 | tune-$(CONFIG_CPU_FA526) :=-mtune=arm9tdmi | 89 | tune-$(CONFIG_CPU_FA526) =-mtune=arm9tdmi |
87 | tune-$(CONFIG_CPU_SA110) :=-mtune=strongarm110 | 90 | tune-$(CONFIG_CPU_SA110) =-mtune=strongarm110 |
88 | tune-$(CONFIG_CPU_SA1100) :=-mtune=strongarm1100 | 91 | tune-$(CONFIG_CPU_SA1100) =-mtune=strongarm1100 |
89 | tune-$(CONFIG_CPU_XSCALE) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale | 92 | tune-$(CONFIG_CPU_XSCALE) =$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale |
90 | tune-$(CONFIG_CPU_XSC3) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale | 93 | tune-$(CONFIG_CPU_XSC3) =$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale |
91 | tune-$(CONFIG_CPU_FEROCEON) :=$(call cc-option,-mtune=marvell-f,-mtune=xscale) | 94 | tune-$(CONFIG_CPU_FEROCEON) =$(call cc-option,-mtune=marvell-f,-mtune=xscale) |
92 | tune-$(CONFIG_CPU_V6) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) | 95 | tune-$(CONFIG_CPU_V6) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) |
93 | tune-$(CONFIG_CPU_V6K) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) | 96 | tune-$(CONFIG_CPU_V6K) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) |
97 | |||
98 | # Evaluate tune cc-option calls now | ||
99 | tune-y := $(tune-y) | ||
94 | 100 | ||
95 | ifeq ($(CONFIG_AEABI),y) | 101 | ifeq ($(CONFIG_AEABI),y) |
96 | CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork | 102 | CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork |
@@ -295,9 +301,10 @@ zImage Image xipImage bootpImage uImage: vmlinux | |||
295 | zinstall uinstall install: vmlinux | 301 | zinstall uinstall install: vmlinux |
296 | $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ | 302 | $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ |
297 | 303 | ||
298 | %.dtb: scripts | 304 | %.dtb: | scripts |
299 | $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $(boot)/dts/$@ | 305 | $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $(boot)/dts/$@ |
300 | 306 | ||
307 | PHONY += dtbs | ||
301 | dtbs: scripts | 308 | dtbs: scripts |
302 | $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) dtbs | 309 | $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) dtbs |
303 | 310 | ||
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 120b83bfde20..48d0a44270bd 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile | |||
@@ -27,7 +27,7 @@ OBJS += misc.o decompress.o | |||
27 | ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y) | 27 | ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y) |
28 | OBJS += debug.o | 28 | OBJS += debug.o |
29 | endif | 29 | endif |
30 | FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c | 30 | FONTC = $(srctree)/lib/fonts/font_acorn_8x8.c |
31 | 31 | ||
32 | # string library code (-Os is enforced to keep it much smaller) | 32 | # string library code (-Os is enforced to keep it much smaller) |
33 | OBJS += string.o | 33 | OBJS += string.o |
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c index aabc02a68482..d1153c8a765a 100644 --- a/arch/arm/boot/compressed/atags_to_fdt.c +++ b/arch/arm/boot/compressed/atags_to_fdt.c | |||
@@ -53,6 +53,17 @@ static const void *getprop(const void *fdt, const char *node_path, | |||
53 | return fdt_getprop(fdt, offset, property, len); | 53 | return fdt_getprop(fdt, offset, property, len); |
54 | } | 54 | } |
55 | 55 | ||
56 | static uint32_t get_cell_size(const void *fdt) | ||
57 | { | ||
58 | int len; | ||
59 | uint32_t cell_size = 1; | ||
60 | const uint32_t *size_len = getprop(fdt, "/", "#size-cells", &len); | ||
61 | |||
62 | if (size_len) | ||
63 | cell_size = fdt32_to_cpu(*size_len); | ||
64 | return cell_size; | ||
65 | } | ||
66 | |||
56 | static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline) | 67 | static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline) |
57 | { | 68 | { |
58 | char cmdline[COMMAND_LINE_SIZE]; | 69 | char cmdline[COMMAND_LINE_SIZE]; |
@@ -95,9 +106,11 @@ static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline) | |||
95 | int atags_to_fdt(void *atag_list, void *fdt, int total_space) | 106 | int atags_to_fdt(void *atag_list, void *fdt, int total_space) |
96 | { | 107 | { |
97 | struct tag *atag = atag_list; | 108 | struct tag *atag = atag_list; |
98 | uint32_t mem_reg_property[2 * NR_BANKS]; | 109 | /* In the case of 64 bits memory size, need to reserve 2 cells for |
110 | * address and size for each bank */ | ||
111 | uint32_t mem_reg_property[2 * 2 * NR_BANKS]; | ||
99 | int memcount = 0; | 112 | int memcount = 0; |
100 | int ret; | 113 | int ret, memsize; |
101 | 114 | ||
102 | /* make sure we've got an aligned pointer */ | 115 | /* make sure we've got an aligned pointer */ |
103 | if ((u32)atag_list & 0x3) | 116 | if ((u32)atag_list & 0x3) |
@@ -137,8 +150,25 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space) | |||
137 | continue; | 150 | continue; |
138 | if (!atag->u.mem.size) | 151 | if (!atag->u.mem.size) |
139 | continue; | 152 | continue; |
140 | mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start); | 153 | memsize = get_cell_size(fdt); |
141 | mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size); | 154 | |
155 | if (memsize == 2) { | ||
156 | /* if memsize is 2, that means that | ||
157 | * each data needs 2 cells of 32 bits, | ||
158 | * so the data are 64 bits */ | ||
159 | uint64_t *mem_reg_prop64 = | ||
160 | (uint64_t *)mem_reg_property; | ||
161 | mem_reg_prop64[memcount++] = | ||
162 | cpu_to_fdt64(atag->u.mem.start); | ||
163 | mem_reg_prop64[memcount++] = | ||
164 | cpu_to_fdt64(atag->u.mem.size); | ||
165 | } else { | ||
166 | mem_reg_property[memcount++] = | ||
167 | cpu_to_fdt32(atag->u.mem.start); | ||
168 | mem_reg_property[memcount++] = | ||
169 | cpu_to_fdt32(atag->u.mem.size); | ||
170 | } | ||
171 | |||
142 | } else if (atag->hdr.tag == ATAG_INITRD2) { | 172 | } else if (atag->hdr.tag == ATAG_INITRD2) { |
143 | uint32_t initrd_start, initrd_size; | 173 | uint32_t initrd_start, initrd_size; |
144 | initrd_start = atag->u.initrd.start; | 174 | initrd_start = atag->u.initrd.start; |
@@ -150,8 +180,10 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space) | |||
150 | } | 180 | } |
151 | } | 181 | } |
152 | 182 | ||
153 | if (memcount) | 183 | if (memcount) { |
154 | setprop(fdt, "/memory", "reg", mem_reg_property, 4*memcount); | 184 | setprop(fdt, "/memory", "reg", mem_reg_property, |
185 | 4 * memcount * memsize); | ||
186 | } | ||
155 | 187 | ||
156 | return fdt_pack(fdt); | 188 | return fdt_pack(fdt); |
157 | } | 189 | } |
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 032a8d987148..75189f13cf54 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S | |||
@@ -142,7 +142,6 @@ start: | |||
142 | mov r7, r1 @ save architecture ID | 142 | mov r7, r1 @ save architecture ID |
143 | mov r8, r2 @ save atags pointer | 143 | mov r8, r2 @ save atags pointer |
144 | 144 | ||
145 | #ifndef __ARM_ARCH_2__ | ||
146 | /* | 145 | /* |
147 | * Booting from Angel - need to enter SVC mode and disable | 146 | * Booting from Angel - need to enter SVC mode and disable |
148 | * FIQs/IRQs (numeric definitions from angel arm.h source). | 147 | * FIQs/IRQs (numeric definitions from angel arm.h source). |
@@ -158,10 +157,6 @@ not_angel: | |||
158 | safe_svcmode_maskall r0 | 157 | safe_svcmode_maskall r0 |
159 | msr spsr_cxsf, r9 @ Save the CPU boot mode in | 158 | msr spsr_cxsf, r9 @ Save the CPU boot mode in |
160 | @ SPSR | 159 | @ SPSR |
161 | #else | ||
162 | teqp pc, #0x0c000003 @ turn off interrupts | ||
163 | #endif | ||
164 | |||
165 | /* | 160 | /* |
166 | * Note that some cache flushing and other stuff may | 161 | * Note that some cache flushing and other stuff may |
167 | * be needed here - is there an Angel SWI call for this? | 162 | * be needed here - is there an Angel SWI call for this? |
@@ -183,7 +178,19 @@ not_angel: | |||
183 | ldr r4, =zreladdr | 178 | ldr r4, =zreladdr |
184 | #endif | 179 | #endif |
185 | 180 | ||
186 | bl cache_on | 181 | /* |
182 | * Set up a page table only if it won't overwrite ourself. | ||
183 | * That means r4 < pc && r4 - 16k page directory > &_end. | ||
184 | * Given that r4 > &_end is most unfrequent, we add a rough | ||
185 | * additional 1MB of room for a possible appended DTB. | ||
186 | */ | ||
187 | mov r0, pc | ||
188 | cmp r0, r4 | ||
189 | ldrcc r0, LC0+32 | ||
190 | addcc r0, r0, pc | ||
191 | cmpcc r4, r0 | ||
192 | orrcc r4, r4, #1 @ remember we skipped cache_on | ||
193 | blcs cache_on | ||
187 | 194 | ||
188 | restart: adr r0, LC0 | 195 | restart: adr r0, LC0 |
189 | ldmia r0, {r1, r2, r3, r6, r10, r11, r12} | 196 | ldmia r0, {r1, r2, r3, r6, r10, r11, r12} |
@@ -229,7 +236,7 @@ restart: adr r0, LC0 | |||
229 | * r0 = delta | 236 | * r0 = delta |
230 | * r2 = BSS start | 237 | * r2 = BSS start |
231 | * r3 = BSS end | 238 | * r3 = BSS end |
232 | * r4 = final kernel address | 239 | * r4 = final kernel address (possibly with LSB set) |
233 | * r5 = appended dtb size (still unknown) | 240 | * r5 = appended dtb size (still unknown) |
234 | * r6 = _edata | 241 | * r6 = _edata |
235 | * r7 = architecture ID | 242 | * r7 = architecture ID |
@@ -277,6 +284,7 @@ restart: adr r0, LC0 | |||
277 | */ | 284 | */ |
278 | cmp r0, #1 | 285 | cmp r0, #1 |
279 | sub r0, r4, #TEXT_OFFSET | 286 | sub r0, r4, #TEXT_OFFSET |
287 | bic r0, r0, #1 | ||
280 | add r0, r0, #0x100 | 288 | add r0, r0, #0x100 |
281 | mov r1, r6 | 289 | mov r1, r6 |
282 | sub r2, sp, r6 | 290 | sub r2, sp, r6 |
@@ -323,12 +331,13 @@ dtb_check_done: | |||
323 | 331 | ||
324 | /* | 332 | /* |
325 | * Check to see if we will overwrite ourselves. | 333 | * Check to see if we will overwrite ourselves. |
326 | * r4 = final kernel address | 334 | * r4 = final kernel address (possibly with LSB set) |
327 | * r9 = size of decompressed image | 335 | * r9 = size of decompressed image |
328 | * r10 = end of this image, including bss/stack/malloc space if non XIP | 336 | * r10 = end of this image, including bss/stack/malloc space if non XIP |
329 | * We basically want: | 337 | * We basically want: |
330 | * r4 - 16k page directory >= r10 -> OK | 338 | * r4 - 16k page directory >= r10 -> OK |
331 | * r4 + image length <= address of wont_overwrite -> OK | 339 | * r4 + image length <= address of wont_overwrite -> OK |
340 | * Note: the possible LSB in r4 is harmless here. | ||
332 | */ | 341 | */ |
333 | add r10, r10, #16384 | 342 | add r10, r10, #16384 |
334 | cmp r4, r10 | 343 | cmp r4, r10 |
@@ -390,7 +399,8 @@ dtb_check_done: | |||
390 | add sp, sp, r6 | 399 | add sp, sp, r6 |
391 | #endif | 400 | #endif |
392 | 401 | ||
393 | bl cache_clean_flush | 402 | tst r4, #1 |
403 | bleq cache_clean_flush | ||
394 | 404 | ||
395 | adr r0, BSYM(restart) | 405 | adr r0, BSYM(restart) |
396 | add r0, r0, r6 | 406 | add r0, r0, r6 |
@@ -402,7 +412,7 @@ wont_overwrite: | |||
402 | * r0 = delta | 412 | * r0 = delta |
403 | * r2 = BSS start | 413 | * r2 = BSS start |
404 | * r3 = BSS end | 414 | * r3 = BSS end |
405 | * r4 = kernel execution address | 415 | * r4 = kernel execution address (possibly with LSB set) |
406 | * r5 = appended dtb size (0 if not present) | 416 | * r5 = appended dtb size (0 if not present) |
407 | * r7 = architecture ID | 417 | * r7 = architecture ID |
408 | * r8 = atags pointer | 418 | * r8 = atags pointer |
@@ -465,6 +475,15 @@ not_relocated: mov r0, #0 | |||
465 | cmp r2, r3 | 475 | cmp r2, r3 |
466 | blo 1b | 476 | blo 1b |
467 | 477 | ||
478 | /* | ||
479 | * Did we skip the cache setup earlier? | ||
480 | * That is indicated by the LSB in r4. | ||
481 | * Do it now if so. | ||
482 | */ | ||
483 | tst r4, #1 | ||
484 | bic r4, r4, #1 | ||
485 | blne cache_on | ||
486 | |||
468 | /* | 487 | /* |
469 | * The C runtime environment should now be setup sufficiently. | 488 | * The C runtime environment should now be setup sufficiently. |
470 | * Set up some pointers, and start decompressing. | 489 | * Set up some pointers, and start decompressing. |
@@ -513,6 +532,7 @@ LC0: .word LC0 @ r1 | |||
513 | .word _got_start @ r11 | 532 | .word _got_start @ r11 |
514 | .word _got_end @ ip | 533 | .word _got_end @ ip |
515 | .word .L_user_stack_end @ sp | 534 | .word .L_user_stack_end @ sp |
535 | .word _end - restart + 16384 + 1024*1024 | ||
516 | .size LC0, . - LC0 | 536 | .size LC0, . - LC0 |
517 | 537 | ||
518 | #ifdef CONFIG_ARCH_RPC | 538 | #ifdef CONFIG_ARCH_RPC |
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi index 7d1a27949c13..9866cd736dee 100644 --- a/arch/arm/boot/dts/atlas6.dtsi +++ b/arch/arm/boot/dts/atlas6.dtsi | |||
@@ -613,7 +613,7 @@ | |||
613 | }; | 613 | }; |
614 | 614 | ||
615 | rtc-iobg { | 615 | rtc-iobg { |
616 | compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus"; | 616 | compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus", "simple-bus"; |
617 | #address-cells = <1>; | 617 | #address-cells = <1>; |
618 | #size-cells = <1>; | 618 | #size-cells = <1>; |
619 | reg = <0x80030000 0x10000>; | 619 | reg = <0x80030000 0x10000>; |
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi index 17979d5f23b4..c0cdf66f8964 100644 --- a/arch/arm/boot/dts/bcm11351.dtsi +++ b/arch/arm/boot/dts/bcm11351.dtsi | |||
@@ -50,10 +50,10 @@ | |||
50 | }; | 50 | }; |
51 | 51 | ||
52 | L2: l2-cache { | 52 | L2: l2-cache { |
53 | compatible = "arm,pl310-cache"; | 53 | compatible = "bcm,bcm11351-a2-pl310-cache"; |
54 | reg = <0x3ff20000 0x1000>; | 54 | reg = <0x3ff20000 0x1000>; |
55 | cache-unified; | 55 | cache-unified; |
56 | cache-level = <2>; | 56 | cache-level = <2>; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | timer@35006000 { | 59 | timer@35006000 { |
diff --git a/arch/arm/boot/dts/ecx-common.dtsi b/arch/arm/boot/dts/ecx-common.dtsi index d61b535f682a..e8559b753c9d 100644 --- a/arch/arm/boot/dts/ecx-common.dtsi +++ b/arch/arm/boot/dts/ecx-common.dtsi | |||
@@ -33,6 +33,8 @@ | |||
33 | calxeda,port-phys = <&combophy5 0 &combophy0 0 | 33 | calxeda,port-phys = <&combophy5 0 &combophy0 0 |
34 | &combophy0 1 &combophy0 2 | 34 | &combophy0 1 &combophy0 2 |
35 | &combophy0 3>; | 35 | &combophy0 3>; |
36 | calxeda,sgpio-gpio =<&gpioh 5 1 &gpioh 6 1 &gpioh 7 1>; | ||
37 | calxeda,led-order = <4 0 1 2 3>; | ||
36 | }; | 38 | }; |
37 | 39 | ||
38 | sdhci@ffe0e000 { | 40 | sdhci@ffe0e000 { |
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi index 02edd8965f8a..05e9489cf95c 100644 --- a/arch/arm/boot/dts/prima2.dtsi +++ b/arch/arm/boot/dts/prima2.dtsi | |||
@@ -610,7 +610,7 @@ | |||
610 | }; | 610 | }; |
611 | 611 | ||
612 | rtc-iobg { | 612 | rtc-iobg { |
613 | compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus"; | 613 | compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus", "simple-bus"; |
614 | #address-cells = <1>; | 614 | #address-cells = <1>; |
615 | #size-cells = <1>; | 615 | #size-cells = <1>; |
616 | reg = <0x80030000 0x10000>; | 616 | reg = <0x80030000 0x10000>; |
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S index 8178705c4b24..80f033614a1f 100644 --- a/arch/arm/common/mcpm_head.S +++ b/arch/arm/common/mcpm_head.S | |||
@@ -32,11 +32,11 @@ | |||
32 | 1901: adr r0, 1902b | 32 | 1901: adr r0, 1902b |
33 | bl printascii | 33 | bl printascii |
34 | mov r0, r9 | 34 | mov r0, r9 |
35 | bl printhex8 | 35 | bl printhex2 |
36 | adr r0, 1903b | 36 | adr r0, 1903b |
37 | bl printascii | 37 | bl printascii |
38 | mov r0, r10 | 38 | mov r0, r10 |
39 | bl printhex8 | 39 | bl printhex2 |
40 | adr r0, 1904b | 40 | adr r0, 1904b |
41 | bl printascii | 41 | bl printascii |
42 | #endif | 42 | #endif |
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c index 3caed0db6986..510e5b13aa2e 100644 --- a/arch/arm/common/mcpm_platsmp.c +++ b/arch/arm/common/mcpm_platsmp.c | |||
@@ -19,10 +19,6 @@ | |||
19 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
20 | #include <asm/smp_plat.h> | 20 | #include <asm/smp_plat.h> |
21 | 21 | ||
22 | static void __init simple_smp_init_cpus(void) | ||
23 | { | ||
24 | } | ||
25 | |||
26 | static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle) | 22 | static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle) |
27 | { | 23 | { |
28 | unsigned int mpidr, pcpu, pcluster, ret; | 24 | unsigned int mpidr, pcpu, pcluster, ret; |
@@ -74,7 +70,6 @@ static void mcpm_cpu_die(unsigned int cpu) | |||
74 | #endif | 70 | #endif |
75 | 71 | ||
76 | static struct smp_operations __initdata mcpm_smp_ops = { | 72 | static struct smp_operations __initdata mcpm_smp_ops = { |
77 | .smp_init_cpus = simple_smp_init_cpus, | ||
78 | .smp_boot_secondary = mcpm_boot_secondary, | 73 | .smp_boot_secondary = mcpm_boot_secondary, |
79 | .smp_secondary_init = mcpm_secondary_init, | 74 | .smp_secondary_init = mcpm_secondary_init, |
80 | #ifdef CONFIG_HOTPLUG_CPU | 75 | #ifdef CONFIG_HOTPLUG_CPU |
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index f9b7fccd795d..0870b5cd5533 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig | |||
@@ -38,6 +38,8 @@ CONFIG_NR_CPUS=2 | |||
38 | CONFIG_LEDS=y | 38 | CONFIG_LEDS=y |
39 | CONFIG_ZBOOT_ROM_TEXT=0x0 | 39 | CONFIG_ZBOOT_ROM_TEXT=0x0 |
40 | CONFIG_ZBOOT_ROM_BSS=0x0 | 40 | CONFIG_ZBOOT_ROM_BSS=0x0 |
41 | CONFIG_ARM_APPENDED_DTB=y | ||
42 | CONFIG_ARM_ATAG_DTB_COMPAT=y | ||
41 | CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200" | 43 | CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200" |
42 | CONFIG_KEXEC=y | 44 | CONFIG_KEXEC=y |
43 | CONFIG_FPE_NWFPE=y | 45 | CONFIG_FPE_NWFPE=y |
@@ -156,6 +158,13 @@ CONFIG_W1=y | |||
156 | CONFIG_POWER_SUPPLY=y | 158 | CONFIG_POWER_SUPPLY=y |
157 | CONFIG_SENSORS_LM75=m | 159 | CONFIG_SENSORS_LM75=m |
158 | CONFIG_WATCHDOG=y | 160 | CONFIG_WATCHDOG=y |
161 | CONFIG_THERMAL=y | ||
162 | CONFIG_THERMAL_HWMON=y | ||
163 | CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y | ||
164 | CONFIG_THERMAL_GOV_FAIR_SHARE=y | ||
165 | CONFIG_THERMAL_GOV_STEP_WISE=y | ||
166 | CONFIG_THERMAL_GOV_USER_SPACE=y | ||
167 | CONFIG_CPU_THERMAL=y | ||
159 | CONFIG_OMAP_WATCHDOG=y | 168 | CONFIG_OMAP_WATCHDOG=y |
160 | CONFIG_TWL4030_WATCHDOG=y | 169 | CONFIG_TWL4030_WATCHDOG=y |
161 | CONFIG_MFD_TPS65217=y | 170 | CONFIG_MFD_TPS65217=y |
@@ -242,7 +251,13 @@ CONFIG_RTC_DRV_TWL92330=y | |||
242 | CONFIG_RTC_DRV_TWL4030=y | 251 | CONFIG_RTC_DRV_TWL4030=y |
243 | CONFIG_RTC_DRV_OMAP=y | 252 | CONFIG_RTC_DRV_OMAP=y |
244 | CONFIG_DMADEVICES=y | 253 | CONFIG_DMADEVICES=y |
254 | CONFIG_TI_EDMA=y | ||
245 | CONFIG_DMA_OMAP=y | 255 | CONFIG_DMA_OMAP=y |
256 | CONFIG_TI_SOC_THERMAL=y | ||
257 | CONFIG_TI_THERMAL=y | ||
258 | CONFIG_OMAP4_THERMAL=y | ||
259 | CONFIG_OMAP5_THERMAL=y | ||
260 | CONFIG_DRA752_THERMAL=y | ||
246 | CONFIG_EXT2_FS=y | 261 | CONFIG_EXT2_FS=y |
247 | CONFIG_EXT3_FS=y | 262 | CONFIG_EXT3_FS=y |
248 | # CONFIG_EXT3_FS_XATTR is not set | 263 | # CONFIG_EXT3_FS_XATTR is not set |
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index 7c1bfc0aea0c..accefe099182 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h | |||
@@ -80,15 +80,6 @@ static inline u32 arch_timer_get_cntfrq(void) | |||
80 | return val; | 80 | return val; |
81 | } | 81 | } |
82 | 82 | ||
83 | static inline u64 arch_counter_get_cntpct(void) | ||
84 | { | ||
85 | u64 cval; | ||
86 | |||
87 | isb(); | ||
88 | asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval)); | ||
89 | return cval; | ||
90 | } | ||
91 | |||
92 | static inline u64 arch_counter_get_cntvct(void) | 83 | static inline u64 arch_counter_get_cntvct(void) |
93 | { | 84 | { |
94 | u64 cval; | 85 | u64 cval; |
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index cedd3721318b..6493802f880a 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h | |||
@@ -23,6 +23,11 @@ | |||
23 | #define CR_RR (1 << 14) /* Round Robin cache replacement */ | 23 | #define CR_RR (1 << 14) /* Round Robin cache replacement */ |
24 | #define CR_L4 (1 << 15) /* LDR pc can set T bit */ | 24 | #define CR_L4 (1 << 15) /* LDR pc can set T bit */ |
25 | #define CR_DT (1 << 16) | 25 | #define CR_DT (1 << 16) |
26 | #ifdef CONFIG_MMU | ||
27 | #define CR_HA (1 << 17) /* Hardware management of Access Flag */ | ||
28 | #else | ||
29 | #define CR_BR (1 << 17) /* MPU Background region enable (PMSA) */ | ||
30 | #endif | ||
26 | #define CR_IT (1 << 18) | 31 | #define CR_IT (1 << 18) |
27 | #define CR_ST (1 << 19) | 32 | #define CR_ST (1 << 19) |
28 | #define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */ | 33 | #define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */ |
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index d7deb62554c9..8c25dc4e9851 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h | |||
@@ -8,6 +8,7 @@ | |||
8 | #define CPUID_CACHETYPE 1 | 8 | #define CPUID_CACHETYPE 1 |
9 | #define CPUID_TCM 2 | 9 | #define CPUID_TCM 2 |
10 | #define CPUID_TLBTYPE 3 | 10 | #define CPUID_TLBTYPE 3 |
11 | #define CPUID_MPUIR 4 | ||
11 | #define CPUID_MPIDR 5 | 12 | #define CPUID_MPIDR 5 |
12 | 13 | ||
13 | #ifdef CONFIG_CPU_V7M | 14 | #ifdef CONFIG_CPU_V7M |
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h index fe92ccf1d0b0..191ada6e4d2d 100644 --- a/arch/arm/include/asm/div64.h +++ b/arch/arm/include/asm/div64.h | |||
@@ -46,7 +46,7 @@ | |||
46 | __rem; \ | 46 | __rem; \ |
47 | }) | 47 | }) |
48 | 48 | ||
49 | #if __GNUC__ < 4 | 49 | #if __GNUC__ < 4 || !defined(CONFIG_AEABI) |
50 | 50 | ||
51 | /* | 51 | /* |
52 | * gcc versions earlier than 4.0 are simply too problematic for the | 52 | * gcc versions earlier than 4.0 are simply too problematic for the |
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h index e6168c0c18e9..74a8b84f3cb1 100644 --- a/arch/arm/include/asm/glue-proc.h +++ b/arch/arm/include/asm/glue-proc.h | |||
@@ -230,21 +230,21 @@ | |||
230 | # endif | 230 | # endif |
231 | #endif | 231 | #endif |
232 | 232 | ||
233 | #ifdef CONFIG_CPU_PJ4B | 233 | #ifdef CONFIG_CPU_V7M |
234 | # ifdef CPU_NAME | 234 | # ifdef CPU_NAME |
235 | # undef MULTI_CPU | 235 | # undef MULTI_CPU |
236 | # define MULTI_CPU | 236 | # define MULTI_CPU |
237 | # else | 237 | # else |
238 | # define CPU_NAME cpu_pj4b | 238 | # define CPU_NAME cpu_v7m |
239 | # endif | 239 | # endif |
240 | #endif | 240 | #endif |
241 | 241 | ||
242 | #ifdef CONFIG_CPU_V7M | 242 | #ifdef CONFIG_CPU_PJ4B |
243 | # ifdef CPU_NAME | 243 | # ifdef CPU_NAME |
244 | # undef MULTI_CPU | 244 | # undef MULTI_CPU |
245 | # define MULTI_CPU | 245 | # define MULTI_CPU |
246 | # else | 246 | # else |
247 | # define CPU_NAME cpu_v7m | 247 | # define CPU_NAME cpu_pj4b |
248 | # endif | 248 | # endif |
249 | #endif | 249 | #endif |
250 | 250 | ||
diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h new file mode 100644 index 000000000000..d4014fbe5ea3 --- /dev/null +++ b/arch/arm/include/asm/hugetlb-3level.h | |||
@@ -0,0 +1,71 @@ | |||
1 | /* | ||
2 | * arch/arm/include/asm/hugetlb-3level.h | ||
3 | * | ||
4 | * Copyright (C) 2012 ARM Ltd. | ||
5 | * | ||
6 | * Based on arch/x86/include/asm/hugetlb.h. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | ||
21 | |||
22 | #ifndef _ASM_ARM_HUGETLB_3LEVEL_H | ||
23 | #define _ASM_ARM_HUGETLB_3LEVEL_H | ||
24 | |||
25 | |||
26 | /* | ||
27 | * If our huge pte is non-zero then mark the valid bit. | ||
28 | * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero | ||
29 | * ptes. | ||
30 | * (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes). | ||
31 | */ | ||
32 | static inline pte_t huge_ptep_get(pte_t *ptep) | ||
33 | { | ||
34 | pte_t retval = *ptep; | ||
35 | if (pte_val(retval)) | ||
36 | pte_val(retval) |= L_PTE_VALID; | ||
37 | return retval; | ||
38 | } | ||
39 | |||
40 | static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | ||
41 | pte_t *ptep, pte_t pte) | ||
42 | { | ||
43 | set_pte_at(mm, addr, ptep, pte); | ||
44 | } | ||
45 | |||
46 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, | ||
47 | unsigned long addr, pte_t *ptep) | ||
48 | { | ||
49 | ptep_clear_flush(vma, addr, ptep); | ||
50 | } | ||
51 | |||
52 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | ||
53 | unsigned long addr, pte_t *ptep) | ||
54 | { | ||
55 | ptep_set_wrprotect(mm, addr, ptep); | ||
56 | } | ||
57 | |||
58 | static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | ||
59 | unsigned long addr, pte_t *ptep) | ||
60 | { | ||
61 | return ptep_get_and_clear(mm, addr, ptep); | ||
62 | } | ||
63 | |||
64 | static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, | ||
65 | unsigned long addr, pte_t *ptep, | ||
66 | pte_t pte, int dirty) | ||
67 | { | ||
68 | return ptep_set_access_flags(vma, addr, ptep, pte, dirty); | ||
69 | } | ||
70 | |||
71 | #endif /* _ASM_ARM_HUGETLB_3LEVEL_H */ | ||
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h new file mode 100644 index 000000000000..1f1b1cd112f3 --- /dev/null +++ b/arch/arm/include/asm/hugetlb.h | |||
@@ -0,0 +1,84 @@ | |||
1 | /* | ||
2 | * arch/arm/include/asm/hugetlb.h | ||
3 | * | ||
4 | * Copyright (C) 2012 ARM Ltd. | ||
5 | * | ||
6 | * Based on arch/x86/include/asm/hugetlb.h | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | ||
21 | |||
22 | #ifndef _ASM_ARM_HUGETLB_H | ||
23 | #define _ASM_ARM_HUGETLB_H | ||
24 | |||
25 | #include <asm/page.h> | ||
26 | #include <asm-generic/hugetlb.h> | ||
27 | |||
28 | #include <asm/hugetlb-3level.h> | ||
29 | |||
30 | static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, | ||
31 | unsigned long addr, unsigned long end, | ||
32 | unsigned long floor, | ||
33 | unsigned long ceiling) | ||
34 | { | ||
35 | free_pgd_range(tlb, addr, end, floor, ceiling); | ||
36 | } | ||
37 | |||
38 | |||
39 | static inline int is_hugepage_only_range(struct mm_struct *mm, | ||
40 | unsigned long addr, unsigned long len) | ||
41 | { | ||
42 | return 0; | ||
43 | } | ||
44 | |||
45 | static inline int prepare_hugepage_range(struct file *file, | ||
46 | unsigned long addr, unsigned long len) | ||
47 | { | ||
48 | struct hstate *h = hstate_file(file); | ||
49 | if (len & ~huge_page_mask(h)) | ||
50 | return -EINVAL; | ||
51 | if (addr & ~huge_page_mask(h)) | ||
52 | return -EINVAL; | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) | ||
57 | { | ||
58 | } | ||
59 | |||
60 | static inline int huge_pte_none(pte_t pte) | ||
61 | { | ||
62 | return pte_none(pte); | ||
63 | } | ||
64 | |||
65 | static inline pte_t huge_pte_wrprotect(pte_t pte) | ||
66 | { | ||
67 | return pte_wrprotect(pte); | ||
68 | } | ||
69 | |||
70 | static inline int arch_prepare_hugepage(struct page *page) | ||
71 | { | ||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | static inline void arch_release_hugepage(struct page *page) | ||
76 | { | ||
77 | } | ||
78 | |||
79 | static inline void arch_clear_hugepage_flags(struct page *page) | ||
80 | { | ||
81 | clear_bit(PG_dcache_clean, &page->flags); | ||
82 | } | ||
83 | |||
84 | #endif /* _ASM_ARM_HUGETLB_H */ | ||
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 652b56086de7..d070741b2b37 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h | |||
@@ -130,16 +130,16 @@ static inline u32 __raw_readl(const volatile void __iomem *addr) | |||
130 | */ | 130 | */ |
131 | extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long, | 131 | extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long, |
132 | size_t, unsigned int, void *); | 132 | size_t, unsigned int, void *); |
133 | extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int, | 133 | extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int, |
134 | void *); | 134 | void *); |
135 | 135 | ||
136 | extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); | 136 | extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); |
137 | extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int); | 137 | extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int); |
138 | extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached); | 138 | extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached); |
139 | extern void __iounmap(volatile void __iomem *addr); | 139 | extern void __iounmap(volatile void __iomem *addr); |
140 | extern void __arm_iounmap(volatile void __iomem *addr); | 140 | extern void __arm_iounmap(volatile void __iomem *addr); |
141 | 141 | ||
142 | extern void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, | 142 | extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, |
143 | unsigned int, void *); | 143 | unsigned int, void *); |
144 | extern void (*arch_iounmap)(volatile void __iomem *); | 144 | extern void (*arch_iounmap)(volatile void __iomem *); |
145 | 145 | ||
diff --git a/arch/arm/include/asm/kvm_arch_timer.h b/arch/arm/include/asm/kvm_arch_timer.h deleted file mode 100644 index 68cb9e1dfb81..000000000000 --- a/arch/arm/include/asm/kvm_arch_timer.h +++ /dev/null | |||
@@ -1,85 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 ARM Ltd. | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #ifndef __ASM_ARM_KVM_ARCH_TIMER_H | ||
20 | #define __ASM_ARM_KVM_ARCH_TIMER_H | ||
21 | |||
22 | #include <linux/clocksource.h> | ||
23 | #include <linux/hrtimer.h> | ||
24 | #include <linux/workqueue.h> | ||
25 | |||
26 | struct arch_timer_kvm { | ||
27 | #ifdef CONFIG_KVM_ARM_TIMER | ||
28 | /* Is the timer enabled */ | ||
29 | bool enabled; | ||
30 | |||
31 | /* Virtual offset */ | ||
32 | cycle_t cntvoff; | ||
33 | #endif | ||
34 | }; | ||
35 | |||
36 | struct arch_timer_cpu { | ||
37 | #ifdef CONFIG_KVM_ARM_TIMER | ||
38 | /* Registers: control register, timer value */ | ||
39 | u32 cntv_ctl; /* Saved/restored */ | ||
40 | cycle_t cntv_cval; /* Saved/restored */ | ||
41 | |||
42 | /* | ||
43 | * Anything that is not used directly from assembly code goes | ||
44 | * here. | ||
45 | */ | ||
46 | |||
47 | /* Background timer used when the guest is not running */ | ||
48 | struct hrtimer timer; | ||
49 | |||
50 | /* Work queued with the above timer expires */ | ||
51 | struct work_struct expired; | ||
52 | |||
53 | /* Background timer active */ | ||
54 | bool armed; | ||
55 | |||
56 | /* Timer IRQ */ | ||
57 | const struct kvm_irq_level *irq; | ||
58 | #endif | ||
59 | }; | ||
60 | |||
61 | #ifdef CONFIG_KVM_ARM_TIMER | ||
62 | int kvm_timer_hyp_init(void); | ||
63 | int kvm_timer_init(struct kvm *kvm); | ||
64 | void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); | ||
65 | void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu); | ||
66 | void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); | ||
67 | void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu); | ||
68 | #else | ||
69 | static inline int kvm_timer_hyp_init(void) | ||
70 | { | ||
71 | return 0; | ||
72 | }; | ||
73 | |||
74 | static inline int kvm_timer_init(struct kvm *kvm) | ||
75 | { | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {} | ||
80 | static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {} | ||
81 | static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {} | ||
82 | static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {} | ||
83 | #endif | ||
84 | |||
85 | #endif | ||
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 124623e5ef14..64e96960de29 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h | |||
@@ -135,7 +135,6 @@ | |||
135 | #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL) | 135 | #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL) |
136 | #define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30)) | 136 | #define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30)) |
137 | #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) | 137 | #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) |
138 | #define S2_PGD_SIZE (1 << S2_PGD_ORDER) | ||
139 | 138 | ||
140 | /* Virtualization Translation Control Register (VTCR) bits */ | 139 | /* Virtualization Translation Control Register (VTCR) bits */ |
141 | #define VTCR_SH0 (3 << 12) | 140 | #define VTCR_SH0 (3 << 12) |
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 18d50322a9e2..a2f43ddcc300 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h | |||
@@ -37,16 +37,18 @@ | |||
37 | #define c5_AIFSR 15 /* Auxilary Instrunction Fault Status R */ | 37 | #define c5_AIFSR 15 /* Auxilary Instrunction Fault Status R */ |
38 | #define c6_DFAR 16 /* Data Fault Address Register */ | 38 | #define c6_DFAR 16 /* Data Fault Address Register */ |
39 | #define c6_IFAR 17 /* Instruction Fault Address Register */ | 39 | #define c6_IFAR 17 /* Instruction Fault Address Register */ |
40 | #define c9_L2CTLR 18 /* Cortex A15 L2 Control Register */ | 40 | #define c7_PAR 18 /* Physical Address Register */ |
41 | #define c10_PRRR 19 /* Primary Region Remap Register */ | 41 | #define c7_PAR_high 19 /* PAR top 32 bits */ |
42 | #define c10_NMRR 20 /* Normal Memory Remap Register */ | 42 | #define c9_L2CTLR 20 /* Cortex A15 L2 Control Register */ |
43 | #define c12_VBAR 21 /* Vector Base Address Register */ | 43 | #define c10_PRRR 21 /* Primary Region Remap Register */ |
44 | #define c13_CID 22 /* Context ID Register */ | 44 | #define c10_NMRR 22 /* Normal Memory Remap Register */ |
45 | #define c13_TID_URW 23 /* Thread ID, User R/W */ | 45 | #define c12_VBAR 23 /* Vector Base Address Register */ |
46 | #define c13_TID_URO 24 /* Thread ID, User R/O */ | 46 | #define c13_CID 24 /* Context ID Register */ |
47 | #define c13_TID_PRIV 25 /* Thread ID, Privileged */ | 47 | #define c13_TID_URW 25 /* Thread ID, User R/W */ |
48 | #define c14_CNTKCTL 26 /* Timer Control Register (PL1) */ | 48 | #define c13_TID_URO 26 /* Thread ID, User R/O */ |
49 | #define NR_CP15_REGS 27 /* Number of regs (incl. invalid) */ | 49 | #define c13_TID_PRIV 27 /* Thread ID, Privileged */ |
50 | #define c14_CNTKCTL 28 /* Timer Control Register (PL1) */ | ||
51 | #define NR_CP15_REGS 29 /* Number of regs (incl. invalid) */ | ||
50 | 52 | ||
51 | #define ARM_EXCEPTION_RESET 0 | 53 | #define ARM_EXCEPTION_RESET 0 |
52 | #define ARM_EXCEPTION_UNDEFINED 1 | 54 | #define ARM_EXCEPTION_UNDEFINED 1 |
@@ -72,8 +74,6 @@ extern char __kvm_hyp_vector[]; | |||
72 | extern char __kvm_hyp_code_start[]; | 74 | extern char __kvm_hyp_code_start[]; |
73 | extern char __kvm_hyp_code_end[]; | 75 | extern char __kvm_hyp_code_end[]; |
74 | 76 | ||
75 | extern void __kvm_tlb_flush_vmid(struct kvm *kvm); | ||
76 | |||
77 | extern void __kvm_flush_vm_context(void); | 77 | extern void __kvm_flush_vm_context(void); |
78 | extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); | 78 | extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); |
79 | 79 | ||
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 82b4babead2c..a464e8d7b6c5 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h | |||
@@ -65,11 +65,6 @@ static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu) | |||
65 | return cpsr_mode > USR_MODE;; | 65 | return cpsr_mode > USR_MODE;; |
66 | } | 66 | } |
67 | 67 | ||
68 | static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg) | ||
69 | { | ||
70 | return reg == 15; | ||
71 | } | ||
72 | |||
73 | static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu) | 68 | static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu) |
74 | { | 69 | { |
75 | return vcpu->arch.fault.hsr; | 70 | return vcpu->arch.fault.hsr; |
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 57cb786a6203..7d22517d8071 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h | |||
@@ -23,9 +23,14 @@ | |||
23 | #include <asm/kvm_asm.h> | 23 | #include <asm/kvm_asm.h> |
24 | #include <asm/kvm_mmio.h> | 24 | #include <asm/kvm_mmio.h> |
25 | #include <asm/fpstate.h> | 25 | #include <asm/fpstate.h> |
26 | #include <asm/kvm_arch_timer.h> | 26 | #include <kvm/arm_arch_timer.h> |
27 | 27 | ||
28 | #if defined(CONFIG_KVM_ARM_MAX_VCPUS) | ||
28 | #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS | 29 | #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS |
30 | #else | ||
31 | #define KVM_MAX_VCPUS 0 | ||
32 | #endif | ||
33 | |||
29 | #define KVM_USER_MEM_SLOTS 32 | 34 | #define KVM_USER_MEM_SLOTS 32 |
30 | #define KVM_PRIVATE_MEM_SLOTS 4 | 35 | #define KVM_PRIVATE_MEM_SLOTS 4 |
31 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | 36 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 |
@@ -38,7 +43,7 @@ | |||
38 | #define KVM_NR_PAGE_SIZES 1 | 43 | #define KVM_NR_PAGE_SIZES 1 |
39 | #define KVM_PAGES_PER_HPAGE(x) (1UL<<31) | 44 | #define KVM_PAGES_PER_HPAGE(x) (1UL<<31) |
40 | 45 | ||
41 | #include <asm/kvm_vgic.h> | 46 | #include <kvm/arm_vgic.h> |
42 | 47 | ||
43 | struct kvm_vcpu; | 48 | struct kvm_vcpu; |
44 | u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); | 49 | u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); |
@@ -190,8 +195,8 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); | |||
190 | int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | 195 | int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, |
191 | int exception_index); | 196 | int exception_index); |
192 | 197 | ||
193 | static inline void __cpu_init_hyp_mode(unsigned long long boot_pgd_ptr, | 198 | static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, |
194 | unsigned long long pgd_ptr, | 199 | phys_addr_t pgd_ptr, |
195 | unsigned long hyp_stack_ptr, | 200 | unsigned long hyp_stack_ptr, |
196 | unsigned long vector_ptr) | 201 | unsigned long vector_ptr) |
197 | { | 202 | { |
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h deleted file mode 100644 index 343744e4809c..000000000000 --- a/arch/arm/include/asm/kvm_vgic.h +++ /dev/null | |||
@@ -1,220 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 ARM Ltd. | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #ifndef __ASM_ARM_KVM_VGIC_H | ||
20 | #define __ASM_ARM_KVM_VGIC_H | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/kvm.h> | ||
24 | #include <linux/irqreturn.h> | ||
25 | #include <linux/spinlock.h> | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/irqchip/arm-gic.h> | ||
28 | |||
29 | #define VGIC_NR_IRQS 128 | ||
30 | #define VGIC_NR_SGIS 16 | ||
31 | #define VGIC_NR_PPIS 16 | ||
32 | #define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) | ||
33 | #define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS) | ||
34 | #define VGIC_MAX_CPUS KVM_MAX_VCPUS | ||
35 | #define VGIC_MAX_LRS (1 << 6) | ||
36 | |||
37 | /* Sanity checks... */ | ||
38 | #if (VGIC_MAX_CPUS > 8) | ||
39 | #error Invalid number of CPU interfaces | ||
40 | #endif | ||
41 | |||
42 | #if (VGIC_NR_IRQS & 31) | ||
43 | #error "VGIC_NR_IRQS must be a multiple of 32" | ||
44 | #endif | ||
45 | |||
46 | #if (VGIC_NR_IRQS > 1024) | ||
47 | #error "VGIC_NR_IRQS must be <= 1024" | ||
48 | #endif | ||
49 | |||
50 | /* | ||
51 | * The GIC distributor registers describing interrupts have two parts: | ||
52 | * - 32 per-CPU interrupts (SGI + PPI) | ||
53 | * - a bunch of shared interrupts (SPI) | ||
54 | */ | ||
55 | struct vgic_bitmap { | ||
56 | union { | ||
57 | u32 reg[VGIC_NR_PRIVATE_IRQS / 32]; | ||
58 | DECLARE_BITMAP(reg_ul, VGIC_NR_PRIVATE_IRQS); | ||
59 | } percpu[VGIC_MAX_CPUS]; | ||
60 | union { | ||
61 | u32 reg[VGIC_NR_SHARED_IRQS / 32]; | ||
62 | DECLARE_BITMAP(reg_ul, VGIC_NR_SHARED_IRQS); | ||
63 | } shared; | ||
64 | }; | ||
65 | |||
66 | struct vgic_bytemap { | ||
67 | u32 percpu[VGIC_MAX_CPUS][VGIC_NR_PRIVATE_IRQS / 4]; | ||
68 | u32 shared[VGIC_NR_SHARED_IRQS / 4]; | ||
69 | }; | ||
70 | |||
71 | struct vgic_dist { | ||
72 | #ifdef CONFIG_KVM_ARM_VGIC | ||
73 | spinlock_t lock; | ||
74 | bool ready; | ||
75 | |||
76 | /* Virtual control interface mapping */ | ||
77 | void __iomem *vctrl_base; | ||
78 | |||
79 | /* Distributor and vcpu interface mapping in the guest */ | ||
80 | phys_addr_t vgic_dist_base; | ||
81 | phys_addr_t vgic_cpu_base; | ||
82 | |||
83 | /* Distributor enabled */ | ||
84 | u32 enabled; | ||
85 | |||
86 | /* Interrupt enabled (one bit per IRQ) */ | ||
87 | struct vgic_bitmap irq_enabled; | ||
88 | |||
89 | /* Interrupt 'pin' level */ | ||
90 | struct vgic_bitmap irq_state; | ||
91 | |||
92 | /* Level-triggered interrupt in progress */ | ||
93 | struct vgic_bitmap irq_active; | ||
94 | |||
95 | /* Interrupt priority. Not used yet. */ | ||
96 | struct vgic_bytemap irq_priority; | ||
97 | |||
98 | /* Level/edge triggered */ | ||
99 | struct vgic_bitmap irq_cfg; | ||
100 | |||
101 | /* Source CPU per SGI and target CPU */ | ||
102 | u8 irq_sgi_sources[VGIC_MAX_CPUS][VGIC_NR_SGIS]; | ||
103 | |||
104 | /* Target CPU for each IRQ */ | ||
105 | u8 irq_spi_cpu[VGIC_NR_SHARED_IRQS]; | ||
106 | struct vgic_bitmap irq_spi_target[VGIC_MAX_CPUS]; | ||
107 | |||
108 | /* Bitmap indicating which CPU has something pending */ | ||
109 | unsigned long irq_pending_on_cpu; | ||
110 | #endif | ||
111 | }; | ||
112 | |||
113 | struct vgic_cpu { | ||
114 | #ifdef CONFIG_KVM_ARM_VGIC | ||
115 | /* per IRQ to LR mapping */ | ||
116 | u8 vgic_irq_lr_map[VGIC_NR_IRQS]; | ||
117 | |||
118 | /* Pending interrupts on this VCPU */ | ||
119 | DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS); | ||
120 | DECLARE_BITMAP( pending_shared, VGIC_NR_SHARED_IRQS); | ||
121 | |||
122 | /* Bitmap of used/free list registers */ | ||
123 | DECLARE_BITMAP( lr_used, VGIC_MAX_LRS); | ||
124 | |||
125 | /* Number of list registers on this CPU */ | ||
126 | int nr_lr; | ||
127 | |||
128 | /* CPU vif control registers for world switch */ | ||
129 | u32 vgic_hcr; | ||
130 | u32 vgic_vmcr; | ||
131 | u32 vgic_misr; /* Saved only */ | ||
132 | u32 vgic_eisr[2]; /* Saved only */ | ||
133 | u32 vgic_elrsr[2]; /* Saved only */ | ||
134 | u32 vgic_apr; | ||
135 | u32 vgic_lr[VGIC_MAX_LRS]; | ||
136 | #endif | ||
137 | }; | ||
138 | |||
139 | #define LR_EMPTY 0xff | ||
140 | |||
141 | struct kvm; | ||
142 | struct kvm_vcpu; | ||
143 | struct kvm_run; | ||
144 | struct kvm_exit_mmio; | ||
145 | |||
146 | #ifdef CONFIG_KVM_ARM_VGIC | ||
147 | int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr); | ||
148 | int kvm_vgic_hyp_init(void); | ||
149 | int kvm_vgic_init(struct kvm *kvm); | ||
150 | int kvm_vgic_create(struct kvm *kvm); | ||
151 | int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu); | ||
152 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); | ||
153 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); | ||
154 | int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, | ||
155 | bool level); | ||
156 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); | ||
157 | bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
158 | struct kvm_exit_mmio *mmio); | ||
159 | |||
160 | #define irqchip_in_kernel(k) (!!((k)->arch.vgic.vctrl_base)) | ||
161 | #define vgic_initialized(k) ((k)->arch.vgic.ready) | ||
162 | |||
163 | #else | ||
164 | static inline int kvm_vgic_hyp_init(void) | ||
165 | { | ||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr) | ||
170 | { | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | static inline int kvm_vgic_init(struct kvm *kvm) | ||
175 | { | ||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | static inline int kvm_vgic_create(struct kvm *kvm) | ||
180 | { | ||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | ||
185 | { | ||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {} | ||
190 | static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {} | ||
191 | |||
192 | static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, | ||
193 | unsigned int irq_num, bool level) | ||
194 | { | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | ||
199 | { | ||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
204 | struct kvm_exit_mmio *mmio) | ||
205 | { | ||
206 | return false; | ||
207 | } | ||
208 | |||
209 | static inline int irqchip_in_kernel(struct kvm *kvm) | ||
210 | { | ||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | static inline bool vgic_initialized(struct kvm *kvm) | ||
215 | { | ||
216 | return true; | ||
217 | } | ||
218 | #endif | ||
219 | |||
220 | #endif | ||
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 57870ab313c5..e750a938fd3c 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h | |||
@@ -18,6 +18,8 @@ | |||
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/sizes.h> | 19 | #include <linux/sizes.h> |
20 | 20 | ||
21 | #include <asm/cache.h> | ||
22 | |||
21 | #ifdef CONFIG_NEED_MACH_MEMORY_H | 23 | #ifdef CONFIG_NEED_MACH_MEMORY_H |
22 | #include <mach/memory.h> | 24 | #include <mach/memory.h> |
23 | #endif | 25 | #endif |
@@ -141,6 +143,20 @@ | |||
141 | #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) | 143 | #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) |
142 | #define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) | 144 | #define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) |
143 | 145 | ||
146 | /* | ||
147 | * Minimum guaranted alignment in pgd_alloc(). The page table pointers passed | ||
148 | * around in head.S and proc-*.S are shifted by this amount, in order to | ||
149 | * leave spare high bits for systems with physical address extension. This | ||
150 | * does not fully accomodate the 40-bit addressing capability of ARM LPAE, but | ||
151 | * gives us about 38-bits or so. | ||
152 | */ | ||
153 | #ifdef CONFIG_ARM_LPAE | ||
154 | #define ARCH_PGD_SHIFT L1_CACHE_SHIFT | ||
155 | #else | ||
156 | #define ARCH_PGD_SHIFT 0 | ||
157 | #endif | ||
158 | #define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1) | ||
159 | |||
144 | #ifndef __ASSEMBLY__ | 160 | #ifndef __ASSEMBLY__ |
145 | 161 | ||
146 | /* | 162 | /* |
@@ -207,7 +223,7 @@ static inline unsigned long __phys_to_virt(unsigned long x) | |||
207 | * direct-mapped view. We assume this is the first page | 223 | * direct-mapped view. We assume this is the first page |
208 | * of RAM in the mem_map as well. | 224 | * of RAM in the mem_map as well. |
209 | */ | 225 | */ |
210 | #define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) | 226 | #define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) |
211 | 227 | ||
212 | /* | 228 | /* |
213 | * These are *only* valid on the kernel direct mapped RAM memory. | 229 | * These are *only* valid on the kernel direct mapped RAM memory. |
@@ -260,12 +276,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x) | |||
260 | /* | 276 | /* |
261 | * Conversion between a struct page and a physical address. | 277 | * Conversion between a struct page and a physical address. |
262 | * | 278 | * |
263 | * Note: when converting an unknown physical address to a | ||
264 | * struct page, the resulting pointer must be validated | ||
265 | * using VALID_PAGE(). It must return an invalid struct page | ||
266 | * for any physical address not corresponding to a system | ||
267 | * RAM address. | ||
268 | * | ||
269 | * page_to_pfn(page) convert a struct page * to a PFN number | 279 | * page_to_pfn(page) convert a struct page * to a PFN number |
270 | * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * | 280 | * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * |
271 | * | 281 | * |
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index a7b85e0d0cc1..b5792b7fd8d3 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
19 | #include <asm/cachetype.h> | 19 | #include <asm/cachetype.h> |
20 | #include <asm/proc-fns.h> | 20 | #include <asm/proc-fns.h> |
21 | #include <asm/smp_plat.h> | ||
21 | #include <asm-generic/mm_hooks.h> | 22 | #include <asm-generic/mm_hooks.h> |
22 | 23 | ||
23 | void __check_vmalloc_seq(struct mm_struct *mm); | 24 | void __check_vmalloc_seq(struct mm_struct *mm); |
@@ -27,7 +28,15 @@ void __check_vmalloc_seq(struct mm_struct *mm); | |||
27 | void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); | 28 | void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); |
28 | #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) | 29 | #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) |
29 | 30 | ||
30 | DECLARE_PER_CPU(atomic64_t, active_asids); | 31 | #ifdef CONFIG_ARM_ERRATA_798181 |
32 | void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, | ||
33 | cpumask_t *mask); | ||
34 | #else /* !CONFIG_ARM_ERRATA_798181 */ | ||
35 | static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, | ||
36 | cpumask_t *mask) | ||
37 | { | ||
38 | } | ||
39 | #endif /* CONFIG_ARM_ERRATA_798181 */ | ||
31 | 40 | ||
32 | #else /* !CONFIG_CPU_HAS_ASID */ | 41 | #else /* !CONFIG_CPU_HAS_ASID */ |
33 | 42 | ||
@@ -98,12 +107,16 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
98 | #ifdef CONFIG_MMU | 107 | #ifdef CONFIG_MMU |
99 | unsigned int cpu = smp_processor_id(); | 108 | unsigned int cpu = smp_processor_id(); |
100 | 109 | ||
101 | #ifdef CONFIG_SMP | 110 | /* |
102 | /* check for possible thread migration */ | 111 | * __sync_icache_dcache doesn't broadcast the I-cache invalidation, |
103 | if (!cpumask_empty(mm_cpumask(next)) && | 112 | * so check for possible thread migration and invalidate the I-cache |
113 | * if we're new to this CPU. | ||
114 | */ | ||
115 | if (cache_ops_need_broadcast() && | ||
116 | !cpumask_empty(mm_cpumask(next)) && | ||
104 | !cpumask_test_cpu(cpu, mm_cpumask(next))) | 117 | !cpumask_test_cpu(cpu, mm_cpumask(next))) |
105 | __flush_icache_all(); | 118 | __flush_icache_all(); |
106 | #endif | 119 | |
107 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { | 120 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { |
108 | check_and_switch_context(next, tsk); | 121 | check_and_switch_context(next, tsk); |
109 | if (cache_is_vivt()) | 122 | if (cache_is_vivt()) |
diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h new file mode 100644 index 000000000000..c3247cc2fe08 --- /dev/null +++ b/arch/arm/include/asm/mpu.h | |||
@@ -0,0 +1,76 @@ | |||
1 | #ifndef __ARM_MPU_H | ||
2 | #define __ARM_MPU_H | ||
3 | |||
4 | #ifdef CONFIG_ARM_MPU | ||
5 | |||
6 | /* MPUIR layout */ | ||
7 | #define MPUIR_nU 1 | ||
8 | #define MPUIR_DREGION 8 | ||
9 | #define MPUIR_IREGION 16 | ||
10 | #define MPUIR_DREGION_SZMASK (0xFF << MPUIR_DREGION) | ||
11 | #define MPUIR_IREGION_SZMASK (0xFF << MPUIR_IREGION) | ||
12 | |||
13 | /* ID_MMFR0 data relevant to MPU */ | ||
14 | #define MMFR0_PMSA (0xF << 4) | ||
15 | #define MMFR0_PMSAv7 (3 << 4) | ||
16 | |||
17 | /* MPU D/I Size Register fields */ | ||
18 | #define MPU_RSR_SZ 1 | ||
19 | #define MPU_RSR_EN 0 | ||
20 | |||
21 | /* The D/I RSR value for an enabled region spanning the whole of memory */ | ||
22 | #define MPU_RSR_ALL_MEM 63 | ||
23 | |||
24 | /* Individual bits in the DR/IR ACR */ | ||
25 | #define MPU_ACR_XN (1 << 12) | ||
26 | #define MPU_ACR_SHARED (1 << 2) | ||
27 | |||
28 | /* C, B and TEX[2:0] bits only have semantic meanings when grouped */ | ||
29 | #define MPU_RGN_CACHEABLE 0xB | ||
30 | #define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED) | ||
31 | #define MPU_RGN_STRONGLY_ORDERED 0 | ||
32 | |||
33 | /* Main region should only be shared for SMP */ | ||
34 | #ifdef CONFIG_SMP | ||
35 | #define MPU_RGN_NORMAL (MPU_RGN_CACHEABLE | MPU_ACR_SHARED) | ||
36 | #else | ||
37 | #define MPU_RGN_NORMAL MPU_RGN_CACHEABLE | ||
38 | #endif | ||
39 | |||
40 | /* Access permission bits of ACR (only define those that we use)*/ | ||
41 | #define MPU_AP_PL1RW_PL0RW (0x3 << 8) | ||
42 | #define MPU_AP_PL1RW_PL0R0 (0x2 << 8) | ||
43 | #define MPU_AP_PL1RW_PL0NA (0x1 << 8) | ||
44 | |||
45 | /* For minimal static MPU region configurations */ | ||
46 | #define MPU_PROBE_REGION 0 | ||
47 | #define MPU_BG_REGION 1 | ||
48 | #define MPU_RAM_REGION 2 | ||
49 | #define MPU_VECTORS_REGION 3 | ||
50 | |||
51 | /* Maximum number of regions Linux is interested in */ | ||
52 | #define MPU_MAX_REGIONS 16 | ||
53 | |||
54 | #define MPU_DATA_SIDE 0 | ||
55 | #define MPU_INSTR_SIDE 1 | ||
56 | |||
57 | #ifndef __ASSEMBLY__ | ||
58 | |||
59 | struct mpu_rgn { | ||
60 | /* Assume same attributes for d/i-side */ | ||
61 | u32 drbar; | ||
62 | u32 drsr; | ||
63 | u32 dracr; | ||
64 | }; | ||
65 | |||
66 | struct mpu_rgn_info { | ||
67 | u32 mpuir; | ||
68 | struct mpu_rgn rgns[MPU_MAX_REGIONS]; | ||
69 | }; | ||
70 | extern struct mpu_rgn_info mpu_rgn_info; | ||
71 | |||
72 | #endif /* __ASSEMBLY__ */ | ||
73 | |||
74 | #endif /* CONFIG_ARM_MPU */ | ||
75 | |||
76 | #endif | ||
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 812a4944e783..6363f3d1d505 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h | |||
@@ -13,7 +13,7 @@ | |||
13 | /* PAGE_SHIFT determines the page size */ | 13 | /* PAGE_SHIFT determines the page size */ |
14 | #define PAGE_SHIFT 12 | 14 | #define PAGE_SHIFT 12 |
15 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) | 15 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) |
16 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 16 | #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) |
17 | 17 | ||
18 | #ifndef __ASSEMBLY__ | 18 | #ifndef __ASSEMBLY__ |
19 | 19 | ||
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h index 18f5cef82ad5..626989fec4d3 100644 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h | |||
@@ -30,6 +30,7 @@ | |||
30 | #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) | 30 | #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) |
31 | #define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0) | 31 | #define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0) |
32 | #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) | 32 | #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) |
33 | #define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1) | ||
33 | #define PMD_BIT4 (_AT(pmdval_t, 0)) | 34 | #define PMD_BIT4 (_AT(pmdval_t, 0)) |
34 | #define PMD_DOMAIN(x) (_AT(pmdval_t, 0)) | 35 | #define PMD_DOMAIN(x) (_AT(pmdval_t, 0)) |
35 | #define PMD_APTABLE_SHIFT (61) | 36 | #define PMD_APTABLE_SHIFT (61) |
@@ -41,6 +42,8 @@ | |||
41 | */ | 42 | */ |
42 | #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) | 43 | #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) |
43 | #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) | 44 | #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) |
45 | #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ | ||
46 | #define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ | ||
44 | #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) | 47 | #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) |
45 | #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) | 48 | #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) |
46 | #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11) | 49 | #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11) |
@@ -66,6 +69,7 @@ | |||
66 | #define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) | 69 | #define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) |
67 | #define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) | 70 | #define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) |
68 | #define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0) | 71 | #define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0) |
72 | #define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1) | ||
69 | #define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */ | 73 | #define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */ |
70 | #define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */ | 74 | #define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */ |
71 | #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ | 75 | #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ |
@@ -79,4 +83,24 @@ | |||
79 | #define PHYS_MASK_SHIFT (40) | 83 | #define PHYS_MASK_SHIFT (40) |
80 | #define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1) | 84 | #define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1) |
81 | 85 | ||
86 | /* | ||
87 | * TTBR0/TTBR1 split (PAGE_OFFSET): | ||
88 | * 0x40000000: T0SZ = 2, T1SZ = 0 (not used) | ||
89 | * 0x80000000: T0SZ = 0, T1SZ = 1 | ||
90 | * 0xc0000000: T0SZ = 0, T1SZ = 2 | ||
91 | * | ||
92 | * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise | ||
93 | * booting secondary CPUs would end up using TTBR1 for the identity | ||
94 | * mapping set up in TTBR0. | ||
95 | */ | ||
96 | #if defined CONFIG_VMSPLIT_2G | ||
97 | #define TTBR1_OFFSET 16 /* skip two L1 entries */ | ||
98 | #elif defined CONFIG_VMSPLIT_3G | ||
99 | #define TTBR1_OFFSET (4096 * (1 + 3)) /* only L2, skip pgd + 3*pmd */ | ||
100 | #else | ||
101 | #define TTBR1_OFFSET 0 | ||
102 | #endif | ||
103 | |||
104 | #define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16) | ||
105 | |||
82 | #endif | 106 | #endif |
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 86b8fe398b95..5689c18c85f5 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h | |||
@@ -33,7 +33,7 @@ | |||
33 | #define PTRS_PER_PMD 512 | 33 | #define PTRS_PER_PMD 512 |
34 | #define PTRS_PER_PGD 4 | 34 | #define PTRS_PER_PGD 4 |
35 | 35 | ||
36 | #define PTE_HWTABLE_PTRS (PTRS_PER_PTE) | 36 | #define PTE_HWTABLE_PTRS (0) |
37 | #define PTE_HWTABLE_OFF (0) | 37 | #define PTE_HWTABLE_OFF (0) |
38 | #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64)) | 38 | #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64)) |
39 | 39 | ||
@@ -48,20 +48,28 @@ | |||
48 | #define PMD_SHIFT 21 | 48 | #define PMD_SHIFT 21 |
49 | 49 | ||
50 | #define PMD_SIZE (1UL << PMD_SHIFT) | 50 | #define PMD_SIZE (1UL << PMD_SHIFT) |
51 | #define PMD_MASK (~(PMD_SIZE-1)) | 51 | #define PMD_MASK (~((1 << PMD_SHIFT) - 1)) |
52 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | 52 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
53 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 53 | #define PGDIR_MASK (~((1 << PGDIR_SHIFT) - 1)) |
54 | 54 | ||
55 | /* | 55 | /* |
56 | * section address mask and size definitions. | 56 | * section address mask and size definitions. |
57 | */ | 57 | */ |
58 | #define SECTION_SHIFT 21 | 58 | #define SECTION_SHIFT 21 |
59 | #define SECTION_SIZE (1UL << SECTION_SHIFT) | 59 | #define SECTION_SIZE (1UL << SECTION_SHIFT) |
60 | #define SECTION_MASK (~(SECTION_SIZE-1)) | 60 | #define SECTION_MASK (~((1 << SECTION_SHIFT) - 1)) |
61 | 61 | ||
62 | #define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE) | 62 | #define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE) |
63 | 63 | ||
64 | /* | 64 | /* |
65 | * Hugetlb definitions. | ||
66 | */ | ||
67 | #define HPAGE_SHIFT PMD_SHIFT | ||
68 | #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) | ||
69 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | ||
70 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | ||
71 | |||
72 | /* | ||
65 | * "Linux" PTE definitions for LPAE. | 73 | * "Linux" PTE definitions for LPAE. |
66 | * | 74 | * |
67 | * These bits overlap with the hardware bits but the naming is preserved for | 75 | * These bits overlap with the hardware bits but the naming is preserved for |
@@ -79,6 +87,11 @@ | |||
79 | #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */ | 87 | #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */ |
80 | #define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */ | 88 | #define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */ |
81 | 89 | ||
90 | #define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) | ||
91 | #define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55) | ||
92 | #define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56) | ||
93 | #define PMD_SECT_NONE (_AT(pmdval_t, 1) << 57) | ||
94 | |||
82 | /* | 95 | /* |
83 | * To be used in assembly code with the upper page attributes. | 96 | * To be used in assembly code with the upper page attributes. |
84 | */ | 97 | */ |
@@ -166,8 +179,83 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) | |||
166 | clean_pmd_entry(pmdp); \ | 179 | clean_pmd_entry(pmdp); \ |
167 | } while (0) | 180 | } while (0) |
168 | 181 | ||
182 | /* | ||
183 | * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes | ||
184 | * that are written to a page table but not for ptes created with mk_pte. | ||
185 | * | ||
186 | * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to | ||
187 | * hugetlb_cow, where it is compared with an entry in a page table. | ||
188 | * This comparison test fails erroneously leading ultimately to a memory leak. | ||
189 | * | ||
190 | * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is | ||
191 | * present before running the comparison. | ||
192 | */ | ||
193 | #define __HAVE_ARCH_PTE_SAME | ||
194 | #define pte_same(pte_a,pte_b) ((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG \ | ||
195 | : pte_val(pte_a)) \ | ||
196 | == (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG \ | ||
197 | : pte_val(pte_b))) | ||
198 | |||
169 | #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext))) | 199 | #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext))) |
170 | 200 | ||
201 | #define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT)) | ||
202 | #define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT)) | ||
203 | |||
204 | #define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF) | ||
205 | |||
206 | #define __HAVE_ARCH_PMD_WRITE | ||
207 | #define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY)) | ||
208 | |||
209 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
210 | #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) | ||
211 | #define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING) | ||
212 | #endif | ||
213 | |||
214 | #define PMD_BIT_FUNC(fn,op) \ | ||
215 | static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; } | ||
216 | |||
217 | PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY); | ||
218 | PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF); | ||
219 | PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING); | ||
220 | PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY); | ||
221 | PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY); | ||
222 | PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); | ||
223 | |||
224 | #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) | ||
225 | |||
226 | #define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT) | ||
227 | #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) | ||
228 | #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) | ||
229 | |||
230 | /* represent a notpresent pmd by zero, this is used by pmdp_invalidate */ | ||
231 | #define pmd_mknotpresent(pmd) (__pmd(0)) | ||
232 | |||
233 | static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) | ||
234 | { | ||
235 | const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY | | ||
236 | PMD_SECT_VALID | PMD_SECT_NONE; | ||
237 | pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask); | ||
238 | return pmd; | ||
239 | } | ||
240 | |||
241 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, | ||
242 | pmd_t *pmdp, pmd_t pmd) | ||
243 | { | ||
244 | BUG_ON(addr >= TASK_SIZE); | ||
245 | |||
246 | /* create a faulting entry if PROT_NONE protected */ | ||
247 | if (pmd_val(pmd) & PMD_SECT_NONE) | ||
248 | pmd_val(pmd) &= ~PMD_SECT_VALID; | ||
249 | |||
250 | *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG); | ||
251 | flush_pmd_entry(pmdp); | ||
252 | } | ||
253 | |||
254 | static inline int has_transparent_hugepage(void) | ||
255 | { | ||
256 | return 1; | ||
257 | } | ||
258 | |||
171 | #endif /* __ASSEMBLY__ */ | 259 | #endif /* __ASSEMBLY__ */ |
172 | 260 | ||
173 | #endif /* _ASM_PGTABLE_3LEVEL_H */ | 261 | #endif /* _ASM_PGTABLE_3LEVEL_H */ |
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 229e0dde9c71..04aeb02d2e11 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h | |||
@@ -24,6 +24,9 @@ | |||
24 | #include <asm/memory.h> | 24 | #include <asm/memory.h> |
25 | #include <asm/pgtable-hwdef.h> | 25 | #include <asm/pgtable-hwdef.h> |
26 | 26 | ||
27 | |||
28 | #include <asm/tlbflush.h> | ||
29 | |||
27 | #ifdef CONFIG_ARM_LPAE | 30 | #ifdef CONFIG_ARM_LPAE |
28 | #include <asm/pgtable-3level.h> | 31 | #include <asm/pgtable-3level.h> |
29 | #else | 32 | #else |
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index f3628fb3d2b3..5324c1112f3a 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h | |||
@@ -60,7 +60,7 @@ extern struct processor { | |||
60 | /* | 60 | /* |
61 | * Set the page table | 61 | * Set the page table |
62 | */ | 62 | */ |
63 | void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); | 63 | void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm); |
64 | /* | 64 | /* |
65 | * Set a possibly extended PTE. Non-extended PTEs should | 65 | * Set a possibly extended PTE. Non-extended PTEs should |
66 | * ignore 'ext'. | 66 | * ignore 'ext'. |
@@ -82,7 +82,7 @@ extern void cpu_proc_init(void); | |||
82 | extern void cpu_proc_fin(void); | 82 | extern void cpu_proc_fin(void); |
83 | extern int cpu_do_idle(void); | 83 | extern int cpu_do_idle(void); |
84 | extern void cpu_dcache_clean_area(void *, int); | 84 | extern void cpu_dcache_clean_area(void *, int); |
85 | extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); | 85 | extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); |
86 | #ifdef CONFIG_ARM_LPAE | 86 | #ifdef CONFIG_ARM_LPAE |
87 | extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte); | 87 | extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte); |
88 | #else | 88 | #else |
@@ -116,13 +116,25 @@ extern void cpu_resume(void); | |||
116 | #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) | 116 | #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) |
117 | 117 | ||
118 | #ifdef CONFIG_ARM_LPAE | 118 | #ifdef CONFIG_ARM_LPAE |
119 | |||
120 | #define cpu_get_ttbr(nr) \ | ||
121 | ({ \ | ||
122 | u64 ttbr; \ | ||
123 | __asm__("mrrc p15, " #nr ", %Q0, %R0, c2" \ | ||
124 | : "=r" (ttbr)); \ | ||
125 | ttbr; \ | ||
126 | }) | ||
127 | |||
128 | #define cpu_set_ttbr(nr, val) \ | ||
129 | do { \ | ||
130 | u64 ttbr = val; \ | ||
131 | __asm__("mcrr p15, " #nr ", %Q0, %R0, c2" \ | ||
132 | : : "r" (ttbr)); \ | ||
133 | } while (0) | ||
134 | |||
119 | #define cpu_get_pgd() \ | 135 | #define cpu_get_pgd() \ |
120 | ({ \ | 136 | ({ \ |
121 | unsigned long pg, pg2; \ | 137 | u64 pg = cpu_get_ttbr(0); \ |
122 | __asm__("mrrc p15, 0, %0, %1, c2" \ | ||
123 | : "=r" (pg), "=r" (pg2) \ | ||
124 | : \ | ||
125 | : "cc"); \ | ||
126 | pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \ | 138 | pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \ |
127 | (pgd_t *)phys_to_virt(pg); \ | 139 | (pgd_t *)phys_to_virt(pg); \ |
128 | }) | 140 | }) |
@@ -137,6 +149,10 @@ extern void cpu_resume(void); | |||
137 | }) | 149 | }) |
138 | #endif | 150 | #endif |
139 | 151 | ||
152 | #else /*!CONFIG_MMU */ | ||
153 | |||
154 | #define cpu_switch_mm(pgd,mm) { } | ||
155 | |||
140 | #endif | 156 | #endif |
141 | 157 | ||
142 | #endif /* __ASSEMBLY__ */ | 158 | #endif /* __ASSEMBLY__ */ |
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index d3a22bebe6ce..a8cae71caceb 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h | |||
@@ -65,7 +65,10 @@ asmlinkage void secondary_start_kernel(void); | |||
65 | * Initial data for bringing up a secondary CPU. | 65 | * Initial data for bringing up a secondary CPU. |
66 | */ | 66 | */ |
67 | struct secondary_data { | 67 | struct secondary_data { |
68 | unsigned long pgdir; | 68 | union { |
69 | unsigned long mpu_rgn_szr; | ||
70 | unsigned long pgdir; | ||
71 | }; | ||
69 | unsigned long swapper_pg_dir; | 72 | unsigned long swapper_pg_dir; |
70 | void *stack; | 73 | void *stack; |
71 | }; | 74 | }; |
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index e78983202737..6462a721ebd4 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h | |||
@@ -26,6 +26,9 @@ static inline bool is_smp(void) | |||
26 | } | 26 | } |
27 | 27 | ||
28 | /* all SMP configurations have the extended CPUID registers */ | 28 | /* all SMP configurations have the extended CPUID registers */ |
29 | #ifndef CONFIG_MMU | ||
30 | #define tlb_ops_need_broadcast() 0 | ||
31 | #else | ||
29 | static inline int tlb_ops_need_broadcast(void) | 32 | static inline int tlb_ops_need_broadcast(void) |
30 | { | 33 | { |
31 | if (!is_smp()) | 34 | if (!is_smp()) |
@@ -33,6 +36,7 @@ static inline int tlb_ops_need_broadcast(void) | |||
33 | 36 | ||
34 | return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; | 37 | return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; |
35 | } | 38 | } |
39 | #endif | ||
36 | 40 | ||
37 | #if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7 | 41 | #if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7 |
38 | #define cache_ops_need_broadcast() 0 | 42 | #define cache_ops_need_broadcast() 0 |
@@ -66,4 +70,22 @@ static inline int get_logical_index(u32 mpidr) | |||
66 | return -EINVAL; | 70 | return -EINVAL; |
67 | } | 71 | } |
68 | 72 | ||
73 | /* | ||
74 | * NOTE ! Assembly code relies on the following | ||
75 | * structure memory layout in order to carry out load | ||
76 | * multiple from its base address. For more | ||
77 | * information check arch/arm/kernel/sleep.S | ||
78 | */ | ||
79 | struct mpidr_hash { | ||
80 | u32 mask; /* used by sleep.S */ | ||
81 | u32 shift_aff[3]; /* used by sleep.S */ | ||
82 | u32 bits; | ||
83 | }; | ||
84 | |||
85 | extern struct mpidr_hash mpidr_hash; | ||
86 | |||
87 | static inline u32 mpidr_hash_size(void) | ||
88 | { | ||
89 | return 1 << mpidr_hash.bits; | ||
90 | } | ||
69 | #endif | 91 | #endif |
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h index 18d169373612..0393fbab8dd5 100644 --- a/arch/arm/include/asm/smp_scu.h +++ b/arch/arm/include/asm/smp_scu.h | |||
@@ -23,10 +23,21 @@ static inline unsigned long scu_a9_get_base(void) | |||
23 | return pa; | 23 | return pa; |
24 | } | 24 | } |
25 | 25 | ||
26 | #ifdef CONFIG_HAVE_ARM_SCU | ||
26 | unsigned int scu_get_core_count(void __iomem *); | 27 | unsigned int scu_get_core_count(void __iomem *); |
27 | int scu_power_mode(void __iomem *, unsigned int); | 28 | int scu_power_mode(void __iomem *, unsigned int); |
29 | #else | ||
30 | static inline unsigned int scu_get_core_count(void __iomem *scu_base) | ||
31 | { | ||
32 | return 0; | ||
33 | } | ||
34 | static inline int scu_power_mode(void __iomem *scu_base, unsigned int mode) | ||
35 | { | ||
36 | return -EINVAL; | ||
37 | } | ||
38 | #endif | ||
28 | 39 | ||
29 | #ifdef CONFIG_SMP | 40 | #if defined(CONFIG_SMP) && defined(CONFIG_HAVE_ARM_SCU) |
30 | void scu_enable(void __iomem *scu_base); | 41 | void scu_enable(void __iomem *scu_base); |
31 | #else | 42 | #else |
32 | static inline void scu_enable(void __iomem *scu_base) {} | 43 | static inline void scu_enable(void __iomem *scu_base) {} |
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 6220e9fdf4c7..f8b8965666e9 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h | |||
@@ -97,19 +97,22 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) | |||
97 | 97 | ||
98 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | 98 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
99 | { | 99 | { |
100 | unsigned long tmp; | 100 | unsigned long contended, res; |
101 | u32 slock; | 101 | u32 slock; |
102 | 102 | ||
103 | __asm__ __volatile__( | 103 | do { |
104 | " ldrex %0, [%2]\n" | 104 | __asm__ __volatile__( |
105 | " subs %1, %0, %0, ror #16\n" | 105 | " ldrex %0, [%3]\n" |
106 | " addeq %0, %0, %3\n" | 106 | " mov %2, #0\n" |
107 | " strexeq %1, %0, [%2]" | 107 | " subs %1, %0, %0, ror #16\n" |
108 | : "=&r" (slock), "=&r" (tmp) | 108 | " addeq %0, %0, %4\n" |
109 | : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) | 109 | " strexeq %2, %0, [%3]" |
110 | : "cc"); | 110 | : "=&r" (slock), "=&r" (contended), "=r" (res) |
111 | 111 | : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) | |
112 | if (tmp == 0) { | 112 | : "cc"); |
113 | } while (res); | ||
114 | |||
115 | if (!contended) { | ||
113 | smp_mb(); | 116 | smp_mb(); |
114 | return 1; | 117 | return 1; |
115 | } else { | 118 | } else { |
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h index 1c0a551ae375..cd20029bcd94 100644 --- a/arch/arm/include/asm/suspend.h +++ b/arch/arm/include/asm/suspend.h | |||
@@ -1,6 +1,11 @@ | |||
1 | #ifndef __ASM_ARM_SUSPEND_H | 1 | #ifndef __ASM_ARM_SUSPEND_H |
2 | #define __ASM_ARM_SUSPEND_H | 2 | #define __ASM_ARM_SUSPEND_H |
3 | 3 | ||
4 | struct sleep_save_sp { | ||
5 | u32 *save_ptr_stash; | ||
6 | u32 save_ptr_stash_phys; | ||
7 | }; | ||
8 | |||
4 | extern void cpu_resume(void); | 9 | extern void cpu_resume(void); |
5 | extern int cpu_suspend(unsigned long, int (*)(unsigned long)); | 10 | extern int cpu_suspend(unsigned long, int (*)(unsigned long)); |
6 | 11 | ||
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 1995d1a84060..214d4158089a 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h | |||
@@ -58,7 +58,7 @@ struct thread_info { | |||
58 | struct cpu_context_save cpu_context; /* cpu context */ | 58 | struct cpu_context_save cpu_context; /* cpu context */ |
59 | __u32 syscall; /* syscall number */ | 59 | __u32 syscall; /* syscall number */ |
60 | __u8 used_cp[16]; /* thread used copro */ | 60 | __u8 used_cp[16]; /* thread used copro */ |
61 | unsigned long tp_value; | 61 | unsigned long tp_value[2]; /* TLS registers */ |
62 | #ifdef CONFIG_CRUNCH | 62 | #ifdef CONFIG_CRUNCH |
63 | struct crunch_state crunchstate; | 63 | struct crunch_state crunchstate; |
64 | #endif | 64 | #endif |
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index bdf2b8458ec1..46e7cfb3e721 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h | |||
@@ -204,6 +204,12 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | |||
204 | #endif | 204 | #endif |
205 | } | 205 | } |
206 | 206 | ||
207 | static inline void | ||
208 | tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) | ||
209 | { | ||
210 | tlb_add_flush(tlb, addr); | ||
211 | } | ||
212 | |||
207 | #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) | 213 | #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) |
208 | #define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) | 214 | #define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) |
209 | #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) | 215 | #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) |
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index a3625d141c1d..fdbb9e369745 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h | |||
@@ -535,8 +535,33 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, | |||
535 | } | 535 | } |
536 | #endif | 536 | #endif |
537 | 537 | ||
538 | #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) | ||
539 | |||
538 | #endif | 540 | #endif |
539 | 541 | ||
540 | #endif /* CONFIG_MMU */ | 542 | #elif defined(CONFIG_SMP) /* !CONFIG_MMU */ |
543 | |||
544 | #ifndef __ASSEMBLY__ | ||
545 | |||
546 | #include <linux/mm_types.h> | ||
547 | |||
548 | static inline void local_flush_tlb_all(void) { } | ||
549 | static inline void local_flush_tlb_mm(struct mm_struct *mm) { } | ||
550 | static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { } | ||
551 | static inline void local_flush_tlb_kernel_page(unsigned long kaddr) { } | ||
552 | static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { } | ||
553 | static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { } | ||
554 | static inline void local_flush_bp_all(void) { } | ||
555 | |||
556 | extern void flush_tlb_all(void); | ||
557 | extern void flush_tlb_mm(struct mm_struct *mm); | ||
558 | extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr); | ||
559 | extern void flush_tlb_kernel_page(unsigned long kaddr); | ||
560 | extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); | ||
561 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); | ||
562 | extern void flush_bp_all(void); | ||
563 | #endif /* __ASSEMBLY__ */ | ||
564 | |||
565 | #endif | ||
541 | 566 | ||
542 | #endif | 567 | #endif |
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index 73409e6c0251..83259b873333 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h | |||
@@ -2,27 +2,30 @@ | |||
2 | #define __ASMARM_TLS_H | 2 | #define __ASMARM_TLS_H |
3 | 3 | ||
4 | #ifdef __ASSEMBLY__ | 4 | #ifdef __ASSEMBLY__ |
5 | .macro set_tls_none, tp, tmp1, tmp2 | 5 | #include <asm/asm-offsets.h> |
6 | .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2 | ||
6 | .endm | 7 | .endm |
7 | 8 | ||
8 | .macro set_tls_v6k, tp, tmp1, tmp2 | 9 | .macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2 |
10 | mrc p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register | ||
9 | mcr p15, 0, \tp, c13, c0, 3 @ set TLS register | 11 | mcr p15, 0, \tp, c13, c0, 3 @ set TLS register |
10 | mov \tmp1, #0 | 12 | mcr p15, 0, \tpuser, c13, c0, 2 @ and the user r/w register |
11 | mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register | 13 | str \tmp2, [\base, #TI_TP_VALUE + 4] @ save it |
12 | .endm | 14 | .endm |
13 | 15 | ||
14 | .macro set_tls_v6, tp, tmp1, tmp2 | 16 | .macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2 |
15 | ldr \tmp1, =elf_hwcap | 17 | ldr \tmp1, =elf_hwcap |
16 | ldr \tmp1, [\tmp1, #0] | 18 | ldr \tmp1, [\tmp1, #0] |
17 | mov \tmp2, #0xffff0fff | 19 | mov \tmp2, #0xffff0fff |
18 | tst \tmp1, #HWCAP_TLS @ hardware TLS available? | 20 | tst \tmp1, #HWCAP_TLS @ hardware TLS available? |
19 | mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register | ||
20 | movne \tmp1, #0 | ||
21 | mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register | ||
22 | streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0 | 21 | streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0 |
22 | mrcne p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register | ||
23 | mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register | ||
24 | mcrne p15, 0, \tpuser, c13, c0, 2 @ set user r/w register | ||
25 | strne \tmp2, [\base, #TI_TP_VALUE + 4] @ save it | ||
23 | .endm | 26 | .endm |
24 | 27 | ||
25 | .macro set_tls_software, tp, tmp1, tmp2 | 28 | .macro switch_tls_software, base, tp, tpuser, tmp1, tmp2 |
26 | mov \tmp1, #0xffff0fff | 29 | mov \tmp1, #0xffff0fff |
27 | str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0 | 30 | str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0 |
28 | .endm | 31 | .endm |
@@ -31,19 +34,30 @@ | |||
31 | #ifdef CONFIG_TLS_REG_EMUL | 34 | #ifdef CONFIG_TLS_REG_EMUL |
32 | #define tls_emu 1 | 35 | #define tls_emu 1 |
33 | #define has_tls_reg 1 | 36 | #define has_tls_reg 1 |
34 | #define set_tls set_tls_none | 37 | #define switch_tls switch_tls_none |
35 | #elif defined(CONFIG_CPU_V6) | 38 | #elif defined(CONFIG_CPU_V6) |
36 | #define tls_emu 0 | 39 | #define tls_emu 0 |
37 | #define has_tls_reg (elf_hwcap & HWCAP_TLS) | 40 | #define has_tls_reg (elf_hwcap & HWCAP_TLS) |
38 | #define set_tls set_tls_v6 | 41 | #define switch_tls switch_tls_v6 |
39 | #elif defined(CONFIG_CPU_32v6K) | 42 | #elif defined(CONFIG_CPU_32v6K) |
40 | #define tls_emu 0 | 43 | #define tls_emu 0 |
41 | #define has_tls_reg 1 | 44 | #define has_tls_reg 1 |
42 | #define set_tls set_tls_v6k | 45 | #define switch_tls switch_tls_v6k |
43 | #else | 46 | #else |
44 | #define tls_emu 0 | 47 | #define tls_emu 0 |
45 | #define has_tls_reg 0 | 48 | #define has_tls_reg 0 |
46 | #define set_tls set_tls_software | 49 | #define switch_tls switch_tls_software |
47 | #endif | 50 | #endif |
48 | 51 | ||
52 | #ifndef __ASSEMBLY__ | ||
53 | static inline unsigned long get_tpuser(void) | ||
54 | { | ||
55 | unsigned long reg = 0; | ||
56 | |||
57 | if (has_tls_reg && !tls_emu) | ||
58 | __asm__("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg)); | ||
59 | |||
60 | return reg; | ||
61 | } | ||
62 | #endif | ||
49 | #endif /* __ASMARM_TLS_H */ | 63 | #endif /* __ASMARM_TLS_H */ |
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 30cdacb675af..359a7b50b158 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef _ASM_ARM_XEN_PAGE_H | 1 | #ifndef _ASM_ARM_XEN_PAGE_H |
2 | #define _ASM_ARM_XEN_PAGE_H | 2 | #define _ASM_ARM_XEN_PAGE_H |
3 | 3 | ||
4 | #include <asm/mach/map.h> | ||
5 | #include <asm/page.h> | 4 | #include <asm/page.h> |
6 | #include <asm/pgtable.h> | 5 | #include <asm/pgtable.h> |
7 | 6 | ||
@@ -88,6 +87,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) | |||
88 | return __set_phys_to_machine(pfn, mfn); | 87 | return __set_phys_to_machine(pfn, mfn); |
89 | } | 88 | } |
90 | 89 | ||
91 | #define xen_remap(cookie, size) __arm_ioremap((cookie), (size), MT_MEMORY); | 90 | #define xen_remap(cookie, size) ioremap_cached((cookie), (size)); |
92 | 91 | ||
93 | #endif /* _ASM_ARM_XEN_PAGE_H */ | 92 | #endif /* _ASM_ARM_XEN_PAGE_H */ |
diff --git a/arch/arm/include/debug/vexpress.S b/arch/arm/include/debug/vexpress.S index dc8e882a6257..acafb229e2b6 100644 --- a/arch/arm/include/debug/vexpress.S +++ b/arch/arm/include/debug/vexpress.S | |||
@@ -16,6 +16,8 @@ | |||
16 | #define DEBUG_LL_PHYS_BASE_RS1 0x1c000000 | 16 | #define DEBUG_LL_PHYS_BASE_RS1 0x1c000000 |
17 | #define DEBUG_LL_UART_OFFSET_RS1 0x00090000 | 17 | #define DEBUG_LL_UART_OFFSET_RS1 0x00090000 |
18 | 18 | ||
19 | #define DEBUG_LL_UART_PHYS_CRX 0xb0090000 | ||
20 | |||
19 | #define DEBUG_LL_VIRT_BASE 0xf8000000 | 21 | #define DEBUG_LL_VIRT_BASE 0xf8000000 |
20 | 22 | ||
21 | #if defined(CONFIG_DEBUG_VEXPRESS_UART0_DETECT) | 23 | #if defined(CONFIG_DEBUG_VEXPRESS_UART0_DETECT) |
@@ -67,6 +69,14 @@ | |||
67 | 69 | ||
68 | #include <asm/hardware/debug-pl01x.S> | 70 | #include <asm/hardware/debug-pl01x.S> |
69 | 71 | ||
72 | #elif defined(CONFIG_DEBUG_VEXPRESS_UART0_CRX) | ||
73 | |||
74 | .macro addruart,rp,tmp,tmp2 | ||
75 | ldr \rp, =DEBUG_LL_UART_PHYS_CRX | ||
76 | .endm | ||
77 | |||
78 | #include <asm/hardware/debug-pl01x.S> | ||
79 | |||
70 | #else /* CONFIG_DEBUG_LL_UART_NONE */ | 80 | #else /* CONFIG_DEBUG_LL_UART_NONE */ |
71 | 81 | ||
72 | .macro addruart, rp, rv, tmp | 82 | .macro addruart, rp, rv, tmp |
diff --git a/arch/arm/include/uapi/asm/hwcap.h b/arch/arm/include/uapi/asm/hwcap.h index 3688fd15a32d..6d34d080372a 100644 --- a/arch/arm/include/uapi/asm/hwcap.h +++ b/arch/arm/include/uapi/asm/hwcap.h | |||
@@ -25,6 +25,6 @@ | |||
25 | #define HWCAP_IDIVT (1 << 18) | 25 | #define HWCAP_IDIVT (1 << 18) |
26 | #define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */ | 26 | #define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */ |
27 | #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) | 27 | #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) |
28 | 28 | #define HWCAP_LPAE (1 << 20) | |
29 | 29 | ||
30 | #endif /* _UAPI__ASMARM_HWCAP_H */ | 30 | #endif /* _UAPI__ASMARM_HWCAP_H */ |
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index f4285b5ffb05..fccfbdb03df1 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile | |||
@@ -38,7 +38,10 @@ obj-$(CONFIG_ARTHUR) += arthur.o | |||
38 | obj-$(CONFIG_ISA_DMA) += dma-isa.o | 38 | obj-$(CONFIG_ISA_DMA) += dma-isa.o |
39 | obj-$(CONFIG_PCI) += bios32.o isa.o | 39 | obj-$(CONFIG_PCI) += bios32.o isa.o |
40 | obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o | 40 | obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o |
41 | obj-$(CONFIG_SMP) += smp.o smp_tlb.o | 41 | obj-$(CONFIG_SMP) += smp.o |
42 | ifdef CONFIG_MMU | ||
43 | obj-$(CONFIG_SMP) += smp_tlb.o | ||
44 | endif | ||
42 | obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o | 45 | obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o |
43 | obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o | 46 | obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o |
44 | obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o | 47 | obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o |
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index ee68cce6b48e..ded041711beb 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <asm/thread_info.h> | 23 | #include <asm/thread_info.h> |
24 | #include <asm/memory.h> | 24 | #include <asm/memory.h> |
25 | #include <asm/procinfo.h> | 25 | #include <asm/procinfo.h> |
26 | #include <asm/suspend.h> | ||
26 | #include <asm/hardware/cache-l2x0.h> | 27 | #include <asm/hardware/cache-l2x0.h> |
27 | #include <linux/kbuild.h> | 28 | #include <linux/kbuild.h> |
28 | 29 | ||
@@ -145,6 +146,11 @@ int main(void) | |||
145 | #ifdef MULTI_CACHE | 146 | #ifdef MULTI_CACHE |
146 | DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all)); | 147 | DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all)); |
147 | #endif | 148 | #endif |
149 | #ifdef CONFIG_ARM_CPU_SUSPEND | ||
150 | DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp)); | ||
151 | DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); | ||
152 | DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); | ||
153 | #endif | ||
148 | BLANK(); | 154 | BLANK(); |
149 | DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); | 155 | DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); |
150 | DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); | 156 | DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); |
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 582b405befc5..a39cfc2a1f90 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S | |||
@@ -685,15 +685,16 @@ ENTRY(__switch_to) | |||
685 | UNWIND(.fnstart ) | 685 | UNWIND(.fnstart ) |
686 | UNWIND(.cantunwind ) | 686 | UNWIND(.cantunwind ) |
687 | add ip, r1, #TI_CPU_SAVE | 687 | add ip, r1, #TI_CPU_SAVE |
688 | ldr r3, [r2, #TI_TP_VALUE] | ||
689 | ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack | 688 | ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack |
690 | THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack | 689 | THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack |
691 | THUMB( str sp, [ip], #4 ) | 690 | THUMB( str sp, [ip], #4 ) |
692 | THUMB( str lr, [ip], #4 ) | 691 | THUMB( str lr, [ip], #4 ) |
692 | ldr r4, [r2, #TI_TP_VALUE] | ||
693 | ldr r5, [r2, #TI_TP_VALUE + 4] | ||
693 | #ifdef CONFIG_CPU_USE_DOMAINS | 694 | #ifdef CONFIG_CPU_USE_DOMAINS |
694 | ldr r6, [r2, #TI_CPU_DOMAIN] | 695 | ldr r6, [r2, #TI_CPU_DOMAIN] |
695 | #endif | 696 | #endif |
696 | set_tls r3, r4, r5 | 697 | switch_tls r1, r4, r5, r3, r7 |
697 | #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) | 698 | #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) |
698 | ldr r7, [r2, #TI_TASK] | 699 | ldr r7, [r2, #TI_TASK] |
699 | ldr r8, =__stack_chk_guard | 700 | ldr r8, =__stack_chk_guard |
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 85a72b0809ca..94104bf69719 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S | |||
@@ -366,6 +366,16 @@ ENTRY(vector_swi) | |||
366 | #endif | 366 | #endif |
367 | zero_fp | 367 | zero_fp |
368 | 368 | ||
369 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
370 | ldr ip, __cr_alignment | ||
371 | ldr ip, [ip] | ||
372 | mcr p15, 0, ip, c1, c0 @ update control register | ||
373 | #endif | ||
374 | |||
375 | enable_irq | ||
376 | ct_user_exit | ||
377 | get_thread_info tsk | ||
378 | |||
369 | /* | 379 | /* |
370 | * Get the system call number. | 380 | * Get the system call number. |
371 | */ | 381 | */ |
@@ -379,9 +389,9 @@ ENTRY(vector_swi) | |||
379 | #ifdef CONFIG_ARM_THUMB | 389 | #ifdef CONFIG_ARM_THUMB |
380 | tst r8, #PSR_T_BIT | 390 | tst r8, #PSR_T_BIT |
381 | movne r10, #0 @ no thumb OABI emulation | 391 | movne r10, #0 @ no thumb OABI emulation |
382 | ldreq r10, [lr, #-4] @ get SWI instruction | 392 | USER( ldreq r10, [lr, #-4] ) @ get SWI instruction |
383 | #else | 393 | #else |
384 | ldr r10, [lr, #-4] @ get SWI instruction | 394 | USER( ldr r10, [lr, #-4] ) @ get SWI instruction |
385 | #endif | 395 | #endif |
386 | #ifdef CONFIG_CPU_ENDIAN_BE8 | 396 | #ifdef CONFIG_CPU_ENDIAN_BE8 |
387 | rev r10, r10 @ little endian instruction | 397 | rev r10, r10 @ little endian instruction |
@@ -396,22 +406,13 @@ ENTRY(vector_swi) | |||
396 | /* Legacy ABI only, possibly thumb mode. */ | 406 | /* Legacy ABI only, possibly thumb mode. */ |
397 | tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs | 407 | tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs |
398 | addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in | 408 | addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in |
399 | ldreq scno, [lr, #-4] | 409 | USER( ldreq scno, [lr, #-4] ) |
400 | 410 | ||
401 | #else | 411 | #else |
402 | /* Legacy ABI only. */ | 412 | /* Legacy ABI only. */ |
403 | ldr scno, [lr, #-4] @ get SWI instruction | 413 | USER( ldr scno, [lr, #-4] ) @ get SWI instruction |
404 | #endif | 414 | #endif |
405 | 415 | ||
406 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
407 | ldr ip, __cr_alignment | ||
408 | ldr ip, [ip] | ||
409 | mcr p15, 0, ip, c1, c0 @ update control register | ||
410 | #endif | ||
411 | enable_irq | ||
412 | ct_user_exit | ||
413 | |||
414 | get_thread_info tsk | ||
415 | adr tbl, sys_call_table @ load syscall table pointer | 416 | adr tbl, sys_call_table @ load syscall table pointer |
416 | 417 | ||
417 | #if defined(CONFIG_OABI_COMPAT) | 418 | #if defined(CONFIG_OABI_COMPAT) |
@@ -446,6 +447,21 @@ local_restart: | |||
446 | eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back | 447 | eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back |
447 | bcs arm_syscall | 448 | bcs arm_syscall |
448 | b sys_ni_syscall @ not private func | 449 | b sys_ni_syscall @ not private func |
450 | |||
451 | #if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI) | ||
452 | /* | ||
453 | * We failed to handle a fault trying to access the page | ||
454 | * containing the swi instruction, but we're not really in a | ||
455 | * position to return -EFAULT. Instead, return back to the | ||
456 | * instruction and re-enter the user fault handling path trying | ||
457 | * to page it in. This will likely result in sending SEGV to the | ||
458 | * current task. | ||
459 | */ | ||
460 | 9001: | ||
461 | sub lr, lr, #4 | ||
462 | str lr, [sp, #S_PC] | ||
463 | b ret_fast_syscall | ||
464 | #endif | ||
449 | ENDPROC(vector_swi) | 465 | ENDPROC(vector_swi) |
450 | 466 | ||
451 | /* | 467 | /* |
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 8812ce88f7a1..75f14cc3e073 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S | |||
@@ -17,9 +17,12 @@ | |||
17 | #include <asm/assembler.h> | 17 | #include <asm/assembler.h> |
18 | #include <asm/ptrace.h> | 18 | #include <asm/ptrace.h> |
19 | #include <asm/asm-offsets.h> | 19 | #include <asm/asm-offsets.h> |
20 | #include <asm/memory.h> | ||
20 | #include <asm/cp15.h> | 21 | #include <asm/cp15.h> |
21 | #include <asm/thread_info.h> | 22 | #include <asm/thread_info.h> |
22 | #include <asm/v7m.h> | 23 | #include <asm/v7m.h> |
24 | #include <asm/mpu.h> | ||
25 | #include <asm/page.h> | ||
23 | 26 | ||
24 | /* | 27 | /* |
25 | * Kernel startup entry point. | 28 | * Kernel startup entry point. |
@@ -63,12 +66,74 @@ ENTRY(stext) | |||
63 | movs r10, r5 @ invalid processor (r5=0)? | 66 | movs r10, r5 @ invalid processor (r5=0)? |
64 | beq __error_p @ yes, error 'p' | 67 | beq __error_p @ yes, error 'p' |
65 | 68 | ||
66 | adr lr, BSYM(__after_proc_init) @ return (PIC) address | 69 | #ifdef CONFIG_ARM_MPU |
70 | /* Calculate the size of a region covering just the kernel */ | ||
71 | ldr r5, =PHYS_OFFSET @ Region start: PHYS_OFFSET | ||
72 | ldr r6, =(_end) @ Cover whole kernel | ||
73 | sub r6, r6, r5 @ Minimum size of region to map | ||
74 | clz r6, r6 @ Region size must be 2^N... | ||
75 | rsb r6, r6, #31 @ ...so round up region size | ||
76 | lsl r6, r6, #MPU_RSR_SZ @ Put size in right field | ||
77 | orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit | ||
78 | bl __setup_mpu | ||
79 | #endif | ||
80 | ldr r13, =__mmap_switched @ address to jump to after | ||
81 | @ initialising sctlr | ||
82 | adr lr, BSYM(1f) @ return (PIC) address | ||
67 | ARM( add pc, r10, #PROCINFO_INITFUNC ) | 83 | ARM( add pc, r10, #PROCINFO_INITFUNC ) |
68 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) | 84 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) |
69 | THUMB( mov pc, r12 ) | 85 | THUMB( mov pc, r12 ) |
86 | 1: b __after_proc_init | ||
70 | ENDPROC(stext) | 87 | ENDPROC(stext) |
71 | 88 | ||
89 | #ifdef CONFIG_SMP | ||
90 | __CPUINIT | ||
91 | ENTRY(secondary_startup) | ||
92 | /* | ||
93 | * Common entry point for secondary CPUs. | ||
94 | * | ||
95 | * Ensure that we're in SVC mode, and IRQs are disabled. Lookup | ||
96 | * the processor type - there is no need to check the machine type | ||
97 | * as it has already been validated by the primary processor. | ||
98 | */ | ||
99 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 | ||
100 | #ifndef CONFIG_CPU_CP15 | ||
101 | ldr r9, =CONFIG_PROCESSOR_ID | ||
102 | #else | ||
103 | mrc p15, 0, r9, c0, c0 @ get processor id | ||
104 | #endif | ||
105 | bl __lookup_processor_type @ r5=procinfo r9=cpuid | ||
106 | movs r10, r5 @ invalid processor? | ||
107 | beq __error_p @ yes, error 'p' | ||
108 | |||
109 | adr r4, __secondary_data | ||
110 | ldmia r4, {r7, r12} | ||
111 | |||
112 | #ifdef CONFIG_ARM_MPU | ||
113 | /* Use MPU region info supplied by __cpu_up */ | ||
114 | ldr r6, [r7] @ get secondary_data.mpu_szr | ||
115 | bl __setup_mpu @ Initialize the MPU | ||
116 | #endif | ||
117 | |||
118 | adr lr, BSYM(__after_proc_init) @ return address | ||
119 | mov r13, r12 @ __secondary_switched address | ||
120 | ARM( add pc, r10, #PROCINFO_INITFUNC ) | ||
121 | THUMB( add r12, r10, #PROCINFO_INITFUNC ) | ||
122 | THUMB( mov pc, r12 ) | ||
123 | ENDPROC(secondary_startup) | ||
124 | |||
125 | ENTRY(__secondary_switched) | ||
126 | ldr sp, [r7, #8] @ set up the stack pointer | ||
127 | mov fp, #0 | ||
128 | b secondary_start_kernel | ||
129 | ENDPROC(__secondary_switched) | ||
130 | |||
131 | .type __secondary_data, %object | ||
132 | __secondary_data: | ||
133 | .long secondary_data | ||
134 | .long __secondary_switched | ||
135 | #endif /* CONFIG_SMP */ | ||
136 | |||
72 | /* | 137 | /* |
73 | * Set the Control Register and Read the process ID. | 138 | * Set the Control Register and Read the process ID. |
74 | */ | 139 | */ |
@@ -99,10 +164,97 @@ __after_proc_init: | |||
99 | #endif | 164 | #endif |
100 | mcr p15, 0, r0, c1, c0, 0 @ write control reg | 165 | mcr p15, 0, r0, c1, c0, 0 @ write control reg |
101 | #endif /* CONFIG_CPU_CP15 */ | 166 | #endif /* CONFIG_CPU_CP15 */ |
102 | 167 | mov pc, r13 | |
103 | b __mmap_switched @ clear the BSS and jump | ||
104 | @ to start_kernel | ||
105 | ENDPROC(__after_proc_init) | 168 | ENDPROC(__after_proc_init) |
106 | .ltorg | 169 | .ltorg |
107 | 170 | ||
171 | #ifdef CONFIG_ARM_MPU | ||
172 | |||
173 | |||
174 | /* Set which MPU region should be programmed */ | ||
175 | .macro set_region_nr tmp, rgnr | ||
176 | mov \tmp, \rgnr @ Use static region numbers | ||
177 | mcr p15, 0, \tmp, c6, c2, 0 @ Write RGNR | ||
178 | .endm | ||
179 | |||
180 | /* Setup a single MPU region, either D or I side (D-side for unified) */ | ||
181 | .macro setup_region bar, acr, sr, side = MPU_DATA_SIDE | ||
182 | mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR | ||
183 | mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR | ||
184 | mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR | ||
185 | .endm | ||
186 | |||
187 | /* | ||
188 | * Setup the MPU and initial MPU Regions. We create the following regions: | ||
189 | * Region 0: Use this for probing the MPU details, so leave disabled. | ||
190 | * Region 1: Background region - covers the whole of RAM as strongly ordered | ||
191 | * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6 | ||
192 | * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page | ||
193 | * | ||
194 | * r6: Value to be written to DRSR (and IRSR if required) for MPU_RAM_REGION | ||
195 | */ | ||
196 | |||
197 | ENTRY(__setup_mpu) | ||
198 | |||
199 | /* Probe for v7 PMSA compliance */ | ||
200 | mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0 | ||
201 | and r0, r0, #(MMFR0_PMSA) @ PMSA field | ||
202 | teq r0, #(MMFR0_PMSAv7) @ PMSA v7 | ||
203 | bne __error_p @ Fail: ARM_MPU on NOT v7 PMSA | ||
204 | |||
205 | /* Determine whether the D/I-side memory map is unified. We set the | ||
206 | * flags here and continue to use them for the rest of this function */ | ||
207 | mrc p15, 0, r0, c0, c0, 4 @ MPUIR | ||
208 | ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU | ||
209 | beq __error_p @ Fail: ARM_MPU and no MPU | ||
210 | tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified | ||
211 | |||
212 | /* Setup second region first to free up r6 */ | ||
213 | set_region_nr r0, #MPU_RAM_REGION | ||
214 | isb | ||
215 | /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */ | ||
216 | ldr r0, =PHYS_OFFSET @ RAM starts at PHYS_OFFSET | ||
217 | ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL) | ||
218 | |||
219 | setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled | ||
220 | beq 1f @ Memory-map not unified | ||
221 | setup_region r0, r5, r6, MPU_INSTR_SIDE @ PHYS_OFFSET, shared, enabled | ||
222 | 1: isb | ||
223 | |||
224 | /* First/background region */ | ||
225 | set_region_nr r0, #MPU_BG_REGION | ||
226 | isb | ||
227 | /* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */ | ||
228 | mov r0, #0 @ BG region starts at 0x0 | ||
229 | ldr r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA) | ||
230 | mov r6, #MPU_RSR_ALL_MEM @ 4GB region, enabled | ||
231 | |||
232 | setup_region r0, r5, r6, MPU_DATA_SIDE @ 0x0, BG region, enabled | ||
233 | beq 2f @ Memory-map not unified | ||
234 | setup_region r0, r5, r6, MPU_INSTR_SIDE @ 0x0, BG region, enabled | ||
235 | 2: isb | ||
236 | |||
237 | /* Vectors region */ | ||
238 | set_region_nr r0, #MPU_VECTORS_REGION | ||
239 | isb | ||
240 | /* Shared, inaccessible to PL0, rw PL1 */ | ||
241 | mov r0, #CONFIG_VECTORS_BASE @ Cover from VECTORS_BASE | ||
242 | ldr r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL) | ||
243 | /* Writing N to bits 5:1 (RSR_SZ) --> region size 2^N+1 */ | ||
244 | mov r6, #(((PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN) | ||
245 | |||
246 | setup_region r0, r5, r6, MPU_DATA_SIDE @ VECTORS_BASE, PL0 NA, enabled | ||
247 | beq 3f @ Memory-map not unified | ||
248 | setup_region r0, r5, r6, MPU_INSTR_SIDE @ VECTORS_BASE, PL0 NA, enabled | ||
249 | 3: isb | ||
250 | |||
251 | /* Enable the MPU */ | ||
252 | mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR | ||
253 | bic r0, r0, #CR_BR @ Disable the 'default mem-map' | ||
254 | orr r0, r0, #CR_M @ Set SCTRL.M (MPU on) | ||
255 | mcr p15, 0, r0, c1, c0, 0 @ Enable MPU | ||
256 | isb | ||
257 | mov pc,lr | ||
258 | ENDPROC(__setup_mpu) | ||
259 | #endif | ||
108 | #include "head-common.S" | 260 | #include "head-common.S" |
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 8bac553fe213..45e8935cae4e 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S | |||
@@ -156,7 +156,7 @@ ENDPROC(stext) | |||
156 | * | 156 | * |
157 | * Returns: | 157 | * Returns: |
158 | * r0, r3, r5-r7 corrupted | 158 | * r0, r3, r5-r7 corrupted |
159 | * r4 = physical page table address | 159 | * r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h) |
160 | */ | 160 | */ |
161 | __create_page_tables: | 161 | __create_page_tables: |
162 | pgtbl r4, r8 @ page table address | 162 | pgtbl r4, r8 @ page table address |
@@ -331,6 +331,7 @@ __create_page_tables: | |||
331 | #endif | 331 | #endif |
332 | #ifdef CONFIG_ARM_LPAE | 332 | #ifdef CONFIG_ARM_LPAE |
333 | sub r4, r4, #0x1000 @ point to the PGD table | 333 | sub r4, r4, #0x1000 @ point to the PGD table |
334 | mov r4, r4, lsr #ARCH_PGD_SHIFT | ||
334 | #endif | 335 | #endif |
335 | mov pc, lr | 336 | mov pc, lr |
336 | ENDPROC(__create_page_tables) | 337 | ENDPROC(__create_page_tables) |
@@ -408,7 +409,7 @@ __secondary_data: | |||
408 | * r0 = cp#15 control register | 409 | * r0 = cp#15 control register |
409 | * r1 = machine ID | 410 | * r1 = machine ID |
410 | * r2 = atags or dtb pointer | 411 | * r2 = atags or dtb pointer |
411 | * r4 = page table pointer | 412 | * r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h) |
412 | * r9 = processor ID | 413 | * r9 = processor ID |
413 | * r13 = *virtual* address to jump to upon completion | 414 | * r13 = *virtual* address to jump to upon completion |
414 | */ | 415 | */ |
@@ -427,10 +428,7 @@ __enable_mmu: | |||
427 | #ifdef CONFIG_CPU_ICACHE_DISABLE | 428 | #ifdef CONFIG_CPU_ICACHE_DISABLE |
428 | bic r0, r0, #CR_I | 429 | bic r0, r0, #CR_I |
429 | #endif | 430 | #endif |
430 | #ifdef CONFIG_ARM_LPAE | 431 | #ifndef CONFIG_ARM_LPAE |
431 | mov r5, #0 | ||
432 | mcrr p15, 0, r4, r5, c2 @ load TTBR0 | ||
433 | #else | ||
434 | mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ | 432 | mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ |
435 | domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ | 433 | domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ |
436 | domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ | 434 | domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ |
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S index 1315c4ccfa56..4910232c4833 100644 --- a/arch/arm/kernel/hyp-stub.S +++ b/arch/arm/kernel/hyp-stub.S | |||
@@ -153,6 +153,13 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE | |||
153 | mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL | 153 | mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL |
154 | orr r7, r7, #3 @ PL1PCEN | PL1PCTEN | 154 | orr r7, r7, #3 @ PL1PCEN | PL1PCTEN |
155 | mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL | 155 | mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL |
156 | mov r7, #0 | ||
157 | mcrr p15, 4, r7, r7, c14 @ CNTVOFF | ||
158 | |||
159 | @ Disable virtual timer in case it was counting | ||
160 | mrc p15, 0, r7, c14, c3, 1 @ CNTV_CTL | ||
161 | bic r7, #1 @ Clear ENABLE | ||
162 | mcr p15, 0, r7, c14, c3, 1 @ CNTV_CTL | ||
156 | 1: | 163 | 1: |
157 | #endif | 164 | #endif |
158 | 165 | ||
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 8c3094d0f7b7..d9f5cd4e533f 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -569,6 +569,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) | |||
569 | return; | 569 | return; |
570 | } | 570 | } |
571 | 571 | ||
572 | perf_callchain_store(entry, regs->ARM_pc); | ||
572 | tail = (struct frame_tail __user *)regs->ARM_fp - 1; | 573 | tail = (struct frame_tail __user *)regs->ARM_fp - 1; |
573 | 574 | ||
574 | while ((entry->nr < PERF_MAX_STACK_DEPTH) && | 575 | while ((entry->nr < PERF_MAX_STACK_DEPTH) && |
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 6e8931ccf13e..7f1efcd4a6e9 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <asm/thread_notify.h> | 39 | #include <asm/thread_notify.h> |
40 | #include <asm/stacktrace.h> | 40 | #include <asm/stacktrace.h> |
41 | #include <asm/mach/time.h> | 41 | #include <asm/mach/time.h> |
42 | #include <asm/tls.h> | ||
42 | 43 | ||
43 | #ifdef CONFIG_CC_STACKPROTECTOR | 44 | #ifdef CONFIG_CC_STACKPROTECTOR |
44 | #include <linux/stackprotector.h> | 45 | #include <linux/stackprotector.h> |
@@ -374,7 +375,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, | |||
374 | clear_ptrace_hw_breakpoint(p); | 375 | clear_ptrace_hw_breakpoint(p); |
375 | 376 | ||
376 | if (clone_flags & CLONE_SETTLS) | 377 | if (clone_flags & CLONE_SETTLS) |
377 | thread->tp_value = childregs->ARM_r3; | 378 | thread->tp_value[0] = childregs->ARM_r3; |
379 | thread->tp_value[1] = get_tpuser(); | ||
378 | 380 | ||
379 | thread_notify(THREAD_NOTIFY_COPY, thread); | 381 | thread_notify(THREAD_NOTIFY_COPY, thread); |
380 | 382 | ||
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c index 23a11424c568..219f1d73572a 100644 --- a/arch/arm/kernel/psci_smp.c +++ b/arch/arm/kernel/psci_smp.c | |||
@@ -68,8 +68,6 @@ void __ref psci_cpu_die(unsigned int cpu) | |||
68 | /* We should never return */ | 68 | /* We should never return */ |
69 | panic("psci: cpu %d failed to shutdown\n", cpu); | 69 | panic("psci: cpu %d failed to shutdown\n", cpu); |
70 | } | 70 | } |
71 | #else | ||
72 | #define psci_cpu_die NULL | ||
73 | #endif | 71 | #endif |
74 | 72 | ||
75 | bool __init psci_smp_available(void) | 73 | bool __init psci_smp_available(void) |
@@ -80,5 +78,7 @@ bool __init psci_smp_available(void) | |||
80 | 78 | ||
81 | struct smp_operations __initdata psci_smp_ops = { | 79 | struct smp_operations __initdata psci_smp_ops = { |
82 | .smp_boot_secondary = psci_boot_secondary, | 80 | .smp_boot_secondary = psci_boot_secondary, |
81 | #ifdef CONFIG_HOTPLUG_CPU | ||
83 | .cpu_die = psci_cpu_die, | 82 | .cpu_die = psci_cpu_die, |
83 | #endif | ||
84 | }; | 84 | }; |
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 03deeffd9f6d..2bc1514d6dbe 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c | |||
@@ -849,7 +849,7 @@ long arch_ptrace(struct task_struct *child, long request, | |||
849 | #endif | 849 | #endif |
850 | 850 | ||
851 | case PTRACE_GET_THREAD_AREA: | 851 | case PTRACE_GET_THREAD_AREA: |
852 | ret = put_user(task_thread_info(child)->tp_value, | 852 | ret = put_user(task_thread_info(child)->tp_value[0], |
853 | datap); | 853 | datap); |
854 | break; | 854 | break; |
855 | 855 | ||
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 1c8278de6c46..9b653278c9e8 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -367,7 +367,7 @@ void __init early_print(const char *str, ...) | |||
367 | 367 | ||
368 | static void __init cpuid_init_hwcaps(void) | 368 | static void __init cpuid_init_hwcaps(void) |
369 | { | 369 | { |
370 | unsigned int divide_instrs; | 370 | unsigned int divide_instrs, vmsa; |
371 | 371 | ||
372 | if (cpu_architecture() < CPU_ARCH_ARMv7) | 372 | if (cpu_architecture() < CPU_ARCH_ARMv7) |
373 | return; | 373 | return; |
@@ -380,6 +380,11 @@ static void __init cpuid_init_hwcaps(void) | |||
380 | case 1: | 380 | case 1: |
381 | elf_hwcap |= HWCAP_IDIVT; | 381 | elf_hwcap |= HWCAP_IDIVT; |
382 | } | 382 | } |
383 | |||
384 | /* LPAE implies atomic ldrd/strd instructions */ | ||
385 | vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0; | ||
386 | if (vmsa >= 5) | ||
387 | elf_hwcap |= HWCAP_LPAE; | ||
383 | } | 388 | } |
384 | 389 | ||
385 | static void __init feat_v6_fixup(void) | 390 | static void __init feat_v6_fixup(void) |
@@ -470,9 +475,82 @@ void __init smp_setup_processor_id(void) | |||
470 | for (i = 1; i < nr_cpu_ids; ++i) | 475 | for (i = 1; i < nr_cpu_ids; ++i) |
471 | cpu_logical_map(i) = i == cpu ? 0 : i; | 476 | cpu_logical_map(i) = i == cpu ? 0 : i; |
472 | 477 | ||
478 | /* | ||
479 | * clear __my_cpu_offset on boot CPU to avoid hang caused by | ||
480 | * using percpu variable early, for example, lockdep will | ||
481 | * access percpu variable inside lock_release | ||
482 | */ | ||
483 | set_my_cpu_offset(0); | ||
484 | |||
473 | printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr); | 485 | printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr); |
474 | } | 486 | } |
475 | 487 | ||
488 | struct mpidr_hash mpidr_hash; | ||
489 | #ifdef CONFIG_SMP | ||
490 | /** | ||
491 | * smp_build_mpidr_hash - Pre-compute shifts required at each affinity | ||
492 | * level in order to build a linear index from an | ||
493 | * MPIDR value. Resulting algorithm is a collision | ||
494 | * free hash carried out through shifting and ORing | ||
495 | */ | ||
496 | static void __init smp_build_mpidr_hash(void) | ||
497 | { | ||
498 | u32 i, affinity; | ||
499 | u32 fs[3], bits[3], ls, mask = 0; | ||
500 | /* | ||
501 | * Pre-scan the list of MPIDRS and filter out bits that do | ||
502 | * not contribute to affinity levels, ie they never toggle. | ||
503 | */ | ||
504 | for_each_possible_cpu(i) | ||
505 | mask |= (cpu_logical_map(i) ^ cpu_logical_map(0)); | ||
506 | pr_debug("mask of set bits 0x%x\n", mask); | ||
507 | /* | ||
508 | * Find and stash the last and first bit set at all affinity levels to | ||
509 | * check how many bits are required to represent them. | ||
510 | */ | ||
511 | for (i = 0; i < 3; i++) { | ||
512 | affinity = MPIDR_AFFINITY_LEVEL(mask, i); | ||
513 | /* | ||
514 | * Find the MSB bit and LSB bits position | ||
515 | * to determine how many bits are required | ||
516 | * to express the affinity level. | ||
517 | */ | ||
518 | ls = fls(affinity); | ||
519 | fs[i] = affinity ? ffs(affinity) - 1 : 0; | ||
520 | bits[i] = ls - fs[i]; | ||
521 | } | ||
522 | /* | ||
523 | * An index can be created from the MPIDR by isolating the | ||
524 | * significant bits at each affinity level and by shifting | ||
525 | * them in order to compress the 24 bits values space to a | ||
526 | * compressed set of values. This is equivalent to hashing | ||
527 | * the MPIDR through shifting and ORing. It is a collision free | ||
528 | * hash though not minimal since some levels might contain a number | ||
529 | * of CPUs that is not an exact power of 2 and their bit | ||
530 | * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}. | ||
531 | */ | ||
532 | mpidr_hash.shift_aff[0] = fs[0]; | ||
533 | mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0]; | ||
534 | mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] - | ||
535 | (bits[1] + bits[0]); | ||
536 | mpidr_hash.mask = mask; | ||
537 | mpidr_hash.bits = bits[2] + bits[1] + bits[0]; | ||
538 | pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n", | ||
539 | mpidr_hash.shift_aff[0], | ||
540 | mpidr_hash.shift_aff[1], | ||
541 | mpidr_hash.shift_aff[2], | ||
542 | mpidr_hash.mask, | ||
543 | mpidr_hash.bits); | ||
544 | /* | ||
545 | * 4x is an arbitrary value used to warn on a hash table much bigger | ||
546 | * than expected on most systems. | ||
547 | */ | ||
548 | if (mpidr_hash_size() > 4 * num_possible_cpus()) | ||
549 | pr_warn("Large number of MPIDR hash buckets detected\n"); | ||
550 | sync_cache_w(&mpidr_hash); | ||
551 | } | ||
552 | #endif | ||
553 | |||
476 | static void __init setup_processor(void) | 554 | static void __init setup_processor(void) |
477 | { | 555 | { |
478 | struct proc_info_list *list; | 556 | struct proc_info_list *list; |
@@ -820,6 +898,7 @@ void __init setup_arch(char **cmdline_p) | |||
820 | smp_set_ops(mdesc->smp); | 898 | smp_set_ops(mdesc->smp); |
821 | } | 899 | } |
822 | smp_init_cpus(); | 900 | smp_init_cpus(); |
901 | smp_build_mpidr_hash(); | ||
823 | } | 902 | } |
824 | #endif | 903 | #endif |
825 | 904 | ||
@@ -892,6 +971,7 @@ static const char *hwcap_str[] = { | |||
892 | "vfpv4", | 971 | "vfpv4", |
893 | "idiva", | 972 | "idiva", |
894 | "idivt", | 973 | "idivt", |
974 | "lpae", | ||
895 | NULL | 975 | NULL |
896 | }; | 976 | }; |
897 | 977 | ||
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 296786bdbb73..1c16c35c271a 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c | |||
@@ -392,14 +392,19 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, | |||
392 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) | 392 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
393 | idx += 3; | 393 | idx += 3; |
394 | 394 | ||
395 | /* | ||
396 | * Put the sigreturn code on the stack no matter which return | ||
397 | * mechanism we use in order to remain ABI compliant | ||
398 | */ | ||
395 | if (__put_user(sigreturn_codes[idx], rc) || | 399 | if (__put_user(sigreturn_codes[idx], rc) || |
396 | __put_user(sigreturn_codes[idx+1], rc+1)) | 400 | __put_user(sigreturn_codes[idx+1], rc+1)) |
397 | return 1; | 401 | return 1; |
398 | 402 | ||
399 | if (cpsr & MODE32_BIT) { | 403 | if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) { |
400 | /* | 404 | /* |
401 | * 32-bit code can use the new high-page | 405 | * 32-bit code can use the new high-page |
402 | * signal return code support. | 406 | * signal return code support except when the MPU has |
407 | * protected the vectors page from PL0 | ||
403 | */ | 408 | */ |
404 | retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb; | 409 | retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb; |
405 | } else { | 410 | } else { |
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 987dcf33415c..db1536b8b30b 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S | |||
@@ -7,6 +7,49 @@ | |||
7 | .text | 7 | .text |
8 | 8 | ||
9 | /* | 9 | /* |
10 | * Implementation of MPIDR hash algorithm through shifting | ||
11 | * and OR'ing. | ||
12 | * | ||
13 | * @dst: register containing hash result | ||
14 | * @rs0: register containing affinity level 0 bit shift | ||
15 | * @rs1: register containing affinity level 1 bit shift | ||
16 | * @rs2: register containing affinity level 2 bit shift | ||
17 | * @mpidr: register containing MPIDR value | ||
18 | * @mask: register containing MPIDR mask | ||
19 | * | ||
20 | * Pseudo C-code: | ||
21 | * | ||
22 | *u32 dst; | ||
23 | * | ||
24 | *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 mpidr, u32 mask) { | ||
25 | * u32 aff0, aff1, aff2; | ||
26 | * u32 mpidr_masked = mpidr & mask; | ||
27 | * aff0 = mpidr_masked & 0xff; | ||
28 | * aff1 = mpidr_masked & 0xff00; | ||
29 | * aff2 = mpidr_masked & 0xff0000; | ||
30 | * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2); | ||
31 | *} | ||
32 | * Input registers: rs0, rs1, rs2, mpidr, mask | ||
33 | * Output register: dst | ||
34 | * Note: input and output registers must be disjoint register sets | ||
35 | (eg: a macro instance with mpidr = r1 and dst = r1 is invalid) | ||
36 | */ | ||
37 | .macro compute_mpidr_hash dst, rs0, rs1, rs2, mpidr, mask | ||
38 | and \mpidr, \mpidr, \mask @ mask out MPIDR bits | ||
39 | and \dst, \mpidr, #0xff @ mask=aff0 | ||
40 | ARM( mov \dst, \dst, lsr \rs0 ) @ dst=aff0>>rs0 | ||
41 | THUMB( lsr \dst, \dst, \rs0 ) | ||
42 | and \mask, \mpidr, #0xff00 @ mask = aff1 | ||
43 | ARM( orr \dst, \dst, \mask, lsr \rs1 ) @ dst|=(aff1>>rs1) | ||
44 | THUMB( lsr \mask, \mask, \rs1 ) | ||
45 | THUMB( orr \dst, \dst, \mask ) | ||
46 | and \mask, \mpidr, #0xff0000 @ mask = aff2 | ||
47 | ARM( orr \dst, \dst, \mask, lsr \rs2 ) @ dst|=(aff2>>rs2) | ||
48 | THUMB( lsr \mask, \mask, \rs2 ) | ||
49 | THUMB( orr \dst, \dst, \mask ) | ||
50 | .endm | ||
51 | |||
52 | /* | ||
10 | * Save CPU state for a suspend. This saves the CPU general purpose | 53 | * Save CPU state for a suspend. This saves the CPU general purpose |
11 | * registers, and allocates space on the kernel stack to save the CPU | 54 | * registers, and allocates space on the kernel stack to save the CPU |
12 | * specific registers and some other data for resume. | 55 | * specific registers and some other data for resume. |
@@ -29,12 +72,18 @@ ENTRY(__cpu_suspend) | |||
29 | mov r1, r4 @ size of save block | 72 | mov r1, r4 @ size of save block |
30 | mov r2, r5 @ virtual SP | 73 | mov r2, r5 @ virtual SP |
31 | ldr r3, =sleep_save_sp | 74 | ldr r3, =sleep_save_sp |
32 | #ifdef CONFIG_SMP | 75 | ldr r3, [r3, #SLEEP_SAVE_SP_VIRT] |
33 | ALT_SMP(mrc p15, 0, lr, c0, c0, 5) | 76 | ALT_SMP(mrc p15, 0, r9, c0, c0, 5) |
34 | ALT_UP(mov lr, #0) | 77 | ALT_UP_B(1f) |
35 | and lr, lr, #15 | 78 | ldr r8, =mpidr_hash |
79 | /* | ||
80 | * This ldmia relies on the memory layout of the mpidr_hash | ||
81 | * struct mpidr_hash. | ||
82 | */ | ||
83 | ldmia r8, {r4-r7} @ r4 = mpidr mask (r5,r6,r7) = l[0,1,2] shifts | ||
84 | compute_mpidr_hash lr, r5, r6, r7, r9, r4 | ||
36 | add r3, r3, lr, lsl #2 | 85 | add r3, r3, lr, lsl #2 |
37 | #endif | 86 | 1: |
38 | bl __cpu_suspend_save | 87 | bl __cpu_suspend_save |
39 | adr lr, BSYM(cpu_suspend_abort) | 88 | adr lr, BSYM(cpu_suspend_abort) |
40 | ldmfd sp!, {r0, pc} @ call suspend fn | 89 | ldmfd sp!, {r0, pc} @ call suspend fn |
@@ -81,15 +130,23 @@ ENDPROC(cpu_resume_after_mmu) | |||
81 | .data | 130 | .data |
82 | .align | 131 | .align |
83 | ENTRY(cpu_resume) | 132 | ENTRY(cpu_resume) |
84 | #ifdef CONFIG_SMP | 133 | mov r1, #0 |
85 | adr r0, sleep_save_sp | 134 | ALT_SMP(mrc p15, 0, r0, c0, c0, 5) |
86 | ALT_SMP(mrc p15, 0, r1, c0, c0, 5) | 135 | ALT_UP_B(1f) |
87 | ALT_UP(mov r1, #0) | 136 | adr r2, mpidr_hash_ptr |
88 | and r1, r1, #15 | 137 | ldr r3, [r2] |
89 | ldr r0, [r0, r1, lsl #2] @ stack phys addr | 138 | add r2, r2, r3 @ r2 = struct mpidr_hash phys address |
90 | #else | 139 | /* |
91 | ldr r0, sleep_save_sp @ stack phys addr | 140 | * This ldmia relies on the memory layout of the mpidr_hash |
92 | #endif | 141 | * struct mpidr_hash. |
142 | */ | ||
143 | ldmia r2, { r3-r6 } @ r3 = mpidr mask (r4,r5,r6) = l[0,1,2] shifts | ||
144 | compute_mpidr_hash r1, r4, r5, r6, r0, r3 | ||
145 | 1: | ||
146 | adr r0, _sleep_save_sp | ||
147 | ldr r0, [r0, #SLEEP_SAVE_SP_PHYS] | ||
148 | ldr r0, [r0, r1, lsl #2] | ||
149 | |||
93 | setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off | 150 | setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off |
94 | @ load phys pgd, stack, resume fn | 151 | @ load phys pgd, stack, resume fn |
95 | ARM( ldmia r0!, {r1, sp, pc} ) | 152 | ARM( ldmia r0!, {r1, sp, pc} ) |
@@ -98,7 +155,11 @@ THUMB( mov sp, r2 ) | |||
98 | THUMB( bx r3 ) | 155 | THUMB( bx r3 ) |
99 | ENDPROC(cpu_resume) | 156 | ENDPROC(cpu_resume) |
100 | 157 | ||
101 | sleep_save_sp: | 158 | .align 2 |
102 | .rept CONFIG_NR_CPUS | 159 | mpidr_hash_ptr: |
103 | .long 0 @ preserve stack phys ptr here | 160 | .long mpidr_hash - . @ mpidr_hash struct offset |
104 | .endr | 161 | |
162 | .type sleep_save_sp, #object | ||
163 | ENTRY(sleep_save_sp) | ||
164 | _sleep_save_sp: | ||
165 | .space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp | ||
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 5919eb451bb9..c5fb5469054b 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <asm/smp_plat.h> | 45 | #include <asm/smp_plat.h> |
46 | #include <asm/virt.h> | 46 | #include <asm/virt.h> |
47 | #include <asm/mach/arch.h> | 47 | #include <asm/mach/arch.h> |
48 | #include <asm/mpu.h> | ||
48 | 49 | ||
49 | /* | 50 | /* |
50 | * as from 2.5, kernels no longer have an init_tasks structure | 51 | * as from 2.5, kernels no longer have an init_tasks structure |
@@ -78,6 +79,13 @@ void __init smp_set_ops(struct smp_operations *ops) | |||
78 | smp_ops = *ops; | 79 | smp_ops = *ops; |
79 | }; | 80 | }; |
80 | 81 | ||
82 | static unsigned long get_arch_pgd(pgd_t *pgd) | ||
83 | { | ||
84 | phys_addr_t pgdir = virt_to_phys(pgd); | ||
85 | BUG_ON(pgdir & ARCH_PGD_MASK); | ||
86 | return pgdir >> ARCH_PGD_SHIFT; | ||
87 | } | ||
88 | |||
81 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) | 89 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) |
82 | { | 90 | { |
83 | int ret; | 91 | int ret; |
@@ -87,8 +95,14 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
87 | * its stack and the page tables. | 95 | * its stack and the page tables. |
88 | */ | 96 | */ |
89 | secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; | 97 | secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; |
90 | secondary_data.pgdir = virt_to_phys(idmap_pgd); | 98 | #ifdef CONFIG_ARM_MPU |
91 | secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir); | 99 | secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr; |
100 | #endif | ||
101 | |||
102 | #ifdef CONFIG_MMU | ||
103 | secondary_data.pgdir = get_arch_pgd(idmap_pgd); | ||
104 | secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir); | ||
105 | #endif | ||
92 | __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); | 106 | __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); |
93 | outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); | 107 | outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); |
94 | 108 | ||
@@ -112,9 +126,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
112 | pr_err("CPU%u: failed to boot: %d\n", cpu, ret); | 126 | pr_err("CPU%u: failed to boot: %d\n", cpu, ret); |
113 | } | 127 | } |
114 | 128 | ||
115 | secondary_data.stack = NULL; | ||
116 | secondary_data.pgdir = 0; | ||
117 | 129 | ||
130 | memset(&secondary_data, 0, sizeof(secondary_data)); | ||
118 | return ret; | 131 | return ret; |
119 | } | 132 | } |
120 | 133 | ||
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index 9a52a07aa40e..a98b62dca2fa 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c | |||
@@ -103,7 +103,7 @@ static void broadcast_tlb_a15_erratum(void) | |||
103 | 103 | ||
104 | static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) | 104 | static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) |
105 | { | 105 | { |
106 | int cpu, this_cpu; | 106 | int this_cpu; |
107 | cpumask_t mask = { CPU_BITS_NONE }; | 107 | cpumask_t mask = { CPU_BITS_NONE }; |
108 | 108 | ||
109 | if (!erratum_a15_798181()) | 109 | if (!erratum_a15_798181()) |
@@ -111,21 +111,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) | |||
111 | 111 | ||
112 | dummy_flush_tlb_a15_erratum(); | 112 | dummy_flush_tlb_a15_erratum(); |
113 | this_cpu = get_cpu(); | 113 | this_cpu = get_cpu(); |
114 | for_each_online_cpu(cpu) { | 114 | a15_erratum_get_cpumask(this_cpu, mm, &mask); |
115 | if (cpu == this_cpu) | ||
116 | continue; | ||
117 | /* | ||
118 | * We only need to send an IPI if the other CPUs are running | ||
119 | * the same ASID as the one being invalidated. There is no | ||
120 | * need for locking around the active_asids check since the | ||
121 | * switch_mm() function has at least one dmb() (as required by | ||
122 | * this workaround) in case a context switch happens on | ||
123 | * another CPU after the condition below. | ||
124 | */ | ||
125 | if (atomic64_read(&mm->context.id) == | ||
126 | atomic64_read(&per_cpu(active_asids, cpu))) | ||
127 | cpumask_set_cpu(cpu, &mask); | ||
128 | } | ||
129 | smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); | 115 | smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); |
130 | put_cpu(); | 116 | put_cpu(); |
131 | } | 117 | } |
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index c59c97ea8268..41cf3cbf756d 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c | |||
@@ -1,15 +1,54 @@ | |||
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/slab.h> | ||
2 | 3 | ||
4 | #include <asm/cacheflush.h> | ||
3 | #include <asm/idmap.h> | 5 | #include <asm/idmap.h> |
4 | #include <asm/pgalloc.h> | 6 | #include <asm/pgalloc.h> |
5 | #include <asm/pgtable.h> | 7 | #include <asm/pgtable.h> |
6 | #include <asm/memory.h> | 8 | #include <asm/memory.h> |
9 | #include <asm/smp_plat.h> | ||
7 | #include <asm/suspend.h> | 10 | #include <asm/suspend.h> |
8 | #include <asm/tlbflush.h> | 11 | #include <asm/tlbflush.h> |
9 | 12 | ||
10 | extern int __cpu_suspend(unsigned long, int (*)(unsigned long)); | 13 | extern int __cpu_suspend(unsigned long, int (*)(unsigned long)); |
11 | extern void cpu_resume_mmu(void); | 14 | extern void cpu_resume_mmu(void); |
12 | 15 | ||
16 | #ifdef CONFIG_MMU | ||
17 | /* | ||
18 | * Hide the first two arguments to __cpu_suspend - these are an implementation | ||
19 | * detail which platform code shouldn't have to know about. | ||
20 | */ | ||
21 | int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | ||
22 | { | ||
23 | struct mm_struct *mm = current->active_mm; | ||
24 | int ret; | ||
25 | |||
26 | if (!idmap_pgd) | ||
27 | return -EINVAL; | ||
28 | |||
29 | /* | ||
30 | * Provide a temporary page table with an identity mapping for | ||
31 | * the MMU-enable code, required for resuming. On successful | ||
32 | * resume (indicated by a zero return code), we need to switch | ||
33 | * back to the correct page tables. | ||
34 | */ | ||
35 | ret = __cpu_suspend(arg, fn); | ||
36 | if (ret == 0) { | ||
37 | cpu_switch_mm(mm->pgd, mm); | ||
38 | local_flush_bp_all(); | ||
39 | local_flush_tlb_all(); | ||
40 | } | ||
41 | |||
42 | return ret; | ||
43 | } | ||
44 | #else | ||
45 | int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | ||
46 | { | ||
47 | return __cpu_suspend(arg, fn); | ||
48 | } | ||
49 | #define idmap_pgd NULL | ||
50 | #endif | ||
51 | |||
13 | /* | 52 | /* |
14 | * This is called by __cpu_suspend() to save the state, and do whatever | 53 | * This is called by __cpu_suspend() to save the state, and do whatever |
15 | * flushing is required to ensure that when the CPU goes to sleep we have | 54 | * flushing is required to ensure that when the CPU goes to sleep we have |
@@ -47,30 +86,19 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr) | |||
47 | virt_to_phys(save_ptr) + sizeof(*save_ptr)); | 86 | virt_to_phys(save_ptr) + sizeof(*save_ptr)); |
48 | } | 87 | } |
49 | 88 | ||
50 | /* | 89 | extern struct sleep_save_sp sleep_save_sp; |
51 | * Hide the first two arguments to __cpu_suspend - these are an implementation | ||
52 | * detail which platform code shouldn't have to know about. | ||
53 | */ | ||
54 | int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | ||
55 | { | ||
56 | struct mm_struct *mm = current->active_mm; | ||
57 | int ret; | ||
58 | |||
59 | if (!idmap_pgd) | ||
60 | return -EINVAL; | ||
61 | 90 | ||
62 | /* | 91 | static int cpu_suspend_alloc_sp(void) |
63 | * Provide a temporary page table with an identity mapping for | 92 | { |
64 | * the MMU-enable code, required for resuming. On successful | 93 | void *ctx_ptr; |
65 | * resume (indicated by a zero return code), we need to switch | 94 | /* ctx_ptr is an array of physical addresses */ |
66 | * back to the correct page tables. | 95 | ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(u32), GFP_KERNEL); |
67 | */ | ||
68 | ret = __cpu_suspend(arg, fn); | ||
69 | if (ret == 0) { | ||
70 | cpu_switch_mm(mm->pgd, mm); | ||
71 | local_flush_bp_all(); | ||
72 | local_flush_tlb_all(); | ||
73 | } | ||
74 | 96 | ||
75 | return ret; | 97 | if (WARN_ON(!ctx_ptr)) |
98 | return -ENOMEM; | ||
99 | sleep_save_sp.save_ptr_stash = ctx_ptr; | ||
100 | sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr); | ||
101 | sync_cache_w(&sleep_save_sp); | ||
102 | return 0; | ||
76 | } | 103 | } |
104 | early_initcall(cpu_suspend_alloc_sp); | ||
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 486e12a0f26a..cab094c234ee 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -581,7 +581,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) | |||
581 | return regs->ARM_r0; | 581 | return regs->ARM_r0; |
582 | 582 | ||
583 | case NR(set_tls): | 583 | case NR(set_tls): |
584 | thread->tp_value = regs->ARM_r0; | 584 | thread->tp_value[0] = regs->ARM_r0; |
585 | if (tls_emu) | 585 | if (tls_emu) |
586 | return 0; | 586 | return 0; |
587 | if (has_tls_reg) { | 587 | if (has_tls_reg) { |
@@ -699,7 +699,7 @@ static int get_tp_trap(struct pt_regs *regs, unsigned int instr) | |||
699 | int reg = (instr >> 12) & 15; | 699 | int reg = (instr >> 12) & 15; |
700 | if (reg == 15) | 700 | if (reg == 15) |
701 | return 1; | 701 | return 1; |
702 | regs->uregs[reg] = current_thread_info()->tp_value; | 702 | regs->uregs[reg] = current_thread_info()->tp_value[0]; |
703 | regs->ARM_pc += 4; | 703 | regs->ARM_pc += 4; |
704 | return 0; | 704 | return 0; |
705 | } | 705 | } |
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig index 370e1a8af6ac..ebf5015508b5 100644 --- a/arch/arm/kvm/Kconfig +++ b/arch/arm/kvm/Kconfig | |||
@@ -41,9 +41,9 @@ config KVM_ARM_HOST | |||
41 | Provides host support for ARM processors. | 41 | Provides host support for ARM processors. |
42 | 42 | ||
43 | config KVM_ARM_MAX_VCPUS | 43 | config KVM_ARM_MAX_VCPUS |
44 | int "Number maximum supported virtual CPUs per VM" if KVM_ARM_HOST | 44 | int "Number maximum supported virtual CPUs per VM" |
45 | default 4 if KVM_ARM_HOST | 45 | depends on KVM_ARM_HOST |
46 | default 0 | 46 | default 4 |
47 | help | 47 | help |
48 | Static number of max supported virtual CPUs per VM. | 48 | Static number of max supported virtual CPUs per VM. |
49 | 49 | ||
@@ -67,6 +67,4 @@ config KVM_ARM_TIMER | |||
67 | ---help--- | 67 | ---help--- |
68 | Adds support for the Architected Timers in virtual machines | 68 | Adds support for the Architected Timers in virtual machines |
69 | 69 | ||
70 | source drivers/virtio/Kconfig | ||
71 | |||
72 | endif # VIRTUALIZATION | 70 | endif # VIRTUALIZATION |
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile index 53c5ed83d16f..d99bee4950e5 100644 --- a/arch/arm/kvm/Makefile +++ b/arch/arm/kvm/Makefile | |||
@@ -14,10 +14,11 @@ CFLAGS_mmu.o := -I. | |||
14 | AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt) | 14 | AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt) |
15 | AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) | 15 | AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) |
16 | 16 | ||
17 | kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) | 17 | KVM := ../../../virt/kvm |
18 | kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o | ||
18 | 19 | ||
19 | obj-y += kvm-arm.o init.o interrupts.o | 20 | obj-y += kvm-arm.o init.o interrupts.o |
20 | obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o | 21 | obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o |
21 | obj-y += coproc.o coproc_a15.o mmio.o psci.o perf.o | 22 | obj-y += coproc.o coproc_a15.o mmio.o psci.o perf.o |
22 | obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o | 23 | obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o |
23 | obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o | 24 | obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o |
diff --git a/arch/arm/kvm/arch_timer.c b/arch/arm/kvm/arch_timer.c deleted file mode 100644 index c55b6089e923..000000000000 --- a/arch/arm/kvm/arch_timer.c +++ /dev/null | |||
@@ -1,272 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 ARM Ltd. | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #include <linux/cpu.h> | ||
20 | #include <linux/of_irq.h> | ||
21 | #include <linux/kvm.h> | ||
22 | #include <linux/kvm_host.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | |||
25 | #include <clocksource/arm_arch_timer.h> | ||
26 | #include <asm/arch_timer.h> | ||
27 | |||
28 | #include <asm/kvm_vgic.h> | ||
29 | #include <asm/kvm_arch_timer.h> | ||
30 | |||
31 | static struct timecounter *timecounter; | ||
32 | static struct workqueue_struct *wqueue; | ||
33 | static struct kvm_irq_level timer_irq = { | ||
34 | .level = 1, | ||
35 | }; | ||
36 | |||
37 | static cycle_t kvm_phys_timer_read(void) | ||
38 | { | ||
39 | return timecounter->cc->read(timecounter->cc); | ||
40 | } | ||
41 | |||
42 | static bool timer_is_armed(struct arch_timer_cpu *timer) | ||
43 | { | ||
44 | return timer->armed; | ||
45 | } | ||
46 | |||
47 | /* timer_arm: as in "arm the timer", not as in ARM the company */ | ||
48 | static void timer_arm(struct arch_timer_cpu *timer, u64 ns) | ||
49 | { | ||
50 | timer->armed = true; | ||
51 | hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns), | ||
52 | HRTIMER_MODE_ABS); | ||
53 | } | ||
54 | |||
55 | static void timer_disarm(struct arch_timer_cpu *timer) | ||
56 | { | ||
57 | if (timer_is_armed(timer)) { | ||
58 | hrtimer_cancel(&timer->timer); | ||
59 | cancel_work_sync(&timer->expired); | ||
60 | timer->armed = false; | ||
61 | } | ||
62 | } | ||
63 | |||
64 | static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu) | ||
65 | { | ||
66 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
67 | |||
68 | timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK; | ||
69 | kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, | ||
70 | vcpu->arch.timer_cpu.irq->irq, | ||
71 | vcpu->arch.timer_cpu.irq->level); | ||
72 | } | ||
73 | |||
74 | static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) | ||
75 | { | ||
76 | struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; | ||
77 | |||
78 | /* | ||
79 | * We disable the timer in the world switch and let it be | ||
80 | * handled by kvm_timer_sync_hwstate(). Getting a timer | ||
81 | * interrupt at this point is a sure sign of some major | ||
82 | * breakage. | ||
83 | */ | ||
84 | pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu); | ||
85 | return IRQ_HANDLED; | ||
86 | } | ||
87 | |||
88 | static void kvm_timer_inject_irq_work(struct work_struct *work) | ||
89 | { | ||
90 | struct kvm_vcpu *vcpu; | ||
91 | |||
92 | vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired); | ||
93 | vcpu->arch.timer_cpu.armed = false; | ||
94 | kvm_timer_inject_irq(vcpu); | ||
95 | } | ||
96 | |||
97 | static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt) | ||
98 | { | ||
99 | struct arch_timer_cpu *timer; | ||
100 | timer = container_of(hrt, struct arch_timer_cpu, timer); | ||
101 | queue_work(wqueue, &timer->expired); | ||
102 | return HRTIMER_NORESTART; | ||
103 | } | ||
104 | |||
105 | /** | ||
106 | * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu | ||
107 | * @vcpu: The vcpu pointer | ||
108 | * | ||
109 | * Disarm any pending soft timers, since the world-switch code will write the | ||
110 | * virtual timer state back to the physical CPU. | ||
111 | */ | ||
112 | void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) | ||
113 | { | ||
114 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
115 | |||
116 | /* | ||
117 | * We're about to run this vcpu again, so there is no need to | ||
118 | * keep the background timer running, as we're about to | ||
119 | * populate the CPU timer again. | ||
120 | */ | ||
121 | timer_disarm(timer); | ||
122 | } | ||
123 | |||
124 | /** | ||
125 | * kvm_timer_sync_hwstate - sync timer state from cpu | ||
126 | * @vcpu: The vcpu pointer | ||
127 | * | ||
128 | * Check if the virtual timer was armed and either schedule a corresponding | ||
129 | * soft timer or inject directly if already expired. | ||
130 | */ | ||
131 | void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) | ||
132 | { | ||
133 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
134 | cycle_t cval, now; | ||
135 | u64 ns; | ||
136 | |||
137 | if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) || | ||
138 | !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE)) | ||
139 | return; | ||
140 | |||
141 | cval = timer->cntv_cval; | ||
142 | now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; | ||
143 | |||
144 | BUG_ON(timer_is_armed(timer)); | ||
145 | |||
146 | if (cval <= now) { | ||
147 | /* | ||
148 | * Timer has already expired while we were not | ||
149 | * looking. Inject the interrupt and carry on. | ||
150 | */ | ||
151 | kvm_timer_inject_irq(vcpu); | ||
152 | return; | ||
153 | } | ||
154 | |||
155 | ns = cyclecounter_cyc2ns(timecounter->cc, cval - now); | ||
156 | timer_arm(timer, ns); | ||
157 | } | ||
158 | |||
159 | void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) | ||
160 | { | ||
161 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
162 | |||
163 | INIT_WORK(&timer->expired, kvm_timer_inject_irq_work); | ||
164 | hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
165 | timer->timer.function = kvm_timer_expire; | ||
166 | timer->irq = &timer_irq; | ||
167 | } | ||
168 | |||
169 | static void kvm_timer_init_interrupt(void *info) | ||
170 | { | ||
171 | enable_percpu_irq(timer_irq.irq, 0); | ||
172 | } | ||
173 | |||
174 | |||
175 | static int kvm_timer_cpu_notify(struct notifier_block *self, | ||
176 | unsigned long action, void *cpu) | ||
177 | { | ||
178 | switch (action) { | ||
179 | case CPU_STARTING: | ||
180 | case CPU_STARTING_FROZEN: | ||
181 | kvm_timer_init_interrupt(NULL); | ||
182 | break; | ||
183 | case CPU_DYING: | ||
184 | case CPU_DYING_FROZEN: | ||
185 | disable_percpu_irq(timer_irq.irq); | ||
186 | break; | ||
187 | } | ||
188 | |||
189 | return NOTIFY_OK; | ||
190 | } | ||
191 | |||
192 | static struct notifier_block kvm_timer_cpu_nb = { | ||
193 | .notifier_call = kvm_timer_cpu_notify, | ||
194 | }; | ||
195 | |||
196 | static const struct of_device_id arch_timer_of_match[] = { | ||
197 | { .compatible = "arm,armv7-timer", }, | ||
198 | {}, | ||
199 | }; | ||
200 | |||
201 | int kvm_timer_hyp_init(void) | ||
202 | { | ||
203 | struct device_node *np; | ||
204 | unsigned int ppi; | ||
205 | int err; | ||
206 | |||
207 | timecounter = arch_timer_get_timecounter(); | ||
208 | if (!timecounter) | ||
209 | return -ENODEV; | ||
210 | |||
211 | np = of_find_matching_node(NULL, arch_timer_of_match); | ||
212 | if (!np) { | ||
213 | kvm_err("kvm_arch_timer: can't find DT node\n"); | ||
214 | return -ENODEV; | ||
215 | } | ||
216 | |||
217 | ppi = irq_of_parse_and_map(np, 2); | ||
218 | if (!ppi) { | ||
219 | kvm_err("kvm_arch_timer: no virtual timer interrupt\n"); | ||
220 | err = -EINVAL; | ||
221 | goto out; | ||
222 | } | ||
223 | |||
224 | err = request_percpu_irq(ppi, kvm_arch_timer_handler, | ||
225 | "kvm guest timer", kvm_get_running_vcpus()); | ||
226 | if (err) { | ||
227 | kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n", | ||
228 | ppi, err); | ||
229 | goto out; | ||
230 | } | ||
231 | |||
232 | timer_irq.irq = ppi; | ||
233 | |||
234 | err = register_cpu_notifier(&kvm_timer_cpu_nb); | ||
235 | if (err) { | ||
236 | kvm_err("Cannot register timer CPU notifier\n"); | ||
237 | goto out_free; | ||
238 | } | ||
239 | |||
240 | wqueue = create_singlethread_workqueue("kvm_arch_timer"); | ||
241 | if (!wqueue) { | ||
242 | err = -ENOMEM; | ||
243 | goto out_free; | ||
244 | } | ||
245 | |||
246 | kvm_info("%s IRQ%d\n", np->name, ppi); | ||
247 | on_each_cpu(kvm_timer_init_interrupt, NULL, 1); | ||
248 | |||
249 | goto out; | ||
250 | out_free: | ||
251 | free_percpu_irq(ppi, kvm_get_running_vcpus()); | ||
252 | out: | ||
253 | of_node_put(np); | ||
254 | return err; | ||
255 | } | ||
256 | |||
257 | void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) | ||
258 | { | ||
259 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
260 | |||
261 | timer_disarm(timer); | ||
262 | } | ||
263 | |||
264 | int kvm_timer_init(struct kvm *kvm) | ||
265 | { | ||
266 | if (timecounter && wqueue) { | ||
267 | kvm->arch.timer.cntvoff = kvm_phys_timer_read(); | ||
268 | kvm->arch.timer.enabled = 1; | ||
269 | } | ||
270 | |||
271 | return 0; | ||
272 | } | ||
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index ef1703b9587b..741f66a2edbd 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c | |||
@@ -800,8 +800,8 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
800 | 800 | ||
801 | static void cpu_init_hyp_mode(void *dummy) | 801 | static void cpu_init_hyp_mode(void *dummy) |
802 | { | 802 | { |
803 | unsigned long long boot_pgd_ptr; | 803 | phys_addr_t boot_pgd_ptr; |
804 | unsigned long long pgd_ptr; | 804 | phys_addr_t pgd_ptr; |
805 | unsigned long hyp_stack_ptr; | 805 | unsigned long hyp_stack_ptr; |
806 | unsigned long stack_page; | 806 | unsigned long stack_page; |
807 | unsigned long vector_ptr; | 807 | unsigned long vector_ptr; |
@@ -809,8 +809,8 @@ static void cpu_init_hyp_mode(void *dummy) | |||
809 | /* Switch from the HYP stub to our own HYP init vector */ | 809 | /* Switch from the HYP stub to our own HYP init vector */ |
810 | __hyp_set_vectors(kvm_get_idmap_vector()); | 810 | __hyp_set_vectors(kvm_get_idmap_vector()); |
811 | 811 | ||
812 | boot_pgd_ptr = (unsigned long long)kvm_mmu_get_boot_httbr(); | 812 | boot_pgd_ptr = kvm_mmu_get_boot_httbr(); |
813 | pgd_ptr = (unsigned long long)kvm_mmu_get_httbr(); | 813 | pgd_ptr = kvm_mmu_get_httbr(); |
814 | stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); | 814 | stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); |
815 | hyp_stack_ptr = stack_page + PAGE_SIZE; | 815 | hyp_stack_ptr = stack_page + PAGE_SIZE; |
816 | vector_ptr = (unsigned long)__kvm_hyp_vector; | 816 | vector_ptr = (unsigned long)__kvm_hyp_vector; |
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 8eea97be1ed5..4a5199070430 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c | |||
@@ -180,6 +180,10 @@ static const struct coproc_reg cp15_regs[] = { | |||
180 | NULL, reset_unknown, c6_DFAR }, | 180 | NULL, reset_unknown, c6_DFAR }, |
181 | { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32, | 181 | { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32, |
182 | NULL, reset_unknown, c6_IFAR }, | 182 | NULL, reset_unknown, c6_IFAR }, |
183 | |||
184 | /* PAR swapped by interrupt.S */ | ||
185 | { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, | ||
186 | |||
183 | /* | 187 | /* |
184 | * DC{C,I,CI}SW operations: | 188 | * DC{C,I,CI}SW operations: |
185 | */ | 189 | */ |
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 3d74a0be47db..df4c82d47ad7 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c | |||
@@ -52,9 +52,6 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
52 | 52 | ||
53 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) | 53 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) |
54 | { | 54 | { |
55 | if (kvm_psci_call(vcpu)) | ||
56 | return 1; | ||
57 | |||
58 | kvm_inject_undefined(vcpu); | 55 | kvm_inject_undefined(vcpu); |
59 | return 1; | 56 | return 1; |
60 | } | 57 | } |
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index f7793df62f58..16cd4ba5d7fd 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S | |||
@@ -49,6 +49,7 @@ __kvm_hyp_code_start: | |||
49 | ENTRY(__kvm_tlb_flush_vmid_ipa) | 49 | ENTRY(__kvm_tlb_flush_vmid_ipa) |
50 | push {r2, r3} | 50 | push {r2, r3} |
51 | 51 | ||
52 | dsb ishst | ||
52 | add r0, r0, #KVM_VTTBR | 53 | add r0, r0, #KVM_VTTBR |
53 | ldrd r2, r3, [r0] | 54 | ldrd r2, r3, [r0] |
54 | mcrr p15, 6, r2, r3, c2 @ Write VTTBR | 55 | mcrr p15, 6, r2, r3, c2 @ Write VTTBR |
@@ -291,6 +292,7 @@ THUMB( orr r2, r2, #PSR_T_BIT ) | |||
291 | ldr r2, =BSYM(panic) | 292 | ldr r2, =BSYM(panic) |
292 | msr ELR_hyp, r2 | 293 | msr ELR_hyp, r2 |
293 | ldr r0, =\panic_str | 294 | ldr r0, =\panic_str |
295 | clrex @ Clear exclusive monitor | ||
294 | eret | 296 | eret |
295 | .endm | 297 | .endm |
296 | 298 | ||
@@ -414,6 +416,10 @@ guest_trap: | |||
414 | mrcne p15, 4, r2, c6, c0, 4 @ HPFAR | 416 | mrcne p15, 4, r2, c6, c0, 4 @ HPFAR |
415 | bne 3f | 417 | bne 3f |
416 | 418 | ||
419 | /* Preserve PAR */ | ||
420 | mrrc p15, 0, r0, r1, c7 @ PAR | ||
421 | push {r0, r1} | ||
422 | |||
417 | /* Resolve IPA using the xFAR */ | 423 | /* Resolve IPA using the xFAR */ |
418 | mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR | 424 | mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR |
419 | isb | 425 | isb |
@@ -424,13 +430,20 @@ guest_trap: | |||
424 | lsl r2, r2, #4 | 430 | lsl r2, r2, #4 |
425 | orr r2, r2, r1, lsl #24 | 431 | orr r2, r2, r1, lsl #24 |
426 | 432 | ||
433 | /* Restore PAR */ | ||
434 | pop {r0, r1} | ||
435 | mcrr p15, 0, r0, r1, c7 @ PAR | ||
436 | |||
427 | 3: load_vcpu @ Load VCPU pointer to r0 | 437 | 3: load_vcpu @ Load VCPU pointer to r0 |
428 | str r2, [r0, #VCPU_HPFAR] | 438 | str r2, [r0, #VCPU_HPFAR] |
429 | 439 | ||
430 | 1: mov r1, #ARM_EXCEPTION_HVC | 440 | 1: mov r1, #ARM_EXCEPTION_HVC |
431 | b __kvm_vcpu_return | 441 | b __kvm_vcpu_return |
432 | 442 | ||
433 | 4: pop {r0, r1, r2} @ Failed translation, return to guest | 443 | 4: pop {r0, r1} @ Failed translation, return to guest |
444 | mcrr p15, 0, r0, r1, c7 @ PAR | ||
445 | clrex | ||
446 | pop {r0, r1, r2} | ||
434 | eret | 447 | eret |
435 | 448 | ||
436 | /* | 449 | /* |
@@ -456,6 +469,7 @@ switch_to_guest_vfp: | |||
456 | 469 | ||
457 | pop {r3-r7} | 470 | pop {r3-r7} |
458 | pop {r0-r2} | 471 | pop {r0-r2} |
472 | clrex | ||
459 | eret | 473 | eret |
460 | #endif | 474 | #endif |
461 | 475 | ||
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S index 3c8f2f0b4c5e..6f18695a09cb 100644 --- a/arch/arm/kvm/interrupts_head.S +++ b/arch/arm/kvm/interrupts_head.S | |||
@@ -302,11 +302,14 @@ vcpu .req r0 @ vcpu pointer always in r0 | |||
302 | .endif | 302 | .endif |
303 | 303 | ||
304 | mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL | 304 | mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL |
305 | mrrc p15, 0, r4, r5, c7 @ PAR | ||
305 | 306 | ||
306 | .if \store_to_vcpu == 0 | 307 | .if \store_to_vcpu == 0 |
307 | push {r2} | 308 | push {r2,r4-r5} |
308 | .else | 309 | .else |
309 | str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] | 310 | str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] |
311 | add r12, vcpu, #CP15_OFFSET(c7_PAR) | ||
312 | strd r4, r5, [r12] | ||
310 | .endif | 313 | .endif |
311 | .endm | 314 | .endm |
312 | 315 | ||
@@ -319,12 +322,15 @@ vcpu .req r0 @ vcpu pointer always in r0 | |||
319 | */ | 322 | */ |
320 | .macro write_cp15_state read_from_vcpu | 323 | .macro write_cp15_state read_from_vcpu |
321 | .if \read_from_vcpu == 0 | 324 | .if \read_from_vcpu == 0 |
322 | pop {r2} | 325 | pop {r2,r4-r5} |
323 | .else | 326 | .else |
324 | ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] | 327 | ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] |
328 | add r12, vcpu, #CP15_OFFSET(c7_PAR) | ||
329 | ldrd r4, r5, [r12] | ||
325 | .endif | 330 | .endif |
326 | 331 | ||
327 | mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL | 332 | mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL |
333 | mcrr p15, 0, r4, r5, c7 @ PAR | ||
328 | 334 | ||
329 | .if \read_from_vcpu == 0 | 335 | .if \read_from_vcpu == 0 |
330 | pop {r2-r12} | 336 | pop {r2-r12} |
@@ -497,6 +503,10 @@ vcpu .req r0 @ vcpu pointer always in r0 | |||
497 | add r5, vcpu, r4 | 503 | add r5, vcpu, r4 |
498 | strd r2, r3, [r5] | 504 | strd r2, r3, [r5] |
499 | 505 | ||
506 | @ Ensure host CNTVCT == CNTPCT | ||
507 | mov r2, #0 | ||
508 | mcrr p15, 4, r2, r2, c14 @ CNTVOFF | ||
509 | |||
500 | 1: | 510 | 1: |
501 | #endif | 511 | #endif |
502 | @ Allow physical timer/counter access for the host | 512 | @ Allow physical timer/counter access for the host |
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index 72a12f2171b2..b8e06b7a2833 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c | |||
@@ -86,12 +86,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
86 | sign_extend = kvm_vcpu_dabt_issext(vcpu); | 86 | sign_extend = kvm_vcpu_dabt_issext(vcpu); |
87 | rt = kvm_vcpu_dabt_get_rd(vcpu); | 87 | rt = kvm_vcpu_dabt_get_rd(vcpu); |
88 | 88 | ||
89 | if (kvm_vcpu_reg_is_pc(vcpu, rt)) { | ||
90 | /* IO memory trying to read/write pc */ | ||
91 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); | ||
92 | return 1; | ||
93 | } | ||
94 | |||
95 | mmio->is_write = is_write; | 89 | mmio->is_write = is_write; |
96 | mmio->phys_addr = fault_ipa; | 90 | mmio->phys_addr = fault_ipa; |
97 | mmio->len = len; | 91 | mmio->len = len; |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 84ba67b982c0..ca6bea4859b4 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -382,9 +382,6 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) | |||
382 | if (!pgd) | 382 | if (!pgd) |
383 | return -ENOMEM; | 383 | return -ENOMEM; |
384 | 384 | ||
385 | /* stage-2 pgd must be aligned to its size */ | ||
386 | VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1)); | ||
387 | |||
388 | memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); | 385 | memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); |
389 | kvm_clean_pgd(pgd); | 386 | kvm_clean_pgd(pgd); |
390 | kvm->arch.pgd = pgd; | 387 | kvm->arch.pgd = pgd; |
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c index 7ee5bb7a3667..86a693a02ba3 100644 --- a/arch/arm/kvm/psci.c +++ b/arch/arm/kvm/psci.c | |||
@@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | |||
75 | * kvm_psci_call - handle PSCI call if r0 value is in range | 75 | * kvm_psci_call - handle PSCI call if r0 value is in range |
76 | * @vcpu: Pointer to the VCPU struct | 76 | * @vcpu: Pointer to the VCPU struct |
77 | * | 77 | * |
78 | * Handle PSCI calls from guests through traps from HVC or SMC instructions. | 78 | * Handle PSCI calls from guests through traps from HVC instructions. |
79 | * The calling convention is similar to SMC calls to the secure world where | 79 | * The calling convention is similar to SMC calls to the secure world where |
80 | * the function number is placed in r0 and this function returns true if the | 80 | * the function number is placed in r0 and this function returns true if the |
81 | * function number specified in r0 is withing the PSCI range, and false | 81 | * function number specified in r0 is withing the PSCI range, and false |
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c index b80256b554cd..b7840e7aa452 100644 --- a/arch/arm/kvm/reset.c +++ b/arch/arm/kvm/reset.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #include <asm/kvm_arm.h> | 27 | #include <asm/kvm_arm.h> |
28 | #include <asm/kvm_coproc.h> | 28 | #include <asm/kvm_coproc.h> |
29 | 29 | ||
30 | #include <kvm/arm_arch_timer.h> | ||
31 | |||
30 | /****************************************************************************** | 32 | /****************************************************************************** |
31 | * Cortex-A15 Reset Values | 33 | * Cortex-A15 Reset Values |
32 | */ | 34 | */ |
@@ -37,6 +39,11 @@ static struct kvm_regs a15_regs_reset = { | |||
37 | .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT, | 39 | .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT, |
38 | }; | 40 | }; |
39 | 41 | ||
42 | static const struct kvm_irq_level a15_vtimer_irq = { | ||
43 | .irq = 27, | ||
44 | .level = 1, | ||
45 | }; | ||
46 | |||
40 | 47 | ||
41 | /******************************************************************************* | 48 | /******************************************************************************* |
42 | * Exported reset function | 49 | * Exported reset function |
@@ -52,6 +59,7 @@ static struct kvm_regs a15_regs_reset = { | |||
52 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | 59 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu) |
53 | { | 60 | { |
54 | struct kvm_regs *cpu_reset; | 61 | struct kvm_regs *cpu_reset; |
62 | const struct kvm_irq_level *cpu_vtimer_irq; | ||
55 | 63 | ||
56 | switch (vcpu->arch.target) { | 64 | switch (vcpu->arch.target) { |
57 | case KVM_ARM_TARGET_CORTEX_A15: | 65 | case KVM_ARM_TARGET_CORTEX_A15: |
@@ -59,6 +67,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | |||
59 | return -EINVAL; | 67 | return -EINVAL; |
60 | cpu_reset = &a15_regs_reset; | 68 | cpu_reset = &a15_regs_reset; |
61 | vcpu->arch.midr = read_cpuid_id(); | 69 | vcpu->arch.midr = read_cpuid_id(); |
70 | cpu_vtimer_irq = &a15_vtimer_irq; | ||
62 | break; | 71 | break; |
63 | default: | 72 | default: |
64 | return -ENODEV; | 73 | return -ENODEV; |
@@ -70,5 +79,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | |||
70 | /* Reset CP15 registers */ | 79 | /* Reset CP15 registers */ |
71 | kvm_reset_coprocs(vcpu); | 80 | kvm_reset_coprocs(vcpu); |
72 | 81 | ||
82 | /* Reset arch_timer context */ | ||
83 | kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); | ||
84 | |||
73 | return 0; | 85 | return 0; |
74 | } | 86 | } |
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c deleted file mode 100644 index 17c5ac7d10ed..000000000000 --- a/arch/arm/kvm/vgic.c +++ /dev/null | |||
@@ -1,1499 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 ARM Ltd. | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #include <linux/cpu.h> | ||
20 | #include <linux/kvm.h> | ||
21 | #include <linux/kvm_host.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/io.h> | ||
24 | #include <linux/of.h> | ||
25 | #include <linux/of_address.h> | ||
26 | #include <linux/of_irq.h> | ||
27 | |||
28 | #include <linux/irqchip/arm-gic.h> | ||
29 | |||
30 | #include <asm/kvm_emulate.h> | ||
31 | #include <asm/kvm_arm.h> | ||
32 | #include <asm/kvm_mmu.h> | ||
33 | |||
34 | /* | ||
35 | * How the whole thing works (courtesy of Christoffer Dall): | ||
36 | * | ||
37 | * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if | ||
38 | * something is pending | ||
39 | * - VGIC pending interrupts are stored on the vgic.irq_state vgic | ||
40 | * bitmap (this bitmap is updated by both user land ioctls and guest | ||
41 | * mmio ops, and other in-kernel peripherals such as the | ||
42 | * arch. timers) and indicate the 'wire' state. | ||
43 | * - Every time the bitmap changes, the irq_pending_on_cpu oracle is | ||
44 | * recalculated | ||
45 | * - To calculate the oracle, we need info for each cpu from | ||
46 | * compute_pending_for_cpu, which considers: | ||
47 | * - PPI: dist->irq_state & dist->irq_enable | ||
48 | * - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target | ||
49 | * - irq_spi_target is a 'formatted' version of the GICD_ICFGR | ||
50 | * registers, stored on each vcpu. We only keep one bit of | ||
51 | * information per interrupt, making sure that only one vcpu can | ||
52 | * accept the interrupt. | ||
53 | * - The same is true when injecting an interrupt, except that we only | ||
54 | * consider a single interrupt at a time. The irq_spi_cpu array | ||
55 | * contains the target CPU for each SPI. | ||
56 | * | ||
57 | * The handling of level interrupts adds some extra complexity. We | ||
58 | * need to track when the interrupt has been EOIed, so we can sample | ||
59 | * the 'line' again. This is achieved as such: | ||
60 | * | ||
61 | * - When a level interrupt is moved onto a vcpu, the corresponding | ||
62 | * bit in irq_active is set. As long as this bit is set, the line | ||
63 | * will be ignored for further interrupts. The interrupt is injected | ||
64 | * into the vcpu with the GICH_LR_EOI bit set (generate a | ||
65 | * maintenance interrupt on EOI). | ||
66 | * - When the interrupt is EOIed, the maintenance interrupt fires, | ||
67 | * and clears the corresponding bit in irq_active. This allow the | ||
68 | * interrupt line to be sampled again. | ||
69 | */ | ||
70 | |||
71 | #define VGIC_ADDR_UNDEF (-1) | ||
72 | #define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) | ||
73 | |||
74 | /* Physical address of vgic virtual cpu interface */ | ||
75 | static phys_addr_t vgic_vcpu_base; | ||
76 | |||
77 | /* Virtual control interface base address */ | ||
78 | static void __iomem *vgic_vctrl_base; | ||
79 | |||
80 | static struct device_node *vgic_node; | ||
81 | |||
82 | #define ACCESS_READ_VALUE (1 << 0) | ||
83 | #define ACCESS_READ_RAZ (0 << 0) | ||
84 | #define ACCESS_READ_MASK(x) ((x) & (1 << 0)) | ||
85 | #define ACCESS_WRITE_IGNORED (0 << 1) | ||
86 | #define ACCESS_WRITE_SETBIT (1 << 1) | ||
87 | #define ACCESS_WRITE_CLEARBIT (2 << 1) | ||
88 | #define ACCESS_WRITE_VALUE (3 << 1) | ||
89 | #define ACCESS_WRITE_MASK(x) ((x) & (3 << 1)) | ||
90 | |||
91 | static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu); | ||
92 | static void vgic_update_state(struct kvm *kvm); | ||
93 | static void vgic_kick_vcpus(struct kvm *kvm); | ||
94 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); | ||
95 | static u32 vgic_nr_lr; | ||
96 | |||
97 | static unsigned int vgic_maint_irq; | ||
98 | |||
99 | static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, | ||
100 | int cpuid, u32 offset) | ||
101 | { | ||
102 | offset >>= 2; | ||
103 | if (!offset) | ||
104 | return x->percpu[cpuid].reg; | ||
105 | else | ||
106 | return x->shared.reg + offset - 1; | ||
107 | } | ||
108 | |||
109 | static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x, | ||
110 | int cpuid, int irq) | ||
111 | { | ||
112 | if (irq < VGIC_NR_PRIVATE_IRQS) | ||
113 | return test_bit(irq, x->percpu[cpuid].reg_ul); | ||
114 | |||
115 | return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul); | ||
116 | } | ||
117 | |||
118 | static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, | ||
119 | int irq, int val) | ||
120 | { | ||
121 | unsigned long *reg; | ||
122 | |||
123 | if (irq < VGIC_NR_PRIVATE_IRQS) { | ||
124 | reg = x->percpu[cpuid].reg_ul; | ||
125 | } else { | ||
126 | reg = x->shared.reg_ul; | ||
127 | irq -= VGIC_NR_PRIVATE_IRQS; | ||
128 | } | ||
129 | |||
130 | if (val) | ||
131 | set_bit(irq, reg); | ||
132 | else | ||
133 | clear_bit(irq, reg); | ||
134 | } | ||
135 | |||
136 | static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid) | ||
137 | { | ||
138 | if (unlikely(cpuid >= VGIC_MAX_CPUS)) | ||
139 | return NULL; | ||
140 | return x->percpu[cpuid].reg_ul; | ||
141 | } | ||
142 | |||
143 | static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x) | ||
144 | { | ||
145 | return x->shared.reg_ul; | ||
146 | } | ||
147 | |||
148 | static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset) | ||
149 | { | ||
150 | offset >>= 2; | ||
151 | BUG_ON(offset > (VGIC_NR_IRQS / 4)); | ||
152 | if (offset < 4) | ||
153 | return x->percpu[cpuid] + offset; | ||
154 | else | ||
155 | return x->shared + offset - 8; | ||
156 | } | ||
157 | |||
158 | #define VGIC_CFG_LEVEL 0 | ||
159 | #define VGIC_CFG_EDGE 1 | ||
160 | |||
161 | static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq) | ||
162 | { | ||
163 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
164 | int irq_val; | ||
165 | |||
166 | irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq); | ||
167 | return irq_val == VGIC_CFG_EDGE; | ||
168 | } | ||
169 | |||
170 | static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq) | ||
171 | { | ||
172 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
173 | |||
174 | return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq); | ||
175 | } | ||
176 | |||
177 | static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq) | ||
178 | { | ||
179 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
180 | |||
181 | return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq); | ||
182 | } | ||
183 | |||
184 | static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq) | ||
185 | { | ||
186 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
187 | |||
188 | vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1); | ||
189 | } | ||
190 | |||
191 | static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq) | ||
192 | { | ||
193 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
194 | |||
195 | vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0); | ||
196 | } | ||
197 | |||
198 | static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq) | ||
199 | { | ||
200 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
201 | |||
202 | return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq); | ||
203 | } | ||
204 | |||
205 | static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq) | ||
206 | { | ||
207 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
208 | |||
209 | vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1); | ||
210 | } | ||
211 | |||
212 | static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq) | ||
213 | { | ||
214 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
215 | |||
216 | vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0); | ||
217 | } | ||
218 | |||
219 | static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq) | ||
220 | { | ||
221 | if (irq < VGIC_NR_PRIVATE_IRQS) | ||
222 | set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu); | ||
223 | else | ||
224 | set_bit(irq - VGIC_NR_PRIVATE_IRQS, | ||
225 | vcpu->arch.vgic_cpu.pending_shared); | ||
226 | } | ||
227 | |||
228 | static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq) | ||
229 | { | ||
230 | if (irq < VGIC_NR_PRIVATE_IRQS) | ||
231 | clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu); | ||
232 | else | ||
233 | clear_bit(irq - VGIC_NR_PRIVATE_IRQS, | ||
234 | vcpu->arch.vgic_cpu.pending_shared); | ||
235 | } | ||
236 | |||
237 | static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask) | ||
238 | { | ||
239 | return *((u32 *)mmio->data) & mask; | ||
240 | } | ||
241 | |||
242 | static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value) | ||
243 | { | ||
244 | *((u32 *)mmio->data) = value & mask; | ||
245 | } | ||
246 | |||
247 | /** | ||
248 | * vgic_reg_access - access vgic register | ||
249 | * @mmio: pointer to the data describing the mmio access | ||
250 | * @reg: pointer to the virtual backing of vgic distributor data | ||
251 | * @offset: least significant 2 bits used for word offset | ||
252 | * @mode: ACCESS_ mode (see defines above) | ||
253 | * | ||
254 | * Helper to make vgic register access easier using one of the access | ||
255 | * modes defined for vgic register access | ||
256 | * (read,raz,write-ignored,setbit,clearbit,write) | ||
257 | */ | ||
258 | static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, | ||
259 | phys_addr_t offset, int mode) | ||
260 | { | ||
261 | int word_offset = (offset & 3) * 8; | ||
262 | u32 mask = (1UL << (mmio->len * 8)) - 1; | ||
263 | u32 regval; | ||
264 | |||
265 | /* | ||
266 | * Any alignment fault should have been delivered to the guest | ||
267 | * directly (ARM ARM B3.12.7 "Prioritization of aborts"). | ||
268 | */ | ||
269 | |||
270 | if (reg) { | ||
271 | regval = *reg; | ||
272 | } else { | ||
273 | BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED)); | ||
274 | regval = 0; | ||
275 | } | ||
276 | |||
277 | if (mmio->is_write) { | ||
278 | u32 data = mmio_data_read(mmio, mask) << word_offset; | ||
279 | switch (ACCESS_WRITE_MASK(mode)) { | ||
280 | case ACCESS_WRITE_IGNORED: | ||
281 | return; | ||
282 | |||
283 | case ACCESS_WRITE_SETBIT: | ||
284 | regval |= data; | ||
285 | break; | ||
286 | |||
287 | case ACCESS_WRITE_CLEARBIT: | ||
288 | regval &= ~data; | ||
289 | break; | ||
290 | |||
291 | case ACCESS_WRITE_VALUE: | ||
292 | regval = (regval & ~(mask << word_offset)) | data; | ||
293 | break; | ||
294 | } | ||
295 | *reg = regval; | ||
296 | } else { | ||
297 | switch (ACCESS_READ_MASK(mode)) { | ||
298 | case ACCESS_READ_RAZ: | ||
299 | regval = 0; | ||
300 | /* fall through */ | ||
301 | |||
302 | case ACCESS_READ_VALUE: | ||
303 | mmio_data_write(mmio, mask, regval >> word_offset); | ||
304 | } | ||
305 | } | ||
306 | } | ||
307 | |||
308 | static bool handle_mmio_misc(struct kvm_vcpu *vcpu, | ||
309 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
310 | { | ||
311 | u32 reg; | ||
312 | u32 word_offset = offset & 3; | ||
313 | |||
314 | switch (offset & ~3) { | ||
315 | case 0: /* CTLR */ | ||
316 | reg = vcpu->kvm->arch.vgic.enabled; | ||
317 | vgic_reg_access(mmio, ®, word_offset, | ||
318 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | ||
319 | if (mmio->is_write) { | ||
320 | vcpu->kvm->arch.vgic.enabled = reg & 1; | ||
321 | vgic_update_state(vcpu->kvm); | ||
322 | return true; | ||
323 | } | ||
324 | break; | ||
325 | |||
326 | case 4: /* TYPER */ | ||
327 | reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; | ||
328 | reg |= (VGIC_NR_IRQS >> 5) - 1; | ||
329 | vgic_reg_access(mmio, ®, word_offset, | ||
330 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | ||
331 | break; | ||
332 | |||
333 | case 8: /* IIDR */ | ||
334 | reg = 0x4B00043B; | ||
335 | vgic_reg_access(mmio, ®, word_offset, | ||
336 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | ||
337 | break; | ||
338 | } | ||
339 | |||
340 | return false; | ||
341 | } | ||
342 | |||
343 | static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, | ||
344 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
345 | { | ||
346 | vgic_reg_access(mmio, NULL, offset, | ||
347 | ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); | ||
348 | return false; | ||
349 | } | ||
350 | |||
351 | static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu, | ||
352 | struct kvm_exit_mmio *mmio, | ||
353 | phys_addr_t offset) | ||
354 | { | ||
355 | u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled, | ||
356 | vcpu->vcpu_id, offset); | ||
357 | vgic_reg_access(mmio, reg, offset, | ||
358 | ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT); | ||
359 | if (mmio->is_write) { | ||
360 | vgic_update_state(vcpu->kvm); | ||
361 | return true; | ||
362 | } | ||
363 | |||
364 | return false; | ||
365 | } | ||
366 | |||
367 | static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu, | ||
368 | struct kvm_exit_mmio *mmio, | ||
369 | phys_addr_t offset) | ||
370 | { | ||
371 | u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled, | ||
372 | vcpu->vcpu_id, offset); | ||
373 | vgic_reg_access(mmio, reg, offset, | ||
374 | ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); | ||
375 | if (mmio->is_write) { | ||
376 | if (offset < 4) /* Force SGI enabled */ | ||
377 | *reg |= 0xffff; | ||
378 | vgic_retire_disabled_irqs(vcpu); | ||
379 | vgic_update_state(vcpu->kvm); | ||
380 | return true; | ||
381 | } | ||
382 | |||
383 | return false; | ||
384 | } | ||
385 | |||
386 | static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu, | ||
387 | struct kvm_exit_mmio *mmio, | ||
388 | phys_addr_t offset) | ||
389 | { | ||
390 | u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state, | ||
391 | vcpu->vcpu_id, offset); | ||
392 | vgic_reg_access(mmio, reg, offset, | ||
393 | ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT); | ||
394 | if (mmio->is_write) { | ||
395 | vgic_update_state(vcpu->kvm); | ||
396 | return true; | ||
397 | } | ||
398 | |||
399 | return false; | ||
400 | } | ||
401 | |||
402 | static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu, | ||
403 | struct kvm_exit_mmio *mmio, | ||
404 | phys_addr_t offset) | ||
405 | { | ||
406 | u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state, | ||
407 | vcpu->vcpu_id, offset); | ||
408 | vgic_reg_access(mmio, reg, offset, | ||
409 | ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); | ||
410 | if (mmio->is_write) { | ||
411 | vgic_update_state(vcpu->kvm); | ||
412 | return true; | ||
413 | } | ||
414 | |||
415 | return false; | ||
416 | } | ||
417 | |||
418 | static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu, | ||
419 | struct kvm_exit_mmio *mmio, | ||
420 | phys_addr_t offset) | ||
421 | { | ||
422 | u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority, | ||
423 | vcpu->vcpu_id, offset); | ||
424 | vgic_reg_access(mmio, reg, offset, | ||
425 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | ||
426 | return false; | ||
427 | } | ||
428 | |||
429 | #define GICD_ITARGETSR_SIZE 32 | ||
430 | #define GICD_CPUTARGETS_BITS 8 | ||
431 | #define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS) | ||
432 | static u32 vgic_get_target_reg(struct kvm *kvm, int irq) | ||
433 | { | ||
434 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
435 | struct kvm_vcpu *vcpu; | ||
436 | int i, c; | ||
437 | unsigned long *bmap; | ||
438 | u32 val = 0; | ||
439 | |||
440 | irq -= VGIC_NR_PRIVATE_IRQS; | ||
441 | |||
442 | kvm_for_each_vcpu(c, vcpu, kvm) { | ||
443 | bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]); | ||
444 | for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) | ||
445 | if (test_bit(irq + i, bmap)) | ||
446 | val |= 1 << (c + i * 8); | ||
447 | } | ||
448 | |||
449 | return val; | ||
450 | } | ||
451 | |||
452 | static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq) | ||
453 | { | ||
454 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
455 | struct kvm_vcpu *vcpu; | ||
456 | int i, c; | ||
457 | unsigned long *bmap; | ||
458 | u32 target; | ||
459 | |||
460 | irq -= VGIC_NR_PRIVATE_IRQS; | ||
461 | |||
462 | /* | ||
463 | * Pick the LSB in each byte. This ensures we target exactly | ||
464 | * one vcpu per IRQ. If the byte is null, assume we target | ||
465 | * CPU0. | ||
466 | */ | ||
467 | for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) { | ||
468 | int shift = i * GICD_CPUTARGETS_BITS; | ||
469 | target = ffs((val >> shift) & 0xffU); | ||
470 | target = target ? (target - 1) : 0; | ||
471 | dist->irq_spi_cpu[irq + i] = target; | ||
472 | kvm_for_each_vcpu(c, vcpu, kvm) { | ||
473 | bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]); | ||
474 | if (c == target) | ||
475 | set_bit(irq + i, bmap); | ||
476 | else | ||
477 | clear_bit(irq + i, bmap); | ||
478 | } | ||
479 | } | ||
480 | } | ||
481 | |||
482 | static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu, | ||
483 | struct kvm_exit_mmio *mmio, | ||
484 | phys_addr_t offset) | ||
485 | { | ||
486 | u32 reg; | ||
487 | |||
488 | /* We treat the banked interrupts targets as read-only */ | ||
489 | if (offset < 32) { | ||
490 | u32 roreg = 1 << vcpu->vcpu_id; | ||
491 | roreg |= roreg << 8; | ||
492 | roreg |= roreg << 16; | ||
493 | |||
494 | vgic_reg_access(mmio, &roreg, offset, | ||
495 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | ||
496 | return false; | ||
497 | } | ||
498 | |||
499 | reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U); | ||
500 | vgic_reg_access(mmio, ®, offset, | ||
501 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | ||
502 | if (mmio->is_write) { | ||
503 | vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U); | ||
504 | vgic_update_state(vcpu->kvm); | ||
505 | return true; | ||
506 | } | ||
507 | |||
508 | return false; | ||
509 | } | ||
510 | |||
511 | static u32 vgic_cfg_expand(u16 val) | ||
512 | { | ||
513 | u32 res = 0; | ||
514 | int i; | ||
515 | |||
516 | /* | ||
517 | * Turn a 16bit value like abcd...mnop into a 32bit word | ||
518 | * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is. | ||
519 | */ | ||
520 | for (i = 0; i < 16; i++) | ||
521 | res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1); | ||
522 | |||
523 | return res; | ||
524 | } | ||
525 | |||
526 | static u16 vgic_cfg_compress(u32 val) | ||
527 | { | ||
528 | u16 res = 0; | ||
529 | int i; | ||
530 | |||
531 | /* | ||
532 | * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like | ||
533 | * abcd...mnop which is what we really care about. | ||
534 | */ | ||
535 | for (i = 0; i < 16; i++) | ||
536 | res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i; | ||
537 | |||
538 | return res; | ||
539 | } | ||
540 | |||
541 | /* | ||
542 | * The distributor uses 2 bits per IRQ for the CFG register, but the | ||
543 | * LSB is always 0. As such, we only keep the upper bit, and use the | ||
544 | * two above functions to compress/expand the bits | ||
545 | */ | ||
546 | static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu, | ||
547 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
548 | { | ||
549 | u32 val; | ||
550 | u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg, | ||
551 | vcpu->vcpu_id, offset >> 1); | ||
552 | if (offset & 2) | ||
553 | val = *reg >> 16; | ||
554 | else | ||
555 | val = *reg & 0xffff; | ||
556 | |||
557 | val = vgic_cfg_expand(val); | ||
558 | vgic_reg_access(mmio, &val, offset, | ||
559 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | ||
560 | if (mmio->is_write) { | ||
561 | if (offset < 4) { | ||
562 | *reg = ~0U; /* Force PPIs/SGIs to 1 */ | ||
563 | return false; | ||
564 | } | ||
565 | |||
566 | val = vgic_cfg_compress(val); | ||
567 | if (offset & 2) { | ||
568 | *reg &= 0xffff; | ||
569 | *reg |= val << 16; | ||
570 | } else { | ||
571 | *reg &= 0xffff << 16; | ||
572 | *reg |= val; | ||
573 | } | ||
574 | } | ||
575 | |||
576 | return false; | ||
577 | } | ||
578 | |||
579 | static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu, | ||
580 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
581 | { | ||
582 | u32 reg; | ||
583 | vgic_reg_access(mmio, ®, offset, | ||
584 | ACCESS_READ_RAZ | ACCESS_WRITE_VALUE); | ||
585 | if (mmio->is_write) { | ||
586 | vgic_dispatch_sgi(vcpu, reg); | ||
587 | vgic_update_state(vcpu->kvm); | ||
588 | return true; | ||
589 | } | ||
590 | |||
591 | return false; | ||
592 | } | ||
593 | |||
594 | /* | ||
595 | * I would have liked to use the kvm_bus_io_*() API instead, but it | ||
596 | * cannot cope with banked registers (only the VM pointer is passed | ||
597 | * around, and we need the vcpu). One of these days, someone please | ||
598 | * fix it! | ||
599 | */ | ||
600 | struct mmio_range { | ||
601 | phys_addr_t base; | ||
602 | unsigned long len; | ||
603 | bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, | ||
604 | phys_addr_t offset); | ||
605 | }; | ||
606 | |||
607 | static const struct mmio_range vgic_ranges[] = { | ||
608 | { | ||
609 | .base = GIC_DIST_CTRL, | ||
610 | .len = 12, | ||
611 | .handle_mmio = handle_mmio_misc, | ||
612 | }, | ||
613 | { | ||
614 | .base = GIC_DIST_IGROUP, | ||
615 | .len = VGIC_NR_IRQS / 8, | ||
616 | .handle_mmio = handle_mmio_raz_wi, | ||
617 | }, | ||
618 | { | ||
619 | .base = GIC_DIST_ENABLE_SET, | ||
620 | .len = VGIC_NR_IRQS / 8, | ||
621 | .handle_mmio = handle_mmio_set_enable_reg, | ||
622 | }, | ||
623 | { | ||
624 | .base = GIC_DIST_ENABLE_CLEAR, | ||
625 | .len = VGIC_NR_IRQS / 8, | ||
626 | .handle_mmio = handle_mmio_clear_enable_reg, | ||
627 | }, | ||
628 | { | ||
629 | .base = GIC_DIST_PENDING_SET, | ||
630 | .len = VGIC_NR_IRQS / 8, | ||
631 | .handle_mmio = handle_mmio_set_pending_reg, | ||
632 | }, | ||
633 | { | ||
634 | .base = GIC_DIST_PENDING_CLEAR, | ||
635 | .len = VGIC_NR_IRQS / 8, | ||
636 | .handle_mmio = handle_mmio_clear_pending_reg, | ||
637 | }, | ||
638 | { | ||
639 | .base = GIC_DIST_ACTIVE_SET, | ||
640 | .len = VGIC_NR_IRQS / 8, | ||
641 | .handle_mmio = handle_mmio_raz_wi, | ||
642 | }, | ||
643 | { | ||
644 | .base = GIC_DIST_ACTIVE_CLEAR, | ||
645 | .len = VGIC_NR_IRQS / 8, | ||
646 | .handle_mmio = handle_mmio_raz_wi, | ||
647 | }, | ||
648 | { | ||
649 | .base = GIC_DIST_PRI, | ||
650 | .len = VGIC_NR_IRQS, | ||
651 | .handle_mmio = handle_mmio_priority_reg, | ||
652 | }, | ||
653 | { | ||
654 | .base = GIC_DIST_TARGET, | ||
655 | .len = VGIC_NR_IRQS, | ||
656 | .handle_mmio = handle_mmio_target_reg, | ||
657 | }, | ||
658 | { | ||
659 | .base = GIC_DIST_CONFIG, | ||
660 | .len = VGIC_NR_IRQS / 4, | ||
661 | .handle_mmio = handle_mmio_cfg_reg, | ||
662 | }, | ||
663 | { | ||
664 | .base = GIC_DIST_SOFTINT, | ||
665 | .len = 4, | ||
666 | .handle_mmio = handle_mmio_sgi_reg, | ||
667 | }, | ||
668 | {} | ||
669 | }; | ||
670 | |||
671 | static const | ||
672 | struct mmio_range *find_matching_range(const struct mmio_range *ranges, | ||
673 | struct kvm_exit_mmio *mmio, | ||
674 | phys_addr_t base) | ||
675 | { | ||
676 | const struct mmio_range *r = ranges; | ||
677 | phys_addr_t addr = mmio->phys_addr - base; | ||
678 | |||
679 | while (r->len) { | ||
680 | if (addr >= r->base && | ||
681 | (addr + mmio->len) <= (r->base + r->len)) | ||
682 | return r; | ||
683 | r++; | ||
684 | } | ||
685 | |||
686 | return NULL; | ||
687 | } | ||
688 | |||
/**
 * vgic_handle_mmio - handle an in-kernel MMIO access
 * @vcpu: pointer to the vcpu performing the access
 * @run: pointer to the kvm_run structure
 * @mmio: pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	const struct mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long base = dist->vgic_dist_base;
	bool updated_state;
	unsigned long offset;

	/* Only claim accesses that fall inside the distributor window. */
	if (!irqchip_in_kernel(vcpu->kvm) ||
	    mmio->phys_addr < base ||
	    (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* We don't support ldrd / strd or ldm / stm to the emulated vgic */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	range = find_matching_range(vgic_ranges, mmio, base);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	/* Distributor state is protected by the per-VM vgic lock. */
	spin_lock(&vcpu->kvm->arch.vgic.lock);
	/* Handlers take the offset relative to the matched register block. */
	offset = mmio->phys_addr - range->base - base;
	updated_state = range->handle_mmio(vcpu, mmio, offset);
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	/* A write may have made interrupts deliverable on other VCPUs. */
	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}
737 | |||
738 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) | ||
739 | { | ||
740 | struct kvm *kvm = vcpu->kvm; | ||
741 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
742 | int nrcpus = atomic_read(&kvm->online_vcpus); | ||
743 | u8 target_cpus; | ||
744 | int sgi, mode, c, vcpu_id; | ||
745 | |||
746 | vcpu_id = vcpu->vcpu_id; | ||
747 | |||
748 | sgi = reg & 0xf; | ||
749 | target_cpus = (reg >> 16) & 0xff; | ||
750 | mode = (reg >> 24) & 3; | ||
751 | |||
752 | switch (mode) { | ||
753 | case 0: | ||
754 | if (!target_cpus) | ||
755 | return; | ||
756 | |||
757 | case 1: | ||
758 | target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff; | ||
759 | break; | ||
760 | |||
761 | case 2: | ||
762 | target_cpus = 1 << vcpu_id; | ||
763 | break; | ||
764 | } | ||
765 | |||
766 | kvm_for_each_vcpu(c, vcpu, kvm) { | ||
767 | if (target_cpus & 1) { | ||
768 | /* Flag the SGI as pending */ | ||
769 | vgic_dist_irq_set(vcpu, sgi); | ||
770 | dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id; | ||
771 | kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c); | ||
772 | } | ||
773 | |||
774 | target_cpus >>= 1; | ||
775 | } | ||
776 | } | ||
777 | |||
778 | static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) | ||
779 | { | ||
780 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
781 | unsigned long *pending, *enabled, *pend_percpu, *pend_shared; | ||
782 | unsigned long pending_private, pending_shared; | ||
783 | int vcpu_id; | ||
784 | |||
785 | vcpu_id = vcpu->vcpu_id; | ||
786 | pend_percpu = vcpu->arch.vgic_cpu.pending_percpu; | ||
787 | pend_shared = vcpu->arch.vgic_cpu.pending_shared; | ||
788 | |||
789 | pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id); | ||
790 | enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id); | ||
791 | bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS); | ||
792 | |||
793 | pending = vgic_bitmap_get_shared_map(&dist->irq_state); | ||
794 | enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled); | ||
795 | bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS); | ||
796 | bitmap_and(pend_shared, pend_shared, | ||
797 | vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]), | ||
798 | VGIC_NR_SHARED_IRQS); | ||
799 | |||
800 | pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS); | ||
801 | pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS); | ||
802 | return (pending_private < VGIC_NR_PRIVATE_IRQS || | ||
803 | pending_shared < VGIC_NR_SHARED_IRQS); | ||
804 | } | ||
805 | |||
/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		/*
		 * NOTE(review): with the distributor disabled, bit 0
		 * (VCPU0) is still flagged here — presumably to force a
		 * re-evaluation on the flush path; confirm against
		 * __kvm_vgic_flush_hwstate.
		 */
		set_bit(0, &dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			/* Tell the world-switch code to inject for this CPU. */
			set_bit(c, &dist->irq_pending_on_cpu);
		}
	}
}
828 | |||
/* Extract the source-CPU id field from a saved list register value. */
#define LR_CPUID(lr)	\
	(((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
/* Build a pending list register value for @irq raised by CPU @src. */
#define MK_LR_PEND(src, irq)	\
	(GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
833 | |||
/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	/* Only LRs we have actually handed out need inspection. */
	for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

		if (!vgic_irq_is_enabled(vcpu, irq)) {
			/* Unmap the IRQ, recycle the LR, drop its state bits. */
			vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
			clear_bit(lr, vgic_cpu->lr_used);
			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_STATE;
			if (vgic_irq_is_active(vcpu, irq))
				vgic_irq_clear_active(vcpu, irq);
		}
	}
}
860 | |||
/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= VGIC_NR_IRQS);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY &&
	    (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
		/* Reuse the existing LR: just add the pending bit. */
		kvm_debug("LR%d piggyback for IRQ%d %x\n",
			  lr, irq, vgic_cpu->vgic_lr[lr]);
		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
		return true;
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
			       vgic_cpu->nr_lr);
	if (lr >= vgic_cpu->nr_lr)
		return false;	/* all LRs busy; caller sets underflow IRQ */

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	/* Level interrupts need a maintenance IRQ on EOI. */
	if (!vgic_irq_is_edge(vcpu, irq))
		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;

	return true;
}
905 | |||
906 | static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq) | ||
907 | { | ||
908 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
909 | unsigned long sources; | ||
910 | int vcpu_id = vcpu->vcpu_id; | ||
911 | int c; | ||
912 | |||
913 | sources = dist->irq_sgi_sources[vcpu_id][irq]; | ||
914 | |||
915 | for_each_set_bit(c, &sources, VGIC_MAX_CPUS) { | ||
916 | if (vgic_queue_irq(vcpu, c, irq)) | ||
917 | clear_bit(c, &sources); | ||
918 | } | ||
919 | |||
920 | dist->irq_sgi_sources[vcpu_id][irq] = sources; | ||
921 | |||
922 | /* | ||
923 | * If the sources bitmap has been cleared it means that we | ||
924 | * could queue all the SGIs onto link registers (see the | ||
925 | * clear_bit above), and therefore we are done with them in | ||
926 | * our emulated gic and can get rid of them. | ||
927 | */ | ||
928 | if (!sources) { | ||
929 | vgic_dist_irq_clear(vcpu, irq); | ||
930 | vgic_cpu_irq_clear(vcpu, irq); | ||
931 | return true; | ||
932 | } | ||
933 | |||
934 | return false; | ||
935 | } | ||
936 | |||
937 | static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq) | ||
938 | { | ||
939 | if (vgic_irq_is_active(vcpu, irq)) | ||
940 | return true; /* level interrupt, already queued */ | ||
941 | |||
942 | if (vgic_queue_irq(vcpu, 0, irq)) { | ||
943 | if (vgic_irq_is_edge(vcpu, irq)) { | ||
944 | vgic_dist_irq_clear(vcpu, irq); | ||
945 | vgic_cpu_irq_clear(vcpu, irq); | ||
946 | } else { | ||
947 | vgic_irq_set_active(vcpu, irq); | ||
948 | } | ||
949 | |||
950 | return true; | ||
951 | } | ||
952 | |||
953 | return false; | ||
954 | } | ||
955 | |||
/*
 * Fill the list registers with pending interrupts before running the
 * guest. Called with the distributor lock held.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!vgic_queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs (continues the iteration from the SGI range) */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		/* Out of LRs: request a maintenance IRQ on underflow. */
		vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
	} else {
		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
	}
}
1011 | |||
/*
 * Process the maintenance interrupt status after a guest exit.
 * Returns true if a level interrupt is still pending and needs to be
 * re-queued.
 */
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	bool level_pending = false;

	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);

	if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		int lr, irq;

		for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
				 vgic_cpu->nr_lr) {
			irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

			vgic_irq_clear_active(vcpu, irq);
			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_is_pending(vcpu, irq)) {
				vgic_cpu_irq_set(vcpu, irq);
				level_pending = true;
			} else {
				vgic_cpu_irq_clear(vcpu, irq);
			}

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
		}
	}

	/* Underflow fired: the LRs have drained, stop asking for it. */
	if (vgic_cpu->vgic_misr & GICH_MISR_U)
		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;

	return level_pending;
}
1055 | |||
/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
			 vgic_cpu->nr_lr) {
		int irq;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

		BUG_ON(irq >= VGIC_NR_IRQS);
		vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	/* A zero bit in ELRSR means the LR still holds a live interrupt. */
	pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
				      vgic_cpu->nr_lr);
	if (level_pending || pending < vgic_cpu->nr_lr)
		set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}
1089 | |||
1090 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | ||
1091 | { | ||
1092 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
1093 | |||
1094 | if (!irqchip_in_kernel(vcpu->kvm)) | ||
1095 | return; | ||
1096 | |||
1097 | spin_lock(&dist->lock); | ||
1098 | __kvm_vgic_flush_hwstate(vcpu); | ||
1099 | spin_unlock(&dist->lock); | ||
1100 | } | ||
1101 | |||
1102 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) | ||
1103 | { | ||
1104 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
1105 | |||
1106 | if (!irqchip_in_kernel(vcpu->kvm)) | ||
1107 | return; | ||
1108 | |||
1109 | spin_lock(&dist->lock); | ||
1110 | __kvm_vgic_sync_hwstate(vcpu); | ||
1111 | spin_unlock(&dist->lock); | ||
1112 | } | ||
1113 | |||
1114 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | ||
1115 | { | ||
1116 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
1117 | |||
1118 | if (!irqchip_in_kernel(vcpu->kvm)) | ||
1119 | return 0; | ||
1120 | |||
1121 | return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu); | ||
1122 | } | ||
1123 | |||
1124 | static void vgic_kick_vcpus(struct kvm *kvm) | ||
1125 | { | ||
1126 | struct kvm_vcpu *vcpu; | ||
1127 | int c; | ||
1128 | |||
1129 | /* | ||
1130 | * We've injected an interrupt, time to find out who deserves | ||
1131 | * a good kick... | ||
1132 | */ | ||
1133 | kvm_for_each_vcpu(c, vcpu, kvm) { | ||
1134 | if (kvm_vgic_vcpu_pending_irq(vcpu)) | ||
1135 | kvm_vcpu_kick(vcpu); | ||
1136 | } | ||
1137 | } | ||
1138 | |||
/*
 * Decide whether an injection should be forwarded:
 * - edge triggered: only on a rising edge
 * - level triggered: only on a level change
 */
static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int state = vgic_dist_irq_is_pending(vcpu, irq);

	if (vgic_irq_is_edge(vcpu, irq))
		return level > state;

	return level != state;
}
1154 | |||
/*
 * Apply an injection to the distributor state under the vgic lock.
 * Returns true when the change means a VCPU may need kicking, false
 * when the injection was filtered out or cannot be delivered yet.
 */
static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
				  unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int is_edge, is_level;
	int enabled;
	bool ret = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	is_edge = vgic_irq_is_edge(vcpu, irq_num);
	is_level = !is_edge;

	/* Drop injections that don't change the observable line state. */
	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	/* SPIs are routed to the CPU configured in the target registers. */
	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level)
		vgic_dist_irq_set(vcpu, irq_num);
	else
		vgic_dist_irq_clear(vcpu, irq_num);

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	/* Disabled: remembered as pending above, but don't kick anyone. */
	if (!enabled) {
		ret = false;
		goto out;
	}

	if (is_level && vgic_irq_is_active(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, &dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret;
}
1213 | |||
1214 | /** | ||
1215 | * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic | ||
1216 | * @kvm: The VM structure pointer | ||
1217 | * @cpuid: The CPU for PPIs | ||
1218 | * @irq_num: The IRQ number that is assigned to the device | ||
1219 | * @level: Edge-triggered: true: to trigger the interrupt | ||
1220 | * false: to ignore the call | ||
1221 | * Level-sensitive true: activates an interrupt | ||
1222 | * false: deactivates an interrupt | ||
1223 | * | ||
1224 | * The GIC is not concerned with devices being active-LOW or active-HIGH for | ||
1225 | * level-sensitive interrupts. You can think of the level parameter as 1 | ||
1226 | * being HIGH and 0 being LOW and all devices being active-HIGH. | ||
1227 | */ | ||
1228 | int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, | ||
1229 | bool level) | ||
1230 | { | ||
1231 | if (vgic_update_irq_state(kvm, cpuid, irq_num, level)) | ||
1232 | vgic_kick_vcpus(kvm); | ||
1233 | |||
1234 | return 0; | ||
1235 | } | ||
1236 | |||
/* Maintenance IRQ handler: the real work happens on the exit path. */
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}
1247 | |||
/*
 * Per-VCPU vgic initialization: enable the PPIs, configure SGIs/PPIs
 * as edge triggered, reset the LR map and the CPU interface registers.
 * Returns -EBUSY when the vcpu id exceeds what the vgic can address.
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
		return -EBUSY;

	for (i = 0; i < VGIC_NR_IRQS; i++) {
		/* PPIs come out of reset enabled; SGIs/PPIs are edge. */
		if (i < VGIC_NR_PPIS)
			vgic_bitmap_set_irq_val(&dist->irq_enabled,
						vcpu->vcpu_id, i, 1);
		if (i < VGIC_NR_PRIVATE_IRQS)
			vgic_bitmap_set_irq_val(&dist->irq_cfg,
						vcpu->vcpu_id, i, VGIC_CFG_EDGE);

		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
	}

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_cpu->vgic_vmcr = 0;

	vgic_cpu->nr_lr = vgic_nr_lr;
	vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */

	return 0;
}
1283 | |||
/* Per-CPU callback: enable the maintenance IRQ on the calling CPU. */
static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic_maint_irq, 0);
}
1288 | |||
1289 | static int vgic_cpu_notify(struct notifier_block *self, | ||
1290 | unsigned long action, void *cpu) | ||
1291 | { | ||
1292 | switch (action) { | ||
1293 | case CPU_STARTING: | ||
1294 | case CPU_STARTING_FROZEN: | ||
1295 | vgic_init_maintenance_interrupt(NULL); | ||
1296 | break; | ||
1297 | case CPU_DYING: | ||
1298 | case CPU_DYING_FROZEN: | ||
1299 | disable_percpu_irq(vgic_maint_irq); | ||
1300 | break; | ||
1301 | } | ||
1302 | |||
1303 | return NOTIFY_OK; | ||
1304 | } | ||
1305 | |||
/* Hotplug notifier keeping the maintenance IRQ state per-CPU correct. */
static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};
1309 | |||
/*
 * Global (host-side) vgic initialization: locate the GIC in the device
 * tree, register the maintenance IRQ, map the virtual interface
 * control registers into hyp mode and record the VCPU interface base.
 * Errors unwind in reverse order via the out_* labels.
 */
int kvm_vgic_hyp_init(void)
{
	int ret;
	struct resource vctrl_res;
	struct resource vcpu_res;

	vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
	if (!vgic_node) {
		kvm_err("error: no compatible vgic node in DT\n");
		return -ENODEV;
	}

	vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
	if (!vgic_maint_irq) {
		kvm_err("error getting vgic maintenance irq from DT\n");
		ret = -ENXIO;
		goto out;
	}

	ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
		goto out;
	}

	ret = register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	/* DT resource index 2 is the GICH (virtual interface control) block. */
	ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
	if (ret) {
		kvm_err("Cannot obtain VCTRL resource\n");
		goto out_free_irq;
	}

	vgic_vctrl_base = of_iomap(vgic_node, 2);
	if (!vgic_vctrl_base) {
		kvm_err("Cannot ioremap VCTRL\n");
		ret = -ENOMEM;
		goto out_free_irq;
	}

	/* GICH_VTR low 6 bits encode (number of list registers - 1). */
	vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
	vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;

	ret = create_hyp_io_mappings(vgic_vctrl_base,
				     vgic_vctrl_base + resource_size(&vctrl_res),
				     vctrl_res.start);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out_unmap;
	}

	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
		 vctrl_res.start, vgic_maint_irq);
	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	/* DT resource index 3 is the GICV (virtual CPU interface) block. */
	if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
		kvm_err("Cannot obtain VCPU resource\n");
		ret = -ENXIO;
		goto out_unmap;
	}
	vgic_vcpu_base = vcpu_res.start;

	goto out;

out_unmap:
	iounmap(vgic_vctrl_base);
out_free_irq:
	free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
out:
	of_node_put(vgic_node);
	return ret;
}
1387 | |||
/*
 * Per-VM vgic initialization: map the host GICV region at the guest's
 * chosen CPU interface address, set default SPI routing and mark the
 * vgic ready. Requires both base addresses to have been set first.
 */
int kvm_vgic_init(struct kvm *kvm)
{
	int ret = 0, i;

	mutex_lock(&kvm->lock);

	if (vgic_initialized(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	/* Route all SPIs to CPU0 by default (4 targets per register). */
	for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	kvm_timer_init(kvm);
	kvm->arch.vgic.ready = true;
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
1420 | |||
1421 | int kvm_vgic_create(struct kvm *kvm) | ||
1422 | { | ||
1423 | int ret = 0; | ||
1424 | |||
1425 | mutex_lock(&kvm->lock); | ||
1426 | |||
1427 | if (atomic_read(&kvm->online_vcpus) || kvm->arch.vgic.vctrl_base) { | ||
1428 | ret = -EEXIST; | ||
1429 | goto out; | ||
1430 | } | ||
1431 | |||
1432 | spin_lock_init(&kvm->arch.vgic.lock); | ||
1433 | kvm->arch.vgic.vctrl_base = vgic_vctrl_base; | ||
1434 | kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; | ||
1435 | kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF; | ||
1436 | |||
1437 | out: | ||
1438 | mutex_unlock(&kvm->lock); | ||
1439 | return ret; | ||
1440 | } | ||
1441 | |||
1442 | static bool vgic_ioaddr_overlap(struct kvm *kvm) | ||
1443 | { | ||
1444 | phys_addr_t dist = kvm->arch.vgic.vgic_dist_base; | ||
1445 | phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base; | ||
1446 | |||
1447 | if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu)) | ||
1448 | return 0; | ||
1449 | if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) || | ||
1450 | (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist)) | ||
1451 | return -EBUSY; | ||
1452 | return 0; | ||
1453 | } | ||
1454 | |||
/*
 * Record a guest-physical base address for one of the vgic regions.
 *
 * Returns -EEXIST if the address was already set, -EINVAL if
 * addr + size wraps around, or the result of the overlap check.
 */
static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		return ret;
	*ioaddr = addr;
	return ret;
}
1471 | |||
/*
 * kvm_vgic_set_addr - set a vgic region base address from user space
 * @kvm:  the VM
 * @type: KVM_VGIC_V2_ADDR_TYPE_DIST or KVM_VGIC_V2_ADDR_TYPE_CPU
 * @addr: guest physical base; must be 4K aligned and within KVM_PHYS_MASK
 *
 * Returns 0 on success, -E2BIG/-EINVAL for a bad address, -ENODEV for
 * an unknown region type, or the error from vgic_ioaddr_assign().
 */
int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
				       addr, KVM_VGIC_V2_DIST_SIZE);
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
				       addr, KVM_VGIC_V2_CPU_SIZE);
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->lock);
	return r;
}
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index a075b3e0c5c7..e026b19b23ea 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig | |||
@@ -40,6 +40,7 @@ config ARCH_DAVINCI_DA850 | |||
40 | bool "DA850/OMAP-L138/AM18x based system" | 40 | bool "DA850/OMAP-L138/AM18x based system" |
41 | select ARCH_DAVINCI_DA8XX | 41 | select ARCH_DAVINCI_DA8XX |
42 | select ARCH_HAS_CPUFREQ | 42 | select ARCH_HAS_CPUFREQ |
43 | select CPU_FREQ_TABLE | ||
43 | select CP_INTC | 44 | select CP_INTC |
44 | 45 | ||
45 | config ARCH_DAVINCI_DA8XX | 46 | config ARCH_DAVINCI_DA8XX |
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c index 4d6933848abf..a0d4f6038b60 100644 --- a/arch/arm/mach-davinci/da850.c +++ b/arch/arm/mach-davinci/da850.c | |||
@@ -1004,7 +1004,7 @@ static const struct da850_opp da850_opp_96 = { | |||
1004 | 1004 | ||
1005 | #define OPP(freq) \ | 1005 | #define OPP(freq) \ |
1006 | { \ | 1006 | { \ |
1007 | .index = (unsigned int) &da850_opp_##freq, \ | 1007 | .driver_data = (unsigned int) &da850_opp_##freq, \ |
1008 | .frequency = freq * 1000, \ | 1008 | .frequency = freq * 1000, \ |
1009 | } | 1009 | } |
1010 | 1010 | ||
@@ -1016,7 +1016,7 @@ static struct cpufreq_frequency_table da850_freq_table[] = { | |||
1016 | OPP(200), | 1016 | OPP(200), |
1017 | OPP(96), | 1017 | OPP(96), |
1018 | { | 1018 | { |
1019 | .index = 0, | 1019 | .driver_data = 0, |
1020 | .frequency = CPUFREQ_TABLE_END, | 1020 | .frequency = CPUFREQ_TABLE_END, |
1021 | }, | 1021 | }, |
1022 | }; | 1022 | }; |
@@ -1044,7 +1044,7 @@ static int da850_set_voltage(unsigned int index) | |||
1044 | if (!cvdd) | 1044 | if (!cvdd) |
1045 | return -ENODEV; | 1045 | return -ENODEV; |
1046 | 1046 | ||
1047 | opp = (struct da850_opp *) cpufreq_info.freq_table[index].index; | 1047 | opp = (struct da850_opp *) cpufreq_info.freq_table[index].driver_data; |
1048 | 1048 | ||
1049 | return regulator_set_voltage(cvdd, opp->cvdd_min, opp->cvdd_max); | 1049 | return regulator_set_voltage(cvdd, opp->cvdd_min, opp->cvdd_max); |
1050 | } | 1050 | } |
@@ -1125,7 +1125,7 @@ static int da850_set_pll0rate(struct clk *clk, unsigned long index) | |||
1125 | struct pll_data *pll = clk->pll_data; | 1125 | struct pll_data *pll = clk->pll_data; |
1126 | int ret; | 1126 | int ret; |
1127 | 1127 | ||
1128 | opp = (struct da850_opp *) cpufreq_info.freq_table[index].index; | 1128 | opp = (struct da850_opp *) cpufreq_info.freq_table[index].driver_data; |
1129 | prediv = opp->prediv; | 1129 | prediv = opp->prediv; |
1130 | mult = opp->mult; | 1130 | mult = opp->mult; |
1131 | postdiv = opp->postdiv; | 1131 | postdiv = opp->postdiv; |
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c index b13cc74114db..8a53f346cdb3 100644 --- a/arch/arm/mach-ebsa110/core.c +++ b/arch/arm/mach-ebsa110/core.c | |||
@@ -116,7 +116,7 @@ static void __init ebsa110_map_io(void) | |||
116 | iotable_init(ebsa110_io_desc, ARRAY_SIZE(ebsa110_io_desc)); | 116 | iotable_init(ebsa110_io_desc, ARRAY_SIZE(ebsa110_io_desc)); |
117 | } | 117 | } |
118 | 118 | ||
119 | static void __iomem *ebsa110_ioremap_caller(unsigned long cookie, size_t size, | 119 | static void __iomem *ebsa110_ioremap_caller(phys_addr_t cookie, size_t size, |
120 | unsigned int flags, void *caller) | 120 | unsigned int flags, void *caller) |
121 | { | 121 | { |
122 | return (void __iomem *)cookie; | 122 | return (void __iomem *)cookie; |
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index ba70a846d1c6..855d4a7b462d 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig | |||
@@ -93,7 +93,7 @@ config SOC_EXYNOS5440 | |||
93 | default y | 93 | default y |
94 | depends on ARCH_EXYNOS5 | 94 | depends on ARCH_EXYNOS5 |
95 | select ARCH_HAS_OPP | 95 | select ARCH_HAS_OPP |
96 | select ARM_ARCH_TIMER | 96 | select HAVE_ARM_ARCH_TIMER |
97 | select AUTO_ZRELADDR | 97 | select AUTO_ZRELADDR |
98 | select MIGHT_HAVE_PCI | 98 | select MIGHT_HAVE_PCI |
99 | select PCI_DOMAINS if PCI | 99 | select PCI_DOMAINS if PCI |
diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c index dae4cd7be040..6f424eced181 100644 --- a/arch/arm/mach-imx/mach-mx31moboard.c +++ b/arch/arm/mach-imx/mach-mx31moboard.c | |||
@@ -268,10 +268,11 @@ static struct mc13xxx_led_platform_data moboard_led[] = { | |||
268 | static struct mc13xxx_leds_platform_data moboard_leds = { | 268 | static struct mc13xxx_leds_platform_data moboard_leds = { |
269 | .num_leds = ARRAY_SIZE(moboard_led), | 269 | .num_leds = ARRAY_SIZE(moboard_led), |
270 | .led = moboard_led, | 270 | .led = moboard_led, |
271 | .flags = MC13783_LED_SLEWLIMTC, | 271 | .led_control[0] = MC13783_LED_C0_ENABLE | MC13783_LED_C0_ABMODE(0), |
272 | .abmode = MC13783_LED_AB_DISABLED, | 272 | .led_control[1] = MC13783_LED_C1_SLEWLIM, |
273 | .tc1_period = MC13783_LED_PERIOD_10MS, | 273 | .led_control[2] = MC13783_LED_C2_SLEWLIM, |
274 | .tc2_period = MC13783_LED_PERIOD_10MS, | 274 | .led_control[3] = MC13783_LED_C3_PERIOD(0), |
275 | .led_control[4] = MC13783_LED_C3_PERIOD(0), | ||
275 | }; | 276 | }; |
276 | 277 | ||
277 | static struct mc13xxx_buttons_platform_data moboard_buttons = { | 278 | static struct mc13xxx_buttons_platform_data moboard_buttons = { |
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c index 8f0f60697f55..0884ca90d15a 100644 --- a/arch/arm/mach-imx/mm-imx3.c +++ b/arch/arm/mach-imx/mm-imx3.c | |||
@@ -65,7 +65,7 @@ static void imx3_idle(void) | |||
65 | : "=r" (reg)); | 65 | : "=r" (reg)); |
66 | } | 66 | } |
67 | 67 | ||
68 | static void __iomem *imx3_ioremap_caller(unsigned long phys_addr, size_t size, | 68 | static void __iomem *imx3_ioremap_caller(phys_addr_t phys_addr, size_t size, |
69 | unsigned int mtype, void *caller) | 69 | unsigned int mtype, void *caller) |
70 | { | 70 | { |
71 | if (mtype == MT_DEVICE) { | 71 | if (mtype == MT_DEVICE) { |
diff --git a/arch/arm/mach-iop13xx/io.c b/arch/arm/mach-iop13xx/io.c index 183dc8b5511b..faaf7d4482c5 100644 --- a/arch/arm/mach-iop13xx/io.c +++ b/arch/arm/mach-iop13xx/io.c | |||
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | #include "pci.h" | 24 | #include "pci.h" |
25 | 25 | ||
26 | static void __iomem *__iop13xx_ioremap_caller(unsigned long cookie, | 26 | static void __iomem *__iop13xx_ioremap_caller(phys_addr_t cookie, |
27 | size_t size, unsigned int mtype, void *caller) | 27 | size_t size, unsigned int mtype, void *caller) |
28 | { | 28 | { |
29 | void __iomem * retval; | 29 | void __iomem * retval; |
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c index 3181f61ea63e..1c5bd7637b05 100644 --- a/arch/arm/mach-iop13xx/setup.c +++ b/arch/arm/mach-iop13xx/setup.c | |||
@@ -469,7 +469,6 @@ void __init iop13xx_platform_init(void) | |||
469 | dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); | 469 | dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); |
470 | dma_cap_set(DMA_XOR, plat_data->cap_mask); | 470 | dma_cap_set(DMA_XOR, plat_data->cap_mask); |
471 | dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); | 471 | dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); |
472 | dma_cap_set(DMA_MEMSET, plat_data->cap_mask); | ||
473 | dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); | 472 | dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); |
474 | break; | 473 | break; |
475 | case IOP13XX_INIT_ADMA_1: | 474 | case IOP13XX_INIT_ADMA_1: |
@@ -479,7 +478,6 @@ void __init iop13xx_platform_init(void) | |||
479 | dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); | 478 | dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); |
480 | dma_cap_set(DMA_XOR, plat_data->cap_mask); | 479 | dma_cap_set(DMA_XOR, plat_data->cap_mask); |
481 | dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); | 480 | dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); |
482 | dma_cap_set(DMA_MEMSET, plat_data->cap_mask); | ||
483 | dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); | 481 | dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); |
484 | break; | 482 | break; |
485 | case IOP13XX_INIT_ADMA_2: | 483 | case IOP13XX_INIT_ADMA_2: |
@@ -489,7 +487,6 @@ void __init iop13xx_platform_init(void) | |||
489 | dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); | 487 | dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); |
490 | dma_cap_set(DMA_XOR, plat_data->cap_mask); | 488 | dma_cap_set(DMA_XOR, plat_data->cap_mask); |
491 | dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); | 489 | dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); |
492 | dma_cap_set(DMA_MEMSET, plat_data->cap_mask); | ||
493 | dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); | 490 | dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); |
494 | dma_cap_set(DMA_PQ, plat_data->cap_mask); | 491 | dma_cap_set(DMA_PQ, plat_data->cap_mask); |
495 | dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask); | 492 | dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask); |
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 6600cff6bd92..d7223b3b81f3 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c | |||
@@ -559,7 +559,7 @@ void ixp4xx_restart(char mode, const char *cmd) | |||
559 | * fallback to the default. | 559 | * fallback to the default. |
560 | */ | 560 | */ |
561 | 561 | ||
562 | static void __iomem *ixp4xx_ioremap_caller(unsigned long addr, size_t size, | 562 | static void __iomem *ixp4xx_ioremap_caller(phys_addr_t addr, size_t size, |
563 | unsigned int mtype, void *caller) | 563 | unsigned int mtype, void *caller) |
564 | { | 564 | { |
565 | if (!is_pci_memory(addr)) | 565 | if (!is_pci_memory(addr)) |
diff --git a/arch/arm/mach-msm/common.h b/arch/arm/mach-msm/common.h index ce8215a269e5..421cf7751a80 100644 --- a/arch/arm/mach-msm/common.h +++ b/arch/arm/mach-msm/common.h | |||
@@ -23,7 +23,7 @@ extern void msm_map_msm8x60_io(void); | |||
23 | extern void msm_map_msm8960_io(void); | 23 | extern void msm_map_msm8960_io(void); |
24 | extern void msm_map_qsd8x50_io(void); | 24 | extern void msm_map_qsd8x50_io(void); |
25 | 25 | ||
26 | extern void __iomem *__msm_ioremap_caller(unsigned long phys_addr, size_t size, | 26 | extern void __iomem *__msm_ioremap_caller(phys_addr_t phys_addr, size_t size, |
27 | unsigned int mtype, void *caller); | 27 | unsigned int mtype, void *caller); |
28 | 28 | ||
29 | extern struct smp_operations msm_smp_ops; | 29 | extern struct smp_operations msm_smp_ops; |
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c index efa113e4de86..3dc04ccaf59f 100644 --- a/arch/arm/mach-msm/io.c +++ b/arch/arm/mach-msm/io.c | |||
@@ -168,7 +168,7 @@ void __init msm_map_msm7x30_io(void) | |||
168 | } | 168 | } |
169 | #endif /* CONFIG_ARCH_MSM7X30 */ | 169 | #endif /* CONFIG_ARCH_MSM7X30 */ |
170 | 170 | ||
171 | void __iomem *__msm_ioremap_caller(unsigned long phys_addr, size_t size, | 171 | void __iomem *__msm_ioremap_caller(phys_addr_t phys_addr, size_t size, |
172 | unsigned int mtype, void *caller) | 172 | unsigned int mtype, void *caller) |
173 | { | 173 | { |
174 | if (mtype == MT_DEVICE) { | 174 | if (mtype == MT_DEVICE) { |
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 58152b15ecaa..627fa7e41fba 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
@@ -62,6 +62,7 @@ config SOC_OMAP5 | |||
62 | select HAVE_SMP | 62 | select HAVE_SMP |
63 | select COMMON_CLK | 63 | select COMMON_CLK |
64 | select HAVE_ARM_ARCH_TIMER | 64 | select HAVE_ARM_ARCH_TIMER |
65 | select ARM_ERRATA_798181 | ||
65 | 66 | ||
66 | config SOC_AM33XX | 67 | config SOC_AM33XX |
67 | bool "AM33XX support" | 68 | bool "AM33XX support" |
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index ea5a27ff9941..d4f671547c37 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile | |||
@@ -95,10 +95,6 @@ obj-$(CONFIG_POWER_AVS_OMAP_CLASS3) += smartreflex-class3.o | |||
95 | AFLAGS_sleep24xx.o :=-Wa,-march=armv6 | 95 | AFLAGS_sleep24xx.o :=-Wa,-march=armv6 |
96 | AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a$(plus_sec) | 96 | AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a$(plus_sec) |
97 | 97 | ||
98 | ifeq ($(CONFIG_PM_VERBOSE),y) | ||
99 | CFLAGS_pm_bus.o += -DDEBUG | ||
100 | endif | ||
101 | |||
102 | endif | 98 | endif |
103 | 99 | ||
104 | ifeq ($(CONFIG_CPU_IDLE),y) | 100 | ifeq ($(CONFIG_CPU_IDLE),y) |
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index b54562d1235e..87e65dde8e13 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c | |||
@@ -553,6 +553,37 @@ static struct usbhs_omap_platform_data igep3_usbhs_bdata __initdata = { | |||
553 | 553 | ||
554 | #ifdef CONFIG_OMAP_MUX | 554 | #ifdef CONFIG_OMAP_MUX |
555 | static struct omap_board_mux board_mux[] __initdata = { | 555 | static struct omap_board_mux board_mux[] __initdata = { |
556 | /* Display Sub System */ | ||
557 | OMAP3_MUX(DSS_PCLK, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
558 | OMAP3_MUX(DSS_HSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
559 | OMAP3_MUX(DSS_VSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
560 | OMAP3_MUX(DSS_ACBIAS, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
561 | OMAP3_MUX(DSS_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
562 | OMAP3_MUX(DSS_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
563 | OMAP3_MUX(DSS_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
564 | OMAP3_MUX(DSS_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
565 | OMAP3_MUX(DSS_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
566 | OMAP3_MUX(DSS_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
567 | OMAP3_MUX(DSS_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
568 | OMAP3_MUX(DSS_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
569 | OMAP3_MUX(DSS_DATA8, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
570 | OMAP3_MUX(DSS_DATA9, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
571 | OMAP3_MUX(DSS_DATA10, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
572 | OMAP3_MUX(DSS_DATA11, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
573 | OMAP3_MUX(DSS_DATA12, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
574 | OMAP3_MUX(DSS_DATA13, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
575 | OMAP3_MUX(DSS_DATA14, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
576 | OMAP3_MUX(DSS_DATA15, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
577 | OMAP3_MUX(DSS_DATA16, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
578 | OMAP3_MUX(DSS_DATA17, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
579 | OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
580 | OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
581 | OMAP3_MUX(DSS_DATA20, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
582 | OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
583 | OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
584 | OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT), | ||
585 | /* TFP410 PanelBus DVI Transmitte (GPIO_170) */ | ||
586 | OMAP3_MUX(HDQ_SIO, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT), | ||
556 | /* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */ | 587 | /* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */ |
557 | OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), | 588 | OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), |
558 | { .reg_offset = OMAP_MUX_TERMINATOR }, | 589 | { .reg_offset = OMAP_MUX_TERMINATOR }, |
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c index bd74f9f6063b..bdd1e3a179e1 100644 --- a/arch/arm/mach-omap2/board-rx51-video.c +++ b/arch/arm/mach-omap2/board-rx51-video.c | |||
@@ -61,7 +61,7 @@ static struct omap_dss_board_info rx51_dss_board_info = { | |||
61 | 61 | ||
62 | static int __init rx51_video_init(void) | 62 | static int __init rx51_video_init(void) |
63 | { | 63 | { |
64 | if (!machine_is_nokia_rx51()) | 64 | if (!machine_is_nokia_rx51() && !of_machine_is_compatible("nokia,omap3-n900")) |
65 | return 0; | 65 | return 0; |
66 | 66 | ||
67 | if (omap_mux_init_gpio(RX51_LCD_RESET_GPIO, OMAP_PIN_OUTPUT)) { | 67 | if (omap_mux_init_gpio(RX51_LCD_RESET_GPIO, OMAP_PIN_OUTPUT)) { |
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index aef96e45cb20..3c1279f27d1f 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | #include <linux/clk.h> | 16 | #include <linux/clk.h> |
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/gpio.h> | ||
19 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
20 | #include <linux/of.h> | 19 | #include <linux/of.h> |
21 | #include <linux/pinctrl/machine.h> | 20 | #include <linux/pinctrl/machine.h> |
@@ -66,7 +65,7 @@ static int __init omap3_l3_init(void) | |||
66 | 65 | ||
67 | WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); | 66 | WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); |
68 | 67 | ||
69 | return IS_ERR(pdev) ? PTR_ERR(pdev) : 0; | 68 | return PTR_RET(pdev); |
70 | } | 69 | } |
71 | omap_postcore_initcall(omap3_l3_init); | 70 | omap_postcore_initcall(omap3_l3_init); |
72 | 71 | ||
@@ -100,7 +99,7 @@ static int __init omap4_l3_init(void) | |||
100 | 99 | ||
101 | WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); | 100 | WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); |
102 | 101 | ||
103 | return IS_ERR(pdev) ? PTR_ERR(pdev) : 0; | 102 | return PTR_RET(pdev); |
104 | } | 103 | } |
105 | omap_postcore_initcall(omap4_l3_init); | 104 | omap_postcore_initcall(omap4_l3_init); |
106 | 105 | ||
diff --git a/arch/arm/mach-omap2/fb.c b/arch/arm/mach-omap2/fb.c index 190ae493c6ef..2ca33cc0c484 100644 --- a/arch/arm/mach-omap2/fb.c +++ b/arch/arm/mach-omap2/fb.c | |||
@@ -83,10 +83,7 @@ static int __init omap_init_vrfb(void) | |||
83 | pdev = platform_device_register_resndata(NULL, "omapvrfb", -1, | 83 | pdev = platform_device_register_resndata(NULL, "omapvrfb", -1, |
84 | res, num_res, NULL, 0); | 84 | res, num_res, NULL, 0); |
85 | 85 | ||
86 | if (IS_ERR(pdev)) | 86 | return PTR_RET(pdev); |
87 | return PTR_ERR(pdev); | ||
88 | else | ||
89 | return 0; | ||
90 | } | 87 | } |
91 | 88 | ||
92 | omap_arch_initcall(omap_init_vrfb); | 89 | omap_arch_initcall(omap_init_vrfb); |
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 1c7969e965d7..f3fdd6afa213 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c | |||
@@ -1734,7 +1734,7 @@ static int __init omap_gpmc_init(void) | |||
1734 | pdev = omap_device_build(DEVICE_NAME, -1, oh, NULL, 0); | 1734 | pdev = omap_device_build(DEVICE_NAME, -1, oh, NULL, 0); |
1735 | WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); | 1735 | WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); |
1736 | 1736 | ||
1737 | return IS_ERR(pdev) ? PTR_ERR(pdev) : 0; | 1737 | return PTR_RET(pdev); |
1738 | } | 1738 | } |
1739 | omap_postcore_initcall(omap_gpmc_init); | 1739 | omap_postcore_initcall(omap_gpmc_init); |
1740 | 1740 | ||
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 68be532f8688..5cc92874be7e 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c | |||
@@ -588,11 +588,6 @@ static int _od_runtime_suspend(struct device *dev) | |||
588 | return ret; | 588 | return ret; |
589 | } | 589 | } |
590 | 590 | ||
591 | static int _od_runtime_idle(struct device *dev) | ||
592 | { | ||
593 | return pm_generic_runtime_idle(dev); | ||
594 | } | ||
595 | |||
596 | static int _od_runtime_resume(struct device *dev) | 591 | static int _od_runtime_resume(struct device *dev) |
597 | { | 592 | { |
598 | struct platform_device *pdev = to_platform_device(dev); | 593 | struct platform_device *pdev = to_platform_device(dev); |
@@ -648,7 +643,7 @@ static int _od_resume_noirq(struct device *dev) | |||
648 | struct dev_pm_domain omap_device_pm_domain = { | 643 | struct dev_pm_domain omap_device_pm_domain = { |
649 | .ops = { | 644 | .ops = { |
650 | SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, | 645 | SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, |
651 | _od_runtime_idle) | 646 | NULL) |
652 | USE_PLATFORM_PM_SLEEP_OPS | 647 | USE_PLATFORM_PM_SLEEP_OPS |
653 | .suspend_noirq = _od_suspend_noirq, | 648 | .suspend_noirq = _od_suspend_noirq, |
654 | .resume_noirq = _od_resume_noirq, | 649 | .resume_noirq = _od_resume_noirq, |
diff --git a/arch/arm/mach-omap2/pmu.c b/arch/arm/mach-omap2/pmu.c index 9ace8eae7ee8..33c8846b4193 100644 --- a/arch/arm/mach-omap2/pmu.c +++ b/arch/arm/mach-omap2/pmu.c | |||
@@ -54,10 +54,7 @@ static int __init omap2_init_pmu(unsigned oh_num, char *oh_names[]) | |||
54 | WARN(IS_ERR(omap_pmu_dev), "Can't build omap_device for %s.\n", | 54 | WARN(IS_ERR(omap_pmu_dev), "Can't build omap_device for %s.\n", |
55 | dev_name); | 55 | dev_name); |
56 | 56 | ||
57 | if (IS_ERR(omap_pmu_dev)) | 57 | return PTR_RET(omap_pmu_dev); |
58 | return PTR_ERR(omap_pmu_dev); | ||
59 | |||
60 | return 0; | ||
61 | } | 58 | } |
62 | 59 | ||
63 | static int __init omap_init_pmu(void) | 60 | static int __init omap_init_pmu(void) |
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S index 88ff83a0942e..9086ce03ae12 100644 --- a/arch/arm/mach-omap2/sleep44xx.S +++ b/arch/arm/mach-omap2/sleep44xx.S | |||
@@ -34,6 +34,8 @@ ppa_zero_params: | |||
34 | ppa_por_params: | 34 | ppa_por_params: |
35 | .word 1, 0 | 35 | .word 1, 0 |
36 | 36 | ||
37 | #ifdef CONFIG_ARCH_OMAP4 | ||
38 | |||
37 | /* | 39 | /* |
38 | * ============================= | 40 | * ============================= |
39 | * == CPU suspend finisher == | 41 | * == CPU suspend finisher == |
@@ -326,7 +328,9 @@ skip_l2en: | |||
326 | 328 | ||
327 | b cpu_resume @ Jump to generic resume | 329 | b cpu_resume @ Jump to generic resume |
328 | ENDPROC(omap4_cpu_resume) | 330 | ENDPROC(omap4_cpu_resume) |
329 | #endif | 331 | #endif /* CONFIG_ARCH_OMAP4 */ |
332 | |||
333 | #endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */ | ||
330 | 334 | ||
331 | #ifndef CONFIG_OMAP4_ERRATA_I688 | 335 | #ifndef CONFIG_OMAP4_ERRATA_I688 |
332 | ENTRY(omap_bus_sync) | 336 | ENTRY(omap_bus_sync) |
diff --git a/arch/arm/mach-omap2/smartreflex-class3.c b/arch/arm/mach-omap2/smartreflex-class3.c index aee3c8940a30..7a42e1960c3b 100644 --- a/arch/arm/mach-omap2/smartreflex-class3.c +++ b/arch/arm/mach-omap2/smartreflex-class3.c | |||
@@ -26,14 +26,14 @@ static int sr_class3_enable(struct omap_sr *sr) | |||
26 | } | 26 | } |
27 | 27 | ||
28 | omap_vp_enable(sr->voltdm); | 28 | omap_vp_enable(sr->voltdm); |
29 | return sr_enable(sr->voltdm, volt); | 29 | return sr_enable(sr, volt); |
30 | } | 30 | } |
31 | 31 | ||
32 | static int sr_class3_disable(struct omap_sr *sr, int is_volt_reset) | 32 | static int sr_class3_disable(struct omap_sr *sr, int is_volt_reset) |
33 | { | 33 | { |
34 | sr_disable_errgen(sr->voltdm); | 34 | sr_disable_errgen(sr); |
35 | omap_vp_disable(sr->voltdm); | 35 | omap_vp_disable(sr->voltdm); |
36 | sr_disable(sr->voltdm); | 36 | sr_disable(sr); |
37 | if (is_volt_reset) | 37 | if (is_volt_reset) |
38 | voltdm_reset(sr->voltdm); | 38 | voltdm_reset(sr->voltdm); |
39 | 39 | ||
@@ -42,7 +42,7 @@ static int sr_class3_disable(struct omap_sr *sr, int is_volt_reset) | |||
42 | 42 | ||
43 | static int sr_class3_configure(struct omap_sr *sr) | 43 | static int sr_class3_configure(struct omap_sr *sr) |
44 | { | 44 | { |
45 | return sr_configure_errgen(sr->voltdm); | 45 | return sr_configure_errgen(sr); |
46 | } | 46 | } |
47 | 47 | ||
48 | /* SR class3 structure */ | 48 | /* SR class3 structure */ |
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 3bdb0fb02028..5f148e721790 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c | |||
@@ -220,7 +220,7 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer, | |||
220 | int posted) | 220 | int posted) |
221 | { | 221 | { |
222 | char name[10]; /* 10 = sizeof("gptXX_Xck0") */ | 222 | char name[10]; /* 10 = sizeof("gptXX_Xck0") */ |
223 | const char *oh_name; | 223 | const char *oh_name = NULL; |
224 | struct device_node *np; | 224 | struct device_node *np; |
225 | struct omap_hwmod *oh; | 225 | struct omap_hwmod *oh; |
226 | struct resource irq, mem; | 226 | struct resource irq, mem; |
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index 96100dbf5a2e..a8427115ee07 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig | |||
@@ -615,12 +615,14 @@ endmenu | |||
615 | config PXA25x | 615 | config PXA25x |
616 | bool | 616 | bool |
617 | select CPU_XSCALE | 617 | select CPU_XSCALE |
618 | select CPU_FREQ_TABLE if CPU_FREQ | ||
618 | help | 619 | help |
619 | Select code specific to PXA21x/25x/26x variants | 620 | Select code specific to PXA21x/25x/26x variants |
620 | 621 | ||
621 | config PXA27x | 622 | config PXA27x |
622 | bool | 623 | bool |
623 | select CPU_XSCALE | 624 | select CPU_XSCALE |
625 | select CPU_FREQ_TABLE if CPU_FREQ | ||
624 | help | 626 | help |
625 | Select code specific to PXA27x variants | 627 | Select code specific to PXA27x variants |
626 | 628 | ||
@@ -633,6 +635,7 @@ config CPU_PXA26x | |||
633 | config PXA3xx | 635 | config PXA3xx |
634 | bool | 636 | bool |
635 | select CPU_XSC3 | 637 | select CPU_XSC3 |
638 | select CPU_FREQ_TABLE if CPU_FREQ | ||
636 | help | 639 | help |
637 | Select code specific to PXA3xx variants | 640 | Select code specific to PXA3xx variants |
638 | 641 | ||
diff --git a/arch/arm/mach-s3c24xx/cpufreq-utils.c b/arch/arm/mach-s3c24xx/cpufreq-utils.c index ddd8280e3875..2a0aa5684e72 100644 --- a/arch/arm/mach-s3c24xx/cpufreq-utils.c +++ b/arch/arm/mach-s3c24xx/cpufreq-utils.c | |||
@@ -60,5 +60,5 @@ void s3c2410_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg) | |||
60 | */ | 60 | */ |
61 | void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg) | 61 | void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg) |
62 | { | 62 | { |
63 | __raw_writel(cfg->pll.index, S3C2410_MPLLCON); | 63 | __raw_writel(cfg->pll.driver_data, S3C2410_MPLLCON); |
64 | } | 64 | } |
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2410.c b/arch/arm/mach-s3c24xx/pll-s3c2410.c index dcf3420a3271..5e37d368594b 100644 --- a/arch/arm/mach-s3c24xx/pll-s3c2410.c +++ b/arch/arm/mach-s3c24xx/pll-s3c2410.c | |||
@@ -33,36 +33,36 @@ | |||
33 | #include <plat/cpu-freq-core.h> | 33 | #include <plat/cpu-freq-core.h> |
34 | 34 | ||
35 | static struct cpufreq_frequency_table pll_vals_12MHz[] = { | 35 | static struct cpufreq_frequency_table pll_vals_12MHz[] = { |
36 | { .frequency = 34000000, .index = PLLVAL(82, 2, 3), }, | 36 | { .frequency = 34000000, .driver_data = PLLVAL(82, 2, 3), }, |
37 | { .frequency = 45000000, .index = PLLVAL(82, 1, 3), }, | 37 | { .frequency = 45000000, .driver_data = PLLVAL(82, 1, 3), }, |
38 | { .frequency = 51000000, .index = PLLVAL(161, 3, 3), }, | 38 | { .frequency = 51000000, .driver_data = PLLVAL(161, 3, 3), }, |
39 | { .frequency = 48000000, .index = PLLVAL(120, 2, 3), }, | 39 | { .frequency = 48000000, .driver_data = PLLVAL(120, 2, 3), }, |
40 | { .frequency = 56000000, .index = PLLVAL(142, 2, 3), }, | 40 | { .frequency = 56000000, .driver_data = PLLVAL(142, 2, 3), }, |
41 | { .frequency = 68000000, .index = PLLVAL(82, 2, 2), }, | 41 | { .frequency = 68000000, .driver_data = PLLVAL(82, 2, 2), }, |
42 | { .frequency = 79000000, .index = PLLVAL(71, 1, 2), }, | 42 | { .frequency = 79000000, .driver_data = PLLVAL(71, 1, 2), }, |
43 | { .frequency = 85000000, .index = PLLVAL(105, 2, 2), }, | 43 | { .frequency = 85000000, .driver_data = PLLVAL(105, 2, 2), }, |
44 | { .frequency = 90000000, .index = PLLVAL(112, 2, 2), }, | 44 | { .frequency = 90000000, .driver_data = PLLVAL(112, 2, 2), }, |
45 | { .frequency = 101000000, .index = PLLVAL(127, 2, 2), }, | 45 | { .frequency = 101000000, .driver_data = PLLVAL(127, 2, 2), }, |
46 | { .frequency = 113000000, .index = PLLVAL(105, 1, 2), }, | 46 | { .frequency = 113000000, .driver_data = PLLVAL(105, 1, 2), }, |
47 | { .frequency = 118000000, .index = PLLVAL(150, 2, 2), }, | 47 | { .frequency = 118000000, .driver_data = PLLVAL(150, 2, 2), }, |
48 | { .frequency = 124000000, .index = PLLVAL(116, 1, 2), }, | 48 | { .frequency = 124000000, .driver_data = PLLVAL(116, 1, 2), }, |
49 | { .frequency = 135000000, .index = PLLVAL(82, 2, 1), }, | 49 | { .frequency = 135000000, .driver_data = PLLVAL(82, 2, 1), }, |
50 | { .frequency = 147000000, .index = PLLVAL(90, 2, 1), }, | 50 | { .frequency = 147000000, .driver_data = PLLVAL(90, 2, 1), }, |
51 | { .frequency = 152000000, .index = PLLVAL(68, 1, 1), }, | 51 | { .frequency = 152000000, .driver_data = PLLVAL(68, 1, 1), }, |
52 | { .frequency = 158000000, .index = PLLVAL(71, 1, 1), }, | 52 | { .frequency = 158000000, .driver_data = PLLVAL(71, 1, 1), }, |
53 | { .frequency = 170000000, .index = PLLVAL(77, 1, 1), }, | 53 | { .frequency = 170000000, .driver_data = PLLVAL(77, 1, 1), }, |
54 | { .frequency = 180000000, .index = PLLVAL(82, 1, 1), }, | 54 | { .frequency = 180000000, .driver_data = PLLVAL(82, 1, 1), }, |
55 | { .frequency = 186000000, .index = PLLVAL(85, 1, 1), }, | 55 | { .frequency = 186000000, .driver_data = PLLVAL(85, 1, 1), }, |
56 | { .frequency = 192000000, .index = PLLVAL(88, 1, 1), }, | 56 | { .frequency = 192000000, .driver_data = PLLVAL(88, 1, 1), }, |
57 | { .frequency = 203000000, .index = PLLVAL(161, 3, 1), }, | 57 | { .frequency = 203000000, .driver_data = PLLVAL(161, 3, 1), }, |
58 | 58 | ||
59 | /* 2410A extras */ | 59 | /* 2410A extras */ |
60 | 60 | ||
61 | { .frequency = 210000000, .index = PLLVAL(132, 2, 1), }, | 61 | { .frequency = 210000000, .driver_data = PLLVAL(132, 2, 1), }, |
62 | { .frequency = 226000000, .index = PLLVAL(105, 1, 1), }, | 62 | { .frequency = 226000000, .driver_data = PLLVAL(105, 1, 1), }, |
63 | { .frequency = 266000000, .index = PLLVAL(125, 1, 1), }, | 63 | { .frequency = 266000000, .driver_data = PLLVAL(125, 1, 1), }, |
64 | { .frequency = 268000000, .index = PLLVAL(126, 1, 1), }, | 64 | { .frequency = 268000000, .driver_data = PLLVAL(126, 1, 1), }, |
65 | { .frequency = 270000000, .index = PLLVAL(127, 1, 1), }, | 65 | { .frequency = 270000000, .driver_data = PLLVAL(127, 1, 1), }, |
66 | }; | 66 | }; |
67 | 67 | ||
68 | static int s3c2410_plls_add(struct device *dev, struct subsys_interface *sif) | 68 | static int s3c2410_plls_add(struct device *dev, struct subsys_interface *sif) |
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c index 673781758319..a19460e6e7b0 100644 --- a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c +++ b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c | |||
@@ -21,33 +21,33 @@ | |||
21 | #include <plat/cpu-freq-core.h> | 21 | #include <plat/cpu-freq-core.h> |
22 | 22 | ||
23 | static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = { | 23 | static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = { |
24 | { .frequency = 75000000, .index = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */ | 24 | { .frequency = 75000000, .driver_data = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */ |
25 | { .frequency = 80000000, .index = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */ | 25 | { .frequency = 80000000, .driver_data = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */ |
26 | { .frequency = 90000000, .index = PLLVAL(0x70, 2, 3), }, /* FVco 720.000000 */ | 26 | { .frequency = 90000000, .driver_data = PLLVAL(0x70, 2, 3), }, /* FVco 720.000000 */ |
27 | { .frequency = 100000000, .index = PLLVAL(0x5c, 1, 3), }, /* FVco 800.000000 */ | 27 | { .frequency = 100000000, .driver_data = PLLVAL(0x5c, 1, 3), }, /* FVco 800.000000 */ |
28 | { .frequency = 110000000, .index = PLLVAL(0x66, 1, 3), }, /* FVco 880.000000 */ | 28 | { .frequency = 110000000, .driver_data = PLLVAL(0x66, 1, 3), }, /* FVco 880.000000 */ |
29 | { .frequency = 120000000, .index = PLLVAL(0x70, 1, 3), }, /* FVco 960.000000 */ | 29 | { .frequency = 120000000, .driver_data = PLLVAL(0x70, 1, 3), }, /* FVco 960.000000 */ |
30 | { .frequency = 150000000, .index = PLLVAL(0x75, 3, 2), }, /* FVco 600.000000 */ | 30 | { .frequency = 150000000, .driver_data = PLLVAL(0x75, 3, 2), }, /* FVco 600.000000 */ |
31 | { .frequency = 160000000, .index = PLLVAL(0x98, 4, 2), }, /* FVco 640.000000 */ | 31 | { .frequency = 160000000, .driver_data = PLLVAL(0x98, 4, 2), }, /* FVco 640.000000 */ |
32 | { .frequency = 170000000, .index = PLLVAL(0x4d, 1, 2), }, /* FVco 680.000000 */ | 32 | { .frequency = 170000000, .driver_data = PLLVAL(0x4d, 1, 2), }, /* FVco 680.000000 */ |
33 | { .frequency = 180000000, .index = PLLVAL(0x70, 2, 2), }, /* FVco 720.000000 */ | 33 | { .frequency = 180000000, .driver_data = PLLVAL(0x70, 2, 2), }, /* FVco 720.000000 */ |
34 | { .frequency = 190000000, .index = PLLVAL(0x57, 1, 2), }, /* FVco 760.000000 */ | 34 | { .frequency = 190000000, .driver_data = PLLVAL(0x57, 1, 2), }, /* FVco 760.000000 */ |
35 | { .frequency = 200000000, .index = PLLVAL(0x5c, 1, 2), }, /* FVco 800.000000 */ | 35 | { .frequency = 200000000, .driver_data = PLLVAL(0x5c, 1, 2), }, /* FVco 800.000000 */ |
36 | { .frequency = 210000000, .index = PLLVAL(0x84, 2, 2), }, /* FVco 840.000000 */ | 36 | { .frequency = 210000000, .driver_data = PLLVAL(0x84, 2, 2), }, /* FVco 840.000000 */ |
37 | { .frequency = 220000000, .index = PLLVAL(0x66, 1, 2), }, /* FVco 880.000000 */ | 37 | { .frequency = 220000000, .driver_data = PLLVAL(0x66, 1, 2), }, /* FVco 880.000000 */ |
38 | { .frequency = 230000000, .index = PLLVAL(0x6b, 1, 2), }, /* FVco 920.000000 */ | 38 | { .frequency = 230000000, .driver_data = PLLVAL(0x6b, 1, 2), }, /* FVco 920.000000 */ |
39 | { .frequency = 240000000, .index = PLLVAL(0x70, 1, 2), }, /* FVco 960.000000 */ | 39 | { .frequency = 240000000, .driver_data = PLLVAL(0x70, 1, 2), }, /* FVco 960.000000 */ |
40 | { .frequency = 300000000, .index = PLLVAL(0x75, 3, 1), }, /* FVco 600.000000 */ | 40 | { .frequency = 300000000, .driver_data = PLLVAL(0x75, 3, 1), }, /* FVco 600.000000 */ |
41 | { .frequency = 310000000, .index = PLLVAL(0x93, 4, 1), }, /* FVco 620.000000 */ | 41 | { .frequency = 310000000, .driver_data = PLLVAL(0x93, 4, 1), }, /* FVco 620.000000 */ |
42 | { .frequency = 320000000, .index = PLLVAL(0x98, 4, 1), }, /* FVco 640.000000 */ | 42 | { .frequency = 320000000, .driver_data = PLLVAL(0x98, 4, 1), }, /* FVco 640.000000 */ |
43 | { .frequency = 330000000, .index = PLLVAL(0x66, 2, 1), }, /* FVco 660.000000 */ | 43 | { .frequency = 330000000, .driver_data = PLLVAL(0x66, 2, 1), }, /* FVco 660.000000 */ |
44 | { .frequency = 340000000, .index = PLLVAL(0x4d, 1, 1), }, /* FVco 680.000000 */ | 44 | { .frequency = 340000000, .driver_data = PLLVAL(0x4d, 1, 1), }, /* FVco 680.000000 */ |
45 | { .frequency = 350000000, .index = PLLVAL(0xa7, 4, 1), }, /* FVco 700.000000 */ | 45 | { .frequency = 350000000, .driver_data = PLLVAL(0xa7, 4, 1), }, /* FVco 700.000000 */ |
46 | { .frequency = 360000000, .index = PLLVAL(0x70, 2, 1), }, /* FVco 720.000000 */ | 46 | { .frequency = 360000000, .driver_data = PLLVAL(0x70, 2, 1), }, /* FVco 720.000000 */ |
47 | { .frequency = 370000000, .index = PLLVAL(0xb1, 4, 1), }, /* FVco 740.000000 */ | 47 | { .frequency = 370000000, .driver_data = PLLVAL(0xb1, 4, 1), }, /* FVco 740.000000 */ |
48 | { .frequency = 380000000, .index = PLLVAL(0x57, 1, 1), }, /* FVco 760.000000 */ | 48 | { .frequency = 380000000, .driver_data = PLLVAL(0x57, 1, 1), }, /* FVco 760.000000 */ |
49 | { .frequency = 390000000, .index = PLLVAL(0x7a, 2, 1), }, /* FVco 780.000000 */ | 49 | { .frequency = 390000000, .driver_data = PLLVAL(0x7a, 2, 1), }, /* FVco 780.000000 */ |
50 | { .frequency = 400000000, .index = PLLVAL(0x5c, 1, 1), }, /* FVco 800.000000 */ | 50 | { .frequency = 400000000, .driver_data = PLLVAL(0x5c, 1, 1), }, /* FVco 800.000000 */ |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static int s3c2440_plls12_add(struct device *dev, struct subsys_interface *sif) | 53 | static int s3c2440_plls12_add(struct device *dev, struct subsys_interface *sif) |
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c index debfa106289b..1191b2905625 100644 --- a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c +++ b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c | |||
@@ -21,61 +21,61 @@ | |||
21 | #include <plat/cpu-freq-core.h> | 21 | #include <plat/cpu-freq-core.h> |
22 | 22 | ||
23 | static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = { | 23 | static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = { |
24 | { .frequency = 78019200, .index = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */ | 24 | { .frequency = 78019200, .driver_data = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */ |
25 | { .frequency = 84067200, .index = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */ | 25 | { .frequency = 84067200, .driver_data = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */ |
26 | { .frequency = 90115200, .index = PLLVAL(141, 5, 3), }, /* FVco 720.921600 */ | 26 | { .frequency = 90115200, .driver_data = PLLVAL(141, 5, 3), }, /* FVco 720.921600 */ |
27 | { .frequency = 96163200, .index = PLLVAL(151, 5, 3), }, /* FVco 769.305600 */ | 27 | { .frequency = 96163200, .driver_data = PLLVAL(151, 5, 3), }, /* FVco 769.305600 */ |
28 | { .frequency = 102135600, .index = PLLVAL(185, 6, 3), }, /* FVco 817.084800 */ | 28 | { .frequency = 102135600, .driver_data = PLLVAL(185, 6, 3), }, /* FVco 817.084800 */ |
29 | { .frequency = 108259200, .index = PLLVAL(171, 5, 3), }, /* FVco 866.073600 */ | 29 | { .frequency = 108259200, .driver_data = PLLVAL(171, 5, 3), }, /* FVco 866.073600 */ |
30 | { .frequency = 114307200, .index = PLLVAL(127, 3, 3), }, /* FVco 914.457600 */ | 30 | { .frequency = 114307200, .driver_data = PLLVAL(127, 3, 3), }, /* FVco 914.457600 */ |
31 | { .frequency = 120234240, .index = PLLVAL(134, 3, 3), }, /* FVco 961.873920 */ | 31 | { .frequency = 120234240, .driver_data = PLLVAL(134, 3, 3), }, /* FVco 961.873920 */ |
32 | { .frequency = 126161280, .index = PLLVAL(141, 3, 3), }, /* FVco 1009.290240 */ | 32 | { .frequency = 126161280, .driver_data = PLLVAL(141, 3, 3), }, /* FVco 1009.290240 */ |
33 | { .frequency = 132088320, .index = PLLVAL(148, 3, 3), }, /* FVco 1056.706560 */ | 33 | { .frequency = 132088320, .driver_data = PLLVAL(148, 3, 3), }, /* FVco 1056.706560 */ |
34 | { .frequency = 138015360, .index = PLLVAL(155, 3, 3), }, /* FVco 1104.122880 */ | 34 | { .frequency = 138015360, .driver_data = PLLVAL(155, 3, 3), }, /* FVco 1104.122880 */ |
35 | { .frequency = 144789120, .index = PLLVAL(163, 3, 3), }, /* FVco 1158.312960 */ | 35 | { .frequency = 144789120, .driver_data = PLLVAL(163, 3, 3), }, /* FVco 1158.312960 */ |
36 | { .frequency = 150100363, .index = PLLVAL(187, 9, 2), }, /* FVco 600.401454 */ | 36 | { .frequency = 150100363, .driver_data = PLLVAL(187, 9, 2), }, /* FVco 600.401454 */ |
37 | { .frequency = 156038400, .index = PLLVAL(121, 5, 2), }, /* FVco 624.153600 */ | 37 | { .frequency = 156038400, .driver_data = PLLVAL(121, 5, 2), }, /* FVco 624.153600 */ |
38 | { .frequency = 162086400, .index = PLLVAL(126, 5, 2), }, /* FVco 648.345600 */ | 38 | { .frequency = 162086400, .driver_data = PLLVAL(126, 5, 2), }, /* FVco 648.345600 */ |
39 | { .frequency = 168134400, .index = PLLVAL(131, 5, 2), }, /* FVco 672.537600 */ | 39 | { .frequency = 168134400, .driver_data = PLLVAL(131, 5, 2), }, /* FVco 672.537600 */ |
40 | { .frequency = 174048000, .index = PLLVAL(177, 7, 2), }, /* FVco 696.192000 */ | 40 | { .frequency = 174048000, .driver_data = PLLVAL(177, 7, 2), }, /* FVco 696.192000 */ |
41 | { .frequency = 180230400, .index = PLLVAL(141, 5, 2), }, /* FVco 720.921600 */ | 41 | { .frequency = 180230400, .driver_data = PLLVAL(141, 5, 2), }, /* FVco 720.921600 */ |
42 | { .frequency = 186278400, .index = PLLVAL(124, 4, 2), }, /* FVco 745.113600 */ | 42 | { .frequency = 186278400, .driver_data = PLLVAL(124, 4, 2), }, /* FVco 745.113600 */ |
43 | { .frequency = 192326400, .index = PLLVAL(151, 5, 2), }, /* FVco 769.305600 */ | 43 | { .frequency = 192326400, .driver_data = PLLVAL(151, 5, 2), }, /* FVco 769.305600 */ |
44 | { .frequency = 198132480, .index = PLLVAL(109, 3, 2), }, /* FVco 792.529920 */ | 44 | { .frequency = 198132480, .driver_data = PLLVAL(109, 3, 2), }, /* FVco 792.529920 */ |
45 | { .frequency = 204271200, .index = PLLVAL(185, 6, 2), }, /* FVco 817.084800 */ | 45 | { .frequency = 204271200, .driver_data = PLLVAL(185, 6, 2), }, /* FVco 817.084800 */ |
46 | { .frequency = 210268800, .index = PLLVAL(141, 4, 2), }, /* FVco 841.075200 */ | 46 | { .frequency = 210268800, .driver_data = PLLVAL(141, 4, 2), }, /* FVco 841.075200 */ |
47 | { .frequency = 216518400, .index = PLLVAL(171, 5, 2), }, /* FVco 866.073600 */ | 47 | { .frequency = 216518400, .driver_data = PLLVAL(171, 5, 2), }, /* FVco 866.073600 */ |
48 | { .frequency = 222264000, .index = PLLVAL(97, 2, 2), }, /* FVco 889.056000 */ | 48 | { .frequency = 222264000, .driver_data = PLLVAL(97, 2, 2), }, /* FVco 889.056000 */ |
49 | { .frequency = 228614400, .index = PLLVAL(127, 3, 2), }, /* FVco 914.457600 */ | 49 | { .frequency = 228614400, .driver_data = PLLVAL(127, 3, 2), }, /* FVco 914.457600 */ |
50 | { .frequency = 234259200, .index = PLLVAL(158, 4, 2), }, /* FVco 937.036800 */ | 50 | { .frequency = 234259200, .driver_data = PLLVAL(158, 4, 2), }, /* FVco 937.036800 */ |
51 | { .frequency = 240468480, .index = PLLVAL(134, 3, 2), }, /* FVco 961.873920 */ | 51 | { .frequency = 240468480, .driver_data = PLLVAL(134, 3, 2), }, /* FVco 961.873920 */ |
52 | { .frequency = 246960000, .index = PLLVAL(167, 4, 2), }, /* FVco 987.840000 */ | 52 | { .frequency = 246960000, .driver_data = PLLVAL(167, 4, 2), }, /* FVco 987.840000 */ |
53 | { .frequency = 252322560, .index = PLLVAL(141, 3, 2), }, /* FVco 1009.290240 */ | 53 | { .frequency = 252322560, .driver_data = PLLVAL(141, 3, 2), }, /* FVco 1009.290240 */ |
54 | { .frequency = 258249600, .index = PLLVAL(114, 2, 2), }, /* FVco 1032.998400 */ | 54 | { .frequency = 258249600, .driver_data = PLLVAL(114, 2, 2), }, /* FVco 1032.998400 */ |
55 | { .frequency = 264176640, .index = PLLVAL(148, 3, 2), }, /* FVco 1056.706560 */ | 55 | { .frequency = 264176640, .driver_data = PLLVAL(148, 3, 2), }, /* FVco 1056.706560 */ |
56 | { .frequency = 270950400, .index = PLLVAL(120, 2, 2), }, /* FVco 1083.801600 */ | 56 | { .frequency = 270950400, .driver_data = PLLVAL(120, 2, 2), }, /* FVco 1083.801600 */ |
57 | { .frequency = 276030720, .index = PLLVAL(155, 3, 2), }, /* FVco 1104.122880 */ | 57 | { .frequency = 276030720, .driver_data = PLLVAL(155, 3, 2), }, /* FVco 1104.122880 */ |
58 | { .frequency = 282240000, .index = PLLVAL(92, 1, 2), }, /* FVco 1128.960000 */ | 58 | { .frequency = 282240000, .driver_data = PLLVAL(92, 1, 2), }, /* FVco 1128.960000 */ |
59 | { .frequency = 289578240, .index = PLLVAL(163, 3, 2), }, /* FVco 1158.312960 */ | 59 | { .frequency = 289578240, .driver_data = PLLVAL(163, 3, 2), }, /* FVco 1158.312960 */ |
60 | { .frequency = 294235200, .index = PLLVAL(131, 2, 2), }, /* FVco 1176.940800 */ | 60 | { .frequency = 294235200, .driver_data = PLLVAL(131, 2, 2), }, /* FVco 1176.940800 */ |
61 | { .frequency = 300200727, .index = PLLVAL(187, 9, 1), }, /* FVco 600.401454 */ | 61 | { .frequency = 300200727, .driver_data = PLLVAL(187, 9, 1), }, /* FVco 600.401454 */ |
62 | { .frequency = 306358690, .index = PLLVAL(191, 9, 1), }, /* FVco 612.717380 */ | 62 | { .frequency = 306358690, .driver_data = PLLVAL(191, 9, 1), }, /* FVco 612.717380 */ |
63 | { .frequency = 312076800, .index = PLLVAL(121, 5, 1), }, /* FVco 624.153600 */ | 63 | { .frequency = 312076800, .driver_data = PLLVAL(121, 5, 1), }, /* FVco 624.153600 */ |
64 | { .frequency = 318366720, .index = PLLVAL(86, 3, 1), }, /* FVco 636.733440 */ | 64 | { .frequency = 318366720, .driver_data = PLLVAL(86, 3, 1), }, /* FVco 636.733440 */ |
65 | { .frequency = 324172800, .index = PLLVAL(126, 5, 1), }, /* FVco 648.345600 */ | 65 | { .frequency = 324172800, .driver_data = PLLVAL(126, 5, 1), }, /* FVco 648.345600 */ |
66 | { .frequency = 330220800, .index = PLLVAL(109, 4, 1), }, /* FVco 660.441600 */ | 66 | { .frequency = 330220800, .driver_data = PLLVAL(109, 4, 1), }, /* FVco 660.441600 */ |
67 | { .frequency = 336268800, .index = PLLVAL(131, 5, 1), }, /* FVco 672.537600 */ | 67 | { .frequency = 336268800, .driver_data = PLLVAL(131, 5, 1), }, /* FVco 672.537600 */ |
68 | { .frequency = 342074880, .index = PLLVAL(93, 3, 1), }, /* FVco 684.149760 */ | 68 | { .frequency = 342074880, .driver_data = PLLVAL(93, 3, 1), }, /* FVco 684.149760 */ |
69 | { .frequency = 348096000, .index = PLLVAL(177, 7, 1), }, /* FVco 696.192000 */ | 69 | { .frequency = 348096000, .driver_data = PLLVAL(177, 7, 1), }, /* FVco 696.192000 */ |
70 | { .frequency = 355622400, .index = PLLVAL(118, 4, 1), }, /* FVco 711.244800 */ | 70 | { .frequency = 355622400, .driver_data = PLLVAL(118, 4, 1), }, /* FVco 711.244800 */ |
71 | { .frequency = 360460800, .index = PLLVAL(141, 5, 1), }, /* FVco 720.921600 */ | 71 | { .frequency = 360460800, .driver_data = PLLVAL(141, 5, 1), }, /* FVco 720.921600 */ |
72 | { .frequency = 366206400, .index = PLLVAL(165, 6, 1), }, /* FVco 732.412800 */ | 72 | { .frequency = 366206400, .driver_data = PLLVAL(165, 6, 1), }, /* FVco 732.412800 */ |
73 | { .frequency = 372556800, .index = PLLVAL(124, 4, 1), }, /* FVco 745.113600 */ | 73 | { .frequency = 372556800, .driver_data = PLLVAL(124, 4, 1), }, /* FVco 745.113600 */ |
74 | { .frequency = 378201600, .index = PLLVAL(126, 4, 1), }, /* FVco 756.403200 */ | 74 | { .frequency = 378201600, .driver_data = PLLVAL(126, 4, 1), }, /* FVco 756.403200 */ |
75 | { .frequency = 384652800, .index = PLLVAL(151, 5, 1), }, /* FVco 769.305600 */ | 75 | { .frequency = 384652800, .driver_data = PLLVAL(151, 5, 1), }, /* FVco 769.305600 */ |
76 | { .frequency = 391608000, .index = PLLVAL(177, 6, 1), }, /* FVco 783.216000 */ | 76 | { .frequency = 391608000, .driver_data = PLLVAL(177, 6, 1), }, /* FVco 783.216000 */ |
77 | { .frequency = 396264960, .index = PLLVAL(109, 3, 1), }, /* FVco 792.529920 */ | 77 | { .frequency = 396264960, .driver_data = PLLVAL(109, 3, 1), }, /* FVco 792.529920 */ |
78 | { .frequency = 402192000, .index = PLLVAL(87, 2, 1), }, /* FVco 804.384000 */ | 78 | { .frequency = 402192000, .driver_data = PLLVAL(87, 2, 1), }, /* FVco 804.384000 */ |
79 | }; | 79 | }; |
80 | 80 | ||
81 | static int s3c2440_plls169344_add(struct device *dev, | 81 | static int s3c2440_plls169344_add(struct device *dev, |
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index db27e8eef192..3912ce91fee4 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig | |||
@@ -23,7 +23,7 @@ config ARCH_R8A73A4 | |||
23 | select ARCH_WANT_OPTIONAL_GPIOLIB | 23 | select ARCH_WANT_OPTIONAL_GPIOLIB |
24 | select ARM_GIC | 24 | select ARM_GIC |
25 | select CPU_V7 | 25 | select CPU_V7 |
26 | select ARM_ARCH_TIMER | 26 | select HAVE_ARM_ARCH_TIMER |
27 | select SH_CLK_CPG | 27 | select SH_CLK_CPG |
28 | select RENESAS_IRQC | 28 | select RENESAS_IRQC |
29 | 29 | ||
@@ -59,7 +59,7 @@ config ARCH_R8A7790 | |||
59 | select ARCH_WANT_OPTIONAL_GPIOLIB | 59 | select ARCH_WANT_OPTIONAL_GPIOLIB |
60 | select ARM_GIC | 60 | select ARM_GIC |
61 | select CPU_V7 | 61 | select CPU_V7 |
62 | select ARM_ARCH_TIMER | 62 | select HAVE_ARM_ARCH_TIMER |
63 | select SH_CLK_CPG | 63 | select SH_CLK_CPG |
64 | select RENESAS_IRQC | 64 | select RENESAS_IRQC |
65 | 65 | ||
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c index 7e105932c09d..5390c6bbbc02 100644 --- a/arch/arm/mach-shmobile/clock-sh7372.c +++ b/arch/arm/mach-shmobile/clock-sh7372.c | |||
@@ -142,15 +142,15 @@ static void pllc2_table_rebuild(struct clk *clk) | |||
142 | /* Initialise PLLC2 frequency table */ | 142 | /* Initialise PLLC2 frequency table */ |
143 | for (i = 0; i < ARRAY_SIZE(pllc2_freq_table) - 2; i++) { | 143 | for (i = 0; i < ARRAY_SIZE(pllc2_freq_table) - 2; i++) { |
144 | pllc2_freq_table[i].frequency = clk->parent->rate * (i + 20) * 2; | 144 | pllc2_freq_table[i].frequency = clk->parent->rate * (i + 20) * 2; |
145 | pllc2_freq_table[i].index = i; | 145 | pllc2_freq_table[i].driver_data = i; |
146 | } | 146 | } |
147 | 147 | ||
148 | /* This is a special entry - switching PLL off makes it a repeater */ | 148 | /* This is a special entry - switching PLL off makes it a repeater */ |
149 | pllc2_freq_table[i].frequency = clk->parent->rate; | 149 | pllc2_freq_table[i].frequency = clk->parent->rate; |
150 | pllc2_freq_table[i].index = i; | 150 | pllc2_freq_table[i].driver_data = i; |
151 | 151 | ||
152 | pllc2_freq_table[++i].frequency = CPUFREQ_TABLE_END; | 152 | pllc2_freq_table[++i].frequency = CPUFREQ_TABLE_END; |
153 | pllc2_freq_table[i].index = i; | 153 | pllc2_freq_table[i].driver_data = i; |
154 | } | 154 | } |
155 | 155 | ||
156 | static unsigned long pllc2_recalc(struct clk *clk) | 156 | static unsigned long pllc2_recalc(struct clk *clk) |
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index 84d72fc36dfe..ef3a8da49b2d 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig | |||
@@ -28,7 +28,6 @@ config ARCH_TEGRA_2x_SOC | |||
28 | select ARM_ERRATA_754327 if SMP | 28 | select ARM_ERRATA_754327 if SMP |
29 | select ARM_ERRATA_764369 if SMP | 29 | select ARM_ERRATA_764369 if SMP |
30 | select ARM_GIC | 30 | select ARM_GIC |
31 | select CPU_FREQ_TABLE if CPU_FREQ | ||
32 | select CPU_V7 | 31 | select CPU_V7 |
33 | select PINCTRL | 32 | select PINCTRL |
34 | select PINCTRL_TEGRA20 | 33 | select PINCTRL_TEGRA20 |
@@ -46,7 +45,6 @@ config ARCH_TEGRA_3x_SOC | |||
46 | select ARM_ERRATA_754322 | 45 | select ARM_ERRATA_754322 |
47 | select ARM_ERRATA_764369 if SMP | 46 | select ARM_ERRATA_764369 if SMP |
48 | select ARM_GIC | 47 | select ARM_GIC |
49 | select CPU_FREQ_TABLE if CPU_FREQ | ||
50 | select CPU_V7 | 48 | select CPU_V7 |
51 | select PINCTRL | 49 | select PINCTRL |
52 | select PINCTRL_TEGRA30 | 50 | select PINCTRL_TEGRA30 |
@@ -60,10 +58,9 @@ config ARCH_TEGRA_3x_SOC | |||
60 | 58 | ||
61 | config ARCH_TEGRA_114_SOC | 59 | config ARCH_TEGRA_114_SOC |
62 | bool "Enable support for Tegra114 family" | 60 | bool "Enable support for Tegra114 family" |
63 | select ARM_ARCH_TIMER | 61 | select HAVE_ARM_ARCH_TIMER |
64 | select ARM_GIC | 62 | select ARM_GIC |
65 | select ARM_L1_CACHE_SHIFT_6 | 63 | select ARM_L1_CACHE_SHIFT_6 |
66 | select CPU_FREQ_TABLE if CPU_FREQ | ||
67 | select CPU_V7 | 64 | select CPU_V7 |
68 | select PINCTRL | 65 | select PINCTRL |
69 | select PINCTRL_TEGRA114 | 66 | select PINCTRL_TEGRA114 |
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c index ec5836b1e713..b25153e2ebaa 100644 --- a/arch/arm/mach-tegra/common.c +++ b/arch/arm/mach-tegra/common.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <linux/clk.h> | 23 | #include <linux/clk.h> |
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/irqchip.h> | 25 | #include <linux/irqchip.h> |
26 | #include <linux/clk/tegra.h> | 26 | #include <linux/clk-provider.h> |
27 | 27 | ||
28 | #include <asm/hardware/cache-l2x0.h> | 28 | #include <asm/hardware/cache-l2x0.h> |
29 | 29 | ||
@@ -60,7 +60,7 @@ u32 tegra_uart_config[4] = { | |||
60 | #ifdef CONFIG_OF | 60 | #ifdef CONFIG_OF |
61 | void __init tegra_dt_init_irq(void) | 61 | void __init tegra_dt_init_irq(void) |
62 | { | 62 | { |
63 | tegra_clocks_init(); | 63 | of_clk_init(NULL); |
64 | tegra_pmc_init(); | 64 | tegra_pmc_init(); |
65 | tegra_init_irq(); | 65 | tegra_init_irq(); |
66 | irqchip_init(); | 66 | irqchip_init(); |
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c index b6145ea51641..e6fb0239151b 100644 --- a/arch/arm/mach-ux500/cpu.c +++ b/arch/arm/mach-ux500/cpu.c | |||
@@ -76,13 +76,15 @@ void __init ux500_init_irq(void) | |||
76 | } else if (cpu_is_u9540()) { | 76 | } else if (cpu_is_u9540()) { |
77 | prcmu_early_init(U8500_PRCMU_BASE, SZ_8K - 1); | 77 | prcmu_early_init(U8500_PRCMU_BASE, SZ_8K - 1); |
78 | ux500_pm_init(U8500_PRCMU_BASE, SZ_8K - 1); | 78 | ux500_pm_init(U8500_PRCMU_BASE, SZ_8K - 1); |
79 | u8500_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE, | 79 | u9540_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE, |
80 | U8500_CLKRST3_BASE, U8500_CLKRST5_BASE, | 80 | U8500_CLKRST3_BASE, U8500_CLKRST5_BASE, |
81 | U8500_CLKRST6_BASE); | 81 | U8500_CLKRST6_BASE); |
82 | } else if (cpu_is_u8540()) { | 82 | } else if (cpu_is_u8540()) { |
83 | prcmu_early_init(U8500_PRCMU_BASE, SZ_8K + SZ_4K - 1); | 83 | prcmu_early_init(U8500_PRCMU_BASE, SZ_8K + SZ_4K - 1); |
84 | ux500_pm_init(U8500_PRCMU_BASE, SZ_8K + SZ_4K - 1); | 84 | ux500_pm_init(U8500_PRCMU_BASE, SZ_8K + SZ_4K - 1); |
85 | u8540_clk_init(); | 85 | u8540_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE, |
86 | U8500_CLKRST3_BASE, U8500_CLKRST5_BASE, | ||
87 | U8500_CLKRST6_BASE); | ||
86 | } | 88 | } |
87 | } | 89 | } |
88 | 90 | ||
diff --git a/arch/arm/mach-virt/Kconfig b/arch/arm/mach-virt/Kconfig index 8958f0d896bc..081d46929436 100644 --- a/arch/arm/mach-virt/Kconfig +++ b/arch/arm/mach-virt/Kconfig | |||
@@ -2,7 +2,7 @@ config ARCH_VIRT | |||
2 | bool "Dummy Virtual Machine" if ARCH_MULTI_V7 | 2 | bool "Dummy Virtual Machine" if ARCH_MULTI_V7 |
3 | select ARCH_WANT_OPTIONAL_GPIOLIB | 3 | select ARCH_WANT_OPTIONAL_GPIOLIB |
4 | select ARM_GIC | 4 | select ARM_GIC |
5 | select ARM_ARCH_TIMER | 5 | select HAVE_ARM_ARCH_TIMER |
6 | select ARM_PSCI | 6 | select ARM_PSCI |
7 | select HAVE_SMP | 7 | select HAVE_SMP |
8 | select CPU_V7 | 8 | select CPU_V7 |
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 9e8101ecd63e..6cacdc8dd654 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -392,7 +392,8 @@ config CPU_V7 | |||
392 | select CPU_CACHE_V7 | 392 | select CPU_CACHE_V7 |
393 | select CPU_CACHE_VIPT | 393 | select CPU_CACHE_VIPT |
394 | select CPU_COPY_V6 if MMU | 394 | select CPU_COPY_V6 if MMU |
395 | select CPU_CP15_MMU | 395 | select CPU_CP15_MMU if MMU |
396 | select CPU_CP15_MPU if !MMU | ||
396 | select CPU_HAS_ASID if MMU | 397 | select CPU_HAS_ASID if MMU |
397 | select CPU_PABRT_V7 | 398 | select CPU_PABRT_V7 |
398 | select CPU_TLB_V7 if MMU | 399 | select CPU_TLB_V7 if MMU |
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index ee558a01f390..ecfe6e53f6e0 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile | |||
@@ -16,6 +16,7 @@ obj-$(CONFIG_MODULES) += proc-syms.o | |||
16 | 16 | ||
17 | obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o | 17 | obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o |
18 | obj-$(CONFIG_HIGHMEM) += highmem.o | 18 | obj-$(CONFIG_HIGHMEM) += highmem.o |
19 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
19 | 20 | ||
20 | obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o | 21 | obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o |
21 | obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o | 22 | obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o |
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c465faca51b0..d70e0aba0c9d 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -523,6 +523,147 @@ static void aurora_flush_range(unsigned long start, unsigned long end) | |||
523 | } | 523 | } |
524 | } | 524 | } |
525 | 525 | ||
526 | /* | ||
527 | * For certain Broadcom SoCs, depending on the address range, different offsets | ||
528 | * need to be added to the address before passing it to L2 for | ||
529 | * invalidation/clean/flush | ||
530 | * | ||
531 | * Section Address Range Offset EMI | ||
532 | * 1 0x00000000 - 0x3FFFFFFF 0x80000000 VC | ||
533 | * 2 0x40000000 - 0xBFFFFFFF 0x40000000 SYS | ||
534 | * 3 0xC0000000 - 0xFFFFFFFF 0x80000000 VC | ||
535 | * | ||
536 | * When the start and end addresses have crossed two different sections, we | ||
537 | * need to break the L2 operation into two, each within its own section. | ||
538 | * For example, if we need to invalidate addresses starts at 0xBFFF0000 and | ||
539 | * ends at 0xC0001000, we need do invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2) | ||
540 | * 0xC0000000 - 0xC0001000 | ||
541 | * | ||
542 | * Note 1: | ||
543 | * By breaking a single L2 operation into two, we may potentially suffer some | ||
544 | * performance hit, but keep in mind the cross section case is very rare | ||
545 | * | ||
546 | * Note 2: | ||
547 | * We do not need to handle the case when the start address is in | ||
548 | * Section 1 and the end address is in Section 3, since it is not a valid use | ||
549 | * case | ||
550 | * | ||
551 | * Note 3: | ||
552 | * Section 1 in practical terms can no longer be used on rev A2. Because of | ||
553 | * that the code does not need to handle section 1 at all. | ||
554 | * | ||
555 | */ | ||
556 | #define BCM_SYS_EMI_START_ADDR 0x40000000UL | ||
557 | #define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL | ||
558 | |||
559 | #define BCM_SYS_EMI_OFFSET 0x40000000UL | ||
560 | #define BCM_VC_EMI_OFFSET 0x80000000UL | ||
561 | |||
562 | static inline int bcm_addr_is_sys_emi(unsigned long addr) | ||
563 | { | ||
564 | return (addr >= BCM_SYS_EMI_START_ADDR) && | ||
565 | (addr < BCM_VC_EMI_SEC3_START_ADDR); | ||
566 | } | ||
567 | |||
568 | static inline unsigned long bcm_l2_phys_addr(unsigned long addr) | ||
569 | { | ||
570 | if (bcm_addr_is_sys_emi(addr)) | ||
571 | return addr + BCM_SYS_EMI_OFFSET; | ||
572 | else | ||
573 | return addr + BCM_VC_EMI_OFFSET; | ||
574 | } | ||
575 | |||
576 | static void bcm_inv_range(unsigned long start, unsigned long end) | ||
577 | { | ||
578 | unsigned long new_start, new_end; | ||
579 | |||
580 | BUG_ON(start < BCM_SYS_EMI_START_ADDR); | ||
581 | |||
582 | if (unlikely(end <= start)) | ||
583 | return; | ||
584 | |||
585 | new_start = bcm_l2_phys_addr(start); | ||
586 | new_end = bcm_l2_phys_addr(end); | ||
587 | |||
588 | /* normal case, no cross section between start and end */ | ||
589 | if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { | ||
590 | l2x0_inv_range(new_start, new_end); | ||
591 | return; | ||
592 | } | ||
593 | |||
594 | /* They cross sections, so it can only be a cross from section | ||
595 | * 2 to section 3 | ||
596 | */ | ||
597 | l2x0_inv_range(new_start, | ||
598 | bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); | ||
599 | l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), | ||
600 | new_end); | ||
601 | } | ||
602 | |||
603 | static void bcm_clean_range(unsigned long start, unsigned long end) | ||
604 | { | ||
605 | unsigned long new_start, new_end; | ||
606 | |||
607 | BUG_ON(start < BCM_SYS_EMI_START_ADDR); | ||
608 | |||
609 | if (unlikely(end <= start)) | ||
610 | return; | ||
611 | |||
612 | if ((end - start) >= l2x0_size) { | ||
613 | l2x0_clean_all(); | ||
614 | return; | ||
615 | } | ||
616 | |||
617 | new_start = bcm_l2_phys_addr(start); | ||
618 | new_end = bcm_l2_phys_addr(end); | ||
619 | |||
620 | /* normal case, no cross section between start and end */ | ||
621 | if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { | ||
622 | l2x0_clean_range(new_start, new_end); | ||
623 | return; | ||
624 | } | ||
625 | |||
626 | /* They cross sections, so it can only be a cross from section | ||
627 | * 2 to section 3 | ||
628 | */ | ||
629 | l2x0_clean_range(new_start, | ||
630 | bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); | ||
631 | l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), | ||
632 | new_end); | ||
633 | } | ||
634 | |||
635 | static void bcm_flush_range(unsigned long start, unsigned long end) | ||
636 | { | ||
637 | unsigned long new_start, new_end; | ||
638 | |||
639 | BUG_ON(start < BCM_SYS_EMI_START_ADDR); | ||
640 | |||
641 | if (unlikely(end <= start)) | ||
642 | return; | ||
643 | |||
644 | if ((end - start) >= l2x0_size) { | ||
645 | l2x0_flush_all(); | ||
646 | return; | ||
647 | } | ||
648 | |||
649 | new_start = bcm_l2_phys_addr(start); | ||
650 | new_end = bcm_l2_phys_addr(end); | ||
651 | |||
652 | /* normal case, no cross section between start and end */ | ||
653 | if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { | ||
654 | l2x0_flush_range(new_start, new_end); | ||
655 | return; | ||
656 | } | ||
657 | |||
658 | /* They cross sections, so it can only be a cross from section | ||
659 | * 2 to section 3 | ||
660 | */ | ||
661 | l2x0_flush_range(new_start, | ||
662 | bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); | ||
663 | l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), | ||
664 | new_end); | ||
665 | } | ||
666 | |||
526 | static void __init l2x0_of_setup(const struct device_node *np, | 667 | static void __init l2x0_of_setup(const struct device_node *np, |
527 | u32 *aux_val, u32 *aux_mask) | 668 | u32 *aux_val, u32 *aux_mask) |
528 | { | 669 | { |
@@ -765,6 +906,21 @@ static const struct l2x0_of_data aurora_no_outer_data = { | |||
765 | }, | 906 | }, |
766 | }; | 907 | }; |
767 | 908 | ||
909 | static const struct l2x0_of_data bcm_l2x0_data = { | ||
910 | .setup = pl310_of_setup, | ||
911 | .save = pl310_save, | ||
912 | .outer_cache = { | ||
913 | .resume = pl310_resume, | ||
914 | .inv_range = bcm_inv_range, | ||
915 | .clean_range = bcm_clean_range, | ||
916 | .flush_range = bcm_flush_range, | ||
917 | .sync = l2x0_cache_sync, | ||
918 | .flush_all = l2x0_flush_all, | ||
919 | .inv_all = l2x0_inv_all, | ||
920 | .disable = l2x0_disable, | ||
921 | }, | ||
922 | }; | ||
923 | |||
768 | static const struct of_device_id l2x0_ids[] __initconst = { | 924 | static const struct of_device_id l2x0_ids[] __initconst = { |
769 | { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data }, | 925 | { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data }, |
770 | { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data }, | 926 | { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data }, |
@@ -773,6 +929,8 @@ static const struct of_device_id l2x0_ids[] __initconst = { | |||
773 | .data = (void *)&aurora_no_outer_data}, | 929 | .data = (void *)&aurora_no_outer_data}, |
774 | { .compatible = "marvell,aurora-outer-cache", | 930 | { .compatible = "marvell,aurora-outer-cache", |
775 | .data = (void *)&aurora_with_outer_data}, | 931 | .data = (void *)&aurora_with_outer_data}, |
932 | { .compatible = "bcm,bcm11351-a2-pl310-cache", | ||
933 | .data = (void *)&bcm_l2x0_data}, | ||
776 | {} | 934 | {} |
777 | }; | 935 | }; |
778 | 936 | ||
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 2ac37372ef52..b55b1015724b 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/smp_plat.h> | 20 | #include <asm/smp_plat.h> |
21 | #include <asm/thread_notify.h> | 21 | #include <asm/thread_notify.h> |
22 | #include <asm/tlbflush.h> | 22 | #include <asm/tlbflush.h> |
23 | #include <asm/proc-fns.h> | ||
23 | 24 | ||
24 | /* | 25 | /* |
25 | * On ARMv6, we have the following structure in the Context ID: | 26 | * On ARMv6, we have the following structure in the Context ID: |
@@ -39,33 +40,51 @@ | |||
39 | * non 64-bit operations. | 40 | * non 64-bit operations. |
40 | */ | 41 | */ |
41 | #define ASID_FIRST_VERSION (1ULL << ASID_BITS) | 42 | #define ASID_FIRST_VERSION (1ULL << ASID_BITS) |
42 | #define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1) | 43 | #define NUM_USER_ASIDS ASID_FIRST_VERSION |
43 | |||
44 | #define ASID_TO_IDX(asid) ((asid & ~ASID_MASK) - 1) | ||
45 | #define IDX_TO_ASID(idx) ((idx + 1) & ~ASID_MASK) | ||
46 | 44 | ||
47 | static DEFINE_RAW_SPINLOCK(cpu_asid_lock); | 45 | static DEFINE_RAW_SPINLOCK(cpu_asid_lock); |
48 | static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); | 46 | static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); |
49 | static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); | 47 | static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); |
50 | 48 | ||
51 | DEFINE_PER_CPU(atomic64_t, active_asids); | 49 | static DEFINE_PER_CPU(atomic64_t, active_asids); |
52 | static DEFINE_PER_CPU(u64, reserved_asids); | 50 | static DEFINE_PER_CPU(u64, reserved_asids); |
53 | static cpumask_t tlb_flush_pending; | 51 | static cpumask_t tlb_flush_pending; |
54 | 52 | ||
53 | #ifdef CONFIG_ARM_ERRATA_798181 | ||
54 | void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, | ||
55 | cpumask_t *mask) | ||
56 | { | ||
57 | int cpu; | ||
58 | unsigned long flags; | ||
59 | u64 context_id, asid; | ||
60 | |||
61 | raw_spin_lock_irqsave(&cpu_asid_lock, flags); | ||
62 | context_id = mm->context.id.counter; | ||
63 | for_each_online_cpu(cpu) { | ||
64 | if (cpu == this_cpu) | ||
65 | continue; | ||
66 | /* | ||
67 | * We only need to send an IPI if the other CPUs are | ||
68 | * running the same ASID as the one being invalidated. | ||
69 | */ | ||
70 | asid = per_cpu(active_asids, cpu).counter; | ||
71 | if (asid == 0) | ||
72 | asid = per_cpu(reserved_asids, cpu); | ||
73 | if (context_id == asid) | ||
74 | cpumask_set_cpu(cpu, mask); | ||
75 | } | ||
76 | raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); | ||
77 | } | ||
78 | #endif | ||
79 | |||
55 | #ifdef CONFIG_ARM_LPAE | 80 | #ifdef CONFIG_ARM_LPAE |
56 | static void cpu_set_reserved_ttbr0(void) | 81 | static void cpu_set_reserved_ttbr0(void) |
57 | { | 82 | { |
58 | unsigned long ttbl = __pa(swapper_pg_dir); | ||
59 | unsigned long ttbh = 0; | ||
60 | |||
61 | /* | 83 | /* |
62 | * Set TTBR0 to swapper_pg_dir which contains only global entries. The | 84 | * Set TTBR0 to swapper_pg_dir which contains only global entries. The |
63 | * ASID is set to 0. | 85 | * ASID is set to 0. |
64 | */ | 86 | */ |
65 | asm volatile( | 87 | cpu_set_ttbr(0, __pa(swapper_pg_dir)); |
66 | " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n" | ||
67 | : | ||
68 | : "r" (ttbl), "r" (ttbh)); | ||
69 | isb(); | 88 | isb(); |
70 | } | 89 | } |
71 | #else | 90 | #else |
@@ -128,7 +147,16 @@ static void flush_context(unsigned int cpu) | |||
128 | asid = 0; | 147 | asid = 0; |
129 | } else { | 148 | } else { |
130 | asid = atomic64_xchg(&per_cpu(active_asids, i), 0); | 149 | asid = atomic64_xchg(&per_cpu(active_asids, i), 0); |
131 | __set_bit(ASID_TO_IDX(asid), asid_map); | 150 | /* |
151 | * If this CPU has already been through a | ||
152 | * rollover, but hasn't run another task in | ||
153 | * the meantime, we must preserve its reserved | ||
154 | * ASID, as this is the only trace we have of | ||
155 | * the process it is still running. | ||
156 | */ | ||
157 | if (asid == 0) | ||
158 | asid = per_cpu(reserved_asids, i); | ||
159 | __set_bit(asid & ~ASID_MASK, asid_map); | ||
132 | } | 160 | } |
133 | per_cpu(reserved_asids, i) = asid; | 161 | per_cpu(reserved_asids, i) = asid; |
134 | } | 162 | } |
@@ -167,17 +195,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |||
167 | /* | 195 | /* |
168 | * Allocate a free ASID. If we can't find one, take a | 196 | * Allocate a free ASID. If we can't find one, take a |
169 | * note of the currently active ASIDs and mark the TLBs | 197 | * note of the currently active ASIDs and mark the TLBs |
170 | * as requiring flushes. | 198 | * as requiring flushes. We always count from ASID #1, |
199 | * as we reserve ASID #0 to switch via TTBR0 and indicate | ||
200 | * rollover events. | ||
171 | */ | 201 | */ |
172 | asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS); | 202 | asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); |
173 | if (asid == NUM_USER_ASIDS) { | 203 | if (asid == NUM_USER_ASIDS) { |
174 | generation = atomic64_add_return(ASID_FIRST_VERSION, | 204 | generation = atomic64_add_return(ASID_FIRST_VERSION, |
175 | &asid_generation); | 205 | &asid_generation); |
176 | flush_context(cpu); | 206 | flush_context(cpu); |
177 | asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS); | 207 | asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); |
178 | } | 208 | } |
179 | __set_bit(asid, asid_map); | 209 | __set_bit(asid, asid_map); |
180 | asid = generation | IDX_TO_ASID(asid); | 210 | asid |= generation; |
181 | cpumask_clear(mm_cpumask(mm)); | 211 | cpumask_clear(mm_cpumask(mm)); |
182 | } | 212 | } |
183 | 213 | ||
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index ef3e0f3aac96..7ec02961dfa0 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -250,7 +250,7 @@ static void __dma_free_buffer(struct page *page, size_t size) | |||
250 | 250 | ||
251 | #ifdef CONFIG_MMU | 251 | #ifdef CONFIG_MMU |
252 | #ifdef CONFIG_HUGETLB_PAGE | 252 | #ifdef CONFIG_HUGETLB_PAGE |
253 | #error ARM Coherent DMA allocator does not (yet) support huge TLB | 253 | #warning ARM Coherent DMA allocator does not (yet) support huge TLB |
254 | #endif | 254 | #endif |
255 | 255 | ||
256 | static void *__alloc_from_contiguous(struct device *dev, size_t size, | 256 | static void *__alloc_from_contiguous(struct device *dev, size_t size, |
@@ -880,10 +880,24 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, | |||
880 | dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); | 880 | dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); |
881 | 881 | ||
882 | /* | 882 | /* |
883 | * Mark the D-cache clean for this page to avoid extra flushing. | 883 | * Mark the D-cache clean for these pages to avoid extra flushing. |
884 | */ | 884 | */ |
885 | if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE) | 885 | if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) { |
886 | set_bit(PG_dcache_clean, &page->flags); | 886 | unsigned long pfn; |
887 | size_t left = size; | ||
888 | |||
889 | pfn = page_to_pfn(page) + off / PAGE_SIZE; | ||
890 | off %= PAGE_SIZE; | ||
891 | if (off) { | ||
892 | pfn++; | ||
893 | left -= PAGE_SIZE - off; | ||
894 | } | ||
895 | while (left >= PAGE_SIZE) { | ||
896 | page = pfn_to_page(pfn++); | ||
897 | set_bit(PG_dcache_clean, &page->flags); | ||
898 | left -= PAGE_SIZE; | ||
899 | } | ||
900 | } | ||
887 | } | 901 | } |
888 | 902 | ||
889 | /** | 903 | /** |
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 5dbf13f954f6..c97f7940cb95 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -491,12 +491,14 @@ do_translation_fault(unsigned long addr, unsigned int fsr, | |||
491 | * Some section permission faults need to be handled gracefully. | 491 | * Some section permission faults need to be handled gracefully. |
492 | * They can happen due to a __{get,put}_user during an oops. | 492 | * They can happen due to a __{get,put}_user during an oops. |
493 | */ | 493 | */ |
494 | #ifndef CONFIG_ARM_LPAE | ||
494 | static int | 495 | static int |
495 | do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | 496 | do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) |
496 | { | 497 | { |
497 | do_bad_area(addr, fsr, regs); | 498 | do_bad_area(addr, fsr, regs); |
498 | return 0; | 499 | return 0; |
499 | } | 500 | } |
501 | #endif /* CONFIG_ARM_LPAE */ | ||
500 | 502 | ||
501 | /* | 503 | /* |
502 | * This abort handler always returns "fault". | 504 | * This abort handler always returns "fault". |
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 32aa5861119f..6d5ba9afb16a 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/highmem.h> | 17 | #include <asm/highmem.h> |
18 | #include <asm/smp_plat.h> | 18 | #include <asm/smp_plat.h> |
19 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
20 | #include <linux/hugetlb.h> | ||
20 | 21 | ||
21 | #include "mm.h" | 22 | #include "mm.h" |
22 | 23 | ||
@@ -168,19 +169,23 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) | |||
168 | * coherent with the kernels mapping. | 169 | * coherent with the kernels mapping. |
169 | */ | 170 | */ |
170 | if (!PageHighMem(page)) { | 171 | if (!PageHighMem(page)) { |
171 | __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); | 172 | size_t page_size = PAGE_SIZE << compound_order(page); |
173 | __cpuc_flush_dcache_area(page_address(page), page_size); | ||
172 | } else { | 174 | } else { |
173 | void *addr; | 175 | unsigned long i; |
174 | |||
175 | if (cache_is_vipt_nonaliasing()) { | 176 | if (cache_is_vipt_nonaliasing()) { |
176 | addr = kmap_atomic(page); | 177 | for (i = 0; i < (1 << compound_order(page)); i++) { |
177 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | 178 | void *addr = kmap_atomic(page); |
178 | kunmap_atomic(addr); | ||
179 | } else { | ||
180 | addr = kmap_high_get(page); | ||
181 | if (addr) { | ||
182 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | 179 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); |
183 | kunmap_high(page); | 180 | kunmap_atomic(addr); |
181 | } | ||
182 | } else { | ||
183 | for (i = 0; i < (1 << compound_order(page)); i++) { | ||
184 | void *addr = kmap_high_get(page); | ||
185 | if (addr) { | ||
186 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | ||
187 | kunmap_high(page); | ||
188 | } | ||
184 | } | 189 | } |
185 | } | 190 | } |
186 | } | 191 | } |
@@ -287,7 +292,7 @@ void flush_dcache_page(struct page *page) | |||
287 | mapping = page_mapping(page); | 292 | mapping = page_mapping(page); |
288 | 293 | ||
289 | if (!cache_ops_need_broadcast() && | 294 | if (!cache_ops_need_broadcast() && |
290 | mapping && !mapping_mapped(mapping)) | 295 | mapping && !page_mapped(page)) |
291 | clear_bit(PG_dcache_clean, &page->flags); | 296 | clear_bit(PG_dcache_clean, &page->flags); |
292 | else { | 297 | else { |
293 | __flush_dcache_page(mapping, page); | 298 | __flush_dcache_page(mapping, page); |
diff --git a/arch/arm/mm/fsr-3level.c b/arch/arm/mm/fsr-3level.c index 05a4e9431836..ab4409a2307e 100644 --- a/arch/arm/mm/fsr-3level.c +++ b/arch/arm/mm/fsr-3level.c | |||
@@ -9,11 +9,11 @@ static struct fsr_info fsr_info[] = { | |||
9 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, | 9 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, |
10 | { do_bad, SIGBUS, 0, "reserved access flag fault" }, | 10 | { do_bad, SIGBUS, 0, "reserved access flag fault" }, |
11 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, | 11 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, |
12 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, | 12 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, |
13 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, | 13 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, |
14 | { do_bad, SIGBUS, 0, "reserved permission fault" }, | 14 | { do_bad, SIGBUS, 0, "reserved permission fault" }, |
15 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, | 15 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, |
16 | { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, | 16 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, |
17 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, | 17 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, |
18 | { do_bad, SIGBUS, 0, "synchronous external abort" }, | 18 | { do_bad, SIGBUS, 0, "synchronous external abort" }, |
19 | { do_bad, SIGBUS, 0, "asynchronous external abort" }, | 19 | { do_bad, SIGBUS, 0, "asynchronous external abort" }, |
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c new file mode 100644 index 000000000000..3d1e4a205b0b --- /dev/null +++ b/arch/arm/mm/hugetlbpage.c | |||
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * arch/arm/mm/hugetlbpage.c | ||
3 | * | ||
4 | * Copyright (C) 2012 ARM Ltd. | ||
5 | * | ||
6 | * Based on arch/x86/include/asm/hugetlb.h and Bill Carson's patches | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | ||
21 | |||
22 | #include <linux/init.h> | ||
23 | #include <linux/fs.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/hugetlb.h> | ||
26 | #include <linux/pagemap.h> | ||
27 | #include <linux/err.h> | ||
28 | #include <linux/sysctl.h> | ||
29 | #include <asm/mman.h> | ||
30 | #include <asm/tlb.h> | ||
31 | #include <asm/tlbflush.h> | ||
32 | #include <asm/pgalloc.h> | ||
33 | |||
34 | /* | ||
35 | * On ARM, huge pages are backed by pmd's rather than pte's, so we do a lot | ||
36 | * of type casting from pmd_t * to pte_t *. | ||
37 | */ | ||
38 | |||
39 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | ||
40 | { | ||
41 | pgd_t *pgd; | ||
42 | pud_t *pud; | ||
43 | pmd_t *pmd = NULL; | ||
44 | |||
45 | pgd = pgd_offset(mm, addr); | ||
46 | if (pgd_present(*pgd)) { | ||
47 | pud = pud_offset(pgd, addr); | ||
48 | if (pud_present(*pud)) | ||
49 | pmd = pmd_offset(pud, addr); | ||
50 | } | ||
51 | |||
52 | return (pte_t *)pmd; | ||
53 | } | ||
54 | |||
55 | struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, | ||
56 | int write) | ||
57 | { | ||
58 | return ERR_PTR(-EINVAL); | ||
59 | } | ||
60 | |||
61 | int pud_huge(pud_t pud) | ||
62 | { | ||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) | ||
67 | { | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | pte_t *huge_pte_alloc(struct mm_struct *mm, | ||
72 | unsigned long addr, unsigned long sz) | ||
73 | { | ||
74 | pgd_t *pgd; | ||
75 | pud_t *pud; | ||
76 | pte_t *pte = NULL; | ||
77 | |||
78 | pgd = pgd_offset(mm, addr); | ||
79 | pud = pud_alloc(mm, pgd, addr); | ||
80 | if (pud) | ||
81 | pte = (pte_t *)pmd_alloc(mm, pud, addr); | ||
82 | |||
83 | return pte; | ||
84 | } | ||
85 | |||
86 | struct page * | ||
87 | follow_huge_pmd(struct mm_struct *mm, unsigned long address, | ||
88 | pmd_t *pmd, int write) | ||
89 | { | ||
90 | struct page *page; | ||
91 | |||
92 | page = pte_page(*(pte_t *)pmd); | ||
93 | if (page) | ||
94 | page += ((address & ~PMD_MASK) >> PAGE_SHIFT); | ||
95 | return page; | ||
96 | } | ||
97 | |||
98 | int pmd_huge(pmd_t pmd) | ||
99 | { | ||
100 | return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); | ||
101 | } | ||
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 9a5cdc01fcdf..6833cbead6cc 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -36,12 +36,13 @@ | |||
36 | 36 | ||
37 | #include "mm.h" | 37 | #include "mm.h" |
38 | 38 | ||
39 | static unsigned long phys_initrd_start __initdata = 0; | 39 | static phys_addr_t phys_initrd_start __initdata = 0; |
40 | static unsigned long phys_initrd_size __initdata = 0; | 40 | static unsigned long phys_initrd_size __initdata = 0; |
41 | 41 | ||
42 | static int __init early_initrd(char *p) | 42 | static int __init early_initrd(char *p) |
43 | { | 43 | { |
44 | unsigned long start, size; | 44 | phys_addr_t start; |
45 | unsigned long size; | ||
45 | char *endp; | 46 | char *endp; |
46 | 47 | ||
47 | start = memparse(p, &endp); | 48 | start = memparse(p, &endp); |
@@ -350,14 +351,14 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) | |||
350 | #ifdef CONFIG_BLK_DEV_INITRD | 351 | #ifdef CONFIG_BLK_DEV_INITRD |
351 | if (phys_initrd_size && | 352 | if (phys_initrd_size && |
352 | !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { | 353 | !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { |
353 | pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n", | 354 | pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", |
354 | phys_initrd_start, phys_initrd_size); | 355 | (u64)phys_initrd_start, phys_initrd_size); |
355 | phys_initrd_start = phys_initrd_size = 0; | 356 | phys_initrd_start = phys_initrd_size = 0; |
356 | } | 357 | } |
357 | if (phys_initrd_size && | 358 | if (phys_initrd_size && |
358 | memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { | 359 | memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { |
359 | pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n", | 360 | pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n", |
360 | phys_initrd_start, phys_initrd_size); | 361 | (u64)phys_initrd_start, phys_initrd_size); |
361 | phys_initrd_start = phys_initrd_size = 0; | 362 | phys_initrd_start = phys_initrd_size = 0; |
362 | } | 363 | } |
363 | if (phys_initrd_size) { | 364 | if (phys_initrd_size) { |
@@ -442,7 +443,7 @@ static inline void | |||
442 | free_memmap(unsigned long start_pfn, unsigned long end_pfn) | 443 | free_memmap(unsigned long start_pfn, unsigned long end_pfn) |
443 | { | 444 | { |
444 | struct page *start_pg, *end_pg; | 445 | struct page *start_pg, *end_pg; |
445 | unsigned long pg, pgend; | 446 | phys_addr_t pg, pgend; |
446 | 447 | ||
447 | /* | 448 | /* |
448 | * Convert start_pfn/end_pfn to a struct page pointer. | 449 | * Convert start_pfn/end_pfn to a struct page pointer. |
@@ -454,8 +455,8 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) | |||
454 | * Convert to physical addresses, and | 455 | * Convert to physical addresses, and |
455 | * round start upwards and end downwards. | 456 | * round start upwards and end downwards. |
456 | */ | 457 | */ |
457 | pg = (unsigned long)PAGE_ALIGN(__pa(start_pg)); | 458 | pg = PAGE_ALIGN(__pa(start_pg)); |
458 | pgend = (unsigned long)__pa(end_pg) & PAGE_MASK; | 459 | pgend = __pa(end_pg) & PAGE_MASK; |
459 | 460 | ||
460 | /* | 461 | /* |
461 | * If there are free pages between these, | 462 | * If there are free pages between these, |
@@ -582,9 +583,6 @@ static void __init free_highpages(void) | |||
582 | */ | 583 | */ |
583 | void __init mem_init(void) | 584 | void __init mem_init(void) |
584 | { | 585 | { |
585 | unsigned long reserved_pages, free_pages; | ||
586 | struct memblock_region *reg; | ||
587 | int i; | ||
588 | #ifdef CONFIG_HAVE_TCM | 586 | #ifdef CONFIG_HAVE_TCM |
589 | /* These pointers are filled in on TCM detection */ | 587 | /* These pointers are filled in on TCM detection */ |
590 | extern u32 dtcm_end; | 588 | extern u32 dtcm_end; |
@@ -595,57 +593,16 @@ void __init mem_init(void) | |||
595 | 593 | ||
596 | /* this will put all unused low memory onto the freelists */ | 594 | /* this will put all unused low memory onto the freelists */ |
597 | free_unused_memmap(&meminfo); | 595 | free_unused_memmap(&meminfo); |
598 | 596 | free_all_bootmem(); | |
599 | totalram_pages += free_all_bootmem(); | ||
600 | 597 | ||
601 | #ifdef CONFIG_SA1111 | 598 | #ifdef CONFIG_SA1111 |
602 | /* now that our DMA memory is actually so designated, we can free it */ | 599 | /* now that our DMA memory is actually so designated, we can free it */ |
603 | free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL); | 600 | free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, -1, NULL); |
604 | #endif | 601 | #endif |
605 | 602 | ||
606 | free_highpages(); | 603 | free_highpages(); |
607 | 604 | ||
608 | reserved_pages = free_pages = 0; | 605 | mem_init_print_info(NULL); |
609 | |||
610 | for_each_bank(i, &meminfo) { | ||
611 | struct membank *bank = &meminfo.bank[i]; | ||
612 | unsigned int pfn1, pfn2; | ||
613 | struct page *page, *end; | ||
614 | |||
615 | pfn1 = bank_pfn_start(bank); | ||
616 | pfn2 = bank_pfn_end(bank); | ||
617 | |||
618 | page = pfn_to_page(pfn1); | ||
619 | end = pfn_to_page(pfn2 - 1) + 1; | ||
620 | |||
621 | do { | ||
622 | if (PageReserved(page)) | ||
623 | reserved_pages++; | ||
624 | else if (!page_count(page)) | ||
625 | free_pages++; | ||
626 | page++; | ||
627 | } while (page < end); | ||
628 | } | ||
629 | |||
630 | /* | ||
631 | * Since our memory may not be contiguous, calculate the | ||
632 | * real number of pages we have in this system | ||
633 | */ | ||
634 | printk(KERN_INFO "Memory:"); | ||
635 | num_physpages = 0; | ||
636 | for_each_memblock(memory, reg) { | ||
637 | unsigned long pages = memblock_region_memory_end_pfn(reg) - | ||
638 | memblock_region_memory_base_pfn(reg); | ||
639 | num_physpages += pages; | ||
640 | printk(" %ldMB", pages >> (20 - PAGE_SHIFT)); | ||
641 | } | ||
642 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); | ||
643 | |||
644 | printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n", | ||
645 | nr_free_pages() << (PAGE_SHIFT-10), | ||
646 | free_pages << (PAGE_SHIFT-10), | ||
647 | reserved_pages << (PAGE_SHIFT-10), | ||
648 | totalhigh_pages << (PAGE_SHIFT-10)); | ||
649 | 606 | ||
650 | #define MLK(b, t) b, t, ((t) - (b)) >> 10 | 607 | #define MLK(b, t) b, t, ((t) - (b)) >> 10 |
651 | #define MLM(b, t) b, t, ((t) - (b)) >> 20 | 608 | #define MLM(b, t) b, t, ((t) - (b)) >> 20 |
@@ -711,7 +668,7 @@ void __init mem_init(void) | |||
711 | BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); | 668 | BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); |
712 | #endif | 669 | #endif |
713 | 670 | ||
714 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { | 671 | if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { |
715 | extern int sysctl_overcommit_memory; | 672 | extern int sysctl_overcommit_memory; |
716 | /* | 673 | /* |
717 | * On a machine this small we won't get | 674 | * On a machine this small we won't get |
@@ -728,12 +685,12 @@ void free_initmem(void) | |||
728 | extern char __tcm_start, __tcm_end; | 685 | extern char __tcm_start, __tcm_end; |
729 | 686 | ||
730 | poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); | 687 | poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); |
731 | free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link"); | 688 | free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); |
732 | #endif | 689 | #endif |
733 | 690 | ||
734 | poison_init_mem(__init_begin, __init_end - __init_begin); | 691 | poison_init_mem(__init_begin, __init_end - __init_begin); |
735 | if (!machine_is_integrator() && !machine_is_cintegrator()) | 692 | if (!machine_is_integrator() && !machine_is_cintegrator()) |
736 | free_initmem_default(0); | 693 | free_initmem_default(-1); |
737 | } | 694 | } |
738 | 695 | ||
739 | #ifdef CONFIG_BLK_DEV_INITRD | 696 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -744,7 +701,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
744 | { | 701 | { |
745 | if (!keep_initrd) { | 702 | if (!keep_initrd) { |
746 | poison_init_mem((void *)start, PAGE_ALIGN(end) - start); | 703 | poison_init_mem((void *)start, PAGE_ALIGN(end) - start); |
747 | free_reserved_area(start, end, 0, "initrd"); | 704 | free_reserved_area((void *)start, (void *)end, -1, "initrd"); |
748 | } | 705 | } |
749 | } | 706 | } |
750 | 707 | ||
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 04d9006eab1f..f123d6eb074b 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -331,10 +331,10 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |||
331 | return (void __iomem *) (offset + addr); | 331 | return (void __iomem *) (offset + addr); |
332 | } | 332 | } |
333 | 333 | ||
334 | void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, | 334 | void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size, |
335 | unsigned int mtype, void *caller) | 335 | unsigned int mtype, void *caller) |
336 | { | 336 | { |
337 | unsigned long last_addr; | 337 | phys_addr_t last_addr; |
338 | unsigned long offset = phys_addr & ~PAGE_MASK; | 338 | unsigned long offset = phys_addr & ~PAGE_MASK; |
339 | unsigned long pfn = __phys_to_pfn(phys_addr); | 339 | unsigned long pfn = __phys_to_pfn(phys_addr); |
340 | 340 | ||
@@ -367,12 +367,12 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | |||
367 | } | 367 | } |
368 | EXPORT_SYMBOL(__arm_ioremap_pfn); | 368 | EXPORT_SYMBOL(__arm_ioremap_pfn); |
369 | 369 | ||
370 | void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, | 370 | void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, |
371 | unsigned int, void *) = | 371 | unsigned int, void *) = |
372 | __arm_ioremap_caller; | 372 | __arm_ioremap_caller; |
373 | 373 | ||
374 | void __iomem * | 374 | void __iomem * |
375 | __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) | 375 | __arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype) |
376 | { | 376 | { |
377 | return arch_ioremap_caller(phys_addr, size, mtype, | 377 | return arch_ioremap_caller(phys_addr, size, mtype, |
378 | __builtin_return_address(0)); | 378 | __builtin_return_address(0)); |
@@ -387,7 +387,7 @@ EXPORT_SYMBOL(__arm_ioremap); | |||
387 | * CONFIG_GENERIC_ALLOCATOR for allocating external memory. | 387 | * CONFIG_GENERIC_ALLOCATOR for allocating external memory. |
388 | */ | 388 | */ |
389 | void __iomem * | 389 | void __iomem * |
390 | __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached) | 390 | __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached) |
391 | { | 391 | { |
392 | unsigned int mtype; | 392 | unsigned int mtype; |
393 | 393 | ||
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index d1d1cefa1f93..d7229d28c7f8 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -675,7 +675,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, | |||
675 | } | 675 | } |
676 | 676 | ||
677 | static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | 677 | static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, |
678 | unsigned long end, unsigned long phys, const struct mem_type *type) | 678 | unsigned long end, phys_addr_t phys, |
679 | const struct mem_type *type) | ||
679 | { | 680 | { |
680 | pud_t *pud = pud_offset(pgd, addr); | 681 | pud_t *pud = pud_offset(pgd, addr); |
681 | unsigned long next; | 682 | unsigned long next; |
@@ -989,27 +990,28 @@ phys_addr_t arm_lowmem_limit __initdata = 0; | |||
989 | void __init sanity_check_meminfo(void) | 990 | void __init sanity_check_meminfo(void) |
990 | { | 991 | { |
991 | int i, j, highmem = 0; | 992 | int i, j, highmem = 0; |
993 | phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; | ||
992 | 994 | ||
993 | for (i = 0, j = 0; i < meminfo.nr_banks; i++) { | 995 | for (i = 0, j = 0; i < meminfo.nr_banks; i++) { |
994 | struct membank *bank = &meminfo.bank[j]; | 996 | struct membank *bank = &meminfo.bank[j]; |
995 | *bank = meminfo.bank[i]; | 997 | phys_addr_t size_limit; |
996 | 998 | ||
997 | if (bank->start > ULONG_MAX) | 999 | *bank = meminfo.bank[i]; |
998 | highmem = 1; | 1000 | size_limit = bank->size; |
999 | 1001 | ||
1000 | #ifdef CONFIG_HIGHMEM | 1002 | if (bank->start >= vmalloc_limit) |
1001 | if (__va(bank->start) >= vmalloc_min || | ||
1002 | __va(bank->start) < (void *)PAGE_OFFSET) | ||
1003 | highmem = 1; | 1003 | highmem = 1; |
1004 | else | ||
1005 | size_limit = vmalloc_limit - bank->start; | ||
1004 | 1006 | ||
1005 | bank->highmem = highmem; | 1007 | bank->highmem = highmem; |
1006 | 1008 | ||
1009 | #ifdef CONFIG_HIGHMEM | ||
1007 | /* | 1010 | /* |
1008 | * Split those memory banks which are partially overlapping | 1011 | * Split those memory banks which are partially overlapping |
1009 | * the vmalloc area greatly simplifying things later. | 1012 | * the vmalloc area greatly simplifying things later. |
1010 | */ | 1013 | */ |
1011 | if (!highmem && __va(bank->start) < vmalloc_min && | 1014 | if (!highmem && bank->size > size_limit) { |
1012 | bank->size > vmalloc_min - __va(bank->start)) { | ||
1013 | if (meminfo.nr_banks >= NR_BANKS) { | 1015 | if (meminfo.nr_banks >= NR_BANKS) { |
1014 | printk(KERN_CRIT "NR_BANKS too low, " | 1016 | printk(KERN_CRIT "NR_BANKS too low, " |
1015 | "ignoring high memory\n"); | 1017 | "ignoring high memory\n"); |
@@ -1018,16 +1020,14 @@ void __init sanity_check_meminfo(void) | |||
1018 | (meminfo.nr_banks - i) * sizeof(*bank)); | 1020 | (meminfo.nr_banks - i) * sizeof(*bank)); |
1019 | meminfo.nr_banks++; | 1021 | meminfo.nr_banks++; |
1020 | i++; | 1022 | i++; |
1021 | bank[1].size -= vmalloc_min - __va(bank->start); | 1023 | bank[1].size -= size_limit; |
1022 | bank[1].start = __pa(vmalloc_min - 1) + 1; | 1024 | bank[1].start = vmalloc_limit; |
1023 | bank[1].highmem = highmem = 1; | 1025 | bank[1].highmem = highmem = 1; |
1024 | j++; | 1026 | j++; |
1025 | } | 1027 | } |
1026 | bank->size = vmalloc_min - __va(bank->start); | 1028 | bank->size = size_limit; |
1027 | } | 1029 | } |
1028 | #else | 1030 | #else |
1029 | bank->highmem = highmem; | ||
1030 | |||
1031 | /* | 1031 | /* |
1032 | * Highmem banks not allowed with !CONFIG_HIGHMEM. | 1032 | * Highmem banks not allowed with !CONFIG_HIGHMEM. |
1033 | */ | 1033 | */ |
@@ -1040,31 +1040,16 @@ void __init sanity_check_meminfo(void) | |||
1040 | } | 1040 | } |
1041 | 1041 | ||
1042 | /* | 1042 | /* |
1043 | * Check whether this memory bank would entirely overlap | ||
1044 | * the vmalloc area. | ||
1045 | */ | ||
1046 | if (__va(bank->start) >= vmalloc_min || | ||
1047 | __va(bank->start) < (void *)PAGE_OFFSET) { | ||
1048 | printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx " | ||
1049 | "(vmalloc region overlap).\n", | ||
1050 | (unsigned long long)bank->start, | ||
1051 | (unsigned long long)bank->start + bank->size - 1); | ||
1052 | continue; | ||
1053 | } | ||
1054 | |||
1055 | /* | ||
1056 | * Check whether this memory bank would partially overlap | 1043 | * Check whether this memory bank would partially overlap |
1057 | * the vmalloc area. | 1044 | * the vmalloc area. |
1058 | */ | 1045 | */ |
1059 | if (__va(bank->start + bank->size - 1) >= vmalloc_min || | 1046 | if (bank->size > size_limit) { |
1060 | __va(bank->start + bank->size - 1) <= __va(bank->start)) { | ||
1061 | unsigned long newsize = vmalloc_min - __va(bank->start); | ||
1062 | printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " | 1047 | printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " |
1063 | "to -%.8llx (vmalloc region overlap).\n", | 1048 | "to -%.8llx (vmalloc region overlap).\n", |
1064 | (unsigned long long)bank->start, | 1049 | (unsigned long long)bank->start, |
1065 | (unsigned long long)bank->start + bank->size - 1, | 1050 | (unsigned long long)bank->start + bank->size - 1, |
1066 | (unsigned long long)bank->start + newsize - 1); | 1051 | (unsigned long long)bank->start + size_limit - 1); |
1067 | bank->size = newsize; | 1052 | bank->size = size_limit; |
1068 | } | 1053 | } |
1069 | #endif | 1054 | #endif |
1070 | if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit) | 1055 | if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit) |
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 5a3aba614a40..1fa50100ab6a 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/pagemap.h> | 8 | #include <linux/pagemap.h> |
9 | #include <linux/io.h> | 9 | #include <linux/io.h> |
10 | #include <linux/memblock.h> | 10 | #include <linux/memblock.h> |
11 | #include <linux/kernel.h> | ||
11 | 12 | ||
12 | #include <asm/cacheflush.h> | 13 | #include <asm/cacheflush.h> |
13 | #include <asm/sections.h> | 14 | #include <asm/sections.h> |
@@ -15,9 +16,260 @@ | |||
15 | #include <asm/setup.h> | 16 | #include <asm/setup.h> |
16 | #include <asm/traps.h> | 17 | #include <asm/traps.h> |
17 | #include <asm/mach/arch.h> | 18 | #include <asm/mach/arch.h> |
19 | #include <asm/cputype.h> | ||
20 | #include <asm/mpu.h> | ||
18 | 21 | ||
19 | #include "mm.h" | 22 | #include "mm.h" |
20 | 23 | ||
24 | #ifdef CONFIG_ARM_MPU | ||
25 | struct mpu_rgn_info mpu_rgn_info; | ||
26 | |||
27 | /* Region number */ | ||
28 | static void rgnr_write(u32 v) | ||
29 | { | ||
30 | asm("mcr p15, 0, %0, c6, c2, 0" : : "r" (v)); | ||
31 | } | ||
32 | |||
33 | /* Data-side / unified region attributes */ | ||
34 | |||
35 | /* Region access control register */ | ||
36 | static void dracr_write(u32 v) | ||
37 | { | ||
38 | asm("mcr p15, 0, %0, c6, c1, 4" : : "r" (v)); | ||
39 | } | ||
40 | |||
41 | /* Region size register */ | ||
42 | static void drsr_write(u32 v) | ||
43 | { | ||
44 | asm("mcr p15, 0, %0, c6, c1, 2" : : "r" (v)); | ||
45 | } | ||
46 | |||
47 | /* Region base address register */ | ||
48 | static void drbar_write(u32 v) | ||
49 | { | ||
50 | asm("mcr p15, 0, %0, c6, c1, 0" : : "r" (v)); | ||
51 | } | ||
52 | |||
53 | static u32 drbar_read(void) | ||
54 | { | ||
55 | u32 v; | ||
56 | asm("mrc p15, 0, %0, c6, c1, 0" : "=r" (v)); | ||
57 | return v; | ||
58 | } | ||
59 | /* Optional instruction-side region attributes */ | ||
60 | |||
61 | /* I-side Region access control register */ | ||
62 | static void iracr_write(u32 v) | ||
63 | { | ||
64 | asm("mcr p15, 0, %0, c6, c1, 5" : : "r" (v)); | ||
65 | } | ||
66 | |||
67 | /* I-side Region size register */ | ||
68 | static void irsr_write(u32 v) | ||
69 | { | ||
70 | asm("mcr p15, 0, %0, c6, c1, 3" : : "r" (v)); | ||
71 | } | ||
72 | |||
73 | /* I-side Region base address register */ | ||
74 | static void irbar_write(u32 v) | ||
75 | { | ||
76 | asm("mcr p15, 0, %0, c6, c1, 1" : : "r" (v)); | ||
77 | } | ||
78 | |||
79 | static unsigned long irbar_read(void) | ||
80 | { | ||
81 | unsigned long v; | ||
82 | asm("mrc p15, 0, %0, c6, c1, 1" : "=r" (v)); | ||
83 | return v; | ||
84 | } | ||
85 | |||
86 | /* MPU initialisation functions */ | ||
87 | void __init sanity_check_meminfo_mpu(void) | ||
88 | { | ||
89 | int i; | ||
90 | struct membank *bank = meminfo.bank; | ||
91 | phys_addr_t phys_offset = PHYS_OFFSET; | ||
92 | phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; | ||
93 | |||
94 | /* Initially only use memory continuous from PHYS_OFFSET */ | ||
95 | if (bank_phys_start(&bank[0]) != phys_offset) | ||
96 | panic("First memory bank must be contiguous from PHYS_OFFSET"); | ||
97 | |||
98 | /* Banks have already been sorted by start address */ | ||
99 | for (i = 1; i < meminfo.nr_banks; i++) { | ||
100 | if (bank[i].start <= bank_phys_end(&bank[0]) && | ||
101 | bank_phys_end(&bank[i]) > bank_phys_end(&bank[0])) { | ||
102 | bank[0].size = bank_phys_end(&bank[i]) - bank[0].start; | ||
103 | } else { | ||
104 | pr_notice("Ignoring RAM after 0x%.8lx. " | ||
105 | "First non-contiguous (ignored) bank start: 0x%.8lx\n", | ||
106 | (unsigned long)bank_phys_end(&bank[0]), | ||
107 | (unsigned long)bank_phys_start(&bank[i])); | ||
108 | break; | ||
109 | } | ||
110 | } | ||
111 | /* All contiguous banks are now merged in to the first bank */ | ||
112 | meminfo.nr_banks = 1; | ||
113 | specified_mem_size = bank[0].size; | ||
114 | |||
115 | /* | ||
116 | * MPU has curious alignment requirements: Size must be power of 2, and | ||
117 | * region start must be aligned to the region size | ||
118 | */ | ||
119 | if (phys_offset != 0) | ||
120 | pr_info("PHYS_OFFSET != 0 => MPU Region size constrained by alignment requirements\n"); | ||
121 | |||
122 | /* | ||
123 | * Maximum aligned region might overflow phys_addr_t if phys_offset is | ||
124 | * 0. Hence we keep everything below 4G until we take the smaller of | ||
125 | * the aligned_region_size and rounded_mem_size, one of which is | ||
126 | * guaranteed to be smaller than the maximum physical address. | ||
127 | */ | ||
128 | aligned_region_size = (phys_offset - 1) ^ (phys_offset); | ||
129 | /* Find the max power-of-two sized region that fits inside our bank */ | ||
130 | rounded_mem_size = (1 << __fls(bank[0].size)) - 1; | ||
131 | |||
132 | /* The actual region size is the smaller of the two */ | ||
133 | aligned_region_size = aligned_region_size < rounded_mem_size | ||
134 | ? aligned_region_size + 1 | ||
135 | : rounded_mem_size + 1; | ||
136 | |||
137 | if (aligned_region_size != specified_mem_size) | ||
138 | pr_warn("Truncating memory from 0x%.8lx to 0x%.8lx (MPU region constraints)", | ||
139 | (unsigned long)specified_mem_size, | ||
140 | (unsigned long)aligned_region_size); | ||
141 | |||
142 | meminfo.bank[0].size = aligned_region_size; | ||
143 | pr_debug("MPU Region from 0x%.8lx size 0x%.8lx (end 0x%.8lx))\n", | ||
144 | (unsigned long)phys_offset, | ||
145 | (unsigned long)aligned_region_size, | ||
146 | (unsigned long)bank_phys_end(&bank[0])); | ||
147 | |||
148 | } | ||
149 | |||
150 | static int mpu_present(void) | ||
151 | { | ||
152 | return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7); | ||
153 | } | ||
154 | |||
155 | static int mpu_max_regions(void) | ||
156 | { | ||
157 | /* | ||
158 | * We don't support a different number of I/D side regions so if we | ||
159 | * have separate instruction and data memory maps then return | ||
160 | * whichever side has a smaller number of supported regions. | ||
161 | */ | ||
162 | u32 dregions, iregions, mpuir; | ||
163 | mpuir = read_cpuid(CPUID_MPUIR); | ||
164 | |||
165 | dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION; | ||
166 | |||
167 | /* Check for separate d-side and i-side memory maps */ | ||
168 | if (mpuir & MPUIR_nU) | ||
169 | iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION; | ||
170 | |||
171 | /* Use the smallest of the two maxima */ | ||
172 | return min(dregions, iregions); | ||
173 | } | ||
174 | |||
175 | static int mpu_iside_independent(void) | ||
176 | { | ||
177 | /* MPUIR.nU specifies whether there is *not* a unified memory map */ | ||
178 | return read_cpuid(CPUID_MPUIR) & MPUIR_nU; | ||
179 | } | ||
180 | |||
181 | static int mpu_min_region_order(void) | ||
182 | { | ||
183 | u32 drbar_result, irbar_result; | ||
184 | /* We've kept a region free for this probing */ | ||
185 | rgnr_write(MPU_PROBE_REGION); | ||
186 | isb(); | ||
187 | /* | ||
188 | * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum | ||
189 | * region order | ||
190 | */ | ||
191 | drbar_write(0xFFFFFFFC); | ||
192 | drbar_result = irbar_result = drbar_read(); | ||
193 | drbar_write(0x0); | ||
194 | /* If the MPU is non-unified, we use the larger of the two minima*/ | ||
195 | if (mpu_iside_independent()) { | ||
196 | irbar_write(0xFFFFFFFC); | ||
197 | irbar_result = irbar_read(); | ||
198 | irbar_write(0x0); | ||
199 | } | ||
200 | isb(); /* Ensure that MPU region operations have completed */ | ||
201 | /* Return whichever result is larger */ | ||
202 | return __ffs(max(drbar_result, irbar_result)); | ||
203 | } | ||
204 | |||
205 | static int mpu_setup_region(unsigned int number, phys_addr_t start, | ||
206 | unsigned int size_order, unsigned int properties) | ||
207 | { | ||
208 | u32 size_data; | ||
209 | |||
210 | /* We kept a region free for probing resolution of MPU regions*/ | ||
211 | if (number > mpu_max_regions() || number == MPU_PROBE_REGION) | ||
212 | return -ENOENT; | ||
213 | |||
214 | if (size_order > 32) | ||
215 | return -ENOMEM; | ||
216 | |||
217 | if (size_order < mpu_min_region_order()) | ||
218 | return -ENOMEM; | ||
219 | |||
220 | /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */ | ||
221 | size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN; | ||
222 | |||
223 | dsb(); /* Ensure all previous data accesses occur with old mappings */ | ||
224 | rgnr_write(number); | ||
225 | isb(); | ||
226 | drbar_write(start); | ||
227 | dracr_write(properties); | ||
228 | isb(); /* Propagate properties before enabling region */ | ||
229 | drsr_write(size_data); | ||
230 | |||
231 | /* Check for independent I-side registers */ | ||
232 | if (mpu_iside_independent()) { | ||
233 | irbar_write(start); | ||
234 | iracr_write(properties); | ||
235 | isb(); | ||
236 | irsr_write(size_data); | ||
237 | } | ||
238 | isb(); | ||
239 | |||
240 | /* Store region info (we treat i/d side the same, so only store d) */ | ||
241 | mpu_rgn_info.rgns[number].dracr = properties; | ||
242 | mpu_rgn_info.rgns[number].drbar = start; | ||
243 | mpu_rgn_info.rgns[number].drsr = size_data; | ||
244 | return 0; | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * Set up default MPU regions, doing nothing if there is no MPU | ||
249 | */ | ||
250 | void __init mpu_setup(void) | ||
251 | { | ||
252 | int region_err; | ||
253 | if (!mpu_present()) | ||
254 | return; | ||
255 | |||
256 | region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET, | ||
257 | ilog2(meminfo.bank[0].size), | ||
258 | MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL); | ||
259 | if (region_err) { | ||
260 | panic("MPU region initialization failure! %d", region_err); | ||
261 | } else { | ||
262 | pr_info("Using ARMv7 PMSA Compliant MPU. " | ||
263 | "Region independence: %s, Max regions: %d\n", | ||
264 | mpu_iside_independent() ? "Yes" : "No", | ||
265 | mpu_max_regions()); | ||
266 | } | ||
267 | } | ||
268 | #else | ||
269 | static void sanity_check_meminfo_mpu(void) {} | ||
270 | static void __init mpu_setup(void) {} | ||
271 | #endif /* CONFIG_ARM_MPU */ | ||
272 | |||
21 | void __init arm_mm_memblock_reserve(void) | 273 | void __init arm_mm_memblock_reserve(void) |
22 | { | 274 | { |
23 | #ifndef CONFIG_CPU_V7M | 275 | #ifndef CONFIG_CPU_V7M |
@@ -37,7 +289,9 @@ void __init arm_mm_memblock_reserve(void) | |||
37 | 289 | ||
38 | void __init sanity_check_meminfo(void) | 290 | void __init sanity_check_meminfo(void) |
39 | { | 291 | { |
40 | phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]); | 292 | phys_addr_t end; |
293 | sanity_check_meminfo_mpu(); | ||
294 | end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]); | ||
41 | high_memory = __va(end - 1) + 1; | 295 | high_memory = __va(end - 1) + 1; |
42 | } | 296 | } |
43 | 297 | ||
@@ -48,6 +302,7 @@ void __init sanity_check_meminfo(void) | |||
48 | void __init paging_init(struct machine_desc *mdesc) | 302 | void __init paging_init(struct machine_desc *mdesc) |
49 | { | 303 | { |
50 | early_trap_init((void *)CONFIG_VECTORS_BASE); | 304 | early_trap_init((void *)CONFIG_VECTORS_BASE); |
305 | mpu_setup(); | ||
51 | bootmem_init(); | 306 | bootmem_init(); |
52 | } | 307 | } |
53 | 308 | ||
@@ -94,16 +349,16 @@ void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, | |||
94 | return __arm_ioremap_pfn(pfn, offset, size, mtype); | 349 | return __arm_ioremap_pfn(pfn, offset, size, mtype); |
95 | } | 350 | } |
96 | 351 | ||
97 | void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, | 352 | void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size, |
98 | unsigned int mtype) | 353 | unsigned int mtype) |
99 | { | 354 | { |
100 | return (void __iomem *)phys_addr; | 355 | return (void __iomem *)phys_addr; |
101 | } | 356 | } |
102 | EXPORT_SYMBOL(__arm_ioremap); | 357 | EXPORT_SYMBOL(__arm_ioremap); |
103 | 358 | ||
104 | void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *); | 359 | void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); |
105 | 360 | ||
106 | void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, | 361 | void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size, |
107 | unsigned int mtype, void *caller) | 362 | unsigned int mtype, void *caller) |
108 | { | 363 | { |
109 | return __arm_ioremap(phys_addr, size, mtype); | 364 | return __arm_ioremap(phys_addr, size, mtype); |
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 919405e20b80..2d1ef87328a1 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
@@ -140,8 +140,10 @@ ENTRY(cpu_v6_set_pte_ext) | |||
140 | ENTRY(cpu_v6_do_suspend) | 140 | ENTRY(cpu_v6_do_suspend) |
141 | stmfd sp!, {r4 - r9, lr} | 141 | stmfd sp!, {r4 - r9, lr} |
142 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID | 142 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID |
143 | #ifdef CONFIG_MMU | ||
143 | mrc p15, 0, r5, c3, c0, 0 @ Domain ID | 144 | mrc p15, 0, r5, c3, c0, 0 @ Domain ID |
144 | mrc p15, 0, r6, c2, c0, 1 @ Translation table base 1 | 145 | mrc p15, 0, r6, c2, c0, 1 @ Translation table base 1 |
146 | #endif | ||
145 | mrc p15, 0, r7, c1, c0, 1 @ auxiliary control register | 147 | mrc p15, 0, r7, c1, c0, 1 @ auxiliary control register |
146 | mrc p15, 0, r8, c1, c0, 2 @ co-processor access control | 148 | mrc p15, 0, r8, c1, c0, 2 @ co-processor access control |
147 | mrc p15, 0, r9, c1, c0, 0 @ control register | 149 | mrc p15, 0, r9, c1, c0, 0 @ control register |
@@ -158,14 +160,16 @@ ENTRY(cpu_v6_do_resume) | |||
158 | mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID | 160 | mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID |
159 | ldmia r0, {r4 - r9} | 161 | ldmia r0, {r4 - r9} |
160 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID | 162 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID |
163 | #ifdef CONFIG_MMU | ||
161 | mcr p15, 0, r5, c3, c0, 0 @ Domain ID | 164 | mcr p15, 0, r5, c3, c0, 0 @ Domain ID |
162 | ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) | 165 | ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) |
163 | ALT_UP(orr r1, r1, #TTB_FLAGS_UP) | 166 | ALT_UP(orr r1, r1, #TTB_FLAGS_UP) |
164 | mcr p15, 0, r1, c2, c0, 0 @ Translation table base 0 | 167 | mcr p15, 0, r1, c2, c0, 0 @ Translation table base 0 |
165 | mcr p15, 0, r6, c2, c0, 1 @ Translation table base 1 | 168 | mcr p15, 0, r6, c2, c0, 1 @ Translation table base 1 |
169 | mcr p15, 0, ip, c2, c0, 2 @ TTB control register | ||
170 | #endif | ||
166 | mcr p15, 0, r7, c1, c0, 1 @ auxiliary control register | 171 | mcr p15, 0, r7, c1, c0, 1 @ auxiliary control register |
167 | mcr p15, 0, r8, c1, c0, 2 @ co-processor access control | 172 | mcr p15, 0, r8, c1, c0, 2 @ co-processor access control |
168 | mcr p15, 0, ip, c2, c0, 2 @ TTB control register | ||
169 | mcr p15, 0, ip, c7, c5, 4 @ ISB | 173 | mcr p15, 0, ip, c7, c5, 4 @ ISB |
170 | mov r0, r9 @ control register | 174 | mov r0, r9 @ control register |
171 | b cpu_resume_mmu | 175 | b cpu_resume_mmu |
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 363027e811d6..5ffe1956c6d9 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S | |||
@@ -39,6 +39,14 @@ | |||
39 | #define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA) | 39 | #define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA) |
40 | #define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S) | 40 | #define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S) |
41 | 41 | ||
42 | #ifndef __ARMEB__ | ||
43 | # define rpgdl r0 | ||
44 | # define rpgdh r1 | ||
45 | #else | ||
46 | # define rpgdl r1 | ||
47 | # define rpgdh r0 | ||
48 | #endif | ||
49 | |||
42 | /* | 50 | /* |
43 | * cpu_v7_switch_mm(pgd_phys, tsk) | 51 | * cpu_v7_switch_mm(pgd_phys, tsk) |
44 | * | 52 | * |
@@ -47,10 +55,10 @@ | |||
47 | */ | 55 | */ |
48 | ENTRY(cpu_v7_switch_mm) | 56 | ENTRY(cpu_v7_switch_mm) |
49 | #ifdef CONFIG_MMU | 57 | #ifdef CONFIG_MMU |
50 | mmid r1, r1 @ get mm->context.id | 58 | mmid r2, r2 |
51 | asid r3, r1 | 59 | asid r2, r2 |
52 | mov r3, r3, lsl #(48 - 32) @ ASID | 60 | orr rpgdh, rpgdh, r2, lsl #(48 - 32) @ upper 32-bits of pgd |
53 | mcrr p15, 0, r0, r3, c2 @ set TTB 0 | 61 | mcrr p15, 0, rpgdl, rpgdh, c2 @ set TTB 0 |
54 | isb | 62 | isb |
55 | #endif | 63 | #endif |
56 | mov pc, lr | 64 | mov pc, lr |
@@ -106,7 +114,8 @@ ENDPROC(cpu_v7_set_pte_ext) | |||
106 | */ | 114 | */ |
107 | .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp | 115 | .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp |
108 | ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address | 116 | ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address |
109 | cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? (branch below) | 117 | mov \tmp, \tmp, lsr #ARCH_PGD_SHIFT |
118 | cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? | ||
110 | mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register | 119 | mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register |
111 | orr \tmp, \tmp, #TTB_EAE | 120 | orr \tmp, \tmp, #TTB_EAE |
112 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP) | 121 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP) |
@@ -114,27 +123,21 @@ ENDPROC(cpu_v7_set_pte_ext) | |||
114 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16) | 123 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16) |
115 | ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16) | 124 | ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16) |
116 | /* | 125 | /* |
117 | * TTBR0/TTBR1 split (PAGE_OFFSET): | 126 | * Only use split TTBRs if PHYS_OFFSET <= PAGE_OFFSET (cmp above), |
118 | * 0x40000000: T0SZ = 2, T1SZ = 0 (not used) | 127 | * otherwise booting secondary CPUs would end up using TTBR1 for the |
119 | * 0x80000000: T0SZ = 0, T1SZ = 1 | 128 | * identity mapping set up in TTBR0. |
120 | * 0xc0000000: T0SZ = 0, T1SZ = 2 | ||
121 | * | ||
122 | * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise | ||
123 | * booting secondary CPUs would end up using TTBR1 for the identity | ||
124 | * mapping set up in TTBR0. | ||
125 | */ | 129 | */ |
126 | bhi 9001f @ PHYS_OFFSET > PAGE_OFFSET? | 130 | orrls \tmp, \tmp, #TTBR1_SIZE @ TTBCR.T1SZ |
127 | orr \tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16) @ TTBCR.T1SZ | 131 | mcr p15, 0, \tmp, c2, c0, 2 @ TTBCR |
128 | #if defined CONFIG_VMSPLIT_2G | 132 | mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits |
129 | /* PAGE_OFFSET == 0x80000000, T1SZ == 1 */ | 133 | mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits |
130 | add \ttbr1, \ttbr1, #1 << 4 @ skip two L1 entries | 134 | addls \ttbr1, \ttbr1, #TTBR1_OFFSET |
131 | #elif defined CONFIG_VMSPLIT_3G | 135 | mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 |
132 | /* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */ | 136 | mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits |
133 | add \ttbr1, \ttbr1, #4096 * (1 + 3) @ only L2 used, skip pgd+3*pmd | 137 | mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits |
134 | #endif | 138 | mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0 |
135 | /* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */ | 139 | mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 |
136 | 9001: mcr p15, 0, \tmp, c2, c0, 2 @ TTB control register | 140 | mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0 |
137 | mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 | ||
138 | .endm | 141 | .endm |
139 | 142 | ||
140 | __CPUINIT | 143 | __CPUINIT |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index e35fec34453e..7ef3ad05df39 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -98,9 +98,11 @@ ENTRY(cpu_v7_do_suspend) | |||
98 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID | 98 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID |
99 | mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID | 99 | mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID |
100 | stmia r0!, {r4 - r5} | 100 | stmia r0!, {r4 - r5} |
101 | #ifdef CONFIG_MMU | ||
101 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID | 102 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID |
102 | mrc p15, 0, r7, c2, c0, 1 @ TTB 1 | 103 | mrc p15, 0, r7, c2, c0, 1 @ TTB 1 |
103 | mrc p15, 0, r11, c2, c0, 2 @ TTB control register | 104 | mrc p15, 0, r11, c2, c0, 2 @ TTB control register |
105 | #endif | ||
104 | mrc p15, 0, r8, c1, c0, 0 @ Control register | 106 | mrc p15, 0, r8, c1, c0, 0 @ Control register |
105 | mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register | 107 | mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register |
106 | mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control | 108 | mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control |
@@ -110,13 +112,14 @@ ENDPROC(cpu_v7_do_suspend) | |||
110 | 112 | ||
111 | ENTRY(cpu_v7_do_resume) | 113 | ENTRY(cpu_v7_do_resume) |
112 | mov ip, #0 | 114 | mov ip, #0 |
113 | mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs | ||
114 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache | 115 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache |
115 | mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID | 116 | mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID |
116 | ldmia r0!, {r4 - r5} | 117 | ldmia r0!, {r4 - r5} |
117 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID | 118 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID |
118 | mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID | 119 | mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID |
119 | ldmia r0, {r6 - r11} | 120 | ldmia r0, {r6 - r11} |
121 | #ifdef CONFIG_MMU | ||
122 | mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs | ||
120 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID | 123 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID |
121 | #ifndef CONFIG_ARM_LPAE | 124 | #ifndef CONFIG_ARM_LPAE |
122 | ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) | 125 | ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) |
@@ -125,14 +128,15 @@ ENTRY(cpu_v7_do_resume) | |||
125 | mcr p15, 0, r1, c2, c0, 0 @ TTB 0 | 128 | mcr p15, 0, r1, c2, c0, 0 @ TTB 0 |
126 | mcr p15, 0, r7, c2, c0, 1 @ TTB 1 | 129 | mcr p15, 0, r7, c2, c0, 1 @ TTB 1 |
127 | mcr p15, 0, r11, c2, c0, 2 @ TTB control register | 130 | mcr p15, 0, r11, c2, c0, 2 @ TTB control register |
128 | mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register | ||
129 | teq r4, r9 @ Is it already set? | ||
130 | mcrne p15, 0, r9, c1, c0, 1 @ No, so write it | ||
131 | mcr p15, 0, r10, c1, c0, 2 @ Co-processor access control | ||
132 | ldr r4, =PRRR @ PRRR | 131 | ldr r4, =PRRR @ PRRR |
133 | ldr r5, =NMRR @ NMRR | 132 | ldr r5, =NMRR @ NMRR |
134 | mcr p15, 0, r4, c10, c2, 0 @ write PRRR | 133 | mcr p15, 0, r4, c10, c2, 0 @ write PRRR |
135 | mcr p15, 0, r5, c10, c2, 1 @ write NMRR | 134 | mcr p15, 0, r5, c10, c2, 1 @ write NMRR |
135 | #endif /* CONFIG_MMU */ | ||
136 | mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register | ||
137 | teq r4, r9 @ Is it already set? | ||
138 | mcrne p15, 0, r9, c1, c0, 1 @ No, so write it | ||
139 | mcr p15, 0, r10, c1, c0, 2 @ Co-processor access control | ||
136 | isb | 140 | isb |
137 | dsb | 141 | dsb |
138 | mov r0, r8 @ control register | 142 | mov r0, r8 @ control register |
@@ -178,7 +182,8 @@ ENDPROC(cpu_pj4b_do_idle) | |||
178 | */ | 182 | */ |
179 | __v7_ca5mp_setup: | 183 | __v7_ca5mp_setup: |
180 | __v7_ca9mp_setup: | 184 | __v7_ca9mp_setup: |
181 | mov r10, #(1 << 0) @ TLB ops broadcasting | 185 | __v7_cr7mp_setup: |
186 | mov r10, #(1 << 0) @ Cache/TLB ops broadcasting | ||
182 | b 1f | 187 | b 1f |
183 | __v7_ca7mp_setup: | 188 | __v7_ca7mp_setup: |
184 | __v7_ca15mp_setup: | 189 | __v7_ca15mp_setup: |
@@ -443,6 +448,16 @@ __v7_pj4b_proc_info: | |||
443 | #endif | 448 | #endif |
444 | 449 | ||
445 | /* | 450 | /* |
451 | * ARM Ltd. Cortex R7 processor. | ||
452 | */ | ||
453 | .type __v7_cr7mp_proc_info, #object | ||
454 | __v7_cr7mp_proc_info: | ||
455 | .long 0x410fc170 | ||
456 | .long 0xff0ffff0 | ||
457 | __v7_proc __v7_cr7mp_setup | ||
458 | .size __v7_cr7mp_proc_info, . - __v7_cr7mp_proc_info | ||
459 | |||
460 | /* | ||
446 | * ARM Ltd. Cortex A7 processor. | 461 | * ARM Ltd. Cortex A7 processor. |
447 | */ | 462 | */ |
448 | .type __v7_ca7mp_proc_info, #object | 463 | .type __v7_ca7mp_proc_info, #object |
diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c index 1ff6a37e893c..a4d1f8de3b5b 100644 --- a/arch/arm/plat-iop/adma.c +++ b/arch/arm/plat-iop/adma.c | |||
@@ -192,12 +192,10 @@ static int __init iop3xx_adma_cap_init(void) | |||
192 | 192 | ||
193 | #ifdef CONFIG_ARCH_IOP32X /* the 32x AAU does not perform zero sum */ | 193 | #ifdef CONFIG_ARCH_IOP32X /* the 32x AAU does not perform zero sum */ |
194 | dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask); | 194 | dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask); |
195 | dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask); | ||
196 | dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask); | 195 | dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask); |
197 | #else | 196 | #else |
198 | dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask); | 197 | dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask); |
199 | dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask); | 198 | dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask); |
200 | dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask); | ||
201 | dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask); | 199 | dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask); |
202 | #endif | 200 | #endif |
203 | 201 | ||
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index c019b7aaf776..c66d163d7a2a 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c | |||
@@ -666,14 +666,9 @@ void __init orion_xor0_init(unsigned long mapbase_low, | |||
666 | orion_xor0_shared_resources[3].start = irq_1; | 666 | orion_xor0_shared_resources[3].start = irq_1; |
667 | orion_xor0_shared_resources[3].end = irq_1; | 667 | orion_xor0_shared_resources[3].end = irq_1; |
668 | 668 | ||
669 | /* | ||
670 | * two engines can't do memset simultaneously, this limitation | ||
671 | * satisfied by removing memset support from one of the engines. | ||
672 | */ | ||
673 | dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[0].cap_mask); | 669 | dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[0].cap_mask); |
674 | dma_cap_set(DMA_XOR, orion_xor0_channels_data[0].cap_mask); | 670 | dma_cap_set(DMA_XOR, orion_xor0_channels_data[0].cap_mask); |
675 | 671 | ||
676 | dma_cap_set(DMA_MEMSET, orion_xor0_channels_data[1].cap_mask); | ||
677 | dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[1].cap_mask); | 672 | dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[1].cap_mask); |
678 | dma_cap_set(DMA_XOR, orion_xor0_channels_data[1].cap_mask); | 673 | dma_cap_set(DMA_XOR, orion_xor0_channels_data[1].cap_mask); |
679 | 674 | ||
@@ -732,14 +727,9 @@ void __init orion_xor1_init(unsigned long mapbase_low, | |||
732 | orion_xor1_shared_resources[3].start = irq_1; | 727 | orion_xor1_shared_resources[3].start = irq_1; |
733 | orion_xor1_shared_resources[3].end = irq_1; | 728 | orion_xor1_shared_resources[3].end = irq_1; |
734 | 729 | ||
735 | /* | ||
736 | * two engines can't do memset simultaneously, this limitation | ||
737 | * satisfied by removing memset support from one of the engines. | ||
738 | */ | ||
739 | dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[0].cap_mask); | 730 | dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[0].cap_mask); |
740 | dma_cap_set(DMA_XOR, orion_xor1_channels_data[0].cap_mask); | 731 | dma_cap_set(DMA_XOR, orion_xor1_channels_data[0].cap_mask); |
741 | 732 | ||
742 | dma_cap_set(DMA_MEMSET, orion_xor1_channels_data[1].cap_mask); | ||
743 | dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[1].cap_mask); | 733 | dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[1].cap_mask); |
744 | dma_cap_set(DMA_XOR, orion_xor1_channels_data[1].cap_mask); | 734 | dma_cap_set(DMA_XOR, orion_xor1_channels_data[1].cap_mask); |
745 | 735 | ||
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index 249fe6333e18..6816192a7561 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c | |||
@@ -426,7 +426,7 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) | |||
426 | if (!(cause & (1 << i))) | 426 | if (!(cause & (1 << i))) |
427 | continue; | 427 | continue; |
428 | 428 | ||
429 | type = irqd_get_trigger_type(irq_get_irq_data(irq)); | 429 | type = irq_get_trigger_type(irq); |
430 | if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { | 430 | if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { |
431 | /* Swap polarity (race with GPIO line) */ | 431 | /* Swap polarity (race with GPIO line) */ |
432 | u32 polarity; | 432 | u32 polarity; |
diff --git a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h index d7e17150028a..7231c8e4975e 100644 --- a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h +++ b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h | |||
@@ -285,7 +285,7 @@ static inline int s3c_cpufreq_addfreq(struct cpufreq_frequency_table *table, | |||
285 | s3c_freq_dbg("%s: { %d = %u kHz }\n", | 285 | s3c_freq_dbg("%s: { %d = %u kHz }\n", |
286 | __func__, index, freq); | 286 | __func__, index, freq); |
287 | 287 | ||
288 | table[index].index = index; | 288 | table[index].driver_data = index; |
289 | table[index].frequency = freq; | 289 | table[index].frequency = freq; |
290 | } | 290 | } |
291 | 291 | ||
diff --git a/arch/arm/plat-versatile/headsmp.S b/arch/arm/plat-versatile/headsmp.S index b178d44e9eaa..2677bc3762d7 100644 --- a/arch/arm/plat-versatile/headsmp.S +++ b/arch/arm/plat-versatile/headsmp.S | |||
@@ -11,8 +11,6 @@ | |||
11 | #include <linux/linkage.h> | 11 | #include <linux/linkage.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | 13 | ||
14 | __INIT | ||
15 | |||
16 | /* | 14 | /* |
17 | * Realview/Versatile Express specific entry point for secondary CPUs. | 15 | * Realview/Versatile Express specific entry point for secondary CPUs. |
18 | * This provides a "holding pen" into which all secondary cores are held | 16 | * This provides a "holding pen" into which all secondary cores are held |