Diffstat (limited to 'arch')
293 files changed, 12016 insertions, 3159 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 7f8f281f2585..97fb7d0365d1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -76,6 +76,15 @@ config OPTPROBES
 	depends on KPROBES && HAVE_OPTPROBES
 	depends on !PREEMPT
 
+config KPROBES_ON_FTRACE
+	def_bool y
+	depends on KPROBES && HAVE_KPROBES_ON_FTRACE
+	depends on DYNAMIC_FTRACE_WITH_REGS
+	help
+	 If function tracer is enabled and the arch supports full
+	 passing of pt_regs to function tracing, then kprobes can
+	 optimize on top of function tracing.
+
 config UPROBES
 	bool "Transparent user-space probes (EXPERIMENTAL)"
 	depends on UPROBE_EVENT && PERF_EVENTS
@@ -158,6 +167,9 @@ config HAVE_KRETPROBES
 config HAVE_OPTPROBES
 	bool
 
+config HAVE_KPROBES_ON_FTRACE
+	bool
+
 config HAVE_NMI_WATCHDOG
 	bool
 #
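With KPROBES_ON_FTRACE enabled, a kprobe placed at the entry of an ftrace-traced function is serviced through the ftrace trampoline (which supplies full pt_regs) instead of a software breakpoint. A minimal sketch of a module registering such a probe with the stock kprobes API; the probed symbol is just an example:

#include <linux/module.h>
#include <linux/kprobes.h>

/* Fires on each entry to the probed function; with KPROBES_ON_FTRACE
 * this can be dispatched via ftrace rather than a breakpoint trap. */
static int pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("%s hit, pc=%lx\n", p->symbol_name,
		instruction_pointer(regs));
	return 0;
}

static struct kprobe kp = {
	.symbol_name = "do_fork",	/* example target */
	.pre_handler = pre_handler,
};

static int __init kp_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");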
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 9d5904cc7712..9b504af2e966 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -5,7 +5,6 @@ config ALPHA
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS
-	select HAVE_IRQ_WORK
 	select HAVE_PCSPKR_PLATFORM
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 14db93e4c8a8..dbc1760f418b 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1139,6 +1139,7 @@ struct rusage32 {
 SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 {
 	struct rusage32 r;
+	cputime_t utime, stime;
 
 	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
 		return -EINVAL;
@@ -1146,8 +1147,9 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 	memset(&r, 0, sizeof(r));
 	switch (who) {
 	case RUSAGE_SELF:
-		jiffies_to_timeval32(current->utime, &r.ru_utime);
-		jiffies_to_timeval32(current->stime, &r.ru_stime);
+		task_cputime(current, &utime, &stime);
+		jiffies_to_timeval32(utime, &r.ru_utime);
+		jiffies_to_timeval32(stime, &r.ru_stime);
 		r.ru_minflt = current->min_flt;
 		r.ru_majflt = current->maj_flt;
 		break;
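This hunk is part of the series that stops callers from reading task->utime/stime directly and funnels them through the task_cputime() accessor, so that full-dynticks cputime accounting can later hook the read path in one place. A sketch of the accessor pattern, simplified from include/linux/sched.h of this era; when no generation-based accounting is configured it reduces to plain field loads:

/* Simplified sketch: fetch both values through one helper instead of
 * open-coding the field reads at every call site. */
static inline void task_cputime(struct task_struct *t,
				cputime_t *utime, cputime_t *stime)
{
	if (utime)
		*utime = t->utime;
	if (stime)
		*stime = t->stime;
}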
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 67874b82a4ed..2f66b2e42490 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -36,7 +36,6 @@ config ARM
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
 	select HAVE_IDE if PCI || ISA || PCMCIA
-	select HAVE_IRQ_WORK
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZMA
 	select HAVE_KERNEL_LZO
@@ -1620,6 +1619,16 @@ config HOTPLUG_CPU
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
 	  can be controlled through /sys/devices/system/cpu.
 
+config ARM_PSCI
+	bool "Support for the ARM Power State Coordination Interface (PSCI)"
+	depends on CPU_V7
+	help
+	  Say Y here if you want Linux to communicate with system firmware
+	  implementing the PSCI specification for CPU-centric power
+	  management operations described in ARM document number ARM DEN
+	  0022A ("Power State Coordination Interface System Software on
+	  ARM processors").
+
 config LOCAL_TIMERS
 	bool "Use local timer interrupts"
 	depends on SMP
@@ -1637,7 +1646,7 @@ config ARCH_NR_GPIO
 	default 355 if ARCH_U8500
 	default 264 if MACH_H4700
 	default 512 if SOC_OMAP5
-	default 288 if ARCH_VT8500
+	default 288 if ARCH_VT8500 || ARCH_SUNXI
 	default 0
 	help
 	  Maximum number of GPIOs in the system.
@@ -1655,6 +1664,9 @@ config HZ
 	default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
 	default 100
 
+config SCHED_HRTICK
+	def_bool HIGH_RES_TIMERS
+
 config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode"
 	depends on CPU_V7 && !CPU_V6 && !CPU_V6K
@@ -2322,3 +2334,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/arm/kvm/Kconfig"
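PSCI exposes CPU power operations (on, off, suspend, migrate) through firmware calls. In this series the kernel-side consumers go through a table of function pointers filled in from the device tree; a hedged sketch of how SMP bring-up might use it. The psci_ops table and its cpu_on hook follow the shape of arch/arm/kernel/psci.c from this series, but treat the exact signatures as illustrative:

#include <linux/errno.h>
#include <asm/psci.h>	/* psci_ops; added alongside CONFIG_ARM_PSCI */

/* Hedged sketch: power on a secondary CPU via the PSCI CPU_ON call. */
static int boot_secondary_via_psci(unsigned long cpu_mpidr,
				   unsigned long secondary_entry)
{
	if (psci_ops.cpu_on)
		return psci_ops.cpu_on(cpu_mpidr, secondary_entry);
	return -ENODEV;
}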
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 30c443c406f3..4bcd2d6b0535 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -252,6 +252,7 @@ core-$(CONFIG_FPE_NWFPE)	+= arch/arm/nwfpe/
 core-$(CONFIG_FPE_FASTFPE)	+= $(FASTFPE_OBJ)
 core-$(CONFIG_VFP)		+= arch/arm/vfp/
 core-$(CONFIG_XEN)		+= arch/arm/xen/
+core-$(CONFIG_KVM_ARM_HOST)	+= arch/arm/kvm/
 
 # If we have a machine-specific directory, then include it in the build.
 core-y				+= arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi
index 63f2fbcfe819..69140ba99f46 100644
--- a/arch/arm/boot/dts/dbx5x0.dtsi
+++ b/arch/arm/boot/dts/dbx5x0.dtsi
@@ -170,10 +170,9 @@
 			gpio-bank = <8>;
 		};
 
-		pinctrl@80157000 {
-			// This is actually the PRCMU base address
-			reg = <0x80157000 0x2000>;
-			compatible = "stericsson,nmk_pinctrl";
+		pinctrl {
+			compatible = "stericsson,nmk-pinctrl";
+			prcm = <&prcmu>;
 		};
 
 		usb@a03e0000 {
@@ -190,9 +189,10 @@
 			interrupts = <0 25 0x4>;
 		};
 
-		prcmu@80157000 {
+		prcmu: prcmu@80157000 {
 			compatible = "stericsson,db8500-prcmu";
 			reg = <0x80157000 0x1000>;
+			reg-names = "prcmu";
 			interrupts = <0 47 0x4>;
 			#address-cells = <1>;
 			#size-cells = <1>;
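The pinctrl node no longer claims the PRCMU register window for itself; it now points at the PRCMU via a phandle. On the driver side, a property like prcm = <&prcmu> is typically resolved with of_parse_phandle(); a minimal sketch, where the property name matches this diff and the surrounding driver code is illustrative:

#include <linux/of.h>

/* Resolve the "prcm" phandle added in this diff; illustrative only. */
static struct device_node *nmk_get_prcm_node(struct device_node *pinctrl_np)
{
	/* Returns the prcmu node with a reference held, or NULL. */
	return of_parse_phandle(pinctrl_np, "prcm", 0);
}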
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts
index 5927a8df5625..6aad34ad9517 100644
--- a/arch/arm/boot/dts/highbank.dts
+++ b/arch/arm/boot/dts/highbank.dts
@@ -37,6 +37,16 @@
 			next-level-cache = <&L2>;
 			clocks = <&a9pll>;
 			clock-names = "cpu";
+			operating-points = <
+				/* kHz    ignored */
+				1300000  1000000
+				1200000  1000000
+				1100000  1000000
+				 800000  1000000
+				 400000  1000000
+				 200000  1000000
+			>;
+			clock-latency = <100000>;
 		};
 
 		cpu@901 {
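The operating-points property is a flat array of (frequency-kHz, microvolt) pairs; the in-tree comment notes the voltage column is ignored on this platform. A hedged sketch of walking such a property with the long-standing generic OF helpers:

#include <linux/of.h>
#include <linux/printk.h>

/* Walk the "operating-points" pairs of a cpu node; sketch only. */
static void dump_opps(struct device_node *cpu_np)
{
	const __be32 *prop;
	int len, i;

	prop = of_get_property(cpu_np, "operating-points", &len);
	if (!prop)
		return;

	/* Each entry is two u32 cells: frequency in kHz, voltage in uV. */
	for (i = 0; i < len / sizeof(u32); i += 2)
		pr_info("OPP: %u kHz, %u uV\n",
			be32_to_cpu(prop[i]), be32_to_cpu(prop[i + 1]));
}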
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 055fca542120..3329719a9412 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -58,10 +58,11 @@
 			#size-cells = <1>;
 			ranges = <0x88000000 0x88000000 0x40000>;
 
-			clock-controller@88000000 {
+			clks: clock-controller@88000000 {
 				compatible = "sirf,prima2-clkc";
 				reg = <0x88000000 0x1000>;
 				interrupts = <3>;
+				#clock-cells = <1>;
 			};
 
 			reset-controller@88010000 {
@@ -85,6 +86,7 @@
 				compatible = "sirf,prima2-memc";
 				reg = <0x90000000 0x10000>;
 				interrupts = <27>;
+				clocks = <&clks 5>;
 			};
 		};
 
@@ -104,6 +106,7 @@
 				compatible = "sirf,prima2-vpp";
 				reg = <0x90020000 0x10000>;
 				interrupts = <31>;
+				clocks = <&clks 35>;
 			};
 		};
 
@@ -117,6 +120,7 @@
 				compatible = "powervr,sgx531";
 				reg = <0x98000000 0x8000000>;
 				interrupts = <6>;
+				clocks = <&clks 32>;
 			};
 		};
 
@@ -130,6 +134,7 @@
 				compatible = "sirf,prima2-video-codec";
 				reg = <0xa0000000 0x8000000>;
 				interrupts = <5>;
+				clocks = <&clks 33>;
 			};
 		};
 
@@ -149,12 +154,14 @@
 				compatible = "sirf,prima2-gps";
 				reg = <0xa8010000 0x10000>;
 				interrupts = <7>;
+				clocks = <&clks 9>;
 			};
 
 			dsp@a9000000 {
 				compatible = "sirf,prima2-dsp";
 				reg = <0xa9000000 0x1000000>;
 				interrupts = <8>;
+				clocks = <&clks 8>;
 			};
 		};
 
@@ -174,12 +181,14 @@
 				compatible = "sirf,prima2-nand";
 				reg = <0xb0030000 0x10000>;
 				interrupts = <41>;
+				clocks = <&clks 26>;
 			};
 
 			audio@b0040000 {
 				compatible = "sirf,prima2-audio";
 				reg = <0xb0040000 0x10000>;
 				interrupts = <35>;
+				clocks = <&clks 27>;
 			};
 
 			uart0: uart@b0050000 {
@@ -187,6 +196,7 @@
 				compatible = "sirf,prima2-uart";
 				reg = <0xb0050000 0x10000>;
 				interrupts = <17>;
+				clocks = <&clks 13>;
 			};
 
 			uart1: uart@b0060000 {
@@ -194,6 +204,7 @@
 				compatible = "sirf,prima2-uart";
 				reg = <0xb0060000 0x10000>;
 				interrupts = <18>;
+				clocks = <&clks 14>;
 			};
 
 			uart2: uart@b0070000 {
@@ -201,6 +212,7 @@
 				compatible = "sirf,prima2-uart";
 				reg = <0xb0070000 0x10000>;
 				interrupts = <19>;
+				clocks = <&clks 15>;
 			};
 
 			usp0: usp@b0080000 {
@@ -208,6 +220,7 @@
 				compatible = "sirf,prima2-usp";
 				reg = <0xb0080000 0x10000>;
 				interrupts = <20>;
+				clocks = <&clks 28>;
 			};
 
 			usp1: usp@b0090000 {
@@ -215,6 +228,7 @@
 				compatible = "sirf,prima2-usp";
 				reg = <0xb0090000 0x10000>;
 				interrupts = <21>;
+				clocks = <&clks 29>;
 			};
 
 			usp2: usp@b00a0000 {
@@ -222,6 +236,7 @@
 				compatible = "sirf,prima2-usp";
 				reg = <0xb00a0000 0x10000>;
 				interrupts = <22>;
+				clocks = <&clks 30>;
 			};
 
 			dmac0: dma-controller@b00b0000 {
@@ -229,6 +244,7 @@
 				compatible = "sirf,prima2-dmac";
 				reg = <0xb00b0000 0x10000>;
 				interrupts = <12>;
+				clocks = <&clks 24>;
 			};
 
 			dmac1: dma-controller@b0160000 {
@@ -236,11 +252,13 @@
 				compatible = "sirf,prima2-dmac";
 				reg = <0xb0160000 0x10000>;
 				interrupts = <13>;
+				clocks = <&clks 25>;
 			};
 
 			vip@b00C0000 {
 				compatible = "sirf,prima2-vip";
 				reg = <0xb00C0000 0x10000>;
+				clocks = <&clks 31>;
 			};
 
 			spi0: spi@b00d0000 {
@@ -248,6 +266,7 @@
 				compatible = "sirf,prima2-spi";
 				reg = <0xb00d0000 0x10000>;
 				interrupts = <15>;
+				clocks = <&clks 19>;
 			};
 
 			spi1: spi@b0170000 {
@@ -255,6 +274,7 @@
 				compatible = "sirf,prima2-spi";
 				reg = <0xb0170000 0x10000>;
 				interrupts = <16>;
+				clocks = <&clks 20>;
 			};
 
 			i2c0: i2c@b00e0000 {
@@ -262,6 +282,7 @@
 				compatible = "sirf,prima2-i2c";
 				reg = <0xb00e0000 0x10000>;
 				interrupts = <24>;
+				clocks = <&clks 17>;
 			};
 
 			i2c1: i2c@b00f0000 {
@@ -269,12 +290,14 @@
 				compatible = "sirf,prima2-i2c";
 				reg = <0xb00f0000 0x10000>;
 				interrupts = <25>;
+				clocks = <&clks 18>;
 			};
 
 			tsc@b0110000 {
 				compatible = "sirf,prima2-tsc";
 				reg = <0xb0110000 0x10000>;
 				interrupts = <33>;
+				clocks = <&clks 16>;
 			};
 
 			gpio: pinctrl@b0120000 {
@@ -507,17 +530,20 @@
 			pwm@b0130000 {
 				compatible = "sirf,prima2-pwm";
 				reg = <0xb0130000 0x10000>;
+				clocks = <&clks 21>;
 			};
 
 			efusesys@b0140000 {
 				compatible = "sirf,prima2-efuse";
 				reg = <0xb0140000 0x10000>;
+				clocks = <&clks 22>;
 			};
 
 			pulsec@b0150000 {
 				compatible = "sirf,prima2-pulsec";
 				reg = <0xb0150000 0x10000>;
 				interrupts = <48>;
+				clocks = <&clks 23>;
 			};
 
 			pci-iobg {
@@ -616,12 +642,14 @@
 				compatible = "chipidea,ci13611a-prima2";
 				reg = <0xb8000000 0x10000>;
 				interrupts = <10>;
+				clocks = <&clks 40>;
 			};
 
 			usb1: usb@b00f0000 {
 				compatible = "chipidea,ci13611a-prima2";
 				reg = <0xb8010000 0x10000>;
 				interrupts = <11>;
+				clocks = <&clks 41>;
 			};
 
 			sata@b00f0000 {
@@ -634,6 +662,7 @@
 				compatible = "sirf,prima2-security";
 				reg = <0xb8030000 0x10000>;
 				interrupts = <42>;
+				clocks = <&clks 7>;
 			};
 		};
 	};
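The clock controller gains a label (clks) and #clock-cells = <1>, so each consumer passes one clock index in its clocks = <&clks N> property. Drivers claim these through the common clock framework; a minimal sketch of a probe path:

#include <linux/clk.h>
#include <linux/platform_device.h>

/* Claim and enable the clock wired up by "clocks = <&clks N>"; sketch. */
static int sirf_uart_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);	/* first "clocks" entry */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* ... map registers, request the IRQ, register the port ... */
	return 0;
}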
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index e61fdd47bd01..f99f60dadf5d 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -16,4 +16,34 @@
 	memory {
 		reg = <0x40000000 0x80000000>;
 	};
+
+	soc {
+		pinctrl@01c20800 {
+			compatible = "allwinner,sun4i-a10-pinctrl";
+			reg = <0x01c20800 0x400>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			uart0_pins_a: uart0@0 {
+				allwinner,pins = "PB22", "PB23";
+				allwinner,function = "uart0";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+
+			uart0_pins_b: uart0@1 {
+				allwinner,pins = "PF2", "PF4";
+				allwinner,function = "uart0";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+
+			uart1_pins_a: uart1@0 {
+				allwinner,pins = "PA10", "PA11";
+				allwinner,function = "uart1";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+		};
+	};
 };
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index 498a091a4ea2..4a1e45d4aace 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -24,6 +24,8 @@
 
 	soc {
 		uart1: uart@01c28400 {
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart1_pins_b>;
 			status = "okay";
 		};
 	};
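pinctrl-0 points at the pin group defined in the SoC .dtsi, and pinctrl-names labels it as the "default" state, which the driver core can apply automatically before probe. A driver can also select it explicitly; a hedged sketch:

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

/* Explicitly bind the "default" pinctrl state named by pinctrl-names;
 * many drivers rely on the core doing this for them instead. Sketch. */
static int uart_apply_default_pins(struct device *dev)
{
	struct pinctrl *p = devm_pinctrl_get_select_default(dev);

	return IS_ERR(p) ? PTR_ERR(p) : 0;
}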
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 59a2d265a98e..e1121890fb29 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -17,4 +17,27 @@
 	memory {
 		reg = <0x40000000 0x20000000>;
 	};
+
+	soc {
+		pinctrl@01c20800 {
+			compatible = "allwinner,sun5i-a13-pinctrl";
+			reg = <0x01c20800 0x400>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			uart1_pins_a: uart1@0 {
+				allwinner,pins = "PE10", "PE11";
+				allwinner,function = "uart1";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+
+			uart1_pins_b: uart1@1 {
+				allwinner,pins = "PG3", "PG4";
+				allwinner,function = "uart1";
+				allwinner,drive = <0>;
+				allwinner,pull = <0>;
+			};
+		};
+	};
 };
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index e59b1d505d6c..19d6cd6f29f9 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -34,8 +34,9 @@
 @ A little glue here to select the correct code below for the ARM CPU
 @ that is being targetted.
 
+#include <linux/linkage.h>
+
 .text
-.code	32
 
 .type	AES_Te,%object
 .align	5
@@ -145,10 +146,8 @@ AES_Te:
 
 @ void AES_encrypt(const unsigned char *in, unsigned char *out,
 @ 		 const AES_KEY *key) {
-.global AES_encrypt
-.type   AES_encrypt,%function
 .align	5
-AES_encrypt:
+ENTRY(AES_encrypt)
 	sub	r3,pc,#8		@ AES_encrypt
 	stmdb   sp!,{r1,r4-r12,lr}
 	mov	r12,r0		@ inp
@@ -239,15 +238,8 @@ AES_encrypt:
 	strb	r6,[r12,#14]
 	strb	r3,[r12,#15]
 #endif
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_encrypt,.-AES_encrypt
+ENDPROC(AES_encrypt)
 
 .type   _armv4_AES_encrypt,%function
 .align	2
@@ -386,10 +378,8 @@ _armv4_AES_encrypt:
 	ldr	pc,[sp],#4		@ pop and return
 .size	_armv4_AES_encrypt,.-_armv4_AES_encrypt
 
-.global private_AES_set_encrypt_key
-.type   private_AES_set_encrypt_key,%function
 .align	5
-private_AES_set_encrypt_key:
+ENTRY(private_AES_set_encrypt_key)
 _armv4_AES_set_encrypt_key:
 	sub	r3,pc,#8		@ AES_set_encrypt_key
 	teq	r0,#0
@@ -658,15 +648,11 @@ _armv4_AES_set_encrypt_key:
 
 .Ldone:	mov	r0,#0
 	ldmia   sp!,{r4-r12,lr}
-.Labrt:	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+.Labrt:	mov	pc,lr
+ENDPROC(private_AES_set_encrypt_key)
 
-.global private_AES_set_decrypt_key
-.type   private_AES_set_decrypt_key,%function
 .align	5
-private_AES_set_decrypt_key:
+ENTRY(private_AES_set_decrypt_key)
 	str	lr,[sp,#-4]!            @ push lr
 #if 0
 	@ kernel does both of these in setkey so optimise this bit out by
@@ -748,15 +734,8 @@ private_AES_set_decrypt_key:
 	bne	.Lmix
 
 	mov	r0,#0
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+ENDPROC(private_AES_set_decrypt_key)
 
 .type	AES_Td,%object
 .align	5
@@ -862,10 +841,8 @@ AES_Td:
 
 @ void AES_decrypt(const unsigned char *in, unsigned char *out,
 @ 		 const AES_KEY *key) {
-.global AES_decrypt
-.type   AES_decrypt,%function
 .align	5
-AES_decrypt:
+ENTRY(AES_decrypt)
 	sub	r3,pc,#8		@ AES_decrypt
 	stmdb   sp!,{r1,r4-r12,lr}
 	mov	r12,r0		@ inp
@@ -956,15 +933,8 @@ AES_decrypt:
 	strb	r6,[r12,#14]
 	strb	r3,[r12,#15]
 #endif
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_decrypt,.-AES_decrypt
+ENDPROC(AES_decrypt)
 
 .type   _armv4_AES_decrypt,%function
 .align	2
@@ -1064,7 +1034,9 @@ _armv4_AES_decrypt:
 	and	r9,lr,r1,lsr#8
 
 	ldrb	r7,[r10,r7]		@ Td4[s1>>0]
-	ldrb	r1,[r10,r1,lsr#24]	@ Td4[s1>>24]
+ ARM(	ldrb	r1,[r10,r1,lsr#24]	)	@ Td4[s1>>24]
+ THUMB(	add	r1,r10,r1,lsr#24	)	@ Td4[s1>>24]
+ THUMB(	ldrb	r1,[r1]			)
 	ldrb	r8,[r10,r8]		@ Td4[s1>>16]
 	eor	r0,r7,r0,lsl#24
 	ldrb	r9,[r10,r9]		@ Td4[s1>>8]
@@ -1077,7 +1049,9 @@ _armv4_AES_decrypt:
 	ldrb	r8,[r10,r8]		@ Td4[s2>>0]
 	and	r9,lr,r2,lsr#16
 
-	ldrb	r2,[r10,r2,lsr#24]	@ Td4[s2>>24]
+ ARM(	ldrb	r2,[r10,r2,lsr#24]	)	@ Td4[s2>>24]
+ THUMB(	add	r2,r10,r2,lsr#24	)	@ Td4[s2>>24]
+ THUMB(	ldrb	r2,[r2]			)
 	eor	r0,r0,r7,lsl#8
 	ldrb	r9,[r10,r9]		@ Td4[s2>>16]
 	eor	r1,r8,r1,lsl#16
@@ -1090,7 +1064,9 @@ _armv4_AES_decrypt:
 	and	r9,lr,r3		@ i2
 
 	ldrb	r9,[r10,r9]		@ Td4[s3>>0]
-	ldrb	r3,[r10,r3,lsr#24]	@ Td4[s3>>24]
+ ARM(	ldrb	r3,[r10,r3,lsr#24]	)	@ Td4[s3>>24]
+ THUMB(	add	r3,r10,r3,lsr#24	)	@ Td4[s3>>24]
+ THUMB(	ldrb	r3,[r3]			)
 	eor	r0,r0,r7,lsl#16
 	ldr	r7,[r11,#0]
 	eor	r1,r1,r8,lsl#8
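The ARM()/THUMB() wrappers emit a line only for the instruction set the kernel is built for, which is how the Td4 byte loads above get a two-instruction Thumb-2 variant (Thumb-2 lacks the scaled-register ldrb addressing mode used in the ARM encoding). Their definitions are roughly this, paraphrasing arch/arm/include/asm/unified.h; check the real header for the exact spelling:

/* Paraphrased from asm/unified.h: each macro keeps its argument only
 * when building for the corresponding instruction set. */
#ifdef CONFIG_THUMB2_KERNEL
#define ARM(x...)
#define THUMB(x...)	x
#else
#define ARM(x...)	x
#define THUMB(x...)
#endif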
diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S
index 7050ab133b9d..92c6eed7aac9 100644
--- a/arch/arm/crypto/sha1-armv4-large.S
+++ b/arch/arm/crypto/sha1-armv4-large.S
@@ -51,13 +51,12 @@
 @ Profiler-assisted and platform-specific optimization resulted in 10%
 @ improvement on Cortex A8 core and 12.2 cycles per byte.
 
-.text
+#include <linux/linkage.h>
 
-.global	sha1_block_data_order
-.type	sha1_block_data_order,%function
+.text
 
 .align	2
-sha1_block_data_order:
+ENTRY(sha1_block_data_order)
 	stmdb	sp!,{r4-r12,lr}
 	add	r2,r1,r2,lsl#6	@ r2 to point at the end of r1
 	ldmia	r0,{r3,r4,r5,r6,r7}
@@ -194,7 +193,7 @@ sha1_block_data_order:
 	eor	r10,r10,r7,ror#2		@ F_00_19(B,C,D)
 	str	r9,[r14,#-4]!
 	add	r3,r3,r10			@ E+=F_00_19(B,C,D)
-	teq	r14,sp
+	cmp	r14,sp
 	bne	.L_00_15		@ [((11+4)*5+2)*3]
 #if __ARM_ARCH__<7
 	ldrb	r10,[r1,#2]
@@ -374,7 +373,9 @@ sha1_block_data_order:
 	@ F_xx_xx
 	add	r3,r3,r9			@ E+=X[i]
 	add	r3,r3,r10			@ E+=F_20_39(B,C,D)
-	teq	r14,sp			@ preserve carry
+ ARM(	teq	r14,sp		)	@ preserve carry
+ THUMB(	mov	r11,sp		)
+ THUMB(	teq	r14,r11		)	@ preserve carry
 	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
 	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes
 
@@ -466,7 +467,7 @@ sha1_block_data_order:
 	add	r3,r3,r9			@ E+=X[i]
 	add	r3,r3,r10			@ E+=F_40_59(B,C,D)
 	add	r3,r3,r11,ror#2
-	teq	r14,sp
+	cmp	r14,sp
 	bne	.L_40_59		@ [+((12+5)*5+2)*4]
 
 	ldr	r8,.LK_60_79
@@ -485,19 +486,12 @@ sha1_block_data_order:
 	teq	r1,r2
 	bne	.Lloop			@ [+18], total 1307
 
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia	sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
 .align	2
.LK_00_19:	.word	0x5a827999
.LK_20_39:	.word	0x6ed9eba1
.LK_40_59:	.word	0x8f1bbcdc
.LK_60_79:	.word	0xca62c1d6
-.size	sha1_block_data_order,.-sha1_block_data_order
+ENDPROC(sha1_block_data_order)
 .asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
 .align	2
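ENTRY()/ENDPROC() from <linux/linkage.h> replace the hand-written .global/.type/.size triples and also let the build emit the symbol annotations Thumb-2 linking needs. Roughly, paraphrasing the generic linkage.h (exact definitions vary by architecture and kernel version):

/* Paraphrased from include/linux/linkage.h; details differ per arch. */
#define END(name) \
	.size name, .-name

#define ENTRY(name) \
	.globl name; \
	ALIGN; \
	name:

#define ENDPROC(name) \
	.type name, %function; \
	END(name)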
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index eb87200aa4b5..05ee9eebad6b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -246,18 +246,14 @@
  *
  * This macro is intended for forcing the CPU into SVC mode at boot time.
  * you cannot return to the original mode.
- *
- * Beware, it also clobers LR.
  */
 .macro safe_svcmode_maskall reg:req
 #if __LINUX_ARM_ARCH__ >= 6
 	mrs	\reg , cpsr
-	mov	lr , \reg
-	and	lr , lr , #MODE_MASK
-	cmp	lr , #HYP_MODE
-	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT
+	eor	\reg, \reg, #HYP_MODE
+	tst	\reg, #MODE_MASK
 	bic	\reg , \reg , #MODE_MASK
-	orr	\reg , \reg , #SVC_MODE
+	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 	bne	1f
 	orr	\reg, \reg, #PSR_A_BIT
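The rewritten sequence no longer clobbers LR: XORing the CPSR with HYP_MODE zeroes the mode field exactly when the CPU is in HYP mode, so a single tst against MODE_MASK sets the Z flag that the later bne consumes, using only the flags as scratch state. In C terms:

#include <asm/ptrace.h>	/* HYP_MODE, MODE_MASK */

/* C model of the eor/tst test above: true iff the current mode is HYP.
 * Only condition flags are consumed; no scratch register is needed. */
static inline int cpsr_in_hyp_mode(unsigned long cpsr)
{
	return ((cpsr ^ HYP_MODE) & MODE_MASK) == 0;
}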
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index a59dcb5ab5fc..ad41ec2471e8 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -64,6 +64,24 @@ extern unsigned int processor_id;
 #define read_cpuid_ext(reg)	0
 #endif
 
+#define ARM_CPU_IMP_ARM			0x41
+#define ARM_CPU_IMP_INTEL		0x69
+
+#define ARM_CPU_PART_ARM1136		0xB360
+#define ARM_CPU_PART_ARM1156		0xB560
+#define ARM_CPU_PART_ARM1176		0xB760
+#define ARM_CPU_PART_ARM11MPCORE	0xB020
+#define ARM_CPU_PART_CORTEX_A8		0xC080
+#define ARM_CPU_PART_CORTEX_A9		0xC090
+#define ARM_CPU_PART_CORTEX_A5		0xC050
+#define ARM_CPU_PART_CORTEX_A15		0xC0F0
+#define ARM_CPU_PART_CORTEX_A7		0xC070
+
+#define ARM_CPU_XSCALE_ARCH_MASK	0xe000
+#define ARM_CPU_XSCALE_ARCH_V1		0x2000
+#define ARM_CPU_XSCALE_ARCH_V2		0x4000
+#define ARM_CPU_XSCALE_ARCH_V3		0x6000
+
 /*
  * The CPU ID never changes at run time, so we might as well tell the
  * compiler that it's constant.  Use this function to read the CPU ID
@@ -74,6 +92,21 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
 	return read_cpuid(CPUID_ID);
 }
 
+static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
+{
+	return (read_cpuid_id() & 0xFF000000) >> 24;
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
+{
+	return read_cpuid_id() & 0xFFF0;
+}
+
+static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
+{
+	return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK;
+}
+
 static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
 {
 	return read_cpuid(CPUID_CACHETYPE);
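These helpers replace open-coded masking of the MIDR wherever code needs to key off a particular implementor or core. A typical caller, for illustration:

#include <linux/types.h>
#include <asm/cputype.h>

/* Example check for a Cortex-A15 core using the new accessors. */
static bool cpu_is_cortex_a15(void)
{
	return read_cpuid_implementor() == ARM_CPU_IMP_ARM &&
	       read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15;
}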
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
index f2e5cad3f306..2381199acb7d 100644
--- a/arch/arm/include/asm/cti.h
+++ b/arch/arm/include/asm/cti.h
@@ -2,6 +2,7 @@
 #define __ASMARM_CTI_H
 
 #include	<asm/io.h>
+#include	<asm/hardware/coresight.h>
 
 /* The registers' definition is from section 3.2 of
  * Embedded Cross Trigger Revision: r0p0
@@ -35,11 +36,6 @@
 #define		LOCKACCESS		0xFB0
 #define		LOCKSTATUS		0xFB4
 
-/* write this value to LOCKACCESS will unlock the module, and
- * other value will lock the module
- */
-#define		LOCKCODE		0xC5ACCE55
-
 /**
  * struct cti - cross trigger interface struct
  * @base: mapped virtual address for the cti base
@@ -146,7 +142,7 @@ static inline void cti_irq_ack(struct cti *cti)
  */
 static inline void cti_unlock(struct cti *cti)
 {
-	__raw_writel(LOCKCODE, cti->base + LOCKACCESS);
+	__raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);
 }
 
 /**
@@ -158,6 +154,6 @@ static inline void cti_unlock(struct cti *cti)
 */
 static inline void cti_lock(struct cti *cti)
 {
-	__raw_writel(~LOCKCODE, cti->base + LOCKACCESS);
+	__raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);
 }
 #endif
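CTI and the other CoreSight blocks share one lock-access protocol: writing the key 0xc5acce55 to the component's lock access register unlocks it for programming, and any other value relocks it, which is why the duplicated LOCKCODE define gives way to the shared CS_LAR_KEY. A hedged sketch of the calling pattern built on the inlines in this header (cti_enable is assumed to be the neighbouring inline; check cti.h):

/* Typical unlock-program-relock sequence around a CTI block; sketch. */
static void cti_program(struct cti *cti)
{
	cti_unlock(cti);	/* write CS_LAR_KEY to LOCKACCESS */
	cti_enable(cti);	/* ... program trigger routing ... */
	cti_lock(cti);		/* any non-key write relocks */
}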
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 7ecd793b8f5a..0cf7a6b842ff 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -36,7 +36,7 @@
 /* CoreSight Component Registers */
 #define CSCR_CLASS	0xff4
 
-#define UNLOCK_MAGIC	0xc5acce55
+#define CS_LAR_KEY	0xc5acce55
 
 /* ETM control register, "ETM Architecture", 3.3.1 */
 #define ETMR_CTRL	0
@@ -147,11 +147,11 @@
 
 #define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
 #define etm_unlock(t) \
-	do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+	do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
 
 #define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
 #define etb_unlock(t) \
-	do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+	do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
 
 #endif /* __ASM_HARDWARE_CORESIGHT_H */
 
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index 01169dd723f1..eef55ea9ef00 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -85,6 +85,9 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_DSCR_HDBGEN		(1 << 14)
 #define ARM_DSCR_MDBGEN		(1 << 15)
 
+/* OSLSR os lock model bits */
+#define ARM_OSLSR_OSLM0		(1 << 0)
+
 /* opcode2 numbers for the co-processor instructions. */
 #define ARM_OP2_BVR		4
 #define ARM_OP2_BCR		5
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
index bf863edb517d..1a66f907e5cc 100644
--- a/arch/arm/include/asm/idmap.h
+++ b/arch/arm/include/asm/idmap.h
@@ -8,6 +8,7 @@
 #define __idmap __section(.idmap.text) noinline notrace
 
 extern pgd_t *idmap_pgd;
+extern pgd_t *hyp_pgd;
 
 void setup_mm_for_reboot(void);
 
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
new file mode 100644
index 000000000000..7c3d813e15df
--- /dev/null
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_ARM_H__
+#define __ARM_KVM_ARM_H__
+
+#include <linux/types.h>
+
+/* Hyp Configuration Register (HCR) bits */
+#define HCR_TGE		(1 << 27)
+#define HCR_TVM		(1 << 26)
+#define HCR_TTLB	(1 << 25)
+#define HCR_TPU		(1 << 24)
+#define HCR_TPC		(1 << 23)
+#define HCR_TSW		(1 << 22)
+#define HCR_TAC		(1 << 21)
+#define HCR_TIDCP	(1 << 20)
+#define HCR_TSC		(1 << 19)
+#define HCR_TID3	(1 << 18)
+#define HCR_TID2	(1 << 17)
+#define HCR_TID1	(1 << 16)
+#define HCR_TID0	(1 << 15)
+#define HCR_TWE		(1 << 14)
+#define HCR_TWI		(1 << 13)
+#define HCR_DC		(1 << 12)
+#define HCR_BSU		(3 << 10)
+#define HCR_BSU_IS	(1 << 10)
+#define HCR_FB		(1 << 9)
+#define HCR_VA		(1 << 8)
+#define HCR_VI		(1 << 7)
+#define HCR_VF		(1 << 6)
+#define HCR_AMO		(1 << 5)
+#define HCR_IMO		(1 << 4)
+#define HCR_FMO		(1 << 3)
+#define HCR_PTW		(1 << 2)
+#define HCR_SWIO	(1 << 1)
+#define HCR_VM		1
+
+/*
+ * The bits we set in HCR:
+ * TAC:		Trap ACTLR
+ * TSC:		Trap SMC
+ * TSW:		Trap cache operations by set/way
+ * TWI:		Trap WFI
+ * TIDCP:	Trap L2CTLR/L2ECTLR
+ * BSU_IS:	Upgrade barriers to the inner shareable domain
+ * FB:		Force broadcast of all maintainance operations
+ * AMO:		Override CPSR.A and enable signaling with VA
+ * IMO:		Override CPSR.I and enable signaling with VI
+ * FMO:		Override CPSR.F and enable signaling with VF
+ * SWIO:	Turn set/way invalidates into set/way clean+invalidate
+ */
+#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+			HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+			HCR_SWIO | HCR_TIDCP)
+#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+
+/* System Control Register (SCTLR) bits */
+#define SCTLR_TE	(1 << 30)
+#define SCTLR_EE	(1 << 25)
+#define SCTLR_V		(1 << 13)
+
+/* Hyp System Control Register (HSCTLR) bits */
+#define HSCTLR_TE	(1 << 30)
+#define HSCTLR_EE	(1 << 25)
+#define HSCTLR_FI	(1 << 21)
+#define HSCTLR_WXN	(1 << 19)
+#define HSCTLR_I	(1 << 12)
+#define HSCTLR_C	(1 << 2)
+#define HSCTLR_A	(1 << 1)
+#define HSCTLR_M	1
+#define HSCTLR_MASK	(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
+			 HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
+
+/* TTBCR and HTCR Registers bits */
+#define TTBCR_EAE	(1 << 31)
+#define TTBCR_IMP	(1 << 30)
+#define TTBCR_SH1	(3 << 28)
+#define TTBCR_ORGN1	(3 << 26)
+#define TTBCR_IRGN1	(3 << 24)
+#define TTBCR_EPD1	(1 << 23)
+#define TTBCR_A1	(1 << 22)
+#define TTBCR_T1SZ	(3 << 16)
+#define TTBCR_SH0	(3 << 12)
+#define TTBCR_ORGN0	(3 << 10)
+#define TTBCR_IRGN0	(3 << 8)
+#define TTBCR_EPD0	(1 << 7)
+#define TTBCR_T0SZ	3
+#define HTCR_MASK	(TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
+
+/* Hyp System Trap Register */
+#define HSTR_T(x)	(1 << x)
+#define HSTR_TTEE	(1 << 16)
+#define HSTR_TJDBX	(1 << 17)
+
+/* Hyp Coprocessor Trap Register */
+#define HCPTR_TCP(x)	(1 << x)
+#define HCPTR_TCP_MASK	(0x3fff)
+#define HCPTR_TASE	(1 << 15)
+#define HCPTR_TTA	(1 << 20)
+#define HCPTR_TCPAC	(1 << 31)
+
+/* Hyp Debug Configuration Register bits */
+#define HDCR_TDRA	(1 << 11)
+#define HDCR_TDOSA	(1 << 10)
+#define HDCR_TDA	(1 << 9)
+#define HDCR_TDE	(1 << 8)
+#define HDCR_HPME	(1 << 7)
+#define HDCR_TPM	(1 << 6)
+#define HDCR_TPMCR	(1 << 5)
+#define HDCR_HPMN_MASK	(0x1F)
+
+/*
+ * The architecture supports 40-bit IPA as input to the 2nd stage translations
+ * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
+ * space.
+ */
+#define KVM_PHYS_SHIFT	(40)
+#define KVM_PHYS_SIZE	(1ULL << KVM_PHYS_SHIFT)
+#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1ULL)
+#define PTRS_PER_S2_PGD	(1ULL << (KVM_PHYS_SHIFT - 30))
+#define S2_PGD_ORDER	get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+#define S2_PGD_SIZE	(1 << S2_PGD_ORDER)
+
+/* Virtualization Translation Control Register (VTCR) bits */
+#define VTCR_SH0	(3 << 12)
+#define VTCR_ORGN0	(3 << 10)
+#define VTCR_IRGN0	(3 << 8)
+#define VTCR_SL0	(3 << 6)
+#define VTCR_S		(1 << 4)
+#define VTCR_T0SZ	(0xf)
+#define VTCR_MASK	(VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
+			 VTCR_S | VTCR_T0SZ)
+#define VTCR_HTCR_SH	(VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
+#define VTCR_SL_L2	(0 << 6)	/* Starting-level: 2 */
+#define VTCR_SL_L1	(1 << 6)	/* Starting-level: 1 */
+#define KVM_VTCR_SL0	VTCR_SL_L1
+/* stage-2 input address range defined as 2^(32-T0SZ) */
+#define KVM_T0SZ	(32 - KVM_PHYS_SHIFT)
+#define KVM_VTCR_T0SZ	(KVM_T0SZ & VTCR_T0SZ)
+#define KVM_VTCR_S	((KVM_VTCR_T0SZ << 1) & VTCR_S)
+
+/* Virtualization Translation Table Base Register (VTTBR) bits */
+#if KVM_VTCR_SL0 == VTCR_SL_L2	/* see ARM DDI 0406C: B4-1720 */
+#define VTTBR_X		(14 - KVM_T0SZ)
+#else
+#define VTTBR_X		(5 - KVM_T0SZ)
+#endif
+#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
+#define VTTBR_BADDR_MASK  (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT  (48LLU)
+#define VTTBR_VMID_MASK	  (0xffLLU << VTTBR_VMID_SHIFT)
+
+/* Hyp Syndrome Register (HSR) bits */
+#define HSR_EC_SHIFT	(26)
+#define HSR_EC		(0x3fU << HSR_EC_SHIFT)
+#define HSR_IL		(1U << 25)
+#define HSR_ISS		(HSR_IL - 1)
+#define HSR_ISV_SHIFT	(24)
+#define HSR_ISV		(1U << HSR_ISV_SHIFT)
+#define HSR_SRT_SHIFT	(16)
+#define HSR_SRT_MASK	(0xf << HSR_SRT_SHIFT)
+#define HSR_FSC		(0x3f)
+#define HSR_FSC_TYPE	(0x3c)
+#define HSR_SSE		(1 << 21)
+#define HSR_WNR		(1 << 6)
+#define HSR_CV_SHIFT	(24)
+#define HSR_CV		(1U << HSR_CV_SHIFT)
+#define HSR_COND_SHIFT	(20)
+#define HSR_COND	(0xfU << HSR_COND_SHIFT)
+
+#define FSC_FAULT	(0x04)
+#define FSC_PERM	(0x0c)
+
+/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
+#define HPFAR_MASK	(~0xf)
+
+#define HSR_EC_UNKNOWN	(0x00)
+#define HSR_EC_WFI	(0x01)
+#define HSR_EC_CP15_32	(0x03)
+#define HSR_EC_CP15_64	(0x04)
+#define HSR_EC_CP14_MR	(0x05)
+#define HSR_EC_CP14_LS	(0x06)
+#define HSR_EC_CP_0_13	(0x07)
+#define HSR_EC_CP10_ID	(0x08)
+#define HSR_EC_JAZELLE	(0x09)
+#define HSR_EC_BXJ	(0x0A)
+#define HSR_EC_CP14_64	(0x0C)
+#define HSR_EC_SVC_HYP	(0x11)
+#define HSR_EC_HVC	(0x12)
+#define HSR_EC_SMC	(0x13)
+#define HSR_EC_IABT	(0x20)
+#define HSR_EC_IABT_HYP	(0x21)
+#define HSR_EC_DABT	(0x24)
+#define HSR_EC_DABT_HYP	(0x25)
+
+#define HSR_HVC_IMM_MASK	((1UL << 16) - 1)
+
+#endif /* __ARM_KVM_ARM_H__ */
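The HSR layout encoded here drives the guest exit path: the exception class field selects a handler, and the ISS carries the per-class details. A hedged sketch of decoding it with these defines (the handler structure is illustrative, not the series' actual dispatcher):

#include <asm/kvm_arm.h>

/* Decode the Hyp Syndrome Register after a guest exit; illustrative. */
static int classify_exit(unsigned long hsr)
{
	unsigned long ec = (hsr & HSR_EC) >> HSR_EC_SHIFT;
	unsigned long iss = hsr & HSR_ISS;

	switch (ec) {
	case HSR_EC_HVC:	/* guest executed HVC */
		return iss & HSR_HVC_IMM_MASK;	/* 16-bit immediate */
	case HSR_EC_DABT:	/* stage-2 data abort */
		return hsr & HSR_FSC_TYPE;	/* FSC_FAULT or FSC_PERM */
	default:
		return -1;
	}
}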
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
new file mode 100644
index 000000000000..5e06e8177784
--- /dev/null
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_ASM_H__
+#define __ARM_KVM_ASM_H__
+
+/* 0 is reserved as an invalid value. */
+#define c0_MPIDR	1	/* MultiProcessor ID Register */
+#define c0_CSSELR	2	/* Cache Size Selection Register */
+#define c1_SCTLR	3	/* System Control Register */
+#define c1_ACTLR	4	/* Auxilliary Control Register */
+#define c1_CPACR	5	/* Coprocessor Access Control */
+#define c2_TTBR0	6	/* Translation Table Base Register 0 */
+#define c2_TTBR0_high	7	/* TTBR0 top 32 bits */
+#define c2_TTBR1	8	/* Translation Table Base Register 1 */
+#define c2_TTBR1_high	9	/* TTBR1 top 32 bits */
+#define c2_TTBCR	10	/* Translation Table Base Control R. */
+#define c3_DACR		11	/* Domain Access Control Register */
+#define c5_DFSR		12	/* Data Fault Status Register */
+#define c5_IFSR		13	/* Instruction Fault Status Register */
+#define c5_ADFSR	14	/* Auxilary Data Fault Status R */
+#define c5_AIFSR	15	/* Auxilary Instrunction Fault Status R */
+#define c6_DFAR		16	/* Data Fault Address Register */
+#define c6_IFAR		17	/* Instruction Fault Address Register */
+#define c9_L2CTLR	18	/* Cortex A15 L2 Control Register */
+#define c10_PRRR	19	/* Primary Region Remap Register */
+#define c10_NMRR	20	/* Normal Memory Remap Register */
+#define c12_VBAR	21	/* Vector Base Address Register */
+#define c13_CID		22	/* Context ID Register */
+#define c13_TID_URW	23	/* Thread ID, User R/W */
+#define c13_TID_URO	24	/* Thread ID, User R/O */
+#define c13_TID_PRIV	25	/* Thread ID, Privileged */
+#define NR_CP15_REGS	26	/* Number of regs (incl. invalid) */
+
+#define ARM_EXCEPTION_RESET	  0
+#define ARM_EXCEPTION_UNDEFINED   1
+#define ARM_EXCEPTION_SOFTWARE    2
+#define ARM_EXCEPTION_PREF_ABORT  3
+#define ARM_EXCEPTION_DATA_ABORT  4
+#define ARM_EXCEPTION_IRQ	  5
+#define ARM_EXCEPTION_FIQ	  6
+#define ARM_EXCEPTION_HVC	  7
+
+#ifndef __ASSEMBLY__
+struct kvm;
+struct kvm_vcpu;
+
+extern char __kvm_hyp_init[];
+extern char __kvm_hyp_init_end[];
+
+extern char __kvm_hyp_exit[];
+extern char __kvm_hyp_exit_end[];
+
+extern char __kvm_hyp_vector[];
+
+extern char __kvm_hyp_code_start[];
+extern char __kvm_hyp_code_end[];
+
+extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+
+extern void __kvm_flush_vm_context(void);
+extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+#endif
+
+#endif /* __ARM_KVM_ASM_H__ */
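The c0_MPIDR ... c13_TID_PRIV constants index a per-vcpu shadow array of guest cp15 state that the C code and the assembly world-switch share; NR_CP15_REGS sizes it. A hedged sketch of the consumer side; the cp15 array field follows the kvm_host.h introduced in this series, so treat the exact field name as an assumption:

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>

/* Read the guest's shadowed SCTLR; assumes vcpu->arch.cp15[] as the
 * backing store, per this patch series. Illustrative only. */
static inline u32 vcpu_sctlr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cp15[c1_SCTLR];
}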
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h new file mode 100644 index 000000000000..4917c2f7e459 --- /dev/null +++ b/arch/arm/include/asm/kvm_coproc.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Rusty Russell IBM Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License, version 2, as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
16 | */ | ||
17 | |||
18 | #ifndef __ARM_KVM_COPROC_H__ | ||
19 | #define __ARM_KVM_COPROC_H__ | ||
20 | #include <linux/kvm_host.h> | ||
21 | |||
22 | void kvm_reset_coprocs(struct kvm_vcpu *vcpu); | ||
23 | |||
24 | struct kvm_coproc_target_table { | ||
25 | unsigned target; | ||
26 | const struct coproc_reg *table; | ||
27 | size_t num; | ||
28 | }; | ||
29 | void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table); | ||
30 | |||
31 | int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
32 | int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
33 | int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
34 | int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
35 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
36 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
37 | |||
38 | unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu); | ||
39 | int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices); | ||
40 | void kvm_coproc_table_init(void); | ||
41 | |||
42 | struct kvm_one_reg; | ||
43 | int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); | ||
44 | int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); | ||
45 | int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); | ||
46 | unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu); | ||
47 | #endif /* __ARM_KVM_COPROC_H__ */ | ||
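kvm_register_target_coproc_table() lets each supported core hook its own cp15 register table into the generic emulation. An illustrative registration, assuming a hypothetical a15_regs table (the real Cortex-A15 table lives in arch/arm/kvm/coproc_a15.c):

    /* Illustrative only: per-register descriptors elided. */
    static const struct coproc_reg a15_regs[] = {
            /* ... */
    };

    static struct kvm_coproc_target_table a15_target_table = {
            .target = KVM_ARM_TARGET_CORTEX_A15,
            .table  = a15_regs,
            .num    = ARRAY_SIZE(a15_regs),
    };

    static int __init coproc_a15_init(void)
    {
            kvm_register_target_coproc_table(&a15_target_table);
            return 0;
    }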
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h new file mode 100644 index 000000000000..fd611996bfb5 --- /dev/null +++ b/arch/arm/include/asm/kvm_emulate.h | |||
@@ -0,0 +1,72 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #ifndef __ARM_KVM_EMULATE_H__ | ||
20 | #define __ARM_KVM_EMULATE_H__ | ||
21 | |||
22 | #include <linux/kvm_host.h> | ||
23 | #include <asm/kvm_asm.h> | ||
24 | #include <asm/kvm_mmio.h> | ||
25 | |||
26 | u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); | ||
27 | u32 *vcpu_spsr(struct kvm_vcpu *vcpu); | ||
28 | |||
29 | int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
30 | void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); | ||
31 | void kvm_inject_undefined(struct kvm_vcpu *vcpu); | ||
32 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); | ||
33 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); | ||
34 | |||
35 | static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) | ||
36 | { | ||
37 | return 1; | ||
38 | } | ||
39 | |||
40 | static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu) | ||
41 | { | ||
42 | return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc; | ||
43 | } | ||
44 | |||
45 | static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu) | ||
46 | { | ||
47 | return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr; | ||
48 | } | ||
49 | |||
50 | static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) | ||
51 | { | ||
52 | *vcpu_cpsr(vcpu) |= PSR_T_BIT; | ||
53 | } | ||
54 | |||
55 | static inline bool mode_has_spsr(struct kvm_vcpu *vcpu) | ||
56 | { | ||
57 | unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; | ||
58 | return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE); | ||
59 | } | ||
60 | |||
61 | static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu) | ||
62 | { | ||
63 | unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; | ||
64 | return cpsr_mode > USR_MODE; | ||
65 | } | ||
66 | |||
67 | static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg) | ||
68 | { | ||
69 | return reg == 15; | ||
70 | } | ||
71 | |||
72 | #endif /* __ARM_KVM_EMULATE_H__ */ | ||
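These accessors are the building blocks of the emulation paths: vcpu_reg() resolves a register number to the right banked copy for the current mode, and kvm_vcpu_reg_is_pc() flags the r15 special case. A hedged sketch of a hypothetical write-back helper built on them:

    /* Hypothetical sketch: write back an emulated load result. A load
     * to the PC is a branch, so the trapped instruction is only
     * skipped for the ordinary registers. */
    static void emulate_load_result(struct kvm_vcpu *vcpu, u8 rt, u32 val,
                                    bool is_wide_instr)
    {
            *vcpu_reg(vcpu, rt) = val;

            if (!kvm_vcpu_reg_is_pc(vcpu, rt))
                    kvm_skip_instr(vcpu, is_wide_instr);
    }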
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h new file mode 100644 index 000000000000..98b4d1a72923 --- /dev/null +++ b/arch/arm/include/asm/kvm_host.h | |||
@@ -0,0 +1,161 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #ifndef __ARM_KVM_HOST_H__ | ||
20 | #define __ARM_KVM_HOST_H__ | ||
21 | |||
22 | #include <asm/kvm.h> | ||
23 | #include <asm/kvm_asm.h> | ||
24 | #include <asm/kvm_mmio.h> | ||
25 | #include <asm/fpstate.h> | ||
26 | |||
27 | #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS | ||
28 | #define KVM_MEMORY_SLOTS 32 | ||
29 | #define KVM_PRIVATE_MEM_SLOTS 4 | ||
30 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | ||
31 | #define KVM_HAVE_ONE_REG | ||
32 | |||
33 | #define KVM_VCPU_MAX_FEATURES 1 | ||
34 | |||
35 | /* We don't currently support large pages. */ | ||
36 | #define KVM_HPAGE_GFN_SHIFT(x) 0 | ||
37 | #define KVM_NR_PAGE_SIZES 1 | ||
38 | #define KVM_PAGES_PER_HPAGE(x) (1UL<<31) | ||
39 | |||
40 | struct kvm_vcpu; | ||
41 | u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); | ||
42 | int kvm_target_cpu(void); | ||
43 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); | ||
44 | void kvm_reset_coprocs(struct kvm_vcpu *vcpu); | ||
45 | |||
46 | struct kvm_arch { | ||
47 | /* VTTBR value associated with below pgd and vmid */ | ||
48 | u64 vttbr; | ||
49 | |||
50 | /* | ||
51 | * Anything that is not used directly from assembly code goes | ||
52 | * here. | ||
53 | */ | ||
54 | |||
55 | /* The VMID generation used for the virt. memory system */ | ||
56 | u64 vmid_gen; | ||
57 | u32 vmid; | ||
58 | |||
59 | /* Stage-2 page table */ | ||
60 | pgd_t *pgd; | ||
61 | }; | ||
62 | |||
63 | #define KVM_NR_MEM_OBJS 40 | ||
64 | |||
65 | /* | ||
66 | * We don't want allocation failures within the mmu code, so we preallocate | ||
67 | * enough memory for a single page fault in a cache. | ||
68 | */ | ||
69 | struct kvm_mmu_memory_cache { | ||
70 | int nobjs; | ||
71 | void *objects[KVM_NR_MEM_OBJS]; | ||
72 | }; | ||
73 | |||
74 | struct kvm_vcpu_arch { | ||
75 | struct kvm_regs regs; | ||
76 | |||
77 | int target; /* Processor target */ | ||
78 | DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); | ||
79 | |||
80 | /* System control coprocessor (cp15) */ | ||
81 | u32 cp15[NR_CP15_REGS]; | ||
82 | |||
83 | /* The CPU type we expose to the VM */ | ||
84 | u32 midr; | ||
85 | |||
86 | /* Exception Information */ | ||
87 | u32 hsr; /* Hyp Syndrome Register */ | ||
88 | u32 hxfar; /* Hyp Data/Inst Fault Address Register */ | ||
89 | u32 hpfar; /* Hyp IPA Fault Address Register */ | ||
90 | |||
91 | /* Floating point registers (VFP and Advanced SIMD/NEON) */ | ||
92 | struct vfp_hard_struct vfp_guest; | ||
93 | struct vfp_hard_struct *vfp_host; | ||
94 | |||
95 | /* | ||
96 | * Anything that is not used directly from assembly code goes | ||
97 | * here. | ||
98 | */ | ||
99 | /* dcache set/way operation pending */ | ||
100 | int last_pcpu; | ||
101 | cpumask_t require_dcache_flush; | ||
102 | |||
103 | /* Don't run the guest on this vcpu */ | ||
104 | bool pause; | ||
105 | |||
106 | /* IO related fields */ | ||
107 | struct kvm_decode mmio_decode; | ||
108 | |||
109 | /* Interrupt related fields */ | ||
110 | u32 irq_lines; /* IRQ and FIQ levels */ | ||
111 | |||
112 | /* Hyp exception information */ | ||
113 | u32 hyp_pc; /* PC when exception was taken from Hyp mode */ | ||
114 | |||
115 | /* Cache some mmu pages needed inside spinlock regions */ | ||
116 | struct kvm_mmu_memory_cache mmu_page_cache; | ||
117 | |||
118 | /* Detect first run of a vcpu */ | ||
119 | bool has_run_once; | ||
120 | }; | ||
121 | |||
122 | struct kvm_vm_stat { | ||
123 | u32 remote_tlb_flush; | ||
124 | }; | ||
125 | |||
126 | struct kvm_vcpu_stat { | ||
127 | u32 halt_wakeup; | ||
128 | }; | ||
129 | |||
130 | struct kvm_vcpu_init; | ||
131 | int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, | ||
132 | const struct kvm_vcpu_init *init); | ||
133 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); | ||
134 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); | ||
135 | struct kvm_one_reg; | ||
136 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | ||
137 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | ||
138 | u64 kvm_call_hyp(void *hypfn, ...); | ||
139 | void force_vm_exit(const cpumask_t *mask); | ||
140 | |||
141 | #define KVM_ARCH_WANT_MMU_NOTIFIER | ||
142 | struct kvm; | ||
143 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | ||
144 | int kvm_unmap_hva_range(struct kvm *kvm, | ||
145 | unsigned long start, unsigned long end); | ||
146 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | ||
147 | |||
148 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); | ||
149 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); | ||
150 | |||
151 | /* We do not have shadow page tables, hence the empty hooks */ | ||
152 | static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva) | ||
153 | { | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) | ||
158 | { | ||
159 | return 0; | ||
160 | } | ||
161 | #endif /* __ARM_KVM_HOST_H__ */ | ||
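The per-vcpu kvm_mmu_memory_cache follows the usual KVM pattern: the cache is topped up with ordinary allocations before the stage-2 page-table lock is taken, so allocations inside the locked region cannot fail. The pop side is roughly as follows (a sketch; the real helpers live in arch/arm/kvm/mmu.c):

    /* Sketch: pop a preallocated object. Only legal after the cache
     * has been filled, hence the BUG_ON rather than a NULL return. */
    static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
    {
            BUG_ON(!mc->nobjs);
            return mc->objects[--mc->nobjs];
    }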
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h new file mode 100644 index 000000000000..adcc0d7d3175 --- /dev/null +++ b/arch/arm/include/asm/kvm_mmio.h | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #ifndef __ARM_KVM_MMIO_H__ | ||
20 | #define __ARM_KVM_MMIO_H__ | ||
21 | |||
22 | #include <linux/kvm_host.h> | ||
23 | #include <asm/kvm_asm.h> | ||
24 | #include <asm/kvm_arm.h> | ||
25 | |||
26 | struct kvm_decode { | ||
27 | unsigned long rt; | ||
28 | bool sign_extend; | ||
29 | }; | ||
30 | |||
31 | /* | ||
32 | * The in-kernel MMIO emulation code wants to use a copy of run->mmio, | ||
33 | * which is an anonymous type. Use our own type instead. | ||
34 | */ | ||
35 | struct kvm_exit_mmio { | ||
36 | phys_addr_t phys_addr; | ||
37 | u8 data[8]; | ||
38 | u32 len; | ||
39 | bool is_write; | ||
40 | }; | ||
41 | |||
42 | static inline void kvm_prepare_mmio(struct kvm_run *run, | ||
43 | struct kvm_exit_mmio *mmio) | ||
44 | { | ||
45 | run->mmio.phys_addr = mmio->phys_addr; | ||
46 | run->mmio.len = mmio->len; | ||
47 | run->mmio.is_write = mmio->is_write; | ||
48 | memcpy(run->mmio.data, mmio->data, mmio->len); | ||
49 | run->exit_reason = KVM_EXIT_MMIO; | ||
50 | } | ||
51 | |||
52 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
53 | int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
54 | phys_addr_t fault_ipa); | ||
55 | |||
56 | #endif /* __ARM_KVM_MMIO_H__ */ | ||
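kvm_prepare_mmio() above copies the decoded access into the shared kvm_run area and sets the exit reason, so the ioctl returns to userspace for emulation. A rough sketch of a call site, with the abort decoding elided and the helper name hypothetical:

    /* Sketch: forward a decoded MMIO write (len <= 8 bytes) at
     * fault_ipa to userspace via the run structure. */
    static int forward_mmio_write(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                  phys_addr_t fault_ipa, u32 len, u64 data)
    {
            struct kvm_exit_mmio mmio = {
                    .phys_addr = fault_ipa,
                    .len       = len,
                    .is_write  = true,
            };

            memcpy(mmio.data, &data, len);
            kvm_prepare_mmio(run, &mmio);
            return 0;       /* exit to userspace with KVM_EXIT_MMIO */
    }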
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h new file mode 100644 index 000000000000..421a20b34874 --- /dev/null +++ b/arch/arm/include/asm/kvm_mmu.h | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #ifndef __ARM_KVM_MMU_H__ | ||
20 | #define __ARM_KVM_MMU_H__ | ||
21 | |||
22 | int create_hyp_mappings(void *from, void *to); | ||
23 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t); | ||
24 | void free_hyp_pmds(void); | ||
25 | |||
26 | int kvm_alloc_stage2_pgd(struct kvm *kvm); | ||
27 | void kvm_free_stage2_pgd(struct kvm *kvm); | ||
28 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | ||
29 | phys_addr_t pa, unsigned long size); | ||
30 | |||
31 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); | ||
32 | |||
33 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); | ||
34 | |||
35 | phys_addr_t kvm_mmu_get_httbr(void); | ||
36 | int kvm_mmu_init(void); | ||
37 | void kvm_clear_hyp_idmap(void); | ||
38 | |||
39 | static inline bool kvm_is_write_fault(unsigned long hsr) | ||
40 | { | ||
41 | unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; | ||
42 | if (hsr_ec == HSR_EC_IABT) | ||
43 | return false; | ||
44 | else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR)) | ||
45 | return false; | ||
46 | else | ||
47 | return true; | ||
48 | } | ||
49 | |||
50 | #endif /* __ARM_KVM_MMU_H__ */ | ||
diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h new file mode 100644 index 000000000000..9a83d98bf170 --- /dev/null +++ b/arch/arm/include/asm/kvm_psci.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __ARM_KVM_PSCI_H__ | ||
19 | #define __ARM_KVM_PSCI_H__ | ||
20 | |||
21 | bool kvm_psci_call(struct kvm_vcpu *vcpu); | ||
22 | |||
23 | #endif /* __ARM_KVM_PSCI_H__ */ | ||
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index db9fedb57f2c..5cf2e979b4be 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h | |||
@@ -23,6 +23,7 @@ struct hw_pci { | |||
23 | #endif | 23 | #endif |
24 | struct pci_ops *ops; | 24 | struct pci_ops *ops; |
25 | int nr_controllers; | 25 | int nr_controllers; |
26 | void **private_data; | ||
26 | int (*setup)(int nr, struct pci_sys_data *); | 27 | int (*setup)(int nr, struct pci_sys_data *); |
27 | struct pci_bus *(*scan)(int nr, struct pci_sys_data *); | 28 | struct pci_bus *(*scan)(int nr, struct pci_sys_data *); |
28 | void (*preinit)(void); | 29 | void (*preinit)(void); |
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 1c4df27f9332..64c770d24198 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h | |||
@@ -36,23 +36,23 @@ | |||
36 | * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area | 36 | * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area |
37 | */ | 37 | */ |
38 | #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) | 38 | #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) |
39 | #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) | 39 | #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) |
40 | #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) | 40 | #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * The maximum size of a 26-bit user space task. | 43 | * The maximum size of a 26-bit user space task. |
44 | */ | 44 | */ |
45 | #define TASK_SIZE_26 UL(0x04000000) | 45 | #define TASK_SIZE_26 (UL(1) << 26) |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * The module space lives between the addresses given by TASK_SIZE | 48 | * The module space lives between the addresses given by TASK_SIZE |
49 | * and PAGE_OFFSET - it must be within 32MB of the kernel text. | 49 | * and PAGE_OFFSET - it must be within 32MB of the kernel text. |
50 | */ | 50 | */ |
51 | #ifndef CONFIG_THUMB2_KERNEL | 51 | #ifndef CONFIG_THUMB2_KERNEL |
52 | #define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024) | 52 | #define MODULES_VADDR (PAGE_OFFSET - SZ_16M) |
53 | #else | 53 | #else |
54 | /* smaller range for Thumb-2 symbols relocation (2^24)*/ | 54 | /* smaller range for Thumb-2 symbols relocation (2^24)*/ |
55 | #define MODULES_VADDR (PAGE_OFFSET - 8*1024*1024) | 55 | #define MODULES_VADDR (PAGE_OFFSET - SZ_8M) |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | #if TASK_SIZE > MODULES_VADDR | 58 | #if TASK_SIZE > MODULES_VADDR |
diff --git a/arch/arm/include/asm/opcodes-sec.h b/arch/arm/include/asm/opcodes-sec.h new file mode 100644 index 000000000000..bc3a9174417c --- /dev/null +++ b/arch/arm/include/asm/opcodes-sec.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * Copyright (C) 2012 ARM Limited | ||
12 | */ | ||
13 | |||
14 | #ifndef __ASM_ARM_OPCODES_SEC_H | ||
15 | #define __ASM_ARM_OPCODES_SEC_H | ||
16 | |||
17 | #include <asm/opcodes.h> | ||
18 | |||
19 | #define __SMC(imm4) __inst_arm_thumb32( \ | ||
20 | 0xE1600070 | (((imm4) & 0xF) << 0), \ | ||
21 | 0xF7F08000 | (((imm4) & 0xF) << 16) \ | ||
22 | ) | ||
23 | |||
24 | #endif /* __ASM_ARM_OPCODES_SEC_H */ | ||
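__SMC() expands through __inst_arm_thumb32() to the correct SMC #imm4 encoding for either an ARM or a Thumb-2 kernel, so callers can emit it from inline assembly without an ISA check. A hedged sketch of a caller (the register calling convention is entirely firmware-specific and assumed here):

    /* Illustrative secure monitor call: function ID in r0, result in
     * r0, SMC #0. Real firmware ABIs may use more registers. */
    static noinline u32 firmware_smc(u32 function_id)
    {
            register u32 r0 asm("r0") = function_id;

            asm volatile(__SMC(0)
                    : "+r" (r0) : : "memory");

            return r0;
    }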
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h index 74e211a6fb24..e796c598513b 100644 --- a/arch/arm/include/asm/opcodes.h +++ b/arch/arm/include/asm/opcodes.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #define __ASM_ARM_OPCODES_H | 10 | #define __ASM_ARM_OPCODES_H |
11 | 11 | ||
12 | #ifndef __ASSEMBLY__ | 12 | #ifndef __ASSEMBLY__ |
13 | #include <linux/linkage.h> | ||
13 | extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); | 14 | extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); |
14 | #endif | 15 | #endif |
15 | 16 | ||
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index 53426c66352a..12f71a190422 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h | |||
@@ -92,6 +92,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) | |||
92 | static inline void outer_flush_all(void) { } | 92 | static inline void outer_flush_all(void) { } |
93 | static inline void outer_inv_all(void) { } | 93 | static inline void outer_inv_all(void) { } |
94 | static inline void outer_disable(void) { } | 94 | static inline void outer_disable(void) { } |
95 | static inline void outer_resume(void) { } | ||
95 | 96 | ||
96 | #endif | 97 | #endif |
97 | 98 | ||
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h index d7952824c5c4..18f5cef82ad5 100644 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h | |||
@@ -32,6 +32,9 @@ | |||
32 | #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) | 32 | #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) |
33 | #define PMD_BIT4 (_AT(pmdval_t, 0)) | 33 | #define PMD_BIT4 (_AT(pmdval_t, 0)) |
34 | #define PMD_DOMAIN(x) (_AT(pmdval_t, 0)) | 34 | #define PMD_DOMAIN(x) (_AT(pmdval_t, 0)) |
35 | #define PMD_APTABLE_SHIFT (61) | ||
36 | #define PMD_APTABLE (_AT(pgdval_t, 3) << PMD_APTABLE_SHIFT) | ||
37 | #define PMD_PXNTABLE (_AT(pgdval_t, 1) << 59) | ||
35 | 38 | ||
36 | /* | 39 | /* |
37 | * - section | 40 | * - section |
@@ -41,9 +44,11 @@ | |||
41 | #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) | 44 | #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) |
42 | #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) | 45 | #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) |
43 | #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11) | 46 | #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11) |
47 | #define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53) | ||
44 | #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54) | 48 | #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54) |
45 | #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0)) | 49 | #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0)) |
46 | #define PMD_SECT_AP_READ (_AT(pmdval_t, 0)) | 50 | #define PMD_SECT_AP_READ (_AT(pmdval_t, 0)) |
51 | #define PMD_SECT_AP1 (_AT(pmdval_t, 1) << 6) | ||
47 | #define PMD_SECT_TEX(x) (_AT(pmdval_t, 0)) | 52 | #define PMD_SECT_TEX(x) (_AT(pmdval_t, 0)) |
48 | 53 | ||
49 | /* | 54 | /* |
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index a3f37929940a..6ef8afd1b64c 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h | |||
@@ -104,11 +104,29 @@ | |||
104 | */ | 104 | */ |
105 | #define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */ | 105 | #define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */ |
106 | 106 | ||
107 | /* | ||
108 | * 2nd stage PTE definitions for LPAE. | ||
109 | */ | ||
110 | #define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */ | ||
111 | #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ | ||
112 | #define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ | ||
113 | #define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ | ||
114 | #define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */ | ||
115 | |||
116 | /* | ||
117 | * Hyp-mode PL2 PTE definitions for LPAE. | ||
118 | */ | ||
119 | #define L_PTE_HYP L_PTE_USER | ||
120 | |||
107 | #ifndef __ASSEMBLY__ | 121 | #ifndef __ASSEMBLY__ |
108 | 122 | ||
109 | #define pud_none(pud) (!pud_val(pud)) | 123 | #define pud_none(pud) (!pud_val(pud)) |
110 | #define pud_bad(pud) (!(pud_val(pud) & 2)) | 124 | #define pud_bad(pud) (!(pud_val(pud) & 2)) |
111 | #define pud_present(pud) (pud_val(pud)) | 125 | #define pud_present(pud) (pud_val(pud)) |
126 | #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ | ||
127 | PMD_TYPE_TABLE) | ||
128 | #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ | ||
129 | PMD_TYPE_SECT) | ||
112 | 130 | ||
113 | #define pud_clear(pudp) \ | 131 | #define pud_clear(pudp) \ |
114 | do { \ | 132 | do { \ |
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 9c82f988c0e3..f30ac3b55ba9 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h | |||
@@ -70,6 +70,9 @@ extern void __pgd_error(const char *file, int line, pgd_t); | |||
70 | 70 | ||
71 | extern pgprot_t pgprot_user; | 71 | extern pgprot_t pgprot_user; |
72 | extern pgprot_t pgprot_kernel; | 72 | extern pgprot_t pgprot_kernel; |
73 | extern pgprot_t pgprot_hyp_device; | ||
74 | extern pgprot_t pgprot_s2; | ||
75 | extern pgprot_t pgprot_s2_device; | ||
73 | 76 | ||
74 | #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) | 77 | #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) |
75 | 78 | ||
@@ -82,6 +85,10 @@ extern pgprot_t pgprot_kernel; | |||
82 | #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) | 85 | #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) |
83 | #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) | 86 | #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) |
84 | #define PAGE_KERNEL_EXEC pgprot_kernel | 87 | #define PAGE_KERNEL_EXEC pgprot_kernel |
88 | #define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP) | ||
89 | #define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP) | ||
90 | #define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY) | ||
91 | #define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY) | ||
85 | 92 | ||
86 | #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) | 93 | #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) |
87 | #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) | 94 | #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) |
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h new file mode 100644 index 000000000000..ce0dbe7c1625 --- /dev/null +++ b/arch/arm/include/asm/psci.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * Copyright (C) 2012 ARM Limited | ||
12 | */ | ||
13 | |||
14 | #ifndef __ASM_ARM_PSCI_H | ||
15 | #define __ASM_ARM_PSCI_H | ||
16 | |||
17 | #define PSCI_POWER_STATE_TYPE_STANDBY 0 | ||
18 | #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 | ||
19 | |||
20 | struct psci_power_state { | ||
21 | u16 id; | ||
22 | u8 type; | ||
23 | u8 affinity_level; | ||
24 | }; | ||
25 | |||
26 | struct psci_operations { | ||
27 | int (*cpu_suspend)(struct psci_power_state state, | ||
28 | unsigned long entry_point); | ||
29 | int (*cpu_off)(struct psci_power_state state); | ||
30 | int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); | ||
31 | int (*migrate)(unsigned long cpuid); | ||
32 | }; | ||
33 | |||
34 | extern struct psci_operations psci_ops; | ||
35 | |||
36 | #endif /* __ASM_ARM_PSCI_H */ | ||
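psci_ops is populated by the DT-probed backend added in arch/arm/kernel/psci.c; platform code then tests the individual hooks before using them, since firmware may implement only a subset. A sketch of secondary bring-up on top of it, assuming logical CPU numbers match the firmware's cpuid encoding and the standard secondary_startup entry point:

    /* Sketch: start a secondary CPU through PSCI CPU_ON, falling back
     * to -ENODEV if the firmware does not provide the call. */
    static int boot_secondary_via_psci(unsigned int cpu)
    {
            if (!psci_ops.cpu_on)
                    return -ENODEV;

            return psci_ops.cpu_on(cpu, virt_to_phys(secondary_startup));
    }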
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index b4ca707d0a69..6220e9fdf4c7 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h | |||
@@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
119 | 119 | ||
120 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | 120 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
121 | { | 121 | { |
122 | unsigned long tmp; | ||
123 | u32 slock; | ||
124 | |||
125 | smp_mb(); | 122 | smp_mb(); |
126 | 123 | lock->tickets.owner++; | |
127 | __asm__ __volatile__( | ||
128 | " mov %1, #1\n" | ||
129 | "1: ldrex %0, [%2]\n" | ||
130 | " uadd16 %0, %0, %1\n" | ||
131 | " strex %1, %0, [%2]\n" | ||
132 | " teq %1, #0\n" | ||
133 | " bne 1b" | ||
134 | : "=&r" (slock), "=&r" (tmp) | ||
135 | : "r" (&lock->slock) | ||
136 | : "cc"); | ||
137 | |||
138 | dsb_sev(); | 124 | dsb_sev(); |
139 | } | 125 | } |
140 | 126 | ||
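The unlock fast path can be a plain increment because only the lock holder ever stores to the owner field; the old ldrex/strex loop guaranteed nothing extra and merely added contention on the exclusive monitor. For reference, the ticket layout this relies on looks roughly like this (little-endian variant of the arch_spinlock_t in asm/spinlock_types.h; the big-endian variant swaps the halfwords):

    typedef struct {
            union {
                    u32 slock;
                    struct __raw_tickets {
                            u16 owner;      /* ticket now being served */
                            u16 next;       /* next ticket to hand out */
                    } tickets;
            };
    } arch_spinlock_t;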
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 86164df86cb4..50af92bac737 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h | |||
@@ -24,9 +24,9 @@ | |||
24 | /* | 24 | /* |
25 | * Flag indicating that the kernel was not entered in the same mode on every | 25 | * Flag indicating that the kernel was not entered in the same mode on every |
26 | * CPU. The zImage loader stashes this value in an SPSR, so we need an | 26 | * CPU. The zImage loader stashes this value in an SPSR, so we need an |
27 | * architecturally defined flag bit here (the N flag, as it happens) | 27 | * architecturally defined flag bit here. |
28 | */ | 28 | */ |
29 | #define BOOT_CPU_MODE_MISMATCH (1<<31) | 29 | #define BOOT_CPU_MODE_MISMATCH PSR_N_BIT |
30 | 30 | ||
31 | #ifndef __ASSEMBLY__ | 31 | #ifndef __ASSEMBLY__ |
32 | 32 | ||
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h new file mode 100644 index 000000000000..3303ff5adbf3 --- /dev/null +++ b/arch/arm/include/uapi/asm/kvm.h | |||
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #ifndef __ARM_KVM_H__ | ||
20 | #define __ARM_KVM_H__ | ||
21 | |||
22 | #include <linux/types.h> | ||
23 | #include <asm/ptrace.h> | ||
24 | |||
25 | #define __KVM_HAVE_GUEST_DEBUG | ||
26 | #define __KVM_HAVE_IRQ_LINE | ||
27 | |||
28 | #define KVM_REG_SIZE(id) \ | ||
29 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) | ||
30 | |||
31 | /* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */ | ||
32 | #define KVM_ARM_SVC_sp svc_regs[0] | ||
33 | #define KVM_ARM_SVC_lr svc_regs[1] | ||
34 | #define KVM_ARM_SVC_spsr svc_regs[2] | ||
35 | #define KVM_ARM_ABT_sp abt_regs[0] | ||
36 | #define KVM_ARM_ABT_lr abt_regs[1] | ||
37 | #define KVM_ARM_ABT_spsr abt_regs[2] | ||
38 | #define KVM_ARM_UND_sp und_regs[0] | ||
39 | #define KVM_ARM_UND_lr und_regs[1] | ||
40 | #define KVM_ARM_UND_spsr und_regs[2] | ||
41 | #define KVM_ARM_IRQ_sp irq_regs[0] | ||
42 | #define KVM_ARM_IRQ_lr irq_regs[1] | ||
43 | #define KVM_ARM_IRQ_spsr irq_regs[2] | ||
44 | |||
45 | /* Valid only for fiq_regs in struct kvm_regs */ | ||
46 | #define KVM_ARM_FIQ_r8 fiq_regs[0] | ||
47 | #define KVM_ARM_FIQ_r9 fiq_regs[1] | ||
48 | #define KVM_ARM_FIQ_r10 fiq_regs[2] | ||
49 | #define KVM_ARM_FIQ_fp fiq_regs[3] | ||
50 | #define KVM_ARM_FIQ_ip fiq_regs[4] | ||
51 | #define KVM_ARM_FIQ_sp fiq_regs[5] | ||
52 | #define KVM_ARM_FIQ_lr fiq_regs[6] | ||
53 | #define KVM_ARM_FIQ_spsr fiq_regs[7] | ||
54 | |||
55 | struct kvm_regs { | ||
56 | struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */ | ||
57 | __u32 svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ | ||
58 | __u32 abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ | ||
59 | __u32 und_regs[3]; /* SP_und, LR_und, SPSR_und */ | ||
60 | __u32 irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ | ||
61 | __u32 fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ | ||
62 | }; | ||
63 | |||
64 | /* Supported Processor Types */ | ||
65 | #define KVM_ARM_TARGET_CORTEX_A15 0 | ||
66 | #define KVM_ARM_NUM_TARGETS 1 | ||
67 | |||
68 | #define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ | ||
69 | |||
70 | struct kvm_vcpu_init { | ||
71 | __u32 target; | ||
72 | __u32 features[7]; | ||
73 | }; | ||
74 | |||
75 | struct kvm_sregs { | ||
76 | }; | ||
77 | |||
78 | struct kvm_fpu { | ||
79 | }; | ||
80 | |||
81 | struct kvm_guest_debug_arch { | ||
82 | }; | ||
83 | |||
84 | struct kvm_debug_exit_arch { | ||
85 | }; | ||
86 | |||
87 | struct kvm_sync_regs { | ||
88 | }; | ||
89 | |||
90 | struct kvm_arch_memory_slot { | ||
91 | }; | ||
92 | |||
93 | /* If you need to interpret the index values, here is the key: */ | ||
94 | #define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 | ||
95 | #define KVM_REG_ARM_COPROC_SHIFT 16 | ||
96 | #define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007 | ||
97 | #define KVM_REG_ARM_32_OPC2_SHIFT 0 | ||
98 | #define KVM_REG_ARM_OPC1_MASK 0x0000000000000078 | ||
99 | #define KVM_REG_ARM_OPC1_SHIFT 3 | ||
100 | #define KVM_REG_ARM_CRM_MASK 0x0000000000000780 | ||
101 | #define KVM_REG_ARM_CRM_SHIFT 7 | ||
102 | #define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 | ||
103 | #define KVM_REG_ARM_32_CRN_SHIFT 11 | ||
104 | |||
105 | /* Normal registers are mapped as coprocessor 16. */ | ||
106 | #define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) | ||
107 | #define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4) | ||
108 | |||
109 | /* Some registers need more space to represent values. */ | ||
110 | #define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT) | ||
111 | #define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00 | ||
112 | #define KVM_REG_ARM_DEMUX_ID_SHIFT 8 | ||
113 | #define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT) | ||
114 | #define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF | ||
115 | #define KVM_REG_ARM_DEMUX_VAL_SHIFT 0 | ||
116 | |||
117 | /* VFP registers: we could overload CP10 like ARM does, but that's ugly. */ | ||
118 | #define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT) | ||
119 | #define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF | ||
120 | #define KVM_REG_ARM_VFP_BASE_REG 0x0 | ||
121 | #define KVM_REG_ARM_VFP_FPSID 0x1000 | ||
122 | #define KVM_REG_ARM_VFP_FPSCR 0x1001 | ||
123 | #define KVM_REG_ARM_VFP_MVFR1 0x1006 | ||
124 | #define KVM_REG_ARM_VFP_MVFR0 0x1007 | ||
125 | #define KVM_REG_ARM_VFP_FPEXC 0x1008 | ||
126 | #define KVM_REG_ARM_VFP_FPINST 0x1009 | ||
127 | #define KVM_REG_ARM_VFP_FPINST2 0x100A | ||
128 | |||
129 | |||
130 | /* KVM_IRQ_LINE irq field index values */ | ||
131 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 | ||
132 | #define KVM_ARM_IRQ_TYPE_MASK 0xff | ||
133 | #define KVM_ARM_IRQ_VCPU_SHIFT 16 | ||
134 | #define KVM_ARM_IRQ_VCPU_MASK 0xff | ||
135 | #define KVM_ARM_IRQ_NUM_SHIFT 0 | ||
136 | #define KVM_ARM_IRQ_NUM_MASK 0xffff | ||
137 | |||
138 | /* irq_type field */ | ||
139 | #define KVM_ARM_IRQ_TYPE_CPU 0 | ||
140 | #define KVM_ARM_IRQ_TYPE_SPI 1 | ||
141 | #define KVM_ARM_IRQ_TYPE_PPI 2 | ||
142 | |||
143 | /* out-of-kernel GIC cpu interrupt injection irq_number field */ | ||
144 | #define KVM_ARM_IRQ_CPU_IRQ 0 | ||
145 | #define KVM_ARM_IRQ_CPU_FIQ 1 | ||
146 | |||
147 | /* Highest supported SPI, from VGIC_NR_IRQS */ | ||
148 | #define KVM_ARM_IRQ_GIC_MAX 127 | ||
149 | |||
150 | /* PSCI interface */ | ||
151 | #define KVM_PSCI_FN_BASE 0x95c1ba5e | ||
152 | #define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) | ||
153 | |||
154 | #define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0) | ||
155 | #define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1) | ||
156 | #define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) | ||
157 | #define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) | ||
158 | |||
159 | #define KVM_PSCI_RET_SUCCESS 0 | ||
160 | #define KVM_PSCI_RET_NI ((unsigned long)-1) | ||
161 | #define KVM_PSCI_RET_INVAL ((unsigned long)-2) | ||
162 | #define KVM_PSCI_RET_DENIED ((unsigned long)-3) | ||
163 | |||
164 | #endif /* __ARM_KVM_H__ */ | ||
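Userspace composes KVM_GET_ONE_REG/KVM_SET_ONE_REG ids from the masks above; the core registers map through the coprocessor-16 space. A userspace-side sketch of fetching the guest PC (KVM_REG_ARM and KVM_REG_SIZE_U32 come from the generic <linux/kvm.h>):

    /* Userspace sketch: read the guest PC through the ONE_REG API. */
    static int get_guest_pc(int vcpu_fd, __u32 *pc)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_ARM | KVM_REG_SIZE_U32 |
                            KVM_REG_ARM_CORE |
                            KVM_REG_ARM_CORE_REG(usr_regs.ARM_pc),
                    .addr = (__u64)(unsigned long)pc,
            };

            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }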
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 5bbec7b8183e..5f3338eacad2 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile | |||
@@ -82,5 +82,6 @@ obj-$(CONFIG_DEBUG_LL) += debug.o | |||
82 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | 82 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
83 | 83 | ||
84 | obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o | 84 | obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o |
85 | obj-$(CONFIG_ARM_PSCI) += psci.o | ||
85 | 86 | ||
86 | extra-y := $(head-y) vmlinux.lds | 87 | extra-y := $(head-y) vmlinux.lds |
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index c985b481192c..c8b3272dfed1 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c | |||
@@ -13,6 +13,9 @@ | |||
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/dma-mapping.h> | 15 | #include <linux/dma-mapping.h> |
16 | #ifdef CONFIG_KVM_ARM_HOST | ||
17 | #include <linux/kvm_host.h> | ||
18 | #endif | ||
16 | #include <asm/cacheflush.h> | 19 | #include <asm/cacheflush.h> |
17 | #include <asm/glue-df.h> | 20 | #include <asm/glue-df.h> |
18 | #include <asm/glue-pf.h> | 21 | #include <asm/glue-pf.h> |
@@ -146,5 +149,27 @@ int main(void) | |||
146 | DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); | 149 | DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); |
147 | DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); | 150 | DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); |
148 | DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); | 151 | DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); |
152 | #ifdef CONFIG_KVM_ARM_HOST | ||
153 | DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); | ||
154 | DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); | ||
155 | DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); | ||
156 | DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest)); | ||
157 | DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.vfp_host)); | ||
158 | DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); | ||
159 | DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); | ||
160 | DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); | ||
161 | DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs)); | ||
162 | DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs)); | ||
163 | DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs)); | ||
164 | DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs)); | ||
165 | DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); | ||
166 | DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); | ||
167 | DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); | ||
168 | DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr)); | ||
169 | DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar)); | ||
170 | DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar)); | ||
171 | DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc)); | ||
172 | DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); | ||
173 | #endif | ||
149 | return 0; | 174 | return 0; |
150 | } | 175 | } |
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 379cf3292390..a1f73b502ef0 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c | |||
@@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
413 | return irq; | 413 | return irq; |
414 | } | 414 | } |
415 | 415 | ||
416 | static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys) | 416 | static int pcibios_init_resources(int busnr, struct pci_sys_data *sys) |
417 | { | 417 | { |
418 | int ret; | 418 | int ret; |
419 | struct pci_host_bridge_window *window; | 419 | struct pci_host_bridge_window *window; |
@@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys) | |||
445 | return 0; | 445 | return 0; |
446 | } | 446 | } |
447 | 447 | ||
448 | static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head) | 448 | static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head) |
449 | { | 449 | { |
450 | struct pci_sys_data *sys = NULL; | 450 | struct pci_sys_data *sys = NULL; |
451 | int ret; | 451 | int ret; |
@@ -464,6 +464,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head) | |||
464 | sys->map_irq = hw->map_irq; | 464 | sys->map_irq = hw->map_irq; |
465 | INIT_LIST_HEAD(&sys->resources); | 465 | INIT_LIST_HEAD(&sys->resources); |
466 | 466 | ||
467 | if (hw->private_data) | ||
468 | sys->private_data = hw->private_data[nr]; | ||
469 | |||
467 | ret = hw->setup(nr, sys); | 470 | ret = hw->setup(nr, sys); |
468 | 471 | ||
469 | if (ret > 0) { | 472 | if (ret > 0) { |
@@ -493,7 +496,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head) | |||
493 | } | 496 | } |
494 | } | 497 | } |
495 | 498 | ||
496 | void __init pci_common_init(struct hw_pci *hw) | 499 | void pci_common_init(struct hw_pci *hw) |
497 | { | 500 | { |
498 | struct pci_sys_data *sys; | 501 | struct pci_sys_data *sys; |
499 | LIST_HEAD(head); | 502 | LIST_HEAD(head); |
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 5ff2e77782b1..5eae53e7a2e1 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/perf_event.h> | 28 | #include <linux/perf_event.h> |
29 | #include <linux/hw_breakpoint.h> | 29 | #include <linux/hw_breakpoint.h> |
30 | #include <linux/smp.h> | 30 | #include <linux/smp.h> |
31 | #include <linux/cpu_pm.h> | ||
31 | 32 | ||
32 | #include <asm/cacheflush.h> | 33 | #include <asm/cacheflush.h> |
33 | #include <asm/cputype.h> | 34 | #include <asm/cputype.h> |
@@ -35,6 +36,7 @@ | |||
35 | #include <asm/hw_breakpoint.h> | 36 | #include <asm/hw_breakpoint.h> |
36 | #include <asm/kdebug.h> | 37 | #include <asm/kdebug.h> |
37 | #include <asm/traps.h> | 38 | #include <asm/traps.h> |
39 | #include <asm/hardware/coresight.h> | ||
38 | 40 | ||
39 | /* Breakpoint currently in use for each BRP. */ | 41 | /* Breakpoint currently in use for each BRP. */ |
40 | static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); | 42 | static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); |
@@ -49,6 +51,9 @@ static int core_num_wrps; | |||
49 | /* Debug architecture version. */ | 51 | /* Debug architecture version. */ |
50 | static u8 debug_arch; | 52 | static u8 debug_arch; |
51 | 53 | ||
54 | /* Does debug architecture support OS Save and Restore? */ | ||
55 | static bool has_ossr; | ||
56 | |||
52 | /* Maximum supported watchpoint length. */ | 57 | /* Maximum supported watchpoint length. */ |
53 | static u8 max_watchpoint_len; | 58 | static u8 max_watchpoint_len; |
54 | 59 | ||
@@ -903,6 +908,23 @@ static struct undef_hook debug_reg_hook = { | |||
903 | .fn = debug_reg_trap, | 908 | .fn = debug_reg_trap, |
904 | }; | 909 | }; |
905 | 910 | ||
911 | /* Does this core support OS Save and Restore? */ | ||
912 | static bool core_has_os_save_restore(void) | ||
913 | { | ||
914 | u32 oslsr; | ||
915 | |||
916 | switch (get_debug_arch()) { | ||
917 | case ARM_DEBUG_ARCH_V7_1: | ||
918 | return true; | ||
919 | case ARM_DEBUG_ARCH_V7_ECP14: | ||
920 | ARM_DBG_READ(c1, c1, 4, oslsr); | ||
921 | if (oslsr & ARM_OSLSR_OSLM0) | ||
922 | return true; | ||
923 | default: | ||
924 | return false; | ||
925 | } | ||
926 | } | ||
927 | |||
906 | static void reset_ctrl_regs(void *unused) | 928 | static void reset_ctrl_regs(void *unused) |
907 | { | 929 | { |
908 | int i, raw_num_brps, err = 0, cpu = smp_processor_id(); | 930 | int i, raw_num_brps, err = 0, cpu = smp_processor_id(); |
@@ -930,11 +952,7 @@ static void reset_ctrl_regs(void *unused) | |||
930 | if ((val & 0x1) == 0) | 952 | if ((val & 0x1) == 0) |
931 | err = -EPERM; | 953 | err = -EPERM; |
932 | 954 | ||
933 | /* | 955 | if (!has_ossr) |
934 | * Check whether we implement OS save and restore. | ||
935 | */ | ||
936 | ARM_DBG_READ(c1, c1, 4, val); | ||
937 | if ((val & 0x9) == 0) | ||
938 | goto clear_vcr; | 956 | goto clear_vcr; |
939 | break; | 957 | break; |
940 | case ARM_DEBUG_ARCH_V7_1: | 958 | case ARM_DEBUG_ARCH_V7_1: |
@@ -955,9 +973,9 @@ static void reset_ctrl_regs(void *unused) | |||
955 | 973 | ||
956 | /* | 974 | /* |
957 | * Unconditionally clear the OS lock by writing a value | 975 | * Unconditionally clear the OS lock by writing a value |
958 | * other than 0xC5ACCE55 to the access register. | 976 | * other than CS_LAR_KEY to the access register. |
959 | */ | 977 | */ |
960 | ARM_DBG_WRITE(c1, c0, 4, 0); | 978 | ARM_DBG_WRITE(c1, c0, 4, ~CS_LAR_KEY); |
961 | isb(); | 979 | isb(); |
962 | 980 | ||
963 | /* | 981 | /* |
@@ -1015,6 +1033,30 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = { | |||
1015 | .notifier_call = dbg_reset_notify, | 1033 | .notifier_call = dbg_reset_notify, |
1016 | }; | 1034 | }; |
1017 | 1035 | ||
1036 | #ifdef CONFIG_CPU_PM | ||
1037 | static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action, | ||
1038 | void *v) | ||
1039 | { | ||
1040 | if (action == CPU_PM_EXIT) | ||
1041 | reset_ctrl_regs(NULL); | ||
1042 | |||
1043 | return NOTIFY_OK; | ||
1044 | } | ||
1045 | |||
1046 | static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = { | ||
1047 | .notifier_call = dbg_cpu_pm_notify, | ||
1048 | }; | ||
1049 | |||
1050 | static void __init pm_init(void) | ||
1051 | { | ||
1052 | cpu_pm_register_notifier(&dbg_cpu_pm_nb); | ||
1053 | } | ||
1054 | #else | ||
1055 | static inline void pm_init(void) | ||
1056 | { | ||
1057 | } | ||
1058 | #endif | ||
1059 | |||
1018 | static int __init arch_hw_breakpoint_init(void) | 1060 | static int __init arch_hw_breakpoint_init(void) |
1019 | { | 1061 | { |
1020 | debug_arch = get_debug_arch(); | 1062 | debug_arch = get_debug_arch(); |
@@ -1024,6 +1066,8 @@ static int __init arch_hw_breakpoint_init(void) | |||
1024 | return 0; | 1066 | return 0; |
1025 | } | 1067 | } |
1026 | 1068 | ||
1069 | has_ossr = core_has_os_save_restore(); | ||
1070 | |||
1027 | /* Determine how many BRPs/WRPs are available. */ | 1071 | /* Determine how many BRPs/WRPs are available. */ |
1028 | core_num_brps = get_num_brps(); | 1072 | core_num_brps = get_num_brps(); |
1029 | core_num_wrps = get_num_wrps(); | 1073 | core_num_wrps = get_num_wrps(); |
@@ -1062,8 +1106,9 @@ static int __init arch_hw_breakpoint_init(void) | |||
1062 | hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, | 1106 | hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, |
1063 | TRAP_HWBKPT, "breakpoint debug exception"); | 1107 | TRAP_HWBKPT, "breakpoint debug exception"); |
1064 | 1108 | ||
1065 | /* Register hotplug notifier. */ | 1109 | /* Register hotplug and PM notifiers. */ |
1066 | register_cpu_notifier(&dbg_reset_nb); | 1110 | register_cpu_notifier(&dbg_reset_nb); |
1111 | pm_init(); | ||
1067 | return 0; | 1112 | return 0; |
1068 | } | 1113 | } |
1069 | arch_initcall(arch_hw_breakpoint_init); | 1114 | arch_initcall(arch_hw_breakpoint_init); |
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index f9e8657dd241..31e0eb353cd8 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -149,12 +149,6 @@ again: | |||
149 | static void | 149 | static void |
150 | armpmu_read(struct perf_event *event) | 150 | armpmu_read(struct perf_event *event) |
151 | { | 151 | { |
152 | struct hw_perf_event *hwc = &event->hw; | ||
153 | |||
154 | /* Don't read disabled counters! */ | ||
155 | if (hwc->idx < 0) | ||
156 | return; | ||
157 | |||
158 | armpmu_event_update(event); | 152 | armpmu_event_update(event); |
159 | } | 153 | } |
160 | 154 | ||
@@ -207,8 +201,6 @@ armpmu_del(struct perf_event *event, int flags) | |||
207 | struct hw_perf_event *hwc = &event->hw; | 201 | struct hw_perf_event *hwc = &event->hw; |
208 | int idx = hwc->idx; | 202 | int idx = hwc->idx; |
209 | 203 | ||
210 | WARN_ON(idx < 0); | ||
211 | |||
212 | armpmu_stop(event, PERF_EF_UPDATE); | 204 | armpmu_stop(event, PERF_EF_UPDATE); |
213 | hw_events->events[idx] = NULL; | 205 | hw_events->events[idx] = NULL; |
214 | clear_bit(idx, hw_events->used_mask); | 206 | clear_bit(idx, hw_events->used_mask); |
@@ -358,7 +350,7 @@ __hw_perf_event_init(struct perf_event *event) | |||
358 | { | 350 | { |
359 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 351 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
360 | struct hw_perf_event *hwc = &event->hw; | 352 | struct hw_perf_event *hwc = &event->hw; |
361 | int mapping, err; | 353 | int mapping; |
362 | 354 | ||
363 | mapping = armpmu->map_event(event); | 355 | mapping = armpmu->map_event(event); |
364 | 356 | ||
@@ -407,14 +399,12 @@ __hw_perf_event_init(struct perf_event *event) | |||
407 | local64_set(&hwc->period_left, hwc->sample_period); | 399 | local64_set(&hwc->period_left, hwc->sample_period); |
408 | } | 400 | } |
409 | 401 | ||
410 | err = 0; | ||
411 | if (event->group_leader != event) { | 402 | if (event->group_leader != event) { |
412 | err = validate_group(event); | 403 | if (validate_group(event) != 0)
413 | if (err) | ||
414 | return -EINVAL; | 404 | return -EINVAL; |
415 | } | 405 | } |
416 | 406 | ||
417 | return err; | 407 | return 0; |
418 | } | 408 | } |
419 | 409 | ||
420 | static int armpmu_event_init(struct perf_event *event) | 410 | static int armpmu_event_init(struct perf_event *event) |
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 5f6620684e25..1f2740e3dbc0 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c | |||
@@ -147,7 +147,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu) | |||
147 | cpu_pmu->free_irq = cpu_pmu_free_irq; | 147 | cpu_pmu->free_irq = cpu_pmu_free_irq; |
148 | 148 | ||
149 | /* Ensure the PMU has sane values out of reset. */ | 149 | /* Ensure the PMU has sane values out of reset. */ |
150 | if (cpu_pmu && cpu_pmu->reset) | 150 | if (cpu_pmu->reset) |
151 | on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); | 151 | on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); |
152 | } | 152 | } |
153 | 153 | ||
@@ -201,48 +201,46 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = { | |||
201 | static int probe_current_pmu(struct arm_pmu *pmu) | 201 | static int probe_current_pmu(struct arm_pmu *pmu) |
202 | { | 202 | { |
203 | int cpu = get_cpu(); | 203 | int cpu = get_cpu(); |
204 | unsigned long cpuid = read_cpuid_id(); | 204 | unsigned long implementor = read_cpuid_implementor(); |
205 | unsigned long implementor = (cpuid & 0xFF000000) >> 24; | 205 | unsigned long part_number = read_cpuid_part_number(); |
206 | unsigned long part_number = (cpuid & 0xFFF0); | ||
207 | int ret = -ENODEV; | 206 | int ret = -ENODEV; |
208 | 207 | ||
209 | pr_info("probing PMU on CPU %d\n", cpu); | 208 | pr_info("probing PMU on CPU %d\n", cpu); |
210 | 209 | ||
211 | /* ARM Ltd CPUs. */ | 210 | /* ARM Ltd CPUs. */ |
212 | if (0x41 == implementor) { | 211 | if (implementor == ARM_CPU_IMP_ARM) { |
213 | switch (part_number) { | 212 | switch (part_number) { |
214 | case 0xB360: /* ARM1136 */ | 213 | case ARM_CPU_PART_ARM1136: |
215 | case 0xB560: /* ARM1156 */ | 214 | case ARM_CPU_PART_ARM1156: |
216 | case 0xB760: /* ARM1176 */ | 215 | case ARM_CPU_PART_ARM1176: |
217 | ret = armv6pmu_init(pmu); | 216 | ret = armv6pmu_init(pmu); |
218 | break; | 217 | break; |
219 | case 0xB020: /* ARM11mpcore */ | 218 | case ARM_CPU_PART_ARM11MPCORE: |
220 | ret = armv6mpcore_pmu_init(pmu); | 219 | ret = armv6mpcore_pmu_init(pmu); |
221 | break; | 220 | break; |
222 | case 0xC080: /* Cortex-A8 */ | 221 | case ARM_CPU_PART_CORTEX_A8: |
223 | ret = armv7_a8_pmu_init(pmu); | 222 | ret = armv7_a8_pmu_init(pmu); |
224 | break; | 223 | break; |
225 | case 0xC090: /* Cortex-A9 */ | 224 | case ARM_CPU_PART_CORTEX_A9: |
226 | ret = armv7_a9_pmu_init(pmu); | 225 | ret = armv7_a9_pmu_init(pmu); |
227 | break; | 226 | break; |
228 | case 0xC050: /* Cortex-A5 */ | 227 | case ARM_CPU_PART_CORTEX_A5: |
229 | ret = armv7_a5_pmu_init(pmu); | 228 | ret = armv7_a5_pmu_init(pmu); |
230 | break; | 229 | break; |
231 | case 0xC0F0: /* Cortex-A15 */ | 230 | case ARM_CPU_PART_CORTEX_A15: |
232 | ret = armv7_a15_pmu_init(pmu); | 231 | ret = armv7_a15_pmu_init(pmu); |
233 | break; | 232 | break; |
234 | case 0xC070: /* Cortex-A7 */ | 233 | case ARM_CPU_PART_CORTEX_A7: |
235 | ret = armv7_a7_pmu_init(pmu); | 234 | ret = armv7_a7_pmu_init(pmu); |
236 | break; | 235 | break; |
237 | } | 236 | } |
238 | /* Intel CPUs [xscale]. */ | 237 | /* Intel CPUs [xscale]. */ |
239 | } else if (0x69 == implementor) { | 238 | } else if (implementor == ARM_CPU_IMP_INTEL) { |
240 | part_number = (cpuid >> 13) & 0x7; | 239 | switch (xscale_cpu_arch_version()) { |
241 | switch (part_number) { | 240 | case ARM_CPU_XSCALE_ARCH_V1: |
242 | case 1: | ||
243 | ret = xscale1pmu_init(pmu); | 241 | ret = xscale1pmu_init(pmu); |
244 | break; | 242 | break; |
245 | case 2: | 243 | case ARM_CPU_XSCALE_ARCH_V2: |
246 | ret = xscale2pmu_init(pmu); | 244 | ret = xscale2pmu_init(pmu); |
247 | break; | 245 | break; |
248 | } | 246 | } |
@@ -279,17 +277,22 @@ static int cpu_pmu_device_probe(struct platform_device *pdev) | |||
279 | } | 277 | } |
280 | 278 | ||
281 | if (ret) { | 279 | if (ret) { |
282 | pr_info("failed to register PMU devices!"); | 280 | pr_info("failed to probe PMU!"); |
283 | kfree(pmu); | 281 | goto out_free; |
284 | return ret; | ||
285 | } | 282 | } |
286 | 283 | ||
287 | cpu_pmu = pmu; | 284 | cpu_pmu = pmu; |
288 | cpu_pmu->plat_device = pdev; | 285 | cpu_pmu->plat_device = pdev; |
289 | cpu_pmu_init(cpu_pmu); | 286 | cpu_pmu_init(cpu_pmu); |
290 | armpmu_register(cpu_pmu, PERF_TYPE_RAW); | 287 | ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW); |
291 | 288 | ||
292 | return 0; | 289 | if (!ret) |
290 | return 0; | ||
291 | |||
292 | out_free: | ||
293 | pr_info("failed to register PMU devices!"); | ||
294 | kfree(pmu); | ||
295 | return ret; | ||
293 | } | 296 | } |
294 | 297 | ||
295 | static struct platform_driver cpu_pmu_driver = { | 298 | static struct platform_driver cpu_pmu_driver = { |
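The read_cpuid_implementor()/read_cpuid_part_number() helpers replace the open-coded MIDR masking shown in the removed lines; going by those masks, they amount to roughly:

    /* Sketch matching the removed open-coded masks; the real helpers
     * and ARM_CPU_* constants are added to asm/cputype.h in this
     * series. */
    static inline unsigned int read_cpuid_implementor(void)
    {
            return (read_cpuid_id() & 0xFF000000) >> 24;
    }

    static inline unsigned int read_cpuid_part_number(void)
    {
            return read_cpuid_id() & 0xFFF0;
    }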
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index 041d0526a288..03664b0e8fa4 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c | |||
@@ -106,7 +106,7 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
106 | }, | 106 | }, |
107 | [C(OP_WRITE)] = { | 107 | [C(OP_WRITE)] = { |
108 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 108 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
109 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, | 109 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
110 | }, | 110 | }, |
111 | [C(OP_PREFETCH)] = { | 111 | [C(OP_PREFETCH)] = { |
112 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 112 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
@@ -259,7 +259,7 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
259 | }, | 259 | }, |
260 | [C(OP_WRITE)] = { | 260 | [C(OP_WRITE)] = { |
261 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 261 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
262 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, | 262 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
263 | }, | 263 | }, |
264 | [C(OP_PREFETCH)] = { | 264 | [C(OP_PREFETCH)] = { |
265 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 265 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 4fbc757d9cff..8c79a9e70b83 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c | |||
@@ -157,8 +157,8 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
157 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | 157 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, |
158 | }, | 158 | }, |
159 | [C(OP_WRITE)] = { | 159 | [C(OP_WRITE)] = { |
160 | [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, | 160 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
161 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | 161 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
162 | }, | 162 | }, |
163 | [C(OP_PREFETCH)] = { | 163 | [C(OP_PREFETCH)] = { |
164 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 164 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
@@ -282,7 +282,7 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
282 | }, | 282 | }, |
283 | [C(OP_WRITE)] = { | 283 | [C(OP_WRITE)] = { |
284 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 284 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
285 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | 285 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
286 | }, | 286 | }, |
287 | [C(OP_PREFETCH)] = { | 287 | [C(OP_PREFETCH)] = { |
288 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 288 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
@@ -399,8 +399,8 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
399 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | 399 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, |
400 | }, | 400 | }, |
401 | [C(OP_WRITE)] = { | 401 | [C(OP_WRITE)] = { |
402 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | 402 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
403 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | 403 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
404 | }, | 404 | }, |
405 | /* | 405 | /* |
406 | * The prefetch counters don't differentiate between the I | 406 | * The prefetch counters don't differentiate between the I |
@@ -527,8 +527,8 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
527 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | 527 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, |
528 | }, | 528 | }, |
529 | [C(OP_WRITE)] = { | 529 | [C(OP_WRITE)] = { |
530 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | 530 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
531 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | 531 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
532 | }, | 532 | }, |
533 | [C(OP_PREFETCH)] = { | 533 | [C(OP_PREFETCH)] = { |
534 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 534 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
@@ -651,8 +651,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
651 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | 651 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, |
652 | }, | 652 | }, |
653 | [C(OP_WRITE)] = { | 653 | [C(OP_WRITE)] = { |
654 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | 654 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
655 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | 655 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
656 | }, | 656 | }, |
657 | [C(OP_PREFETCH)] = { | 657 | [C(OP_PREFETCH)] = { |
658 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 658 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index 2b0fe30ec12e..63990c42fac9 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c | |||
@@ -83,7 +83,7 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
83 | }, | 83 | }, |
84 | [C(OP_WRITE)] = { | 84 | [C(OP_WRITE)] = { |
85 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 85 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
86 | [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, | 86 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
87 | }, | 87 | }, |
88 | [C(OP_PREFETCH)] = { | 88 | [C(OP_PREFETCH)] = { |
89 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 89 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index c6dec5fc20aa..047d3e40e470 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -172,14 +172,9 @@ static void default_idle(void) | |||
172 | local_irq_enable(); | 172 | local_irq_enable(); |
173 | } | 173 | } |
174 | 174 | ||
175 | void (*pm_idle)(void) = default_idle; | ||
176 | EXPORT_SYMBOL(pm_idle); | ||
177 | |||
178 | /* | 175 | /* |
179 | * The idle thread, has rather strange semantics for calling pm_idle, | 176 | * The idle thread. |
180 | * but this is what x86 does and we need to do the same, so that | 177 | * We always respect 'hlt_counter' to prevent low power idle. |
181 | * things like cpuidle get called in the same way. The only difference | ||
182 | * is that we always respect 'hlt_counter' to prevent low power idle. | ||
183 | */ | 178 | */ |
184 | void cpu_idle(void) | 179 | void cpu_idle(void) |
185 | { | 180 | { |
@@ -210,10 +205,10 @@ void cpu_idle(void) | |||
210 | } else if (!need_resched()) { | 205 | } else if (!need_resched()) { |
211 | stop_critical_timings(); | 206 | stop_critical_timings(); |
212 | if (cpuidle_idle_call()) | 207 | if (cpuidle_idle_call()) |
213 | pm_idle(); | 208 | default_idle(); |
214 | start_critical_timings(); | 209 | start_critical_timings(); |
215 | /* | 210 | /* |
216 | * pm_idle functions must always | 211 | * default_idle must always |
217 | * return with IRQs enabled. | 212 | * return with IRQs enabled. |
218 | */ | 213 | */ |
219 | WARN_ON(irqs_disabled()); | 214 | WARN_ON(irqs_disabled()); |
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c new file mode 100644 index 000000000000..36531643cc2c --- /dev/null +++ b/arch/arm/kernel/psci.c | |||
@@ -0,0 +1,211 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * Copyright (C) 2012 ARM Limited | ||
12 | * | ||
13 | * Author: Will Deacon <will.deacon@arm.com> | ||
14 | */ | ||
15 | |||
16 | #define pr_fmt(fmt) "psci: " fmt | ||
17 | |||
18 | #include <linux/init.h> | ||
19 | #include <linux/of.h> | ||
20 | |||
21 | #include <asm/compiler.h> | ||
22 | #include <asm/errno.h> | ||
23 | #include <asm/opcodes-sec.h> | ||
24 | #include <asm/opcodes-virt.h> | ||
25 | #include <asm/psci.h> | ||
26 | |||
27 | struct psci_operations psci_ops; | ||
28 | |||
29 | static int (*invoke_psci_fn)(u32, u32, u32, u32); | ||
30 | |||
31 | enum psci_function { | ||
32 | PSCI_FN_CPU_SUSPEND, | ||
33 | PSCI_FN_CPU_ON, | ||
34 | PSCI_FN_CPU_OFF, | ||
35 | PSCI_FN_MIGRATE, | ||
36 | PSCI_FN_MAX, | ||
37 | }; | ||
38 | |||
39 | static u32 psci_function_id[PSCI_FN_MAX]; | ||
40 | |||
41 | #define PSCI_RET_SUCCESS 0 | ||
42 | #define PSCI_RET_EOPNOTSUPP -1 | ||
43 | #define PSCI_RET_EINVAL -2 | ||
44 | #define PSCI_RET_EPERM -3 | ||
45 | |||
46 | static int psci_to_linux_errno(int errno) | ||
47 | { | ||
48 | switch (errno) { | ||
49 | case PSCI_RET_SUCCESS: | ||
50 | return 0; | ||
51 | case PSCI_RET_EOPNOTSUPP: | ||
52 | return -EOPNOTSUPP; | ||
53 | case PSCI_RET_EINVAL: | ||
54 | return -EINVAL; | ||
55 | case PSCI_RET_EPERM: | ||
56 | return -EPERM; | ||
57 | }; | ||
58 | |||
59 | return -EINVAL; | ||
60 | } | ||
61 | |||
62 | #define PSCI_POWER_STATE_ID_MASK 0xffff | ||
63 | #define PSCI_POWER_STATE_ID_SHIFT 0 | ||
64 | #define PSCI_POWER_STATE_TYPE_MASK 0x1 | ||
65 | #define PSCI_POWER_STATE_TYPE_SHIFT 16 | ||
66 | #define PSCI_POWER_STATE_AFFL_MASK 0x3 | ||
67 | #define PSCI_POWER_STATE_AFFL_SHIFT 24 | ||
68 | |||
69 | static u32 psci_power_state_pack(struct psci_power_state state) | ||
70 | { | ||
71 | return ((state.id & PSCI_POWER_STATE_ID_MASK) | ||
72 | << PSCI_POWER_STATE_ID_SHIFT) | | ||
73 | ((state.type & PSCI_POWER_STATE_TYPE_MASK) | ||
74 | << PSCI_POWER_STATE_TYPE_SHIFT) | | ||
75 | ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK) | ||
76 | << PSCI_POWER_STATE_AFFL_SHIFT); | ||
77 | } | ||
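For a concrete reading of psci_power_state_pack(): a state with id 1, type 1 (power-down) and affinity level 0 packs to 0x00010001. A short sketch, assuming the masks and shifts defined above:

	struct psci_power_state state = { .id = 1, .type = 1, .affinity_level = 0 };
	u32 ps = psci_power_state_pack(state);
	/* ps == (1 << 0) | (1 << 16) | (0 << 24) == 0x00010001 */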
78 | |||
79 | /* | ||
80 | * The following two functions are invoked via the invoke_psci_fn pointer | ||
81 | * and will not be inlined, allowing us to piggyback on the AAPCS. | ||
82 | */ | ||
83 | static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, | ||
84 | u32 arg2) | ||
85 | { | ||
86 | asm volatile( | ||
87 | __asmeq("%0", "r0") | ||
88 | __asmeq("%1", "r1") | ||
89 | __asmeq("%2", "r2") | ||
90 | __asmeq("%3", "r3") | ||
91 | __HVC(0) | ||
92 | : "+r" (function_id) | ||
93 | : "r" (arg0), "r" (arg1), "r" (arg2)); | ||
94 | |||
95 | return function_id; | ||
96 | } | ||
97 | |||
98 | static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, | ||
99 | u32 arg2) | ||
100 | { | ||
101 | asm volatile( | ||
102 | __asmeq("%0", "r0") | ||
103 | __asmeq("%1", "r1") | ||
104 | __asmeq("%2", "r2") | ||
105 | __asmeq("%3", "r3") | ||
106 | __SMC(0) | ||
107 | : "+r" (function_id) | ||
108 | : "r" (arg0), "r" (arg1), "r" (arg2)); | ||
109 | |||
110 | return function_id; | ||
111 | } | ||
112 | |||
113 | static int psci_cpu_suspend(struct psci_power_state state, | ||
114 | unsigned long entry_point) | ||
115 | { | ||
116 | int err; | ||
117 | u32 fn, power_state; | ||
118 | |||
119 | fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; | ||
120 | power_state = psci_power_state_pack(state); | ||
121 | err = invoke_psci_fn(fn, power_state, entry_point, 0); | ||
122 | return psci_to_linux_errno(err); | ||
123 | } | ||
124 | |||
125 | static int psci_cpu_off(struct psci_power_state state) | ||
126 | { | ||
127 | int err; | ||
128 | u32 fn, power_state; | ||
129 | |||
130 | fn = psci_function_id[PSCI_FN_CPU_OFF]; | ||
131 | power_state = psci_power_state_pack(state); | ||
132 | err = invoke_psci_fn(fn, power_state, 0, 0); | ||
133 | return psci_to_linux_errno(err); | ||
134 | } | ||
135 | |||
136 | static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) | ||
137 | { | ||
138 | int err; | ||
139 | u32 fn; | ||
140 | |||
141 | fn = psci_function_id[PSCI_FN_CPU_ON]; | ||
142 | err = invoke_psci_fn(fn, cpuid, entry_point, 0); | ||
143 | return psci_to_linux_errno(err); | ||
144 | } | ||
145 | |||
146 | static int psci_migrate(unsigned long cpuid) | ||
147 | { | ||
148 | int err; | ||
149 | u32 fn; | ||
150 | |||
151 | fn = psci_function_id[PSCI_FN_MIGRATE]; | ||
152 | err = invoke_psci_fn(fn, cpuid, 0, 0); | ||
153 | return psci_to_linux_errno(err); | ||
154 | } | ||
155 | |||
156 | static const struct of_device_id psci_of_match[] __initconst = { | ||
157 | { .compatible = "arm,psci", }, | ||
158 | {}, | ||
159 | }; | ||
160 | |||
161 | static int __init psci_init(void) | ||
162 | { | ||
163 | struct device_node *np; | ||
164 | const char *method; | ||
165 | u32 id; | ||
166 | |||
167 | np = of_find_matching_node(NULL, psci_of_match); | ||
168 | if (!np) | ||
169 | return 0; | ||
170 | |||
171 | pr_info("probing function IDs from device-tree\n"); | ||
172 | |||
173 | if (of_property_read_string(np, "method", &method)) { | ||
174 | pr_warning("missing \"method\" property\n"); | ||
175 | goto out_put_node; | ||
176 | } | ||
177 | |||
178 | if (!strcmp("hvc", method)) { | ||
179 | invoke_psci_fn = __invoke_psci_fn_hvc; | ||
180 | } else if (!strcmp("smc", method)) { | ||
181 | invoke_psci_fn = __invoke_psci_fn_smc; | ||
182 | } else { | ||
183 | pr_warning("invalid \"method\" property: %s\n", method); | ||
184 | goto out_put_node; | ||
185 | } | ||
186 | |||
187 | if (!of_property_read_u32(np, "cpu_suspend", &id)) { | ||
188 | psci_function_id[PSCI_FN_CPU_SUSPEND] = id; | ||
189 | psci_ops.cpu_suspend = psci_cpu_suspend; | ||
190 | } | ||
191 | |||
192 | if (!of_property_read_u32(np, "cpu_off", &id)) { | ||
193 | psci_function_id[PSCI_FN_CPU_OFF] = id; | ||
194 | psci_ops.cpu_off = psci_cpu_off; | ||
195 | } | ||
196 | |||
197 | if (!of_property_read_u32(np, "cpu_on", &id)) { | ||
198 | psci_function_id[PSCI_FN_CPU_ON] = id; | ||
199 | psci_ops.cpu_on = psci_cpu_on; | ||
200 | } | ||
201 | |||
202 | if (!of_property_read_u32(np, "migrate", &id)) { | ||
203 | psci_function_id[PSCI_FN_MIGRATE] = id; | ||
204 | psci_ops.migrate = psci_migrate; | ||
205 | } | ||
206 | |||
207 | out_put_node: | ||
208 | of_node_put(np); | ||
209 | return 0; | ||
210 | } | ||
211 | early_initcall(psci_init); | ||
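Once psci_init() has filled in psci_ops from the device tree, SMP code can hand CPU bring-up to the firmware. A hedged sketch (secondary_startup is an assumed entry symbol; cpu_logical_map comes from <asm/smp_plat.h>):

	#include <asm/psci.h>
	#include <asm/smp_plat.h>

	static int boot_cpu_via_psci(unsigned int cpu)
	{
		if (!psci_ops.cpu_on)
			return -ENODEV;	/* firmware did not advertise cpu_on */
		/* the entry point must be a physical address the firmware can jump to */
		return psci_ops.cpu_on(cpu_logical_map(cpu),
				       virt_to_phys(secondary_startup));
	}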
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index fc6692e2b603..bd6f56b9ec21 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c | |||
@@ -93,11 +93,11 @@ static void notrace update_sched_clock(void) | |||
93 | * detectable in cyc_to_fixed_sched_clock(). | 93 | * detectable in cyc_to_fixed_sched_clock(). |
94 | */ | 94 | */ |
95 | raw_local_irq_save(flags); | 95 | raw_local_irq_save(flags); |
96 | cd.epoch_cyc = cyc; | 96 | cd.epoch_cyc_copy = cyc; |
97 | smp_wmb(); | 97 | smp_wmb(); |
98 | cd.epoch_ns = ns; | 98 | cd.epoch_ns = ns; |
99 | smp_wmb(); | 99 | smp_wmb(); |
100 | cd.epoch_cyc_copy = cyc; | 100 | cd.epoch_cyc = cyc; |
101 | raw_local_irq_restore(flags); | 101 | raw_local_irq_restore(flags); |
102 | } | 102 | } |
103 | 103 | ||
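The swap above matters because the lockless reader treats epoch_cyc/epoch_cyc_copy as a consistency check: it retries until both match, so the writer must update the copy first and epoch_cyc last. Roughly, the read side at the time looked like:

	u64 epoch_ns;
	u32 epoch_cyc;

	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);	/* torn update: retry */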
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 84f4cbf652e5..365c8d92e2eb 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -125,18 +125,6 @@ void __init smp_init_cpus(void) | |||
125 | smp_ops.smp_init_cpus(); | 125 | smp_ops.smp_init_cpus(); |
126 | } | 126 | } |
127 | 127 | ||
128 | static void __init platform_smp_prepare_cpus(unsigned int max_cpus) | ||
129 | { | ||
130 | if (smp_ops.smp_prepare_cpus) | ||
131 | smp_ops.smp_prepare_cpus(max_cpus); | ||
132 | } | ||
133 | |||
134 | static void __cpuinit platform_secondary_init(unsigned int cpu) | ||
135 | { | ||
136 | if (smp_ops.smp_secondary_init) | ||
137 | smp_ops.smp_secondary_init(cpu); | ||
138 | } | ||
139 | |||
140 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) | 128 | int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) |
141 | { | 129 | { |
142 | if (smp_ops.smp_boot_secondary) | 130 | if (smp_ops.smp_boot_secondary) |
@@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu) | |||
154 | return 1; | 142 | return 1; |
155 | } | 143 | } |
156 | 144 | ||
157 | static void platform_cpu_die(unsigned int cpu) | ||
158 | { | ||
159 | if (smp_ops.cpu_die) | ||
160 | smp_ops.cpu_die(cpu); | ||
161 | } | ||
162 | |||
163 | static int platform_cpu_disable(unsigned int cpu) | 145 | static int platform_cpu_disable(unsigned int cpu) |
164 | { | 146 | { |
165 | if (smp_ops.cpu_disable) | 147 | if (smp_ops.cpu_disable) |
@@ -257,7 +239,8 @@ void __ref cpu_die(void) | |||
257 | * actual CPU shutdown procedure is at least platform (if not | 239 | * actual CPU shutdown procedure is at least platform (if not |
258 | * CPU) specific. | 240 | * CPU) specific. |
259 | */ | 241 | */ |
260 | platform_cpu_die(cpu); | 242 | if (smp_ops.cpu_die) |
243 | smp_ops.cpu_die(cpu); | ||
261 | 244 | ||
262 | /* | 245 | /* |
263 | * Do not return to the idle loop - jump back to the secondary | 246 | * Do not return to the idle loop - jump back to the secondary |
@@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void) | |||
324 | /* | 307 | /* |
325 | * Give the platform a chance to do its own initialisation. | 308 | * Give the platform a chance to do its own initialisation. |
326 | */ | 309 | */ |
327 | platform_secondary_init(cpu); | 310 | if (smp_ops.smp_secondary_init) |
311 | smp_ops.smp_secondary_init(cpu); | ||
328 | 312 | ||
329 | notify_cpu_starting(cpu); | 313 | notify_cpu_starting(cpu); |
330 | 314 | ||
@@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
399 | /* | 383 | /* |
400 | * Initialise the present map, which describes the set of CPUs | 384 | * Initialise the present map, which describes the set of CPUs |
401 | * actually populated at the present time. A platform should | 385 | * actually populated at the present time. A platform should |
402 | * re-initialize the map in platform_smp_prepare_cpus() if | 386 | * re-initialize the map in the platforms smp_prepare_cpus() |
403 | * present != possible (e.g. physical hotplug). | 387 | * if present != possible (e.g. physical hotplug). |
404 | */ | 388 | */ |
405 | init_cpu_present(cpu_possible_mask); | 389 | init_cpu_present(cpu_possible_mask); |
406 | 390 | ||
@@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
408 | * Initialise the SCU if there are more than one CPU | 392 | * Initialise the SCU if there are more than one CPU |
409 | * and let them know where to start. | 393 | * and let them know where to start. |
410 | */ | 394 | */ |
411 | platform_smp_prepare_cpus(max_cpus); | 395 | if (smp_ops.smp_prepare_cpus) |
396 | smp_ops.smp_prepare_cpus(max_cpus); | ||
412 | } | 397 | } |
413 | } | 398 | } |
414 | 399 | ||
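With the wrapper functions removed, every hook is optional: smp.c invokes an smp_ops entry only if the platform provided it. A platform fills the ops in roughly like this (handler names hypothetical):

	static struct smp_operations myplat_smp_ops __initdata = {
		.smp_init_cpus		= myplat_smp_init_cpus,
		.smp_prepare_cpus	= myplat_smp_prepare_cpus,
		.smp_secondary_init	= myplat_secondary_init,
		.smp_boot_secondary	= myplat_boot_secondary,
	#ifdef CONFIG_HOTPLUG_CPU
		.cpu_die		= myplat_cpu_die,
	#endif
	};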
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 49f335d301ba..ae0c7bb39ae8 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c | |||
@@ -31,7 +31,6 @@ static void __iomem *twd_base; | |||
31 | 31 | ||
32 | static struct clk *twd_clk; | 32 | static struct clk *twd_clk; |
33 | static unsigned long twd_timer_rate; | 33 | static unsigned long twd_timer_rate; |
34 | static bool common_setup_called; | ||
35 | static DEFINE_PER_CPU(bool, percpu_setup_called); | 34 | static DEFINE_PER_CPU(bool, percpu_setup_called); |
36 | 35 | ||
37 | static struct clock_event_device __percpu **twd_evt; | 36 | static struct clock_event_device __percpu **twd_evt; |
@@ -239,25 +238,28 @@ static irqreturn_t twd_handler(int irq, void *dev_id) | |||
239 | return IRQ_NONE; | 238 | return IRQ_NONE; |
240 | } | 239 | } |
241 | 240 | ||
242 | static struct clk *twd_get_clock(void) | 241 | static void twd_get_clock(struct device_node *np) |
243 | { | 242 | { |
244 | struct clk *clk; | ||
245 | int err; | 243 | int err; |
246 | 244 | ||
247 | clk = clk_get_sys("smp_twd", NULL); | 245 | if (np) |
248 | if (IS_ERR(clk)) { | 246 | twd_clk = of_clk_get(np, 0); |
249 | pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk)); | 247 | else |
250 | return clk; | 248 | twd_clk = clk_get_sys("smp_twd", NULL); |
249 | |||
250 | if (IS_ERR(twd_clk)) { | ||
251 | pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk)); | ||
252 | return; | ||
251 | } | 253 | } |
252 | 254 | ||
253 | err = clk_prepare_enable(clk); | 255 | err = clk_prepare_enable(twd_clk); |
254 | if (err) { | 256 | if (err) { |
255 | pr_err("smp_twd: clock failed to prepare+enable: %d\n", err); | 257 | pr_err("smp_twd: clock failed to prepare+enable: %d\n", err); |
256 | clk_put(clk); | 258 | clk_put(twd_clk); |
257 | return ERR_PTR(err); | 259 | return; |
258 | } | 260 | } |
259 | 261 | ||
260 | return clk; | 262 | twd_timer_rate = clk_get_rate(twd_clk); |
261 | } | 263 | } |
262 | 264 | ||
263 | /* | 265 | /* |
@@ -280,26 +282,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk) | |||
280 | } | 282 | } |
281 | per_cpu(percpu_setup_called, cpu) = true; | 283 | per_cpu(percpu_setup_called, cpu) = true; |
282 | 284 | ||
283 | /* | 285 | twd_calibrate_rate(); |
284 | * This stuff only need to be done once for the entire TWD cluster | ||
285 | * during the runtime of the system. | ||
286 | */ | ||
287 | if (!common_setup_called) { | ||
288 | twd_clk = twd_get_clock(); | ||
289 | |||
290 | /* | ||
291 | * We use IS_ERR_OR_NULL() here, because if the clock stubs | ||
292 | * are active we will get a valid clk reference which is | ||
293 | * however NULL and will return the rate 0. In that case we | ||
294 | * need to calibrate the rate instead. | ||
295 | */ | ||
296 | if (!IS_ERR_OR_NULL(twd_clk)) | ||
297 | twd_timer_rate = clk_get_rate(twd_clk); | ||
298 | else | ||
299 | twd_calibrate_rate(); | ||
300 | |||
301 | common_setup_called = true; | ||
302 | } | ||
303 | 286 | ||
304 | /* | 287 | /* |
305 | * The following is done once per CPU the first time .setup() is | 288 | * The following is done once per CPU the first time .setup() is |
@@ -330,7 +313,7 @@ static struct local_timer_ops twd_lt_ops __cpuinitdata = { | |||
330 | .stop = twd_timer_stop, | 313 | .stop = twd_timer_stop, |
331 | }; | 314 | }; |
332 | 315 | ||
333 | static int __init twd_local_timer_common_register(void) | 316 | static int __init twd_local_timer_common_register(struct device_node *np) |
334 | { | 317 | { |
335 | int err; | 318 | int err; |
336 | 319 | ||
@@ -350,6 +333,8 @@ static int __init twd_local_timer_common_register(void) | |||
350 | if (err) | 333 | if (err) |
351 | goto out_irq; | 334 | goto out_irq; |
352 | 335 | ||
336 | twd_get_clock(np); | ||
337 | |||
353 | return 0; | 338 | return 0; |
354 | 339 | ||
355 | out_irq: | 340 | out_irq: |
@@ -373,7 +358,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt) | |||
373 | if (!twd_base) | 358 | if (!twd_base) |
374 | return -ENOMEM; | 359 | return -ENOMEM; |
375 | 360 | ||
376 | return twd_local_timer_common_register(); | 361 | return twd_local_timer_common_register(NULL); |
377 | } | 362 | } |
378 | 363 | ||
379 | #ifdef CONFIG_OF | 364 | #ifdef CONFIG_OF |
@@ -405,7 +390,7 @@ void __init twd_local_timer_of_register(void) | |||
405 | goto out; | 390 | goto out; |
406 | } | 391 | } |
407 | 392 | ||
408 | err = twd_local_timer_common_register(); | 393 | err = twd_local_timer_common_register(np); |
409 | 394 | ||
410 | out: | 395 | out: |
411 | WARN(err, "twd_local_timer_of_register failed (%d)\n", err); | 396 | WARN(err, "twd_local_timer_of_register failed (%d)\n", err); |
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 11c1785bf63e..b571484e9f03 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -19,7 +19,11 @@ | |||
19 | ALIGN_FUNCTION(); \ | 19 | ALIGN_FUNCTION(); \ |
20 | VMLINUX_SYMBOL(__idmap_text_start) = .; \ | 20 | VMLINUX_SYMBOL(__idmap_text_start) = .; \ |
21 | *(.idmap.text) \ | 21 | *(.idmap.text) \ |
22 | VMLINUX_SYMBOL(__idmap_text_end) = .; | 22 | VMLINUX_SYMBOL(__idmap_text_end) = .; \ |
23 | ALIGN_FUNCTION(); \ | ||
24 | VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ | ||
25 | *(.hyp.idmap.text) \ | ||
26 | VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; | ||
23 | 27 | ||
24 | #ifdef CONFIG_HOTPLUG_CPU | 28 | #ifdef CONFIG_HOTPLUG_CPU |
25 | #define ARM_CPU_DISCARD(x) | 29 | #define ARM_CPU_DISCARD(x) |
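Code reaches the new .hyp.idmap.text output range by being placed in that input section at compile time, either with a .section directive in assembly or a section attribute in C. A hedged C-side sketch:

	/* place a function in the HYP identity-mapped text (sketch) */
	static void __attribute__((__section__(".hyp.idmap.text")))
	hyp_idmap_stub(void)
	{
		/* must be position-independent: it runs via its physical alias */
	}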
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig new file mode 100644 index 000000000000..05227cb57a7b --- /dev/null +++ b/arch/arm/kvm/Kconfig | |||
@@ -0,0 +1,56 @@ | |||
1 | # | ||
2 | # KVM configuration | ||
3 | # | ||
4 | |||
5 | source "virt/kvm/Kconfig" | ||
6 | |||
7 | menuconfig VIRTUALIZATION | ||
8 | bool "Virtualization" | ||
9 | ---help--- | ||
10 | Say Y here to get to see options for using your Linux host to run | ||
11 | other operating systems inside virtual machines (guests). | ||
12 | This option alone does not add any kernel code. | ||
13 | |||
14 | If you say N, all options in this submenu will be skipped and | ||
15 | disabled. | ||
16 | |||
17 | if VIRTUALIZATION | ||
18 | |||
19 | config KVM | ||
20 | bool "Kernel-based Virtual Machine (KVM) support" | ||
21 | select PREEMPT_NOTIFIERS | ||
22 | select ANON_INODES | ||
23 | select KVM_MMIO | ||
24 | select KVM_ARM_HOST | ||
25 | depends on ARM_VIRT_EXT && ARM_LPAE | ||
26 | ---help--- | ||
27 | Support hosting virtualized guest machines. You will also | ||
28 | need to select one or more of the processor modules below. | ||
29 | |||
30 | This module provides access to the hardware capabilities through | ||
31 | a character device node named /dev/kvm. | ||
32 | |||
33 | If unsure, say N. | ||
34 | |||
35 | config KVM_ARM_HOST | ||
36 | bool "KVM host support for ARM cpus." | ||
37 | depends on KVM | ||
38 | depends on MMU | ||
39 | select MMU_NOTIFIER | ||
40 | ---help--- | ||
41 | Provides host support for ARM processors. | ||
42 | |||
43 | config KVM_ARM_MAX_VCPUS | ||
44 | int "Number maximum supported virtual CPUs per VM" | ||
45 | depends on KVM_ARM_HOST | ||
46 | default 4 | ||
47 | help | ||
48 | Static maximum number of virtual CPUs supported per VM. | ||
49 | |||
50 | If you choose a high number, the vcpu structures will be quite | ||
51 | large, so only choose a reasonable number that you expect to | ||
52 | actually use. | ||
53 | |||
54 | source drivers/virtio/Kconfig | ||
55 | |||
56 | endif # VIRTUALIZATION | ||
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile new file mode 100644 index 000000000000..ea27987bd07f --- /dev/null +++ b/arch/arm/kvm/Makefile | |||
@@ -0,0 +1,21 @@ | |||
1 | # | ||
2 | # Makefile for Kernel-based Virtual Machine module | ||
3 | # | ||
4 | |||
5 | plus_virt := $(call as-instr,.arch_extension virt,+virt) | ||
6 | ifeq ($(plus_virt),+virt) | ||
7 | plus_virt_def := -DREQUIRES_VIRT=1 | ||
8 | endif | ||
9 | |||
10 | ccflags-y += -Ivirt/kvm -Iarch/arm/kvm | ||
11 | CFLAGS_arm.o := -I. $(plus_virt_def) | ||
12 | CFLAGS_mmu.o := -I. | ||
13 | |||
14 | AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt) | ||
15 | AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) | ||
16 | |||
17 | kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) | ||
18 | |||
19 | obj-y += kvm-arm.o init.o interrupts.o | ||
20 | obj-y += arm.o guest.o mmu.o emulate.o reset.o | ||
21 | obj-y += coproc.o coproc_a15.o mmio.o psci.o | ||
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c new file mode 100644 index 000000000000..2d30e3afdaf9 --- /dev/null +++ b/arch/arm/kvm/arm.c | |||
@@ -0,0 +1,1015 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/errno.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/kvm_host.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/vmalloc.h> | ||
24 | #include <linux/fs.h> | ||
25 | #include <linux/mman.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/kvm.h> | ||
28 | #include <trace/events/kvm.h> | ||
29 | |||
30 | #define CREATE_TRACE_POINTS | ||
31 | #include "trace.h" | ||
32 | |||
33 | #include <asm/unified.h> | ||
34 | #include <asm/uaccess.h> | ||
35 | #include <asm/ptrace.h> | ||
36 | #include <asm/mman.h> | ||
37 | #include <asm/cputype.h> | ||
38 | #include <asm/tlbflush.h> | ||
39 | #include <asm/cacheflush.h> | ||
40 | #include <asm/virt.h> | ||
41 | #include <asm/kvm_arm.h> | ||
42 | #include <asm/kvm_asm.h> | ||
43 | #include <asm/kvm_mmu.h> | ||
44 | #include <asm/kvm_emulate.h> | ||
45 | #include <asm/kvm_coproc.h> | ||
46 | #include <asm/kvm_psci.h> | ||
47 | #include <asm/opcodes.h> | ||
48 | |||
49 | #ifdef REQUIRES_VIRT | ||
50 | __asm__(".arch_extension virt"); | ||
51 | #endif | ||
52 | |||
53 | static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); | ||
54 | static struct vfp_hard_struct __percpu *kvm_host_vfp_state; | ||
55 | static unsigned long hyp_default_vectors; | ||
56 | |||
57 | /* The VMID used in the VTTBR */ | ||
58 | static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); | ||
59 | static u8 kvm_next_vmid; | ||
60 | static DEFINE_SPINLOCK(kvm_vmid_lock); | ||
61 | |||
62 | int kvm_arch_hardware_enable(void *garbage) | ||
63 | { | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) | ||
68 | { | ||
69 | return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; | ||
70 | } | ||
71 | |||
72 | void kvm_arch_hardware_disable(void *garbage) | ||
73 | { | ||
74 | } | ||
75 | |||
76 | int kvm_arch_hardware_setup(void) | ||
77 | { | ||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | void kvm_arch_hardware_unsetup(void) | ||
82 | { | ||
83 | } | ||
84 | |||
85 | void kvm_arch_check_processor_compat(void *rtn) | ||
86 | { | ||
87 | *(int *)rtn = 0; | ||
88 | } | ||
89 | |||
90 | void kvm_arch_sync_events(struct kvm *kvm) | ||
91 | { | ||
92 | } | ||
93 | |||
94 | /** | ||
95 | * kvm_arch_init_vm - initializes a VM data structure | ||
96 | * @kvm: pointer to the KVM struct | ||
97 | */ | ||
98 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | ||
99 | { | ||
100 | int ret = 0; | ||
101 | |||
102 | if (type) | ||
103 | return -EINVAL; | ||
104 | |||
105 | ret = kvm_alloc_stage2_pgd(kvm); | ||
106 | if (ret) | ||
107 | goto out_fail_alloc; | ||
108 | |||
109 | ret = create_hyp_mappings(kvm, kvm + 1); | ||
110 | if (ret) | ||
111 | goto out_free_stage2_pgd; | ||
112 | |||
113 | /* Mark the initial VMID generation invalid */ | ||
114 | kvm->arch.vmid_gen = 0; | ||
115 | |||
116 | return ret; | ||
117 | out_free_stage2_pgd: | ||
118 | kvm_free_stage2_pgd(kvm); | ||
119 | out_fail_alloc: | ||
120 | return ret; | ||
121 | } | ||
122 | |||
123 | int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) | ||
124 | { | ||
125 | return VM_FAULT_SIGBUS; | ||
126 | } | ||
127 | |||
128 | void kvm_arch_free_memslot(struct kvm_memory_slot *free, | ||
129 | struct kvm_memory_slot *dont) | ||
130 | { | ||
131 | } | ||
132 | |||
133 | int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) | ||
134 | { | ||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | /** | ||
139 | * kvm_arch_destroy_vm - destroy the VM data structure | ||
140 | * @kvm: pointer to the KVM struct | ||
141 | */ | ||
142 | void kvm_arch_destroy_vm(struct kvm *kvm) | ||
143 | { | ||
144 | int i; | ||
145 | |||
146 | kvm_free_stage2_pgd(kvm); | ||
147 | |||
148 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | ||
149 | if (kvm->vcpus[i]) { | ||
150 | kvm_arch_vcpu_free(kvm->vcpus[i]); | ||
151 | kvm->vcpus[i] = NULL; | ||
152 | } | ||
153 | } | ||
154 | } | ||
155 | |||
156 | int kvm_dev_ioctl_check_extension(long ext) | ||
157 | { | ||
158 | int r; | ||
159 | switch (ext) { | ||
160 | case KVM_CAP_USER_MEMORY: | ||
161 | case KVM_CAP_SYNC_MMU: | ||
162 | case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: | ||
163 | case KVM_CAP_ONE_REG: | ||
164 | case KVM_CAP_ARM_PSCI: | ||
165 | r = 1; | ||
166 | break; | ||
167 | case KVM_CAP_COALESCED_MMIO: | ||
168 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | ||
169 | break; | ||
170 | case KVM_CAP_NR_VCPUS: | ||
171 | r = num_online_cpus(); | ||
172 | break; | ||
173 | case KVM_CAP_MAX_VCPUS: | ||
174 | r = KVM_MAX_VCPUS; | ||
175 | break; | ||
176 | default: | ||
177 | r = 0; | ||
178 | break; | ||
179 | } | ||
180 | return r; | ||
181 | } | ||
182 | |||
183 | long kvm_arch_dev_ioctl(struct file *filp, | ||
184 | unsigned int ioctl, unsigned long arg) | ||
185 | { | ||
186 | return -EINVAL; | ||
187 | } | ||
188 | |||
189 | int kvm_arch_set_memory_region(struct kvm *kvm, | ||
190 | struct kvm_userspace_memory_region *mem, | ||
191 | struct kvm_memory_slot old, | ||
192 | int user_alloc) | ||
193 | { | ||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | ||
198 | struct kvm_memory_slot *memslot, | ||
199 | struct kvm_memory_slot old, | ||
200 | struct kvm_userspace_memory_region *mem, | ||
201 | int user_alloc) | ||
202 | { | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | void kvm_arch_commit_memory_region(struct kvm *kvm, | ||
207 | struct kvm_userspace_memory_region *mem, | ||
208 | struct kvm_memory_slot old, | ||
209 | int user_alloc) | ||
210 | { | ||
211 | } | ||
212 | |||
213 | void kvm_arch_flush_shadow_all(struct kvm *kvm) | ||
214 | { | ||
215 | } | ||
216 | |||
217 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | ||
218 | struct kvm_memory_slot *slot) | ||
219 | { | ||
220 | } | ||
221 | |||
222 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | ||
223 | { | ||
224 | int err; | ||
225 | struct kvm_vcpu *vcpu; | ||
226 | |||
227 | vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | ||
228 | if (!vcpu) { | ||
229 | err = -ENOMEM; | ||
230 | goto out; | ||
231 | } | ||
232 | |||
233 | err = kvm_vcpu_init(vcpu, kvm, id); | ||
234 | if (err) | ||
235 | goto free_vcpu; | ||
236 | |||
237 | err = create_hyp_mappings(vcpu, vcpu + 1); | ||
238 | if (err) | ||
239 | goto vcpu_uninit; | ||
240 | |||
241 | return vcpu; | ||
242 | vcpu_uninit: | ||
243 | kvm_vcpu_uninit(vcpu); | ||
244 | free_vcpu: | ||
245 | kmem_cache_free(kvm_vcpu_cache, vcpu); | ||
246 | out: | ||
247 | return ERR_PTR(err); | ||
248 | } | ||
249 | |||
250 | int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) | ||
251 | { | ||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) | ||
256 | { | ||
257 | kvm_mmu_free_memory_caches(vcpu); | ||
258 | kmem_cache_free(kvm_vcpu_cache, vcpu); | ||
259 | } | ||
260 | |||
261 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | ||
262 | { | ||
263 | kvm_arch_vcpu_free(vcpu); | ||
264 | } | ||
265 | |||
266 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | ||
267 | { | ||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | int __attribute_const__ kvm_target_cpu(void) | ||
272 | { | ||
273 | unsigned long implementor = read_cpuid_implementor(); | ||
274 | unsigned long part_number = read_cpuid_part_number(); | ||
275 | |||
276 | if (implementor != ARM_CPU_IMP_ARM) | ||
277 | return -EINVAL; | ||
278 | |||
279 | switch (part_number) { | ||
280 | case ARM_CPU_PART_CORTEX_A15: | ||
281 | return KVM_ARM_TARGET_CORTEX_A15; | ||
282 | default: | ||
283 | return -EINVAL; | ||
284 | } | ||
285 | } | ||
286 | |||
287 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | ||
288 | { | ||
289 | /* Force users to call KVM_ARM_VCPU_INIT */ | ||
290 | vcpu->arch.target = -1; | ||
291 | return 0; | ||
292 | } | ||
293 | |||
294 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | ||
295 | { | ||
296 | } | ||
297 | |||
298 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
299 | { | ||
300 | vcpu->cpu = cpu; | ||
301 | vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state); | ||
302 | |||
303 | /* | ||
304 | * Check whether this vcpu requires the cache to be flushed on | ||
305 | * this physical CPU. This is a consequence of doing dcache | ||
306 | * operations by set/way on this vcpu. We do it here to be in | ||
307 | * a non-preemptible section. | ||
308 | */ | ||
309 | if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush)) | ||
310 | flush_cache_all(); /* We'd really want v7_flush_dcache_all() */ | ||
311 | } | ||
312 | |||
313 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | ||
314 | { | ||
315 | } | ||
316 | |||
317 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | ||
318 | struct kvm_guest_debug *dbg) | ||
319 | { | ||
320 | return -EINVAL; | ||
321 | } | ||
322 | |||
323 | |||
324 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | ||
325 | struct kvm_mp_state *mp_state) | ||
326 | { | ||
327 | return -EINVAL; | ||
328 | } | ||
329 | |||
330 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | ||
331 | struct kvm_mp_state *mp_state) | ||
332 | { | ||
333 | return -EINVAL; | ||
334 | } | ||
335 | |||
336 | /** | ||
337 | * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled | ||
338 | * @v: The VCPU pointer | ||
339 | * | ||
340 | * If the guest CPU is not waiting for interrupts or an interrupt line is | ||
341 | * asserted, the CPU is by definition runnable. | ||
342 | */ | ||
343 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) | ||
344 | { | ||
345 | return !!v->arch.irq_lines; | ||
346 | } | ||
347 | |||
348 | /* Just ensure a guest exit from a particular CPU */ | ||
349 | static void exit_vm_noop(void *info) | ||
350 | { | ||
351 | } | ||
352 | |||
353 | void force_vm_exit(const cpumask_t *mask) | ||
354 | { | ||
355 | smp_call_function_many(mask, exit_vm_noop, NULL, true); | ||
356 | } | ||
357 | |||
358 | /** | ||
359 | * need_new_vmid_gen - check that the VMID is still valid | ||
360 | * @kvm: The VM's VMID to check | ||
361 | * | ||
362 | * return true if there is a new generation of VMIDs being used | ||
363 | * | ||
364 | * The hardware supports only 256 values with the value zero reserved for the | ||
365 | * host, so we check if an assigned value belongs to a previous generation, | ||
366 | * which requires us to assign a new value. If we're the first to use a | ||
367 | * VMID for the new generation, we must flush necessary caches and TLBs on all | ||
368 | * CPUs. | ||
369 | */ | ||
370 | static bool need_new_vmid_gen(struct kvm *kvm) | ||
371 | { | ||
372 | return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen)); | ||
373 | } | ||
374 | |||
375 | /** | ||
376 | * update_vttbr - Update the VTTBR with a valid VMID before the guest runs | ||
377 | * @kvm: The guest that we are about to run | ||
378 | * | ||
379 | * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the | ||
380 | * VM has a valid VMID, otherwise assigns a new one and flushes corresponding | ||
381 | * caches and TLBs. | ||
382 | */ | ||
383 | static void update_vttbr(struct kvm *kvm) | ||
384 | { | ||
385 | phys_addr_t pgd_phys; | ||
386 | u64 vmid; | ||
387 | |||
388 | if (!need_new_vmid_gen(kvm)) | ||
389 | return; | ||
390 | |||
391 | spin_lock(&kvm_vmid_lock); | ||
392 | |||
393 | /* | ||
394 | * We need to re-check the vmid_gen here to ensure that if another vcpu | ||
395 | * already allocated a valid vmid for this vm, then this vcpu should | ||
396 | * use the same vmid. | ||
397 | */ | ||
398 | if (!need_new_vmid_gen(kvm)) { | ||
399 | spin_unlock(&kvm_vmid_lock); | ||
400 | return; | ||
401 | } | ||
402 | |||
403 | /* First user of a new VMID generation? */ | ||
404 | if (unlikely(kvm_next_vmid == 0)) { | ||
405 | atomic64_inc(&kvm_vmid_gen); | ||
406 | kvm_next_vmid = 1; | ||
407 | |||
408 | /* | ||
409 | * On SMP we know no other CPUs can use this CPU's or each | ||
410 | * other's VMID after force_vm_exit returns since the | ||
411 | * kvm_vmid_lock blocks them from reentry to the guest. | ||
412 | */ | ||
413 | force_vm_exit(cpu_all_mask); | ||
414 | /* | ||
415 | * Now broadcast TLB + ICACHE invalidation over the inner | ||
416 | * shareable domain to make sure all data structures are | ||
417 | * clean. | ||
418 | */ | ||
419 | kvm_call_hyp(__kvm_flush_vm_context); | ||
420 | } | ||
421 | |||
422 | kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); | ||
423 | kvm->arch.vmid = kvm_next_vmid; | ||
424 | kvm_next_vmid++; | ||
425 | |||
426 | /* update vttbr to be used with the new vmid */ | ||
427 | pgd_phys = virt_to_phys(kvm->arch.pgd); | ||
428 | vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; | ||
429 | kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK; | ||
430 | kvm->arch.vttbr |= vmid; | ||
431 | |||
432 | spin_unlock(&kvm_vmid_lock); | ||
433 | } | ||
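To make the VTTBR composition concrete: with VTTBR_VMID_SHIFT == 48 (the LPAE layout assumed here) and VMID 5, a stage-2 pgd at 0x80000000 yields:

	u64 pgd_phys = 0x80000000ULL;			/* example address */
	u64 vttbr = (pgd_phys & VTTBR_BADDR_MASK) |
		    ((u64)5 << VTTBR_VMID_SHIFT);
	/* vttbr == 0x0005000080000000 */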
434 | |||
435 | static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
436 | { | ||
437 | /* SVC called from Hyp mode should never get here */ | ||
438 | kvm_debug("SVC called from Hyp mode shouldn't go here\n"); | ||
439 | BUG(); | ||
440 | return -EINVAL; /* Squash warning */ | ||
441 | } | ||
442 | |||
443 | static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
444 | { | ||
445 | trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), | ||
446 | vcpu->arch.hsr & HSR_HVC_IMM_MASK); | ||
447 | |||
448 | if (kvm_psci_call(vcpu)) | ||
449 | return 1; | ||
450 | |||
451 | kvm_inject_undefined(vcpu); | ||
452 | return 1; | ||
453 | } | ||
454 | |||
455 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
456 | { | ||
457 | if (kvm_psci_call(vcpu)) | ||
458 | return 1; | ||
459 | |||
460 | kvm_inject_undefined(vcpu); | ||
461 | return 1; | ||
462 | } | ||
463 | |||
464 | static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
465 | { | ||
466 | /* The hypervisor should never cause aborts */ | ||
467 | kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", | ||
468 | vcpu->arch.hxfar, vcpu->arch.hsr); | ||
469 | return -EFAULT; | ||
470 | } | ||
471 | |||
472 | static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
473 | { | ||
474 | /* This is either an error in the world-switch code or an external abort */ | ||
475 | kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", | ||
476 | vcpu->arch.hxfar, vcpu->arch.hsr); | ||
477 | return -EFAULT; | ||
478 | } | ||
479 | |||
480 | typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); | ||
481 | static exit_handle_fn arm_exit_handlers[] = { | ||
482 | [HSR_EC_WFI] = kvm_handle_wfi, | ||
483 | [HSR_EC_CP15_32] = kvm_handle_cp15_32, | ||
484 | [HSR_EC_CP15_64] = kvm_handle_cp15_64, | ||
485 | [HSR_EC_CP14_MR] = kvm_handle_cp14_access, | ||
486 | [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store, | ||
487 | [HSR_EC_CP14_64] = kvm_handle_cp14_access, | ||
488 | [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access, | ||
489 | [HSR_EC_CP10_ID] = kvm_handle_cp10_id, | ||
490 | [HSR_EC_SVC_HYP] = handle_svc_hyp, | ||
491 | [HSR_EC_HVC] = handle_hvc, | ||
492 | [HSR_EC_SMC] = handle_smc, | ||
493 | [HSR_EC_IABT] = kvm_handle_guest_abort, | ||
494 | [HSR_EC_IABT_HYP] = handle_pabt_hyp, | ||
495 | [HSR_EC_DABT] = kvm_handle_guest_abort, | ||
496 | [HSR_EC_DABT_HYP] = handle_dabt_hyp, | ||
497 | }; | ||
498 | |||
499 | /* | ||
500 | * A conditional instruction is allowed to trap, even though it | ||
501 | * wouldn't be executed. So let's re-implement the hardware, in | ||
502 | * software! | ||
503 | */ | ||
504 | static bool kvm_condition_valid(struct kvm_vcpu *vcpu) | ||
505 | { | ||
506 | unsigned long cpsr, cond, insn; | ||
507 | |||
508 | /* | ||
509 | * Exception Code 0 can only happen if we set HCR.TGE to 1, to | ||
510 | * catch undefined instructions, and then we won't get past | ||
511 | * the arm_exit_handlers test anyway. | ||
512 | */ | ||
513 | BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0); | ||
514 | |||
515 | /* Top two bits non-zero? Unconditional. */ | ||
516 | if (vcpu->arch.hsr >> 30) | ||
517 | return true; | ||
518 | |||
519 | cpsr = *vcpu_cpsr(vcpu); | ||
520 | |||
521 | /* Is condition field valid? */ | ||
522 | if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT) | ||
523 | cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT; | ||
524 | else { | ||
525 | /* This can happen in Thumb mode: examine IT state. */ | ||
526 | unsigned long it; | ||
527 | |||
528 | it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); | ||
529 | |||
530 | /* it == 0 => unconditional. */ | ||
531 | if (it == 0) | ||
532 | return true; | ||
533 | |||
534 | /* The cond for this insn works out as the top 4 bits. */ | ||
535 | cond = (it >> 4); | ||
536 | } | ||
537 | |||
538 | /* Shift makes it look like an ARM-mode instruction */ | ||
539 | insn = cond << 28; | ||
540 | return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL; | ||
541 | } | ||
542 | |||
543 | /* | ||
544 | * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on | ||
545 | * proper exit to QEMU. | ||
546 | */ | ||
547 | static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
548 | int exception_index) | ||
549 | { | ||
550 | unsigned long hsr_ec; | ||
551 | |||
552 | switch (exception_index) { | ||
553 | case ARM_EXCEPTION_IRQ: | ||
554 | return 1; | ||
555 | case ARM_EXCEPTION_UNDEFINED: | ||
556 | kvm_err("Undefined exception in Hyp mode at: %#08x\n", | ||
557 | vcpu->arch.hyp_pc); | ||
558 | BUG(); | ||
559 | panic("KVM: Hypervisor undefined exception!\n"); | ||
560 | case ARM_EXCEPTION_DATA_ABORT: | ||
561 | case ARM_EXCEPTION_PREF_ABORT: | ||
562 | case ARM_EXCEPTION_HVC: | ||
563 | hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT; | ||
564 | |||
565 | if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) | ||
566 | || !arm_exit_handlers[hsr_ec]) { | ||
567 | kvm_err("Unkown exception class: %#08lx, " | ||
568 | "hsr: %#08x\n", hsr_ec, | ||
569 | (unsigned int)vcpu->arch.hsr); | ||
570 | BUG(); | ||
571 | } | ||
572 | |||
573 | /* | ||
574 | * See ARM ARM B1.14.1: "Hyp traps on instructions | ||
575 | * that fail their condition code check" | ||
576 | */ | ||
577 | if (!kvm_condition_valid(vcpu)) { | ||
578 | bool is_wide = vcpu->arch.hsr & HSR_IL; | ||
579 | kvm_skip_instr(vcpu, is_wide); | ||
580 | return 1; | ||
581 | } | ||
582 | |||
583 | return arm_exit_handlers[hsr_ec](vcpu, run); | ||
584 | default: | ||
585 | kvm_pr_unimpl("Unsupported exception type: %d", | ||
586 | exception_index); | ||
587 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
588 | return 0; | ||
589 | } | ||
590 | } | ||
591 | |||
592 | static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) | ||
593 | { | ||
594 | if (likely(vcpu->arch.has_run_once)) | ||
595 | return 0; | ||
596 | |||
597 | vcpu->arch.has_run_once = true; | ||
598 | |||
599 | /* | ||
600 | * Handle the "start in power-off" case by calling into the | ||
601 | * PSCI code. | ||
602 | */ | ||
603 | if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) { | ||
604 | *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF; | ||
605 | kvm_psci_call(vcpu); | ||
606 | } | ||
607 | |||
608 | return 0; | ||
609 | } | ||
610 | |||
611 | static void vcpu_pause(struct kvm_vcpu *vcpu) | ||
612 | { | ||
613 | wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); | ||
614 | |||
615 | wait_event_interruptible(*wq, !vcpu->arch.pause); | ||
616 | } | ||
617 | |||
618 | /** | ||
619 | * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code | ||
620 | * @vcpu: The VCPU pointer | ||
621 | * @run: The kvm_run structure pointer used for userspace state exchange | ||
622 | * | ||
623 | * This function is called through the VCPU_RUN ioctl called from user space. It | ||
624 | * will execute VM code in a loop until the time slice for the process is used up | ||
625 | * or some emulation is needed from user space in which case the function will | ||
626 | * return with return value 0 and with the kvm_run structure filled in with the | ||
627 | * required data for the requested emulation. | ||
628 | */ | ||
629 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
630 | { | ||
631 | int ret; | ||
632 | sigset_t sigsaved; | ||
633 | |||
634 | /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ | ||
635 | if (unlikely(vcpu->arch.target < 0)) | ||
636 | return -ENOEXEC; | ||
637 | |||
638 | ret = kvm_vcpu_first_run_init(vcpu); | ||
639 | if (ret) | ||
640 | return ret; | ||
641 | |||
642 | if (run->exit_reason == KVM_EXIT_MMIO) { | ||
643 | ret = kvm_handle_mmio_return(vcpu, vcpu->run); | ||
644 | if (ret) | ||
645 | return ret; | ||
646 | } | ||
647 | |||
648 | if (vcpu->sigset_active) | ||
649 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | ||
650 | |||
651 | ret = 1; | ||
652 | run->exit_reason = KVM_EXIT_UNKNOWN; | ||
653 | while (ret > 0) { | ||
654 | /* | ||
655 | * Check conditions before entering the guest | ||
656 | */ | ||
657 | cond_resched(); | ||
658 | |||
659 | update_vttbr(vcpu->kvm); | ||
660 | |||
661 | if (vcpu->arch.pause) | ||
662 | vcpu_pause(vcpu); | ||
663 | |||
664 | local_irq_disable(); | ||
665 | |||
666 | /* | ||
667 | * Re-check atomic conditions | ||
668 | */ | ||
669 | if (signal_pending(current)) { | ||
670 | ret = -EINTR; | ||
671 | run->exit_reason = KVM_EXIT_INTR; | ||
672 | } | ||
673 | |||
674 | if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) { | ||
675 | local_irq_enable(); | ||
676 | continue; | ||
677 | } | ||
678 | |||
679 | /************************************************************** | ||
680 | * Enter the guest | ||
681 | */ | ||
682 | trace_kvm_entry(*vcpu_pc(vcpu)); | ||
683 | kvm_guest_enter(); | ||
684 | vcpu->mode = IN_GUEST_MODE; | ||
685 | |||
686 | ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); | ||
687 | |||
688 | vcpu->mode = OUTSIDE_GUEST_MODE; | ||
689 | vcpu->arch.last_pcpu = smp_processor_id(); | ||
690 | kvm_guest_exit(); | ||
691 | trace_kvm_exit(*vcpu_pc(vcpu)); | ||
692 | /* | ||
693 | * We may have taken a host interrupt in HYP mode (ie | ||
694 | * while executing the guest). This interrupt is still | ||
695 | * pending, as we haven't serviced it yet! | ||
696 | * | ||
697 | * We're now back in SVC mode, with interrupts | ||
698 | * disabled. Enabling the interrupts now will have | ||
699 | * the effect of taking the interrupt again, in SVC | ||
700 | * mode this time. | ||
701 | */ | ||
702 | local_irq_enable(); | ||
703 | |||
704 | /* | ||
705 | * Back from guest | ||
706 | *************************************************************/ | ||
707 | |||
708 | ret = handle_exit(vcpu, run, ret); | ||
709 | } | ||
710 | |||
711 | if (vcpu->sigset_active) | ||
712 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | ||
713 | return ret; | ||
714 | } | ||
715 | |||
716 | static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) | ||
717 | { | ||
718 | int bit_index; | ||
719 | bool set; | ||
720 | unsigned long *ptr; | ||
721 | |||
722 | if (number == KVM_ARM_IRQ_CPU_IRQ) | ||
723 | bit_index = __ffs(HCR_VI); | ||
724 | else /* KVM_ARM_IRQ_CPU_FIQ */ | ||
725 | bit_index = __ffs(HCR_VF); | ||
726 | |||
727 | ptr = (unsigned long *)&vcpu->arch.irq_lines; | ||
728 | if (level) | ||
729 | set = test_and_set_bit(bit_index, ptr); | ||
730 | else | ||
731 | set = test_and_clear_bit(bit_index, ptr); | ||
732 | |||
733 | /* | ||
734 | * If we didn't change anything, no need to wake up or kick other CPUs | ||
735 | */ | ||
736 | if (set == level) | ||
737 | return 0; | ||
738 | |||
739 | /* | ||
740 | * The vcpu irq_lines field was updated, wake up sleeping VCPUs and | ||
741 | * trigger a world-switch round on the running physical CPU to set the | ||
742 | * virtual IRQ/FIQ fields in the HCR appropriately. | ||
743 | */ | ||
744 | kvm_vcpu_kick(vcpu); | ||
745 | |||
746 | return 0; | ||
747 | } | ||
748 | |||
749 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level) | ||
750 | { | ||
751 | u32 irq = irq_level->irq; | ||
752 | unsigned int irq_type, vcpu_idx, irq_num; | ||
753 | int nrcpus = atomic_read(&kvm->online_vcpus); | ||
754 | struct kvm_vcpu *vcpu = NULL; | ||
755 | bool level = irq_level->level; | ||
756 | |||
757 | irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK; | ||
758 | vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK; | ||
759 | irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK; | ||
760 | |||
761 | trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level); | ||
762 | |||
763 | if (irq_type != KVM_ARM_IRQ_TYPE_CPU) | ||
764 | return -EINVAL; | ||
765 | |||
766 | if (vcpu_idx >= nrcpus) | ||
767 | return -EINVAL; | ||
768 | |||
769 | vcpu = kvm_get_vcpu(kvm, vcpu_idx); | ||
770 | if (!vcpu) | ||
771 | return -EINVAL; | ||
772 | |||
773 | if (irq_num > KVM_ARM_IRQ_CPU_FIQ) | ||
774 | return -EINVAL; | ||
775 | |||
776 | return vcpu_interrupt_line(vcpu, irq_num, level); | ||
777 | } | ||
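From userspace, the irq field mirrors the decoding above: type, target VCPU index and line number are packed with the same shifts. A hedged sketch of raising the virtual IRQ line on VCPU 0 (vm_fd assumed to be an open VM file descriptor):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	struct kvm_irq_level irq_level = {
		.irq   = (KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT) |
			 (0 << KVM_ARM_IRQ_VCPU_SHIFT) |	/* vcpu index 0 */
			 KVM_ARM_IRQ_CPU_IRQ,			/* IRQ, not FIQ */
		.level = 1,					/* assert the line */
	};
	ioctl(vm_fd, KVM_IRQ_LINE, &irq_level);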
778 | |||
779 | long kvm_arch_vcpu_ioctl(struct file *filp, | ||
780 | unsigned int ioctl, unsigned long arg) | ||
781 | { | ||
782 | struct kvm_vcpu *vcpu = filp->private_data; | ||
783 | void __user *argp = (void __user *)arg; | ||
784 | |||
785 | switch (ioctl) { | ||
786 | case KVM_ARM_VCPU_INIT: { | ||
787 | struct kvm_vcpu_init init; | ||
788 | |||
789 | if (copy_from_user(&init, argp, sizeof(init))) | ||
790 | return -EFAULT; | ||
791 | |||
792 | return kvm_vcpu_set_target(vcpu, &init); | ||
793 | |||
794 | } | ||
795 | case KVM_SET_ONE_REG: | ||
796 | case KVM_GET_ONE_REG: { | ||
797 | struct kvm_one_reg reg; | ||
798 | if (copy_from_user(®, argp, sizeof(reg))) | ||
799 | return -EFAULT; | ||
800 | if (ioctl == KVM_SET_ONE_REG) | ||
801 | return kvm_arm_set_reg(vcpu, ®); | ||
802 | else | ||
803 | return kvm_arm_get_reg(vcpu, ®); | ||
804 | } | ||
805 | case KVM_GET_REG_LIST: { | ||
806 | struct kvm_reg_list __user *user_list = argp; | ||
807 | struct kvm_reg_list reg_list; | ||
808 | unsigned n; | ||
809 | |||
810 | if (copy_from_user(®_list, user_list, sizeof(reg_list))) | ||
811 | return -EFAULT; | ||
812 | n = reg_list.n; | ||
813 | reg_list.n = kvm_arm_num_regs(vcpu); | ||
814 | if (copy_to_user(user_list, ®_list, sizeof(reg_list))) | ||
815 | return -EFAULT; | ||
816 | if (n < reg_list.n) | ||
817 | return -E2BIG; | ||
818 | return kvm_arm_copy_reg_indices(vcpu, user_list->reg); | ||
819 | } | ||
820 | default: | ||
821 | return -EINVAL; | ||
822 | } | ||
823 | } | ||
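KVM_ARM_VCPU_INIT is the mandatory first step for userspace: until the target is set, kvm_arch_vcpu_ioctl_run() bails out with -ENOEXEC. A minimal sketch (vcpu_fd assumed to be an open VCPU file descriptor):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	struct kvm_vcpu_init init = {
		.target = KVM_ARM_TARGET_CORTEX_A15,
		/* .features[] left zeroed: VCPU starts powered on */
	};
	if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0)
		perror("KVM_ARM_VCPU_INIT");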
824 | |||
825 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) | ||
826 | { | ||
827 | return -EINVAL; | ||
828 | } | ||
829 | |||
830 | long kvm_arch_vm_ioctl(struct file *filp, | ||
831 | unsigned int ioctl, unsigned long arg) | ||
832 | { | ||
833 | return -EINVAL; | ||
834 | } | ||
835 | |||
836 | static void cpu_init_hyp_mode(void *vector) | ||
837 | { | ||
838 | unsigned long long pgd_ptr; | ||
839 | unsigned long pgd_low, pgd_high; | ||
840 | unsigned long hyp_stack_ptr; | ||
841 | unsigned long stack_page; | ||
842 | unsigned long vector_ptr; | ||
843 | |||
844 | /* Switch from the HYP stub to our own HYP init vector */ | ||
845 | __hyp_set_vectors((unsigned long)vector); | ||
846 | |||
847 | pgd_ptr = (unsigned long long)kvm_mmu_get_httbr(); | ||
848 | pgd_low = (pgd_ptr & ((1ULL << 32) - 1)); | ||
849 | pgd_high = (pgd_ptr >> 32ULL); | ||
850 | stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); | ||
851 | hyp_stack_ptr = stack_page + PAGE_SIZE; | ||
852 | vector_ptr = (unsigned long)__kvm_hyp_vector; | ||
853 | |||
854 | /* | ||
855 | * Call initialization code, and switch to the full blown | ||
856 | * HYP code. The init code doesn't need to preserve these registers as | ||
857 | * r1-r3 and r12 are already callee save according to the AAPCS. | ||
858 | * Note that we slightly misuse the prototype by casting pgd_low to | ||
859 | * a void *. | ||
860 | */ | ||
861 | kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr); | ||
862 | } | ||
863 | |||
864 | /** | ||
865 | * Inits Hyp-mode on all online CPUs | ||
866 | */ | ||
867 | static int init_hyp_mode(void) | ||
868 | { | ||
869 | phys_addr_t init_phys_addr; | ||
870 | int cpu; | ||
871 | int err = 0; | ||
872 | |||
873 | /* | ||
874 | * Allocate Hyp PGD and setup Hyp identity mapping | ||
875 | */ | ||
876 | err = kvm_mmu_init(); | ||
877 | if (err) | ||
878 | goto out_err; | ||
879 | |||
880 | /* | ||
881 | * It is probably enough to obtain the default on one | ||
882 | * CPU. It's unlikely to be different on the others. | ||
883 | */ | ||
884 | hyp_default_vectors = __hyp_get_vectors(); | ||
885 | |||
886 | /* | ||
887 | * Allocate stack pages for Hypervisor-mode | ||
888 | */ | ||
889 | for_each_possible_cpu(cpu) { | ||
890 | unsigned long stack_page; | ||
891 | |||
892 | stack_page = __get_free_page(GFP_KERNEL); | ||
893 | if (!stack_page) { | ||
894 | err = -ENOMEM; | ||
895 | goto out_free_stack_pages; | ||
896 | } | ||
897 | |||
898 | per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; | ||
899 | } | ||
900 | |||
901 | /* | ||
902 | * Execute the init code on each CPU. | ||
903 | * | ||
904 | * Note: The stack is not mapped yet, so don't do anything else than | ||
905 | * initializing the hypervisor mode on each CPU using a local stack | ||
906 | * space for temporary storage. | ||
907 | */ | ||
908 | init_phys_addr = virt_to_phys(__kvm_hyp_init); | ||
909 | for_each_online_cpu(cpu) { | ||
910 | smp_call_function_single(cpu, cpu_init_hyp_mode, | ||
911 | (void *)(long)init_phys_addr, 1); | ||
912 | } | ||
913 | |||
914 | /* | ||
915 | * Unmap the identity mapping | ||
916 | */ | ||
917 | kvm_clear_hyp_idmap(); | ||
918 | |||
919 | /* | ||
920 | * Map the Hyp-code called directly from the host | ||
921 | */ | ||
922 | err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end); | ||
923 | if (err) { | ||
924 | kvm_err("Cannot map world-switch code\n"); | ||
925 | goto out_free_mappings; | ||
926 | } | ||
927 | |||
928 | /* | ||
929 | * Map the Hyp stack pages | ||
930 | */ | ||
931 | for_each_possible_cpu(cpu) { | ||
932 | char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); | ||
933 | err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE); | ||
934 | |||
935 | if (err) { | ||
936 | kvm_err("Cannot map hyp stack\n"); | ||
937 | goto out_free_mappings; | ||
938 | } | ||
939 | } | ||
940 | |||
941 | /* | ||
942 | * Map the host VFP structures | ||
943 | */ | ||
944 | kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct); | ||
945 | if (!kvm_host_vfp_state) { | ||
946 | err = -ENOMEM; | ||
947 | kvm_err("Cannot allocate host VFP state\n"); | ||
948 | goto out_free_mappings; | ||
949 | } | ||
950 | |||
951 | for_each_possible_cpu(cpu) { | ||
952 | struct vfp_hard_struct *vfp; | ||
953 | |||
954 | vfp = per_cpu_ptr(kvm_host_vfp_state, cpu); | ||
955 | err = create_hyp_mappings(vfp, vfp + 1); | ||
956 | |||
957 | if (err) { | ||
958 | kvm_err("Cannot map host VFP state: %d\n", err); | ||
959 | goto out_free_vfp; | ||
960 | } | ||
961 | } | ||
962 | |||
963 | kvm_info("Hyp mode initialized successfully\n"); | ||
964 | return 0; | ||
965 | out_free_vfp: | ||
966 | free_percpu(kvm_host_vfp_state); | ||
967 | out_free_mappings: | ||
968 | free_hyp_pmds(); | ||
969 | out_free_stack_pages: | ||
970 | for_each_possible_cpu(cpu) | ||
971 | free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); | ||
972 | out_err: | ||
973 | kvm_err("error initializing Hyp mode: %d\n", err); | ||
974 | return err; | ||
975 | } | ||
976 | |||
977 | /** | ||
978 | * Initialize Hyp-mode and memory mappings on all CPUs. | ||
979 | */ | ||
980 | int kvm_arch_init(void *opaque) | ||
981 | { | ||
982 | int err; | ||
983 | |||
984 | if (!is_hyp_mode_available()) { | ||
985 | kvm_err("HYP mode not available\n"); | ||
986 | return -ENODEV; | ||
987 | } | ||
988 | |||
989 | if (kvm_target_cpu() < 0) { | ||
990 | kvm_err("Target CPU not supported!\n"); | ||
991 | return -ENODEV; | ||
992 | } | ||
993 | |||
994 | err = init_hyp_mode(); | ||
995 | if (err) | ||
996 | goto out_err; | ||
997 | |||
998 | kvm_coproc_table_init(); | ||
999 | return 0; | ||
1000 | out_err: | ||
1001 | return err; | ||
1002 | } | ||
1003 | |||
1004 | /* NOP: Compiling as a module not supported */ | ||
1005 | void kvm_arch_exit(void) | ||
1006 | { | ||
1007 | } | ||
1008 | |||
1009 | static int arm_init(void) | ||
1010 | { | ||
1011 | int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); | ||
1012 | return rc; | ||
1013 | } | ||
1014 | |||
1015 | module_init(arm_init); | ||
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c new file mode 100644 index 000000000000..d782638c7ec0 --- /dev/null +++ b/arch/arm/kvm/coproc.c | |||
@@ -0,0 +1,1046 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Authors: Rusty Russell <rusty@rustcorp.com.au> | ||
4 | * Christoffer Dall <c.dall@virtualopensystems.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License, version 2, as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
18 | */ | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/kvm_host.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | #include <asm/kvm_arm.h> | ||
23 | #include <asm/kvm_host.h> | ||
24 | #include <asm/kvm_emulate.h> | ||
25 | #include <asm/kvm_coproc.h> | ||
26 | #include <asm/cacheflush.h> | ||
27 | #include <asm/cputype.h> | ||
28 | #include <trace/events/kvm.h> | ||
29 | #include <asm/vfp.h> | ||
30 | #include "../vfp/vfpinstr.h" | ||
31 | |||
32 | #include "trace.h" | ||
33 | #include "coproc.h" | ||
34 | |||
35 | |||
36 | /****************************************************************************** | ||
37 | * Co-processor emulation | ||
38 | *****************************************************************************/ | ||
39 | |||
40 | /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ | ||
41 | static u32 cache_levels; | ||
42 | |||
43 | /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ | ||
44 | #define CSSELR_MAX 12 | ||
45 | |||
46 | int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
47 | { | ||
48 | kvm_inject_undefined(vcpu); | ||
49 | return 1; | ||
50 | } | ||
51 | |||
52 | int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
53 | { | ||
54 | /* | ||
55 | * We can get here if the host has been built without VFPv3 support, | ||
56 | * but the guest attempted a floating point operation. | ||
57 | */ | ||
58 | kvm_inject_undefined(vcpu); | ||
59 | return 1; | ||
60 | } | ||
61 | |||
62 | int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
63 | { | ||
64 | kvm_inject_undefined(vcpu); | ||
65 | return 1; | ||
66 | } | ||
67 | |||
68 | int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
69 | { | ||
70 | kvm_inject_undefined(vcpu); | ||
71 | return 1; | ||
72 | } | ||
73 | |||
74 | /* See note at ARM ARM B1.14.4 */ | ||
75 | static bool access_dcsw(struct kvm_vcpu *vcpu, | ||
76 | const struct coproc_params *p, | ||
77 | const struct coproc_reg *r) | ||
78 | { | ||
79 | u32 val; | ||
80 | int cpu; | ||
81 | |||
82 | cpu = get_cpu(); | ||
83 | |||
84 | if (!p->is_write) | ||
85 | return read_from_write_only(vcpu, p); | ||
86 | |||
87 | cpumask_setall(&vcpu->arch.require_dcache_flush); | ||
88 | cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); | ||
89 | |||
90 | /* If we were already preempted, take the long way around */ | ||
91 | if (cpu != vcpu->arch.last_pcpu) { | ||
92 | flush_cache_all(); | ||
93 | goto done; | ||
94 | } | ||
95 | |||
96 | val = *vcpu_reg(vcpu, p->Rt1); | ||
97 | |||
98 | switch (p->CRm) { | ||
99 | case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ | ||
100 | case 14: /* DCCISW */ | ||
101 | asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val)); | ||
102 | break; | ||
103 | |||
104 | case 10: /* DCCSW */ | ||
105 | asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val)); | ||
106 | break; | ||
107 | } | ||
108 | |||
109 | done: | ||
110 | put_cpu(); | ||
111 | |||
112 | return true; | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * We could trap ID_DFR0 and tell the guest we don't support performance | ||
117 | * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was | ||
118 | * NAKed, so it will read the PMCR anyway. | ||
119 | * | ||
120 | * Therefore we tell the guest we have 0 counters. Unfortunately, we | ||
121 | * must always support PMCCNTR (the cycle counter): we just RAZ/WI for | ||
122 | * all PM registers, which doesn't crash the guest kernel at least. | ||
123 | */ | ||
124 | static bool pm_fake(struct kvm_vcpu *vcpu, | ||
125 | const struct coproc_params *p, | ||
126 | const struct coproc_reg *r) | ||
127 | { | ||
128 | if (p->is_write) | ||
129 | return ignore_write(vcpu, p); | ||
130 | else | ||
131 | return read_zero(vcpu, p); | ||
132 | } | ||
133 | |||
134 | #define access_pmcr pm_fake | ||
135 | #define access_pmcntenset pm_fake | ||
136 | #define access_pmcntenclr pm_fake | ||
137 | #define access_pmovsr pm_fake | ||
138 | #define access_pmselr pm_fake | ||
139 | #define access_pmceid0 pm_fake | ||
140 | #define access_pmceid1 pm_fake | ||
141 | #define access_pmccntr pm_fake | ||
142 | #define access_pmxevtyper pm_fake | ||
143 | #define access_pmxevcntr pm_fake | ||
144 | #define access_pmuserenr pm_fake | ||
145 | #define access_pmintenset pm_fake | ||
146 | #define access_pmintenclr pm_fake | ||
147 | |||
148 | /* Architected CP15 registers. | ||
149 | * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 | ||
150 | */ | ||
151 | static const struct coproc_reg cp15_regs[] = { | ||
152 | /* CSSELR: swapped by interrupt.S. */ | ||
153 | { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32, | ||
154 | NULL, reset_unknown, c0_CSSELR }, | ||
155 | |||
156 | /* TTBR0/TTBR1: swapped by interrupt.S. */ | ||
157 | { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, | ||
158 | { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, | ||
159 | |||
160 | /* TTBCR: swapped by interrupt.S. */ | ||
161 | { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, | ||
162 | NULL, reset_val, c2_TTBCR, 0x00000000 }, | ||
163 | |||
164 | /* DACR: swapped by interrupt.S. */ | ||
165 | { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32, | ||
166 | NULL, reset_unknown, c3_DACR }, | ||
167 | |||
168 | /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */ | ||
169 | { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32, | ||
170 | NULL, reset_unknown, c5_DFSR }, | ||
171 | { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32, | ||
172 | NULL, reset_unknown, c5_IFSR }, | ||
173 | { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32, | ||
174 | NULL, reset_unknown, c5_ADFSR }, | ||
175 | { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32, | ||
176 | NULL, reset_unknown, c5_AIFSR }, | ||
177 | |||
178 | /* DFAR/IFAR: swapped by interrupt.S. */ | ||
179 | { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32, | ||
180 | NULL, reset_unknown, c6_DFAR }, | ||
181 | { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32, | ||
182 | NULL, reset_unknown, c6_IFAR }, | ||
183 | /* | ||
184 | * DC{C,I,CI}SW operations: | ||
185 | */ | ||
186 | { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw}, | ||
187 | { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw}, | ||
188 | { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw}, | ||
189 | /* | ||
190 | * Dummy performance monitor implementation. | ||
191 | */ | ||
192 | { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr}, | ||
193 | { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset}, | ||
194 | { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr}, | ||
195 | { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr}, | ||
196 | { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr}, | ||
197 | { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0}, | ||
198 | { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1}, | ||
199 | { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr}, | ||
200 | { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper}, | ||
201 | { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr}, | ||
202 | { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr}, | ||
203 | { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset}, | ||
204 | { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr}, | ||
205 | |||
206 | /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */ | ||
207 | { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32, | ||
208 | NULL, reset_unknown, c10_PRRR}, | ||
209 | { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32, | ||
210 | NULL, reset_unknown, c10_NMRR}, | ||
211 | |||
212 | /* VBAR: swapped by interrupt.S. */ | ||
213 | { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32, | ||
214 | NULL, reset_val, c12_VBAR, 0x00000000 }, | ||
215 | |||
216 | /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */ | ||
217 | { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32, | ||
218 | NULL, reset_val, c13_CID, 0x00000000 }, | ||
219 | { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32, | ||
220 | NULL, reset_unknown, c13_TID_URW }, | ||
221 | { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32, | ||
222 | NULL, reset_unknown, c13_TID_URO }, | ||
223 | { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32, | ||
224 | NULL, reset_unknown, c13_TID_PRIV }, | ||
225 | }; | ||
226 | |||
227 | /* Target specific emulation tables */ | ||
228 | static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS]; | ||
229 | |||
230 | void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table) | ||
231 | { | ||
232 | target_tables[table->target] = table; | ||
233 | } | ||
234 | |||
235 | /* Get specific register table for this target. */ | ||
236 | static const struct coproc_reg *get_target_table(unsigned target, size_t *num) | ||
237 | { | ||
238 | struct kvm_coproc_target_table *table; | ||
239 | |||
240 | table = target_tables[target]; | ||
241 | *num = table->num; | ||
242 | return table->table; | ||
243 | } | ||
244 | |||
245 | static const struct coproc_reg *find_reg(const struct coproc_params *params, | ||
246 | const struct coproc_reg table[], | ||
247 | unsigned int num) | ||
248 | { | ||
249 | unsigned int i; | ||
250 | |||
251 | for (i = 0; i < num; i++) { | ||
252 | const struct coproc_reg *r = &table[i]; | ||
253 | |||
254 | if (params->is_64bit != r->is_64) | ||
255 | continue; | ||
256 | if (params->CRn != r->CRn) | ||
257 | continue; | ||
258 | if (params->CRm != r->CRm) | ||
259 | continue; | ||
260 | if (params->Op1 != r->Op1) | ||
261 | continue; | ||
262 | if (params->Op2 != r->Op2) | ||
263 | continue; | ||
264 | |||
265 | return r; | ||
266 | } | ||
267 | return NULL; | ||
268 | } | ||
269 | |||
270 | static int emulate_cp15(struct kvm_vcpu *vcpu, | ||
271 | const struct coproc_params *params) | ||
272 | { | ||
273 | size_t num; | ||
274 | const struct coproc_reg *table, *r; | ||
275 | |||
276 | trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn, | ||
277 | params->CRm, params->Op2, params->is_write); | ||
278 | |||
279 | table = get_target_table(vcpu->arch.target, &num); | ||
280 | |||
281 | /* Search target-specific then generic table. */ | ||
282 | r = find_reg(params, table, num); | ||
283 | if (!r) | ||
284 | r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs)); | ||
285 | |||
286 | if (likely(r)) { | ||
287 | /* If we don't have an accessor, we should never get here! */ | ||
288 | BUG_ON(!r->access); | ||
289 | |||
290 | if (likely(r->access(vcpu, params, r))) { | ||
291 | /* Skip instruction, since it was emulated */ | ||
292 | kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); | ||
293 | return 1; | ||
294 | } | ||
295 | /* If access function fails, it should complain. */ | ||
296 | } else { | ||
297 | kvm_err("Unsupported guest CP15 access at: %08x\n", | ||
298 | *vcpu_pc(vcpu)); | ||
299 | print_cp_instr(params); | ||
300 | } | ||
301 | kvm_inject_undefined(vcpu); | ||
302 | return 1; | ||
303 | } | ||
304 | |||
305 | /** | ||
306 | * kvm_handle_cp15_64 -- handles an mrrc/mcrr trap on a guest CP15 access | ||
307 | * @vcpu: The VCPU pointer | ||
308 | * @run: The kvm_run struct | ||
309 | */ | ||
310 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
311 | { | ||
312 | struct coproc_params params; | ||
313 | |||
314 | params.CRm = (vcpu->arch.hsr >> 1) & 0xf; | ||
315 | params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; | ||
316 | params.is_write = ((vcpu->arch.hsr & 1) == 0); | ||
317 | params.is_64bit = true; | ||
318 | |||
319 | params.Op1 = (vcpu->arch.hsr >> 16) & 0xf; | ||
320 | params.Op2 = 0; | ||
321 | params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf; | ||
322 | params.CRn = 0; | ||
323 | |||
324 | return emulate_cp15(vcpu, ¶ms); | ||
325 | } | ||
326 | |||
327 | static void reset_coproc_regs(struct kvm_vcpu *vcpu, | ||
328 | const struct coproc_reg *table, size_t num) | ||
329 | { | ||
330 | unsigned long i; | ||
331 | |||
332 | for (i = 0; i < num; i++) | ||
333 | if (table[i].reset) | ||
334 | table[i].reset(vcpu, &table[i]); | ||
335 | } | ||
336 | |||
337 | /** | ||
338 | * kvm_handle_cp15_32 -- handles an mrc/mcr trap on a guest CP15 access | ||
339 | * @vcpu: The VCPU pointer | ||
340 | * @run: The kvm_run struct | ||
341 | */ | ||
342 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
343 | { | ||
344 | struct coproc_params params; | ||
345 | |||
346 | params.CRm = (vcpu->arch.hsr >> 1) & 0xf; | ||
347 | params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; | ||
348 | params.is_write = ((vcpu->arch.hsr & 1) == 0); | ||
349 | params.is_64bit = false; | ||
350 | |||
351 | params.CRn = (vcpu->arch.hsr >> 10) & 0xf; | ||
352 | params.Op1 = (vcpu->arch.hsr >> 14) & 0x7; | ||
353 | params.Op2 = (vcpu->arch.hsr >> 17) & 0x7; | ||
354 | params.Rt2 = 0; | ||
355 | |||
356 | return emulate_cp15(vcpu, ¶ms); | ||
357 | } | ||
358 | |||
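The two handlers above differ only in how they slice the HSR ISS bits into coproc_params. A self-contained sketch of the 32-bit decode, using the same bit positions as kvm_handle_cp15_32(); the sample HSR value is invented for illustration (it corresponds to an SCTLR write from r2).

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* invented HSR ISS: Op2=0 Op1=0 CRn=1 Rt=2 CRm=0, bit 0 clear => write */
		uint32_t hsr = (0 << 17) | (0 << 14) | (1 << 10) | (2 << 5) | (0 << 1) | 0;

		unsigned crn = (hsr >> 10) & 0xf;
		unsigned op1 = (hsr >> 14) & 0x7;
		unsigned op2 = (hsr >> 17) & 0x7;
		unsigned crm = (hsr >> 1) & 0xf;
		unsigned rt  = (hsr >> 5) & 0xf;
		bool is_write = (hsr & 1) == 0;

		/* prints: p15, 0, r2, c1, c0, 0 (mcr) */
		printf("p15, %u, r%u, c%u, c%u, %u (%s)\n",
		       op1, rt, crn, crm, op2, is_write ? "mcr" : "mrc");
		return 0;
	}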
359 | /****************************************************************************** | ||
360 | * Userspace API | ||
361 | *****************************************************************************/ | ||
362 | |||
363 | static bool index_to_params(u64 id, struct coproc_params *params) | ||
364 | { | ||
365 | switch (id & KVM_REG_SIZE_MASK) { | ||
366 | case KVM_REG_SIZE_U32: | ||
367 | /* Any unused index bits means it's not valid. */ | ||
368 | if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | ||
369 | | KVM_REG_ARM_COPROC_MASK | ||
370 | | KVM_REG_ARM_32_CRN_MASK | ||
371 | | KVM_REG_ARM_CRM_MASK | ||
372 | | KVM_REG_ARM_OPC1_MASK | ||
373 | | KVM_REG_ARM_32_OPC2_MASK)) | ||
374 | return false; | ||
375 | |||
376 | params->is_64bit = false; | ||
377 | params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK) | ||
378 | >> KVM_REG_ARM_32_CRN_SHIFT); | ||
379 | params->CRm = ((id & KVM_REG_ARM_CRM_MASK) | ||
380 | >> KVM_REG_ARM_CRM_SHIFT); | ||
381 | params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) | ||
382 | >> KVM_REG_ARM_OPC1_SHIFT); | ||
383 | params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK) | ||
384 | >> KVM_REG_ARM_32_OPC2_SHIFT); | ||
385 | return true; | ||
386 | case KVM_REG_SIZE_U64: | ||
387 | /* Any unused index bits means it's not valid. */ | ||
388 | if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | ||
389 | | KVM_REG_ARM_COPROC_MASK | ||
390 | | KVM_REG_ARM_CRM_MASK | ||
391 | | KVM_REG_ARM_OPC1_MASK)) | ||
392 | return false; | ||
393 | params->is_64bit = true; | ||
394 | params->CRm = ((id & KVM_REG_ARM_CRM_MASK) | ||
395 | >> KVM_REG_ARM_CRM_SHIFT); | ||
396 | params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) | ||
397 | >> KVM_REG_ARM_OPC1_SHIFT); | ||
398 | params->Op2 = 0; | ||
399 | params->CRn = 0; | ||
400 | return true; | ||
401 | default: | ||
402 | return false; | ||
403 | } | ||
404 | } | ||
405 | |||
406 | /* Decode an index value, and find the cp15 coproc_reg entry. */ | ||
407 | static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu, | ||
408 | u64 id) | ||
409 | { | ||
410 | size_t num; | ||
411 | const struct coproc_reg *table, *r; | ||
412 | struct coproc_params params; | ||
413 | |||
414 | /* We only do cp15 for now. */ | ||
415 | if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15) | ||
416 | return NULL; | ||
417 | |||
418 | if (!index_to_params(id, ¶ms)) | ||
419 | return NULL; | ||
420 | |||
421 | table = get_target_table(vcpu->arch.target, &num); | ||
422 | r = find_reg(¶ms, table, num); | ||
423 | if (!r) | ||
424 | r = find_reg(¶ms, cp15_regs, ARRAY_SIZE(cp15_regs)); | ||
425 | |||
426 | /* Not saved in the cp15 array? */ | ||
427 | if (r && !r->reg) | ||
428 | r = NULL; | ||
429 | |||
430 | return r; | ||
431 | } | ||
432 | |||
433 | /* | ||
434 | * These are the invariant cp15 registers: we let the guest see the host | ||
435 | * versions of these, so they're part of the guest state. | ||
436 | * | ||
437 | * A future CPU may provide a mechanism to present different values to | ||
438 | * the guest, or a future kvm may trap them. | ||
439 | */ | ||
440 | /* Unfortunately, there's no register-argument for mrc, so generate. */ | ||
441 | #define FUNCTION_FOR32(crn, crm, op1, op2, name) \ | ||
442 | static void get_##name(struct kvm_vcpu *v, \ | ||
443 | const struct coproc_reg *r) \ | ||
444 | { \ | ||
445 | u32 val; \ | ||
446 | \ | ||
447 | asm volatile("mrc p15, " __stringify(op1) \ | ||
448 | ", %0, c" __stringify(crn) \ | ||
449 | ", c" __stringify(crm) \ | ||
450 | ", " __stringify(op2) "\n" : "=r" (val)); \ | ||
451 | ((struct coproc_reg *)r)->val = val; \ | ||
452 | } | ||
453 | |||
454 | FUNCTION_FOR32(0, 0, 0, 0, MIDR) | ||
455 | FUNCTION_FOR32(0, 0, 0, 1, CTR) | ||
456 | FUNCTION_FOR32(0, 0, 0, 2, TCMTR) | ||
457 | FUNCTION_FOR32(0, 0, 0, 3, TLBTR) | ||
458 | FUNCTION_FOR32(0, 0, 0, 6, REVIDR) | ||
459 | FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0) | ||
460 | FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1) | ||
461 | FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0) | ||
462 | FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0) | ||
463 | FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0) | ||
464 | FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1) | ||
465 | FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2) | ||
466 | FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3) | ||
467 | FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0) | ||
468 | FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1) | ||
469 | FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2) | ||
470 | FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3) | ||
471 | FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4) | ||
472 | FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5) | ||
473 | FUNCTION_FOR32(0, 0, 1, 1, CLIDR) | ||
474 | FUNCTION_FOR32(0, 0, 1, 7, AIDR) | ||
475 | |||
476 | /* ->val is filled in by kvm_invariant_coproc_table_init() */ | ||
477 | static struct coproc_reg invariant_cp15[] = { | ||
478 | { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR }, | ||
479 | { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR }, | ||
480 | { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR }, | ||
481 | { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR }, | ||
482 | { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR }, | ||
483 | |||
484 | { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 }, | ||
485 | { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 }, | ||
486 | { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 }, | ||
487 | { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 }, | ||
488 | { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 }, | ||
489 | { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 }, | ||
490 | { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 }, | ||
491 | { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 }, | ||
492 | |||
493 | { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 }, | ||
494 | { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 }, | ||
495 | { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 }, | ||
496 | { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 }, | ||
497 | { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 }, | ||
498 | { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 }, | ||
499 | |||
500 | { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR }, | ||
501 | { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR }, | ||
502 | }; | ||
503 | |||
504 | static int reg_from_user(void *val, const void __user *uaddr, u64 id) | ||
505 | { | ||
506 | /* This Just Works because we are little endian. */ | ||
507 | if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) | ||
508 | return -EFAULT; | ||
509 | return 0; | ||
510 | } | ||
511 | |||
512 | static int reg_to_user(void __user *uaddr, const void *val, u64 id) | ||
513 | { | ||
514 | /* This Just Works because we are little endian. */ | ||
515 | if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) | ||
516 | return -EFAULT; | ||
517 | return 0; | ||
518 | } | ||
519 | |||
520 | static int get_invariant_cp15(u64 id, void __user *uaddr) | ||
521 | { | ||
522 | struct coproc_params params; | ||
523 | const struct coproc_reg *r; | ||
524 | |||
525 | if (!index_to_params(id, ¶ms)) | ||
526 | return -ENOENT; | ||
527 | |||
528 | r = find_reg(¶ms, invariant_cp15, ARRAY_SIZE(invariant_cp15)); | ||
529 | if (!r) | ||
530 | return -ENOENT; | ||
531 | |||
532 | return reg_to_user(uaddr, &r->val, id); | ||
533 | } | ||
534 | |||
535 | static int set_invariant_cp15(u64 id, void __user *uaddr) | ||
536 | { | ||
537 | struct coproc_params params; | ||
538 | const struct coproc_reg *r; | ||
539 | int err; | ||
540 | u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */ | ||
541 | |||
542 | if (!index_to_params(id, ¶ms)) | ||
543 | return -ENOENT; | ||
544 | r = find_reg(¶ms, invariant_cp15, ARRAY_SIZE(invariant_cp15)); | ||
545 | if (!r) | ||
546 | return -ENOENT; | ||
547 | |||
548 | err = reg_from_user(&val, uaddr, id); | ||
549 | if (err) | ||
550 | return err; | ||
551 | |||
552 | /* This is what we mean by invariant: you can't change it. */ | ||
553 | if (r->val != val) | ||
554 | return -EINVAL; | ||
555 | |||
556 | return 0; | ||
557 | } | ||
558 | |||
559 | static bool is_valid_cache(u32 val) | ||
560 | { | ||
561 | u32 level, ctype; | ||
562 | |||
563 | if (val >= CSSELR_MAX) | ||
564 | return false; /* bool function: an errno here would read as true */ | ||
565 | |||
566 | /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ | ||
567 | level = (val >> 1); | ||
568 | ctype = (cache_levels >> (level * 3)) & 7; | ||
569 | |||
570 | switch (ctype) { | ||
571 | case 0: /* No cache */ | ||
572 | return false; | ||
573 | case 1: /* Instruction cache only */ | ||
574 | return (val & 1); | ||
575 | case 2: /* Data cache only */ | ||
576 | case 4: /* Unified cache */ | ||
577 | return !(val & 1); | ||
578 | case 3: /* Separate instruction and data caches */ | ||
579 | return true; | ||
580 | default: /* Reserved: we can't know instruction or data. */ | ||
581 | return false; | ||
582 | } | ||
583 | } | ||
584 | |||
585 | /* Which cache CCSIDR represents depends on CSSELR value. */ | ||
586 | static u32 get_ccsidr(u32 csselr) | ||
587 | { | ||
588 | u32 ccsidr; | ||
589 | |||
590 | /* Make sure no one else changes CSSELR during this! */ | ||
591 | local_irq_disable(); | ||
592 | /* Put value into CSSELR */ | ||
593 | asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr)); | ||
594 | isb(); | ||
595 | /* Read result out of CCSIDR */ | ||
596 | asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr)); | ||
597 | local_irq_enable(); | ||
598 | |||
599 | return ccsidr; | ||
600 | } | ||
601 | |||
602 | static int demux_c15_get(u64 id, void __user *uaddr) | ||
603 | { | ||
604 | u32 val; | ||
605 | u32 __user *uval = uaddr; | ||
606 | |||
607 | /* Fail if we have unknown bits set. */ | ||
608 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ||
609 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) | ||
610 | return -ENOENT; | ||
611 | |||
612 | switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { | ||
613 | case KVM_REG_ARM_DEMUX_ID_CCSIDR: | ||
614 | if (KVM_REG_SIZE(id) != 4) | ||
615 | return -ENOENT; | ||
616 | val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) | ||
617 | >> KVM_REG_ARM_DEMUX_VAL_SHIFT; | ||
618 | if (!is_valid_cache(val)) | ||
619 | return -ENOENT; | ||
620 | |||
621 | return put_user(get_ccsidr(val), uval); | ||
622 | default: | ||
623 | return -ENOENT; | ||
624 | } | ||
625 | } | ||
626 | |||
627 | static int demux_c15_set(u64 id, void __user *uaddr) | ||
628 | { | ||
629 | u32 val, newval; | ||
630 | u32 __user *uval = uaddr; | ||
631 | |||
632 | /* Fail if we have unknown bits set. */ | ||
633 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ||
634 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) | ||
635 | return -ENOENT; | ||
636 | |||
637 | switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { | ||
638 | case KVM_REG_ARM_DEMUX_ID_CCSIDR: | ||
639 | if (KVM_REG_SIZE(id) != 4) | ||
640 | return -ENOENT; | ||
641 | val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) | ||
642 | >> KVM_REG_ARM_DEMUX_VAL_SHIFT; | ||
643 | if (!is_valid_cache(val)) | ||
644 | return -ENOENT; | ||
645 | |||
646 | if (get_user(newval, uval)) | ||
647 | return -EFAULT; | ||
648 | |||
649 | /* This is also invariant: you can't change it. */ | ||
650 | if (newval != get_ccsidr(val)) | ||
651 | return -EINVAL; | ||
652 | return 0; | ||
653 | default: | ||
654 | return -ENOENT; | ||
655 | } | ||
656 | } | ||
657 | |||
658 | #ifdef CONFIG_VFPv3 | ||
659 | static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC, | ||
660 | KVM_REG_ARM_VFP_FPSCR, | ||
661 | KVM_REG_ARM_VFP_FPINST, | ||
662 | KVM_REG_ARM_VFP_FPINST2, | ||
663 | KVM_REG_ARM_VFP_MVFR0, | ||
664 | KVM_REG_ARM_VFP_MVFR1, | ||
665 | KVM_REG_ARM_VFP_FPSID }; | ||
666 | |||
667 | static unsigned int num_fp_regs(void) | ||
668 | { | ||
669 | if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2) | ||
670 | return 32; | ||
671 | else | ||
672 | return 16; | ||
673 | } | ||
674 | |||
675 | static unsigned int num_vfp_regs(void) | ||
676 | { | ||
677 | /* Normal FP regs + control regs. */ | ||
678 | return num_fp_regs() + ARRAY_SIZE(vfp_sysregs); | ||
679 | } | ||
680 | |||
681 | static int copy_vfp_regids(u64 __user *uindices) | ||
682 | { | ||
683 | unsigned int i; | ||
684 | const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP; | ||
685 | const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP; | ||
686 | |||
687 | for (i = 0; i < num_fp_regs(); i++) { | ||
688 | if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i, | ||
689 | uindices)) | ||
690 | return -EFAULT; | ||
691 | uindices++; | ||
692 | } | ||
693 | |||
694 | for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) { | ||
695 | if (put_user(u32reg | vfp_sysregs[i], uindices)) | ||
696 | return -EFAULT; | ||
697 | uindices++; | ||
698 | } | ||
699 | |||
700 | return num_vfp_regs(); | ||
701 | } | ||
702 | |||
703 | static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) | ||
704 | { | ||
705 | u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); | ||
706 | u32 val; | ||
707 | |||
708 | /* Fail if we have unknown bits set. */ | ||
709 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ||
710 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) | ||
711 | return -ENOENT; | ||
712 | |||
713 | if (vfpid < num_fp_regs()) { | ||
714 | if (KVM_REG_SIZE(id) != 8) | ||
715 | return -ENOENT; | ||
716 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid], | ||
717 | id); | ||
718 | } | ||
719 | |||
720 | /* FP control registers are all 32 bit. */ | ||
721 | if (KVM_REG_SIZE(id) != 4) | ||
722 | return -ENOENT; | ||
723 | |||
724 | switch (vfpid) { | ||
725 | case KVM_REG_ARM_VFP_FPEXC: | ||
726 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id); | ||
727 | case KVM_REG_ARM_VFP_FPSCR: | ||
728 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id); | ||
729 | case KVM_REG_ARM_VFP_FPINST: | ||
730 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id); | ||
731 | case KVM_REG_ARM_VFP_FPINST2: | ||
732 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id); | ||
733 | case KVM_REG_ARM_VFP_MVFR0: | ||
734 | val = fmrx(MVFR0); | ||
735 | return reg_to_user(uaddr, &val, id); | ||
736 | case KVM_REG_ARM_VFP_MVFR1: | ||
737 | val = fmrx(MVFR1); | ||
738 | return reg_to_user(uaddr, &val, id); | ||
739 | case KVM_REG_ARM_VFP_FPSID: | ||
740 | val = fmrx(FPSID); | ||
741 | return reg_to_user(uaddr, &val, id); | ||
742 | default: | ||
743 | return -ENOENT; | ||
744 | } | ||
745 | } | ||
746 | |||
747 | static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) | ||
748 | { | ||
749 | u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); | ||
750 | u32 val; | ||
751 | |||
752 | /* Fail if we have unknown bits set. */ | ||
753 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ||
754 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) | ||
755 | return -ENOENT; | ||
756 | |||
757 | if (vfpid < num_fp_regs()) { | ||
758 | if (KVM_REG_SIZE(id) != 8) | ||
759 | return -ENOENT; | ||
760 | return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid], | ||
761 | uaddr, id); | ||
762 | } | ||
763 | |||
764 | /* FP control registers are all 32 bit. */ | ||
765 | if (KVM_REG_SIZE(id) != 4) | ||
766 | return -ENOENT; | ||
767 | |||
768 | switch (vfpid) { | ||
769 | case KVM_REG_ARM_VFP_FPEXC: | ||
770 | return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id); | ||
771 | case KVM_REG_ARM_VFP_FPSCR: | ||
772 | return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id); | ||
773 | case KVM_REG_ARM_VFP_FPINST: | ||
774 | return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id); | ||
775 | case KVM_REG_ARM_VFP_FPINST2: | ||
776 | return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id); | ||
777 | /* These are invariant. */ | ||
778 | case KVM_REG_ARM_VFP_MVFR0: | ||
779 | if (reg_from_user(&val, uaddr, id)) | ||
780 | return -EFAULT; | ||
781 | if (val != fmrx(MVFR0)) | ||
782 | return -EINVAL; | ||
783 | return 0; | ||
784 | case KVM_REG_ARM_VFP_MVFR1: | ||
785 | if (reg_from_user(&val, uaddr, id)) | ||
786 | return -EFAULT; | ||
787 | if (val != fmrx(MVFR1)) | ||
788 | return -EINVAL; | ||
789 | return 0; | ||
790 | case KVM_REG_ARM_VFP_FPSID: | ||
791 | if (reg_from_user(&val, uaddr, id)) | ||
792 | return -EFAULT; | ||
793 | if (val != fmrx(FPSID)) | ||
794 | return -EINVAL; | ||
795 | return 0; | ||
796 | default: | ||
797 | return -ENOENT; | ||
798 | } | ||
799 | } | ||
800 | #else /* !CONFIG_VFPv3 */ | ||
801 | static unsigned int num_vfp_regs(void) | ||
802 | { | ||
803 | return 0; | ||
804 | } | ||
805 | |||
806 | static int copy_vfp_regids(u64 __user *uindices) | ||
807 | { | ||
808 | return 0; | ||
809 | } | ||
810 | |||
811 | static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) | ||
812 | { | ||
813 | return -ENOENT; | ||
814 | } | ||
815 | |||
816 | static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) | ||
817 | { | ||
818 | return -ENOENT; | ||
819 | } | ||
820 | #endif /* !CONFIG_VFPv3 */ | ||
821 | |||
822 | int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
823 | { | ||
824 | const struct coproc_reg *r; | ||
825 | void __user *uaddr = (void __user *)(long)reg->addr; | ||
826 | |||
827 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) | ||
828 | return demux_c15_get(reg->id, uaddr); | ||
829 | |||
830 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) | ||
831 | return vfp_get_reg(vcpu, reg->id, uaddr); | ||
832 | |||
833 | r = index_to_coproc_reg(vcpu, reg->id); | ||
834 | if (!r) | ||
835 | return get_invariant_cp15(reg->id, uaddr); | ||
836 | |||
837 | /* Note: copies two regs if size is 64 bit. */ | ||
838 | return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); | ||
839 | } | ||
840 | |||
841 | int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
842 | { | ||
843 | const struct coproc_reg *r; | ||
844 | void __user *uaddr = (void __user *)(long)reg->addr; | ||
845 | |||
846 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) | ||
847 | return demux_c15_set(reg->id, uaddr); | ||
848 | |||
849 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) | ||
850 | return vfp_set_reg(vcpu, reg->id, uaddr); | ||
851 | |||
852 | r = index_to_coproc_reg(vcpu, reg->id); | ||
853 | if (!r) | ||
854 | return set_invariant_cp15(reg->id, uaddr); | ||
855 | |||
856 | /* Note: copies two regs if size is 64 bit */ | ||
857 | return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); | ||
858 | } | ||
859 | |||
860 | static unsigned int num_demux_regs(void) | ||
861 | { | ||
862 | unsigned int i, count = 0; | ||
863 | |||
864 | for (i = 0; i < CSSELR_MAX; i++) | ||
865 | if (is_valid_cache(i)) | ||
866 | count++; | ||
867 | |||
868 | return count; | ||
869 | } | ||
870 | |||
871 | static int write_demux_regids(u64 __user *uindices) | ||
872 | { | ||
873 | u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; | ||
874 | unsigned int i; | ||
875 | |||
876 | val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; | ||
877 | for (i = 0; i < CSSELR_MAX; i++) { | ||
878 | if (!is_valid_cache(i)) | ||
879 | continue; | ||
880 | if (put_user(val | i, uindices)) | ||
881 | return -EFAULT; | ||
882 | uindices++; | ||
883 | } | ||
884 | return 0; | ||
885 | } | ||
886 | |||
887 | static u64 cp15_to_index(const struct coproc_reg *reg) | ||
888 | { | ||
889 | u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT); | ||
890 | if (reg->is_64) { | ||
891 | val |= KVM_REG_SIZE_U64; | ||
892 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); | ||
893 | val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); | ||
894 | } else { | ||
895 | val |= KVM_REG_SIZE_U32; | ||
896 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); | ||
897 | val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT); | ||
898 | val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); | ||
899 | val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT); | ||
900 | } | ||
901 | return val; | ||
902 | } | ||
903 | |||
904 | static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind) | ||
905 | { | ||
906 | if (!*uind) | ||
907 | return true; | ||
908 | |||
909 | if (put_user(cp15_to_index(reg), *uind)) | ||
910 | return false; | ||
911 | |||
912 | (*uind)++; | ||
913 | return true; | ||
914 | } | ||
915 | |||
916 | /* Assumed ordered tables, see kvm_coproc_table_init. */ | ||
917 | static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind) | ||
918 | { | ||
919 | const struct coproc_reg *i1, *i2, *end1, *end2; | ||
920 | unsigned int total = 0; | ||
921 | size_t num; | ||
922 | |||
923 | /* We check for duplicates here, to allow arch-specific overrides. */ | ||
924 | i1 = get_target_table(vcpu->arch.target, &num); | ||
925 | end1 = i1 + num; | ||
926 | i2 = cp15_regs; | ||
927 | end2 = cp15_regs + ARRAY_SIZE(cp15_regs); | ||
928 | |||
929 | BUG_ON(i1 == end1 || i2 == end2); | ||
930 | |||
931 | /* Walk carefully, as both tables may refer to the same register. */ | ||
932 | while (i1 || i2) { | ||
933 | int cmp = cmp_reg(i1, i2); | ||
934 | /* target-specific overrides generic entry. */ | ||
935 | if (cmp <= 0) { | ||
936 | /* Ignore registers we trap but don't save. */ | ||
937 | if (i1->reg) { | ||
938 | if (!copy_reg_to_user(i1, &uind)) | ||
939 | return -EFAULT; | ||
940 | total++; | ||
941 | } | ||
942 | } else { | ||
943 | /* Ignore registers we trap but don't save. */ | ||
944 | if (i2->reg) { | ||
945 | if (!copy_reg_to_user(i2, &uind)) | ||
946 | return -EFAULT; | ||
947 | total++; | ||
948 | } | ||
949 | } | ||
950 | |||
951 | if (cmp <= 0 && ++i1 == end1) | ||
952 | i1 = NULL; | ||
953 | if (cmp >= 0 && ++i2 == end2) | ||
954 | i2 = NULL; | ||
955 | } | ||
956 | return total; | ||
957 | } | ||
958 | |||
959 | unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu) | ||
960 | { | ||
961 | return ARRAY_SIZE(invariant_cp15) | ||
962 | + num_demux_regs() | ||
963 | + num_vfp_regs() | ||
964 | + walk_cp15(vcpu, (u64 __user *)NULL); | ||
965 | } | ||
966 | |||
967 | int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | ||
968 | { | ||
969 | unsigned int i; | ||
970 | int err; | ||
971 | |||
972 | /* Then give them all the invariant registers' indices. */ | ||
973 | for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) { | ||
974 | if (put_user(cp15_to_index(&invariant_cp15[i]), uindices)) | ||
975 | return -EFAULT; | ||
976 | uindices++; | ||
977 | } | ||
978 | |||
979 | err = walk_cp15(vcpu, uindices); | ||
980 | if (err < 0) | ||
981 | return err; | ||
982 | uindices += err; | ||
983 | |||
984 | err = copy_vfp_regids(uindices); | ||
985 | if (err < 0) | ||
986 | return err; | ||
987 | uindices += err; | ||
988 | |||
989 | return write_demux_regids(uindices); | ||
990 | } | ||
991 | |||
992 | void kvm_coproc_table_init(void) | ||
993 | { | ||
994 | unsigned int i; | ||
995 | |||
996 | /* Make sure tables are unique and in order. */ | ||
997 | for (i = 1; i < ARRAY_SIZE(cp15_regs); i++) | ||
998 | BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0); | ||
999 | |||
1000 | /* We abuse the reset function to overwrite the table itself. */ | ||
1001 | for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) | ||
1002 | invariant_cp15[i].reset(NULL, &invariant_cp15[i]); | ||
1003 | |||
1004 | /* | ||
1005 | * CLIDR format is awkward, so clean it up. See ARM B4.1.20: | ||
1006 | * | ||
1007 | * If software reads the Cache Type fields from Ctype1 | ||
1008 | * upwards, once it has seen a value of 0b000, no caches | ||
1009 | * exist at further-out levels of the hierarchy. So, for | ||
1010 | * example, if Ctype3 is the first Cache Type field with a | ||
1011 | * value of 0b000, the values of Ctype4 to Ctype7 must be | ||
1012 | * ignored. | ||
1013 | */ | ||
1014 | asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels)); | ||
1015 | for (i = 0; i < 7; i++) | ||
1016 | if (((cache_levels >> (i*3)) & 7) == 0) | ||
1017 | break; | ||
1018 | /* Clear all higher bits. */ | ||
1019 | cache_levels &= (1 << (i*3))-1; | ||
1020 | } | ||
1021 | |||
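A worked example of the CLIDR trimming loop above, run on a plausible (hypothetical) CLIDR value: Ctype1 = 0b011 (separate I/D caches), Ctype2 = 0b100 (unified), Ctype3 = 0b000, so every field from level 3 upwards is cleared.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t cache_levels = 0x0a200023;	/* hypothetical CLIDR */
		unsigned int i;

		for (i = 0; i < 7; i++)
			if (((cache_levels >> (i * 3)) & 7) == 0)
				break;
		/* clear all Ctype fields past the first 0b000 (here i == 2) */
		cache_levels &= (1 << (i * 3)) - 1;
		printf("cache_levels=%#x\n", cache_levels);	/* prints 0x23 */
		return 0;
	}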
1022 | /** | ||
1023 | * kvm_reset_coprocs - sets cp15 registers to reset value | ||
1024 | * @vcpu: The VCPU pointer | ||
1025 | * | ||
1026 | * This function finds the right table above and sets the registers on the | ||
1027 | * virtual CPU struct to their architecturally defined reset values. | ||
1028 | */ | ||
1029 | void kvm_reset_coprocs(struct kvm_vcpu *vcpu) | ||
1030 | { | ||
1031 | size_t num; | ||
1032 | const struct coproc_reg *table; | ||
1033 | |||
1034 | /* Catch someone adding a register without putting in a reset entry. */ | ||
1035 | memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15)); | ||
1036 | |||
1037 | /* Generic chip reset first (so target could override). */ | ||
1038 | reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); | ||
1039 | |||
1040 | table = get_target_table(vcpu->arch.target, &num); | ||
1041 | reset_coproc_regs(vcpu, table, num); | ||
1042 | |||
1043 | for (num = 1; num < NR_CP15_REGS; num++) | ||
1044 | if (vcpu->arch.cp15[num] == 0x42424242) | ||
1045 | panic("Didn't reset vcpu->arch.cp15[%zi]", num); | ||
1046 | } | ||
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h new file mode 100644 index 000000000000..992adfafa2ff --- /dev/null +++ b/arch/arm/kvm/coproc.h | |||
@@ -0,0 +1,153 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Authors: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #ifndef __ARM_KVM_COPROC_LOCAL_H__ | ||
20 | #define __ARM_KVM_COPROC_LOCAL_H__ | ||
21 | |||
22 | struct coproc_params { | ||
23 | unsigned long CRn; | ||
24 | unsigned long CRm; | ||
25 | unsigned long Op1; | ||
26 | unsigned long Op2; | ||
27 | unsigned long Rt1; | ||
28 | unsigned long Rt2; | ||
29 | bool is_64bit; | ||
30 | bool is_write; | ||
31 | }; | ||
32 | |||
33 | struct coproc_reg { | ||
34 | /* MRC/MCR/MRRC/MCRR instruction which accesses it. */ | ||
35 | unsigned long CRn; | ||
36 | unsigned long CRm; | ||
37 | unsigned long Op1; | ||
38 | unsigned long Op2; | ||
39 | |||
40 | bool is_64; | ||
41 | |||
42 | /* Trapped access from guest, if non-NULL. */ | ||
43 | bool (*access)(struct kvm_vcpu *, | ||
44 | const struct coproc_params *, | ||
45 | const struct coproc_reg *); | ||
46 | |||
47 | /* Initialization for vcpu. */ | ||
48 | void (*reset)(struct kvm_vcpu *, const struct coproc_reg *); | ||
49 | |||
50 | /* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */ | ||
51 | unsigned long reg; | ||
52 | |||
53 | /* Value (usually reset value) */ | ||
54 | u64 val; | ||
55 | }; | ||
56 | |||
57 | static inline void print_cp_instr(const struct coproc_params *p) | ||
58 | { | ||
59 | /* Look, we even formatted it for you to paste into the table! */ | ||
60 | if (p->is_64bit) { | ||
61 | kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n", | ||
62 | p->CRm, p->Op1, p->is_write ? "write" : "read"); | ||
63 | } else { | ||
64 | kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32," | ||
65 | " func_%s },\n", | ||
66 | p->CRn, p->CRm, p->Op1, p->Op2, | ||
67 | p->is_write ? "write" : "read"); | ||
68 | } | ||
69 | } | ||
70 | |||
71 | static inline bool ignore_write(struct kvm_vcpu *vcpu, | ||
72 | const struct coproc_params *p) | ||
73 | { | ||
74 | return true; | ||
75 | } | ||
76 | |||
77 | static inline bool read_zero(struct kvm_vcpu *vcpu, | ||
78 | const struct coproc_params *p) | ||
79 | { | ||
80 | *vcpu_reg(vcpu, p->Rt1) = 0; | ||
81 | return true; | ||
82 | } | ||
83 | |||
84 | static inline bool write_to_read_only(struct kvm_vcpu *vcpu, | ||
85 | const struct coproc_params *params) | ||
86 | { | ||
87 | kvm_debug("CP15 write to read-only register at: %08x\n", | ||
88 | *vcpu_pc(vcpu)); | ||
89 | print_cp_instr(params); | ||
90 | return false; | ||
91 | } | ||
92 | |||
93 | static inline bool read_from_write_only(struct kvm_vcpu *vcpu, | ||
94 | const struct coproc_params *params) | ||
95 | { | ||
96 | kvm_debug("CP15 read from write-only register at: %08x\n", | ||
97 | *vcpu_pc(vcpu)); | ||
98 | print_cp_instr(params); | ||
99 | return false; | ||
100 | } | ||
101 | |||
102 | /* Reset functions */ | ||
103 | static inline void reset_unknown(struct kvm_vcpu *vcpu, | ||
104 | const struct coproc_reg *r) | ||
105 | { | ||
106 | BUG_ON(!r->reg); | ||
107 | BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); | ||
108 | vcpu->arch.cp15[r->reg] = 0xdecafbad; | ||
109 | } | ||
110 | |||
111 | static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r) | ||
112 | { | ||
113 | BUG_ON(!r->reg); | ||
114 | BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); | ||
115 | vcpu->arch.cp15[r->reg] = r->val; | ||
116 | } | ||
117 | |||
118 | static inline void reset_unknown64(struct kvm_vcpu *vcpu, | ||
119 | const struct coproc_reg *r) | ||
120 | { | ||
121 | BUG_ON(!r->reg); | ||
122 | BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15)); | ||
123 | |||
124 | vcpu->arch.cp15[r->reg] = 0xdecafbad; | ||
125 | vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee; | ||
126 | } | ||
127 | |||
128 | static inline int cmp_reg(const struct coproc_reg *i1, | ||
129 | const struct coproc_reg *i2) | ||
130 | { | ||
131 | BUG_ON(i1 == i2); | ||
132 | if (!i1) | ||
133 | return 1; | ||
134 | else if (!i2) | ||
135 | return -1; | ||
136 | if (i1->CRn != i2->CRn) | ||
137 | return i1->CRn - i2->CRn; | ||
138 | if (i1->CRm != i2->CRm) | ||
139 | return i1->CRm - i2->CRm; | ||
140 | if (i1->Op1 != i2->Op1) | ||
141 | return i1->Op1 - i2->Op1; | ||
142 | return i1->Op2 - i2->Op2; | ||
143 | } | ||
144 | |||
145 | |||
146 | #define CRn(_x) .CRn = _x | ||
147 | #define CRm(_x) .CRm = _x | ||
148 | #define Op1(_x) .Op1 = _x | ||
149 | #define Op2(_x) .Op2 = _x | ||
150 | #define is64 .is_64 = true | ||
151 | #define is32 .is_64 = false | ||
152 | |||
153 | #endif /* __ARM_KVM_COPROC_LOCAL_H__ */ | ||
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c new file mode 100644 index 000000000000..685063a6d0cf --- /dev/null +++ b/arch/arm/kvm/coproc_a15.c | |||
@@ -0,0 +1,162 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Authors: Rusty Russell <rusty@rustcorp.au> | ||
4 | * Christoffer Dall <c.dall@virtualopensystems.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License, version 2, as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
18 | */ | ||
19 | #include <linux/kvm_host.h> | ||
20 | #include <asm/cputype.h> | ||
21 | #include <asm/kvm_arm.h> | ||
22 | #include <asm/kvm_host.h> | ||
23 | #include <asm/kvm_emulate.h> | ||
24 | #include <asm/kvm_coproc.h> | ||
25 | #include <linux/init.h> | ||
26 | |||
27 | static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) | ||
28 | { | ||
29 | /* | ||
30 | * Compute guest MPIDR: | ||
31 | * (Even if we present only one VCPU to the guest on an SMP | ||
32 | * host we don't set the U bit in the MPIDR, or vice versa, as | ||
33 | * revealing the underlying hardware properties is likely to | ||
34 | * be the best choice). | ||
35 | */ | ||
36 | vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK) | ||
37 | | (vcpu->vcpu_id & MPIDR_LEVEL_MASK); | ||
38 | } | ||
39 | |||
40 | #include "coproc.h" | ||
41 | |||
42 | /* A15 TRM 4.3.28: RO WI */ | ||
43 | static bool access_actlr(struct kvm_vcpu *vcpu, | ||
44 | const struct coproc_params *p, | ||
45 | const struct coproc_reg *r) | ||
46 | { | ||
47 | if (p->is_write) | ||
48 | return ignore_write(vcpu, p); | ||
49 | |||
50 | *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR]; | ||
51 | return true; | ||
52 | } | ||
53 | |||
54 | /* A15 TRM 4.3.60: R/O. */ | ||
55 | static bool access_cbar(struct kvm_vcpu *vcpu, | ||
56 | const struct coproc_params *p, | ||
57 | const struct coproc_reg *r) | ||
58 | { | ||
59 | if (p->is_write) | ||
60 | return write_to_read_only(vcpu, p); | ||
61 | return read_zero(vcpu, p); | ||
62 | } | ||
63 | |||
64 | /* A15 TRM 4.3.48: R/O WI. */ | ||
65 | static bool access_l2ctlr(struct kvm_vcpu *vcpu, | ||
66 | const struct coproc_params *p, | ||
67 | const struct coproc_reg *r) | ||
68 | { | ||
69 | if (p->is_write) | ||
70 | return ignore_write(vcpu, p); | ||
71 | |||
72 | *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR]; | ||
73 | return true; | ||
74 | } | ||
75 | |||
76 | static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) | ||
77 | { | ||
78 | u32 l2ctlr, ncores; | ||
79 | |||
80 | asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr)); | ||
81 | l2ctlr &= ~(3 << 24); | ||
82 | ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1; | ||
83 | l2ctlr |= (ncores & 3) << 24; | ||
84 | |||
85 | vcpu->arch.cp15[c9_L2CTLR] = l2ctlr; | ||
86 | } | ||
87 | |||
88 | static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) | ||
89 | { | ||
90 | u32 actlr; | ||
91 | |||
92 | /* ACTLR contains SMP bit: make sure you create all cpus first! */ | ||
93 | asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr)); | ||
94 | /* Make the SMP bit consistent with the guest configuration */ | ||
95 | if (atomic_read(&vcpu->kvm->online_vcpus) > 1) | ||
96 | actlr |= 1U << 6; | ||
97 | else | ||
98 | actlr &= ~(1U << 6); | ||
99 | |||
100 | vcpu->arch.cp15[c1_ACTLR] = actlr; | ||
101 | } | ||
102 | |||
103 | /* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */ | ||
104 | static bool access_l2ectlr(struct kvm_vcpu *vcpu, | ||
105 | const struct coproc_params *p, | ||
106 | const struct coproc_reg *r) | ||
107 | { | ||
108 | if (p->is_write) | ||
109 | return ignore_write(vcpu, p); | ||
110 | |||
111 | *vcpu_reg(vcpu, p->Rt1) = 0; | ||
112 | return true; | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * A15-specific CP15 registers. | ||
117 | * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 | ||
118 | */ | ||
119 | static const struct coproc_reg a15_regs[] = { | ||
120 | /* MPIDR: we use VMPIDR for guest access. */ | ||
121 | { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32, | ||
122 | NULL, reset_mpidr, c0_MPIDR }, | ||
123 | |||
124 | /* SCTLR: swapped by interrupt.S. */ | ||
125 | { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, | ||
126 | NULL, reset_val, c1_SCTLR, 0x00C50078 }, | ||
127 | /* ACTLR: trapped by HCR.TAC bit. */ | ||
128 | { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32, | ||
129 | access_actlr, reset_actlr, c1_ACTLR }, | ||
130 | /* CPACR: swapped by interrupt.S. */ | ||
131 | { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32, | ||
132 | NULL, reset_val, c1_CPACR, 0x00000000 }, | ||
133 | |||
134 | /* | ||
135 | * L2CTLR access (guest wants to know #CPUs). | ||
136 | */ | ||
137 | { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32, | ||
138 | access_l2ctlr, reset_l2ctlr, c9_L2CTLR }, | ||
139 | { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr}, | ||
140 | |||
141 | /* The Configuration Base Address Register. */ | ||
142 | { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar}, | ||
143 | }; | ||
144 | |||
145 | static struct kvm_coproc_target_table a15_target_table = { | ||
146 | .target = KVM_ARM_TARGET_CORTEX_A15, | ||
147 | .table = a15_regs, | ||
148 | .num = ARRAY_SIZE(a15_regs), | ||
149 | }; | ||
150 | |||
151 | static int __init coproc_a15_init(void) | ||
152 | { | ||
153 | unsigned int i; | ||
154 | |||
155 | for (i = 1; i < ARRAY_SIZE(a15_regs); i++) | ||
156 | BUG_ON(cmp_reg(&a15_regs[i-1], | ||
157 | &a15_regs[i]) >= 0); | ||
158 | |||
159 | kvm_register_target_coproc_table(&a15_target_table); | ||
160 | return 0; | ||
161 | } | ||
162 | late_initcall(coproc_a15_init); | ||
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c new file mode 100644 index 000000000000..d61450ac6665 --- /dev/null +++ b/arch/arm/kvm/emulate.c | |||
@@ -0,0 +1,373 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/mm.h> | ||
20 | #include <linux/kvm_host.h> | ||
21 | #include <asm/kvm_arm.h> | ||
22 | #include <asm/kvm_emulate.h> | ||
23 | #include <trace/events/kvm.h> | ||
24 | |||
25 | #include "trace.h" | ||
26 | |||
27 | #define VCPU_NR_MODES 6 | ||
28 | #define VCPU_REG_OFFSET_USR 0 | ||
29 | #define VCPU_REG_OFFSET_FIQ 1 | ||
30 | #define VCPU_REG_OFFSET_IRQ 2 | ||
31 | #define VCPU_REG_OFFSET_SVC 3 | ||
32 | #define VCPU_REG_OFFSET_ABT 4 | ||
33 | #define VCPU_REG_OFFSET_UND 5 | ||
34 | #define REG_OFFSET(_reg) \ | ||
35 | (offsetof(struct kvm_regs, _reg) / sizeof(u32)) | ||
36 | |||
37 | #define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num]) | ||
38 | |||
39 | static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = { | ||
40 | /* USR/SYS Registers */ | ||
41 | [VCPU_REG_OFFSET_USR] = { | ||
42 | USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), | ||
43 | USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), | ||
44 | USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), | ||
45 | USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), | ||
46 | USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14), | ||
47 | }, | ||
48 | |||
49 | /* FIQ Registers */ | ||
50 | [VCPU_REG_OFFSET_FIQ] = { | ||
51 | USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), | ||
52 | USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), | ||
53 | USR_REG_OFFSET(6), USR_REG_OFFSET(7), | ||
54 | REG_OFFSET(fiq_regs[0]), /* r8 */ | ||
55 | REG_OFFSET(fiq_regs[1]), /* r9 */ | ||
56 | REG_OFFSET(fiq_regs[2]), /* r10 */ | ||
57 | REG_OFFSET(fiq_regs[3]), /* r11 */ | ||
58 | REG_OFFSET(fiq_regs[4]), /* r12 */ | ||
59 | REG_OFFSET(fiq_regs[5]), /* r13 */ | ||
60 | REG_OFFSET(fiq_regs[6]), /* r14 */ | ||
61 | }, | ||
62 | |||
63 | /* IRQ Registers */ | ||
64 | [VCPU_REG_OFFSET_IRQ] = { | ||
65 | USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), | ||
66 | USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), | ||
67 | USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), | ||
68 | USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), | ||
69 | USR_REG_OFFSET(12), | ||
70 | REG_OFFSET(irq_regs[0]), /* r13 */ | ||
71 | REG_OFFSET(irq_regs[1]), /* r14 */ | ||
72 | }, | ||
73 | |||
74 | /* SVC Registers */ | ||
75 | [VCPU_REG_OFFSET_SVC] = { | ||
76 | USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), | ||
77 | USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), | ||
78 | USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), | ||
79 | USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), | ||
80 | USR_REG_OFFSET(12), | ||
81 | REG_OFFSET(svc_regs[0]), /* r13 */ | ||
82 | REG_OFFSET(svc_regs[1]), /* r14 */ | ||
83 | }, | ||
84 | |||
85 | /* ABT Registers */ | ||
86 | [VCPU_REG_OFFSET_ABT] = { | ||
87 | USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), | ||
88 | USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), | ||
89 | USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), | ||
90 | USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), | ||
91 | USR_REG_OFFSET(12), | ||
92 | REG_OFFSET(abt_regs[0]), /* r13 */ | ||
93 | REG_OFFSET(abt_regs[1]), /* r14 */ | ||
94 | }, | ||
95 | |||
96 | /* UND Registers */ | ||
97 | [VCPU_REG_OFFSET_UND] = { | ||
98 | USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), | ||
99 | USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), | ||
100 | USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), | ||
101 | USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), | ||
102 | USR_REG_OFFSET(12), | ||
103 | REG_OFFSET(und_regs[0]), /* r13 */ | ||
104 | REG_OFFSET(und_regs[1]), /* r14 */ | ||
105 | }, | ||
106 | }; | ||
107 | |||
108 | /* | ||
109 | * Return a pointer to the register identified by reg_num, resolved | ||
110 | * through the register banking of the virtual CPU's current mode. | ||
111 | */ | ||
112 | u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) | ||
113 | { | ||
114 | u32 *reg_array = (u32 *)&vcpu->arch.regs; | ||
115 | u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK; | ||
116 | |||
117 | switch (mode) { | ||
118 | case USR_MODE ... SVC_MODE: | ||
119 | mode &= ~MODE32_BIT; /* 0 ... 3 */ | ||
120 | break; | ||
121 | |||
122 | case ABT_MODE: | ||
123 | mode = VCPU_REG_OFFSET_ABT; | ||
124 | break; | ||
125 | |||
126 | case UND_MODE: | ||
127 | mode = VCPU_REG_OFFSET_UND; | ||
128 | break; | ||
129 | |||
130 | case SYSTEM_MODE: | ||
131 | mode = VCPU_REG_OFFSET_USR; | ||
132 | break; | ||
133 | |||
134 | default: | ||
135 | BUG(); | ||
136 | } | ||
137 | |||
138 | return reg_array + vcpu_reg_offsets[mode][reg_num]; | ||
139 | } | ||
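
The table above lets vcpu_reg() treat struct kvm_regs as a flat array of u32: the CPSR mode bits pick the row, the register number the column, and register banking falls out of the data rather than branchy code. A minimal usage sketch (hypothetical helper, not part of this patch):

    /* Set the banked SP (r13) of whatever mode the guest is currently in;
     * in FIQ mode this resolves to &vcpu->arch.regs.fiq_regs[5]. */
    static void set_guest_sp(struct kvm_vcpu *vcpu, u32 value)
    {
            *vcpu_reg(vcpu, 13) = value;
    }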
140 | |||
141 | /* | ||
142 | * Return the SPSR for the current mode of the virtual CPU. | ||
143 | */ | ||
144 | u32 *vcpu_spsr(struct kvm_vcpu *vcpu) | ||
145 | { | ||
146 | u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK; | ||
147 | switch (mode) { | ||
148 | case SVC_MODE: | ||
149 | return &vcpu->arch.regs.KVM_ARM_SVC_spsr; | ||
150 | case ABT_MODE: | ||
151 | return &vcpu->arch.regs.KVM_ARM_ABT_spsr; | ||
152 | case UND_MODE: | ||
153 | return &vcpu->arch.regs.KVM_ARM_UND_spsr; | ||
154 | case IRQ_MODE: | ||
155 | return &vcpu->arch.regs.KVM_ARM_IRQ_spsr; | ||
156 | case FIQ_MODE: | ||
157 | return &vcpu->arch.regs.KVM_ARM_FIQ_spsr; | ||
158 | default: | ||
159 | BUG(); | ||
160 | } | ||
161 | } | ||
162 | |||
163 | /** | ||
164 | * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest | ||
165 | * @vcpu: the vcpu pointer | ||
166 | * @run: the kvm_run structure pointer | ||
167 | * | ||
168 | * Blocks the VCPU (via kvm_vcpu_block), which halts execution of | ||
169 | * world-switches and schedules other host processes until there is an | ||
170 | * incoming IRQ or FIQ to the VM. | ||
171 | */ | ||
172 | int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
173 | { | ||
174 | trace_kvm_wfi(*vcpu_pc(vcpu)); | ||
175 | kvm_vcpu_block(vcpu); | ||
176 | return 1; | ||
177 | } | ||
178 | |||
179 | /** | ||
180 | * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block | ||
181 | * @vcpu: The VCPU pointer | ||
182 | * | ||
183 | * When exceptions occur while instructions are executed in Thumb IF-THEN | ||
184 | * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have | ||
185 | * to do this little bit of work manually. The fields map like this: | ||
186 | * | ||
187 | * IT[7:0] -> CPSR[26:25],CPSR[15:10] | ||
188 | */ | ||
189 | static void kvm_adjust_itstate(struct kvm_vcpu *vcpu) | ||
190 | { | ||
191 | unsigned long itbits, cond; | ||
192 | unsigned long cpsr = *vcpu_cpsr(vcpu); | ||
193 | bool is_arm = !(cpsr & PSR_T_BIT); | ||
194 | |||
195 | BUG_ON(is_arm && (cpsr & PSR_IT_MASK)); | ||
196 | |||
197 | if (!(cpsr & PSR_IT_MASK)) | ||
198 | return; | ||
199 | |||
200 | cond = (cpsr & 0xe000) >> 13; | ||
201 | itbits = (cpsr & 0x1c00) >> (10 - 2); | ||
202 | itbits |= (cpsr & (0x3 << 25)) >> 25; | ||
203 | |||
204 | /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */ | ||
205 | if ((itbits & 0x7) == 0) | ||
206 | itbits = cond = 0; | ||
207 | else | ||
208 | itbits = (itbits << 1) & 0x1f; | ||
209 | |||
210 | cpsr &= ~PSR_IT_MASK; | ||
211 | cpsr |= cond << 13; | ||
212 | cpsr |= (itbits & 0x1c) << (10 - 2); | ||
213 | cpsr |= (itbits & 0x3) << 25; | ||
214 | *vcpu_cpsr(vcpu) = cpsr; | ||
215 | } | ||
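
The advance step is easier to follow on the architectural 8-bit ITSTATE value before it is scattered across the CPSR. A standalone model of ITAdvance (illustration only, assuming the ARM ARM layout of the base condition in IT[7:5] and the mask in IT[4:0]):

    /* Advance an 8-bit ITSTATE as hardware does after each instruction:
     * clear it when the IT block ends, otherwise shift the mask left. */
    static unsigned char it_advance(unsigned char itstate)
    {
            if ((itstate & 0x07) == 0)      /* last instruction of the block */
                    return 0;
            return (itstate & 0xe0) | ((itstate << 1) & 0x1f);
    }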
216 | |||
217 | /** | ||
218 | * kvm_skip_instr - skip a trapped instruction and proceed to the next | ||
219 | * @vcpu: The vcpu pointer | ||
220 | */ | ||
221 | void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) | ||
222 | { | ||
223 | bool is_thumb; | ||
224 | |||
225 | is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT); | ||
226 | if (is_thumb && !is_wide_instr) | ||
227 | *vcpu_pc(vcpu) += 2; | ||
228 | else | ||
229 | *vcpu_pc(vcpu) += 4; | ||
230 | kvm_adjust_itstate(vcpu); | ||
231 | } | ||
232 | |||
233 | |||
234 | /****************************************************************************** | ||
235 | * Inject exceptions into the guest | ||
236 | */ | ||
237 | |||
238 | static u32 exc_vector_base(struct kvm_vcpu *vcpu) | ||
239 | { | ||
240 | u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; | ||
241 | u32 vbar = vcpu->arch.cp15[c12_VBAR]; | ||
242 | |||
243 | if (sctlr & SCTLR_V) | ||
244 | return 0xffff0000; | ||
245 | else /* always have security exceptions */ | ||
246 | return vbar; | ||
247 | } | ||
248 | |||
249 | /** | ||
250 | * kvm_inject_undefined - inject an undefined exception into the guest | ||
251 | * @vcpu: The VCPU to receive the undefined exception | ||
252 | * | ||
253 | * It is assumed that this code is called from the VCPU thread and that the | ||
254 | * VCPU therefore is not currently executing guest code. | ||
255 | * | ||
256 | * Modelled after TakeUndefInstrException() pseudocode. | ||
257 | */ | ||
258 | void kvm_inject_undefined(struct kvm_vcpu *vcpu) | ||
259 | { | ||
260 | u32 new_lr_value; | ||
261 | u32 new_spsr_value; | ||
262 | u32 cpsr = *vcpu_cpsr(vcpu); | ||
263 | u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; | ||
264 | bool is_thumb = (cpsr & PSR_T_BIT); | ||
265 | u32 vect_offset = 4; | ||
266 | u32 return_offset = (is_thumb) ? 2 : 4; | ||
267 | |||
268 | new_spsr_value = cpsr; | ||
269 | new_lr_value = *vcpu_pc(vcpu) - return_offset; | ||
270 | |||
271 | *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE; | ||
272 | *vcpu_cpsr(vcpu) |= PSR_I_BIT; | ||
273 | *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); | ||
274 | |||
275 | if (sctlr & SCTLR_TE) | ||
276 | *vcpu_cpsr(vcpu) |= PSR_T_BIT; | ||
277 | if (sctlr & SCTLR_EE) | ||
278 | *vcpu_cpsr(vcpu) |= PSR_E_BIT; | ||
279 | |||
280 | /* Note: These now point to UND banked copies */ | ||
281 | *vcpu_spsr(vcpu) = cpsr; | ||
282 | *vcpu_reg(vcpu, 14) = new_lr_value; | ||
283 | |||
284 | /* Branch to exception vector */ | ||
285 | *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; | ||
286 | } | ||
287 | |||
288 | /* | ||
289 | * Modelled after TakeDataAbortException() and TakePrefetchAbortException | ||
290 | * pseudocode. | ||
291 | */ | ||
292 | static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) | ||
293 | { | ||
294 | u32 new_lr_value; | ||
295 | u32 new_spsr_value; | ||
296 | u32 cpsr = *vcpu_cpsr(vcpu); | ||
297 | u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; | ||
298 | bool is_thumb = (cpsr & PSR_T_BIT); | ||
299 | u32 vect_offset; | ||
300 | u32 return_offset = (is_thumb) ? 4 : 0; | ||
301 | bool is_lpae; | ||
302 | |||
303 | new_spsr_value = cpsr; | ||
304 | new_lr_value = *vcpu_pc(vcpu) + return_offset; | ||
305 | |||
306 | *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE; | ||
307 | *vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT; | ||
308 | *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); | ||
309 | |||
310 | if (sctlr & SCTLR_TE) | ||
311 | *vcpu_cpsr(vcpu) |= PSR_T_BIT; | ||
312 | if (sctlr & SCTLR_EE) | ||
313 | *vcpu_cpsr(vcpu) |= PSR_E_BIT; | ||
314 | |||
315 | /* Note: These now point to ABT banked copies */ | ||
316 | *vcpu_spsr(vcpu) = cpsr; | ||
317 | *vcpu_reg(vcpu, 14) = new_lr_value; | ||
318 | |||
319 | if (is_pabt) | ||
320 | vect_offset = 12; | ||
321 | else | ||
322 | vect_offset = 16; | ||
323 | |||
324 | /* Branch to exception vector */ | ||
325 | *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; | ||
326 | |||
327 | if (is_pabt) { | ||
329 | /* Set IFAR and IFSR */ | ||
329 | vcpu->arch.cp15[c6_IFAR] = addr; | ||
330 | is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); | ||
331 | /* Always give debug fault for now - should give guest a clue */ | ||
332 | if (is_lpae) | ||
333 | vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22; | ||
334 | else | ||
335 | vcpu->arch.cp15[c5_IFSR] = 2; | ||
336 | } else { /* !iabt */ | ||
337 | /* Set DFAR and DFSR */ | ||
338 | vcpu->arch.cp15[c6_DFAR] = addr; | ||
339 | is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); | ||
340 | /* Always give debug fault for now - should give guest a clue */ | ||
341 | if (is_lpae) | ||
342 | vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22; | ||
343 | else | ||
344 | vcpu->arch.cp15[c5_DFSR] = 2; | ||
345 | } | ||
346 | |||
347 | } | ||
348 | |||
349 | /** | ||
350 | * kvm_inject_dabt - inject a data abort into the guest | ||
351 | * @vcpu: The VCPU to receive the data abort | ||
352 | * @addr: The address to report in the DFAR | ||
353 | * | ||
354 | * It is assumed that this code is called from the VCPU thread and that the | ||
355 | * VCPU therefore is not currently executing guest code. | ||
356 | */ | ||
357 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) | ||
358 | { | ||
359 | inject_abt(vcpu, false, addr); | ||
360 | } | ||
361 | |||
362 | /** | ||
363 | * kvm_inject_pabt - inject a prefetch abort into the guest | ||
364 | * @vcpu: The VCPU to receive the prefetch abort | ||
365 | * @addr: The address to report in the IFAR | ||
366 | * | ||
367 | * It is assumed that this code is called from the VCPU thread and that the | ||
368 | * VCPU therefore is not currently executing guest code. | ||
369 | */ | ||
370 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) | ||
371 | { | ||
372 | inject_abt(vcpu, true, addr); | ||
373 | } | ||
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c new file mode 100644 index 000000000000..2339d9609d36 --- /dev/null +++ b/arch/arm/kvm/guest.c | |||
@@ -0,0 +1,222 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/errno.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/kvm_host.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/vmalloc.h> | ||
24 | #include <linux/fs.h> | ||
25 | #include <asm/uaccess.h> | ||
26 | #include <asm/kvm.h> | ||
27 | #include <asm/kvm_asm.h> | ||
28 | #include <asm/kvm_emulate.h> | ||
29 | #include <asm/kvm_coproc.h> | ||
30 | |||
31 | #define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM } | ||
32 | #define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU } | ||
33 | |||
34 | struct kvm_stats_debugfs_item debugfs_entries[] = { | ||
35 | { NULL } | ||
36 | }; | ||
37 | |||
38 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | ||
39 | { | ||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | static u64 core_reg_offset_from_id(u64 id) | ||
44 | { | ||
45 | return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE); | ||
46 | } | ||
47 | |||
48 | static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
49 | { | ||
50 | u32 __user *uaddr = (u32 __user *)(long)reg->addr; | ||
51 | struct kvm_regs *regs = &vcpu->arch.regs; | ||
52 | u64 off; | ||
53 | |||
54 | if (KVM_REG_SIZE(reg->id) != 4) | ||
55 | return -ENOENT; | ||
56 | |||
57 | /* Our ID is an index into the kvm_regs struct. */ | ||
58 | off = core_reg_offset_from_id(reg->id); | ||
59 | if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id)) | ||
60 | return -ENOENT; | ||
61 | |||
62 | return put_user(((u32 *)regs)[off], uaddr); | ||
63 | } | ||
64 | |||
65 | static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
66 | { | ||
67 | u32 __user *uaddr = (u32 __user *)(long)reg->addr; | ||
68 | struct kvm_regs *regs = &vcpu->arch.regs; | ||
69 | u64 off, val; | ||
70 | |||
71 | if (KVM_REG_SIZE(reg->id) != 4) | ||
72 | return -ENOENT; | ||
73 | |||
74 | /* Our ID is an index into the kvm_regs struct. */ | ||
75 | off = core_reg_offset_from_id(reg->id); | ||
76 | if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id)) | ||
77 | return -ENOENT; | ||
78 | |||
79 | if (get_user(val, uaddr) != 0) | ||
80 | return -EFAULT; | ||
81 | |||
82 | if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) { | ||
83 | unsigned long mode = val & MODE_MASK; | ||
84 | switch (mode) { | ||
85 | case USR_MODE: | ||
86 | case FIQ_MODE: | ||
87 | case IRQ_MODE: | ||
88 | case SVC_MODE: | ||
89 | case ABT_MODE: | ||
90 | case UND_MODE: | ||
91 | break; | ||
92 | default: | ||
93 | return -EINVAL; | ||
94 | } | ||
95 | } | ||
96 | |||
97 | ((u32 *)regs)[off] = val; | ||
98 | return 0; | ||
99 | } | ||
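
Both handlers sit behind the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls, with the register id carrying the u32 index into struct kvm_regs alongside the ARM, core and size flags. A hedged sketch of the user-space side (hypothetical helper; KVM_REG_ARM_CORE_REG comes from the uapi asm/kvm.h added by this series):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Read the guest's r0 through KVM_GET_ONE_REG. */
    static int read_guest_r0(int vcpu_fd, __u32 *out)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE |
                            KVM_REG_ARM_CORE_REG(usr_regs.uregs[0]),
                    .addr = (__u64)(unsigned long)out,
            };
            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }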
100 | |||
101 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
102 | { | ||
103 | return -EINVAL; | ||
104 | } | ||
105 | |||
106 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
107 | { | ||
108 | return -EINVAL; | ||
109 | } | ||
110 | |||
111 | static unsigned long num_core_regs(void) | ||
112 | { | ||
113 | return sizeof(struct kvm_regs) / sizeof(u32); | ||
114 | } | ||
115 | |||
116 | /** | ||
117 | * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG | ||
118 | * | ||
119 | * This is for all registers. | ||
120 | */ | ||
121 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) | ||
122 | { | ||
123 | return num_core_regs() + kvm_arm_num_coproc_regs(vcpu); | ||
124 | } | ||
125 | |||
126 | /** | ||
127 | * kvm_arm_copy_reg_indices - get indices of all registers. | ||
128 | * | ||
129 | * We do core registers right here, then we append coproc regs. | ||
130 | */ | ||
131 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | ||
132 | { | ||
133 | unsigned int i; | ||
134 | const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE; | ||
135 | |||
136 | for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) { | ||
137 | if (put_user(core_reg | i, uindices)) | ||
138 | return -EFAULT; | ||
139 | uindices++; | ||
140 | } | ||
141 | |||
142 | return kvm_arm_copy_coproc_indices(vcpu, uindices); | ||
143 | } | ||
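
The index list is consumed via KVM_GET_REG_LIST, which user space conventionally calls twice: the first call fails with E2BIG but writes back the required count, the second fills a suitably sized buffer. A sketch under those assumptions (error handling trimmed):

    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    /* Enumerate every register id this vcpu exposes; NULL on failure. */
    static struct kvm_reg_list *get_reg_list(int vcpu_fd)
    {
            struct kvm_reg_list probe = { .n = 0 }, *list;

            ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe); /* -E2BIG, sets probe.n */
            list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
            if (!list)
                    return NULL;
            list->n = probe.n;
            if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
                    free(list);
                    return NULL;
            }
            return list;
    }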
144 | |||
145 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
146 | { | ||
147 | /* We currently use nothing arch-specific in upper 32 bits */ | ||
148 | if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32) | ||
149 | return -EINVAL; | ||
150 | |||
151 | /* Register group 16 means we want a core register. */ | ||
152 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) | ||
153 | return get_core_reg(vcpu, reg); | ||
154 | |||
155 | return kvm_arm_coproc_get_reg(vcpu, reg); | ||
156 | } | ||
157 | |||
158 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
159 | { | ||
160 | /* We currently use nothing arch-specific in upper 32 bits */ | ||
161 | if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32) | ||
162 | return -EINVAL; | ||
163 | |||
164 | /* Register group 16 means we set a core register. */ | ||
165 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) | ||
166 | return set_core_reg(vcpu, reg); | ||
167 | |||
168 | return kvm_arm_coproc_set_reg(vcpu, reg); | ||
169 | } | ||
170 | |||
171 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | ||
172 | struct kvm_sregs *sregs) | ||
173 | { | ||
174 | return -EINVAL; | ||
175 | } | ||
176 | |||
177 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | ||
178 | struct kvm_sregs *sregs) | ||
179 | { | ||
180 | return -EINVAL; | ||
181 | } | ||
182 | |||
183 | int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, | ||
184 | const struct kvm_vcpu_init *init) | ||
185 | { | ||
186 | unsigned int i; | ||
187 | |||
188 | /* We can only do a Cortex-A15 for now. */ | ||
189 | if (init->target != kvm_target_cpu()) | ||
190 | return -EINVAL; | ||
191 | |||
192 | vcpu->arch.target = init->target; | ||
193 | bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); | ||
194 | |||
195 | /* -ENOENT for unknown features, -EINVAL for invalid combinations. */ | ||
196 | for (i = 0; i < sizeof(init->features) * 8; i++) { | ||
197 | if (test_bit(i, (void *)init->features)) { | ||
198 | if (i >= KVM_VCPU_MAX_FEATURES) | ||
199 | return -ENOENT; | ||
200 | set_bit(i, vcpu->arch.features); | ||
201 | } | ||
202 | } | ||
203 | |||
204 | /* Now we know what it is, we can reset it. */ | ||
205 | return kvm_reset_vcpu(vcpu); | ||
206 | } | ||
207 | |||
208 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | ||
209 | { | ||
210 | return -EINVAL; | ||
211 | } | ||
212 | |||
213 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | ||
214 | { | ||
215 | return -EINVAL; | ||
216 | } | ||
217 | |||
218 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | ||
219 | struct kvm_translation *tr) | ||
220 | { | ||
221 | return -EINVAL; | ||
222 | } | ||
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S new file mode 100644 index 000000000000..9f37a79b880b --- /dev/null +++ b/arch/arm/kvm/init.S | |||
@@ -0,0 +1,114 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/linkage.h> | ||
20 | #include <asm/unified.h> | ||
21 | #include <asm/asm-offsets.h> | ||
22 | #include <asm/kvm_asm.h> | ||
23 | #include <asm/kvm_arm.h> | ||
24 | |||
25 | /******************************************************************** | ||
26 | * Hypervisor initialization | ||
27 | * - should be called with: | ||
28 | * r0,r1 = Hypervisor pgd pointer | ||
29 | * r2 = top of Hyp stack (kernel VA) | ||
30 | * r3 = pointer to hyp vectors | ||
31 | */ | ||
32 | |||
33 | .text | ||
34 | .pushsection .hyp.idmap.text,"ax" | ||
35 | .align 5 | ||
36 | __kvm_hyp_init: | ||
37 | .globl __kvm_hyp_init | ||
38 | |||
39 | @ Hyp-mode exception vector | ||
40 | W(b) . | ||
41 | W(b) . | ||
42 | W(b) . | ||
43 | W(b) . | ||
44 | W(b) . | ||
45 | W(b) __do_hyp_init | ||
46 | W(b) . | ||
47 | W(b) . | ||
48 | |||
49 | __do_hyp_init: | ||
50 | @ Set the HTTBR to point to the hypervisor PGD pointer passed | ||
51 | mcrr p15, 4, r0, r1, c2 | ||
52 | |||
53 | @ Set the HTCR and VTCR to the same shareability and cacheability | ||
54 | @ settings as the non-secure TTBCR and with T0SZ == 0. | ||
55 | mrc p15, 4, r0, c2, c0, 2 @ HTCR | ||
56 | ldr r12, =HTCR_MASK | ||
57 | bic r0, r0, r12 | ||
58 | mrc p15, 0, r1, c2, c0, 2 @ TTBCR | ||
59 | and r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ) | ||
60 | orr r0, r0, r1 | ||
61 | mcr p15, 4, r0, c2, c0, 2 @ HTCR | ||
62 | |||
63 | mrc p15, 4, r1, c2, c1, 2 @ VTCR | ||
64 | ldr r12, =VTCR_MASK | ||
65 | bic r1, r1, r12 | ||
66 | bic r0, r0, #(~VTCR_HTCR_SH) @ clear non-reusable HTCR bits | ||
67 | orr r1, r0, r1 | ||
68 | orr r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S) | ||
69 | mcr p15, 4, r1, c2, c1, 2 @ VTCR | ||
70 | |||
71 | @ Use the same memory attributes for hyp. accesses as the kernel | ||
72 | @ (copy MAIRx to HMAIRx). | ||
73 | mrc p15, 0, r0, c10, c2, 0 | ||
74 | mcr p15, 4, r0, c10, c2, 0 | ||
75 | mrc p15, 0, r0, c10, c2, 1 | ||
76 | mcr p15, 4, r0, c10, c2, 1 | ||
77 | |||
78 | @ Set the HSCTLR to: | ||
79 | @ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel) | ||
80 | @ - Endianness: Kernel config | ||
81 | @ - Fast Interrupt Features: Kernel config | ||
82 | @ - Write permission implies XN: disabled | ||
83 | @ - Instruction cache: enabled | ||
84 | @ - Data/Unified cache: enabled | ||
85 | @ - Memory alignment checks: enabled | ||
86 | @ - MMU: enabled (this code must be run from an identity mapping) | ||
87 | mrc p15, 4, r0, c1, c0, 0 @ HSCTLR | ||
88 | ldr r12, =HSCTLR_MASK | ||
89 | bic r0, r0, r12 | ||
90 | mrc p15, 0, r1, c1, c0, 0 @ SCTLR | ||
91 | ldr r12, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C) | ||
92 | and r1, r1, r12 | ||
93 | ARM( ldr r12, =(HSCTLR_M | HSCTLR_A) ) | ||
94 | THUMB( ldr r12, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) ) | ||
95 | orr r1, r1, r12 | ||
96 | orr r0, r0, r1 | ||
97 | isb | ||
98 | mcr p15, 4, r0, c1, c0, 0 @ HSCTLR | ||
99 | isb | ||
100 | |||
101 | @ Set stack pointer and return to the kernel | ||
102 | mov sp, r2 | ||
103 | |||
104 | @ Set HVBAR to point to the HYP vectors | ||
105 | mcr p15, 4, r3, c12, c0, 0 @ HVBAR | ||
106 | |||
107 | eret | ||
108 | |||
109 | .ltorg | ||
110 | |||
111 | .globl __kvm_hyp_init_end | ||
112 | __kvm_hyp_init_end: | ||
113 | |||
114 | .popsection | ||
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S new file mode 100644 index 000000000000..c5400d2e97ca --- /dev/null +++ b/arch/arm/kvm/interrupts.S | |||
@@ -0,0 +1,478 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/linkage.h> | ||
20 | #include <linux/const.h> | ||
21 | #include <asm/unified.h> | ||
22 | #include <asm/page.h> | ||
23 | #include <asm/ptrace.h> | ||
24 | #include <asm/asm-offsets.h> | ||
25 | #include <asm/kvm_asm.h> | ||
26 | #include <asm/kvm_arm.h> | ||
27 | #include <asm/vfpmacros.h> | ||
28 | #include "interrupts_head.S" | ||
29 | |||
30 | .text | ||
31 | |||
32 | __kvm_hyp_code_start: | ||
33 | .globl __kvm_hyp_code_start | ||
34 | |||
35 | /******************************************************************** | ||
36 | * Flush per-VMID TLBs | ||
37 | * | ||
38 | * void __kvm_tlb_flush_vmid(struct kvm *kvm); | ||
39 | * | ||
40 | * We rely on the hardware to broadcast the TLB invalidation to all CPUs | ||
41 | * inside the inner-shareable domain (which is the case for all v7 | ||
42 | * implementations). If we come across a non-IS SMP implementation, we'll | ||
43 | * have to use an IPI based mechanism. Until then, we stick to the simple | ||
44 | * hardware assisted version. | ||
45 | */ | ||
46 | ENTRY(__kvm_tlb_flush_vmid) | ||
47 | push {r2, r3} | ||
48 | |||
49 | add r0, r0, #KVM_VTTBR | ||
50 | ldrd r2, r3, [r0] | ||
51 | mcrr p15, 6, r2, r3, c2 @ Write VTTBR | ||
52 | isb | ||
53 | mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored) | ||
54 | dsb | ||
55 | isb | ||
56 | mov r2, #0 | ||
57 | mov r3, #0 | ||
58 | mcrr p15, 6, r2, r3, c2 @ Back to VMID #0 | ||
59 | isb @ Not necessary if followed by eret | ||
60 | |||
61 | pop {r2, r3} | ||
62 | bx lr | ||
63 | ENDPROC(__kvm_tlb_flush_vmid) | ||
64 | |||
65 | /******************************************************************** | ||
66 | * Flush TLBs and instruction caches of all CPUs inside the inner-shareable | ||
67 | * domain, for all VMIDs | ||
68 | * | ||
69 | * void __kvm_flush_vm_context(void); | ||
70 | */ | ||
71 | ENTRY(__kvm_flush_vm_context) | ||
72 | mov r0, #0 @ rn parameter for the flush operations below is SBZ | ||
73 | |||
74 | /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */ | ||
75 | mcr p15, 4, r0, c8, c3, 4 | ||
76 | /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */ | ||
77 | mcr p15, 0, r0, c7, c1, 0 | ||
78 | dsb | ||
79 | isb @ Not necessary if followed by eret | ||
80 | |||
81 | bx lr | ||
82 | ENDPROC(__kvm_flush_vm_context) | ||
83 | |||
84 | |||
85 | /******************************************************************** | ||
86 | * Hypervisor world-switch code | ||
87 | * | ||
88 | * | ||
89 | * int __kvm_vcpu_run(struct kvm_vcpu *vcpu) | ||
90 | */ | ||
91 | ENTRY(__kvm_vcpu_run) | ||
92 | @ Save the vcpu pointer | ||
93 | mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR | ||
94 | |||
95 | save_host_regs | ||
96 | |||
97 | @ Store hardware CP15 state and load guest state | ||
98 | read_cp15_state store_to_vcpu = 0 | ||
99 | write_cp15_state read_from_vcpu = 1 | ||
100 | |||
101 | @ If the host kernel has not been configured with VFPv3 support, | ||
102 | @ then it is safer if we deny guests from using it as well. | ||
103 | #ifdef CONFIG_VFPv3 | ||
104 | @ Set FPEXC_EN so the guest doesn't trap floating point instructions | ||
105 | VFPFMRX r2, FPEXC @ VMRS | ||
106 | push {r2} | ||
107 | orr r2, r2, #FPEXC_EN | ||
108 | VFPFMXR FPEXC, r2 @ VMSR | ||
109 | #endif | ||
110 | |||
111 | @ Configure Hyp-role | ||
112 | configure_hyp_role vmentry | ||
113 | |||
114 | @ Trap coprocessor CRx accesses | ||
115 | set_hstr vmentry | ||
116 | set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) | ||
117 | set_hdcr vmentry | ||
118 | |||
119 | @ Write configured ID register into MIDR alias | ||
120 | ldr r1, [vcpu, #VCPU_MIDR] | ||
121 | mcr p15, 4, r1, c0, c0, 0 | ||
122 | |||
123 | @ Write guest view of MPIDR into VMPIDR | ||
124 | ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)] | ||
125 | mcr p15, 4, r1, c0, c0, 5 | ||
126 | |||
127 | @ Set up guest memory translation | ||
128 | ldr r1, [vcpu, #VCPU_KVM] | ||
129 | add r1, r1, #KVM_VTTBR | ||
130 | ldrd r2, r3, [r1] | ||
131 | mcrr p15, 6, r2, r3, c2 @ Write VTTBR | ||
132 | |||
133 | @ We're all done, just restore the GPRs and go to the guest | ||
134 | restore_guest_regs | ||
135 | clrex @ Clear exclusive monitor | ||
136 | eret | ||
137 | |||
138 | __kvm_vcpu_return: | ||
139 | /* | ||
140 | * return convention: | ||
141 | * guest r0, r1, r2 saved on the stack | ||
142 | * r0: vcpu pointer | ||
143 | * r1: exception code | ||
144 | */ | ||
145 | save_guest_regs | ||
146 | |||
147 | @ Set VMID == 0 | ||
148 | mov r2, #0 | ||
149 | mov r3, #0 | ||
150 | mcrr p15, 6, r2, r3, c2 @ Write VTTBR | ||
151 | |||
152 | @ Don't trap coprocessor accesses for host kernel | ||
153 | set_hstr vmexit | ||
154 | set_hdcr vmexit | ||
155 | set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) | ||
156 | |||
157 | #ifdef CONFIG_VFPv3 | ||
158 | @ Save floating point registers if we let the guest use them. | ||
159 | tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11)) | ||
160 | bne after_vfp_restore | ||
161 | |||
162 | @ Switch VFP/NEON hardware state to the host's | ||
163 | add r7, vcpu, #VCPU_VFP_GUEST | ||
164 | store_vfp_state r7 | ||
165 | add r7, vcpu, #VCPU_VFP_HOST | ||
166 | ldr r7, [r7] | ||
167 | restore_vfp_state r7 | ||
168 | |||
169 | after_vfp_restore: | ||
170 | @ Restore FPEXC_EN which we clobbered on entry | ||
171 | pop {r2} | ||
172 | VFPFMXR FPEXC, r2 | ||
173 | #endif | ||
174 | |||
175 | @ Reset Hyp-role | ||
176 | configure_hyp_role vmexit | ||
177 | |||
178 | @ Let host read hardware MIDR | ||
179 | mrc p15, 0, r2, c0, c0, 0 | ||
180 | mcr p15, 4, r2, c0, c0, 0 | ||
181 | |||
182 | @ Back to hardware MPIDR | ||
183 | mrc p15, 0, r2, c0, c0, 5 | ||
184 | mcr p15, 4, r2, c0, c0, 5 | ||
185 | |||
186 | @ Store guest CP15 state and restore host state | ||
187 | read_cp15_state store_to_vcpu = 1 | ||
188 | write_cp15_state read_from_vcpu = 0 | ||
189 | |||
190 | restore_host_regs | ||
191 | clrex @ Clear exclusive monitor | ||
192 | mov r0, r1 @ Return the return code | ||
193 | mov r1, #0 @ Clear upper bits in return value | ||
194 | bx lr @ return to IOCTL | ||
195 | |||
196 | /******************************************************************** | ||
197 | * Call function in Hyp mode | ||
198 | * | ||
199 | * | ||
200 | * u64 kvm_call_hyp(void *hypfn, ...); | ||
201 | * | ||
202 | * This is not really a variadic function in the classic C sense; care must | ||
203 | * be taken when calling it to ensure parameters are passed in registers | ||
204 | * only, since the caller and the callee use different stacks. | ||
205 | * | ||
206 | * Call the function with the first argument containing a pointer to the | ||
207 | * function you wish to call in Hyp mode, and subsequent arguments will be | ||
208 | * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the | ||
209 | * function pointer can be passed). The function being called must be mapped | ||
210 | * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are | ||
211 | * passed in r0 and r1. | ||
212 | * | ||
213 | * The calling convention follows the standard AAPCS: | ||
214 | * r0 - r3: caller save | ||
215 | * r12: caller save | ||
216 | * rest: callee save | ||
217 | */ | ||
218 | ENTRY(kvm_call_hyp) | ||
219 | hvc #0 | ||
220 | bx lr | ||
221 | |||
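
In C terms the convention above reduces to: pass the Hyp function pointer first, then at most three arguments, all of which must fit in registers. A usage sketch matching the flush helpers earlier in this file (illustrative wrapper, not part of the patch):

    /* Run __kvm_tlb_flush_vmid in Hyp mode; 'kvm' arrives there in r0. */
    static void flush_vm_tlbs(struct kvm *kvm)
    {
            kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
    }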
222 | /******************************************************************** | ||
223 | * Hypervisor exception vector and handlers | ||
224 | * | ||
225 | * | ||
226 | * The KVM/ARM Hypervisor ABI is defined as follows: | ||
227 | * | ||
228 | * Entry to Hyp mode from the host kernel will happen _only_ when an HVC | ||
229 | * instruction is issued since all traps are disabled when running the host | ||
230 | * kernel as per the Hyp-mode initialization at boot time. | ||
231 | * | ||
232 | * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc | ||
233 | * below) when the HVC instruction is called from SVC mode (i.e. a guest or the | ||
234 | * host kernel) and they cause a trap to the vector page + offset 0xc when HVC | ||
235 | * instructions are called from within Hyp-mode. | ||
236 | * | ||
237 | * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): | ||
238 | * Switching to Hyp mode is done through a simple HVC #0 instruction. The | ||
239 | * exception vector code will check that the HVC comes from VMID==0 and if | ||
240 | * so will push the necessary state (SPSR, lr_usr) on the Hyp stack. | ||
241 | * - r0 contains a pointer to a HYP function | ||
242 | * - r1, r2, and r3 contain arguments to the above function. | ||
243 | * - The HYP function will be called with its arguments in r0, r1 and r2. | ||
244 | * On HYP function return, we return directly to SVC. | ||
245 | * | ||
246 | * Note that the above is used to execute code in Hyp-mode from a host-kernel | ||
247 | * point of view, and is a different concept from performing a world-switch and | ||
248 | * executing guest code SVC mode (with a VMID != 0). | ||
249 | */ | ||
250 | |||
251 | /* Handle undef, svc, pabt, or dabt by crashing with a user notice */ | ||
252 | .macro bad_exception exception_code, panic_str | ||
253 | push {r0-r2} | ||
254 | mrrc p15, 6, r0, r1, c2 @ Read VTTBR | ||
255 | lsr r1, r1, #16 | ||
256 | ands r1, r1, #0xff | ||
257 | beq 99f | ||
258 | |||
259 | load_vcpu @ Load VCPU pointer | ||
260 | .if \exception_code == ARM_EXCEPTION_DATA_ABORT | ||
261 | mrc p15, 4, r2, c5, c2, 0 @ HSR | ||
262 | mrc p15, 4, r1, c6, c0, 0 @ HDFAR | ||
263 | str r2, [vcpu, #VCPU_HSR] | ||
264 | str r1, [vcpu, #VCPU_HxFAR] | ||
265 | .endif | ||
266 | .if \exception_code == ARM_EXCEPTION_PREF_ABORT | ||
267 | mrc p15, 4, r2, c5, c2, 0 @ HSR | ||
268 | mrc p15, 4, r1, c6, c0, 2 @ HIFAR | ||
269 | str r2, [vcpu, #VCPU_HSR] | ||
270 | str r1, [vcpu, #VCPU_HxFAR] | ||
271 | .endif | ||
272 | mov r1, #\exception_code | ||
273 | b __kvm_vcpu_return | ||
274 | |||
275 | @ We were in the host already. Let's craft a panicking return to SVC. | ||
276 | 99: mrs r2, cpsr | ||
277 | bic r2, r2, #MODE_MASK | ||
278 | orr r2, r2, #SVC_MODE | ||
279 | THUMB( orr r2, r2, #PSR_T_BIT ) | ||
280 | msr spsr_cxsf, r2 | ||
281 | mrs r1, ELR_hyp | ||
282 | ldr r2, =BSYM(panic) | ||
283 | msr ELR_hyp, r2 | ||
284 | ldr r0, =\panic_str | ||
285 | eret | ||
286 | .endm | ||
287 | |||
288 | .text | ||
289 | |||
290 | .align 5 | ||
291 | __kvm_hyp_vector: | ||
292 | .globl __kvm_hyp_vector | ||
293 | |||
294 | @ Hyp-mode exception vector | ||
295 | W(b) hyp_reset | ||
296 | W(b) hyp_undef | ||
297 | W(b) hyp_svc | ||
298 | W(b) hyp_pabt | ||
299 | W(b) hyp_dabt | ||
300 | W(b) hyp_hvc | ||
301 | W(b) hyp_irq | ||
302 | W(b) hyp_fiq | ||
303 | |||
304 | .align | ||
305 | hyp_reset: | ||
306 | b hyp_reset | ||
307 | |||
308 | .align | ||
309 | hyp_undef: | ||
310 | bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str | ||
311 | |||
312 | .align | ||
313 | hyp_svc: | ||
314 | bad_exception ARM_EXCEPTION_HVC, svc_die_str | ||
315 | |||
316 | .align | ||
317 | hyp_pabt: | ||
318 | bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str | ||
319 | |||
320 | .align | ||
321 | hyp_dabt: | ||
322 | bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str | ||
323 | |||
324 | .align | ||
325 | hyp_hvc: | ||
326 | /* | ||
327 | * Getting here is either because of a trap from a guest or a call to HVC | ||
328 | * from the host kernel, which means "switch to Hyp mode". | ||
329 | */ | ||
330 | push {r0, r1, r2} | ||
331 | |||
332 | @ Check syndrome register | ||
333 | mrc p15, 4, r1, c5, c2, 0 @ HSR | ||
334 | lsr r0, r1, #HSR_EC_SHIFT | ||
335 | #ifdef CONFIG_VFPv3 | ||
336 | cmp r0, #HSR_EC_CP_0_13 | ||
337 | beq switch_to_guest_vfp | ||
338 | #endif | ||
339 | cmp r0, #HSR_EC_HVC | ||
340 | bne guest_trap @ Not HVC instr. | ||
341 | |||
342 | /* | ||
343 | * Let's check if the HVC came from VMID 0 and allow simple | ||
344 | * switch to Hyp mode | ||
345 | */ | ||
346 | mrrc p15, 6, r0, r2, c2 | ||
347 | lsr r2, r2, #16 | ||
348 | and r2, r2, #0xff | ||
349 | cmp r2, #0 | ||
350 | bne guest_trap @ Guest called HVC | ||
351 | |||
352 | host_switch_to_hyp: | ||
353 | pop {r0, r1, r2} | ||
354 | |||
355 | push {lr} | ||
356 | mrs lr, SPSR | ||
357 | push {lr} | ||
358 | |||
359 | mov lr, r0 | ||
360 | mov r0, r1 | ||
361 | mov r1, r2 | ||
362 | mov r2, r3 | ||
363 | |||
364 | THUMB( orr lr, #1) | ||
365 | blx lr @ Call the HYP function | ||
366 | |||
367 | pop {lr} | ||
368 | msr SPSR_csxf, lr | ||
369 | pop {lr} | ||
370 | eret | ||
371 | |||
372 | guest_trap: | ||
373 | load_vcpu @ Load VCPU pointer to r0 | ||
374 | str r1, [vcpu, #VCPU_HSR] | ||
375 | |||
376 | @ Check if we need the fault information | ||
377 | lsr r1, r1, #HSR_EC_SHIFT | ||
378 | cmp r1, #HSR_EC_IABT | ||
379 | mrceq p15, 4, r2, c6, c0, 2 @ HIFAR | ||
380 | beq 2f | ||
381 | cmp r1, #HSR_EC_DABT | ||
382 | bne 1f | ||
383 | mrc p15, 4, r2, c6, c0, 0 @ HDFAR | ||
384 | |||
385 | 2: str r2, [vcpu, #VCPU_HxFAR] | ||
386 | |||
387 | /* | ||
388 | * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode: | ||
389 | * | ||
390 | * Abort on the stage 2 translation for a memory access from a | ||
391 | * Non-secure PL1 or PL0 mode: | ||
392 | * | ||
393 | * For any Access flag fault or Translation fault, and also for any | ||
394 | * Permission fault on the stage 2 translation of a memory access | ||
395 | * made as part of a translation table walk for a stage 1 translation, | ||
396 | * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR | ||
397 | * is UNKNOWN. | ||
398 | */ | ||
399 | |||
400 | /* Check for permission fault, and S1PTW */ | ||
401 | mrc p15, 4, r1, c5, c2, 0 @ HSR | ||
402 | and r0, r1, #HSR_FSC_TYPE | ||
403 | cmp r0, #FSC_PERM | ||
404 | tsteq r1, #(1 << 7) @ S1PTW | ||
405 | mrcne p15, 4, r2, c6, c0, 4 @ HPFAR | ||
406 | bne 3f | ||
407 | |||
408 | /* Resolve IPA using the xFAR */ | ||
409 | mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR | ||
410 | isb | ||
411 | mrrc p15, 0, r0, r1, c7 @ PAR | ||
412 | tst r0, #1 | ||
413 | bne 4f @ Failed translation | ||
414 | ubfx r2, r0, #12, #20 | ||
415 | lsl r2, r2, #4 | ||
416 | orr r2, r2, r1, lsl #24 | ||
417 | |||
418 | 3: load_vcpu @ Load VCPU pointer to r0 | ||
419 | str r2, [r0, #VCPU_HPFAR] | ||
420 | |||
421 | 1: mov r1, #ARM_EXCEPTION_HVC | ||
422 | b __kvm_vcpu_return | ||
423 | |||
424 | 4: pop {r0, r1, r2} @ Failed translation, return to guest | ||
425 | eret | ||
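
The ubfx/lsl/orr triplet above repacks the 64-bit PAR into the HPFAR layout, which wants IPA[39:12] at bits [31:4]. A C model of the same bit surgery (illustration only):

    /* par_lo holds PA[31:12] (plus flags), par_hi holds PA[39:32], as
     * returned by the ATS1CPR lookup read back through MRRC. */
    static u32 par_to_hpfar(u32 par_lo, u32 par_hi)
    {
            u32 hpfar = ((par_lo >> 12) & 0xfffff) << 4; /* ubfx + lsl #4   */
            hpfar |= par_hi << 24;                       /* orr ..., lsl #24 */
            return hpfar;
    }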
426 | |||
427 | /* | ||
428 | * If VFPv3 support is not available, then we will not switch the VFP | ||
429 | * registers; however cp10 and cp11 accesses will still trap and fallback | ||
430 | * to the regular coprocessor emulation code, which currently will | ||
431 | * inject an undefined exception to the guest. | ||
432 | */ | ||
433 | #ifdef CONFIG_VFPv3 | ||
434 | switch_to_guest_vfp: | ||
435 | load_vcpu @ Load VCPU pointer to r0 | ||
436 | push {r3-r7} | ||
437 | |||
438 | @ NEON/VFP used. Turn on VFP access. | ||
439 | set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11)) | ||
440 | |||
441 | @ Switch VFP/NEON hardware state to the guest's | ||
442 | add r7, r0, #VCPU_VFP_HOST | ||
443 | ldr r7, [r7] | ||
444 | store_vfp_state r7 | ||
445 | add r7, r0, #VCPU_VFP_GUEST | ||
446 | restore_vfp_state r7 | ||
447 | |||
448 | pop {r3-r7} | ||
449 | pop {r0-r2} | ||
450 | eret | ||
451 | #endif | ||
452 | |||
453 | .align | ||
454 | hyp_irq: | ||
455 | push {r0, r1, r2} | ||
456 | mov r1, #ARM_EXCEPTION_IRQ | ||
457 | load_vcpu @ Load VCPU pointer to r0 | ||
458 | b __kvm_vcpu_return | ||
459 | |||
460 | .align | ||
461 | hyp_fiq: | ||
462 | b hyp_fiq | ||
463 | |||
464 | .ltorg | ||
465 | |||
466 | __kvm_hyp_code_end: | ||
467 | .globl __kvm_hyp_code_end | ||
468 | |||
469 | .section ".rodata" | ||
470 | |||
471 | und_die_str: | ||
472 | .ascii "unexpected undefined exception in Hyp mode at: %#08x" | ||
473 | pabt_die_str: | ||
474 | .ascii "unexpected prefetch abort in Hyp mode at: %#08x" | ||
475 | dabt_die_str: | ||
476 | .ascii "unexpected data abort in Hyp mode at: %#08x" | ||
477 | svc_die_str: | ||
478 | .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x" | ||
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S new file mode 100644 index 000000000000..6a95d341e9c5 --- /dev/null +++ b/arch/arm/kvm/interrupts_head.S | |||
@@ -0,0 +1,441 @@ | |||
1 | #define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) | ||
2 | #define VCPU_USR_SP (VCPU_USR_REG(13)) | ||
3 | #define VCPU_USR_LR (VCPU_USR_REG(14)) | ||
4 | #define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4)) | ||
5 | |||
6 | /* | ||
7 | * Many of these macros need to access the VCPU structure, which is always | ||
8 | * held in r0. These macros should never clobber r1, as it is used to hold the | ||
9 | * exception code on the return path (except of course the macro that switches | ||
10 | * all the registers before the final jump to the VM). | ||
11 | */ | ||
12 | vcpu .req r0 @ vcpu pointer always in r0 | ||
13 | |||
14 | /* Clobbers {r2-r6} */ | ||
15 | .macro store_vfp_state vfp_base | ||
16 | @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions | ||
17 | VFPFMRX r2, FPEXC | ||
18 | @ Make sure VFP is enabled so we can touch the registers. | ||
19 | orr r6, r2, #FPEXC_EN | ||
20 | VFPFMXR FPEXC, r6 | ||
21 | |||
22 | VFPFMRX r3, FPSCR | ||
23 | tst r2, #FPEXC_EX @ Check for VFP Subarchitecture | ||
24 | beq 1f | ||
25 | @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so | ||
26 | @ we only need to save them if FPEXC_EX is set. | ||
27 | VFPFMRX r4, FPINST | ||
28 | tst r2, #FPEXC_FP2V | ||
29 | VFPFMRX r5, FPINST2, ne @ vmrsne | ||
30 | bic r6, r2, #FPEXC_EX @ FPEXC_EX disable | ||
31 | VFPFMXR FPEXC, r6 | ||
32 | 1: | ||
33 | VFPFSTMIA \vfp_base, r6 @ Save VFP registers | ||
34 | stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2 | ||
35 | .endm | ||
36 | |||
37 | /* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */ | ||
38 | .macro restore_vfp_state vfp_base | ||
39 | VFPFLDMIA \vfp_base, r6 @ Load VFP registers | ||
40 | ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2 | ||
41 | |||
42 | VFPFMXR FPSCR, r3 | ||
43 | tst r2, #FPEXC_EX @ Check for VFP Subarchitecture | ||
44 | beq 1f | ||
45 | VFPFMXR FPINST, r4 | ||
46 | tst r2, #FPEXC_FP2V | ||
47 | VFPFMXR FPINST2, r5, ne | ||
48 | 1: | ||
49 | VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN) | ||
50 | .endm | ||
51 | |||
52 | /* These are simply for the macros to work - the values don't have meaning */ | ||
53 | .equ usr, 0 | ||
54 | .equ svc, 1 | ||
55 | .equ abt, 2 | ||
56 | .equ und, 3 | ||
57 | .equ irq, 4 | ||
58 | .equ fiq, 5 | ||
59 | |||
60 | .macro push_host_regs_mode mode | ||
61 | mrs r2, SP_\mode | ||
62 | mrs r3, LR_\mode | ||
63 | mrs r4, SPSR_\mode | ||
64 | push {r2, r3, r4} | ||
65 | .endm | ||
66 | |||
67 | /* | ||
68 | * Store all host persistent registers on the stack. | ||
69 | * Clobbers all registers, in all modes, except r0 and r1. | ||
70 | */ | ||
71 | .macro save_host_regs | ||
72 | /* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */ | ||
73 | mrs r2, ELR_hyp | ||
74 | push {r2} | ||
75 | |||
76 | /* usr regs */ | ||
77 | push {r4-r12} @ r0-r3 are always clobbered | ||
78 | mrs r2, SP_usr | ||
79 | mov r3, lr | ||
80 | push {r2, r3} | ||
81 | |||
82 | push_host_regs_mode svc | ||
83 | push_host_regs_mode abt | ||
84 | push_host_regs_mode und | ||
85 | push_host_regs_mode irq | ||
86 | |||
87 | /* fiq regs */ | ||
88 | mrs r2, r8_fiq | ||
89 | mrs r3, r9_fiq | ||
90 | mrs r4, r10_fiq | ||
91 | mrs r5, r11_fiq | ||
92 | mrs r6, r12_fiq | ||
93 | mrs r7, SP_fiq | ||
94 | mrs r8, LR_fiq | ||
95 | mrs r9, SPSR_fiq | ||
96 | push {r2-r9} | ||
97 | .endm | ||
98 | |||
99 | .macro pop_host_regs_mode mode | ||
100 | pop {r2, r3, r4} | ||
101 | msr SP_\mode, r2 | ||
102 | msr LR_\mode, r3 | ||
103 | msr SPSR_\mode, r4 | ||
104 | .endm | ||
105 | |||
106 | /* | ||
107 | * Restore all host registers from the stack. | ||
108 | * Clobbers all registers, in all modes, except r0 and r1. | ||
109 | */ | ||
110 | .macro restore_host_regs | ||
111 | pop {r2-r9} | ||
112 | msr r8_fiq, r2 | ||
113 | msr r9_fiq, r3 | ||
114 | msr r10_fiq, r4 | ||
115 | msr r11_fiq, r5 | ||
116 | msr r12_fiq, r6 | ||
117 | msr SP_fiq, r7 | ||
118 | msr LR_fiq, r8 | ||
119 | msr SPSR_fiq, r9 | ||
120 | |||
121 | pop_host_regs_mode irq | ||
122 | pop_host_regs_mode und | ||
123 | pop_host_regs_mode abt | ||
124 | pop_host_regs_mode svc | ||
125 | |||
126 | pop {r2, r3} | ||
127 | msr SP_usr, r2 | ||
128 | mov lr, r3 | ||
129 | pop {r4-r12} | ||
130 | |||
131 | pop {r2} | ||
132 | msr ELR_hyp, r2 | ||
133 | .endm | ||
134 | |||
135 | /* | ||
136 | * Restore SP, LR and SPSR for a given mode. offset is the offset of | ||
137 | * this mode's registers from the VCPU base. | ||
138 | * | ||
139 | * Assumes vcpu pointer in vcpu reg | ||
140 | * | ||
141 | * Clobbers r1, r2, r3, r4. | ||
142 | */ | ||
143 | .macro restore_guest_regs_mode mode, offset | ||
144 | add r1, vcpu, \offset | ||
145 | ldm r1, {r2, r3, r4} | ||
146 | msr SP_\mode, r2 | ||
147 | msr LR_\mode, r3 | ||
148 | msr SPSR_\mode, r4 | ||
149 | .endm | ||
150 | |||
151 | /* | ||
152 | * Restore all guest registers from the vcpu struct. | ||
153 | * | ||
154 | * Assumes vcpu pointer in vcpu reg | ||
155 | * | ||
156 | * Clobbers *all* registers. | ||
157 | */ | ||
158 | .macro restore_guest_regs | ||
159 | restore_guest_regs_mode svc, #VCPU_SVC_REGS | ||
160 | restore_guest_regs_mode abt, #VCPU_ABT_REGS | ||
161 | restore_guest_regs_mode und, #VCPU_UND_REGS | ||
162 | restore_guest_regs_mode irq, #VCPU_IRQ_REGS | ||
163 | |||
164 | add r1, vcpu, #VCPU_FIQ_REGS | ||
165 | ldm r1, {r2-r9} | ||
166 | msr r8_fiq, r2 | ||
167 | msr r9_fiq, r3 | ||
168 | msr r10_fiq, r4 | ||
169 | msr r11_fiq, r5 | ||
170 | msr r12_fiq, r6 | ||
171 | msr SP_fiq, r7 | ||
172 | msr LR_fiq, r8 | ||
173 | msr SPSR_fiq, r9 | ||
174 | |||
175 | @ Load return state | ||
176 | ldr r2, [vcpu, #VCPU_PC] | ||
177 | ldr r3, [vcpu, #VCPU_CPSR] | ||
178 | msr ELR_hyp, r2 | ||
179 | msr SPSR_cxsf, r3 | ||
180 | |||
181 | @ Load user registers | ||
182 | ldr r2, [vcpu, #VCPU_USR_SP] | ||
183 | ldr r3, [vcpu, #VCPU_USR_LR] | ||
184 | msr SP_usr, r2 | ||
185 | mov lr, r3 | ||
186 | add vcpu, vcpu, #(VCPU_USR_REGS) | ||
187 | ldm vcpu, {r0-r12} | ||
188 | .endm | ||
189 | |||
190 | /* | ||
191 | * Save SP, LR and SPSR for a given mode. offset is the offset of | ||
192 | * this mode's registers from the VCPU base. | ||
193 | * | ||
194 | * Assumes vcpu pointer in vcpu reg | ||
195 | * | ||
196 | * Clobbers r2, r3, r4, r5. | ||
197 | */ | ||
198 | .macro save_guest_regs_mode mode, offset | ||
199 | add r2, vcpu, \offset | ||
200 | mrs r3, SP_\mode | ||
201 | mrs r4, LR_\mode | ||
202 | mrs r5, SPSR_\mode | ||
203 | stm r2, {r3, r4, r5} | ||
204 | .endm | ||
205 | |||
206 | /* | ||
207 | * Save all guest registers to the vcpu struct | ||
208 | * Expects guest's r0, r1, r2 on the stack. | ||
209 | * | ||
210 | * Assumes vcpu pointer in vcpu reg | ||
211 | * | ||
212 | * Clobbers r2, r3, r4, r5. | ||
213 | */ | ||
214 | .macro save_guest_regs | ||
215 | @ Store usr registers | ||
216 | add r2, vcpu, #VCPU_USR_REG(3) | ||
217 | stm r2, {r3-r12} | ||
218 | add r2, vcpu, #VCPU_USR_REG(0) | ||
219 | pop {r3, r4, r5} @ r0, r1, r2 | ||
220 | stm r2, {r3, r4, r5} | ||
221 | mrs r2, SP_usr | ||
222 | mov r3, lr | ||
223 | str r2, [vcpu, #VCPU_USR_SP] | ||
224 | str r3, [vcpu, #VCPU_USR_LR] | ||
225 | |||
226 | @ Store return state | ||
227 | mrs r2, ELR_hyp | ||
228 | mrs r3, spsr | ||
229 | str r2, [vcpu, #VCPU_PC] | ||
230 | str r3, [vcpu, #VCPU_CPSR] | ||
231 | |||
232 | @ Store other guest registers | ||
233 | save_guest_regs_mode svc, #VCPU_SVC_REGS | ||
234 | save_guest_regs_mode abt, #VCPU_ABT_REGS | ||
235 | save_guest_regs_mode und, #VCPU_UND_REGS | ||
236 | save_guest_regs_mode irq, #VCPU_IRQ_REGS | ||
237 | .endm | ||
238 | |||
239 | /* Reads cp15 registers from hardware and stores them in memory | ||
240 | * @store_to_vcpu: If 0, registers are written in-order to the stack, | ||
241 | * otherwise to the VCPU struct pointed to by the vcpu register | ||
242 | * | ||
243 | * Assumes vcpu pointer in vcpu reg | ||
244 | * | ||
245 | * Clobbers r2 - r12 | ||
246 | */ | ||
247 | .macro read_cp15_state store_to_vcpu | ||
248 | mrc p15, 0, r2, c1, c0, 0 @ SCTLR | ||
249 | mrc p15, 0, r3, c1, c0, 2 @ CPACR | ||
250 | mrc p15, 0, r4, c2, c0, 2 @ TTBCR | ||
251 | mrc p15, 0, r5, c3, c0, 0 @ DACR | ||
252 | mrrc p15, 0, r6, r7, c2 @ TTBR 0 | ||
253 | mrrc p15, 1, r8, r9, c2 @ TTBR 1 | ||
254 | mrc p15, 0, r10, c10, c2, 0 @ PRRR | ||
255 | mrc p15, 0, r11, c10, c2, 1 @ NMRR | ||
256 | mrc p15, 2, r12, c0, c0, 0 @ CSSELR | ||
257 | |||
258 | .if \store_to_vcpu == 0 | ||
259 | push {r2-r12} @ Push CP15 registers | ||
260 | .else | ||
261 | str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)] | ||
262 | str r3, [vcpu, #CP15_OFFSET(c1_CPACR)] | ||
263 | str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)] | ||
264 | str r5, [vcpu, #CP15_OFFSET(c3_DACR)] | ||
265 | add r2, vcpu, #CP15_OFFSET(c2_TTBR0) | ||
266 | strd r6, r7, [r2] | ||
267 | add r2, vcpu, #CP15_OFFSET(c2_TTBR1) | ||
268 | strd r8, r9, [r2] | ||
269 | str r10, [vcpu, #CP15_OFFSET(c10_PRRR)] | ||
270 | str r11, [vcpu, #CP15_OFFSET(c10_NMRR)] | ||
271 | str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)] | ||
272 | .endif | ||
273 | |||
274 | mrc p15, 0, r2, c13, c0, 1 @ CID | ||
275 | mrc p15, 0, r3, c13, c0, 2 @ TID_URW | ||
276 | mrc p15, 0, r4, c13, c0, 3 @ TID_URO | ||
277 | mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV | ||
278 | mrc p15, 0, r6, c5, c0, 0 @ DFSR | ||
279 | mrc p15, 0, r7, c5, c0, 1 @ IFSR | ||
280 | mrc p15, 0, r8, c5, c1, 0 @ ADFSR | ||
281 | mrc p15, 0, r9, c5, c1, 1 @ AIFSR | ||
282 | mrc p15, 0, r10, c6, c0, 0 @ DFAR | ||
283 | mrc p15, 0, r11, c6, c0, 2 @ IFAR | ||
284 | mrc p15, 0, r12, c12, c0, 0 @ VBAR | ||
285 | |||
286 | .if \store_to_vcpu == 0 | ||
287 | push {r2-r12} @ Push CP15 registers | ||
288 | .else | ||
289 | str r2, [vcpu, #CP15_OFFSET(c13_CID)] | ||
290 | str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)] | ||
291 | str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)] | ||
292 | str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)] | ||
293 | str r6, [vcpu, #CP15_OFFSET(c5_DFSR)] | ||
294 | str r7, [vcpu, #CP15_OFFSET(c5_IFSR)] | ||
295 | str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)] | ||
296 | str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)] | ||
297 | str r10, [vcpu, #CP15_OFFSET(c6_DFAR)] | ||
298 | str r11, [vcpu, #CP15_OFFSET(c6_IFAR)] | ||
299 | str r12, [vcpu, #CP15_OFFSET(c12_VBAR)] | ||
300 | .endif | ||
301 | .endm | ||
302 | |||
303 | /* | ||
304 | * Reads cp15 registers from memory and writes them to hardware | ||
305 | * @read_from_vcpu: If 0, registers are read in-order from the stack, | ||
306 | * otherwise from the VCPU struct pointed to by the vcpu register | ||
307 | * | ||
308 | * Assumes vcpu pointer in vcpu reg | ||
309 | */ | ||
310 | .macro write_cp15_state read_from_vcpu | ||
311 | .if \read_from_vcpu == 0 | ||
312 | pop {r2-r12} | ||
313 | .else | ||
314 | ldr r2, [vcpu, #CP15_OFFSET(c13_CID)] | ||
315 | ldr r3, [vcpu, #CP15_OFFSET(c13_TID_URW)] | ||
316 | ldr r4, [vcpu, #CP15_OFFSET(c13_TID_URO)] | ||
317 | ldr r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)] | ||
318 | ldr r6, [vcpu, #CP15_OFFSET(c5_DFSR)] | ||
319 | ldr r7, [vcpu, #CP15_OFFSET(c5_IFSR)] | ||
320 | ldr r8, [vcpu, #CP15_OFFSET(c5_ADFSR)] | ||
321 | ldr r9, [vcpu, #CP15_OFFSET(c5_AIFSR)] | ||
322 | ldr r10, [vcpu, #CP15_OFFSET(c6_DFAR)] | ||
323 | ldr r11, [vcpu, #CP15_OFFSET(c6_IFAR)] | ||
324 | ldr r12, [vcpu, #CP15_OFFSET(c12_VBAR)] | ||
325 | .endif | ||
326 | |||
327 | mcr p15, 0, r2, c13, c0, 1 @ CID | ||
328 | mcr p15, 0, r3, c13, c0, 2 @ TID_URW | ||
329 | mcr p15, 0, r4, c13, c0, 3 @ TID_URO | ||
330 | mcr p15, 0, r5, c13, c0, 4 @ TID_PRIV | ||
331 | mcr p15, 0, r6, c5, c0, 0 @ DFSR | ||
332 | mcr p15, 0, r7, c5, c0, 1 @ IFSR | ||
333 | mcr p15, 0, r8, c5, c1, 0 @ ADFSR | ||
334 | mcr p15, 0, r9, c5, c1, 1 @ AIFSR | ||
335 | mcr p15, 0, r10, c6, c0, 0 @ DFAR | ||
336 | mcr p15, 0, r11, c6, c0, 2 @ IFAR | ||
337 | mcr p15, 0, r12, c12, c0, 0 @ VBAR | ||
338 | |||
339 | .if \read_from_vcpu == 0 | ||
340 | pop {r2-r12} | ||
341 | .else | ||
342 | ldr r2, [vcpu, #CP15_OFFSET(c1_SCTLR)] | ||
343 | ldr r3, [vcpu, #CP15_OFFSET(c1_CPACR)] | ||
344 | ldr r4, [vcpu, #CP15_OFFSET(c2_TTBCR)] | ||
345 | ldr r5, [vcpu, #CP15_OFFSET(c3_DACR)] | ||
346 | add r12, vcpu, #CP15_OFFSET(c2_TTBR0) | ||
347 | ldrd r6, r7, [r12] | ||
348 | add r12, vcpu, #CP15_OFFSET(c2_TTBR1) | ||
349 | ldrd r8, r9, [r12] | ||
350 | ldr r10, [vcpu, #CP15_OFFSET(c10_PRRR)] | ||
351 | ldr r11, [vcpu, #CP15_OFFSET(c10_NMRR)] | ||
352 | ldr r12, [vcpu, #CP15_OFFSET(c0_CSSELR)] | ||
353 | .endif | ||
354 | |||
355 | mcr p15, 0, r2, c1, c0, 0 @ SCTLR | ||
356 | mcr p15, 0, r3, c1, c0, 2 @ CPACR | ||
357 | mcr p15, 0, r4, c2, c0, 2 @ TTBCR | ||
358 | mcr p15, 0, r5, c3, c0, 0 @ DACR | ||
359 | mcrr p15, 0, r6, r7, c2 @ TTBR 0 | ||
360 | mcrr p15, 1, r8, r9, c2 @ TTBR 1 | ||
361 | mcr p15, 0, r10, c10, c2, 0 @ PRRR | ||
362 | mcr p15, 0, r11, c10, c2, 1 @ NMRR | ||
363 | mcr p15, 2, r12, c0, c0, 0 @ CSSELR | ||
364 | .endm | ||
365 | |||
366 | /* | ||
367 | * Save the VGIC CPU state into memory | ||
368 | * | ||
369 | * Assumes vcpu pointer in vcpu reg | ||
370 | */ | ||
371 | .macro save_vgic_state | ||
372 | .endm | ||
373 | |||
374 | /* | ||
375 | * Restore the VGIC CPU state from memory | ||
376 | * | ||
377 | * Assumes vcpu pointer in vcpu reg | ||
378 | */ | ||
379 | .macro restore_vgic_state | ||
380 | .endm | ||
381 | |||
382 | .equ vmentry, 0 | ||
383 | .equ vmexit, 1 | ||
384 | |||
385 | /* Configures the HSTR (Hyp System Trap Register) on entry/return | ||
386 | * (hardware reset value is 0) */ | ||
387 | .macro set_hstr operation | ||
388 | mrc p15, 4, r2, c1, c1, 3 | ||
389 | ldr r3, =HSTR_T(15) | ||
390 | .if \operation == vmentry | ||
391 | orr r2, r2, r3 @ Trap CR{15} | ||
392 | .else | ||
393 | bic r2, r2, r3 @ Don't trap any CRx accesses | ||
394 | .endif | ||
395 | mcr p15, 4, r2, c1, c1, 3 | ||
396 | .endm | ||
397 | |||
398 | /* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return | ||
399 | * (hardware reset value is 0). Keep previous value in r2. */ | ||
400 | .macro set_hcptr operation, mask | ||
401 | mrc p15, 4, r2, c1, c1, 2 | ||
402 | ldr r3, =\mask | ||
403 | .if \operation == vmentry | ||
404 | orr r3, r2, r3 @ Trap coproc-accesses defined in mask | ||
405 | .else | ||
406 | bic r3, r2, r3 @ Don't trap defined coproc-accesses | ||
407 | .endif | ||
408 | mcr p15, 4, r3, c1, c1, 2 | ||
409 | .endm | ||
410 | |||
411 | /* Configures the HDCR (Hyp Debug Configuration Register) on entry/return | ||
412 | * (hardware reset value is 0) */ | ||
413 | .macro set_hdcr operation | ||
414 | mrc p15, 4, r2, c1, c1, 1 | ||
415 | ldr r3, =(HDCR_TPM|HDCR_TPMCR) | ||
416 | .if \operation == vmentry | ||
417 | orr r2, r2, r3 @ Trap some perfmon accesses | ||
418 | .else | ||
419 | bic r2, r2, r3 @ Don't trap any perfmon accesses | ||
420 | .endif | ||
421 | mcr p15, 4, r2, c1, c1, 1 | ||
422 | .endm | ||
423 | |||
424 | /* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */ | ||
425 | .macro configure_hyp_role operation | ||
426 | mrc p15, 4, r2, c1, c1, 0 @ HCR | ||
427 | bic r2, r2, #HCR_VIRT_EXCP_MASK | ||
428 | ldr r3, =HCR_GUEST_MASK | ||
429 | .if \operation == vmentry | ||
430 | orr r2, r2, r3 | ||
431 | ldr r3, [vcpu, #VCPU_IRQ_LINES] | ||
432 | orr r2, r2, r3 | ||
433 | .else | ||
434 | bic r2, r2, r3 | ||
435 | .endif | ||
436 | mcr p15, 4, r2, c1, c1, 0 | ||
437 | .endm | ||
438 | |||
439 | .macro load_vcpu | ||
440 | mrc p15, 4, vcpu, c13, c0, 2 @ HTPIDR | ||
441 | .endm | ||
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c new file mode 100644 index 000000000000..0144baf82904 --- /dev/null +++ b/arch/arm/kvm/mmio.c | |||
@@ -0,0 +1,153 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kvm_host.h> | ||
20 | #include <asm/kvm_mmio.h> | ||
21 | #include <asm/kvm_emulate.h> | ||
22 | #include <trace/events/kvm.h> | ||
23 | |||
24 | #include "trace.h" | ||
25 | |||
26 | /** | ||
27 | * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation | ||
28 | * @vcpu: The VCPU pointer | ||
29 | * @run: The VCPU run struct containing the mmio data | ||
30 | * | ||
31 | * This should only be called after returning from userspace for MMIO load | ||
32 | * emulation. | ||
33 | */ | ||
34 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
35 | { | ||
36 | __u32 *dest; | ||
37 | unsigned int len; | ||
38 | int mask; | ||
39 | |||
40 | if (!run->mmio.is_write) { | ||
41 | dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt); | ||
42 | memset(dest, 0, sizeof(int)); | ||
43 | |||
44 | len = run->mmio.len; | ||
45 | if (len > 4) | ||
46 | return -EINVAL; | ||
47 | |||
48 | memcpy(dest, run->mmio.data, len); | ||
49 | |||
50 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, | ||
51 | *((u64 *)run->mmio.data)); | ||
52 | |||
53 | if (vcpu->arch.mmio_decode.sign_extend && len < 4) { | ||
54 | mask = 1U << ((len * 8) - 1); | ||
55 | *dest = (*dest ^ mask) - mask; | ||
56 | } | ||
57 | } | ||
58 | |||
59 | return 0; | ||
60 | } | ||
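The *dest = (*dest ^ mask) - mask line above sign-extends a narrow MMIO load using the XOR-and-subtract identity. A minimal standalone sketch of the same trick (plain C, outside the patch; sample values are made up):

    #include <stdio.h>
    #include <stdint.h>

    /* Sign-extend the low `len` bytes of val, as the code above does. */
    static uint32_t sign_extend(uint32_t val, unsigned int len)
    {
            uint32_t mask = 1U << ((len * 8) - 1); /* sign bit of the narrow value */
            return (val ^ mask) - mask;            /* clears or propagates it */
    }

    int main(void)
    {
            printf("%#x\n", sign_extend(0x80, 1)); /* 0xffffff80: byte -128 */
            printf("%#x\n", sign_extend(0x7f, 1)); /* 0x7f: positive, unchanged */
            return 0;
    }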
61 | |||
62 | static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | ||
63 | struct kvm_exit_mmio *mmio) | ||
64 | { | ||
65 | unsigned long rt, len; | ||
66 | bool is_write, sign_extend; | ||
67 | |||
68 | if ((vcpu->arch.hsr >> 8) & 1) { | ||
69 | /* cache operation on I/O addr, tell guest unsupported */ | ||
70 | kvm_inject_dabt(vcpu, vcpu->arch.hxfar); | ||
71 | return 1; | ||
72 | } | ||
73 | |||
74 | if ((vcpu->arch.hsr >> 7) & 1) { | ||
75 | /* page table accesses IO mem: tell guest to fix its TTBR */ | ||
76 | kvm_inject_dabt(vcpu, vcpu->arch.hxfar); | ||
77 | return 1; | ||
78 | } | ||
79 | |||
80 | switch ((vcpu->arch.hsr >> 22) & 0x3) { | ||
81 | case 0: | ||
82 | len = 1; | ||
83 | break; | ||
84 | case 1: | ||
85 | len = 2; | ||
86 | break; | ||
87 | case 2: | ||
88 | len = 4; | ||
89 | break; | ||
90 | default: | ||
91 | kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); | ||
92 | return -EFAULT; | ||
93 | } | ||
94 | |||
95 | is_write = vcpu->arch.hsr & HSR_WNR; | ||
96 | sign_extend = vcpu->arch.hsr & HSR_SSE; | ||
97 | rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT; | ||
98 | |||
99 | if (kvm_vcpu_reg_is_pc(vcpu, rt)) { | ||
100 | /* IO memory trying to read/write pc */ | ||
101 | kvm_inject_pabt(vcpu, vcpu->arch.hxfar); | ||
102 | return 1; | ||
103 | } | ||
104 | |||
105 | mmio->is_write = is_write; | ||
106 | mmio->phys_addr = fault_ipa; | ||
107 | mmio->len = len; | ||
108 | vcpu->arch.mmio_decode.sign_extend = sign_extend; | ||
109 | vcpu->arch.mmio_decode.rt = rt; | ||
110 | |||
111 | /* | ||
112 | * The MMIO instruction is emulated and should not be re-executed | ||
113 | * in the guest. | ||
114 | */ | ||
115 | kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); | ||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
120 | phys_addr_t fault_ipa) | ||
121 | { | ||
122 | struct kvm_exit_mmio mmio; | ||
123 | unsigned long rt; | ||
124 | int ret; | ||
125 | |||
126 | /* | ||
127 | * Prepare MMIO operation. First stash it in a private | ||
128 | * structure that we can use for in-kernel emulation. If the | ||
129 | * kernel can't handle it, copy it into run->mmio and let user | ||
130 | * space do its magic. | ||
131 | */ | ||
132 | |||
133 | if (vcpu->arch.hsr & HSR_ISV) { | ||
134 | ret = decode_hsr(vcpu, fault_ipa, &mmio); | ||
135 | if (ret) | ||
136 | return ret; | ||
137 | } else { | ||
138 | kvm_err("load/store instruction decoding not implemented\n"); | ||
139 | return -ENOSYS; | ||
140 | } | ||
141 | |||
142 | rt = vcpu->arch.mmio_decode.rt; | ||
143 | trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE : | ||
144 | KVM_TRACE_MMIO_READ_UNSATISFIED, | ||
145 | mmio.len, fault_ipa, | ||
146 | (mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0); | ||
147 | |||
148 | if (mmio.is_write) | ||
149 | memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len); | ||
150 | |||
151 | kvm_prepare_mmio(run, &mmio); | ||
152 | return 0; | ||
153 | } | ||
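decode_hsr() above reads the access size (SAS), sign-extension flag (SSE), register number (SRT) and direction (WNR) straight out of HSR bit fields. A standalone sketch of that extraction (the bit positions are restated locally and should be checked against asm/kvm_arm.h; the sample HSR value is invented):

    #include <stdio.h>
    #include <stdint.h>

    /* HSR field positions as used by decode_hsr() (assumed values; verify
     * against asm/kvm_arm.h). */
    #define HSR_WNR        (1U << 6)
    #define HSR_SSE        (1U << 21)
    #define HSR_SRT_SHIFT  16
    #define HSR_SRT_MASK   (0xfU << HSR_SRT_SHIFT)

    int main(void)
    {
            uint32_t hsr = 0x91630000; /* invented: ISV-valid halfword load */
            unsigned int sas = (hsr >> 22) & 0x3;

            printf("len = %u bytes\n", 1U << sas); /* SAS 0->1, 1->2, 2->4 */
            printf("rt = r%lu\n",
                   (unsigned long)((hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT));
            printf("write = %d, sign-extend = %d\n",
                   !!(hsr & HSR_WNR), !!(hsr & HSR_SSE));
            return 0;
    }

With this value the decode yields len = 2, rt = r3, a read with sign extension: the same fields decode_hsr() stashes in vcpu->arch.mmio_decode.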
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c new file mode 100644 index 000000000000..f30e13163a96 --- /dev/null +++ b/arch/arm/kvm/mmu.c | |||
@@ -0,0 +1,787 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/mman.h> | ||
20 | #include <linux/kvm_host.h> | ||
21 | #include <linux/io.h> | ||
22 | #include <trace/events/kvm.h> | ||
23 | #include <asm/idmap.h> | ||
24 | #include <asm/pgalloc.h> | ||
25 | #include <asm/cacheflush.h> | ||
26 | #include <asm/kvm_arm.h> | ||
27 | #include <asm/kvm_mmu.h> | ||
28 | #include <asm/kvm_mmio.h> | ||
29 | #include <asm/kvm_asm.h> | ||
30 | #include <asm/kvm_emulate.h> | ||
31 | #include <asm/mach/map.h> | ||
32 | #include <trace/events/kvm.h> | ||
33 | |||
34 | #include "trace.h" | ||
35 | |||
36 | extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; | ||
37 | |||
38 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); | ||
39 | |||
40 | static void kvm_tlb_flush_vmid(struct kvm *kvm) | ||
41 | { | ||
42 | kvm_call_hyp(__kvm_tlb_flush_vmid, kvm); | ||
43 | } | ||
44 | |||
45 | static void kvm_set_pte(pte_t *pte, pte_t new_pte) | ||
46 | { | ||
47 | pte_val(*pte) = new_pte; | ||
48 | /* | ||
49 | * flush_pmd_entry just takes a void pointer and cleans the necessary | ||
50 | * cache entries, so we can reuse the function for ptes. | ||
51 | */ | ||
52 | flush_pmd_entry(pte); | ||
53 | } | ||
54 | |||
55 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, | ||
56 | int min, int max) | ||
57 | { | ||
58 | void *page; | ||
59 | |||
60 | BUG_ON(max > KVM_NR_MEM_OBJS); | ||
61 | if (cache->nobjs >= min) | ||
62 | return 0; | ||
63 | while (cache->nobjs < max) { | ||
64 | page = (void *)__get_free_page(PGALLOC_GFP); | ||
65 | if (!page) | ||
66 | return -ENOMEM; | ||
67 | cache->objects[cache->nobjs++] = page; | ||
68 | } | ||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) | ||
73 | { | ||
74 | while (mc->nobjs) | ||
75 | free_page((unsigned long)mc->objects[--mc->nobjs]); | ||
76 | } | ||
77 | |||
78 | static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) | ||
79 | { | ||
80 | void *p; | ||
81 | |||
82 | BUG_ON(!mc || !mc->nobjs); | ||
83 | p = mc->objects[--mc->nobjs]; | ||
84 | return p; | ||
85 | } | ||
86 | |||
87 | static void free_ptes(pmd_t *pmd, unsigned long addr) | ||
88 | { | ||
89 | pte_t *pte; | ||
90 | unsigned int i; | ||
91 | |||
92 | for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) { | ||
93 | if (!pmd_none(*pmd) && pmd_table(*pmd)) { | ||
94 | pte = pte_offset_kernel(pmd, addr); | ||
95 | pte_free_kernel(NULL, pte); | ||
96 | } | ||
97 | pmd++; | ||
98 | } | ||
99 | } | ||
100 | |||
101 | /** | ||
102 | * free_hyp_pmds - free the Hyp-mode level-2 tables and child level-3 tables | ||
103 | * | ||
104 | * Assumes this is a page table used strictly in Hyp-mode and therefore contains | ||
105 | * only mappings in the kernel memory area, which is above PAGE_OFFSET. | ||
106 | */ | ||
107 | void free_hyp_pmds(void) | ||
108 | { | ||
109 | pgd_t *pgd; | ||
110 | pud_t *pud; | ||
111 | pmd_t *pmd; | ||
112 | unsigned long addr; | ||
113 | |||
114 | mutex_lock(&kvm_hyp_pgd_mutex); | ||
115 | for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) { | ||
116 | pgd = hyp_pgd + pgd_index(addr); | ||
117 | pud = pud_offset(pgd, addr); | ||
118 | |||
119 | if (pud_none(*pud)) | ||
120 | continue; | ||
121 | BUG_ON(pud_bad(*pud)); | ||
122 | |||
123 | pmd = pmd_offset(pud, addr); | ||
124 | free_ptes(pmd, addr); | ||
125 | pmd_free(NULL, pmd); | ||
126 | pud_clear(pud); | ||
127 | } | ||
128 | mutex_unlock(&kvm_hyp_pgd_mutex); | ||
129 | } | ||
130 | |||
131 | static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, | ||
132 | unsigned long end) | ||
133 | { | ||
134 | pte_t *pte; | ||
135 | unsigned long addr; | ||
136 | struct page *page; | ||
137 | |||
138 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { | ||
139 | pte = pte_offset_kernel(pmd, addr); | ||
140 | BUG_ON(!virt_addr_valid(addr)); | ||
141 | page = virt_to_page(addr); | ||
142 | kvm_set_pte(pte, mk_pte(page, PAGE_HYP)); | ||
143 | } | ||
144 | } | ||
145 | |||
146 | static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start, | ||
147 | unsigned long end, | ||
148 | unsigned long *pfn_base) | ||
149 | { | ||
150 | pte_t *pte; | ||
151 | unsigned long addr; | ||
152 | |||
153 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { | ||
154 | pte = pte_offset_kernel(pmd, addr); | ||
155 | BUG_ON(pfn_valid(*pfn_base)); | ||
156 | kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE)); | ||
157 | (*pfn_base)++; | ||
158 | } | ||
159 | } | ||
160 | |||
161 | static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, | ||
162 | unsigned long end, unsigned long *pfn_base) | ||
163 | { | ||
164 | pmd_t *pmd; | ||
165 | pte_t *pte; | ||
166 | unsigned long addr, next; | ||
167 | |||
168 | for (addr = start; addr < end; addr = next) { | ||
169 | pmd = pmd_offset(pud, addr); | ||
170 | |||
171 | BUG_ON(pmd_sect(*pmd)); | ||
172 | |||
173 | if (pmd_none(*pmd)) { | ||
174 | pte = pte_alloc_one_kernel(NULL, addr); | ||
175 | if (!pte) { | ||
176 | kvm_err("Cannot allocate Hyp pte\n"); | ||
177 | return -ENOMEM; | ||
178 | } | ||
179 | pmd_populate_kernel(NULL, pmd, pte); | ||
180 | } | ||
181 | |||
182 | next = pmd_addr_end(addr, end); | ||
183 | |||
184 | /* | ||
185 | * If pfn_base is NULL, we map kernel pages into HYP with the | ||
186 | * virtual address. Otherwise, this is considered an I/O | ||
187 | * mapping and we map the physical region starting at | ||
188 | * *pfn_base to [start, end). | ||
189 | */ | ||
190 | if (!pfn_base) | ||
191 | create_hyp_pte_mappings(pmd, addr, next); | ||
192 | else | ||
193 | create_hyp_io_pte_mappings(pmd, addr, next, pfn_base); | ||
194 | } | ||
195 | |||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base) | ||
200 | { | ||
201 | unsigned long start = (unsigned long)from; | ||
202 | unsigned long end = (unsigned long)to; | ||
203 | pgd_t *pgd; | ||
204 | pud_t *pud; | ||
205 | pmd_t *pmd; | ||
206 | unsigned long addr, next; | ||
207 | int err = 0; | ||
208 | |||
209 | BUG_ON(start > end); | ||
210 | if (start < PAGE_OFFSET) | ||
211 | return -EINVAL; | ||
212 | |||
213 | mutex_lock(&kvm_hyp_pgd_mutex); | ||
214 | for (addr = start; addr < end; addr = next) { | ||
215 | pgd = hyp_pgd + pgd_index(addr); | ||
216 | pud = pud_offset(pgd, addr); | ||
217 | |||
218 | if (pud_none_or_clear_bad(pud)) { | ||
219 | pmd = pmd_alloc_one(NULL, addr); | ||
220 | if (!pmd) { | ||
221 | kvm_err("Cannot allocate Hyp pmd\n"); | ||
222 | err = -ENOMEM; | ||
223 | goto out; | ||
224 | } | ||
225 | pud_populate(NULL, pud, pmd); | ||
226 | } | ||
227 | |||
228 | next = pgd_addr_end(addr, end); | ||
229 | err = create_hyp_pmd_mappings(pud, addr, next, pfn_base); | ||
230 | if (err) | ||
231 | goto out; | ||
232 | } | ||
233 | out: | ||
234 | mutex_unlock(&kvm_hyp_pgd_mutex); | ||
235 | return err; | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * create_hyp_mappings - map a kernel virtual address range in Hyp mode | ||
240 | * @from: The virtual kernel start address of the range | ||
241 | * @to: The virtual kernel end address of the range (exclusive) | ||
242 | * | ||
243 | * The same virtual address as the kernel virtual address is also used in | ||
244 | * Hyp-mode mapping to the same underlying physical pages. | ||
245 | * | ||
246 | * Note: Wrapping around zero in the "to" address is not supported. | ||
247 | */ | ||
248 | int create_hyp_mappings(void *from, void *to) | ||
249 | { | ||
250 | return __create_hyp_mappings(from, to, NULL); | ||
251 | } | ||
252 | |||
253 | /** | ||
254 | * create_hyp_io_mappings - map a physical IO range in Hyp mode | ||
255 | * @from: The virtual HYP start address of the range | ||
256 | * @to: The virtual HYP end address of the range (exclusive) | ||
257 | * @addr: The physical start address which gets mapped | ||
258 | */ | ||
259 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr) | ||
260 | { | ||
261 | unsigned long pfn = __phys_to_pfn(addr); | ||
262 | return __create_hyp_mappings(from, to, &pfn); | ||
263 | } | ||
264 | |||
265 | /** | ||
266 | * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. | ||
267 | * @kvm: The KVM struct pointer for the VM. | ||
268 | * | ||
269 | * Allocates only the 1st level table, of the size defined by S2_PGD_ORDER | ||
270 | * (which can support either full 40-bit input addresses or be limited to | ||
271 | * 32-bit input addresses). Clears the allocated pages. | ||
272 | * | ||
273 | * Note we don't need locking here as this is only called when the VM is | ||
274 | * created, which can only be done once. | ||
275 | */ | ||
276 | int kvm_alloc_stage2_pgd(struct kvm *kvm) | ||
277 | { | ||
278 | pgd_t *pgd; | ||
279 | |||
280 | if (kvm->arch.pgd != NULL) { | ||
281 | kvm_err("kvm_arch already initialized?\n"); | ||
282 | return -EINVAL; | ||
283 | } | ||
284 | |||
285 | pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER); | ||
286 | if (!pgd) | ||
287 | return -ENOMEM; | ||
288 | |||
289 | /* stage-2 pgd must be aligned to its size */ | ||
290 | VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1)); | ||
291 | |||
292 | memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); | ||
293 | clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); | ||
294 | kvm->arch.pgd = pgd; | ||
295 | |||
296 | return 0; | ||
297 | } | ||
298 | |||
299 | static void clear_pud_entry(pud_t *pud) | ||
300 | { | ||
301 | pmd_t *pmd_table = pmd_offset(pud, 0); | ||
302 | pud_clear(pud); | ||
303 | pmd_free(NULL, pmd_table); | ||
304 | put_page(virt_to_page(pud)); | ||
305 | } | ||
306 | |||
307 | static void clear_pmd_entry(pmd_t *pmd) | ||
308 | { | ||
309 | pte_t *pte_table = pte_offset_kernel(pmd, 0); | ||
310 | pmd_clear(pmd); | ||
311 | pte_free_kernel(NULL, pte_table); | ||
312 | put_page(virt_to_page(pmd)); | ||
313 | } | ||
314 | |||
315 | static bool pmd_empty(pmd_t *pmd) | ||
316 | { | ||
317 | struct page *pmd_page = virt_to_page(pmd); | ||
318 | return page_count(pmd_page) == 1; | ||
319 | } | ||
320 | |||
321 | static void clear_pte_entry(pte_t *pte) | ||
322 | { | ||
323 | if (pte_present(*pte)) { | ||
324 | kvm_set_pte(pte, __pte(0)); | ||
325 | put_page(virt_to_page(pte)); | ||
326 | } | ||
327 | } | ||
328 | |||
329 | static bool pte_empty(pte_t *pte) | ||
330 | { | ||
331 | struct page *pte_page = virt_to_page(pte); | ||
332 | return page_count(pte_page) == 1; | ||
333 | } | ||
334 | |||
335 | /** | ||
336 | * unmap_stage2_range -- Clear stage2 page table entries to unmap a range | ||
337 | * @kvm: The VM pointer | ||
338 | * @start: The intermediate physical base address of the range to unmap | ||
339 | * @size: The size of the area to unmap | ||
340 | * | ||
341 | * Clear a range of stage-2 mappings, lowering the various ref-counts. Must | ||
342 | * be called while holding mmu_lock (unless for freeing the stage2 pgd before | ||
343 | * destroying the VM), otherwise another faulting VCPU may come in and mess | ||
344 | * with things behind our backs. | ||
345 | */ | ||
346 | static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) | ||
347 | { | ||
348 | pgd_t *pgd; | ||
349 | pud_t *pud; | ||
350 | pmd_t *pmd; | ||
351 | pte_t *pte; | ||
352 | phys_addr_t addr = start, end = start + size; | ||
353 | u64 range; | ||
354 | |||
355 | while (addr < end) { | ||
356 | pgd = kvm->arch.pgd + pgd_index(addr); | ||
357 | pud = pud_offset(pgd, addr); | ||
358 | if (pud_none(*pud)) { | ||
359 | addr += PUD_SIZE; | ||
360 | continue; | ||
361 | } | ||
362 | |||
363 | pmd = pmd_offset(pud, addr); | ||
364 | if (pmd_none(*pmd)) { | ||
365 | addr += PMD_SIZE; | ||
366 | continue; | ||
367 | } | ||
368 | |||
369 | pte = pte_offset_kernel(pmd, addr); | ||
370 | clear_pte_entry(pte); | ||
371 | range = PAGE_SIZE; | ||
372 | |||
373 | /* If we emptied the pte, walk back up the ladder */ | ||
374 | if (pte_empty(pte)) { | ||
375 | clear_pmd_entry(pmd); | ||
376 | range = PMD_SIZE; | ||
377 | if (pmd_empty(pmd)) { | ||
378 | clear_pud_entry(pud); | ||
379 | range = PUD_SIZE; | ||
380 | } | ||
381 | } | ||
382 | |||
383 | addr += range; | ||
384 | } | ||
385 | } | ||
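pte_empty() and pmd_empty() above never scan the table; they lean on a counting invariant instead: a table page starts life with a page refcount of 1 (the allocation itself), every entry installed takes one extra reference (get_page), and every entry cleared drops one (put_page), so a count of exactly 1 means the table holds no entries. A toy model of that invariant (illustrative C, not kernel code):

    /* Toy model of the table-occupancy refcounting used above. */
    struct table_page {
            int refcount;                  /* stands in for the page refcount */
    };

    static void table_init(struct table_page *t)  { t->refcount = 1; } /* alloc */
    static void entry_set(struct table_page *t)   { t->refcount++; }   /* get_page */
    static void entry_clear(struct table_page *t) { t->refcount--; }   /* put_page */
    static int  table_empty(struct table_page *t) { return t->refcount == 1; }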
386 | |||
387 | /** | ||
388 | * kvm_free_stage2_pgd - free all stage-2 tables | ||
389 | * @kvm: The KVM struct pointer for the VM. | ||
390 | * | ||
391 | * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all | ||
392 | * underlying level-2 and level-3 tables before freeing the actual level-1 table | ||
393 | * and setting the struct pointer to NULL. | ||
394 | * | ||
395 | * Note we don't need locking here as this is only called when the VM is | ||
396 | * destroyed, which can only be done once. | ||
397 | */ | ||
398 | void kvm_free_stage2_pgd(struct kvm *kvm) | ||
399 | { | ||
400 | if (kvm->arch.pgd == NULL) | ||
401 | return; | ||
402 | |||
403 | unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); | ||
404 | free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); | ||
405 | kvm->arch.pgd = NULL; | ||
406 | } | ||
407 | |||
408 | |||
409 | static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | ||
410 | phys_addr_t addr, const pte_t *new_pte, bool iomap) | ||
411 | { | ||
412 | pgd_t *pgd; | ||
413 | pud_t *pud; | ||
414 | pmd_t *pmd; | ||
415 | pte_t *pte, old_pte; | ||
416 | |||
417 | /* Create 2nd stage page table mapping - Level 1 */ | ||
418 | pgd = kvm->arch.pgd + pgd_index(addr); | ||
419 | pud = pud_offset(pgd, addr); | ||
420 | if (pud_none(*pud)) { | ||
421 | if (!cache) | ||
422 | return 0; /* ignore calls from kvm_set_spte_hva */ | ||
423 | pmd = mmu_memory_cache_alloc(cache); | ||
424 | pud_populate(NULL, pud, pmd); | ||
425 | pmd += pmd_index(addr); | ||
426 | get_page(virt_to_page(pud)); | ||
427 | } else | ||
428 | pmd = pmd_offset(pud, addr); | ||
429 | |||
430 | /* Create 2nd stage page table mapping - Level 2 */ | ||
431 | if (pmd_none(*pmd)) { | ||
432 | if (!cache) | ||
433 | return 0; /* ignore calls from kvm_set_spte_hva */ | ||
434 | pte = mmu_memory_cache_alloc(cache); | ||
435 | clean_pte_table(pte); | ||
436 | pmd_populate_kernel(NULL, pmd, pte); | ||
437 | pte += pte_index(addr); | ||
438 | get_page(virt_to_page(pmd)); | ||
439 | } else | ||
440 | pte = pte_offset_kernel(pmd, addr); | ||
441 | |||
442 | if (iomap && pte_present(*pte)) | ||
443 | return -EFAULT; | ||
444 | |||
445 | /* Create 2nd stage page table mapping - Level 3 */ | ||
446 | old_pte = *pte; | ||
447 | kvm_set_pte(pte, *new_pte); | ||
448 | if (pte_present(old_pte)) | ||
449 | kvm_tlb_flush_vmid(kvm); | ||
450 | else | ||
451 | get_page(virt_to_page(pte)); | ||
452 | |||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | /** | ||
457 | * kvm_phys_addr_ioremap - map a device range to guest IPA | ||
458 | * | ||
459 | * @kvm: The KVM pointer | ||
460 | * @guest_ipa: The IPA at which to insert the mapping | ||
461 | * @pa: The physical address of the device | ||
462 | * @size: The size of the mapping | ||
463 | */ | ||
464 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | ||
465 | phys_addr_t pa, unsigned long size) | ||
466 | { | ||
467 | phys_addr_t addr, end; | ||
468 | int ret = 0; | ||
469 | unsigned long pfn; | ||
470 | struct kvm_mmu_memory_cache cache = { 0, }; | ||
471 | |||
472 | end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; | ||
473 | pfn = __phys_to_pfn(pa); | ||
474 | |||
475 | for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { | ||
476 | pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR); | ||
477 | |||
478 | ret = mmu_topup_memory_cache(&cache, 2, 2); | ||
479 | if (ret) | ||
480 | goto out; | ||
481 | spin_lock(&kvm->mmu_lock); | ||
482 | ret = stage2_set_pte(kvm, &cache, addr, &pte, true); | ||
483 | spin_unlock(&kvm->mmu_lock); | ||
484 | if (ret) | ||
485 | goto out; | ||
486 | |||
487 | pfn++; | ||
488 | } | ||
489 | |||
490 | out: | ||
491 | mmu_free_memory_cache(&cache); | ||
492 | return ret; | ||
493 | } | ||
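A caller maps a device region page by page into the guest's IPA space; a hypothetical invocation (both addresses invented for illustration):

    /* Hypothetical: expose one 4 KiB device page at guest IPA 0x2c002000. */
    int ret = kvm_phys_addr_ioremap(kvm,
                                    0x2c002000, /* guest_ipa (made up) */
                                    0x1c010000, /* host physical addr (made up) */
                                    0x1000);    /* size: one page */
    if (ret)
            kvm_err("device mapping failed: %d\n", ret);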
494 | |||
495 | static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) | ||
496 | { | ||
497 | /* | ||
498 | * If we are going to insert an instruction page and the icache is | ||
499 | * either VIPT or PIPT, there is a potential problem where the host | ||
500 | * (or another VM) may have used the same page as this guest, and we | ||
501 | * read incorrect data from the icache. If we're using a PIPT cache, | ||
502 | * we can invalidate just that page, but if we are using a VIPT cache | ||
503 | * we need to invalidate the entire icache - damn shame - as written | ||
504 | * in the ARM ARM (DDI 0406C.b - Page B3-1393). | ||
505 | * | ||
506 | * VIVT caches are tagged using both the ASID and the VMID and don't | ||
507 | * need any kind of flushing (DDI 0406C.b - Page B3-1392). | ||
508 | */ | ||
509 | if (icache_is_pipt()) { | ||
510 | unsigned long hva = gfn_to_hva(kvm, gfn); | ||
511 | __cpuc_coherent_user_range(hva, hva + PAGE_SIZE); | ||
512 | } else if (!icache_is_vivt_asid_tagged()) { | ||
513 | /* any kind of VIPT cache */ | ||
514 | __flush_icache_all(); | ||
515 | } | ||
516 | } | ||
517 | |||
518 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | ||
519 | gfn_t gfn, struct kvm_memory_slot *memslot, | ||
520 | unsigned long fault_status) | ||
521 | { | ||
522 | pte_t new_pte; | ||
523 | pfn_t pfn; | ||
524 | int ret; | ||
525 | bool write_fault, writable; | ||
526 | unsigned long mmu_seq; | ||
527 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; | ||
528 | |||
529 | write_fault = kvm_is_write_fault(vcpu->arch.hsr); | ||
530 | if (fault_status == FSC_PERM && !write_fault) { | ||
531 | kvm_err("Unexpected L2 read permission error\n"); | ||
532 | return -EFAULT; | ||
533 | } | ||
534 | |||
535 | /* We need minimum second+third level pages */ | ||
536 | ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS); | ||
537 | if (ret) | ||
538 | return ret; | ||
539 | |||
540 | mmu_seq = vcpu->kvm->mmu_notifier_seq; | ||
541 | /* | ||
542 | * Ensure the read of mmu_notifier_seq happens before we call | ||
543 | * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk | ||
544 | * the page we just got a reference to getting unmapped before we have a | ||
545 | * chance to grab the mmu_lock, which ensures that if the page gets | ||
546 | * unmapped afterwards, the call to kvm_unmap_hva will take it away | ||
547 | * from us again properly. This smp_rmb() interacts with the smp_wmb() | ||
548 | * in kvm_mmu_notifier_invalidate_<page|range_end>. | ||
549 | */ | ||
550 | smp_rmb(); | ||
551 | |||
552 | pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable); | ||
553 | if (is_error_pfn(pfn)) | ||
554 | return -EFAULT; | ||
555 | |||
556 | new_pte = pfn_pte(pfn, PAGE_S2); | ||
557 | coherent_icache_guest_page(vcpu->kvm, gfn); | ||
558 | |||
559 | spin_lock(&vcpu->kvm->mmu_lock); | ||
560 | if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) | ||
561 | goto out_unlock; | ||
562 | if (writable) { | ||
563 | pte_val(new_pte) |= L_PTE_S2_RDWR; | ||
564 | kvm_set_pfn_dirty(pfn); | ||
565 | } | ||
566 | stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false); | ||
567 | |||
568 | out_unlock: | ||
569 | spin_unlock(&vcpu->kvm->mmu_lock); | ||
570 | kvm_release_pfn_clean(pfn); | ||
571 | return 0; | ||
572 | } | ||
573 | |||
574 | /** | ||
575 | * kvm_handle_guest_abort - handles all 2nd stage aborts | ||
576 | * @vcpu: the VCPU pointer | ||
577 | * @run: the kvm_run structure | ||
578 | * | ||
579 | * Any abort that gets to the host is almost guaranteed to be caused by a | ||
580 | * missing second stage translation table entry. This can mean either that the | ||
581 | * guest simply needs more memory, in which case we must allocate an appropriate | ||
582 | * page, or that the guest tried to access I/O memory, which is emulated by user | ||
583 | * space. The distinction is based on the IPA causing the fault and whether this | ||
584 | * memory region has been registered as standard RAM by user space. | ||
585 | */ | ||
586 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | ||
587 | { | ||
588 | unsigned long hsr_ec; | ||
589 | unsigned long fault_status; | ||
590 | phys_addr_t fault_ipa; | ||
591 | struct kvm_memory_slot *memslot; | ||
592 | bool is_iabt; | ||
593 | gfn_t gfn; | ||
594 | int ret, idx; | ||
595 | |||
596 | hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT; | ||
597 | is_iabt = (hsr_ec == HSR_EC_IABT); | ||
598 | fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8; | ||
599 | |||
600 | trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr, | ||
601 | vcpu->arch.hxfar, fault_ipa); | ||
602 | |||
603 | /* Check the stage-2 fault is trans. fault or write fault */ | ||
604 | fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE); | ||
605 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { | ||
606 | kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n", | ||
607 | hsr_ec, fault_status); | ||
608 | return -EFAULT; | ||
609 | } | ||
610 | |||
611 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
612 | |||
613 | gfn = fault_ipa >> PAGE_SHIFT; | ||
614 | if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) { | ||
615 | if (is_iabt) { | ||
616 | /* Prefetch Abort on I/O address */ | ||
617 | kvm_inject_pabt(vcpu, vcpu->arch.hxfar); | ||
618 | ret = 1; | ||
619 | goto out_unlock; | ||
620 | } | ||
621 | |||
622 | if (fault_status != FSC_FAULT) { | ||
623 | kvm_err("Unsupported fault status on io memory: %#lx\n", | ||
624 | fault_status); | ||
625 | ret = -EFAULT; | ||
626 | goto out_unlock; | ||
627 | } | ||
628 | |||
629 | /* Adjust page offset */ | ||
630 | fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK; | ||
631 | ret = io_mem_abort(vcpu, run, fault_ipa); | ||
632 | goto out_unlock; | ||
633 | } | ||
634 | |||
635 | memslot = gfn_to_memslot(vcpu->kvm, gfn); | ||
636 | if (!memslot->user_alloc) { | ||
637 | kvm_err("non user-alloc memslots not supported\n"); | ||
638 | ret = -EINVAL; | ||
639 | goto out_unlock; | ||
640 | } | ||
641 | |||
642 | ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status); | ||
643 | if (ret == 0) | ||
644 | ret = 1; | ||
645 | out_unlock: | ||
646 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
647 | return ret; | ||
648 | } | ||
649 | |||
650 | static void handle_hva_to_gpa(struct kvm *kvm, | ||
651 | unsigned long start, | ||
652 | unsigned long end, | ||
653 | void (*handler)(struct kvm *kvm, | ||
654 | gpa_t gpa, void *data), | ||
655 | void *data) | ||
656 | { | ||
657 | struct kvm_memslots *slots; | ||
658 | struct kvm_memory_slot *memslot; | ||
659 | |||
660 | slots = kvm_memslots(kvm); | ||
661 | |||
662 | /* we only care about the pages that the guest sees */ | ||
663 | kvm_for_each_memslot(memslot, slots) { | ||
664 | unsigned long hva_start, hva_end; | ||
665 | gfn_t gfn, gfn_end; | ||
666 | |||
667 | hva_start = max(start, memslot->userspace_addr); | ||
668 | hva_end = min(end, memslot->userspace_addr + | ||
669 | (memslot->npages << PAGE_SHIFT)); | ||
670 | if (hva_start >= hva_end) | ||
671 | continue; | ||
672 | |||
673 | /* | ||
674 | * {gfn(page) | page intersects with [hva_start, hva_end)} = | ||
675 | * {gfn_start, gfn_start+1, ..., gfn_end-1}. | ||
676 | */ | ||
677 | gfn = hva_to_gfn_memslot(hva_start, memslot); | ||
678 | gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); | ||
679 | |||
680 | for (; gfn < gfn_end; ++gfn) { | ||
681 | gpa_t gpa = gfn << PAGE_SHIFT; | ||
682 | handler(kvm, gpa, data); | ||
683 | } | ||
684 | } | ||
685 | } | ||
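Worked numbers for the clamping arithmetic above (slot layout invented, 4 KiB pages):

    /*
     * memslot: userspace_addr = 0x40000000, npages = 16, base_gfn = 0x80000
     * request: start = 0x40002800, end = 0x40005000
     *
     * hva_start = max(0x40002800, 0x40000000)        = 0x40002800
     * hva_end   = min(0x40005000, 0x40010000)        = 0x40005000
     * gfn       = 0x80000 + (0x2800 >> 12)           = 0x80002
     * gfn_end   = 0x80000 + (0x5fff >> 12)           = 0x80005
     *
     * so the handler runs for gpa 0x80002000, 0x80003000 and 0x80004000.
     */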
686 | |||
687 | static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) | ||
688 | { | ||
689 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); | ||
690 | kvm_tlb_flush_vmid(kvm); | ||
691 | } | ||
692 | |||
693 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | ||
694 | { | ||
695 | unsigned long end = hva + PAGE_SIZE; | ||
696 | |||
697 | if (!kvm->arch.pgd) | ||
698 | return 0; | ||
699 | |||
700 | trace_kvm_unmap_hva(hva); | ||
701 | handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); | ||
702 | return 0; | ||
703 | } | ||
704 | |||
705 | int kvm_unmap_hva_range(struct kvm *kvm, | ||
706 | unsigned long start, unsigned long end) | ||
707 | { | ||
708 | if (!kvm->arch.pgd) | ||
709 | return 0; | ||
710 | |||
711 | trace_kvm_unmap_hva_range(start, end); | ||
712 | handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); | ||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data) | ||
717 | { | ||
718 | pte_t *pte = (pte_t *)data; | ||
719 | |||
720 | stage2_set_pte(kvm, NULL, gpa, pte, false); | ||
721 | } | ||
722 | |||
723 | |||
724 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) | ||
725 | { | ||
726 | unsigned long end = hva + PAGE_SIZE; | ||
727 | pte_t stage2_pte; | ||
728 | |||
729 | if (!kvm->arch.pgd) | ||
730 | return; | ||
731 | |||
732 | trace_kvm_set_spte_hva(hva); | ||
733 | stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2); | ||
734 | handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); | ||
735 | } | ||
736 | |||
737 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) | ||
738 | { | ||
739 | mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); | ||
740 | } | ||
741 | |||
742 | phys_addr_t kvm_mmu_get_httbr(void) | ||
743 | { | ||
744 | VM_BUG_ON(!virt_addr_valid(hyp_pgd)); | ||
745 | return virt_to_phys(hyp_pgd); | ||
746 | } | ||
747 | |||
748 | int kvm_mmu_init(void) | ||
749 | { | ||
750 | if (!hyp_pgd) { | ||
751 | kvm_err("Hyp mode PGD not allocated\n"); | ||
752 | return -ENOMEM; | ||
753 | } | ||
754 | |||
755 | return 0; | ||
756 | } | ||
757 | |||
758 | /** | ||
759 | * kvm_clear_idmap - remove all idmaps from the hyp pgd | ||
760 | * | ||
761 | * Free the underlying pmds for all pgds in range and clear the pgds (but | ||
762 | * don't free them) afterwards. | ||
763 | */ | ||
764 | void kvm_clear_hyp_idmap(void) | ||
765 | { | ||
766 | unsigned long addr, end; | ||
767 | unsigned long next; | ||
768 | pgd_t *pgd = hyp_pgd; | ||
769 | pud_t *pud; | ||
770 | pmd_t *pmd; | ||
771 | |||
772 | addr = virt_to_phys(__hyp_idmap_text_start); | ||
773 | end = virt_to_phys(__hyp_idmap_text_end); | ||
774 | |||
775 | pgd += pgd_index(addr); | ||
776 | do { | ||
777 | next = pgd_addr_end(addr, end); | ||
778 | if (pgd_none_or_clear_bad(pgd)) | ||
779 | continue; | ||
780 | pud = pud_offset(pgd, addr); | ||
781 | pmd = pmd_offset(pud, addr); | ||
782 | |||
783 | pud_clear(pud); | ||
784 | clean_pmd_entry(pmd); | ||
785 | pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK)); | ||
786 | } while (pgd++, addr = next, addr < end); | ||
787 | } | ||
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c new file mode 100644 index 000000000000..7ee5bb7a3667 --- /dev/null +++ b/arch/arm/kvm/psci.c | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #include <linux/kvm_host.h> | ||
19 | #include <linux/wait.h> | ||
20 | |||
21 | #include <asm/kvm_emulate.h> | ||
22 | #include <asm/kvm_psci.h> | ||
23 | |||
24 | /* | ||
25 | * This is an implementation of the Power State Coordination Interface | ||
26 | * as described in ARM document number ARM DEN 0022A. | ||
27 | */ | ||
28 | |||
29 | static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu) | ||
30 | { | ||
31 | vcpu->arch.pause = true; | ||
32 | } | ||
33 | |||
34 | static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | ||
35 | { | ||
36 | struct kvm *kvm = source_vcpu->kvm; | ||
37 | struct kvm_vcpu *vcpu; | ||
38 | wait_queue_head_t *wq; | ||
39 | unsigned long cpu_id; | ||
40 | phys_addr_t target_pc; | ||
41 | |||
42 | cpu_id = *vcpu_reg(source_vcpu, 1); | ||
43 | if (vcpu_mode_is_32bit(source_vcpu)) | ||
44 | cpu_id &= ~((u32) 0); | ||
45 | |||
46 | if (cpu_id >= atomic_read(&kvm->online_vcpus)) | ||
47 | return KVM_PSCI_RET_INVAL; | ||
48 | |||
49 | target_pc = *vcpu_reg(source_vcpu, 2); | ||
50 | |||
51 | vcpu = kvm_get_vcpu(kvm, cpu_id); | ||
52 | |||
53 | wq = kvm_arch_vcpu_wq(vcpu); | ||
54 | if (!waitqueue_active(wq)) | ||
55 | return KVM_PSCI_RET_INVAL; | ||
56 | |||
57 | kvm_reset_vcpu(vcpu); | ||
58 | |||
59 | /* Gracefully handle Thumb2 entry point */ | ||
60 | if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { | ||
61 | target_pc &= ~((phys_addr_t) 1); | ||
62 | vcpu_set_thumb(vcpu); | ||
63 | } | ||
64 | |||
65 | *vcpu_pc(vcpu) = target_pc; | ||
66 | vcpu->arch.pause = false; | ||
67 | smp_mb(); /* Make sure the above is visible */ | ||
68 | |||
69 | wake_up_interruptible(wq); | ||
70 | |||
71 | return KVM_PSCI_RET_SUCCESS; | ||
72 | } | ||
73 | |||
74 | /** | ||
75 | * kvm_psci_call - handle PSCI call if r0 value is in range | ||
76 | * @vcpu: Pointer to the VCPU struct | ||
77 | * | ||
78 | * Handle PSCI calls from guests through traps from HVC or SMC instructions. | ||
79 | * The calling convention is similar to SMC calls to the secure world where | ||
80 | * the function number is placed in r0 and this function returns true if the | ||
81 | * function number specified in r0 is withing the PSCI range, and false | ||
82 | * otherwise. | ||
83 | */ | ||
84 | bool kvm_psci_call(struct kvm_vcpu *vcpu) | ||
85 | { | ||
86 | unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0); | ||
87 | unsigned long val; | ||
88 | |||
89 | switch (psci_fn) { | ||
90 | case KVM_PSCI_FN_CPU_OFF: | ||
91 | kvm_psci_vcpu_off(vcpu); | ||
92 | val = KVM_PSCI_RET_SUCCESS; | ||
93 | break; | ||
94 | case KVM_PSCI_FN_CPU_ON: | ||
95 | val = kvm_psci_vcpu_on(vcpu); | ||
96 | break; | ||
97 | case KVM_PSCI_FN_CPU_SUSPEND: | ||
98 | case KVM_PSCI_FN_MIGRATE: | ||
99 | val = KVM_PSCI_RET_NI; | ||
100 | break; | ||
101 | |||
102 | default: | ||
103 | return false; | ||
104 | } | ||
105 | |||
106 | *vcpu_reg(vcpu, 0) = val; | ||
107 | return true; | ||
108 | } | ||
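kvm_psci_call() is meant to be invoked from the HVC/SMC exit path, with the guest's r0 already holding the function number. A sketch of the dispatch site (simplified; this is the assumed shape of the real handler in arch/arm/kvm/arm.c):

    /* Simplified sketch of an HVC exit handler that consumes kvm_psci_call(). */
    static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
    {
            if (kvm_psci_call(vcpu))    /* r0 held a PSCI function number */
                    return 1;           /* handled in-kernel, resume the guest */

            kvm_inject_undefined(vcpu); /* not PSCI: reflect an undef to guest */
            return 1;
    }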
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c new file mode 100644 index 000000000000..b80256b554cd --- /dev/null +++ b/arch/arm/kvm/reset.c | |||
@@ -0,0 +1,74 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | ||
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License, version 2, as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
17 | */ | ||
18 | #include <linux/compiler.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/kvm_host.h> | ||
22 | #include <linux/kvm.h> | ||
23 | |||
24 | #include <asm/unified.h> | ||
25 | #include <asm/ptrace.h> | ||
26 | #include <asm/cputype.h> | ||
27 | #include <asm/kvm_arm.h> | ||
28 | #include <asm/kvm_coproc.h> | ||
29 | |||
30 | /****************************************************************************** | ||
31 | * Cortex-A15 Reset Values | ||
32 | */ | ||
33 | |||
34 | static const int a15_max_cpu_idx = 3; | ||
35 | |||
36 | static struct kvm_regs a15_regs_reset = { | ||
37 | .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT, | ||
38 | }; | ||
39 | |||
40 | |||
41 | /******************************************************************************* | ||
42 | * Exported reset function | ||
43 | */ | ||
44 | |||
45 | /** | ||
46 | * kvm_reset_vcpu - sets core registers and cp15 registers to their reset values | ||
47 | * @vcpu: The VCPU pointer | ||
48 | * | ||
49 | * This function finds the right table above and sets the registers on the | ||
50 | * virtual CPU struct to their architecturally defined reset values. | ||
51 | */ | ||
52 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | ||
53 | { | ||
54 | struct kvm_regs *cpu_reset; | ||
55 | |||
56 | switch (vcpu->arch.target) { | ||
57 | case KVM_ARM_TARGET_CORTEX_A15: | ||
58 | if (vcpu->vcpu_id > a15_max_cpu_idx) | ||
59 | return -EINVAL; | ||
60 | cpu_reset = &a15_regs_reset; | ||
61 | vcpu->arch.midr = read_cpuid_id(); | ||
62 | break; | ||
63 | default: | ||
64 | return -ENODEV; | ||
65 | } | ||
66 | |||
67 | /* Reset core registers */ | ||
68 | memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs)); | ||
69 | |||
70 | /* Reset CP15 registers */ | ||
71 | kvm_reset_coprocs(vcpu); | ||
72 | |||
73 | return 0; | ||
74 | } | ||
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h new file mode 100644 index 000000000000..a8e73ed5ad5b --- /dev/null +++ b/arch/arm/kvm/trace.h | |||
@@ -0,0 +1,235 @@ | |||
1 | #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_KVM_H | ||
3 | |||
4 | #include <linux/tracepoint.h> | ||
5 | |||
6 | #undef TRACE_SYSTEM | ||
7 | #define TRACE_SYSTEM kvm | ||
8 | |||
9 | /* | ||
10 | * Tracepoints for entry/exit to guest | ||
11 | */ | ||
12 | TRACE_EVENT(kvm_entry, | ||
13 | TP_PROTO(unsigned long vcpu_pc), | ||
14 | TP_ARGS(vcpu_pc), | ||
15 | |||
16 | TP_STRUCT__entry( | ||
17 | __field( unsigned long, vcpu_pc ) | ||
18 | ), | ||
19 | |||
20 | TP_fast_assign( | ||
21 | __entry->vcpu_pc = vcpu_pc; | ||
22 | ), | ||
23 | |||
24 | TP_printk("PC: 0x%08lx", __entry->vcpu_pc) | ||
25 | ); | ||
26 | |||
27 | TRACE_EVENT(kvm_exit, | ||
28 | TP_PROTO(unsigned long vcpu_pc), | ||
29 | TP_ARGS(vcpu_pc), | ||
30 | |||
31 | TP_STRUCT__entry( | ||
32 | __field( unsigned long, vcpu_pc ) | ||
33 | ), | ||
34 | |||
35 | TP_fast_assign( | ||
36 | __entry->vcpu_pc = vcpu_pc; | ||
37 | ), | ||
38 | |||
39 | TP_printk("PC: 0x%08lx", __entry->vcpu_pc) | ||
40 | ); | ||
41 | |||
42 | TRACE_EVENT(kvm_guest_fault, | ||
43 | TP_PROTO(unsigned long vcpu_pc, unsigned long hsr, | ||
44 | unsigned long hxfar, | ||
45 | unsigned long long ipa), | ||
46 | TP_ARGS(vcpu_pc, hsr, hxfar, ipa), | ||
47 | |||
48 | TP_STRUCT__entry( | ||
49 | __field( unsigned long, vcpu_pc ) | ||
50 | __field( unsigned long, hsr ) | ||
51 | __field( unsigned long, hxfar ) | ||
52 | __field( unsigned long long, ipa ) | ||
53 | ), | ||
54 | |||
55 | TP_fast_assign( | ||
56 | __entry->vcpu_pc = vcpu_pc; | ||
57 | __entry->hsr = hsr; | ||
58 | __entry->hxfar = hxfar; | ||
59 | __entry->ipa = ipa; | ||
60 | ), | ||
61 | |||
62 | TP_printk("guest fault at PC %#08lx (hxfar %#08lx, " | ||
63 | "ipa %#16llx, hsr %#08lx", | ||
64 | __entry->vcpu_pc, __entry->hxfar, | ||
65 | __entry->ipa, __entry->hsr) | ||
66 | ); | ||
67 | |||
68 | TRACE_EVENT(kvm_irq_line, | ||
69 | TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level), | ||
70 | TP_ARGS(type, vcpu_idx, irq_num, level), | ||
71 | |||
72 | TP_STRUCT__entry( | ||
73 | __field( unsigned int, type ) | ||
74 | __field( int, vcpu_idx ) | ||
75 | __field( int, irq_num ) | ||
76 | __field( int, level ) | ||
77 | ), | ||
78 | |||
79 | TP_fast_assign( | ||
80 | __entry->type = type; | ||
81 | __entry->vcpu_idx = vcpu_idx; | ||
82 | __entry->irq_num = irq_num; | ||
83 | __entry->level = level; | ||
84 | ), | ||
85 | |||
86 | TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d", | ||
87 | (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" : | ||
88 | (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" : | ||
89 | (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN", | ||
90 | __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level) | ||
91 | ); | ||
92 | |||
93 | TRACE_EVENT(kvm_mmio_emulate, | ||
94 | TP_PROTO(unsigned long vcpu_pc, unsigned long instr, | ||
95 | unsigned long cpsr), | ||
96 | TP_ARGS(vcpu_pc, instr, cpsr), | ||
97 | |||
98 | TP_STRUCT__entry( | ||
99 | __field( unsigned long, vcpu_pc ) | ||
100 | __field( unsigned long, instr ) | ||
101 | __field( unsigned long, cpsr ) | ||
102 | ), | ||
103 | |||
104 | TP_fast_assign( | ||
105 | __entry->vcpu_pc = vcpu_pc; | ||
106 | __entry->instr = instr; | ||
107 | __entry->cpsr = cpsr; | ||
108 | ), | ||
109 | |||
110 | TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)", | ||
111 | __entry->vcpu_pc, __entry->instr, __entry->cpsr) | ||
112 | ); | ||
113 | |||
114 | /* Architecturally implementation defined CP15 register access */ | ||
115 | TRACE_EVENT(kvm_emulate_cp15_imp, | ||
116 | TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn, | ||
117 | unsigned long CRm, unsigned long Op2, bool is_write), | ||
118 | TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write), | ||
119 | |||
120 | TP_STRUCT__entry( | ||
121 | __field( unsigned int, Op1 ) | ||
122 | __field( unsigned int, Rt1 ) | ||
123 | __field( unsigned int, CRn ) | ||
124 | __field( unsigned int, CRm ) | ||
125 | __field( unsigned int, Op2 ) | ||
126 | __field( bool, is_write ) | ||
127 | ), | ||
128 | |||
129 | TP_fast_assign( | ||
130 | __entry->is_write = is_write; | ||
131 | __entry->Op1 = Op1; | ||
132 | __entry->Rt1 = Rt1; | ||
133 | __entry->CRn = CRn; | ||
134 | __entry->CRm = CRm; | ||
135 | __entry->Op2 = Op2; | ||
136 | ), | ||
137 | |||
138 | TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u", | ||
139 | (__entry->is_write) ? "mcr" : "mrc", | ||
140 | __entry->Op1, __entry->Rt1, __entry->CRn, | ||
141 | __entry->CRm, __entry->Op2) | ||
142 | ); | ||
143 | |||
144 | TRACE_EVENT(kvm_wfi, | ||
145 | TP_PROTO(unsigned long vcpu_pc), | ||
146 | TP_ARGS(vcpu_pc), | ||
147 | |||
148 | TP_STRUCT__entry( | ||
149 | __field( unsigned long, vcpu_pc ) | ||
150 | ), | ||
151 | |||
152 | TP_fast_assign( | ||
153 | __entry->vcpu_pc = vcpu_pc; | ||
154 | ), | ||
155 | |||
156 | TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc) | ||
157 | ); | ||
158 | |||
159 | TRACE_EVENT(kvm_unmap_hva, | ||
160 | TP_PROTO(unsigned long hva), | ||
161 | TP_ARGS(hva), | ||
162 | |||
163 | TP_STRUCT__entry( | ||
164 | __field( unsigned long, hva ) | ||
165 | ), | ||
166 | |||
167 | TP_fast_assign( | ||
168 | __entry->hva = hva; | ||
169 | ), | ||
170 | |||
171 | TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva) | ||
172 | ); | ||
173 | |||
174 | TRACE_EVENT(kvm_unmap_hva_range, | ||
175 | TP_PROTO(unsigned long start, unsigned long end), | ||
176 | TP_ARGS(start, end), | ||
177 | |||
178 | TP_STRUCT__entry( | ||
179 | __field( unsigned long, start ) | ||
180 | __field( unsigned long, end ) | ||
181 | ), | ||
182 | |||
183 | TP_fast_assign( | ||
184 | __entry->start = start; | ||
185 | __entry->end = end; | ||
186 | ), | ||
187 | |||
188 | TP_printk("mmu notifier unmap range: %#08lx -- %#08lx", | ||
189 | __entry->start, __entry->end) | ||
190 | ); | ||
191 | |||
192 | TRACE_EVENT(kvm_set_spte_hva, | ||
193 | TP_PROTO(unsigned long hva), | ||
194 | TP_ARGS(hva), | ||
195 | |||
196 | TP_STRUCT__entry( | ||
197 | __field( unsigned long, hva ) | ||
198 | ), | ||
199 | |||
200 | TP_fast_assign( | ||
201 | __entry->hva = hva; | ||
202 | ), | ||
203 | |||
204 | TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva) | ||
205 | ); | ||
206 | |||
207 | TRACE_EVENT(kvm_hvc, | ||
208 | TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm), | ||
209 | TP_ARGS(vcpu_pc, r0, imm), | ||
210 | |||
211 | TP_STRUCT__entry( | ||
212 | __field( unsigned long, vcpu_pc ) | ||
213 | __field( unsigned long, r0 ) | ||
214 | __field( unsigned long, imm ) | ||
215 | ), | ||
216 | |||
217 | TP_fast_assign( | ||
218 | __entry->vcpu_pc = vcpu_pc; | ||
219 | __entry->r0 = r0; | ||
220 | __entry->imm = imm; | ||
221 | ), | ||
222 | |||
223 | TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx", | ||
224 | __entry->vcpu_pc, __entry->r0, __entry->imm) | ||
225 | ); | ||
226 | |||
227 | #endif /* _TRACE_KVM_H */ | ||
228 | |||
229 | #undef TRACE_INCLUDE_PATH | ||
230 | #define TRACE_INCLUDE_PATH arch/arm/kvm | ||
231 | #undef TRACE_INCLUDE_FILE | ||
232 | #define TRACE_INCLUDE_FILE trace | ||
233 | |||
234 | /* This part must be outside protection */ | ||
235 | #include <trace/define_trace.h> | ||
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c index 9107691adbdb..5ac9e9384b15 100644 --- a/arch/arm/mach-davinci/cpuidle.c +++ b/arch/arm/mach-davinci/cpuidle.c | |||
@@ -25,35 +25,44 @@ | |||
25 | 25 | ||
26 | #define DAVINCI_CPUIDLE_MAX_STATES 2 | 26 | #define DAVINCI_CPUIDLE_MAX_STATES 2 |
27 | 27 | ||
28 | struct davinci_ops { | 28 | static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device); |
29 | void (*enter) (u32 flags); | 29 | static void __iomem *ddr2_reg_base; |
30 | void (*exit) (u32 flags); | 30 | static bool ddr2_pdown; |
31 | u32 flags; | 31 | |
32 | }; | 32 | static void davinci_save_ddr_power(int enter, bool pdown) |
33 | { | ||
34 | u32 val; | ||
35 | |||
36 | val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET); | ||
37 | |||
38 | if (enter) { | ||
39 | if (pdown) | ||
40 | val |= DDR2_SRPD_BIT; | ||
41 | else | ||
42 | val &= ~DDR2_SRPD_BIT; | ||
43 | val |= DDR2_LPMODEN_BIT; | ||
44 | } else { | ||
45 | val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT); | ||
46 | } | ||
47 | |||
48 | __raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET); | ||
49 | } | ||
33 | 50 | ||
34 | /* Actual code that puts the SoC in different idle states */ | 51 | /* Actual code that puts the SoC in different idle states */ |
35 | static int davinci_enter_idle(struct cpuidle_device *dev, | 52 | static int davinci_enter_idle(struct cpuidle_device *dev, |
36 | struct cpuidle_driver *drv, | 53 | struct cpuidle_driver *drv, |
37 | int index) | 54 | int index) |
38 | { | 55 | { |
39 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 56 | davinci_save_ddr_power(1, ddr2_pdown); |
40 | struct davinci_ops *ops = cpuidle_get_statedata(state_usage); | ||
41 | |||
42 | if (ops && ops->enter) | ||
43 | ops->enter(ops->flags); | ||
44 | 57 | ||
45 | index = cpuidle_wrap_enter(dev, drv, index, | 58 | index = cpuidle_wrap_enter(dev, drv, index, |
46 | arm_cpuidle_simple_enter); | 59 | arm_cpuidle_simple_enter); |
47 | 60 | ||
48 | if (ops && ops->exit) | 61 | davinci_save_ddr_power(0, ddr2_pdown); |
49 | ops->exit(ops->flags); | ||
50 | 62 | ||
51 | return index; | 63 | return index; |
52 | } | 64 | } |
53 | 65 | ||
54 | /* fields in davinci_ops.flags */ | ||
55 | #define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN BIT(0) | ||
56 | |||
57 | static struct cpuidle_driver davinci_idle_driver = { | 66 | static struct cpuidle_driver davinci_idle_driver = { |
58 | .name = "cpuidle-davinci", | 67 | .name = "cpuidle-davinci", |
59 | .owner = THIS_MODULE, | 68 | .owner = THIS_MODULE, |
@@ -70,45 +79,6 @@ static struct cpuidle_driver davinci_idle_driver = { | |||
70 | .state_count = DAVINCI_CPUIDLE_MAX_STATES, | 79 | .state_count = DAVINCI_CPUIDLE_MAX_STATES, |
71 | }; | 80 | }; |
72 | 81 | ||
73 | static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device); | ||
74 | static void __iomem *ddr2_reg_base; | ||
75 | |||
76 | static void davinci_save_ddr_power(int enter, bool pdown) | ||
77 | { | ||
78 | u32 val; | ||
79 | |||
80 | val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET); | ||
81 | |||
82 | if (enter) { | ||
83 | if (pdown) | ||
84 | val |= DDR2_SRPD_BIT; | ||
85 | else | ||
86 | val &= ~DDR2_SRPD_BIT; | ||
87 | val |= DDR2_LPMODEN_BIT; | ||
88 | } else { | ||
89 | val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT); | ||
90 | } | ||
91 | |||
92 | __raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET); | ||
93 | } | ||
94 | |||
95 | static void davinci_c2state_enter(u32 flags) | ||
96 | { | ||
97 | davinci_save_ddr_power(1, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN)); | ||
98 | } | ||
99 | |||
100 | static void davinci_c2state_exit(u32 flags) | ||
101 | { | ||
102 | davinci_save_ddr_power(0, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN)); | ||
103 | } | ||
104 | |||
105 | static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = { | ||
106 | [1] = { | ||
107 | .enter = davinci_c2state_enter, | ||
108 | .exit = davinci_c2state_exit, | ||
109 | }, | ||
110 | }; | ||
111 | |||
112 | static int __init davinci_cpuidle_probe(struct platform_device *pdev) | 82 | static int __init davinci_cpuidle_probe(struct platform_device *pdev) |
113 | { | 83 | { |
114 | int ret; | 84 | int ret; |
@@ -124,11 +94,7 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev) | |||
124 | 94 | ||
125 | ddr2_reg_base = pdata->ddr2_ctlr_base; | 95 | ddr2_reg_base = pdata->ddr2_ctlr_base; |
126 | 96 | ||
127 | if (pdata->ddr2_pdown) | 97 | ddr2_pdown = pdata->ddr2_pdown; |
128 | davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN; | ||
129 | cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]); | ||
130 | |||
131 | device->state_count = DAVINCI_CPUIDLE_MAX_STATES; | ||
132 | 98 | ||
133 | ret = cpuidle_register_driver(&davinci_idle_driver); | 99 | ret = cpuidle_register_driver(&davinci_idle_driver); |
134 | if (ret) { | 100 | if (ret) { |
diff --git a/arch/arm/mach-exynos/include/mach/cpufreq.h b/arch/arm/mach-exynos/include/mach/cpufreq.h index 7517c3f417af..b5d39dd03b2a 100644 --- a/arch/arm/mach-exynos/include/mach/cpufreq.h +++ b/arch/arm/mach-exynos/include/mach/cpufreq.h | |||
@@ -18,12 +18,25 @@ enum cpufreq_level_index { | |||
18 | L20, | 18 | L20, |
19 | }; | 19 | }; |
20 | 20 | ||
21 | #define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \ | ||
22 | { \ | ||
23 | .freq = (f) * 1000, \ | ||
24 | .clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \ | ||
25 | (a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \ | ||
26 | .clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \ | ||
27 | .mps = ((m) << 16 | (p) << 8 | (s)), \ | ||
28 | } | ||
29 | |||
30 | struct apll_freq { | ||
31 | unsigned int freq; | ||
32 | u32 clk_div_cpu0; | ||
33 | u32 clk_div_cpu1; | ||
34 | u32 mps; | ||
35 | }; | ||
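The APLL_FREQ() initializer packs eight CPU0 divider nibbles, three CPU1 dividers and the PLL M/P/S values into the three register images of struct apll_freq. A hypothetical expansion (divider values invented purely to show the packing):

    /*
     * APLL_FREQ(1000, 0, 3, 7, 3, 4, 1, 7, 0, 5, 0, 0, 250, 6, 1) expands to:
     *
     * .freq         = 1000000                                          (kHz)
     * .clk_div_cpu0 = 0 | 3<<4 | 7<<8 | 3<<12 | 4<<16 | 1<<20 | 7<<24 | 0<<28
     *               = 0x07143730
     * .clk_div_cpu1 = 5<<0 | 0<<4 | 0<<8 = 0x00000005
     * .mps          = 250<<16 | 6<<8 | 1 = 0x00fa0601
     */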
36 | |||
21 | struct exynos_dvfs_info { | 37 | struct exynos_dvfs_info { |
22 | unsigned long mpll_freq_khz; | 38 | unsigned long mpll_freq_khz; |
23 | unsigned int pll_safe_idx; | 39 | unsigned int pll_safe_idx; |
24 | unsigned int pm_lock_idx; | ||
25 | unsigned int max_support_idx; | ||
26 | unsigned int min_support_idx; | ||
27 | struct clk *cpu_clk; | 40 | struct clk *cpu_clk; |
28 | unsigned int *volt_table; | 41 | unsigned int *volt_table; |
29 | struct cpufreq_frequency_table *freq_table; | 42 | struct cpufreq_frequency_table *freq_table; |
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig index 551c97e87a78..44b12f9c1584 100644 --- a/arch/arm/mach-highbank/Kconfig +++ b/arch/arm/mach-highbank/Kconfig | |||
@@ -1,5 +1,7 @@ | |||
1 | config ARCH_HIGHBANK | 1 | config ARCH_HIGHBANK |
2 | bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7 | 2 | bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7 |
3 | select ARCH_HAS_CPUFREQ | ||
4 | select ARCH_HAS_OPP | ||
3 | select ARCH_WANT_OPTIONAL_GPIOLIB | 5 | select ARCH_WANT_OPTIONAL_GPIOLIB |
4 | select ARM_AMBA | 6 | select ARM_AMBA |
5 | select ARM_GIC | 7 | select ARM_GIC |
@@ -11,5 +13,7 @@ config ARCH_HIGHBANK | |||
11 | select GENERIC_CLOCKEVENTS | 13 | select GENERIC_CLOCKEVENTS |
12 | select HAVE_ARM_SCU | 14 | select HAVE_ARM_SCU |
13 | select HAVE_SMP | 15 | select HAVE_SMP |
16 | select MAILBOX | ||
17 | select PL320_MBOX | ||
14 | select SPARSE_IRQ | 18 | select SPARSE_IRQ |
15 | select USE_OF | 19 | select USE_OF |
diff --git a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h index 80235b46cb58..3f65206a9b92 100644 --- a/arch/arm/mach-highbank/core.h +++ b/arch/arm/mach-highbank/core.h | |||
@@ -2,7 +2,6 @@ | |||
2 | #define __HIGHBANK_CORE_H | 2 | #define __HIGHBANK_CORE_H |
3 | 3 | ||
4 | extern void highbank_set_cpu_jump(int cpu, void *jump_addr); | 4 | extern void highbank_set_cpu_jump(int cpu, void *jump_addr); |
5 | extern void highbank_clocks_init(void); | ||
6 | extern void highbank_restart(char, const char *); | 5 | extern void highbank_restart(char, const char *); |
7 | extern void __iomem *scu_base_addr; | 6 | extern void __iomem *scu_base_addr; |
8 | 7 | ||
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index e6c061282939..65656ff0eb33 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/of_address.h> | 25 | #include <linux/of_address.h> |
26 | #include <linux/smp.h> | 26 | #include <linux/smp.h> |
27 | #include <linux/amba/bus.h> | 27 | #include <linux/amba/bus.h> |
28 | #include <linux/clk-provider.h> | ||
28 | 29 | ||
29 | #include <asm/arch_timer.h> | 30 | #include <asm/arch_timer.h> |
30 | #include <asm/cacheflush.h> | 31 | #include <asm/cacheflush.h> |
@@ -117,7 +118,7 @@ static void __init highbank_timer_init(void) | |||
117 | WARN_ON(!timer_base); | 118 | WARN_ON(!timer_base); |
118 | irq = irq_of_parse_and_map(np, 0); | 119 | irq = irq_of_parse_and_map(np, 0); |
119 | 120 | ||
120 | highbank_clocks_init(); | 121 | of_clk_init(NULL); |
121 | lookup.clk = of_clk_get(np, 0); | 122 | lookup.clk = of_clk_get(np, 0); |
122 | clkdev_add(&lookup); | 123 | clkdev_add(&lookup); |
123 | 124 | ||
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 7be3622cfc85..2d93d8b23835 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c | |||
@@ -351,12 +351,10 @@ static void omap3_pm_idle(void) | |||
351 | if (omap_irq_pending()) | 351 | if (omap_irq_pending()) |
352 | goto out; | 352 | goto out; |
353 | 353 | ||
354 | trace_power_start(POWER_CSTATE, 1, smp_processor_id()); | ||
355 | trace_cpu_idle(1, smp_processor_id()); | 354 | trace_cpu_idle(1, smp_processor_id()); |
356 | 355 | ||
357 | omap_sram_idle(); | 356 | omap_sram_idle(); |
358 | 357 | ||
359 | trace_power_end(smp_processor_id()); | ||
360 | trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); | 358 | trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); |
361 | 359 | ||
362 | out: | 360 | out: |
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig index 3fdd0085e306..8709a39bd34c 100644 --- a/arch/arm/mach-sunxi/Kconfig +++ b/arch/arm/mach-sunxi/Kconfig | |||
@@ -7,3 +7,4 @@ config ARCH_SUNXI | |||
7 | select PINCTRL | 7 | select PINCTRL |
8 | select SPARSE_IRQ | 8 | select SPARSE_IRQ |
9 | select SUNXI_TIMER | 9 | select SUNXI_TIMER |
10 | select PINCTRL_SUNXI \ No newline at end of file | ||
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c index a74d3c7d2e26..a36a03d3c9a0 100644 --- a/arch/arm/mach-tegra/cpu-tegra.c +++ b/arch/arm/mach-tegra/cpu-tegra.c | |||
@@ -243,8 +243,7 @@ static int tegra_cpu_init(struct cpufreq_policy *policy) | |||
243 | /* FIXME: what's the actual transition time? */ | 243 | /* FIXME: what's the actual transition time? */ |
244 | policy->cpuinfo.transition_latency = 300 * 1000; | 244 | policy->cpuinfo.transition_latency = 300 * 1000; |
245 | 245 | ||
246 | policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; | 246 | cpumask_copy(policy->cpus, cpu_possible_mask); |
247 | cpumask_copy(policy->related_cpus, cpu_possible_mask); | ||
248 | 247 | ||
249 | if (policy->cpu == 0) | 248 | if (policy->cpu == 0) |
250 | register_pm_notifier(&tegra_cpu_pm_notifier); | 249 | register_pm_notifier(&tegra_cpu_pm_notifier); |
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig index 5dea90636d94..3e5bbd0e5b23 100644 --- a/arch/arm/mach-ux500/Kconfig +++ b/arch/arm/mach-ux500/Kconfig | |||
@@ -11,6 +11,7 @@ config UX500_SOC_COMMON | |||
11 | select COMMON_CLK | 11 | select COMMON_CLK |
12 | select PINCTRL | 12 | select PINCTRL |
13 | select PINCTRL_NOMADIK | 13 | select PINCTRL_NOMADIK |
14 | select PINCTRL_ABX500 | ||
14 | select PL310_ERRATA_753970 if CACHE_PL310 | 15 | select PL310_ERRATA_753970 if CACHE_PL310 |
15 | 16 | ||
16 | config UX500_SOC_DB8500 | 17 | config UX500_SOC_DB8500 |
@@ -18,6 +19,11 @@ config UX500_SOC_DB8500 | |||
18 | select CPU_FREQ_TABLE if CPU_FREQ | 19 | select CPU_FREQ_TABLE if CPU_FREQ |
19 | select MFD_DB8500_PRCMU | 20 | select MFD_DB8500_PRCMU |
20 | select PINCTRL_DB8500 | 21 | select PINCTRL_DB8500 |
22 | select PINCTRL_DB8540 | ||
23 | select PINCTRL_AB8500 | ||
24 | select PINCTRL_AB8505 | ||
25 | select PINCTRL_AB9540 | ||
26 | select PINCTRL_AB8540 | ||
21 | select REGULATOR | 27 | select REGULATOR |
22 | select REGULATOR_DB8500_PRCMU | 28 | select REGULATOR_DB8500_PRCMU |
23 | 29 | ||
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index d453522edb0d..b8781caa54b8 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c | |||
@@ -90,26 +90,8 @@ static struct platform_device snowball_gpio_en_3v3_regulator_dev = { | |||
90 | }, | 90 | }, |
91 | }; | 91 | }; |
92 | 92 | ||
93 | static struct ab8500_gpio_platform_data ab8500_gpio_pdata = { | 93 | static struct abx500_gpio_platform_data ab8500_gpio_pdata = { |
94 | .gpio_base = MOP500_AB8500_PIN_GPIO(1), | 94 | .gpio_base = MOP500_AB8500_PIN_GPIO(1), |
95 | .irq_base = MOP500_AB8500_VIR_GPIO_IRQ_BASE, | ||
96 | /* config_reg is the initial configuration of ab8500 pins. | ||
97 | * The pins can be configured as GPIO or alt functions based | ||
98 | * on value present in GpioSel1 to GpioSel6 and AlternatFunction | ||
99 | * register. This is the array of 7 configuration settings. | ||
100 | * One has to compile time decide these settings. Below is the | ||
101 | * explanation of these setting | ||
102 | * GpioSel1 = 0x00 => Pins GPIO1 to GPIO8 are not used as GPIO | ||
103 | * GpioSel2 = 0x1E => Pins GPIO10 to GPIO13 are configured as GPIO | ||
104 | * GpioSel3 = 0x80 => Pin GPIO24 is configured as GPIO | ||
105 | * GpioSel4 = 0x01 => Pin GPIo25 is configured as GPIO | ||
106 | * GpioSel5 = 0x7A => Pins GPIO34, GPIO36 to GPIO39 are conf as GPIO | ||
107 | * GpioSel6 = 0x00 => Pins GPIO41 & GPIo42 are not configured as GPIO | ||
108 | * AlternaFunction = 0x00 => If Pins GPIO10 to 13 are not configured | ||
109 | * as GPIO then this register selectes the alternate fucntions | ||
110 | */ | ||
111 | .config_reg = {0x00, 0x1E, 0x80, 0x01, | ||
112 | 0x7A, 0x00, 0x00}, | ||
113 | }; | 95 | }; |
114 | 96 | ||
115 | /* ab8500-codec */ | 97 | /* ab8500-codec */ |
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 5b286e06474c..b80ad9610e97 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c | |||
@@ -285,7 +285,7 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = { | |||
285 | OF_DEV_AUXDATA("st,nomadik-i2c", 0x80110000, "nmk-i2c.3", NULL), | 285 | OF_DEV_AUXDATA("st,nomadik-i2c", 0x80110000, "nmk-i2c.3", NULL), |
286 | OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL), | 286 | OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL), |
287 | /* Requires device name bindings. */ | 287 | /* Requires device name bindings. */ |
288 | OF_DEV_AUXDATA("stericsson,nmk_pinctrl", U8500_PRCMU_BASE, | 288 | OF_DEV_AUXDATA("stericsson,nmk-pinctrl", U8500_PRCMU_BASE, |
289 | "pinctrl-db8500", NULL), | 289 | "pinctrl-db8500", NULL), |
290 | /* Requires clock name and DMA bindings. */ | 290 | /* Requires clock name and DMA bindings. */ |
291 | OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000, | 291 | OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000, |
diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h index 7d34c52798b5..d526dd8e87d3 100644 --- a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h +++ b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h | |||
@@ -38,15 +38,7 @@ | |||
38 | #define MOP500_STMPE1601_IRQ_END \ | 38 | #define MOP500_STMPE1601_IRQ_END \ |
39 | MOP500_STMPE1601_IRQ(STMPE_NR_INTERNAL_IRQS) | 39 | MOP500_STMPE1601_IRQ(STMPE_NR_INTERNAL_IRQS) |
40 | 40 | ||
41 | /* AB8500 virtual gpio IRQ */ | 41 | #define MOP500_NR_IRQS MOP500_STMPE1601_IRQ_END |
42 | #define AB8500_VIR_GPIO_NR_IRQS 16 | ||
43 | |||
44 | #define MOP500_AB8500_VIR_GPIO_IRQ_BASE \ | ||
45 | MOP500_STMPE1601_IRQ_END | ||
46 | #define MOP500_AB8500_VIR_GPIO_IRQ_END \ | ||
47 | (MOP500_AB8500_VIR_GPIO_IRQ_BASE + AB8500_VIR_GPIO_NR_IRQS) | ||
48 | |||
49 | #define MOP500_NR_IRQS MOP500_AB8500_VIR_GPIO_IRQ_END | ||
50 | 42 | ||
51 | #define MOP500_IRQ_END MOP500_NR_IRQS | 43 | #define MOP500_IRQ_END MOP500_NR_IRQS |
52 | 44 | ||
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 5d5929450366..a78827b70270 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/gfp.h> | 36 | #include <linux/gfp.h> |
37 | #include <linux/clkdev.h> | 37 | #include <linux/clkdev.h> |
38 | #include <linux/mtd/physmap.h> | 38 | #include <linux/mtd/physmap.h> |
39 | #include <linux/bitops.h> | ||
39 | 40 | ||
40 | #include <asm/irq.h> | 41 | #include <asm/irq.h> |
41 | #include <asm/hardware/arm_timer.h> | 42 | #include <asm/hardware/arm_timer.h> |
@@ -65,16 +66,28 @@ | |||
65 | #define VA_VIC_BASE __io_address(VERSATILE_VIC_BASE) | 66 | #define VA_VIC_BASE __io_address(VERSATILE_VIC_BASE) |
66 | #define VA_SIC_BASE __io_address(VERSATILE_SIC_BASE) | 67 | #define VA_SIC_BASE __io_address(VERSATILE_SIC_BASE) |
67 | 68 | ||
69 | /* These PIC IRQs are valid in each configuration */ | ||
70 | #define PIC_VALID_ALL BIT(SIC_INT_KMI0) | BIT(SIC_INT_KMI1) | \ | ||
71 | BIT(SIC_INT_SCI3) | BIT(SIC_INT_UART3) | \ | ||
72 | BIT(SIC_INT_CLCD) | BIT(SIC_INT_TOUCH) | \ | ||
73 | BIT(SIC_INT_KEYPAD) | BIT(SIC_INT_DoC) | \ | ||
74 | BIT(SIC_INT_USB) | BIT(SIC_INT_PCI0) | \ | ||
75 | BIT(SIC_INT_PCI1) | BIT(SIC_INT_PCI2) | \ | ||
76 | BIT(SIC_INT_PCI3) | ||
68 | #if 1 | 77 | #if 1 |
69 | #define IRQ_MMCI0A IRQ_VICSOURCE22 | 78 | #define IRQ_MMCI0A IRQ_VICSOURCE22 |
70 | #define IRQ_AACI IRQ_VICSOURCE24 | 79 | #define IRQ_AACI IRQ_VICSOURCE24 |
71 | #define IRQ_ETH IRQ_VICSOURCE25 | 80 | #define IRQ_ETH IRQ_VICSOURCE25 |
72 | #define PIC_MASK 0xFFD00000 | 81 | #define PIC_MASK 0xFFD00000 |
82 | #define PIC_VALID PIC_VALID_ALL | ||
73 | #else | 83 | #else |
74 | #define IRQ_MMCI0A IRQ_SIC_MMCI0A | 84 | #define IRQ_MMCI0A IRQ_SIC_MMCI0A |
75 | #define IRQ_AACI IRQ_SIC_AACI | 85 | #define IRQ_AACI IRQ_SIC_AACI |
76 | #define IRQ_ETH IRQ_SIC_ETH | 86 | #define IRQ_ETH IRQ_SIC_ETH |
77 | #define PIC_MASK 0 | 87 | #define PIC_MASK 0 |
88 | #define PIC_VALID PIC_VALID_ALL | BIT(SIC_INT_MMCI0A) | \ | ||
89 | BIT(SIC_INT_MMCI1A) | BIT(SIC_INT_AACI) | \ | ||
90 | BIT(SIC_INT_ETH) | ||
78 | #endif | 91 | #endif |
79 | 92 | ||
80 | /* Lookup table for finding a DT node that represents the vic instance */ | 93 | /* Lookup table for finding a DT node that represents the vic instance */ |
@@ -102,7 +115,7 @@ void __init versatile_init_irq(void) | |||
102 | VERSATILE_SIC_BASE); | 115 | VERSATILE_SIC_BASE); |
103 | 116 | ||
104 | fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START, | 117 | fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START, |
105 | IRQ_VICSOURCE31, ~PIC_MASK, np); | 118 | IRQ_VICSOURCE31, PIC_VALID, np); |
106 | 119 | ||
107 | /* | 120 | /* |
108 | * Interrupts on secondary controller from 0 to 8 are routed to | 121 | * Interrupts on secondary controller from 0 to 8 are routed to |
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c index 2f84f4094f13..e92e5e0705bc 100644 --- a/arch/arm/mach-versatile/pci.c +++ b/arch/arm/mach-versatile/pci.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/io.h> | 23 | #include <linux/io.h> |
24 | 24 | ||
25 | #include <mach/hardware.h> | 25 | #include <mach/hardware.h> |
26 | #include <mach/irqs.h> | ||
26 | #include <asm/irq.h> | 27 | #include <asm/irq.h> |
27 | #include <asm/mach/pci.h> | 28 | #include <asm/mach/pci.h> |
28 | 29 | ||
@@ -327,12 +328,12 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
327 | int irq; | 328 | int irq; |
328 | 329 | ||
329 | /* slot, pin, irq | 330 | /* slot, pin, irq |
330 | * 24 1 27 | 331 | * 24 1 IRQ_SIC_PCI0 |
331 | * 25 1 28 | 332 | * 25 1 IRQ_SIC_PCI1 |
332 | * 26 1 29 | 333 | * 26 1 IRQ_SIC_PCI2 |
333 | * 27 1 30 | 334 | * 27 1 IRQ_SIC_PCI3 |
334 | */ | 335 | */ |
335 | irq = 27 + ((slot - 24 + pin - 1) & 3); | 336 | irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3); |
336 | 337 | ||
337 | return irq; | 338 | return irq; |
338 | } | 339 | } |
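To make the arithmetic in versatile_map_irq() above concrete, here is the swizzle restated as a stand-alone sketch (illustrative only, not part of the patch): slot 25, pin 1 gives (25 - 24 + 1 - 1) & 3 = 1, i.e. IRQ_SIC_PCI1, matching the comment table, and the & 3 wraps higher slot/pin combinations back around within the four SIC PCI inputs.

/* Illustrative only: the swizzle used by versatile_map_irq() above.
 * slot 24..27, pin 1..4; the "& 3" rotates INTA..INTD across the
 * four SIC PCI interrupt inputs. */
static int pci_swizzle(int slot, int pin)
{
	return (slot - 24 + pin - 1) & 3;	/* 0 => IRQ_SIC_PCI0, ... */
}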
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 3fd629d5a513..025d17328730 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -629,8 +629,9 @@ config ARM_THUMBEE | |||
629 | make use of it. Say N for code that can run on CPUs without ThumbEE. | 629 | make use of it. Say N for code that can run on CPUs without ThumbEE. |
630 | 630 | ||
631 | config ARM_VIRT_EXT | 631 | config ARM_VIRT_EXT |
632 | bool "Native support for the ARM Virtualization Extensions" | 632 | bool |
633 | depends on MMU && CPU_V7 | 633 | depends on MMU |
634 | default y if CPU_V7 | ||
634 | help | 635 | help |
635 | Enable the kernel to make use of the ARM Virtualization | 636 | Enable the kernel to make use of the ARM Virtualization |
636 | Extensions to install hypervisors without run-time firmware | 637 | Extensions to install hypervisors without run-time firmware |
@@ -640,11 +641,6 @@ config ARM_VIRT_EXT | |||
640 | use of this feature. Refer to Documentation/arm/Booting for | 641 | use of this feature. Refer to Documentation/arm/Booting for |
641 | details. | 642 | details. |
642 | 643 | ||
643 | It is safe to enable this option even if the kernel may not be | ||
644 | booted in HYP mode, may not have support for the | ||
645 | virtualization extensions, or may be booted with a | ||
646 | non-compliant bootloader. | ||
647 | |||
648 | config SWP_EMULATE | 644 | config SWP_EMULATE |
649 | bool "Emulate SWP/SWPB instructions" | 645 | bool "Emulate SWP/SWPB instructions" |
650 | depends on !CPU_USE_DOMAINS && CPU_V7 | 646 | depends on !CPU_USE_DOMAINS && CPU_V7 |
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 8a9c4cb50a93..4e333fa2756f 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile | |||
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ | |||
6 | iomap.o | 6 | iomap.o |
7 | 7 | ||
8 | obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ | 8 | obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ |
9 | mmap.o pgd.o mmu.o vmregion.o | 9 | mmap.o pgd.o mmu.o |
10 | 10 | ||
11 | ifneq ($(CONFIG_MMU),y) | 11 | ifneq ($(CONFIG_MMU),y) |
12 | obj-y += nommu.o | 12 | obj-y += nommu.o |
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index bc4a5e9ebb78..7a0511191f6b 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
@@ -34,6 +34,9 @@ | |||
34 | * The ASID is used to tag entries in the CPU caches and TLBs. | 34 | * The ASID is used to tag entries in the CPU caches and TLBs. |
35 | * The context ID is used by debuggers and trace logic, and | 35 | * The context ID is used by debuggers and trace logic, and |
36 | * should be unique within all running processes. | 36 | * should be unique within all running processes. |
37 | * | ||
38 | * In big-endian operation, the two 32-bit words are swapped if accessed by | ||
39 | * non 64-bit operations. | ||
37 | */ | 40 | */ |
38 | #define ASID_FIRST_VERSION (1ULL << ASID_BITS) | 41 | #define ASID_FIRST_VERSION (1ULL << ASID_BITS) |
39 | #define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1) | 42 | #define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1) |
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index 99db769307ec..2dffc010cc41 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c | |||
@@ -1,4 +1,6 @@ | |||
1 | #include <linux/module.h> | ||
1 | #include <linux/kernel.h> | 2 | #include <linux/kernel.h> |
3 | #include <linux/slab.h> | ||
2 | 4 | ||
3 | #include <asm/cputype.h> | 5 | #include <asm/cputype.h> |
4 | #include <asm/idmap.h> | 6 | #include <asm/idmap.h> |
@@ -6,6 +8,7 @@ | |||
6 | #include <asm/pgtable.h> | 8 | #include <asm/pgtable.h> |
7 | #include <asm/sections.h> | 9 | #include <asm/sections.h> |
8 | #include <asm/system_info.h> | 10 | #include <asm/system_info.h> |
11 | #include <asm/virt.h> | ||
9 | 12 | ||
10 | pgd_t *idmap_pgd; | 13 | pgd_t *idmap_pgd; |
11 | 14 | ||
@@ -59,11 +62,17 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end, | |||
59 | } while (pud++, addr = next, addr != end); | 62 | } while (pud++, addr = next, addr != end); |
60 | } | 63 | } |
61 | 64 | ||
62 | static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) | 65 | static void identity_mapping_add(pgd_t *pgd, const char *text_start, |
66 | const char *text_end, unsigned long prot) | ||
63 | { | 67 | { |
64 | unsigned long prot, next; | 68 | unsigned long addr, end; |
69 | unsigned long next; | ||
70 | |||
71 | addr = virt_to_phys(text_start); | ||
72 | end = virt_to_phys(text_end); | ||
73 | |||
74 | prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF; | ||
65 | 75 | ||
66 | prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF; | ||
67 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) | 76 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) |
68 | prot |= PMD_BIT4; | 77 | prot |= PMD_BIT4; |
69 | 78 | ||
@@ -74,28 +83,52 @@ static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long e | |||
74 | } while (pgd++, addr = next, addr != end); | 83 | } while (pgd++, addr = next, addr != end); |
75 | } | 84 | } |
76 | 85 | ||
86 | #if defined(CONFIG_ARM_VIRT_EXT) && defined(CONFIG_ARM_LPAE) | ||
87 | pgd_t *hyp_pgd; | ||
88 | |||
89 | extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; | ||
90 | |||
91 | static int __init init_static_idmap_hyp(void) | ||
92 | { | ||
93 | hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); | ||
94 | if (!hyp_pgd) | ||
95 | return -ENOMEM; | ||
96 | |||
97 | pr_info("Setting up static HYP identity map for 0x%p - 0x%p\n", | ||
98 | __hyp_idmap_text_start, __hyp_idmap_text_end); | ||
99 | identity_mapping_add(hyp_pgd, __hyp_idmap_text_start, | ||
100 | __hyp_idmap_text_end, PMD_SECT_AP1); | ||
101 | |||
102 | return 0; | ||
103 | } | ||
104 | #else | ||
105 | static int __init init_static_idmap_hyp(void) | ||
106 | { | ||
107 | return 0; | ||
108 | } | ||
109 | #endif | ||
110 | |||
77 | extern char __idmap_text_start[], __idmap_text_end[]; | 111 | extern char __idmap_text_start[], __idmap_text_end[]; |
78 | 112 | ||
79 | static int __init init_static_idmap(void) | 113 | static int __init init_static_idmap(void) |
80 | { | 114 | { |
81 | phys_addr_t idmap_start, idmap_end; | 115 | int ret; |
82 | 116 | ||
83 | idmap_pgd = pgd_alloc(&init_mm); | 117 | idmap_pgd = pgd_alloc(&init_mm); |
84 | if (!idmap_pgd) | 118 | if (!idmap_pgd) |
85 | return -ENOMEM; | 119 | return -ENOMEM; |
86 | 120 | ||
87 | /* Add an identity mapping for the physical address of the section. */ | 121 | pr_info("Setting up static identity map for 0x%p - 0x%p\n", |
88 | idmap_start = virt_to_phys((void *)__idmap_text_start); | 122 | __idmap_text_start, __idmap_text_end); |
89 | idmap_end = virt_to_phys((void *)__idmap_text_end); | 123 | identity_mapping_add(idmap_pgd, __idmap_text_start, |
124 | __idmap_text_end, 0); | ||
90 | 125 | ||
91 | pr_info("Setting up static identity map for 0x%llx - 0x%llx\n", | 126 | ret = init_static_idmap_hyp(); |
92 | (long long)idmap_start, (long long)idmap_end); | ||
93 | identity_mapping_add(idmap_pgd, idmap_start, idmap_end); | ||
94 | 127 | ||
95 | /* Flush L1 for the hardware to see this page table content */ | 128 | /* Flush L1 for the hardware to see this page table content */ |
96 | flush_cache_louis(); | 129 | flush_cache_louis(); |
97 | 130 | ||
98 | return 0; | 131 | return ret; |
99 | } | 132 | } |
100 | early_initcall(init_static_idmap); | 133 | early_initcall(init_static_idmap); |
101 | 134 | ||
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 88fd86cf3d9a..04d9006eab1f 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -39,6 +39,70 @@ | |||
39 | #include <asm/mach/pci.h> | 39 | #include <asm/mach/pci.h> |
40 | #include "mm.h" | 40 | #include "mm.h" |
41 | 41 | ||
42 | |||
43 | LIST_HEAD(static_vmlist); | ||
44 | |||
45 | static struct static_vm *find_static_vm_paddr(phys_addr_t paddr, | ||
46 | size_t size, unsigned int mtype) | ||
47 | { | ||
48 | struct static_vm *svm; | ||
49 | struct vm_struct *vm; | ||
50 | |||
51 | list_for_each_entry(svm, &static_vmlist, list) { | ||
52 | vm = &svm->vm; | ||
53 | if (!(vm->flags & VM_ARM_STATIC_MAPPING)) | ||
54 | continue; | ||
55 | if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) | ||
56 | continue; | ||
57 | |||
58 | if (vm->phys_addr > paddr || | ||
59 | paddr + size - 1 > vm->phys_addr + vm->size - 1) | ||
60 | continue; | ||
61 | |||
62 | return svm; | ||
63 | } | ||
64 | |||
65 | return NULL; | ||
66 | } | ||
67 | |||
68 | struct static_vm *find_static_vm_vaddr(void *vaddr) | ||
69 | { | ||
70 | struct static_vm *svm; | ||
71 | struct vm_struct *vm; | ||
72 | |||
73 | list_for_each_entry(svm, &static_vmlist, list) { | ||
74 | vm = &svm->vm; | ||
75 | |||
76 | /* static_vmlist is sorted in ascending address order */ | ||
77 | if (vm->addr > vaddr) | ||
78 | break; | ||
79 | |||
80 | if (vm->addr <= vaddr && vm->addr + vm->size > vaddr) | ||
81 | return svm; | ||
82 | } | ||
83 | |||
84 | return NULL; | ||
85 | } | ||
86 | |||
87 | void __init add_static_vm_early(struct static_vm *svm) | ||
88 | { | ||
89 | struct static_vm *curr_svm; | ||
90 | struct vm_struct *vm; | ||
91 | void *vaddr; | ||
92 | |||
93 | vm = &svm->vm; | ||
94 | vm_area_add_early(vm); | ||
95 | vaddr = vm->addr; | ||
96 | |||
97 | list_for_each_entry(curr_svm, &static_vmlist, list) { | ||
98 | vm = &curr_svm->vm; | ||
99 | |||
100 | if (vm->addr > vaddr) | ||
101 | break; | ||
102 | } | ||
103 | list_add_tail(&svm->list, &curr_svm->list); | ||
104 | } | ||
105 | |||
42 | int ioremap_page(unsigned long virt, unsigned long phys, | 106 | int ioremap_page(unsigned long virt, unsigned long phys, |
43 | const struct mem_type *mtype) | 107 | const struct mem_type *mtype) |
44 | { | 108 | { |
@@ -197,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |||
197 | const struct mem_type *type; | 261 | const struct mem_type *type; |
198 | int err; | 262 | int err; |
199 | unsigned long addr; | 263 | unsigned long addr; |
200 | struct vm_struct * area; | 264 | struct vm_struct *area; |
265 | phys_addr_t paddr = __pfn_to_phys(pfn); | ||
201 | 266 | ||
202 | #ifndef CONFIG_ARM_LPAE | 267 | #ifndef CONFIG_ARM_LPAE |
203 | /* | 268 | /* |
204 | * High mappings must be supersection aligned | 269 | * High mappings must be supersection aligned |
205 | */ | 270 | */ |
206 | if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) | 271 | if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK)) |
207 | return NULL; | 272 | return NULL; |
208 | #endif | 273 | #endif |
209 | 274 | ||
@@ -219,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |||
219 | /* | 284 | /* |
219 | * Try to reuse one of the static mappings whenever possible. | 284 | * Try to reuse one of the static mappings whenever possible. |
221 | */ | 286 | */ |
222 | read_lock(&vmlist_lock); | 287 | if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) { |
223 | for (area = vmlist; area; area = area->next) { | 288 | struct static_vm *svm; |
224 | if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) | 289 | |
225 | break; | 290 | svm = find_static_vm_paddr(paddr, size, mtype); |
226 | if (!(area->flags & VM_ARM_STATIC_MAPPING)) | 291 | if (svm) { |
227 | continue; | 292 | addr = (unsigned long)svm->vm.addr; |
228 | if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) | 293 | addr += paddr - svm->vm.phys_addr; |
229 | continue; | 294 | return (void __iomem *) (offset + addr); |
230 | if (__phys_to_pfn(area->phys_addr) > pfn || | 295 | } |
231 | __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1) | ||
232 | continue; | ||
233 | /* we can drop the lock here as we know *area is static */ | ||
234 | read_unlock(&vmlist_lock); | ||
235 | addr = (unsigned long)area->addr; | ||
236 | addr += __pfn_to_phys(pfn) - area->phys_addr; | ||
237 | return (void __iomem *) (offset + addr); | ||
238 | } | 296 | } |
239 | read_unlock(&vmlist_lock); | ||
240 | 297 | ||
241 | /* | 298 | /* |
242 | * Don't allow RAM to be mapped - this causes problems with ARMv6+ | 299 | * Don't allow RAM to be mapped - this causes problems with ARMv6+ |
@@ -248,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |||
248 | if (!area) | 305 | if (!area) |
249 | return NULL; | 306 | return NULL; |
250 | addr = (unsigned long)area->addr; | 307 | addr = (unsigned long)area->addr; |
251 | area->phys_addr = __pfn_to_phys(pfn); | 308 | area->phys_addr = paddr; |
252 | 309 | ||
253 | #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) | 310 | #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) |
254 | if (DOMAIN_IO == 0 && | 311 | if (DOMAIN_IO == 0 && |
255 | (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) || | 312 | (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) || |
256 | cpu_is_xsc3()) && pfn >= 0x100000 && | 313 | cpu_is_xsc3()) && pfn >= 0x100000 && |
257 | !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) { | 314 | !((paddr | size | addr) & ~SUPERSECTION_MASK)) { |
258 | area->flags |= VM_ARM_SECTION_MAPPING; | 315 | area->flags |= VM_ARM_SECTION_MAPPING; |
259 | err = remap_area_supersections(addr, pfn, size, type); | 316 | err = remap_area_supersections(addr, pfn, size, type); |
260 | } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) { | 317 | } else if (!((paddr | size | addr) & ~PMD_MASK)) { |
261 | area->flags |= VM_ARM_SECTION_MAPPING; | 318 | area->flags |= VM_ARM_SECTION_MAPPING; |
262 | err = remap_area_sections(addr, pfn, size, type); | 319 | err = remap_area_sections(addr, pfn, size, type); |
263 | } else | 320 | } else |
264 | #endif | 321 | #endif |
265 | err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn), | 322 | err = ioremap_page_range(addr, addr + size, paddr, |
266 | __pgprot(type->prot_pte)); | 323 | __pgprot(type->prot_pte)); |
267 | 324 | ||
268 | if (err) { | 325 | if (err) { |
@@ -346,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached) | |||
346 | void __iounmap(volatile void __iomem *io_addr) | 403 | void __iounmap(volatile void __iomem *io_addr) |
347 | { | 404 | { |
348 | void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); | 405 | void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); |
349 | struct vm_struct *vm; | 406 | struct static_vm *svm; |
407 | |||
408 | /* If this is a static mapping, we must leave it alone */ | ||
409 | svm = find_static_vm_vaddr(addr); | ||
410 | if (svm) | ||
411 | return; | ||
350 | 412 | ||
351 | read_lock(&vmlist_lock); | ||
352 | for (vm = vmlist; vm; vm = vm->next) { | ||
353 | if (vm->addr > addr) | ||
354 | break; | ||
355 | if (!(vm->flags & VM_IOREMAP)) | ||
356 | continue; | ||
357 | /* If this is a static mapping we must leave it alone */ | ||
358 | if ((vm->flags & VM_ARM_STATIC_MAPPING) && | ||
359 | (vm->addr <= addr) && (vm->addr + vm->size > addr)) { | ||
360 | read_unlock(&vmlist_lock); | ||
361 | return; | ||
362 | } | ||
363 | #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) | 413 | #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) |
414 | { | ||
415 | struct vm_struct *vm; | ||
416 | |||
417 | vm = find_vm_area(addr); | ||
418 | |||
364 | /* | 419 | /* |
365 | * If this is a section based mapping we need to handle it | 420 | * If this is a section based mapping we need to handle it |
366 | * specially as the VM subsystem does not know how to handle | 421 | * specially as the VM subsystem does not know how to handle |
367 | * such a beast. | 422 | * such a beast. |
368 | */ | 423 | */ |
369 | if ((vm->addr == addr) && | 424 | if (vm && (vm->flags & VM_ARM_SECTION_MAPPING)) |
370 | (vm->flags & VM_ARM_SECTION_MAPPING)) { | ||
371 | unmap_area_sections((unsigned long)vm->addr, vm->size); | 425 | unmap_area_sections((unsigned long)vm->addr, vm->size); |
372 | break; | ||
373 | } | ||
374 | #endif | ||
375 | } | 426 | } |
376 | read_unlock(&vmlist_lock); | 427 | #endif |
377 | 428 | ||
378 | vunmap(addr); | 429 | vunmap(addr); |
379 | } | 430 | } |
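The rewritten reuse path above boils down to a physical-range containment test. A stand-alone restatement of that predicate (a hypothetical helper, not in the patch) using the same overflow-safe "- 1" form as find_static_vm_paddr():

#include <linux/types.h>

/* Sketch of the containment check in find_static_vm_paddr(): the requested
 * [paddr, paddr + size) must lie wholly inside the static mapping's
 * [base, base + len). Comparing last-byte values avoids overflow when a
 * range ends at the top of the physical address space. */
static bool covers(phys_addr_t base, size_t len,
		   phys_addr_t paddr, size_t size)
{
	return paddr >= base && paddr + size - 1 <= base + len - 1;
}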
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index a8ee92da3544..d5a4e9ad8f0f 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
@@ -1,4 +1,6 @@ | |||
1 | #ifdef CONFIG_MMU | 1 | #ifdef CONFIG_MMU |
2 | #include <linux/list.h> | ||
3 | #include <linux/vmalloc.h> | ||
2 | 4 | ||
3 | /* the upper-most page table pointer */ | 5 | /* the upper-most page table pointer */ |
4 | extern pmd_t *top_pmd; | 6 | extern pmd_t *top_pmd; |
@@ -65,6 +67,16 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page | |||
65 | /* consistent regions used by dma_alloc_attrs() */ | 67 | /* consistent regions used by dma_alloc_attrs() */ |
66 | #define VM_ARM_DMA_CONSISTENT 0x20000000 | 68 | #define VM_ARM_DMA_CONSISTENT 0x20000000 |
67 | 69 | ||
70 | |||
71 | struct static_vm { | ||
72 | struct vm_struct vm; | ||
73 | struct list_head list; | ||
74 | }; | ||
75 | |||
76 | extern struct list_head static_vmlist; | ||
77 | extern struct static_vm *find_static_vm_vaddr(void *vaddr); | ||
78 | extern __init void add_static_vm_early(struct static_vm *svm); | ||
79 | |||
68 | #endif | 80 | #endif |
69 | 81 | ||
70 | #ifdef CONFIG_ZONE_DMA | 82 | #ifdef CONFIG_ZONE_DMA |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index ce328c7f5c94..e95a996ab78f 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -57,6 +57,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK; | |||
57 | static unsigned int ecc_mask __initdata = 0; | 57 | static unsigned int ecc_mask __initdata = 0; |
58 | pgprot_t pgprot_user; | 58 | pgprot_t pgprot_user; |
59 | pgprot_t pgprot_kernel; | 59 | pgprot_t pgprot_kernel; |
60 | pgprot_t pgprot_hyp_device; | ||
61 | pgprot_t pgprot_s2; | ||
62 | pgprot_t pgprot_s2_device; | ||
60 | 63 | ||
61 | EXPORT_SYMBOL(pgprot_user); | 64 | EXPORT_SYMBOL(pgprot_user); |
62 | EXPORT_SYMBOL(pgprot_kernel); | 65 | EXPORT_SYMBOL(pgprot_kernel); |
@@ -66,34 +69,46 @@ struct cachepolicy { | |||
66 | unsigned int cr_mask; | 69 | unsigned int cr_mask; |
67 | pmdval_t pmd; | 70 | pmdval_t pmd; |
68 | pteval_t pte; | 71 | pteval_t pte; |
72 | pteval_t pte_s2; | ||
69 | }; | 73 | }; |
70 | 74 | ||
75 | #ifdef CONFIG_ARM_LPAE | ||
76 | #define s2_policy(policy) policy | ||
77 | #else | ||
78 | #define s2_policy(policy) 0 | ||
79 | #endif | ||
80 | |||
71 | static struct cachepolicy cache_policies[] __initdata = { | 81 | static struct cachepolicy cache_policies[] __initdata = { |
72 | { | 82 | { |
73 | .policy = "uncached", | 83 | .policy = "uncached", |
74 | .cr_mask = CR_W|CR_C, | 84 | .cr_mask = CR_W|CR_C, |
75 | .pmd = PMD_SECT_UNCACHED, | 85 | .pmd = PMD_SECT_UNCACHED, |
76 | .pte = L_PTE_MT_UNCACHED, | 86 | .pte = L_PTE_MT_UNCACHED, |
87 | .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED), | ||
77 | }, { | 88 | }, { |
78 | .policy = "buffered", | 89 | .policy = "buffered", |
79 | .cr_mask = CR_C, | 90 | .cr_mask = CR_C, |
80 | .pmd = PMD_SECT_BUFFERED, | 91 | .pmd = PMD_SECT_BUFFERED, |
81 | .pte = L_PTE_MT_BUFFERABLE, | 92 | .pte = L_PTE_MT_BUFFERABLE, |
93 | .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED), | ||
82 | }, { | 94 | }, { |
83 | .policy = "writethrough", | 95 | .policy = "writethrough", |
84 | .cr_mask = 0, | 96 | .cr_mask = 0, |
85 | .pmd = PMD_SECT_WT, | 97 | .pmd = PMD_SECT_WT, |
86 | .pte = L_PTE_MT_WRITETHROUGH, | 98 | .pte = L_PTE_MT_WRITETHROUGH, |
99 | .pte_s2 = s2_policy(L_PTE_S2_MT_WRITETHROUGH), | ||
87 | }, { | 100 | }, { |
88 | .policy = "writeback", | 101 | .policy = "writeback", |
89 | .cr_mask = 0, | 102 | .cr_mask = 0, |
90 | .pmd = PMD_SECT_WB, | 103 | .pmd = PMD_SECT_WB, |
91 | .pte = L_PTE_MT_WRITEBACK, | 104 | .pte = L_PTE_MT_WRITEBACK, |
105 | .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK), | ||
92 | }, { | 106 | }, { |
93 | .policy = "writealloc", | 107 | .policy = "writealloc", |
94 | .cr_mask = 0, | 108 | .cr_mask = 0, |
95 | .pmd = PMD_SECT_WBWA, | 109 | .pmd = PMD_SECT_WBWA, |
96 | .pte = L_PTE_MT_WRITEALLOC, | 110 | .pte = L_PTE_MT_WRITEALLOC, |
111 | .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK), | ||
97 | } | 112 | } |
98 | }; | 113 | }; |
99 | 114 | ||
@@ -310,6 +325,7 @@ static void __init build_mem_type_table(void) | |||
310 | struct cachepolicy *cp; | 325 | struct cachepolicy *cp; |
311 | unsigned int cr = get_cr(); | 326 | unsigned int cr = get_cr(); |
312 | pteval_t user_pgprot, kern_pgprot, vecs_pgprot; | 327 | pteval_t user_pgprot, kern_pgprot, vecs_pgprot; |
328 | pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot; | ||
313 | int cpu_arch = cpu_architecture(); | 329 | int cpu_arch = cpu_architecture(); |
314 | int i; | 330 | int i; |
315 | 331 | ||
@@ -421,6 +437,8 @@ static void __init build_mem_type_table(void) | |||
421 | */ | 437 | */ |
422 | cp = &cache_policies[cachepolicy]; | 438 | cp = &cache_policies[cachepolicy]; |
423 | vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; | 439 | vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; |
440 | s2_pgprot = cp->pte_s2; | ||
441 | hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte; | ||
424 | 442 | ||
425 | /* | 443 | /* |
426 | * ARMv6 and above have extended page tables. | 444 | * ARMv6 and above have extended page tables. |
@@ -444,6 +462,7 @@ static void __init build_mem_type_table(void) | |||
444 | user_pgprot |= L_PTE_SHARED; | 462 | user_pgprot |= L_PTE_SHARED; |
445 | kern_pgprot |= L_PTE_SHARED; | 463 | kern_pgprot |= L_PTE_SHARED; |
446 | vecs_pgprot |= L_PTE_SHARED; | 464 | vecs_pgprot |= L_PTE_SHARED; |
465 | s2_pgprot |= L_PTE_SHARED; | ||
447 | mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; | 466 | mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; |
448 | mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; | 467 | mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; |
449 | mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; | 468 | mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; |
@@ -498,6 +517,9 @@ static void __init build_mem_type_table(void) | |||
498 | pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); | 517 | pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); |
499 | pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | | 518 | pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | |
500 | L_PTE_DIRTY | kern_pgprot); | 519 | L_PTE_DIRTY | kern_pgprot); |
520 | pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot); | ||
521 | pgprot_s2_device = __pgprot(s2_device_pgprot); | ||
522 | pgprot_hyp_device = __pgprot(hyp_device_pgprot); | ||
501 | 523 | ||
502 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; | 524 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; |
503 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; | 525 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; |
@@ -757,21 +779,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr) | |||
757 | { | 779 | { |
758 | struct map_desc *md; | 780 | struct map_desc *md; |
759 | struct vm_struct *vm; | 781 | struct vm_struct *vm; |
782 | struct static_vm *svm; | ||
760 | 783 | ||
761 | if (!nr) | 784 | if (!nr) |
762 | return; | 785 | return; |
763 | 786 | ||
764 | vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm)); | 787 | svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm)); |
765 | 788 | ||
766 | for (md = io_desc; nr; md++, nr--) { | 789 | for (md = io_desc; nr; md++, nr--) { |
767 | create_mapping(md); | 790 | create_mapping(md); |
791 | |||
792 | vm = &svm->vm; | ||
768 | vm->addr = (void *)(md->virtual & PAGE_MASK); | 793 | vm->addr = (void *)(md->virtual & PAGE_MASK); |
769 | vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); | 794 | vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); |
770 | vm->phys_addr = __pfn_to_phys(md->pfn); | 795 | vm->phys_addr = __pfn_to_phys(md->pfn); |
771 | vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; | 796 | vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; |
772 | vm->flags |= VM_ARM_MTYPE(md->type); | 797 | vm->flags |= VM_ARM_MTYPE(md->type); |
773 | vm->caller = iotable_init; | 798 | vm->caller = iotable_init; |
774 | vm_area_add_early(vm++); | 799 | add_static_vm_early(svm++); |
775 | } | 800 | } |
776 | } | 801 | } |
777 | 802 | ||
@@ -779,13 +804,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size, | |||
779 | void *caller) | 804 | void *caller) |
780 | { | 805 | { |
781 | struct vm_struct *vm; | 806 | struct vm_struct *vm; |
807 | struct static_vm *svm; | ||
808 | |||
809 | svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm)); | ||
782 | 810 | ||
783 | vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm)); | 811 | vm = &svm->vm; |
784 | vm->addr = (void *)addr; | 812 | vm->addr = (void *)addr; |
785 | vm->size = size; | 813 | vm->size = size; |
786 | vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; | 814 | vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; |
787 | vm->caller = caller; | 815 | vm->caller = caller; |
788 | vm_area_add_early(vm); | 816 | add_static_vm_early(svm); |
789 | } | 817 | } |
790 | 818 | ||
791 | #ifndef CONFIG_ARM_LPAE | 819 | #ifndef CONFIG_ARM_LPAE |
@@ -810,14 +838,13 @@ static void __init pmd_empty_section_gap(unsigned long addr) | |||
810 | 838 | ||
811 | static void __init fill_pmd_gaps(void) | 839 | static void __init fill_pmd_gaps(void) |
812 | { | 840 | { |
841 | struct static_vm *svm; | ||
813 | struct vm_struct *vm; | 842 | struct vm_struct *vm; |
814 | unsigned long addr, next = 0; | 843 | unsigned long addr, next = 0; |
815 | pmd_t *pmd; | 844 | pmd_t *pmd; |
816 | 845 | ||
817 | /* we're still single threaded hence no lock needed here */ | 846 | list_for_each_entry(svm, &static_vmlist, list) { |
818 | for (vm = vmlist; vm; vm = vm->next) { | 847 | vm = &svm->vm; |
819 | if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING))) | ||
820 | continue; | ||
821 | addr = (unsigned long)vm->addr; | 848 | addr = (unsigned long)vm->addr; |
822 | if (addr < next) | 849 | if (addr < next) |
823 | continue; | 850 | continue; |
@@ -857,19 +884,12 @@ static void __init fill_pmd_gaps(void) | |||
857 | #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H) | 884 | #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H) |
858 | static void __init pci_reserve_io(void) | 885 | static void __init pci_reserve_io(void) |
859 | { | 886 | { |
860 | struct vm_struct *vm; | 887 | struct static_vm *svm; |
861 | unsigned long addr; | ||
862 | 888 | ||
863 | /* we're still single threaded hence no lock needed here */ | 889 | svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); |
864 | for (vm = vmlist; vm; vm = vm->next) { | 890 | if (svm) |
865 | if (!(vm->flags & VM_ARM_STATIC_MAPPING)) | 891 | return; |
866 | continue; | ||
867 | addr = (unsigned long)vm->addr; | ||
868 | addr &= ~(SZ_2M - 1); | ||
869 | if (addr == PCI_IO_VIRT_BASE) | ||
870 | return; | ||
871 | 892 | ||
872 | } | ||
873 | vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); | 893 | vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); |
874 | } | 894 | } |
875 | #else | 895 | #else |
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index eb6aa73bc8b7..f9a0aa725ea9 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S | |||
@@ -38,9 +38,14 @@ | |||
38 | 38 | ||
39 | /* | 39 | /* |
40 | * mmid - get context id from mm pointer (mm->context.id) | 40 | * mmid - get context id from mm pointer (mm->context.id) |
41 | * Note: this field is 64-bit; in big-endian the two words are swapped too. | ||
41 | */ | 42 | */ |
42 | .macro mmid, rd, rn | 43 | .macro mmid, rd, rn |
44 | #ifdef __ARMEB__ | ||
45 | ldr \rd, [\rn, #MM_CONTEXT_ID + 4 ] | ||
46 | #else | ||
43 | ldr \rd, [\rn, #MM_CONTEXT_ID] | 47 | ldr \rd, [\rn, #MM_CONTEXT_ID] |
48 | #endif | ||
44 | .endm | 49 | .endm |
45 | 50 | ||
46 | /* | 51 | /* |
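A hedged C restatement of why the big-endian case of the mmid macro adds 4 (illustrative, not code from the patch): a 32-bit load of the least-significant half of a 64-bit field must come from the higher address on a big-endian machine.

#include <stdint.h>

/* Illustrative only: fetch the low 32 bits of a 64-bit context id with a
 * 32-bit load, mirroring the mmid macro. On big-endian ARM the
 * least-significant word sits at the higher address, hence the +4. */
static inline uint32_t context_id_low(const uint64_t *id)
{
	const uint32_t *w = (const uint32_t *)id;
#ifdef __ARMEB__
	return w[1];	/* low word at offset +4 on big-endian */
#else
	return w[0];	/* low word at offset 0 on little-endian */
#endif
}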
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 09c5233f4dfc..bcaaa8de9325 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
@@ -101,7 +101,7 @@ ENTRY(cpu_v6_dcache_clean_area) | |||
101 | ENTRY(cpu_v6_switch_mm) | 101 | ENTRY(cpu_v6_switch_mm) |
102 | #ifdef CONFIG_MMU | 102 | #ifdef CONFIG_MMU |
103 | mov r2, #0 | 103 | mov r2, #0 |
104 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id | 104 | mmid r1, r1 @ get mm->context.id |
105 | ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) | 105 | ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) |
106 | ALT_UP(orr r0, r0, #TTB_FLAGS_UP) | 106 | ALT_UP(orr r0, r0, #TTB_FLAGS_UP) |
107 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | 107 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB |
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S index 6d98c13ab827..78f520bc0e99 100644 --- a/arch/arm/mm/proc-v7-2level.S +++ b/arch/arm/mm/proc-v7-2level.S | |||
@@ -40,7 +40,7 @@ | |||
40 | ENTRY(cpu_v7_switch_mm) | 40 | ENTRY(cpu_v7_switch_mm) |
41 | #ifdef CONFIG_MMU | 41 | #ifdef CONFIG_MMU |
42 | mov r2, #0 | 42 | mov r2, #0 |
43 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id | 43 | mmid r1, r1 @ get mm->context.id |
44 | ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) | 44 | ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) |
45 | ALT_UP(orr r0, r0, #TTB_FLAGS_UP) | 45 | ALT_UP(orr r0, r0, #TTB_FLAGS_UP) |
46 | #ifdef CONFIG_ARM_ERRATA_430973 | 46 | #ifdef CONFIG_ARM_ERRATA_430973 |
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 7b56386f9496..50bf1dafc9ea 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S | |||
@@ -47,7 +47,7 @@ | |||
47 | */ | 47 | */ |
48 | ENTRY(cpu_v7_switch_mm) | 48 | ENTRY(cpu_v7_switch_mm) |
49 | #ifdef CONFIG_MMU | 49 | #ifdef CONFIG_MMU |
50 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id | 50 | mmid r1, r1 @ get mm->context.id |
51 | and r3, r1, #0xff | 51 | and r3, r1, #0xff |
52 | mov r3, r3, lsl #(48 - 32) @ ASID | 52 | mov r3, r3, lsl #(48 - 32) @ ASID |
53 | mcrr p15, 0, r0, r3, c2 @ set TTB 0 | 53 | mcrr p15, 0, r0, r3, c2 @ set TTB 0 |
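To make the shift above concrete (a sketch under stated assumptions, not code from the patch): with LPAE the 64-bit TTBR0 carries the ASID in bits [55:48], and mcrr writes r0 as the low word and r3 as the high word, so placing an 8-bit ASID only needs lsl #(48 - 32) = 16 within the high word.

#include <linux/types.h>

/* Illustrative only: composing a 64-bit LPAE TTBR0 value with the ASID in
 * bits [55:48] -- the placement the "lsl #(48 - 32)" above achieves for
 * the high word handed to mcrr. */
static inline u64 ttbr0_with_asid(u64 pgd_phys, u8 asid)
{
	return pgd_phys | ((u64)asid << 48);
}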
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c deleted file mode 100644 index a631016e1f8f..000000000000 --- a/arch/arm/mm/vmregion.c +++ /dev/null | |||
@@ -1,205 +0,0 @@ | |||
1 | #include <linux/fs.h> | ||
2 | #include <linux/spinlock.h> | ||
3 | #include <linux/list.h> | ||
4 | #include <linux/proc_fs.h> | ||
5 | #include <linux/seq_file.h> | ||
6 | #include <linux/slab.h> | ||
7 | |||
8 | #include "vmregion.h" | ||
9 | |||
10 | /* | ||
11 | * VM region handling support. | ||
12 | * | ||
13 | * This should become something generic, handling VM region allocations for | ||
14 | * vmalloc and similar (ioremap, module space, etc). | ||
15 | * | ||
16 | * I envisage vmalloc()'s supporting vm_struct becoming: | ||
17 | * | ||
18 | * struct vm_struct { | ||
19 | * struct vmregion region; | ||
20 | * unsigned long flags; | ||
21 | * struct page **pages; | ||
22 | * unsigned int nr_pages; | ||
23 | * unsigned long phys_addr; | ||
24 | * }; | ||
25 | * | ||
26 | * get_vm_area() would then call vmregion_alloc with an appropriate | ||
27 | * struct vmregion head (eg): | ||
28 | * | ||
29 | * struct vmregion vmalloc_head = { | ||
30 | * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), | ||
31 | * .vm_start = VMALLOC_START, | ||
32 | * .vm_end = VMALLOC_END, | ||
33 | * }; | ||
34 | * | ||
35 | * However, vmalloc_head.vm_start is variable (typically, it is dependent on | ||
36 | * the amount of RAM found at boot time.) I would imagine that get_vm_area() | ||
37 | * would have to initialise this each time prior to calling vmregion_alloc(). | ||
38 | */ | ||
39 | |||
40 | struct arm_vmregion * | ||
41 | arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, | ||
42 | size_t size, gfp_t gfp, const void *caller) | ||
43 | { | ||
44 | unsigned long start = head->vm_start, addr = head->vm_end; | ||
45 | unsigned long flags; | ||
46 | struct arm_vmregion *c, *new; | ||
47 | |||
48 | if (head->vm_end - head->vm_start < size) { | ||
49 | printk(KERN_WARNING "%s: allocation too big (requested %#x)\n", | ||
50 | __func__, size); | ||
51 | goto out; | ||
52 | } | ||
53 | |||
54 | new = kmalloc(sizeof(struct arm_vmregion), gfp); | ||
55 | if (!new) | ||
56 | goto out; | ||
57 | |||
58 | new->caller = caller; | ||
59 | |||
60 | spin_lock_irqsave(&head->vm_lock, flags); | ||
61 | |||
62 | addr = rounddown(addr - size, align); | ||
63 | list_for_each_entry_reverse(c, &head->vm_list, vm_list) { | ||
64 | if (addr >= c->vm_end) | ||
65 | goto found; | ||
66 | addr = rounddown(c->vm_start - size, align); | ||
67 | if (addr < start) | ||
68 | goto nospc; | ||
69 | } | ||
70 | |||
71 | found: | ||
72 | /* | ||
73 | * Insert this entry after the one we found. | ||
74 | */ | ||
75 | list_add(&new->vm_list, &c->vm_list); | ||
76 | new->vm_start = addr; | ||
77 | new->vm_end = addr + size; | ||
78 | new->vm_active = 1; | ||
79 | |||
80 | spin_unlock_irqrestore(&head->vm_lock, flags); | ||
81 | return new; | ||
82 | |||
83 | nospc: | ||
84 | spin_unlock_irqrestore(&head->vm_lock, flags); | ||
85 | kfree(new); | ||
86 | out: | ||
87 | return NULL; | ||
88 | } | ||
89 | |||
90 | static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr) | ||
91 | { | ||
92 | struct arm_vmregion *c; | ||
93 | |||
94 | list_for_each_entry(c, &head->vm_list, vm_list) { | ||
95 | if (c->vm_active && c->vm_start == addr) | ||
96 | goto out; | ||
97 | } | ||
98 | c = NULL; | ||
99 | out: | ||
100 | return c; | ||
101 | } | ||
102 | |||
103 | struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr) | ||
104 | { | ||
105 | struct arm_vmregion *c; | ||
106 | unsigned long flags; | ||
107 | |||
108 | spin_lock_irqsave(&head->vm_lock, flags); | ||
109 | c = __arm_vmregion_find(head, addr); | ||
110 | spin_unlock_irqrestore(&head->vm_lock, flags); | ||
111 | return c; | ||
112 | } | ||
113 | |||
114 | struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr) | ||
115 | { | ||
116 | struct arm_vmregion *c; | ||
117 | unsigned long flags; | ||
118 | |||
119 | spin_lock_irqsave(&head->vm_lock, flags); | ||
120 | c = __arm_vmregion_find(head, addr); | ||
121 | if (c) | ||
122 | c->vm_active = 0; | ||
123 | spin_unlock_irqrestore(&head->vm_lock, flags); | ||
124 | return c; | ||
125 | } | ||
126 | |||
127 | void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c) | ||
128 | { | ||
129 | unsigned long flags; | ||
130 | |||
131 | spin_lock_irqsave(&head->vm_lock, flags); | ||
132 | list_del(&c->vm_list); | ||
133 | spin_unlock_irqrestore(&head->vm_lock, flags); | ||
134 | |||
135 | kfree(c); | ||
136 | } | ||
137 | |||
138 | #ifdef CONFIG_PROC_FS | ||
139 | static int arm_vmregion_show(struct seq_file *m, void *p) | ||
140 | { | ||
141 | struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list); | ||
142 | |||
143 | seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end, | ||
144 | c->vm_end - c->vm_start); | ||
145 | if (c->caller) | ||
146 | seq_printf(m, " %pS", (void *)c->caller); | ||
147 | seq_putc(m, '\n'); | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static void *arm_vmregion_start(struct seq_file *m, loff_t *pos) | ||
152 | { | ||
153 | struct arm_vmregion_head *h = m->private; | ||
154 | spin_lock_irq(&h->vm_lock); | ||
155 | return seq_list_start(&h->vm_list, *pos); | ||
156 | } | ||
157 | |||
158 | static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos) | ||
159 | { | ||
160 | struct arm_vmregion_head *h = m->private; | ||
161 | return seq_list_next(p, &h->vm_list, pos); | ||
162 | } | ||
163 | |||
164 | static void arm_vmregion_stop(struct seq_file *m, void *p) | ||
165 | { | ||
166 | struct arm_vmregion_head *h = m->private; | ||
167 | spin_unlock_irq(&h->vm_lock); | ||
168 | } | ||
169 | |||
170 | static const struct seq_operations arm_vmregion_ops = { | ||
171 | .start = arm_vmregion_start, | ||
172 | .stop = arm_vmregion_stop, | ||
173 | .next = arm_vmregion_next, | ||
174 | .show = arm_vmregion_show, | ||
175 | }; | ||
176 | |||
177 | static int arm_vmregion_open(struct inode *inode, struct file *file) | ||
178 | { | ||
179 | struct arm_vmregion_head *h = PDE(inode)->data; | ||
180 | int ret = seq_open(file, &arm_vmregion_ops); | ||
181 | if (!ret) { | ||
182 | struct seq_file *m = file->private_data; | ||
183 | m->private = h; | ||
184 | } | ||
185 | return ret; | ||
186 | } | ||
187 | |||
188 | static const struct file_operations arm_vmregion_fops = { | ||
189 | .open = arm_vmregion_open, | ||
190 | .read = seq_read, | ||
191 | .llseek = seq_lseek, | ||
192 | .release = seq_release, | ||
193 | }; | ||
194 | |||
195 | int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h) | ||
196 | { | ||
197 | proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h); | ||
198 | return 0; | ||
199 | } | ||
200 | #else | ||
201 | int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h) | ||
202 | { | ||
203 | return 0; | ||
204 | } | ||
205 | #endif | ||
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h deleted file mode 100644 index 0f5a5f2a2c7b..000000000000 --- a/arch/arm/mm/vmregion.h +++ /dev/null | |||
@@ -1,31 +0,0 @@ | |||
1 | #ifndef VMREGION_H | ||
2 | #define VMREGION_H | ||
3 | |||
4 | #include <linux/spinlock.h> | ||
5 | #include <linux/list.h> | ||
6 | |||
7 | struct page; | ||
8 | |||
9 | struct arm_vmregion_head { | ||
10 | spinlock_t vm_lock; | ||
11 | struct list_head vm_list; | ||
12 | unsigned long vm_start; | ||
13 | unsigned long vm_end; | ||
14 | }; | ||
15 | |||
16 | struct arm_vmregion { | ||
17 | struct list_head vm_list; | ||
18 | unsigned long vm_start; | ||
19 | unsigned long vm_end; | ||
20 | int vm_active; | ||
21 | const void *caller; | ||
22 | }; | ||
23 | |||
24 | struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *); | ||
25 | struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long); | ||
26 | struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long); | ||
27 | void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *); | ||
28 | |||
29 | int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *); | ||
30 | |||
31 | #endif | ||
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index f8f362aafee9..7c43569e3141 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -2,6 +2,7 @@ config ARM64 | |||
2 | def_bool y | 2 | def_bool y |
3 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | 3 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
4 | select ARCH_WANT_COMPAT_IPC_PARSE_VERSION | 4 | select ARCH_WANT_COMPAT_IPC_PARSE_VERSION |
5 | select ARCH_WANT_FRAME_POINTERS | ||
5 | select ARM_AMBA | 6 | select ARM_AMBA |
6 | select CLONE_BACKWARDS | 7 | select CLONE_BACKWARDS |
7 | select COMMON_CLK | 8 | select COMMON_CLK |
@@ -21,7 +22,6 @@ config ARM64 | |||
21 | select HAVE_GENERIC_DMA_COHERENT | 22 | select HAVE_GENERIC_DMA_COHERENT |
22 | select HAVE_GENERIC_HARDIRQS | 23 | select HAVE_GENERIC_HARDIRQS |
23 | select HAVE_HW_BREAKPOINT if PERF_EVENTS | 24 | select HAVE_HW_BREAKPOINT if PERF_EVENTS |
24 | select HAVE_IRQ_WORK | ||
25 | select HAVE_MEMBLOCK | 25 | select HAVE_MEMBLOCK |
26 | select HAVE_PERF_EVENTS | 26 | select HAVE_PERF_EVENTS |
27 | select IRQ_DOMAIN | 27 | select IRQ_DOMAIN |
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index d7553f2bda66..51493430f142 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug | |||
@@ -24,4 +24,21 @@ config DEBUG_STACK_USAGE | |||
24 | Enables the display of the minimum amount of free stack which each | 24 | Enables the display of the minimum amount of free stack which each |
25 | task has ever had available in the sysrq-T output. | 25 | task has ever had available in the sysrq-T output. |
26 | 26 | ||
27 | config EARLY_PRINTK | ||
28 | bool "Early printk support" | ||
29 | default y | ||
30 | help | ||
31 | Say Y here if you want to have an early console using the | ||
32 | earlyprintk=<name>[,<addr>][,<options>] kernel parameter. It | ||
33 | is assumed that the early console device has been initialised | ||
34 | by the boot loader prior to starting the Linux kernel. | ||
35 | |||
36 | config PID_IN_CONTEXTIDR | ||
37 | bool "Write the current PID to the CONTEXTIDR register" | ||
38 | help | ||
39 | Enabling this option causes the kernel to write the current PID to | ||
40 | the CONTEXTIDR register, at the expense of some additional | ||
41 | instructions during context switch. Say Y here only if you are | ||
42 | planning to use hardware trace tools with this kernel. | ||
43 | |||
27 | endmenu | 44 | endmenu |
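By way of example (hypothetical values; the supported console names and addresses are platform-specific and must match a UART the boot loader has already initialised, as the help text above requires), an early console might be requested on the kernel command line as:

	earlyprintk=pl011,0x9000000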
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 14a9d5a2b85b..e5fe4f99fe10 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild | |||
@@ -19,6 +19,7 @@ generic-y += ipcbuf.h | |||
19 | generic-y += irq_regs.h | 19 | generic-y += irq_regs.h |
20 | generic-y += kdebug.h | 20 | generic-y += kdebug.h |
21 | generic-y += kmap_types.h | 21 | generic-y += kmap_types.h |
22 | generic-y += kvm_para.h | ||
22 | generic-y += local.h | 23 | generic-y += local.h |
23 | generic-y += local64.h | 24 | generic-y += local64.h |
24 | generic-y += mman.h | 25 | generic-y += mman.h |
@@ -48,3 +49,4 @@ generic-y += trace_clock.h | |||
48 | generic-y += types.h | 49 | generic-y += types.h |
49 | generic-y += unaligned.h | 50 | generic-y += unaligned.h |
50 | generic-y += user.h | 51 | generic-y += user.h |
52 | generic-y += xor.h | ||
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 407717ba060e..836364468571 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h | |||
@@ -49,12 +49,12 @@ static inline void atomic_add(int i, atomic_t *v) | |||
49 | int result; | 49 | int result; |
50 | 50 | ||
51 | asm volatile("// atomic_add\n" | 51 | asm volatile("// atomic_add\n" |
52 | "1: ldxr %w0, [%3]\n" | 52 | "1: ldxr %w0, %2\n" |
53 | " add %w0, %w0, %w4\n" | 53 | " add %w0, %w0, %w3\n" |
54 | " stxr %w1, %w0, [%3]\n" | 54 | " stxr %w1, %w0, %2\n" |
55 | " cbnz %w1, 1b" | 55 | " cbnz %w1, 1b" |
56 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 56 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
57 | : "r" (&v->counter), "Ir" (i) | 57 | : "Ir" (i) |
58 | : "cc"); | 58 | : "cc"); |
59 | } | 59 | } |
60 | 60 | ||
@@ -64,13 +64,13 @@ static inline int atomic_add_return(int i, atomic_t *v) | |||
64 | int result; | 64 | int result; |
65 | 65 | ||
66 | asm volatile("// atomic_add_return\n" | 66 | asm volatile("// atomic_add_return\n" |
67 | "1: ldaxr %w0, [%3]\n" | 67 | "1: ldaxr %w0, %2\n" |
68 | " add %w0, %w0, %w4\n" | 68 | " add %w0, %w0, %w3\n" |
69 | " stlxr %w1, %w0, [%3]\n" | 69 | " stlxr %w1, %w0, %2\n" |
70 | " cbnz %w1, 1b" | 70 | " cbnz %w1, 1b" |
71 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 71 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
72 | : "r" (&v->counter), "Ir" (i) | 72 | : "Ir" (i) |
73 | : "cc"); | 73 | : "cc", "memory"); |
74 | 74 | ||
75 | return result; | 75 | return result; |
76 | } | 76 | } |
@@ -81,12 +81,12 @@ static inline void atomic_sub(int i, atomic_t *v) | |||
81 | int result; | 81 | int result; |
82 | 82 | ||
83 | asm volatile("// atomic_sub\n" | 83 | asm volatile("// atomic_sub\n" |
84 | "1: ldxr %w0, [%3]\n" | 84 | "1: ldxr %w0, %2\n" |
85 | " sub %w0, %w0, %w4\n" | 85 | " sub %w0, %w0, %w3\n" |
86 | " stxr %w1, %w0, [%3]\n" | 86 | " stxr %w1, %w0, %2\n" |
87 | " cbnz %w1, 1b" | 87 | " cbnz %w1, 1b" |
88 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 88 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
89 | : "r" (&v->counter), "Ir" (i) | 89 | : "Ir" (i) |
90 | : "cc"); | 90 | : "cc"); |
91 | } | 91 | } |
92 | 92 | ||
@@ -96,13 +96,13 @@ static inline int atomic_sub_return(int i, atomic_t *v) | |||
96 | int result; | 96 | int result; |
97 | 97 | ||
98 | asm volatile("// atomic_sub_return\n" | 98 | asm volatile("// atomic_sub_return\n" |
99 | "1: ldaxr %w0, [%3]\n" | 99 | "1: ldaxr %w0, %2\n" |
100 | " sub %w0, %w0, %w4\n" | 100 | " sub %w0, %w0, %w3\n" |
101 | " stlxr %w1, %w0, [%3]\n" | 101 | " stlxr %w1, %w0, %2\n" |
102 | " cbnz %w1, 1b" | 102 | " cbnz %w1, 1b" |
103 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 103 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
104 | : "r" (&v->counter), "Ir" (i) | 104 | : "Ir" (i) |
105 | : "cc"); | 105 | : "cc", "memory"); |
106 | 106 | ||
107 | return result; | 107 | return result; |
108 | } | 108 | } |
@@ -113,15 +113,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) | |||
113 | int oldval; | 113 | int oldval; |
114 | 114 | ||
115 | asm volatile("// atomic_cmpxchg\n" | 115 | asm volatile("// atomic_cmpxchg\n" |
116 | "1: ldaxr %w1, [%3]\n" | 116 | "1: ldaxr %w1, %2\n" |
117 | " cmp %w1, %w4\n" | 117 | " cmp %w1, %w3\n" |
118 | " b.ne 2f\n" | 118 | " b.ne 2f\n" |
119 | " stlxr %w0, %w5, [%3]\n" | 119 | " stlxr %w0, %w4, %2\n" |
120 | " cbnz %w0, 1b\n" | 120 | " cbnz %w0, 1b\n" |
121 | "2:" | 121 | "2:" |
122 | : "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter) | 122 | : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) |
123 | : "r" (&ptr->counter), "Ir" (old), "r" (new) | 123 | : "Ir" (old), "r" (new) |
124 | : "cc"); | 124 | : "cc", "memory"); |
125 | 125 | ||
126 | return oldval; | 126 | return oldval; |
127 | } | 127 | } |
@@ -131,12 +131,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) | |||
131 | unsigned long tmp, tmp2; | 131 | unsigned long tmp, tmp2; |
132 | 132 | ||
133 | asm volatile("// atomic_clear_mask\n" | 133 | asm volatile("// atomic_clear_mask\n" |
134 | "1: ldxr %0, [%3]\n" | 134 | "1: ldxr %0, %2\n" |
135 | " bic %0, %0, %4\n" | 135 | " bic %0, %0, %3\n" |
136 | " stxr %w1, %0, [%3]\n" | 136 | " stxr %w1, %0, %2\n" |
137 | " cbnz %w1, 1b" | 137 | " cbnz %w1, 1b" |
138 | : "=&r" (tmp), "=&r" (tmp2), "+o" (*addr) | 138 | : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr) |
139 | : "r" (addr), "Ir" (mask) | 139 | : "Ir" (mask) |
140 | : "cc"); | 140 | : "cc"); |
141 | } | 141 | } |
142 | 142 | ||
@@ -182,12 +182,12 @@ static inline void atomic64_add(u64 i, atomic64_t *v) | |||
182 | unsigned long tmp; | 182 | unsigned long tmp; |
183 | 183 | ||
184 | asm volatile("// atomic64_add\n" | 184 | asm volatile("// atomic64_add\n" |
185 | "1: ldxr %0, [%3]\n" | 185 | "1: ldxr %0, %2\n" |
186 | " add %0, %0, %4\n" | 186 | " add %0, %0, %3\n" |
187 | " stxr %w1, %0, [%3]\n" | 187 | " stxr %w1, %0, %2\n" |
188 | " cbnz %w1, 1b" | 188 | " cbnz %w1, 1b" |
189 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 189 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
190 | : "r" (&v->counter), "Ir" (i) | 190 | : "Ir" (i) |
191 | : "cc"); | 191 | : "cc"); |
192 | } | 192 | } |
193 | 193 | ||
@@ -197,13 +197,13 @@ static inline long atomic64_add_return(long i, atomic64_t *v) | |||
197 | unsigned long tmp; | 197 | unsigned long tmp; |
198 | 198 | ||
199 | asm volatile("// atomic64_add_return\n" | 199 | asm volatile("// atomic64_add_return\n" |
200 | "1: ldaxr %0, [%3]\n" | 200 | "1: ldaxr %0, %2\n" |
201 | " add %0, %0, %4\n" | 201 | " add %0, %0, %3\n" |
202 | " stlxr %w1, %0, [%3]\n" | 202 | " stlxr %w1, %0, %2\n" |
203 | " cbnz %w1, 1b" | 203 | " cbnz %w1, 1b" |
204 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 204 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
205 | : "r" (&v->counter), "Ir" (i) | 205 | : "Ir" (i) |
206 | : "cc"); | 206 | : "cc", "memory"); |
207 | 207 | ||
208 | return result; | 208 | return result; |
209 | } | 209 | } |
@@ -214,12 +214,12 @@ static inline void atomic64_sub(u64 i, atomic64_t *v) | |||
214 | unsigned long tmp; | 214 | unsigned long tmp; |
215 | 215 | ||
216 | asm volatile("// atomic64_sub\n" | 216 | asm volatile("// atomic64_sub\n" |
217 | "1: ldxr %0, [%3]\n" | 217 | "1: ldxr %0, %2\n" |
218 | " sub %0, %0, %4\n" | 218 | " sub %0, %0, %3\n" |
219 | " stxr %w1, %0, [%3]\n" | 219 | " stxr %w1, %0, %2\n" |
220 | " cbnz %w1, 1b" | 220 | " cbnz %w1, 1b" |
221 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 221 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
222 | : "r" (&v->counter), "Ir" (i) | 222 | : "Ir" (i) |
223 | : "cc"); | 223 | : "cc"); |
224 | } | 224 | } |
225 | 225 | ||
@@ -229,13 +229,13 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) | |||
229 | unsigned long tmp; | 229 | unsigned long tmp; |
230 | 230 | ||
231 | asm volatile("// atomic64_sub_return\n" | 231 | asm volatile("// atomic64_sub_return\n" |
232 | "1: ldaxr %0, [%3]\n" | 232 | "1: ldaxr %0, %2\n" |
233 | " sub %0, %0, %4\n" | 233 | " sub %0, %0, %3\n" |
234 | " stlxr %w1, %0, [%3]\n" | 234 | " stlxr %w1, %0, %2\n" |
235 | " cbnz %w1, 1b" | 235 | " cbnz %w1, 1b" |
236 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 236 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
237 | : "r" (&v->counter), "Ir" (i) | 237 | : "Ir" (i) |
238 | : "cc"); | 238 | : "cc", "memory"); |
239 | 239 | ||
240 | return result; | 240 | return result; |
241 | } | 241 | } |
@@ -246,15 +246,15 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) | |||
246 | unsigned long res; | 246 | unsigned long res; |
247 | 247 | ||
248 | asm volatile("// atomic64_cmpxchg\n" | 248 | asm volatile("// atomic64_cmpxchg\n" |
249 | "1: ldaxr %1, [%3]\n" | 249 | "1: ldaxr %1, %2\n" |
250 | " cmp %1, %4\n" | 250 | " cmp %1, %3\n" |
251 | " b.ne 2f\n" | 251 | " b.ne 2f\n" |
252 | " stlxr %w0, %5, [%3]\n" | 252 | " stlxr %w0, %4, %2\n" |
253 | " cbnz %w0, 1b\n" | 253 | " cbnz %w0, 1b\n" |
254 | "2:" | 254 | "2:" |
255 | : "=&r" (res), "=&r" (oldval), "+o" (ptr->counter) | 255 | : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter) |
256 | : "r" (&ptr->counter), "Ir" (old), "r" (new) | 256 | : "Ir" (old), "r" (new) |
257 | : "cc"); | 257 | : "cc", "memory"); |
258 | 258 | ||
259 | return oldval; | 259 | return oldval; |
260 | } | 260 | } |
@@ -267,15 +267,15 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) | |||
267 | unsigned long tmp; | 267 | unsigned long tmp; |
268 | 268 | ||
269 | asm volatile("// atomic64_dec_if_positive\n" | 269 | asm volatile("// atomic64_dec_if_positive\n" |
270 | "1: ldaxr %0, [%3]\n" | 270 | "1: ldaxr %0, %2\n" |
271 | " subs %0, %0, #1\n" | 271 | " subs %0, %0, #1\n" |
272 | " b.mi 2f\n" | 272 | " b.mi 2f\n" |
273 | " stlxr %w1, %0, [%3]\n" | 273 | " stlxr %w1, %0, %2\n" |
274 | " cbnz %w1, 1b\n" | 274 | " cbnz %w1, 1b\n" |
275 | "2:" | 275 | "2:" |
276 | : "=&r" (result), "=&r" (tmp), "+o" (v->counter) | 276 | : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) |
277 | : "r" (&v->counter) | 277 | : |
278 | : "cc"); | 278 | : "cc", "memory"); |
279 | 279 | ||
280 | return result; | 280 | return result; |
281 | } | 281 | } |
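The atomic.h hunks above all make the same transformation: the lock word moves from an address passed in a register (the old "+o" output plus an "r" (&v->counter) input) to a first-class memory operand via the "+Q" constraint, and the value-returning variants gain a "memory" clobber so they order surrounding accesses as the atomic API requires; the void-returning ops keep only "cc". A minimal sketch of the resulting idiom, assuming GCC inline asm on AArch64 (this function is illustration, not part of the patch):

        /* Hedged sketch of the "+Q" exclusive-access idiom; atomically
         * increments *p, mirroring the pattern used throughout the patch. */
        static inline void sketch_atomic_inc(int *p)
        {
                int result;
                unsigned long tmp;

                asm volatile(
                "1:     ldxr    %w0, %2\n"
                "       add     %w0, %w0, #1\n"
                "       stxr    %w1, %w0, %2\n"
                "       cbnz    %w1, 1b"
                : "=&r" (result), "=&r" (tmp), "+Q" (*p)
                :
                : "cc");
        }

With "+Q" the compiler knows exactly which memory the exclusives touch, so the separate address register and the over-broad "+o" constraint both disappear.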
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index e0e65b069d9e..968b5cbfc260 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h | |||
@@ -29,39 +29,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size | |||
29 | switch (size) { | 29 | switch (size) { |
30 | case 1: | 30 | case 1: |
31 | asm volatile("// __xchg1\n" | 31 | asm volatile("// __xchg1\n" |
32 | "1: ldaxrb %w0, [%3]\n" | 32 | "1: ldaxrb %w0, %2\n" |
33 | " stlxrb %w1, %w2, [%3]\n" | 33 | " stlxrb %w1, %w3, %2\n" |
34 | " cbnz %w1, 1b\n" | 34 | " cbnz %w1, 1b\n" |
35 | : "=&r" (ret), "=&r" (tmp) | 35 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) |
36 | : "r" (x), "r" (ptr) | 36 | : "r" (x) |
37 | : "memory", "cc"); | 37 | : "cc", "memory"); |
38 | break; | 38 | break; |
39 | case 2: | 39 | case 2: |
40 | asm volatile("// __xchg2\n" | 40 | asm volatile("// __xchg2\n" |
41 | "1: ldaxrh %w0, [%3]\n" | 41 | "1: ldaxrh %w0, %2\n" |
42 | " stlxrh %w1, %w2, [%3]\n" | 42 | " stlxrh %w1, %w3, %2\n" |
43 | " cbnz %w1, 1b\n" | 43 | " cbnz %w1, 1b\n" |
44 | : "=&r" (ret), "=&r" (tmp) | 44 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) |
45 | : "r" (x), "r" (ptr) | 45 | : "r" (x) |
46 | : "memory", "cc"); | 46 | : "cc", "memory"); |
47 | break; | 47 | break; |
48 | case 4: | 48 | case 4: |
49 | asm volatile("// __xchg4\n" | 49 | asm volatile("// __xchg4\n" |
50 | "1: ldaxr %w0, [%3]\n" | 50 | "1: ldaxr %w0, %2\n" |
51 | " stlxr %w1, %w2, [%3]\n" | 51 | " stlxr %w1, %w3, %2\n" |
52 | " cbnz %w1, 1b\n" | 52 | " cbnz %w1, 1b\n" |
53 | : "=&r" (ret), "=&r" (tmp) | 53 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) |
54 | : "r" (x), "r" (ptr) | 54 | : "r" (x) |
55 | : "memory", "cc"); | 55 | : "cc", "memory"); |
56 | break; | 56 | break; |
57 | case 8: | 57 | case 8: |
58 | asm volatile("// __xchg8\n" | 58 | asm volatile("// __xchg8\n" |
59 | "1: ldaxr %0, [%3]\n" | 59 | "1: ldaxr %0, %2\n" |
60 | " stlxr %w1, %2, [%3]\n" | 60 | " stlxr %w1, %3, %2\n" |
61 | " cbnz %w1, 1b\n" | 61 | " cbnz %w1, 1b\n" |
62 | : "=&r" (ret), "=&r" (tmp) | 62 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) |
63 | : "r" (x), "r" (ptr) | 63 | : "r" (x) |
64 | : "memory", "cc"); | 64 | : "cc", "memory"); |
65 | break; | 65 | break; |
66 | default: | 66 | default: |
67 | BUILD_BUG(); | 67 | BUILD_BUG(); |
@@ -82,14 +82,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
82 | case 1: | 82 | case 1: |
83 | do { | 83 | do { |
84 | asm volatile("// __cmpxchg1\n" | 84 | asm volatile("// __cmpxchg1\n" |
85 | " ldxrb %w1, [%2]\n" | 85 | " ldxrb %w1, %2\n" |
86 | " mov %w0, #0\n" | 86 | " mov %w0, #0\n" |
87 | " cmp %w1, %w3\n" | 87 | " cmp %w1, %w3\n" |
88 | " b.ne 1f\n" | 88 | " b.ne 1f\n" |
89 | " stxrb %w0, %w4, [%2]\n" | 89 | " stxrb %w0, %w4, %2\n" |
90 | "1:\n" | 90 | "1:\n" |
91 | : "=&r" (res), "=&r" (oldval) | 91 | : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr) |
92 | : "r" (ptr), "Ir" (old), "r" (new) | 92 | : "Ir" (old), "r" (new) |
93 | : "cc"); | 93 | : "cc"); |
94 | } while (res); | 94 | } while (res); |
95 | break; | 95 | break; |
@@ -97,29 +97,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
97 | case 2: | 97 | case 2: |
98 | do { | 98 | do { |
99 | asm volatile("// __cmpxchg2\n" | 99 | asm volatile("// __cmpxchg2\n" |
100 | " ldxrh %w1, [%2]\n" | 100 | " ldxrh %w1, %2\n" |
101 | " mov %w0, #0\n" | 101 | " mov %w0, #0\n" |
102 | " cmp %w1, %w3\n" | 102 | " cmp %w1, %w3\n" |
103 | " b.ne 1f\n" | 103 | " b.ne 1f\n" |
104 | " stxrh %w0, %w4, [%2]\n" | 104 | " stxrh %w0, %w4, %2\n" |
105 | "1:\n" | 105 | "1:\n" |
106 | : "=&r" (res), "=&r" (oldval) | 106 | : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr) |
107 | : "r" (ptr), "Ir" (old), "r" (new) | 107 | : "Ir" (old), "r" (new) |
108 | : "memory", "cc"); | 108 | : "cc"); |
109 | } while (res); | 109 | } while (res); |
110 | break; | 110 | break; |
111 | 111 | ||
112 | case 4: | 112 | case 4: |
113 | do { | 113 | do { |
114 | asm volatile("// __cmpxchg4\n" | 114 | asm volatile("// __cmpxchg4\n" |
115 | " ldxr %w1, [%2]\n" | 115 | " ldxr %w1, %2\n" |
116 | " mov %w0, #0\n" | 116 | " mov %w0, #0\n" |
117 | " cmp %w1, %w3\n" | 117 | " cmp %w1, %w3\n" |
118 | " b.ne 1f\n" | 118 | " b.ne 1f\n" |
119 | " stxr %w0, %w4, [%2]\n" | 119 | " stxr %w0, %w4, %2\n" |
120 | "1:\n" | 120 | "1:\n" |
121 | : "=&r" (res), "=&r" (oldval) | 121 | : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr) |
122 | : "r" (ptr), "Ir" (old), "r" (new) | 122 | : "Ir" (old), "r" (new) |
123 | : "cc"); | 123 | : "cc"); |
124 | } while (res); | 124 | } while (res); |
125 | break; | 125 | break; |
@@ -127,14 +127,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
127 | case 8: | 127 | case 8: |
128 | do { | 128 | do { |
129 | asm volatile("// __cmpxchg8\n" | 129 | asm volatile("// __cmpxchg8\n" |
130 | " ldxr %1, [%2]\n" | 130 | " ldxr %1, %2\n" |
131 | " mov %w0, #0\n" | 131 | " mov %w0, #0\n" |
132 | " cmp %1, %3\n" | 132 | " cmp %1, %3\n" |
133 | " b.ne 1f\n" | 133 | " b.ne 1f\n" |
134 | " stxr %w0, %4, [%2]\n" | 134 | " stxr %w0, %4, %2\n" |
135 | "1:\n" | 135 | "1:\n" |
136 | : "=&r" (res), "=&r" (oldval) | 136 | : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr) |
137 | : "r" (ptr), "Ir" (old), "r" (new) | 137 | : "Ir" (old), "r" (new) |
138 | : "cc"); | 138 | : "cc"); |
139 | } while (res); | 139 | } while (res); |
140 | break; | 140 | break; |
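Note that the plain __cmpxchg() variants above carry only a "cc" clobber; ordering for the public cmpxchg() macro is supplied by a barrier-wrapping __cmpxchg_mb() elsewhere in this header, outside the hunk. Callers normally wrap the primitive in a retry loop; a hedged sketch built on the generic cmpxchg() macro, which dispatches to __cmpxchg() by operand size (the loop is illustrative, not from the patch):

        /* Illustrative CAS-based add using cmpxchg(). */
        static inline int sketch_cas_add(int *p, int i)
        {
                int old, new;

                do {
                        old = *p;               /* snapshot current value */
                        new = old + i;
                } while (cmpxchg(p, old, new) != old);

                return new;
        }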
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 3468ae8439fa..c582fa316366 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h | |||
@@ -39,7 +39,7 @@ | |||
39 | " .popsection\n" \ | 39 | " .popsection\n" \ |
40 | : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ | 40 | : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ |
41 | : "r" (oparg), "Ir" (-EFAULT) \ | 41 | : "r" (oparg), "Ir" (-EFAULT) \ |
42 | : "cc") | 42 | : "cc", "memory") |
43 | 43 | ||
44 | static inline int | 44 | static inline int |
45 | futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) | 45 | futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) |
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index d2f05a608274..57f12c991de2 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h | |||
@@ -230,6 +230,9 @@ extern void __iounmap(volatile void __iomem *addr); | |||
230 | #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) | 230 | #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) |
231 | #define iounmap __iounmap | 231 | #define iounmap __iounmap |
232 | 232 | ||
233 | #define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF) | ||
234 | #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) | ||
235 | |||
233 | #define ARCH_HAS_IOREMAP_WC | 236 | #define ARCH_HAS_IOREMAP_WC |
234 | #include <asm-generic/iomap.h> | 237 | #include <asm-generic/iomap.h> |
235 | 238 | ||
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 1cac16a001cb..381f556b664e 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
@@ -43,6 +43,7 @@ | |||
43 | #define PAGE_OFFSET UL(0xffffffc000000000) | 43 | #define PAGE_OFFSET UL(0xffffffc000000000) |
44 | #define MODULES_END (PAGE_OFFSET) | 44 | #define MODULES_END (PAGE_OFFSET) |
45 | #define MODULES_VADDR (MODULES_END - SZ_64M) | 45 | #define MODULES_VADDR (MODULES_END - SZ_64M) |
46 | #define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M) | ||
46 | #define VA_BITS (39) | 47 | #define VA_BITS (39) |
47 | #define TASK_SIZE_64 (UL(1) << VA_BITS) | 48 | #define TASK_SIZE_64 (UL(1) << VA_BITS) |
48 | 49 | ||
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index d4f7fd5b9e33..2494fc01896a 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h | |||
@@ -26,5 +26,6 @@ typedef struct { | |||
26 | 26 | ||
27 | extern void paging_init(void); | 27 | extern void paging_init(void); |
28 | extern void setup_mm_for_reboot(void); | 28 | extern void setup_mm_for_reboot(void); |
29 | extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); | ||
29 | 30 | ||
30 | #endif | 31 | #endif |
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index f68465dee026..e2bc385adb6b 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h | |||
@@ -35,6 +35,21 @@ extern unsigned int cpu_last_asid; | |||
35 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); | 35 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); |
36 | void __new_context(struct mm_struct *mm); | 36 | void __new_context(struct mm_struct *mm); |
37 | 37 | ||
38 | #ifdef CONFIG_PID_IN_CONTEXTIDR | ||
39 | static inline void contextidr_thread_switch(struct task_struct *next) | ||
40 | { | ||
41 | asm( | ||
42 | " msr contextidr_el1, %0\n" | ||
43 | " isb" | ||
44 | : | ||
45 | : "r" (task_pid_nr(next))); | ||
46 | } | ||
47 | #else | ||
48 | static inline void contextidr_thread_switch(struct task_struct *next) | ||
49 | { | ||
50 | } | ||
51 | #endif | ||
52 | |||
38 | /* | 53 | /* |
39 | * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0. | 54 | * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0. |
40 | */ | 55 | */ |
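The new contextidr_thread_switch() hook publishes the incoming task's PID to CONTEXTIDR_EL1 on every context switch when CONFIG_PID_IN_CONTEXTIDR is set, so external debuggers and trace hardware can attribute activity to threads; the isb makes the write take effect before the switch completes. A hedged companion sketch, purely for illustration:

        /* Illustrative read-back of the register written above (EL1 only). */
        static inline unsigned long sketch_read_contextidr(void)
        {
                unsigned long pid;

                asm("mrs %0, contextidr_el1" : "=r" (pid));
                return pid;
        }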
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index a6fffd511c5e..d26d1d53c0d7 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h | |||
@@ -17,6 +17,11 @@ | |||
17 | #ifndef __ASM_PERF_EVENT_H | 17 | #ifndef __ASM_PERF_EVENT_H |
18 | #define __ASM_PERF_EVENT_H | 18 | #define __ASM_PERF_EVENT_H |
19 | 19 | ||
20 | /* It's quiet around here... */ | 20 | #ifdef CONFIG_HW_PERF_EVENTS |
21 | struct pt_regs; | ||
22 | extern unsigned long perf_instruction_pointer(struct pt_regs *regs); | ||
23 | extern unsigned long perf_misc_flags(struct pt_regs *regs); | ||
24 | #define perf_misc_flags(regs) perf_misc_flags(regs) | ||
25 | #endif | ||
21 | 26 | ||
22 | #endif | 27 | #endif |
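The self-referential "#define perf_misc_flags(regs) perf_misc_flags(regs)" is the perf core's opt-in idiom: the core only uses an architecture's hook when a macro of that name is defined, and falls back to a generic default otherwise. Roughly what the consuming side looks like (a simplified sketch of the fallback in the core perf headers, not part of this patch):

        /* Simplified sketch of the core's fallback when an arch does not
         * define the macro; the real code lives in include/linux/perf_event.h. */
        #ifndef perf_misc_flags
        # define perf_misc_flags(regs) \
                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
        #endif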
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h new file mode 100644 index 000000000000..0604237ecd99 --- /dev/null +++ b/arch/arm64/include/asm/psci.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * Copyright (C) 2013 ARM Limited | ||
12 | */ | ||
13 | |||
14 | #ifndef __ASM_PSCI_H | ||
15 | #define __ASM_PSCI_H | ||
16 | |||
17 | #define PSCI_POWER_STATE_TYPE_STANDBY 0 | ||
18 | #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 | ||
19 | |||
20 | struct psci_power_state { | ||
21 | u16 id; | ||
22 | u8 type; | ||
23 | u8 affinity_level; | ||
24 | }; | ||
25 | |||
26 | struct psci_operations { | ||
27 | int (*cpu_suspend)(struct psci_power_state state, | ||
28 | unsigned long entry_point); | ||
29 | int (*cpu_off)(struct psci_power_state state); | ||
30 | int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); | ||
31 | int (*migrate)(unsigned long cpuid); | ||
32 | }; | ||
33 | |||
34 | extern struct psci_operations psci_ops; | ||
35 | |||
36 | int psci_init(void); | ||
37 | |||
38 | #endif /* __ASM_PSCI_H */ | ||
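psci_init(), added later in this series, fills in only the operations the firmware's device-tree node advertises, so callers must NULL-check each member before use. A hedged usage sketch:

        /* Illustrative caller: bring a secondary CPU up through PSCI if
         * the firmware advertised cpu_on; 'entry' is a physical address. */
        static int sketch_boot_secondary(unsigned long cpu, unsigned long entry)
        {
                if (!psci_ops.cpu_on)
                        return -EOPNOTSUPP;

                return psci_ops.cpu_on(cpu, entry);
        }

This mirrors what smp_psci_prepare_cpu() does further down in this diff.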
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 4ce845f8ee1c..41a71ee4c3df 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h | |||
@@ -42,6 +42,16 @@ | |||
42 | #define COMPAT_PSR_MODE_UND 0x0000001b | 42 | #define COMPAT_PSR_MODE_UND 0x0000001b |
43 | #define COMPAT_PSR_MODE_SYS 0x0000001f | 43 | #define COMPAT_PSR_MODE_SYS 0x0000001f |
44 | #define COMPAT_PSR_T_BIT 0x00000020 | 44 | #define COMPAT_PSR_T_BIT 0x00000020 |
45 | #define COMPAT_PSR_F_BIT 0x00000040 | ||
46 | #define COMPAT_PSR_I_BIT 0x00000080 | ||
47 | #define COMPAT_PSR_A_BIT 0x00000100 | ||
48 | #define COMPAT_PSR_E_BIT 0x00000200 | ||
49 | #define COMPAT_PSR_J_BIT 0x01000000 | ||
50 | #define COMPAT_PSR_Q_BIT 0x08000000 | ||
51 | #define COMPAT_PSR_V_BIT 0x10000000 | ||
52 | #define COMPAT_PSR_C_BIT 0x20000000 | ||
53 | #define COMPAT_PSR_Z_BIT 0x40000000 | ||
54 | #define COMPAT_PSR_N_BIT 0x80000000 | ||
45 | #define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ | 55 | #define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ |
46 | /* | 56 | /* |
47 | * These are 'magic' values for PTRACE_PEEKUSR that return info about where a | 57 | * These are 'magic' values for PTRACE_PEEKUSR that return info about where a |
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index 7e34295f78e3..4b8023c5d146 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h | |||
@@ -66,4 +66,15 @@ extern volatile unsigned long secondary_holding_pen_release; | |||
66 | extern void arch_send_call_function_single_ipi(int cpu); | 66 | extern void arch_send_call_function_single_ipi(int cpu); |
67 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); | 67 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); |
68 | 68 | ||
69 | struct device_node; | ||
70 | |||
71 | struct smp_enable_ops { | ||
72 | const char *name; | ||
73 | int (*init_cpu)(struct device_node *, int); | ||
74 | int (*prepare_cpu)(int); | ||
75 | }; | ||
76 | |||
77 | extern const struct smp_enable_ops smp_spin_table_ops; | ||
78 | extern const struct smp_enable_ops smp_psci_ops; | ||
79 | |||
69 | #endif /* ifndef __ASM_SMP_H */ | 80 | #endif /* ifndef __ASM_SMP_H */ |
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 41112fe2f8b1..7065e920149d 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h | |||
@@ -45,13 +45,13 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) | |||
45 | asm volatile( | 45 | asm volatile( |
46 | " sevl\n" | 46 | " sevl\n" |
47 | "1: wfe\n" | 47 | "1: wfe\n" |
48 | "2: ldaxr %w0, [%1]\n" | 48 | "2: ldaxr %w0, %1\n" |
49 | " cbnz %w0, 1b\n" | 49 | " cbnz %w0, 1b\n" |
50 | " stxr %w0, %w2, [%1]\n" | 50 | " stxr %w0, %w2, %1\n" |
51 | " cbnz %w0, 2b\n" | 51 | " cbnz %w0, 2b\n" |
52 | : "=&r" (tmp) | 52 | : "=&r" (tmp), "+Q" (lock->lock) |
53 | : "r" (&lock->lock), "r" (1) | 53 | : "r" (1) |
54 | : "memory"); | 54 | : "cc", "memory"); |
55 | } | 55 | } |
56 | 56 | ||
57 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | 57 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
@@ -59,13 +59,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
59 | unsigned int tmp; | 59 | unsigned int tmp; |
60 | 60 | ||
61 | asm volatile( | 61 | asm volatile( |
62 | " ldaxr %w0, [%1]\n" | 62 | " ldaxr %w0, %1\n" |
63 | " cbnz %w0, 1f\n" | 63 | " cbnz %w0, 1f\n" |
64 | " stxr %w0, %w2, [%1]\n" | 64 | " stxr %w0, %w2, %1\n" |
65 | "1:\n" | 65 | "1:\n" |
66 | : "=&r" (tmp) | 66 | : "=&r" (tmp), "+Q" (lock->lock) |
67 | : "r" (&lock->lock), "r" (1) | 67 | : "r" (1) |
68 | : "memory"); | 68 | : "cc", "memory"); |
69 | 69 | ||
70 | return !tmp; | 70 | return !tmp; |
71 | } | 71 | } |
@@ -73,8 +73,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
73 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | 73 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
74 | { | 74 | { |
75 | asm volatile( | 75 | asm volatile( |
76 | " stlr %w1, [%0]\n" | 76 | " stlr %w1, %0\n" |
77 | : : "r" (&lock->lock), "r" (0) : "memory"); | 77 | : "=Q" (lock->lock) : "r" (0) : "memory"); |
78 | } | 78 | } |
79 | 79 | ||
80 | /* | 80 | /* |
@@ -94,13 +94,13 @@ static inline void arch_write_lock(arch_rwlock_t *rw) | |||
94 | asm volatile( | 94 | asm volatile( |
95 | " sevl\n" | 95 | " sevl\n" |
96 | "1: wfe\n" | 96 | "1: wfe\n" |
97 | "2: ldaxr %w0, [%1]\n" | 97 | "2: ldaxr %w0, %1\n" |
98 | " cbnz %w0, 1b\n" | 98 | " cbnz %w0, 1b\n" |
99 | " stxr %w0, %w2, [%1]\n" | 99 | " stxr %w0, %w2, %1\n" |
100 | " cbnz %w0, 2b\n" | 100 | " cbnz %w0, 2b\n" |
101 | : "=&r" (tmp) | 101 | : "=&r" (tmp), "+Q" (rw->lock) |
102 | : "r" (&rw->lock), "r" (0x80000000) | 102 | : "r" (0x80000000) |
103 | : "memory"); | 103 | : "cc", "memory"); |
104 | } | 104 | } |
105 | 105 | ||
106 | static inline int arch_write_trylock(arch_rwlock_t *rw) | 106 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
@@ -108,13 +108,13 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) | |||
108 | unsigned int tmp; | 108 | unsigned int tmp; |
109 | 109 | ||
110 | asm volatile( | 110 | asm volatile( |
111 | " ldaxr %w0, [%1]\n" | 111 | " ldaxr %w0, %1\n" |
112 | " cbnz %w0, 1f\n" | 112 | " cbnz %w0, 1f\n" |
113 | " stxr %w0, %w2, [%1]\n" | 113 | " stxr %w0, %w2, %1\n" |
114 | "1:\n" | 114 | "1:\n" |
115 | : "=&r" (tmp) | 115 | : "=&r" (tmp), "+Q" (rw->lock) |
116 | : "r" (&rw->lock), "r" (0x80000000) | 116 | : "r" (0x80000000) |
117 | : "memory"); | 117 | : "cc", "memory"); |
118 | 118 | ||
119 | return !tmp; | 119 | return !tmp; |
120 | } | 120 | } |
@@ -122,8 +122,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) | |||
122 | static inline void arch_write_unlock(arch_rwlock_t *rw) | 122 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
123 | { | 123 | { |
124 | asm volatile( | 124 | asm volatile( |
125 | " stlr %w1, [%0]\n" | 125 | " stlr %w1, %0\n" |
126 | : : "r" (&rw->lock), "r" (0) : "memory"); | 126 | : "=Q" (rw->lock) : "r" (0) : "memory"); |
127 | } | 127 | } |
128 | 128 | ||
129 | /* write_can_lock - would write_trylock() succeed? */ | 129 | /* write_can_lock - would write_trylock() succeed? */ |
@@ -148,14 +148,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw) | |||
148 | asm volatile( | 148 | asm volatile( |
149 | " sevl\n" | 149 | " sevl\n" |
150 | "1: wfe\n" | 150 | "1: wfe\n" |
151 | "2: ldaxr %w0, [%2]\n" | 151 | "2: ldaxr %w0, %2\n" |
152 | " add %w0, %w0, #1\n" | 152 | " add %w0, %w0, #1\n" |
153 | " tbnz %w0, #31, 1b\n" | 153 | " tbnz %w0, #31, 1b\n" |
154 | " stxr %w1, %w0, [%2]\n" | 154 | " stxr %w1, %w0, %2\n" |
155 | " cbnz %w1, 2b\n" | 155 | " cbnz %w1, 2b\n" |
156 | : "=&r" (tmp), "=&r" (tmp2) | 156 | : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) |
157 | : "r" (&rw->lock) | 157 | : |
158 | : "memory"); | 158 | : "cc", "memory"); |
159 | } | 159 | } |
160 | 160 | ||
161 | static inline void arch_read_unlock(arch_rwlock_t *rw) | 161 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
@@ -163,13 +163,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) | |||
163 | unsigned int tmp, tmp2; | 163 | unsigned int tmp, tmp2; |
164 | 164 | ||
165 | asm volatile( | 165 | asm volatile( |
166 | "1: ldxr %w0, [%2]\n" | 166 | "1: ldxr %w0, %2\n" |
167 | " sub %w0, %w0, #1\n" | 167 | " sub %w0, %w0, #1\n" |
168 | " stlxr %w1, %w0, [%2]\n" | 168 | " stlxr %w1, %w0, %2\n" |
169 | " cbnz %w1, 1b\n" | 169 | " cbnz %w1, 1b\n" |
170 | : "=&r" (tmp), "=&r" (tmp2) | 170 | : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) |
171 | : "r" (&rw->lock) | 171 | : |
172 | : "memory"); | 172 | : "cc", "memory"); |
173 | } | 173 | } |
174 | 174 | ||
175 | static inline int arch_read_trylock(arch_rwlock_t *rw) | 175 | static inline int arch_read_trylock(arch_rwlock_t *rw) |
@@ -177,14 +177,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) | |||
177 | unsigned int tmp, tmp2 = 1; | 177 | unsigned int tmp, tmp2 = 1; |
178 | 178 | ||
179 | asm volatile( | 179 | asm volatile( |
180 | " ldaxr %w0, [%2]\n" | 180 | " ldaxr %w0, %2\n" |
181 | " add %w0, %w0, #1\n" | 181 | " add %w0, %w0, #1\n" |
182 | " tbnz %w0, #31, 1f\n" | 182 | " tbnz %w0, #31, 1f\n" |
183 | " stxr %w1, %w0, [%2]\n" | 183 | " stxr %w1, %w0, %2\n" |
184 | "1:\n" | 184 | "1:\n" |
185 | : "=&r" (tmp), "+r" (tmp2) | 185 | : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock) |
186 | : "r" (&rw->lock) | 186 | : |
187 | : "memory"); | 187 | : "cc", "memory"); |
188 | 188 | ||
189 | return !tmp2; | 189 | return !tmp2; |
190 | } | 190 | } |
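The spinlock conversion follows the same "+Q" pattern, and the unlock paths shrink to a single store-release (stlr through an "=Q" output). No explicit barriers are needed: ldaxr gives acquire semantics on lock, stlr gives release semantics on unlock, and a CPU parked in wfe is woken by the event generated when the unlocking store clears the lock word's exclusive monitor. A rough C11-atomics analogue of the lock/unlock pair, illustrative only and omitting the wfe/sevl wait loop:

        /* Illustrative acquire/release lock sketch using GCC builtins. */
        static inline void sketch_lock(unsigned int *lock)
        {
                unsigned int expected;

                do {
                        expected = 0;   /* CAS rewrites this on failure */
                } while (!__atomic_compare_exchange_n(lock, &expected, 1, 0,
                                                      __ATOMIC_ACQUIRE,
                                                      __ATOMIC_RELAXED));
        }

        static inline void sketch_unlock(unsigned int *lock)
        {
                __atomic_store_n(lock, 0, __ATOMIC_RELEASE);    /* ~ stlr */
        }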
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild index ca5b65f75c7b..e4b78bdca19e 100644 --- a/arch/arm64/include/uapi/asm/Kbuild +++ b/arch/arm64/include/uapi/asm/Kbuild | |||
@@ -1,11 +1,14 @@ | |||
1 | # UAPI Header export list | 1 | # UAPI Header export list |
2 | include include/uapi/asm-generic/Kbuild.asm | 2 | include include/uapi/asm-generic/Kbuild.asm |
3 | 3 | ||
4 | generic-y += kvm_para.h | ||
5 | |||
4 | header-y += auxvec.h | 6 | header-y += auxvec.h |
5 | header-y += bitsperlong.h | 7 | header-y += bitsperlong.h |
6 | header-y += byteorder.h | 8 | header-y += byteorder.h |
7 | header-y += fcntl.h | 9 | header-y += fcntl.h |
8 | header-y += hwcap.h | 10 | header-y += hwcap.h |
11 | header-y += kvm_para.h | ||
9 | header-y += param.h | 12 | header-y += param.h |
10 | header-y += ptrace.h | 13 | header-y += ptrace.h |
11 | header-y += setup.h | 14 | header-y += setup.h |
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 74239c31e25a..7b4b564961d4 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile | |||
@@ -9,14 +9,15 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) | |||
9 | arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ | 9 | arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ |
10 | entry-fpsimd.o process.o ptrace.o setup.o signal.o \ | 10 | entry-fpsimd.o process.o ptrace.o setup.o signal.o \ |
11 | sys.o stacktrace.o time.o traps.o io.o vdso.o \ | 11 | sys.o stacktrace.o time.o traps.o io.o vdso.o \ |
12 | hyp-stub.o | 12 | hyp-stub.o psci.o |
13 | 13 | ||
14 | arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ | 14 | arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ |
15 | sys_compat.o | 15 | sys_compat.o |
16 | arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o | 16 | arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o |
17 | arm64-obj-$(CONFIG_SMP) += smp.o | 17 | arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o |
18 | arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o | 18 | arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o |
19 | arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o | 19 | arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o |
20 | arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | ||
20 | 21 | ||
21 | obj-y += $(arm64-obj-y) vdso/ | 22 | obj-y += $(arm64-obj-y) vdso/ |
22 | obj-m += $(arm64-obj-m) | 23 | obj-m += $(arm64-obj-m) |
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c new file mode 100644 index 000000000000..7e320a2edb9b --- /dev/null +++ b/arch/arm64/kernel/early_printk.c | |||
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | * Earlyprintk support. | ||
3 | * | ||
4 | * Copyright (C) 2012 ARM Ltd. | ||
5 | * Author: Catalin Marinas <catalin.marinas@arm.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/console.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/io.h> | ||
25 | |||
26 | #include <linux/amba/serial.h> | ||
27 | |||
28 | static void __iomem *early_base; | ||
29 | static void (*printch)(char ch); | ||
30 | |||
31 | /* | ||
32 | * PL011 single character TX. | ||
33 | */ | ||
34 | static void pl011_printch(char ch) | ||
35 | { | ||
36 | while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_TXFF) | ||
37 | ; | ||
38 | writeb_relaxed(ch, early_base + UART01x_DR); | ||
39 | while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_BUSY) | ||
40 | ; | ||
41 | } | ||
42 | |||
43 | struct earlycon_match { | ||
44 | const char *name; | ||
45 | void (*printch)(char ch); | ||
46 | }; | ||
47 | |||
48 | static const struct earlycon_match earlycon_match[] __initconst = { | ||
49 | { .name = "pl011", .printch = pl011_printch, }, | ||
50 | {} | ||
51 | }; | ||
52 | |||
53 | static void early_write(struct console *con, const char *s, unsigned n) | ||
54 | { | ||
55 | while (n-- > 0) { | ||
56 | if (*s == '\n') | ||
57 | printch('\r'); | ||
58 | printch(*s); | ||
59 | s++; | ||
60 | } | ||
61 | } | ||
62 | |||
63 | static struct console early_console = { | ||
64 | .name = "earlycon", | ||
65 | .write = early_write, | ||
66 | .flags = CON_PRINTBUFFER | CON_BOOT, | ||
67 | .index = -1, | ||
68 | }; | ||
69 | |||
70 | /* | ||
71 | * Parse earlyprintk=... parameter in the format: | ||
72 | * | ||
73 | * <name>[,<addr>][,<options>] | ||
74 | * | ||
75 | * and register the early console. It is assumed that the UART has been | ||
76 | * initialised by the bootloader already. | ||
77 | */ | ||
78 | static int __init setup_early_printk(char *buf) | ||
79 | { | ||
80 | const struct earlycon_match *match = earlycon_match; | ||
81 | phys_addr_t paddr = 0; | ||
82 | |||
83 | if (!buf) { | ||
84 | pr_warning("No earlyprintk arguments passed.\n"); | ||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | while (match->name) { | ||
89 | size_t len = strlen(match->name); | ||
90 | if (!strncmp(buf, match->name, len)) { | ||
91 | buf += len; | ||
92 | break; | ||
93 | } | ||
94 | match++; | ||
95 | } | ||
96 | if (!match->name) { | ||
97 | pr_warning("Unknown earlyprintk arguments: %s\n", buf); | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | /* I/O address */ | ||
102 | if (!strncmp(buf, ",0x", 3)) { | ||
103 | char *e; | ||
104 | paddr = simple_strtoul(buf + 1, &e, 16); | ||
105 | buf = e; | ||
106 | } | ||
107 | /* no options parsing yet */ | ||
108 | |||
109 | if (paddr) | ||
110 | early_base = early_io_map(paddr, EARLYCON_IOBASE); | ||
111 | |||
112 | printch = match->printch; | ||
113 | register_console(&early_console); | ||
114 | |||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | early_param("earlyprintk", setup_early_printk); | ||
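In use, this is driven from the kernel command line, e.g. "earlyprintk=pl011,0x7ff80000"; the address is entirely board-specific and the UART must already have been initialised by the bootloader. Adding another backend means extending earlycon_match[]; a hedged sketch for a hypothetical UART whose register names and offsets are invented for illustration:

        /* Hypothetical extra backend; register layout is illustrative only. */
        #define SKETCH_UART_THR         0x00    /* transmit holding register */
        #define SKETCH_UART_LSR         0x14    /* line status register */
        #define SKETCH_UART_LSR_THRE    0x20    /* transmit holding empty */

        static void sketch_uart_printch(char ch)
        {
                while (!(readl_relaxed(early_base + SKETCH_UART_LSR) &
                         SKETCH_UART_LSR_THRE))
                        ;
                writeb_relaxed(ch, early_base + SKETCH_UART_THR);
        }

        /* ...then add { .name = "sketchuart", .printch = sketch_uart_printch, }
         * to earlycon_match[] before its terminating {} entry. */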
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 368ad1f7c36c..0a0a49756826 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -82,10 +82,8 @@ | |||
82 | 82 | ||
83 | #ifdef CONFIG_ARM64_64K_PAGES | 83 | #ifdef CONFIG_ARM64_64K_PAGES |
84 | #define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS | 84 | #define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS |
85 | #define IO_MMUFLAGS PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_XN | PTE_FLAGS | ||
86 | #else | 85 | #else |
87 | #define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS | 86 | #define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS |
88 | #define IO_MMUFLAGS PMD_ATTRINDX(MT_DEVICE_nGnRE) | PMD_SECT_XN | PMD_FLAGS | ||
89 | #endif | 87 | #endif |
90 | 88 | ||
91 | /* | 89 | /* |
@@ -368,6 +366,7 @@ ENDPROC(__calc_phys_offset) | |||
368 | * - identity mapping to enable the MMU (low address, TTBR0) | 366 | * - identity mapping to enable the MMU (low address, TTBR0) |
369 | * - first few MB of the kernel linear mapping to jump to once the MMU has | 367 | * - first few MB of the kernel linear mapping to jump to once the MMU has |
370 | * been enabled, including the FDT blob (TTBR1) | 368 | * been enabled, including the FDT blob (TTBR1) |
369 | * - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1) | ||
371 | */ | 370 | */ |
372 | __create_page_tables: | 371 | __create_page_tables: |
373 | pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses | 372 | pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses |
@@ -420,6 +419,15 @@ __create_page_tables: | |||
420 | sub x6, x6, #1 // inclusive range | 419 | sub x6, x6, #1 // inclusive range |
421 | create_block_map x0, x7, x3, x5, x6 | 420 | create_block_map x0, x7, x3, x5, x6 |
422 | 1: | 421 | 1: |
422 | #ifdef CONFIG_EARLY_PRINTK | ||
423 | /* | ||
424 | * Create the pgd entry for the UART mapping. The full mapping is done | ||
425 | * later based on the earlyprintk kernel parameter. | ||
426 | */ | ||
427 | ldr x5, =EARLYCON_IOBASE // UART virtual address | ||
428 | add x0, x26, #2 * PAGE_SIZE // section table address | ||
429 | create_pgd_entry x26, x0, x5, x6, x7 | ||
430 | #endif | ||
423 | ret | 431 | ret |
424 | ENDPROC(__create_page_tables) | 432 | ENDPROC(__create_page_tables) |
425 | .ltorg | 433 | .ltorg |
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index f7073c7b1ca9..1e49e5eb81e9 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -1331,6 +1331,11 @@ void perf_callchain_user(struct perf_callchain_entry *entry, | |||
1331 | { | 1331 | { |
1332 | struct frame_tail __user *tail; | 1332 | struct frame_tail __user *tail; |
1333 | 1333 | ||
1334 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
1335 | /* We don't support guest os callchain now */ | ||
1336 | return; | ||
1337 | } | ||
1338 | |||
1334 | tail = (struct frame_tail __user *)regs->regs[29]; | 1339 | tail = (struct frame_tail __user *)regs->regs[29]; |
1335 | 1340 | ||
1336 | while (entry->nr < PERF_MAX_STACK_DEPTH && | 1341 | while (entry->nr < PERF_MAX_STACK_DEPTH && |
@@ -1355,8 +1360,40 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry, | |||
1355 | { | 1360 | { |
1356 | struct stackframe frame; | 1361 | struct stackframe frame; |
1357 | 1362 | ||
1363 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
1364 | /* We don't support guest os callchain now */ | ||
1365 | return; | ||
1366 | } | ||
1367 | |||
1358 | frame.fp = regs->regs[29]; | 1368 | frame.fp = regs->regs[29]; |
1359 | frame.sp = regs->sp; | 1369 | frame.sp = regs->sp; |
1360 | frame.pc = regs->pc; | 1370 | frame.pc = regs->pc; |
1361 | walk_stackframe(&frame, callchain_trace, entry); | 1371 | walk_stackframe(&frame, callchain_trace, entry); |
1362 | } | 1372 | } |
1373 | |||
1374 | unsigned long perf_instruction_pointer(struct pt_regs *regs) | ||
1375 | { | ||
1376 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) | ||
1377 | return perf_guest_cbs->get_guest_ip(); | ||
1378 | |||
1379 | return instruction_pointer(regs); | ||
1380 | } | ||
1381 | |||
1382 | unsigned long perf_misc_flags(struct pt_regs *regs) | ||
1383 | { | ||
1384 | int misc = 0; | ||
1385 | |||
1386 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
1387 | if (perf_guest_cbs->is_user_mode()) | ||
1388 | misc |= PERF_RECORD_MISC_GUEST_USER; | ||
1389 | else | ||
1390 | misc |= PERF_RECORD_MISC_GUEST_KERNEL; | ||
1391 | } else { | ||
1392 | if (user_mode(regs)) | ||
1393 | misc |= PERF_RECORD_MISC_USER; | ||
1394 | else | ||
1395 | misc |= PERF_RECORD_MISC_KERNEL; | ||
1396 | } | ||
1397 | |||
1398 | return misc; | ||
1399 | } | ||
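The perf_guest_cbs checks prepare the arm64 perf backend for a hypervisor that registers guest-state callbacks; until something calls perf_register_guest_info_callbacks(), the pointer stays NULL and all of the branches above fall through to the host paths. A hedged sketch of the registering side, with placeholder callback bodies:

        /* Illustrative registration by a hypervisor module; the callback
         * bodies here are placeholders, not working guest detection. */
        static int sketch_is_in_guest(void)             { return 0; }
        static int sketch_is_user_mode(void)            { return 0; }
        static unsigned long sketch_get_guest_ip(void)  { return 0; }

        static struct perf_guest_info_callbacks sketch_guest_cbs = {
                .is_in_guest    = sketch_is_in_guest,
                .is_user_mode   = sketch_is_user_mode,
                .get_guest_ip   = sketch_get_guest_ip,
        };

        /* somewhere in module init:
         * perf_register_guest_info_callbacks(&sketch_guest_cbs); */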
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index cb0956bc96ed..0337cdb0667b 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c | |||
@@ -45,9 +45,10 @@ | |||
45 | 45 | ||
46 | #include <asm/compat.h> | 46 | #include <asm/compat.h> |
47 | #include <asm/cacheflush.h> | 47 | #include <asm/cacheflush.h> |
48 | #include <asm/fpsimd.h> | ||
49 | #include <asm/mmu_context.h> | ||
48 | #include <asm/processor.h> | 50 | #include <asm/processor.h> |
49 | #include <asm/stacktrace.h> | 51 | #include <asm/stacktrace.h> |
50 | #include <asm/fpsimd.h> | ||
51 | 52 | ||
52 | static void setup_restart(void) | 53 | static void setup_restart(void) |
53 | { | 54 | { |
@@ -97,14 +98,9 @@ static void default_idle(void) | |||
97 | local_irq_enable(); | 98 | local_irq_enable(); |
98 | } | 99 | } |
99 | 100 | ||
100 | void (*pm_idle)(void) = default_idle; | ||
101 | EXPORT_SYMBOL_GPL(pm_idle); | ||
102 | |||
103 | /* | 101 | /* |
104 | * The idle thread, has rather strange semantics for calling pm_idle, | 102 | * The idle thread. |
105 | * but this is what x86 does and we need to do the same, so that | 103 | * We always respect 'hlt_counter' to prevent low power idle. |
106 | * things like cpuidle get called in the same way. The only difference | ||
107 | * is that we always respect 'hlt_counter' to prevent low power idle. | ||
108 | */ | 104 | */ |
109 | void cpu_idle(void) | 105 | void cpu_idle(void) |
110 | { | 106 | { |
@@ -122,10 +118,10 @@ void cpu_idle(void) | |||
122 | local_irq_disable(); | 118 | local_irq_disable(); |
123 | if (!need_resched()) { | 119 | if (!need_resched()) { |
124 | stop_critical_timings(); | 120 | stop_critical_timings(); |
125 | pm_idle(); | 121 | default_idle(); |
126 | start_critical_timings(); | 122 | start_critical_timings(); |
127 | /* | 123 | /* |
128 | * pm_idle functions should always return | 124 | * default_idle functions should always return |
129 | * with IRQs enabled. | 125 | * with IRQs enabled. |
130 | */ | 126 | */ |
131 | WARN_ON(irqs_disabled()); | 127 | WARN_ON(irqs_disabled()); |
@@ -319,6 +315,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
319 | /* the actual thread switch */ | 315 | /* the actual thread switch */ |
320 | last = cpu_switch_to(prev, next); | 316 | last = cpu_switch_to(prev, next); |
321 | 317 | ||
318 | contextidr_thread_switch(next); | ||
322 | return last; | 319 | return last; |
323 | } | 320 | } |
324 | 321 | ||
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c new file mode 100644 index 000000000000..14f73c445ff5 --- /dev/null +++ b/arch/arm64/kernel/psci.c | |||
@@ -0,0 +1,211 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License version 2 as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * Copyright (C) 2013 ARM Limited | ||
12 | * | ||
13 | * Author: Will Deacon <will.deacon@arm.com> | ||
14 | */ | ||
15 | |||
16 | #define pr_fmt(fmt) "psci: " fmt | ||
17 | |||
18 | #include <linux/init.h> | ||
19 | #include <linux/of.h> | ||
20 | |||
21 | #include <asm/compiler.h> | ||
22 | #include <asm/errno.h> | ||
23 | #include <asm/psci.h> | ||
24 | |||
25 | struct psci_operations psci_ops; | ||
26 | |||
27 | static int (*invoke_psci_fn)(u64, u64, u64, u64); | ||
28 | |||
29 | enum psci_function { | ||
30 | PSCI_FN_CPU_SUSPEND, | ||
31 | PSCI_FN_CPU_ON, | ||
32 | PSCI_FN_CPU_OFF, | ||
33 | PSCI_FN_MIGRATE, | ||
34 | PSCI_FN_MAX, | ||
35 | }; | ||
36 | |||
37 | static u32 psci_function_id[PSCI_FN_MAX]; | ||
38 | |||
39 | #define PSCI_RET_SUCCESS 0 | ||
40 | #define PSCI_RET_EOPNOTSUPP -1 | ||
41 | #define PSCI_RET_EINVAL -2 | ||
42 | #define PSCI_RET_EPERM -3 | ||
43 | |||
44 | static int psci_to_linux_errno(int errno) | ||
45 | { | ||
46 | switch (errno) { | ||
47 | case PSCI_RET_SUCCESS: | ||
48 | return 0; | ||
49 | case PSCI_RET_EOPNOTSUPP: | ||
50 | return -EOPNOTSUPP; | ||
51 | case PSCI_RET_EINVAL: | ||
52 | return -EINVAL; | ||
53 | case PSCI_RET_EPERM: | ||
54 | return -EPERM; | ||
55 | }; | ||
56 | |||
57 | return -EINVAL; | ||
58 | } | ||
59 | |||
60 | #define PSCI_POWER_STATE_ID_MASK 0xffff | ||
61 | #define PSCI_POWER_STATE_ID_SHIFT 0 | ||
62 | #define PSCI_POWER_STATE_TYPE_MASK 0x1 | ||
63 | #define PSCI_POWER_STATE_TYPE_SHIFT 16 | ||
64 | #define PSCI_POWER_STATE_AFFL_MASK 0x3 | ||
65 | #define PSCI_POWER_STATE_AFFL_SHIFT 24 | ||
66 | |||
67 | static u32 psci_power_state_pack(struct psci_power_state state) | ||
68 | { | ||
69 | return ((state.id & PSCI_POWER_STATE_ID_MASK) | ||
70 | << PSCI_POWER_STATE_ID_SHIFT) | | ||
71 | ((state.type & PSCI_POWER_STATE_TYPE_MASK) | ||
72 | << PSCI_POWER_STATE_TYPE_SHIFT) | | ||
73 | ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK) | ||
74 | << PSCI_POWER_STATE_AFFL_SHIFT); | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * The following two functions are invoked via the invoke_psci_fn pointer | ||
79 | * and will not be inlined, allowing us to piggyback on the AAPCS. | ||
80 | */ | ||
81 | static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, | ||
82 | u64 arg2) | ||
83 | { | ||
84 | asm volatile( | ||
85 | __asmeq("%0", "x0") | ||
86 | __asmeq("%1", "x1") | ||
87 | __asmeq("%2", "x2") | ||
88 | __asmeq("%3", "x3") | ||
89 | "hvc #0\n" | ||
90 | : "+r" (function_id) | ||
91 | : "r" (arg0), "r" (arg1), "r" (arg2)); | ||
92 | |||
93 | return function_id; | ||
94 | } | ||
95 | |||
96 | static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, | ||
97 | u64 arg2) | ||
98 | { | ||
99 | asm volatile( | ||
100 | __asmeq("%0", "x0") | ||
101 | __asmeq("%1", "x1") | ||
102 | __asmeq("%2", "x2") | ||
103 | __asmeq("%3", "x3") | ||
104 | "smc #0\n" | ||
105 | : "+r" (function_id) | ||
106 | : "r" (arg0), "r" (arg1), "r" (arg2)); | ||
107 | |||
108 | return function_id; | ||
109 | } | ||
110 | |||
111 | static int psci_cpu_suspend(struct psci_power_state state, | ||
112 | unsigned long entry_point) | ||
113 | { | ||
114 | int err; | ||
115 | u32 fn, power_state; | ||
116 | |||
117 | fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; | ||
118 | power_state = psci_power_state_pack(state); | ||
119 | err = invoke_psci_fn(fn, power_state, entry_point, 0); | ||
120 | return psci_to_linux_errno(err); | ||
121 | } | ||
122 | |||
123 | static int psci_cpu_off(struct psci_power_state state) | ||
124 | { | ||
125 | int err; | ||
126 | u32 fn, power_state; | ||
127 | |||
128 | fn = psci_function_id[PSCI_FN_CPU_OFF]; | ||
129 | power_state = psci_power_state_pack(state); | ||
130 | err = invoke_psci_fn(fn, power_state, 0, 0); | ||
131 | return psci_to_linux_errno(err); | ||
132 | } | ||
133 | |||
134 | static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) | ||
135 | { | ||
136 | int err; | ||
137 | u32 fn; | ||
138 | |||
139 | fn = psci_function_id[PSCI_FN_CPU_ON]; | ||
140 | err = invoke_psci_fn(fn, cpuid, entry_point, 0); | ||
141 | return psci_to_linux_errno(err); | ||
142 | } | ||
143 | |||
144 | static int psci_migrate(unsigned long cpuid) | ||
145 | { | ||
146 | int err; | ||
147 | u32 fn; | ||
148 | |||
149 | fn = psci_function_id[PSCI_FN_MIGRATE]; | ||
150 | err = invoke_psci_fn(fn, cpuid, 0, 0); | ||
151 | return psci_to_linux_errno(err); | ||
152 | } | ||
153 | |||
154 | static const struct of_device_id psci_of_match[] __initconst = { | ||
155 | { .compatible = "arm,psci", }, | ||
156 | {}, | ||
157 | }; | ||
158 | |||
159 | int __init psci_init(void) | ||
160 | { | ||
161 | struct device_node *np; | ||
162 | const char *method; | ||
163 | u32 id; | ||
164 | int err = 0; | ||
165 | |||
166 | np = of_find_matching_node(NULL, psci_of_match); | ||
167 | if (!np) | ||
168 | return -ENODEV; | ||
169 | |||
170 | pr_info("probing function IDs from device-tree\n"); | ||
171 | |||
172 | if (of_property_read_string(np, "method", &method)) { | ||
173 | pr_warning("missing \"method\" property\n"); | ||
174 | err = -ENXIO; | ||
175 | goto out_put_node; | ||
176 | } | ||
177 | |||
178 | if (!strcmp("hvc", method)) { | ||
179 | invoke_psci_fn = __invoke_psci_fn_hvc; | ||
180 | } else if (!strcmp("smc", method)) { | ||
181 | invoke_psci_fn = __invoke_psci_fn_smc; | ||
182 | } else { | ||
183 | pr_warning("invalid \"method\" property: %s\n", method); | ||
184 | err = -EINVAL; | ||
185 | goto out_put_node; | ||
186 | } | ||
187 | |||
188 | if (!of_property_read_u32(np, "cpu_suspend", &id)) { | ||
189 | psci_function_id[PSCI_FN_CPU_SUSPEND] = id; | ||
190 | psci_ops.cpu_suspend = psci_cpu_suspend; | ||
191 | } | ||
192 | |||
193 | if (!of_property_read_u32(np, "cpu_off", &id)) { | ||
194 | psci_function_id[PSCI_FN_CPU_OFF] = id; | ||
195 | psci_ops.cpu_off = psci_cpu_off; | ||
196 | } | ||
197 | |||
198 | if (!of_property_read_u32(np, "cpu_on", &id)) { | ||
199 | psci_function_id[PSCI_FN_CPU_ON] = id; | ||
200 | psci_ops.cpu_on = psci_cpu_on; | ||
201 | } | ||
202 | |||
203 | if (!of_property_read_u32(np, "migrate", &id)) { | ||
204 | psci_function_id[PSCI_FN_MIGRATE] = id; | ||
205 | psci_ops.migrate = psci_migrate; | ||
206 | } | ||
207 | |||
208 | out_put_node: | ||
209 | of_node_put(np); | ||
210 | return err; | ||
211 | } | ||
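psci_power_state_pack() folds the three device-tree-described fields into the single 32-bit power_state argument the firmware interface expects: the state ID in bits [15:0], the type in bit 16, and the affinity level in bits [25:24]. A worked example (a sketch that would have to live inside psci.c, since the helper is static):

        /* Illustrative: { .id = 1, .type = POWER_DOWN, .affinity_level = 1 }
         * packs to (1 << 0) | (1 << 16) | (1 << 24) == 0x01010001. */
        static u32 sketch_pack_example(void)
        {
                struct psci_power_state s = {
                        .id             = 1,
                        .type           = PSCI_POWER_STATE_TYPE_POWER_DOWN,
                        .affinity_level = 1,
                };

                return psci_power_state_pack(s);        /* 0x01010001 */
        }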
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 7665a9bfdb1e..113db863f832 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/proc_fs.h> | 39 | #include <linux/proc_fs.h> |
40 | #include <linux/memblock.h> | 40 | #include <linux/memblock.h> |
41 | #include <linux/of_fdt.h> | 41 | #include <linux/of_fdt.h> |
42 | #include <linux/of_platform.h> | ||
42 | 43 | ||
43 | #include <asm/cputype.h> | 44 | #include <asm/cputype.h> |
44 | #include <asm/elf.h> | 45 | #include <asm/elf.h> |
@@ -49,6 +50,7 @@ | |||
49 | #include <asm/tlbflush.h> | 50 | #include <asm/tlbflush.h> |
50 | #include <asm/traps.h> | 51 | #include <asm/traps.h> |
51 | #include <asm/memblock.h> | 52 | #include <asm/memblock.h> |
53 | #include <asm/psci.h> | ||
52 | 54 | ||
53 | unsigned int processor_id; | 55 | unsigned int processor_id; |
54 | EXPORT_SYMBOL(processor_id); | 56 | EXPORT_SYMBOL(processor_id); |
@@ -260,6 +262,8 @@ void __init setup_arch(char **cmdline_p) | |||
260 | 262 | ||
261 | unflatten_device_tree(); | 263 | unflatten_device_tree(); |
262 | 264 | ||
265 | psci_init(); | ||
266 | |||
263 | #ifdef CONFIG_SMP | 267 | #ifdef CONFIG_SMP |
264 | smp_init_cpus(); | 268 | smp_init_cpus(); |
265 | #endif | 269 | #endif |
@@ -289,6 +293,13 @@ static int __init topology_init(void) | |||
289 | } | 293 | } |
290 | subsys_initcall(topology_init); | 294 | subsys_initcall(topology_init); |
291 | 295 | ||
296 | static int __init arm64_device_probe(void) | ||
297 | { | ||
298 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | ||
299 | return 0; | ||
300 | } | ||
301 | device_initcall(arm64_device_probe); | ||
302 | |||
292 | static const char *hwcap_str[] = { | 303 | static const char *hwcap_str[] = { |
293 | "fp", | 304 | "fp", |
294 | "asimd", | 305 | "asimd", |
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index a4db3d22aac4..41db148a7eb9 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c | |||
@@ -76,7 +76,7 @@ struct compat_sigcontext { | |||
76 | 76 | ||
77 | struct compat_ucontext { | 77 | struct compat_ucontext { |
78 | compat_ulong_t uc_flags; | 78 | compat_ulong_t uc_flags; |
79 | struct compat_ucontext *uc_link; | 79 | compat_uptr_t uc_link; |
80 | compat_stack_t uc_stack; | 80 | compat_stack_t uc_stack; |
81 | struct compat_sigcontext uc_mcontext; | 81 | struct compat_sigcontext uc_mcontext; |
82 | compat_sigset_t uc_sigmask; | 82 | compat_sigset_t uc_sigmask; |
@@ -703,7 +703,7 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, | |||
703 | err |= copy_siginfo_to_user32(&frame->info, info); | 703 | err |= copy_siginfo_to_user32(&frame->info, info); |
704 | 704 | ||
705 | __put_user_error(0, &frame->sig.uc.uc_flags, err); | 705 | __put_user_error(0, &frame->sig.uc.uc_flags, err); |
706 | __put_user_error(NULL, &frame->sig.uc.uc_link, err); | 706 | __put_user_error(0, &frame->sig.uc.uc_link, err); |
707 | 707 | ||
708 | memset(&stack, 0, sizeof(stack)); | 708 | memset(&stack, 0, sizeof(stack)); |
709 | stack.ss_sp = (compat_uptr_t)current->sas_ss_sp; | 709 | stack.ss_sp = (compat_uptr_t)current->sas_ss_sp; |
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 538300f2273d..bdd34597254b 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c | |||
@@ -233,7 +233,28 @@ void __init smp_prepare_boot_cpu(void) | |||
233 | } | 233 | } |
234 | 234 | ||
235 | static void (*smp_cross_call)(const struct cpumask *, unsigned int); | 235 | static void (*smp_cross_call)(const struct cpumask *, unsigned int); |
236 | static phys_addr_t cpu_release_addr[NR_CPUS]; | 236 | |
237 | static const struct smp_enable_ops *enable_ops[] __initconst = { | ||
238 | &smp_spin_table_ops, | ||
239 | &smp_psci_ops, | ||
240 | NULL, | ||
241 | }; | ||
242 | |||
243 | static const struct smp_enable_ops *smp_enable_ops[NR_CPUS]; | ||
244 | |||
245 | static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name) | ||
246 | { | ||
247 | const struct smp_enable_ops **ops = enable_ops; | ||
248 | | ||
249 | while (*ops) { | ||
250 | if (!strcmp(name, (*ops)->name)) | ||
251 | return *ops; | ||
252 | | ||
253 | ops++; | ||
254 | } | ||
255 | |||
256 | return NULL; | ||
257 | } | ||
237 | 258 | ||
238 | /* | 259 | /* |
239 | * Enumerate the possible CPU set from the device tree. | 260 | * Enumerate the possible CPU set from the device tree. |
@@ -252,22 +273,22 @@ void __init smp_init_cpus(void) | |||
252 | * We currently support only the "spin-table" enable-method. | 273 | * We currently support only the "spin-table" enable-method. |
253 | */ | 274 | */ |
254 | enable_method = of_get_property(dn, "enable-method", NULL); | 275 | enable_method = of_get_property(dn, "enable-method", NULL); |
255 | if (!enable_method || strcmp(enable_method, "spin-table")) { | 276 | if (!enable_method) { |
256 | pr_err("CPU %d: missing or invalid enable-method property: %s\n", | 277 | pr_err("CPU %d: missing enable-method property\n", cpu); |
257 | cpu, enable_method); | ||
258 | goto next; | 278 | goto next; |
259 | } | 279 | } |
260 | 280 | ||
261 | /* | 281 | smp_enable_ops[cpu] = smp_get_enable_ops(enable_method); |
262 | * Determine the address from which the CPU is polling. | 282 | |
263 | */ | 283 | if (!smp_enable_ops[cpu]) { |
264 | if (of_property_read_u64(dn, "cpu-release-addr", | 284 | pr_err("CPU %d: invalid enable-method property: %s\n", |
265 | &cpu_release_addr[cpu])) { | 285 | cpu, enable_method); |
266 | pr_err("CPU %d: missing or invalid cpu-release-addr property\n", | ||
267 | cpu); | ||
268 | goto next; | 286 | goto next; |
269 | } | 287 | } |
270 | 288 | ||
289 | if (smp_enable_ops[cpu]->init_cpu(dn, cpu)) | ||
290 | goto next; | ||
291 | |||
271 | set_cpu_possible(cpu, true); | 292 | set_cpu_possible(cpu, true); |
272 | next: | 293 | next: |
273 | cpu++; | 294 | cpu++; |
@@ -281,8 +302,7 @@ next: | |||
281 | 302 | ||
282 | void __init smp_prepare_cpus(unsigned int max_cpus) | 303 | void __init smp_prepare_cpus(unsigned int max_cpus) |
283 | { | 304 | { |
284 | int cpu; | 305 | int cpu, err; |
285 | void **release_addr; | ||
286 | unsigned int ncores = num_possible_cpus(); | 306 | unsigned int ncores = num_possible_cpus(); |
287 | 307 | ||
288 | /* | 308 | /* |
@@ -291,30 +311,35 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
291 | if (max_cpus > ncores) | 311 | if (max_cpus > ncores) |
292 | max_cpus = ncores; | 312 | max_cpus = ncores; |
293 | 313 | ||
314 | /* Don't bother if we're effectively UP */ | ||
315 | if (max_cpus <= 1) | ||
316 | return; | ||
317 | |||
294 | /* | 318 | /* |
295 | * Initialise the present map (which describes the set of CPUs | 319 | * Initialise the present map (which describes the set of CPUs |
296 | * actually populated at the present time) and release the | 320 | * actually populated at the present time) and release the |
297 | * secondaries from the bootloader. | 321 | * secondaries from the bootloader. |
322 | * | ||
323 | * Make sure we online at most (max_cpus - 1) additional CPUs. | ||
298 | */ | 324 | */ |
325 | max_cpus--; | ||
299 | for_each_possible_cpu(cpu) { | 326 | for_each_possible_cpu(cpu) { |
300 | if (max_cpus == 0) | 327 | if (max_cpus == 0) |
301 | break; | 328 | break; |
302 | 329 | ||
303 | if (!cpu_release_addr[cpu]) | 330 | if (cpu == smp_processor_id()) |
331 | continue; | ||
332 | |||
333 | if (!smp_enable_ops[cpu]) | ||
304 | continue; | 334 | continue; |
305 | 335 | ||
306 | release_addr = __va(cpu_release_addr[cpu]); | 336 | err = smp_enable_ops[cpu]->prepare_cpu(cpu); |
307 | release_addr[0] = (void *)__pa(secondary_holding_pen); | 337 | if (err) |
308 | __flush_dcache_area(release_addr, sizeof(release_addr[0])); | 338 | continue; |
309 | 339 | ||
310 | set_cpu_present(cpu, true); | 340 | set_cpu_present(cpu, true); |
311 | max_cpus--; | 341 | max_cpus--; |
312 | } | 342 | } |
313 | |||
314 | /* | ||
315 | * Send an event to wake up the secondaries. | ||
316 | */ | ||
317 | sev(); | ||
318 | } | 343 | } |
319 | 344 | ||
320 | 345 | ||
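The rework replaces the hard-coded spin-table logic with a small two-phase interface: init_cpu() runs at enumeration time to parse method-specific device-tree properties, and prepare_cpu() runs when the CPU is actually to be released. A hedged skeleton of what a further enable method would look like (entirely hypothetical; it would also need an entry in enable_ops[] before the NULL sentinel):

        /* Hypothetical skeleton of an additional SMP enable method. */
        static int __init smp_sketch_init_cpu(struct device_node *dn, int cpu)
        {
                /* parse method-specific properties from 'dn' here */
                return 0;
        }

        static int __init smp_sketch_prepare_cpu(int cpu)
        {
                /* kick CPU 'cpu' towards secondary_holding_pen here */
                return 0;
        }

        const struct smp_enable_ops smp_sketch_ops __initconst = {
                .name           = "sketch-method",
                .init_cpu       = smp_sketch_init_cpu,
                .prepare_cpu    = smp_sketch_prepare_cpu,
        };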
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c new file mode 100644 index 000000000000..112091684c22 --- /dev/null +++ b/arch/arm64/kernel/smp_psci.c | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * PSCI SMP initialisation | ||
3 | * | ||
4 | * Copyright (C) 2013 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/smp.h> | ||
22 | |||
23 | #include <asm/psci.h> | ||
24 | |||
25 | static int __init smp_psci_init_cpu(struct device_node *dn, int cpu) | ||
26 | { | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | static int __init smp_psci_prepare_cpu(int cpu) | ||
31 | { | ||
32 | int err; | ||
33 | |||
34 | if (!psci_ops.cpu_on) { | ||
35 | pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu); | ||
36 | return -ENODEV; | ||
37 | } | ||
38 | |||
39 | err = psci_ops.cpu_on(cpu, __pa(secondary_holding_pen)); | ||
40 | if (err) { | ||
41 | pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err); | ||
42 | return err; | ||
43 | } | ||
44 | |||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | const struct smp_enable_ops smp_psci_ops __initconst = { | ||
49 | .name = "psci", | ||
50 | .init_cpu = smp_psci_init_cpu, | ||
51 | .prepare_cpu = smp_psci_prepare_cpu, | ||
52 | }; | ||
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c new file mode 100644 index 000000000000..7c35fa682f76 --- /dev/null +++ b/arch/arm64/kernel/smp_spin_table.c | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * Spin Table SMP initialisation | ||
3 | * | ||
4 | * Copyright (C) 2013 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/smp.h> | ||
22 | |||
23 | #include <asm/cacheflush.h> | ||
24 | |||
25 | static phys_addr_t cpu_release_addr[NR_CPUS]; | ||
26 | |||
27 | static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu) | ||
28 | { | ||
29 | /* | ||
30 | * Determine the address from which the CPU is polling. | ||
31 | */ | ||
32 | if (of_property_read_u64(dn, "cpu-release-addr", | ||
33 | &cpu_release_addr[cpu])) { | ||
34 | pr_err("CPU %d: missing or invalid cpu-release-addr property\n", | ||
35 | cpu); | ||
36 | |||
37 | return -1; | ||
38 | } | ||
39 | |||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | static int __init smp_spin_table_prepare_cpu(int cpu) | ||
44 | { | ||
45 | void **release_addr; | ||
46 | |||
47 | if (!cpu_release_addr[cpu]) | ||
48 | return -ENODEV; | ||
49 | |||
50 | release_addr = __va(cpu_release_addr[cpu]); | ||
51 | release_addr[0] = (void *)__pa(secondary_holding_pen); | ||
52 | __flush_dcache_area(release_addr, sizeof(release_addr[0])); | ||
53 | |||
54 | /* | ||
55 | * Send an event to wake up the secondary CPU. | ||
56 | */ | ||
57 | sev(); | ||
58 | |||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | const struct smp_enable_ops smp_spin_table_ops __initconst = { | ||
63 | .name = "spin-table", | ||
64 | .init_cpu = smp_spin_table_init_cpu, | ||
65 | .prepare_cpu = smp_spin_table_prepare_cpu, | ||
66 | }; | ||
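Editor's note: spin-table is a two-sided handshake. Firmware parks each secondary CPU polling the 64-bit slot named by its cpu-release-addr property; smp_spin_table_prepare_cpu() above fills that slot with the physical address of secondary_holding_pen, cleans it from the data cache, and issues SEV. A hedged illustration of the firmware side (not kernel code, not part of this diff):

	/*
	 * Illustration only (boot-loader/firmware side): each parked
	 * secondary sleeps in WFE until the kernel's sev() above wakes
	 * it, re-reads the release slot, and jumps to the pen.
	 */
	typedef void (*kernel_entry_fn)(void);

	static void secondary_park(volatile unsigned long *release_addr)
	{
		unsigned long entry;

		while ((entry = *release_addr) == 0)
			asm volatile("wfe");		/* wait for the kernel's sev() */

		((kernel_entry_fn)entry)();		/* enter secondary_holding_pen */
	}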
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index a6885d896ab6..f4dd585898c5 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/nodemask.h> | 25 | #include <linux/nodemask.h> |
26 | #include <linux/memblock.h> | 26 | #include <linux/memblock.h> |
27 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
28 | #include <linux/io.h> | ||
28 | 29 | ||
29 | #include <asm/cputype.h> | 30 | #include <asm/cputype.h> |
30 | #include <asm/sections.h> | 31 | #include <asm/sections.h> |
@@ -251,6 +252,47 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt, | |||
251 | } while (pgd++, addr = next, addr != end); | 252 | } while (pgd++, addr = next, addr != end); |
252 | } | 253 | } |
253 | 254 | ||
255 | #ifdef CONFIG_EARLY_PRINTK | ||
256 | /* | ||
257 | * Create an early I/O mapping using the pgd/pmd entries already populated | ||
258 | * in head.S as this function is called too early to allocate any memory. The | ||
259 | * mapping size is 2MB with 4KB pages or 64KB with 64KB pages. | ||
260 | */ | ||
261 | void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt) | ||
262 | { | ||
263 | unsigned long size, mask; | ||
264 | bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES); | ||
265 | pgd_t *pgd; | ||
266 | pud_t *pud; | ||
267 | pmd_t *pmd; | ||
268 | pte_t *pte; | ||
269 | |||
270 | /* | ||
271 | * No early pte entries with !ARM64_64K_PAGES configuration, so using | ||
272 | * sections (pmd). | ||
273 | */ | ||
274 | size = page64k ? PAGE_SIZE : SECTION_SIZE; | ||
275 | mask = ~(size - 1); | ||
276 | |||
277 | pgd = pgd_offset_k(virt); | ||
278 | pud = pud_offset(pgd, virt); | ||
279 | if (pud_none(*pud)) | ||
280 | return NULL; | ||
281 | pmd = pmd_offset(pud, virt); | ||
282 | |||
283 | if (page64k) { | ||
284 | if (pmd_none(*pmd)) | ||
285 | return NULL; | ||
286 | pte = pte_offset_kernel(pmd, virt); | ||
287 | set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE)); | ||
288 | } else { | ||
289 | set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE)); | ||
290 | } | ||
291 | |||
292 | return (void __iomem *)((virt & mask) + (phys & ~mask)); | ||
293 | } | ||
294 | #endif | ||
295 | |||
254 | static void __init map_mem(void) | 296 | static void __init map_mem(void) |
255 | { | 297 | { |
256 | struct memblock_region *reg; | 298 | struct memblock_region *reg; |
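Editor's note: a hedged sketch of a typical early_io_map() consumer — the arm64 earlyprintk path maps the console UART long before ioremap() is usable. The EARLYCON_IOBASE virtual slot and the helper name here are assumptions for illustration:

	/*
	 * Sketch only: map an early console UART through the head.S page
	 * tables. EARLYCON_IOBASE is an assumed fixed virtual slot.
	 */
	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/io.h>

	static void __iomem *early_uart_base;

	static int __init map_early_uart(phys_addr_t paddr)
	{
		early_uart_base = early_io_map(paddr, EARLYCON_IOBASE);
		if (!early_uart_base)
			return -ENOMEM;
		/* readl()/writel() on early_uart_base now reach the UART */
		return 0;
	}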
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index b6f3ad5441c5..67e4aaad78f5 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig | |||
@@ -24,7 +24,6 @@ config BLACKFIN | |||
24 | select HAVE_FUNCTION_TRACER | 24 | select HAVE_FUNCTION_TRACER |
25 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | 25 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST |
26 | select HAVE_IDE | 26 | select HAVE_IDE |
27 | select HAVE_IRQ_WORK | ||
28 | select HAVE_KERNEL_GZIP if RAMKERNEL | 27 | select HAVE_KERNEL_GZIP if RAMKERNEL |
29 | select HAVE_KERNEL_BZIP2 if RAMKERNEL | 28 | select HAVE_KERNEL_BZIP2 if RAMKERNEL |
30 | select HAVE_KERNEL_LZMA if RAMKERNEL | 29 | select HAVE_KERNEL_LZMA if RAMKERNEL |
@@ -38,7 +37,6 @@ config BLACKFIN | |||
38 | select HAVE_GENERIC_HARDIRQS | 37 | select HAVE_GENERIC_HARDIRQS |
39 | select GENERIC_ATOMIC64 | 38 | select GENERIC_ATOMIC64 |
40 | select GENERIC_IRQ_PROBE | 39 | select GENERIC_IRQ_PROBE |
41 | select IRQ_PER_CPU if SMP | ||
42 | select USE_GENERIC_SMP_HELPERS if SMP | 40 | select USE_GENERIC_SMP_HELPERS if SMP |
43 | select HAVE_NMI_WATCHDOG if NMI_WATCHDOG | 41 | select HAVE_NMI_WATCHDOG if NMI_WATCHDOG |
44 | select GENERIC_SMP_IDLE_THREAD | 42 | select GENERIC_SMP_IDLE_THREAD |
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index 3e16ad9b0a99..8061426b7df5 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c | |||
@@ -39,12 +39,6 @@ int nr_l1stack_tasks; | |||
39 | void *l1_stack_base; | 39 | void *l1_stack_base; |
40 | unsigned long l1_stack_len; | 40 | unsigned long l1_stack_len; |
41 | 41 | ||
42 | /* | ||
43 | * Powermanagement idle function, if any.. | ||
44 | */ | ||
45 | void (*pm_idle)(void) = NULL; | ||
46 | EXPORT_SYMBOL(pm_idle); | ||
47 | |||
48 | void (*pm_power_off)(void) = NULL; | 42 | void (*pm_power_off)(void) = NULL; |
49 | EXPORT_SYMBOL(pm_power_off); | 43 | EXPORT_SYMBOL(pm_power_off); |
50 | 44 | ||
@@ -81,7 +75,6 @@ void cpu_idle(void) | |||
81 | { | 75 | { |
82 | /* endless idle loop with no priority at all */ | 76 | /* endless idle loop with no priority at all */ |
83 | while (1) { | 77 | while (1) { |
84 | void (*idle)(void) = pm_idle; | ||
85 | 78 | ||
86 | #ifdef CONFIG_HOTPLUG_CPU | 79 | #ifdef CONFIG_HOTPLUG_CPU |
87 | if (cpu_is_offline(smp_processor_id())) | 80 | if (cpu_is_offline(smp_processor_id())) |
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c index 7f65be6f7f17..104ff4dd9b98 100644 --- a/arch/cris/kernel/process.c +++ b/arch/cris/kernel/process.c | |||
@@ -54,11 +54,6 @@ void enable_hlt(void) | |||
54 | 54 | ||
55 | EXPORT_SYMBOL(enable_hlt); | 55 | EXPORT_SYMBOL(enable_hlt); |
56 | 56 | ||
57 | /* | ||
58 | * The following aren't currently used. | ||
59 | */ | ||
60 | void (*pm_idle)(void); | ||
61 | |||
62 | extern void default_idle(void); | 57 | extern void default_idle(void); |
63 | 58 | ||
64 | void (*pm_power_off)(void); | 59 | void (*pm_power_off)(void); |
@@ -77,16 +72,12 @@ void cpu_idle (void) | |||
77 | while (1) { | 72 | while (1) { |
78 | rcu_idle_enter(); | 73 | rcu_idle_enter(); |
79 | while (!need_resched()) { | 74 | while (!need_resched()) { |
80 | void (*idle)(void); | ||
81 | /* | 75 | /* |
82 | * Mark this as an RCU critical section so that | 76 | * Mark this as an RCU critical section so that |
83 | * synchronize_kernel() in the unload path waits | 77 | * synchronize_kernel() in the unload path waits |
84 | * for our completion. | 78 | * for our completion. |
85 | */ | 79 | */ |
86 | idle = pm_idle; | 80 | default_idle(); |
87 | if (!idle) | ||
88 | idle = default_idle; | ||
89 | idle(); | ||
90 | } | 81 | } |
91 | rcu_idle_exit(); | 82 | rcu_idle_exit(); |
92 | schedule_preempt_disabled(); | 83 | schedule_preempt_disabled(); |
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index 9d262645f667..17df48fc8f44 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig | |||
@@ -3,7 +3,6 @@ config FRV | |||
3 | default y | 3 | default y |
4 | select HAVE_IDE | 4 | select HAVE_IDE |
5 | select HAVE_ARCH_TRACEHOOK | 5 | select HAVE_ARCH_TRACEHOOK |
6 | select HAVE_IRQ_WORK | ||
7 | select HAVE_PERF_EVENTS | 6 | select HAVE_PERF_EVENTS |
8 | select HAVE_UID16 | 7 | select HAVE_UID16 |
9 | select HAVE_GENERIC_HARDIRQS | 8 | select HAVE_GENERIC_HARDIRQS |
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 0744f7d7b1fd..e4decc6b8947 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig | |||
@@ -12,9 +12,7 @@ config HEXAGON | |||
12 | # select ARCH_WANT_OPTIONAL_GPIOLIB | 12 | # select ARCH_WANT_OPTIONAL_GPIOLIB |
13 | # select ARCH_REQUIRE_GPIOLIB | 13 | # select ARCH_REQUIRE_GPIOLIB |
14 | # select HAVE_CLK | 14 | # select HAVE_CLK |
15 | # select IRQ_PER_CPU | ||
16 | # select GENERIC_PENDING_IRQ if SMP | 15 | # select GENERIC_PENDING_IRQ if SMP |
17 | select HAVE_IRQ_WORK | ||
18 | select GENERIC_ATOMIC64 | 16 | select GENERIC_ATOMIC64 |
19 | select HAVE_PERF_EVENTS | 17 | select HAVE_PERF_EVENTS |
20 | select HAVE_GENERIC_HARDIRQS | 18 | select HAVE_GENERIC_HARDIRQS |
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 3279646120e3..00c2e88f7755 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -29,7 +29,6 @@ config IA64 | |||
29 | select ARCH_DISCARD_MEMBLOCK | 29 | select ARCH_DISCARD_MEMBLOCK |
30 | select GENERIC_IRQ_PROBE | 30 | select GENERIC_IRQ_PROBE |
31 | select GENERIC_PENDING_IRQ if SMP | 31 | select GENERIC_PENDING_IRQ if SMP |
32 | select IRQ_PER_CPU | ||
33 | select GENERIC_IRQ_SHOW | 32 | select GENERIC_IRQ_SHOW |
34 | select ARCH_WANT_OPTIONAL_GPIOLIB | 33 | select ARCH_WANT_OPTIONAL_GPIOLIB |
35 | select ARCH_HAVE_NMI_SAFE_CMPXCHG | 34 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
diff --git a/arch/ia64/hp/common/aml_nfw.c b/arch/ia64/hp/common/aml_nfw.c index 6192f7188654..916ffe770bcf 100644 --- a/arch/ia64/hp/common/aml_nfw.c +++ b/arch/ia64/hp/common/aml_nfw.c | |||
@@ -191,7 +191,7 @@ static int aml_nfw_add(struct acpi_device *device) | |||
191 | return aml_nfw_add_global_handler(); | 191 | return aml_nfw_add_global_handler(); |
192 | } | 192 | } |
193 | 193 | ||
194 | static int aml_nfw_remove(struct acpi_device *device, int type) | 194 | static int aml_nfw_remove(struct acpi_device *device) |
195 | { | 195 | { |
196 | return aml_nfw_remove_global_handler(); | 196 | return aml_nfw_remove_global_handler(); |
197 | } | 197 | } |
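Editor's note: this hunk is part of a tree-wide ACPI change that drops the unused "type" argument from driver .remove callbacks. A hedged sketch of how the one-argument callback binds into the driver's ops table; the aml_nfw_ids table name is assumed for illustration:

	/*
	 * Sketch, not from this diff: the ops table binds the new
	 * one-argument .remove directly.
	 */
	static struct acpi_driver aml_nfw_driver = {
		.name = "hp_aml_nfw",
		.class = "hp_aml_nfw",
		.ids = aml_nfw_ids,		/* assumed device-ID table */
		.ops = {
			.add = aml_nfw_add,
			.remove = aml_nfw_remove,
		},
	};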
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index 359e68a03ca3..faa1bf0da815 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h | |||
@@ -52,10 +52,6 @@ | |||
52 | 52 | ||
53 | /* Asm macros */ | 53 | /* Asm macros */ |
54 | 54 | ||
55 | #define ACPI_ASM_MACROS | ||
56 | #define BREAKPOINT3 | ||
57 | #define ACPI_DISABLE_IRQS() local_irq_disable() | ||
58 | #define ACPI_ENABLE_IRQS() local_irq_enable() | ||
59 | #define ACPI_FLUSH_CPU_CACHE() | 55 | #define ACPI_FLUSH_CPU_CACHE() |
60 | 56 | ||
61 | static inline int | 57 | static inline int |
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h index 7fcf7f08ab06..e2d3f5baf265 100644 --- a/arch/ia64/include/asm/cputime.h +++ b/arch/ia64/include/asm/cputime.h | |||
@@ -11,99 +11,19 @@ | |||
11 | * as published by the Free Software Foundation; either version | 11 | * as published by the Free Software Foundation; either version |
12 | * 2 of the License, or (at your option) any later version. | 12 | * 2 of the License, or (at your option) any later version. |
13 | * | 13 | * |
14 | * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec. | 14 | * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in nsec. |
15 | * Otherwise we measure cpu time in jiffies using the generic definitions. | 15 | * Otherwise we measure cpu time in jiffies using the generic definitions. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #ifndef __IA64_CPUTIME_H | 18 | #ifndef __IA64_CPUTIME_H |
19 | #define __IA64_CPUTIME_H | 19 | #define __IA64_CPUTIME_H |
20 | 20 | ||
21 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | 21 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
22 | #include <asm-generic/cputime.h> | 22 | # include <asm-generic/cputime.h> |
23 | #else | 23 | #else |
24 | 24 | # include <asm/processor.h> | |
25 | #include <linux/time.h> | 25 | # include <asm-generic/cputime_nsecs.h> |
26 | #include <linux/jiffies.h> | ||
27 | #include <asm/processor.h> | ||
28 | |||
29 | typedef u64 __nocast cputime_t; | ||
30 | typedef u64 __nocast cputime64_t; | ||
31 | |||
32 | #define cputime_one_jiffy jiffies_to_cputime(1) | ||
33 | |||
34 | /* | ||
35 | * Convert cputime <-> jiffies (HZ) | ||
36 | */ | ||
37 | #define cputime_to_jiffies(__ct) \ | ||
38 | ((__force u64)(__ct) / (NSEC_PER_SEC / HZ)) | ||
39 | #define jiffies_to_cputime(__jif) \ | ||
40 | (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ)) | ||
41 | #define cputime64_to_jiffies64(__ct) \ | ||
42 | ((__force u64)(__ct) / (NSEC_PER_SEC / HZ)) | ||
43 | #define jiffies64_to_cputime64(__jif) \ | ||
44 | (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ)) | ||
45 | |||
46 | /* | ||
47 | * Convert cputime <-> microseconds | ||
48 | */ | ||
49 | #define cputime_to_usecs(__ct) \ | ||
50 | ((__force u64)(__ct) / NSEC_PER_USEC) | ||
51 | #define usecs_to_cputime(__usecs) \ | ||
52 | (__force cputime_t)((__usecs) * NSEC_PER_USEC) | ||
53 | #define usecs_to_cputime64(__usecs) \ | ||
54 | (__force cputime64_t)((__usecs) * NSEC_PER_USEC) | ||
55 | |||
56 | /* | ||
57 | * Convert cputime <-> seconds | ||
58 | */ | ||
59 | #define cputime_to_secs(__ct) \ | ||
60 | ((__force u64)(__ct) / NSEC_PER_SEC) | ||
61 | #define secs_to_cputime(__secs) \ | ||
62 | (__force cputime_t)((__secs) * NSEC_PER_SEC) | ||
63 | |||
64 | /* | ||
65 | * Convert cputime <-> timespec (nsec) | ||
66 | */ | ||
67 | static inline cputime_t timespec_to_cputime(const struct timespec *val) | ||
68 | { | ||
69 | u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec; | ||
70 | return (__force cputime_t) ret; | ||
71 | } | ||
72 | static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) | ||
73 | { | ||
74 | val->tv_sec = (__force u64) ct / NSEC_PER_SEC; | ||
75 | val->tv_nsec = (__force u64) ct % NSEC_PER_SEC; | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * Convert cputime <-> timeval (msec) | ||
80 | */ | ||
81 | static inline cputime_t timeval_to_cputime(struct timeval *val) | ||
82 | { | ||
83 | u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC; | ||
84 | return (__force cputime_t) ret; | ||
85 | } | ||
86 | static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) | ||
87 | { | ||
88 | val->tv_sec = (__force u64) ct / NSEC_PER_SEC; | ||
89 | val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC; | ||
90 | } | ||
91 | |||
92 | /* | ||
93 | * Convert cputime <-> clock (USER_HZ) | ||
94 | */ | ||
95 | #define cputime_to_clock_t(__ct) \ | ||
96 | ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ)) | ||
97 | #define clock_t_to_cputime(__x) \ | ||
98 | (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ)) | ||
99 | |||
100 | /* | ||
101 | * Convert cputime64 to clock. | ||
102 | */ | ||
103 | #define cputime64_to_clock_t(__ct) \ | ||
104 | cputime_to_clock_t((__force cputime_t)__ct) | ||
105 | |||
106 | extern void arch_vtime_task_switch(struct task_struct *tsk); | 26 | extern void arch_vtime_task_switch(struct task_struct *tsk); |
27 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ | ||
107 | 28 | ||
108 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | ||
109 | #endif /* __IA64_CPUTIME_H */ | 29 | #endif /* __IA64_CPUTIME_H */ |
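Editor's note: the deleted conversion block is not lost — the new <asm-generic/cputime_nsecs.h> header supplies the same nanosecond-based cputime_t definitions generically, so ia64 and other nsec-accounting architectures can share them. A representative excerpt, reconstructed from the macros removed above (the shared header may differ in detail):

	/*
	 * Representative of <asm-generic/cputime_nsecs.h>: cputime_t
	 * stays a nanosecond count, so conversions are plain arithmetic.
	 */
	#include <linux/time.h>
	#include <linux/jiffies.h>

	typedef u64 __nocast cputime_t;

	#define cputime_one_jiffy		jiffies_to_cputime(1)
	#define cputime_to_jiffies(__ct)	((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
	#define jiffies_to_cputime(__jif)	(__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
	#define cputime_to_usecs(__ct)		((__force u64)(__ct) / NSEC_PER_USEC)
	#define usecs_to_cputime(__usecs)	(__force cputime_t)((__usecs) * NSEC_PER_USEC)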
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index ff2ae4136584..020d655ed082 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h | |||
@@ -31,7 +31,7 @@ struct thread_info { | |||
31 | mm_segment_t addr_limit; /* user-level address space limit */ | 31 | mm_segment_t addr_limit; /* user-level address space limit */ |
32 | int preempt_count; /* 0=preemptable, <0=BUG; will also serve as bh-counter */ | 32 | int preempt_count; /* 0=preemptable, <0=BUG; will also serve as bh-counter */ |
33 | struct restart_block restart_block; | 33 | struct restart_block restart_block; |
34 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 34 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
35 | __u64 ac_stamp; | 35 | __u64 ac_stamp; |
36 | __u64 ac_leave; | 36 | __u64 ac_leave; |
37 | __u64 ac_stime; | 37 | __u64 ac_stime; |
@@ -69,7 +69,7 @@ struct thread_info { | |||
69 | #define task_stack_page(tsk) ((void *)(tsk)) | 69 | #define task_stack_page(tsk) ((void *)(tsk)) |
70 | 70 | ||
71 | #define __HAVE_THREAD_FUNCTIONS | 71 | #define __HAVE_THREAD_FUNCTIONS |
72 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 72 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
73 | #define setup_thread_stack(p, org) \ | 73 | #define setup_thread_stack(p, org) \ |
74 | *task_thread_info(p) = *task_thread_info(org); \ | 74 | *task_thread_info(p) = *task_thread_info(org); \ |
75 | task_thread_info(p)->ac_stime = 0; \ | 75 | task_thread_info(p)->ac_stime = 0; \ |
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h index c57fa910f2c9..00cf03e0cb82 100644 --- a/arch/ia64/include/asm/xen/minstate.h +++ b/arch/ia64/include/asm/xen/minstate.h | |||
@@ -1,5 +1,5 @@ | |||
1 | 1 | ||
2 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 2 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
3 | /* read ar.itc in advance, and use it before leaving bank 0 */ | 3 | /* read ar.itc in advance, and use it before leaving bank 0 */ |
4 | #define XEN_ACCOUNT_GET_STAMP \ | 4 | #define XEN_ACCOUNT_GET_STAMP \ |
5 | MOV_FROM_ITC(pUStk, p6, r20, r2); | 5 | MOV_FROM_ITC(pUStk, p6, r20, r2); |
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index a48bd9a9927b..46c9e3007315 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c | |||
@@ -41,7 +41,7 @@ void foo(void) | |||
41 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); | 41 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); |
42 | DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); | 42 | DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); |
43 | DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); | 43 | DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); |
44 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 44 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
45 | DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp)); | 45 | DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp)); |
46 | DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave)); | 46 | DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave)); |
47 | DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime)); | 47 | DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime)); |
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 6bfd8429ee0f..7a53530f22c2 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S | |||
@@ -724,7 +724,7 @@ GLOBAL_ENTRY(__paravirt_leave_syscall) | |||
724 | #endif | 724 | #endif |
725 | .global __paravirt_work_processed_syscall; | 725 | .global __paravirt_work_processed_syscall; |
726 | __paravirt_work_processed_syscall: | 726 | __paravirt_work_processed_syscall: |
727 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 727 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
728 | adds r2=PT(LOADRS)+16,r12 | 728 | adds r2=PT(LOADRS)+16,r12 |
729 | MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave | 729 | MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave |
730 | adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 | 730 | adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 |
@@ -762,7 +762,7 @@ __paravirt_work_processed_syscall: | |||
762 | 762 | ||
763 | ld8 r29=[r2],16 // M0|1 load cr.ipsr | 763 | ld8 r29=[r2],16 // M0|1 load cr.ipsr |
764 | ld8 r28=[r3],16 // M0|1 load cr.iip | 764 | ld8 r28=[r3],16 // M0|1 load cr.iip |
765 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 765 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
766 | (pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13 | 766 | (pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13 |
767 | ;; | 767 | ;; |
768 | ld8 r30=[r2],16 // M0|1 load cr.ifs | 768 | ld8 r30=[r2],16 // M0|1 load cr.ifs |
@@ -793,7 +793,7 @@ __paravirt_work_processed_syscall: | |||
793 | ld8.fill r1=[r3],16 // M0|1 load r1 | 793 | ld8.fill r1=[r3],16 // M0|1 load r1 |
794 | (pUStk) mov r17=1 // A | 794 | (pUStk) mov r17=1 // A |
795 | ;; | 795 | ;; |
796 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 796 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
797 | (pUStk) st1 [r15]=r17 // M2|3 | 797 | (pUStk) st1 [r15]=r17 // M2|3 |
798 | #else | 798 | #else |
799 | (pUStk) st1 [r14]=r17 // M2|3 | 799 | (pUStk) st1 [r14]=r17 // M2|3 |
@@ -813,7 +813,7 @@ __paravirt_work_processed_syscall: | |||
813 | shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition | 813 | shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition |
814 | COVER // B add current frame into dirty partition & set cr.ifs | 814 | COVER // B add current frame into dirty partition & set cr.ifs |
815 | ;; | 815 | ;; |
816 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 816 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
817 | mov r19=ar.bsp // M2 get new backing store pointer | 817 | mov r19=ar.bsp // M2 get new backing store pointer |
818 | st8 [r14]=r22 // M save time at leave | 818 | st8 [r14]=r22 // M save time at leave |
819 | mov f10=f0 // F clear f10 | 819 | mov f10=f0 // F clear f10 |
@@ -948,7 +948,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel) | |||
948 | adds r16=PT(CR_IPSR)+16,r12 | 948 | adds r16=PT(CR_IPSR)+16,r12 |
949 | adds r17=PT(CR_IIP)+16,r12 | 949 | adds r17=PT(CR_IIP)+16,r12 |
950 | 950 | ||
951 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 951 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
952 | .pred.rel.mutex pUStk,pKStk | 952 | .pred.rel.mutex pUStk,pKStk |
953 | MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled | 953 | MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled |
954 | MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave | 954 | MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave |
@@ -981,7 +981,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel) | |||
981 | ;; | 981 | ;; |
982 | ld8.fill r12=[r16],16 | 982 | ld8.fill r12=[r16],16 |
983 | ld8.fill r13=[r17],16 | 983 | ld8.fill r13=[r17],16 |
984 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 984 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
985 | (pUStk) adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18 | 985 | (pUStk) adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18 |
986 | #else | 986 | #else |
987 | (pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 | 987 | (pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 |
@@ -989,7 +989,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel) | |||
989 | ;; | 989 | ;; |
990 | ld8 r20=[r16],16 // ar.fpsr | 990 | ld8 r20=[r16],16 // ar.fpsr |
991 | ld8.fill r15=[r17],16 | 991 | ld8.fill r15=[r17],16 |
992 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 992 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
993 | (pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 // deferred | 993 | (pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 // deferred |
994 | #endif | 994 | #endif |
995 | ;; | 995 | ;; |
@@ -997,7 +997,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel) | |||
997 | ld8.fill r2=[r17] | 997 | ld8.fill r2=[r17] |
998 | (pUStk) mov r17=1 | 998 | (pUStk) mov r17=1 |
999 | ;; | 999 | ;; |
1000 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 1000 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
1001 | // mmi_ : ld8 st1 shr;; mmi_ : st8 st1 shr;; | 1001 | // mmi_ : ld8 st1 shr;; mmi_ : st8 st1 shr;; |
1002 | // mib : mov add br -> mib : ld8 add br | 1002 | // mib : mov add br -> mib : ld8 add br |
1003 | // bbb_ : br nop cover;; mbb_ : mov br cover;; | 1003 | // bbb_ : br nop cover;; mbb_ : mov br cover;; |
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index e662f178b990..c4cd45d97749 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S | |||
@@ -529,7 +529,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down) | |||
529 | nop.i 0 | 529 | nop.i 0 |
530 | ;; | 530 | ;; |
531 | mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0 | 531 | mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0 |
532 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 532 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
533 | MOV_FROM_ITC(p0, p6, r30, r23) // M get cycle for accounting | 533 | MOV_FROM_ITC(p0, p6, r30, r23) // M get cycle for accounting |
534 | #else | 534 | #else |
535 | nop.m 0 | 535 | nop.m 0 |
@@ -555,7 +555,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down) | |||
555 | cmp.ne pKStk,pUStk=r0,r0 // A set pKStk <- 0, pUStk <- 1 | 555 | cmp.ne pKStk,pUStk=r0,r0 // A set pKStk <- 0, pUStk <- 1 |
556 | br.call.sptk.many b7=ia64_syscall_setup // B | 556 | br.call.sptk.many b7=ia64_syscall_setup // B |
557 | ;; | 557 | ;; |
558 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 558 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
559 | // mov.m r30=ar.itc is called in advance | 559 | // mov.m r30=ar.itc is called in advance |
560 | add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2 | 560 | add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2 |
561 | add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2 | 561 | add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2 |
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 4738ff7bd66a..9be4e497f3d3 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S | |||
@@ -1073,7 +1073,7 @@ END(ia64_native_sched_clock) | |||
1073 | sched_clock = ia64_native_sched_clock | 1073 | sched_clock = ia64_native_sched_clock |
1074 | #endif | 1074 | #endif |
1075 | 1075 | ||
1076 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 1076 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
1077 | GLOBAL_ENTRY(cycle_to_cputime) | 1077 | GLOBAL_ENTRY(cycle_to_cputime) |
1078 | alloc r16=ar.pfs,1,0,0,0 | 1078 | alloc r16=ar.pfs,1,0,0,0 |
1079 | addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 | 1079 | addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 |
@@ -1091,7 +1091,7 @@ GLOBAL_ENTRY(cycle_to_cputime) | |||
1091 | shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT | 1091 | shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT |
1092 | br.ret.sptk.many rp | 1092 | br.ret.sptk.many rp |
1093 | END(cycle_to_cputime) | 1093 | END(cycle_to_cputime) |
1094 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | 1094 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
1095 | 1095 | ||
1096 | #ifdef CONFIG_IA64_BRL_EMU | 1096 | #ifdef CONFIG_IA64_BRL_EMU |
1097 | 1097 | ||
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index fa25689fc453..689ffcaa284e 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S | |||
@@ -784,7 +784,7 @@ ENTRY(break_fault) | |||
784 | 784 | ||
785 | (p8) adds r28=16,r28 // A switch cr.iip to next bundle | 785 | (p8) adds r28=16,r28 // A switch cr.iip to next bundle |
786 | (p9) adds r8=1,r8 // A increment ei to next slot | 786 | (p9) adds r8=1,r8 // A increment ei to next slot |
787 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 787 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
788 | ;; | 788 | ;; |
789 | mov b6=r30 // I0 setup syscall handler branch reg early | 789 | mov b6=r30 // I0 setup syscall handler branch reg early |
790 | #else | 790 | #else |
@@ -801,7 +801,7 @@ ENTRY(break_fault) | |||
801 | // | 801 | // |
802 | /////////////////////////////////////////////////////////////////////// | 802 | /////////////////////////////////////////////////////////////////////// |
803 | st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag | 803 | st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag |
804 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 804 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
805 | MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting | 805 | MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting |
806 | #else | 806 | #else |
807 | mov b6=r30 // I0 setup syscall handler branch reg early | 807 | mov b6=r30 // I0 setup syscall handler branch reg early |
@@ -817,7 +817,7 @@ ENTRY(break_fault) | |||
817 | cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited? | 817 | cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited? |
818 | br.call.sptk.many b7=ia64_syscall_setup // B | 818 | br.call.sptk.many b7=ia64_syscall_setup // B |
819 | 1: | 819 | 1: |
820 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 820 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
821 | // mov.m r30=ar.itc is called in advance, and r13 is current | 821 | // mov.m r30=ar.itc is called in advance, and r13 is current |
822 | add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 // A | 822 | add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 // A |
823 | add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 // A | 823 | add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 // A |
@@ -1043,7 +1043,7 @@ END(ia64_syscall_setup) | |||
1043 | DBG_FAULT(16) | 1043 | DBG_FAULT(16) |
1044 | FAULT(16) | 1044 | FAULT(16) |
1045 | 1045 | ||
1046 | #if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE) | 1046 | #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE) |
1047 | /* | 1047 | /* |
1048 | * There is no particular reason for this code to be here, other than | 1048 | * There is no particular reason for this code to be here, other than |
1049 | * that there happens to be space here that would go unused otherwise. | 1049 | * that there happens to be space here that would go unused otherwise. |
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h index d56753a11636..cc82a7d744c9 100644 --- a/arch/ia64/kernel/minstate.h +++ b/arch/ia64/kernel/minstate.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include "entry.h" | 4 | #include "entry.h" |
5 | #include "paravirt_inst.h" | 5 | #include "paravirt_inst.h" |
6 | 6 | ||
7 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 7 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
8 | /* read ar.itc in advance, and use it before leaving bank 0 */ | 8 | /* read ar.itc in advance, and use it before leaving bank 0 */ |
9 | #define ACCOUNT_GET_STAMP \ | 9 | #define ACCOUNT_GET_STAMP \ |
10 | (pUStk) mov.m r20=ar.itc; | 10 | (pUStk) mov.m r20=ar.itc; |
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 31360cbbd5f8..e34f565f595a 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -57,8 +57,6 @@ void (*ia64_mark_idle)(int); | |||
57 | 57 | ||
58 | unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; | 58 | unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; |
59 | EXPORT_SYMBOL(boot_option_idle_override); | 59 | EXPORT_SYMBOL(boot_option_idle_override); |
60 | void (*pm_idle) (void); | ||
61 | EXPORT_SYMBOL(pm_idle); | ||
62 | void (*pm_power_off) (void); | 60 | void (*pm_power_off) (void); |
63 | EXPORT_SYMBOL(pm_power_off); | 61 | EXPORT_SYMBOL(pm_power_off); |
64 | 62 | ||
@@ -301,7 +299,6 @@ cpu_idle (void) | |||
301 | if (mark_idle) | 299 | if (mark_idle) |
302 | (*mark_idle)(1); | 300 | (*mark_idle)(1); |
303 | 301 | ||
304 | idle = pm_idle; | ||
305 | if (!idle) | 302 | if (!idle) |
306 | idle = default_idle; | 303 | idle = default_idle; |
307 | (*idle)(); | 304 | (*idle)(); |
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index aaefd9b94f2f..2029cc0d2fc6 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -1051,7 +1051,6 @@ cpu_init (void) | |||
1051 | max_num_phys_stacked = num_phys_stacked; | 1051 | max_num_phys_stacked = num_phys_stacked; |
1052 | } | 1052 | } |
1053 | platform_cpu_init(); | 1053 | platform_cpu_init(); |
1054 | pm_idle = default_idle; | ||
1055 | } | 1054 | } |
1056 | 1055 | ||
1057 | void __init | 1056 | void __init |
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 88a794536bc0..fbaac1afb844 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
@@ -77,7 +77,7 @@ static struct clocksource clocksource_itc = { | |||
77 | }; | 77 | }; |
78 | static struct clocksource *itc_clocksource; | 78 | static struct clocksource *itc_clocksource; |
79 | 79 | ||
80 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 80 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
81 | 81 | ||
82 | #include <linux/kernel_stat.h> | 82 | #include <linux/kernel_stat.h> |
83 | 83 | ||
@@ -136,13 +136,14 @@ void vtime_account_system(struct task_struct *tsk) | |||
136 | 136 | ||
137 | account_system_time(tsk, 0, delta, delta); | 137 | account_system_time(tsk, 0, delta, delta); |
138 | } | 138 | } |
139 | EXPORT_SYMBOL_GPL(vtime_account_system); | ||
139 | 140 | ||
140 | void vtime_account_idle(struct task_struct *tsk) | 141 | void vtime_account_idle(struct task_struct *tsk) |
141 | { | 142 | { |
142 | account_idle_time(vtime_delta(tsk)); | 143 | account_idle_time(vtime_delta(tsk)); |
143 | } | 144 | } |
144 | 145 | ||
145 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | 146 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
146 | 147 | ||
147 | static irqreturn_t | 148 | static irqreturn_t |
148 | timer_interrupt (int irq, void *dev_id) | 149 | timer_interrupt (int irq, void *dev_id) |
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c index 765d0f57c787..bde899e155d3 100644 --- a/arch/m32r/kernel/process.c +++ b/arch/m32r/kernel/process.c | |||
@@ -44,36 +44,10 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
44 | return tsk->thread.lr; | 44 | return tsk->thread.lr; |
45 | } | 45 | } |
46 | 46 | ||
47 | /* | ||
48 | * Powermanagement idle function, if any.. | ||
49 | */ | ||
50 | static void (*pm_idle)(void) = NULL; | ||
51 | |||
52 | void (*pm_power_off)(void) = NULL; | 47 | void (*pm_power_off)(void) = NULL; |
53 | EXPORT_SYMBOL(pm_power_off); | 48 | EXPORT_SYMBOL(pm_power_off); |
54 | 49 | ||
55 | /* | 50 | /* |
56 | * We use this is we don't have any better | ||
57 | * idle routine.. | ||
58 | */ | ||
59 | static void default_idle(void) | ||
60 | { | ||
61 | /* M32R_FIXME: Please use "cpu_sleep" mode. */ | ||
62 | cpu_relax(); | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * On SMP it's slightly faster (but much more power-consuming!) | ||
67 | * to poll the ->work.need_resched flag instead of waiting for the | ||
68 | * cross-CPU IPI to arrive. Use this option with caution. | ||
69 | */ | ||
70 | static void poll_idle (void) | ||
71 | { | ||
72 | /* M32R_FIXME */ | ||
73 | cpu_relax(); | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * The idle thread. There's no useful work to be | 51 | * The idle thread. There's no useful work to be |
78 | * done, so just try to conserve power and have a | 52 | * done, so just try to conserve power and have a |
79 | * low exit latency (ie sit in a loop waiting for | 53 | * low exit latency (ie sit in a loop waiting for |
@@ -84,14 +58,8 @@ void cpu_idle (void) | |||
84 | /* endless idle loop with no priority at all */ | 58 | /* endless idle loop with no priority at all */ |
85 | while (1) { | 59 | while (1) { |
86 | rcu_idle_enter(); | 60 | rcu_idle_enter(); |
87 | while (!need_resched()) { | 61 | while (!need_resched()) |
88 | void (*idle)(void) = pm_idle; | 62 | cpu_relax(); |
89 | |||
90 | if (!idle) | ||
91 | idle = default_idle; | ||
92 | |||
93 | idle(); | ||
94 | } | ||
95 | rcu_idle_exit(); | 63 | rcu_idle_exit(); |
96 | schedule_preempt_disabled(); | 64 | schedule_preempt_disabled(); |
97 | } | 65 | } |
@@ -120,21 +88,6 @@ void machine_power_off(void) | |||
120 | /* M32R_FIXME */ | 88 | /* M32R_FIXME */ |
121 | } | 89 | } |
122 | 90 | ||
123 | static int __init idle_setup (char *str) | ||
124 | { | ||
125 | if (!strncmp(str, "poll", 4)) { | ||
126 | printk("using poll in idle threads.\n"); | ||
127 | pm_idle = poll_idle; | ||
128 | } else if (!strncmp(str, "sleep", 4)) { | ||
129 | printk("using sleep in idle threads.\n"); | ||
130 | pm_idle = default_idle; | ||
131 | } | ||
132 | |||
133 | return 1; | ||
134 | } | ||
135 | |||
136 | __setup("idle=", idle_setup); | ||
137 | |||
138 | void show_regs(struct pt_regs * regs) | 91 | void show_regs(struct pt_regs * regs) |
139 | { | 92 | { |
140 | printk("\n"); | 93 | printk("\n"); |
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h index 292805f0762e..05aa53594d49 100644 --- a/arch/m68k/include/asm/dma-mapping.h +++ b/arch/m68k/include/asm/dma-mapping.h | |||
@@ -5,7 +5,6 @@ | |||
5 | 5 | ||
6 | struct scatterlist; | 6 | struct scatterlist; |
7 | 7 | ||
8 | #ifndef CONFIG_MMU_SUN3 | ||
9 | static inline int dma_supported(struct device *dev, u64 mask) | 8 | static inline int dma_supported(struct device *dev, u64 mask) |
10 | { | 9 | { |
11 | return 1; | 10 | return 1; |
@@ -111,10 +110,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle) | |||
111 | return 0; | 110 | return 0; |
112 | } | 111 | } |
113 | 112 | ||
114 | #else | ||
115 | #include <asm-generic/dma-mapping-broken.h> | ||
116 | #endif | ||
117 | |||
118 | /* drivers/base/dma-mapping.c */ | 113 | /* drivers/base/dma-mapping.c */ |
119 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, | 114 | extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, |
120 | void *cpu_addr, dma_addr_t dma_addr, size_t size); | 115 | void *cpu_addr, dma_addr_t dma_addr, size_t size); |
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile index 068ad49210d6..655347d80780 100644 --- a/arch/m68k/kernel/Makefile +++ b/arch/m68k/kernel/Makefile | |||
@@ -20,7 +20,5 @@ obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o | |||
20 | obj-$(CONFIG_MMU_SUN3) += ints.o vectors.o | 20 | obj-$(CONFIG_MMU_SUN3) += ints.o vectors.o |
21 | obj-$(CONFIG_PCI) += pcibios.o | 21 | obj-$(CONFIG_PCI) += pcibios.o |
22 | 22 | ||
23 | ifndef CONFIG_MMU_SUN3 | 23 | obj-$(CONFIG_HAS_DMA) += dma.o |
24 | obj-y += dma.o | ||
25 | endif | ||
26 | 24 | ||
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index a5b74f729e5b..6ff2dcff3410 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c | |||
@@ -41,7 +41,6 @@ void show_regs(struct pt_regs *regs) | |||
41 | regs->msr, regs->ear, regs->esr, regs->fsr); | 41 | regs->msr, regs->ear, regs->esr, regs->fsr); |
42 | } | 42 | } |
43 | 43 | ||
44 | void (*pm_idle)(void); | ||
45 | void (*pm_power_off)(void) = NULL; | 44 | void (*pm_power_off)(void) = NULL; |
46 | EXPORT_SYMBOL(pm_power_off); | 45 | EXPORT_SYMBOL(pm_power_off); |
47 | 46 | ||
@@ -98,8 +97,6 @@ void cpu_idle(void) | |||
98 | 97 | ||
99 | /* endless idle loop with no priority at all */ | 98 | /* endless idle loop with no priority at all */ |
100 | while (1) { | 99 | while (1) { |
101 | void (*idle)(void) = pm_idle; | ||
102 | |||
103 | if (!idle) | 100 | if (!idle) |
104 | idle = default_idle; | 101 | idle = default_idle; |
105 | 102 | ||
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 2ac626ab9d43..9becc44d9d7a 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -4,7 +4,6 @@ config MIPS | |||
4 | select HAVE_GENERIC_DMA_COHERENT | 4 | select HAVE_GENERIC_DMA_COHERENT |
5 | select HAVE_IDE | 5 | select HAVE_IDE |
6 | select HAVE_OPROFILE | 6 | select HAVE_OPROFILE |
7 | select HAVE_IRQ_WORK | ||
8 | select HAVE_PERF_EVENTS | 7 | select HAVE_PERF_EVENTS |
9 | select PERF_USE_VMALLOC | 8 | select PERF_USE_VMALLOC |
10 | select HAVE_ARCH_KGDB | 9 | select HAVE_ARCH_KGDB |
@@ -2161,7 +2160,6 @@ source "mm/Kconfig" | |||
2161 | config SMP | 2160 | config SMP |
2162 | bool "Multi-Processing support" | 2161 | bool "Multi-Processing support" |
2163 | depends on SYS_SUPPORTS_SMP | 2162 | depends on SYS_SUPPORTS_SMP |
2164 | select IRQ_PER_CPU | ||
2165 | select USE_GENERIC_SMP_HELPERS | 2163 | select USE_GENERIC_SMP_HELPERS |
2166 | help | 2164 | help |
2167 | This enables support for systems with more than one CPU. If you have | 2165 | This enables support for systems with more than one CPU. If you have |
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index eb09f5a552ff..84f4e97e3074 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c | |||
@@ -37,12 +37,6 @@ | |||
37 | #include "internal.h" | 37 | #include "internal.h" |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * power management idle function, if any.. | ||
41 | */ | ||
42 | void (*pm_idle)(void); | ||
43 | EXPORT_SYMBOL(pm_idle); | ||
44 | |||
45 | /* | ||
46 | * return saved PC of a blocked thread. | 40 | * return saved PC of a blocked thread. |
47 | */ | 41 | */ |
48 | unsigned long thread_saved_pc(struct task_struct *tsk) | 42 | unsigned long thread_saved_pc(struct task_struct *tsk) |
@@ -113,7 +107,6 @@ void cpu_idle(void) | |||
113 | void (*idle)(void); | 107 | void (*idle)(void); |
114 | 108 | ||
115 | smp_rmb(); | 109 | smp_rmb(); |
116 | idle = pm_idle; | ||
117 | if (!idle) { | 110 | if (!idle) { |
118 | #if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU) | 111 | #if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU) |
119 | idle = poll_idle; | 112 | idle = poll_idle; |
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c index 7d618feb1b72..5e8a3b6d6bc6 100644 --- a/arch/openrisc/kernel/idle.c +++ b/arch/openrisc/kernel/idle.c | |||
@@ -39,11 +39,6 @@ | |||
39 | 39 | ||
40 | void (*powersave) (void) = NULL; | 40 | void (*powersave) (void) = NULL; |
41 | 41 | ||
42 | static inline void pm_idle(void) | ||
43 | { | ||
44 | barrier(); | ||
45 | } | ||
46 | |||
47 | void cpu_idle(void) | 42 | void cpu_idle(void) |
48 | { | 43 | { |
49 | set_thread_flag(TIF_POLLING_NRFLAG); | 44 | set_thread_flag(TIF_POLLING_NRFLAG); |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index b77feffbadea..a32e34ecda9e 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -9,14 +9,12 @@ config PARISC | |||
9 | select RTC_DRV_GENERIC | 9 | select RTC_DRV_GENERIC |
10 | select INIT_ALL_POSSIBLE | 10 | select INIT_ALL_POSSIBLE |
11 | select BUG | 11 | select BUG |
12 | select HAVE_IRQ_WORK | ||
13 | select HAVE_PERF_EVENTS | 12 | select HAVE_PERF_EVENTS |
14 | select GENERIC_ATOMIC64 if !64BIT | 13 | select GENERIC_ATOMIC64 if !64BIT |
15 | select HAVE_GENERIC_HARDIRQS | 14 | select HAVE_GENERIC_HARDIRQS |
16 | select BROKEN_RODATA | 15 | select BROKEN_RODATA |
17 | select GENERIC_IRQ_PROBE | 16 | select GENERIC_IRQ_PROBE |
18 | select GENERIC_PCI_IOMAP | 17 | select GENERIC_PCI_IOMAP |
19 | select IRQ_PER_CPU | ||
20 | select ARCH_HAVE_NMI_SAFE_CMPXCHG | 18 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
21 | select GENERIC_SMP_IDLE_THREAD | 19 | select GENERIC_SMP_IDLE_THREAD |
22 | select GENERIC_STRNCPY_FROM_USER | 20 | select GENERIC_STRNCPY_FROM_USER |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 17903f1f356b..561ccca7b1a7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -118,14 +118,12 @@ config PPC | |||
118 | select HAVE_SYSCALL_WRAPPERS if PPC64 | 118 | select HAVE_SYSCALL_WRAPPERS if PPC64 |
119 | select GENERIC_ATOMIC64 if PPC32 | 119 | select GENERIC_ATOMIC64 if PPC32 |
120 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | 120 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
121 | select HAVE_IRQ_WORK | ||
122 | select HAVE_PERF_EVENTS | 121 | select HAVE_PERF_EVENTS |
123 | select HAVE_REGS_AND_STACK_ACCESS_API | 122 | select HAVE_REGS_AND_STACK_ACCESS_API |
124 | select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 | 123 | select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 |
125 | select HAVE_GENERIC_HARDIRQS | 124 | select HAVE_GENERIC_HARDIRQS |
126 | select ARCH_WANT_IPC_PARSE_VERSION | 125 | select ARCH_WANT_IPC_PARSE_VERSION |
127 | select SPARSE_IRQ | 126 | select SPARSE_IRQ |
128 | select IRQ_PER_CPU | ||
129 | select IRQ_DOMAIN | 127 | select IRQ_DOMAIN |
130 | select GENERIC_IRQ_SHOW | 128 | select GENERIC_IRQ_SHOW |
131 | select GENERIC_IRQ_SHOW_LEVEL | 129 | select GENERIC_IRQ_SHOW_LEVEL |
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig index 29bb11ec6c64..4f35fc462385 100644 --- a/arch/powerpc/configs/chroma_defconfig +++ b/arch/powerpc/configs/chroma_defconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | CONFIG_PPC64=y | 1 | CONFIG_PPC64=y |
2 | CONFIG_PPC_BOOK3E_64=y | 2 | CONFIG_PPC_BOOK3E_64=y |
3 | # CONFIG_VIRT_CPU_ACCOUNTING is not set | 3 | # CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set |
4 | CONFIG_SMP=y | 4 | CONFIG_SMP=y |
5 | CONFIG_NR_CPUS=256 | 5 | CONFIG_NR_CPUS=256 |
6 | CONFIG_EXPERIMENTAL=y | 6 | CONFIG_EXPERIMENTAL=y |
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig index 88fa5c46f66f..f7df8362911f 100644 --- a/arch/powerpc/configs/corenet64_smp_defconfig +++ b/arch/powerpc/configs/corenet64_smp_defconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | CONFIG_PPC64=y | 1 | CONFIG_PPC64=y |
2 | CONFIG_PPC_BOOK3E_64=y | 2 | CONFIG_PPC_BOOK3E_64=y |
3 | # CONFIG_VIRT_CPU_ACCOUNTING is not set | 3 | # CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set |
4 | CONFIG_SMP=y | 4 | CONFIG_SMP=y |
5 | CONFIG_NR_CPUS=2 | 5 | CONFIG_NR_CPUS=2 |
6 | CONFIG_EXPERIMENTAL=y | 6 | CONFIG_EXPERIMENTAL=y |
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index 840a2c2d0430..bcedeea0df89 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | CONFIG_PPC64=y | 1 | CONFIG_PPC64=y |
2 | CONFIG_ALTIVEC=y | 2 | CONFIG_ALTIVEC=y |
3 | # CONFIG_VIRT_CPU_ACCOUNTING is not set | 3 | # CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set |
4 | CONFIG_SMP=y | 4 | CONFIG_SMP=y |
5 | CONFIG_NR_CPUS=2 | 5 | CONFIG_NR_CPUS=2 |
6 | CONFIG_EXPERIMENTAL=y | 6 | CONFIG_EXPERIMENTAL=y |
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 483733bd06d4..607559ab271f 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h | |||
@@ -8,7 +8,7 @@ | |||
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | * | 10 | * |
11 | * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in | 11 | * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in |
12 | * the same units as the timebase. Otherwise we measure cpu time | 12 | * the same units as the timebase. Otherwise we measure cpu time |
13 | * in jiffies using the generic definitions. | 13 | * in jiffies using the generic definitions. |
14 | */ | 14 | */ |
@@ -16,7 +16,7 @@ | |||
16 | #ifndef __POWERPC_CPUTIME_H | 16 | #ifndef __POWERPC_CPUTIME_H |
17 | #define __POWERPC_CPUTIME_H | 17 | #define __POWERPC_CPUTIME_H |
18 | 18 | ||
19 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | 19 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
20 | #include <asm-generic/cputime.h> | 20 | #include <asm-generic/cputime.h> |
21 | #ifdef __KERNEL__ | 21 | #ifdef __KERNEL__ |
22 | static inline void setup_cputime_one_jiffy(void) { } | 22 | static inline void setup_cputime_one_jiffy(void) { } |
@@ -231,5 +231,5 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk) | |||
231 | static inline void arch_vtime_task_switch(struct task_struct *tsk) { } | 231 | static inline void arch_vtime_task_switch(struct task_struct *tsk) { } |
232 | 232 | ||
233 | #endif /* __KERNEL__ */ | 233 | #endif /* __KERNEL__ */ |
234 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | 234 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
235 | #endif /* __POWERPC_CPUTIME_H */ | 235 | #endif /* __POWERPC_CPUTIME_H */ |
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 531fe0c3108f..b1e7f2af1016 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h | |||
@@ -145,7 +145,7 @@ struct dtl_entry { | |||
145 | extern struct kmem_cache *dtl_cache; | 145 | extern struct kmem_cache *dtl_cache; |
146 | 146 | ||
147 | /* | 147 | /* |
148 | * When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls | 148 | * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls |
149 | * reading from the dispatch trace log. If other code wants to consume | 149 | * reading from the dispatch trace log. If other code wants to consume |
150 | * DTL entries, it can set this pointer to a function that will get | 150 | * DTL entries, it can set this pointer to a function that will get |
151 | * called once for each DTL entry that gets processed. | 151 | * called once for each DTL entry that gets processed. |
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 9710be3a2d17..136bba62efa4 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <asm/hw_irq.h> | 13 | #include <asm/hw_irq.h> |
14 | #include <linux/device.h> | ||
14 | 15 | ||
15 | #define MAX_HWEVENTS 8 | 16 | #define MAX_HWEVENTS 8 |
16 | #define MAX_EVENT_ALTERNATIVES 8 | 17 | #define MAX_EVENT_ALTERNATIVES 8 |
@@ -35,6 +36,7 @@ struct power_pmu { | |||
35 | void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); | 36 | void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); |
36 | int (*limited_pmc_event)(u64 event_id); | 37 | int (*limited_pmc_event)(u64 event_id); |
37 | u32 flags; | 38 | u32 flags; |
39 | const struct attribute_group **attr_groups; | ||
38 | int n_generic; | 40 | int n_generic; |
39 | int *generic_events; | 41 | int *generic_events; |
40 | int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] | 42 | int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] |
@@ -109,3 +111,27 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs); | |||
109 | * If an event_id is not subject to the constraint expressed by a particular | 111 | * If an event_id is not subject to the constraint expressed by a particular |
110 | * field, then it will have 0 in both the mask and value for that field. | 112 | * field, then it will have 0 in both the mask and value for that field. |
111 | */ | 113 | */ |
114 | |||
115 | extern ssize_t power_events_sysfs_show(struct device *dev, | ||
116 | struct device_attribute *attr, char *page); | ||
117 | |||
118 | /* | ||
119 | * EVENT_VAR() is the same as PMU_EVENT_VAR with a suffix. | ||
120 | * | ||
121 | * Having a suffix allows us to have aliases in sysfs - eg: the generic | ||
122 | * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and | ||
123 | * 'PM_CYC' where the latter is the name by which the event is known in | ||
124 | * POWER CPU specification. | ||
125 | */ | ||
126 | #define EVENT_VAR(_id, _suffix) event_attr_##_id##_suffix | ||
127 | #define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr | ||
128 | |||
129 | #define EVENT_ATTR(_name, _id, _suffix) \ | ||
130 | PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id, \ | ||
131 | power_events_sysfs_show) | ||
132 | |||
133 | #define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g) | ||
134 | #define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g) | ||
135 | |||
136 | #define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(PM_##_name, _id, _p) | ||
137 | #define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p) | ||
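Editor's note: concretely, GENERIC_EVENT_ATTR(cpu-cycles, CYC) expands to roughly the following, assuming the PMU_EVENT_ATTR() helper from <linux/perf_event.h>; shown for illustration only, with PME_PM_CYC supplied per PMU model as in the power7 hunk later in this series:

	/*
	 * Rough expansion sketch of GENERIC_EVENT_ATTR(cpu-cycles, CYC);
	 * the "_g" suffix gives the generic alias its own variable name.
	 */
	static struct perf_pmu_events_attr event_attr_CYC_g = {
		.attr = __ATTR(cpu-cycles, 0444, power_events_sysfs_show, NULL),
		.id   = PME_PM_CYC,
	};
	/* GENERIC_EVENT_PTR(CYC) then yields &event_attr_CYC_g.attr.attr */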
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index ea2a86e8ff95..2d0e1f5d8339 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h | |||
@@ -24,7 +24,7 @@ | |||
24 | * user_time and system_time fields in the paca. | 24 | * user_time and system_time fields in the paca. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | 27 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
28 | #define ACCOUNT_CPU_USER_ENTRY(ra, rb) | 28 | #define ACCOUNT_CPU_USER_ENTRY(ra, rb) |
29 | #define ACCOUNT_CPU_USER_EXIT(ra, rb) | 29 | #define ACCOUNT_CPU_USER_EXIT(ra, rb) |
30 | #define ACCOUNT_STOLEN_TIME | 30 | #define ACCOUNT_STOLEN_TIME |
@@ -70,7 +70,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | |||
70 | 70 | ||
71 | #endif /* CONFIG_PPC_SPLPAR */ | 71 | #endif /* CONFIG_PPC_SPLPAR */ |
72 | 72 | ||
73 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | 73 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
74 | 74 | ||
75 | /* | 75 | /* |
76 | * Macros for storing registers into and loading registers from | 76 | * Macros for storing registers into and loading registers from |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 3d990d3bd8ba..ac057013f9fd 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -94,7 +94,7 @@ system_call_common: | |||
94 | addi r9,r1,STACK_FRAME_OVERHEAD | 94 | addi r9,r1,STACK_FRAME_OVERHEAD |
95 | ld r11,exception_marker@toc(r2) | 95 | ld r11,exception_marker@toc(r2) |
96 | std r11,-16(r9) /* "regshere" marker */ | 96 | std r11,-16(r9) /* "regshere" marker */ |
97 | #if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR) | 97 | #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR) |
98 | BEGIN_FW_FTR_SECTION | 98 | BEGIN_FW_FTR_SECTION |
99 | beq 33f | 99 | beq 33f |
100 | /* if from user, see if there are any DTL entries to process */ | 100 | /* if from user, see if there are any DTL entries to process */ |
@@ -110,7 +110,7 @@ BEGIN_FW_FTR_SECTION | |||
110 | addi r9,r1,STACK_FRAME_OVERHEAD | 110 | addi r9,r1,STACK_FRAME_OVERHEAD |
111 | 33: | 111 | 33: |
112 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | 112 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) |
113 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */ | 113 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */ |
114 | 114 | ||
115 | /* | 115 | /* |
116 | * A syscall should always be called with interrupts enabled | 116 | * A syscall should always be called with interrupts enabled |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 127361e093f4..f77fa22754bc 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -143,7 +143,7 @@ EXPORT_SYMBOL_GPL(ppc_proc_freq); | |||
143 | unsigned long ppc_tb_freq; | 143 | unsigned long ppc_tb_freq; |
144 | EXPORT_SYMBOL_GPL(ppc_tb_freq); | 144 | EXPORT_SYMBOL_GPL(ppc_tb_freq); |
145 | 145 | ||
146 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 146 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
147 | /* | 147 | /* |
148 | * Factors for converting from cputime_t (timebase ticks) to | 148 | * Factors for converting from cputime_t (timebase ticks) to |
149 | * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds). | 149 | * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds). |
@@ -347,6 +347,7 @@ void vtime_account_system(struct task_struct *tsk) | |||
347 | if (stolen) | 347 | if (stolen) |
348 | account_steal_time(stolen); | 348 | account_steal_time(stolen); |
349 | } | 349 | } |
350 | EXPORT_SYMBOL_GPL(vtime_account_system); | ||
350 | 351 | ||
351 | void vtime_account_idle(struct task_struct *tsk) | 352 | void vtime_account_idle(struct task_struct *tsk) |
352 | { | 353 | { |
@@ -377,7 +378,7 @@ void vtime_account_user(struct task_struct *tsk) | |||
377 | account_user_time(tsk, utime, utimescaled); | 378 | account_user_time(tsk, utime, utimescaled); |
378 | } | 379 | } |
379 | 380 | ||
380 | #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ | 381 | #else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
381 | #define calc_cputime_factors() | 382 | #define calc_cputime_factors() |
382 | #endif | 383 | #endif |
383 | 384 | ||
@@ -668,7 +669,7 @@ int update_persistent_clock(struct timespec now) | |||
668 | struct rtc_time tm; | 669 | struct rtc_time tm; |
669 | 670 | ||
670 | if (!ppc_md.set_rtc_time) | 671 | if (!ppc_md.set_rtc_time) |
671 | return 0; | 672 | return -ENODEV; |
672 | 673 | ||
673 | to_tm(now.tv_sec + 1 + timezone_offset, &tm); | 674 | to_tm(now.tv_sec + 1 + timezone_offset, &tm); |
674 | tm.tm_year -= 1900; | 675 | tm.tm_year -= 1900; |
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index aa2465e21f1a..fa476d50791f 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c | |||
@@ -1305,6 +1305,16 @@ static int power_pmu_event_idx(struct perf_event *event) | |||
1305 | return event->hw.idx; | 1305 | return event->hw.idx; |
1306 | } | 1306 | } |
1307 | 1307 | ||
1308 | ssize_t power_events_sysfs_show(struct device *dev, | ||
1309 | struct device_attribute *attr, char *page) | ||
1310 | { | ||
1311 | struct perf_pmu_events_attr *pmu_attr; | ||
1312 | |||
1313 | pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); | ||
1314 | |||
1315 | return sprintf(page, "event=0x%02llx\n", pmu_attr->id); | ||
1316 | } | ||
1317 | |||
1308 | struct pmu power_pmu = { | 1318 | struct pmu power_pmu = { |
1309 | .pmu_enable = power_pmu_enable, | 1319 | .pmu_enable = power_pmu_enable, |
1310 | .pmu_disable = power_pmu_disable, | 1320 | .pmu_disable = power_pmu_disable, |
@@ -1537,6 +1547,8 @@ int __cpuinit register_power_pmu(struct power_pmu *pmu) | |||
1537 | pr_info("%s performance monitor hardware support registered\n", | 1547 | pr_info("%s performance monitor hardware support registered\n", |
1538 | pmu->name); | 1548 | pmu->name); |
1539 | 1549 | ||
1550 | power_pmu.attr_groups = ppmu->attr_groups; | ||
1551 | |||
1540 | #ifdef MSR_HV | 1552 | #ifdef MSR_HV |
1541 | /* | 1553 | /* |
1542 | * Use FCHV to ignore kernel events if MSR.HV is set. | 1554 | * Use FCHV to ignore kernel events if MSR.HV is set. |
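Editor's note: register_power_pmu() now forwards the per-model attr_groups into the shared power_pmu, which the perf core exports under /sys/bus/event_source/devices/cpu. A hedged sketch of the array a PMU model supplies — the group wrapper here is illustrative, though the attrs array mirrors the power7 hunk below. Reading a file in the resulting "events" directory goes through power_events_sysfs_show() above and yields e.g. "event=0x1e".

	/*
	 * Sketch: the attribute groups a PMU model hands over via
	 * .attr_groups; the "events" name follows the perf sysfs layout.
	 */
	static struct attribute_group power7_pmu_events_group = {
		.name  = "events",
		.attrs = power7_events_attr,
	};

	static const struct attribute_group *power7_pmu_attr_groups[] = {
		&power7_pmu_events_group,
		NULL,
	};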
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index 2ee01e38d5e2..b554879bd31e 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c | |||
@@ -51,6 +51,18 @@ | |||
51 | #define MMCR1_PMCSEL_MSK 0xff | 51 | #define MMCR1_PMCSEL_MSK 0xff |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * Power7 event codes. | ||
55 | */ | ||
56 | #define PME_PM_CYC 0x1e | ||
57 | #define PME_PM_GCT_NOSLOT_CYC 0x100f8 | ||
58 | #define PME_PM_CMPLU_STALL 0x4000a | ||
59 | #define PME_PM_INST_CMPL 0x2 | ||
60 | #define PME_PM_LD_REF_L1 0xc880 | ||
61 | #define PME_PM_LD_MISS_L1 0x400f0 | ||
62 | #define PME_PM_BRU_FIN 0x10068 | ||
63 | #define PME_PM_BRU_MPRED 0x400f6 | ||
64 | |||
65 | /* | ||
54 | * Layout of constraint bits: | 66 | * Layout of constraint bits: |
55 | * 6666555555555544444444443333333333222222222211111111110000000000 | 67 | * 6666555555555544444444443333333333222222222211111111110000000000 |
56 | * 3210987654321098765432109876543210987654321098765432109876543210 | 68 | * 3210987654321098765432109876543210987654321098765432109876543210 |
@@ -307,14 +319,14 @@ static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[]) | |||
307 | } | 319 | } |
308 | 320 | ||
309 | static int power7_generic_events[] = { | 321 | static int power7_generic_events[] = { |
310 | [PERF_COUNT_HW_CPU_CYCLES] = 0x1e, | 322 | [PERF_COUNT_HW_CPU_CYCLES] = PME_PM_CYC, |
311 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x100f8, /* GCT_NOSLOT_CYC */ | 323 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PME_PM_GCT_NOSLOT_CYC, |
312 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x4000a, /* CMPLU_STALL */ | 324 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PME_PM_CMPLU_STALL, |
313 | [PERF_COUNT_HW_INSTRUCTIONS] = 2, | 325 | [PERF_COUNT_HW_INSTRUCTIONS] = PME_PM_INST_CMPL, |
314 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU*/ | 326 | [PERF_COUNT_HW_CACHE_REFERENCES] = PME_PM_LD_REF_L1, |
315 | [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */ | 327 | [PERF_COUNT_HW_CACHE_MISSES] = PME_PM_LD_MISS_L1, |
316 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */ | 328 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PME_PM_BRU_FIN, |
317 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */ | 329 | [PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BRU_MPRED, |
318 | }; | 330 | }; |
319 | 331 | ||
320 | #define C(x) PERF_COUNT_HW_CACHE_##x | 332 | #define C(x) PERF_COUNT_HW_CACHE_##x |
@@ -362,6 +374,57 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | |||
362 | }, | 374 | }, |
363 | }; | 375 | }; |
364 | 376 | ||
377 | |||
378 | GENERIC_EVENT_ATTR(cpu-cycles, CYC); | ||
379 | GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC); | ||
380 | GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL); | ||
381 | GENERIC_EVENT_ATTR(instructions, INST_CMPL); | ||
382 | GENERIC_EVENT_ATTR(cache-references, LD_REF_L1); | ||
383 | GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1); | ||
384 | GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN); | ||
385 | GENERIC_EVENT_ATTR(branch-misses, BRU_MPRED); | ||
386 | |||
387 | POWER_EVENT_ATTR(CYC, CYC); | ||
388 | POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC); | ||
389 | POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL); | ||
390 | POWER_EVENT_ATTR(INST_CMPL, INST_CMPL); | ||
391 | POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1); | ||
392 | POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1); | ||
393 | POWER_EVENT_ATTR(BRU_FIN, BRU_FIN); | ||
394 | POWER_EVENT_ATTR(BRU_MPRED, BRU_MPRED); | ||
395 | |||
396 | static struct attribute *power7_events_attr[] = { | ||
397 | GENERIC_EVENT_PTR(CYC), | ||
398 | GENERIC_EVENT_PTR(GCT_NOSLOT_CYC), | ||
399 | GENERIC_EVENT_PTR(CMPLU_STALL), | ||
400 | GENERIC_EVENT_PTR(INST_CMPL), | ||
401 | GENERIC_EVENT_PTR(LD_REF_L1), | ||
402 | GENERIC_EVENT_PTR(LD_MISS_L1), | ||
403 | GENERIC_EVENT_PTR(BRU_FIN), | ||
404 | GENERIC_EVENT_PTR(BRU_MPRED), | ||
405 | |||
406 | POWER_EVENT_PTR(CYC), | ||
407 | POWER_EVENT_PTR(GCT_NOSLOT_CYC), | ||
408 | POWER_EVENT_PTR(CMPLU_STALL), | ||
409 | POWER_EVENT_PTR(INST_CMPL), | ||
410 | POWER_EVENT_PTR(LD_REF_L1), | ||
411 | POWER_EVENT_PTR(LD_MISS_L1), | ||
412 | POWER_EVENT_PTR(BRU_FIN), | ||
413 | POWER_EVENT_PTR(BRU_MPRED), | ||
414 | NULL | ||
415 | }; | ||
416 | |||
417 | |||
418 | static struct attribute_group power7_pmu_events_group = { | ||
419 | .name = "events", | ||
420 | .attrs = power7_events_attr, | ||
421 | }; | ||
422 | |||
423 | static const struct attribute_group *power7_pmu_attr_groups[] = { | ||
424 | &power7_pmu_events_group, | ||
425 | NULL, | ||
426 | }; | ||
427 | |||
365 | static struct power_pmu power7_pmu = { | 428 | static struct power_pmu power7_pmu = { |
366 | .name = "POWER7", | 429 | .name = "POWER7", |
367 | .n_counter = 6, | 430 | .n_counter = 6, |
@@ -373,6 +436,7 @@ static struct power_pmu power7_pmu = { | |||
373 | .get_alternatives = power7_get_alternatives, | 436 | .get_alternatives = power7_get_alternatives, |
374 | .disable_pmc = power7_disable_pmc, | 437 | .disable_pmc = power7_disable_pmc, |
375 | .flags = PPMU_ALT_SIPR, | 438 | .flags = PPMU_ALT_SIPR, |
439 | .attr_groups = power7_pmu_attr_groups, | ||
376 | .n_generic = ARRAY_SIZE(power7_generic_events), | 440 | .n_generic = ARRAY_SIZE(power7_generic_events), |
377 | .generic_events = power7_generic_events, | 441 | .generic_events = power7_generic_events, |
378 | .cache_events = &power7_cache_events, | 442 | .cache_events = &power7_cache_events, |
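
The GENERIC_EVENT_ATTR()/POWER_EVENT_ATTR() and *_PTR() macros are defined in headers outside these hunks, so the following is only a hedged guess at one expansion, modeled on the kernel's PMU_EVENT_ATTR() helper; the real macros may differ:

    /* Assumed expansion of GENERIC_EVENT_ATTR(cpu-cycles, CYC) */
    static struct perf_pmu_events_attr event_attr_CYC = {
            .attr = __ATTR(cpu-cycles, 0444, power_events_sysfs_show, NULL),
            .id   = PME_PM_CYC,
    };
    /* GENERIC_EVENT_PTR(CYC) would then yield &event_attr_CYC.attr.attr */
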
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 25db92a8e1cf..49318385d4fa 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c | |||
@@ -24,6 +24,7 @@ | |||
24 | 24 | ||
25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | #include <linux/sched/rt.h> | ||
27 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
28 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index a7648543c59e..0cc0ac07a55d 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c | |||
@@ -57,7 +57,7 @@ static u8 dtl_event_mask = 0x7; | |||
57 | */ | 57 | */ |
58 | static int dtl_buf_entries = N_DISPATCH_LOG; | 58 | static int dtl_buf_entries = N_DISPATCH_LOG; |
59 | 59 | ||
60 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 60 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
61 | struct dtl_ring { | 61 | struct dtl_ring { |
62 | u64 write_index; | 62 | u64 write_index; |
63 | struct dtl_entry *write_ptr; | 63 | struct dtl_entry *write_ptr; |
@@ -142,7 +142,7 @@ static u64 dtl_current_index(struct dtl *dtl) | |||
142 | return per_cpu(dtl_rings, dtl->cpu).write_index; | 142 | return per_cpu(dtl_rings, dtl->cpu).write_index; |
143 | } | 143 | } |
144 | 144 | ||
145 | #else /* CONFIG_VIRT_CPU_ACCOUNTING */ | 145 | #else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
146 | 146 | ||
147 | static int dtl_start(struct dtl *dtl) | 147 | static int dtl_start(struct dtl *dtl) |
148 | { | 148 | { |
@@ -188,7 +188,7 @@ static u64 dtl_current_index(struct dtl *dtl) | |||
188 | { | 188 | { |
189 | return lppaca_of(dtl->cpu).dtl_idx; | 189 | return lppaca_of(dtl->cpu).dtl_idx; |
190 | } | 190 | } |
191 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | 191 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
192 | 192 | ||
193 | static int dtl_enable(struct dtl *dtl) | 193 | static int dtl_enable(struct dtl *dtl) |
194 | { | 194 | { |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index ca55882465d6..527e12c9573b 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -281,7 +281,7 @@ static struct notifier_block pci_dn_reconfig_nb = { | |||
281 | 281 | ||
282 | struct kmem_cache *dtl_cache; | 282 | struct kmem_cache *dtl_cache; |
283 | 283 | ||
284 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 284 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
285 | /* | 285 | /* |
286 | * Allocate space for the dispatch trace log for all possible cpus | 286 | * Allocate space for the dispatch trace log for all possible cpus |
287 | * and register the buffers with the hypervisor. This is used for | 287 | * and register the buffers with the hypervisor. This is used for |
@@ -332,12 +332,12 @@ static int alloc_dispatch_logs(void) | |||
332 | 332 | ||
333 | return 0; | 333 | return 0; |
334 | } | 334 | } |
335 | #else /* !CONFIG_VIRT_CPU_ACCOUNTING */ | 335 | #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
336 | static inline int alloc_dispatch_logs(void) | 336 | static inline int alloc_dispatch_logs(void) |
337 | { | 337 | { |
338 | return 0; | 338 | return 0; |
339 | } | 339 | } |
340 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | 340 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ |
341 | 341 | ||
342 | static int alloc_dispatch_log_kmem_cache(void) | 342 | static int alloc_dispatch_log_kmem_cache(void) |
343 | { | 343 | { |
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c index d9130630f7ef..81c331481336 100644 --- a/arch/powerpc/sysdev/bestcomm/bestcomm.c +++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c | |||
@@ -414,7 +414,7 @@ static int mpc52xx_bcom_probe(struct platform_device *op) | |||
414 | goto error_sramclean; | 414 | goto error_sramclean; |
415 | } | 415 | } |
416 | 416 | ||
417 | if (!request_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma), | 417 | if (!request_mem_region(res_bcom.start, resource_size(&res_bcom), |
418 | DRIVER_NAME)) { | 418 | DRIVER_NAME)) { |
419 | printk(KERN_ERR DRIVER_NAME ": " | 419 | printk(KERN_ERR DRIVER_NAME ": " |
420 | "Can't request registers region\n"); | 420 | "Can't request registers region\n"); |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b5ea38c25647..c15ba7d1be64 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -78,7 +78,6 @@ config S390 | |||
78 | select HAVE_KVM if 64BIT | 78 | select HAVE_KVM if 64BIT |
79 | select HAVE_ARCH_TRACEHOOK | 79 | select HAVE_ARCH_TRACEHOOK |
80 | select INIT_ALL_POSSIBLE | 80 | select INIT_ALL_POSSIBLE |
81 | select HAVE_IRQ_WORK | ||
82 | select HAVE_PERF_EVENTS | 81 | select HAVE_PERF_EVENTS |
83 | select ARCH_HAVE_NMI_SAFE_CMPXCHG | 82 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
84 | select HAVE_DEBUG_KMEMLEAK | 83 | select HAVE_DEBUG_KMEMLEAK |
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index e84b8b68444a..ce9cc5aa2033 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -127,7 +127,7 @@ void vtime_account_user(struct task_struct *tsk) | |||
127 | * Update process times based on virtual cpu times stored by entry.S | 127 | * Update process times based on virtual cpu times stored by entry.S |
128 | * to the lowcore fields user_timer, system_timer & steal_clock. | 128 | * to the lowcore fields user_timer, system_timer & steal_clock. |
129 | */ | 129 | */ |
130 | void vtime_account(struct task_struct *tsk) | 130 | void vtime_account_irq_enter(struct task_struct *tsk) |
131 | { | 131 | { |
132 | struct thread_info *ti = task_thread_info(tsk); | 132 | struct thread_info *ti = task_thread_info(tsk); |
133 | u64 timer, system; | 133 | u64 timer, system; |
@@ -145,10 +145,10 @@ void vtime_account(struct task_struct *tsk) | |||
145 | 145 | ||
146 | virt_timer_forward(system); | 146 | virt_timer_forward(system); |
147 | } | 147 | } |
148 | EXPORT_SYMBOL_GPL(vtime_account); | 148 | EXPORT_SYMBOL_GPL(vtime_account_irq_enter); |
149 | 149 | ||
150 | void vtime_account_system(struct task_struct *tsk) | 150 | void vtime_account_system(struct task_struct *tsk) |
151 | __attribute__((alias("vtime_account"))); | 151 | __attribute__((alias("vtime_account_irq_enter"))); |
152 | EXPORT_SYMBOL_GPL(vtime_account_system); | 152 | EXPORT_SYMBOL_GPL(vtime_account_system); |
153 | 153 | ||
154 | void __kprobes vtime_stop_cpu(void) | 154 | void __kprobes vtime_stop_cpu(void) |
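
The rename keeps vtime_account_system() as a GCC alias of vtime_account_irq_enter(), so both exported symbols resolve to one body. A standalone sketch of the alias mechanism, with toy names:

    #include <stdio.h>

    void real_entry(void)
    {
            puts("one body, two names");
    }

    /* the alias must live in the same translation unit as its target */
    void alias_entry(void) __attribute__((alias("real_entry")));

    int main(void)
    {
            alias_entry();          /* dispatches to real_entry() */
            return 0;
    }
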
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index babc2b826c5c..9c833c585871 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -11,7 +11,6 @@ config SUPERH | |||
11 | select HAVE_ARCH_TRACEHOOK | 11 | select HAVE_ARCH_TRACEHOOK |
12 | select HAVE_DMA_API_DEBUG | 12 | select HAVE_DMA_API_DEBUG |
13 | select HAVE_DMA_ATTRS | 13 | select HAVE_DMA_ATTRS |
14 | select HAVE_IRQ_WORK | ||
15 | select HAVE_PERF_EVENTS | 14 | select HAVE_PERF_EVENTS |
16 | select HAVE_DEBUG_BUGVERBOSE | 15 | select HAVE_DEBUG_BUGVERBOSE |
17 | select ARCH_HAVE_CUSTOM_GPIO_H | 16 | select ARCH_HAVE_CUSTOM_GPIO_H |
@@ -91,9 +90,6 @@ config GENERIC_CSUM | |||
91 | config GENERIC_HWEIGHT | 90 | config GENERIC_HWEIGHT |
92 | def_bool y | 91 | def_bool y |
93 | 92 | ||
94 | config IRQ_PER_CPU | ||
95 | def_bool y | ||
96 | |||
97 | config GENERIC_GPIO | 93 | config GENERIC_GPIO |
98 | def_bool n | 94 | def_bool n |
99 | 95 | ||
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index 0c910163caa3..3d5a1b387cc0 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <asm/smp.h> | 22 | #include <asm/smp.h> |
23 | #include <asm/bl_bit.h> | 23 | #include <asm/bl_bit.h> |
24 | 24 | ||
25 | void (*pm_idle)(void); | 25 | static void (*sh_idle)(void); |
26 | 26 | ||
27 | static int hlt_counter; | 27 | static int hlt_counter; |
28 | 28 | ||
@@ -103,9 +103,9 @@ void cpu_idle(void) | |||
103 | /* Don't trace irqs off for idle */ | 103 | /* Don't trace irqs off for idle */ |
104 | stop_critical_timings(); | 104 | stop_critical_timings(); |
105 | if (cpuidle_idle_call()) | 105 | if (cpuidle_idle_call()) |
106 | pm_idle(); | 106 | sh_idle(); |
107 | /* | 107 | /* |
108 | * Sanity check to ensure that pm_idle() returns | 108 | * Sanity check to ensure that sh_idle() returns |
109 | * with IRQs enabled | 109 | * with IRQs enabled |
110 | */ | 110 | */ |
111 | WARN_ON(irqs_disabled()); | 111 | WARN_ON(irqs_disabled()); |
@@ -123,13 +123,13 @@ void __init select_idle_routine(void) | |||
123 | /* | 123 | /* |
124 | * If a platform has set its own idle routine, leave it alone. | 124 | * If a platform has set its own idle routine, leave it alone. |
125 | */ | 125 | */ |
126 | if (pm_idle) | 126 | if (sh_idle) |
127 | return; | 127 | return; |
128 | 128 | ||
129 | if (hlt_works()) | 129 | if (hlt_works()) |
130 | pm_idle = default_idle; | 130 | sh_idle = default_idle; |
131 | else | 131 | else |
132 | pm_idle = poll_idle; | 132 | sh_idle = poll_idle; |
133 | } | 133 | } |
134 | 134 | ||
135 | void stop_this_cpu(void *unused) | 135 | void stop_this_cpu(void *unused) |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 9f2edb5c5551..9bff3db17c8c 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -23,7 +23,6 @@ config SPARC | |||
23 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | 23 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
24 | select RTC_CLASS | 24 | select RTC_CLASS |
25 | select RTC_DRV_M48T59 | 25 | select RTC_DRV_M48T59 |
26 | select HAVE_IRQ_WORK | ||
27 | select HAVE_DMA_ATTRS | 26 | select HAVE_DMA_ATTRS |
28 | select HAVE_DMA_API_DEBUG | 27 | select HAVE_DMA_API_DEBUG |
29 | select HAVE_ARCH_JUMP_LABEL | 28 | select HAVE_ARCH_JUMP_LABEL |
@@ -61,6 +60,7 @@ config SPARC64 | |||
61 | select HAVE_MEMBLOCK | 60 | select HAVE_MEMBLOCK |
62 | select HAVE_MEMBLOCK_NODE_MAP | 61 | select HAVE_MEMBLOCK_NODE_MAP |
63 | select HAVE_SYSCALL_WRAPPERS | 62 | select HAVE_SYSCALL_WRAPPERS |
63 | select HAVE_ARCH_TRANSPARENT_HUGEPAGE | ||
64 | select HAVE_DYNAMIC_FTRACE | 64 | select HAVE_DYNAMIC_FTRACE |
65 | select HAVE_FTRACE_MCOUNT_RECORD | 65 | select HAVE_FTRACE_MCOUNT_RECORD |
66 | select HAVE_SYSCALL_TRACEPOINTS | 66 | select HAVE_SYSCALL_TRACEPOINTS |
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index 9661e9bc7bb6..7eb57d245044 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h | |||
@@ -12,7 +12,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, | |||
12 | 12 | ||
13 | static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) | 13 | static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) |
14 | { | 14 | { |
15 | hugetlb_setup(mm); | ||
16 | } | 15 | } |
17 | 16 | ||
18 | static inline int is_hugepage_only_range(struct mm_struct *mm, | 17 | static inline int is_hugepage_only_range(struct mm_struct *mm, |
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index 4b39f74d6ca0..e15538899f3d 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h | |||
@@ -27,8 +27,8 @@ | |||
27 | #ifndef __ASSEMBLY__ | 27 | #ifndef __ASSEMBLY__ |
28 | 28 | ||
29 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) | 29 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) |
30 | struct mm_struct; | 30 | struct pt_regs; |
31 | extern void hugetlb_setup(struct mm_struct *mm); | 31 | extern void hugetlb_setup(struct pt_regs *regs); |
32 | #endif | 32 | #endif |
33 | 33 | ||
34 | #define WANT_PAGE_VIRTUAL | 34 | #define WANT_PAGE_VIRTUAL |
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 7870be0f5adc..08fcce90316b 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h | |||
@@ -71,7 +71,6 @@ | |||
71 | #define PMD_PADDR _AC(0xfffffffe,UL) | 71 | #define PMD_PADDR _AC(0xfffffffe,UL) |
72 | #define PMD_PADDR_SHIFT _AC(11,UL) | 72 | #define PMD_PADDR_SHIFT _AC(11,UL) |
73 | 73 | ||
74 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
75 | #define PMD_ISHUGE _AC(0x00000001,UL) | 74 | #define PMD_ISHUGE _AC(0x00000001,UL) |
76 | 75 | ||
77 | /* This is the PMD layout when PMD_ISHUGE is set. With 4MB huge | 76 | /* This is the PMD layout when PMD_ISHUGE is set. With 4MB huge |
@@ -86,7 +85,6 @@ | |||
86 | #define PMD_HUGE_ACCESSED _AC(0x00000080,UL) | 85 | #define PMD_HUGE_ACCESSED _AC(0x00000080,UL) |
87 | #define PMD_HUGE_EXEC _AC(0x00000040,UL) | 86 | #define PMD_HUGE_EXEC _AC(0x00000040,UL) |
88 | #define PMD_HUGE_SPLITTING _AC(0x00000020,UL) | 87 | #define PMD_HUGE_SPLITTING _AC(0x00000020,UL) |
89 | #endif | ||
90 | 88 | ||
91 | /* PGDs point to PMD tables which are 8K aligned. */ | 89 | /* PGDs point to PMD tables which are 8K aligned. */ |
92 | #define PGD_PADDR _AC(0xfffffffc,UL) | 90 | #define PGD_PADDR _AC(0xfffffffc,UL) |
@@ -628,6 +626,12 @@ static inline unsigned long pte_special(pte_t pte) | |||
628 | return pte_val(pte) & _PAGE_SPECIAL; | 626 | return pte_val(pte) & _PAGE_SPECIAL; |
629 | } | 627 | } |
630 | 628 | ||
629 | static inline int pmd_large(pmd_t pmd) | ||
630 | { | ||
631 | return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) == | ||
632 | (PMD_ISHUGE | PMD_HUGE_PRESENT); | ||
633 | } | ||
634 | |||
631 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 635 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
632 | static inline int pmd_young(pmd_t pmd) | 636 | static inline int pmd_young(pmd_t pmd) |
633 | { | 637 | { |
@@ -646,12 +650,6 @@ static inline unsigned long pmd_pfn(pmd_t pmd) | |||
646 | return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT); | 650 | return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT); |
647 | } | 651 | } |
648 | 652 | ||
649 | static inline int pmd_large(pmd_t pmd) | ||
650 | { | ||
651 | return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) == | ||
652 | (PMD_ISHUGE | PMD_HUGE_PRESENT); | ||
653 | } | ||
654 | |||
655 | static inline int pmd_trans_splitting(pmd_t pmd) | 653 | static inline int pmd_trans_splitting(pmd_t pmd) |
656 | { | 654 | { |
657 | return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) == | 655 | return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) == |
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h index c1e01914fd98..2c7baa4c4505 100644 --- a/arch/sparc/include/asm/processor_32.h +++ b/arch/sparc/include/asm/processor_32.h | |||
@@ -118,6 +118,7 @@ extern unsigned long get_wchan(struct task_struct *); | |||
118 | extern struct task_struct *last_task_used_math; | 118 | extern struct task_struct *last_task_used_math; |
119 | 119 | ||
120 | #define cpu_relax() barrier() | 120 | #define cpu_relax() barrier() |
121 | extern void (*sparc_idle)(void); | ||
121 | 122 | ||
122 | #endif | 123 | #endif |
123 | 124 | ||
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index b4c258de4443..e696432b950d 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h | |||
@@ -157,17 +157,26 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; | |||
157 | andn REG2, 0x7, REG2; \ | 157 | andn REG2, 0x7, REG2; \ |
158 | add REG1, REG2, REG1; | 158 | add REG1, REG2, REG1; |
159 | 159 | ||
160 | /* This macro exists only to make the PMD translator below easier | 160 | /* These macros exist only to make the PMD translator below |
161 | * to read. It hides the ELF section switch for the sun4v code | 161 | * easier to read. They hide the ELF section switch for the |
162 | * patching. | 162 | * sun4v code patching. |
163 | */ | 163 | */ |
164 | #define OR_PTE_BIT(REG, NAME) \ | 164 | #define OR_PTE_BIT_1INSN(REG, NAME) \ |
165 | 661: or REG, _PAGE_##NAME##_4U, REG; \ | 165 | 661: or REG, _PAGE_##NAME##_4U, REG; \ |
166 | .section .sun4v_1insn_patch, "ax"; \ | 166 | .section .sun4v_1insn_patch, "ax"; \ |
167 | .word 661b; \ | 167 | .word 661b; \ |
168 | or REG, _PAGE_##NAME##_4V, REG; \ | 168 | or REG, _PAGE_##NAME##_4V, REG; \ |
169 | .previous; | 169 | .previous; |
170 | 170 | ||
171 | #define OR_PTE_BIT_2INSN(REG, TMP, NAME) \ | ||
172 | 661: sethi %hi(_PAGE_##NAME##_4U), TMP; \ | ||
173 | or REG, TMP, REG; \ | ||
174 | .section .sun4v_2insn_patch, "ax"; \ | ||
175 | .word 661b; \ | ||
176 | mov -1, TMP; \ | ||
177 | or REG, _PAGE_##NAME##_4V, REG; \ | ||
178 | .previous; | ||
179 | |||
171 | /* Load into REG the PTE value for VALID, CACHE, and SZHUGE. */ | 180 | /* Load into REG the PTE value for VALID, CACHE, and SZHUGE. */ |
172 | #define BUILD_PTE_VALID_SZHUGE_CACHE(REG) \ | 181 | #define BUILD_PTE_VALID_SZHUGE_CACHE(REG) \ |
173 | 661: sethi %uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG; \ | 182 | 661: sethi %uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG; \ |
@@ -214,12 +223,13 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; | |||
214 | andn REG1, PMD_HUGE_PROTBITS, REG2; \ | 223 | andn REG1, PMD_HUGE_PROTBITS, REG2; \ |
215 | sllx REG2, PMD_PADDR_SHIFT, REG2; \ | 224 | sllx REG2, PMD_PADDR_SHIFT, REG2; \ |
216 | /* REG2 now holds PFN << PAGE_SHIFT */ \ | 225 | /* REG2 now holds PFN << PAGE_SHIFT */ \ |
217 | andcc REG1, PMD_HUGE_EXEC, %g0; \ | 226 | andcc REG1, PMD_HUGE_WRITE, %g0; \ |
218 | bne,a,pt %xcc, 1f; \ | ||
219 | OR_PTE_BIT(REG2, EXEC); \ | ||
220 | 1: andcc REG1, PMD_HUGE_WRITE, %g0; \ | ||
221 | bne,a,pt %xcc, 1f; \ | 227 | bne,a,pt %xcc, 1f; \ |
222 | OR_PTE_BIT(REG2, W); \ | 228 | OR_PTE_BIT_1INSN(REG2, W); \ |
229 | 1: andcc REG1, PMD_HUGE_EXEC, %g0; \ | ||
230 | be,pt %xcc, 1f; \ | ||
231 | nop; \ | ||
232 | OR_PTE_BIT_2INSN(REG2, REG1, EXEC); \ | ||
223 | /* REG1 can now be clobbered, build final PTE */ \ | 233 | /* REG1 can now be clobbered, build final PTE */ \ |
224 | 1: BUILD_PTE_VALID_SZHUGE_CACHE(REG1); \ | 234 | 1: BUILD_PTE_VALID_SZHUGE_CACHE(REG1); \ |
225 | ba,pt %xcc, PTE_LABEL; \ | 235 | ba,pt %xcc, PTE_LABEL; \ |
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c index 348fa1aeabce..eefda32b595e 100644 --- a/arch/sparc/kernel/apc.c +++ b/arch/sparc/kernel/apc.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
21 | #include <asm/auxio.h> | 21 | #include <asm/auxio.h> |
22 | #include <asm/apc.h> | 22 | #include <asm/apc.h> |
23 | #include <asm/processor.h> | ||
23 | 24 | ||
24 | /* Debugging | 25 | /* Debugging |
25 | * | 26 | * |
@@ -158,7 +159,7 @@ static int apc_probe(struct platform_device *op) | |||
158 | 159 | ||
159 | /* Assign power management IDLE handler */ | 160 | /* Assign power management IDLE handler */ |
160 | if (!apc_no_idle) | 161 | if (!apc_no_idle) |
161 | pm_idle = apc_swift_idle; | 162 | sparc_idle = apc_swift_idle; |
162 | 163 | ||
163 | printk(KERN_INFO "%s: power management initialized%s\n", | 164 | printk(KERN_INFO "%s: power management initialized%s\n", |
164 | APC_DEVNAME, apc_no_idle ? " (CPU idle disabled)" : ""); | 165 | APC_DEVNAME, apc_no_idle ? " (CPU idle disabled)" : ""); |
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h index 291bb5de9ce0..a702d9ab019c 100644 --- a/arch/sparc/kernel/kernel.h +++ b/arch/sparc/kernel/kernel.h | |||
@@ -48,6 +48,10 @@ extern void sun4m_init_IRQ(void); | |||
48 | extern void sun4m_unmask_profile_irq(void); | 48 | extern void sun4m_unmask_profile_irq(void); |
49 | extern void sun4m_clear_profile_irq(int cpu); | 49 | extern void sun4m_clear_profile_irq(int cpu); |
50 | 50 | ||
51 | /* sun4m_smp.c */ | ||
52 | void sun4m_cpu_pre_starting(void *arg); | ||
53 | void sun4m_cpu_pre_online(void *arg); | ||
54 | |||
51 | /* sun4d_irq.c */ | 55 | /* sun4d_irq.c */ |
52 | extern spinlock_t sun4d_imsk_lock; | 56 | extern spinlock_t sun4d_imsk_lock; |
53 | 57 | ||
@@ -60,6 +64,14 @@ extern int show_sun4d_interrupts(struct seq_file *, void *); | |||
60 | extern void sun4d_distribute_irqs(void); | 64 | extern void sun4d_distribute_irqs(void); |
61 | extern void sun4d_free_irq(unsigned int irq, void *dev_id); | 65 | extern void sun4d_free_irq(unsigned int irq, void *dev_id); |
62 | 66 | ||
67 | /* sun4d_smp.c */ | ||
68 | void sun4d_cpu_pre_starting(void *arg); | ||
69 | void sun4d_cpu_pre_online(void *arg); | ||
70 | |||
71 | /* leon_smp.c */ | ||
72 | void leon_cpu_pre_starting(void *arg); | ||
73 | void leon_cpu_pre_online(void *arg); | ||
74 | |||
63 | /* head_32.S */ | 75 | /* head_32.S */ |
64 | extern unsigned int t_nmi[]; | 76 | extern unsigned int t_nmi[]; |
65 | extern unsigned int linux_trap_ipi15_sun4d[]; | 77 | extern unsigned int linux_trap_ipi15_sun4d[]; |
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c index 4e174321097d..708bca435219 100644 --- a/arch/sparc/kernel/leon_pmc.c +++ b/arch/sparc/kernel/leon_pmc.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <asm/leon_amba.h> | 9 | #include <asm/leon_amba.h> |
10 | #include <asm/cpu_type.h> | 10 | #include <asm/cpu_type.h> |
11 | #include <asm/leon.h> | 11 | #include <asm/leon.h> |
12 | #include <asm/processor.h> | ||
12 | 13 | ||
13 | /* List of Systems that need fixup instructions around power-down instruction */ | 14 | /* List of Systems that need fixup instructions around power-down instruction */ |
14 | unsigned int pmc_leon_fixup_ids[] = { | 15 | unsigned int pmc_leon_fixup_ids[] = { |
@@ -69,9 +70,9 @@ static int __init leon_pmc_install(void) | |||
69 | if (sparc_cpu_model == sparc_leon) { | 70 | if (sparc_cpu_model == sparc_leon) { |
70 | /* Assign power management IDLE handler */ | 71 | /* Assign power management IDLE handler */ |
71 | if (pmc_leon_need_fixup()) | 72 | if (pmc_leon_need_fixup()) |
72 | pm_idle = pmc_leon_idle_fixup; | 73 | sparc_idle = pmc_leon_idle_fixup; |
73 | else | 74 | else |
74 | pm_idle = pmc_leon_idle; | 75 | sparc_idle = pmc_leon_idle; |
75 | 76 | ||
76 | printk(KERN_INFO "leon: power management initialized\n"); | 77 | printk(KERN_INFO "leon: power management initialized\n"); |
77 | } | 78 | } |
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index 0f3fb6d9c8ef..9b40c9c12a0c 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c | |||
@@ -69,31 +69,19 @@ static inline unsigned long do_swap(volatile unsigned long *ptr, | |||
69 | return val; | 69 | return val; |
70 | } | 70 | } |
71 | 71 | ||
72 | void __cpuinit leon_callin(void) | 72 | void __cpuinit leon_cpu_pre_starting(void *arg) |
73 | { | 73 | { |
74 | int cpuid = hard_smp_processor_id(); | ||
75 | |||
76 | local_ops->cache_all(); | ||
77 | local_ops->tlb_all(); | ||
78 | leon_configure_cache_smp(); | 74 | leon_configure_cache_smp(); |
75 | } | ||
79 | 76 | ||
80 | notify_cpu_starting(cpuid); | 77 | void __cpuinit leon_cpu_pre_online(void *arg) |
81 | 78 | { | |
82 | /* Get our local ticker going. */ | 79 | int cpuid = hard_smp_processor_id(); |
83 | register_percpu_ce(cpuid); | ||
84 | |||
85 | calibrate_delay(); | ||
86 | smp_store_cpu_info(cpuid); | ||
87 | |||
88 | local_ops->cache_all(); | ||
89 | local_ops->tlb_all(); | ||
90 | 80 | ||
91 | /* | 81 | /* Allow master to continue. The master will then give us the |
92 | * Unblock the master CPU _only_ when the scheduler state | 82 | * go-ahead by setting the smp_commenced_mask and will wait without |
93 | * of all secondary CPUs will be up-to-date, so after | 83 | * timeouts until our setup is completed fully (signified by |
94 | * the SMP initialization the master will be just allowed | 84 | * our bit being set in the cpu_online_mask). |
95 | * to call the scheduler code. | ||
96 | * Allow master to continue. | ||
97 | */ | 85 | */ |
98 | do_swap(&cpu_callin_map[cpuid], 1); | 86 | do_swap(&cpu_callin_map[cpuid], 1); |
99 | 87 | ||
@@ -110,9 +98,6 @@ void __cpuinit leon_callin(void) | |||
110 | 98 | ||
111 | while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) | 99 | while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) |
112 | mb(); | 100 | mb(); |
113 | |||
114 | local_irq_enable(); | ||
115 | set_cpu_online(cpuid, true); | ||
116 | } | 101 | } |
117 | 102 | ||
118 | /* | 103 | /* |
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c index dcbb62f63068..8b7297faca79 100644 --- a/arch/sparc/kernel/pmc.c +++ b/arch/sparc/kernel/pmc.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/oplib.h> | 17 | #include <asm/oplib.h> |
18 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
19 | #include <asm/auxio.h> | 19 | #include <asm/auxio.h> |
20 | #include <asm/processor.h> | ||
20 | 21 | ||
21 | /* Debug | 22 | /* Debug |
22 | * | 23 | * |
@@ -63,7 +64,7 @@ static int pmc_probe(struct platform_device *op) | |||
63 | 64 | ||
64 | #ifndef PMC_NO_IDLE | 65 | #ifndef PMC_NO_IDLE |
65 | /* Assign power management IDLE handler */ | 66 | /* Assign power management IDLE handler */ |
66 | pm_idle = pmc_swift_idle; | 67 | sparc_idle = pmc_swift_idle; |
67 | #endif | 68 | #endif |
68 | 69 | ||
69 | printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME); | 70 | printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME); |
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index be8e862badaf..62eede13831a 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c | |||
@@ -43,8 +43,7 @@ | |||
43 | * Power management idle function | 43 | * Power management idle function |
44 | * Set in pm platform drivers (apc.c and pmc.c) | 44 | * Set in pm platform drivers (apc.c and pmc.c) |
45 | */ | 45 | */ |
46 | void (*pm_idle)(void); | 46 | void (*sparc_idle)(void); |
47 | EXPORT_SYMBOL(pm_idle); | ||
48 | 47 | ||
49 | /* | 48 | /* |
50 | * Power-off handler instantiation for pm.h compliance | 49 | * Power-off handler instantiation for pm.h compliance |
@@ -75,8 +74,8 @@ void cpu_idle(void) | |||
75 | /* endless idle loop with no priority at all */ | 74 | /* endless idle loop with no priority at all */ |
76 | for (;;) { | 75 | for (;;) { |
77 | while (!need_resched()) { | 76 | while (!need_resched()) { |
78 | if (pm_idle) | 77 | if (sparc_idle) |
79 | (*pm_idle)(); | 78 | (*sparc_idle)(); |
80 | else | 79 | else |
81 | cpu_relax(); | 80 | cpu_relax(); |
82 | } | 81 | } |
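
With the global pm_idle gone, the sparc platform drivers touched above (apc.c, leon_pmc.c, pmc.c) install their routine through the arch-private sparc_idle pointer that cpu_idle() polls. A hypothetical driver hook; the foo_* names are invented for illustration:

    static void foo_swift_idle(void)
    {
            /* enter a platform low-power state until the next interrupt */
    }

    static int __init foo_pm_init(void)
    {
            sparc_idle = foo_swift_idle;    /* consulted each idle-loop pass */
            return 0;
    }
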
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c index 1303021748c8..9f20566b0773 100644 --- a/arch/sparc/kernel/prom_common.c +++ b/arch/sparc/kernel/prom_common.c | |||
@@ -64,7 +64,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len | |||
64 | err = -ENODEV; | 64 | err = -ENODEV; |
65 | 65 | ||
66 | mutex_lock(&of_set_property_mutex); | 66 | mutex_lock(&of_set_property_mutex); |
67 | write_lock(&devtree_lock); | 67 | raw_spin_lock(&devtree_lock); |
68 | prevp = &dp->properties; | 68 | prevp = &dp->properties; |
69 | while (*prevp) { | 69 | while (*prevp) { |
70 | struct property *prop = *prevp; | 70 | struct property *prop = *prevp; |
@@ -91,7 +91,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len | |||
91 | } | 91 | } |
92 | prevp = &(*prevp)->next; | 92 | prevp = &(*prevp)->next; |
93 | } | 93 | } |
94 | write_unlock(&devtree_lock); | 94 | raw_spin_unlock(&devtree_lock); |
95 | mutex_unlock(&of_set_property_mutex); | 95 | mutex_unlock(&of_set_property_mutex); |
96 | 96 | ||
97 | /* XXX Update procfs if necessary... */ | 97 | /* XXX Update procfs if necessary... */ |
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c index 1271b3a27d4e..be5bdf93c767 100644 --- a/arch/sparc/kernel/sbus.c +++ b/arch/sparc/kernel/sbus.c | |||
@@ -554,10 +554,8 @@ static void __init sbus_iommu_init(struct platform_device *op) | |||
554 | regs = pr->phys_addr; | 554 | regs = pr->phys_addr; |
555 | 555 | ||
556 | iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); | 556 | iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); |
557 | if (!iommu) | ||
558 | goto fatal_memory_error; | ||
559 | strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC); | 557 | strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC); |
560 | if (!strbuf) | 558 | if (!iommu || !strbuf) |
561 | goto fatal_memory_error; | 559 | goto fatal_memory_error; |
562 | 560 | ||
563 | op->dev.archdata.iommu = iommu; | 561 | op->dev.archdata.iommu = iommu; |
@@ -656,6 +654,8 @@ static void __init sbus_iommu_init(struct platform_device *op) | |||
656 | return; | 654 | return; |
657 | 655 | ||
658 | fatal_memory_error: | 656 | fatal_memory_error: |
657 | kfree(iommu); | ||
658 | kfree(strbuf); | ||
659 | prom_printf("sbus_iommu_init: Fatal memory allocation error.\n"); | 659 | prom_printf("sbus_iommu_init: Fatal memory allocation error.\n"); |
660 | } | 660 | } |
661 | 661 | ||
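
The sbus_iommu_init() cleanup leans on kfree(NULL) being a no-op: both allocations funnel into one error label that frees whichever pointers were set. The pattern in isolation, as a sketch:

    static int alloc_pair(void)
    {
            struct iommu *iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
            struct strbuf *strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC);

            if (!iommu || !strbuf)
                    goto fatal_memory_error;
            /* ... register iommu and strbuf ... */
            return 0;

    fatal_memory_error:
            kfree(iommu);           /* safe even when iommu == NULL */
            kfree(strbuf);
            return -ENOMEM;
    }
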
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index 79db45e5134a..9e7e6d718367 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/seq_file.h> | 20 | #include <linux/seq_file.h> |
21 | #include <linux/cache.h> | 21 | #include <linux/cache.h> |
22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
23 | #include <linux/cpu.h> | ||
23 | 24 | ||
24 | #include <asm/ptrace.h> | 25 | #include <asm/ptrace.h> |
25 | #include <linux/atomic.h> | 26 | #include <linux/atomic.h> |
@@ -32,8 +33,10 @@ | |||
32 | #include <asm/cacheflush.h> | 33 | #include <asm/cacheflush.h> |
33 | #include <asm/tlbflush.h> | 34 | #include <asm/tlbflush.h> |
34 | #include <asm/cpudata.h> | 35 | #include <asm/cpudata.h> |
36 | #include <asm/timer.h> | ||
35 | #include <asm/leon.h> | 37 | #include <asm/leon.h> |
36 | 38 | ||
39 | #include "kernel.h" | ||
37 | #include "irq.h" | 40 | #include "irq.h" |
38 | 41 | ||
39 | volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,}; | 42 | volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,}; |
@@ -294,6 +297,89 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
294 | return ret; | 297 | return ret; |
295 | } | 298 | } |
296 | 299 | ||
300 | void __cpuinit arch_cpu_pre_starting(void *arg) | ||
301 | { | ||
302 | local_ops->cache_all(); | ||
303 | local_ops->tlb_all(); | ||
304 | |||
305 | switch(sparc_cpu_model) { | ||
306 | case sun4m: | ||
307 | sun4m_cpu_pre_starting(arg); | ||
308 | break; | ||
309 | case sun4d: | ||
310 | sun4d_cpu_pre_starting(arg); | ||
311 | break; | ||
312 | case sparc_leon: | ||
313 | leon_cpu_pre_starting(arg); | ||
314 | break; | ||
315 | default: | ||
316 | BUG(); | ||
317 | } | ||
318 | } | ||
319 | |||
320 | void __cpuinit arch_cpu_pre_online(void *arg) | ||
321 | { | ||
322 | unsigned int cpuid = hard_smp_processor_id(); | ||
323 | |||
324 | register_percpu_ce(cpuid); | ||
325 | |||
326 | calibrate_delay(); | ||
327 | smp_store_cpu_info(cpuid); | ||
328 | |||
329 | local_ops->cache_all(); | ||
330 | local_ops->tlb_all(); | ||
331 | |||
332 | switch(sparc_cpu_model) { | ||
333 | case sun4m: | ||
334 | sun4m_cpu_pre_online(arg); | ||
335 | break; | ||
336 | case sun4d: | ||
337 | sun4d_cpu_pre_online(arg); | ||
338 | break; | ||
339 | case sparc_leon: | ||
340 | leon_cpu_pre_online(arg); | ||
341 | break; | ||
342 | default: | ||
343 | BUG(); | ||
344 | } | ||
345 | } | ||
346 | |||
347 | void __cpuinit sparc_start_secondary(void *arg) | ||
348 | { | ||
349 | unsigned int cpu; | ||
350 | |||
351 | /* | ||
352 | * SMP booting is extremely fragile on some architectures. So run | ||
353 | * the cpu initialization code before anything else. | ||
354 | */ | ||
355 | arch_cpu_pre_starting(arg); | ||
356 | |||
357 | preempt_disable(); | ||
358 | cpu = smp_processor_id(); | ||
359 | |||
360 | /* Invoke the CPU_STARTING notifier callbacks */ | ||
361 | notify_cpu_starting(cpu); | ||
362 | |||
363 | arch_cpu_pre_online(arg); | ||
364 | |||
365 | /* Set the CPU in the cpu_online_mask */ | ||
366 | set_cpu_online(cpu, true); | ||
367 | |||
368 | /* Enable local interrupts now */ | ||
369 | local_irq_enable(); | ||
370 | |||
371 | wmb(); | ||
372 | cpu_idle(); | ||
373 | |||
374 | /* We should never reach here! */ | ||
375 | BUG(); | ||
376 | } | ||
377 | |||
378 | void __cpuinit smp_callin(void) | ||
379 | { | ||
380 | sparc_start_secondary(NULL); | ||
381 | } | ||
382 | |||
297 | void smp_bogo(struct seq_file *m) | 383 | void smp_bogo(struct seq_file *m) |
298 | { | 384 | { |
299 | int i; | 385 | int i; |
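
A hypothetical fourth sparc32 platform would plug into this generic startup path by adding a case to both switches and supplying the two hooks. Sketch with invented foo_* names; the handshake mirrors the sun4m/leon hooks shown below (the real hooks set the callin flag with an atomic swap):

    void __cpuinit foo_cpu_pre_starting(void *arg)
    {
            /* pre-CPU_STARTING setup: IRQs off, CPU not yet online */
    }

    void __cpuinit foo_cpu_pre_online(void *arg)
    {
            int cpuid = hard_smp_processor_id();

            cpu_callin_map[cpuid] = 1;      /* let the master continue */
            while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
                    mb();                   /* wait for the go-ahead */
    }
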
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index ddaea31de586..c9eb82f23d92 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c | |||
@@ -50,10 +50,9 @@ static inline void show_leds(int cpuid) | |||
50 | "i" (ASI_M_CTL)); | 50 | "i" (ASI_M_CTL)); |
51 | } | 51 | } |
52 | 52 | ||
53 | void __cpuinit smp4d_callin(void) | 53 | void __cpuinit sun4d_cpu_pre_starting(void *arg) |
54 | { | 54 | { |
55 | int cpuid = hard_smp_processor_id(); | 55 | int cpuid = hard_smp_processor_id(); |
56 | unsigned long flags; | ||
57 | 56 | ||
58 | /* Show we are alive */ | 57 | /* Show we are alive */ |
59 | cpu_leds[cpuid] = 0x6; | 58 | cpu_leds[cpuid] = 0x6; |
@@ -61,26 +60,20 @@ void __cpuinit smp4d_callin(void) | |||
61 | 60 | ||
62 | /* Enable level15 interrupt, disable level14 interrupt for now */ | 61 | /* Enable level15 interrupt, disable level14 interrupt for now */ |
63 | cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000); | 62 | cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000); |
63 | } | ||
64 | 64 | ||
65 | local_ops->cache_all(); | 65 | void __cpuinit sun4d_cpu_pre_online(void *arg) |
66 | local_ops->tlb_all(); | 66 | { |
67 | unsigned long flags; | ||
68 | int cpuid; | ||
67 | 69 | ||
68 | notify_cpu_starting(cpuid); | 70 | cpuid = hard_smp_processor_id(); |
69 | /* | 71 | |
70 | * Unblock the master CPU _only_ when the scheduler state | 72 | /* Unblock the master CPU _only_ when the scheduler state |
71 | * of all secondary CPUs will be up-to-date, so after | 73 | * of all secondary CPUs will be up-to-date, so after |
72 | * the SMP initialization the master will be just allowed | 74 | * the SMP initialization the master will be just allowed |
73 | * to call the scheduler code. | 75 | * to call the scheduler code. |
74 | */ | 76 | */ |
75 | /* Get our local ticker going. */ | ||
76 | register_percpu_ce(cpuid); | ||
77 | |||
78 | calibrate_delay(); | ||
79 | smp_store_cpu_info(cpuid); | ||
80 | local_ops->cache_all(); | ||
81 | local_ops->tlb_all(); | ||
82 | |||
83 | /* Allow master to continue. */ | ||
84 | sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1); | 77 | sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1); |
85 | local_ops->cache_all(); | 78 | local_ops->cache_all(); |
86 | local_ops->tlb_all(); | 79 | local_ops->tlb_all(); |
@@ -106,16 +99,12 @@ void __cpuinit smp4d_callin(void) | |||
106 | local_ops->cache_all(); | 99 | local_ops->cache_all(); |
107 | local_ops->tlb_all(); | 100 | local_ops->tlb_all(); |
108 | 101 | ||
109 | local_irq_enable(); /* We don't allow PIL 14 yet */ | ||
110 | |||
111 | while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) | 102 | while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) |
112 | barrier(); | 103 | barrier(); |
113 | 104 | ||
114 | spin_lock_irqsave(&sun4d_imsk_lock, flags); | 105 | spin_lock_irqsave(&sun4d_imsk_lock, flags); |
115 | cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */ | 106 | cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */ |
116 | spin_unlock_irqrestore(&sun4d_imsk_lock, flags); | 107 | spin_unlock_irqrestore(&sun4d_imsk_lock, flags); |
117 | set_cpu_online(cpuid, true); | ||
118 | |||
119 | } | 108 | } |
120 | 109 | ||
121 | /* | 110 | /* |
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index 128af7304288..8a65f158153d 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c | |||
@@ -34,30 +34,19 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val) | |||
34 | return val; | 34 | return val; |
35 | } | 35 | } |
36 | 36 | ||
37 | void __cpuinit smp4m_callin(void) | 37 | void __cpuinit sun4m_cpu_pre_starting(void *arg) |
38 | { | 38 | { |
39 | int cpuid = hard_smp_processor_id(); | 39 | } |
40 | |||
41 | local_ops->cache_all(); | ||
42 | local_ops->tlb_all(); | ||
43 | |||
44 | notify_cpu_starting(cpuid); | ||
45 | |||
46 | register_percpu_ce(cpuid); | ||
47 | |||
48 | calibrate_delay(); | ||
49 | smp_store_cpu_info(cpuid); | ||
50 | 40 | ||
51 | local_ops->cache_all(); | 41 | void __cpuinit sun4m_cpu_pre_online(void *arg) |
52 | local_ops->tlb_all(); | 42 | { |
43 | int cpuid = hard_smp_processor_id(); | ||
53 | 44 | ||
54 | /* | 45 | /* Allow master to continue. The master will then give us the |
55 | * Unblock the master CPU _only_ when the scheduler state | 46 | * go-ahead by setting the smp_commenced_mask and will wait without |
56 | * of all secondary CPUs will be up-to-date, so after | 47 | * timeouts until our setup is completed fully (signified by |
57 | * the SMP initialization the master will be just allowed | 48 | * our bit being set in the cpu_online_mask). |
58 | * to call the scheduler code. | ||
59 | */ | 49 | */ |
60 | /* Allow master to continue. */ | ||
61 | swap_ulong(&cpu_callin_map[cpuid], 1); | 50 | swap_ulong(&cpu_callin_map[cpuid], 1); |
62 | 51 | ||
63 | /* XXX: What's up with all the flushes? */ | 52 | /* XXX: What's up with all the flushes? */ |
@@ -75,10 +64,6 @@ void __cpuinit smp4m_callin(void) | |||
75 | 64 | ||
76 | while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) | 65 | while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) |
77 | mb(); | 66 | mb(); |
78 | |||
79 | local_irq_enable(); | ||
80 | |||
81 | set_cpu_online(cpuid, true); | ||
82 | } | 67 | } |
83 | 68 | ||
84 | /* | 69 | /* |
diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S index af27acab4486..6cdb08cdabf0 100644 --- a/arch/sparc/kernel/trampoline_32.S +++ b/arch/sparc/kernel/trampoline_32.S | |||
@@ -79,18 +79,15 @@ cpu3_startup: | |||
79 | nop | 79 | nop |
80 | 80 | ||
81 | /* Start this processor. */ | 81 | /* Start this processor. */ |
82 | call smp4m_callin | 82 | call smp_callin |
83 | nop | 83 | nop |
84 | 84 | ||
85 | b,a smp_do_cpu_idle | 85 | b,a smp_panic |
86 | 86 | ||
87 | .text | 87 | .text |
88 | .align 4 | 88 | .align 4 |
89 | 89 | ||
90 | smp_do_cpu_idle: | 90 | smp_panic: |
91 | call cpu_idle | ||
92 | mov 0, %o0 | ||
93 | |||
94 | call cpu_panic | 91 | call cpu_panic |
95 | nop | 92 | nop |
96 | 93 | ||
@@ -144,10 +141,10 @@ sun4d_cpu_startup: | |||
144 | nop | 141 | nop |
145 | 142 | ||
146 | /* Start this processor. */ | 143 | /* Start this processor. */ |
147 | call smp4d_callin | 144 | call smp_callin |
148 | nop | 145 | nop |
149 | 146 | ||
150 | b,a smp_do_cpu_idle | 147 | b,a smp_panic |
151 | 148 | ||
152 | __CPUINIT | 149 | __CPUINIT |
153 | .align 4 | 150 | .align 4 |
@@ -201,7 +198,7 @@ leon_smp_cpu_startup: | |||
201 | nop | 198 | nop |
202 | 199 | ||
203 | /* Start this processor. */ | 200 | /* Start this processor. */ |
204 | call leon_callin | 201 | call smp_callin |
205 | nop | 202 | nop |
206 | 203 | ||
207 | b,a smp_do_cpu_idle | 204 | b,a smp_panic |
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index d4bdc7a62375..a313e4a9399b 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S | |||
@@ -136,12 +136,43 @@ tsb_miss_page_table_walk_sun4v_fastpath: | |||
136 | nop | 136 | nop |
137 | 137 | ||
138 | /* It is a huge page, use huge page TSB entry address we | 138 | /* It is a huge page, use huge page TSB entry address we |
139 | * calculated above. | 139 | * calculated above. If the huge page TSB has not been |
140 | * allocated, setup a trap stack and call hugetlb_setup() | ||
141 | * to do so, then return from the trap to replay the TLB | ||
142 | * miss. | ||
143 | * | ||
144 | * This is necessary to handle the case of transparent huge | ||
145 | * pages where we don't really have a non-atomic context | ||
146 | * in which to allocate the hugepage TSB hash table. When | ||
147 | * the 'mm' faults in the hugepage for the first time, we | ||
148 | * thus handle it here. This also makes sure that we can | ||
149 | * allocate the TSB hash table on the correct NUMA node. | ||
140 | */ | 150 | */ |
141 | TRAP_LOAD_TRAP_BLOCK(%g7, %g2) | 151 | TRAP_LOAD_TRAP_BLOCK(%g7, %g2) |
142 | ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2 | 152 | ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1 |
143 | cmp %g2, -1 | 153 | cmp %g1, -1 |
144 | movne %xcc, %g2, %g1 | 154 | bne,pt %xcc, 60f |
155 | nop | ||
156 | |||
157 | 661: rdpr %pstate, %g5 | ||
158 | wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate | ||
159 | .section .sun4v_2insn_patch, "ax" | ||
160 | .word 661b | ||
161 | SET_GL(1) | ||
162 | nop | ||
163 | .previous | ||
164 | |||
165 | rdpr %tl, %g3 | ||
166 | cmp %g3, 1 | ||
167 | bne,pn %xcc, winfix_trampoline | ||
168 | nop | ||
169 | ba,pt %xcc, etrap | ||
170 | rd %pc, %g7 | ||
171 | call hugetlb_setup | ||
172 | add %sp, PTREGS_OFF, %o0 | ||
173 | ba,pt %xcc, rtrap | ||
174 | nop | ||
175 | |||
145 | 60: | 176 | 60: |
146 | #endif | 177 | #endif |
147 | 178 | ||
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 097aee763af3..5062ff389e83 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c | |||
@@ -472,8 +472,13 @@ good_area: | |||
472 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) | 472 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) |
473 | mm_rss = mm->context.huge_pte_count; | 473 | mm_rss = mm->context.huge_pte_count; |
474 | if (unlikely(mm_rss > | 474 | if (unlikely(mm_rss > |
475 | mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) | 475 | mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) { |
476 | tsb_grow(mm, MM_TSB_HUGE, mm_rss); | 476 | if (mm->context.tsb_block[MM_TSB_HUGE].tsb) |
477 | tsb_grow(mm, MM_TSB_HUGE, mm_rss); | ||
478 | else | ||
479 | hugetlb_setup(regs); | ||
480 | |||
481 | } | ||
477 | #endif | 482 | #endif |
478 | return; | 483 | return; |
479 | 484 | ||
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index 42c55df3aec3..01ee23dd724d 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c | |||
@@ -66,6 +66,56 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, | |||
66 | return 1; | 66 | return 1; |
67 | } | 67 | } |
68 | 68 | ||
69 | static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | ||
70 | unsigned long end, int write, struct page **pages, | ||
71 | int *nr) | ||
72 | { | ||
73 | struct page *head, *page, *tail; | ||
74 | u32 mask; | ||
75 | int refs; | ||
76 | |||
77 | mask = PMD_HUGE_PRESENT; | ||
78 | if (write) | ||
79 | mask |= PMD_HUGE_WRITE; | ||
80 | if ((pmd_val(pmd) & mask) != mask) | ||
81 | return 0; | ||
82 | |||
83 | refs = 0; | ||
84 | head = pmd_page(pmd); | ||
85 | page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); | ||
86 | tail = page; | ||
87 | do { | ||
88 | VM_BUG_ON(compound_head(page) != head); | ||
89 | pages[*nr] = page; | ||
90 | (*nr)++; | ||
91 | page++; | ||
92 | refs++; | ||
93 | } while (addr += PAGE_SIZE, addr != end); | ||
94 | |||
95 | if (!page_cache_add_speculative(head, refs)) { | ||
96 | *nr -= refs; | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) { | ||
101 | *nr -= refs; | ||
102 | while (refs--) | ||
103 | put_page(head); | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | /* Any tail pages need their mapcount reference taken before | ||
108 | * we return. | ||
109 | */ | ||
110 | while (refs--) { | ||
111 | if (PageTail(tail)) | ||
112 | get_huge_page_tail(tail); | ||
113 | tail++; | ||
114 | } | ||
115 | |||
116 | return 1; | ||
117 | } | ||
118 | |||
69 | static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, | 119 | static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, |
70 | int write, struct page **pages, int *nr) | 120 | int write, struct page **pages, int *nr) |
71 | { | 121 | { |
@@ -77,9 +127,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, | |||
77 | pmd_t pmd = *pmdp; | 127 | pmd_t pmd = *pmdp; |
78 | 128 | ||
79 | next = pmd_addr_end(addr, end); | 129 | next = pmd_addr_end(addr, end); |
80 | if (pmd_none(pmd)) | 130 | if (pmd_none(pmd) || pmd_trans_splitting(pmd)) |
81 | return 0; | 131 | return 0; |
82 | if (!gup_pte_range(pmd, addr, next, write, pages, nr)) | 132 | if (unlikely(pmd_large(pmd))) { |
133 | if (!gup_huge_pmd(pmdp, pmd, addr, next, | ||
134 | write, pages, nr)) | ||
135 | return 0; | ||
136 | } else if (!gup_pte_range(pmd, addr, next, write, | ||
137 | pages, nr)) | ||
83 | return 0; | 138 | return 0; |
84 | } while (pmdp++, addr = next, addr != end); | 139 | } while (pmdp++, addr = next, addr != end); |
85 | 140 | ||
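
The heart of gup_huge_pmd() is a lockless pin-then-recheck: take speculative references on the compound head, then re-read the PMD; if it changed underneath, drop every reference and let the caller fall back to the slow path. Condensed restatement (the *nr bookkeeping is omitted):

    if (!page_cache_add_speculative(head, refs))
            return 0;                       /* could not pin: slow path */
    if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
            while (refs--)                  /* PMD changed under us */
                    put_page(head);
            return 0;
    }
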
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index c3b72423c846..82bbf048a5b0 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -314,16 +314,31 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde | |||
314 | struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb; | 314 | struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb; |
315 | unsigned long tag; | 315 | unsigned long tag; |
316 | 316 | ||
317 | if (unlikely(!tsb)) | ||
318 | return; | ||
319 | |||
317 | tsb += ((address >> tsb_hash_shift) & | 320 | tsb += ((address >> tsb_hash_shift) & |
318 | (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); | 321 | (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); |
319 | tag = (address >> 22UL); | 322 | tag = (address >> 22UL); |
320 | tsb_insert(tsb, tag, tte); | 323 | tsb_insert(tsb, tag, tte); |
321 | } | 324 | } |
322 | 325 | ||
326 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) | ||
327 | static inline bool is_hugetlb_pte(pte_t pte) | ||
328 | { | ||
329 | if ((tlb_type == hypervisor && | ||
330 | (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) || | ||
331 | (tlb_type != hypervisor && | ||
332 | (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) | ||
333 | return true; | ||
334 | return false; | ||
335 | } | ||
336 | #endif | ||
337 | |||
323 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) | 338 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) |
324 | { | 339 | { |
325 | unsigned long tsb_index, tsb_hash_shift, flags; | ||
326 | struct mm_struct *mm; | 340 | struct mm_struct *mm; |
341 | unsigned long flags; | ||
327 | pte_t pte = *ptep; | 342 | pte_t pte = *ptep; |
328 | 343 | ||
329 | if (tlb_type != hypervisor) { | 344 | if (tlb_type != hypervisor) { |
@@ -335,25 +350,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * | |||
335 | 350 | ||
336 | mm = vma->vm_mm; | 351 | mm = vma->vm_mm; |
337 | 352 | ||
338 | tsb_index = MM_TSB_BASE; | ||
339 | tsb_hash_shift = PAGE_SHIFT; | ||
340 | |||
341 | spin_lock_irqsave(&mm->context.lock, flags); | 353 | spin_lock_irqsave(&mm->context.lock, flags); |
342 | 354 | ||
343 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) | 355 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) |
344 | if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) { | 356 | if (mm->context.huge_pte_count && is_hugetlb_pte(pte)) |
345 | if ((tlb_type == hypervisor && | 357 | __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT, |
346 | (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) || | 358 | address, pte_val(pte)); |
347 | (tlb_type != hypervisor && | 359 | else |
348 | (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) { | ||
349 | tsb_index = MM_TSB_HUGE; | ||
350 | tsb_hash_shift = HPAGE_SHIFT; | ||
351 | } | ||
352 | } | ||
353 | #endif | 360 | #endif |
354 | 361 | __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT, | |
355 | __update_mmu_tsb_insert(mm, tsb_index, tsb_hash_shift, | 362 | address, pte_val(pte)); |
356 | address, pte_val(pte)); | ||
357 | 363 | ||
358 | spin_unlock_irqrestore(&mm->context.lock, flags); | 364 | spin_unlock_irqrestore(&mm->context.lock, flags); |
359 | } | 365 | } |
@@ -2712,14 +2718,28 @@ static void context_reload(void *__data) | |||
2712 | load_secondary_context(mm); | 2718 | load_secondary_context(mm); |
2713 | } | 2719 | } |
2714 | 2720 | ||
2715 | void hugetlb_setup(struct mm_struct *mm) | 2721 | void hugetlb_setup(struct pt_regs *regs) |
2716 | { | 2722 | { |
2717 | struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE]; | 2723 | struct mm_struct *mm = current->mm; |
2724 | struct tsb_config *tp; | ||
2718 | 2725 | ||
2719 | if (likely(tp->tsb != NULL)) | 2726 | if (in_atomic() || !mm) { |
2720 | return; | 2727 | const struct exception_table_entry *entry; |
2728 | |||
2729 | entry = search_exception_tables(regs->tpc); | ||
2730 | if (entry) { | ||
2731 | regs->tpc = entry->fixup; | ||
2732 | regs->tnpc = regs->tpc + 4; | ||
2733 | return; | ||
2734 | } | ||
2735 | pr_alert("Unexpected HugeTLB setup in atomic context.\n"); | ||
2736 | die_if_kernel("HugeTSB in atomic", regs); | ||
2737 | } | ||
2738 | |||
2739 | tp = &mm->context.tsb_block[MM_TSB_HUGE]; | ||
2740 | if (likely(tp->tsb == NULL)) | ||
2741 | tsb_grow(mm, MM_TSB_HUGE, 0); | ||
2721 | 2742 | ||
2722 | tsb_grow(mm, MM_TSB_HUGE, 0); | ||
2723 | tsb_context_switch(mm); | 2743 | tsb_context_switch(mm); |
2724 | smp_tsb_sync(mm); | 2744 | smp_tsb_sync(mm); |
2725 | 2745 | ||
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 3e8fec391fe0..ba6ae7ffdc2c 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c | |||
@@ -135,8 +135,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, | |||
135 | mm->context.huge_pte_count++; | 135 | mm->context.huge_pte_count++; |
136 | else | 136 | else |
137 | mm->context.huge_pte_count--; | 137 | mm->context.huge_pte_count--; |
138 | if (mm->context.huge_pte_count == 1) | 138 | |
139 | hugetlb_setup(mm); | 139 | /* Do not try to allocate the TSB hash table if we |
140 | * don't have one already. We have various locks held | ||
141 | * and thus we'll end up doing a GFP_KERNEL allocation | ||
142 | * in an atomic context. | ||
143 | * | ||
144 | * Instead, we let the first TLB miss on a hugepage | ||
145 | * take care of this. | ||
146 | */ | ||
140 | } | 147 | } |
141 | 148 | ||
142 | if (!pmd_none(orig)) { | 149 | if (!pmd_none(orig)) { |
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 7f6474347491..428982b9becf 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c | |||
@@ -314,7 +314,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss) | |||
314 | retry_tsb_alloc: | 314 | retry_tsb_alloc: |
315 | gfp_flags = GFP_KERNEL; | 315 | gfp_flags = GFP_KERNEL; |
316 | if (new_size > (PAGE_SIZE * 2)) | 316 | if (new_size > (PAGE_SIZE * 2)) |
317 | gfp_flags = __GFP_NOWARN | __GFP_NORETRY; | 317 | gfp_flags |= __GFP_NOWARN | __GFP_NORETRY; |
318 | 318 | ||
319 | new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index], | 319 | new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index], |
320 | gfp_flags, numa_node_id()); | 320 | gfp_flags, numa_node_id()); |
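
The tsb.c hunk is a one-character bug fix: with '=', large TSB allocations lost GFP_KERNEL entirely and carried only __GFP_NOWARN | __GFP_NORETRY, i.e. no sleeping or reclaim (GFP_KERNEL in this era is roughly __GFP_WAIT | __GFP_IO | __GFP_FS). The corrected pattern augments the base flags:

    /* buggy:  gfp_flags  = __GFP_NOWARN | __GFP_NORETRY;  (drops GFP_KERNEL) */
    gfp_t gfp_flags = GFP_KERNEL;
    if (new_size > (PAGE_SIZE * 2))
            gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;      /* augment */
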
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index 62bad9fed03e..872d7e22d847 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c | |||
@@ -45,11 +45,6 @@ static const char * const processor_modes[] = { | |||
45 | "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR" | 45 | "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR" |
46 | }; | 46 | }; |
47 | 47 | ||
48 | /* | ||
49 | * The idle thread, has rather strange semantics for calling pm_idle, | ||
50 | * but this is what x86 does and we need to do the same, so that | ||
51 | * things like cpuidle get called in the same way. | ||
52 | */ | ||
53 | void cpu_idle(void) | 48 | void cpu_idle(void) |
54 | { | 49 | { |
55 | /* endless idle loop with no priority at all */ | 50 | /* endless idle loop with no priority at all */ |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 225543bf45a5..f7a27fdb5098 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # Select 32 or 64 bit | 1 | # Select 32 or 64 bit |
2 | config 64BIT | 2 | config 64BIT |
3 | bool "64-bit kernel" if ARCH = "x86" | 3 | bool "64-bit kernel" if ARCH = "x86" |
4 | default ARCH = "x86_64" | 4 | default ARCH != "i386" |
5 | ---help--- | 5 | ---help--- |
6 | Say yes to build a 64-bit kernel - formerly known as x86_64 | 6 | Say yes to build a 64-bit kernel - formerly known as x86_64 |
7 | Say no to build a 32-bit kernel - formerly known as i386 | 7 | Say no to build a 32-bit kernel - formerly known as i386 |
@@ -28,7 +28,6 @@ config X86 | |||
28 | select HAVE_OPROFILE | 28 | select HAVE_OPROFILE |
29 | select HAVE_PCSPKR_PLATFORM | 29 | select HAVE_PCSPKR_PLATFORM |
30 | select HAVE_PERF_EVENTS | 30 | select HAVE_PERF_EVENTS |
31 | select HAVE_IRQ_WORK | ||
32 | select HAVE_IOREMAP_PROT | 31 | select HAVE_IOREMAP_PROT |
33 | select HAVE_KPROBES | 32 | select HAVE_KPROBES |
34 | select HAVE_MEMBLOCK | 33 | select HAVE_MEMBLOCK |
@@ -40,10 +39,12 @@ config X86 | |||
40 | select HAVE_DMA_CONTIGUOUS if !SWIOTLB | 39 | select HAVE_DMA_CONTIGUOUS if !SWIOTLB |
41 | select HAVE_KRETPROBES | 40 | select HAVE_KRETPROBES |
42 | select HAVE_OPTPROBES | 41 | select HAVE_OPTPROBES |
42 | select HAVE_KPROBES_ON_FTRACE | ||
43 | select HAVE_FTRACE_MCOUNT_RECORD | 43 | select HAVE_FTRACE_MCOUNT_RECORD |
44 | select HAVE_FENTRY if X86_64 | 44 | select HAVE_FENTRY if X86_64 |
45 | select HAVE_C_RECORDMCOUNT | 45 | select HAVE_C_RECORDMCOUNT |
46 | select HAVE_DYNAMIC_FTRACE | 46 | select HAVE_DYNAMIC_FTRACE |
47 | select HAVE_DYNAMIC_FTRACE_WITH_REGS | ||
47 | select HAVE_FUNCTION_TRACER | 48 | select HAVE_FUNCTION_TRACER |
48 | select HAVE_FUNCTION_GRAPH_TRACER | 49 | select HAVE_FUNCTION_GRAPH_TRACER |
49 | select HAVE_FUNCTION_GRAPH_FP_TEST | 50 | select HAVE_FUNCTION_GRAPH_FP_TEST |
@@ -106,6 +107,7 @@ config X86 | |||
106 | select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) | 107 | select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) |
107 | select GENERIC_TIME_VSYSCALL if X86_64 | 108 | select GENERIC_TIME_VSYSCALL if X86_64 |
108 | select KTIME_SCALAR if X86_32 | 109 | select KTIME_SCALAR if X86_32 |
110 | select ALWAYS_USE_PERSISTENT_CLOCK | ||
109 | select GENERIC_STRNCPY_FROM_USER | 111 | select GENERIC_STRNCPY_FROM_USER |
110 | select GENERIC_STRNLEN_USER | 112 | select GENERIC_STRNLEN_USER |
111 | select HAVE_CONTEXT_TRACKING if X86_64 | 113 | select HAVE_CONTEXT_TRACKING if X86_64 |
@@ -114,6 +116,7 @@ config X86 | |||
114 | select MODULES_USE_ELF_RELA if X86_64 | 116 | select MODULES_USE_ELF_RELA if X86_64 |
115 | select CLONE_BACKWARDS if X86_32 | 117 | select CLONE_BACKWARDS if X86_32 |
116 | select GENERIC_SIGALTSTACK | 118 | select GENERIC_SIGALTSTACK |
119 | select ARCH_USE_BUILTIN_BSWAP | ||
117 | 120 | ||
118 | config INSTRUCTION_DECODER | 121 | config INSTRUCTION_DECODER |
119 | def_bool y | 122 | def_bool y |
@@ -320,6 +323,10 @@ config X86_BIGSMP | |||
320 | ---help--- | 323 | ---help--- |
321 | This option is needed for the systems that have more than 8 CPUs | 324 | This option is needed for the systems that have more than 8 CPUs |
322 | 325 | ||
326 | config GOLDFISH | ||
327 | def_bool y | ||
328 | depends on X86_GOLDFISH | ||
329 | |||
323 | if X86_32 | 330 | if X86_32 |
324 | config X86_EXTENDED_PLATFORM | 331 | config X86_EXTENDED_PLATFORM |
325 | bool "Support for extended (non-PC) x86 platforms" | 332 | bool "Support for extended (non-PC) x86 platforms" |
@@ -402,6 +409,14 @@ config X86_UV | |||
402 | # Following is an alphabetically sorted list of 32 bit extended platforms | 409 | # Following is an alphabetically sorted list of 32 bit extended platforms |
403 | # Please maintain the alphabetic order if and when there are additions | 410 | # Please maintain the alphabetic order if and when there are additions |
404 | 411 | ||
412 | config X86_GOLDFISH | ||
413 | bool "Goldfish (Virtual Platform)" | ||
414 | depends on X86_32 | ||
415 | ---help--- | ||
416 | Enable support for the Goldfish virtual platform used primarily | ||
417 | for Android development. Unless you are building for the Android | ||
418 | Goldfish emulator, say N here. ||
419 | |||
405 | config X86_INTEL_CE | 420 | config X86_INTEL_CE |
406 | bool "CE4100 TV platform" | 421 | bool "CE4100 TV platform" |
407 | depends on PCI | 422 | depends on PCI |
@@ -454,6 +469,16 @@ config X86_MDFLD | |||
454 | 469 | ||
455 | endif | 470 | endif |
456 | 471 | ||
472 | config X86_INTEL_LPSS | ||
473 | bool "Intel Low Power Subsystem Support" | ||
474 | depends on ACPI | ||
475 | select COMMON_CLK | ||
476 | ---help--- | ||
477 | Select to build support for the Intel Low Power Subsystem, such | ||
478 | as that found on the Intel Lynxpoint PCH. Selecting this option | ||
479 | enables the clock tree (common clock framework) needed by the | ||
480 | LPSS peripheral drivers. | ||
481 | |||
457 | config X86_RDC321X | 482 | config X86_RDC321X |
458 | bool "RDC R-321x SoC" | 483 | bool "RDC R-321x SoC" |
459 | depends on X86_32 | 484 | depends on X86_32 |
@@ -1912,6 +1937,7 @@ config APM_DO_ENABLE | |||
1912 | this feature. | 1937 | this feature. |
1913 | 1938 | ||
1914 | config APM_CPU_IDLE | 1939 | config APM_CPU_IDLE |
1940 | depends on CPU_IDLE | ||
1915 | bool "Make CPU Idle calls when idle" | 1941 | bool "Make CPU Idle calls when idle" |
1916 | ---help--- | 1942 | ---help--- |
1917 | Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop. | 1943 | Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop. |
@@ -2188,6 +2214,15 @@ config GEOS | |||
2188 | ---help--- | 2214 | ---help--- |
2189 | This option enables system support for the Traverse Technologies GEOS. | 2215 | This option enables system support for the Traverse Technologies GEOS. |
2190 | 2216 | ||
2217 | config TS5500 | ||
2218 | bool "Technologic Systems TS-5500 platform support" | ||
2219 | depends on MELAN | ||
2220 | select CHECK_SIGNATURE | ||
2221 | select NEW_LEDS | ||
2222 | select LEDS_CLASS | ||
2223 | ---help--- | ||
2224 | This option enables system support for the Technologic Systems TS-5500. | ||
2225 | |||
2191 | endif # X86_32 | 2226 | endif # X86_32 |
2192 | 2227 | ||
2193 | config AMD_NB | 2228 | config AMD_NB |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index e71fc4279aab..5c477260294f 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -2,7 +2,11 @@ | |||
2 | 2 | ||
3 | # select defconfig based on actual architecture | 3 | # select defconfig based on actual architecture |
4 | ifeq ($(ARCH),x86) | 4 | ifeq ($(ARCH),x86) |
5 | ifeq ($(shell uname -m),x86_64) | ||
6 | KBUILD_DEFCONFIG := x86_64_defconfig | ||
7 | else | ||
5 | KBUILD_DEFCONFIG := i386_defconfig | 8 | KBUILD_DEFCONFIG := i386_defconfig |
9 | endif | ||
6 | else | 10 | else |
7 | KBUILD_DEFCONFIG := $(ARCH)_defconfig | 11 | KBUILD_DEFCONFIG := $(ARCH)_defconfig |
8 | endif | 12 | endif |
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 88f7ff6da404..7cb56c6ca351 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c | |||
@@ -325,6 +325,8 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap, | |||
325 | { | 325 | { |
326 | real_mode = rmode; | 326 | real_mode = rmode; |
327 | 327 | ||
328 | sanitize_boot_params(real_mode); | ||
329 | |||
328 | if (real_mode->screen_info.orig_video_mode == 7) { | 330 | if (real_mode->screen_info.orig_video_mode == 7) { |
329 | vidmem = (char *) 0xb0000; | 331 | vidmem = (char *) 0xb0000; |
330 | vidport = 0x3b4; | 332 | vidport = 0x3b4; |
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 0e6dc0ee0eea..674019d8e235 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/page.h> | 18 | #include <asm/page.h> |
19 | #include <asm/boot.h> | 19 | #include <asm/boot.h> |
20 | #include <asm/bootparam.h> | 20 | #include <asm/bootparam.h> |
21 | #include <asm/bootparam_utils.h> | ||
21 | 22 | ||
22 | #define BOOT_BOOT_H | 23 | #define BOOT_BOOT_H |
23 | #include "../ctype.h" | 24 | #include "../ctype.h" |
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 5598547281a7..94447086e551 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig | |||
@@ -1,3 +1,4 @@ | |||
1 | # CONFIG_64BIT is not set | ||
1 | CONFIG_EXPERIMENTAL=y | 2 | CONFIG_EXPERIMENTAL=y |
2 | # CONFIG_LOCALVERSION_AUTO is not set | 3 | # CONFIG_LOCALVERSION_AUTO is not set |
3 | CONFIG_SYSVIPC=y | 4 | CONFIG_SYSVIPC=y |
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 0c44630d1789..b31bf97775fc 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h | |||
@@ -49,10 +49,6 @@ | |||
49 | 49 | ||
50 | /* Asm macros */ | 50 | /* Asm macros */ |
51 | 51 | ||
52 | #define ACPI_ASM_MACROS | ||
53 | #define BREAKPOINT3 | ||
54 | #define ACPI_DISABLE_IRQS() local_irq_disable() | ||
55 | #define ACPI_ENABLE_IRQS() local_irq_enable() | ||
56 | #define ACPI_FLUSH_CPU_CACHE() wbinvd() | 52 | #define ACPI_FLUSH_CPU_CACHE() wbinvd() |
57 | 53 | ||
58 | int __acpi_acquire_global_lock(unsigned int *lock); | 54 | int __acpi_acquire_global_lock(unsigned int *lock); |
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index b3341e9cd8fd..a54ee1d054d9 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h | |||
@@ -81,6 +81,23 @@ static inline struct amd_northbridge *node_to_amd_nb(int node) | |||
81 | return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL; | 81 | return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL; |
82 | } | 82 | } |
83 | 83 | ||
84 | static inline u16 amd_get_node_id(struct pci_dev *pdev) | ||
85 | { | ||
86 | struct pci_dev *misc; | ||
87 | int i; | ||
88 | |||
89 | for (i = 0; i != amd_nb_num(); i++) { | ||
90 | misc = node_to_amd_nb(i)->misc; | ||
91 | |||
92 | if (pci_domain_nr(misc->bus) == pci_domain_nr(pdev->bus) && | ||
93 | PCI_SLOT(misc->devfn) == PCI_SLOT(pdev->devfn)) | ||
94 | return i; | ||
95 | } | ||
96 | |||
97 | WARN(1, "Unable to find AMD Northbridge id for %s\n", pci_name(pdev)); | ||
98 | return 0; | ||
99 | } | ||
100 | |||
84 | #else | 101 | #else |
85 | 102 | ||
86 | #define amd_nb_num(x) 0 | 103 | #define amd_nb_num(x) 0 |
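A hypothetical in-tree caller of the new helper, purely for illustration; report_node() is invented, while amd_get_node_id() and node_to_amd_nb() are the functions visible in the hunk above:

#include <linux/pci.h>
#include <asm/amd_nb.h>

/* Invented example: report which northbridge serves a PCI function. */
static int report_node(struct pci_dev *pdev)
{
        u16 nid = amd_get_node_id(pdev);
        struct amd_northbridge *nb = node_to_amd_nb(nid);

        if (!nb)
                return -ENODEV;

        dev_info(&pdev->dev, "served by northbridge %u (%s)\n",
                 nid, pci_name(nb->misc));
        return 0;
}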
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h new file mode 100644 index 000000000000..5b5e9cb774b5 --- /dev/null +++ b/arch/x86/include/asm/bootparam_utils.h | |||
@@ -0,0 +1,38 @@ | |||
1 | #ifndef _ASM_X86_BOOTPARAM_UTILS_H | ||
2 | #define _ASM_X86_BOOTPARAM_UTILS_H | ||
3 | |||
4 | #include <asm/bootparam.h> | ||
5 | |||
6 | /* | ||
7 | * This file is included from multiple environments. Do not | ||
8 | * add completing #includes to make it standalone. | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * Deal with bootloaders which fail to initialize unknown fields in | ||
13 | * boot_params to zero. The fields in this list are taken from | ||
14 | * analysis of kexec-tools; if other broken bootloaders initialize a | ||
15 | * different set of fields we will need to figure out how to disambiguate. | ||
16 | * | ||
17 | */ | ||
18 | static void sanitize_boot_params(struct boot_params *boot_params) | ||
19 | { | ||
20 | if (boot_params->sentinel) { | ||
21 | /* fields in boot_params are not valid, clear them */ ||
22 | memset(&boot_params->olpc_ofw_header, 0, | ||
23 | (char *)&boot_params->alt_mem_k - | ||
24 | (char *)&boot_params->olpc_ofw_header); | ||
25 | memset(&boot_params->kbd_status, 0, | ||
26 | (char *)&boot_params->hdr - | ||
27 | (char *)&boot_params->kbd_status); | ||
28 | memset(&boot_params->_pad7[0], 0, | ||
29 | (char *)&boot_params->edd_mbr_sig_buffer[0] - | ||
30 | (char *)&boot_params->_pad7[0]); | ||
31 | memset(&boot_params->_pad8[0], 0, | ||
32 | (char *)&boot_params->eddbuf[0] - | ||
33 | (char *)&boot_params->_pad8[0]); | ||
34 | memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9)); | ||
35 | } | ||
36 | } | ||
37 | |||
38 | #endif /* _ASM_X86_BOOTPARAM_UTILS_H */ | ||
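The trick behind sanitize_boot_params() is the sentinel convention: the kernel image ships with a non-zero sentinel byte, a conforming loader zeroes the whole structure before filling it in, so a surviving sentinel unmasks a loader that only copied the fields it knew about. A toy user-space model; the field layout here is invented, only the pattern matches:

#include <stdio.h>

struct toy_boot_params {
        unsigned char known_field;
        unsigned char sentinel;          /* pre-set non-zero in the image */
        unsigned char stale_junk;        /* garbage a lazy loader leaves */
};

static void toy_sanitize(struct toy_boot_params *bp)
{
        if (bp->sentinel)                /* loader did not zero us */
                bp->stale_junk = 0;      /* scrub the unverified fields */
}

int main(void)
{
        struct toy_boot_params bp = {
                .known_field = 1, .sentinel = 0xff, .stale_junk = 0x5a,
        };
        toy_sanitize(&bp);
        printf("stale_junk=%u\n", bp.stale_junk);   /* prints 0 */
        return 0;
}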
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 2d9075e863a0..93fe929d1cee 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -167,6 +167,7 @@ | |||
167 | #define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ | 167 | #define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ |
168 | #define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ | 168 | #define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ |
169 | #define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */ | 169 | #define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */ |
170 | #define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */ | ||
170 | 171 | ||
171 | /* | 172 | /* |
172 | * Auxiliary flags: Linux defined - For features scattered in various | 173 | * Auxiliary flags: Linux defined - For features scattered in various |
@@ -309,6 +310,7 @@ extern const char * const x86_power_flags[32]; | |||
309 | #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) | 310 | #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) |
310 | #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) | 311 | #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) |
311 | #define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) | 312 | #define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) |
313 | #define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB) | ||
312 | #define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) | 314 | #define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) |
313 | #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) | 315 | #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) |
314 | #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) | 316 | #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) |
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 9a25b522d377..86cb51e1ca96 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h | |||
@@ -44,7 +44,6 @@ | |||
44 | 44 | ||
45 | #ifdef CONFIG_DYNAMIC_FTRACE | 45 | #ifdef CONFIG_DYNAMIC_FTRACE |
46 | #define ARCH_SUPPORTS_FTRACE_OPS 1 | 46 | #define ARCH_SUPPORTS_FTRACE_OPS 1 |
47 | #define ARCH_SUPPORTS_FTRACE_SAVE_REGS | ||
48 | #endif | 47 | #endif |
49 | 48 | ||
50 | #ifndef __ASSEMBLY__ | 49 | #ifndef __ASSEMBLY__ |
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h index 434e2106cc87..b18df579c0e9 100644 --- a/arch/x86/include/asm/hpet.h +++ b/arch/x86/include/asm/hpet.h | |||
@@ -80,9 +80,9 @@ extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg); | |||
80 | extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg); | 80 | extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg); |
81 | 81 | ||
82 | #ifdef CONFIG_PCI_MSI | 82 | #ifdef CONFIG_PCI_MSI |
83 | extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id); | 83 | extern int default_setup_hpet_msi(unsigned int irq, unsigned int id); |
84 | #else | 84 | #else |
85 | static inline int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | 85 | static inline int default_setup_hpet_msi(unsigned int irq, unsigned int id) |
86 | { | 86 | { |
87 | return -EINVAL; | 87 | return -EINVAL; |
88 | } | 88 | } |
@@ -111,6 +111,7 @@ extern void hpet_unregister_irq_handler(rtc_irq_handler handler); | |||
111 | static inline int hpet_enable(void) { return 0; } | 111 | static inline int hpet_enable(void) { return 0; } |
112 | static inline int is_hpet_enabled(void) { return 0; } | 112 | static inline int is_hpet_enabled(void) { return 0; } |
113 | #define hpet_readl(a) 0 | 113 | #define hpet_readl(a) 0 |
114 | #define default_setup_hpet_msi NULL | ||
114 | 115 | ||
115 | #endif | 116 | #endif |
116 | #endif /* _ASM_X86_HPET_H */ | 117 | #endif /* _ASM_X86_HPET_H */ |
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index eb92a6ed2be7..10a78c3d3d5a 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -101,6 +101,7 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, | |||
101 | irq_attr->polarity = polarity; | 101 | irq_attr->polarity = polarity; |
102 | } | 102 | } |
103 | 103 | ||
104 | /* Intel specific interrupt remapping information */ | ||
104 | struct irq_2_iommu { | 105 | struct irq_2_iommu { |
105 | struct intel_iommu *iommu; | 106 | struct intel_iommu *iommu; |
106 | u16 irte_index; | 107 | u16 irte_index; |
@@ -108,6 +109,12 @@ struct irq_2_iommu { | |||
108 | u8 irte_mask; | 109 | u8 irte_mask; |
109 | }; | 110 | }; |
110 | 111 | ||
112 | /* AMD specific interrupt remapping information */ | ||
113 | struct irq_2_irte { | ||
114 | u16 devid; /* Device ID for IRTE table */ | ||
115 | u16 index; /* Index into IRTE table*/ | ||
116 | }; | ||
117 | |||
111 | /* | 118 | /* |
112 | * This is performance-critical, we want to do it O(1) | 119 | * This is performance-critical, we want to do it O(1) |
113 | * | 120 | * |
@@ -120,7 +127,11 @@ struct irq_cfg { | |||
120 | u8 vector; | 127 | u8 vector; |
121 | u8 move_in_progress : 1; | 128 | u8 move_in_progress : 1; |
122 | #ifdef CONFIG_IRQ_REMAP | 129 | #ifdef CONFIG_IRQ_REMAP |
123 | struct irq_2_iommu irq_2_iommu; | 130 | u8 remapped : 1; |
131 | union { | ||
132 | struct irq_2_iommu irq_2_iommu; | ||
133 | struct irq_2_irte irq_2_irte; | ||
134 | }; | ||
124 | #endif | 135 | #endif |
125 | }; | 136 | }; |
126 | 137 | ||
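The new 'remapped' bit acts as the tag for the anonymous union that follows it; a hypothetical consumer, with the function name invented and the fields taken from the hunk above (assumes CONFIG_IRQ_REMAP):

/* Invented illustration: only read a union member once the 'remapped'
 * tag says an IOMMU driver filled it in; which member is valid depends
 * on which driver set the tag (assume the AMD driver here). */
static u16 irte_index_of(struct irq_cfg *cfg)
{
        if (!cfg->remapped)
                return 0;               /* union contents are undefined */
        return cfg->irq_2_irte.index;
}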
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index b518c7509933..86095ed14135 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h | |||
@@ -25,6 +25,7 @@ | |||
25 | 25 | ||
26 | extern void init_hypervisor(struct cpuinfo_x86 *c); | 26 | extern void init_hypervisor(struct cpuinfo_x86 *c); |
27 | extern void init_hypervisor_platform(void); | 27 | extern void init_hypervisor_platform(void); |
28 | extern bool hypervisor_x2apic_available(void); | ||
28 | 29 | ||
29 | /* | 30 | /* |
30 | * x86 hypervisor information | 31 | * x86 hypervisor information |
@@ -41,6 +42,9 @@ struct hypervisor_x86 { | |||
41 | 42 | ||
42 | /* Platform setup (run once per boot) */ | 43 | /* Platform setup (run once per boot) */ |
43 | void (*init_platform)(void); | 44 | void (*init_platform)(void); |
45 | |||
46 | /* X2APIC detection (run once per boot) */ | ||
47 | bool (*x2apic_available)(void); | ||
44 | }; | 48 | }; |
45 | 49 | ||
46 | extern const struct hypervisor_x86 *x86_hyper; | 50 | extern const struct hypervisor_x86 *x86_hyper; |
@@ -51,13 +55,4 @@ extern const struct hypervisor_x86 x86_hyper_ms_hyperv; | |||
51 | extern const struct hypervisor_x86 x86_hyper_xen_hvm; | 55 | extern const struct hypervisor_x86 x86_hyper_xen_hvm; |
52 | extern const struct hypervisor_x86 x86_hyper_kvm; | 56 | extern const struct hypervisor_x86 x86_hyper_kvm; |
53 | 57 | ||
54 | static inline bool hypervisor_x2apic_available(void) | ||
55 | { | ||
56 | if (kvm_para_available()) | ||
57 | return true; | ||
58 | if (xen_x2apic_para_available()) | ||
59 | return true; | ||
60 | return false; | ||
61 | } | ||
62 | |||
63 | #endif | 58 | #endif |
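With the inline helper gone from this header, each hypervisor backend is expected to report X2APIC support through the new callback, with hypervisor_x2apic_available() presumably dispatching through x86_hyper. A sketch of the likely shape; the KVM wiring shown is an assumption, only the callback field comes from this hunk:

static bool kvm_x2apic_available_sketch(void)
{
        return kvm_para_available();    /* what the old inline hard-coded */
}

static const struct hypervisor_x86 x86_hyper_kvm_sketch = {
        /* detect, init_platform and the other callbacks omitted */
        .x2apic_available = kvm_x2apic_available_sketch,
};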
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 73d8c5398ea9..459e50a424d1 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h | |||
@@ -144,11 +144,24 @@ extern int timer_through_8259; | |||
144 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) | 144 | (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) |
145 | 145 | ||
146 | struct io_apic_irq_attr; | 146 | struct io_apic_irq_attr; |
147 | struct irq_cfg; | ||
147 | extern int io_apic_set_pci_routing(struct device *dev, int irq, | 148 | extern int io_apic_set_pci_routing(struct device *dev, int irq, |
148 | struct io_apic_irq_attr *irq_attr); | 149 | struct io_apic_irq_attr *irq_attr); |
149 | void setup_IO_APIC_irq_extra(u32 gsi); | 150 | void setup_IO_APIC_irq_extra(u32 gsi); |
150 | extern void ioapic_insert_resources(void); | 151 | extern void ioapic_insert_resources(void); |
151 | 152 | ||
153 | extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *, | ||
154 | unsigned int, int, | ||
155 | struct io_apic_irq_attr *); | ||
159 | extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg); | ||
160 | |||
161 | extern void native_compose_msi_msg(struct pci_dev *pdev, | ||
162 | unsigned int irq, unsigned int dest, | ||
163 | struct msi_msg *msg, u8 hpet_id); | ||
164 | extern void native_eoi_ioapic_pin(int apic, int pin, int vector); | ||
152 | int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr); | 165 | int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr); |
153 | 166 | ||
154 | extern int save_ioapic_entries(void); | 167 | extern int save_ioapic_entries(void); |
@@ -179,6 +192,12 @@ extern void __init native_io_apic_init_mappings(void); | |||
179 | extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg); | 192 | extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg); |
180 | extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val); | 193 | extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val); |
181 | extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val); | 194 | extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val); |
195 | extern void native_disable_io_apic(void); | ||
196 | extern void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries); | ||
197 | extern void intel_ir_io_apic_print_entries(unsigned int apic, unsigned int nr_entries); | ||
198 | extern int native_ioapic_set_affinity(struct irq_data *, | ||
199 | const struct cpumask *, | ||
200 | bool); | ||
182 | 201 | ||
183 | static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) | 202 | static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) |
184 | { | 203 | { |
@@ -193,6 +212,9 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned | |||
193 | { | 212 | { |
194 | x86_io_apic_ops.modify(apic, reg, value); | 213 | x86_io_apic_ops.modify(apic, reg, value); |
195 | } | 214 | } |
215 | |||
216 | extern void io_apic_eoi(unsigned int apic, unsigned int vector); | ||
217 | |||
196 | #else /* !CONFIG_X86_IO_APIC */ | 218 | #else /* !CONFIG_X86_IO_APIC */ |
197 | 219 | ||
198 | #define io_apic_assign_pci_irqs 0 | 220 | #define io_apic_assign_pci_irqs 0 |
@@ -223,6 +245,12 @@ static inline void disable_ioapic_support(void) { } | |||
223 | #define native_io_apic_read NULL | 245 | #define native_io_apic_read NULL |
224 | #define native_io_apic_write NULL | 246 | #define native_io_apic_write NULL |
225 | #define native_io_apic_modify NULL | 247 | #define native_io_apic_modify NULL |
248 | #define native_disable_io_apic NULL | ||
249 | #define native_io_apic_print_entries NULL | ||
250 | #define native_ioapic_set_affinity NULL | ||
251 | #define native_setup_ioapic_entry NULL | ||
252 | #define native_compose_msi_msg NULL | ||
253 | #define native_eoi_ioapic_pin NULL | ||
226 | #endif | 254 | #endif |
227 | 255 | ||
228 | #endif /* _ASM_X86_IO_APIC_H */ | 256 | #endif /* _ASM_X86_IO_APIC_H */ |
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index 5fb9bbbd2f14..95fd3527f632 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h | |||
@@ -26,8 +26,6 @@ | |||
26 | 26 | ||
27 | #ifdef CONFIG_IRQ_REMAP | 27 | #ifdef CONFIG_IRQ_REMAP |
28 | 28 | ||
29 | extern int irq_remapping_enabled; | ||
30 | |||
31 | extern void setup_irq_remapping_ops(void); | 29 | extern void setup_irq_remapping_ops(void); |
32 | extern int irq_remapping_supported(void); | 30 | extern int irq_remapping_supported(void); |
33 | extern int irq_remapping_prepare(void); | 31 | extern int irq_remapping_prepare(void); |
@@ -40,21 +38,19 @@ extern int setup_ioapic_remapped_entry(int irq, | |||
40 | unsigned int destination, | 38 | unsigned int destination, |
41 | int vector, | 39 | int vector, |
42 | struct io_apic_irq_attr *attr); | 40 | struct io_apic_irq_attr *attr); |
43 | extern int set_remapped_irq_affinity(struct irq_data *data, | ||
44 | const struct cpumask *mask, | ||
45 | bool force); | ||
46 | extern void free_remapped_irq(int irq); | 41 | extern void free_remapped_irq(int irq); |
47 | extern void compose_remapped_msi_msg(struct pci_dev *pdev, | 42 | extern void compose_remapped_msi_msg(struct pci_dev *pdev, |
48 | unsigned int irq, unsigned int dest, | 43 | unsigned int irq, unsigned int dest, |
49 | struct msi_msg *msg, u8 hpet_id); | 44 | struct msi_msg *msg, u8 hpet_id); |
50 | extern int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec); | ||
51 | extern int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, | ||
52 | int index, int sub_handle); | ||
53 | extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id); | 45 | extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id); |
46 | extern void panic_if_irq_remap(const char *msg); | ||
47 | extern bool setup_remapped_irq(int irq, | ||
48 | struct irq_cfg *cfg, | ||
49 | struct irq_chip *chip); | ||
54 | 50 | ||
55 | #else /* CONFIG_IRQ_REMAP */ | 51 | void irq_remap_modify_chip_defaults(struct irq_chip *chip); |
56 | 52 | ||
57 | #define irq_remapping_enabled 0 | 53 | #else /* CONFIG_IRQ_REMAP */ |
58 | 54 | ||
59 | static inline void setup_irq_remapping_ops(void) { } | 55 | static inline void setup_irq_remapping_ops(void) { } |
60 | static inline int irq_remapping_supported(void) { return 0; } | 56 | static inline int irq_remapping_supported(void) { return 0; } |
@@ -71,30 +67,30 @@ static inline int setup_ioapic_remapped_entry(int irq, | |||
71 | { | 67 | { |
72 | return -ENODEV; | 68 | return -ENODEV; |
73 | } | 69 | } |
74 | static inline int set_remapped_irq_affinity(struct irq_data *data, | ||
75 | const struct cpumask *mask, | ||
76 | bool force) | ||
77 | { | ||
78 | return 0; | ||
79 | } | ||
80 | static inline void free_remapped_irq(int irq) { } | 70 | static inline void free_remapped_irq(int irq) { } |
81 | static inline void compose_remapped_msi_msg(struct pci_dev *pdev, | 71 | static inline void compose_remapped_msi_msg(struct pci_dev *pdev, |
82 | unsigned int irq, unsigned int dest, | 72 | unsigned int irq, unsigned int dest, |
83 | struct msi_msg *msg, u8 hpet_id) | 73 | struct msi_msg *msg, u8 hpet_id) |
84 | { | 74 | { |
85 | } | 75 | } |
86 | static inline int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec) | 76 | static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) |
87 | { | 77 | { |
88 | return -ENODEV; | 78 | return -ENODEV; |
89 | } | 79 | } |
90 | static inline int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, | 80 | |
91 | int index, int sub_handle) | 81 | static inline void panic_if_irq_remap(const char *msg) |
82 | { | ||
83 | } | ||
84 | |||
85 | static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip) | ||
92 | { | 86 | { |
93 | return -ENODEV; | ||
94 | } | 87 | } |
95 | static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) | 88 | |
89 | static inline bool setup_remapped_irq(int irq, | ||
90 | struct irq_cfg *cfg, | ||
91 | struct irq_chip *chip) | ||
96 | { | 92 | { |
97 | return -ENODEV; | 93 | return false; |
98 | } | 94 | } |
99 | #endif /* CONFIG_IRQ_REMAP */ | 95 | #endif /* CONFIG_IRQ_REMAP */ |
100 | 96 | ||
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 1508e518c7e3..aac5fa62a86c 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h | |||
@@ -109,8 +109,8 @@ | |||
109 | 109 | ||
110 | #define UV_BAU_MESSAGE 0xf5 | 110 | #define UV_BAU_MESSAGE 0xf5 |
111 | 111 | ||
112 | /* Xen vector callback to receive events in a HVM domain */ | 112 | /* Vector on which hypervisor callbacks will be delivered */ |
113 | #define XEN_HVM_EVTCHN_CALLBACK 0xf3 | 113 | #define HYPERVISOR_CALLBACK_VECTOR 0xf3 |
114 | 114 | ||
115 | /* | 115 | /* |
116 | * Local APIC timer IRQ vector is on a different priority level, | 116 | * Local APIC timer IRQ vector is on a different priority level, |
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index 5ed1f16187be..65231e173baf 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h | |||
@@ -85,13 +85,13 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, | |||
85 | return ret; | 85 | return ret; |
86 | } | 86 | } |
87 | 87 | ||
88 | static inline int kvm_para_available(void) | 88 | static inline bool kvm_para_available(void) |
89 | { | 89 | { |
90 | unsigned int eax, ebx, ecx, edx; | 90 | unsigned int eax, ebx, ecx, edx; |
91 | char signature[13]; | 91 | char signature[13]; |
92 | 92 | ||
93 | if (boot_cpu_data.cpuid_level < 0) | 93 | if (boot_cpu_data.cpuid_level < 0) |
94 | return 0; /* So we don't blow up on old processors */ | 94 | return false; /* So we don't blow up on old processors */ |
95 | 95 | ||
96 | if (cpu_has_hypervisor) { | 96 | if (cpu_has_hypervisor) { |
97 | cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx); | 97 | cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx); |
@@ -101,10 +101,10 @@ static inline int kvm_para_available(void) | |||
101 | signature[12] = 0; | 101 | signature[12] = 0; |
102 | 102 | ||
103 | if (strcmp(signature, "KVMKVMKVM") == 0) | 103 | if (strcmp(signature, "KVMKVMKVM") == 0) |
104 | return 1; | 104 | return true; |
105 | } | 105 | } |
106 | 106 | ||
107 | return 0; | 107 | return false; |
108 | } | 108 | } |
109 | 109 | ||
110 | static inline unsigned int kvm_arch_para_features(void) | 110 | static inline unsigned int kvm_arch_para_features(void) |
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index 48142971b25d..79327e9483a3 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h | |||
@@ -27,20 +27,20 @@ | |||
27 | #define __asmlinkage_protect0(ret) \ | 27 | #define __asmlinkage_protect0(ret) \ |
28 | __asmlinkage_protect_n(ret) | 28 | __asmlinkage_protect_n(ret) |
29 | #define __asmlinkage_protect1(ret, arg1) \ | 29 | #define __asmlinkage_protect1(ret, arg1) \ |
30 | __asmlinkage_protect_n(ret, "g" (arg1)) | 30 | __asmlinkage_protect_n(ret, "m" (arg1)) |
31 | #define __asmlinkage_protect2(ret, arg1, arg2) \ | 31 | #define __asmlinkage_protect2(ret, arg1, arg2) \ |
32 | __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2)) | 32 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2)) |
33 | #define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ | 33 | #define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ |
34 | __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3)) | 34 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3)) |
35 | #define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ | 35 | #define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ |
36 | __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \ | 36 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ |
37 | "g" (arg4)) | 37 | "m" (arg4)) |
38 | #define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ | 38 | #define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ |
39 | __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \ | 39 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ |
40 | "g" (arg4), "g" (arg5)) | 40 | "m" (arg4), "m" (arg5)) |
41 | #define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ | 41 | #define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ |
42 | __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \ | 42 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ |
43 | "g" (arg4), "g" (arg5), "g" (arg6)) | 43 | "m" (arg4), "m" (arg5), "m" (arg6)) |
44 | 44 | ||
45 | #endif /* CONFIG_X86_32 */ | 45 | #endif /* CONFIG_X86_32 */ |
46 | 46 | ||
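The "g" → "m" switch in linkage.h closes a real hole: "g" lets the compiler satisfy the dummy operand with a register or an immediate, so nothing forced the argument's stack slot to stay live; "m" pins the operand to memory. A stand-alone sketch of the same trick (function and names invented):

/* The empty asm consumes each argument as a live memory input, so the
 * compiler cannot recycle the argument stack slots as scratch space,
 * which is exactly what asmlinkage callers on x86-32 rely on. */
long stackargs_fn(long a, long b)
{
        long ret = a + b;
        asm volatile ("" : "=r" (ret) : "0" (ret), "m" (a), "m" (b));
        return ret;
}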
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 79ce5685ab64..c2934be2446a 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h | |||
@@ -11,4 +11,8 @@ struct ms_hyperv_info { | |||
11 | 11 | ||
12 | extern struct ms_hyperv_info ms_hyperv; | 12 | extern struct ms_hyperv_info ms_hyperv; |
13 | 13 | ||
14 | void hyperv_callback_vector(void); | ||
15 | void hyperv_vector_handler(struct pt_regs *regs); | ||
16 | void hv_register_vmbus_handler(int irq, irq_handler_t handler); | ||
17 | |||
14 | #endif | 18 | #endif |
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index bcdff997668c..2f366d0ac6b4 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h | |||
@@ -4,7 +4,8 @@ | |||
4 | #define MWAIT_SUBSTATE_MASK 0xf | 4 | #define MWAIT_SUBSTATE_MASK 0xf |
5 | #define MWAIT_CSTATE_MASK 0xf | 5 | #define MWAIT_CSTATE_MASK 0xf |
6 | #define MWAIT_SUBSTATE_SIZE 4 | 6 | #define MWAIT_SUBSTATE_SIZE 4 |
7 | #define MWAIT_MAX_NUM_CSTATES 8 | 7 | #define MWAIT_HINT2CSTATE(hint) (((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) |
8 | #define MWAIT_HINT2SUBSTATE(hint) ((hint) & MWAIT_CSTATE_MASK) | ||
8 | 9 | ||
9 | #define CPUID_MWAIT_LEAF 5 | 10 | #define CPUID_MWAIT_LEAF 5 |
10 | #define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1 | 11 | #define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1 |
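The two new macros just split an MWAIT hint into its nibbles; a runnable check (the hint value 0x32 is an arbitrary example):

#include <stdio.h>

#define MWAIT_SUBSTATE_MASK 0xf
#define MWAIT_CSTATE_MASK   0xf
#define MWAIT_SUBSTATE_SIZE 4
#define MWAIT_HINT2CSTATE(hint)   (((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
#define MWAIT_HINT2SUBSTATE(hint) ((hint) & MWAIT_CSTATE_MASK)

int main(void)
{
        unsigned int hint = 0x32;       /* C-state 3, sub-state 2 */
        printf("cstate=%u substate=%u\n",
               MWAIT_HINT2CSTATE(hint), MWAIT_HINT2SUBSTATE(hint));
        return 0;                       /* prints cstate=3 substate=2 */
}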
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index dba7805176bf..c28fd02f4bf7 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -121,9 +121,12 @@ static inline void x86_restore_msi_irqs(struct pci_dev *dev, int irq) | |||
121 | #define arch_teardown_msi_irq x86_teardown_msi_irq | 121 | #define arch_teardown_msi_irq x86_teardown_msi_irq |
122 | #define arch_restore_msi_irqs x86_restore_msi_irqs | 122 | #define arch_restore_msi_irqs x86_restore_msi_irqs |
123 | /* implemented in arch/x86/kernel/apic/io_apic. */ | 123 | /* implemented in arch/x86/kernel/apic/io_apic. */ |
124 | struct msi_desc; | ||
124 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); | 125 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); |
125 | void native_teardown_msi_irq(unsigned int irq); | 126 | void native_teardown_msi_irq(unsigned int irq); |
126 | void native_restore_msi_irqs(struct pci_dev *dev, int irq); | 127 | void native_restore_msi_irqs(struct pci_dev *dev, int irq); |
128 | int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, | ||
129 | unsigned int irq_base, unsigned int irq_offset); | ||
127 | /* default to the implementation in drivers/lib/msi.c */ | 130 | /* default to the implementation in drivers/lib/msi.c */ |
128 | #define HAVE_DEFAULT_MSI_TEARDOWN_IRQS | 131 | #define HAVE_DEFAULT_MSI_TEARDOWN_IRQS |
129 | #define HAVE_DEFAULT_MSI_RESTORE_IRQS | 132 | #define HAVE_DEFAULT_MSI_RESTORE_IRQS |
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 4fabcdf1cfa7..57cb63402213 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h | |||
@@ -29,8 +29,13 @@ | |||
29 | #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) | 29 | #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) |
30 | #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL | 30 | #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL |
31 | 31 | ||
32 | #define AMD_PERFMON_EVENTSEL_GUESTONLY (1ULL << 40) | 32 | #define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36) |
33 | #define AMD_PERFMON_EVENTSEL_HOSTONLY (1ULL << 41) | 33 | #define AMD64_EVENTSEL_GUESTONLY (1ULL << 40) |
34 | #define AMD64_EVENTSEL_HOSTONLY (1ULL << 41) | ||
35 | |||
36 | #define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37 | ||
37 | #define AMD64_EVENTSEL_INT_CORE_SEL_MASK \ | ||
38 | (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT) | ||
34 | 39 | ||
35 | #define AMD64_EVENTSEL_EVENT \ | 40 | #define AMD64_EVENTSEL_EVENT \ |
36 | (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32)) | 41 | (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32)) |
@@ -46,8 +51,12 @@ | |||
46 | #define AMD64_RAW_EVENT_MASK \ | 51 | #define AMD64_RAW_EVENT_MASK \ |
47 | (X86_RAW_EVENT_MASK | \ | 52 | (X86_RAW_EVENT_MASK | \ |
48 | AMD64_EVENTSEL_EVENT) | 53 | AMD64_EVENTSEL_EVENT) |
54 | #define AMD64_RAW_EVENT_MASK_NB \ | ||
55 | (AMD64_EVENTSEL_EVENT | \ | ||
56 | ARCH_PERFMON_EVENTSEL_UMASK) | ||
49 | #define AMD64_NUM_COUNTERS 4 | 57 | #define AMD64_NUM_COUNTERS 4 |
50 | #define AMD64_NUM_COUNTERS_CORE 6 | 58 | #define AMD64_NUM_COUNTERS_CORE 6 |
59 | #define AMD64_NUM_COUNTERS_NB 4 | ||
51 | 60 | ||
52 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c | 61 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c |
53 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) | 62 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 1c1a955e67c0..fc304279b559 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -786,6 +786,18 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | |||
786 | memcpy(dst, src, count * sizeof(pgd_t)); | 786 | memcpy(dst, src, count * sizeof(pgd_t)); |
787 | } | 787 | } |
788 | 788 | ||
789 | /* | ||
790 | * The x86 doesn't have any external MMU info: the kernel page | ||
791 | * tables contain all the necessary information. | ||
792 | */ | ||
793 | static inline void update_mmu_cache(struct vm_area_struct *vma, | ||
794 | unsigned long addr, pte_t *ptep) | ||
795 | { | ||
796 | } | ||
797 | static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, | ||
798 | unsigned long addr, pmd_t *pmd) | ||
799 | { | ||
800 | } | ||
789 | 801 | ||
790 | #include <asm-generic/pgtable.h> | 802 | #include <asm-generic/pgtable.h> |
791 | #endif /* __ASSEMBLY__ */ | 803 | #endif /* __ASSEMBLY__ */ |
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 8faa215a503e..9ee322103c6d 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h | |||
@@ -66,13 +66,6 @@ do { \ | |||
66 | __flush_tlb_one((vaddr)); \ | 66 | __flush_tlb_one((vaddr)); \ |
67 | } while (0) | 67 | } while (0) |
68 | 68 | ||
69 | /* | ||
70 | * The i386 doesn't have any external MMU info: the kernel page | ||
71 | * tables contain all the necessary information. | ||
72 | */ | ||
73 | #define update_mmu_cache(vma, address, ptep) do { } while (0) | ||
74 | #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) | ||
75 | |||
76 | #endif /* !__ASSEMBLY__ */ | 69 | #endif /* !__ASSEMBLY__ */ |
77 | 70 | ||
78 | /* | 71 | /* |
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 47356f9df82e..615b0c78449f 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -142,9 +142,6 @@ static inline int pgd_large(pgd_t pgd) { return 0; } | |||
142 | #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) | 142 | #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) |
143 | #define pte_unmap(pte) ((void)(pte))/* NOP */ | 143 | #define pte_unmap(pte) ((void)(pte))/* NOP */ |
144 | 144 | ||
145 | #define update_mmu_cache(vma, address, ptep) do { } while (0) | ||
146 | #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) | ||
147 | |||
148 | /* Encode and de-code a swap entry */ | 145 | /* Encode and de-code a swap entry */ |
149 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE | 146 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE |
150 | #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) | 147 | #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 888184b2fc85..d172588efae5 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -89,7 +89,6 @@ struct cpuinfo_x86 { | |||
89 | char wp_works_ok; /* It doesn't on 386's */ | 89 | char wp_works_ok; /* It doesn't on 386's */ |
90 | 90 | ||
91 | /* Problems on some 486Dx4's and old 386's: */ | 91 | /* Problems on some 486Dx4's and old 386's: */ |
92 | char hlt_works_ok; | ||
93 | char hard_math; | 92 | char hard_math; |
94 | char rfu; | 93 | char rfu; |
95 | char fdiv_bug; | 94 | char fdiv_bug; |
@@ -165,15 +164,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); | |||
165 | 164 | ||
166 | extern const struct seq_operations cpuinfo_op; | 165 | extern const struct seq_operations cpuinfo_op; |
167 | 166 | ||
168 | static inline int hlt_works(int cpu) | ||
169 | { | ||
170 | #ifdef CONFIG_X86_32 | ||
171 | return cpu_data(cpu).hlt_works_ok; | ||
172 | #else | ||
173 | return 1; | ||
174 | #endif | ||
175 | } | ||
176 | |||
177 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) | 167 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) |
178 | 168 | ||
179 | extern void cpu_detect(struct cpuinfo_x86 *c); | 169 | extern void cpu_detect(struct cpuinfo_x86 *c); |
@@ -725,7 +715,7 @@ extern unsigned long boot_option_idle_override; | |||
725 | extern bool amd_e400_c1e_detected; | 715 | extern bool amd_e400_c1e_detected; |
726 | 716 | ||
727 | enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, | 717 | enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, |
728 | IDLE_POLL, IDLE_FORCE_MWAIT}; | 718 | IDLE_POLL}; |
729 | 719 | ||
730 | extern void enable_sep_cpu(void); | 720 | extern void enable_sep_cpu(void); |
731 | extern int sysenter_setup(void); | 721 | extern int sysenter_setup(void); |
@@ -943,7 +933,7 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip, | |||
943 | extern int get_tsc_mode(unsigned long adr); | 933 | extern int get_tsc_mode(unsigned long adr); |
944 | extern int set_tsc_mode(unsigned int val); | 934 | extern int set_tsc_mode(unsigned int val); |
945 | 935 | ||
946 | extern int amd_get_nb_id(int cpu); | 936 | extern u16 amd_get_nb_id(int cpu); |
947 | 937 | ||
948 | struct aperfmperf { | 938 | struct aperfmperf { |
949 | u64 aperf, mperf; | 939 | u64 aperf, mperf; |
@@ -998,7 +988,11 @@ extern unsigned long arch_align_stack(unsigned long sp); | |||
998 | extern void free_init_pages(char *what, unsigned long begin, unsigned long end); | 988 | extern void free_init_pages(char *what, unsigned long begin, unsigned long end); |
999 | 989 | ||
1000 | void default_idle(void); | 990 | void default_idle(void); |
1001 | bool set_pm_idle_to_default(void); | 991 | #ifdef CONFIG_XEN |
992 | bool xen_set_default_idle(void); | ||
993 | #else | ||
994 | #define xen_set_default_idle 0 | ||
995 | #endif | ||
1002 | 996 | ||
1003 | void stop_this_cpu(void *dummy); | 997 | void stop_this_cpu(void *dummy); |
1004 | 998 | ||
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index 6c7fc25f2c34..5c6e4fb370f5 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h | |||
@@ -47,6 +47,12 @@ | |||
47 | # define NEED_NOPL 0 | 47 | # define NEED_NOPL 0 |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | #ifdef CONFIG_MATOM | ||
51 | # define NEED_MOVBE (1<<(X86_FEATURE_MOVBE & 31)) | ||
52 | #else | ||
53 | # define NEED_MOVBE 0 | ||
54 | #endif | ||
55 | |||
50 | #ifdef CONFIG_X86_64 | 56 | #ifdef CONFIG_X86_64 |
51 | #ifdef CONFIG_PARAVIRT | 57 | #ifdef CONFIG_PARAVIRT |
52 | /* Paravirtualized systems may not have PSE or PGE available */ | 58 | /* Paravirtualized systems may not have PSE or PGE available */ |
@@ -80,7 +86,7 @@ | |||
80 | 86 | ||
81 | #define REQUIRED_MASK2 0 | 87 | #define REQUIRED_MASK2 0 |
82 | #define REQUIRED_MASK3 (NEED_NOPL) | 88 | #define REQUIRED_MASK3 (NEED_NOPL) |
83 | #define REQUIRED_MASK4 0 | 89 | #define REQUIRED_MASK4 (NEED_MOVBE) |
84 | #define REQUIRED_MASK5 0 | 90 | #define REQUIRED_MASK5 0 |
85 | #define REQUIRED_MASK6 0 | 91 | #define REQUIRED_MASK6 0 |
86 | #define REQUIRED_MASK7 0 | 92 | #define REQUIRED_MASK7 0 |
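The NEED_MOVBE arithmetic follows cpufeature.h's word/bit encoding: the feature number divided by 32 selects the REQUIRED_MASKn word (MOVBE lives in word 4, hence REQUIRED_MASK4) and the '& 31' keeps the bit position within that word. A runnable check, quoting the feature number from that header:

#include <stdio.h>

#define X86_FEATURE_MOVBE (4*32 + 22)   /* CPUID.1:ECX bit 22, word 4 */
#define NEED_MOVBE (1 << (X86_FEATURE_MOVBE & 31))

int main(void)
{
        printf("word=%d bit=%d mask=%#x\n",
               X86_FEATURE_MOVBE / 32, X86_FEATURE_MOVBE & 31, NEED_MOVBE);
        return 0;                       /* word=4 bit=22 mask=0x400000 */
}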
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 21f7385badb8..2c32df95bb78 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * SGI UV architectural definitions | 6 | * SGI UV architectural definitions |
7 | * | 7 | * |
8 | * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. | 8 | * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _ASM_X86_UV_UV_HUB_H | 11 | #ifndef _ASM_X86_UV_UV_HUB_H |
@@ -175,6 +175,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | |||
175 | */ | 175 | */ |
176 | #define UV1_HUB_REVISION_BASE 1 | 176 | #define UV1_HUB_REVISION_BASE 1 |
177 | #define UV2_HUB_REVISION_BASE 3 | 177 | #define UV2_HUB_REVISION_BASE 3 |
178 | #define UV3_HUB_REVISION_BASE 5 | ||
178 | 179 | ||
179 | static inline int is_uv1_hub(void) | 180 | static inline int is_uv1_hub(void) |
180 | { | 181 | { |
@@ -183,6 +184,23 @@ static inline int is_uv1_hub(void) | |||
183 | 184 | ||
184 | static inline int is_uv2_hub(void) | 185 | static inline int is_uv2_hub(void) |
185 | { | 186 | { |
187 | return ((uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE) && | ||
188 | (uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE)); | ||
189 | } | ||
190 | |||
191 | static inline int is_uv3_hub(void) | ||
192 | { | ||
193 | return uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE; | ||
194 | } | ||
195 | |||
196 | static inline int is_uv_hub(void) | ||
197 | { | ||
198 | return uv_hub_info->hub_revision; | ||
199 | } | ||
200 | |||
201 | /* code common to uv2 and uv3 only */ | ||
202 | static inline int is_uvx_hub(void) | ||
203 | { | ||
186 | return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE; | 204 | return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE; |
187 | } | 205 | } |
188 | 206 | ||
@@ -230,14 +248,23 @@ union uvh_apicid { | |||
230 | #define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024) | 248 | #define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024) |
231 | #define UV2_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024) | 249 | #define UV2_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024) |
232 | 250 | ||
233 | #define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE \ | 251 | #define UV3_LOCAL_MMR_BASE 0xfa000000UL |
234 | : UV2_LOCAL_MMR_BASE) | 252 | #define UV3_GLOBAL_MMR32_BASE 0xfc000000UL |
235 | #define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE \ | 253 | #define UV3_LOCAL_MMR_SIZE (32UL * 1024 * 1024) |
236 | : UV2_GLOBAL_MMR32_BASE) | 254 | #define UV3_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024) |
237 | #define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \ | 255 | |
238 | UV2_LOCAL_MMR_SIZE) | 256 | #define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE : \ |
257 | (is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \ | ||
258 | UV3_LOCAL_MMR_BASE)) | ||
259 | #define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE :\ | ||
260 | (is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE :\ | ||
261 | UV3_GLOBAL_MMR32_BASE)) | ||
262 | #define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \ | ||
263 | (is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \ | ||
264 | UV3_LOCAL_MMR_SIZE)) | ||
239 | #define UV_GLOBAL_MMR32_SIZE (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\ | 265 | #define UV_GLOBAL_MMR32_SIZE (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\ |
240 | UV2_GLOBAL_MMR32_SIZE) | 266 | (is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE :\ |
267 | UV3_GLOBAL_MMR32_SIZE)) | ||
241 | #define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base) | 268 | #define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base) |
242 | 269 | ||
243 | #define UV_GLOBAL_GRU_MMR_BASE 0x4000000 | 270 | #define UV_GLOBAL_GRU_MMR_BASE 0x4000000 |
@@ -599,6 +626,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector) | |||
599 | * 1 - UV1 rev 1.0 initial silicon | 626 | * 1 - UV1 rev 1.0 initial silicon |
600 | * 2 - UV1 rev 2.0 production silicon | 627 | * 2 - UV1 rev 2.0 production silicon |
601 | * 3 - UV2 rev 1.0 initial silicon | 628 | * 3 - UV2 rev 1.0 initial silicon |
629 | * 5 - UV3 rev 1.0 initial silicon | ||
602 | */ | 630 | */ |
603 | static inline int uv_get_min_hub_revision_id(void) | 631 | static inline int uv_get_min_hub_revision_id(void) |
604 | { | 632 | { |
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index cf1d73643f60..bd5f80e58a23 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h | |||
@@ -5,16 +5,25 @@ | |||
5 | * | 5 | * |
6 | * SGI UV MMR definitions | 6 | * SGI UV MMR definitions |
7 | * | 7 | * |
8 | * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved. | 8 | * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef _ASM_X86_UV_UV_MMRS_H | 11 | #ifndef _ASM_X86_UV_UV_MMRS_H |
12 | #define _ASM_X86_UV_UV_MMRS_H | 12 | #define _ASM_X86_UV_UV_MMRS_H |
13 | 13 | ||
14 | /* | 14 | /* |
15 | * This file contains MMR definitions for both UV1 & UV2 hubs. | 15 | * This file contains MMR definitions for all UV hub types. |
16 | * | 16 | * |
17 | * In general, MMR addresses and structures are identical on both hubs. | 17 | * To minimize coding differences between hub types, the symbols are |
18 | * grouped by architecture types. | ||
19 | * | ||
20 | * UVH - definitions common to all UV hub types. | ||
21 | * UVXH - definitions common to all UV eXtended hub types (currently 2 & 3). | ||
22 | * UV1H - definitions specific to UV type 1 hub. | ||
23 | * UV2H - definitions specific to UV type 2 hub. | ||
24 | * UV3H - definitions specific to UV type 3 hub. | ||
25 | * | ||
26 | * So in general, MMR addresses and structures are identical on all hub types. | ||
18 | * These MMRs are identified as: | 27 | * These MMRs are identified as: |
19 | * #define UVH_xxx <address> | 28 | * #define UVH_xxx <address> |
20 | * union uvh_xxx { | 29 | * union uvh_xxx { |
@@ -23,24 +32,36 @@ | |||
23 | * } s; | 32 | * } s; |
24 | * }; | 33 | * }; |
25 | * | 34 | * |
26 | * If the MMR exists on both hub type but has different addresses or | 35 | * If the MMR exists on all hub types but has different addresses: |
27 | * contents, the MMR definition is similar to: | 36 | * #define UV1Hxxx a |
28 | * #define UV1H_xxx <uv1 address> | 37 | * #define UV2Hxxx b |
29 | * #define UV2H_xxx <uv2address> | 38 | * #define UV3Hxxx c |
30 | * #define UVH_xxx (is_uv1_hub() ? UV1H_xxx : UV2H_xxx) | 39 | * #define UVHxxx (is_uv1_hub() ? UV1Hxxx : |
40 | * (is_uv2_hub() ? UV2Hxxx : | ||
41 | * UV3Hxxx)) | ||
42 | * | ||
43 | * If the MMR exists on all hub types > 1 but has different addresses: | ||
44 | * #define UV2Hxxx b | ||
45 | * #define UV3Hxxx c | ||
46 | * #define UVXHxxx (is_uv2_hub() ? UV2Hxxx : | ||
47 | * UV3Hxxx) | ||
48 | * | ||
31 | * union uvh_xxx { | 49 | * union uvh_xxx { |
32 | * unsigned long v; | 50 | * unsigned long v; |
33 | * struct uv1h_int_cmpd_s { (Common fields only) | 51 | * struct uvh_xxx_s { # Common fields only |
34 | * } s; | 52 | * } s; |
35 | * struct uv1h_int_cmpd_s { (Full UV1 definition) | 53 | * struct uv1h_xxx_s { # Full UV1 definition (*) |
36 | * } s1; | 54 | * } s1; |
37 | * struct uv2h_int_cmpd_s { (Full UV2 definition) | 55 | * struct uv2h_xxx_s { # Full UV2 definition (*) |
38 | * } s2; | 56 | * } s2; |
57 | * struct uv3h_xxx_s { # Full UV3 definition (*) | ||
58 | * } s3; | ||
39 | * }; | 59 | * }; |
60 | * (* - if present and different than the common struct) | ||
40 | * | 61 | * |
41 | * Only essential difference are enumerated. For example, if the address is | 62 | * Only essential differences are enumerated. For example, if the address is |
42 | * the same for both UV1 & UV2, only a single #define is generated. Likewise, | 63 | * the same for all UVs, only a single #define is generated. Likewise, |
43 | * if the contents is the same for both hubs, only the "s" structure is | 64 | * if the contents are the same for all hubs, only the "s" structure is |
44 | * generated. | 65 | * generated. |
45 | * | 66 | * |
46 | * If the MMR exists on ONLY 1 type of hub, no generic definition is | 67 | * If the MMR exists on ONLY 1 type of hub, no generic definition is |
@@ -51,6 +72,8 @@ | |||
51 | * struct uvh_int_cmpd_s { | 72 | * struct uvh_int_cmpd_s { |
52 | * } sn; | 73 | * } sn; |
53 | * }; | 74 | * }; |
75 | * | ||
76 | * (GEN Flags: mflags_opt= undefs=0 UV23=UVXH) | ||
54 | */ | 77 | */ |
55 | 78 | ||
56 | #define UV_MMR_ENABLE (1UL << 63) | 79 | #define UV_MMR_ENABLE (1UL << 63) |
@@ -58,15 +81,18 @@ | |||
58 | #define UV1_HUB_PART_NUMBER 0x88a5 | 81 | #define UV1_HUB_PART_NUMBER 0x88a5 |
59 | #define UV2_HUB_PART_NUMBER 0x8eb8 | 82 | #define UV2_HUB_PART_NUMBER 0x8eb8 |
60 | #define UV2_HUB_PART_NUMBER_X 0x1111 | 83 | #define UV2_HUB_PART_NUMBER_X 0x1111 |
84 | #define UV3_HUB_PART_NUMBER 0x9578 | ||
85 | #define UV3_HUB_PART_NUMBER_X 0x4321 | ||
61 | 86 | ||
62 | /* Compat: if this #define is present, UV headers support UV2 */ | 87 | /* Compat: Indicate which UV Hubs are supported. */ |
63 | #define UV2_HUB_IS_SUPPORTED 1 | 88 | #define UV2_HUB_IS_SUPPORTED 1 |
89 | #define UV3_HUB_IS_SUPPORTED 1 | ||
64 | 90 | ||
65 | /* ========================================================================= */ | 91 | /* ========================================================================= */ |
66 | /* UVH_BAU_DATA_BROADCAST */ | 92 | /* UVH_BAU_DATA_BROADCAST */ |
67 | /* ========================================================================= */ | 93 | /* ========================================================================= */ |
68 | #define UVH_BAU_DATA_BROADCAST 0x61688UL | 94 | #define UVH_BAU_DATA_BROADCAST 0x61688UL |
69 | #define UVH_BAU_DATA_BROADCAST_32 0x440 | 95 | #define UVH_BAU_DATA_BROADCAST_32 0x440 |
70 | 96 | ||
71 | #define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0 | 97 | #define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0 |
72 | #define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL | 98 | #define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL |
@@ -82,8 +108,8 @@ union uvh_bau_data_broadcast_u { | |||
82 | /* ========================================================================= */ | 108 | /* ========================================================================= */ |
83 | /* UVH_BAU_DATA_CONFIG */ | 109 | /* UVH_BAU_DATA_CONFIG */ |
84 | /* ========================================================================= */ | 110 | /* ========================================================================= */ |
85 | #define UVH_BAU_DATA_CONFIG 0x61680UL | 111 | #define UVH_BAU_DATA_CONFIG 0x61680UL |
86 | #define UVH_BAU_DATA_CONFIG_32 0x438 | 112 | #define UVH_BAU_DATA_CONFIG_32 0x438 |
87 | 113 | ||
88 | #define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0 | 114 | #define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0 |
89 | #define UVH_BAU_DATA_CONFIG_DM_SHFT 8 | 115 | #define UVH_BAU_DATA_CONFIG_DM_SHFT 8 |
@@ -121,10 +147,14 @@ union uvh_bau_data_config_u { | |||
121 | /* ========================================================================= */ | 147 | /* ========================================================================= */ |
122 | /* UVH_EVENT_OCCURRED0 */ | 148 | /* UVH_EVENT_OCCURRED0 */ |
123 | /* ========================================================================= */ | 149 | /* ========================================================================= */ |
124 | #define UVH_EVENT_OCCURRED0 0x70000UL | 150 | #define UVH_EVENT_OCCURRED0 0x70000UL |
125 | #define UVH_EVENT_OCCURRED0_32 0x5e8 | 151 | #define UVH_EVENT_OCCURRED0_32 0x5e8 |
152 | |||
153 | #define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0 | ||
154 | #define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11 | ||
155 | #define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL | ||
156 | #define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL | ||
126 | 157 | ||
127 | #define UV1H_EVENT_OCCURRED0_LB_HCERR_SHFT 0 | ||
128 | #define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1 | 158 | #define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1 |
129 | #define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2 | 159 | #define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2 |
130 | #define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3 | 160 | #define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3 |
@@ -135,7 +165,6 @@ union uvh_bau_data_config_u { | |||
135 | #define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8 | 165 | #define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8 |
136 | #define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9 | 166 | #define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9 |
137 | #define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10 | 167 | #define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10 |
138 | #define UV1H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11 | ||
139 | #define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12 | 168 | #define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12 |
140 | #define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13 | 169 | #define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13 |
141 | #define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14 | 170 | #define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14 |
@@ -181,7 +210,6 @@ union uvh_bau_data_config_u { | |||
181 | #define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54 | 210 | #define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54 |
182 | #define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55 | 211 | #define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55 |
183 | #define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56 | 212 | #define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56 |
184 | #define UV1H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL | ||
185 | #define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL | 213 | #define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL |
186 | #define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL | 214 | #define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL |
187 | #define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL | 215 | #define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL |
@@ -192,7 +220,6 @@ union uvh_bau_data_config_u { | |||
192 | #define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL | 220 | #define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL |
193 | #define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL | 221 | #define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL |
194 | #define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL | 222 | #define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL |
195 | #define UV1H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL | ||
196 | #define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL | 223 | #define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL |
197 | #define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL | 224 | #define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL |
198 | #define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL | 225 | #define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL |
@@ -239,188 +266,130 @@ union uvh_bau_data_config_u { | |||
239 | #define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL | 266 | #define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL |
240 | #define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL | 267 | #define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL |
241 | 268 | ||
242 | #define UV2H_EVENT_OCCURRED0_LB_HCERR_SHFT 0 | 269 | #define UVXH_EVENT_OCCURRED0_QP_HCERR_SHFT 1 |
243 | #define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1 | 270 | #define UVXH_EVENT_OCCURRED0_RH_HCERR_SHFT 2 |
244 | #define UV2H_EVENT_OCCURRED0_RH_HCERR_SHFT 2 | 271 | #define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT 3 |
245 | #define UV2H_EVENT_OCCURRED0_LH0_HCERR_SHFT 3 | 272 | #define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT 4 |
246 | #define UV2H_EVENT_OCCURRED0_LH1_HCERR_SHFT 4 | 273 | #define UVXH_EVENT_OCCURRED0_GR0_HCERR_SHFT 5 |
247 | #define UV2H_EVENT_OCCURRED0_GR0_HCERR_SHFT 5 | 274 | #define UVXH_EVENT_OCCURRED0_GR1_HCERR_SHFT 6 |
248 | #define UV2H_EVENT_OCCURRED0_GR1_HCERR_SHFT 6 | 275 | #define UVXH_EVENT_OCCURRED0_NI0_HCERR_SHFT 7 |
249 | #define UV2H_EVENT_OCCURRED0_NI0_HCERR_SHFT 7 | 276 | #define UVXH_EVENT_OCCURRED0_NI1_HCERR_SHFT 8 |
250 | #define UV2H_EVENT_OCCURRED0_NI1_HCERR_SHFT 8 | 277 | #define UVXH_EVENT_OCCURRED0_LB_AOERR0_SHFT 9 |
251 | #define UV2H_EVENT_OCCURRED0_LB_AOERR0_SHFT 9 | 278 | #define UVXH_EVENT_OCCURRED0_QP_AOERR0_SHFT 10 |
252 | #define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10 | 279 | #define UVXH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12 |
253 | #define UV2H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11 | 280 | #define UVXH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13 |
254 | #define UV2H_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12 | 281 | #define UVXH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14 |
255 | #define UV2H_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13 | 282 | #define UVXH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15 |
256 | #define UV2H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14 | 283 | #define UVXH_EVENT_OCCURRED0_XB_AOERR0_SHFT 16 |
257 | #define UV2H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15 | 284 | #define UVXH_EVENT_OCCURRED0_RT_AOERR0_SHFT 17 |
258 | #define UV2H_EVENT_OCCURRED0_XB_AOERR0_SHFT 16 | 285 | #define UVXH_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18 |
259 | #define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17 | 286 | #define UVXH_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19 |
260 | #define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18 | 287 | #define UVXH_EVENT_OCCURRED0_LB_AOERR1_SHFT 20 |
261 | #define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19 | 288 | #define UVXH_EVENT_OCCURRED0_QP_AOERR1_SHFT 21 |
262 | #define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20 | 289 | #define UVXH_EVENT_OCCURRED0_RH_AOERR1_SHFT 22 |
263 | #define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21 | 290 | #define UVXH_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23 |
264 | #define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22 | 291 | #define UVXH_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24 |
265 | #define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23 | 292 | #define UVXH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25 |
266 | #define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24 | 293 | #define UVXH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26 |
267 | #define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25 | 294 | #define UVXH_EVENT_OCCURRED0_XB_AOERR1_SHFT 27 |
268 | #define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26 | 295 | #define UVXH_EVENT_OCCURRED0_RT_AOERR1_SHFT 28 |
269 | #define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27 | 296 | #define UVXH_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29 |
270 | #define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28 | 297 | #define UVXH_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30 |
271 | #define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29 | 298 | #define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31 |
272 | #define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30 | 299 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32 |
273 | #define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31 | 300 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33 |
274 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32 | 301 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34 |
275 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33 | 302 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35 |
276 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34 | 303 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36 |
277 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35 | 304 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37 |
278 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36 | 305 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38 |
279 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37 | 306 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39 |
280 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38 | 307 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40 |
281 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39 | 308 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41 |
282 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40 | 309 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42 |
283 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41 | 310 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43 |
284 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42 | 311 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44 |
285 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43 | 312 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45 |
286 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44 | 313 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46 |
287 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45 | 314 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47 |
288 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46 | 315 | #define UVXH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48 |
289 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47 | 316 | #define UVXH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49 |
290 | #define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48 | 317 | #define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50 |
291 | #define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49 | 318 | #define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51 |
292 | #define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50 | 319 | #define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52 |
293 | #define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51 | 320 | #define UVXH_EVENT_OCCURRED0_IPI_INT_SHFT 53 |
294 | #define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52 | 321 | #define UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54 |
295 | #define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53 | 322 | #define UVXH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55 |
296 | #define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54 | 323 | #define UVXH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56 |
297 | #define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55 | 324 | #define UVXH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57 |
298 | #define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56 | 325 | #define UVXH_EVENT_OCCURRED0_PROFILE_INT_SHFT 58 |
299 | #define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57 | 326 | #define UVXH_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL |
300 | #define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58 | 327 | #define UVXH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL |
301 | #define UV2H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL | 328 | #define UVXH_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL |
302 | #define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL | 329 | #define UVXH_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL |
303 | #define UV2H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL | 330 | #define UVXH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL |
304 | #define UV2H_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL | 331 | #define UVXH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL |
305 | #define UV2H_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL | 332 | #define UVXH_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL |
306 | #define UV2H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL | 333 | #define UVXH_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL |
307 | #define UV2H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL | 334 | #define UVXH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL |
308 | #define UV2H_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL | 335 | #define UVXH_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL |
309 | #define UV2H_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL | 336 | #define UVXH_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL |
310 | #define UV2H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL | 337 | #define UVXH_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL |
311 | #define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL | 338 | #define UVXH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL |
312 | #define UV2H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL | 339 | #define UVXH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL |
313 | #define UV2H_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL | 340 | #define UVXH_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL |
314 | #define UV2H_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL | 341 | #define UVXH_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL |
315 | #define UV2H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL | 342 | #define UVXH_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL |
316 | #define UV2H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL | 343 | #define UVXH_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL |
317 | #define UV2H_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL | 344 | #define UVXH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL |
318 | #define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL | 345 | #define UVXH_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL |
319 | #define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL | 346 | #define UVXH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL |
320 | #define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL | 347 | #define UVXH_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL |
321 | #define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL | 348 | #define UVXH_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL |
322 | #define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL | 349 | #define UVXH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL |
323 | #define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL | 350 | #define UVXH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL |
324 | #define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL | 351 | #define UVXH_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL |
325 | #define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL | 352 | #define UVXH_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL |
326 | #define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL | 353 | #define UVXH_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL |
327 | #define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL | 354 | #define UVXH_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL |
328 | #define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL | 355 | #define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL |
329 | #define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL | 356 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL |
330 | #define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL | 357 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL |
331 | #define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL | 358 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL |
332 | #define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL | 359 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL |
333 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL | 360 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL |
334 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL | 361 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL |
335 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL | 362 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL |
336 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL | 363 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL |
337 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL | 364 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL |
338 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL | 365 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL |
339 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL | 366 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL |
340 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL | 367 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL |
341 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL | 368 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL |
342 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL | 369 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL |
343 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL | 370 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL |
344 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL | 371 | #define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL |
345 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL | 372 | #define UVXH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL |
346 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL | 373 | #define UVXH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL |
347 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL | 374 | #define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL |
348 | #define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL | 375 | #define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL |
349 | #define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL | 376 | #define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL |
350 | #define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL | 377 | #define UVXH_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL |
351 | #define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL | 378 | #define UVXH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL |
352 | #define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL | 379 | #define UVXH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL |
353 | #define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL | 380 | #define UVXH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL |
354 | #define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL | 381 | #define UVXH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL |
355 | #define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL | 382 | #define UVXH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL |
356 | #define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL | ||
357 | #define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL | ||
358 | #define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL | ||
359 | #define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL | ||
360 | 383 | ||
361 | union uvh_event_occurred0_u { | 384 | union uvh_event_occurred0_u { |
362 | unsigned long v; | 385 | unsigned long v; |
363 | struct uv1h_event_occurred0_s { | 386 | struct uvh_event_occurred0_s { |
364 | unsigned long lb_hcerr:1; /* RW, W1C */ | 387 | unsigned long lb_hcerr:1; /* RW, W1C */ |
365 | unsigned long gr0_hcerr:1; /* RW, W1C */ | 388 | unsigned long rsvd_1_10:10; |
366 | unsigned long gr1_hcerr:1; /* RW, W1C */ | ||
367 | unsigned long lh_hcerr:1; /* RW, W1C */ | ||
368 | unsigned long rh_hcerr:1; /* RW, W1C */ | ||
369 | unsigned long xn_hcerr:1; /* RW, W1C */ | ||
370 | unsigned long si_hcerr:1; /* RW, W1C */ | ||
371 | unsigned long lb_aoerr0:1; /* RW, W1C */ | ||
372 | unsigned long gr0_aoerr0:1; /* RW, W1C */ | ||
373 | unsigned long gr1_aoerr0:1; /* RW, W1C */ | ||
374 | unsigned long lh_aoerr0:1; /* RW, W1C */ | ||
375 | unsigned long rh_aoerr0:1; /* RW, W1C */ | 389 | unsigned long rh_aoerr0:1; /* RW, W1C */ |
376 | unsigned long xn_aoerr0:1; /* RW, W1C */ | 390 | unsigned long rsvd_12_63:52; |
377 | unsigned long si_aoerr0:1; /* RW, W1C */ | 391 | } s; |
378 | unsigned long lb_aoerr1:1; /* RW, W1C */ | 392 | struct uvxh_event_occurred0_s { |
379 | unsigned long gr0_aoerr1:1; /* RW, W1C */ | ||
380 | unsigned long gr1_aoerr1:1; /* RW, W1C */ | ||
381 | unsigned long lh_aoerr1:1; /* RW, W1C */ | ||
382 | unsigned long rh_aoerr1:1; /* RW, W1C */ | ||
383 | unsigned long xn_aoerr1:1; /* RW, W1C */ | ||
384 | unsigned long si_aoerr1:1; /* RW, W1C */ | ||
385 | unsigned long rh_vpi_int:1; /* RW, W1C */ | ||
386 | unsigned long system_shutdown_int:1; /* RW, W1C */ | ||
387 | unsigned long lb_irq_int_0:1; /* RW, W1C */ | ||
388 | unsigned long lb_irq_int_1:1; /* RW, W1C */ | ||
389 | unsigned long lb_irq_int_2:1; /* RW, W1C */ | ||
390 | unsigned long lb_irq_int_3:1; /* RW, W1C */ | ||
391 | unsigned long lb_irq_int_4:1; /* RW, W1C */ | ||
392 | unsigned long lb_irq_int_5:1; /* RW, W1C */ | ||
393 | unsigned long lb_irq_int_6:1; /* RW, W1C */ | ||
394 | unsigned long lb_irq_int_7:1; /* RW, W1C */ | ||
395 | unsigned long lb_irq_int_8:1; /* RW, W1C */ | ||
396 | unsigned long lb_irq_int_9:1; /* RW, W1C */ | ||
397 | unsigned long lb_irq_int_10:1; /* RW, W1C */ | ||
398 | unsigned long lb_irq_int_11:1; /* RW, W1C */ | ||
399 | unsigned long lb_irq_int_12:1; /* RW, W1C */ | ||
400 | unsigned long lb_irq_int_13:1; /* RW, W1C */ | ||
401 | unsigned long lb_irq_int_14:1; /* RW, W1C */ | ||
402 | unsigned long lb_irq_int_15:1; /* RW, W1C */ | ||
403 | unsigned long l1_nmi_int:1; /* RW, W1C */ | ||
404 | unsigned long stop_clock:1; /* RW, W1C */ | ||
405 | unsigned long asic_to_l1:1; /* RW, W1C */ | ||
406 | unsigned long l1_to_asic:1; /* RW, W1C */ | ||
407 | unsigned long ltc_int:1; /* RW, W1C */ | ||
408 | unsigned long la_seq_trigger:1; /* RW, W1C */ | ||
409 | unsigned long ipi_int:1; /* RW, W1C */ | ||
410 | unsigned long extio_int0:1; /* RW, W1C */ | ||
411 | unsigned long extio_int1:1; /* RW, W1C */ | ||
412 | unsigned long extio_int2:1; /* RW, W1C */ | ||
413 | unsigned long extio_int3:1; /* RW, W1C */ | ||
414 | unsigned long profile_int:1; /* RW, W1C */ | ||
415 | unsigned long rtc0:1; /* RW, W1C */ | ||
416 | unsigned long rtc1:1; /* RW, W1C */ | ||
417 | unsigned long rtc2:1; /* RW, W1C */ | ||
418 | unsigned long rtc3:1; /* RW, W1C */ | ||
419 | unsigned long bau_data:1; /* RW, W1C */ | ||
420 | unsigned long power_management_req:1; /* RW, W1C */ | ||
421 | unsigned long rsvd_57_63:7; | ||
422 | } s1; | ||
423 | struct uv2h_event_occurred0_s { | ||
424 | unsigned long lb_hcerr:1; /* RW */ | 393 | unsigned long lb_hcerr:1; /* RW */ |
425 | unsigned long qp_hcerr:1; /* RW */ | 394 | unsigned long qp_hcerr:1; /* RW */ |
426 | unsigned long rh_hcerr:1; /* RW */ | 395 | unsigned long rh_hcerr:1; /* RW */ |
@@ -481,19 +450,20 @@ union uvh_event_occurred0_u { | |||
481 | unsigned long extio_int3:1; /* RW */ | 450 | unsigned long extio_int3:1; /* RW */ |
482 | unsigned long profile_int:1; /* RW */ | 451 | unsigned long profile_int:1; /* RW */ |
483 | unsigned long rsvd_59_63:5; | 452 | unsigned long rsvd_59_63:5; |
484 | } s2; | 453 | } sx; |
485 | }; | 454 | }; |
486 | 455 | ||
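After this hunk the union carries a minimal revision-independent view (s, naming only the fields whose position is common to all hubs) plus the UVX view (sx) covering the UV2/UV3 layout, replacing the old per-revision s1/s2 duplication. A hedged sketch of testing an event through the common view (helper name illustrative, accessor assumed from uv_hub.h):

/*
 * Sketch: poll the RH_AOERR0 event via the revision-independent
 * "s" view of the union above.
 */
static inline int uv_rh_aoerr0_pending(void)
{
	union uvh_event_occurred0_u e;

	e.v = uv_read_local_mmr(UVH_EVENT_OCCURRED0);
	return e.s.rh_aoerr0;
}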
487 | /* ========================================================================= */ | 456 | /* ========================================================================= */ |
488 | /* UVH_EVENT_OCCURRED0_ALIAS */ | 457 | /* UVH_EVENT_OCCURRED0_ALIAS */ |
489 | /* ========================================================================= */ | 458 | /* ========================================================================= */ |
490 | #define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL | 459 | #define UVH_EVENT_OCCURRED0_ALIAS 0x70008UL |
491 | #define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0 | 460 | #define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0 |
461 | |||
492 | 462 | ||
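The W1C annotations mark bits that clear when written with 1, and the _ALIAS offset is the write-to-clear alias the UV code writes event masks to elsewhere in the tree. A sketch under that assumption (helper name hypothetical):

/*
 * Sketch: acknowledge a W1C event by writing its mask to the
 * alias register.  Assumes alias writes clear the aliased bits.
 */
static inline void uv_clear_rh_aoerr0(void)
{
	uv_write_local_mmr(UVH_EVENT_OCCURRED0_ALIAS,
			   UVH_EVENT_OCCURRED0_RH_AOERR0_MASK);
}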
493 | /* ========================================================================= */ | 463 | /* ========================================================================= */ |
494 | /* UVH_GR0_TLB_INT0_CONFIG */ | 464 | /* UVH_GR0_TLB_INT0_CONFIG */ |
495 | /* ========================================================================= */ | 465 | /* ========================================================================= */ |
496 | #define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL | 466 | #define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL |
497 | 467 | ||
498 | #define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0 | 468 | #define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0 |
499 | #define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8 | 469 | #define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8 |
@@ -531,7 +501,7 @@ union uvh_gr0_tlb_int0_config_u { | |||
531 | /* ========================================================================= */ | 501 | /* ========================================================================= */ |
532 | /* UVH_GR0_TLB_INT1_CONFIG */ | 502 | /* UVH_GR0_TLB_INT1_CONFIG */ |
533 | /* ========================================================================= */ | 503 | /* ========================================================================= */ |
534 | #define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL | 504 | #define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL |
535 | 505 | ||
536 | #define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0 | 506 | #define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0 |
537 | #define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8 | 507 | #define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8 |
@@ -571,9 +541,11 @@ union uvh_gr0_tlb_int1_config_u { | |||
571 | /* ========================================================================= */ | 541 | /* ========================================================================= */ |
572 | #define UV1H_GR0_TLB_MMR_CONTROL 0x401080UL | 542 | #define UV1H_GR0_TLB_MMR_CONTROL 0x401080UL |
573 | #define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL | 543 | #define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL |
574 | #define UVH_GR0_TLB_MMR_CONTROL (is_uv1_hub() ? \ | 544 | #define UV3H_GR0_TLB_MMR_CONTROL 0xc01080UL |
575 | UV1H_GR0_TLB_MMR_CONTROL : \ | 545 | #define UVH_GR0_TLB_MMR_CONTROL \ |
576 | UV2H_GR0_TLB_MMR_CONTROL) | 546 | (is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL : \ |
547 | (is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL : \ | ||
548 | UV3H_GR0_TLB_MMR_CONTROL)) | ||
577 | 549 | ||
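Because UV1 places this MMR at 0x401080 while UV2 and UV3 both use 0xc01080, the unprefixed UVH_ name is a nested runtime ternary on the hub predicates rather than a constant, so call sites stay revision-agnostic. A sketch (hypothetical helper; accessor and is_uv*_hub() assumed from uv_hub.h):

/*
 * Sketch: the UVH_ macro dispatches on is_uv1_hub()/is_uv2_hub()
 * at run time, so one call site covers all three hub revisions.
 */
static inline unsigned long uv_gr0_tlb_mmr_control_read(void)
{
	return uv_read_local_mmr(UVH_GR0_TLB_MMR_CONTROL);
}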
578 | #define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 | 550 | #define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 |
579 | #define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 | 551 | #define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 |
@@ -611,6 +583,21 @@ union uvh_gr0_tlb_int1_config_u { | |||
611 | #define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL | 583 | #define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL |
612 | #define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL | 584 | #define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL |
613 | 585 | ||
586 | #define UVXH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 | ||
587 | #define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 | ||
588 | #define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 | ||
589 | #define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20 | ||
590 | #define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30 | ||
591 | #define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31 | ||
592 | #define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32 | ||
593 | #define UVXH_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL | ||
594 | #define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL | ||
595 | #define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL | ||
596 | #define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL | ||
597 | #define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL | ||
598 | #define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL | ||
599 | #define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL | ||
600 | |||
614 | #define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 | 601 | #define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 |
615 | #define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 | 602 | #define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 |
616 | #define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 | 603 | #define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 |
@@ -630,6 +617,23 @@ union uvh_gr0_tlb_int1_config_u { | |||
630 | #define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL | 617 | #define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL |
631 | #define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL | 618 | #define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL |
632 | 619 | ||
620 | #define UV3H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 | ||
621 | #define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 | ||
622 | #define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 | ||
623 | #define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20 | ||
624 | #define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_SHFT 21 | ||
625 | #define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30 | ||
626 | #define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31 | ||
627 | #define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32 | ||
628 | #define UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL | ||
629 | #define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL | ||
630 | #define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL | ||
631 | #define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL | ||
632 | #define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL | ||
633 | #define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL | ||
634 | #define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL | ||
635 | #define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL | ||
636 | |||
633 | union uvh_gr0_tlb_mmr_control_u { | 637 | union uvh_gr0_tlb_mmr_control_u { |
634 | unsigned long v; | 638 | unsigned long v; |
635 | struct uvh_gr0_tlb_mmr_control_s { | 639 | struct uvh_gr0_tlb_mmr_control_s { |
@@ -642,7 +646,9 @@ union uvh_gr0_tlb_mmr_control_u { | |||
642 | unsigned long rsvd_21_29:9; | 646 | unsigned long rsvd_21_29:9; |
643 | unsigned long mmr_write:1; /* WP */ | 647 | unsigned long mmr_write:1; /* WP */ |
644 | unsigned long mmr_read:1; /* WP */ | 648 | unsigned long mmr_read:1; /* WP */ |
645 | unsigned long rsvd_32_63:32; | 649 | unsigned long rsvd_32_48:17; |
650 | unsigned long rsvd_49_51:3; | ||
651 | unsigned long rsvd_52_63:12; | ||
646 | } s; | 652 | } s; |
647 | struct uv1h_gr0_tlb_mmr_control_s { | 653 | struct uv1h_gr0_tlb_mmr_control_s { |
648 | unsigned long index:12; /* RW */ | 654 | unsigned long index:12; /* RW */ |
@@ -666,6 +672,23 @@ union uvh_gr0_tlb_mmr_control_u { | |||
666 | unsigned long mmr_inj_tlblruv:1; /* RW */ | 672 | unsigned long mmr_inj_tlblruv:1; /* RW */ |
667 | unsigned long rsvd_61_63:3; | 673 | unsigned long rsvd_61_63:3; |
668 | } s1; | 674 | } s1; |
675 | struct uvxh_gr0_tlb_mmr_control_s { | ||
676 | unsigned long index:12; /* RW */ | ||
677 | unsigned long mem_sel:2; /* RW */ | ||
678 | unsigned long rsvd_14_15:2; | ||
679 | unsigned long auto_valid_en:1; /* RW */ | ||
680 | unsigned long rsvd_17_19:3; | ||
681 | unsigned long mmr_hash_index_en:1; /* RW */ | ||
682 | unsigned long rsvd_21_29:9; | ||
683 | unsigned long mmr_write:1; /* WP */ | ||
684 | unsigned long mmr_read:1; /* WP */ | ||
685 | unsigned long mmr_op_done:1; /* RW */ | ||
686 | unsigned long rsvd_33_47:15; | ||
687 | unsigned long rsvd_48:1; | ||
688 | unsigned long rsvd_49_51:3; | ||
689 | unsigned long rsvd_52:1; | ||
690 | unsigned long rsvd_53_63:11; | ||
691 | } sx; | ||
669 | struct uv2h_gr0_tlb_mmr_control_s { | 692 | struct uv2h_gr0_tlb_mmr_control_s { |
670 | unsigned long index:12; /* RW */ | 693 | unsigned long index:12; /* RW */ |
671 | unsigned long mem_sel:2; /* RW */ | 694 | unsigned long mem_sel:2; /* RW */ |
@@ -683,6 +706,24 @@ union uvh_gr0_tlb_mmr_control_u { | |||
683 | unsigned long mmr_inj_tlbram:1; /* RW */ | 706 | unsigned long mmr_inj_tlbram:1; /* RW */ |
684 | unsigned long rsvd_53_63:11; | 707 | unsigned long rsvd_53_63:11; |
685 | } s2; | 708 | } s2; |
709 | struct uv3h_gr0_tlb_mmr_control_s { | ||
710 | unsigned long index:12; /* RW */ | ||
711 | unsigned long mem_sel:2; /* RW */ | ||
712 | unsigned long rsvd_14_15:2; | ||
713 | unsigned long auto_valid_en:1; /* RW */ | ||
714 | unsigned long rsvd_17_19:3; | ||
715 | unsigned long mmr_hash_index_en:1; /* RW */ | ||
716 | unsigned long ecc_sel:1; /* RW */ | ||
717 | unsigned long rsvd_22_29:8; | ||
718 | unsigned long mmr_write:1; /* WP */ | ||
719 | unsigned long mmr_read:1; /* WP */ | ||
720 | unsigned long mmr_op_done:1; /* RW */ | ||
721 | unsigned long rsvd_33_47:15; | ||
722 | unsigned long undef_48:1; /* Undefined */ | ||
723 | unsigned long rsvd_49_51:3; | ||
724 | unsigned long undef_52:1; /* Undefined */ | ||
725 | unsigned long rsvd_53_63:11; | ||
726 | } s3; | ||
686 | }; | 727 | }; |
687 | 728 | ||
688 | /* ========================================================================= */ | 729 | /* ========================================================================= */ |
@@ -690,9 +731,11 @@ union uvh_gr0_tlb_mmr_control_u { | |||
690 | /* ========================================================================= */ | 731 | /* ========================================================================= */ |
691 | #define UV1H_GR0_TLB_MMR_READ_DATA_HI 0x4010a0UL | 732 | #define UV1H_GR0_TLB_MMR_READ_DATA_HI 0x4010a0UL |
692 | #define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL | 733 | #define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL |
693 | #define UVH_GR0_TLB_MMR_READ_DATA_HI (is_uv1_hub() ? \ | 734 | #define UV3H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL |
694 | UV1H_GR0_TLB_MMR_READ_DATA_HI : \ | 735 | #define UVH_GR0_TLB_MMR_READ_DATA_HI \ |
695 | UV2H_GR0_TLB_MMR_READ_DATA_HI) | 736 | (is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_HI : \ |
737 | (is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_HI : \ | ||
738 | UV3H_GR0_TLB_MMR_READ_DATA_HI)) | ||
696 | 739 | ||
697 | #define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 | 740 | #define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 |
698 | #define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 | 741 | #define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 |
@@ -703,6 +746,46 @@ union uvh_gr0_tlb_mmr_control_u { | |||
703 | #define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL | 746 | #define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL |
704 | #define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL | 747 | #define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL |
705 | 748 | ||
749 | #define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 | ||
750 | #define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 | ||
751 | #define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43 | ||
752 | #define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44 | ||
753 | #define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL | ||
754 | #define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL | ||
755 | #define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL | ||
756 | #define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL | ||
757 | |||
758 | #define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 | ||
759 | #define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 | ||
760 | #define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43 | ||
761 | #define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44 | ||
762 | #define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL | ||
763 | #define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL | ||
764 | #define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL | ||
765 | #define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL | ||
766 | |||
767 | #define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 | ||
768 | #define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 | ||
769 | #define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43 | ||
770 | #define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44 | ||
771 | #define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL | ||
772 | #define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL | ||
773 | #define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL | ||
774 | #define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL | ||
775 | |||
776 | #define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 | ||
777 | #define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 | ||
778 | #define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43 | ||
779 | #define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44 | ||
780 | #define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45 | ||
781 | #define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55 | ||
782 | #define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL | ||
783 | #define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL | ||
784 | #define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL | ||
785 | #define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL | ||
786 | #define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL | ||
787 | #define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL | ||
788 | |||
706 | union uvh_gr0_tlb_mmr_read_data_hi_u { | 789 | union uvh_gr0_tlb_mmr_read_data_hi_u { |
707 | unsigned long v; | 790 | unsigned long v; |
708 | struct uvh_gr0_tlb_mmr_read_data_hi_s { | 791 | struct uvh_gr0_tlb_mmr_read_data_hi_s { |
@@ -712,6 +795,36 @@ union uvh_gr0_tlb_mmr_read_data_hi_u { | |||
712 | unsigned long larger:1; /* RO */ | 795 | unsigned long larger:1; /* RO */ |
713 | unsigned long rsvd_45_63:19; | 796 | unsigned long rsvd_45_63:19; |
714 | } s; | 797 | } s; |
798 | struct uv1h_gr0_tlb_mmr_read_data_hi_s { | ||
799 | unsigned long pfn:41; /* RO */ | ||
800 | unsigned long gaa:2; /* RO */ | ||
801 | unsigned long dirty:1; /* RO */ | ||
802 | unsigned long larger:1; /* RO */ | ||
803 | unsigned long rsvd_45_63:19; | ||
804 | } s1; | ||
805 | struct uvxh_gr0_tlb_mmr_read_data_hi_s { | ||
806 | unsigned long pfn:41; /* RO */ | ||
807 | unsigned long gaa:2; /* RO */ | ||
808 | unsigned long dirty:1; /* RO */ | ||
809 | unsigned long larger:1; /* RO */ | ||
810 | unsigned long rsvd_45_63:19; | ||
811 | } sx; | ||
812 | struct uv2h_gr0_tlb_mmr_read_data_hi_s { | ||
813 | unsigned long pfn:41; /* RO */ | ||
814 | unsigned long gaa:2; /* RO */ | ||
815 | unsigned long dirty:1; /* RO */ | ||
816 | unsigned long larger:1; /* RO */ | ||
817 | unsigned long rsvd_45_63:19; | ||
818 | } s2; | ||
819 | struct uv3h_gr0_tlb_mmr_read_data_hi_s { | ||
820 | unsigned long pfn:41; /* RO */ | ||
821 | unsigned long gaa:2; /* RO */ | ||
822 | unsigned long dirty:1; /* RO */ | ||
823 | unsigned long larger:1; /* RO */ | ||
824 | unsigned long aa_ext:1; /* RO */ | ||
825 | unsigned long undef_46_54:9; /* Undefined */ | ||
826 | unsigned long way_ecc:9; /* RO */ | ||
827 | } s3; | ||
715 | }; | 828 | }; |
716 | 829 | ||
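The UV3-only fields (AA_EXT, WAY_ECC) can be pulled either through the s3 bitfield view or with the raw mask-then-shift idiom; the latter works on a value read once and saved. A sketch (hypothetical helper):

/*
 * Sketch: extract the UV3 WAY_ECC field from a raw READ_DATA_HI
 * value using the SHFT/MASK pair rather than the s3 view.
 */
static inline unsigned int uv3_gr0_way_ecc(unsigned long hi)
{
	return (hi & UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK) >>
		UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT;
}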
717 | /* ========================================================================= */ | 830 | /* ========================================================================= */ |
@@ -719,9 +832,11 @@ union uvh_gr0_tlb_mmr_read_data_hi_u { | |||
719 | /* ========================================================================= */ | 832 | /* ========================================================================= */ |
720 | #define UV1H_GR0_TLB_MMR_READ_DATA_LO 0x4010a8UL | 833 | #define UV1H_GR0_TLB_MMR_READ_DATA_LO 0x4010a8UL |
721 | #define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL | 834 | #define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL |
722 | #define UVH_GR0_TLB_MMR_READ_DATA_LO (is_uv1_hub() ? \ | 835 | #define UV3H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL |
723 | UV1H_GR0_TLB_MMR_READ_DATA_LO : \ | 836 | #define UVH_GR0_TLB_MMR_READ_DATA_LO \ |
724 | UV2H_GR0_TLB_MMR_READ_DATA_LO) | 837 | (is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_LO : \ |
838 | (is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_LO : \ | ||
839 | UV3H_GR0_TLB_MMR_READ_DATA_LO)) | ||
725 | 840 | ||
726 | #define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 | 841 | #define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 |
727 | #define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 | 842 | #define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 |
@@ -730,6 +845,34 @@ union uvh_gr0_tlb_mmr_read_data_hi_u { | |||
730 | #define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL | 845 | #define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL |
731 | #define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL | 846 | #define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL |
732 | 847 | ||
848 | #define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 | ||
849 | #define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 | ||
850 | #define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 | ||
851 | #define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL | ||
852 | #define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL | ||
853 | #define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL | ||
854 | |||
855 | #define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 | ||
856 | #define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 | ||
857 | #define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 | ||
858 | #define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL | ||
859 | #define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL | ||
860 | #define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL | ||
861 | |||
862 | #define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 | ||
863 | #define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 | ||
864 | #define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 | ||
865 | #define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL | ||
866 | #define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL | ||
867 | #define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL | ||
868 | |||
869 | #define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 | ||
870 | #define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 | ||
871 | #define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 | ||
872 | #define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL | ||
873 | #define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL | ||
874 | #define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL | ||
875 | |||
733 | union uvh_gr0_tlb_mmr_read_data_lo_u { | 876 | union uvh_gr0_tlb_mmr_read_data_lo_u { |
734 | unsigned long v; | 877 | unsigned long v; |
735 | struct uvh_gr0_tlb_mmr_read_data_lo_s { | 878 | struct uvh_gr0_tlb_mmr_read_data_lo_s { |
@@ -737,12 +880,32 @@ union uvh_gr0_tlb_mmr_read_data_lo_u { | |||
737 | unsigned long asid:24; /* RO */ | 880 | unsigned long asid:24; /* RO */ |
738 | unsigned long valid:1; /* RO */ | 881 | unsigned long valid:1; /* RO */ |
739 | } s; | 882 | } s; |
883 | struct uv1h_gr0_tlb_mmr_read_data_lo_s { | ||
884 | unsigned long vpn:39; /* RO */ | ||
885 | unsigned long asid:24; /* RO */ | ||
886 | unsigned long valid:1; /* RO */ | ||
887 | } s1; | ||
888 | struct uvxh_gr0_tlb_mmr_read_data_lo_s { | ||
889 | unsigned long vpn:39; /* RO */ | ||
890 | unsigned long asid:24; /* RO */ | ||
891 | unsigned long valid:1; /* RO */ | ||
892 | } sx; | ||
893 | struct uv2h_gr0_tlb_mmr_read_data_lo_s { | ||
894 | unsigned long vpn:39; /* RO */ | ||
895 | unsigned long asid:24; /* RO */ | ||
896 | unsigned long valid:1; /* RO */ | ||
897 | } s2; | ||
898 | struct uv3h_gr0_tlb_mmr_read_data_lo_s { | ||
899 | unsigned long vpn:39; /* RO */ | ||
900 | unsigned long asid:24; /* RO */ | ||
901 | unsigned long valid:1; /* RO */ | ||
902 | } s3; | ||
740 | }; | 903 | }; |
741 | 904 | ||
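Taken together, the CONTROL and READ_DATA_{HI,LO} fields imply a read handshake: select an index, set mmr_read, wait for mmr_op_done, then fetch the two data halves. The following is only an illustration of that implied sequence on UV2/UV3 (mmr_op_done is absent from the UV1 view), not code from this patch:

/*
 * Sketch: read one GR0 TLB entry's PFN on a UV2/UV3 hub.  The
 * polling loop and helper name are illustrative only.
 */
static unsigned long uv_gr0_tlb_read_pfn(int index)
{
	union uvh_gr0_tlb_mmr_control_u ctl;
	union uvh_gr0_tlb_mmr_read_data_hi_u hi;

	ctl.v = 0;
	ctl.sx.index = index;
	ctl.sx.mmr_read = 1;
	uv_write_local_mmr(UVH_GR0_TLB_MMR_CONTROL, ctl.v);

	do {
		ctl.v = uv_read_local_mmr(UVH_GR0_TLB_MMR_CONTROL);
	} while (!ctl.sx.mmr_op_done);

	hi.v = uv_read_local_mmr(UVH_GR0_TLB_MMR_READ_DATA_HI);
	return hi.sx.pfn;	/* the GR1 registers below mirror this layout */
}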
742 | /* ========================================================================= */ | 905 | /* ========================================================================= */ |
743 | /* UVH_GR1_TLB_INT0_CONFIG */ | 906 | /* UVH_GR1_TLB_INT0_CONFIG */ |
744 | /* ========================================================================= */ | 907 | /* ========================================================================= */ |
745 | #define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL | 908 | #define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL |
746 | 909 | ||
747 | #define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0 | 910 | #define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0 |
748 | #define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8 | 911 | #define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8 |
@@ -780,7 +943,7 @@ union uvh_gr1_tlb_int0_config_u { | |||
780 | /* ========================================================================= */ | 943 | /* ========================================================================= */ |
781 | /* UVH_GR1_TLB_INT1_CONFIG */ | 944 | /* UVH_GR1_TLB_INT1_CONFIG */ |
782 | /* ========================================================================= */ | 945 | /* ========================================================================= */ |
783 | #define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL | 946 | #define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL |
784 | 947 | ||
785 | #define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0 | 948 | #define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0 |
786 | #define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8 | 949 | #define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8 |
@@ -820,9 +983,11 @@ union uvh_gr1_tlb_int1_config_u { | |||
820 | /* ========================================================================= */ | 983 | /* ========================================================================= */ |
821 | #define UV1H_GR1_TLB_MMR_CONTROL 0x801080UL | 984 | #define UV1H_GR1_TLB_MMR_CONTROL 0x801080UL |
822 | #define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL | 985 | #define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL |
823 | #define UVH_GR1_TLB_MMR_CONTROL (is_uv1_hub() ? \ | 986 | #define UV3H_GR1_TLB_MMR_CONTROL 0x1001080UL |
824 | UV1H_GR1_TLB_MMR_CONTROL : \ | 987 | #define UVH_GR1_TLB_MMR_CONTROL \ |
825 | UV2H_GR1_TLB_MMR_CONTROL) | 988 | (is_uv1_hub() ? UV1H_GR1_TLB_MMR_CONTROL : \ |
989 | (is_uv2_hub() ? UV2H_GR1_TLB_MMR_CONTROL : \ | ||
990 | UV3H_GR1_TLB_MMR_CONTROL)) | ||
826 | 991 | ||
827 | #define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 | 992 | #define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 |
828 | #define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 | 993 | #define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 |
@@ -860,6 +1025,21 @@ union uvh_gr1_tlb_int1_config_u { | |||
860 | #define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL | 1025 | #define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL |
861 | #define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL | 1026 | #define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL |
862 | 1027 | ||
1028 | #define UVXH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 | ||
1029 | #define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 | ||
1030 | #define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 | ||
1031 | #define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20 | ||
1032 | #define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30 | ||
1033 | #define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31 | ||
1034 | #define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32 | ||
1035 | #define UVXH_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL | ||
1036 | #define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL | ||
1037 | #define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL | ||
1038 | #define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL | ||
1039 | #define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL | ||
1040 | #define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL | ||
1041 | #define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL | ||
1042 | |||
863 | #define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 | 1043 | #define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 |
864 | #define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 | 1044 | #define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 |
865 | #define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 | 1045 | #define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 |
@@ -879,6 +1059,23 @@ union uvh_gr1_tlb_int1_config_u { | |||
879 | #define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL | 1059 | #define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL |
880 | #define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL | 1060 | #define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL |
881 | 1061 | ||
1062 | #define UV3H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 | ||
1063 | #define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 | ||
1064 | #define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 | ||
1065 | #define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20 | ||
1066 | #define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_SHFT 21 | ||
1067 | #define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30 | ||
1068 | #define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31 | ||
1069 | #define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32 | ||
1070 | #define UV3H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL | ||
1071 | #define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL | ||
1072 | #define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL | ||
1073 | #define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL | ||
1074 | #define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL | ||
1075 | #define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL | ||
1076 | #define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL | ||
1077 | #define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL | ||
1078 | |||
882 | union uvh_gr1_tlb_mmr_control_u { | 1079 | union uvh_gr1_tlb_mmr_control_u { |
883 | unsigned long v; | 1080 | unsigned long v; |
884 | struct uvh_gr1_tlb_mmr_control_s { | 1081 | struct uvh_gr1_tlb_mmr_control_s { |
@@ -891,7 +1088,9 @@ union uvh_gr1_tlb_mmr_control_u { | |||
891 | unsigned long rsvd_21_29:9; | 1088 | unsigned long rsvd_21_29:9; |
892 | unsigned long mmr_write:1; /* WP */ | 1089 | unsigned long mmr_write:1; /* WP */ |
893 | unsigned long mmr_read:1; /* WP */ | 1090 | unsigned long mmr_read:1; /* WP */ |
894 | unsigned long rsvd_32_63:32; | 1091 | unsigned long rsvd_32_48:17; |
1092 | unsigned long rsvd_49_51:3; | ||
1093 | unsigned long rsvd_52_63:12; | ||
895 | } s; | 1094 | } s; |
896 | struct uv1h_gr1_tlb_mmr_control_s { | 1095 | struct uv1h_gr1_tlb_mmr_control_s { |
897 | unsigned long index:12; /* RW */ | 1096 | unsigned long index:12; /* RW */ |
@@ -915,6 +1114,23 @@ union uvh_gr1_tlb_mmr_control_u { | |||
915 | unsigned long mmr_inj_tlblruv:1; /* RW */ | 1114 | unsigned long mmr_inj_tlblruv:1; /* RW */ |
916 | unsigned long rsvd_61_63:3; | 1115 | unsigned long rsvd_61_63:3; |
917 | } s1; | 1116 | } s1; |
1117 | struct uvxh_gr1_tlb_mmr_control_s { | ||
1118 | unsigned long index:12; /* RW */ | ||
1119 | unsigned long mem_sel:2; /* RW */ | ||
1120 | unsigned long rsvd_14_15:2; | ||
1121 | unsigned long auto_valid_en:1; /* RW */ | ||
1122 | unsigned long rsvd_17_19:3; | ||
1123 | unsigned long mmr_hash_index_en:1; /* RW */ | ||
1124 | unsigned long rsvd_21_29:9; | ||
1125 | unsigned long mmr_write:1; /* WP */ | ||
1126 | unsigned long mmr_read:1; /* WP */ | ||
1127 | unsigned long mmr_op_done:1; /* RW */ | ||
1128 | unsigned long rsvd_33_47:15; | ||
1129 | unsigned long rsvd_48:1; | ||
1130 | unsigned long rsvd_49_51:3; | ||
1131 | unsigned long rsvd_52:1; | ||
1132 | unsigned long rsvd_53_63:11; | ||
1133 | } sx; | ||
918 | struct uv2h_gr1_tlb_mmr_control_s { | 1134 | struct uv2h_gr1_tlb_mmr_control_s { |
919 | unsigned long index:12; /* RW */ | 1135 | unsigned long index:12; /* RW */ |
920 | unsigned long mem_sel:2; /* RW */ | 1136 | unsigned long mem_sel:2; /* RW */ |
@@ -932,6 +1148,24 @@ union uvh_gr1_tlb_mmr_control_u { | |||
932 | unsigned long mmr_inj_tlbram:1; /* RW */ | 1148 | unsigned long mmr_inj_tlbram:1; /* RW */ |
933 | unsigned long rsvd_53_63:11; | 1149 | unsigned long rsvd_53_63:11; |
934 | } s2; | 1150 | } s2; |
1151 | struct uv3h_gr1_tlb_mmr_control_s { | ||
1152 | unsigned long index:12; /* RW */ | ||
1153 | unsigned long mem_sel:2; /* RW */ | ||
1154 | unsigned long rsvd_14_15:2; | ||
1155 | unsigned long auto_valid_en:1; /* RW */ | ||
1156 | unsigned long rsvd_17_19:3; | ||
1157 | unsigned long mmr_hash_index_en:1; /* RW */ | ||
1158 | unsigned long ecc_sel:1; /* RW */ | ||
1159 | unsigned long rsvd_22_29:8; | ||
1160 | unsigned long mmr_write:1; /* WP */ | ||
1161 | unsigned long mmr_read:1; /* WP */ | ||
1162 | unsigned long mmr_op_done:1; /* RW */ | ||
1163 | unsigned long rsvd_33_47:15; | ||
1164 | unsigned long undef_48:1; /* Undefined */ | ||
1165 | unsigned long rsvd_49_51:3; | ||
1166 | unsigned long undef_52:1; /* Undefined */ | ||
1167 | unsigned long rsvd_53_63:11; | ||
1168 | } s3; | ||
935 | }; | 1169 | }; |
936 | 1170 | ||
937 | /* ========================================================================= */ | 1171 | /* ========================================================================= */ |
@@ -939,9 +1173,11 @@ union uvh_gr1_tlb_mmr_control_u { | |||
939 | /* ========================================================================= */ | 1173 | /* ========================================================================= */ |
940 | #define UV1H_GR1_TLB_MMR_READ_DATA_HI 0x8010a0UL | 1174 | #define UV1H_GR1_TLB_MMR_READ_DATA_HI 0x8010a0UL |
941 | #define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL | 1175 | #define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL |
942 | #define UVH_GR1_TLB_MMR_READ_DATA_HI (is_uv1_hub() ? \ | 1176 | #define UV3H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL |
943 | UV1H_GR1_TLB_MMR_READ_DATA_HI : \ | 1177 | #define UVH_GR1_TLB_MMR_READ_DATA_HI \ |
944 | UV2H_GR1_TLB_MMR_READ_DATA_HI) | 1178 | (is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_HI : \ |
1179 | (is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_HI : \ | ||
1180 | UV3H_GR1_TLB_MMR_READ_DATA_HI)) | ||
945 | 1181 | ||
946 | #define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 | 1182 | #define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 |
947 | #define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 | 1183 | #define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 |
@@ -952,6 +1188,46 @@ union uvh_gr1_tlb_mmr_control_u { | |||
952 | #define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL | 1188 | #define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL |
953 | #define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL | 1189 | #define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL |
954 | 1190 | ||
1191 | #define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 | ||
1192 | #define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 | ||
1193 | #define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43 | ||
1194 | #define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44 | ||
1195 | #define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL | ||
1196 | #define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL | ||
1197 | #define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL | ||
1198 | #define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL | ||
1199 | |||
1200 | #define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 | ||
1201 | #define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 | ||
1202 | #define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43 | ||
1203 | #define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44 | ||
1204 | #define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL | ||
1205 | #define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL | ||
1206 | #define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL | ||
1207 | #define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL | ||
1208 | |||
1209 | #define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 | ||
1210 | #define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 | ||
1211 | #define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43 | ||
1212 | #define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44 | ||
1213 | #define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL | ||
1214 | #define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL | ||
1215 | #define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL | ||
1216 | #define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL | ||
1217 | |||
1218 | #define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 | ||
1219 | #define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 | ||
1220 | #define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43 | ||
1221 | #define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44 | ||
1222 | #define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45 | ||
1223 | #define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55 | ||
1224 | #define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL | ||
1225 | #define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL | ||
1226 | #define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL | ||
1227 | #define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL | ||
1228 | #define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL | ||
1229 | #define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL | ||
1230 | |||
955 | union uvh_gr1_tlb_mmr_read_data_hi_u { | 1231 | union uvh_gr1_tlb_mmr_read_data_hi_u { |
956 | unsigned long v; | 1232 | unsigned long v; |
957 | struct uvh_gr1_tlb_mmr_read_data_hi_s { | 1233 | struct uvh_gr1_tlb_mmr_read_data_hi_s { |
@@ -961,6 +1237,36 @@ union uvh_gr1_tlb_mmr_read_data_hi_u { | |||
961 | unsigned long larger:1; /* RO */ | 1237 | unsigned long larger:1; /* RO */ |
962 | unsigned long rsvd_45_63:19; | 1238 | unsigned long rsvd_45_63:19; |
963 | } s; | 1239 | } s; |
1240 | struct uv1h_gr1_tlb_mmr_read_data_hi_s { | ||
1241 | unsigned long pfn:41; /* RO */ | ||
1242 | unsigned long gaa:2; /* RO */ | ||
1243 | unsigned long dirty:1; /* RO */ | ||
1244 | unsigned long larger:1; /* RO */ | ||
1245 | unsigned long rsvd_45_63:19; | ||
1246 | } s1; | ||
1247 | struct uvxh_gr1_tlb_mmr_read_data_hi_s { | ||
1248 | unsigned long pfn:41; /* RO */ | ||
1249 | unsigned long gaa:2; /* RO */ | ||
1250 | unsigned long dirty:1; /* RO */ | ||
1251 | unsigned long larger:1; /* RO */ | ||
1252 | unsigned long rsvd_45_63:19; | ||
1253 | } sx; | ||
1254 | struct uv2h_gr1_tlb_mmr_read_data_hi_s { | ||
1255 | unsigned long pfn:41; /* RO */ | ||
1256 | unsigned long gaa:2; /* RO */ | ||
1257 | unsigned long dirty:1; /* RO */ | ||
1258 | unsigned long larger:1; /* RO */ | ||
1259 | unsigned long rsvd_45_63:19; | ||
1260 | } s2; | ||
1261 | struct uv3h_gr1_tlb_mmr_read_data_hi_s { | ||
1262 | unsigned long pfn:41; /* RO */ | ||
1263 | unsigned long gaa:2; /* RO */ | ||
1264 | unsigned long dirty:1; /* RO */ | ||
1265 | unsigned long larger:1; /* RO */ | ||
1266 | unsigned long aa_ext:1; /* RO */ | ||
1267 | unsigned long undef_46_54:9; /* Undefined */ | ||
1268 | unsigned long way_ecc:9; /* RO */ | ||
1269 | } s3; | ||
964 | }; | 1270 | }; |
965 | 1271 | ||
966 | /* ========================================================================= */ | 1272 | /* ========================================================================= */ |
@@ -968,9 +1274,11 @@ union uvh_gr1_tlb_mmr_read_data_hi_u { | |||
968 | /* ========================================================================= */ | 1274 | /* ========================================================================= */ |
969 | #define UV1H_GR1_TLB_MMR_READ_DATA_LO 0x8010a8UL | 1275 | #define UV1H_GR1_TLB_MMR_READ_DATA_LO 0x8010a8UL |
970 | #define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL | 1276 | #define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL |
971 | #define UVH_GR1_TLB_MMR_READ_DATA_LO (is_uv1_hub() ? \ | 1277 | #define UV3H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL |
972 | UV1H_GR1_TLB_MMR_READ_DATA_LO : \ | 1278 | #define UVH_GR1_TLB_MMR_READ_DATA_LO \ |
973 | UV2H_GR1_TLB_MMR_READ_DATA_LO) | 1279 | (is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_LO : \ |
1280 | (is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_LO : \ | ||
1281 | UV3H_GR1_TLB_MMR_READ_DATA_LO)) | ||
974 | 1282 | ||
975 | #define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 | 1283 | #define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 |
976 | #define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 | 1284 | #define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 |
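
[The UVH_GR1_TLB_MMR_READ_DATA_LO selector above dispatches on the hub
revision at run time: UV1 keeps the register at 0x8010a8, while UV2 and
UV3 share 0x10010a8. A minimal sketch of a caller, assuming the
uv_read_local_mmr() accessor and the is_uv*_hub() helpers from
<asm/uv/uv_hub.h>:

	static unsigned long gr1_tlb_read_data_lo(void)
	{
		/*
		 * UVH_GR1_TLB_MMR_READ_DATA_LO expands to the UV1, UV2
		 * or UV3 offset via is_uv1_hub()/is_uv2_hub().
		 */
		return uv_read_local_mmr(UVH_GR1_TLB_MMR_READ_DATA_LO);
	}
]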
@@ -979,6 +1287,34 @@ union uvh_gr1_tlb_mmr_read_data_hi_u { | |||
979 | #define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL | 1287 | #define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL |
980 | #define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL | 1288 | #define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL |
981 | 1289 | ||
1290 | #define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 | ||
1291 | #define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 | ||
1292 | #define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 | ||
1293 | #define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL | ||
1294 | #define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL | ||
1295 | #define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL | ||
1296 | |||
1297 | #define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 | ||
1298 | #define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 | ||
1299 | #define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 | ||
1300 | #define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL | ||
1301 | #define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL | ||
1302 | #define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL | ||
1303 | |||
1304 | #define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 | ||
1305 | #define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 | ||
1306 | #define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 | ||
1307 | #define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL | ||
1308 | #define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL | ||
1309 | #define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL | ||
1310 | |||
1311 | #define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 | ||
1312 | #define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 | ||
1313 | #define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63 | ||
1314 | #define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL | ||
1315 | #define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL | ||
1316 | #define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL | ||
1317 | |||
982 | union uvh_gr1_tlb_mmr_read_data_lo_u { | 1318 | union uvh_gr1_tlb_mmr_read_data_lo_u { |
983 | unsigned long v; | 1319 | unsigned long v; |
984 | struct uvh_gr1_tlb_mmr_read_data_lo_s { | 1320 | struct uvh_gr1_tlb_mmr_read_data_lo_s { |
@@ -986,12 +1322,32 @@ union uvh_gr1_tlb_mmr_read_data_lo_u { | |||
986 | unsigned long asid:24; /* RO */ | 1322 | unsigned long asid:24; /* RO */ |
987 | unsigned long valid:1; /* RO */ | 1323 | unsigned long valid:1; /* RO */ |
988 | } s; | 1324 | } s; |
1325 | struct uv1h_gr1_tlb_mmr_read_data_lo_s { | ||
1326 | unsigned long vpn:39; /* RO */ | ||
1327 | unsigned long asid:24; /* RO */ | ||
1328 | unsigned long valid:1; /* RO */ | ||
1329 | } s1; | ||
1330 | struct uvxh_gr1_tlb_mmr_read_data_lo_s { | ||
1331 | unsigned long vpn:39; /* RO */ | ||
1332 | unsigned long asid:24; /* RO */ | ||
1333 | unsigned long valid:1; /* RO */ | ||
1334 | } sx; | ||
1335 | struct uv2h_gr1_tlb_mmr_read_data_lo_s { | ||
1336 | unsigned long vpn:39; /* RO */ | ||
1337 | unsigned long asid:24; /* RO */ | ||
1338 | unsigned long valid:1; /* RO */ | ||
1339 | } s2; | ||
1340 | struct uv3h_gr1_tlb_mmr_read_data_lo_s { | ||
1341 | unsigned long vpn:39; /* RO */ | ||
1342 | unsigned long asid:24; /* RO */ | ||
1343 | unsigned long valid:1; /* RO */ | ||
1344 | } s3; | ||
989 | }; | 1345 | }; |
990 | 1346 | ||
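[Each of these unions carries one bitfield view per hub revision (s1 for
UV1, s2 for UV2, s3 for UV3) plus the generic s and UVX-common sx views;
the layouts happen to coincide for this register, but they can diverge
per revision. A hedged sketch of decoding a raw MMR value through the
union, assuming the register was read as above:

	union uvh_gr1_tlb_mmr_read_data_lo_u lo;

	lo.v = uv_read_local_mmr(UVH_GR1_TLB_MMR_READ_DATA_LO);
	if (lo.s.valid)		/* vpn/asid/valid lay out identically here */
		pr_debug("gr1 tlb: vpn=%lx asid=%lx\n",
			 (unsigned long)lo.s.vpn,
			 (unsigned long)lo.s.asid);

Equivalently, the UVXH_*_SHFT/_MASK pairs extract the same fields from
the raw word, e.g. (lo.v & UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK) >>
UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT.]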
991 | /* ========================================================================= */ | 1347 | /* ========================================================================= */ |
992 | /* UVH_INT_CMPB */ | 1348 | /* UVH_INT_CMPB */ |
993 | /* ========================================================================= */ | 1349 | /* ========================================================================= */ |
994 | #define UVH_INT_CMPB 0x22080UL | 1350 | #define UVH_INT_CMPB 0x22080UL |
995 | 1351 | ||
996 | #define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0 | 1352 | #define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0 |
997 | #define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL | 1353 | #define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL |
@@ -1007,10 +1363,13 @@ union uvh_int_cmpb_u { | |||
1007 | /* ========================================================================= */ | 1363 | /* ========================================================================= */ |
1008 | /* UVH_INT_CMPC */ | 1364 | /* UVH_INT_CMPC */ |
1009 | /* ========================================================================= */ | 1365 | /* ========================================================================= */ |
1010 | #define UVH_INT_CMPC 0x22100UL | 1366 | #define UVH_INT_CMPC 0x22100UL |
1367 | |||
1368 | #define UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT 0 | ||
1369 | #define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL | ||
1011 | 1370 | ||
1012 | #define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0 | 1371 | #define UVXH_INT_CMPC_REAL_TIME_CMP_2_SHFT 0 |
1013 | #define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL | 1372 | #define UVXH_INT_CMPC_REAL_TIME_CMP_2_MASK 0x00ffffffffffffffUL |
1014 | 1373 | ||
1015 | union uvh_int_cmpc_u { | 1374 | union uvh_int_cmpc_u { |
1016 | unsigned long v; | 1375 | unsigned long v; |
@@ -1023,10 +1382,13 @@ union uvh_int_cmpc_u { | |||
1023 | /* ========================================================================= */ | 1382 | /* ========================================================================= */ |
1024 | /* UVH_INT_CMPD */ | 1383 | /* UVH_INT_CMPD */ |
1025 | /* ========================================================================= */ | 1384 | /* ========================================================================= */ |
1026 | #define UVH_INT_CMPD 0x22180UL | 1385 | #define UVH_INT_CMPD 0x22180UL |
1027 | 1386 | ||
1028 | #define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0 | 1387 | #define UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT 0 |
1029 | #define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL | 1388 | #define UV1H_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL |
1389 | |||
1390 | #define UVXH_INT_CMPD_REAL_TIME_CMP_3_SHFT 0 | ||
1391 | #define UVXH_INT_CMPD_REAL_TIME_CMP_3_MASK 0x00ffffffffffffffUL | ||
1030 | 1392 | ||
1031 | union uvh_int_cmpd_u { | 1393 | union uvh_int_cmpd_u { |
1032 | unsigned long v; | 1394 | unsigned long v; |
@@ -1039,8 +1401,8 @@ union uvh_int_cmpd_u { | |||
1039 | /* ========================================================================= */ | 1401 | /* ========================================================================= */ |
1040 | /* UVH_IPI_INT */ | 1402 | /* UVH_IPI_INT */ |
1041 | /* ========================================================================= */ | 1403 | /* ========================================================================= */ |
1042 | #define UVH_IPI_INT 0x60500UL | 1404 | #define UVH_IPI_INT 0x60500UL |
1043 | #define UVH_IPI_INT_32 0x348 | 1405 | #define UVH_IPI_INT_32 0x348 |
1044 | 1406 | ||
1045 | #define UVH_IPI_INT_VECTOR_SHFT 0 | 1407 | #define UVH_IPI_INT_VECTOR_SHFT 0 |
1046 | #define UVH_IPI_INT_DELIVERY_MODE_SHFT 8 | 1408 | #define UVH_IPI_INT_DELIVERY_MODE_SHFT 8 |
@@ -1069,8 +1431,8 @@ union uvh_ipi_int_u { | |||
1069 | /* ========================================================================= */ | 1431 | /* ========================================================================= */ |
1070 | /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */ | 1432 | /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */ |
1071 | /* ========================================================================= */ | 1433 | /* ========================================================================= */ |
1072 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL | 1434 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL |
1073 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0 | 1435 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0 |
1074 | 1436 | ||
1075 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4 | 1437 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4 |
1076 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49 | 1438 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49 |
@@ -1091,8 +1453,8 @@ union uvh_lb_bau_intd_payload_queue_first_u { | |||
1091 | /* ========================================================================= */ | 1453 | /* ========================================================================= */ |
1092 | /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */ | 1454 | /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */ |
1093 | /* ========================================================================= */ | 1455 | /* ========================================================================= */ |
1094 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL | 1456 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL |
1095 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8 | 1457 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8 |
1096 | 1458 | ||
1097 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4 | 1459 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4 |
1098 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL | 1460 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL |
@@ -1109,8 +1471,8 @@ union uvh_lb_bau_intd_payload_queue_last_u { | |||
1109 | /* ========================================================================= */ | 1471 | /* ========================================================================= */ |
1110 | /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */ | 1472 | /* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */ |
1111 | /* ========================================================================= */ | 1473 | /* ========================================================================= */ |
1112 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL | 1474 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL |
1113 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0 | 1475 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0 |
1114 | 1476 | ||
1115 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4 | 1477 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4 |
1116 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL | 1478 | #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL |
@@ -1127,8 +1489,8 @@ union uvh_lb_bau_intd_payload_queue_tail_u { | |||
1127 | /* ========================================================================= */ | 1489 | /* ========================================================================= */ |
1128 | /* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */ | 1490 | /* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */ |
1129 | /* ========================================================================= */ | 1491 | /* ========================================================================= */ |
1130 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL | 1492 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL |
1131 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68 | 1493 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68 |
1132 | 1494 | ||
1133 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 | 1495 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 |
1134 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1 | 1496 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1 |
@@ -1189,14 +1551,21 @@ union uvh_lb_bau_intd_software_acknowledge_u { | |||
1189 | /* ========================================================================= */ | 1551 | /* ========================================================================= */ |
1190 | /* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */ | 1552 | /* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */ |
1191 | /* ========================================================================= */ | 1553 | /* ========================================================================= */ |
1192 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL | 1554 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL |
1193 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70 | 1555 | #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70 |
1556 | |||
1194 | 1557 | ||
1195 | /* ========================================================================= */ | 1558 | /* ========================================================================= */ |
1196 | /* UVH_LB_BAU_MISC_CONTROL */ | 1559 | /* UVH_LB_BAU_MISC_CONTROL */ |
1197 | /* ========================================================================= */ | 1560 | /* ========================================================================= */ |
1198 | #define UVH_LB_BAU_MISC_CONTROL 0x320170UL | 1561 | #define UVH_LB_BAU_MISC_CONTROL 0x320170UL |
1199 | #define UVH_LB_BAU_MISC_CONTROL_32 0xa10 | 1562 | #define UV1H_LB_BAU_MISC_CONTROL 0x320170UL |
1563 | #define UV2H_LB_BAU_MISC_CONTROL 0x320170UL | ||
1564 | #define UV3H_LB_BAU_MISC_CONTROL 0x320170UL | ||
1565 | #define UVH_LB_BAU_MISC_CONTROL_32 0xa10 | ||
1566 | #define UV1H_LB_BAU_MISC_CONTROL_32 0x320170UL | ||
1567 | #define UV2H_LB_BAU_MISC_CONTROL_32 0x320170UL | ||
1568 | #define UV3H_LB_BAU_MISC_CONTROL_32 0x320170UL | ||
1200 | 1569 | ||
1201 | #define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 | 1570 | #define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 |
1202 | #define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 | 1571 | #define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 |
@@ -1213,6 +1582,7 @@ union uvh_lb_bau_intd_software_acknowledge_u { | |||
1213 | #define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 | 1582 | #define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 |
1214 | #define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 | 1583 | #define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 |
1215 | #define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 | 1584 | #define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 |
1585 | #define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48 | ||
1216 | #define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL | 1586 | #define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL |
1217 | #define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL | 1587 | #define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL |
1218 | #define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL | 1588 | #define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL |
@@ -1228,6 +1598,7 @@ union uvh_lb_bau_intd_software_acknowledge_u { | |||
1228 | #define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL | 1598 | #define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL |
1229 | #define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL | 1599 | #define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL |
1230 | #define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL | 1600 | #define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL |
1601 | #define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL | ||
1231 | 1602 | ||
1232 | #define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 | 1603 | #define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 |
1233 | #define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 | 1604 | #define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 |
@@ -1262,6 +1633,53 @@ union uvh_lb_bau_intd_software_acknowledge_u { | |||
1262 | #define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL | 1633 | #define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL |
1263 | #define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL | 1634 | #define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL |
1264 | 1635 | ||
1636 | #define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 | ||
1637 | #define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 | ||
1638 | #define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9 | ||
1639 | #define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10 | ||
1640 | #define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11 | ||
1641 | #define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14 | ||
1642 | #define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15 | ||
1643 | #define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16 | ||
1644 | #define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20 | ||
1645 | #define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21 | ||
1646 | #define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22 | ||
1647 | #define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23 | ||
1648 | #define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 | ||
1649 | #define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 | ||
1650 | #define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 | ||
1651 | #define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29 | ||
1652 | #define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30 | ||
1653 | #define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31 | ||
1654 | #define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32 | ||
1655 | #define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33 | ||
1656 | #define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34 | ||
1657 | #define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35 | ||
1658 | #define UVXH_LB_BAU_MISC_CONTROL_FUN_SHFT 48 | ||
1659 | #define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL | ||
1660 | #define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL | ||
1661 | #define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL | ||
1662 | #define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL | ||
1663 | #define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL | ||
1664 | #define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL | ||
1665 | #define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL | ||
1666 | #define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL | ||
1667 | #define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL | ||
1668 | #define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL | ||
1669 | #define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL | ||
1670 | #define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL | ||
1671 | #define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL | ||
1672 | #define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL | ||
1673 | #define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL | ||
1674 | #define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL | ||
1675 | #define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL | ||
1676 | #define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL | ||
1677 | #define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL | ||
1678 | #define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL | ||
1679 | #define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL | ||
1680 | #define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL | ||
1681 | #define UVXH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL | ||
1682 | |||
1265 | #define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 | 1683 | #define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 |
1266 | #define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 | 1684 | #define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 |
1267 | #define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9 | 1685 | #define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9 |
@@ -1309,6 +1727,59 @@ union uvh_lb_bau_intd_software_acknowledge_u { | |||
1309 | #define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL | 1727 | #define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL |
1310 | #define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL | 1728 | #define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL |
1311 | 1729 | ||
1730 | #define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 | ||
1731 | #define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 | ||
1732 | #define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9 | ||
1733 | #define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10 | ||
1734 | #define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11 | ||
1735 | #define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14 | ||
1736 | #define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15 | ||
1737 | #define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16 | ||
1738 | #define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20 | ||
1739 | #define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21 | ||
1740 | #define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22 | ||
1741 | #define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23 | ||
1742 | #define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 | ||
1743 | #define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 | ||
1744 | #define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 | ||
1745 | #define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29 | ||
1746 | #define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30 | ||
1747 | #define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31 | ||
1748 | #define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32 | ||
1749 | #define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33 | ||
1750 | #define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34 | ||
1751 | #define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35 | ||
1752 | #define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_SHFT 36 | ||
1753 | #define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT 37 | ||
1754 | #define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_SHFT 38 | ||
1755 | #define UV3H_LB_BAU_MISC_CONTROL_FUN_SHFT 48 | ||
1756 | #define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL | ||
1757 | #define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL | ||
1758 | #define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL | ||
1759 | #define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL | ||
1760 | #define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL | ||
1761 | #define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL | ||
1762 | #define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL | ||
1763 | #define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL | ||
1764 | #define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL | ||
1765 | #define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL | ||
1766 | #define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL | ||
1767 | #define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL | ||
1768 | #define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL | ||
1769 | #define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL | ||
1770 | #define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL | ||
1771 | #define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL | ||
1772 | #define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL | ||
1773 | #define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL | ||
1774 | #define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL | ||
1775 | #define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL | ||
1776 | #define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL | ||
1777 | #define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL | ||
1778 | #define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_MASK 0x0000001000000000UL | ||
1779 | #define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_MASK 0x0000002000000000UL | ||
1780 | #define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL | ||
1781 | #define UV3H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL | ||
1782 | |||
1312 | union uvh_lb_bau_misc_control_u { | 1783 | union uvh_lb_bau_misc_control_u { |
1313 | unsigned long v; | 1784 | unsigned long v; |
1314 | struct uvh_lb_bau_misc_control_s { | 1785 | struct uvh_lb_bau_misc_control_s { |
@@ -1327,7 +1798,8 @@ union uvh_lb_bau_misc_control_u { | |||
1327 | unsigned long programmed_initial_priority:3; /* RW */ | 1798 | unsigned long programmed_initial_priority:3; /* RW */ |
1328 | unsigned long use_incoming_priority:1; /* RW */ | 1799 | unsigned long use_incoming_priority:1; /* RW */ |
1329 | unsigned long enable_programmed_initial_priority:1;/* RW */ | 1800 | unsigned long enable_programmed_initial_priority:1;/* RW */ |
1330 | unsigned long rsvd_29_63:35; | 1801 | unsigned long rsvd_29_47:19; |
1802 | unsigned long fun:16; /* RW */ | ||
1331 | } s; | 1803 | } s; |
1332 | struct uv1h_lb_bau_misc_control_s { | 1804 | struct uv1h_lb_bau_misc_control_s { |
1333 | unsigned long rejection_delay:8; /* RW */ | 1805 | unsigned long rejection_delay:8; /* RW */ |
@@ -1348,6 +1820,32 @@ union uvh_lb_bau_misc_control_u { | |||
1348 | unsigned long rsvd_29_47:19; | 1820 | unsigned long rsvd_29_47:19; |
1349 | unsigned long fun:16; /* RW */ | 1821 | unsigned long fun:16; /* RW */ |
1350 | } s1; | 1822 | } s1; |
1823 | struct uvxh_lb_bau_misc_control_s { | ||
1824 | unsigned long rejection_delay:8; /* RW */ | ||
1825 | unsigned long apic_mode:1; /* RW */ | ||
1826 | unsigned long force_broadcast:1; /* RW */ | ||
1827 | unsigned long force_lock_nop:1; /* RW */ | ||
1828 | unsigned long qpi_agent_presence_vector:3; /* RW */ | ||
1829 | unsigned long descriptor_fetch_mode:1; /* RW */ | ||
1830 | unsigned long enable_intd_soft_ack_mode:1; /* RW */ | ||
1831 | unsigned long intd_soft_ack_timeout_period:4; /* RW */ | ||
1832 | unsigned long enable_dual_mapping_mode:1; /* RW */ | ||
1833 | unsigned long vga_io_port_decode_enable:1; /* RW */ | ||
1834 | unsigned long vga_io_port_16_bit_decode:1; /* RW */ | ||
1835 | unsigned long suppress_dest_registration:1; /* RW */ | ||
1836 | unsigned long programmed_initial_priority:3; /* RW */ | ||
1837 | unsigned long use_incoming_priority:1; /* RW */ | ||
1838 | unsigned long enable_programmed_initial_priority:1;/* RW */ | ||
1839 | unsigned long enable_automatic_apic_mode_selection:1;/* RW */ | ||
1840 | unsigned long apic_mode_status:1; /* RO */ | ||
1841 | unsigned long suppress_interrupts_to_self:1; /* RW */ | ||
1842 | unsigned long enable_lock_based_system_flush:1;/* RW */ | ||
1843 | unsigned long enable_extended_sb_status:1; /* RW */ | ||
1844 | unsigned long suppress_int_prio_udt_to_self:1;/* RW */ | ||
1845 | unsigned long use_legacy_descriptor_formats:1;/* RW */ | ||
1846 | unsigned long rsvd_36_47:12; | ||
1847 | unsigned long fun:16; /* RW */ | ||
1848 | } sx; | ||
1351 | struct uv2h_lb_bau_misc_control_s { | 1849 | struct uv2h_lb_bau_misc_control_s { |
1352 | unsigned long rejection_delay:8; /* RW */ | 1850 | unsigned long rejection_delay:8; /* RW */ |
1353 | unsigned long apic_mode:1; /* RW */ | 1851 | unsigned long apic_mode:1; /* RW */ |
@@ -1374,13 +1872,42 @@ union uvh_lb_bau_misc_control_u { | |||
1374 | unsigned long rsvd_36_47:12; | 1872 | unsigned long rsvd_36_47:12; |
1375 | unsigned long fun:16; /* RW */ | 1873 | unsigned long fun:16; /* RW */ |
1376 | } s2; | 1874 | } s2; |
1875 | struct uv3h_lb_bau_misc_control_s { | ||
1876 | unsigned long rejection_delay:8; /* RW */ | ||
1877 | unsigned long apic_mode:1; /* RW */ | ||
1878 | unsigned long force_broadcast:1; /* RW */ | ||
1879 | unsigned long force_lock_nop:1; /* RW */ | ||
1880 | unsigned long qpi_agent_presence_vector:3; /* RW */ | ||
1881 | unsigned long descriptor_fetch_mode:1; /* RW */ | ||
1882 | unsigned long enable_intd_soft_ack_mode:1; /* RW */ | ||
1883 | unsigned long intd_soft_ack_timeout_period:4; /* RW */ | ||
1884 | unsigned long enable_dual_mapping_mode:1; /* RW */ | ||
1885 | unsigned long vga_io_port_decode_enable:1; /* RW */ | ||
1886 | unsigned long vga_io_port_16_bit_decode:1; /* RW */ | ||
1887 | unsigned long suppress_dest_registration:1; /* RW */ | ||
1888 | unsigned long programmed_initial_priority:3; /* RW */ | ||
1889 | unsigned long use_incoming_priority:1; /* RW */ | ||
1890 | unsigned long enable_programmed_initial_priority:1;/* RW */ | ||
1891 | unsigned long enable_automatic_apic_mode_selection:1;/* RW */ | ||
1892 | unsigned long apic_mode_status:1; /* RO */ | ||
1893 | unsigned long suppress_interrupts_to_self:1; /* RW */ | ||
1894 | unsigned long enable_lock_based_system_flush:1;/* RW */ | ||
1895 | unsigned long enable_extended_sb_status:1; /* RW */ | ||
1896 | unsigned long suppress_int_prio_udt_to_self:1;/* RW */ | ||
1897 | unsigned long use_legacy_descriptor_formats:1;/* RW */ | ||
1898 | unsigned long suppress_quiesce_msgs_to_qpi:1; /* RW */ | ||
1899 | unsigned long enable_intd_prefetch_hint:1; /* RW */ | ||
1900 | unsigned long thread_kill_timebase:8; /* RW */ | ||
1901 | unsigned long rsvd_46_47:2; | ||
1902 | unsigned long fun:16; /* RW */ | ||
1903 | } s3; | ||
1377 | }; | 1904 | }; |
1378 | 1905 | ||
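[The misc-control union is where the revisions genuinely diverge: only
the UV3 view (s3) carries suppress_quiesce_msgs_to_qpi,
enable_intd_prefetch_hint and thread_kill_timebase, so UV3-only fields
must be touched through s3 behind a hub check. A sketch, assuming
is_uv3_hub() and uv_write_local_mmr() behave as in <asm/uv/uv_hub.h>;
the timebase value is purely illustrative:

	union uvh_lb_bau_misc_control_u mc;

	mc.v = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
	if (is_uv3_hub()) {
		mc.s3.thread_kill_timebase = 8;	/* hypothetical value */
		uv_write_local_mmr(UVH_LB_BAU_MISC_CONTROL, mc.v);
	}
]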
1379 | /* ========================================================================= */ | 1906 | /* ========================================================================= */ |
1380 | /* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ | 1907 | /* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ |
1381 | /* ========================================================================= */ | 1908 | /* ========================================================================= */ |
1382 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL | 1909 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL |
1383 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8 | 1910 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8 |
1384 | 1911 | ||
1385 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0 | 1912 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0 |
1386 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62 | 1913 | #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62 |
@@ -1402,8 +1929,8 @@ union uvh_lb_bau_sb_activation_control_u { | |||
1402 | /* ========================================================================= */ | 1929 | /* ========================================================================= */ |
1403 | /* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */ | 1930 | /* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */ |
1404 | /* ========================================================================= */ | 1931 | /* ========================================================================= */ |
1405 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL | 1932 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL |
1406 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0 | 1933 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0 |
1407 | 1934 | ||
1408 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0 | 1935 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0 |
1409 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL | 1936 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL |
@@ -1418,8 +1945,8 @@ union uvh_lb_bau_sb_activation_status_0_u { | |||
1418 | /* ========================================================================= */ | 1945 | /* ========================================================================= */ |
1419 | /* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */ | 1946 | /* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */ |
1420 | /* ========================================================================= */ | 1947 | /* ========================================================================= */ |
1421 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL | 1948 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL |
1422 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8 | 1949 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8 |
1423 | 1950 | ||
1424 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0 | 1951 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0 |
1425 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL | 1952 | #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL |
@@ -1434,8 +1961,8 @@ union uvh_lb_bau_sb_activation_status_1_u { | |||
1434 | /* ========================================================================= */ | 1961 | /* ========================================================================= */ |
1435 | /* UVH_LB_BAU_SB_DESCRIPTOR_BASE */ | 1962 | /* UVH_LB_BAU_SB_DESCRIPTOR_BASE */ |
1436 | /* ========================================================================= */ | 1963 | /* ========================================================================= */ |
1437 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL | 1964 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL |
1438 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0 | 1965 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0 |
1439 | 1966 | ||
1440 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12 | 1967 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12 |
1441 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49 | 1968 | #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49 |
@@ -1456,7 +1983,10 @@ union uvh_lb_bau_sb_descriptor_base_u { | |||
1456 | /* ========================================================================= */ | 1983 | /* ========================================================================= */ |
1457 | /* UVH_NODE_ID */ | 1984 | /* UVH_NODE_ID */ |
1458 | /* ========================================================================= */ | 1985 | /* ========================================================================= */ |
1459 | #define UVH_NODE_ID 0x0UL | 1986 | #define UVH_NODE_ID 0x0UL |
1987 | #define UV1H_NODE_ID 0x0UL | ||
1988 | #define UV2H_NODE_ID 0x0UL | ||
1989 | #define UV3H_NODE_ID 0x0UL | ||
1460 | 1990 | ||
1461 | #define UVH_NODE_ID_FORCE1_SHFT 0 | 1991 | #define UVH_NODE_ID_FORCE1_SHFT 0 |
1462 | #define UVH_NODE_ID_MANUFACTURER_SHFT 1 | 1992 | #define UVH_NODE_ID_MANUFACTURER_SHFT 1 |
@@ -1484,6 +2014,21 @@ union uvh_lb_bau_sb_descriptor_base_u { | |||
1484 | #define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL | 2014 | #define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL |
1485 | #define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL | 2015 | #define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL |
1486 | 2016 | ||
2017 | #define UVXH_NODE_ID_FORCE1_SHFT 0 | ||
2018 | #define UVXH_NODE_ID_MANUFACTURER_SHFT 1 | ||
2019 | #define UVXH_NODE_ID_PART_NUMBER_SHFT 12 | ||
2020 | #define UVXH_NODE_ID_REVISION_SHFT 28 | ||
2021 | #define UVXH_NODE_ID_NODE_ID_SHFT 32 | ||
2022 | #define UVXH_NODE_ID_NODES_PER_BIT_SHFT 50 | ||
2023 | #define UVXH_NODE_ID_NI_PORT_SHFT 57 | ||
2024 | #define UVXH_NODE_ID_FORCE1_MASK 0x0000000000000001UL | ||
2025 | #define UVXH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL | ||
2026 | #define UVXH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL | ||
2027 | #define UVXH_NODE_ID_REVISION_MASK 0x00000000f0000000UL | ||
2028 | #define UVXH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL | ||
2029 | #define UVXH_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL | ||
2030 | #define UVXH_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL | ||
2031 | |||
1487 | #define UV2H_NODE_ID_FORCE1_SHFT 0 | 2032 | #define UV2H_NODE_ID_FORCE1_SHFT 0 |
1488 | #define UV2H_NODE_ID_MANUFACTURER_SHFT 1 | 2033 | #define UV2H_NODE_ID_MANUFACTURER_SHFT 1 |
1489 | #define UV2H_NODE_ID_PART_NUMBER_SHFT 12 | 2034 | #define UV2H_NODE_ID_PART_NUMBER_SHFT 12 |
@@ -1499,6 +2044,25 @@ union uvh_lb_bau_sb_descriptor_base_u { | |||
1499 | #define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL | 2044 | #define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL |
1500 | #define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL | 2045 | #define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL |
1501 | 2046 | ||
2047 | #define UV3H_NODE_ID_FORCE1_SHFT 0 | ||
2048 | #define UV3H_NODE_ID_MANUFACTURER_SHFT 1 | ||
2049 | #define UV3H_NODE_ID_PART_NUMBER_SHFT 12 | ||
2050 | #define UV3H_NODE_ID_REVISION_SHFT 28 | ||
2051 | #define UV3H_NODE_ID_NODE_ID_SHFT 32 | ||
2052 | #define UV3H_NODE_ID_ROUTER_SELECT_SHFT 48 | ||
2053 | #define UV3H_NODE_ID_RESERVED_2_SHFT 49 | ||
2054 | #define UV3H_NODE_ID_NODES_PER_BIT_SHFT 50 | ||
2055 | #define UV3H_NODE_ID_NI_PORT_SHFT 57 | ||
2056 | #define UV3H_NODE_ID_FORCE1_MASK 0x0000000000000001UL | ||
2057 | #define UV3H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL | ||
2058 | #define UV3H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL | ||
2059 | #define UV3H_NODE_ID_REVISION_MASK 0x00000000f0000000UL | ||
2060 | #define UV3H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL | ||
2061 | #define UV3H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL | ||
2062 | #define UV3H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL | ||
2063 | #define UV3H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL | ||
2064 | #define UV3H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL | ||
2065 | |||
1502 | union uvh_node_id_u { | 2066 | union uvh_node_id_u { |
1503 | unsigned long v; | 2067 | unsigned long v; |
1504 | struct uvh_node_id_s { | 2068 | struct uvh_node_id_s { |
@@ -1521,6 +2085,17 @@ union uvh_node_id_u { | |||
1521 | unsigned long ni_port:4; /* RO */ | 2085 | unsigned long ni_port:4; /* RO */ |
1522 | unsigned long rsvd_60_63:4; | 2086 | unsigned long rsvd_60_63:4; |
1523 | } s1; | 2087 | } s1; |
2088 | struct uvxh_node_id_s { | ||
2089 | unsigned long force1:1; /* RO */ | ||
2090 | unsigned long manufacturer:11; /* RO */ | ||
2091 | unsigned long part_number:16; /* RO */ | ||
2092 | unsigned long revision:4; /* RO */ | ||
2093 | unsigned long node_id:15; /* RW */ | ||
2094 | unsigned long rsvd_47_49:3; | ||
2095 | unsigned long nodes_per_bit:7; /* RO */ | ||
2096 | unsigned long ni_port:5; /* RO */ | ||
2097 | unsigned long rsvd_62_63:2; | ||
2098 | } sx; | ||
1524 | struct uv2h_node_id_s { | 2099 | struct uv2h_node_id_s { |
1525 | unsigned long force1:1; /* RO */ | 2100 | unsigned long force1:1; /* RO */ |
1526 | unsigned long manufacturer:11; /* RO */ | 2101 | unsigned long manufacturer:11; /* RO */ |
@@ -1532,13 +2107,26 @@ union uvh_node_id_u { | |||
1532 | unsigned long ni_port:5; /* RO */ | 2107 | unsigned long ni_port:5; /* RO */ |
1533 | unsigned long rsvd_62_63:2; | 2108 | unsigned long rsvd_62_63:2; |
1534 | } s2; | 2109 | } s2; |
2110 | struct uv3h_node_id_s { | ||
2111 | unsigned long force1:1; /* RO */ | ||
2112 | unsigned long manufacturer:11; /* RO */ | ||
2113 | unsigned long part_number:16; /* RO */ | ||
2114 | unsigned long revision:4; /* RO */ | ||
2115 | unsigned long node_id:15; /* RW */ | ||
2116 | unsigned long rsvd_47:1; | ||
2117 | unsigned long router_select:1; /* RO */ | ||
2118 | unsigned long rsvd_49:1; | ||
2119 | unsigned long nodes_per_bit:7; /* RO */ | ||
2120 | unsigned long ni_port:5; /* RO */ | ||
2121 | unsigned long rsvd_62_63:2; | ||
2122 | } s3; | ||
1535 | }; | 2123 | }; |
1536 | 2124 | ||
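[UVH_NODE_ID sits at offset 0 and identifies the local hub. The fields
shared by UV2 and UV3 can be read through the common sx view (or the
UVXH mask/shift pairs above), while router_select exists only in the
UV3 layout. A minimal sketch under those assumptions:

	union uvh_node_id_u nid;

	nid.v = uv_read_local_mmr(UVH_NODE_ID);
	pr_info("hub node_id=%lu revision=%lu\n",
		(unsigned long)nid.sx.node_id,
		(unsigned long)nid.sx.revision);
	if (is_uv3_hub() && nid.s3.router_select)
		pr_info("UV3 router_select is set\n");
]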
1537 | /* ========================================================================= */ | 2125 | /* ========================================================================= */ |
1538 | /* UVH_NODE_PRESENT_TABLE */ | 2126 | /* UVH_NODE_PRESENT_TABLE */ |
1539 | /* ========================================================================= */ | 2127 | /* ========================================================================= */ |
1540 | #define UVH_NODE_PRESENT_TABLE 0x1400UL | 2128 | #define UVH_NODE_PRESENT_TABLE 0x1400UL |
1541 | #define UVH_NODE_PRESENT_TABLE_DEPTH 16 | 2129 | #define UVH_NODE_PRESENT_TABLE_DEPTH 16 |
1542 | 2130 | ||
1543 | #define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0 | 2131 | #define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0 |
1544 | #define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL | 2132 | #define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL |
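[UVH_NODE_PRESENT_TABLE is a 16-entry array of 64-bit bitmaps, one bit
per potentially present node, stepped in 8-byte increments; this is how
the UV setup code in x2apic_uv_x.c sizes the system. A sketch of
counting present nodes under that assumption:

	int i, cnt = 0;

	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
		unsigned long np;

		np = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
		cnt += hweight64(np);	/* one set bit per present node */
	}
]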
@@ -1553,7 +2141,7 @@ union uvh_node_present_table_u { | |||
1553 | /* ========================================================================= */ | 2141 | /* ========================================================================= */ |
1554 | /* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */ | 2142 | /* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */ |
1555 | /* ========================================================================= */ | 2143 | /* ========================================================================= */ |
1556 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL | 2144 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL |
1557 | 2145 | ||
1558 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24 | 2146 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24 |
1559 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48 | 2147 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48 |
@@ -1577,7 +2165,7 @@ union uvh_rh_gam_alias210_overlay_config_0_mmr_u { | |||
1577 | /* ========================================================================= */ | 2165 | /* ========================================================================= */ |
1578 | /* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */ | 2166 | /* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */ |
1579 | /* ========================================================================= */ | 2167 | /* ========================================================================= */ |
1580 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL | 2168 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL |
1581 | 2169 | ||
1582 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24 | 2170 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24 |
1583 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48 | 2171 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48 |
@@ -1601,7 +2189,7 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u { | |||
1601 | /* ========================================================================= */ | 2189 | /* ========================================================================= */ |
1602 | /* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */ | 2190 | /* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */ |
1603 | /* ========================================================================= */ | 2191 | /* ========================================================================= */ |
1604 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL | 2192 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL |
1605 | 2193 | ||
1606 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24 | 2194 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24 |
1607 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48 | 2195 | #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48 |
@@ -1625,7 +2213,7 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u { | |||
1625 | /* ========================================================================= */ | 2213 | /* ========================================================================= */ |
1626 | /* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */ | 2214 | /* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */ |
1627 | /* ========================================================================= */ | 2215 | /* ========================================================================= */ |
1628 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL | 2216 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL |
1629 | 2217 | ||
1630 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24 | 2218 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24 |
1631 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL | 2219 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL |
@@ -1642,7 +2230,7 @@ union uvh_rh_gam_alias210_redirect_config_0_mmr_u { | |||
1642 | /* ========================================================================= */ | 2230 | /* ========================================================================= */ |
1643 | /* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */ | 2231 | /* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */ |
1644 | /* ========================================================================= */ | 2232 | /* ========================================================================= */ |
1645 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL | 2233 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL |
1646 | 2234 | ||
1647 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24 | 2235 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24 |
1648 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL | 2236 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL |
@@ -1659,7 +2247,7 @@ union uvh_rh_gam_alias210_redirect_config_1_mmr_u { | |||
1659 | /* ========================================================================= */ | 2247 | /* ========================================================================= */ |
1660 | /* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */ | 2248 | /* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */ |
1661 | /* ========================================================================= */ | 2249 | /* ========================================================================= */ |
1662 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL | 2250 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL |
1663 | 2251 | ||
1664 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24 | 2252 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24 |
1665 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL | 2253 | #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL |
@@ -1676,7 +2264,10 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u { | |||
1676 | /* ========================================================================= */ | 2264 | /* ========================================================================= */ |
1677 | /* UVH_RH_GAM_CONFIG_MMR */ | 2265 | /* UVH_RH_GAM_CONFIG_MMR */ |
1678 | /* ========================================================================= */ | 2266 | /* ========================================================================= */ |
1679 | #define UVH_RH_GAM_CONFIG_MMR 0x1600000UL | 2267 | #define UVH_RH_GAM_CONFIG_MMR 0x1600000UL |
2268 | #define UV1H_RH_GAM_CONFIG_MMR 0x1600000UL | ||
2269 | #define UV2H_RH_GAM_CONFIG_MMR 0x1600000UL | ||
2270 | #define UV3H_RH_GAM_CONFIG_MMR 0x1600000UL | ||
1680 | 2271 | ||
1681 | #define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 | 2272 | #define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 |
1682 | #define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 | 2273 | #define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 |
@@ -1690,11 +2281,21 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u { | |||
1690 | #define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL | 2281 | #define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL |
1691 | #define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL | 2282 | #define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL |
1692 | 2283 | ||
2284 | #define UVXH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 | ||
2285 | #define UVXH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 | ||
2286 | #define UVXH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL | ||
2287 | #define UVXH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL | ||
2288 | |||
1693 | #define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 | 2289 | #define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 |
1694 | #define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 | 2290 | #define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 |
1695 | #define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL | 2291 | #define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL |
1696 | #define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL | 2292 | #define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL |
1697 | 2293 | ||
2294 | #define UV3H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 | ||
2295 | #define UV3H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 | ||
2296 | #define UV3H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL | ||
2297 | #define UV3H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL | ||
2298 | |||
1698 | union uvh_rh_gam_config_mmr_u { | 2299 | union uvh_rh_gam_config_mmr_u { |
1699 | unsigned long v; | 2300 | unsigned long v; |
1700 | struct uvh_rh_gam_config_mmr_s { | 2301 | struct uvh_rh_gam_config_mmr_s { |
@@ -1709,20 +2310,37 @@ union uvh_rh_gam_config_mmr_u { | |||
1709 | unsigned long mmiol_cfg:1; /* RW */ | 2310 | unsigned long mmiol_cfg:1; /* RW */ |
1710 | unsigned long rsvd_13_63:51; | 2311 | unsigned long rsvd_13_63:51; |
1711 | } s1; | 2312 | } s1; |
2313 | struct uvxh_rh_gam_config_mmr_s { | ||
2314 | unsigned long m_skt:6; /* RW */ | ||
2315 | unsigned long n_skt:4; /* RW */ | ||
2316 | unsigned long rsvd_10_63:54; | ||
2317 | } sx; | ||
1712 | struct uv2h_rh_gam_config_mmr_s { | 2318 | struct uv2h_rh_gam_config_mmr_s { |
1713 | unsigned long m_skt:6; /* RW */ | 2319 | unsigned long m_skt:6; /* RW */ |
1714 | unsigned long n_skt:4; /* RW */ | 2320 | unsigned long n_skt:4; /* RW */ |
1715 | unsigned long rsvd_10_63:54; | 2321 | unsigned long rsvd_10_63:54; |
1716 | } s2; | 2322 | } s2; |
2323 | struct uv3h_rh_gam_config_mmr_s { | ||
2324 | unsigned long m_skt:6; /* RW */ | ||
2325 | unsigned long n_skt:4; /* RW */ | ||
2326 | unsigned long rsvd_10_63:54; | ||
2327 | } s3; | ||
1717 | }; | 2328 | }; |
1718 | 2329 | ||
1719 | /* ========================================================================= */ | 2330 | /* ========================================================================= */ |
1720 | /* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ | 2331 | /* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ |
1721 | /* ========================================================================= */ | 2332 | /* ========================================================================= */ |
1722 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL | 2333 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL |
2334 | #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL | ||
2335 | #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL | ||
2336 | #define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL | ||
1723 | 2337 | ||
1724 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 | 2338 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 |
2339 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 | ||
2340 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 | ||
1725 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL | 2341 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL |
2342 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL | ||
2343 | #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL | ||
1726 | 2344 | ||
1727 | #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 | 2345 | #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 |
1728 | #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48 | 2346 | #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48 |
@@ -1733,6 +2351,13 @@ union uvh_rh_gam_config_mmr_u { | |||
1733 | #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL | 2351 | #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL |
1734 | #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL | 2352 | #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL |
1735 | 2353 | ||
2354 | #define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 | ||
2355 | #define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 | ||
2356 | #define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 | ||
2357 | #define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL | ||
2358 | #define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL | ||
2359 | #define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL | ||
2360 | |||
1736 | #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 | 2361 | #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 |
1737 | #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 | 2362 | #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 |
1738 | #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 | 2363 | #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 |
@@ -1740,12 +2365,23 @@ union uvh_rh_gam_config_mmr_u { | |||
1740 | #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL | 2365 | #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL |
1741 | #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL | 2366 | #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL |
1742 | 2367 | ||
2368 | #define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 | ||
2369 | #define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 | ||
2370 | #define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_SHFT 62 | ||
2371 | #define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 | ||
2372 | #define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL | ||
2373 | #define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL | ||
2374 | #define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_MASK 0x4000000000000000UL | ||
2375 | #define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL | ||
2376 | |||
1743 | union uvh_rh_gam_gru_overlay_config_mmr_u { | 2377 | union uvh_rh_gam_gru_overlay_config_mmr_u { |
1744 | unsigned long v; | 2378 | unsigned long v; |
1745 | struct uvh_rh_gam_gru_overlay_config_mmr_s { | 2379 | struct uvh_rh_gam_gru_overlay_config_mmr_s { |
1746 | unsigned long rsvd_0_27:28; | 2380 | unsigned long rsvd_0_27:28; |
1747 | unsigned long base:18; /* RW */ | 2381 | unsigned long base:18; /* RW */ |
1748 | unsigned long rsvd_46_62:17; | 2382 | unsigned long rsvd_46_51:6; |
2383 | unsigned long n_gru:4; /* RW */ | ||
2384 | unsigned long rsvd_56_62:7; | ||
1749 | unsigned long enable:1; /* RW */ | 2385 | unsigned long enable:1; /* RW */ |
1750 | } s; | 2386 | } s; |
1751 | struct uv1h_rh_gam_gru_overlay_config_mmr_s { | 2387 | struct uv1h_rh_gam_gru_overlay_config_mmr_s { |
@@ -1758,6 +2394,14 @@ union uvh_rh_gam_gru_overlay_config_mmr_u { | |||
1758 | unsigned long rsvd_56_62:7; | 2394 | unsigned long rsvd_56_62:7; |
1759 | unsigned long enable:1; /* RW */ | 2395 | unsigned long enable:1; /* RW */ |
1760 | } s1; | 2396 | } s1; |
2397 | struct uvxh_rh_gam_gru_overlay_config_mmr_s { | ||
2398 | unsigned long rsvd_0_27:28; | ||
2399 | unsigned long base:18; /* RW */ | ||
2400 | unsigned long rsvd_46_51:6; | ||
2401 | unsigned long n_gru:4; /* RW */ | ||
2402 | unsigned long rsvd_56_62:7; | ||
2403 | unsigned long enable:1; /* RW */ | ||
2404 | } sx; | ||
1761 | struct uv2h_rh_gam_gru_overlay_config_mmr_s { | 2405 | struct uv2h_rh_gam_gru_overlay_config_mmr_s { |
1762 | unsigned long rsvd_0_27:28; | 2406 | unsigned long rsvd_0_27:28; |
1763 | unsigned long base:18; /* RW */ | 2407 | unsigned long base:18; /* RW */ |
@@ -1766,12 +2410,22 @@ union uvh_rh_gam_gru_overlay_config_mmr_u { | |||
1766 | unsigned long rsvd_56_62:7; | 2410 | unsigned long rsvd_56_62:7; |
1767 | unsigned long enable:1; /* RW */ | 2411 | unsigned long enable:1; /* RW */ |
1768 | } s2; | 2412 | } s2; |
2413 | struct uv3h_rh_gam_gru_overlay_config_mmr_s { | ||
2414 | unsigned long rsvd_0_27:28; | ||
2415 | unsigned long base:18; /* RW */ | ||
2416 | unsigned long rsvd_46_51:6; | ||
2417 | unsigned long n_gru:4; /* RW */ | ||
2418 | unsigned long rsvd_56_61:6; | ||
2419 | unsigned long mode:1; /* RW */ | ||
2420 | unsigned long enable:1; /* RW */ | ||
2421 | } s3; | ||
1769 | }; | 2422 | }; |
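The shift/mask macros and the per-revision bitfield views above are two equivalent decodings of the same 64-bit word. A minimal standalone sketch (userspace; the register value and the shortened macro names are made up, the shift/mask values are copied from this hunk):

	#include <stdio.h>

	/* Local copies of the UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_* values above. */
	#define GRU_BASE_SHFT	28
	#define GRU_BASE_MASK	0x00003ffff0000000UL
	#define GRU_N_GRU_SHFT	52
	#define GRU_N_GRU_MASK	0x00f0000000000000UL
	#define GRU_ENABLE_MASK	0x8000000000000000UL

	int main(void)
	{
		unsigned long v = 0x80340000f0000000UL;	/* hypothetical MMR value */

		printf("base   = 0x%lx\n", (v & GRU_BASE_MASK) >> GRU_BASE_SHFT);
		printf("n_gru  = %lu\n",   (v & GRU_N_GRU_MASK) >> GRU_N_GRU_SHFT);
		printf("enable = %d\n",    (v & GRU_ENABLE_MASK) != 0);
		return 0;
	}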
1770 | 2423 | ||
1771 | /* ========================================================================= */ | 2424 | /* ========================================================================= */ |
1772 | /* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */ | 2425 | /* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */ |
1773 | /* ========================================================================= */ | 2426 | /* ========================================================================= */ |
1774 | #define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL | 2427 | #define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL |
2428 | #define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL | ||
1775 | 2429 | ||
1776 | #define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30 | 2430 | #define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30 |
1777 | #define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46 | 2431 | #define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46 |
@@ -1814,10 +2468,15 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u { | |||
1814 | /* ========================================================================= */ | 2468 | /* ========================================================================= */ |
1815 | /* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */ | 2469 | /* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */ |
1816 | /* ========================================================================= */ | 2470 | /* ========================================================================= */ |
1817 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL | 2471 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL |
2472 | #define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL | ||
2473 | #define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL | ||
2474 | #define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL | ||
1818 | 2475 | ||
1819 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 | 2476 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 |
2477 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 | ||
1820 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL | 2478 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL |
2479 | #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL | ||
1821 | 2480 | ||
1822 | #define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 | 2481 | #define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 |
1823 | #define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46 | 2482 | #define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46 |
@@ -1826,11 +2485,21 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u { | |||
1826 | #define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL | 2485 | #define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL |
1827 | #define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL | 2486 | #define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL |
1828 | 2487 | ||
2488 | #define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 | ||
2489 | #define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 | ||
2490 | #define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL | ||
2491 | #define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL | ||
2492 | |||
1829 | #define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 | 2493 | #define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 |
1830 | #define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 | 2494 | #define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 |
1831 | #define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL | 2495 | #define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL |
1832 | #define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL | 2496 | #define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL |
1833 | 2497 | ||
2498 | #define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 | ||
2499 | #define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 | ||
2500 | #define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL | ||
2501 | #define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL | ||
2502 | |||
1834 | union uvh_rh_gam_mmr_overlay_config_mmr_u { | 2503 | union uvh_rh_gam_mmr_overlay_config_mmr_u { |
1835 | unsigned long v; | 2504 | unsigned long v; |
1836 | struct uvh_rh_gam_mmr_overlay_config_mmr_s { | 2505 | struct uvh_rh_gam_mmr_overlay_config_mmr_s { |
@@ -1846,18 +2515,30 @@ union uvh_rh_gam_mmr_overlay_config_mmr_u { | |||
1846 | unsigned long rsvd_47_62:16; | 2515 | unsigned long rsvd_47_62:16; |
1847 | unsigned long enable:1; /* RW */ | 2516 | unsigned long enable:1; /* RW */ |
1848 | } s1; | 2517 | } s1; |
2518 | struct uvxh_rh_gam_mmr_overlay_config_mmr_s { | ||
2519 | unsigned long rsvd_0_25:26; | ||
2520 | unsigned long base:20; /* RW */ | ||
2521 | unsigned long rsvd_46_62:17; | ||
2522 | unsigned long enable:1; /* RW */ | ||
2523 | } sx; | ||
1849 | struct uv2h_rh_gam_mmr_overlay_config_mmr_s { | 2524 | struct uv2h_rh_gam_mmr_overlay_config_mmr_s { |
1850 | unsigned long rsvd_0_25:26; | 2525 | unsigned long rsvd_0_25:26; |
1851 | unsigned long base:20; /* RW */ | 2526 | unsigned long base:20; /* RW */ |
1852 | unsigned long rsvd_46_62:17; | 2527 | unsigned long rsvd_46_62:17; |
1853 | unsigned long enable:1; /* RW */ | 2528 | unsigned long enable:1; /* RW */ |
1854 | } s2; | 2529 | } s2; |
2530 | struct uv3h_rh_gam_mmr_overlay_config_mmr_s { | ||
2531 | unsigned long rsvd_0_25:26; | ||
2532 | unsigned long base:20; /* RW */ | ||
2533 | unsigned long rsvd_46_62:17; | ||
2534 | unsigned long enable:1; /* RW */ | ||
2535 | } s3; | ||
1855 | }; | 2536 | }; |
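For writes, the union form gives a natural read-modify-write: read the word, set fields through a bitfield view, write it back. A sketch assuming kernel context with <asm/uv/uv_hub.h> included (uv_read_local_mmr()/uv_write_local_mmr() are the accessors the UV code uses elsewhere):

	static void uv_mmr_overlay_enable(unsigned long base_bits)
	{
		union uvh_rh_gam_mmr_overlay_config_mmr_u ov;

		ov.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
		ov.sx.base   = base_bits;	/* UVX-common view: same layout on UV2 and UV3 */
		ov.sx.enable = 1;
		uv_write_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR, ov.v);
	}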
1856 | 2537 | ||
1857 | /* ========================================================================= */ | 2538 | /* ========================================================================= */ |
1858 | /* UVH_RTC */ | 2539 | /* UVH_RTC */ |
1859 | /* ========================================================================= */ | 2540 | /* ========================================================================= */ |
1860 | #define UVH_RTC 0x340000UL | 2541 | #define UVH_RTC 0x340000UL |
1861 | 2542 | ||
1862 | #define UVH_RTC_REAL_TIME_CLOCK_SHFT 0 | 2543 | #define UVH_RTC_REAL_TIME_CLOCK_SHFT 0 |
1863 | #define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL | 2544 | #define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL |
@@ -1873,7 +2554,7 @@ union uvh_rtc_u { | |||
1873 | /* ========================================================================= */ | 2554 | /* ========================================================================= */ |
1874 | /* UVH_RTC1_INT_CONFIG */ | 2555 | /* UVH_RTC1_INT_CONFIG */ |
1875 | /* ========================================================================= */ | 2556 | /* ========================================================================= */ |
1876 | #define UVH_RTC1_INT_CONFIG 0x615c0UL | 2557 | #define UVH_RTC1_INT_CONFIG 0x615c0UL |
1877 | 2558 | ||
1878 | #define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0 | 2559 | #define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0 |
1879 | #define UVH_RTC1_INT_CONFIG_DM_SHFT 8 | 2560 | #define UVH_RTC1_INT_CONFIG_DM_SHFT 8 |
@@ -1911,8 +2592,8 @@ union uvh_rtc1_int_config_u { | |||
1911 | /* ========================================================================= */ | 2592 | /* ========================================================================= */ |
1912 | /* UVH_SCRATCH5 */ | 2593 | /* UVH_SCRATCH5 */ |
1913 | /* ========================================================================= */ | 2594 | /* ========================================================================= */ |
1914 | #define UVH_SCRATCH5 0x2d0200UL | 2595 | #define UVH_SCRATCH5 0x2d0200UL |
1915 | #define UVH_SCRATCH5_32 0x778 | 2596 | #define UVH_SCRATCH5_32 0x778 |
1916 | 2597 | ||
1917 | #define UVH_SCRATCH5_SCRATCH5_SHFT 0 | 2598 | #define UVH_SCRATCH5_SCRATCH5_SHFT 0 |
1918 | #define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL | 2599 | #define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL |
@@ -1925,79 +2606,79 @@ union uvh_scratch5_u { | |||
1925 | }; | 2606 | }; |
1926 | 2607 | ||
1927 | /* ========================================================================= */ | 2608 | /* ========================================================================= */ |
1928 | /* UV2H_EVENT_OCCURRED2 */ | 2609 | /* UVXH_EVENT_OCCURRED2 */ |
1929 | /* ========================================================================= */ | 2610 | /* ========================================================================= */ |
1930 | #define UV2H_EVENT_OCCURRED2 0x70100UL | 2611 | #define UVXH_EVENT_OCCURRED2 0x70100UL |
1931 | #define UV2H_EVENT_OCCURRED2_32 0xb68 | 2612 | #define UVXH_EVENT_OCCURRED2_32 0xb68 |
1932 | 2613 | ||
1933 | #define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0 | 2614 | #define UVXH_EVENT_OCCURRED2_RTC_0_SHFT 0 |
1934 | #define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1 | 2615 | #define UVXH_EVENT_OCCURRED2_RTC_1_SHFT 1 |
1935 | #define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2 | 2616 | #define UVXH_EVENT_OCCURRED2_RTC_2_SHFT 2 |
1936 | #define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3 | 2617 | #define UVXH_EVENT_OCCURRED2_RTC_3_SHFT 3 |
1937 | #define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4 | 2618 | #define UVXH_EVENT_OCCURRED2_RTC_4_SHFT 4 |
1938 | #define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5 | 2619 | #define UVXH_EVENT_OCCURRED2_RTC_5_SHFT 5 |
1939 | #define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6 | 2620 | #define UVXH_EVENT_OCCURRED2_RTC_6_SHFT 6 |
1940 | #define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7 | 2621 | #define UVXH_EVENT_OCCURRED2_RTC_7_SHFT 7 |
1941 | #define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8 | 2622 | #define UVXH_EVENT_OCCURRED2_RTC_8_SHFT 8 |
1942 | #define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9 | 2623 | #define UVXH_EVENT_OCCURRED2_RTC_9_SHFT 9 |
1943 | #define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10 | 2624 | #define UVXH_EVENT_OCCURRED2_RTC_10_SHFT 10 |
1944 | #define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11 | 2625 | #define UVXH_EVENT_OCCURRED2_RTC_11_SHFT 11 |
1945 | #define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12 | 2626 | #define UVXH_EVENT_OCCURRED2_RTC_12_SHFT 12 |
1946 | #define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13 | 2627 | #define UVXH_EVENT_OCCURRED2_RTC_13_SHFT 13 |
1947 | #define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14 | 2628 | #define UVXH_EVENT_OCCURRED2_RTC_14_SHFT 14 |
1948 | #define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15 | 2629 | #define UVXH_EVENT_OCCURRED2_RTC_15_SHFT 15 |
1949 | #define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16 | 2630 | #define UVXH_EVENT_OCCURRED2_RTC_16_SHFT 16 |
1950 | #define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17 | 2631 | #define UVXH_EVENT_OCCURRED2_RTC_17_SHFT 17 |
1951 | #define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18 | 2632 | #define UVXH_EVENT_OCCURRED2_RTC_18_SHFT 18 |
1952 | #define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19 | 2633 | #define UVXH_EVENT_OCCURRED2_RTC_19_SHFT 19 |
1953 | #define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20 | 2634 | #define UVXH_EVENT_OCCURRED2_RTC_20_SHFT 20 |
1954 | #define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21 | 2635 | #define UVXH_EVENT_OCCURRED2_RTC_21_SHFT 21 |
1955 | #define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22 | 2636 | #define UVXH_EVENT_OCCURRED2_RTC_22_SHFT 22 |
1956 | #define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23 | 2637 | #define UVXH_EVENT_OCCURRED2_RTC_23_SHFT 23 |
1957 | #define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24 | 2638 | #define UVXH_EVENT_OCCURRED2_RTC_24_SHFT 24 |
1958 | #define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25 | 2639 | #define UVXH_EVENT_OCCURRED2_RTC_25_SHFT 25 |
1959 | #define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26 | 2640 | #define UVXH_EVENT_OCCURRED2_RTC_26_SHFT 26 |
1960 | #define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27 | 2641 | #define UVXH_EVENT_OCCURRED2_RTC_27_SHFT 27 |
1961 | #define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28 | 2642 | #define UVXH_EVENT_OCCURRED2_RTC_28_SHFT 28 |
1962 | #define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29 | 2643 | #define UVXH_EVENT_OCCURRED2_RTC_29_SHFT 29 |
1963 | #define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30 | 2644 | #define UVXH_EVENT_OCCURRED2_RTC_30_SHFT 30 |
1964 | #define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31 | 2645 | #define UVXH_EVENT_OCCURRED2_RTC_31_SHFT 31 |
1965 | #define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL | 2646 | #define UVXH_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL |
1966 | #define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL | 2647 | #define UVXH_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL |
1967 | #define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL | 2648 | #define UVXH_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL |
1968 | #define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL | 2649 | #define UVXH_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL |
1969 | #define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL | 2650 | #define UVXH_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL |
1970 | #define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL | 2651 | #define UVXH_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL |
1971 | #define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL | 2652 | #define UVXH_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL |
1972 | #define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL | 2653 | #define UVXH_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL |
1973 | #define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL | 2654 | #define UVXH_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL |
1974 | #define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL | 2655 | #define UVXH_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL |
1975 | #define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL | 2656 | #define UVXH_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL |
1976 | #define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL | 2657 | #define UVXH_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL |
1977 | #define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL | 2658 | #define UVXH_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL |
1978 | #define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL | 2659 | #define UVXH_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL |
1979 | #define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL | 2660 | #define UVXH_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL |
1980 | #define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL | 2661 | #define UVXH_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL |
1981 | #define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL | 2662 | #define UVXH_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL |
1982 | #define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL | 2663 | #define UVXH_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL |
1983 | #define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL | 2664 | #define UVXH_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL |
1984 | #define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL | 2665 | #define UVXH_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL |
1985 | #define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL | 2666 | #define UVXH_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL |
1986 | #define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL | 2667 | #define UVXH_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL |
1987 | #define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL | 2668 | #define UVXH_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL |
1988 | #define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL | 2669 | #define UVXH_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL |
1989 | #define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL | 2670 | #define UVXH_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL |
1990 | #define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL | 2671 | #define UVXH_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL |
1991 | #define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL | 2672 | #define UVXH_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL |
1992 | #define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL | 2673 | #define UVXH_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL |
1993 | #define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL | 2674 | #define UVXH_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL |
1994 | #define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL | 2675 | #define UVXH_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL |
1995 | #define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL | 2676 | #define UVXH_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL |
1996 | #define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL | 2677 | #define UVXH_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL |
1997 | 2678 | ||
1998 | union uv2h_event_occurred2_u { | 2679 | union uvxh_event_occurred2_u { |
1999 | unsigned long v; | 2680 | unsigned long v; |
2000 | struct uv2h_event_occurred2_s { | 2681 | struct uvxh_event_occurred2_s { |
2001 | unsigned long rtc_0:1; /* RW */ | 2682 | unsigned long rtc_0:1; /* RW */ |
2002 | unsigned long rtc_1:1; /* RW */ | 2683 | unsigned long rtc_1:1; /* RW */ |
2003 | unsigned long rtc_2:1; /* RW */ | 2684 | unsigned long rtc_2:1; /* RW */ |
@@ -2031,29 +2712,46 @@ union uv2h_event_occurred2_u { | |||
2031 | unsigned long rtc_30:1; /* RW */ | 2712 | unsigned long rtc_30:1; /* RW */ |
2032 | unsigned long rtc_31:1; /* RW */ | 2713 | unsigned long rtc_31:1; /* RW */ |
2033 | unsigned long rsvd_32_63:32; | 2714 | unsigned long rsvd_32_63:32; |
2034 | } s1; | 2715 | } sx; |
2035 | }; | 2716 | }; |
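Because the UV2 and UV3 layouts of this register are bit-identical, renaming the symbols to UVXH_* lets common code test an RTC event without a hub-revision check, e.g. (sketch, kernel context; uv_read_local_mmr() assumed available):

	static int uv_rtc_event_pending(int n)	/* n = RTC timer 0..31 */
	{
		unsigned long ev = uv_read_local_mmr(UVXH_EVENT_OCCURRED2);

		return (ev >> (UVXH_EVENT_OCCURRED2_RTC_0_SHFT + n)) & 1;
	}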
2036 | 2717 | ||
2037 | /* ========================================================================= */ | 2718 | /* ========================================================================= */ |
2038 | /* UV2H_EVENT_OCCURRED2_ALIAS */ | 2719 | /* UVXH_EVENT_OCCURRED2_ALIAS */ |
2039 | /* ========================================================================= */ | 2720 | /* ========================================================================= */ |
2040 | #define UV2H_EVENT_OCCURRED2_ALIAS 0x70108UL | 2721 | #define UVXH_EVENT_OCCURRED2_ALIAS 0x70108UL |
2041 | #define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70 | 2722 | #define UVXH_EVENT_OCCURRED2_ALIAS_32 0xb70 |
2723 | |||
2042 | 2724 | ||
2043 | /* ========================================================================= */ | 2725 | /* ========================================================================= */ |
2044 | /* UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 */ | 2726 | /* UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 */ |
2045 | /* ========================================================================= */ | 2727 | /* ========================================================================= */ |
2046 | #define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL | 2728 | #define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL |
2047 | #define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0 | 2729 | #define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL |
2730 | #define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL | ||
2731 | #define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0 | ||
2732 | #define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL | ||
2733 | #define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL | ||
2734 | |||
2735 | #define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0 | ||
2736 | #define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL | ||
2048 | 2737 | ||
2049 | #define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0 | 2738 | #define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0 |
2050 | #define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL | 2739 | #define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL |
2051 | 2740 | ||
2052 | union uv2h_lb_bau_sb_activation_status_2_u { | 2741 | #define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0 |
2742 | #define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL | ||
2743 | |||
2744 | union uvxh_lb_bau_sb_activation_status_2_u { | ||
2053 | unsigned long v; | 2745 | unsigned long v; |
2746 | struct uvxh_lb_bau_sb_activation_status_2_s { | ||
2747 | unsigned long aux_error:64; /* RW */ | ||
2748 | } sx; | ||
2054 | struct uv2h_lb_bau_sb_activation_status_2_s { | 2749 | struct uv2h_lb_bau_sb_activation_status_2_s { |
2055 | unsigned long aux_error:64; /* RW */ | 2750 | unsigned long aux_error:64; /* RW */ |
2056 | } s1; | 2751 | } s2; |
2752 | struct uv3h_lb_bau_sb_activation_status_2_s { | ||
2753 | unsigned long aux_error:64; /* RW */ | ||
2754 | } s3; | ||
2057 | }; | 2755 | }; |
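The sx/s2/s3 views here all describe the same full-width word, so the distinction is purely nominal; a compile-time size check documents that each view must stay one 64-bit register wide (C11 sketch, header assumed included; the kernel itself would use BUILD_BUG_ON):

	_Static_assert(sizeof(union uvxh_lb_bau_sb_activation_status_2_u) == 8,
		       "activation status views must overlay one 64-bit MMR");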
2058 | 2756 | ||
2059 | /* ========================================================================= */ | 2757 | /* ========================================================================= */ |
@@ -2073,5 +2771,87 @@ union uv1h_lb_target_physical_apic_id_mask_u { | |||
2073 | } s1; | 2771 | } s1; |
2074 | }; | 2772 | }; |
2075 | 2773 | ||
2774 | /* ========================================================================= */ | ||
2775 | /* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR */ | ||
2776 | /* ========================================================================= */ | ||
2777 | #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x1603000UL | ||
2778 | |||
2779 | #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26 | ||
2780 | #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46 | ||
2781 | #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63 | ||
2782 | #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL | ||
2783 | #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL | ||
2784 | #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL | ||
2785 | |||
2786 | union uv3h_rh_gam_mmioh_overlay_config0_mmr_u { | ||
2787 | unsigned long v; | ||
2788 | struct uv3h_rh_gam_mmioh_overlay_config0_mmr_s { | ||
2789 | unsigned long rsvd_0_25:26; | ||
2790 | unsigned long base:20; /* RW */ | ||
2791 | unsigned long m_io:6; /* RW */ | ||
2792 | unsigned long n_io:4; | ||
2793 | unsigned long rsvd_56_62:7; | ||
2794 | unsigned long enable:1; /* RW */ | ||
2795 | } s3; | ||
2796 | }; | ||
2797 | |||
2798 | /* ========================================================================= */ | ||
2799 | /* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR */ | ||
2800 | /* ========================================================================= */ | ||
2801 | #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1604000UL | ||
2802 | |||
2803 | #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26 | ||
2804 | #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46 | ||
2805 | #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63 | ||
2806 | #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL | ||
2807 | #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL | ||
2808 | #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL | ||
2809 | |||
2810 | union uv3h_rh_gam_mmioh_overlay_config1_mmr_u { | ||
2811 | unsigned long v; | ||
2812 | struct uv3h_rh_gam_mmioh_overlay_config1_mmr_s { | ||
2813 | unsigned long rsvd_0_25:26; | ||
2814 | unsigned long base:20; /* RW */ | ||
2815 | unsigned long m_io:6; /* RW */ | ||
2816 | unsigned long n_io:4; | ||
2817 | unsigned long rsvd_56_62:7; | ||
2818 | unsigned long enable:1; /* RW */ | ||
2819 | } s3; | ||
2820 | }; | ||
2821 | |||
2822 | /* ========================================================================= */ | ||
2823 | /* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR */ | ||
2824 | /* ========================================================================= */ | ||
2825 | #define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x1603800UL | ||
2826 | #define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128 | ||
2827 | |||
2828 | #define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0 | ||
2829 | #define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL | ||
2830 | |||
2831 | union uv3h_rh_gam_mmioh_redirect_config0_mmr_u { | ||
2832 | unsigned long v; | ||
2833 | struct uv3h_rh_gam_mmioh_redirect_config0_mmr_s { | ||
2834 | unsigned long nasid:15; /* RW */ | ||
2835 | unsigned long rsvd_15_63:49; | ||
2836 | } s3; | ||
2837 | }; | ||
2838 | |||
2839 | /* ========================================================================= */ | ||
2840 | /* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR */ | ||
2841 | /* ========================================================================= */ | ||
2842 | #define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x1604800UL | ||
2843 | #define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128 | ||
2844 | |||
2845 | #define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0 | ||
2846 | #define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL | ||
2847 | |||
2848 | union uv3h_rh_gam_mmioh_redirect_config1_mmr_u { | ||
2849 | unsigned long v; | ||
2850 | struct uv3h_rh_gam_mmioh_redirect_config1_mmr_s { | ||
2851 | unsigned long nasid:15; /* RW */ | ||
2852 | unsigned long rsvd_15_63:49; | ||
2853 | } s3; | ||
2854 | }; | ||
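UV3 splits MMIOH into two windows, each backed by a 128-entry NASID redirect table (the _DEPTH macros). Assuming consecutive 64-bit entries (stride 8 bytes, which the base/DEPTH pairing suggests), a table walk looks like this sketch (kernel context):

	static void uv3_dump_mmioh_redirect0(void)
	{
		int i;

		for (i = 0; i < UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH; i++) {
			union uv3h_rh_gam_mmioh_redirect_config0_mmr_u r;

			r.v = uv_read_local_mmr(
				UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR + i * 8);
			pr_info("mmioh redirect0[%d] -> nasid %lu\n",
				i, (unsigned long)r.s3.nasid);
		}
	}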
2855 | |||
2076 | 2856 | ||
2077 | #endif /* _ASM_X86_UV_UV_MMRS_H */ | 2857 | #endif /* _ASM_X86_UV_UV_MMRS_H */ |
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 57693498519c..7669941cc9d2 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
@@ -181,19 +181,38 @@ struct x86_platform_ops { | |||
181 | }; | 181 | }; |
182 | 182 | ||
183 | struct pci_dev; | 183 | struct pci_dev; |
184 | struct msi_msg; | ||
184 | 185 | ||
185 | struct x86_msi_ops { | 186 | struct x86_msi_ops { |
186 | int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); | 187 | int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); |
188 | void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq, | ||
189 | unsigned int dest, struct msi_msg *msg, | ||
190 | u8 hpet_id); | ||
187 | void (*teardown_msi_irq)(unsigned int irq); | 191 | void (*teardown_msi_irq)(unsigned int irq); |
188 | void (*teardown_msi_irqs)(struct pci_dev *dev); | 192 | void (*teardown_msi_irqs)(struct pci_dev *dev); |
189 | void (*restore_msi_irqs)(struct pci_dev *dev, int irq); | 193 | void (*restore_msi_irqs)(struct pci_dev *dev, int irq); |
194 | int (*setup_hpet_msi)(unsigned int irq, unsigned int id); | ||
190 | }; | 195 | }; |
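These hooks are reached through the global x86_msi ops table (filled in arch/x86/kernel/x86_init.c), which is what lets interrupt remapping interpose on message composition. A hypothetical override, shown only for the shape of the new compose_msi_msg hook:

	static void my_compose_msi_msg(struct pci_dev *dev, unsigned int irq,
				       unsigned int dest, struct msi_msg *msg,
				       u8 hpet_id)
	{
		/* fill msg->address_lo / msg->address_hi / msg->data here */
	}

	static void __init my_platform_setup(void)
	{
		x86_msi.compose_msi_msg = my_compose_msi_msg;	/* hypothetical */
	}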
191 | 196 | ||
197 | struct IO_APIC_route_entry; | ||
198 | struct io_apic_irq_attr; | ||
199 | struct irq_data; | ||
200 | struct cpumask; | ||
201 | |||
192 | struct x86_io_apic_ops { | 202 | struct x86_io_apic_ops { |
193 | void (*init) (void); | 203 | void (*init) (void); |
194 | unsigned int (*read) (unsigned int apic, unsigned int reg); | 204 | unsigned int (*read) (unsigned int apic, unsigned int reg); |
195 | void (*write) (unsigned int apic, unsigned int reg, unsigned int value); | 205 | void (*write) (unsigned int apic, unsigned int reg, unsigned int value); |
196 | void (*modify)(unsigned int apic, unsigned int reg, unsigned int value); | 206 | void (*modify) (unsigned int apic, unsigned int reg, unsigned int value); |
207 | void (*disable)(void); | ||
208 | void (*print_entries)(unsigned int apic, unsigned int nr_entries); | ||
209 | int (*set_affinity)(struct irq_data *data, | ||
210 | const struct cpumask *mask, | ||
211 | bool force); | ||
212 | int (*setup_entry)(int irq, struct IO_APIC_route_entry *entry, | ||
213 | unsigned int destination, int vector, | ||
214 | struct io_apic_irq_attr *attr); | ||
215 | void (*eoi_ioapic_pin)(int apic, int pin, int vector); | ||
197 | }; | 216 | }; |
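The IO-APIC ops follow the same pattern: callers always go through the global x86_io_apic table, so a remapping driver can swap out setup_entry/set_affinity wholesale. Reading a raw IO-APIC register through the vector, as a sketch (kernel context):

	static unsigned int ioapic_id_via_ops(unsigned int apic)
	{
		return x86_io_apic.read(apic, 0);	/* register 0 = IO-APIC ID */
	}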
198 | 217 | ||
199 | extern struct x86_init_ops x86_init; | 218 | extern struct x86_init_ops x86_init; |
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h index f8fde90bc45e..d8829751b3f8 100644 --- a/arch/x86/include/asm/xor.h +++ b/arch/x86/include/asm/xor.h | |||
@@ -1,10 +1,499 @@ | |||
1 | #ifdef CONFIG_KMEMCHECK | 1 | #ifdef CONFIG_KMEMCHECK |
2 | /* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */ | 2 | /* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */ |
3 | # include <asm-generic/xor.h> | 3 | # include <asm-generic/xor.h> |
4 | #elif !defined(_ASM_X86_XOR_H) | ||
5 | #define _ASM_X86_XOR_H | ||
6 | |||
7 | /* | ||
8 | * Optimized RAID-5 checksumming functions for SSE. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2, or (at your option) | ||
13 | * any later version. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * (for example /usr/src/linux/COPYING); if not, write to the Free | ||
17 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
18 | */ | ||
19 | |||
20 | /* | ||
21 | * Cache avoiding checksumming functions utilizing KNI instructions | ||
22 | * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) | ||
23 | */ | ||
24 | |||
25 | /* | ||
26 | * Based on | ||
27 | * High-speed RAID5 checksumming functions utilizing SSE instructions. | ||
28 | * Copyright (C) 1998 Ingo Molnar. | ||
29 | */ | ||
30 | |||
31 | /* | ||
32 | * x86-64 changes / gcc fixes from Andi Kleen. | ||
33 | * Copyright 2002 Andi Kleen, SuSE Labs. | ||
34 | * | ||
35 | * This hasn't been optimized for the hammer yet, but there are likely | ||
36 | * no advantages to be gotten from x86-64 here anyways. | ||
37 | */ | ||
38 | |||
39 | #include <asm/i387.h> | ||
40 | |||
41 | #ifdef CONFIG_X86_32 | ||
42 | /* reduce register pressure */ | ||
43 | # define XOR_CONSTANT_CONSTRAINT "i" | ||
4 | #else | 44 | #else |
45 | # define XOR_CONSTANT_CONSTRAINT "re" | ||
46 | #endif | ||
47 | |||
48 | #define OFFS(x) "16*("#x")" | ||
49 | #define PF_OFFS(x) "256+16*("#x")" | ||
50 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n" | ||
51 | #define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n" | ||
52 | #define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n" | ||
53 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n" | ||
54 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n" | ||
55 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n" | ||
56 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n" | ||
57 | #define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n" | ||
58 | #define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n" | ||
59 | #define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n" | ||
60 | #define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n" | ||
61 | #define NOP(x) | ||
62 | |||
63 | #define BLK64(pf, op, i) \ | ||
64 | pf(i) \ | ||
65 | op(i, 0) \ | ||
66 | op(i + 1, 1) \ | ||
67 | op(i + 2, 2) \ | ||
68 | op(i + 3, 3) | ||
69 | |||
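BLK64() covers one 64-byte run with a single prefetch-class op and four 16-byte xmm ops; the *_pf64 loops below are built entirely from it. Expanding the first instance mechanically from the macros above:

	/* BLK64(PF0, LD, 0) expands to: */
	" prefetchnta 256+16*(0)(%[p1]) ;\n"		/* PF0(0)       */
	" movaps 16*(0)(%[p1]), %%xmm0 ;\n"		/* LD(0, 0)     */
	" movaps 16*(0 + 1)(%[p1]), %%xmm1 ;\n"		/* LD(0 + 1, 1) */
	" movaps 16*(0 + 2)(%[p1]), %%xmm2 ;\n"		/* LD(0 + 2, 2) */
	" movaps 16*(0 + 3)(%[p1]), %%xmm3 ;\n"		/* LD(0 + 3, 3) */

i.e. one prefetchnta 256 bytes ahead plus four movaps loads covering bytes 0..63 of p1 into %xmm0-%xmm3.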
70 | static void | ||
71 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
72 | { | ||
73 | unsigned long lines = bytes >> 8; | ||
74 | |||
75 | kernel_fpu_begin(); | ||
76 | |||
77 | asm volatile( | ||
78 | #undef BLOCK | ||
79 | #define BLOCK(i) \ | ||
80 | LD(i, 0) \ | ||
81 | LD(i + 1, 1) \ | ||
82 | PF1(i) \ | ||
83 | PF1(i + 2) \ | ||
84 | LD(i + 2, 2) \ | ||
85 | LD(i + 3, 3) \ | ||
86 | PF0(i + 4) \ | ||
87 | PF0(i + 6) \ | ||
88 | XO1(i, 0) \ | ||
89 | XO1(i + 1, 1) \ | ||
90 | XO1(i + 2, 2) \ | ||
91 | XO1(i + 3, 3) \ | ||
92 | ST(i, 0) \ | ||
93 | ST(i + 1, 1) \ | ||
94 | ST(i + 2, 2) \ | ||
95 | ST(i + 3, 3) \ | ||
96 | |||
97 | |||
98 | PF0(0) | ||
99 | PF0(2) | ||
100 | |||
101 | " .align 32 ;\n" | ||
102 | " 1: ;\n" | ||
103 | |||
104 | BLOCK(0) | ||
105 | BLOCK(4) | ||
106 | BLOCK(8) | ||
107 | BLOCK(12) | ||
108 | |||
109 | " add %[inc], %[p1] ;\n" | ||
110 | " add %[inc], %[p2] ;\n" | ||
111 | " dec %[cnt] ;\n" | ||
112 | " jnz 1b ;\n" | ||
113 | : [cnt] "+r" (lines), | ||
114 | [p1] "+r" (p1), [p2] "+r" (p2) | ||
115 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
116 | : "memory"); | ||
117 | |||
118 | kernel_fpu_end(); | ||
119 | } | ||
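Net effect of each routine: p1 ^= p2 (^= p3 ...) over `bytes` bytes, 256 bytes per loop iteration; the asm only adds non-temporal prefetch and 16-byte loads/stores on top of that. A plain-C reference of what xor_sse_2() computes, handy for sanity checks:

	/* Scalar reference: p1 ^= p2 over 'bytes' bytes (word-multiple sizes). */
	static void xor_ref_2(unsigned long bytes, unsigned long *p1,
			      const unsigned long *p2)
	{
		unsigned long i, words = bytes / sizeof(unsigned long);

		for (i = 0; i < words; i++)
			p1[i] ^= p2[i];
	}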
120 | |||
121 | static void | ||
122 | xor_sse_2_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
123 | { | ||
124 | unsigned long lines = bytes >> 8; | ||
125 | |||
126 | kernel_fpu_begin(); | ||
127 | |||
128 | asm volatile( | ||
129 | #undef BLOCK | ||
130 | #define BLOCK(i) \ | ||
131 | BLK64(PF0, LD, i) \ | ||
132 | BLK64(PF1, XO1, i) \ | ||
133 | BLK64(NOP, ST, i) \ | ||
134 | |||
135 | " .align 32 ;\n" | ||
136 | " 1: ;\n" | ||
137 | |||
138 | BLOCK(0) | ||
139 | BLOCK(4) | ||
140 | BLOCK(8) | ||
141 | BLOCK(12) | ||
142 | |||
143 | " add %[inc], %[p1] ;\n" | ||
144 | " add %[inc], %[p2] ;\n" | ||
145 | " dec %[cnt] ;\n" | ||
146 | " jnz 1b ;\n" | ||
147 | : [cnt] "+r" (lines), | ||
148 | [p1] "+r" (p1), [p2] "+r" (p2) | ||
149 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
150 | : "memory"); | ||
151 | |||
152 | kernel_fpu_end(); | ||
153 | } | ||
154 | |||
155 | static void | ||
156 | xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
157 | unsigned long *p3) | ||
158 | { | ||
159 | unsigned long lines = bytes >> 8; | ||
160 | |||
161 | kernel_fpu_begin(); | ||
162 | |||
163 | asm volatile( | ||
164 | #undef BLOCK | ||
165 | #define BLOCK(i) \ | ||
166 | PF1(i) \ | ||
167 | PF1(i + 2) \ | ||
168 | LD(i, 0) \ | ||
169 | LD(i + 1, 1) \ | ||
170 | LD(i + 2, 2) \ | ||
171 | LD(i + 3, 3) \ | ||
172 | PF2(i) \ | ||
173 | PF2(i + 2) \ | ||
174 | PF0(i + 4) \ | ||
175 | PF0(i + 6) \ | ||
176 | XO1(i, 0) \ | ||
177 | XO1(i + 1, 1) \ | ||
178 | XO1(i + 2, 2) \ | ||
179 | XO1(i + 3, 3) \ | ||
180 | XO2(i, 0) \ | ||
181 | XO2(i + 1, 1) \ | ||
182 | XO2(i + 2, 2) \ | ||
183 | XO2(i + 3, 3) \ | ||
184 | ST(i, 0) \ | ||
185 | ST(i + 1, 1) \ | ||
186 | ST(i + 2, 2) \ | ||
187 | ST(i + 3, 3) \ | ||
188 | |||
189 | |||
190 | PF0(0) | ||
191 | PF0(2) | ||
192 | |||
193 | " .align 32 ;\n" | ||
194 | " 1: ;\n" | ||
195 | |||
196 | BLOCK(0) | ||
197 | BLOCK(4) | ||
198 | BLOCK(8) | ||
199 | BLOCK(12) | ||
200 | |||
201 | " add %[inc], %[p1] ;\n" | ||
202 | " add %[inc], %[p2] ;\n" | ||
203 | " add %[inc], %[p3] ;\n" | ||
204 | " dec %[cnt] ;\n" | ||
205 | " jnz 1b ;\n" | ||
206 | : [cnt] "+r" (lines), | ||
207 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) | ||
208 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
209 | : "memory"); | ||
210 | |||
211 | kernel_fpu_end(); | ||
212 | } | ||
213 | |||
214 | static void | ||
215 | xor_sse_3_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
216 | unsigned long *p3) | ||
217 | { | ||
218 | unsigned long lines = bytes >> 8; | ||
219 | |||
220 | kernel_fpu_begin(); | ||
221 | |||
222 | asm volatile( | ||
223 | #undef BLOCK | ||
224 | #define BLOCK(i) \ | ||
225 | BLK64(PF0, LD, i) \ | ||
226 | BLK64(PF1, XO1, i) \ | ||
227 | BLK64(PF2, XO2, i) \ | ||
228 | BLK64(NOP, ST, i) \ | ||
229 | |||
230 | " .align 32 ;\n" | ||
231 | " 1: ;\n" | ||
232 | |||
233 | BLOCK(0) | ||
234 | BLOCK(4) | ||
235 | BLOCK(8) | ||
236 | BLOCK(12) | ||
237 | |||
238 | " add %[inc], %[p1] ;\n" | ||
239 | " add %[inc], %[p2] ;\n" | ||
240 | " add %[inc], %[p3] ;\n" | ||
241 | " dec %[cnt] ;\n" | ||
242 | " jnz 1b ;\n" | ||
243 | : [cnt] "+r" (lines), | ||
244 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) | ||
245 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
246 | : "memory"); | ||
247 | |||
248 | kernel_fpu_end(); | ||
249 | } | ||
250 | |||
251 | static void | ||
252 | xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
253 | unsigned long *p3, unsigned long *p4) | ||
254 | { | ||
255 | unsigned long lines = bytes >> 8; | ||
256 | |||
257 | kernel_fpu_begin(); | ||
258 | |||
259 | asm volatile( | ||
260 | #undef BLOCK | ||
261 | #define BLOCK(i) \ | ||
262 | PF1(i) \ | ||
263 | PF1(i + 2) \ | ||
264 | LD(i, 0) \ | ||
265 | LD(i + 1, 1) \ | ||
266 | LD(i + 2, 2) \ | ||
267 | LD(i + 3, 3) \ | ||
268 | PF2(i) \ | ||
269 | PF2(i + 2) \ | ||
270 | XO1(i, 0) \ | ||
271 | XO1(i + 1, 1) \ | ||
272 | XO1(i + 2, 2) \ | ||
273 | XO1(i + 3, 3) \ | ||
274 | PF3(i) \ | ||
275 | PF3(i + 2) \ | ||
276 | PF0(i + 4) \ | ||
277 | PF0(i + 6) \ | ||
278 | XO2(i, 0) \ | ||
279 | XO2(i + 1, 1) \ | ||
280 | XO2(i + 2, 2) \ | ||
281 | XO2(i + 3, 3) \ | ||
282 | XO3(i, 0) \ | ||
283 | XO3(i + 1, 1) \ | ||
284 | XO3(i + 2, 2) \ | ||
285 | XO3(i + 3, 3) \ | ||
286 | ST(i, 0) \ | ||
287 | ST(i + 1, 1) \ | ||
288 | ST(i + 2, 2) \ | ||
289 | ST(i + 3, 3) \ | ||
290 | |||
291 | |||
292 | PF0(0) | ||
293 | PF0(2) | ||
294 | |||
295 | " .align 32 ;\n" | ||
296 | " 1: ;\n" | ||
297 | |||
298 | BLOCK(0) | ||
299 | BLOCK(4) | ||
300 | BLOCK(8) | ||
301 | BLOCK(12) | ||
302 | |||
303 | " add %[inc], %[p1] ;\n" | ||
304 | " add %[inc], %[p2] ;\n" | ||
305 | " add %[inc], %[p3] ;\n" | ||
306 | " add %[inc], %[p4] ;\n" | ||
307 | " dec %[cnt] ;\n" | ||
308 | " jnz 1b ;\n" | ||
309 | : [cnt] "+r" (lines), [p1] "+r" (p1), | ||
310 | [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) | ||
311 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
312 | : "memory"); | ||
313 | |||
314 | kernel_fpu_end(); | ||
315 | } | ||
316 | |||
317 | static void | ||
318 | xor_sse_4_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
319 | unsigned long *p3, unsigned long *p4) | ||
320 | { | ||
321 | unsigned long lines = bytes >> 8; | ||
322 | |||
323 | kernel_fpu_begin(); | ||
324 | |||
325 | asm volatile( | ||
326 | #undef BLOCK | ||
327 | #define BLOCK(i) \ | ||
328 | BLK64(PF0, LD, i) \ | ||
329 | BLK64(PF1, XO1, i) \ | ||
330 | BLK64(PF2, XO2, i) \ | ||
331 | BLK64(PF3, XO3, i) \ | ||
332 | BLK64(NOP, ST, i) \ | ||
333 | |||
334 | " .align 32 ;\n" | ||
335 | " 1: ;\n" | ||
336 | |||
337 | BLOCK(0) | ||
338 | BLOCK(4) | ||
339 | BLOCK(8) | ||
340 | BLOCK(12) | ||
341 | |||
342 | " add %[inc], %[p1] ;\n" | ||
343 | " add %[inc], %[p2] ;\n" | ||
344 | " add %[inc], %[p3] ;\n" | ||
345 | " add %[inc], %[p4] ;\n" | ||
346 | " dec %[cnt] ;\n" | ||
347 | " jnz 1b ;\n" | ||
348 | : [cnt] "+r" (lines), [p1] "+r" (p1), | ||
349 | [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) | ||
350 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
351 | : "memory"); | ||
352 | |||
353 | kernel_fpu_end(); | ||
354 | } | ||
355 | |||
356 | static void | ||
357 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
358 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
359 | { | ||
360 | unsigned long lines = bytes >> 8; | ||
361 | |||
362 | kernel_fpu_begin(); | ||
363 | |||
364 | asm volatile( | ||
365 | #undef BLOCK | ||
366 | #define BLOCK(i) \ | ||
367 | PF1(i) \ | ||
368 | PF1(i + 2) \ | ||
369 | LD(i, 0) \ | ||
370 | LD(i + 1, 1) \ | ||
371 | LD(i + 2, 2) \ | ||
372 | LD(i + 3, 3) \ | ||
373 | PF2(i) \ | ||
374 | PF2(i + 2) \ | ||
375 | XO1(i, 0) \ | ||
376 | XO1(i + 1, 1) \ | ||
377 | XO1(i + 2, 2) \ | ||
378 | XO1(i + 3, 3) \ | ||
379 | PF3(i) \ | ||
380 | PF3(i + 2) \ | ||
381 | XO2(i, 0) \ | ||
382 | XO2(i + 1, 1) \ | ||
383 | XO2(i + 2, 2) \ | ||
384 | XO2(i + 3, 3) \ | ||
385 | PF4(i) \ | ||
386 | PF4(i + 2) \ | ||
387 | PF0(i + 4) \ | ||
388 | PF0(i + 6) \ | ||
389 | XO3(i, 0) \ | ||
390 | XO3(i + 1, 1) \ | ||
391 | XO3(i + 2, 2) \ | ||
392 | XO3(i + 3, 3) \ | ||
393 | XO4(i, 0) \ | ||
394 | XO4(i + 1, 1) \ | ||
395 | XO4(i + 2, 2) \ | ||
396 | XO4(i + 3, 3) \ | ||
397 | ST(i, 0) \ | ||
398 | ST(i + 1, 1) \ | ||
399 | ST(i + 2, 2) \ | ||
400 | ST(i + 3, 3) \ | ||
401 | |||
402 | |||
403 | PF0(0) | ||
404 | PF0(2) | ||
405 | |||
406 | " .align 32 ;\n" | ||
407 | " 1: ;\n" | ||
408 | |||
409 | BLOCK(0) | ||
410 | BLOCK(4) | ||
411 | BLOCK(8) | ||
412 | BLOCK(12) | ||
413 | |||
414 | " add %[inc], %[p1] ;\n" | ||
415 | " add %[inc], %[p2] ;\n" | ||
416 | " add %[inc], %[p3] ;\n" | ||
417 | " add %[inc], %[p4] ;\n" | ||
418 | " add %[inc], %[p5] ;\n" | ||
419 | " dec %[cnt] ;\n" | ||
420 | " jnz 1b ;\n" | ||
421 | : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2), | ||
422 | [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5) | ||
423 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
424 | : "memory"); | ||
425 | |||
426 | kernel_fpu_end(); | ||
427 | } | ||
428 | |||
429 | static void | ||
430 | xor_sse_5_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
431 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
432 | { | ||
433 | unsigned long lines = bytes >> 8; | ||
434 | |||
435 | kernel_fpu_begin(); | ||
436 | |||
437 | asm volatile( | ||
438 | #undef BLOCK | ||
439 | #define BLOCK(i) \ | ||
440 | BLK64(PF0, LD, i) \ | ||
441 | BLK64(PF1, XO1, i) \ | ||
442 | BLK64(PF2, XO2, i) \ | ||
443 | BLK64(PF3, XO3, i) \ | ||
444 | BLK64(PF4, XO4, i) \ | ||
445 | BLK64(NOP, ST, i) \ | ||
446 | |||
447 | " .align 32 ;\n" | ||
448 | " 1: ;\n" | ||
449 | |||
450 | BLOCK(0) | ||
451 | BLOCK(4) | ||
452 | BLOCK(8) | ||
453 | BLOCK(12) | ||
454 | |||
455 | " add %[inc], %[p1] ;\n" | ||
456 | " add %[inc], %[p2] ;\n" | ||
457 | " add %[inc], %[p3] ;\n" | ||
458 | " add %[inc], %[p4] ;\n" | ||
459 | " add %[inc], %[p5] ;\n" | ||
460 | " dec %[cnt] ;\n" | ||
461 | " jnz 1b ;\n" | ||
462 | : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2), | ||
463 | [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5) | ||
464 | : [inc] XOR_CONSTANT_CONSTRAINT (256UL) | ||
465 | : "memory"); | ||
466 | |||
467 | kernel_fpu_end(); | ||
468 | } | ||
469 | |||
470 | static struct xor_block_template xor_block_sse_pf64 = { | ||
471 | .name = "prefetch64-sse", | ||
472 | .do_2 = xor_sse_2_pf64, | ||
473 | .do_3 = xor_sse_3_pf64, | ||
474 | .do_4 = xor_sse_4_pf64, | ||
475 | .do_5 = xor_sse_5_pf64, | ||
476 | }; | ||
477 | |||
478 | #undef LD | ||
479 | #undef XO1 | ||
480 | #undef XO2 | ||
481 | #undef XO3 | ||
482 | #undef XO4 | ||
483 | #undef ST | ||
484 | #undef NOP | ||
485 | #undef BLK64 | ||
486 | #undef BLOCK | ||
487 | |||
488 | #undef XOR_CONSTANT_CONSTRAINT | ||
489 | |||
5 | #ifdef CONFIG_X86_32 | 490 | #ifdef CONFIG_X86_32 |
6 | # include <asm/xor_32.h> | 491 | # include <asm/xor_32.h> |
7 | #else | 492 | #else |
8 | # include <asm/xor_64.h> | 493 | # include <asm/xor_64.h> |
9 | #endif | 494 | #endif |
10 | #endif | 495 | |
496 | #define XOR_SELECT_TEMPLATE(FASTEST) \ | ||
497 | AVX_SELECT(FASTEST) | ||
498 | |||
499 | #endif /* _ASM_X86_XOR_H */ | ||
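XOR_SELECT_TEMPLATE() now defers entirely to AVX_SELECT(): keep whatever measured fastest unless AVX is usable, in which case prefer the AVX template. Paraphrasing the <asm/xor_avx.h> definition (from memory, so treat as an assumption; the real macro also gates on OSXSAVE):

	/* Assumed shape of AVX_SELECT(); see <asm/xor_avx.h> for the real one. */
	#define AVX_SELECT(FASTEST) \
		(cpu_has_avx ? &xor_block_avx : FASTEST)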
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h index f79cb7ec0e06..ce05722e3c68 100644 --- a/arch/x86/include/asm/xor_32.h +++ b/arch/x86/include/asm/xor_32.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _ASM_X86_XOR_32_H | 2 | #define _ASM_X86_XOR_32_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Optimized RAID-5 checksumming functions for MMX and SSE. | 5 | * Optimized RAID-5 checksumming functions for MMX. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
@@ -529,290 +529,6 @@ static struct xor_block_template xor_block_p5_mmx = { | |||
529 | .do_5 = xor_p5_mmx_5, | 529 | .do_5 = xor_p5_mmx_5, |
530 | }; | 530 | }; |
531 | 531 | ||
532 | /* | ||
533 | * Cache avoiding checksumming functions utilizing KNI instructions | ||
534 | * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) | ||
535 | */ | ||
536 | |||
537 | #define OFFS(x) "16*("#x")" | ||
538 | #define PF_OFFS(x) "256+16*("#x")" | ||
539 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" | ||
540 | #define LD(x, y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" | ||
541 | #define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" | ||
542 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n" | ||
543 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n" | ||
544 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n" | ||
545 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n" | ||
546 | #define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n" | ||
547 | #define XO1(x, y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" | ||
548 | #define XO2(x, y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" | ||
549 | #define XO3(x, y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" | ||
550 | #define XO4(x, y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" | ||
551 | #define XO5(x, y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" | ||
552 | |||
553 | |||
554 | static void | ||
555 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
556 | { | ||
557 | unsigned long lines = bytes >> 8; | ||
558 | |||
559 | kernel_fpu_begin(); | ||
560 | |||
561 | asm volatile( | ||
562 | #undef BLOCK | ||
563 | #define BLOCK(i) \ | ||
564 | LD(i, 0) \ | ||
565 | LD(i + 1, 1) \ | ||
566 | PF1(i) \ | ||
567 | PF1(i + 2) \ | ||
568 | LD(i + 2, 2) \ | ||
569 | LD(i + 3, 3) \ | ||
570 | PF0(i + 4) \ | ||
571 | PF0(i + 6) \ | ||
572 | XO1(i, 0) \ | ||
573 | XO1(i + 1, 1) \ | ||
574 | XO1(i + 2, 2) \ | ||
575 | XO1(i + 3, 3) \ | ||
576 | ST(i, 0) \ | ||
577 | ST(i + 1, 1) \ | ||
578 | ST(i + 2, 2) \ | ||
579 | ST(i + 3, 3) \ | ||
580 | |||
581 | |||
582 | PF0(0) | ||
583 | PF0(2) | ||
584 | |||
585 | " .align 32 ;\n" | ||
586 | " 1: ;\n" | ||
587 | |||
588 | BLOCK(0) | ||
589 | BLOCK(4) | ||
590 | BLOCK(8) | ||
591 | BLOCK(12) | ||
592 | |||
593 | " addl $256, %1 ;\n" | ||
594 | " addl $256, %2 ;\n" | ||
595 | " decl %0 ;\n" | ||
596 | " jnz 1b ;\n" | ||
597 | : "+r" (lines), | ||
598 | "+r" (p1), "+r" (p2) | ||
599 | : | ||
600 | : "memory"); | ||
601 | |||
602 | kernel_fpu_end(); | ||
603 | } | ||
604 | |||
605 | static void | ||
606 | xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
607 | unsigned long *p3) | ||
608 | { | ||
609 | unsigned long lines = bytes >> 8; | ||
610 | |||
611 | kernel_fpu_begin(); | ||
612 | |||
613 | asm volatile( | ||
614 | #undef BLOCK | ||
615 | #define BLOCK(i) \ | ||
616 | PF1(i) \ | ||
617 | PF1(i + 2) \ | ||
618 | LD(i,0) \ | ||
619 | LD(i + 1, 1) \ | ||
620 | LD(i + 2, 2) \ | ||
621 | LD(i + 3, 3) \ | ||
622 | PF2(i) \ | ||
623 | PF2(i + 2) \ | ||
624 | PF0(i + 4) \ | ||
625 | PF0(i + 6) \ | ||
626 | XO1(i,0) \ | ||
627 | XO1(i + 1, 1) \ | ||
628 | XO1(i + 2, 2) \ | ||
629 | XO1(i + 3, 3) \ | ||
630 | XO2(i,0) \ | ||
631 | XO2(i + 1, 1) \ | ||
632 | XO2(i + 2, 2) \ | ||
633 | XO2(i + 3, 3) \ | ||
634 | ST(i,0) \ | ||
635 | ST(i + 1, 1) \ | ||
636 | ST(i + 2, 2) \ | ||
637 | ST(i + 3, 3) \ | ||
638 | |||
639 | |||
640 | PF0(0) | ||
641 | PF0(2) | ||
642 | |||
643 | " .align 32 ;\n" | ||
644 | " 1: ;\n" | ||
645 | |||
646 | BLOCK(0) | ||
647 | BLOCK(4) | ||
648 | BLOCK(8) | ||
649 | BLOCK(12) | ||
650 | |||
651 | " addl $256, %1 ;\n" | ||
652 | " addl $256, %2 ;\n" | ||
653 | " addl $256, %3 ;\n" | ||
654 | " decl %0 ;\n" | ||
655 | " jnz 1b ;\n" | ||
656 | : "+r" (lines), | ||
657 | "+r" (p1), "+r"(p2), "+r"(p3) | ||
658 | : | ||
659 | : "memory" ); | ||
660 | |||
661 | kernel_fpu_end(); | ||
662 | } | ||
663 | |||
664 | static void | ||
665 | xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
666 | unsigned long *p3, unsigned long *p4) | ||
667 | { | ||
668 | unsigned long lines = bytes >> 8; | ||
669 | |||
670 | kernel_fpu_begin(); | ||
671 | |||
672 | asm volatile( | ||
673 | #undef BLOCK | ||
674 | #define BLOCK(i) \ | ||
675 | PF1(i) \ | ||
676 | PF1(i + 2) \ | ||
677 | LD(i,0) \ | ||
678 | LD(i + 1, 1) \ | ||
679 | LD(i + 2, 2) \ | ||
680 | LD(i + 3, 3) \ | ||
681 | PF2(i) \ | ||
682 | PF2(i + 2) \ | ||
683 | XO1(i,0) \ | ||
684 | XO1(i + 1, 1) \ | ||
685 | XO1(i + 2, 2) \ | ||
686 | XO1(i + 3, 3) \ | ||
687 | PF3(i) \ | ||
688 | PF3(i + 2) \ | ||
689 | PF0(i + 4) \ | ||
690 | PF0(i + 6) \ | ||
691 | XO2(i,0) \ | ||
692 | XO2(i + 1, 1) \ | ||
693 | XO2(i + 2, 2) \ | ||
694 | XO2(i + 3, 3) \ | ||
695 | XO3(i,0) \ | ||
696 | XO3(i + 1, 1) \ | ||
697 | XO3(i + 2, 2) \ | ||
698 | XO3(i + 3, 3) \ | ||
699 | ST(i,0) \ | ||
700 | ST(i + 1, 1) \ | ||
701 | ST(i + 2, 2) \ | ||
702 | ST(i + 3, 3) \ | ||
703 | |||
704 | |||
705 | PF0(0) | ||
706 | PF0(2) | ||
707 | |||
708 | " .align 32 ;\n" | ||
709 | " 1: ;\n" | ||
710 | |||
711 | BLOCK(0) | ||
712 | BLOCK(4) | ||
713 | BLOCK(8) | ||
714 | BLOCK(12) | ||
715 | |||
716 | " addl $256, %1 ;\n" | ||
717 | " addl $256, %2 ;\n" | ||
718 | " addl $256, %3 ;\n" | ||
719 | " addl $256, %4 ;\n" | ||
720 | " decl %0 ;\n" | ||
721 | " jnz 1b ;\n" | ||
722 | : "+r" (lines), | ||
723 | "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) | ||
724 | : | ||
725 | : "memory" ); | ||
726 | |||
727 | kernel_fpu_end(); | ||
728 | } | ||
729 | |||
730 | static void | ||
731 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
732 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
733 | { | ||
734 | unsigned long lines = bytes >> 8; | ||
735 | |||
736 | kernel_fpu_begin(); | ||
737 | |||
738 | /* Make sure GCC forgets anything it knows about p4 or p5, | ||
739 | such that it won't pass to the asm volatile below a | ||
740 | register that is shared with any other variable. That's | ||
741 | because we modify p4 and p5 there, but we can't mark them | ||
742 | as read/write, otherwise we'd overflow the 10-asm-operands | ||
743 | limit of GCC < 3.1. */ | ||
744 | asm("" : "+r" (p4), "+r" (p5)); | ||
745 | |||
746 | asm volatile( | ||
747 | #undef BLOCK | ||
748 | #define BLOCK(i) \ | ||
749 | PF1(i) \ | ||
750 | PF1(i + 2) \ | ||
751 | LD(i,0) \ | ||
752 | LD(i + 1, 1) \ | ||
753 | LD(i + 2, 2) \ | ||
754 | LD(i + 3, 3) \ | ||
755 | PF2(i) \ | ||
756 | PF2(i + 2) \ | ||
757 | XO1(i,0) \ | ||
758 | XO1(i + 1, 1) \ | ||
759 | XO1(i + 2, 2) \ | ||
760 | XO1(i + 3, 3) \ | ||
761 | PF3(i) \ | ||
762 | PF3(i + 2) \ | ||
763 | XO2(i,0) \ | ||
764 | XO2(i + 1, 1) \ | ||
765 | XO2(i + 2, 2) \ | ||
766 | XO2(i + 3, 3) \ | ||
767 | PF4(i) \ | ||
768 | PF4(i + 2) \ | ||
769 | PF0(i + 4) \ | ||
770 | PF0(i + 6) \ | ||
771 | XO3(i,0) \ | ||
772 | XO3(i + 1, 1) \ | ||
773 | XO3(i + 2, 2) \ | ||
774 | XO3(i + 3, 3) \ | ||
775 | XO4(i,0) \ | ||
776 | XO4(i + 1, 1) \ | ||
777 | XO4(i + 2, 2) \ | ||
778 | XO4(i + 3, 3) \ | ||
779 | ST(i,0) \ | ||
780 | ST(i + 1, 1) \ | ||
781 | ST(i + 2, 2) \ | ||
782 | ST(i + 3, 3) \ | ||
783 | |||
784 | |||
785 | PF0(0) | ||
786 | PF0(2) | ||
787 | |||
788 | " .align 32 ;\n" | ||
789 | " 1: ;\n" | ||
790 | |||
791 | BLOCK(0) | ||
792 | BLOCK(4) | ||
793 | BLOCK(8) | ||
794 | BLOCK(12) | ||
795 | |||
796 | " addl $256, %1 ;\n" | ||
797 | " addl $256, %2 ;\n" | ||
798 | " addl $256, %3 ;\n" | ||
799 | " addl $256, %4 ;\n" | ||
800 | " addl $256, %5 ;\n" | ||
801 | " decl %0 ;\n" | ||
802 | " jnz 1b ;\n" | ||
803 | : "+r" (lines), | ||
804 | "+r" (p1), "+r" (p2), "+r" (p3) | ||
805 | : "r" (p4), "r" (p5) | ||
806 | : "memory"); | ||
807 | |||
808 | /* p4 and p5 were modified, and now the variables are dead. | ||
809 | Clobber them just to be sure nobody does something stupid | ||
810 | like assuming they have some legal value. */ | ||
811 | asm("" : "=r" (p4), "=r" (p5)); | ||
812 | |||
813 | kernel_fpu_end(); | ||
814 | } | ||
815 | |||
816 | static struct xor_block_template xor_block_pIII_sse = { | 532 | static struct xor_block_template xor_block_pIII_sse = { |
817 | .name = "pIII_sse", | 533 | .name = "pIII_sse", |
818 | .do_2 = xor_sse_2, | 534 | .do_2 = xor_sse_2, |
@@ -827,26 +543,25 @@ static struct xor_block_template xor_block_pIII_sse = { | |||
827 | /* Also try the generic routines. */ | 543 | /* Also try the generic routines. */ |
828 | #include <asm-generic/xor.h> | 544 | #include <asm-generic/xor.h> |
829 | 545 | ||
546 | /* We force the use of the SSE xor block because it can write around L2. | ||
547 | We may also be able to load into the L1 only depending on how the cpu | ||
548 | deals with a load to a line that is being prefetched. */ | ||
830 | #undef XOR_TRY_TEMPLATES | 549 | #undef XOR_TRY_TEMPLATES |
831 | #define XOR_TRY_TEMPLATES \ | 550 | #define XOR_TRY_TEMPLATES \ |
832 | do { \ | 551 | do { \ |
833 | xor_speed(&xor_block_8regs); \ | ||
834 | xor_speed(&xor_block_8regs_p); \ | ||
835 | xor_speed(&xor_block_32regs); \ | ||
836 | xor_speed(&xor_block_32regs_p); \ | ||
837 | AVX_XOR_SPEED; \ | 552 | AVX_XOR_SPEED; \ |
838 | if (cpu_has_xmm) \ | 553 | if (cpu_has_xmm) { \ |
839 | xor_speed(&xor_block_pIII_sse); \ | 554 | xor_speed(&xor_block_pIII_sse); \ |
840 | if (cpu_has_mmx) { \ | 555 | xor_speed(&xor_block_sse_pf64); \ |
556 | } else if (cpu_has_mmx) { \ | ||
841 | xor_speed(&xor_block_pII_mmx); \ | 557 | xor_speed(&xor_block_pII_mmx); \ |
842 | xor_speed(&xor_block_p5_mmx); \ | 558 | xor_speed(&xor_block_p5_mmx); \ |
559 | } else { \ | ||
560 | xor_speed(&xor_block_8regs); \ | ||
561 | xor_speed(&xor_block_8regs_p); \ | ||
562 | xor_speed(&xor_block_32regs); \ | ||
563 | xor_speed(&xor_block_32regs_p); \ | ||
843 | } \ | 564 | } \ |
844 | } while (0) | 565 | } while (0) |
845 | 566 | ||
846 | /* We force the use of the SSE xor block because it can write around L2. | ||
847 | We may also be able to load into the L1 only depending on how the cpu | ||
848 | deals with a load to a line that is being prefetched. */ | ||
849 | #define XOR_SELECT_TEMPLATE(FASTEST) \ | ||
850 | AVX_SELECT(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST) | ||
851 | |||
852 | #endif /* _ASM_X86_XOR_32_H */ | 567 | #endif /* _ASM_X86_XOR_32_H */ |
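The reworked XOR_TRY_TEMPLATES only benchmarks families the CPU can actually run: SSE boxes time pIII_sse against the new prefetch64-sse (plus AVX), MMX-only boxes time the two MMX loops, and everything else falls back to the portable register variants; the calibration code then keeps whichever xor_speed() measured fastest. A toy userspace version of that measure-and-pick loop (all names hypothetical):

	#include <stdio.h>
	#include <time.h>

	struct tmpl {
		const char *name;
		void (*do_2)(unsigned long, unsigned long *, unsigned long *);
	};

	static void xor_scalar(unsigned long bytes, unsigned long *a, unsigned long *b)
	{
		for (unsigned long i = 0; i < bytes / sizeof(*a); i++)
			a[i] ^= b[i];
	}

	static void xor_unroll4(unsigned long bytes, unsigned long *a, unsigned long *b)
	{
		for (unsigned long i = 0; i < bytes / sizeof(*a); i += 4) {
			a[i] ^= b[i];         a[i + 1] ^= b[i + 1];
			a[i + 2] ^= b[i + 2]; a[i + 3] ^= b[i + 3];
		}
	}

	static double seconds_for(struct tmpl *t)
	{
		static unsigned long a[512], b[512];	/* two 4 KiB buffers on LP64 */
		clock_t start = clock();

		for (int i = 0; i < 100000; i++)
			t->do_2(sizeof(a), a, b);
		return (double)(clock() - start) / CLOCKS_PER_SEC;
	}

	int main(void)
	{
		struct tmpl c[] = { { "scalar", xor_scalar }, { "unroll4", xor_unroll4 } };
		struct tmpl *fastest = &c[0];
		double best = seconds_for(&c[0]), t;

		for (unsigned i = 1; i < sizeof(c) / sizeof(c[0]); i++)
			if ((t = seconds_for(&c[i])) < best) {
				best = t;
				fastest = &c[i];
			}
		printf("fastest: %s (%.3fs)\n", fastest->name, best);
		return 0;
	}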
diff --git a/arch/x86/include/asm/xor_64.h b/arch/x86/include/asm/xor_64.h index 87ac522c4af5..546f1e3b87cc 100644 --- a/arch/x86/include/asm/xor_64.h +++ b/arch/x86/include/asm/xor_64.h | |||
@@ -1,301 +1,6 @@ | |||
1 | #ifndef _ASM_X86_XOR_64_H | 1 | #ifndef _ASM_X86_XOR_64_H |
2 | #define _ASM_X86_XOR_64_H | 2 | #define _ASM_X86_XOR_64_H |
3 | 3 | ||
4 | /* | ||
5 | * Optimized RAID-5 checksumming functions for MMX and SSE. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * (for example /usr/src/linux/COPYING); if not, write to the Free | ||
14 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
15 | */ | ||
16 | |||
17 | |||
18 | /* | ||
19 | * Cache avoiding checksumming functions utilizing KNI instructions | ||
20 | * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * Based on | ||
25 | * High-speed RAID5 checksumming functions utilizing SSE instructions. | ||
26 | * Copyright (C) 1998 Ingo Molnar. | ||
27 | */ | ||
28 | |||
29 | /* | ||
30 | * x86-64 changes / gcc fixes from Andi Kleen. | ||
31 | * Copyright 2002 Andi Kleen, SuSE Labs. | ||
32 | * | ||
33 | * This hasn't been optimized for the hammer yet, but there are likely | ||
34 | * no advantages to be gotten from x86-64 here anyways. | ||
35 | */ | ||
36 | |||
37 | #include <asm/i387.h> | ||
38 | |||
39 | #define OFFS(x) "16*("#x")" | ||
40 | #define PF_OFFS(x) "256+16*("#x")" | ||
41 | #define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n" | ||
42 | #define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n" | ||
43 | #define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n" | ||
44 | #define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n" | ||
45 | #define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n" | ||
46 | #define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n" | ||
47 | #define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n" | ||
48 | #define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n" | ||
49 | #define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n" | ||
50 | #define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n" | ||
51 | #define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n" | ||
52 | #define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n" | ||
53 | #define XO5(x, y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n" | ||
54 | |||
55 | |||
56 | static void | ||
57 | xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) | ||
58 | { | ||
59 | unsigned int lines = bytes >> 8; | ||
60 | |||
61 | kernel_fpu_begin(); | ||
62 | |||
63 | asm volatile( | ||
64 | #undef BLOCK | ||
65 | #define BLOCK(i) \ | ||
66 | LD(i, 0) \ | ||
67 | LD(i + 1, 1) \ | ||
68 | PF1(i) \ | ||
69 | PF1(i + 2) \ | ||
70 | LD(i + 2, 2) \ | ||
71 | LD(i + 3, 3) \ | ||
72 | PF0(i + 4) \ | ||
73 | PF0(i + 6) \ | ||
74 | XO1(i, 0) \ | ||
75 | XO1(i + 1, 1) \ | ||
76 | XO1(i + 2, 2) \ | ||
77 | XO1(i + 3, 3) \ | ||
78 | ST(i, 0) \ | ||
79 | ST(i + 1, 1) \ | ||
80 | ST(i + 2, 2) \ | ||
81 | ST(i + 3, 3) \ | ||
82 | |||
83 | |||
84 | PF0(0) | ||
85 | PF0(2) | ||
86 | |||
87 | " .align 32 ;\n" | ||
88 | " 1: ;\n" | ||
89 | |||
90 | BLOCK(0) | ||
91 | BLOCK(4) | ||
92 | BLOCK(8) | ||
93 | BLOCK(12) | ||
94 | |||
95 | " addq %[inc], %[p1] ;\n" | ||
96 | " addq %[inc], %[p2] ;\n" | ||
97 | " decl %[cnt] ; jnz 1b" | ||
98 | : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines) | ||
99 | : [inc] "r" (256UL) | ||
100 | : "memory"); | ||
101 | |||
102 | kernel_fpu_end(); | ||
103 | } | ||
104 | |||
105 | static void | ||
106 | xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
107 | unsigned long *p3) | ||
108 | { | ||
109 | unsigned int lines = bytes >> 8; | ||
110 | |||
111 | kernel_fpu_begin(); | ||
112 | asm volatile( | ||
113 | #undef BLOCK | ||
114 | #define BLOCK(i) \ | ||
115 | PF1(i) \ | ||
116 | PF1(i + 2) \ | ||
117 | LD(i, 0) \ | ||
118 | LD(i + 1, 1) \ | ||
119 | LD(i + 2, 2) \ | ||
120 | LD(i + 3, 3) \ | ||
121 | PF2(i) \ | ||
122 | PF2(i + 2) \ | ||
123 | PF0(i + 4) \ | ||
124 | PF0(i + 6) \ | ||
125 | XO1(i, 0) \ | ||
126 | XO1(i + 1, 1) \ | ||
127 | XO1(i + 2, 2) \ | ||
128 | XO1(i + 3, 3) \ | ||
129 | XO2(i, 0) \ | ||
130 | XO2(i + 1, 1) \ | ||
131 | XO2(i + 2, 2) \ | ||
132 | XO2(i + 3, 3) \ | ||
133 | ST(i, 0) \ | ||
134 | ST(i + 1, 1) \ | ||
135 | ST(i + 2, 2) \ | ||
136 | ST(i + 3, 3) \ | ||
137 | |||
138 | |||
139 | PF0(0) | ||
140 | PF0(2) | ||
141 | |||
142 | " .align 32 ;\n" | ||
143 | " 1: ;\n" | ||
144 | |||
145 | BLOCK(0) | ||
146 | BLOCK(4) | ||
147 | BLOCK(8) | ||
148 | BLOCK(12) | ||
149 | |||
150 | " addq %[inc], %[p1] ;\n" | ||
151 | " addq %[inc], %[p2] ;\n" | ||
152 | " addq %[inc], %[p3] ;\n" | ||
153 | " decl %[cnt] ; jnz 1b" | ||
154 | : [cnt] "+r" (lines), | ||
155 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) | ||
156 | : [inc] "r" (256UL) | ||
157 | : "memory"); | ||
158 | kernel_fpu_end(); | ||
159 | } | ||
160 | |||
161 | static void | ||
162 | xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
163 | unsigned long *p3, unsigned long *p4) | ||
164 | { | ||
165 | unsigned int lines = bytes >> 8; | ||
166 | |||
167 | kernel_fpu_begin(); | ||
168 | |||
169 | asm volatile( | ||
170 | #undef BLOCK | ||
171 | #define BLOCK(i) \ | ||
172 | PF1(i) \ | ||
173 | PF1(i + 2) \ | ||
174 | LD(i, 0) \ | ||
175 | LD(i + 1, 1) \ | ||
176 | LD(i + 2, 2) \ | ||
177 | LD(i + 3, 3) \ | ||
178 | PF2(i) \ | ||
179 | PF2(i + 2) \ | ||
180 | XO1(i, 0) \ | ||
181 | XO1(i + 1, 1) \ | ||
182 | XO1(i + 2, 2) \ | ||
183 | XO1(i + 3, 3) \ | ||
184 | PF3(i) \ | ||
185 | PF3(i + 2) \ | ||
186 | PF0(i + 4) \ | ||
187 | PF0(i + 6) \ | ||
188 | XO2(i, 0) \ | ||
189 | XO2(i + 1, 1) \ | ||
190 | XO2(i + 2, 2) \ | ||
191 | XO2(i + 3, 3) \ | ||
192 | XO3(i, 0) \ | ||
193 | XO3(i + 1, 1) \ | ||
194 | XO3(i + 2, 2) \ | ||
195 | XO3(i + 3, 3) \ | ||
196 | ST(i, 0) \ | ||
197 | ST(i + 1, 1) \ | ||
198 | ST(i + 2, 2) \ | ||
199 | ST(i + 3, 3) \ | ||
200 | |||
201 | |||
202 | PF0(0) | ||
203 | PF0(2) | ||
204 | |||
205 | " .align 32 ;\n" | ||
206 | " 1: ;\n" | ||
207 | |||
208 | BLOCK(0) | ||
209 | BLOCK(4) | ||
210 | BLOCK(8) | ||
211 | BLOCK(12) | ||
212 | |||
213 | " addq %[inc], %[p1] ;\n" | ||
214 | " addq %[inc], %[p2] ;\n" | ||
215 | " addq %[inc], %[p3] ;\n" | ||
216 | " addq %[inc], %[p4] ;\n" | ||
217 | " decl %[cnt] ; jnz 1b" | ||
218 | : [cnt] "+c" (lines), | ||
219 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4) | ||
220 | : [inc] "r" (256UL) | ||
221 | : "memory" ); | ||
222 | |||
223 | kernel_fpu_end(); | ||
224 | } | ||
225 | |||
226 | static void | ||
227 | xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, | ||
228 | unsigned long *p3, unsigned long *p4, unsigned long *p5) | ||
229 | { | ||
230 | unsigned int lines = bytes >> 8; | ||
231 | |||
232 | kernel_fpu_begin(); | ||
233 | |||
234 | asm volatile( | ||
235 | #undef BLOCK | ||
236 | #define BLOCK(i) \ | ||
237 | PF1(i) \ | ||
238 | PF1(i + 2) \ | ||
239 | LD(i, 0) \ | ||
240 | LD(i + 1, 1) \ | ||
241 | LD(i + 2, 2) \ | ||
242 | LD(i + 3, 3) \ | ||
243 | PF2(i) \ | ||
244 | PF2(i + 2) \ | ||
245 | XO1(i, 0) \ | ||
246 | XO1(i + 1, 1) \ | ||
247 | XO1(i + 2, 2) \ | ||
248 | XO1(i + 3, 3) \ | ||
249 | PF3(i) \ | ||
250 | PF3(i + 2) \ | ||
251 | XO2(i, 0) \ | ||
252 | XO2(i + 1, 1) \ | ||
253 | XO2(i + 2, 2) \ | ||
254 | XO2(i + 3, 3) \ | ||
255 | PF4(i) \ | ||
256 | PF4(i + 2) \ | ||
257 | PF0(i + 4) \ | ||
258 | PF0(i + 6) \ | ||
259 | XO3(i, 0) \ | ||
260 | XO3(i + 1, 1) \ | ||
261 | XO3(i + 2, 2) \ | ||
262 | XO3(i + 3, 3) \ | ||
263 | XO4(i, 0) \ | ||
264 | XO4(i + 1, 1) \ | ||
265 | XO4(i + 2, 2) \ | ||
266 | XO4(i + 3, 3) \ | ||
267 | ST(i, 0) \ | ||
268 | ST(i + 1, 1) \ | ||
269 | ST(i + 2, 2) \ | ||
270 | ST(i + 3, 3) \ | ||
271 | |||
272 | |||
273 | PF0(0) | ||
274 | PF0(2) | ||
275 | |||
276 | " .align 32 ;\n" | ||
277 | " 1: ;\n" | ||
278 | |||
279 | BLOCK(0) | ||
280 | BLOCK(4) | ||
281 | BLOCK(8) | ||
282 | BLOCK(12) | ||
283 | |||
284 | " addq %[inc], %[p1] ;\n" | ||
285 | " addq %[inc], %[p2] ;\n" | ||
286 | " addq %[inc], %[p3] ;\n" | ||
287 | " addq %[inc], %[p4] ;\n" | ||
288 | " addq %[inc], %[p5] ;\n" | ||
289 | " decl %[cnt] ; jnz 1b" | ||
290 | : [cnt] "+c" (lines), | ||
291 | [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), | ||
292 | [p5] "+r" (p5) | ||
293 | : [inc] "r" (256UL) | ||
294 | : "memory"); | ||
295 | |||
296 | kernel_fpu_end(); | ||
297 | } | ||
298 | |||
299 | static struct xor_block_template xor_block_sse = { | 4 | static struct xor_block_template xor_block_sse = { |
300 | .name = "generic_sse", | 5 | .name = "generic_sse", |
301 | .do_2 = xor_sse_2, | 6 | .do_2 = xor_sse_2, |
@@ -308,17 +13,15 @@ static struct xor_block_template xor_block_sse = { | |||
308 | /* Also try the AVX routines */ | 13 | /* Also try the AVX routines */ |
309 | #include <asm/xor_avx.h> | 14 | #include <asm/xor_avx.h> |
310 | 15 | ||
16 | /* We force the use of the SSE xor block because it can write around L2. | ||
17 | We may also be able to load into the L1 only depending on how the cpu | ||
18 | deals with a load to a line that is being prefetched. */ | ||
311 | #undef XOR_TRY_TEMPLATES | 19 | #undef XOR_TRY_TEMPLATES |
312 | #define XOR_TRY_TEMPLATES \ | 20 | #define XOR_TRY_TEMPLATES \ |
313 | do { \ | 21 | do { \ |
314 | AVX_XOR_SPEED; \ | 22 | AVX_XOR_SPEED; \ |
23 | xor_speed(&xor_block_sse_pf64); \ | ||
315 | xor_speed(&xor_block_sse); \ | 24 | xor_speed(&xor_block_sse); \ |
316 | } while (0) | 25 | } while (0) |
317 | 26 | ||
318 | /* We force the use of the SSE xor block because it can write around L2. | ||
319 | We may also be able to load into the L1 only depending on how the cpu | ||
320 | deals with a load to a line that is being prefetched. */ | ||
321 | #define XOR_SELECT_TEMPLATE(FASTEST) \ | ||
322 | AVX_SELECT(&xor_block_sse) | ||
323 | |||
324 | #endif /* _ASM_X86_XOR_64_H */ | 27 | #endif /* _ASM_X86_XOR_64_H */ |
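[Annotation] With the five xor_sse_* bodies deleted, xor_64.h keeps only the template table; the duplicated implementations are consolidated elsewhere in this series, presumably into a header shared with the 32-bit build, given that both files now delete near-identical copies. Semantically each routine is just a word-wise XOR of up to five source blocks into the first; a plain-C reference equivalent of xor_sse_5, minus the prefetchnta scheduling and cache-aware stores that the assembly exists for:

	/* Reference semantics only; assumes bytes is a multiple of
	 * sizeof(long), as the real callers guarantee (256-byte chunks). */
	static void xor_ref_5(unsigned long bytes, unsigned long *p1,
			      const unsigned long *p2, const unsigned long *p3,
			      const unsigned long *p4, const unsigned long *p5)
	{
		unsigned long i, words = bytes / sizeof(long);

		for (i = 0; i < words; i++)
			p1[i] ^= p2[i] ^ p3[i] ^ p4[i] ^ p5[i];
	}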
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index 433a59fb1a74..f26d2771846f 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h | |||
@@ -103,6 +103,8 @@ | |||
103 | #define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) | 103 | #define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) |
104 | #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) | 104 | #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) |
105 | 105 | ||
106 | #define MSR_IA32_POWER_CTL 0x000001fc | ||
107 | |||
106 | #define MSR_IA32_MC0_CTL 0x00000400 | 108 | #define MSR_IA32_MC0_CTL 0x00000400 |
107 | #define MSR_IA32_MC0_STATUS 0x00000401 | 109 | #define MSR_IA32_MC0_STATUS 0x00000401 |
108 | #define MSR_IA32_MC0_ADDR 0x00000402 | 110 | #define MSR_IA32_MC0_ADDR 0x00000402 |
@@ -194,6 +196,8 @@ | |||
194 | /* Fam 15h MSRs */ | 196 | /* Fam 15h MSRs */ |
195 | #define MSR_F15H_PERF_CTL 0xc0010200 | 197 | #define MSR_F15H_PERF_CTL 0xc0010200 |
196 | #define MSR_F15H_PERF_CTR 0xc0010201 | 198 | #define MSR_F15H_PERF_CTR 0xc0010201 |
199 | #define MSR_F15H_NB_PERF_CTL 0xc0010240 | ||
200 | #define MSR_F15H_NB_PERF_CTR 0xc0010241 | ||
197 | 201 | ||
198 | /* Fam 10h MSRs */ | 202 | /* Fam 10h MSRs */ |
199 | #define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 | 203 | #define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 |
@@ -272,6 +276,7 @@ | |||
272 | #define MSR_IA32_PLATFORM_ID 0x00000017 | 276 | #define MSR_IA32_PLATFORM_ID 0x00000017 |
273 | #define MSR_IA32_EBL_CR_POWERON 0x0000002a | 277 | #define MSR_IA32_EBL_CR_POWERON 0x0000002a |
274 | #define MSR_EBC_FREQUENCY_ID 0x0000002c | 278 | #define MSR_EBC_FREQUENCY_ID 0x0000002c |
279 | #define MSR_SMI_COUNT 0x00000034 | ||
275 | #define MSR_IA32_FEATURE_CONTROL 0x0000003a | 280 | #define MSR_IA32_FEATURE_CONTROL 0x0000003a |
276 | #define MSR_IA32_TSC_ADJUST 0x0000003b | 281 | #define MSR_IA32_TSC_ADJUST 0x0000003b |
277 | 282 | ||
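[Annotation] Of the new MSRs, MSR_SMI_COUNT (0x34) is the simplest to use: on CPUs that implement it, it is a free-running count of SMIs since reset. A minimal kernel-side sketch of reading it, with CPU-model gating omitted; rdmsrl_safe() would be the defensive choice on hardware that may lack the register:

	#include <asm/msr.h>

	static u64 read_smi_count(void)
	{
		u64 count;

		rdmsrl(MSR_SMI_COUNT, count);	/* faults if unsupported */
		return count;
	}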
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 34e923a53762..ac3b3d002833 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -65,8 +65,7 @@ obj-$(CONFIG_X86_TSC) += trace_clock.o | |||
65 | obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o | 65 | obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o |
66 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o | 66 | obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o |
67 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o | 67 | obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o |
68 | obj-$(CONFIG_KPROBES) += kprobes.o | 68 | obj-y += kprobes/ |
69 | obj-$(CONFIG_OPTPROBES) += kprobes-opt.o | ||
70 | obj-$(CONFIG_MODULES) += module.o | 69 | obj-$(CONFIG_MODULES) += module.o |
71 | obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o | 70 | obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o |
72 | obj-$(CONFIG_KGDB) += kgdb.o | 71 | obj-$(CONFIG_KGDB) += kgdb.o |
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index afdc3f756dea..c9876efecafb 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c | |||
@@ -240,7 +240,7 @@ static int apbt_cpuhp_notify(struct notifier_block *n, | |||
240 | dw_apb_clockevent_pause(adev->timer); | 240 | dw_apb_clockevent_pause(adev->timer); |
241 | if (system_state == SYSTEM_RUNNING) { | 241 | if (system_state == SYSTEM_RUNNING) { |
242 | pr_debug("skipping APBT CPU %lu offline\n", cpu); | 242 | pr_debug("skipping APBT CPU %lu offline\n", cpu); |
243 | } else if (adev) { | 243 | } else { |
244 | pr_debug("APBT clockevent for cpu %lu offline\n", cpu); | 244 | pr_debug("APBT clockevent for cpu %lu offline\n", cpu); |
245 | dw_apb_clockevent_stop(adev->timer); | 245 | dw_apb_clockevent_stop(adev->timer); |
246 | } | 246 | } |
@@ -311,7 +311,6 @@ void __init apbt_time_init(void) | |||
311 | #ifdef CONFIG_SMP | 311 | #ifdef CONFIG_SMP |
312 | int i; | 312 | int i; |
313 | struct sfi_timer_table_entry *p_mtmr; | 313 | struct sfi_timer_table_entry *p_mtmr; |
314 | unsigned int percpu_timer; | ||
315 | struct apbt_dev *adev; | 314 | struct apbt_dev *adev; |
316 | #endif | 315 | #endif |
317 | 316 | ||
@@ -346,13 +345,10 @@ void __init apbt_time_init(void) | |||
346 | return; | 345 | return; |
347 | } | 346 | } |
348 | pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus()); | 347 | pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus()); |
349 | if (num_possible_cpus() <= sfi_mtimer_num) { | 348 | if (num_possible_cpus() <= sfi_mtimer_num) |
350 | percpu_timer = 1; | ||
351 | apbt_num_timers_used = num_possible_cpus(); | 349 | apbt_num_timers_used = num_possible_cpus(); |
352 | } else { | 350 | else |
353 | percpu_timer = 0; | ||
354 | apbt_num_timers_used = 1; | 351 | apbt_num_timers_used = 1; |
355 | } | ||
356 | pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used); | 352 | pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used); |
357 | 353 | ||
358 | /* here we set up per CPU timer data structure */ | 354 | /* here we set up per CPU timer data structure */ |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index b994cc84aa7e..a5b4dce1b7ac 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -1477,8 +1477,7 @@ void __init bsp_end_local_APIC_setup(void) | |||
1477 | * Now that local APIC setup is completed for BP, configure the fault | 1477 | * Now that local APIC setup is completed for BP, configure the fault |
1478 | * handling for interrupt remapping. | 1478 | * handling for interrupt remapping. |
1479 | */ | 1479 | */ |
1480 | if (irq_remapping_enabled) | 1480 | irq_remap_enable_fault_handling(); |
1481 | irq_remap_enable_fault_handling(); | ||
1482 | 1481 | ||
1483 | } | 1482 | } |
1484 | 1483 | ||
@@ -2251,8 +2250,7 @@ static int lapic_suspend(void) | |||
2251 | local_irq_save(flags); | 2250 | local_irq_save(flags); |
2252 | disable_local_APIC(); | 2251 | disable_local_APIC(); |
2253 | 2252 | ||
2254 | if (irq_remapping_enabled) | 2253 | irq_remapping_disable(); |
2255 | irq_remapping_disable(); | ||
2256 | 2254 | ||
2257 | local_irq_restore(flags); | 2255 | local_irq_restore(flags); |
2258 | return 0; | 2256 | return 0; |
@@ -2268,16 +2266,15 @@ static void lapic_resume(void) | |||
2268 | return; | 2266 | return; |
2269 | 2267 | ||
2270 | local_irq_save(flags); | 2268 | local_irq_save(flags); |
2271 | if (irq_remapping_enabled) { | 2269 | |
2272 | /* | 2270 | /* |
2273 | * IO-APIC and PIC have their own resume routines. | 2271 | * IO-APIC and PIC have their own resume routines. |
2274 | * We just mask them here to make sure the interrupt | 2272 | * We just mask them here to make sure the interrupt |
2275 | * subsystem is completely quiet while we enable x2apic | 2273 | * subsystem is completely quiet while we enable x2apic |
2276 | * and interrupt-remapping. | 2274 | * and interrupt-remapping. |
2277 | */ | 2275 | */ |
2278 | mask_ioapic_entries(); | 2276 | mask_ioapic_entries(); |
2279 | legacy_pic->mask_all(); | 2277 | legacy_pic->mask_all(); |
2280 | } | ||
2281 | 2278 | ||
2282 | if (x2apic_mode) | 2279 | if (x2apic_mode) |
2283 | enable_x2apic(); | 2280 | enable_x2apic(); |
@@ -2320,8 +2317,7 @@ static void lapic_resume(void) | |||
2320 | apic_write(APIC_ESR, 0); | 2317 | apic_write(APIC_ESR, 0); |
2321 | apic_read(APIC_ESR); | 2318 | apic_read(APIC_ESR); |
2322 | 2319 | ||
2323 | if (irq_remapping_enabled) | 2320 | irq_remapping_reenable(x2apic_mode); |
2324 | irq_remapping_reenable(x2apic_mode); | ||
2325 | 2321 | ||
2326 | local_irq_restore(flags); | 2322 | local_irq_restore(flags); |
2327 | } | 2323 | } |
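[Annotation] The pattern across apic.c is uniform: every open-coded "if (irq_remapping_enabled)" guard is dropped because the irq_remapping_* entry points now carry the check themselves (and become stubs when CONFIG_IRQ_REMAP is off). Sketched, with the internals assumed:

	/* Assumed shape of the now self-guarding entry point. */
	void irq_remapping_disable(void)
	{
		if (!irq_remapping_enabled)	/* callers no longer test this */
			return;
		/* ... forward to the active remapping driver ... */
	}

This keeps the feature policy in one place; call sites such as lapic_suspend() shrink to a single unconditional call.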
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index b739d398bb29..9ed796ccc32c 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -68,22 +68,6 @@ | |||
68 | #define for_each_irq_pin(entry, head) \ | 68 | #define for_each_irq_pin(entry, head) \ |
69 | for (entry = head; entry; entry = entry->next) | 69 | for (entry = head; entry; entry = entry->next) |
70 | 70 | ||
71 | #ifdef CONFIG_IRQ_REMAP | ||
72 | static void irq_remap_modify_chip_defaults(struct irq_chip *chip); | ||
73 | static inline bool irq_remapped(struct irq_cfg *cfg) | ||
74 | { | ||
75 | return cfg->irq_2_iommu.iommu != NULL; | ||
76 | } | ||
77 | #else | ||
78 | static inline bool irq_remapped(struct irq_cfg *cfg) | ||
79 | { | ||
80 | return false; | ||
81 | } | ||
82 | static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip) | ||
83 | { | ||
84 | } | ||
85 | #endif | ||
86 | |||
87 | /* | 71 | /* |
88 | * Is the SiS APIC rmw bug present ? | 72 | * Is the SiS APIC rmw bug present ? |
89 | * -1 = don't know, 0 = no, 1 = yes | 73 | * -1 = don't know, 0 = no, 1 = yes |
@@ -300,9 +284,9 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) | |||
300 | return cfg; | 284 | return cfg; |
301 | } | 285 | } |
302 | 286 | ||
303 | static int alloc_irq_from(unsigned int from, int node) | 287 | static int alloc_irqs_from(unsigned int from, unsigned int count, int node) |
304 | { | 288 | { |
305 | return irq_alloc_desc_from(from, node); | 289 | return irq_alloc_descs_from(from, count, node); |
306 | } | 290 | } |
307 | 291 | ||
308 | static void free_irq_at(unsigned int at, struct irq_cfg *cfg) | 292 | static void free_irq_at(unsigned int at, struct irq_cfg *cfg) |
@@ -326,7 +310,7 @@ static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) | |||
326 | + (mpc_ioapic_addr(idx) & ~PAGE_MASK); | 310 | + (mpc_ioapic_addr(idx) & ~PAGE_MASK); |
327 | } | 311 | } |
328 | 312 | ||
329 | static inline void io_apic_eoi(unsigned int apic, unsigned int vector) | 313 | void io_apic_eoi(unsigned int apic, unsigned int vector) |
330 | { | 314 | { |
331 | struct io_apic __iomem *io_apic = io_apic_base(apic); | 315 | struct io_apic __iomem *io_apic = io_apic_base(apic); |
332 | writel(vector, &io_apic->eoi); | 316 | writel(vector, &io_apic->eoi); |
@@ -573,19 +557,10 @@ static void unmask_ioapic_irq(struct irq_data *data) | |||
573 | * Otherwise, we simulate the EOI message manually by changing the trigger | 557 | * Otherwise, we simulate the EOI message manually by changing the trigger |
574 | * mode to edge and then back to level, with RTE being masked during this. | 558 | * mode to edge and then back to level, with RTE being masked during this. |
575 | */ | 559 | */ |
576 | static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg) | 560 | void native_eoi_ioapic_pin(int apic, int pin, int vector) |
577 | { | 561 | { |
578 | if (mpc_ioapic_ver(apic) >= 0x20) { | 562 | if (mpc_ioapic_ver(apic) >= 0x20) { |
579 | /* | 563 | io_apic_eoi(apic, vector); |
580 | * Intr-remapping uses pin number as the virtual vector | ||
581 | * in the RTE. Actual vector is programmed in | ||
582 | * intr-remapping table entry. Hence for the io-apic | ||
583 | * EOI we use the pin number. | ||
584 | */ | ||
585 | if (cfg && irq_remapped(cfg)) | ||
586 | io_apic_eoi(apic, pin); | ||
587 | else | ||
588 | io_apic_eoi(apic, vector); | ||
589 | } else { | 564 | } else { |
590 | struct IO_APIC_route_entry entry, entry1; | 565 | struct IO_APIC_route_entry entry, entry1; |
591 | 566 | ||
@@ -606,14 +581,15 @@ static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg) | |||
606 | } | 581 | } |
607 | } | 582 | } |
608 | 583 | ||
609 | static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | 584 | void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) |
610 | { | 585 | { |
611 | struct irq_pin_list *entry; | 586 | struct irq_pin_list *entry; |
612 | unsigned long flags; | 587 | unsigned long flags; |
613 | 588 | ||
614 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 589 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
615 | for_each_irq_pin(entry, cfg->irq_2_pin) | 590 | for_each_irq_pin(entry, cfg->irq_2_pin) |
616 | __eoi_ioapic_pin(entry->apic, entry->pin, cfg->vector, cfg); | 591 | x86_io_apic_ops.eoi_ioapic_pin(entry->apic, entry->pin, |
592 | cfg->vector); | ||
617 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 593 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
618 | } | 594 | } |
619 | 595 | ||
@@ -650,7 +626,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) | |||
650 | } | 626 | } |
651 | 627 | ||
652 | raw_spin_lock_irqsave(&ioapic_lock, flags); | 628 | raw_spin_lock_irqsave(&ioapic_lock, flags); |
653 | __eoi_ioapic_pin(apic, pin, entry.vector, NULL); | 629 | x86_io_apic_ops.eoi_ioapic_pin(apic, pin, entry.vector); |
654 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); | 630 | raw_spin_unlock_irqrestore(&ioapic_lock, flags); |
655 | } | 631 | } |
656 | 632 | ||
@@ -1304,25 +1280,18 @@ static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg, | |||
1304 | fasteoi = false; | 1280 | fasteoi = false; |
1305 | } | 1281 | } |
1306 | 1282 | ||
1307 | if (irq_remapped(cfg)) { | 1283 | if (setup_remapped_irq(irq, cfg, chip)) |
1308 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | ||
1309 | irq_remap_modify_chip_defaults(chip); | ||
1310 | fasteoi = trigger != 0; | 1284 | fasteoi = trigger != 0; |
1311 | } | ||
1312 | 1285 | ||
1313 | hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq; | 1286 | hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq; |
1314 | irq_set_chip_and_handler_name(irq, chip, hdl, | 1287 | irq_set_chip_and_handler_name(irq, chip, hdl, |
1315 | fasteoi ? "fasteoi" : "edge"); | 1288 | fasteoi ? "fasteoi" : "edge"); |
1316 | } | 1289 | } |
1317 | 1290 | ||
1318 | static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, | 1291 | int native_setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, |
1319 | unsigned int destination, int vector, | 1292 | unsigned int destination, int vector, |
1320 | struct io_apic_irq_attr *attr) | 1293 | struct io_apic_irq_attr *attr) |
1321 | { | 1294 | { |
1322 | if (irq_remapping_enabled) | ||
1323 | return setup_ioapic_remapped_entry(irq, entry, destination, | ||
1324 | vector, attr); | ||
1325 | |||
1326 | memset(entry, 0, sizeof(*entry)); | 1295 | memset(entry, 0, sizeof(*entry)); |
1327 | 1296 | ||
1328 | entry->delivery_mode = apic->irq_delivery_mode; | 1297 | entry->delivery_mode = apic->irq_delivery_mode; |
@@ -1370,8 +1339,8 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg, | |||
1370 | attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin, | 1339 | attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin, |
1371 | cfg->vector, irq, attr->trigger, attr->polarity, dest); | 1340 | cfg->vector, irq, attr->trigger, attr->polarity, dest); |
1372 | 1341 | ||
1373 | if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) { | 1342 | if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) { |
1374 | pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n", | 1343 | pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n", |
1375 | mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); | 1344 | mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); |
1376 | __clear_irq_vector(irq, cfg); | 1345 | __clear_irq_vector(irq, cfg); |
1377 | 1346 | ||
@@ -1479,9 +1448,6 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx, | |||
1479 | struct IO_APIC_route_entry entry; | 1448 | struct IO_APIC_route_entry entry; |
1480 | unsigned int dest; | 1449 | unsigned int dest; |
1481 | 1450 | ||
1482 | if (irq_remapping_enabled) | ||
1483 | return; | ||
1484 | |||
1485 | memset(&entry, 0, sizeof(entry)); | 1451 | memset(&entry, 0, sizeof(entry)); |
1486 | 1452 | ||
1487 | /* | 1453 | /* |
@@ -1513,9 +1479,63 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx, | |||
1513 | ioapic_write_entry(ioapic_idx, pin, entry); | 1479 | ioapic_write_entry(ioapic_idx, pin, entry); |
1514 | } | 1480 | } |
1515 | 1481 | ||
1516 | __apicdebuginit(void) print_IO_APIC(int ioapic_idx) | 1482 | void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries) |
1517 | { | 1483 | { |
1518 | int i; | 1484 | int i; |
1485 | |||
1486 | pr_debug(" NR Dst Mask Trig IRR Pol Stat Dmod Deli Vect:\n"); | ||
1487 | |||
1488 | for (i = 0; i <= nr_entries; i++) { | ||
1489 | struct IO_APIC_route_entry entry; | ||
1490 | |||
1491 | entry = ioapic_read_entry(apic, i); | ||
1492 | |||
1493 | pr_debug(" %02x %02X ", i, entry.dest); | ||
1494 | pr_cont("%1d %1d %1d %1d %1d " | ||
1495 | "%1d %1d %02X\n", | ||
1496 | entry.mask, | ||
1497 | entry.trigger, | ||
1498 | entry.irr, | ||
1499 | entry.polarity, | ||
1500 | entry.delivery_status, | ||
1501 | entry.dest_mode, | ||
1502 | entry.delivery_mode, | ||
1503 | entry.vector); | ||
1504 | } | ||
1505 | } | ||
1506 | |||
1507 | void intel_ir_io_apic_print_entries(unsigned int apic, | ||
1508 | unsigned int nr_entries) | ||
1509 | { | ||
1510 | int i; | ||
1511 | |||
1512 | pr_debug(" NR Indx Fmt Mask Trig IRR Pol Stat Indx2 Zero Vect:\n"); | ||
1513 | |||
1514 | for (i = 0; i <= nr_entries; i++) { | ||
1515 | struct IR_IO_APIC_route_entry *ir_entry; | ||
1516 | struct IO_APIC_route_entry entry; | ||
1517 | |||
1518 | entry = ioapic_read_entry(apic, i); | ||
1519 | |||
1520 | ir_entry = (struct IR_IO_APIC_route_entry *)&entry; | ||
1521 | |||
1522 | pr_debug(" %02x %04X ", i, ir_entry->index); | ||
1523 | pr_cont("%1d %1d %1d %1d %1d " | ||
1524 | "%1d %1d %X %02X\n", | ||
1525 | ir_entry->format, | ||
1526 | ir_entry->mask, | ||
1527 | ir_entry->trigger, | ||
1528 | ir_entry->irr, | ||
1529 | ir_entry->polarity, | ||
1530 | ir_entry->delivery_status, | ||
1531 | ir_entry->index2, | ||
1532 | ir_entry->zero, | ||
1533 | ir_entry->vector); | ||
1534 | } | ||
1535 | } | ||
1536 | |||
1537 | __apicdebuginit(void) print_IO_APIC(int ioapic_idx) | ||
1538 | { | ||
1519 | union IO_APIC_reg_00 reg_00; | 1539 | union IO_APIC_reg_00 reg_00; |
1520 | union IO_APIC_reg_01 reg_01; | 1540 | union IO_APIC_reg_01 reg_01; |
1521 | union IO_APIC_reg_02 reg_02; | 1541 | union IO_APIC_reg_02 reg_02; |
@@ -1568,58 +1588,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx) | |||
1568 | 1588 | ||
1569 | printk(KERN_DEBUG ".... IRQ redirection table:\n"); | 1589 | printk(KERN_DEBUG ".... IRQ redirection table:\n"); |
1570 | 1590 | ||
1571 | if (irq_remapping_enabled) { | 1591 | x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries); |
1572 | printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR" | ||
1573 | " Pol Stat Indx2 Zero Vect:\n"); | ||
1574 | } else { | ||
1575 | printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol" | ||
1576 | " Stat Dmod Deli Vect:\n"); | ||
1577 | } | ||
1578 | |||
1579 | for (i = 0; i <= reg_01.bits.entries; i++) { | ||
1580 | if (irq_remapping_enabled) { | ||
1581 | struct IO_APIC_route_entry entry; | ||
1582 | struct IR_IO_APIC_route_entry *ir_entry; | ||
1583 | |||
1584 | entry = ioapic_read_entry(ioapic_idx, i); | ||
1585 | ir_entry = (struct IR_IO_APIC_route_entry *) &entry; | ||
1586 | printk(KERN_DEBUG " %02x %04X ", | ||
1587 | i, | ||
1588 | ir_entry->index | ||
1589 | ); | ||
1590 | pr_cont("%1d %1d %1d %1d %1d " | ||
1591 | "%1d %1d %X %02X\n", | ||
1592 | ir_entry->format, | ||
1593 | ir_entry->mask, | ||
1594 | ir_entry->trigger, | ||
1595 | ir_entry->irr, | ||
1596 | ir_entry->polarity, | ||
1597 | ir_entry->delivery_status, | ||
1598 | ir_entry->index2, | ||
1599 | ir_entry->zero, | ||
1600 | ir_entry->vector | ||
1601 | ); | ||
1602 | } else { | ||
1603 | struct IO_APIC_route_entry entry; | ||
1604 | |||
1605 | entry = ioapic_read_entry(ioapic_idx, i); | ||
1606 | printk(KERN_DEBUG " %02x %02X ", | ||
1607 | i, | ||
1608 | entry.dest | ||
1609 | ); | ||
1610 | pr_cont("%1d %1d %1d %1d %1d " | ||
1611 | "%1d %1d %02X\n", | ||
1612 | entry.mask, | ||
1613 | entry.trigger, | ||
1614 | entry.irr, | ||
1615 | entry.polarity, | ||
1616 | entry.delivery_status, | ||
1617 | entry.dest_mode, | ||
1618 | entry.delivery_mode, | ||
1619 | entry.vector | ||
1620 | ); | ||
1621 | } | ||
1622 | } | ||
1623 | } | 1592 | } |
1624 | 1593 | ||
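[Annotation] The dump logic removed from print_IO_APIC() is not lost: the native format moved into native_io_apic_print_entries() and the remapped format into intel_ir_io_apic_print_entries(), selected through the x86_io_apic_ops function table instead of a runtime flag test. A minimal sketch of that indirection; the struct layout is abbreviated and assumed, the real table lives in the x86_init code:

	/* Abbreviated ops table; only the hook used here is shown. */
	struct io_apic_ops_sketch {
		void (*print_entries)(unsigned int apic,
				      unsigned int nr_entries);
	};

	static struct io_apic_ops_sketch io_apic_ops = {
		.print_entries = native_io_apic_print_entries,
	};

	/* Interrupt-remapping setup retargets the hook once, at boot: */
	static void remap_override(void)
	{
		io_apic_ops.print_entries = intel_ir_io_apic_print_entries;
	}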
1625 | __apicdebuginit(void) print_IO_APICs(void) | 1594 | __apicdebuginit(void) print_IO_APICs(void) |
@@ -1921,30 +1890,14 @@ void __init enable_IO_APIC(void) | |||
1921 | clear_IO_APIC(); | 1890 | clear_IO_APIC(); |
1922 | } | 1891 | } |
1923 | 1892 | ||
1924 | /* | 1893 | void native_disable_io_apic(void) |
1925 | * Not an __init, needed by the reboot code | ||
1926 | */ | ||
1927 | void disable_IO_APIC(void) | ||
1928 | { | 1894 | { |
1929 | /* | 1895 | /* |
1930 | * Clear the IO-APIC before rebooting: | ||
1931 | */ | ||
1932 | clear_IO_APIC(); | ||
1933 | |||
1934 | if (!legacy_pic->nr_legacy_irqs) | ||
1935 | return; | ||
1936 | |||
1937 | /* | ||
1938 | * If the i8259 is routed through an IOAPIC | 1896 | * If the i8259 is routed through an IOAPIC |
1939 | * Put that IOAPIC in virtual wire mode | 1897 | * Put that IOAPIC in virtual wire mode |
1940 | * so legacy interrupts can be delivered. | 1898 | * so legacy interrupts can be delivered. |
1941 | * | ||
1942 | * With interrupt-remapping, for now we will use virtual wire A mode, | ||
1943 | * as virtual wire B is little complex (need to configure both | ||
1944 | * IOAPIC RTE as well as interrupt-remapping table entry). | ||
1945 | * As this gets called during crash dump, keep this simple for now. | ||
1946 | */ | 1899 | */ |
1947 | if (ioapic_i8259.pin != -1 && !irq_remapping_enabled) { | 1900 | if (ioapic_i8259.pin != -1) { |
1948 | struct IO_APIC_route_entry entry; | 1901 | struct IO_APIC_route_entry entry; |
1949 | 1902 | ||
1950 | memset(&entry, 0, sizeof(entry)); | 1903 | memset(&entry, 0, sizeof(entry)); |
@@ -1964,12 +1917,25 @@ void disable_IO_APIC(void) | |||
1964 | ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); | 1917 | ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); |
1965 | } | 1918 | } |
1966 | 1919 | ||
1920 | if (cpu_has_apic || apic_from_smp_config()) | ||
1921 | disconnect_bsp_APIC(ioapic_i8259.pin != -1); | ||
1922 | |||
1923 | } | ||
1924 | |||
1925 | /* | ||
1926 | * Not an __init, needed by the reboot code | ||
1927 | */ | ||
1928 | void disable_IO_APIC(void) | ||
1929 | { | ||
1967 | /* | 1930 | /* |
1968 | * Use virtual wire A mode when interrupt remapping is enabled. | 1931 | * Clear the IO-APIC before rebooting: |
1969 | */ | 1932 | */ |
1970 | if (cpu_has_apic || apic_from_smp_config()) | 1933 | clear_IO_APIC(); |
1971 | disconnect_bsp_APIC(!irq_remapping_enabled && | 1934 | |
1972 | ioapic_i8259.pin != -1); | 1935 | if (!legacy_pic->nr_legacy_irqs) |
1936 | return; | ||
1937 | |||
1938 | x86_io_apic_ops.disable(); | ||
1973 | } | 1939 | } |
1974 | 1940 | ||
1975 | #ifdef CONFIG_X86_32 | 1941 | #ifdef CONFIG_X86_32 |
@@ -2322,12 +2288,8 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
2322 | 2288 | ||
2323 | apic = entry->apic; | 2289 | apic = entry->apic; |
2324 | pin = entry->pin; | 2290 | pin = entry->pin; |
2325 | /* | 2291 | |
2326 | * With interrupt-remapping, destination information comes | 2292 | io_apic_write(apic, 0x11 + pin*2, dest); |
2327 | * from interrupt-remapping table entry. | ||
2328 | */ | ||
2329 | if (!irq_remapped(cfg)) | ||
2330 | io_apic_write(apic, 0x11 + pin*2, dest); | ||
2331 | reg = io_apic_read(apic, 0x10 + pin*2); | 2293 | reg = io_apic_read(apic, 0x10 + pin*2); |
2332 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; | 2294 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; |
2333 | reg |= vector; | 2295 | reg |= vector; |
@@ -2369,9 +2331,10 @@ int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, | |||
2369 | return 0; | 2331 | return 0; |
2370 | } | 2332 | } |
2371 | 2333 | ||
2372 | static int | 2334 | |
2373 | ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, | 2335 | int native_ioapic_set_affinity(struct irq_data *data, |
2374 | bool force) | 2336 | const struct cpumask *mask, |
2337 | bool force) | ||
2375 | { | 2338 | { |
2376 | unsigned int dest, irq = data->irq; | 2339 | unsigned int dest, irq = data->irq; |
2377 | unsigned long flags; | 2340 | unsigned long flags; |
@@ -2548,33 +2511,6 @@ static void ack_apic_level(struct irq_data *data) | |||
2548 | ioapic_irqd_unmask(data, cfg, masked); | 2511 | ioapic_irqd_unmask(data, cfg, masked); |
2549 | } | 2512 | } |
2550 | 2513 | ||
2551 | #ifdef CONFIG_IRQ_REMAP | ||
2552 | static void ir_ack_apic_edge(struct irq_data *data) | ||
2553 | { | ||
2554 | ack_APIC_irq(); | ||
2555 | } | ||
2556 | |||
2557 | static void ir_ack_apic_level(struct irq_data *data) | ||
2558 | { | ||
2559 | ack_APIC_irq(); | ||
2560 | eoi_ioapic_irq(data->irq, data->chip_data); | ||
2561 | } | ||
2562 | |||
2563 | static void ir_print_prefix(struct irq_data *data, struct seq_file *p) | ||
2564 | { | ||
2565 | seq_printf(p, " IR-%s", data->chip->name); | ||
2566 | } | ||
2567 | |||
2568 | static void irq_remap_modify_chip_defaults(struct irq_chip *chip) | ||
2569 | { | ||
2570 | chip->irq_print_chip = ir_print_prefix; | ||
2571 | chip->irq_ack = ir_ack_apic_edge; | ||
2572 | chip->irq_eoi = ir_ack_apic_level; | ||
2573 | |||
2574 | chip->irq_set_affinity = set_remapped_irq_affinity; | ||
2575 | } | ||
2576 | #endif /* CONFIG_IRQ_REMAP */ | ||
2577 | |||
2578 | static struct irq_chip ioapic_chip __read_mostly = { | 2514 | static struct irq_chip ioapic_chip __read_mostly = { |
2579 | .name = "IO-APIC", | 2515 | .name = "IO-APIC", |
2580 | .irq_startup = startup_ioapic_irq, | 2516 | .irq_startup = startup_ioapic_irq, |
@@ -2582,7 +2518,7 @@ static struct irq_chip ioapic_chip __read_mostly = { | |||
2582 | .irq_unmask = unmask_ioapic_irq, | 2518 | .irq_unmask = unmask_ioapic_irq, |
2583 | .irq_ack = ack_apic_edge, | 2519 | .irq_ack = ack_apic_edge, |
2584 | .irq_eoi = ack_apic_level, | 2520 | .irq_eoi = ack_apic_level, |
2585 | .irq_set_affinity = ioapic_set_affinity, | 2521 | .irq_set_affinity = native_ioapic_set_affinity, |
2586 | .irq_retrigger = ioapic_retrigger_irq, | 2522 | .irq_retrigger = ioapic_retrigger_irq, |
2587 | }; | 2523 | }; |
2588 | 2524 | ||
@@ -2781,8 +2717,7 @@ static inline void __init check_timer(void) | |||
2781 | * 8259A. | 2717 | * 8259A. |
2782 | */ | 2718 | */ |
2783 | if (pin1 == -1) { | 2719 | if (pin1 == -1) { |
2784 | if (irq_remapping_enabled) | 2720 | panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC"); |
2785 | panic("BIOS bug: timer not connected to IO-APIC"); | ||
2786 | pin1 = pin2; | 2721 | pin1 = pin2; |
2787 | apic1 = apic2; | 2722 | apic1 = apic2; |
2788 | no_pin1 = 1; | 2723 | no_pin1 = 1; |
@@ -2814,8 +2749,7 @@ static inline void __init check_timer(void) | |||
2814 | clear_IO_APIC_pin(0, pin1); | 2749 | clear_IO_APIC_pin(0, pin1); |
2815 | goto out; | 2750 | goto out; |
2816 | } | 2751 | } |
2817 | if (irq_remapping_enabled) | 2752 | panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC"); |
2818 | panic("timer doesn't work through Interrupt-remapped IO-APIC"); | ||
2819 | local_irq_disable(); | 2753 | local_irq_disable(); |
2820 | clear_IO_APIC_pin(apic1, pin1); | 2754 | clear_IO_APIC_pin(apic1, pin1); |
2821 | if (!no_pin1) | 2755 | if (!no_pin1) |
@@ -2982,37 +2916,58 @@ device_initcall(ioapic_init_ops); | |||
2982 | /* | 2916 | /* |
2983 | * Dynamic irq allocate and deallocation | 2917 | * Dynamic irq allocate and deallocation |
2984 | */ | 2918 | */ |
2985 | unsigned int create_irq_nr(unsigned int from, int node) | 2919 | unsigned int __create_irqs(unsigned int from, unsigned int count, int node) |
2986 | { | 2920 | { |
2987 | struct irq_cfg *cfg; | 2921 | struct irq_cfg **cfg; |
2988 | unsigned long flags; | 2922 | unsigned long flags; |
2989 | unsigned int ret = 0; | 2923 | int irq, i; |
2990 | int irq; | ||
2991 | 2924 | ||
2992 | if (from < nr_irqs_gsi) | 2925 | if (from < nr_irqs_gsi) |
2993 | from = nr_irqs_gsi; | 2926 | from = nr_irqs_gsi; |
2994 | 2927 | ||
2995 | irq = alloc_irq_from(from, node); | 2928 | cfg = kzalloc_node(count * sizeof(cfg[0]), GFP_KERNEL, node); |
2996 | if (irq < 0) | 2929 | if (!cfg) |
2997 | return 0; | ||
2998 | cfg = alloc_irq_cfg(irq, node); | ||
2999 | if (!cfg) { | ||
3000 | free_irq_at(irq, NULL); | ||
3001 | return 0; | 2930 | return 0; |
2931 | |||
2932 | irq = alloc_irqs_from(from, count, node); | ||
2933 | if (irq < 0) | ||
2934 | goto out_cfgs; | ||
2935 | |||
2936 | for (i = 0; i < count; i++) { | ||
2937 | cfg[i] = alloc_irq_cfg(irq + i, node); | ||
2938 | if (!cfg[i]) | ||
2939 | goto out_irqs; | ||
3002 | } | 2940 | } |
3003 | 2941 | ||
3004 | raw_spin_lock_irqsave(&vector_lock, flags); | 2942 | raw_spin_lock_irqsave(&vector_lock, flags); |
3005 | if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) | 2943 | for (i = 0; i < count; i++) |
3006 | ret = irq; | 2944 | if (__assign_irq_vector(irq + i, cfg[i], apic->target_cpus())) |
2945 | goto out_vecs; | ||
3007 | raw_spin_unlock_irqrestore(&vector_lock, flags); | 2946 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
3008 | 2947 | ||
3009 | if (ret) { | 2948 | for (i = 0; i < count; i++) { |
3010 | irq_set_chip_data(irq, cfg); | 2949 | irq_set_chip_data(irq + i, cfg[i]); |
3011 | irq_clear_status_flags(irq, IRQ_NOREQUEST); | 2950 | irq_clear_status_flags(irq + i, IRQ_NOREQUEST); |
3012 | } else { | ||
3013 | free_irq_at(irq, cfg); | ||
3014 | } | 2951 | } |
3015 | return ret; | 2952 | |
2953 | kfree(cfg); | ||
2954 | return irq; | ||
2955 | |||
2956 | out_vecs: | ||
2957 | for (i--; i >= 0; i--) | ||
2958 | __clear_irq_vector(irq + i, cfg[i]); | ||
2959 | raw_spin_unlock_irqrestore(&vector_lock, flags); | ||
2960 | out_irqs: | ||
2961 | for (i = 0; i < count; i++) | ||
2962 | free_irq_at(irq + i, cfg[i]); | ||
2963 | out_cfgs: | ||
2964 | kfree(cfg); | ||
2965 | return 0; | ||
2966 | } | ||
2967 | |||
2968 | unsigned int create_irq_nr(unsigned int from, int node) | ||
2969 | { | ||
2970 | return __create_irqs(from, 1, node); | ||
3016 | } | 2971 | } |
3017 | 2972 | ||
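[Annotation] __create_irqs() follows the standard goto-unwind idiom: each failure label releases exactly what the steps before it acquired, in reverse order, so a partially built block of cfg pointers, descriptors, and vectors never leaks. A self-contained illustration of the idiom (user-space stand-ins, not the kernel helpers):

	#include <stdlib.h>

	/* Each label undoes only the work that succeeded before it. */
	static int acquire_block(int count, void ***out)
	{
		void **res;
		int i;

		res = calloc(count, sizeof(*res));	/* step 1: table */
		if (!res)
			return -1;

		for (i = 0; i < count; i++) {
			res[i] = malloc(16);		/* step 2: per item */
			if (!res[i])
				goto out_items;
		}
		*out = res;
		return 0;

	out_items:
		while (--i >= 0)			/* unwind partial work */
			free(res[i]);
		free(res);
		return -1;
	}

Note the "for (i--; i >= 0; i--)" rollback in the real code: only the vectors that were actually assigned are cleared, and the unwind happens before vector_lock is dropped.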
3018 | int create_irq(void) | 2973 | int create_irq(void) |
@@ -3037,48 +2992,35 @@ void destroy_irq(unsigned int irq) | |||
3037 | 2992 | ||
3038 | irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); | 2993 | irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); |
3039 | 2994 | ||
3040 | if (irq_remapped(cfg)) | 2995 | free_remapped_irq(irq); |
3041 | free_remapped_irq(irq); | 2996 | |
3042 | raw_spin_lock_irqsave(&vector_lock, flags); | 2997 | raw_spin_lock_irqsave(&vector_lock, flags); |
3043 | __clear_irq_vector(irq, cfg); | 2998 | __clear_irq_vector(irq, cfg); |
3044 | raw_spin_unlock_irqrestore(&vector_lock, flags); | 2999 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
3045 | free_irq_at(irq, cfg); | 3000 | free_irq_at(irq, cfg); |
3046 | } | 3001 | } |
3047 | 3002 | ||
3003 | void destroy_irqs(unsigned int irq, unsigned int count) | ||
3004 | { | ||
3005 | unsigned int i; | ||
3006 | |||
3007 | for (i = 0; i < count; i++) | ||
3008 | destroy_irq(irq + i); | ||
3009 | } | ||
3010 | |||
3048 | /* | 3011 | /* |
3049 | * MSI message composition | 3012 | * MSI message composition |
3050 | */ | 3013 | */ |
3051 | #ifdef CONFIG_PCI_MSI | 3014 | void native_compose_msi_msg(struct pci_dev *pdev, |
3052 | static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | 3015 | unsigned int irq, unsigned int dest, |
3053 | struct msi_msg *msg, u8 hpet_id) | 3016 | struct msi_msg *msg, u8 hpet_id) |
3054 | { | 3017 | { |
3055 | struct irq_cfg *cfg; | 3018 | struct irq_cfg *cfg = irq_cfg(irq); |
3056 | int err; | ||
3057 | unsigned dest; | ||
3058 | |||
3059 | if (disable_apic) | ||
3060 | return -ENXIO; | ||
3061 | |||
3062 | cfg = irq_cfg(irq); | ||
3063 | err = assign_irq_vector(irq, cfg, apic->target_cpus()); | ||
3064 | if (err) | ||
3065 | return err; | ||
3066 | 3019 | ||
3067 | err = apic->cpu_mask_to_apicid_and(cfg->domain, | 3020 | msg->address_hi = MSI_ADDR_BASE_HI; |
3068 | apic->target_cpus(), &dest); | ||
3069 | if (err) | ||
3070 | return err; | ||
3071 | |||
3072 | if (irq_remapped(cfg)) { | ||
3073 | compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id); | ||
3074 | return err; | ||
3075 | } | ||
3076 | 3021 | ||
3077 | if (x2apic_enabled()) | 3022 | if (x2apic_enabled()) |
3078 | msg->address_hi = MSI_ADDR_BASE_HI | | 3023 | msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest); |
3079 | MSI_ADDR_EXT_DEST_ID(dest); | ||
3080 | else | ||
3081 | msg->address_hi = MSI_ADDR_BASE_HI; | ||
3082 | 3024 | ||
3083 | msg->address_lo = | 3025 | msg->address_lo = |
3084 | MSI_ADDR_BASE_LO | | 3026 | MSI_ADDR_BASE_LO | |
@@ -3097,8 +3039,32 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | |||
3097 | MSI_DATA_DELIVERY_FIXED: | 3039 | MSI_DATA_DELIVERY_FIXED: |
3098 | MSI_DATA_DELIVERY_LOWPRI) | | 3040 | MSI_DATA_DELIVERY_LOWPRI) | |
3099 | MSI_DATA_VECTOR(cfg->vector); | 3041 | MSI_DATA_VECTOR(cfg->vector); |
3042 | } | ||
3100 | 3043 | ||
3101 | return err; | 3044 | #ifdef CONFIG_PCI_MSI |
3045 | static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | ||
3046 | struct msi_msg *msg, u8 hpet_id) | ||
3047 | { | ||
3048 | struct irq_cfg *cfg; | ||
3049 | int err; | ||
3050 | unsigned dest; | ||
3051 | |||
3052 | if (disable_apic) | ||
3053 | return -ENXIO; | ||
3054 | |||
3055 | cfg = irq_cfg(irq); | ||
3056 | err = assign_irq_vector(irq, cfg, apic->target_cpus()); | ||
3057 | if (err) | ||
3058 | return err; | ||
3059 | |||
3060 | err = apic->cpu_mask_to_apicid_and(cfg->domain, | ||
3061 | apic->target_cpus(), &dest); | ||
3062 | if (err) | ||
3063 | return err; | ||
3064 | |||
3065 | x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id); | ||
3066 | |||
3067 | return 0; | ||
3102 | } | 3068 | } |
3103 | 3069 | ||
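[Annotation] For reference, the message native_compose_msi_msg() assembles follows the fixed x86 MSI layout: address bits 31:20 are the 0xFEE APIC window, the destination APIC ID sits in address bits 19:12, and the vector in data bits 7:0. A worked example with illustrative values:

	/* Physical destination, APIC ID 0x05, vector 0x31, fixed/edge. */
	unsigned int dest = 0x05, vector = 0x31;
	u32 address_lo = 0xfee00000 | (dest << 12);	/* == 0xfee05000 */
	u32 data = vector;				/* == 0x00000031 */

With x2APIC, destination IDs can exceed 8 bits, which is why the extended bits go into address_hi via MSI_ADDR_EXT_DEST_ID() in the code above.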
3104 | static int | 3070 | static int |
@@ -3136,23 +3102,28 @@ static struct irq_chip msi_chip = { | |||
3136 | .irq_retrigger = ioapic_retrigger_irq, | 3102 | .irq_retrigger = ioapic_retrigger_irq, |
3137 | }; | 3103 | }; |
3138 | 3104 | ||
3139 | static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | 3105 | int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, |
3106 | unsigned int irq_base, unsigned int irq_offset) | ||
3140 | { | 3107 | { |
3141 | struct irq_chip *chip = &msi_chip; | 3108 | struct irq_chip *chip = &msi_chip; |
3142 | struct msi_msg msg; | 3109 | struct msi_msg msg; |
3110 | unsigned int irq = irq_base + irq_offset; | ||
3143 | int ret; | 3111 | int ret; |
3144 | 3112 | ||
3145 | ret = msi_compose_msg(dev, irq, &msg, -1); | 3113 | ret = msi_compose_msg(dev, irq, &msg, -1); |
3146 | if (ret < 0) | 3114 | if (ret < 0) |
3147 | return ret; | 3115 | return ret; |
3148 | 3116 | ||
3149 | irq_set_msi_desc(irq, msidesc); | 3117 | irq_set_msi_desc_off(irq_base, irq_offset, msidesc); |
3150 | write_msi_msg(irq, &msg); | ||
3151 | 3118 | ||
3152 | if (irq_remapped(irq_get_chip_data(irq))) { | 3119 | /* |
3153 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 3120 | * MSI-X message is written per-IRQ, the offset is always 0. |
3154 | irq_remap_modify_chip_defaults(chip); | 3121 | * MSI message denotes a contiguous group of IRQs, written for 0th IRQ. |
3155 | } | 3122 | */ |
3123 | if (!irq_offset) | ||
3124 | write_msi_msg(irq, &msg); | ||
3125 | |||
3126 | setup_remapped_irq(irq, irq_get_chip_data(irq), chip); | ||
3156 | 3127 | ||
3157 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | 3128 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); |
3158 | 3129 | ||
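[Annotation] The irq_base/irq_offset split anticipates multi-MSI: a device gets one message covering a power-of-two block of vectors, the message is written once for entry 0, and the device derives vector N by adding N to the base data. A hypothetical caller shape; the function name and signature are real per this diff, but the loop itself is illustrative, since the actual multi-MSI wiring lands with the remapping support:

	/* Hypothetical: one msi_desc spanning nvec consecutive IRQs. */
	static int setup_multi_msi(struct pci_dev *dev, struct msi_desc *desc,
				   unsigned int irq_base, int nvec)
	{
		int i, ret;

		for (i = 0; i < nvec; i++) {
			ret = setup_msi_irq(dev, desc, irq_base, i);
			if (ret < 0)
				return ret;	/* msg written only for i == 0 */
		}
		return 0;
	}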
@@ -3163,46 +3134,26 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) | |||
3163 | 3134 | ||
3164 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | 3135 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
3165 | { | 3136 | { |
3166 | int node, ret, sub_handle, index = 0; | ||
3167 | unsigned int irq, irq_want; | 3137 | unsigned int irq, irq_want; |
3168 | struct msi_desc *msidesc; | 3138 | struct msi_desc *msidesc; |
3139 | int node, ret; | ||
3169 | 3140 | ||
3170 | /* x86 doesn't support multiple MSI yet */ | 3141 | /* Multiple MSI vectors only supported with interrupt remapping */ |
3171 | if (type == PCI_CAP_ID_MSI && nvec > 1) | 3142 | if (type == PCI_CAP_ID_MSI && nvec > 1) |
3172 | return 1; | 3143 | return 1; |
3173 | 3144 | ||
3174 | node = dev_to_node(&dev->dev); | 3145 | node = dev_to_node(&dev->dev); |
3175 | irq_want = nr_irqs_gsi; | 3146 | irq_want = nr_irqs_gsi; |
3176 | sub_handle = 0; | ||
3177 | list_for_each_entry(msidesc, &dev->msi_list, list) { | 3147 | list_for_each_entry(msidesc, &dev->msi_list, list) { |
3178 | irq = create_irq_nr(irq_want, node); | 3148 | irq = create_irq_nr(irq_want, node); |
3179 | if (irq == 0) | 3149 | if (irq == 0) |
3180 | return -1; | 3150 | return -ENOSPC; |
3151 | |||
3181 | irq_want = irq + 1; | 3152 | irq_want = irq + 1; |
3182 | if (!irq_remapping_enabled) | ||
3183 | goto no_ir; | ||
3184 | 3153 | ||
3185 | if (!sub_handle) { | 3154 | ret = setup_msi_irq(dev, msidesc, irq, 0); |
3186 | /* | ||
3187 | * allocate the consecutive block of IRTE's | ||
3188 | * for 'nvec' | ||
3189 | */ | ||
3190 | index = msi_alloc_remapped_irq(dev, irq, nvec); | ||
3191 | if (index < 0) { | ||
3192 | ret = index; | ||
3193 | goto error; | ||
3194 | } | ||
3195 | } else { | ||
3196 | ret = msi_setup_remapped_irq(dev, irq, index, | ||
3197 | sub_handle); | ||
3198 | if (ret < 0) | ||
3199 | goto error; | ||
3200 | } | ||
3201 | no_ir: | ||
3202 | ret = setup_msi_irq(dev, msidesc, irq); | ||
3203 | if (ret < 0) | 3155 | if (ret < 0) |
3204 | goto error; | 3156 | goto error; |
3205 | sub_handle++; | ||
3206 | } | 3157 | } |
3207 | return 0; | 3158 | return 0; |
3208 | 3159 | ||
@@ -3298,26 +3249,19 @@ static struct irq_chip hpet_msi_type = { | |||
3298 | .irq_retrigger = ioapic_retrigger_irq, | 3249 | .irq_retrigger = ioapic_retrigger_irq, |
3299 | }; | 3250 | }; |
3300 | 3251 | ||
3301 | int arch_setup_hpet_msi(unsigned int irq, unsigned int id) | 3252 | int default_setup_hpet_msi(unsigned int irq, unsigned int id) |
3302 | { | 3253 | { |
3303 | struct irq_chip *chip = &hpet_msi_type; | 3254 | struct irq_chip *chip = &hpet_msi_type; |
3304 | struct msi_msg msg; | 3255 | struct msi_msg msg; |
3305 | int ret; | 3256 | int ret; |
3306 | 3257 | ||
3307 | if (irq_remapping_enabled) { | ||
3308 | ret = setup_hpet_msi_remapped(irq, id); | ||
3309 | if (ret) | ||
3310 | return ret; | ||
3311 | } | ||
3312 | |||
3313 | ret = msi_compose_msg(NULL, irq, &msg, id); | 3258 | ret = msi_compose_msg(NULL, irq, &msg, id); |
3314 | if (ret < 0) | 3259 | if (ret < 0) |
3315 | return ret; | 3260 | return ret; |
3316 | 3261 | ||
3317 | hpet_msi_write(irq_get_handler_data(irq), &msg); | 3262 | hpet_msi_write(irq_get_handler_data(irq), &msg); |
3318 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); | 3263 | irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); |
3319 | if (irq_remapped(irq_get_chip_data(irq))) | 3264 | setup_remapped_irq(irq, irq_get_chip_data(irq), chip); |
3320 | irq_remap_modify_chip_defaults(chip); | ||
3321 | 3265 | ||
3322 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); | 3266 | irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); |
3323 | return 0; | 3267 | return 0; |
@@ -3683,10 +3627,7 @@ void __init setup_ioapic_dest(void) | |||
3683 | else | 3627 | else |
3684 | mask = apic->target_cpus(); | 3628 | mask = apic->target_cpus(); |
3685 | 3629 | ||
3686 | if (irq_remapping_enabled) | 3630 | x86_io_apic_ops.set_affinity(idata, mask, false); |
3687 | set_remapped_irq_affinity(idata, mask, false); | ||
3688 | else | ||
3689 | ioapic_set_affinity(idata, mask, false); | ||
3690 | } | 3631 | } |
3691 | 3632 | ||
3692 | } | 3633 | } |
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index cce91bf26676..7434d8556d09 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c | |||
@@ -106,7 +106,7 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector) | |||
106 | unsigned long mask = cpumask_bits(cpumask)[0]; | 106 | unsigned long mask = cpumask_bits(cpumask)[0]; |
107 | unsigned long flags; | 107 | unsigned long flags; |
108 | 108 | ||
109 | if (WARN_ONCE(!mask, "empty IPI mask")) | 109 | if (!mask) |
110 | return; | 110 | return; |
111 | 111 | ||
112 | local_irq_save(flags); | 112 | local_irq_save(flags); |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 8cfade9510a4..794f6eb54cd3 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * SGI UV APIC functions (note: not an Intel compatible APIC) | 6 | * SGI UV APIC functions (note: not an Intel compatible APIC) |
7 | * | 7 | * |
8 | * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. | 8 | * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved. |
9 | */ | 9 | */ |
10 | #include <linux/cpumask.h> | 10 | #include <linux/cpumask.h> |
11 | #include <linux/hardirq.h> | 11 | #include <linux/hardirq.h> |
@@ -91,10 +91,16 @@ static int __init early_get_pnodeid(void) | |||
91 | m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR); | 91 | m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR); |
92 | uv_min_hub_revision_id = node_id.s.revision; | 92 | uv_min_hub_revision_id = node_id.s.revision; |
93 | 93 | ||
94 | if (node_id.s.part_number == UV2_HUB_PART_NUMBER) | 94 | switch (node_id.s.part_number) { |
95 | uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1; | 95 | case UV2_HUB_PART_NUMBER: |
96 | if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X) | 96 | case UV2_HUB_PART_NUMBER_X: |
97 | uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1; | 97 | uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1; |
98 | break; | ||
99 | case UV3_HUB_PART_NUMBER: | ||
100 | case UV3_HUB_PART_NUMBER_X: | ||
101 | uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1; | ||
102 | break; | ||
103 | } | ||
98 | 104 | ||
99 | uv_hub_info->hub_revision = uv_min_hub_revision_id; | 105 | uv_hub_info->hub_revision = uv_min_hub_revision_id; |
100 | pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1); | 106 | pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1); |
@@ -130,13 +136,16 @@ static void __init uv_set_apicid_hibit(void) | |||
130 | 136 | ||
131 | static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 137 | static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) |
132 | { | 138 | { |
133 | int pnodeid, is_uv1, is_uv2; | 139 | int pnodeid, is_uv1, is_uv2, is_uv3; |
134 | 140 | ||
135 | is_uv1 = !strcmp(oem_id, "SGI"); | 141 | is_uv1 = !strcmp(oem_id, "SGI"); |
136 | is_uv2 = !strcmp(oem_id, "SGI2"); | 142 | is_uv2 = !strcmp(oem_id, "SGI2"); |
137 | if (is_uv1 || is_uv2) { | 143 | is_uv3 = !strncmp(oem_id, "SGI3", 4); /* there are varieties of UV3 */ |
144 | if (is_uv1 || is_uv2 || is_uv3) { | ||
138 | uv_hub_info->hub_revision = | 145 | uv_hub_info->hub_revision = |
139 | is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE; | 146 | (is_uv1 ? UV1_HUB_REVISION_BASE : |
147 | (is_uv2 ? UV2_HUB_REVISION_BASE : | ||
148 | UV3_HUB_REVISION_BASE)); | ||
140 | pnodeid = early_get_pnodeid(); | 149 | pnodeid = early_get_pnodeid(); |
141 | early_get_apic_pnode_shift(); | 150 | early_get_apic_pnode_shift(); |
142 | x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range; | 151 | x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range; |
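[Annotation] Stacking the revision bases this way keeps the existing hub predicates working unchanged, since they only range-check hub_revision. Roughly (a sketch of the tests in asm/uv/uv_hub.h; the exact form there may differ):

	static inline int is_uv2_hub(void)
	{
		return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE &&
		       uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE;
	}

	static inline int is_uv3_hub(void)
	{
		return uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE;
	}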
@@ -450,14 +459,17 @@ static __init void map_high(char *id, unsigned long base, int pshift, | |||
450 | 459 | ||
451 | paddr = base << pshift; | 460 | paddr = base << pshift; |
452 | bytes = (1UL << bshift) * (max_pnode + 1); | 461 | bytes = (1UL << bshift) * (max_pnode + 1); |
453 | printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, | 462 | if (!paddr) { |
454 | paddr + bytes); | 463 | pr_info("UV: Map %s_HI base address NULL\n", id); |
464 | return; | ||
465 | } | ||
466 | pr_info("UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, paddr + bytes); | ||
455 | if (map_type == map_uc) | 467 | if (map_type == map_uc) |
456 | init_extra_mapping_uc(paddr, bytes); | 468 | init_extra_mapping_uc(paddr, bytes); |
457 | else | 469 | else |
458 | init_extra_mapping_wb(paddr, bytes); | 470 | init_extra_mapping_wb(paddr, bytes); |
459 | |||
460 | } | 471 | } |
472 | |||
461 | static __init void map_gru_high(int max_pnode) | 473 | static __init void map_gru_high(int max_pnode) |
462 | { | 474 | { |
463 | union uvh_rh_gam_gru_overlay_config_mmr_u gru; | 475 | union uvh_rh_gam_gru_overlay_config_mmr_u gru; |
@@ -468,7 +480,8 @@ static __init void map_gru_high(int max_pnode) | |||
468 | map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb); | 480 | map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb); |
469 | gru_start_paddr = ((u64)gru.s.base << shift); | 481 | gru_start_paddr = ((u64)gru.s.base << shift); |
470 | gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1); | 482 | gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1); |
471 | 483 | } else { | |
484 | pr_info("UV: GRU disabled\n"); | ||
472 | } | 485 | } |
473 | } | 486 | } |
474 | 487 | ||
@@ -480,23 +493,146 @@ static __init void map_mmr_high(int max_pnode) | |||
480 | mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); | 493 | mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); |
481 | if (mmr.s.enable) | 494 | if (mmr.s.enable) |
482 | map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc); | 495 | map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc); |
496 | else | ||
497 | pr_info("UV: MMR disabled\n"); | ||
498 | } | ||
499 | |||
500 | /* | ||
501 | * This commonality works because both 0 & 1 versions of the MMIOH OVERLAY | ||
502 | * and REDIRECT MMR regs are exactly the same on UV3. | ||
503 | */ | ||
504 | struct mmioh_config { | ||
505 | unsigned long overlay; | ||
506 | unsigned long redirect; | ||
507 | char *id; | ||
508 | }; | ||
509 | |||
510 | static __initdata struct mmioh_config mmiohs[] = { | ||
511 | { | ||
512 | UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR, | ||
513 | UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR, | ||
514 | "MMIOH0" | ||
515 | }, | ||
516 | { | ||
517 | UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR, | ||
518 | UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR, | ||
519 | "MMIOH1" | ||
520 | }, | ||
521 | }; | ||
522 | |||
523 | static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode) | ||
524 | { | ||
525 | union uv3h_rh_gam_mmioh_overlay_config0_mmr_u overlay; | ||
526 | unsigned long mmr; | ||
527 | unsigned long base; | ||
528 | int i, n, shift, m_io, max_io; | ||
529 | int nasid, lnasid, fi, li; | ||
530 | char *id; | ||
531 | |||
532 | id = mmiohs[index].id; | ||
533 | overlay.v = uv_read_local_mmr(mmiohs[index].overlay); | ||
534 | pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n", | ||
535 | id, overlay.v, overlay.s3.base, overlay.s3.m_io); | ||
536 | if (!overlay.s3.enable) { | ||
537 | pr_info("UV: %s disabled\n", id); | ||
538 | return; | ||
539 | } | ||
540 | |||
541 | shift = UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT; | ||
542 | base = (unsigned long)overlay.s3.base; | ||
543 | m_io = overlay.s3.m_io; | ||
544 | mmr = mmiohs[index].redirect; | ||
545 | n = UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH; | ||
546 | min_pnode *= 2; /* convert to NASID */ | ||
547 | max_pnode *= 2; | ||
548 | max_io = lnasid = fi = li = -1; | ||
549 | |||
550 | for (i = 0; i < n; i++) { | ||
551 | union uv3h_rh_gam_mmioh_redirect_config0_mmr_u redirect; | ||
552 | |||
553 | redirect.v = uv_read_local_mmr(mmr + i * 8); | ||
554 | nasid = redirect.s3.nasid; | ||
555 | if (nasid < min_pnode || max_pnode < nasid) | ||
556 | nasid = -1; /* invalid NASID */ | ||
557 | |||
558 | if (nasid == lnasid) { | ||
559 | li = i; | ||
560 | if (i != n-1) /* last entry check */ | ||
561 | continue; | ||
562 | } | ||
563 | |||
564 | /* check if we have a cached (or last) redirect to print */ | ||
565 | if (lnasid != -1 || (i == n-1 && nasid != -1)) { | ||
566 | unsigned long addr1, addr2; | ||
567 | int f, l; | ||
568 | |||
569 | if (lnasid == -1) { | ||
570 | f = l = i; | ||
571 | lnasid = nasid; | ||
572 | } else { | ||
573 | f = fi; | ||
574 | l = li; | ||
575 | } | ||
576 | addr1 = (base << shift) + | ||
577 | f * (unsigned long)(1 << m_io); | ||
578 | addr2 = (base << shift) + | ||
579 | (l + 1) * (unsigned long)(1 << m_io); | ||
580 | pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n", | ||
581 | id, fi, li, lnasid, addr1, addr2); | ||
582 | if (max_io < l) | ||
583 | max_io = l; | ||
584 | } | ||
585 | fi = li = i; | ||
586 | lnasid = nasid; | ||
587 | } | ||
588 | |||
589 | pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n", | ||
590 | id, base, shift, m_io, max_io); | ||
591 | |||
592 | if (max_io >= 0) | ||
593 | map_high(id, base, shift, m_io, max_io, map_uc); | ||
483 | } | 594 | } |
484 | 595 | ||
485 | static __init void map_mmioh_high(int max_pnode) | 596 | static __init void map_mmioh_high(int min_pnode, int max_pnode) |
486 | { | 597 | { |
487 | union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; | 598 | union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; |
488 | int shift; | 599 | unsigned long mmr, base; |
600 | int shift, enable, m_io, n_io; | ||
489 | 601 | ||
490 | mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); | 602 | if (is_uv3_hub()) { |
491 | if (is_uv1_hub() && mmioh.s1.enable) { | 603 | /* Map both MMIOH Regions */ |
492 | shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT; | 604 | map_mmioh_high_uv3(0, min_pnode, max_pnode); |
493 | map_high("MMIOH", mmioh.s1.base, shift, mmioh.s1.m_io, | 605 | map_mmioh_high_uv3(1, min_pnode, max_pnode); |
494 | max_pnode, map_uc); | 606 | return; |
495 | } | 607 | } |
496 | if (is_uv2_hub() && mmioh.s2.enable) { | 608 | |
609 | if (is_uv1_hub()) { | ||
610 | mmr = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR; | ||
611 | shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT; | ||
612 | mmioh.v = uv_read_local_mmr(mmr); | ||
613 | enable = !!mmioh.s1.enable; | ||
614 | base = mmioh.s1.base; | ||
615 | m_io = mmioh.s1.m_io; | ||
616 | n_io = mmioh.s1.n_io; | ||
617 | } else if (is_uv2_hub()) { | ||
618 | mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR; | ||
497 | shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT; | 619 | shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT; |
498 | map_high("MMIOH", mmioh.s2.base, shift, mmioh.s2.m_io, | 620 | mmioh.v = uv_read_local_mmr(mmr); |
499 | max_pnode, map_uc); | 621 | enable = !!mmioh.s2.enable; |
622 | base = mmioh.s2.base; | ||
623 | m_io = mmioh.s2.m_io; | ||
624 | n_io = mmioh.s2.n_io; | ||
625 | } else | ||
626 | return; | ||
627 | |||
628 | if (enable) { | ||
629 | max_pnode &= (1 << n_io) - 1; | ||
630 | pr_info( | ||
631 | "UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n", | ||
632 | base, shift, n_io, m_io, max_pnode); | ||
633 | map_high("MMIOH", base, shift, m_io, max_pnode, map_uc); | ||
634 | } else { | ||
635 | pr_info("UV: MMIOH disabled\n"); | ||
500 | } | 636 | } |
501 | } | 637 | } |
502 | 638 | ||
@@ -724,42 +860,41 @@ void uv_nmi_init(void) | |||
724 | void __init uv_system_init(void) | 860 | void __init uv_system_init(void) |
725 | { | 861 | { |
726 | union uvh_rh_gam_config_mmr_u m_n_config; | 862 | union uvh_rh_gam_config_mmr_u m_n_config; |
727 | union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; | ||
728 | union uvh_node_id_u node_id; | 863 | union uvh_node_id_u node_id; |
729 | unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; | 864 | unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; |
730 | int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val, n_io; | 865 | int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val; |
731 | int gnode_extra, max_pnode = 0; | 866 | int gnode_extra, min_pnode = 999999, max_pnode = -1; |
732 | unsigned long mmr_base, present, paddr; | 867 | unsigned long mmr_base, present, paddr; |
733 | unsigned short pnode_mask, pnode_io_mask; | 868 | unsigned short pnode_mask; |
869 | char *hub = (is_uv1_hub() ? "UV1" : | ||
870 | (is_uv2_hub() ? "UV2" : | ||
871 | "UV3")); | ||
734 | 872 | ||
735 | printk(KERN_INFO "UV: Found %s hub\n", is_uv1_hub() ? "UV1" : "UV2"); | 873 | pr_info("UV: Found %s hub\n", hub); |
736 | map_low_mmrs(); | 874 | map_low_mmrs(); |
737 | 875 | ||
738 | m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR ); | 876 | m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR ); |
739 | m_val = m_n_config.s.m_skt; | 877 | m_val = m_n_config.s.m_skt; |
740 | n_val = m_n_config.s.n_skt; | 878 | n_val = m_n_config.s.n_skt; |
741 | mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); | 879 | pnode_mask = (1 << n_val) - 1; |
742 | n_io = is_uv1_hub() ? mmioh.s1.n_io : mmioh.s2.n_io; | ||
743 | mmr_base = | 880 | mmr_base = |
744 | uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & | 881 | uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & |
745 | ~UV_MMR_ENABLE; | 882 | ~UV_MMR_ENABLE; |
746 | pnode_mask = (1 << n_val) - 1; | ||
747 | pnode_io_mask = (1 << n_io) - 1; | ||
748 | 883 | ||
749 | node_id.v = uv_read_local_mmr(UVH_NODE_ID); | 884 | node_id.v = uv_read_local_mmr(UVH_NODE_ID); |
750 | gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1; | 885 | gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1; |
751 | gnode_upper = ((unsigned long)gnode_extra << m_val); | 886 | gnode_upper = ((unsigned long)gnode_extra << m_val); |
752 | printk(KERN_INFO "UV: N %d, M %d, N_IO: %d, gnode_upper 0x%lx, gnode_extra 0x%x, pnode_mask 0x%x, pnode_io_mask 0x%x\n", | 887 | pr_info("UV: N:%d M:%d pnode_mask:0x%x gnode_upper/extra:0x%lx/0x%x\n", |
753 | n_val, m_val, n_io, gnode_upper, gnode_extra, pnode_mask, pnode_io_mask); | 888 | n_val, m_val, pnode_mask, gnode_upper, gnode_extra); |
754 | 889 | ||
755 | printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base); | 890 | pr_info("UV: global MMR base 0x%lx\n", mmr_base); |
756 | 891 | ||
757 | for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) | 892 | for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) |
758 | uv_possible_blades += | 893 | uv_possible_blades += |
759 | hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8)); | 894 | hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8)); |
760 | 895 | ||
761 | /* uv_num_possible_blades() is really the hub count */ | 896 | /* uv_num_possible_blades() is really the hub count */ |
762 | printk(KERN_INFO "UV: Found %d blades, %d hubs\n", | 897 | pr_info("UV: Found %d blades, %d hubs\n", |
763 | is_uv1_hub() ? uv_num_possible_blades() : | 898 | is_uv1_hub() ? uv_num_possible_blades() : |
764 | (uv_num_possible_blades() + 1) / 2, | 899 | (uv_num_possible_blades() + 1) / 2, |
765 | uv_num_possible_blades()); | 900 | uv_num_possible_blades()); |
@@ -794,6 +929,7 @@ void __init uv_system_init(void) | |||
794 | uv_blade_info[blade].nr_possible_cpus = 0; | 929 | uv_blade_info[blade].nr_possible_cpus = 0; |
795 | uv_blade_info[blade].nr_online_cpus = 0; | 930 | uv_blade_info[blade].nr_online_cpus = 0; |
796 | spin_lock_init(&uv_blade_info[blade].nmi_lock); | 931 | spin_lock_init(&uv_blade_info[blade].nmi_lock); |
932 | min_pnode = min(pnode, min_pnode); | ||
797 | max_pnode = max(pnode, max_pnode); | 933 | max_pnode = max(pnode, max_pnode); |
798 | blade++; | 934 | blade++; |
799 | } | 935 | } |
@@ -856,7 +992,7 @@ void __init uv_system_init(void) | |||
856 | 992 | ||
857 | map_gru_high(max_pnode); | 993 | map_gru_high(max_pnode); |
858 | map_mmr_high(max_pnode); | 994 | map_mmr_high(max_pnode); |
859 | map_mmioh_high(max_pnode & pnode_io_mask); | 995 | map_mmioh_high(min_pnode, max_pnode); |
860 | 996 | ||
861 | uv_cpu_init(); | 997 | uv_cpu_init(); |
862 | uv_scir_register_cpu_notifier(); | 998 | uv_scir_register_cpu_notifier(); |
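
The redirect walk in map_mmioh_high_uv3() above is a run-length coalescer: consecutive REDIRECT entries that name the same NASID are folded into a single printed address range, and max_io tracks the highest live entry. A minimal sketch of the same pattern, with read_nasid() and print_range() as hypothetical stand-ins for the uv_read_local_mmr() and pr_info() calls:

	/* Hedged sketch of the coalescing loop; read_nasid() and
	 * print_range() are placeholders, not kernel APIs. */
	extern int read_nasid(int i);			/* -1 = invalid entry */
	extern void print_range(int first, int last, int nasid);

	static void coalesce_redirects(int n)
	{
		int i, first = 0, run_nasid = -1;

		for (i = 0; i < n; i++) {
			int nasid = read_nasid(i);

			if (nasid == run_nasid)
				continue;		/* current run grows */
			if (run_nasid != -1)		/* close previous run */
				print_range(first, i - 1, run_nasid);
			first = i;
			run_nasid = nasid;
		}
		if (run_nasid != -1)			/* close the final run */
			print_range(first, n - 1, run_nasid);
	}
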
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index d65464e43503..66b5faffe14a 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -232,6 +232,7 @@ | |||
232 | #include <linux/acpi.h> | 232 | #include <linux/acpi.h> |
233 | #include <linux/syscore_ops.h> | 233 | #include <linux/syscore_ops.h> |
234 | #include <linux/i8253.h> | 234 | #include <linux/i8253.h> |
235 | #include <linux/cpuidle.h> | ||
235 | 236 | ||
236 | #include <asm/uaccess.h> | 237 | #include <asm/uaccess.h> |
237 | #include <asm/desc.h> | 238 | #include <asm/desc.h> |
@@ -360,13 +361,35 @@ struct apm_user { | |||
360 | * idle percentage above which bios idle calls are done | 361 | * idle percentage above which bios idle calls are done |
361 | */ | 362 | */ |
362 | #ifdef CONFIG_APM_CPU_IDLE | 363 | #ifdef CONFIG_APM_CPU_IDLE |
363 | #warning deprecated CONFIG_APM_CPU_IDLE will be deleted in 2012 | ||
364 | #define DEFAULT_IDLE_THRESHOLD 95 | 364 | #define DEFAULT_IDLE_THRESHOLD 95 |
365 | #else | 365 | #else |
366 | #define DEFAULT_IDLE_THRESHOLD 100 | 366 | #define DEFAULT_IDLE_THRESHOLD 100 |
367 | #endif | 367 | #endif |
368 | #define DEFAULT_IDLE_PERIOD (100 / 3) | 368 | #define DEFAULT_IDLE_PERIOD (100 / 3) |
369 | 369 | ||
370 | static int apm_cpu_idle(struct cpuidle_device *dev, | ||
371 | struct cpuidle_driver *drv, int index); | ||
372 | |||
373 | static struct cpuidle_driver apm_idle_driver = { | ||
374 | .name = "apm_idle", | ||
375 | .owner = THIS_MODULE, | ||
376 | .en_core_tk_irqen = 1, | ||
377 | .states = { | ||
378 | { /* entry 0 is for polling */ }, | ||
379 | { /* entry 1 is for APM idle */ | ||
380 | .name = "APM", | ||
381 | .desc = "APM idle", | ||
382 | .flags = CPUIDLE_FLAG_TIME_VALID, | ||
383 | .exit_latency = 250, /* WAG */ | ||
384 | .target_residency = 500, /* WAG */ | ||
385 | .enter = &apm_cpu_idle | ||
386 | }, | ||
387 | }, | ||
388 | .state_count = 2, | ||
389 | }; | ||
390 | |||
391 | static struct cpuidle_device apm_cpuidle_device; | ||
392 | |||
370 | /* | 393 | /* |
371 | * Local variables | 394 | * Local variables |
372 | */ | 395 | */ |
@@ -377,7 +400,6 @@ static struct { | |||
377 | static int clock_slowed; | 400 | static int clock_slowed; |
378 | static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD; | 401 | static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD; |
379 | static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD; | 402 | static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD; |
380 | static int set_pm_idle; | ||
381 | static int suspends_pending; | 403 | static int suspends_pending; |
382 | static int standbys_pending; | 404 | static int standbys_pending; |
383 | static int ignore_sys_suspend; | 405 | static int ignore_sys_suspend; |
@@ -884,8 +906,6 @@ static void apm_do_busy(void) | |||
884 | #define IDLE_CALC_LIMIT (HZ * 100) | 906 | #define IDLE_CALC_LIMIT (HZ * 100) |
885 | #define IDLE_LEAKY_MAX 16 | 907 | #define IDLE_LEAKY_MAX 16 |
886 | 908 | ||
887 | static void (*original_pm_idle)(void) __read_mostly; | ||
888 | |||
889 | /** | 909 | /** |
890 | * apm_cpu_idle - cpu idling for APM capable Linux | 910 | * apm_cpu_idle - cpu idling for APM capable Linux |
891 | * | 911 | * |
@@ -894,35 +914,36 @@ static void (*original_pm_idle)(void) __read_mostly; | |||
894 | * Furthermore it calls the system default idle routine. | 914 | * Furthermore it calls the system default idle routine. |
895 | */ | 915 | */ |
896 | 916 | ||
897 | static void apm_cpu_idle(void) | 917 | static int apm_cpu_idle(struct cpuidle_device *dev, |
918 | struct cpuidle_driver *drv, int index) | ||
898 | { | 919 | { |
899 | static int use_apm_idle; /* = 0 */ | 920 | static int use_apm_idle; /* = 0 */ |
900 | static unsigned int last_jiffies; /* = 0 */ | 921 | static unsigned int last_jiffies; /* = 0 */ |
901 | static unsigned int last_stime; /* = 0 */ | 922 | static unsigned int last_stime; /* = 0 */ |
923 | cputime_t stime; | ||
902 | 924 | ||
903 | int apm_idle_done = 0; | 925 | int apm_idle_done = 0; |
904 | unsigned int jiffies_since_last_check = jiffies - last_jiffies; | 926 | unsigned int jiffies_since_last_check = jiffies - last_jiffies; |
905 | unsigned int bucket; | 927 | unsigned int bucket; |
906 | 928 | ||
907 | WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012"); | ||
908 | recalc: | 929 | recalc: |
930 | task_cputime(current, NULL, &stime); | ||
909 | if (jiffies_since_last_check > IDLE_CALC_LIMIT) { | 931 | if (jiffies_since_last_check > IDLE_CALC_LIMIT) { |
910 | use_apm_idle = 0; | 932 | use_apm_idle = 0; |
911 | last_jiffies = jiffies; | ||
912 | last_stime = current->stime; | ||
913 | } else if (jiffies_since_last_check > idle_period) { | 933 | } else if (jiffies_since_last_check > idle_period) { |
914 | unsigned int idle_percentage; | 934 | unsigned int idle_percentage; |
915 | 935 | ||
916 | idle_percentage = current->stime - last_stime; | 936 | idle_percentage = stime - last_stime; |
917 | idle_percentage *= 100; | 937 | idle_percentage *= 100; |
918 | idle_percentage /= jiffies_since_last_check; | 938 | idle_percentage /= jiffies_since_last_check; |
919 | use_apm_idle = (idle_percentage > idle_threshold); | 939 | use_apm_idle = (idle_percentage > idle_threshold); |
920 | if (apm_info.forbid_idle) | 940 | if (apm_info.forbid_idle) |
921 | use_apm_idle = 0; | 941 | use_apm_idle = 0; |
922 | last_jiffies = jiffies; | ||
923 | last_stime = current->stime; | ||
924 | } | 942 | } |
925 | 943 | ||
944 | last_jiffies = jiffies; | ||
945 | last_stime = stime; | ||
946 | |||
926 | bucket = IDLE_LEAKY_MAX; | 947 | bucket = IDLE_LEAKY_MAX; |
927 | 948 | ||
928 | while (!need_resched()) { | 949 | while (!need_resched()) { |
@@ -950,10 +971,7 @@ recalc: | |||
950 | break; | 971 | break; |
951 | } | 972 | } |
952 | } | 973 | } |
953 | if (original_pm_idle) | 974 | default_idle(); |
954 | original_pm_idle(); | ||
955 | else | ||
956 | default_idle(); | ||
957 | local_irq_disable(); | 975 | local_irq_disable(); |
958 | jiffies_since_last_check = jiffies - last_jiffies; | 976 | jiffies_since_last_check = jiffies - last_jiffies; |
959 | if (jiffies_since_last_check > idle_period) | 977 | if (jiffies_since_last_check > idle_period) |
@@ -963,7 +981,7 @@ recalc: | |||
963 | if (apm_idle_done) | 981 | if (apm_idle_done) |
964 | apm_do_busy(); | 982 | apm_do_busy(); |
965 | 983 | ||
966 | local_irq_enable(); | 984 | return index; |
967 | } | 985 | } |
968 | 986 | ||
969 | /** | 987 | /** |
@@ -2381,9 +2399,9 @@ static int __init apm_init(void) | |||
2381 | if (HZ != 100) | 2399 | if (HZ != 100) |
2382 | idle_period = (idle_period * HZ) / 100; | 2400 | idle_period = (idle_period * HZ) / 100; |
2383 | if (idle_threshold < 100) { | 2401 | if (idle_threshold < 100) { |
2384 | original_pm_idle = pm_idle; | 2402 | if (!cpuidle_register_driver(&apm_idle_driver)) |
2385 | pm_idle = apm_cpu_idle; | 2403 | if (cpuidle_register_device(&apm_cpuidle_device)) |
2386 | set_pm_idle = 1; | 2404 | cpuidle_unregister_driver(&apm_idle_driver); |
2387 | } | 2405 | } |
2388 | 2406 | ||
2389 | return 0; | 2407 | return 0; |
@@ -2393,15 +2411,9 @@ static void __exit apm_exit(void) | |||
2393 | { | 2411 | { |
2394 | int error; | 2412 | int error; |
2395 | 2413 | ||
2396 | if (set_pm_idle) { | 2414 | cpuidle_unregister_device(&apm_cpuidle_device); |
2397 | pm_idle = original_pm_idle; | 2415 | cpuidle_unregister_driver(&apm_idle_driver); |
2398 | /* | 2416 | |
2399 | * We are about to unload the current idle thread pm callback | ||
2400 | * (pm_idle), Wait for all processors to update cached/local | ||
2401 | * copies of pm_idle before proceeding. | ||
2402 | */ | ||
2403 | kick_all_cpus_sync(); | ||
2404 | } | ||
2405 | if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0) | 2417 | if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0) |
2406 | && (apm_info.connection_version > 0x0100)) { | 2418 | && (apm_info.connection_version > 0x0100)) { |
2407 | error = apm_engage_power_management(APM_DEVICE_ALL, 0); | 2419 | error = apm_engage_power_management(APM_DEVICE_ALL, 0); |
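
Two details of the apm_32.c conversion above are easy to miss. First, the idle heuristic is plain arithmetic on the sampled stime delta: 32 jiffies of accrued system time over 33 elapsed jiffies gives 32 * 100 / 33 = 96 percent, which clears the default 95 percent threshold and keeps BIOS idle calls enabled. Second, the nested registration in apm_init() is the usual driver-then-device pattern; unrolled with explicit error handling it reads as below (apm_idle_setup() is a hypothetical name used only for this sketch):

	/* Equivalent to the cpuidle registration in apm_init(): the device
	 * is only registered once the driver is in place, and the driver is
	 * torn down again if device registration fails. */
	static int __init apm_idle_setup(void)
	{
		int err;

		err = cpuidle_register_driver(&apm_idle_driver);
		if (err)
			return err;

		err = cpuidle_register_device(&apm_cpuidle_device);
		if (err)
			cpuidle_unregister_driver(&apm_idle_driver);

		return err;
	}
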
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 15239fffd6fe..782c456eaa01 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -364,9 +364,9 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) | |||
364 | #endif | 364 | #endif |
365 | } | 365 | } |
366 | 366 | ||
367 | int amd_get_nb_id(int cpu) | 367 | u16 amd_get_nb_id(int cpu) |
368 | { | 368 | { |
369 | int id = 0; | 369 | u16 id = 0; |
370 | #ifdef CONFIG_SMP | 370 | #ifdef CONFIG_SMP |
371 | id = per_cpu(cpu_llc_id, cpu); | 371 | id = per_cpu(cpu_llc_id, cpu); |
372 | #endif | 372 | #endif |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 92dfec986a48..af6455e3fcc9 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -17,15 +17,6 @@ | |||
17 | #include <asm/paravirt.h> | 17 | #include <asm/paravirt.h> |
18 | #include <asm/alternative.h> | 18 | #include <asm/alternative.h> |
19 | 19 | ||
20 | static int __init no_halt(char *s) | ||
21 | { | ||
22 | WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n"); | ||
23 | boot_cpu_data.hlt_works_ok = 0; | ||
24 | return 1; | ||
25 | } | ||
26 | |||
27 | __setup("no-hlt", no_halt); | ||
28 | |||
29 | static int __init no_387(char *s) | 20 | static int __init no_387(char *s) |
30 | { | 21 | { |
31 | boot_cpu_data.hard_math = 0; | 22 | boot_cpu_data.hard_math = 0; |
@@ -89,23 +80,6 @@ static void __init check_fpu(void) | |||
89 | pr_warn("Hmm, FPU with FDIV bug\n"); | 80 | pr_warn("Hmm, FPU with FDIV bug\n"); |
90 | } | 81 | } |
91 | 82 | ||
92 | static void __init check_hlt(void) | ||
93 | { | ||
94 | if (boot_cpu_data.x86 >= 5 || paravirt_enabled()) | ||
95 | return; | ||
96 | |||
97 | pr_info("Checking 'hlt' instruction... "); | ||
98 | if (!boot_cpu_data.hlt_works_ok) { | ||
99 | pr_cont("disabled\n"); | ||
100 | return; | ||
101 | } | ||
102 | halt(); | ||
103 | halt(); | ||
104 | halt(); | ||
105 | halt(); | ||
106 | pr_cont("OK\n"); | ||
107 | } | ||
108 | |||
109 | /* | 83 | /* |
110 | * Check whether we are able to run this kernel safely on SMP. | 84 | * Check whether we are able to run this kernel safely on SMP. |
111 | * | 85 | * |
@@ -129,7 +103,6 @@ void __init check_bugs(void) | |||
129 | print_cpu_info(&boot_cpu_data); | 103 | print_cpu_info(&boot_cpu_data); |
130 | #endif | 104 | #endif |
131 | check_config(); | 105 | check_config(); |
132 | check_hlt(); | ||
133 | init_utsname()->machine[1] = | 106 | init_utsname()->machine[1] = |
134 | '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); | 107 | '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); |
135 | alternative_instructions(); | 108 | alternative_instructions(); |
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index a8f8fa9769d6..1e7e84a02eba 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c | |||
@@ -79,3 +79,10 @@ void __init init_hypervisor_platform(void) | |||
79 | if (x86_hyper->init_platform) | 79 | if (x86_hyper->init_platform) |
80 | x86_hyper->init_platform(); | 80 | x86_hyper->init_platform(); |
81 | } | 81 | } |
82 | |||
83 | bool __init hypervisor_x2apic_available(void) | ||
84 | { | ||
85 | return x86_hyper && | ||
86 | x86_hyper->x2apic_available && | ||
87 | x86_hyper->x2apic_available(); | ||
88 | } | ||
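
hypervisor_x2apic_available() deliberately short-circuits: no detected hypervisor, a hypervisor without the hook, and a hook that answers no all collapse to false, so callers need only one test. The vmware.c hunk further down supplies the first implementation of the hook; a caller on the APIC bring-up path might look roughly like this (the function shown is illustrative, not the actual call site):

	/* Illustrative caller: without working interrupt remapping, x2APIC
	 * is only safe when the hypervisor explicitly vouches for it. */
	static bool can_use_x2apic(bool irq_remapping_ok)
	{
		return irq_remapping_ok || hypervisor_x2apic_available();
	}
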
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 84c1309c4c0c..7c6f7d548c0f 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -1226,7 +1226,7 @@ static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { | |||
1226 | .notifier_call = cacheinfo_cpu_callback, | 1226 | .notifier_call = cacheinfo_cpu_callback, |
1227 | }; | 1227 | }; |
1228 | 1228 | ||
1229 | static int __cpuinit cache_sysfs_init(void) | 1229 | static int __init cache_sysfs_init(void) |
1230 | { | 1230 | { |
1231 | int i; | 1231 | int i; |
1232 | 1232 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 80dbda84f1c3..fc7608a89d93 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -512,11 +512,8 @@ int mce_available(struct cpuinfo_x86 *c) | |||
512 | 512 | ||
513 | static void mce_schedule_work(void) | 513 | static void mce_schedule_work(void) |
514 | { | 514 | { |
515 | if (!mce_ring_empty()) { | 515 | if (!mce_ring_empty()) |
516 | struct work_struct *work = &__get_cpu_var(mce_work); | 516 | schedule_work(&__get_cpu_var(mce_work)); |
517 | if (!work_pending(work)) | ||
518 | schedule_work(work); | ||
519 | } | ||
520 | } | 517 | } |
521 | 518 | ||
522 | DEFINE_PER_CPU(struct irq_work, mce_irq_work); | 519 | DEFINE_PER_CPU(struct irq_work, mce_irq_work); |
@@ -1351,12 +1348,7 @@ int mce_notify_irq(void) | |||
1351 | /* wake processes polling /dev/mcelog */ | 1348 | /* wake processes polling /dev/mcelog */ |
1352 | wake_up_interruptible(&mce_chrdev_wait); | 1349 | wake_up_interruptible(&mce_chrdev_wait); |
1353 | 1350 | ||
1354 | /* | 1351 | if (mce_helper[0]) |
1355 | * There is no risk of missing notifications because | ||
1356 | * work_pending is always cleared before the function is | ||
1357 | * executed. | ||
1358 | */ | ||
1359 | if (mce_helper[0] && !work_pending(&mce_trigger_work)) | ||
1360 | schedule_work(&mce_trigger_work); | 1352 | schedule_work(&mce_trigger_work); |
1361 | 1353 | ||
1362 | if (__ratelimit(&ratelimit)) | 1354 | if (__ratelimit(&ratelimit)) |
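
Both mce.c hunks rest on the same workqueue guarantee: schedule_work() tests and sets the pending bit atomically and returns false for an item that is already queued, so the removed work_pending() checks added nothing but a race window between the test and the queueing. Assuming standard workqueue semantics, the bare call is safe:

	#include <linux/workqueue.h>

	/* schedule_work() is idempotent for a pending item: at most one
	 * instance is ever queued, and the return value reports whether
	 * this call actually queued anything. */
	static bool kick(struct work_struct *work)
	{
		return schedule_work(work);	/* false: already pending */
	}
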
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 0a630dd4b620..a7d26d83fb70 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c | |||
@@ -14,10 +14,15 @@ | |||
14 | #include <linux/time.h> | 14 | #include <linux/time.h> |
15 | #include <linux/clocksource.h> | 15 | #include <linux/clocksource.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/hardirq.h> | ||
18 | #include <linux/interrupt.h> | ||
17 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
18 | #include <asm/hypervisor.h> | 20 | #include <asm/hypervisor.h> |
19 | #include <asm/hyperv.h> | 21 | #include <asm/hyperv.h> |
20 | #include <asm/mshyperv.h> | 22 | #include <asm/mshyperv.h> |
23 | #include <asm/desc.h> | ||
24 | #include <asm/idle.h> | ||
25 | #include <asm/irq_regs.h> | ||
21 | 26 | ||
22 | struct ms_hyperv_info ms_hyperv; | 27 | struct ms_hyperv_info ms_hyperv; |
23 | EXPORT_SYMBOL_GPL(ms_hyperv); | 28 | EXPORT_SYMBOL_GPL(ms_hyperv); |
@@ -30,6 +35,13 @@ static bool __init ms_hyperv_platform(void) | |||
30 | if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) | 35 | if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) |
31 | return false; | 36 | return false; |
32 | 37 | ||
38 | /* | ||
39 | * Xen emulates Hyper-V to support enlightened Windows. | ||
40 | * First check whether we are running on a Xen hypervisor. | ||
41 | */ | ||
42 | if (xen_cpuid_base()) | ||
43 | return false; | ||
44 | |||
33 | cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, | 45 | cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, |
34 | &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); | 46 | &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); |
35 | 47 | ||
@@ -68,7 +80,14 @@ static void __init ms_hyperv_init_platform(void) | |||
68 | printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", | 80 | printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", |
69 | ms_hyperv.features, ms_hyperv.hints); | 81 | ms_hyperv.features, ms_hyperv.hints); |
70 | 82 | ||
71 | clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); | 83 | if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) |
84 | clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); | ||
85 | #if IS_ENABLED(CONFIG_HYPERV) | ||
86 | /* | ||
87 | * Setup the IDT for hypervisor callback. | ||
88 | */ | ||
89 | alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); | ||
90 | #endif | ||
72 | } | 91 | } |
73 | 92 | ||
74 | const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { | 93 | const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { |
@@ -77,3 +96,36 @@ const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { | |||
77 | .init_platform = ms_hyperv_init_platform, | 96 | .init_platform = ms_hyperv_init_platform, |
78 | }; | 97 | }; |
79 | EXPORT_SYMBOL(x86_hyper_ms_hyperv); | 98 | EXPORT_SYMBOL(x86_hyper_ms_hyperv); |
99 | |||
100 | #if IS_ENABLED(CONFIG_HYPERV) | ||
101 | static int vmbus_irq = -1; | ||
102 | static irq_handler_t vmbus_isr; | ||
103 | |||
104 | void hv_register_vmbus_handler(int irq, irq_handler_t handler) | ||
105 | { | ||
106 | vmbus_irq = irq; | ||
107 | vmbus_isr = handler; | ||
108 | } | ||
109 | |||
110 | void hyperv_vector_handler(struct pt_regs *regs) | ||
111 | { | ||
112 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
113 | struct irq_desc *desc; | ||
114 | |||
115 | irq_enter(); | ||
116 | exit_idle(); | ||
117 | |||
118 | desc = irq_to_desc(vmbus_irq); | ||
119 | |||
120 | if (desc) | ||
121 | generic_handle_irq_desc(vmbus_irq, desc); | ||
122 | |||
123 | irq_exit(); | ||
124 | set_irq_regs(old_regs); | ||
125 | } | ||
126 | #else | ||
127 | void hv_register_vmbus_handler(int irq, irq_handler_t handler) | ||
128 | { | ||
129 | } | ||
130 | #endif | ||
131 | EXPORT_SYMBOL_GPL(hv_register_vmbus_handler); | ||
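
The mshyperv.c additions split responsibilities: this file owns the IDT entry for HYPERVISOR_CALLBACK_VECTOR, while the VMBus driver only hands over the IRQ number and handler it already uses, and hyperv_vector_handler() replays each callback through the regular irq_desc path. From the driver side the hand-off is a single call at probe time; the names below are illustrative, not the actual hv_vmbus code:

	#include <linux/interrupt.h>

	/* Illustrative VMBus probe fragment: route the hypervisor callback
	 * vector to the driver's existing interrupt handler. */
	static irqreturn_t vmbus_isr_fn(int irq, void *dev_id)
	{
		/* ... scan and dispatch pending channel events ... */
		return IRQ_HANDLED;
	}

	static void vmbus_connect_irq(int irq)
	{
		hv_register_vmbus_handler(irq, vmbus_isr_fn);
	}
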
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 6774c17a5576..bf0f01aea994 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -829,7 +829,7 @@ static inline void x86_assign_hw_event(struct perf_event *event, | |||
829 | } else { | 829 | } else { |
830 | hwc->config_base = x86_pmu_config_addr(hwc->idx); | 830 | hwc->config_base = x86_pmu_config_addr(hwc->idx); |
831 | hwc->event_base = x86_pmu_event_addr(hwc->idx); | 831 | hwc->event_base = x86_pmu_event_addr(hwc->idx); |
832 | hwc->event_base_rdpmc = hwc->idx; | 832 | hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx); |
833 | } | 833 | } |
834 | } | 834 | } |
835 | 835 | ||
@@ -1310,11 +1310,6 @@ static struct attribute_group x86_pmu_format_group = { | |||
1310 | .attrs = NULL, | 1310 | .attrs = NULL, |
1311 | }; | 1311 | }; |
1312 | 1312 | ||
1313 | struct perf_pmu_events_attr { | ||
1314 | struct device_attribute attr; | ||
1315 | u64 id; | ||
1316 | }; | ||
1317 | |||
1318 | /* | 1313 | /* |
1319 | * Remove all undefined events (x86_pmu.event_map(id) == 0) | 1314 | * Remove all undefined events (x86_pmu.event_map(id) == 0) |
1320 | * out of events_attr attributes. | 1315 | * out of events_attr attributes. |
@@ -1348,11 +1343,9 @@ static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *at | |||
1348 | #define EVENT_VAR(_id) event_attr_##_id | 1343 | #define EVENT_VAR(_id) event_attr_##_id |
1349 | #define EVENT_PTR(_id) &event_attr_##_id.attr.attr | 1344 | #define EVENT_PTR(_id) &event_attr_##_id.attr.attr |
1350 | 1345 | ||
1351 | #define EVENT_ATTR(_name, _id) \ | 1346 | #define EVENT_ATTR(_name, _id) \ |
1352 | static struct perf_pmu_events_attr EVENT_VAR(_id) = { \ | 1347 | PMU_EVENT_ATTR(_name, EVENT_VAR(_id), PERF_COUNT_HW_##_id, \ |
1353 | .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \ | 1348 | events_sysfs_show) |
1354 | .id = PERF_COUNT_HW_##_id, \ | ||
1355 | }; | ||
1356 | 1349 | ||
1357 | EVENT_ATTR(cpu-cycles, CPU_CYCLES ); | 1350 | EVENT_ATTR(cpu-cycles, CPU_CYCLES ); |
1358 | EVENT_ATTR(instructions, INSTRUCTIONS ); | 1351 | EVENT_ATTR(instructions, INSTRUCTIONS ); |
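
The struct and macro body removed from perf_event.c did not disappear; they moved behind the generic PMU_EVENT_ATTR() helper so other PMUs can share them. Assuming that helper mirrors the removed open-coded form, EVENT_ATTR(cpu-cycles, CPU_CYCLES) still expands to essentially:

	/* Approximate expansion, inferred from the open-coded version this
	 * hunk removes rather than copied from <linux/perf_event.h>: */
	static struct perf_pmu_events_attr event_attr_CPU_CYCLES = {
		.attr = __ATTR(cpu-cycles, 0444, events_sysfs_show, NULL),
		.id   = PERF_COUNT_HW_CPU_CYCLES,
	};
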
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 115c1ea97746..7f5c75c2afdd 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -325,6 +325,8 @@ struct x86_pmu { | |||
325 | int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); | 325 | int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); |
326 | unsigned eventsel; | 326 | unsigned eventsel; |
327 | unsigned perfctr; | 327 | unsigned perfctr; |
328 | int (*addr_offset)(int index, bool eventsel); | ||
329 | int (*rdpmc_index)(int index); | ||
328 | u64 (*event_map)(int); | 330 | u64 (*event_map)(int); |
329 | int max_events; | 331 | int max_events; |
330 | int num_counters; | 332 | int num_counters; |
@@ -446,28 +448,21 @@ extern u64 __read_mostly hw_cache_extra_regs | |||
446 | 448 | ||
447 | u64 x86_perf_event_update(struct perf_event *event); | 449 | u64 x86_perf_event_update(struct perf_event *event); |
448 | 450 | ||
449 | static inline int x86_pmu_addr_offset(int index) | 451 | static inline unsigned int x86_pmu_config_addr(int index) |
450 | { | 452 | { |
451 | int offset; | 453 | return x86_pmu.eventsel + (x86_pmu.addr_offset ? |
452 | 454 | x86_pmu.addr_offset(index, true) : index); | |
453 | /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */ | ||
454 | alternative_io(ASM_NOP2, | ||
455 | "shll $1, %%eax", | ||
456 | X86_FEATURE_PERFCTR_CORE, | ||
457 | "=a" (offset), | ||
458 | "a" (index)); | ||
459 | |||
460 | return offset; | ||
461 | } | 455 | } |
462 | 456 | ||
463 | static inline unsigned int x86_pmu_config_addr(int index) | 457 | static inline unsigned int x86_pmu_event_addr(int index) |
464 | { | 458 | { |
465 | return x86_pmu.eventsel + x86_pmu_addr_offset(index); | 459 | return x86_pmu.perfctr + (x86_pmu.addr_offset ? |
460 | x86_pmu.addr_offset(index, false) : index); | ||
466 | } | 461 | } |
467 | 462 | ||
468 | static inline unsigned int x86_pmu_event_addr(int index) | 463 | static inline int x86_pmu_rdpmc_index(int index) |
469 | { | 464 | { |
470 | return x86_pmu.perfctr + x86_pmu_addr_offset(index); | 465 | return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index; |
471 | } | 466 | } |
472 | 467 | ||
473 | int x86_setup_perfctr(struct perf_event *event); | 468 | int x86_setup_perfctr(struct perf_event *event); |
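
The alternative_io() sequence deleted above patched a single "shll $1" into the address computation at boot, which could only ever express the two AMD layouts. Its replacement is a plain optional callback: a back-end that supplies ->addr_offset owns the whole index-to-offset mapping, and everyone else keeps offset == index. A minimal sketch of a back-end using the hook (the demo names are hypothetical; the hook signature is the one declared above):

	/* Hypothetical back-end whose control/counter MSRs interleave in
	 * pairs, the exact layout the removed alternative special-cased: */
	static int demo_addr_offset(int index, bool eventsel)
	{
		return index << 1;	/* same stride for CTL and CTR */
	}

	/* wired up in the back-end's struct x86_pmu initializer:
	 *	.addr_offset = demo_addr_offset,
	 * after which x86_pmu_config_addr(i) == x86_pmu.eventsel + (i << 1)
	 * and x86_pmu_event_addr(i) == x86_pmu.perfctr + (i << 1). */
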
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index c93bc4e813a0..dfdab42aed27 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -132,21 +132,102 @@ static u64 amd_pmu_event_map(int hw_event) | |||
132 | return amd_perfmon_event_map[hw_event]; | 132 | return amd_perfmon_event_map[hw_event]; |
133 | } | 133 | } |
134 | 134 | ||
135 | static int amd_pmu_hw_config(struct perf_event *event) | 135 | static struct event_constraint *amd_nb_event_constraint; |
136 | |||
137 | /* | ||
138 | * Previously calculated offsets | ||
139 | */ | ||
140 | static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly; | ||
141 | static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly; | ||
142 | static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly; | ||
143 | |||
144 | /* | ||
145 | * Legacy CPUs: | ||
146 | * 4 counters starting at 0xc0010000 each offset by 1 | ||
147 | * | ||
148 | * CPUs with core performance counter extensions: | ||
149 | * 6 counters starting at 0xc0010200 each offset by 2 | ||
150 | * | ||
151 | * CPUs with north bridge performance counter extensions: | ||
152 | * 4 additional counters starting at 0xc0010240 each offset by 2 | ||
153 | * (indexed right above either one of the above core counters) | ||
154 | */ | ||
155 | static inline int amd_pmu_addr_offset(int index, bool eventsel) | ||
136 | { | 156 | { |
137 | int ret; | 157 | int offset, first, base; |
138 | 158 | ||
139 | /* pass precise event sampling to ibs: */ | 159 | if (!index) |
140 | if (event->attr.precise_ip && get_ibs_caps()) | 160 | return index; |
141 | return -ENOENT; | 161 | |
162 | if (eventsel) | ||
163 | offset = event_offsets[index]; | ||
164 | else | ||
165 | offset = count_offsets[index]; | ||
166 | |||
167 | if (offset) | ||
168 | return offset; | ||
169 | |||
170 | if (amd_nb_event_constraint && | ||
171 | test_bit(index, amd_nb_event_constraint->idxmsk)) { | ||
172 | /* | ||
173 | * calculate the offset of NB counters with respect to | ||
174 | * base eventsel or perfctr | ||
175 | */ | ||
176 | |||
177 | first = find_first_bit(amd_nb_event_constraint->idxmsk, | ||
178 | X86_PMC_IDX_MAX); | ||
179 | |||
180 | if (eventsel) | ||
181 | base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel; | ||
182 | else | ||
183 | base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr; | ||
184 | |||
185 | offset = base + ((index - first) << 1); | ||
186 | } else if (!cpu_has_perfctr_core) | ||
187 | offset = index; | ||
188 | else | ||
189 | offset = index << 1; | ||
190 | |||
191 | if (eventsel) | ||
192 | event_offsets[index] = offset; | ||
193 | else | ||
194 | count_offsets[index] = offset; | ||
195 | |||
196 | return offset; | ||
197 | } | ||
198 | |||
199 | static inline int amd_pmu_rdpmc_index(int index) | ||
200 | { | ||
201 | int ret, first; | ||
202 | |||
203 | if (!index) | ||
204 | return index; | ||
205 | |||
206 | ret = rdpmc_indexes[index]; | ||
142 | 207 | ||
143 | ret = x86_pmu_hw_config(event); | ||
144 | if (ret) | 208 | if (ret) |
145 | return ret; | 209 | return ret; |
146 | 210 | ||
147 | if (has_branch_stack(event)) | 211 | if (amd_nb_event_constraint && |
148 | return -EOPNOTSUPP; | 212 | test_bit(index, amd_nb_event_constraint->idxmsk)) { |
213 | /* | ||
214 | * according to the manual, the ECX value of the NB counters is | ||
215 | * the index of the NB counter (0, 1, 2 or 3) plus 6 | ||
216 | */ | ||
217 | |||
218 | first = find_first_bit(amd_nb_event_constraint->idxmsk, | ||
219 | X86_PMC_IDX_MAX); | ||
220 | ret = index - first + 6; | ||
221 | } else | ||
222 | ret = index; | ||
223 | |||
224 | rdpmc_indexes[index] = ret; | ||
225 | |||
226 | return ret; | ||
227 | } | ||
149 | 228 | ||
229 | static int amd_core_hw_config(struct perf_event *event) | ||
230 | { | ||
150 | if (event->attr.exclude_host && event->attr.exclude_guest) | 231 | if (event->attr.exclude_host && event->attr.exclude_guest) |
151 | /* | 232 | /* |
152 | * When HO == GO == 1 the hardware treats that as GO == HO == 0 | 233 | * When HO == GO == 1 the hardware treats that as GO == HO == 0 |
@@ -156,14 +237,37 @@ static int amd_pmu_hw_config(struct perf_event *event) | |||
156 | event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | | 237 | event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | |
157 | ARCH_PERFMON_EVENTSEL_OS); | 238 | ARCH_PERFMON_EVENTSEL_OS); |
158 | else if (event->attr.exclude_host) | 239 | else if (event->attr.exclude_host) |
159 | event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY; | 240 | event->hw.config |= AMD64_EVENTSEL_GUESTONLY; |
160 | else if (event->attr.exclude_guest) | 241 | else if (event->attr.exclude_guest) |
161 | event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY; | 242 | event->hw.config |= AMD64_EVENTSEL_HOSTONLY; |
243 | |||
244 | return 0; | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * NB counters do not support the following event select bits: | ||
249 | * Host/Guest only | ||
250 | * Counter mask | ||
251 | * Invert counter mask | ||
252 | * Edge detect | ||
253 | * OS/User mode | ||
254 | */ | ||
255 | static int amd_nb_hw_config(struct perf_event *event) | ||
256 | { | ||
257 | /* for NB, we only allow system wide counting mode */ | ||
258 | if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) | ||
259 | return -EINVAL; | ||
260 | |||
261 | if (event->attr.exclude_user || event->attr.exclude_kernel || | ||
262 | event->attr.exclude_host || event->attr.exclude_guest) | ||
263 | return -EINVAL; | ||
162 | 264 | ||
163 | if (event->attr.type != PERF_TYPE_RAW) | 265 | event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | |
164 | return 0; | 266 | ARCH_PERFMON_EVENTSEL_OS); |
165 | 267 | ||
166 | event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK; | 268 | if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB | |
269 | ARCH_PERFMON_EVENTSEL_INT)) | ||
270 | return -EINVAL; | ||
167 | 271 | ||
168 | return 0; | 272 | return 0; |
169 | } | 273 | } |
@@ -181,6 +285,11 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc) | |||
181 | return (hwc->config & 0xe0) == 0xe0; | 285 | return (hwc->config & 0xe0) == 0xe0; |
182 | } | 286 | } |
183 | 287 | ||
288 | static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc) | ||
289 | { | ||
290 | return amd_nb_event_constraint && amd_is_nb_event(hwc); | ||
291 | } | ||
292 | |||
184 | static inline int amd_has_nb(struct cpu_hw_events *cpuc) | 293 | static inline int amd_has_nb(struct cpu_hw_events *cpuc) |
185 | { | 294 | { |
186 | struct amd_nb *nb = cpuc->amd_nb; | 295 | struct amd_nb *nb = cpuc->amd_nb; |
@@ -188,20 +297,37 @@ static inline int amd_has_nb(struct cpu_hw_events *cpuc) | |||
188 | return nb && nb->nb_id != -1; | 297 | return nb && nb->nb_id != -1; |
189 | } | 298 | } |
190 | 299 | ||
191 | static void amd_put_event_constraints(struct cpu_hw_events *cpuc, | 300 | static int amd_pmu_hw_config(struct perf_event *event) |
192 | struct perf_event *event) | 301 | { |
302 | int ret; | ||
303 | |||
304 | /* pass precise event sampling to ibs: */ | ||
305 | if (event->attr.precise_ip && get_ibs_caps()) | ||
306 | return -ENOENT; | ||
307 | |||
308 | if (has_branch_stack(event)) | ||
309 | return -EOPNOTSUPP; | ||
310 | |||
311 | ret = x86_pmu_hw_config(event); | ||
312 | if (ret) | ||
313 | return ret; | ||
314 | |||
315 | if (event->attr.type == PERF_TYPE_RAW) | ||
316 | event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK; | ||
317 | |||
318 | if (amd_is_perfctr_nb_event(&event->hw)) | ||
319 | return amd_nb_hw_config(event); | ||
320 | |||
321 | return amd_core_hw_config(event); | ||
322 | } | ||
323 | |||
324 | static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc, | ||
325 | struct perf_event *event) | ||
193 | { | 326 | { |
194 | struct hw_perf_event *hwc = &event->hw; | ||
195 | struct amd_nb *nb = cpuc->amd_nb; | 327 | struct amd_nb *nb = cpuc->amd_nb; |
196 | int i; | 328 | int i; |
197 | 329 | ||
198 | /* | 330 | /* |
199 | * only care about NB events | ||
200 | */ | ||
201 | if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc))) | ||
202 | return; | ||
203 | |||
204 | /* | ||
205 | * need to scan whole list because event may not have | 331 | * need to scan whole list because event may not have |
206 | * been assigned during scheduling | 332 | * been assigned during scheduling |
207 | * | 333 | * |
@@ -215,6 +341,19 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc, | |||
215 | } | 341 | } |
216 | } | 342 | } |
217 | 343 | ||
344 | static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc) | ||
345 | { | ||
346 | int core_id = cpu_data(smp_processor_id()).cpu_core_id; | ||
347 | |||
348 | /* deliver interrupts only to this core */ | ||
349 | if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) { | ||
350 | hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE; | ||
351 | hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK; | ||
352 | hwc->config |= (u64)(core_id) << | ||
353 | AMD64_EVENTSEL_INT_CORE_SEL_SHIFT; | ||
354 | } | ||
355 | } | ||
356 | |||
218 | /* | 357 | /* |
219 | * AMD64 NorthBridge events need special treatment because | 358 | * AMD64 NorthBridge events need special treatment because |
220 | * counter access needs to be synchronized across all cores | 359 | * counter access needs to be synchronized across all cores |
@@ -247,24 +386,24 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc, | |||
247 | * | 386 | * |
248 | * Given that resources are allocated (cmpxchg), they must be | 387 | * Given that resources are allocated (cmpxchg), they must be |
249 | * eventually freed for others to use. This is accomplished by | 388 | * eventually freed for others to use. This is accomplished by |
250 | * calling amd_put_event_constraints(). | 389 | * calling __amd_put_nb_event_constraints() |
251 | * | 390 | * |
252 | * Non NB events are not impacted by this restriction. | 391 | * Non NB events are not impacted by this restriction. |
253 | */ | 392 | */ |
254 | static struct event_constraint * | 393 | static struct event_constraint * |
255 | amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | 394 | __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, |
395 | struct event_constraint *c) | ||
256 | { | 396 | { |
257 | struct hw_perf_event *hwc = &event->hw; | 397 | struct hw_perf_event *hwc = &event->hw; |
258 | struct amd_nb *nb = cpuc->amd_nb; | 398 | struct amd_nb *nb = cpuc->amd_nb; |
259 | struct perf_event *old = NULL; | 399 | struct perf_event *old; |
260 | int max = x86_pmu.num_counters; | 400 | int idx, new = -1; |
261 | int i, j, k = -1; | ||
262 | 401 | ||
263 | /* | 402 | if (!c) |
264 | * if not NB event or no NB, then no constraints | 403 | c = &unconstrained; |
265 | */ | 404 | |
266 | if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc))) | 405 | if (cpuc->is_fake) |
267 | return &unconstrained; | 406 | return c; |
268 | 407 | ||
269 | /* | 408 | /* |
270 | * detect if already present, if so reuse | 409 | * detect if already present, if so reuse |
@@ -276,48 +415,36 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | |||
276 | * because of successive calls to x86_schedule_events() from | 415 | * because of successive calls to x86_schedule_events() from |
277 | * hw_perf_group_sched_in() without hw_perf_enable() | 416 | * hw_perf_group_sched_in() without hw_perf_enable() |
278 | */ | 417 | */ |
279 | for (i = 0; i < max; i++) { | 418 | for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) { |
280 | /* | 419 | if (new == -1 || hwc->idx == idx) |
281 | * keep track of first free slot | 420 | /* assign free slot, prefer hwc->idx */ |
282 | */ | 421 | old = cmpxchg(nb->owners + idx, NULL, event); |
283 | if (k == -1 && !nb->owners[i]) | 422 | else if (nb->owners[idx] == event) |
284 | k = i; | 423 | /* event already present */ |
424 | old = event; | ||
425 | else | ||
426 | continue; | ||
427 | |||
428 | if (old && old != event) | ||
429 | continue; | ||
430 | |||
431 | /* reassign to this slot */ | ||
432 | if (new != -1) | ||
433 | cmpxchg(nb->owners + new, event, NULL); | ||
434 | new = idx; | ||
285 | 435 | ||
286 | /* already present, reuse */ | 436 | /* already present, reuse */ |
287 | if (nb->owners[i] == event) | 437 | if (old == event) |
288 | goto done; | ||
289 | } | ||
290 | /* | ||
291 | * not present, so grab a new slot | ||
292 | * starting either at: | ||
293 | */ | ||
294 | if (hwc->idx != -1) { | ||
295 | /* previous assignment */ | ||
296 | i = hwc->idx; | ||
297 | } else if (k != -1) { | ||
298 | /* start from free slot found */ | ||
299 | i = k; | ||
300 | } else { | ||
301 | /* | ||
302 | * event not found, no slot found in | ||
303 | * first pass, try again from the | ||
304 | * beginning | ||
305 | */ | ||
306 | i = 0; | ||
307 | } | ||
308 | j = i; | ||
309 | do { | ||
310 | old = cmpxchg(nb->owners+i, NULL, event); | ||
311 | if (!old) | ||
312 | break; | 438 | break; |
313 | if (++i == max) | 439 | } |
314 | i = 0; | 440 | |
315 | } while (i != j); | 441 | if (new == -1) |
316 | done: | 442 | return &emptyconstraint; |
317 | if (!old) | 443 | |
318 | return &nb->event_constraints[i]; | 444 | if (amd_is_perfctr_nb_event(hwc)) |
319 | 445 | amd_nb_interrupt_hw_config(hwc); | |
320 | return &emptyconstraint; | 446 | |
447 | return &nb->event_constraints[new]; | ||
321 | } | 448 | } |
322 | 449 | ||
323 | static struct amd_nb *amd_alloc_nb(int cpu) | 450 | static struct amd_nb *amd_alloc_nb(int cpu) |
@@ -364,7 +491,7 @@ static void amd_pmu_cpu_starting(int cpu) | |||
364 | struct amd_nb *nb; | 491 | struct amd_nb *nb; |
365 | int i, nb_id; | 492 | int i, nb_id; |
366 | 493 | ||
367 | cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; | 494 | cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; |
368 | 495 | ||
369 | if (boot_cpu_data.x86_max_cores < 2) | 496 | if (boot_cpu_data.x86_max_cores < 2) |
370 | return; | 497 | return; |
@@ -407,6 +534,26 @@ static void amd_pmu_cpu_dead(int cpu) | |||
407 | } | 534 | } |
408 | } | 535 | } |
409 | 536 | ||
537 | static struct event_constraint * | ||
538 | amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | ||
539 | { | ||
540 | /* | ||
541 | * if not NB event or no NB, then no constraints | ||
542 | */ | ||
543 | if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))) | ||
544 | return &unconstrained; | ||
545 | |||
546 | return __amd_get_nb_event_constraints(cpuc, event, | ||
547 | amd_nb_event_constraint); | ||
548 | } | ||
549 | |||
550 | static void amd_put_event_constraints(struct cpu_hw_events *cpuc, | ||
551 | struct perf_event *event) | ||
552 | { | ||
553 | if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)) | ||
554 | __amd_put_nb_event_constraints(cpuc, event); | ||
555 | } | ||
556 | |||
410 | PMU_FORMAT_ATTR(event, "config:0-7,32-35"); | 557 | PMU_FORMAT_ATTR(event, "config:0-7,32-35"); |
411 | PMU_FORMAT_ATTR(umask, "config:8-15" ); | 558 | PMU_FORMAT_ATTR(umask, "config:8-15" ); |
412 | PMU_FORMAT_ATTR(edge, "config:18" ); | 559 | PMU_FORMAT_ATTR(edge, "config:18" ); |
@@ -496,6 +643,9 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, | |||
496 | static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); | 643 | static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); |
497 | static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); | 644 | static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); |
498 | 645 | ||
646 | static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0); | ||
647 | static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0); | ||
648 | |||
499 | static struct event_constraint * | 649 | static struct event_constraint * |
500 | amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) | 650 | amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) |
501 | { | 651 | { |
@@ -561,8 +711,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev | |||
561 | return &amd_f15_PMC20; | 711 | return &amd_f15_PMC20; |
562 | } | 712 | } |
563 | case AMD_EVENT_NB: | 713 | case AMD_EVENT_NB: |
564 | /* not yet implemented */ | 714 | return __amd_get_nb_event_constraints(cpuc, event, |
565 | return &emptyconstraint; | 715 | amd_nb_event_constraint); |
566 | default: | 716 | default: |
567 | return &emptyconstraint; | 717 | return &emptyconstraint; |
568 | } | 718 | } |
@@ -587,6 +737,8 @@ static __initconst const struct x86_pmu amd_pmu = { | |||
587 | .schedule_events = x86_schedule_events, | 737 | .schedule_events = x86_schedule_events, |
588 | .eventsel = MSR_K7_EVNTSEL0, | 738 | .eventsel = MSR_K7_EVNTSEL0, |
589 | .perfctr = MSR_K7_PERFCTR0, | 739 | .perfctr = MSR_K7_PERFCTR0, |
740 | .addr_offset = amd_pmu_addr_offset, | ||
741 | .rdpmc_index = amd_pmu_rdpmc_index, | ||
590 | .event_map = amd_pmu_event_map, | 742 | .event_map = amd_pmu_event_map, |
591 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), | 743 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), |
592 | .num_counters = AMD64_NUM_COUNTERS, | 744 | .num_counters = AMD64_NUM_COUNTERS, |
@@ -608,7 +760,7 @@ static __initconst const struct x86_pmu amd_pmu = { | |||
608 | 760 | ||
609 | static int setup_event_constraints(void) | 761 | static int setup_event_constraints(void) |
610 | { | 762 | { |
611 | if (boot_cpu_data.x86 >= 0x15) | 763 | if (boot_cpu_data.x86 == 0x15) |
612 | x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; | 764 | x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; |
613 | return 0; | 765 | return 0; |
614 | } | 766 | } |
@@ -638,6 +790,23 @@ static int setup_perfctr_core(void) | |||
638 | return 0; | 790 | return 0; |
639 | } | 791 | } |
640 | 792 | ||
793 | static int setup_perfctr_nb(void) | ||
794 | { | ||
795 | if (!cpu_has_perfctr_nb) | ||
796 | return -ENODEV; | ||
797 | |||
798 | x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB; | ||
799 | |||
800 | if (cpu_has_perfctr_core) | ||
801 | amd_nb_event_constraint = &amd_NBPMC96; | ||
802 | else | ||
803 | amd_nb_event_constraint = &amd_NBPMC74; | ||
804 | |||
805 | printk(KERN_INFO "perf: AMD northbridge performance counters detected\n"); | ||
806 | |||
807 | return 0; | ||
808 | } | ||
809 | |||
641 | __init int amd_pmu_init(void) | 810 | __init int amd_pmu_init(void) |
642 | { | 811 | { |
643 | /* Performance-monitoring supported from K7 and later: */ | 812 | /* Performance-monitoring supported from K7 and later: */ |
@@ -648,6 +817,7 @@ __init int amd_pmu_init(void) | |||
648 | 817 | ||
649 | setup_event_constraints(); | 818 | setup_event_constraints(); |
650 | setup_perfctr_core(); | 819 | setup_perfctr_core(); |
820 | setup_perfctr_nb(); | ||
651 | 821 | ||
652 | /* Events are common for all AMDs */ | 822 | /* Events are common for all AMDs */ |
653 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, | 823 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, |
@@ -678,7 +848,7 @@ void amd_pmu_disable_virt(void) | |||
678 | * SVM is disabled the Guest-only bits still gets set and the counter | 848 | * SVM is disabled the Guest-only bits still gets set and the counter |
679 | * will not count anything. | 849 | * will not count anything. |
680 | */ | 850 | */ |
681 | cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; | 851 | cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; |
682 | 852 | ||
683 | /* Reload all events */ | 853 | /* Reload all events */ |
684 | x86_pmu_disable_all(); | 854 | x86_pmu_disable_all(); |
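
The cached offsets in perf_event_amd.c reduce to simple arithmetic. Putting numbers on it, and assuming setup_perfctr_core() rebases x86_pmu.eventsel to the 0xc0010200 core block as the layout comment implies: a core counter at index 3 gets offset 3 << 1 = 0x06, so its event select lives at 0xc0010206; an NB counter at index 7 (the amd_NBPMC96 mask 0x3C0 covers indexes 6..9) gets offset (0xc0010240 - 0xc0010200) + ((7 - 6) << 1) = 0x42, landing on 0xc0010242; and amd_pmu_rdpmc_index(7) = (7 - 6) + 6 = 7. The same rules restated as a standalone check, with the constants taken from the comment and constraint masks above:

	/* Sketch only: the bases and NB index ranges are assumptions drawn
	 * from the layout comment (0xF0 puts legacy NB counters at 4..7). */
	static int demo_amd_eventsel_offset(int index, bool core_ext)
	{
		int first = core_ext ? 6 : 4;		/* first NB index */
		int nb_base = core_ext ? 0x40 : 0x240;	/* NB ctl - eventsel */

		if (index >= first && index < first + 4)	/* NB counter */
			return nb_base + ((index - first) << 1);
		return core_ext ? index << 1 : index;	/* core counter */
	}
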
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 3286a92e662a..e280253f6f94 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
@@ -28,7 +28,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | |||
28 | { | 28 | { |
29 | seq_printf(m, | 29 | seq_printf(m, |
30 | "fdiv_bug\t: %s\n" | 30 | "fdiv_bug\t: %s\n" |
31 | "hlt_bug\t\t: %s\n" | ||
32 | "f00f_bug\t: %s\n" | 31 | "f00f_bug\t: %s\n" |
33 | "coma_bug\t: %s\n" | 32 | "coma_bug\t: %s\n" |
34 | "fpu\t\t: %s\n" | 33 | "fpu\t\t: %s\n" |
@@ -36,7 +35,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | |||
36 | "cpuid level\t: %d\n" | 35 | "cpuid level\t: %d\n" |
37 | "wp\t\t: %s\n", | 36 | "wp\t\t: %s\n", |
38 | c->fdiv_bug ? "yes" : "no", | 37 | c->fdiv_bug ? "yes" : "no", |
39 | c->hlt_works_ok ? "no" : "yes", | ||
40 | c->f00f_bug ? "yes" : "no", | 38 | c->f00f_bug ? "yes" : "no", |
41 | c->coma_bug ? "yes" : "no", | 39 | c->coma_bug ? "yes" : "no", |
42 | c->hard_math ? "yes" : "no", | 40 | c->hard_math ? "yes" : "no", |
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index d22d0c4edcfd..03a36321ec54 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c | |||
@@ -33,6 +33,9 @@ | |||
33 | 33 | ||
34 | #define VMWARE_PORT_CMD_GETVERSION 10 | 34 | #define VMWARE_PORT_CMD_GETVERSION 10 |
35 | #define VMWARE_PORT_CMD_GETHZ 45 | 35 | #define VMWARE_PORT_CMD_GETHZ 45 |
36 | #define VMWARE_PORT_CMD_GETVCPU_INFO 68 | ||
37 | #define VMWARE_PORT_CMD_LEGACY_X2APIC 3 | ||
38 | #define VMWARE_PORT_CMD_VCPU_RESERVED 31 | ||
36 | 39 | ||
37 | #define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \ | 40 | #define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \ |
38 | __asm__("inl (%%dx)" : \ | 41 | __asm__("inl (%%dx)" : \ |
@@ -125,10 +128,20 @@ static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c) | |||
125 | set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); | 128 | set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); |
126 | } | 129 | } |
127 | 130 | ||
131 | /* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */ | ||
132 | static bool __init vmware_legacy_x2apic_available(void) | ||
133 | { | ||
134 | uint32_t eax, ebx, ecx, edx; | ||
135 | VMWARE_PORT(GETVCPU_INFO, eax, ebx, ecx, edx); | ||
136 | return (eax & (1 << VMWARE_PORT_CMD_VCPU_RESERVED)) == 0 && | ||
137 | (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0; | ||
138 | } | ||
139 | |||
128 | const __refconst struct hypervisor_x86 x86_hyper_vmware = { | 140 | const __refconst struct hypervisor_x86 x86_hyper_vmware = { |
129 | .name = "VMware", | 141 | .name = "VMware", |
130 | .detect = vmware_platform, | 142 | .detect = vmware_platform, |
131 | .set_cpu_features = vmware_set_cpu_features, | 143 | .set_cpu_features = vmware_set_cpu_features, |
132 | .init_platform = vmware_platform_setup, | 144 | .init_platform = vmware_platform_setup, |
145 | .x2apic_available = vmware_legacy_x2apic_available, | ||
133 | }; | 146 | }; |
134 | EXPORT_SYMBOL(x86_hyper_vmware); | 147 | EXPORT_SYMBOL(x86_hyper_vmware); |
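
Despite their VMWARE_PORT_CMD_ prefixes, LEGACY_X2APIC (3) and VCPU_RESERVED (31) act as bit positions in the EAX word returned by the GETVCPU_INFO backdoor call, as the shifts in vmware_legacy_x2apic_available() show: bit 31 set marks the whole word invalid, so x2APIC without interrupt remapping is allowed only when bit 31 is clear and bit 3 is set. Open-coded, the test is:

	/* Equivalent open-coded check of the GETVCPU_INFO feature word: */
	static bool x2apic_ok(uint32_t eax)
	{
		return !(eax & (1u << 31)) &&	/* info word is valid */
			(eax & (1u << 3));	/* legacy x2APIC safe */
	}

For instance, eax == 0x00000008 passes, while eax == 0x80000008 fails because the reserved bit poisons the word.
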
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 6ed91d9980e2..8831176aa5ef 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -1091,11 +1091,18 @@ ENTRY(xen_failsafe_callback) | |||
1091 | _ASM_EXTABLE(4b,9b) | 1091 | _ASM_EXTABLE(4b,9b) |
1092 | ENDPROC(xen_failsafe_callback) | 1092 | ENDPROC(xen_failsafe_callback) |
1093 | 1093 | ||
1094 | BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK, | 1094 | BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR, |
1095 | xen_evtchn_do_upcall) | 1095 | xen_evtchn_do_upcall) |
1096 | 1096 | ||
1097 | #endif /* CONFIG_XEN */ | 1097 | #endif /* CONFIG_XEN */ |
1098 | 1098 | ||
1099 | #if IS_ENABLED(CONFIG_HYPERV) | ||
1100 | |||
1101 | BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR, | ||
1102 | hyperv_vector_handler) | ||
1103 | |||
1104 | #endif /* CONFIG_HYPERV */ | ||
1105 | |||
1099 | #ifdef CONFIG_FUNCTION_TRACER | 1106 | #ifdef CONFIG_FUNCTION_TRACER |
1100 | #ifdef CONFIG_DYNAMIC_FTRACE | 1107 | #ifdef CONFIG_DYNAMIC_FTRACE |
1101 | 1108 | ||
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index cb3c591339aa..048f2240f8e6 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -1454,11 +1454,16 @@ ENTRY(xen_failsafe_callback) | |||
1454 | CFI_ENDPROC | 1454 | CFI_ENDPROC |
1455 | END(xen_failsafe_callback) | 1455 | END(xen_failsafe_callback) |
1456 | 1456 | ||
1457 | apicinterrupt XEN_HVM_EVTCHN_CALLBACK \ | 1457 | apicinterrupt HYPERVISOR_CALLBACK_VECTOR \ |
1458 | xen_hvm_callback_vector xen_evtchn_do_upcall | 1458 | xen_hvm_callback_vector xen_evtchn_do_upcall |
1459 | 1459 | ||
1460 | #endif /* CONFIG_XEN */ | 1460 | #endif /* CONFIG_XEN */ |
1461 | 1461 | ||
1462 | #if IS_ENABLED(CONFIG_HYPERV) | ||
1463 | apicinterrupt HYPERVISOR_CALLBACK_VECTOR \ | ||
1464 | hyperv_callback_vector hyperv_vector_handler | ||
1465 | #endif /* CONFIG_HYPERV */ | ||
1466 | |||
1462 | /* | 1467 | /* |
1463 | * Some functions should be protected against kprobes | 1468 | * Some functions should be protected against kprobes |
1464 | */ | 1469 | */ |
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index c18f59d10101..6773c918b8cc 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/io_apic.h> | 18 | #include <asm/io_apic.h> |
19 | #include <asm/bios_ebda.h> | 19 | #include <asm/bios_ebda.h> |
20 | #include <asm/tlbflush.h> | 20 | #include <asm/tlbflush.h> |
21 | #include <asm/bootparam_utils.h> | ||
21 | 22 | ||
22 | static void __init i386_default_early_setup(void) | 23 | static void __init i386_default_early_setup(void) |
23 | { | 24 | { |
@@ -30,6 +31,8 @@ static void __init i386_default_early_setup(void) | |||
30 | 31 | ||
31 | void __init i386_start_kernel(void) | 32 | void __init i386_start_kernel(void) |
32 | { | 33 | { |
34 | sanitize_boot_params(&boot_params); | ||
35 | |||
33 | memblock_reserve(__pa_symbol(&_text), | 36 | memblock_reserve(__pa_symbol(&_text), |
34 | __pa_symbol(&__bss_stop) - __pa_symbol(&_text)); | 37 | __pa_symbol(&__bss_stop) - __pa_symbol(&_text)); |
35 | 38 | ||
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 037df57a99ac..849fc9e63c2f 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <asm/kdebug.h> | 25 | #include <asm/kdebug.h> |
26 | #include <asm/e820.h> | 26 | #include <asm/e820.h> |
27 | #include <asm/bios_ebda.h> | 27 | #include <asm/bios_ebda.h> |
28 | #include <asm/bootparam_utils.h> | ||
28 | 29 | ||
29 | static void __init zap_identity_mappings(void) | 30 | static void __init zap_identity_mappings(void) |
30 | { | 31 | { |
@@ -46,6 +47,7 @@ static void __init copy_bootdata(char *real_mode_data) | |||
46 | char * command_line; | 47 | char * command_line; |
47 | 48 | ||
48 | memcpy(&boot_params, real_mode_data, sizeof boot_params); | 49 | memcpy(&boot_params, real_mode_data, sizeof boot_params); |
50 | sanitize_boot_params(&boot_params); | ||
49 | if (boot_params.hdr.cmd_line_ptr) { | 51 | if (boot_params.hdr.cmd_line_ptr) { |
50 | command_line = __va(boot_params.hdr.cmd_line_ptr); | 52 | command_line = __va(boot_params.hdr.cmd_line_ptr); |
51 | memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); | 53 | memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); |
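
Both the 32-bit and 64-bit startup paths now scrub boot_params before trusting it. A minimal sketch of the sentinel idea behind sanitize_boot_params(), with an illustrative struct layout (the real fields live in bootparam.h and differ; only the sentinel convention is assumed here):

	#include <string.h>

	/* Illustrative layout only -- not the real struct boot_params. */
	struct boot_params_sketch {
		unsigned char sentinel;		/* non-zero: loader didn't zero the struct */
		unsigned char reserved[64];	/* fields a sloppy loader may leave dirty */
	};

	static void sanitize_boot_params_sketch(struct boot_params_sketch *bp)
	{
		/*
		 * A well-behaved bootloader zeroes the whole structure, which
		 * leaves the sentinel clear. If it is still set, the reserved
		 * fields may contain garbage, so clear them defensively before
		 * any code reads them.
		 */
		if (bp->sentinel)
			memset(bp->reserved, 0, sizeof(bp->reserved));
	}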
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index c8932c79e78b..3c3f58a0808f 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -307,36 +307,45 @@ default_entry: | |||
307 | movl %eax,%cr0 | 307 | movl %eax,%cr0 |
308 | 308 | ||
309 | /* | 309 | /* |
310 | * New page tables may be in 4Mbyte page mode and may | 310 | * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave |
311 | * be using the global pages. | 311 | * bits like NT set. This would confuse the debugger if this code is traced. So |
312 | * initialize them properly now before switching to protected mode. That means | ||
313 | * DF in particular (even though we have cleared it earlier after copying the | ||
314 | * command line) because GCC expects it. | ||
315 | */ | ||
316 | pushl $0 | ||
317 | popfl | ||
318 | |||
319 | /* | ||
320 | * New page tables may be in 4Mbyte page mode and may be using the global pages. | ||
312 | * | 321 | * |
313 | * NOTE! If we are on a 486 we may have no cr4 at all! | 322 | * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists |
314 | * Specifically, cr4 exists if and only if CPUID exists | 323 | * if and only if CPUID exists and has flags other than the FPU flag set. |
315 | * and has flags other than the FPU flag set. | ||
316 | */ | 324 | */ |
325 | movl $-1,pa(X86_CPUID) # preset CPUID level | ||
317 | movl $X86_EFLAGS_ID,%ecx | 326 | movl $X86_EFLAGS_ID,%ecx |
318 | pushl %ecx | 327 | pushl %ecx |
319 | popfl | 328 | popfl # set EFLAGS=ID |
320 | pushfl | 329 | pushfl |
321 | popl %eax | 330 | popl %eax # get EFLAGS |
322 | pushl $0 | 331 | testl $X86_EFLAGS_ID,%eax # did EFLAGS.ID remain set? |
323 | popfl | 332 | jz enable_paging # hw disallowed setting of ID bit |
324 | pushfl | 333 | # which means no CPUID and no CR4 |
325 | popl %edx | 334 | |
326 | xorl %edx,%eax | 335 | xorl %eax,%eax |
327 | testl %ecx,%eax | 336 | cpuid |
328 | jz 6f # No ID flag = no CPUID = no CR4 | 337 | movl %eax,pa(X86_CPUID) # save largest std CPUID function |
329 | 338 | ||
330 | movl $1,%eax | 339 | movl $1,%eax |
331 | cpuid | 340 | cpuid |
332 | andl $~1,%edx # Ignore CPUID.FPU | 341 | andl $~1,%edx # Ignore CPUID.FPU |
333 | jz 6f # No flags or only CPUID.FPU = no CR4 | 342 | jz enable_paging # No flags or only CPUID.FPU = no CR4 |
334 | 343 | ||
335 | movl pa(mmu_cr4_features),%eax | 344 | movl pa(mmu_cr4_features),%eax |
336 | movl %eax,%cr4 | 345 | movl %eax,%cr4 |
337 | 346 | ||
338 | testb $X86_CR4_PAE, %al # check if PAE is enabled | 347 | testb $X86_CR4_PAE, %al # check if PAE is enabled |
339 | jz 6f | 348 | jz enable_paging |
340 | 349 | ||
341 | /* Check if extended functions are implemented */ | 350 | /* Check if extended functions are implemented */ |
342 | movl $0x80000000, %eax | 351 | movl $0x80000000, %eax |
@@ -344,7 +353,7 @@ default_entry: | |||
344 | /* Value must be in the range 0x80000001 to 0x8000ffff */ | 353 | /* Value must be in the range 0x80000001 to 0x8000ffff */ |
345 | subl $0x80000001, %eax | 354 | subl $0x80000001, %eax |
346 | cmpl $(0x8000ffff-0x80000001), %eax | 355 | cmpl $(0x8000ffff-0x80000001), %eax |
347 | ja 6f | 356 | ja enable_paging |
348 | 357 | ||
349 | /* Clear bogus XD_DISABLE bits */ | 358 | /* Clear bogus XD_DISABLE bits */ |
350 | call verify_cpu | 359 | call verify_cpu |
@@ -353,7 +362,7 @@ default_entry: | |||
353 | cpuid | 362 | cpuid |
354 | /* Execute Disable bit supported? */ | 363 | /* Execute Disable bit supported? */ |
355 | btl $(X86_FEATURE_NX & 31), %edx | 364 | btl $(X86_FEATURE_NX & 31), %edx |
356 | jnc 6f | 365 | jnc enable_paging |
357 | 366 | ||
358 | /* Setup EFER (Extended Feature Enable Register) */ | 367 | /* Setup EFER (Extended Feature Enable Register) */ |
359 | movl $MSR_EFER, %ecx | 368 | movl $MSR_EFER, %ecx |
@@ -363,7 +372,7 @@ default_entry: | |||
363 | /* Make changes effective */ | 372 | /* Make changes effective */ |
364 | wrmsr | 373 | wrmsr |
365 | 374 | ||
366 | 6: | 375 | enable_paging: |
367 | 376 | ||
368 | /* | 377 | /* |
369 | * Enable paging | 378 | * Enable paging |
@@ -378,14 +387,6 @@ default_entry: | |||
378 | addl $__PAGE_OFFSET, %esp | 387 | addl $__PAGE_OFFSET, %esp |
379 | 388 | ||
380 | /* | 389 | /* |
381 | * Initialize eflags. Some BIOS's leave bits like NT set. This would | ||
382 | * confuse the debugger if this code is traced. | ||
383 | * XXX - best to initialize before switching to protected mode. | ||
384 | */ | ||
385 | pushl $0 | ||
386 | popfl | ||
387 | |||
388 | /* | ||
389 | * start system 32-bit setup. We need to re-do some of the things done | 390 | * start system 32-bit setup. We need to re-do some of the things done |
390 | * in 16-bit mode for the "real" operations. | 391 | * in 16-bit mode for the "real" operations. |
391 | */ | 392 | */ |
@@ -394,31 +395,11 @@ default_entry: | |||
394 | jz 1f # Did we do this already? | 395 | jz 1f # Did we do this already? |
395 | call *%eax | 396 | call *%eax |
396 | 1: | 397 | 1: |
397 | 398 | ||
398 | /* check if it is 486 or 386. */ | ||
399 | /* | 399 | /* |
400 | * XXX - this does a lot of unnecessary setup. Alignment checks don't | 400 | * Check if it is 486 |
401 | * apply at our cpl of 0 and the stack ought to be aligned already, and | ||
402 | * we don't need to preserve eflags. | ||
403 | */ | 401 | */ |
404 | movl $-1,X86_CPUID # -1 for no CPUID initially | 402 | cmpl $-1,X86_CPUID |
405 | movb $3,X86 # at least 386 | ||
406 | pushfl # push EFLAGS | ||
407 | popl %eax # get EFLAGS | ||
408 | movl %eax,%ecx # save original EFLAGS | ||
409 | xorl $0x240000,%eax # flip AC and ID bits in EFLAGS | ||
410 | pushl %eax # copy to EFLAGS | ||
411 | popfl # set EFLAGS | ||
412 | pushfl # get new EFLAGS | ||
413 | popl %eax # put it in eax | ||
414 | xorl %ecx,%eax # change in flags | ||
415 | pushl %ecx # restore original EFLAGS | ||
416 | popfl | ||
417 | testl $0x40000,%eax # check if AC bit changed | ||
418 | je is386 | ||
419 | |||
420 | movb $4,X86 # at least 486 | ||
421 | testl $0x200000,%eax # check if ID bit changed | ||
422 | je is486 | 403 | je is486 |
423 | 404 | ||
424 | /* get vendor info */ | 405 | /* get vendor info */ |
@@ -444,11 +425,10 @@ default_entry: | |||
444 | movb %cl,X86_MASK | 425 | movb %cl,X86_MASK |
445 | movl %edx,X86_CAPABILITY | 426 | movl %edx,X86_CAPABILITY |
446 | 427 | ||
447 | is486: movl $0x50022,%ecx # set AM, WP, NE and MP | 428 | is486: |
448 | jmp 2f | 429 | movb $4,X86 |
449 | 430 | movl $0x50022,%ecx # set AM, WP, NE and MP | |
450 | is386: movl $2,%ecx # set MP | 431 | movl %cr0,%eax |
451 | 2: movl %cr0,%eax | ||
452 | andl $0x80000011,%eax # Save PG,PE,ET | 432 | andl $0x80000011,%eax # Save PG,PE,ET |
453 | orl %ecx,%eax | 433 | orl %ecx,%eax |
454 | movl %eax,%cr0 | 434 | movl %eax,%cr0 |
@@ -473,7 +453,6 @@ is386: movl $2,%ecx # set MP | |||
473 | xorl %eax,%eax # Clear LDT | 453 | xorl %eax,%eax # Clear LDT |
474 | lldt %ax | 454 | lldt %ax |
475 | 455 | ||
476 | cld # gcc2 wants the direction flag cleared at all times | ||
477 | pushl $0 # fake return address for unwinder | 456 | pushl $0 # fake return address for unwinder |
478 | jmp *(initial_code) | 457 | jmp *(initial_code) |
479 | 458 | ||
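
The rewritten 32-bit startup drops the old AC/ID flag-toggling dance for 386/486 detection and keeps a single question: does EFLAGS.ID stick when written? A C rendering of that probe for reference, assuming a 32-bit GCC-style toolchain (the kernel does this in assembly; X86_EFLAGS_ID is bit 21, and a production version would restore the original EFLAGS afterwards):

	#define X86_EFLAGS_ID (1UL << 21)

	/* Non-zero iff the CPU lets software toggle EFLAGS.ID, i.e. CPUID exists. */
	static int has_cpuid(void)
	{
		unsigned long flipped, readback;

		asm volatile("pushf\n\t"
			     "pop %0\n\t"		/* read current EFLAGS */
			     "xor %2, %0\n\t"		/* flip the ID bit */
			     "push %0\n\t"
			     "popf\n\t"			/* try to write it back */
			     "pushf\n\t"
			     "pop %1"			/* read what actually stuck */
			     : "=&r" (flipped), "=&r" (readback)
			     : "r" ((unsigned long)X86_EFLAGS_ID)
			     : "cc");
		return !((flipped ^ readback) & X86_EFLAGS_ID);
	}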
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index e28670f9a589..da85a8e830a1 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -478,7 +478,7 @@ static int hpet_msi_next_event(unsigned long delta, | |||
478 | 478 | ||
479 | static int hpet_setup_msi_irq(unsigned int irq) | 479 | static int hpet_setup_msi_irq(unsigned int irq) |
480 | { | 480 | { |
481 | if (arch_setup_hpet_msi(irq, hpet_blockid)) { | 481 | if (x86_msi.setup_hpet_msi(irq, hpet_blockid)) { |
482 | destroy_irq(irq); | 482 | destroy_irq(irq); |
483 | return -EINVAL; | 483 | return -EINVAL; |
484 | } | 484 | } |
diff --git a/arch/x86/kernel/kprobes/Makefile b/arch/x86/kernel/kprobes/Makefile new file mode 100644 index 000000000000..0d33169cc1a2 --- /dev/null +++ b/arch/x86/kernel/kprobes/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | # | ||
2 | # Makefile for kernel probes | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_KPROBES) += core.o | ||
6 | obj-$(CONFIG_OPTPROBES) += opt.o | ||
7 | obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o | ||
diff --git a/arch/x86/kernel/kprobes-common.h b/arch/x86/kernel/kprobes/common.h index 3230b68ef29a..2e9d4b5af036 100644 --- a/arch/x86/kernel/kprobes-common.h +++ b/arch/x86/kernel/kprobes/common.h | |||
@@ -99,4 +99,15 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig | |||
99 | return addr; | 99 | return addr; |
100 | } | 100 | } |
101 | #endif | 101 | #endif |
102 | |||
103 | #ifdef CONFIG_KPROBES_ON_FTRACE | ||
104 | extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
105 | struct kprobe_ctlblk *kcb); | ||
106 | #else | ||
107 | static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
108 | struct kprobe_ctlblk *kcb) | ||
109 | { | ||
110 | return 0; | ||
111 | } | ||
112 | #endif | ||
102 | #endif | 113 | #endif |
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes/core.c index 57916c0d3cf6..e124554598ee 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes/core.c | |||
@@ -58,7 +58,7 @@ | |||
58 | #include <asm/insn.h> | 58 | #include <asm/insn.h> |
59 | #include <asm/debugreg.h> | 59 | #include <asm/debugreg.h> |
60 | 60 | ||
61 | #include "kprobes-common.h" | 61 | #include "common.h" |
62 | 62 | ||
63 | void jprobe_return_end(void); | 63 | void jprobe_return_end(void); |
64 | 64 | ||
@@ -78,7 +78,7 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | |||
78 | * Groups, and some special opcodes can not boost. | 78 | * Groups, and some special opcodes can not boost. |
79 | * This is non-const and volatile to keep gcc from statically | 79 | * This is non-const and volatile to keep gcc from statically |
80 | * optimizing it out, as variable_test_bit makes gcc think only | 80 | * optimizing it out, as variable_test_bit makes gcc think only |
81 | * *(unsigned long*) is used. | 81 | * *(unsigned long*) is used. |
82 | */ | 82 | */ |
83 | static volatile u32 twobyte_is_boostable[256 / 32] = { | 83 | static volatile u32 twobyte_is_boostable[256 / 32] = { |
84 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ | 84 | /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ |
@@ -117,7 +117,7 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) | |||
117 | struct __arch_relative_insn { | 117 | struct __arch_relative_insn { |
118 | u8 op; | 118 | u8 op; |
119 | s32 raddr; | 119 | s32 raddr; |
120 | } __attribute__((packed)) *insn; | 120 | } __packed *insn; |
121 | 121 | ||
122 | insn = (struct __arch_relative_insn *)from; | 122 | insn = (struct __arch_relative_insn *)from; |
123 | insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); | 123 | insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); |
@@ -541,23 +541,6 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb | |||
541 | return 1; | 541 | return 1; |
542 | } | 542 | } |
543 | 543 | ||
544 | #ifdef KPROBES_CAN_USE_FTRACE | ||
545 | static void __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
546 | struct kprobe_ctlblk *kcb) | ||
547 | { | ||
548 | /* | ||
549 | * Emulate singlestep (and also recover regs->ip) | ||
550 | * as if there is a 5byte nop | ||
551 | */ | ||
552 | regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; | ||
553 | if (unlikely(p->post_handler)) { | ||
554 | kcb->kprobe_status = KPROBE_HIT_SSDONE; | ||
555 | p->post_handler(p, regs, 0); | ||
556 | } | ||
557 | __this_cpu_write(current_kprobe, NULL); | ||
558 | } | ||
559 | #endif | ||
560 | |||
561 | /* | 544 | /* |
562 | * Interrupts are disabled on entry as trap3 is an interrupt gate and they | 545 | * Interrupts are disabled on entry as trap3 is an interrupt gate and they |
563 | * remain disabled throughout this function. | 546 | * remain disabled throughout this function. |
@@ -616,13 +599,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
616 | } else if (kprobe_running()) { | 599 | } else if (kprobe_running()) { |
617 | p = __this_cpu_read(current_kprobe); | 600 | p = __this_cpu_read(current_kprobe); |
618 | if (p->break_handler && p->break_handler(p, regs)) { | 601 | if (p->break_handler && p->break_handler(p, regs)) { |
619 | #ifdef KPROBES_CAN_USE_FTRACE | 602 | if (!skip_singlestep(p, regs, kcb)) |
620 | if (kprobe_ftrace(p)) { | 603 | setup_singlestep(p, regs, kcb, 0); |
621 | skip_singlestep(p, regs, kcb); | ||
622 | return 1; | ||
623 | } | ||
624 | #endif | ||
625 | setup_singlestep(p, regs, kcb, 0); | ||
626 | return 1; | 604 | return 1; |
627 | } | 605 | } |
628 | } /* else: not a kprobe fault; let the kernel handle it */ | 606 | } /* else: not a kprobe fault; let the kernel handle it */ |
@@ -1075,50 +1053,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |||
1075 | return 0; | 1053 | return 0; |
1076 | } | 1054 | } |
1077 | 1055 | ||
1078 | #ifdef KPROBES_CAN_USE_FTRACE | ||
1079 | /* Ftrace callback handler for kprobes */ | ||
1080 | void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, | ||
1081 | struct ftrace_ops *ops, struct pt_regs *regs) | ||
1082 | { | ||
1083 | struct kprobe *p; | ||
1084 | struct kprobe_ctlblk *kcb; | ||
1085 | unsigned long flags; | ||
1086 | |||
1087 | /* Disable irq for emulating a breakpoint and avoiding preempt */ | ||
1088 | local_irq_save(flags); | ||
1089 | |||
1090 | p = get_kprobe((kprobe_opcode_t *)ip); | ||
1091 | if (unlikely(!p) || kprobe_disabled(p)) | ||
1092 | goto end; | ||
1093 | |||
1094 | kcb = get_kprobe_ctlblk(); | ||
1095 | if (kprobe_running()) { | ||
1096 | kprobes_inc_nmissed_count(p); | ||
1097 | } else { | ||
1098 | /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ | ||
1099 | regs->ip = ip + sizeof(kprobe_opcode_t); | ||
1100 | |||
1101 | __this_cpu_write(current_kprobe, p); | ||
1102 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | ||
1103 | if (!p->pre_handler || !p->pre_handler(p, regs)) | ||
1104 | skip_singlestep(p, regs, kcb); | ||
1105 | /* | ||
1106 | * If pre_handler returns !0, it sets regs->ip and | ||
1107 | * resets current kprobe. | ||
1108 | */ | ||
1109 | } | ||
1110 | end: | ||
1111 | local_irq_restore(flags); | ||
1112 | } | ||
1113 | |||
1114 | int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p) | ||
1115 | { | ||
1116 | p->ainsn.insn = NULL; | ||
1117 | p->ainsn.boostable = -1; | ||
1118 | return 0; | ||
1119 | } | ||
1120 | #endif | ||
1121 | |||
1122 | int __init arch_init_kprobes(void) | 1056 | int __init arch_init_kprobes(void) |
1123 | { | 1057 | { |
1124 | return arch_init_optprobes(); | 1058 | return arch_init_optprobes(); |
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c new file mode 100644 index 000000000000..23ef5c556f06 --- /dev/null +++ b/arch/x86/kernel/kprobes/ftrace.c | |||
@@ -0,0 +1,93 @@ | |||
1 | /* | ||
2 | * Dynamic Ftrace based Kprobes Optimization | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) Hitachi Ltd., 2012 | ||
19 | */ | ||
20 | #include <linux/kprobes.h> | ||
21 | #include <linux/ptrace.h> | ||
22 | #include <linux/hardirq.h> | ||
23 | #include <linux/preempt.h> | ||
24 | #include <linux/ftrace.h> | ||
25 | |||
26 | #include "common.h" | ||
27 | |||
28 | static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
29 | struct kprobe_ctlblk *kcb) | ||
30 | { | ||
31 | /* | ||
32 | * Emulate singlestep (and also recover regs->ip) | ||
33 | * as if there is a 5byte nop | ||
34 | */ | ||
35 | regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; | ||
36 | if (unlikely(p->post_handler)) { | ||
37 | kcb->kprobe_status = KPROBE_HIT_SSDONE; | ||
38 | p->post_handler(p, regs, 0); | ||
39 | } | ||
40 | __this_cpu_write(current_kprobe, NULL); | ||
41 | return 1; | ||
42 | } | ||
43 | |||
44 | int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
45 | struct kprobe_ctlblk *kcb) | ||
46 | { | ||
47 | if (kprobe_ftrace(p)) | ||
48 | return __skip_singlestep(p, regs, kcb); | ||
49 | else | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | /* Ftrace callback handler for kprobes */ | ||
54 | void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, | ||
55 | struct ftrace_ops *ops, struct pt_regs *regs) | ||
56 | { | ||
57 | struct kprobe *p; | ||
58 | struct kprobe_ctlblk *kcb; | ||
59 | unsigned long flags; | ||
60 | |||
61 | /* Disable irq for emulating a breakpoint and avoiding preempt */ | ||
62 | local_irq_save(flags); | ||
63 | |||
64 | p = get_kprobe((kprobe_opcode_t *)ip); | ||
65 | if (unlikely(!p) || kprobe_disabled(p)) | ||
66 | goto end; | ||
67 | |||
68 | kcb = get_kprobe_ctlblk(); | ||
69 | if (kprobe_running()) { | ||
70 | kprobes_inc_nmissed_count(p); | ||
71 | } else { | ||
72 | /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ | ||
73 | regs->ip = ip + sizeof(kprobe_opcode_t); | ||
74 | |||
75 | __this_cpu_write(current_kprobe, p); | ||
76 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | ||
77 | if (!p->pre_handler || !p->pre_handler(p, regs)) | ||
78 | __skip_singlestep(p, regs, kcb); | ||
79 | /* | ||
80 | * If pre_handler returns !0, it sets regs->ip and | ||
81 | * resets current kprobe. | ||
82 | */ | ||
83 | } | ||
84 | end: | ||
85 | local_irq_restore(flags); | ||
86 | } | ||
87 | |||
88 | int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p) | ||
89 | { | ||
90 | p->ainsn.insn = NULL; | ||
91 | p->ainsn.boostable = -1; | ||
92 | return 0; | ||
93 | } | ||
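
With CONFIG_KPROBES_ON_FTRACE, a probe whose address coincides with a function's ftrace site is dispatched through kprobe_ftrace_handler() above instead of an int3 breakpoint; users of the kprobes API see no difference. A minimal module-style sketch of such a registration (the probed symbol is only an example):

	#include <linux/kprobes.h>
	#include <linux/module.h>

	static int my_pre(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("hit %s, ip=%lx\n", p->symbol_name, regs->ip);
		return 0;	/* 0: let skip_singlestep()/single-stepping proceed */
	}

	static struct kprobe my_kp = {
		.symbol_name	= "do_fork",	/* example target at a function entry */
		.pre_handler	= my_pre,
	};

	static int __init my_init(void)
	{
		return register_kprobe(&my_kp);	/* lands on the mcount/fentry site */
	}

	static void __exit my_exit(void)
	{
		unregister_kprobe(&my_kp);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");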
diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes/opt.c index c5e410eed403..76dc6f095724 100644 --- a/arch/x86/kernel/kprobes-opt.c +++ b/arch/x86/kernel/kprobes/opt.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <asm/insn.h> | 37 | #include <asm/insn.h> |
38 | #include <asm/debugreg.h> | 38 | #include <asm/debugreg.h> |
39 | 39 | ||
40 | #include "kprobes-common.h" | 40 | #include "common.h" |
41 | 41 | ||
42 | unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) | 42 | unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) |
43 | { | 43 | { |
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 9c2bd8bd4b4c..2b44ea5f269d 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -505,6 +505,7 @@ static bool __init kvm_detect(void) | |||
505 | const struct hypervisor_x86 x86_hyper_kvm __refconst = { | 505 | const struct hypervisor_x86 x86_hyper_kvm __refconst = { |
506 | .name = "KVM", | 506 | .name = "KVM", |
507 | .detect = kvm_detect, | 507 | .detect = kvm_detect, |
508 | .x2apic_available = kvm_para_available, | ||
508 | }; | 509 | }; |
509 | EXPORT_SYMBOL_GPL(x86_hyper_kvm); | 510 | EXPORT_SYMBOL_GPL(x86_hyper_kvm); |
510 | 511 | ||
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 2ed787f15bf0..14ae10031ff0 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -268,13 +268,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
268 | unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; | 268 | unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; |
269 | EXPORT_SYMBOL(boot_option_idle_override); | 269 | EXPORT_SYMBOL(boot_option_idle_override); |
270 | 270 | ||
271 | /* | 271 | static void (*x86_idle)(void); |
272 | * Powermanagement idle function, if any.. | ||
273 | */ | ||
274 | void (*pm_idle)(void); | ||
275 | #ifdef CONFIG_APM_MODULE | ||
276 | EXPORT_SYMBOL(pm_idle); | ||
277 | #endif | ||
278 | 272 | ||
279 | #ifndef CONFIG_SMP | 273 | #ifndef CONFIG_SMP |
280 | static inline void play_dead(void) | 274 | static inline void play_dead(void) |
@@ -351,7 +345,7 @@ void cpu_idle(void) | |||
351 | rcu_idle_enter(); | 345 | rcu_idle_enter(); |
352 | 346 | ||
353 | if (cpuidle_idle_call()) | 347 | if (cpuidle_idle_call()) |
354 | pm_idle(); | 348 | x86_idle(); |
355 | 349 | ||
356 | rcu_idle_exit(); | 350 | rcu_idle_exit(); |
357 | start_critical_timings(); | 351 | start_critical_timings(); |
@@ -375,7 +369,6 @@ void cpu_idle(void) | |||
375 | */ | 369 | */ |
376 | void default_idle(void) | 370 | void default_idle(void) |
377 | { | 371 | { |
378 | trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id()); | ||
379 | trace_cpu_idle_rcuidle(1, smp_processor_id()); | 372 | trace_cpu_idle_rcuidle(1, smp_processor_id()); |
380 | current_thread_info()->status &= ~TS_POLLING; | 373 | current_thread_info()->status &= ~TS_POLLING; |
381 | /* | 374 | /* |
@@ -389,21 +382,22 @@ void default_idle(void) | |||
389 | else | 382 | else |
390 | local_irq_enable(); | 383 | local_irq_enable(); |
391 | current_thread_info()->status |= TS_POLLING; | 384 | current_thread_info()->status |= TS_POLLING; |
392 | trace_power_end_rcuidle(smp_processor_id()); | ||
393 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); | 385 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); |
394 | } | 386 | } |
395 | #ifdef CONFIG_APM_MODULE | 387 | #ifdef CONFIG_APM_MODULE |
396 | EXPORT_SYMBOL(default_idle); | 388 | EXPORT_SYMBOL(default_idle); |
397 | #endif | 389 | #endif |
398 | 390 | ||
399 | bool set_pm_idle_to_default(void) | 391 | #ifdef CONFIG_XEN |
392 | bool xen_set_default_idle(void) | ||
400 | { | 393 | { |
401 | bool ret = !!pm_idle; | 394 | bool ret = !!x86_idle; |
402 | 395 | ||
403 | pm_idle = default_idle; | 396 | x86_idle = default_idle; |
404 | 397 | ||
405 | return ret; | 398 | return ret; |
406 | } | 399 | } |
400 | #endif | ||
407 | void stop_this_cpu(void *dummy) | 401 | void stop_this_cpu(void *dummy) |
408 | { | 402 | { |
409 | local_irq_disable(); | 403 | local_irq_disable(); |
@@ -413,31 +407,8 @@ void stop_this_cpu(void *dummy) | |||
413 | set_cpu_online(smp_processor_id(), false); | 407 | set_cpu_online(smp_processor_id(), false); |
414 | disable_local_APIC(); | 408 | disable_local_APIC(); |
415 | 409 | ||
416 | for (;;) { | 410 | for (;;) |
417 | if (hlt_works(smp_processor_id())) | 411 | halt(); |
418 | halt(); | ||
419 | } | ||
420 | } | ||
421 | |||
422 | /* Default MONITOR/MWAIT with no hints, used for default C1 state */ | ||
423 | static void mwait_idle(void) | ||
424 | { | ||
425 | if (!need_resched()) { | ||
426 | trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id()); | ||
427 | trace_cpu_idle_rcuidle(1, smp_processor_id()); | ||
428 | if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) | ||
429 | clflush((void *)¤t_thread_info()->flags); | ||
430 | |||
431 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | ||
432 | smp_mb(); | ||
433 | if (!need_resched()) | ||
434 | __sti_mwait(0, 0); | ||
435 | else | ||
436 | local_irq_enable(); | ||
437 | trace_power_end_rcuidle(smp_processor_id()); | ||
438 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); | ||
439 | } else | ||
440 | local_irq_enable(); | ||
441 | } | 412 | } |
442 | 413 | ||
443 | /* | 414 | /* |
@@ -447,62 +418,13 @@ static void mwait_idle(void) | |||
447 | */ | 418 | */ |
448 | static void poll_idle(void) | 419 | static void poll_idle(void) |
449 | { | 420 | { |
450 | trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id()); | ||
451 | trace_cpu_idle_rcuidle(0, smp_processor_id()); | 421 | trace_cpu_idle_rcuidle(0, smp_processor_id()); |
452 | local_irq_enable(); | 422 | local_irq_enable(); |
453 | while (!need_resched()) | 423 | while (!need_resched()) |
454 | cpu_relax(); | 424 | cpu_relax(); |
455 | trace_power_end_rcuidle(smp_processor_id()); | ||
456 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); | 425 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); |
457 | } | 426 | } |
458 | 427 | ||
459 | /* | ||
460 | * mwait selection logic: | ||
461 | * | ||
462 | * It depends on the CPU. For AMD CPUs that support MWAIT this is | ||
463 | * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings | ||
464 | * then depend on a clock divisor and current Pstate of the core. If | ||
465 | * all cores of a processor are in halt state (C1) the processor can | ||
466 | * enter the C1E (C1 enhanced) state. If mwait is used this will never | ||
467 | * happen. | ||
468 | * | ||
469 | * idle=mwait overrides this decision and forces the usage of mwait. | ||
470 | */ | ||
471 | |||
472 | #define MWAIT_INFO 0x05 | ||
473 | #define MWAIT_ECX_EXTENDED_INFO 0x01 | ||
474 | #define MWAIT_EDX_C1 0xf0 | ||
475 | |||
476 | int mwait_usable(const struct cpuinfo_x86 *c) | ||
477 | { | ||
478 | u32 eax, ebx, ecx, edx; | ||
479 | |||
480 | /* Use mwait if idle=mwait boot option is given */ | ||
481 | if (boot_option_idle_override == IDLE_FORCE_MWAIT) | ||
482 | return 1; | ||
483 | |||
484 | /* | ||
485 | * Any idle= boot option other than idle=mwait means that we must not | ||
486 | * use mwait. Eg: idle=halt or idle=poll or idle=nomwait | ||
487 | */ | ||
488 | if (boot_option_idle_override != IDLE_NO_OVERRIDE) | ||
489 | return 0; | ||
490 | |||
491 | if (c->cpuid_level < MWAIT_INFO) | ||
492 | return 0; | ||
493 | |||
494 | cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx); | ||
495 | /* Check whether EDX has extended info about MWAIT */ | ||
496 | if (!(ecx & MWAIT_ECX_EXTENDED_INFO)) | ||
497 | return 1; | ||
498 | |||
499 | /* | ||
500 | * edx enumerates MONITOR/MWAIT extensions. Check whether | ||
501 | * C1 supports MWAIT | ||
502 | */ | ||
503 | return (edx & MWAIT_EDX_C1); | ||
504 | } | ||
505 | |||
506 | bool amd_e400_c1e_detected; | 428 | bool amd_e400_c1e_detected; |
507 | EXPORT_SYMBOL(amd_e400_c1e_detected); | 429 | EXPORT_SYMBOL(amd_e400_c1e_detected); |
508 | 430 | ||
@@ -567,31 +489,24 @@ static void amd_e400_idle(void) | |||
567 | void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) | 489 | void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) |
568 | { | 490 | { |
569 | #ifdef CONFIG_SMP | 491 | #ifdef CONFIG_SMP |
570 | if (pm_idle == poll_idle && smp_num_siblings > 1) { | 492 | if (x86_idle == poll_idle && smp_num_siblings > 1) |
571 | pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n"); | 493 | pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n"); |
572 | } | ||
573 | #endif | 494 | #endif |
574 | if (pm_idle) | 495 | if (x86_idle) |
575 | return; | 496 | return; |
576 | 497 | ||
577 | if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) { | 498 | if (cpu_has_amd_erratum(amd_erratum_400)) { |
578 | /* | ||
579 | * One CPU supports mwait => All CPUs supports mwait | ||
580 | */ | ||
581 | pr_info("using mwait in idle threads\n"); | ||
582 | pm_idle = mwait_idle; | ||
583 | } else if (cpu_has_amd_erratum(amd_erratum_400)) { | ||
584 | /* E400: APIC timer interrupt does not wake up CPU from C1e */ | 499 | /* E400: APIC timer interrupt does not wake up CPU from C1e */ |
585 | pr_info("using AMD E400 aware idle routine\n"); | 500 | pr_info("using AMD E400 aware idle routine\n"); |
586 | pm_idle = amd_e400_idle; | 501 | x86_idle = amd_e400_idle; |
587 | } else | 502 | } else |
588 | pm_idle = default_idle; | 503 | x86_idle = default_idle; |
589 | } | 504 | } |
590 | 505 | ||
591 | void __init init_amd_e400_c1e_mask(void) | 506 | void __init init_amd_e400_c1e_mask(void) |
592 | { | 507 | { |
593 | /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */ | 508 | /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */ |
594 | if (pm_idle == amd_e400_idle) | 509 | if (x86_idle == amd_e400_idle) |
595 | zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL); | 510 | zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL); |
596 | } | 511 | } |
597 | 512 | ||
@@ -602,11 +517,8 @@ static int __init idle_setup(char *str) | |||
602 | 517 | ||
603 | if (!strcmp(str, "poll")) { | 518 | if (!strcmp(str, "poll")) { |
604 | pr_info("using polling idle threads\n"); | 519 | pr_info("using polling idle threads\n"); |
605 | pm_idle = poll_idle; | 520 | x86_idle = poll_idle; |
606 | boot_option_idle_override = IDLE_POLL; | 521 | boot_option_idle_override = IDLE_POLL; |
607 | } else if (!strcmp(str, "mwait")) { | ||
608 | boot_option_idle_override = IDLE_FORCE_MWAIT; | ||
609 | WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n"); | ||
610 | } else if (!strcmp(str, "halt")) { | 522 | } else if (!strcmp(str, "halt")) { |
611 | /* | 523 | /* |
612 | * When the boot option of idle=halt is added, halt is | 524 | * When the boot option of idle=halt is added, halt is |
@@ -615,7 +527,7 @@ static int __init idle_setup(char *str) | |||
615 | * To continue to load the CPU idle driver, don't touch | 527 | * To continue to load the CPU idle driver, don't touch |
616 | * the boot_option_idle_override. | 528 | * the boot_option_idle_override. |
617 | */ | 529 | */ |
618 | pm_idle = default_idle; | 530 | x86_idle = default_idle; |
619 | boot_option_idle_override = IDLE_HALT; | 531 | boot_option_idle_override = IDLE_HALT; |
620 | } else if (!strcmp(str, "nomwait")) { | 532 | } else if (!strcmp(str, "nomwait")) { |
621 | /* | 533 | /* |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index b629bbe0d9bd..29a8120e6fe8 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/perf_event.h> | 22 | #include <linux/perf_event.h> |
23 | #include <linux/hw_breakpoint.h> | 23 | #include <linux/hw_breakpoint.h> |
24 | #include <linux/rcupdate.h> | 24 | #include <linux/rcupdate.h> |
25 | #include <linux/module.h> | 25 | #include <linux/export.h> |
26 | #include <linux/context_tracking.h> | 26 | #include <linux/context_tracking.h> |
27 | 27 | ||
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 801602b5d745..2e8f3d3b5641 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c | |||
@@ -149,7 +149,6 @@ unsigned long mach_get_cmos_time(void) | |||
149 | if (century) { | 149 | if (century) { |
150 | century = bcd2bin(century); | 150 | century = bcd2bin(century); |
151 | year += century * 100; | 151 | year += century * 100; |
152 | printk(KERN_INFO "Extended CMOS year: %d\n", century * 100); | ||
153 | } else | 152 | } else |
154 | year += CMOS_YEARS_OFFS; | 153 | year += CMOS_YEARS_OFFS; |
155 | 154 | ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ed0fe385289d..a6ceaedc396a 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -1369,7 +1369,7 @@ static inline void mwait_play_dead(void) | |||
1369 | void *mwait_ptr; | 1369 | void *mwait_ptr; |
1370 | struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info); | 1370 | struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info); |
1371 | 1371 | ||
1372 | if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c))) | 1372 | if (!this_cpu_has(X86_FEATURE_MWAIT)) |
1373 | return; | 1373 | return; |
1374 | if (!this_cpu_has(X86_FEATURE_CLFLSH)) | 1374 | if (!this_cpu_has(X86_FEATURE_CLFLSH)) |
1375 | return; | 1375 | return; |
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 97ef74b88e0f..dbded5aedb81 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -157,7 +157,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
157 | if (flags & MAP_FIXED) | 157 | if (flags & MAP_FIXED) |
158 | return addr; | 158 | return addr; |
159 | 159 | ||
160 | /* for MAP_32BIT mappings we force the legact mmap base */ | 160 | /* for MAP_32BIT mappings we force the legacy mmap base */ |
161 | if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) | 161 | if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) |
162 | goto bottomup; | 162 | goto bottomup; |
163 | 163 | ||
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 06ccb5073a3f..4b9ea101fe3b 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -623,7 +623,8 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) | |||
623 | ns_now = __cycles_2_ns(tsc_now); | 623 | ns_now = __cycles_2_ns(tsc_now); |
624 | 624 | ||
625 | if (cpu_khz) { | 625 | if (cpu_khz) { |
626 | *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; | 626 | *scale = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) + |
627 | cpu_khz / 2) / cpu_khz; | ||
627 | *offset = ns_now - mult_frac(tsc_now, *scale, | 628 | *offset = ns_now - mult_frac(tsc_now, *scale, |
628 | (1UL << CYC2NS_SCALE_FACTOR)); | 629 | (1UL << CYC2NS_SCALE_FACTOR)); |
629 | } | 630 | } |
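
The cyc2ns scale factor now rounds to the nearest integer instead of truncating, halving the worst-case systematic error in cycles-to-nanoseconds conversion. The (n + d/2) / d idiom, demonstrated standalone with an illustrative clock rate (values chosen to show the difference):

	#include <stdio.h>

	#define CYC2NS_SCALE_FACTOR	10
	#define NSEC_PER_MSEC		1000000ULL

	int main(void)
	{
		unsigned long long cpu_khz = 2927000;	/* example: 2.927 GHz */
		unsigned long long n = NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR;

		/* truncating gives 349; round-to-nearest gives 350 here */
		printf("trunc=%llu round=%llu\n",
		       n / cpu_khz, (n + cpu_khz / 2) / cpu_khz);
		return 0;
	}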
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index c71025b67462..0ba4cfb4f412 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c | |||
@@ -680,8 +680,10 @@ static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) | |||
680 | if (auprobe->insn[i] == 0x66) | 680 | if (auprobe->insn[i] == 0x66) |
681 | continue; | 681 | continue; |
682 | 682 | ||
683 | if (auprobe->insn[i] == 0x90) | 683 | if (auprobe->insn[i] == 0x90) { |
684 | regs->ip += i + 1; | ||
684 | return true; | 685 | return true; |
686 | } | ||
685 | 687 | ||
686 | break; | 688 | break; |
687 | } | 689 | } |
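
The uprobes fix targets NOPs carrying 0x66 operand-size prefixes: on a successful skip, __skip_sstep() must advance regs->ip past the whole instruction rather than merely returning true. The encodings the loop walks, for reference (a sketch; i counts leading 0x66 bytes):

	/*
	 *   90          nop            -> i == 0, regs->ip += 1
	 *   66 90       xchg %ax,%ax   -> i == 1, regs->ip += 2
	 *   66 66 90    padded nop     -> i == 2, regs->ip += 3
	 *
	 * Hence the new regs->ip += i + 1 before returning true.
	 */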
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 7a3d075a814a..d065d67c2672 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <asm/time.h> | 19 | #include <asm/time.h> |
20 | #include <asm/irq.h> | 20 | #include <asm/irq.h> |
21 | #include <asm/io_apic.h> | 21 | #include <asm/io_apic.h> |
22 | #include <asm/hpet.h> | ||
22 | #include <asm/pat.h> | 23 | #include <asm/pat.h> |
23 | #include <asm/tsc.h> | 24 | #include <asm/tsc.h> |
24 | #include <asm/iommu.h> | 25 | #include <asm/iommu.h> |
@@ -111,15 +112,22 @@ struct x86_platform_ops x86_platform = { | |||
111 | 112 | ||
112 | EXPORT_SYMBOL_GPL(x86_platform); | 113 | EXPORT_SYMBOL_GPL(x86_platform); |
113 | struct x86_msi_ops x86_msi = { | 114 | struct x86_msi_ops x86_msi = { |
114 | .setup_msi_irqs = native_setup_msi_irqs, | 115 | .setup_msi_irqs = native_setup_msi_irqs, |
115 | .teardown_msi_irq = native_teardown_msi_irq, | 116 | .compose_msi_msg = native_compose_msi_msg, |
116 | .teardown_msi_irqs = default_teardown_msi_irqs, | 117 | .teardown_msi_irq = native_teardown_msi_irq, |
117 | .restore_msi_irqs = default_restore_msi_irqs, | 118 | .teardown_msi_irqs = default_teardown_msi_irqs, |
119 | .restore_msi_irqs = default_restore_msi_irqs, | ||
120 | .setup_hpet_msi = default_setup_hpet_msi, | ||
118 | }; | 121 | }; |
119 | 122 | ||
120 | struct x86_io_apic_ops x86_io_apic_ops = { | 123 | struct x86_io_apic_ops x86_io_apic_ops = { |
121 | .init = native_io_apic_init_mappings, | 124 | .init = native_io_apic_init_mappings, |
122 | .read = native_io_apic_read, | 125 | .read = native_io_apic_read, |
123 | .write = native_io_apic_write, | 126 | .write = native_io_apic_write, |
124 | .modify = native_io_apic_modify, | 127 | .modify = native_io_apic_modify, |
128 | .disable = native_disable_io_apic, | ||
129 | .print_entries = native_io_apic_print_entries, | ||
130 | .set_affinity = native_ioapic_set_affinity, | ||
131 | .setup_entry = native_setup_ioapic_entry, | ||
132 | .eoi_ioapic_pin = native_eoi_ioapic_pin, | ||
125 | }; | 133 | }; |
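
x86_msi and x86_io_apic_ops follow the pattern used throughout x86_init.c: a structure of function pointers preloaded with native defaults, which interrupt-remapping or hypervisor code can overwrite during early init. A reduced sketch of the pattern (names here are illustrative):

	struct msi_ops_sketch {
		int (*setup_hpet_msi)(unsigned int irq, unsigned int blockid);
	};

	static int native_setup_hpet_msi_sketch(unsigned int irq, unsigned int blockid)
	{
		return 0;	/* stand-in for the native implementation */
	}

	static struct msi_ops_sketch msi_ops = {
		.setup_hpet_msi = native_setup_hpet_msi_sketch,
	};

	/*
	 * Call sites always go through the table -- hpet.c above now calls
	 * x86_msi.setup_hpet_msi(irq, hpet_blockid) -- so an IRQ-remapping
	 * driver can swap the pointer before the first use.
	 */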
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 75c9a6a59697..d6eeead43758 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -605,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start, | |||
605 | } | 605 | } |
606 | 606 | ||
607 | if (pgd_changed) | 607 | if (pgd_changed) |
608 | sync_global_pgds(addr, end); | 608 | sync_global_pgds(addr, end - 1); |
609 | 609 | ||
610 | __flush_tlb_all(); | 610 | __flush_tlb_all(); |
611 | 611 | ||
@@ -984,7 +984,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node) | |||
984 | } | 984 | } |
985 | 985 | ||
986 | } | 986 | } |
987 | sync_global_pgds((unsigned long)start_page, end); | 987 | sync_global_pgds((unsigned long)start_page, end - 1); |
988 | return 0; | 988 | return 0; |
989 | } | 989 | } |
990 | 990 | ||
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c index c80b9fb95734..8dabbed409ee 100644 --- a/arch/x86/mm/memtest.c +++ b/arch/x86/mm/memtest.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/memblock.h> | 9 | #include <linux/memblock.h> |
10 | 10 | ||
11 | static u64 patterns[] __initdata = { | 11 | static u64 patterns[] __initdata = { |
12 | /* The first entry has to be 0 to leave memtest with zeroed memory */ | ||
12 | 0, | 13 | 0, |
13 | 0xffffffffffffffffULL, | 14 | 0xffffffffffffffffULL, |
14 | 0x5555555555555555ULL, | 15 | 0x5555555555555555ULL, |
@@ -110,15 +111,8 @@ void __init early_memtest(unsigned long start, unsigned long end) | |||
110 | return; | 111 | return; |
111 | 112 | ||
112 | printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern); | 113 | printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern); |
113 | for (i = 0; i < memtest_pattern; i++) { | 114 | for (i = memtest_pattern-1; i < UINT_MAX; --i) { |
114 | idx = i % ARRAY_SIZE(patterns); | 115 | idx = i % ARRAY_SIZE(patterns); |
115 | do_one_pass(patterns[idx], start, end); | 116 | do_one_pass(patterns[idx], start, end); |
116 | } | 117 | } |
117 | |||
118 | if (idx > 0) { | ||
119 | printk(KERN_INFO "early_memtest: wipe out " | ||
120 | "test pattern from memory\n"); | ||
121 | /* additional test with pattern 0 will do this */ | ||
122 | do_one_pass(0, start, end); | ||
123 | } | ||
124 | } | 118 | } |
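
Counting the pass index down rather than up means the final pass always uses patterns[0], which is 0, so memory is left zeroed without the explicit wipe pass the old code needed. Termination relies on unsigned wrap-around, which is worth spelling out (standalone sketch):

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int memtest_pattern = 4;	/* example memtest= value */
		unsigned int i;

		/* i runs 3, 2, 1, 0; then --i wraps to UINT_MAX, ending the loop */
		for (i = memtest_pattern - 1; i < UINT_MAX; --i)
			printf("pass with patterns[%u]\n",
			       i % 4 /* 4 == ARRAY_SIZE(patterns) here */);
		return 0;
	}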
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c index 4ddf497ca65b..cdd0da9dd530 100644 --- a/arch/x86/mm/srat.c +++ b/arch/x86/mm/srat.c | |||
@@ -149,39 +149,40 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) | |||
149 | int node, pxm; | 149 | int node, pxm; |
150 | 150 | ||
151 | if (srat_disabled()) | 151 | if (srat_disabled()) |
152 | return -1; | 152 | goto out_err; |
153 | if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) { | 153 | if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) |
154 | bad_srat(); | 154 | goto out_err_bad_srat; |
155 | return -1; | ||
156 | } | ||
157 | if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) | 155 | if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) |
158 | return -1; | 156 | goto out_err; |
159 | |||
160 | if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info()) | 157 | if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info()) |
161 | return -1; | 158 | goto out_err; |
159 | |||
162 | start = ma->base_address; | 160 | start = ma->base_address; |
163 | end = start + ma->length; | 161 | end = start + ma->length; |
164 | pxm = ma->proximity_domain; | 162 | pxm = ma->proximity_domain; |
165 | if (acpi_srat_revision <= 1) | 163 | if (acpi_srat_revision <= 1) |
166 | pxm &= 0xff; | 164 | pxm &= 0xff; |
165 | |||
167 | node = setup_node(pxm); | 166 | node = setup_node(pxm); |
168 | if (node < 0) { | 167 | if (node < 0) { |
169 | printk(KERN_ERR "SRAT: Too many proximity domains.\n"); | 168 | printk(KERN_ERR "SRAT: Too many proximity domains.\n"); |
170 | bad_srat(); | 169 | goto out_err_bad_srat; |
171 | return -1; | ||
172 | } | 170 | } |
173 | 171 | ||
174 | if (numa_add_memblk(node, start, end) < 0) { | 172 | if (numa_add_memblk(node, start, end) < 0) |
175 | bad_srat(); | 173 | goto out_err_bad_srat; |
176 | return -1; | ||
177 | } | ||
178 | 174 | ||
179 | node_set(node, numa_nodes_parsed); | 175 | node_set(node, numa_nodes_parsed); |
180 | 176 | ||
181 | printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", | 177 | printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", |
182 | node, pxm, | 178 | node, pxm, |
183 | (unsigned long long) start, (unsigned long long) end - 1); | 179 | (unsigned long long) start, (unsigned long long) end - 1); |
180 | |||
184 | return 0; | 181 | return 0; |
182 | out_err_bad_srat: | ||
183 | bad_srat(); | ||
184 | out_err: | ||
185 | return -1; | ||
185 | } | 186 | } |
186 | 187 | ||
187 | void __init acpi_numa_arch_fixup(void) {} | 188 | void __init acpi_numa_arch_fixup(void) {} |
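
The SRAT rewrite is a pure control-flow cleanup: every failure path funnels through two labels so bad_srat() is invoked from exactly one place. The idiom in isolation (mark_state_bad() is a stand-in for bad_srat()):

	static void mark_state_bad(void)
	{
		/* stand-in for bad_srat(): poison parser state once, centrally */
	}

	static int parse_affinity_sketch(int disabled, int malformed)
	{
		if (disabled)
			goto out_err;		/* plain failure */
		if (malformed)
			goto out_err_bad;	/* failure that must also poison state */
		return 0;

	out_err_bad:
		mark_state_bad();
	out_err:
		return -1;
	}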
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 13a6b29e2e5d..282375f13c7e 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -335,7 +335,7 @@ static const struct file_operations fops_tlbflush = { | |||
335 | .llseek = default_llseek, | 335 | .llseek = default_llseek, |
336 | }; | 336 | }; |
337 | 337 | ||
338 | static int __cpuinit create_tlb_flushall_shift(void) | 338 | static int __init create_tlb_flushall_shift(void) |
339 | { | 339 | { |
340 | debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR, | 340 | debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR, |
341 | arch_debugfs_dir, NULL, &fops_tlbflush); | 341 | arch_debugfs_dir, NULL, &fops_tlbflush); |
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index fb29968a7cd5..082e88129712 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c | |||
@@ -548,8 +548,7 @@ static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg, | |||
548 | if (cfg->address < 0xFFFFFFFF) | 548 | if (cfg->address < 0xFFFFFFFF) |
549 | return 0; | 549 | return 0; |
550 | 550 | ||
551 | if (!strcmp(mcfg->header.oem_id, "SGI") || | 551 | if (!strncmp(mcfg->header.oem_id, "SGI", 3)) |
552 | !strcmp(mcfg->header.oem_id, "SGI2")) | ||
553 | return 0; | 552 | return 0; |
554 | 553 | ||
555 | if (mcfg->header.revision >= 1) { | 554 | if (mcfg->header.revision >= 1) { |
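
Collapsing the two exact matches into strncmp(oem_id, "SGI", 3) covers both "SGI" and "SGI2", and is also safer: ACPI OEM IDs are fixed-width 6-byte fields that need not be NUL-terminated, so a bounded compare avoids running off the end. Note that any other "SGI"-prefixed ID now matches too (quick illustration):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *ids[] = { "SGI", "SGI2", "SGI9", "IBM" };
		int i;

		for (i = 0; i < 4; i++)
			printf("%-4s -> %s\n", ids[i],
			       strncmp(ids[i], "SGI", 3) == 0 ? "match" : "no match");
		return 0;
	}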
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile index 8d874396cb29..01e0231a113e 100644 --- a/arch/x86/platform/Makefile +++ b/arch/x86/platform/Makefile | |||
@@ -2,10 +2,12 @@ | |||
2 | obj-y += ce4100/ | 2 | obj-y += ce4100/ |
3 | obj-y += efi/ | 3 | obj-y += efi/ |
4 | obj-y += geode/ | 4 | obj-y += geode/ |
5 | obj-y += goldfish/ | ||
5 | obj-y += iris/ | 6 | obj-y += iris/ |
6 | obj-y += mrst/ | 7 | obj-y += mrst/ |
7 | obj-y += olpc/ | 8 | obj-y += olpc/ |
8 | obj-y += scx200/ | 9 | obj-y += scx200/ |
9 | obj-y += sfi/ | 10 | obj-y += sfi/ |
11 | obj-y += ts5500/ | ||
10 | obj-y += visws/ | 12 | obj-y += visws/ |
11 | obj-y += uv/ | 13 | obj-y += uv/ |
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c index d9c1b95af17c..7145ec63c520 100644 --- a/arch/x86/platform/efi/efi-bgrt.c +++ b/arch/x86/platform/efi/efi-bgrt.c | |||
@@ -11,20 +11,21 @@ | |||
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/init.h> | ||
14 | #include <linux/acpi.h> | 15 | #include <linux/acpi.h> |
15 | #include <linux/efi.h> | 16 | #include <linux/efi.h> |
16 | #include <linux/efi-bgrt.h> | 17 | #include <linux/efi-bgrt.h> |
17 | 18 | ||
18 | struct acpi_table_bgrt *bgrt_tab; | 19 | struct acpi_table_bgrt *bgrt_tab; |
19 | void *bgrt_image; | 20 | void *__initdata bgrt_image; |
20 | size_t bgrt_image_size; | 21 | size_t __initdata bgrt_image_size; |
21 | 22 | ||
22 | struct bmp_header { | 23 | struct bmp_header { |
23 | u16 id; | 24 | u16 id; |
24 | u32 size; | 25 | u32 size; |
25 | } __packed; | 26 | } __packed; |
26 | 27 | ||
27 | void efi_bgrt_init(void) | 28 | void __init efi_bgrt_init(void) |
28 | { | 29 | { |
29 | acpi_status status; | 30 | acpi_status status; |
30 | void __iomem *image; | 31 | void __iomem *image; |
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 77cf0090c0a3..928bf837040a 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -87,7 +87,7 @@ EXPORT_SYMBOL(efi_enabled); | |||
87 | 87 | ||
88 | static int __init setup_noefi(char *arg) | 88 | static int __init setup_noefi(char *arg) |
89 | { | 89 | { |
90 | clear_bit(EFI_BOOT, &x86_efi_facility); | 90 | clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility); |
91 | return 0; | 91 | return 0; |
92 | } | 92 | } |
93 | early_param("noefi", setup_noefi); | 93 | early_param("noefi", setup_noefi); |
diff --git a/arch/x86/platform/goldfish/Makefile b/arch/x86/platform/goldfish/Makefile new file mode 100644 index 000000000000..f030b532fdf3 --- /dev/null +++ b/arch/x86/platform/goldfish/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_GOLDFISH) += goldfish.o | |||
diff --git a/arch/x86/platform/goldfish/goldfish.c b/arch/x86/platform/goldfish/goldfish.c new file mode 100644 index 000000000000..1693107a518e --- /dev/null +++ b/arch/x86/platform/goldfish/goldfish.c | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Google, Inc. | ||
3 | * Copyright (C) 2011 Intel, Inc. | ||
4 | * Copyright (C) 2013 Intel, Inc. | ||
5 | * | ||
6 | * This software is licensed under the terms of the GNU General Public | ||
7 | * License version 2, as published by the Free Software Foundation, and | ||
8 | * may be copied, distributed, and modified under those terms. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/irq.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | |||
21 | /* | ||
22 | * Where in virtual device memory the IO devices (timers, system controllers | ||
23 | * and so on) are found. | ||
24 | */ | ||
25 | |||
26 | #define GOLDFISH_PDEV_BUS_BASE (0xff001000) | ||
27 | #define GOLDFISH_PDEV_BUS_END (0xff7fffff) | ||
28 | #define GOLDFISH_PDEV_BUS_IRQ (4) | ||
29 | |||
30 | #define GOLDFISH_TTY_BASE (0x2000) | ||
31 | |||
32 | static struct resource goldfish_pdev_bus_resources[] = { | ||
33 | { | ||
34 | .start = GOLDFISH_PDEV_BUS_BASE, | ||
35 | .end = GOLDFISH_PDEV_BUS_END, | ||
36 | .flags = IORESOURCE_MEM, | ||
37 | }, | ||
38 | { | ||
39 | .start = GOLDFISH_PDEV_BUS_IRQ, | ||
40 | .end = GOLDFISH_PDEV_BUS_IRQ, | ||
41 | .flags = IORESOURCE_IRQ, | ||
42 | } | ||
43 | }; | ||
44 | |||
45 | static int __init goldfish_init(void) | ||
46 | { | ||
47 | platform_device_register_simple("goldfish_pdev_bus", -1, | ||
48 | goldfish_pdev_bus_resources, 2); | ||
49 | return 0; | ||
50 | } | ||
51 | device_initcall(goldfish_init); | ||
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c index 2fdca25905ae..fef7d0ba7e3a 100644 --- a/arch/x86/platform/olpc/olpc-xo15-sci.c +++ b/arch/x86/platform/olpc/olpc-xo15-sci.c | |||
@@ -195,7 +195,7 @@ err_sysfs: | |||
195 | return r; | 195 | return r; |
196 | } | 196 | } |
197 | 197 | ||
198 | static int xo15_sci_remove(struct acpi_device *device, int type) | 198 | static int xo15_sci_remove(struct acpi_device *device) |
199 | { | 199 | { |
200 | acpi_disable_gpe(NULL, xo15_sci_gpe); | 200 | acpi_disable_gpe(NULL, xo15_sci_gpe); |
201 | acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler); | 201 | acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler); |
diff --git a/arch/x86/platform/sfi/sfi.c b/arch/x86/platform/sfi/sfi.c index 7785b72ecc3a..bcd1a703e3e6 100644 --- a/arch/x86/platform/sfi/sfi.c +++ b/arch/x86/platform/sfi/sfi.c | |||
@@ -35,7 +35,7 @@ | |||
35 | static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; | 35 | static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; |
36 | 36 | ||
37 | /* All CPUs enumerated by SFI must be present and enabled */ | 37 | /* All CPUs enumerated by SFI must be present and enabled */ |
38 | static void __cpuinit mp_sfi_register_lapic(u8 id) | 38 | static void __init mp_sfi_register_lapic(u8 id) |
39 | { | 39 | { |
40 | if (MAX_LOCAL_APIC - id <= 0) { | 40 | if (MAX_LOCAL_APIC - id <= 0) { |
41 | pr_warning("Processor #%d invalid (max %d)\n", | 41 | pr_warning("Processor #%d invalid (max %d)\n", |
diff --git a/arch/x86/platform/ts5500/Makefile b/arch/x86/platform/ts5500/Makefile new file mode 100644 index 000000000000..c54e348c96a7 --- /dev/null +++ b/arch/x86/platform/ts5500/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_TS5500) += ts5500.o | |||
diff --git a/arch/x86/platform/ts5500/ts5500.c b/arch/x86/platform/ts5500/ts5500.c new file mode 100644 index 000000000000..39febb214e8c --- /dev/null +++ b/arch/x86/platform/ts5500/ts5500.c | |||
@@ -0,0 +1,339 @@ | |||
1 | /* | ||
2 | * Technologic Systems TS-5500 Single Board Computer support | ||
3 | * | ||
4 | * Copyright (C) 2013 Savoir-faire Linux Inc. | ||
5 | * Vivien Didelot <vivien.didelot@savoirfairelinux.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it under | ||
8 | * the terms of the GNU General Public License as published by the Free Software | ||
9 | * Foundation; either version 2 of the License, or (at your option) any later | ||
10 | * version. | ||
11 | * | ||
12 | * | ||
13 | * This driver registers the Technologic Systems TS-5500 Single Board Computer | ||
14 | * (SBC) and its devices, and exposes information to userspace such as jumpers' | ||
15 | * state or available options. For further information about sysfs entries, see | ||
16 | * Documentation/ABI/testing/sysfs-platform-ts5500. | ||
17 | * | ||
18 | * This code actually supports the TS-5500 platform, but it may be extended to | ||
19 | * support similar Technologic Systems x86-based platforms, such as the TS-5600. | ||
20 | */ | ||
21 | |||
22 | #include <linux/delay.h> | ||
23 | #include <linux/io.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/leds.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/platform_data/gpio-ts5500.h> | ||
28 | #include <linux/platform_data/max197.h> | ||
29 | #include <linux/platform_device.h> | ||
30 | #include <linux/slab.h> | ||
31 | |||
32 | /* Product code register */ | ||
33 | #define TS5500_PRODUCT_CODE_ADDR 0x74 | ||
34 | #define TS5500_PRODUCT_CODE 0x60 /* TS-5500 product code */ | ||
35 | |||
36 | /* SRAM/RS-485/ADC options, and RS-485 RTS/Automatic RS-485 flags register */ | ||
37 | #define TS5500_SRAM_RS485_ADC_ADDR 0x75 | ||
38 | #define TS5500_SRAM BIT(0) /* SRAM option */ | ||
39 | #define TS5500_RS485 BIT(1) /* RS-485 option */ | ||
40 | #define TS5500_ADC BIT(2) /* A/D converter option */ | ||
41 | #define TS5500_RS485_RTS BIT(6) /* RTS for RS-485 */ | ||
42 | #define TS5500_RS485_AUTO BIT(7) /* Automatic RS-485 */ | ||
43 | |||
44 | /* External Reset/Industrial Temperature Range options register */ | ||
45 | #define TS5500_ERESET_ITR_ADDR 0x76 | ||
46 | #define TS5500_ERESET BIT(0) /* External Reset option */ | ||
47 | #define TS5500_ITR BIT(1) /* Indust. Temp. Range option */ | ||
48 | |||
49 | /* LED/Jumpers register */ | ||
50 | #define TS5500_LED_JP_ADDR 0x77 | ||
51 | #define TS5500_LED BIT(0) /* LED flag */ | ||
52 | #define TS5500_JP1 BIT(1) /* Automatic CMOS */ | ||
53 | #define TS5500_JP2 BIT(2) /* Enable Serial Console */ | ||
54 | #define TS5500_JP3 BIT(3) /* Write Enable Drive A */ | ||
55 | #define TS5500_JP4 BIT(4) /* Fast Console (115K baud) */ | ||
56 | #define TS5500_JP5 BIT(5) /* User Jumper */ | ||
57 | #define TS5500_JP6 BIT(6) /* Console on COM1 (req. JP2) */ | ||
58 | #define TS5500_JP7 BIT(7) /* Undocumented (Unused) */ | ||
59 | |||
60 | /* A/D Converter registers */ | ||
61 | #define TS5500_ADC_CONV_BUSY_ADDR 0x195 /* Conversion state register */ | ||
62 | #define TS5500_ADC_CONV_BUSY BIT(0) | ||
63 | #define TS5500_ADC_CONV_INIT_LSB_ADDR 0x196 /* Start conv. / LSB register */ | ||
64 | #define TS5500_ADC_CONV_MSB_ADDR 0x197 /* MSB register */ | ||
65 | #define TS5500_ADC_CONV_DELAY 12 /* usec */ | ||
66 | |||
67 | /** | ||
68 | * struct ts5500_sbc - TS-5500 board description | ||
69 | * @id: Board product ID. | ||
70 | * @sram: Flag for SRAM option. | ||
71 | * @rs485: Flag for RS-485 option. | ||
72 | * @adc: Flag for Analog/Digital converter option. | ||
73 | * @ereset: Flag for External Reset option. | ||
74 | * @itr: Flag for Industrial Temperature Range option. | ||
75 | * @jumpers: Bitfield for jumpers' state. | ||
76 | */ | ||
77 | struct ts5500_sbc { | ||
78 | int id; | ||
79 | bool sram; | ||
80 | bool rs485; | ||
81 | bool adc; | ||
82 | bool ereset; | ||
83 | bool itr; | ||
84 | u8 jumpers; | ||
85 | }; | ||
86 | |||
87 | /* Board signatures in BIOS shadow RAM */ | ||
88 | static const struct { | ||
89 | const char * const string; | ||
90 | const ssize_t offset; | ||
91 | } ts5500_signatures[] __initdata = { | ||
92 | { "TS-5x00 AMD Elan", 0xb14 }, | ||
93 | }; | ||
94 | |||
95 | static int __init ts5500_check_signature(void) | ||
96 | { | ||
97 | void __iomem *bios; | ||
98 | int i, ret = -ENODEV; | ||
99 | |||
100 | bios = ioremap(0xf0000, 0x10000); | ||
101 | if (!bios) | ||
102 | return -ENOMEM; | ||
103 | |||
104 | for (i = 0; i < ARRAY_SIZE(ts5500_signatures); i++) { | ||
105 | if (check_signature(bios + ts5500_signatures[i].offset, | ||
106 | ts5500_signatures[i].string, | ||
107 | strlen(ts5500_signatures[i].string))) { | ||
108 | ret = 0; | ||
109 | break; | ||
110 | } | ||
111 | } | ||
112 | |||
113 | iounmap(bios); | ||
114 | return ret; | ||
115 | } | ||
116 | |||
117 | static int __init ts5500_detect_config(struct ts5500_sbc *sbc) | ||
118 | { | ||
119 | u8 tmp; | ||
120 | int ret = 0; | ||
121 | |||
122 | if (!request_region(TS5500_PRODUCT_CODE_ADDR, 4, "ts5500")) | ||
123 | return -EBUSY; | ||
124 | |||
125 | tmp = inb(TS5500_PRODUCT_CODE_ADDR); | ||
126 | if (tmp != TS5500_PRODUCT_CODE) { | ||
127 | pr_err("This platform is not a TS-5500 (found ID 0x%x)\n", tmp); | ||
128 | ret = -ENODEV; | ||
129 | goto cleanup; | ||
130 | } | ||
131 | sbc->id = tmp; | ||
132 | |||
133 | tmp = inb(TS5500_SRAM_RS485_ADC_ADDR); | ||
134 | sbc->sram = tmp & TS5500_SRAM; | ||
135 | sbc->rs485 = tmp & TS5500_RS485; | ||
136 | sbc->adc = tmp & TS5500_ADC; | ||
137 | |||
138 | tmp = inb(TS5500_ERESET_ITR_ADDR); | ||
139 | sbc->ereset = tmp & TS5500_ERESET; | ||
140 | sbc->itr = tmp & TS5500_ITR; | ||
141 | |||
142 | tmp = inb(TS5500_LED_JP_ADDR); | ||
143 | sbc->jumpers = tmp & ~TS5500_LED; | ||
144 | |||
145 | cleanup: | ||
146 | release_region(TS5500_PRODUCT_CODE_ADDR, 4); | ||
147 | return ret; | ||
148 | } | ||
149 | |||
150 | static ssize_t ts5500_show_id(struct device *dev, | ||
151 | struct device_attribute *attr, char *buf) | ||
152 | { | ||
153 | struct ts5500_sbc *sbc = dev_get_drvdata(dev); | ||
154 | |||
155 | return sprintf(buf, "0x%.2x\n", sbc->id); | ||
156 | } | ||
157 | |||
158 | static ssize_t ts5500_show_jumpers(struct device *dev, | ||
159 | struct device_attribute *attr, | ||
160 | char *buf) | ||
161 | { | ||
162 | struct ts5500_sbc *sbc = dev_get_drvdata(dev); | ||
163 | |||
164 | return sprintf(buf, "0x%.2x\n", sbc->jumpers >> 1); | ||
165 | } | ||
166 | |||
167 | #define TS5500_SHOW(field) \ | ||
168 | static ssize_t ts5500_show_##field(struct device *dev, \ | ||
169 | struct device_attribute *attr, \ | ||
170 | char *buf) \ | ||
171 | { \ | ||
172 | struct ts5500_sbc *sbc = dev_get_drvdata(dev); \ | ||
173 | return sprintf(buf, "%d\n", sbc->field); \ | ||
174 | } | ||
175 | |||
176 | TS5500_SHOW(sram) | ||
177 | TS5500_SHOW(rs485) | ||
178 | TS5500_SHOW(adc) | ||
179 | TS5500_SHOW(ereset) | ||
180 | TS5500_SHOW(itr) | ||
181 | |||
182 | static DEVICE_ATTR(id, S_IRUGO, ts5500_show_id, NULL); | ||
183 | static DEVICE_ATTR(jumpers, S_IRUGO, ts5500_show_jumpers, NULL); | ||
184 | static DEVICE_ATTR(sram, S_IRUGO, ts5500_show_sram, NULL); | ||
185 | static DEVICE_ATTR(rs485, S_IRUGO, ts5500_show_rs485, NULL); | ||
186 | static DEVICE_ATTR(adc, S_IRUGO, ts5500_show_adc, NULL); | ||
187 | static DEVICE_ATTR(ereset, S_IRUGO, ts5500_show_ereset, NULL); | ||
188 | static DEVICE_ATTR(itr, S_IRUGO, ts5500_show_itr, NULL); | ||
189 | |||
190 | static struct attribute *ts5500_attributes[] = { | ||
191 | &dev_attr_id.attr, | ||
192 | &dev_attr_jumpers.attr, | ||
193 | &dev_attr_sram.attr, | ||
194 | &dev_attr_rs485.attr, | ||
195 | &dev_attr_adc.attr, | ||
196 | &dev_attr_ereset.attr, | ||
197 | &dev_attr_itr.attr, | ||
198 | NULL | ||
199 | }; | ||
200 | |||
201 | static const struct attribute_group ts5500_attr_group = { | ||
202 | .attrs = ts5500_attributes, | ||
203 | }; | ||
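
The attribute group above exposes the detected options as read-only
sysfs files on the platform device. Assuming the device registers as
"ts5500" (as in ts5500_init() further down), the files would appear
under /sys/devices/platform/ts5500/. A hypothetical userspace reader,
purely as an illustration:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	ssize_t n;
	int fd = open("/sys/devices/platform/ts5500/id", O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("product code: %s", buf);	/* sbc->id, "0x%.2x" */
	}
	close(fd);
	return 0;
}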
204 | |||
205 | static struct resource ts5500_dio1_resource[] = { | ||
206 | DEFINE_RES_IRQ_NAMED(7, "DIO1 interrupt"), | ||
207 | }; | ||
208 | |||
209 | static struct platform_device ts5500_dio1_pdev = { | ||
210 | .name = "ts5500-dio1", | ||
211 | .id = -1, | ||
212 | .resource = ts5500_dio1_resource, | ||
213 | .num_resources = 1, | ||
214 | }; | ||
215 | |||
216 | static struct resource ts5500_dio2_resource[] = { | ||
217 | DEFINE_RES_IRQ_NAMED(6, "DIO2 interrupt"), | ||
218 | }; | ||
219 | |||
220 | static struct platform_device ts5500_dio2_pdev = { | ||
221 | .name = "ts5500-dio2", | ||
222 | .id = -1, | ||
223 | .resource = ts5500_dio2_resource, | ||
224 | .num_resources = 1, | ||
225 | }; | ||
226 | |||
227 | static void ts5500_led_set(struct led_classdev *led_cdev, | ||
228 | enum led_brightness brightness) | ||
229 | { | ||
230 | outb(!!brightness, TS5500_LED_JP_ADDR); | ||
231 | } | ||
232 | |||
233 | static enum led_brightness ts5500_led_get(struct led_classdev *led_cdev) | ||
234 | { | ||
235 | return (inb(TS5500_LED_JP_ADDR) & TS5500_LED) ? LED_FULL : LED_OFF; | ||
236 | } | ||
237 | |||
238 | static struct led_classdev ts5500_led_cdev = { | ||
239 | .name = "ts5500:green:", | ||
240 | .brightness_set = ts5500_led_set, | ||
241 | .brightness_get = ts5500_led_get, | ||
242 | }; | ||
243 | |||
244 | static int ts5500_adc_convert(u8 ctrl) | ||
245 | { | ||
246 | u8 lsb, msb; | ||
247 | |||
248 | /* Start conversion (ensure the 3 MSBs are set to 0) */ | ||
249 | outb(ctrl & 0x1f, TS5500_ADC_CONV_INIT_LSB_ADDR); | ||
250 | |||
251 | /* | ||
252 | * The platform has CPLD logic driving the A/D converter. | ||
253 | * The conversion must complete within 11 microseconds, | ||
254 | * otherwise we have to re-initiate a conversion. | ||
255 | */ | ||
256 | udelay(TS5500_ADC_CONV_DELAY); | ||
257 | if (inb(TS5500_ADC_CONV_BUSY_ADDR) & TS5500_ADC_CONV_BUSY) | ||
258 | return -EBUSY; | ||
259 | |||
260 | /* Read the raw data */ | ||
261 | lsb = inb(TS5500_ADC_CONV_INIT_LSB_ADDR); | ||
262 | msb = inb(TS5500_ADC_CONV_MSB_ADDR); | ||
263 | |||
264 | return (msb << 8) | lsb; | ||
265 | } | ||
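
ts5500_adc_convert() returns -EBUSY when the CPLD has not finished
within the fixed 12 us delay, so a caller is expected to re-initiate
the conversion. A hypothetical retry wrapper, not part of this patch,
could look like:

/* Hypothetical helper: -EBUSY only means "not done yet", so retry
 * a bounded number of times before reporting failure. */
static int ts5500_adc_convert_retry(u8 ctrl)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = ts5500_adc_convert(ctrl);
		if (ret != -EBUSY)
			return ret;	/* raw sample or hard error */
	}
	return -EBUSY;
}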
266 | |||
267 | static struct max197_platform_data ts5500_adc_pdata = { | ||
268 | .convert = ts5500_adc_convert, | ||
269 | }; | ||
270 | |||
271 | static struct platform_device ts5500_adc_pdev = { | ||
272 | .name = "max197", | ||
273 | .id = -1, | ||
274 | .dev = { | ||
275 | .platform_data = &ts5500_adc_pdata, | ||
276 | }, | ||
277 | }; | ||
278 | |||
279 | static int __init ts5500_init(void) | ||
280 | { | ||
281 | struct platform_device *pdev; | ||
282 | struct ts5500_sbc *sbc; | ||
283 | int err; | ||
284 | |||
285 | /* | ||
286 | * Neither DMI nor PCI bridge subvendor info is available; | ||
287 | * only the BIOS provides a 16-bit identification call. | ||
288 | * It is safer to find a signature in the BIOS shadow RAM. | ||
289 | */ | ||
290 | err = ts5500_check_signature(); | ||
291 | if (err) | ||
292 | return err; | ||
293 | |||
294 | pdev = platform_device_register_simple("ts5500", -1, NULL, 0); | ||
295 | if (IS_ERR(pdev)) | ||
296 | return PTR_ERR(pdev); | ||
297 | |||
298 | sbc = devm_kzalloc(&pdev->dev, sizeof(struct ts5500_sbc), GFP_KERNEL); | ||
299 | if (!sbc) { | ||
300 | err = -ENOMEM; | ||
301 | goto error; | ||
302 | } | ||
303 | |||
304 | err = ts5500_detect_config(sbc); | ||
305 | if (err) | ||
306 | goto error; | ||
307 | |||
308 | platform_set_drvdata(pdev, sbc); | ||
309 | |||
310 | err = sysfs_create_group(&pdev->dev.kobj, &ts5500_attr_group); | ||
311 | if (err) | ||
312 | goto error; | ||
313 | |||
314 | ts5500_dio1_pdev.dev.parent = &pdev->dev; | ||
315 | if (platform_device_register(&ts5500_dio1_pdev)) | ||
316 | dev_warn(&pdev->dev, "DIO1 block registration failed\n"); | ||
317 | ts5500_dio2_pdev.dev.parent = &pdev->dev; | ||
318 | if (platform_device_register(&ts5500_dio2_pdev)) | ||
319 | dev_warn(&pdev->dev, "DIO2 block registration failed\n"); | ||
320 | |||
321 | if (led_classdev_register(&pdev->dev, &ts5500_led_cdev)) | ||
322 | dev_warn(&pdev->dev, "LED registration failed\n"); | ||
323 | |||
324 | if (sbc->adc) { | ||
325 | ts5500_adc_pdev.dev.parent = &pdev->dev; | ||
326 | if (platform_device_register(&ts5500_adc_pdev)) | ||
327 | dev_warn(&pdev->dev, "ADC registration failed\n"); | ||
328 | } | ||
329 | |||
330 | return 0; | ||
331 | error: | ||
332 | platform_device_unregister(pdev); | ||
333 | return err; | ||
334 | } | ||
335 | device_initcall(ts5500_init); | ||
336 | |||
337 | MODULE_LICENSE("GPL"); | ||
338 | MODULE_AUTHOR("Savoir-faire Linux Inc. <kernel@savoirfairelinux.com>"); | ||
339 | MODULE_DESCRIPTION("Technologic Systems TS-5500 platform driver"); | ||
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index dbbdca5f508c..0f92173a12b6 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
@@ -1467,7 +1467,7 @@ static ssize_t ptc_proc_write(struct file *file, const char __user *user, | |||
1467 | } | 1467 | } |
1468 | 1468 | ||
1469 | if (input_arg == 0) { | 1469 | if (input_arg == 0) { |
1470 | elements = sizeof(stat_description)/sizeof(*stat_description); | 1470 | elements = ARRAY_SIZE(stat_description); |
1471 | printk(KERN_DEBUG "# cpu: cpu number\n"); | 1471 | printk(KERN_DEBUG "# cpu: cpu number\n"); |
1472 | printk(KERN_DEBUG "Sender statistics:\n"); | 1472 | printk(KERN_DEBUG "Sender statistics:\n"); |
1473 | for (i = 0; i < elements; i++) | 1473 | for (i = 0; i < elements; i++) |
@@ -1508,7 +1508,7 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr, | |||
1508 | char *q; | 1508 | char *q; |
1509 | int cnt = 0; | 1509 | int cnt = 0; |
1510 | int val; | 1510 | int val; |
1511 | int e = sizeof(tunables) / sizeof(*tunables); | 1511 | int e = ARRAY_SIZE(tunables); |
1512 | 1512 | ||
1513 | p = instr + strspn(instr, WHITESPACE); | 1513 | p = instr + strspn(instr, WHITESPACE); |
1514 | q = p; | 1514 | q = p; |
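
Both hunks replace the open-coded sizeof division with ARRAY_SIZE(),
which reads better and, in its upstream form, also fails to compile
when handed a plain pointer instead of an array. Stripped of that
type check, the macro is equivalent to:

/* Simplified; the include/linux/kernel.h version also adds
 * __must_be_array() to reject pointer arguments at compile time. */
#define ARRAY_SIZE_SKETCH(arr)	(sizeof(arr) / sizeof((arr)[0]))

static const int tunables_example[] = { 1, 2, 3 };	/* hypothetical */
/* ARRAY_SIZE_SKETCH(tunables_example) evaluates to 3 at compile time */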
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c index 5032e0d19b86..98718f604eb6 100644 --- a/arch/x86/platform/uv/uv_time.c +++ b/arch/x86/platform/uv/uv_time.c | |||
@@ -15,7 +15,7 @@ | |||
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
17 | * | 17 | * |
18 | * Copyright (c) 2009 Silicon Graphics, Inc. All Rights Reserved. | 18 | * Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved. |
19 | * Copyright (c) Dimitri Sivanich | 19 | * Copyright (c) Dimitri Sivanich |
20 | */ | 20 | */ |
21 | #include <linux/clockchips.h> | 21 | #include <linux/clockchips.h> |
@@ -102,9 +102,10 @@ static int uv_intr_pending(int pnode) | |||
102 | if (is_uv1_hub()) | 102 | if (is_uv1_hub()) |
103 | return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) & | 103 | return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) & |
104 | UV1H_EVENT_OCCURRED0_RTC1_MASK; | 104 | UV1H_EVENT_OCCURRED0_RTC1_MASK; |
105 | else | 105 | else if (is_uvx_hub()) |
106 | return uv_read_global_mmr64(pnode, UV2H_EVENT_OCCURRED2) & | 106 | return uv_read_global_mmr64(pnode, UVXH_EVENT_OCCURRED2) & |
107 | UV2H_EVENT_OCCURRED2_RTC_1_MASK; | 107 | UVXH_EVENT_OCCURRED2_RTC_1_MASK; |
108 | return 0; | ||
108 | } | 109 | } |
109 | 110 | ||
110 | /* Set up the interrupt and return non-zero if early expiration occurred. */ | 111 | /* Set up the interrupt and return non-zero if early expiration occurred. */ |
@@ -122,8 +123,8 @@ static int uv_setup_intr(int cpu, u64 expires) | |||
122 | uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS, | 123 | uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS, |
123 | UV1H_EVENT_OCCURRED0_RTC1_MASK); | 124 | UV1H_EVENT_OCCURRED0_RTC1_MASK); |
124 | else | 125 | else |
125 | uv_write_global_mmr64(pnode, UV2H_EVENT_OCCURRED2_ALIAS, | 126 | uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS, |
126 | UV2H_EVENT_OCCURRED2_RTC_1_MASK); | 127 | UVXH_EVENT_OCCURRED2_RTC_1_MASK); |
127 | 128 | ||
128 | val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) | | 129 | val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) | |
129 | ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT); | 130 | ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT); |
diff --git a/arch/x86/um/fault.c b/arch/x86/um/fault.c index 8784ab30d91b..84ac7f7b0257 100644 --- a/arch/x86/um/fault.c +++ b/arch/x86/um/fault.c | |||
@@ -20,7 +20,7 @@ int arch_fixup(unsigned long address, struct uml_pt_regs *regs) | |||
20 | const struct exception_table_entry *fixup; | 20 | const struct exception_table_entry *fixup; |
21 | 21 | ||
22 | fixup = search_exception_tables(address); | 22 | fixup = search_exception_tables(address); |
23 | if (fixup != 0) { | 23 | if (fixup) { |
24 | UPT_IP(regs) = fixup->fixup; | 24 | UPT_IP(regs) = fixup->fixup; |
25 | return 1; | 25 | return 1; |
26 | } | 26 | } |
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 205ad328aa52..c74436e687bf 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c | |||
@@ -60,7 +60,7 @@ notrace static cycle_t vread_tsc(void) | |||
60 | 60 | ||
61 | static notrace cycle_t vread_hpet(void) | 61 | static notrace cycle_t vread_hpet(void) |
62 | { | 62 | { |
63 | return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); | 63 | return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER); |
64 | } | 64 | } |
65 | 65 | ||
66 | #ifdef CONFIG_PARAVIRT_CLOCK | 66 | #ifdef CONFIG_PARAVIRT_CLOCK |
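
The magic constant 0xf0 was the offset of the HPET main counter
register; HPET_COUNTER names the same offset in the x86 headers, so
the generated code is unchanged and only readability improves. From
memory of arch/x86/include/asm/hpet.h (worth verifying in the tree):

#define HPET_COUNTER	0x0f0	/* main counter value register */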
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 138e5667409a..39928d16be3b 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1517,72 +1517,51 @@ asmlinkage void __init xen_start_kernel(void) | |||
1517 | #endif | 1517 | #endif |
1518 | } | 1518 | } |
1519 | 1519 | ||
1520 | #ifdef CONFIG_XEN_PVHVM | 1520 | void __ref xen_hvm_init_shared_info(void) |
1521 | #define HVM_SHARED_INFO_ADDR 0xFE700000UL | ||
1522 | static struct shared_info *xen_hvm_shared_info; | ||
1523 | static unsigned long xen_hvm_sip_phys; | ||
1524 | static int xen_major, xen_minor; | ||
1525 | |||
1526 | static void xen_hvm_connect_shared_info(unsigned long pfn) | ||
1527 | { | 1521 | { |
1522 | int cpu; | ||
1528 | struct xen_add_to_physmap xatp; | 1523 | struct xen_add_to_physmap xatp; |
1524 | static struct shared_info *shared_info_page = 0; | ||
1529 | 1525 | ||
1526 | if (!shared_info_page) | ||
1527 | shared_info_page = (struct shared_info *) | ||
1528 | extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
1530 | xatp.domid = DOMID_SELF; | 1529 | xatp.domid = DOMID_SELF; |
1531 | xatp.idx = 0; | 1530 | xatp.idx = 0; |
1532 | xatp.space = XENMAPSPACE_shared_info; | 1531 | xatp.space = XENMAPSPACE_shared_info; |
1533 | xatp.gpfn = pfn; | 1532 | xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT; |
1534 | if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) | 1533 | if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) |
1535 | BUG(); | 1534 | BUG(); |
1536 | 1535 | ||
1537 | } | 1536 | HYPERVISOR_shared_info = (struct shared_info *)shared_info_page; |
1538 | static void __init xen_hvm_set_shared_info(struct shared_info *sip) | ||
1539 | { | ||
1540 | int cpu; | ||
1541 | |||
1542 | HYPERVISOR_shared_info = sip; | ||
1543 | 1537 | ||
1544 | /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info | 1538 | /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info |
1545 | * page; we use it in the event channel upcall and in some pvclock | 1539 | * page; we use it in the event channel upcall and in some pvclock |
1546 | * related functions. We don't need the vcpu_info placement | 1540 | * related functions. We don't need the vcpu_info placement |
1547 | * optimizations because we don't use any pv_mmu or pv_irq op on | 1541 | * optimizations because we don't use any pv_mmu or pv_irq op on |
1548 | * HVM. */ | 1542 | * HVM. |
1549 | for_each_online_cpu(cpu) | 1543 | * When xen_hvm_init_shared_info runs at boot time, only vcpu 0 is |
1544 | * online, but it also runs at resume time, and in that case | ||
1545 | * multiple vcpus might be online. */ | ||
1546 | for_each_online_cpu(cpu) { | ||
1550 | per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; | 1547 | per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; |
1551 | } | ||
1552 | |||
1553 | /* Reconnect the shared_info pfn to a (new) mfn */ | ||
1554 | void xen_hvm_resume_shared_info(void) | ||
1555 | { | ||
1556 | xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT); | ||
1557 | } | ||
1558 | |||
1559 | /* Xen tools prior to Xen 4 do not provide a E820_Reserved area for guest usage. | ||
1560 | * On these old tools the shared info page will be placed in E820_Ram. | ||
1561 | * Xen 4 provides a E820_Reserved area at 0xFC000000, and this code expects | ||
1562 | * that nothing is mapped up to HVM_SHARED_INFO_ADDR. | ||
1563 | * Xen 4.3+ provides an explicit 1MB area at HVM_SHARED_INFO_ADDR which is used | ||
1564 | * here for the shared info page. */ | ||
1565 | static void __init xen_hvm_init_shared_info(void) | ||
1566 | { | ||
1567 | if (xen_major < 4) { | ||
1568 | xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
1569 | xen_hvm_sip_phys = __pa(xen_hvm_shared_info); | ||
1570 | } else { | ||
1571 | xen_hvm_sip_phys = HVM_SHARED_INFO_ADDR; | ||
1572 | set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_hvm_sip_phys); | ||
1573 | xen_hvm_shared_info = | ||
1574 | (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP); | ||
1575 | } | 1548 | } |
1576 | xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT); | ||
1577 | xen_hvm_set_shared_info(xen_hvm_shared_info); | ||
1578 | } | 1549 | } |
1579 | 1550 | ||
1551 | #ifdef CONFIG_XEN_PVHVM | ||
1580 | static void __init init_hvm_pv_info(void) | 1552 | static void __init init_hvm_pv_info(void) |
1581 | { | 1553 | { |
1582 | uint32_t ecx, edx, pages, msr, base; | 1554 | int major, minor; |
1555 | uint32_t eax, ebx, ecx, edx, pages, msr, base; | ||
1583 | u64 pfn; | 1556 | u64 pfn; |
1584 | 1557 | ||
1585 | base = xen_cpuid_base(); | 1558 | base = xen_cpuid_base(); |
1559 | cpuid(base + 1, &eax, &ebx, &ecx, &edx); | ||
1560 | |||
1561 | major = eax >> 16; | ||
1562 | minor = eax & 0xffff; | ||
1563 | printk(KERN_INFO "Xen version %d.%d.\n", major, minor); | ||
1564 | |||
1586 | cpuid(base + 2, &pages, &msr, &ecx, &edx); | 1565 | cpuid(base + 2, &pages, &msr, &ecx, &edx); |
1587 | 1566 | ||
1588 | pfn = __pa(hypercall_page); | 1567 | pfn = __pa(hypercall_page); |
@@ -1633,22 +1612,12 @@ static void __init xen_hvm_guest_init(void) | |||
1633 | 1612 | ||
1634 | static bool __init xen_hvm_platform(void) | 1613 | static bool __init xen_hvm_platform(void) |
1635 | { | 1614 | { |
1636 | uint32_t eax, ebx, ecx, edx, base; | ||
1637 | |||
1638 | if (xen_pv_domain()) | 1615 | if (xen_pv_domain()) |
1639 | return false; | 1616 | return false; |
1640 | 1617 | ||
1641 | base = xen_cpuid_base(); | 1618 | if (!xen_cpuid_base()) |
1642 | if (!base) | ||
1643 | return false; | 1619 | return false; |
1644 | 1620 | ||
1645 | cpuid(base + 1, &eax, &ebx, &ecx, &edx); | ||
1646 | |||
1647 | xen_major = eax >> 16; | ||
1648 | xen_minor = eax & 0xffff; | ||
1649 | |||
1650 | printk(KERN_INFO "Xen version %d.%d.\n", xen_major, xen_minor); | ||
1651 | |||
1652 | return true; | 1621 | return true; |
1653 | } | 1622 | } |
1654 | 1623 | ||
@@ -1668,6 +1637,7 @@ const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = { | |||
1668 | .name = "Xen HVM", | 1637 | .name = "Xen HVM", |
1669 | .detect = xen_hvm_platform, | 1638 | .detect = xen_hvm_platform, |
1670 | .init_platform = xen_hvm_guest_init, | 1639 | .init_platform = xen_hvm_guest_init, |
1640 | .x2apic_available = xen_x2apic_para_available, | ||
1671 | }; | 1641 | }; |
1672 | EXPORT_SYMBOL(x86_hyper_xen_hvm); | 1642 | EXPORT_SYMBOL(x86_hyper_xen_hvm); |
1673 | #endif | 1643 | #endif |
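
The rework above removes the Xen-version-dependent placement of the
shared info page: one page is carved out of the brk area exactly once,
and its guest pfn is (re)registered with the hypervisor each time the
function runs. The same routine now also serves the resume path (see
the xen/suspend.c hunk below), because the gpfn-to-mfn mapping is lost
across suspend. Condensed from the new xen_hvm_init_shared_info(), as
a sketch without the per-cpu xen_vcpu fixups:

static struct shared_info *shared_info_page;	/* allocated once */

void hvm_map_shared_info_sketch(void)
{
	struct xen_add_to_physmap xatp;

	if (!shared_info_page)
		shared_info_page = extend_brk(PAGE_SIZE, PAGE_SIZE);

	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();	/* losing the shared info page is unrecoverable */

	HYPERVISOR_shared_info = shared_info_page;
}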
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 8971a26d21ab..94eac5c85cdc 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -556,12 +556,9 @@ void __init xen_arch_setup(void) | |||
556 | COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE); | 556 | COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE); |
557 | 557 | ||
558 | /* Set up idle, making sure it calls safe_halt() pvop */ | 558 | /* Set up idle, making sure it calls safe_halt() pvop */ |
559 | #ifdef CONFIG_X86_32 | ||
560 | boot_cpu_data.hlt_works_ok = 1; | ||
561 | #endif | ||
562 | disable_cpuidle(); | 559 | disable_cpuidle(); |
563 | disable_cpufreq(); | 560 | disable_cpufreq(); |
564 | WARN_ON(set_pm_idle_to_default()); | 561 | WARN_ON(xen_set_default_idle()); |
565 | fiddle_vdso(); | 562 | fiddle_vdso(); |
566 | #ifdef CONFIG_NUMA | 563 | #ifdef CONFIG_NUMA |
567 | numa_off = 1; | 564 | numa_off = 1; |
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index ae8a00c39de4..45329c8c226e 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c | |||
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled) | |||
30 | { | 30 | { |
31 | #ifdef CONFIG_XEN_PVHVM | 31 | #ifdef CONFIG_XEN_PVHVM |
32 | int cpu; | 32 | int cpu; |
33 | xen_hvm_resume_shared_info(); | 33 | xen_hvm_init_shared_info(); |
34 | xen_callback_vector(); | 34 | xen_callback_vector(); |
35 | xen_unplug_emulated_devices(); | 35 | xen_unplug_emulated_devices(); |
36 | if (xen_feature(XENFEAT_hvm_safe_pvclock)) { | 36 | if (xen_feature(XENFEAT_hvm_safe_pvclock)) { |
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S index f9643fc50de5..33ca6e42a4ca 100644 --- a/arch/x86/xen/xen-asm_32.S +++ b/arch/x86/xen/xen-asm_32.S | |||
@@ -89,11 +89,11 @@ ENTRY(xen_iret) | |||
89 | */ | 89 | */ |
90 | #ifdef CONFIG_SMP | 90 | #ifdef CONFIG_SMP |
91 | GET_THREAD_INFO(%eax) | 91 | GET_THREAD_INFO(%eax) |
92 | movl TI_cpu(%eax), %eax | 92 | movl %ss:TI_cpu(%eax), %eax |
93 | movl __per_cpu_offset(,%eax,4), %eax | 93 | movl %ss:__per_cpu_offset(,%eax,4), %eax |
94 | mov xen_vcpu(%eax), %eax | 94 | mov %ss:xen_vcpu(%eax), %eax |
95 | #else | 95 | #else |
96 | movl xen_vcpu, %eax | 96 | movl %ss:xen_vcpu, %eax |
97 | #endif | 97 | #endif |
98 | 98 | ||
99 | /* check IF state we're restoring */ | 99 | /* check IF state we're restoring */ |
@@ -106,11 +106,11 @@ ENTRY(xen_iret) | |||
106 | * resuming the code, so we don't have to be worried about | 106 | * resuming the code, so we don't have to be worried about |
107 | * being preempted to another CPU. | 107 | * being preempted to another CPU. |
108 | */ | 108 | */ |
109 | setz XEN_vcpu_info_mask(%eax) | 109 | setz %ss:XEN_vcpu_info_mask(%eax) |
110 | xen_iret_start_crit: | 110 | xen_iret_start_crit: |
111 | 111 | ||
112 | /* check for unmasked and pending */ | 112 | /* check for unmasked and pending */ |
113 | cmpw $0x0001, XEN_vcpu_info_pending(%eax) | 113 | cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax) |
114 | 114 | ||
115 | /* | 115 | /* |
116 | * If there's something pending, mask events again so we can | 116 | * If there's something pending, mask events again so we can |
@@ -118,7 +118,7 @@ xen_iret_start_crit: | |||
118 | * touch XEN_vcpu_info_mask. | 118 | * touch XEN_vcpu_info_mask. |
119 | */ | 119 | */ |
120 | jne 1f | 120 | jne 1f |
121 | movb $1, XEN_vcpu_info_mask(%eax) | 121 | movb $1, %ss:XEN_vcpu_info_mask(%eax) |
122 | 122 | ||
123 | 1: popl %eax | 123 | 1: popl %eax |
124 | 124 | ||
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index d2e73d19d366..a95b41744ad0 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -40,7 +40,7 @@ void xen_enable_syscall(void); | |||
40 | void xen_vcpu_restore(void); | 40 | void xen_vcpu_restore(void); |
41 | 41 | ||
42 | void xen_callback_vector(void); | 42 | void xen_callback_vector(void); |
43 | void xen_hvm_resume_shared_info(void); | 43 | void xen_hvm_init_shared_info(void); |
44 | void xen_unplug_emulated_devices(void); | 44 | void xen_unplug_emulated_devices(void); |
45 | 45 | ||
46 | void __init xen_build_dynamic_phys_to_machine(void); | 46 | void __init xen_build_dynamic_phys_to_machine(void); |