author      Linus Torvalds <torvalds@linux-foundation.org>  2016-01-15 16:18:47 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>  2016-01-15 16:18:47 -0500
commit      f689b742f217b2ffe7925f8a6521b208ee995309 (patch)
tree        40ed4482ce5808fd5498fe935205b06782bbbca4 /arch/powerpc
parent      37cea93b99d2d89bef3adcb4632d71e1f377c447 (diff)
parent      be6bfc29bc759670fb3a2519325c4ab2edf259c9 (diff)
Merge tag 'powerpc-4.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
"Core:
- Ground work for the new Power9 MMU from Aneesh Kumar K.V
- Optimise FP/VMX/VSX context switching from Anton Blanchard
Misc:
- Various cleanups from Krzysztof Kozlowski, John Ogness, Rashmica
Gupta, Russell Currey, Gavin Shan, Daniel Axtens, Michael Neuling,
Andrew Donnellan
- Allow wrapper to work on non-English systems from Laurent Vivier
- Add rN aliases to the pt_regs_offset table from Rashmica Gupta
- Fix module autoload for rackmeter & axonram drivers from Luis de
Bethencourt
- Include KVM guest test in all interrupt vectors from Paul Mackerras
- Fix DSCR inheritance over fork() from Anton Blanchard
- Make value-returning atomics & {cmp}xchg* & their atomic_ versions
fully ordered from Boqun Feng
- Print MSR TM bits in oops messages from Michael Neuling
- Add TM signal return & invalid stack selftests from Michael Neuling
- Limit EPOW reset event warnings from Vipin K Parashar
- Remove the Cell QPACE code from Rashmica Gupta
- Append linux_banner to exception information in xmon from Rashmica
Gupta
- Add selftest to check if VSRs are corrupted from Rashmica Gupta
- Remove broken GregorianDay() from Daniel Axtens
- Import Anton's context_switch2 benchmark into selftests from
Michael Ellerman
- Add selftest script to test HMI functionality from Daniel Axtens
- Remove obsolete OPAL v2 support from Stewart Smith
- Make enter_rtas() private from Michael Ellerman
- PPR exception cleanups from Michael Ellerman
- Add page soft dirty tracking from Laurent Dufour
- Add support for Nvlink NPUs from Alistair Popple
- Add support for kexec on 476fpe from Alistair Popple
- Enable kernel CPU dlpar from sysfs from Nathan Fontenot
- Copy only required pieces of the mm_context_t to the paca from
Michael Neuling
- Add a kmsg_dumper that flushes OPAL console output on panic from
Russell Currey
- Implement save_stack_trace_regs() to enable kprobe stack tracing
from Steven Rostedt
- Add HWCAP bits for Power9 from Michael Ellerman
- Fix _PAGE_PTE breaking swapoff from Aneesh Kumar K.V
- Fix _PAGE_SWP_SOFT_DIRTY breaking swapoff from Hugh Dickins
- scripts/recordmcount.pl: support data in text section on powerpc
from Ulrich Weigand
- Handle R_PPC64_ENTRY relocations in modules from Ulrich Weigand
cxl:
- cxl: Fix possible idr warning when contexts are released from
Vaibhav Jain
- cxl: use correct operator when writing pcie config space values
from Andrew Donnellan
- cxl: Fix DSI misses when the context owning task exits from Vaibhav
Jain
- cxl: fix build for GCC 4.6.x from Brian Norris
- cxl: use -Werror only with CONFIG_PPC_WERROR from Brian Norris
- cxl: Enable PCI device ID for future IBM CXL adapter from Uma
Krishnan
Freescale:
- Freescale updates from Scott: Highlights include moving QE code out
of arch/powerpc (to be shared with arm), device tree updates, and
minor fixes"
* tag 'powerpc-4.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (149 commits)
powerpc/module: Handle R_PPC64_ENTRY relocations
scripts/recordmcount.pl: support data in text section on powerpc
powerpc/powernv: Fix OPAL_CONSOLE_FLUSH prototype and usages
powerpc/mm: fix _PAGE_SWP_SOFT_DIRTY breaking swapoff
powerpc/mm: Fix _PAGE_PTE breaking swapoff
cxl: Enable PCI device ID for future IBM CXL adapter
cxl: use -Werror only with CONFIG_PPC_WERROR
cxl: fix build for GCC 4.6.x
powerpc: Add HWCAP bits for Power9
powerpc/powernv: Reserve PE#0 on NPU
powerpc/powernv: Change NPU PE# assignment
powerpc/powernv: Fix update of NVLink DMA mask
powerpc/powernv: Remove misleading comment in pci.c
powerpc: Implement save_stack_trace_regs() to enable kprobe stack tracing
powerpc: Fix build break due to paca mm_context_t changes
cxl: Fix DSI misses when the context owning task exits
MAINTAINERS: Update Scott Wood's e-mail address
powerpc/powernv: Fix minor off-by-one error in opal_mce_check_early_recovery()
powerpc: Fix style of self-test config prompts
powerpc/powernv: Only delay opal_rtc_read() retry when necessary
...
Diffstat (limited to 'arch/powerpc')
190 files changed, 5142 insertions, 8423 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 85eabc49de61..7d5a8350f913 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -560,6 +560,7 @@ choice
 
 config PPC_4K_PAGES
     bool "4k page size"
+    select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S
 
 config PPC_16K_PAGES
     bool "16k page size"
@@ -568,6 +569,7 @@ config PPC_16K_PAGES
 config PPC_64K_PAGES
     bool "64k page size"
     depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64)
+    select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S
 
 config PPC_256K_PAGES
     bool "256k page size"
@@ -1075,8 +1077,6 @@ source "drivers/Kconfig"
 
 source "fs/Kconfig"
 
-source "arch/powerpc/sysdev/qe_lib/Kconfig"
-
 source "lib/Kconfig"
 
 source "arch/powerpc/Kconfig.debug"
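The two HAVE_ARCH_SOFT_DIRTY selects above are what hook powerpc into generic soft-dirty page tracking (used by CRIU for incremental dumps). A minimal userspace sketch of how the feature is consumed, assuming only the documented /proc ABI (writing "4" to clear_refs resets the bits; bit 55 of each 64-bit pagemap entry reports them) and nothing powerpc-specific:

/* Sketch: check whether a page was written since the last soft-dirty reset. */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static void reset_soft_dirty(void)
{
        int fd = open("/proc/self/clear_refs", O_WRONLY);

        if (fd >= 0) {
                write(fd, "4", 1);      /* "4" clears soft-dirty on all VMAs */
                close(fd);
        }
}

static int page_soft_dirty(uintptr_t vaddr)
{
        uint64_t entry = 0;
        long psize = sysconf(_SC_PAGESIZE);
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
                return -1;
        /* one 64-bit entry per virtual page, indexed by page number */
        pread(fd, &entry, sizeof(entry), (vaddr / psize) * sizeof(entry));
        close(fd);
        return (entry >> 55) & 1;       /* bit 55 = soft-dirty */
}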
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index a0e44a9c456f..638f9ce740f5 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -64,17 +64,17 @@ config PPC_EMULATED_STATS
       emulated.
 
 config CODE_PATCHING_SELFTEST
-    bool "Run self-tests of the code-patching code."
+    bool "Run self-tests of the code-patching code"
     depends on DEBUG_KERNEL
     default n
 
 config FTR_FIXUP_SELFTEST
-    bool "Run self-tests of the feature-fixup code."
+    bool "Run self-tests of the feature-fixup code"
     depends on DEBUG_KERNEL
     default n
 
 config MSI_BITMAP_SELFTEST
-    bool "Run self-tests of the MSI bitmap code."
+    bool "Run self-tests of the MSI bitmap code"
     depends on DEBUG_KERNEL
     default n
 
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 99e4487248ff..61165101342c 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -113,7 +113,6 @@ src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
 src-plat-$(CONFIG_PPC_PSERIES) += pseries-head.S
 src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
 src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
-src-plat-$(CONFIG_PPC_CELL_QPACE) += pseries-head.S
 
 src-wlib := $(sort $(src-wlib-y))
 src-plat := $(sort $(src-plat-y))
@@ -217,7 +216,6 @@ image-$(CONFIG_PPC_POWERNV) += zImage.pseries
 image-$(CONFIG_PPC_MAPLE) += zImage.maple
 image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries
 image-$(CONFIG_PPC_PS3) += dtbImage.ps3
-image-$(CONFIG_PPC_CELL_QPACE) += zImage.pseries
 image-$(CONFIG_PPC_CHRP) += zImage.chrp
 image-$(CONFIG_PPC_EFIKA) += zImage.chrp
 image-$(CONFIG_PPC_PMAC) += zImage.pmac
diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
index 74866ac52f39..1b33f5157c8a 100644
--- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
@@ -474,6 +474,11 @@
     fman@400000 {
         interrupts = <96 2 0 0>, <16 2 1 30>;
 
+        muram@0 {
+            compatible = "fsl,fman-muram";
+            reg = <0x0 0x80000>;
+        };
+
         enet0: ethernet@e0000 {
         };
 
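The new muram@0 node describes the FMan's multi-user RAM so software can claim it. A hypothetical consumer-side fragment — of_find_compatible_node() and of_iomap() are standard kernel APIs, but this lookup is illustrative only, not the actual Freescale FMan driver code:

/* Illustrative only: locate and map the MURAM described above. */
#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *map_fman_muram(void)
{
        struct device_node *np;
        void __iomem *base;

        np = of_find_compatible_node(NULL, NULL, "fsl,fman-muram");
        if (!np)
                return NULL;

        base = of_iomap(np, 0); /* reg = <0x0 0x80000>, relative to the FMan */
        of_node_put(np);
        return base;
}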
diff --git a/arch/powerpc/boot/dts/fsl/bsc9132qds.dts b/arch/powerpc/boot/dts/fsl/bsc9132qds.dts
index 70882ade606d..56e6f1337e96 100644
--- a/arch/powerpc/boot/dts/fsl/bsc9132qds.dts
+++ b/arch/powerpc/boot/dts/fsl/bsc9132qds.dts
@@ -29,6 +29,21 @@
     soc: soc@ff700000 {
         ranges = <0x0 0x0 0xff700000 0x100000>;
     };
+
+    pci0: pcie@ff70a000 {
+        reg = <0 0xff70a000 0 0x1000>;
+        ranges = <0x2000000 0x0 0x90000000 0 0x90000000 0x0 0x20000000
+              0x1000000 0x0 0x00000000 0 0xc0010000 0x0 0x10000>;
+        pcie@0 {
+            ranges = <0x2000000 0x0 0x90000000
+                  0x2000000 0x0 0x90000000
+                  0x0 0x20000000
+
+                  0x1000000 0x0 0x0
+                  0x1000000 0x0 0x0
+                  0x0 0x100000>;
+        };
+    };
 };
 
 /include/ "bsc9132qds.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi b/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi
index c72307198140..b5f071574e83 100644
--- a/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi
@@ -40,6 +40,34 @@
     interrupts = <16 2 0 0 20 2 0 0>;
 };
 
+/* controller at 0xa000 */
+&pci0 {
+    compatible = "fsl,bsc9132-pcie", "fsl,qoriq-pcie-v2.2";
+    device_type = "pci";
+    #size-cells = <2>;
+    #address-cells = <3>;
+    bus-range = <0 255>;
+    interrupts = <16 2 0 0>;
+
+    pcie@0 {
+        reg = <0 0 0 0 0>;
+        #interrupt-cells = <1>;
+        #size-cells = <2>;
+        #address-cells = <3>;
+        device_type = "pci";
+        interrupts = <16 2 0 0>;
+        interrupt-map-mask = <0xf800 0 0 7>;
+
+        interrupt-map = <
+            /* IDSEL 0x0 */
+            0000 0x0 0x0 0x1 &mpic 0x0 0x2 0x0 0x0
+            0000 0x0 0x0 0x2 &mpic 0x1 0x2 0x0 0x0
+            0000 0x0 0x0 0x3 &mpic 0x2 0x2 0x0 0x0
+            0000 0x0 0x0 0x4 &mpic 0x3 0x2 0x0 0x0
+            >;
+    };
+};
+
 &soc {
     #address-cells = <1>;
     #size-cells = <1>;
diff --git a/arch/powerpc/boot/dts/fsl/bsc9132si-pre.dtsi b/arch/powerpc/boot/dts/fsl/bsc9132si-pre.dtsi
index 301a9dba5790..90f7949fe312 100644
--- a/arch/powerpc/boot/dts/fsl/bsc9132si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/bsc9132si-pre.dtsi
@@ -45,6 +45,7 @@
         serial0 = &serial0;
         ethernet0 = &enet0;
         ethernet1 = &enet1;
+        pci0 = &pci0;
     };
 
     cpus {
diff --git a/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi b/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi
index 0f0ced69835a..14b629505038 100644
--- a/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi
@@ -215,3 +215,19 @@
         phy-connection-type = "sgmii";
     };
 };
+
+&pci0 {
+    pcie@0 {
+        interrupt-map = <
+            /* IDSEL 0x0 */
+            /*
+             *irq[4:5] are active-high
+             *irq[6:7] are active-low
+             */
+            0000 0x0 0x0 0x1 &mpic 0x4 0x2 0x0 0x0
+            0000 0x0 0x0 0x2 &mpic 0x5 0x2 0x0 0x0
+            0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+            0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+            >;
+    };
+};
diff --git a/arch/powerpc/boot/dts/fsl/t1023rdb.dts b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
index 2b2fff4a12a2..6bd842beb1dc 100644
--- a/arch/powerpc/boot/dts/fsl/t1023rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
@@ -159,4 +159,4 @@
     };
 };
 
-/include/ "t1023si-post.dtsi"
+#include "t1023si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
index 518ddaa8da2d..99e421df79d4 100644
--- a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
@@ -32,6 +32,8 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <dt-bindings/thermal/thermal.h>
+
 &ifc {
     #address-cells = <2>;
     #size-cells = <1>;
@@ -275,6 +277,90 @@
         reg = <0xea000 0x4000>;
     };
 
+    tmu: tmu@f0000 {
+        compatible = "fsl,qoriq-tmu";
+        reg = <0xf0000 0x1000>;
+        interrupts = <18 2 0 0>;
+        fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x30061>;
+        fsl,tmu-calibration = <0x00000000 0x0000000f
+                               0x00000001 0x00000017
+                               0x00000002 0x0000001e
+                               0x00000003 0x00000026
+                               0x00000004 0x0000002e
+                               0x00000005 0x00000035
+                               0x00000006 0x0000003d
+                               0x00000007 0x00000044
+                               0x00000008 0x0000004c
+                               0x00000009 0x00000053
+                               0x0000000a 0x0000005b
+                               0x0000000b 0x00000064
+
+                               0x00010000 0x00000011
+                               0x00010001 0x0000001c
+                               0x00010002 0x00000024
+                               0x00010003 0x0000002b
+                               0x00010004 0x00000034
+                               0x00010005 0x00000039
+                               0x00010006 0x00000042
+                               0x00010007 0x0000004c
+                               0x00010008 0x00000051
+                               0x00010009 0x0000005a
+                               0x0001000a 0x00000063
+
+                               0x00020000 0x00000013
+                               0x00020001 0x00000019
+                               0x00020002 0x00000024
+                               0x00020003 0x0000002c
+                               0x00020004 0x00000035
+                               0x00020005 0x0000003d
+                               0x00020006 0x00000046
+                               0x00020007 0x00000050
+                               0x00020008 0x00000059
+
+                               0x00030000 0x00000002
+                               0x00030001 0x0000000d
+                               0x00030002 0x00000019
+                               0x00030003 0x00000024>;
+        #thermal-sensor-cells = <0>;
+    };
+
+    thermal-zones {
+        cpu_thermal: cpu-thermal {
+            polling-delay-passive = <1000>;
+            polling-delay = <5000>;
+
+            thermal-sensors = <&tmu>;
+
+            trips {
+                cpu_alert: cpu-alert {
+                    temperature = <85000>;
+                    hysteresis = <2000>;
+                    type = "passive";
+                };
+                cpu_crit: cpu-crit {
+                    temperature = <95000>;
+                    hysteresis = <2000>;
+                    type = "critical";
+                };
+            };
+
+            cooling-maps {
+                map0 {
+                    trip = <&cpu_alert>;
+                    cooling-device =
+                        <&cpu0 THERMAL_NO_LIMIT
+                        THERMAL_NO_LIMIT>;
+                };
+                map1 {
+                    trip = <&cpu_alert>;
+                    cooling-device =
+                        <&cpu1 THERMAL_NO_LIMIT
+                        THERMAL_NO_LIMIT>;
+                };
+            };
+        };
+    };
+
     scfg: global-utilities@fc000 {
         compatible = "fsl,t1023-scfg";
         reg = <0xfc000 0x1000>;
diff --git a/arch/powerpc/boot/dts/fsl/t1024qds.dts b/arch/powerpc/boot/dts/fsl/t1024qds.dts
index 43cd5b50cd0a..6a3581b8e1f8 100644
--- a/arch/powerpc/boot/dts/fsl/t1024qds.dts
+++ b/arch/powerpc/boot/dts/fsl/t1024qds.dts
@@ -248,4 +248,4 @@
     };
 };
 
-/include/ "t1024si-post.dtsi"
+#include "t1024si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t1024rdb.dts b/arch/powerpc/boot/dts/fsl/t1024rdb.dts
index 429d8c73650a..0ccc7d03335e 100644
--- a/arch/powerpc/boot/dts/fsl/t1024rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1024rdb.dts
@@ -188,4 +188,4 @@
     };
 };
 
-/include/ "t1024si-post.dtsi"
+#include "t1024si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t1024si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1024si-post.dtsi
index 95e3af8d768e..bb480346a58d 100644
--- a/arch/powerpc/boot/dts/fsl/t1024si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1024si-post.dtsi
@@ -32,7 +32,7 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/include/ "t1023si-post.dtsi"
+#include "t1023si-post.dtsi"
 
 / {
     aliases {
diff --git a/arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi b/arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi
index 3e1528abf3f4..9d08a363bab3 100644
--- a/arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi
@@ -76,6 +76,7 @@
             reg = <0>;
             clocks = <&mux0>;
             next-level-cache = <&L2_1>;
+            #cooling-cells = <2>;
             L2_1: l2-cache {
                 next-level-cache = <&cpc>;
             };
@@ -85,6 +86,7 @@
             reg = <1>;
             clocks = <&mux1>;
             next-level-cache = <&L2_2>;
+            #cooling-cells = <2>;
             L2_2: l2-cache {
                 next-level-cache = <&cpc>;
             };
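The new #cooling-cells = <2> properties mark each core as a cooling device taking the usual <min max> state pair, which is what the THERMAL_NO_LIMIT cooling-maps above reference. In Linux this is normally backed by cpufreq-based cooling; a hedged sketch using the of_cpufreq_cooling_register() API of that era:

/* Sketch: back a CPU node's #cooling-cells with cpufreq cooling. */
#include <linux/cpu_cooling.h>
#include <linux/cpumask.h>
#include <linux/of.h>

static struct thermal_cooling_device *register_cpu_cooling(int cpu)
{
        struct device_node *np = of_get_cpu_node(cpu, NULL);

        if (!np)
                return NULL;
        /* throttling is per-core here; cores sharing a clock pass a mask */
        return of_cpufreq_cooling_register(np, cpumask_of(cpu));
}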
diff --git a/arch/powerpc/boot/dts/fsl/t1040d4rdb.dts b/arch/powerpc/boot/dts/fsl/t1040d4rdb.dts
index 681746efd31d..fb6bc02ebb60 100644
--- a/arch/powerpc/boot/dts/fsl/t1040d4rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1040d4rdb.dts
@@ -43,4 +43,4 @@
     interrupt-parent = <&mpic>;
 };
 
-/include/ "t1040si-post.dtsi"
+#include "t1040si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t1040qds.dts b/arch/powerpc/boot/dts/fsl/t1040qds.dts
index 4d298659468c..5f76edc7838c 100644
--- a/arch/powerpc/boot/dts/fsl/t1040qds.dts
+++ b/arch/powerpc/boot/dts/fsl/t1040qds.dts
@@ -43,4 +43,4 @@
     interrupt-parent = <&mpic>;
 };
 
-/include/ "t1040si-post.dtsi"
+#include "t1040si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb.dts b/arch/powerpc/boot/dts/fsl/t1040rdb.dts
index 8f9e65b47515..cf194154bbdc 100644
--- a/arch/powerpc/boot/dts/fsl/t1040rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1040rdb.dts
@@ -45,4 +45,4 @@
     };
 };
 
-/include/ "t1040si-post.dtsi"
+#include "t1040si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index d30b3de1cfc5..e0f4da554774 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -32,6 +32,8 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <dt-bindings/thermal/thermal.h>
+
 &bman_fbpr {
     compatible = "fsl,bman-fbpr";
     alloc-ranges = <0 0 0x10000 0>;
@@ -484,6 +486,98 @@
         reg = <0xea000 0x4000>;
     };
 
+    tmu: tmu@f0000 {
+        compatible = "fsl,qoriq-tmu";
+        reg = <0xf0000 0x1000>;
+        interrupts = <18 2 0 0>;
+        fsl,tmu-range = <0xa0000 0x90026 0x8004a 0x1006a>;
+        fsl,tmu-calibration = <0x00000000 0x00000025
+                               0x00000001 0x00000028
+                               0x00000002 0x0000002d
+                               0x00000003 0x00000031
+                               0x00000004 0x00000036
+                               0x00000005 0x0000003a
+                               0x00000006 0x00000040
+                               0x00000007 0x00000044
+                               0x00000008 0x0000004a
+                               0x00000009 0x0000004f
+                               0x0000000a 0x00000054
+
+                               0x00010000 0x0000000d
+                               0x00010001 0x00000013
+                               0x00010002 0x00000019
+                               0x00010003 0x0000001f
+                               0x00010004 0x00000025
+                               0x00010005 0x0000002d
+                               0x00010006 0x00000033
+                               0x00010007 0x00000043
+                               0x00010008 0x0000004b
+                               0x00010009 0x00000053
+
+                               0x00020000 0x00000010
+                               0x00020001 0x00000017
+                               0x00020002 0x0000001f
+                               0x00020003 0x00000029
+                               0x00020004 0x00000031
+                               0x00020005 0x0000003c
+                               0x00020006 0x00000042
+                               0x00020007 0x0000004d
+                               0x00020008 0x00000056
+
+                               0x00030000 0x00000012
+                               0x00030001 0x0000001d>;
+        #thermal-sensor-cells = <0>;
+    };
+
+    thermal-zones {
+        cpu_thermal: cpu-thermal {
+            polling-delay-passive = <1000>;
+            polling-delay = <5000>;
+
+            thermal-sensors = <&tmu>;
+
+            trips {
+                cpu_alert: cpu-alert {
+                    temperature = <85000>;
+                    hysteresis = <2000>;
+                    type = "passive";
+                };
+                cpu_crit: cpu-crit {
+                    temperature = <95000>;
+                    hysteresis = <2000>;
+                    type = "critical";
+                };
+            };
+
+            cooling-maps {
+                map0 {
+                    trip = <&cpu_alert>;
+                    cooling-device =
+                        <&cpu0 THERMAL_NO_LIMIT
+                        THERMAL_NO_LIMIT>;
+                };
+                map1 {
+                    trip = <&cpu_alert>;
+                    cooling-device =
+                        <&cpu1 THERMAL_NO_LIMIT
+                        THERMAL_NO_LIMIT>;
+                };
+                map2 {
+                    trip = <&cpu_alert>;
+                    cooling-device =
+                        <&cpu2 THERMAL_NO_LIMIT
+                        THERMAL_NO_LIMIT>;
+                };
+                map3 {
+                    trip = <&cpu_alert>;
+                    cooling-device =
+                        <&cpu3 THERMAL_NO_LIMIT
+                        THERMAL_NO_LIMIT>;
+                };
+            };
+        };
+    };
+
     scfg: global-utilities@fc000 {
         compatible = "fsl,t1040-scfg";
         reg = <0xfc000 0x1000>;
diff --git a/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts b/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
index b245b31b8279..2a5a90dd272e 100644
--- a/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
@@ -50,4 +50,4 @@
     };
 };
 
-/include/ "t1040si-post.dtsi"
+#include "t1042si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t1042qds.dts b/arch/powerpc/boot/dts/fsl/t1042qds.dts
index 4ab9bbe7c5c5..90a4a73bb905 100644
--- a/arch/powerpc/boot/dts/fsl/t1042qds.dts
+++ b/arch/powerpc/boot/dts/fsl/t1042qds.dts
@@ -43,4 +43,4 @@
     interrupt-parent = <&mpic>;
 };
 
-/include/ "t1042si-post.dtsi"
+#include "t1042si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t1042rdb.dts b/arch/powerpc/boot/dts/fsl/t1042rdb.dts
index 67af56bc5ee9..8d908e795e4d 100644
--- a/arch/powerpc/boot/dts/fsl/t1042rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1042rdb.dts
@@ -45,4 +45,4 @@
     };
 };
 
-/include/ "t1042si-post.dtsi"
+#include "t1042si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts b/arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts
index 2f67677530a4..98c001019d6a 100644
--- a/arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts
+++ b/arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts
@@ -54,4 +54,4 @@
     };
 };
 
-/include/ "t1042si-post.dtsi"
+#include "t1042si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t1042si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1042si-post.dtsi
index 319b74f29724..a5544f93689c 100644
--- a/arch/powerpc/boot/dts/fsl/t1042si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1042si-post.dtsi
@@ -32,6 +32,6 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/include/ "t1040si-post.dtsi"
+#include "t1040si-post.dtsi"
 
 /* Place holder for ethernet related device tree nodes */
diff --git a/arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi b/arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi
index fcfa38ae5e02..6db0ee8b1384 100644
--- a/arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi
@@ -76,6 +76,7 @@
             reg = <0>;
             clocks = <&mux0>;
             next-level-cache = <&L2_1>;
+            #cooling-cells = <2>;
             L2_1: l2-cache {
                 next-level-cache = <&cpc>;
             };
@@ -85,6 +86,7 @@
             reg = <1>;
             clocks = <&mux1>;
             next-level-cache = <&L2_2>;
+            #cooling-cells = <2>;
             L2_2: l2-cache {
                 next-level-cache = <&cpc>;
             };
@@ -94,6 +96,7 @@
             reg = <2>;
             clocks = <&mux2>;
             next-level-cache = <&L2_3>;
+            #cooling-cells = <2>;
             L2_3: l2-cache {
                 next-level-cache = <&cpc>;
             };
@@ -103,6 +106,7 @@
             reg = <3>;
             clocks = <&mux3>;
             next-level-cache = <&L2_4>;
+            #cooling-cells = <2>;
             L2_4: l2-cache {
                 next-level-cache = <&cpc>;
             };
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index ceaa75d5a684..6a19fcef5596 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -154,7 +154,7 @@ if [ -z "$kernel" ]; then
     kernel=vmlinux
 fi
 
-elfformat="`${CROSS}objdump -p "$kernel" | grep 'file format' | awk '{print $4}'`"
+LANG=C elfformat="`${CROSS}objdump -p "$kernel" | grep 'file format' | awk '{print $4}'`"
 case "$elfformat" in
     elf64-powerpcle) format=elf64lppc ;;
     elf64-powerpc) format=elf32ppc ;;
diff --git a/arch/powerpc/configs/mpc85xx_basic_defconfig b/arch/powerpc/configs/mpc85xx_basic_defconfig
index 850bd195d0e8..b1593fe6f70b 100644
--- a/arch/powerpc/configs/mpc85xx_basic_defconfig
+++ b/arch/powerpc/configs/mpc85xx_basic_defconfig
@@ -12,6 +12,7 @@ CONFIG_P1010_RDB=y
 CONFIG_P1022_DS=y
 CONFIG_P1022_RDK=y
 CONFIG_P1023_RDB=y
+CONFIG_TWR_P102x=y
 CONFIG_SBC8548=y
 CONFIG_SOCRATES=y
 CONFIG_STX_GP3=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 2c041b535a64..b041fb607376 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -36,7 +36,6 @@ CONFIG_PS3_ROM=m
 CONFIG_PS3_FLASH=m
 CONFIG_PS3_LPM=m
 CONFIG_PPC_IBM_CELL_BLADE=y
-CONFIG_PPC_CELL_QPACE=y
 CONFIG_RTAS_FLASH=m
 CONFIG_IBMEBUS=y
 CONFIG_CPU_FREQ_PMAC64=y
diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c
index bd5e63f72ad4..93ee046d12cd 100644
--- a/arch/powerpc/crypto/aes-spe-glue.c
+++ b/arch/powerpc/crypto/aes-spe-glue.c
@@ -85,6 +85,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+    disable_kernel_spe();
     /* reenable preemption */
     preempt_enable();
 }
diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c
index 3e1d22212521..f9ebc38d3fe7 100644
--- a/arch/powerpc/crypto/sha1-spe-glue.c
+++ b/arch/powerpc/crypto/sha1-spe-glue.c
@@ -46,6 +46,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+    disable_kernel_spe();
     /* reenable preemption */
     preempt_enable();
 }
diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c
index f4a616fe1a82..718a079dcdbf 100644
--- a/arch/powerpc/crypto/sha256-spe-glue.c
+++ b/arch/powerpc/crypto/sha256-spe-glue.c
@@ -47,6 +47,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+    disable_kernel_spe();
     /* reenable preemption */
     preempt_enable();
 }
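All three SPE glue files receive the same one-line fix: spe_end() must give the SPE unit back before re-enabling preemption. Paired with spe_begin() (which in these files presumably disables preemption and calls enable_kernel_spe(), as the hunk context suggests), the bracket around any kernel SPE usage looks like this:

/* The begin/end bracket these drivers keep around SPE instructions. */
static void spe_begin(void)
{
        /* disable preemption and take ownership of the SPE unit */
        preempt_disable();
        enable_kernel_spe();
}

static void spe_end(void)
{
        disable_kernel_spe();
        /* reenable preemption */
        preempt_enable();
}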
diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/book3s/32/hash.h
index 62cfb0c663bb..264b754d65b0 100644
--- a/arch/powerpc/include/asm/pte-hash32.h
+++ b/arch/powerpc/include/asm/book3s/32/hash.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_PTE_HASH32_H
-#define _ASM_POWERPC_PTE_HASH32_H
+#ifndef _ASM_POWERPC_BOOK3S_32_HASH_H
+#define _ASM_POWERPC_BOOK3S_32_HASH_H
 #ifdef __KERNEL__
 
 /*
@@ -43,4 +43,4 @@
 #define PTE_ATOMIC_UPDATES 1
 
 #endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_PTE_HASH32_H */
+#endif /* _ASM_POWERPC_BOOK3S_32_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
new file mode 100644
index 000000000000..38b33dcfcc9d
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -0,0 +1,482 @@
+#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
+#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+#include <asm/book3s/32/hash.h>
+
+/* And here we include common definitions */
+#include <asm/pte-common.h>
+
+/*
+ * The normal case is that PTEs are 32-bits and we have a 1-page
+ * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
+ *
+ * For any >32-bit physical address platform, we can use the following
+ * two level page table layout where the pgdir is 8KB and the MS 13 bits
+ * are an index to the second level table.  The combined pgdir/pmd first
+ * level has 2048 entries and the second level has 512 64-bit PTE entries.
+ * -Matt
+ */
+/* PGDIR_SHIFT determines what a top-level page table entry can map */
+#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SIZE  (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK  (~(PGDIR_SIZE-1))
+
+#define PTRS_PER_PTE    (1 << PTE_SHIFT)
+#define PTRS_PER_PMD    1
+#define PTRS_PER_PGD    (1 << (32 - PGDIR_SHIFT))
+
+#define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)
+/*
+ * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
+ * value (for now) on others, from where we can start layout kernel
+ * virtual space that goes below PKMAP and FIXMAP
+ */
+#ifdef CONFIG_HIGHMEM
+#define KVIRT_TOP   PKMAP_BASE
+#else
+#define KVIRT_TOP   (0xfe000000UL)  /* for now, could be FIXMAP_BASE ? */
+#endif
+
+/*
+ * ioremap_bot starts at that address. Early ioremaps move down from there,
+ * until mem_init() at which point this becomes the top of the vmalloc
+ * and ioremap space
+ */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define IOREMAP_TOP ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#else
+#define IOREMAP_TOP KVIRT_TOP
+#endif
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 16MB value just means that there will be a 64MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * We no longer map larger than phys RAM with the BATs so we don't have
+ * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
+ * about clashes between our early calls to ioremap() that start growing down
+ * from ioremap_base being run into the VM area allocations (growing upwards
+ * from VMALLOC_START).  For this reason we have ioremap_bot to check when
+ * we actually run into our mappings setup in the early boot with the VM
+ * system.  This really does become a problem for machines with good amounts
+ * of RAM.  -- Cort
+ */
+#define VMALLOC_OFFSET (0x1000000) /* 16M */
+#ifdef PPC_PIN_SIZE
+#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#else
+#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#endif
+#define VMALLOC_END ioremap_bot
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+#include <linux/threads.h>
+#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
+
+extern unsigned long ioremap_bot;
+
+/*
+ * entries per page directory level: our page-table tree is two-level, so
+ * we don't really have any PMD directory.
+ */
+#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_SHIFT)
+#define PGD_TABLE_SIZE  (sizeof(pgd_t) << (32 - PGDIR_SHIFT))
+
+#define pte_ERROR(e) \
+    pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
+        (unsigned long long)pte_val(e))
+#define pgd_ERROR(e) \
+    pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+/*
+ * Bits in a linux-style PTE.  These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible.
+ */
+
+#define pte_clear(mm, addr, ptep) \
+    do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
+
+#define pmd_none(pmd)       (!pmd_val(pmd))
+#define pmd_bad(pmd)        (pmd_val(pmd) & _PMD_BAD)
+#define pmd_present(pmd)    (pmd_val(pmd) & _PMD_PRESENT_MASK)
+static inline void pmd_clear(pmd_t *pmdp)
+{
+    *pmdp = __pmd(0);
+}
+
+
+/*
+ * When flushing the tlb entry for a page, we also need to flush the hash
+ * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
+ */
+extern int flush_hash_pages(unsigned context, unsigned long va,
+                unsigned long pmdval, int count);
+
+/* Add an HPTE to the hash table */
+extern void add_hash_page(unsigned context, unsigned long va,
+              unsigned long pmdval);
+
+/* Flush an entry from the TLB/hash table */
+extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
+                 unsigned long address);
+
+/*
+ * PTE updates. This function is called whenever an existing
+ * valid PTE is updated. This does -not- include set_pte_at()
+ * which nowadays only sets a new PTE.
+ *
+ * Depending on the type of MMU, we may need to use atomic updates
+ * and the PTE may be either 32 or 64 bit wide. In the later case,
+ * when using atomic updates, only the low part of the PTE is
+ * accessed atomically.
+ *
+ * In addition, on 44x, we also maintain a global flag indicating
+ * that an executable user mapping was modified, which is needed
+ * to properly flush the virtually tagged instruction cache of
+ * those implementations.
+ */
+#ifndef CONFIG_PTE_64BIT
+static inline unsigned long pte_update(pte_t *p,
+                       unsigned long clr,
+                       unsigned long set)
+{
+    unsigned long old, tmp;
+
+    __asm__ __volatile__("\
+1:  lwarx   %0,0,%3\n\
+    andc    %1,%0,%4\n\
+    or  %1,%1,%5\n"
+    PPC405_ERR77(0,%3)
+"   stwcx.  %1,0,%3\n\
+    bne-    1b"
+    : "=&r" (old), "=&r" (tmp), "=m" (*p)
+    : "r" (p), "r" (clr), "r" (set), "m" (*p)
+    : "cc" );
+
+    return old;
+}
+#else /* CONFIG_PTE_64BIT */
+static inline unsigned long long pte_update(pte_t *p,
+                        unsigned long clr,
+                        unsigned long set)
+{
+    unsigned long long old;
+    unsigned long tmp;
+
+    __asm__ __volatile__("\
+1:  lwarx   %L0,0,%4\n\
+    lwzx    %0,0,%3\n\
+    andc    %1,%L0,%5\n\
+    or  %1,%1,%6\n"
+    PPC405_ERR77(0,%3)
+"   stwcx.  %1,0,%4\n\
+    bne-    1b"
+    : "=&r" (old), "=&r" (tmp), "=m" (*p)
+    : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+    : "cc" );
+
+    return old;
+}
+#endif /* CONFIG_PTE_64BIT */
+
+/*
+ * 2.6 calls this without flushing the TLB entry; this is wrong
+ * for our hash-based implementation, we fix that up here.
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+{
+    unsigned long old;
+    old = pte_update(ptep, _PAGE_ACCESSED, 0);
+    if (old & _PAGE_HASHPTE) {
+        unsigned long ptephys = __pa(ptep) & PAGE_MASK;
+        flush_hash_pages(context, addr, ptephys, 1);
+    }
+    return (old & _PAGE_ACCESSED) != 0;
+}
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+    __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                       pte_t *ptep)
+{
+    return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+                      pte_t *ptep)
+{
+    pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
+}
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                       unsigned long addr, pte_t *ptep)
+{
+    ptep_set_wrprotect(mm, addr, ptep);
+}
+
+
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
+{
+    unsigned long set = pte_val(entry) &
+        (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+    unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+
+    pte_update(ptep, clr, set);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
+
+/*
+ * Note that on Book E processors, the pmd contains the kernel virtual
+ * (lowmem) address of the pte page.  The physical address is less useful
+ * because everything runs with translation enabled (even the TLB miss
+ * handler).  On everything else the pmd contains the physical address
+ * of the pte page.  -- paulus
+ */
+#ifndef CONFIG_BOOKE
+#define pmd_page_vaddr(pmd) \
+    ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd)       \
+    pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#else
+#define pmd_page_vaddr(pmd) \
+    ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd)       \
+    pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
+#endif
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(address)   ((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address)      \
+    (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, addr)    \
+    ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir, addr)       \
+    ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+#define pte_unmap(pte)      kunmap_atomic(pte)
+
+/*
+ * Encode and decode a swap entry.
+ * Note that the bits we use in a PTE for representing a swap entry
+ * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
+ *   -- paulus
+ */
+#define __swp_type(entry)       ((entry).val & 0x1f)
+#define __swp_offset(entry)     ((entry).val >> 5)
+#define __swp_entry(type, offset)   ((swp_entry_t) { (type) | ((offset) << 5) })
+#define __pte_to_swp_entry(pte)     ((swp_entry_t) { pte_val(pte) >> 3 })
+#define __swp_entry_to_pte(x)       ((pte_t) { (x).val << 3 })
+
+#ifndef CONFIG_PPC_4K_PAGES
+void pgtable_cache_init(void);
+#else
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()    do { } while (0)
+#endif
+
+extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
+              pmd_t **pmdp);
+
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte)      { return !!(pte_val(pte) & _PAGE_RW);}
+static inline int pte_dirty(pte_t pte)      { return !!(pte_val(pte) & _PAGE_DIRTY); }
+static inline int pte_young(pte_t pte)      { return !!(pte_val(pte) & _PAGE_ACCESSED); }
+static inline int pte_special(pte_t pte)    { return !!(pte_val(pte) & _PAGE_SPECIAL); }
+static inline int pte_none(pte_t pte)       { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte)    { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+static inline int pte_present(pte_t pte)
+{
+    return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+    return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+             pgprot_val(pgprot));
+}
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+    return pte_val(pte) >> PTE_RPN_SHIFT;
+}
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+    return __pte(pte_val(pte) & ~_PAGE_RW);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+    return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+    return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+    return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+    return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+    return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+    return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+    return pte;
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+    return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+
+
+/* This low level function performs the actual PTE insertion
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * an horrible mess that I'm not going to try to clean up now but
+ * I'm keeping it in one place rather than spread around
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+                pte_t *ptep, pte_t pte, int percpu)
+{
+#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+    /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
+     * helper pte_update() which does an atomic update. We need to do that
+     * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
+     * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
+     * the hash bits instead (ie, same as the non-SMP case)
+     */
+    if (percpu)
+        *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+                  | (pte_val(pte) & ~_PAGE_HASHPTE));
+    else
+        pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+    /* Second case is 32-bit with 64-bit PTE.  In this case, we
+     * can just store as long as we do the two halves in the right order
+     * with a barrier in between. This is possible because we take care,
+     * in the hash code, to pre-invalidate if the PTE was already hashed,
+     * which synchronizes us with any concurrent invalidation.
+     * In the percpu case, we also fallback to the simple update preserving
+     * the hash bits
+     */
+    if (percpu) {
+        *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+                  | (pte_val(pte) & ~_PAGE_HASHPTE));
+        return;
+    }
+    if (pte_val(*ptep) & _PAGE_HASHPTE)
+        flush_hash_entry(mm, ptep, addr);
+    __asm__ __volatile__("\
+        stw%U0%X0 %2,%0\n\
+        eieio\n\
+        stw%U0%X0 %L2,%1"
+    : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+    : "r" (pte) : "memory");
+
+#elif defined(CONFIG_PPC_STD_MMU_32)
+    /* Third case is 32-bit hash table in UP mode, we need to preserve
+     * the _PAGE_HASHPTE bit since we may not have invalidated the previous
+     * translation in the hash yet (done in a subsequent flush_tlb_xxx())
+     * and see we need to keep track that this PTE needs invalidating
+     */
+    *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+              | (pte_val(pte) & ~_PAGE_HASHPTE));
+
+#else
+#error "Not supported "
+#endif
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+             _PAGE_WRITETHRU)
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t prot)
+{
+    return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+            _PAGE_NO_CACHE | _PAGE_GUARDED);
+}
+
+#define pgprot_noncached_wc pgprot_noncached_wc
+static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
+{
+    return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+            _PAGE_NO_CACHE);
+}
+
+#define pgprot_cached pgprot_cached
+static inline pgprot_t pgprot_cached(pgprot_t prot)
+{
+    return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+            _PAGE_COHERENT);
+}
+
+#define pgprot_cached_wthru pgprot_cached_wthru
+static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
+{
+    return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+            _PAGE_COHERENT | _PAGE_WRITETHRU);
+}
+
+#define pgprot_cached_noncoherent pgprot_cached_noncoherent
+static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
+{
+    return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t prot)
+{
+    return pgprot_noncached_wc(prot);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */
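The heart of the new header is pte_update(): a lwarx/stwcx. read-modify-write that clears the clr bits, sets the set bits, retries if the reservation is lost, and hands back the old value so callers such as __ptep_test_and_clear_young() can inspect the previous bits. A portable illustration of those semantics (a sketch only — the kernel needs the inline assembly above for the PPC405_ERR77 workaround and the exact atomicity guarantees):

/* Portable sketch of pte_update()'s semantics for a 32-bit PTE. */
#include <stdatomic.h>

static unsigned long pte_update_sketch(_Atomic unsigned long *p,
                                       unsigned long clr, unsigned long set)
{
        unsigned long old = atomic_load(p);

        /* the lwarx/stwcx. loop: retry until the update lands atomically */
        while (!atomic_compare_exchange_weak(p, &old, (old & ~clr) | set))
                ;
        return old;     /* callers test returned bits such as _PAGE_ACCESSED */
}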
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h new file mode 100644 index 000000000000..ea0414d6659e --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h | |||
@@ -0,0 +1,132 @@ | |||
1 | #ifndef _ASM_POWERPC_BOOK3S_64_HASH_4K_H | ||
2 | #define _ASM_POWERPC_BOOK3S_64_HASH_4K_H | ||
3 | /* | ||
4 | * Entries per page directory level. The PTE level must use a 64b record | ||
5 | * for each page table entry. The PMD and PGD level use a 32b record for | ||
6 | * each entry by assuming that each entry is page aligned. | ||
7 | */ | ||
8 | #define PTE_INDEX_SIZE 9 | ||
9 | #define PMD_INDEX_SIZE 7 | ||
10 | #define PUD_INDEX_SIZE 9 | ||
11 | #define PGD_INDEX_SIZE 9 | ||
12 | |||
13 | #ifndef __ASSEMBLY__ | ||
14 | #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) | ||
15 | #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) | ||
16 | #define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE) | ||
17 | #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) | ||
18 | #endif /* __ASSEMBLY__ */ | ||
19 | |||
20 | #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) | ||
21 | #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) | ||
22 | #define PTRS_PER_PUD (1 << PUD_INDEX_SIZE) | ||
23 | #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) | ||
24 | |||
25 | /* PMD_SHIFT determines what a second-level page table entry can map */ | ||
26 | #define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) | ||
27 | #define PMD_SIZE (1UL << PMD_SHIFT) | ||
28 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
29 | |||
30 | /* With 4k base page size, hugepage PTEs go at the PMD level */ | ||
31 | #define MIN_HUGEPTE_SHIFT PMD_SHIFT | ||
32 | |||
33 | /* PUD_SHIFT determines what a third-level page table entry can map */ | ||
34 | #define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) | ||
35 | #define PUD_SIZE (1UL << PUD_SHIFT) | ||
36 | #define PUD_MASK (~(PUD_SIZE-1)) | ||
37 | |||
38 | /* PGDIR_SHIFT determines what a fourth-level page table entry can map */ | ||
39 | #define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE) | ||
40 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | ||
41 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
42 | |||
43 | /* Bits to mask out from a PMD to get to the PTE page */ | ||
44 | #define PMD_MASKED_BITS 0 | ||
45 | /* Bits to mask out from a PUD to get to the PMD page */ | ||
46 | #define PUD_MASKED_BITS 0 | ||
47 | /* Bits to mask out from a PGD to get to the PUD page */ | ||
48 | #define PGD_MASKED_BITS 0 | ||
49 | |||
50 | /* PTE flags to conserve for HPTE identification */ | ||
51 | #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \ | ||
52 | _PAGE_F_SECOND | _PAGE_F_GIX) | ||
53 | |||
54 | /* shift to put page number into pte */ | ||
55 | #define PTE_RPN_SHIFT (18) | ||
56 | |||
57 | #define _PAGE_4K_PFN 0 | ||
58 | #ifndef __ASSEMBLY__ | ||
59 | /* | ||
60 | * 4-level page tables related bits | ||
61 | */ | ||
62 | |||
63 | #define pgd_none(pgd) (!pgd_val(pgd)) | ||
64 | #define pgd_bad(pgd) (pgd_val(pgd) == 0) | ||
65 | #define pgd_present(pgd) (pgd_val(pgd) != 0) | ||
66 | #define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) | ||
67 | |||
68 | static inline void pgd_clear(pgd_t *pgdp) | ||
69 | { | ||
70 | *pgdp = __pgd(0); | ||
71 | } | ||
72 | |||
73 | static inline pte_t pgd_pte(pgd_t pgd) | ||
74 | { | ||
75 | return __pte(pgd_val(pgd)); | ||
76 | } | ||
77 | |||
78 | static inline pgd_t pte_pgd(pte_t pte) | ||
79 | { | ||
80 | return __pgd(pte_val(pte)); | ||
81 | } | ||
82 | extern struct page *pgd_page(pgd_t pgd); | ||
83 | |||
84 | #define pud_offset(pgdp, addr) \ | ||
85 | (((pud_t *) pgd_page_vaddr(*(pgdp))) + \ | ||
86 | (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))) | ||
87 | |||
88 | #define pud_ERROR(e) \ | ||
89 | pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) | ||
90 | |||
91 | /* | ||
92 | * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */ | ||
93 | #define remap_4k_pfn(vma, addr, pfn, prot) \ | ||
94 | remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) | ||
95 | |||
96 | #ifdef CONFIG_HUGETLB_PAGE | ||
97 | /* | ||
98 | * For 4k page size, we support explicit hugepage via hugepd | ||
99 | */ | ||
100 | static inline int pmd_huge(pmd_t pmd) | ||
101 | { | ||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | static inline int pud_huge(pud_t pud) | ||
106 | { | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static inline int pgd_huge(pgd_t pgd) | ||
111 | { | ||
112 | return 0; | ||
113 | } | ||
114 | #define pgd_huge pgd_huge | ||
115 | |||
116 | static inline int hugepd_ok(hugepd_t hpd) | ||
117 | { | ||
118 | /* | ||
119 | * If it is not a pte and has the hugepd shift mask | ||
120 | * set, then it is a hugepd directory pointer. | ||
121 | */ | ||
122 | if (!(hpd.pd & _PAGE_PTE) && | ||
123 | ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)) | ||
124 | return true; | ||
125 | return false; | ||
126 | } | ||
127 | #define is_hugepd(hpd) (hugepd_ok(hpd)) | ||
128 | #endif | ||
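To make hugepd_ok() above concrete, here is a minimal sketch with assumed values (dir_addr and the shift of 24 for a 16M page are illustrative, not taken from this patch): the hugepd entry keeps the page shift in its low bits, so the shift mask is non-zero while _PAGE_PTE stays clear.

    hugepd_t hpd = { .pd = dir_addr | 24 };  /* 16M shift, no _PAGE_PTE */
    BUG_ON(!hugepd_ok(hpd));                 /* recognised as a hugepd pointer */

A leaf pte, by contrast, has _PAGE_PTE set and fails the first test.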
129 | |||
130 | #endif /* !__ASSEMBLY__ */ | ||
131 | |||
132 | #endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */ | ||
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h new file mode 100644 index 000000000000..9e55e3b1fef0 --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h | |||
@@ -0,0 +1,312 @@ | |||
1 | #ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H | ||
2 | #define _ASM_POWERPC_BOOK3S_64_HASH_64K_H | ||
3 | |||
4 | #include <asm-generic/pgtable-nopud.h> | ||
5 | |||
6 | #define PTE_INDEX_SIZE 8 | ||
7 | #define PMD_INDEX_SIZE 10 | ||
8 | #define PUD_INDEX_SIZE 0 | ||
9 | #define PGD_INDEX_SIZE 12 | ||
10 | |||
11 | #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) | ||
12 | #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) | ||
13 | #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) | ||
14 | |||
15 | /* With 64k base page size, the smallest hugepage PTE shift is the base page shift */ | ||
16 | #define MIN_HUGEPTE_SHIFT PAGE_SHIFT | ||
17 | |||
18 | /* PMD_SHIFT determines what a second-level page table entry can map */ | ||
19 | #define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) | ||
20 | #define PMD_SIZE (1UL << PMD_SHIFT) | ||
21 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
22 | |||
23 | /* PGDIR_SHIFT determines what a third-level page table entry can map */ | ||
24 | #define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) | ||
25 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | ||
26 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
27 | |||
28 | #define _PAGE_COMBO 0x00040000 /* this is a combo 4k page */ | ||
29 | #define _PAGE_4K_PFN 0x00080000 /* PFN is for a single 4k page */ | ||
30 | /* | ||
31 | * Used to track subpage group validity if _PAGE_COMBO is set. | ||
32 | * This overloads _PAGE_F_GIX and _PAGE_F_SECOND. | ||
33 | */ | ||
34 | #define _PAGE_COMBO_VALID (_PAGE_F_GIX | _PAGE_F_SECOND) | ||
35 | |||
36 | /* PTE flags to conserve for HPTE identification */ | ||
37 | #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_F_SECOND | \ | ||
38 | _PAGE_F_GIX | _PAGE_HASHPTE | _PAGE_COMBO) | ||
39 | |||
40 | /* Shift to put page number into pte. | ||
41 | * | ||
42 | * That gives us a max RPN of 34 bits, which means a max of 50 bits | ||
43 | * of addressable physical space, or 46 bits for the special 4k PFNs. | ||
44 | */ | ||
45 | #define PTE_RPN_SHIFT (30) | ||
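As a quick check of the arithmetic in the comment above, a sketch with illustrative EXAMPLE_* names (not part of this header), assuming 64-bit PTEs and a 64K (2^16) base page:

    #define EXAMPLE_RPN_BITS     (64 - 30)                /* 34-bit RPN */
    #define EXAMPLE_PHYS_BITS    (EXAMPLE_RPN_BITS + 16)  /* 50 bits, 64K pages */
    #define EXAMPLE_PHYS_BITS_4K (EXAMPLE_RPN_BITS + 12)  /* 46 bits, 4K PFNs */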
46 | /* | ||
47 | * We support 16 fragments per 64K PTE page. | ||
48 | */ | ||
49 | #define PTE_FRAG_NR 16 | ||
50 | /* | ||
51 | * Each fragment is a 2K PTE page plus another 2K for storing | ||
52 | * the real_pte_t hash index, hence the 4K fragment size below. | ||
53 | */ | ||
54 | #define PTE_FRAG_SIZE_SHIFT 12 | ||
55 | #define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT) | ||
56 | |||
57 | /* | ||
58 | * Bits to mask out from a PMD to get to the PTE page | ||
59 | * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned. | ||
60 | */ | ||
61 | #define PMD_MASKED_BITS (PTE_FRAG_SIZE - 1) | ||
62 | /* Bits to mask out from a PGD/PUD to get to the PMD page */ | ||
63 | #define PUD_MASKED_BITS 0x1ff | ||
64 | |||
65 | #ifndef __ASSEMBLY__ | ||
66 | |||
67 | /* | ||
68 | * With 64K pages on hash table, we have a special PTE format that | ||
69 | * uses a second "half" of the page table to encode sub-page information | ||
70 | * in order to deal with 64K made of 4K HW pages. Thus we override the | ||
71 | * generic accessors and iterators here | ||
72 | */ | ||
73 | #define __real_pte __real_pte | ||
74 | static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep) | ||
75 | { | ||
76 | real_pte_t rpte; | ||
77 | unsigned long *hidxp; | ||
78 | |||
79 | rpte.pte = pte; | ||
80 | rpte.hidx = 0; | ||
81 | if (pte_val(pte) & _PAGE_COMBO) { | ||
82 | /* | ||
83 | * Make sure we order the hidx load against the _PAGE_COMBO | ||
84 | * check. The store side ordering is done in __hash_page_4K | ||
85 | */ | ||
86 | smp_rmb(); | ||
87 | hidxp = (unsigned long *)(ptep + PTRS_PER_PTE); | ||
88 | rpte.hidx = *hidxp; | ||
89 | } | ||
90 | return rpte; | ||
91 | } | ||
92 | |||
93 | static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index) | ||
94 | { | ||
95 | if ((pte_val(rpte.pte) & _PAGE_COMBO)) | ||
96 | return (rpte.hidx >> (index<<2)) & 0xf; | ||
97 | return (pte_val(rpte.pte) >> _PAGE_F_GIX_SHIFT) & 0xf; | ||
98 | } | ||
99 | |||
100 | #define __rpte_to_pte(r) ((r).pte) | ||
101 | extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index); | ||
102 | /* | ||
103 | * Trick: we set __end to va + 64k, which happens to work for | ||
104 | * a 16M page as well, since we want only one iteration. | ||
105 | */ | ||
106 | #define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \ | ||
107 | do { \ | ||
108 | unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \ | ||
109 | unsigned __split = (psize == MMU_PAGE_4K || \ | ||
110 | psize == MMU_PAGE_64K_AP); \ | ||
111 | shift = mmu_psize_defs[psize].shift; \ | ||
112 | for (index = 0; vpn < __end; index++, \ | ||
113 | vpn += (1L << (shift - VPN_SHIFT))) { \ | ||
114 | if (!__split || __rpte_sub_valid(rpte, index)) \ | ||
115 | do { | ||
116 | |||
117 | #define pte_iterate_hashed_end() } while(0); } } while(0) | ||
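The two macros above are used bracket-style around a loop body. A minimal sketch of a caller, modeled on the hash flush paths (flush_one_hpte and the surrounding variables are assumptions for illustration):

    unsigned long index, shift;

    pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
            flush_one_hpte(vpn, shift);     /* runs once per valid subpage */
    } pte_iterate_hashed_end();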
118 | |||
119 | #define pte_pagesize_index(mm, addr, pte) \ | ||
120 | (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K) | ||
121 | |||
122 | #define remap_4k_pfn(vma, addr, pfn, prot) \ | ||
123 | (WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? -EINVAL : \ | ||
124 | remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ | ||
125 | __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))) | ||
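A sketch of how a driver's mmap handler might use remap_4k_pfn() (the handler and example_dev_pfn are assumptions, not part of this patch); the WARN_ON above rejects PFNs that would overflow the RPN field:

    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long pfn = example_dev_pfn;    /* hypothetical PFN */

            return remap_4k_pfn(vma, vma->vm_start, pfn, vma->vm_page_prot);
    }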
126 | |||
127 | #define PTE_TABLE_SIZE PTE_FRAG_SIZE | ||
128 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
129 | #define PMD_TABLE_SIZE ((sizeof(pmd_t) << PMD_INDEX_SIZE) + (sizeof(unsigned long) << PMD_INDEX_SIZE)) | ||
130 | #else | ||
131 | #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) | ||
132 | #endif | ||
133 | #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) | ||
134 | |||
135 | #define pgd_pte(pgd) (pud_pte(((pud_t){ pgd }))) | ||
136 | #define pte_pgd(pte) ((pgd_t)pte_pud(pte)) | ||
137 | |||
138 | #ifdef CONFIG_HUGETLB_PAGE | ||
139 | /* | ||
140 | * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so we can have | ||
141 | * 16GB hugepage ptes in the PGD and 16MB hugepage ptes at the PMD level; | ||
142 | * | ||
143 | * Defined in such a way that the code blocks can be optimized away at | ||
144 | * build time if CONFIG_HUGETLB_PAGE=n. | ||
145 | */ | ||
146 | static inline int pmd_huge(pmd_t pmd) | ||
147 | { | ||
148 | /* | ||
149 | * leaf pte for huge page | ||
150 | */ | ||
151 | return !!(pmd_val(pmd) & _PAGE_PTE); | ||
152 | } | ||
153 | |||
154 | static inline int pud_huge(pud_t pud) | ||
155 | { | ||
156 | /* | ||
157 | * leaf pte for huge page | ||
158 | */ | ||
159 | return !!(pud_val(pud) & _PAGE_PTE); | ||
160 | } | ||
161 | |||
162 | static inline int pgd_huge(pgd_t pgd) | ||
163 | { | ||
164 | /* | ||
165 | * leaf pte for huge page | ||
166 | */ | ||
167 | return !!(pgd_val(pgd) & _PAGE_PTE); | ||
168 | } | ||
169 | #define pgd_huge pgd_huge | ||
170 | |||
171 | #ifdef CONFIG_DEBUG_VM | ||
172 | extern int hugepd_ok(hugepd_t hpd); | ||
173 | #define is_hugepd(hpd) (hugepd_ok(hpd)) | ||
174 | #else | ||
175 | /* | ||
176 | * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We | ||
177 | * don't need to set up a hugepage directory for them; our pte and page | ||
178 | * directory formats already support this. | ||
179 | */ | ||
180 | static inline int hugepd_ok(hugepd_t hpd) | ||
181 | { | ||
182 | return 0; | ||
183 | } | ||
184 | #define is_hugepd(pdep) 0 | ||
185 | #endif /* CONFIG_DEBUG_VM */ | ||
186 | |||
187 | #endif /* CONFIG_HUGETLB_PAGE */ | ||
188 | |||
189 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
190 | extern unsigned long pmd_hugepage_update(struct mm_struct *mm, | ||
191 | unsigned long addr, | ||
192 | pmd_t *pmdp, | ||
193 | unsigned long clr, | ||
194 | unsigned long set); | ||
195 | static inline char *get_hpte_slot_array(pmd_t *pmdp) | ||
196 | { | ||
197 | /* | ||
198 | * The hpte hidx is stored in the pgtable whose address is in the | ||
199 | * second half of the PMD | ||
200 | * | ||
201 | * Order this load with the test for pmd_trans_huge in the caller | ||
202 | */ | ||
203 | smp_rmb(); | ||
204 | return *(char **)(pmdp + PTRS_PER_PMD); | ||
207 | } | ||
208 | /* | ||
209 | * The linux hugepage PMD now includes the pmd entries followed by the address | ||
210 | * of the stashed pgtable_t. The stashed pgtable_t contains the hpte bits: | ||
211 | * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000 ]. We use one byte per | ||
212 | * HPTE entry. With a 16MB hugepage and 64K HPTEs we need 256 entries and | ||
213 | * with 4K HPTEs we need 4096 entries. Both fit in a 4K pgtable_t. | ||
214 | * | ||
215 | * The last three bits are intentionally left as zero. These memory locations | ||
216 | * are also used as normal page PTE pointers, so if any such pointers are | ||
217 | * left around while we collapse a hugepage, we need to make sure their | ||
218 | * _PAGE_PRESENT bit is zero when we look at them. | ||
219 | */ | ||
220 | static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index) | ||
221 | { | ||
222 | return (hpte_slot_array[index] >> 3) & 0x1; | ||
223 | } | ||
224 | |||
225 | static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array, | ||
226 | int index) | ||
227 | { | ||
228 | return hpte_slot_array[index] >> 4; | ||
229 | } | ||
230 | |||
231 | static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array, | ||
232 | unsigned int index, unsigned int hidx) | ||
233 | { | ||
234 | hpte_slot_array[index] = hidx << 4 | 0x1 << 3; | ||
235 | } | ||
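An illustrative round-trip through the slot byte these helpers manage, with assumed values:

    /* Record that subpage 5 hashed to the secondary hash, group index 3. */
    mark_hpte_slot_valid(hpte_slot_array, 5, 0x8 | 0x3);
    /* hpte_valid(hpte_slot_array, 5)      -> 1   (bit 3 set)       */
    /* hpte_hash_index(hpte_slot_array, 5) -> 0xb (secondary | gix) */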
236 | |||
237 | /* | ||
238 | * | ||
239 | * By design, core kernel code never runs pmd_trans_huge on any hugetlbfs | ||
240 | * page. The hugetlbfs page table walking and mangling paths are totally | ||
241 | * separated from the core VM paths, and they're differentiated by | ||
242 | * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run. | ||
243 | * | ||
244 | * pmd_trans_huge() is defined as false at build time if | ||
245 | * CONFIG_TRANSPARENT_HUGEPAGE=n, so that such code blocks can be | ||
246 | * optimized away at build time. | ||
247 | * | ||
248 | * For ppc64 we need to differentiate explicit hugepages from THP, because | ||
249 | * for THP we also track the subpage details at the pmd level. We don't do | ||
250 | * that for explicit huge pages. | ||
251 | * | ||
252 | */ | ||
253 | static inline int pmd_trans_huge(pmd_t pmd) | ||
254 | { | ||
255 | return !!((pmd_val(pmd) & (_PAGE_PTE | _PAGE_THP_HUGE)) == | ||
256 | (_PAGE_PTE | _PAGE_THP_HUGE)); | ||
257 | } | ||
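One consequence worth spelling out, with assumed bit values: a hugetlb leaf PMD sets _PAGE_PTE but not _PAGE_THP_HUGE, so pmd_huge() is true while pmd_trans_huge() is false, whereas a THP PMD sets both:

    pmd_t thp     = __pmd(_PAGE_PTE | _PAGE_THP_HUGE); /* pmd_trans_huge -> 1 */
    pmd_t hugetlb = __pmd(_PAGE_PTE);                  /* pmd_trans_huge -> 0 */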
258 | |||
259 | static inline int pmd_trans_splitting(pmd_t pmd) | ||
260 | { | ||
261 | if (pmd_trans_huge(pmd)) | ||
262 | return pmd_val(pmd) & _PAGE_SPLITTING; | ||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | static inline int pmd_large(pmd_t pmd) | ||
267 | { | ||
268 | return !!(pmd_val(pmd) & _PAGE_PTE); | ||
269 | } | ||
270 | |||
271 | static inline pmd_t pmd_mknotpresent(pmd_t pmd) | ||
272 | { | ||
273 | return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT); | ||
274 | } | ||
275 | |||
276 | static inline pmd_t pmd_mksplitting(pmd_t pmd) | ||
277 | { | ||
278 | return __pmd(pmd_val(pmd) | _PAGE_SPLITTING); | ||
279 | } | ||
280 | |||
281 | #define __HAVE_ARCH_PMD_SAME | ||
282 | static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) | ||
283 | { | ||
284 | return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0); | ||
285 | } | ||
286 | |||
287 | static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, | ||
288 | unsigned long addr, pmd_t *pmdp) | ||
289 | { | ||
290 | unsigned long old; | ||
291 | |||
292 | if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) | ||
293 | return 0; | ||
294 | old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); | ||
295 | return ((old & _PAGE_ACCESSED) != 0); | ||
296 | } | ||
297 | |||
298 | #define __HAVE_ARCH_PMDP_SET_WRPROTECT | ||
299 | static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, | ||
300 | pmd_t *pmdp) | ||
301 | { | ||
303 | if ((pmd_val(*pmdp) & _PAGE_RW) == 0) | ||
304 | return; | ||
305 | |||
306 | pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0); | ||
307 | } | ||
308 | |||
309 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
310 | #endif /* __ASSEMBLY__ */ | ||
311 | |||
312 | #endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */ | ||
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h new file mode 100644 index 000000000000..2ff8b3df553d --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/hash.h | |||
@@ -0,0 +1,551 @@ | |||
1 | #ifndef _ASM_POWERPC_BOOK3S_64_HASH_H | ||
2 | #define _ASM_POWERPC_BOOK3S_64_HASH_H | ||
3 | #ifdef __KERNEL__ | ||
4 | |||
5 | /* | ||
6 | * Common bits between 4K and 64K pages in a linux-style PTE. | ||
7 | * These match the bits in the (hardware-defined) PowerPC PTE as closely | ||
8 | * as possible. Additional bits may be defined in pgtable-hash64-*.h | ||
9 | * | ||
10 | * Note: We only support user read/write permissions. The supervisor always | ||
11 | * has full read/write access to pages above PAGE_OFFSET (pages below that | ||
12 | * always use the user access permissions). | ||
13 | * | ||
14 | * We could create a separate kernel read-only protection if we used the 3 PP | ||
15 | * bit combinations that newer processors provide, but we currently don't. | ||
16 | */ | ||
17 | #define _PAGE_PTE 0x00001 | ||
18 | #define _PAGE_PRESENT 0x00002 /* software: pte contains a translation */ | ||
19 | #define _PAGE_BIT_SWAP_TYPE 2 | ||
20 | #define _PAGE_USER 0x00004 /* matches one of the PP bits */ | ||
21 | #define _PAGE_EXEC 0x00008 /* No execute on POWER4 and newer (we invert) */ | ||
22 | #define _PAGE_GUARDED 0x00010 | ||
23 | /* We can derive Memory coherence from _PAGE_NO_CACHE */ | ||
24 | #define _PAGE_COHERENT 0x0 | ||
25 | #define _PAGE_NO_CACHE 0x00020 /* I: cache inhibit */ | ||
26 | #define _PAGE_WRITETHRU 0x00040 /* W: cache write-through */ | ||
27 | #define _PAGE_DIRTY 0x00080 /* C: page changed */ | ||
28 | #define _PAGE_ACCESSED 0x00100 /* R: page referenced */ | ||
29 | #define _PAGE_RW 0x00200 /* software: user write access allowed */ | ||
30 | #define _PAGE_HASHPTE 0x00400 /* software: pte has an associated HPTE */ | ||
31 | #define _PAGE_BUSY 0x00800 /* software: PTE & hash are busy */ | ||
32 | #define _PAGE_F_GIX 0x07000 /* full page: hidx bits */ | ||
33 | #define _PAGE_F_GIX_SHIFT 12 | ||
34 | #define _PAGE_F_SECOND 0x08000 /* Whether to use secondary hash or not */ | ||
35 | #define _PAGE_SPECIAL 0x10000 /* software: special page */ | ||
36 | |||
37 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
38 | #define _PAGE_SOFT_DIRTY 0x20000 /* software: software dirty tracking */ | ||
39 | #else | ||
40 | #define _PAGE_SOFT_DIRTY 0x00000 | ||
41 | #endif | ||
42 | |||
43 | /* | ||
44 | * THP pages can't be special, so we reuse _PAGE_SPECIAL for _PAGE_SPLITTING. | ||
45 | */ | ||
46 | #define _PAGE_SPLITTING _PAGE_SPECIAL | ||
47 | |||
48 | /* | ||
49 | * We need to differentiate between explicit huge pages and THP huge | ||
50 | * pages, since THP huge pages also need to track real subpage details. | ||
51 | */ | ||
52 | #define _PAGE_THP_HUGE _PAGE_4K_PFN | ||
53 | |||
54 | /* | ||
55 | * set of bits not changed in pmd_modify. | ||
56 | */ | ||
57 | #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | \ | ||
58 | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \ | ||
59 | _PAGE_THP_HUGE | _PAGE_PTE | _PAGE_SOFT_DIRTY) | ||
60 | |||
61 | #ifdef CONFIG_PPC_64K_PAGES | ||
62 | #include <asm/book3s/64/hash-64k.h> | ||
63 | #else | ||
64 | #include <asm/book3s/64/hash-4k.h> | ||
65 | #endif | ||
66 | |||
67 | /* | ||
68 | * Size of EA range mapped by our pagetables. | ||
69 | */ | ||
70 | #define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ | ||
71 | PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) | ||
72 | #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE) | ||
73 | |||
74 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
75 | #define PMD_CACHE_INDEX (PMD_INDEX_SIZE + 1) | ||
76 | #else | ||
77 | #define PMD_CACHE_INDEX PMD_INDEX_SIZE | ||
78 | #endif | ||
79 | /* | ||
80 | * Define the address range of the kernel non-linear virtual area | ||
81 | */ | ||
82 | #define KERN_VIRT_START ASM_CONST(0xD000000000000000) | ||
83 | #define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000) | ||
84 | |||
85 | /* | ||
86 | * The vmalloc space starts at the beginning of that region, and | ||
87 | * occupies half of it on hash CPUs and a quarter of it on Book3E | ||
88 | * (we keep a quarter for the virtual memmap) | ||
89 | */ | ||
90 | #define VMALLOC_START KERN_VIRT_START | ||
91 | #define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) | ||
92 | #define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) | ||
93 | |||
94 | /* | ||
95 | * Region IDs | ||
96 | */ | ||
97 | #define REGION_SHIFT 60UL | ||
98 | #define REGION_MASK (0xfUL << REGION_SHIFT) | ||
99 | #define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT) | ||
100 | |||
101 | #define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START)) | ||
102 | #define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET)) | ||
103 | #define VMEMMAP_REGION_ID (0xfUL) /* Server only */ | ||
104 | #define USER_REGION_ID (0UL) | ||
105 | |||
106 | /* | ||
107 | * Defines the address of the vmemmap area, in its own region on | ||
108 | * hash table CPUs. | ||
109 | */ | ||
110 | #define VMEMMAP_BASE (VMEMMAP_REGION_ID << REGION_SHIFT) | ||
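Worked example, assuming the usual ppc64 PAGE_OFFSET of 0xC000000000000000: REGION_ID(0xD000000000000000) is 0xD, so VMALLOC_REGION_ID is 0xD and KERNEL_REGION_ID is 0xC, while VMEMMAP_BASE resolves to 0xF000000000000000.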
111 | |||
112 | #ifdef CONFIG_PPC_MM_SLICES | ||
113 | #define HAVE_ARCH_UNMAPPED_AREA | ||
114 | #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN | ||
115 | #endif /* CONFIG_PPC_MM_SLICES */ | ||
116 | |||
117 | /* No separate kernel read-only */ | ||
118 | #define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */ | ||
119 | #define _PAGE_KERNEL_RO _PAGE_KERNEL_RW | ||
120 | #define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC) | ||
121 | |||
122 | /* Strong Access Ordering */ | ||
123 | #define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT) | ||
124 | |||
125 | /* No page size encoding in the linux PTE */ | ||
126 | #define _PAGE_PSIZE 0 | ||
127 | |||
128 | /* PTEIDX nibble */ | ||
129 | #define _PTEIDX_SECONDARY 0x8 | ||
130 | #define _PTEIDX_GROUP_IX 0x7 | ||
131 | |||
132 | /* Hash table based platforms need atomic updates of the linux PTE */ | ||
133 | #define PTE_ATOMIC_UPDATES 1 | ||
134 | #define _PTE_NONE_MASK _PAGE_HPTEFLAGS | ||
135 | /* | ||
136 | * The mask covered by the RPN must be a ULL on 32-bit platforms with | ||
137 | * 64-bit PTEs. | ||
138 | */ | ||
139 | #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) | ||
140 | /* | ||
141 | * _PAGE_CHG_MASK is the mask of bits that are to be preserved across | ||
142 | * pgprot changes. | ||
143 | */ | ||
144 | #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ | ||
145 | _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \ | ||
146 | _PAGE_SOFT_DIRTY) | ||
147 | /* | ||
148 | * Mask of bits returned by pte_pgprot() | ||
149 | */ | ||
150 | #define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \ | ||
151 | _PAGE_WRITETHRU | _PAGE_4K_PFN | \ | ||
152 | _PAGE_USER | _PAGE_ACCESSED | \ | ||
153 | _PAGE_RW | _PAGE_DIRTY | _PAGE_EXEC | \ | ||
154 | _PAGE_SOFT_DIRTY) | ||
155 | /* | ||
156 | * We define 2 sets of base prot bits, one for basic pages (i.e. | ||
157 | * cacheable kernel and user pages) and one for non-cacheable | ||
158 | * pages. We always set _PAGE_COHERENT when SMP is enabled or when | ||
159 | * the processor might need it for DMA coherency. | ||
160 | */ | ||
161 | #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) | ||
162 | #define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) | ||
163 | |||
164 | /* Permission masks used to generate the __P and __S table, | ||
165 | * | ||
166 | * Note:__pgprot is defined in arch/powerpc/include/asm/page.h | ||
167 | * | ||
168 | * Write permissions imply read permissions for now (we could make write-only | ||
169 | * pages on BookE but we don't bother for now). Execute permission control is | ||
170 | * possible on platforms that define _PAGE_EXEC. | ||
171 | * | ||
172 | * Note: due to the way vm flags are laid out, the bits are XWR. | ||
173 | */ | ||
174 | #define PAGE_NONE __pgprot(_PAGE_BASE) | ||
175 | #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) | ||
176 | #define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \ | ||
177 | _PAGE_EXEC) | ||
178 | #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER ) | ||
179 | #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) | ||
180 | #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER ) | ||
181 | #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) | ||
182 | |||
183 | #define __P000 PAGE_NONE | ||
184 | #define __P001 PAGE_READONLY | ||
185 | #define __P010 PAGE_COPY | ||
186 | #define __P011 PAGE_COPY | ||
187 | #define __P100 PAGE_READONLY_X | ||
188 | #define __P101 PAGE_READONLY_X | ||
189 | #define __P110 PAGE_COPY_X | ||
190 | #define __P111 PAGE_COPY_X | ||
191 | |||
192 | #define __S000 PAGE_NONE | ||
193 | #define __S001 PAGE_READONLY | ||
194 | #define __S010 PAGE_SHARED | ||
195 | #define __S011 PAGE_SHARED | ||
196 | #define __S100 PAGE_READONLY_X | ||
197 | #define __S101 PAGE_READONLY_X | ||
198 | #define __S110 PAGE_SHARED_X | ||
199 | #define __S111 PAGE_SHARED_X | ||
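Reading these tables together with the XWR note above: a private PROT_READ|PROT_WRITE mapping indexes __P011, i.e. PAGE_COPY, which carries no _PAGE_RW, so the first write faults and the fault handler breaks copy-on-write; the shared variant indexes __S011 = PAGE_SHARED, which does set _PAGE_RW.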
200 | |||
201 | /* Permission masks used for kernel mappings */ | ||
202 | #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) | ||
203 | #define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ | ||
204 | _PAGE_NO_CACHE) | ||
205 | #define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ | ||
206 | _PAGE_NO_CACHE | _PAGE_GUARDED) | ||
207 | #define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX) | ||
208 | #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) | ||
209 | #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) | ||
210 | |||
211 | /* Protection used for kernel text. We want the debuggers to be able to | ||
212 | * set breakpoints anywhere, so don't write protect the kernel text | ||
213 | * on platforms where such control is possible. | ||
214 | */ | ||
215 | #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ | ||
216 | defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) | ||
217 | #define PAGE_KERNEL_TEXT PAGE_KERNEL_X | ||
218 | #else | ||
219 | #define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX | ||
220 | #endif | ||
221 | |||
222 | /* Make module code happy. We don't set RO yet. */ | ||
223 | #define PAGE_KERNEL_EXEC PAGE_KERNEL_X | ||
224 | #define PAGE_AGP (PAGE_KERNEL_NC) | ||
225 | |||
226 | #define PMD_BAD_BITS (PTE_TABLE_SIZE-1) | ||
227 | #define PUD_BAD_BITS (PMD_TABLE_SIZE-1) | ||
228 | |||
229 | #ifndef __ASSEMBLY__ | ||
230 | #define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ | ||
231 | || (pmd_val(pmd) & PMD_BAD_BITS)) | ||
232 | #define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) | ||
233 | |||
234 | #define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ | ||
235 | || (pud_val(pud) & PUD_BAD_BITS)) | ||
236 | #define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) | ||
237 | |||
238 | #define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) | ||
239 | #define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1)) | ||
240 | #define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1)) | ||
241 | |||
242 | extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | ||
243 | pte_t *ptep, unsigned long pte, int huge); | ||
244 | extern unsigned long htab_convert_pte_flags(unsigned long pteflags); | ||
245 | /* Atomic PTE updates */ | ||
246 | static inline unsigned long pte_update(struct mm_struct *mm, | ||
247 | unsigned long addr, | ||
248 | pte_t *ptep, unsigned long clr, | ||
249 | unsigned long set, | ||
250 | int huge) | ||
251 | { | ||
252 | unsigned long old, tmp; | ||
253 | |||
254 | __asm__ __volatile__( | ||
255 | "1: ldarx %0,0,%3 # pte_update\n\ | ||
256 | andi. %1,%0,%6\n\ | ||
257 | bne- 1b \n\ | ||
258 | andc %1,%0,%4 \n\ | ||
259 | or %1,%1,%7\n\ | ||
260 | stdcx. %1,0,%3 \n\ | ||
261 | bne- 1b" | ||
262 | : "=&r" (old), "=&r" (tmp), "=m" (*ptep) | ||
263 | : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set) | ||
264 | : "cc" ); | ||
265 | /* huge pages use the old page table lock */ | ||
266 | if (!huge) | ||
267 | assert_pte_locked(mm, addr); | ||
268 | |||
269 | if (old & _PAGE_HASHPTE) | ||
270 | hpte_need_flush(mm, addr, ptep, old, huge); | ||
271 | |||
272 | return old; | ||
273 | } | ||
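The inline assembly is easier to follow as pseudo-C. A sketch of the equivalent logic (illustrative only; the real atomicity comes from the ldarx/stdcx. reservation):

    /*
     *      do {
     *              old = *ptep;            // ldarx: load-reserve
     *      } while (old & _PAGE_BUSY);     // spin while hash code owns it
     *      *ptep = (old & ~clr) | set;     // stdcx.: retried if reservation lost
     */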
274 | |||
275 | static inline int __ptep_test_and_clear_young(struct mm_struct *mm, | ||
276 | unsigned long addr, pte_t *ptep) | ||
277 | { | ||
278 | unsigned long old; | ||
279 | |||
280 | if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) | ||
281 | return 0; | ||
282 | old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); | ||
283 | return (old & _PAGE_ACCESSED) != 0; | ||
284 | } | ||
285 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | ||
286 | #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ | ||
287 | ({ \ | ||
288 | int __r; \ | ||
289 | __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \ | ||
290 | __r; \ | ||
291 | }) | ||
292 | |||
293 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | ||
294 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | ||
295 | pte_t *ptep) | ||
296 | { | ||
298 | if ((pte_val(*ptep) & _PAGE_RW) == 0) | ||
299 | return; | ||
300 | |||
301 | pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); | ||
302 | } | ||
303 | |||
304 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | ||
305 | unsigned long addr, pte_t *ptep) | ||
306 | { | ||
307 | if ((pte_val(*ptep) & _PAGE_RW) == 0) | ||
308 | return; | ||
309 | |||
310 | pte_update(mm, addr, ptep, _PAGE_RW, 0, 1); | ||
311 | } | ||
312 | |||
313 | /* | ||
314 | * We currently remove entries from the hashtable regardless of whether | ||
315 | * the entry was young or dirty. The generic routines only flush if the | ||
316 | * entry was young or dirty, which is not good enough. | ||
317 | * | ||
318 | * We should be more intelligent about this, but for the moment we override | ||
319 | * these functions and force a tlb flush unconditionally. | ||
320 | */ | ||
321 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH | ||
322 | #define ptep_clear_flush_young(__vma, __address, __ptep) \ | ||
323 | ({ \ | ||
324 | int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \ | ||
325 | __ptep); \ | ||
326 | __young; \ | ||
327 | }) | ||
328 | |||
329 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | ||
330 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, | ||
331 | unsigned long addr, pte_t *ptep) | ||
332 | { | ||
333 | unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0); | ||
334 | return __pte(old); | ||
335 | } | ||
336 | |||
337 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, | ||
338 | pte_t * ptep) | ||
339 | { | ||
340 | pte_update(mm, addr, ptep, ~0UL, 0, 0); | ||
341 | } | ||
342 | |||
343 | |||
344 | /* Set the dirty and/or accessed bits atomically in a linux PTE; this | ||
345 | * function doesn't need to flush the hash entry. | ||
346 | */ | ||
347 | static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) | ||
348 | { | ||
349 | unsigned long bits = pte_val(entry) & | ||
350 | (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC | | ||
351 | _PAGE_SOFT_DIRTY); | ||
352 | |||
353 | unsigned long old, tmp; | ||
354 | |||
355 | __asm__ __volatile__( | ||
356 | "1: ldarx %0,0,%4\n\ | ||
357 | andi. %1,%0,%6\n\ | ||
358 | bne- 1b \n\ | ||
359 | or %0,%3,%0\n\ | ||
360 | stdcx. %0,0,%4\n\ | ||
361 | bne- 1b" | ||
362 | :"=&r" (old), "=&r" (tmp), "=m" (*ptep) | ||
363 | :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY) | ||
364 | :"cc"); | ||
365 | } | ||
366 | |||
367 | #define __HAVE_ARCH_PTE_SAME | ||
368 | #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) | ||
369 | |||
370 | /* Generic accessors to PTE bits */ | ||
371 | static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);} | ||
372 | static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); } | ||
373 | static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); } | ||
374 | static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); } | ||
375 | static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } | ||
376 | static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } | ||
377 | |||
378 | #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY | ||
379 | static inline bool pte_soft_dirty(pte_t pte) | ||
380 | { | ||
381 | return !!(pte_val(pte) & _PAGE_SOFT_DIRTY); | ||
382 | } | ||
383 | static inline pte_t pte_mksoft_dirty(pte_t pte) | ||
384 | { | ||
385 | return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY); | ||
386 | } | ||
387 | |||
388 | static inline pte_t pte_clear_soft_dirty(pte_t pte) | ||
389 | { | ||
390 | return __pte(pte_val(pte) & ~_PAGE_SOFT_DIRTY); | ||
391 | } | ||
392 | #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ | ||
393 | |||
394 | #ifdef CONFIG_NUMA_BALANCING | ||
395 | /* | ||
396 | * These work without NUMA balancing but the kernel does not care. See the | ||
397 | * comment in include/asm-generic/pgtable.h. On powerpc, this will only | ||
398 | * work for user pages and always return true for kernel pages. | ||
399 | */ | ||
400 | static inline int pte_protnone(pte_t pte) | ||
401 | { | ||
402 | return (pte_val(pte) & | ||
403 | (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT; | ||
404 | } | ||
405 | #endif /* CONFIG_NUMA_BALANCING */ | ||
406 | |||
407 | static inline int pte_present(pte_t pte) | ||
408 | { | ||
409 | return pte_val(pte) & _PAGE_PRESENT; | ||
410 | } | ||
411 | |||
412 | /* Conversion functions: convert a page and protection to a page entry, | ||
413 | * and a page entry and page directory to the page they refer to. | ||
414 | * | ||
415 | * Even if PTEs can be unsigned long long, a PFN is always an unsigned | ||
416 | * long for now. | ||
417 | */ | ||
418 | static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) | ||
419 | { | ||
420 | return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | | ||
421 | pgprot_val(pgprot)); | ||
422 | } | ||
423 | |||
424 | static inline unsigned long pte_pfn(pte_t pte) | ||
425 | { | ||
426 | return pte_val(pte) >> PTE_RPN_SHIFT; | ||
427 | } | ||
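The two helpers are inverses, so a round-trip preserves the PFN. A sketch, assuming pfn fits in the RPN field:

    pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
    WARN_ON(pte_pfn(pte) != pfn);   /* never fires for in-range PFNs */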
428 | |||
429 | /* Generic modifiers for PTE bits */ | ||
430 | static inline pte_t pte_wrprotect(pte_t pte) | ||
431 | { | ||
432 | return __pte(pte_val(pte) & ~_PAGE_RW); | ||
433 | } | ||
434 | |||
435 | static inline pte_t pte_mkclean(pte_t pte) | ||
436 | { | ||
437 | return __pte(pte_val(pte) & ~_PAGE_DIRTY); | ||
438 | } | ||
439 | |||
440 | static inline pte_t pte_mkold(pte_t pte) | ||
441 | { | ||
442 | return __pte(pte_val(pte) & ~_PAGE_ACCESSED); | ||
443 | } | ||
444 | |||
445 | static inline pte_t pte_mkwrite(pte_t pte) | ||
446 | { | ||
447 | return __pte(pte_val(pte) | _PAGE_RW); | ||
448 | } | ||
449 | |||
450 | static inline pte_t pte_mkdirty(pte_t pte) | ||
451 | { | ||
452 | return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY); | ||
453 | } | ||
454 | |||
455 | static inline pte_t pte_mkyoung(pte_t pte) | ||
456 | { | ||
457 | return __pte(pte_val(pte) | _PAGE_ACCESSED); | ||
458 | } | ||
459 | |||
460 | static inline pte_t pte_mkspecial(pte_t pte) | ||
461 | { | ||
462 | return __pte(pte_val(pte) | _PAGE_SPECIAL); | ||
463 | } | ||
464 | |||
465 | static inline pte_t pte_mkhuge(pte_t pte) | ||
466 | { | ||
467 | return pte; | ||
468 | } | ||
469 | |||
470 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
471 | { | ||
472 | return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); | ||
473 | } | ||
474 | |||
475 | /* This low level function performs the actual PTE insertion. | ||
476 | * Setting the PTE depends on the MMU type and other factors. It's | ||
477 | * a horrible mess that I'm not going to try to clean up now, but | ||
478 | * I'm keeping it in one place rather than spread around. | ||
479 | */ | ||
480 | static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | ||
481 | pte_t *ptep, pte_t pte, int percpu) | ||
482 | { | ||
483 | /* | ||
484 | * Here we just store the PTE normally. That covers all 64-bit | ||
485 | * cases, and 32-bit non-hash with 32-bit PTEs. | ||
486 | */ | ||
487 | *ptep = pte; | ||
488 | } | ||
489 | |||
490 | /* | ||
491 | * Macro to mark a page protection value as "uncacheable". | ||
492 | */ | ||
493 | |||
494 | #define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \ | ||
495 | _PAGE_WRITETHRU) | ||
496 | |||
497 | #define pgprot_noncached pgprot_noncached | ||
498 | static inline pgprot_t pgprot_noncached(pgprot_t prot) | ||
499 | { | ||
500 | return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | | ||
501 | _PAGE_NO_CACHE | _PAGE_GUARDED); | ||
502 | } | ||
503 | |||
504 | #define pgprot_noncached_wc pgprot_noncached_wc | ||
505 | static inline pgprot_t pgprot_noncached_wc(pgprot_t prot) | ||
506 | { | ||
507 | return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | | ||
508 | _PAGE_NO_CACHE); | ||
509 | } | ||
510 | |||
511 | #define pgprot_cached pgprot_cached | ||
512 | static inline pgprot_t pgprot_cached(pgprot_t prot) | ||
513 | { | ||
514 | return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | | ||
515 | _PAGE_COHERENT); | ||
516 | } | ||
517 | |||
518 | #define pgprot_cached_wthru pgprot_cached_wthru | ||
519 | static inline pgprot_t pgprot_cached_wthru(pgprot_t prot) | ||
520 | { | ||
521 | return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | | ||
522 | _PAGE_COHERENT | _PAGE_WRITETHRU); | ||
523 | } | ||
524 | |||
525 | #define pgprot_cached_noncoherent pgprot_cached_noncoherent | ||
526 | static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot) | ||
527 | { | ||
528 | return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL); | ||
529 | } | ||
530 | |||
531 | #define pgprot_writecombine pgprot_writecombine | ||
532 | static inline pgprot_t pgprot_writecombine(pgprot_t prot) | ||
533 | { | ||
534 | return pgprot_noncached_wc(prot); | ||
535 | } | ||
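A typical use of these helpers in a driver mmap path (a sketch; the surrounding driver code and pfn are assumed):

    /* Map device MMIO into userspace, uncached and guarded. */
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    return io_remap_pfn_range(vma, vma->vm_start, pfn,
                              vma->vm_end - vma->vm_start,
                              vma->vm_page_prot);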
536 | |||
537 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
538 | extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, | ||
539 | pmd_t *pmdp, unsigned long old_pmd); | ||
540 | #else | ||
541 | static inline void hpte_do_hugepage_flush(struct mm_struct *mm, | ||
542 | unsigned long addr, pmd_t *pmdp, | ||
543 | unsigned long old_pmd) | ||
544 | { | ||
545 | WARN(1, "%s called with THP disabled\n", __func__); | ||
546 | } | ||
547 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
548 | |||
549 | #endif /* !__ASSEMBLY__ */ | ||
550 | #endif /* __KERNEL__ */ | ||
551 | #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */ | ||
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h new file mode 100644 index 000000000000..b3a5badab69f --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h | |||
@@ -0,0 +1,300 @@ | |||
1 | #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ | ||
2 | #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ | ||
3 | /* | ||
4 | * This file contains the functions and defines necessary to modify and use | ||
5 | * the ppc64 hashed page table. | ||
6 | */ | ||
7 | |||
8 | #include <asm/book3s/64/hash.h> | ||
9 | #include <asm/barrier.h> | ||
10 | |||
11 | /* | ||
12 | * The second half of the kernel virtual space is used for IO mappings; | ||
13 | * it is itself carved into the PIO region (ISA and PHB IO space) and | ||
14 | * the ioremap space: | ||
15 | * | ||
16 | * ISA_IO_BASE = KERN_IO_START, 64K reserved area | ||
17 | * PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces | ||
18 | * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE | ||
19 | */ | ||
20 | #define KERN_IO_START (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1)) | ||
21 | #define FULL_IO_SIZE 0x80000000ul | ||
22 | #define ISA_IO_BASE (KERN_IO_START) | ||
23 | #define ISA_IO_END (KERN_IO_START + 0x10000ul) | ||
24 | #define PHB_IO_BASE (ISA_IO_END) | ||
25 | #define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE) | ||
26 | #define IOREMAP_BASE (PHB_IO_END) | ||
27 | #define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE) | ||
28 | |||
29 | #define vmemmap ((struct page *)VMEMMAP_BASE) | ||
30 | |||
31 | /* Advertise special mapping type for AGP */ | ||
32 | #define HAVE_PAGE_AGP | ||
33 | |||
34 | /* Advertise support for _PAGE_SPECIAL */ | ||
35 | #define __HAVE_ARCH_PTE_SPECIAL | ||
36 | |||
37 | #ifndef __ASSEMBLY__ | ||
38 | |||
39 | /* | ||
40 | * This is the default implementation of various PTE accessors; it's | ||
41 | * used in all cases except Book3S with 64K pages, where we have a | ||
42 | * concept of sub-pages. | ||
43 | */ | ||
44 | #ifndef __real_pte | ||
45 | |||
46 | #ifdef CONFIG_STRICT_MM_TYPECHECKS | ||
47 | #define __real_pte(e,p) ((real_pte_t){(e)}) | ||
48 | #define __rpte_to_pte(r) ((r).pte) | ||
49 | #else | ||
50 | #define __real_pte(e,p) (e) | ||
51 | #define __rpte_to_pte(r) (__pte(r)) | ||
52 | #endif | ||
53 | #define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >>_PAGE_F_GIX_SHIFT) | ||
54 | |||
55 | #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ | ||
56 | do { \ | ||
57 | index = 0; \ | ||
58 | shift = mmu_psize_defs[psize].shift; \ | ||
59 | |||
60 | #define pte_iterate_hashed_end() } while(0) | ||
61 | |||
62 | /* | ||
63 | * We expect this to be called only for user addresses or kernel virtual | ||
64 | * addresses other than the linear mapping. | ||
65 | */ | ||
66 | #define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K | ||
67 | |||
68 | #endif /* __real_pte */ | ||
69 | |||
70 | static inline void pmd_set(pmd_t *pmdp, unsigned long val) | ||
71 | { | ||
72 | *pmdp = __pmd(val); | ||
73 | } | ||
74 | |||
75 | static inline void pmd_clear(pmd_t *pmdp) | ||
76 | { | ||
77 | *pmdp = __pmd(0); | ||
78 | } | ||
79 | |||
80 | #define pmd_none(pmd) (!pmd_val(pmd)) | ||
81 | #define pmd_present(pmd) (!pmd_none(pmd)) | ||
82 | |||
83 | static inline void pud_set(pud_t *pudp, unsigned long val) | ||
84 | { | ||
85 | *pudp = __pud(val); | ||
86 | } | ||
87 | |||
88 | static inline void pud_clear(pud_t *pudp) | ||
89 | { | ||
90 | *pudp = __pud(0); | ||
91 | } | ||
92 | |||
93 | #define pud_none(pud) (!pud_val(pud)) | ||
94 | #define pud_present(pud) (pud_val(pud) != 0) | ||
95 | |||
96 | extern struct page *pud_page(pud_t pud); | ||
97 | extern struct page *pmd_page(pmd_t pmd); | ||
98 | static inline pte_t pud_pte(pud_t pud) | ||
99 | { | ||
100 | return __pte(pud_val(pud)); | ||
101 | } | ||
102 | |||
103 | static inline pud_t pte_pud(pte_t pte) | ||
104 | { | ||
105 | return __pud(pte_val(pte)); | ||
106 | } | ||
107 | #define pud_write(pud) pte_write(pud_pte(pud)) | ||
108 | #define pgd_write(pgd) pte_write(pgd_pte(pgd)) | ||
109 | static inline void pgd_set(pgd_t *pgdp, unsigned long val) | ||
110 | { | ||
111 | *pgdp = __pgd(val); | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * Find an entry in a page-table-directory. We combine the address region | ||
116 | * (the high order N bits) and the pgd portion of the address. | ||
117 | */ | ||
118 | |||
119 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) | ||
120 | |||
121 | #define pmd_offset(pudp,addr) \ | ||
122 | (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr)) | ||
123 | |||
124 | #define pte_offset_kernel(dir,addr) \ | ||
125 | (((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr)) | ||
126 | |||
127 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) | ||
128 | #define pte_unmap(pte) do { } while(0) | ||
129 | |||
130 | /* to find an entry in a kernel page-table-directory */ | ||
131 | /* This now only contains the vmalloc pages */ | ||
132 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | ||
133 | |||
134 | #define pte_ERROR(e) \ | ||
135 | pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) | ||
136 | #define pmd_ERROR(e) \ | ||
137 | pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) | ||
138 | #define pgd_ERROR(e) \ | ||
139 | pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) | ||
140 | |||
141 | /* Encode and de-code a swap entry */ | ||
142 | #define MAX_SWAPFILES_CHECK() do { \ | ||
143 | BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \ | ||
144 | /* \ | ||
145 | * Don't have overlapping bits with _PAGE_HPTEFLAGS \ | ||
146 | * We filter HPTEFLAGS on set_pte. \ | ||
147 | */ \ | ||
148 | BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \ | ||
149 | BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \ | ||
150 | } while (0) | ||
151 | /* | ||
152 | * On pte we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT. | ||
153 | */ | ||
154 | #define SWP_TYPE_BITS 5 | ||
155 | #define __swp_type(x) (((x).val >> _PAGE_BIT_SWAP_TYPE) \ | ||
156 | & ((1UL << SWP_TYPE_BITS) - 1)) | ||
157 | #define __swp_offset(x) ((x).val >> PTE_RPN_SHIFT) | ||
158 | #define __swp_entry(type, offset) ((swp_entry_t) { \ | ||
159 | ((type) << _PAGE_BIT_SWAP_TYPE) \ | ||
160 | | ((offset) << PTE_RPN_SHIFT) }) | ||
161 | /* | ||
162 | * swp_entry_t must be independent of pte bits. We build a swp_entry_t from | ||
163 | * the swap type and offset we get from swap, and convert that to a pte to | ||
164 | * find a matching pte in the linux page table. | ||
165 | * Clear bits not found in swap entries here. | ||
166 | */ | ||
167 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE }) | ||
168 | #define __swp_entry_to_pte(x) __pte((x).val | _PAGE_PTE) | ||
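An illustrative round-trip through the swap encoding, with hypothetical type and offset values:

    swp_entry_t entry = __swp_entry(2, 0x1234);  /* type 2, offset 0x1234 */
    pte_t pte = __swp_entry_to_pte(entry);       /* adds _PAGE_PTE */
    /* __swp_type(__pte_to_swp_entry(pte))   == 2      */
    /* __swp_offset(__pte_to_swp_entry(pte)) == 0x1234 */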
169 | |||
170 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
171 | #define _PAGE_SWP_SOFT_DIRTY (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE)) | ||
172 | #else | ||
173 | #define _PAGE_SWP_SOFT_DIRTY 0UL | ||
174 | #endif /* CONFIG_MEM_SOFT_DIRTY */ | ||
175 | |||
176 | #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY | ||
177 | static inline pte_t pte_swp_mksoft_dirty(pte_t pte) | ||
178 | { | ||
179 | return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY); | ||
180 | } | ||
181 | static inline bool pte_swp_soft_dirty(pte_t pte) | ||
182 | { | ||
183 | return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY); | ||
184 | } | ||
185 | static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) | ||
186 | { | ||
187 | return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY); | ||
188 | } | ||
189 | #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ | ||
190 | |||
191 | void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); | ||
192 | void pgtable_cache_init(void); | ||
193 | |||
194 | struct page *realmode_pfn_to_page(unsigned long pfn); | ||
195 | |||
196 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
197 | extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot); | ||
198 | extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot); | ||
199 | extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); | ||
200 | extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, | ||
201 | pmd_t *pmdp, pmd_t pmd); | ||
202 | extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, | ||
203 | pmd_t *pmd); | ||
204 | extern int has_transparent_hugepage(void); | ||
205 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
206 | |||
207 | |||
208 | static inline pte_t pmd_pte(pmd_t pmd) | ||
209 | { | ||
210 | return __pte(pmd_val(pmd)); | ||
211 | } | ||
212 | |||
213 | static inline pmd_t pte_pmd(pte_t pte) | ||
214 | { | ||
215 | return __pmd(pte_val(pte)); | ||
216 | } | ||
217 | |||
218 | static inline pte_t *pmdp_ptep(pmd_t *pmd) | ||
219 | { | ||
220 | return (pte_t *)pmd; | ||
221 | } | ||
222 | |||
223 | #define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd)) | ||
224 | #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) | ||
225 | #define pmd_young(pmd) pte_young(pmd_pte(pmd)) | ||
226 | #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) | ||
227 | #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) | ||
228 | #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) | ||
229 | #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) | ||
230 | #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) | ||
231 | |||
232 | #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY | ||
233 | #define pmd_soft_dirty(pmd) pte_soft_dirty(pmd_pte(pmd)) | ||
234 | #define pmd_mksoft_dirty(pmd) pte_pmd(pte_mksoft_dirty(pmd_pte(pmd))) | ||
235 | #define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd))) | ||
236 | #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ | ||
237 | |||
238 | #ifdef CONFIG_NUMA_BALANCING | ||
239 | static inline int pmd_protnone(pmd_t pmd) | ||
240 | { | ||
241 | return pte_protnone(pmd_pte(pmd)); | ||
242 | } | ||
243 | #endif /* CONFIG_NUMA_BALANCING */ | ||
244 | |||
245 | #define __HAVE_ARCH_PMD_WRITE | ||
246 | #define pmd_write(pmd) pte_write(pmd_pte(pmd)) | ||
247 | |||
248 | static inline pmd_t pmd_mkhuge(pmd_t pmd) | ||
249 | { | ||
250 | return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_THP_HUGE)); | ||
251 | } | ||
252 | |||
253 | #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS | ||
254 | extern int pmdp_set_access_flags(struct vm_area_struct *vma, | ||
255 | unsigned long address, pmd_t *pmdp, | ||
256 | pmd_t entry, int dirty); | ||
257 | |||
258 | #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG | ||
259 | extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, | ||
260 | unsigned long address, pmd_t *pmdp); | ||
261 | #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH | ||
262 | extern int pmdp_clear_flush_young(struct vm_area_struct *vma, | ||
263 | unsigned long address, pmd_t *pmdp); | ||
264 | |||
265 | #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR | ||
266 | extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, | ||
267 | unsigned long addr, pmd_t *pmdp); | ||
268 | |||
269 | #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH | ||
270 | extern void pmdp_splitting_flush(struct vm_area_struct *vma, | ||
271 | unsigned long address, pmd_t *pmdp); | ||
272 | |||
273 | extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, | ||
274 | unsigned long address, pmd_t *pmdp); | ||
275 | #define pmdp_collapse_flush pmdp_collapse_flush | ||
276 | |||
277 | #define __HAVE_ARCH_PGTABLE_DEPOSIT | ||
278 | extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, | ||
279 | pgtable_t pgtable); | ||
280 | #define __HAVE_ARCH_PGTABLE_WITHDRAW | ||
281 | extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); | ||
282 | |||
283 | #define __HAVE_ARCH_PMDP_INVALIDATE | ||
284 | extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, | ||
285 | pmd_t *pmdp); | ||
286 | |||
287 | #define pmd_move_must_withdraw pmd_move_must_withdraw | ||
288 | struct spinlock; | ||
289 | static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, | ||
290 | struct spinlock *old_pmd_ptl) | ||
291 | { | ||
292 | /* | ||
293 | * Archs like ppc64 use the pgtable to store per-pmd | ||
294 | * specific information, so when we switch the pmd, | ||
295 | * we should also withdraw and deposit the pgtable. | ||
296 | */ | ||
297 | return true; | ||
298 | } | ||
299 | #endif /* __ASSEMBLY__ */ | ||
300 | #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */ | ||
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h new file mode 100644 index 000000000000..8b0f4a29259a --- /dev/null +++ b/arch/powerpc/include/asm/book3s/pgtable.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef _ASM_POWERPC_BOOK3S_PGTABLE_H | ||
2 | #define _ASM_POWERPC_BOOK3S_PGTABLE_H | ||
3 | |||
4 | #ifdef CONFIG_PPC64 | ||
5 | #include <asm/book3s/64/pgtable.h> | ||
6 | #else | ||
7 | #include <asm/book3s/32/pgtable.h> | ||
8 | #endif | ||
9 | |||
10 | #define FIRST_USER_ADDRESS 0UL | ||
11 | #ifndef __ASSEMBLY__ | ||
12 | /* Insert a PTE; the top-level function is out of line. It uses an inline | ||
13 | * low level function in the respective pgtable-* files. | ||
14 | */ | ||
15 | extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, | ||
16 | pte_t pte); | ||
17 | |||
18 | |||
19 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | ||
20 | extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, | ||
21 | pte_t *ptep, pte_t entry, int dirty); | ||
22 | |||
23 | struct file; | ||
24 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
25 | unsigned long size, pgprot_t vma_prot); | ||
26 | #define __HAVE_PHYS_MEM_ACCESS_PROT | ||
27 | |||
28 | #endif /* __ASSEMBLY__ */ | ||
29 | #endif | ||
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h index ad6263cffb0f..d1a8d93cccfd 100644 --- a/arch/powerpc/include/asm/cmpxchg.h +++ b/arch/powerpc/include/asm/cmpxchg.h | |||
@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val) | |||
18 | unsigned long prev; | 18 | unsigned long prev; |
19 | 19 | ||
20 | __asm__ __volatile__( | 20 | __asm__ __volatile__( |
21 | PPC_RELEASE_BARRIER | 21 | PPC_ATOMIC_ENTRY_BARRIER |
22 | "1: lwarx %0,0,%2 \n" | 22 | "1: lwarx %0,0,%2 \n" |
23 | PPC405_ERR77(0,%2) | 23 | PPC405_ERR77(0,%2) |
24 | " stwcx. %3,0,%2 \n\ | 24 | " stwcx. %3,0,%2 \n\ |
25 | bne- 1b" | 25 | bne- 1b" |
26 | PPC_ACQUIRE_BARRIER | 26 | PPC_ATOMIC_EXIT_BARRIER |
27 | : "=&r" (prev), "+m" (*(volatile unsigned int *)p) | 27 | : "=&r" (prev), "+m" (*(volatile unsigned int *)p) |
28 | : "r" (p), "r" (val) | 28 | : "r" (p), "r" (val) |
29 | : "cc", "memory"); | 29 | : "cc", "memory"); |
@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val) | |||
61 | unsigned long prev; | 61 | unsigned long prev; |
62 | 62 | ||
63 | __asm__ __volatile__( | 63 | __asm__ __volatile__( |
64 | PPC_RELEASE_BARRIER | 64 | PPC_ATOMIC_ENTRY_BARRIER |
65 | "1: ldarx %0,0,%2 \n" | 65 | "1: ldarx %0,0,%2 \n" |
66 | PPC405_ERR77(0,%2) | 66 | PPC405_ERR77(0,%2) |
67 | " stdcx. %3,0,%2 \n\ | 67 | " stdcx. %3,0,%2 \n\ |
68 | bne- 1b" | 68 | bne- 1b" |
69 | PPC_ACQUIRE_BARRIER | 69 | PPC_ATOMIC_EXIT_BARRIER |
70 | : "=&r" (prev), "+m" (*(volatile unsigned long *)p) | 70 | : "=&r" (prev), "+m" (*(volatile unsigned long *)p) |
71 | : "r" (p), "r" (val) | 71 | : "r" (p), "r" (val) |
72 | : "cc", "memory"); | 72 | : "cc", "memory"); |
@@ -151,14 +151,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) | |||
151 | unsigned int prev; | 151 | unsigned int prev; |
152 | 152 | ||
153 | __asm__ __volatile__ ( | 153 | __asm__ __volatile__ ( |
154 | PPC_RELEASE_BARRIER | 154 | PPC_ATOMIC_ENTRY_BARRIER |
155 | "1: lwarx %0,0,%2 # __cmpxchg_u32\n\ | 155 | "1: lwarx %0,0,%2 # __cmpxchg_u32\n\ |
156 | cmpw 0,%0,%3\n\ | 156 | cmpw 0,%0,%3\n\ |
157 | bne- 2f\n" | 157 | bne- 2f\n" |
158 | PPC405_ERR77(0,%2) | 158 | PPC405_ERR77(0,%2) |
159 | " stwcx. %4,0,%2\n\ | 159 | " stwcx. %4,0,%2\n\ |
160 | bne- 1b" | 160 | bne- 1b" |
161 | PPC_ACQUIRE_BARRIER | 161 | PPC_ATOMIC_EXIT_BARRIER |
162 | "\n\ | 162 | "\n\ |
163 | 2:" | 163 | 2:" |
164 | : "=&r" (prev), "+m" (*p) | 164 | : "=&r" (prev), "+m" (*p) |
@@ -197,13 +197,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) | |||
197 | unsigned long prev; | 197 | unsigned long prev; |
198 | 198 | ||
199 | __asm__ __volatile__ ( | 199 | __asm__ __volatile__ ( |
200 | PPC_RELEASE_BARRIER | 200 | PPC_ATOMIC_ENTRY_BARRIER |
201 | "1: ldarx %0,0,%2 # __cmpxchg_u64\n\ | 201 | "1: ldarx %0,0,%2 # __cmpxchg_u64\n\ |
202 | cmpd 0,%0,%3\n\ | 202 | cmpd 0,%0,%3\n\ |
203 | bne- 2f\n\ | 203 | bne- 2f\n\ |
204 | stdcx. %4,0,%2\n\ | 204 | stdcx. %4,0,%2\n\ |
205 | bne- 1b" | 205 | bne- 1b" |
206 | PPC_ACQUIRE_BARRIER | 206 | PPC_ATOMIC_EXIT_BARRIER |
207 | "\n\ | 207 | "\n\ |
208 | 2:" | 208 | 2:" |
209 | : "=&r" (prev), "+m" (*p) | 209 | : "=&r" (prev), "+m" (*p) |
diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h index 4398a6cdcf53..2c5c5b476804 100644 --- a/arch/powerpc/include/asm/cpm.h +++ b/arch/powerpc/include/asm/cpm.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
6 | #include <linux/errno.h> | 6 | #include <linux/errno.h> |
7 | #include <linux/of.h> | 7 | #include <linux/of.h> |
8 | #include <soc/fsl/qe/qe.h> | ||
8 | 9 | ||
9 | /* | 10 | /* |
10 | * SPI Parameter RAM common to QE and CPM. | 11 | * SPI Parameter RAM common to QE and CPM. |
@@ -155,49 +156,6 @@ typedef struct cpm_buf_desc { | |||
155 | */ | 156 | */ |
156 | #define BD_I2C_START (0x0400) | 157 | #define BD_I2C_START (0x0400) |
157 | 158 | ||
158 | int cpm_muram_init(void); | ||
159 | |||
160 | #if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) | ||
161 | unsigned long cpm_muram_alloc(unsigned long size, unsigned long align); | ||
162 | int cpm_muram_free(unsigned long offset); | ||
163 | unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size); | ||
164 | void __iomem *cpm_muram_addr(unsigned long offset); | ||
165 | unsigned long cpm_muram_offset(void __iomem *addr); | ||
166 | dma_addr_t cpm_muram_dma(void __iomem *addr); | ||
167 | #else | ||
168 | static inline unsigned long cpm_muram_alloc(unsigned long size, | ||
169 | unsigned long align) | ||
170 | { | ||
171 | return -ENOSYS; | ||
172 | } | ||
173 | |||
174 | static inline int cpm_muram_free(unsigned long offset) | ||
175 | { | ||
176 | return -ENOSYS; | ||
177 | } | ||
178 | |||
179 | static inline unsigned long cpm_muram_alloc_fixed(unsigned long offset, | ||
180 | unsigned long size) | ||
181 | { | ||
182 | return -ENOSYS; | ||
183 | } | ||
184 | |||
185 | static inline void __iomem *cpm_muram_addr(unsigned long offset) | ||
186 | { | ||
187 | return NULL; | ||
188 | } | ||
189 | |||
190 | static inline unsigned long cpm_muram_offset(void __iomem *addr) | ||
191 | { | ||
192 | return -ENOSYS; | ||
193 | } | ||
194 | |||
195 | static inline dma_addr_t cpm_muram_dma(void __iomem *addr) | ||
196 | { | ||
197 | return 0; | ||
198 | } | ||
199 | #endif /* defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) */ | ||
200 | |||
201 | #ifdef CONFIG_CPM | 159 | #ifdef CONFIG_CPM |
202 | int cpm_command(u32 command, u8 opcode); | 160 | int cpm_command(u32 command, u8 opcode); |
203 | #else | 161 | #else |
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 77f52b26dad6..93ae809fe5ea 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h | |||
@@ -130,15 +130,6 @@ BEGIN_FTR_SECTION_NESTED(941) \ | |||
130 | END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941) | 130 | END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941) |
131 | 131 | ||
132 | /* | 132 | /* |
133 | * Increase the priority on systems where PPR save/restore is not | ||
134 | * implemented/ supported. | ||
135 | */ | ||
136 | #define HMT_MEDIUM_PPR_DISCARD \ | ||
137 | BEGIN_FTR_SECTION_NESTED(942) \ | ||
138 | HMT_MEDIUM; \ | ||
139 | END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,0,942) /*non P7*/ | ||
140 | |||
141 | /* | ||
142 | * Get an SPR into a register if the CPU has the given feature | 133 | * Get an SPR into a register if the CPU has the given feature |
143 | */ | 134 | */ |
144 | #define OPT_GET_SPR(ra, spr, ftr) \ | 135 | #define OPT_GET_SPR(ra, spr, ftr) \ |
@@ -263,17 +254,6 @@ do_kvm_##n: \ | |||
263 | #define KVM_HANDLER_SKIP(area, h, n) | 254 | #define KVM_HANDLER_SKIP(area, h, n) |
264 | #endif | 255 | #endif |
265 | 256 | ||
266 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE | ||
267 | #define KVMTEST_PR(n) __KVMTEST(n) | ||
268 | #define KVM_HANDLER_PR(area, h, n) __KVM_HANDLER(area, h, n) | ||
269 | #define KVM_HANDLER_PR_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n) | ||
270 | |||
271 | #else | ||
272 | #define KVMTEST_PR(n) | ||
273 | #define KVM_HANDLER_PR(area, h, n) | ||
274 | #define KVM_HANDLER_PR_SKIP(area, h, n) | ||
275 | #endif | ||
276 | |||
277 | #define NOTEST(n) | 257 | #define NOTEST(n) |
278 | 258 | ||
279 | /* | 259 | /* |
@@ -353,27 +333,25 @@ do_kvm_##n: \ | |||
353 | /* | 333 | /* |
354 | * Exception vectors. | 334 | * Exception vectors. |
355 | */ | 335 | */ |
356 | #define STD_EXCEPTION_PSERIES(loc, vec, label) \ | 336 | #define STD_EXCEPTION_PSERIES(vec, label) \ |
357 | . = loc; \ | 337 | . = vec; \ |
358 | .globl label##_pSeries; \ | 338 | .globl label##_pSeries; \ |
359 | label##_pSeries: \ | 339 | label##_pSeries: \ |
360 | HMT_MEDIUM_PPR_DISCARD; \ | ||
361 | SET_SCRATCH0(r13); /* save r13 */ \ | 340 | SET_SCRATCH0(r13); /* save r13 */ \ |
362 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ | 341 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ |
363 | EXC_STD, KVMTEST_PR, vec) | 342 | EXC_STD, KVMTEST, vec) |
364 | 343 | ||
365 | /* Version of above for when we have to branch out-of-line */ | 344 | /* Version of above for when we have to branch out-of-line */ |
366 | #define STD_EXCEPTION_PSERIES_OOL(vec, label) \ | 345 | #define STD_EXCEPTION_PSERIES_OOL(vec, label) \ |
367 | .globl label##_pSeries; \ | 346 | .globl label##_pSeries; \ |
368 | label##_pSeries: \ | 347 | label##_pSeries: \ |
369 | EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \ | 348 | EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \ |
370 | EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_STD) | 349 | EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_STD) |
371 | 350 | ||
372 | #define STD_EXCEPTION_HV(loc, vec, label) \ | 351 | #define STD_EXCEPTION_HV(loc, vec, label) \ |
373 | . = loc; \ | 352 | . = loc; \ |
374 | .globl label##_hv; \ | 353 | .globl label##_hv; \ |
375 | label##_hv: \ | 354 | label##_hv: \ |
376 | HMT_MEDIUM_PPR_DISCARD; \ | ||
377 | SET_SCRATCH0(r13); /* save r13 */ \ | 355 | SET_SCRATCH0(r13); /* save r13 */ \ |
378 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ | 356 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ |
379 | EXC_HV, KVMTEST, vec) | 357 | EXC_HV, KVMTEST, vec) |
@@ -389,7 +367,6 @@ label##_hv: \ | |||
389 | . = loc; \ | 367 | . = loc; \ |
390 | .globl label##_relon_pSeries; \ | 368 | .globl label##_relon_pSeries; \ |
391 | label##_relon_pSeries: \ | 369 | label##_relon_pSeries: \ |
392 | HMT_MEDIUM_PPR_DISCARD; \ | ||
393 | /* No guest interrupts come through here */ \ | 370 | /* No guest interrupts come through here */ \ |
394 | SET_SCRATCH0(r13); /* save r13 */ \ | 371 | SET_SCRATCH0(r13); /* save r13 */ \ |
395 | EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ | 372 | EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ |
@@ -405,7 +382,6 @@ label##_relon_pSeries: \ | |||
405 | . = loc; \ | 382 | . = loc; \ |
406 | .globl label##_relon_hv; \ | 383 | .globl label##_relon_hv; \ |
407 | label##_relon_hv: \ | 384 | label##_relon_hv: \ |
408 | HMT_MEDIUM_PPR_DISCARD; \ | ||
409 | /* No guest interrupts come through here */ \ | 385 | /* No guest interrupts come through here */ \ |
410 | SET_SCRATCH0(r13); /* save r13 */ \ | 386 | SET_SCRATCH0(r13); /* save r13 */ \ |
411 | EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ | 387 | EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ |
@@ -436,17 +412,13 @@ label##_relon_hv: \ | |||
436 | #define _SOFTEN_TEST(h, vec) __SOFTEN_TEST(h, vec) | 412 | #define _SOFTEN_TEST(h, vec) __SOFTEN_TEST(h, vec) |
437 | 413 | ||
438 | #define SOFTEN_TEST_PR(vec) \ | 414 | #define SOFTEN_TEST_PR(vec) \ |
439 | KVMTEST_PR(vec); \ | 415 | KVMTEST(vec); \ |
440 | _SOFTEN_TEST(EXC_STD, vec) | 416 | _SOFTEN_TEST(EXC_STD, vec) |
441 | 417 | ||
442 | #define SOFTEN_TEST_HV(vec) \ | 418 | #define SOFTEN_TEST_HV(vec) \ |
443 | KVMTEST(vec); \ | 419 | KVMTEST(vec); \ |
444 | _SOFTEN_TEST(EXC_HV, vec) | 420 | _SOFTEN_TEST(EXC_HV, vec) |
445 | 421 | ||
446 | #define SOFTEN_TEST_HV_201(vec) \ | ||
447 | KVMTEST(vec); \ | ||
448 | _SOFTEN_TEST(EXC_STD, vec) | ||
449 | |||
450 | #define SOFTEN_NOTEST_PR(vec) _SOFTEN_TEST(EXC_STD, vec) | 422 | #define SOFTEN_NOTEST_PR(vec) _SOFTEN_TEST(EXC_STD, vec) |
451 | #define SOFTEN_NOTEST_HV(vec) _SOFTEN_TEST(EXC_HV, vec) | 423 | #define SOFTEN_NOTEST_HV(vec) _SOFTEN_TEST(EXC_HV, vec) |
452 | 424 | ||
@@ -463,7 +435,6 @@ label##_relon_hv: \ | |||
463 | . = loc; \ | 435 | . = loc; \ |
464 | .globl label##_pSeries; \ | 436 | .globl label##_pSeries; \ |
465 | label##_pSeries: \ | 437 | label##_pSeries: \ |
466 | HMT_MEDIUM_PPR_DISCARD; \ | ||
467 | _MASKABLE_EXCEPTION_PSERIES(vec, label, \ | 438 | _MASKABLE_EXCEPTION_PSERIES(vec, label, \ |
468 | EXC_STD, SOFTEN_TEST_PR) | 439 | EXC_STD, SOFTEN_TEST_PR) |
469 | 440 | ||
@@ -481,7 +452,6 @@ label##_hv: \ | |||
481 | EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV); | 452 | EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV); |
482 | 453 | ||
483 | #define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \ | 454 | #define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \ |
484 | HMT_MEDIUM_PPR_DISCARD; \ | ||
485 | SET_SCRATCH0(r13); /* save r13 */ \ | 455 | SET_SCRATCH0(r13); /* save r13 */ \ |
486 | EXCEPTION_PROLOG_0(PACA_EXGEN); \ | 456 | EXCEPTION_PROLOG_0(PACA_EXGEN); \ |
487 | __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \ | 457 | __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \ |
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index e05808a328db..b0629249778b 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h | |||
@@ -47,12 +47,10 @@ | |||
47 | #define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000) | 47 | #define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000) |
48 | #define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000) | 48 | #define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000) |
49 | #define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000) | 49 | #define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000) |
50 | #define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000) | ||
51 | #define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000) | 50 | #define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000) |
52 | #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) | 51 | #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) |
53 | #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) | 52 | #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) |
54 | #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) | 53 | #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) |
55 | #define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000) | ||
56 | 54 | ||
57 | #ifndef __ASSEMBLY__ | 55 | #ifndef __ASSEMBLY__ |
58 | 56 | ||
@@ -70,8 +68,7 @@ enum { | |||
70 | FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | | 68 | FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | |
71 | FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN, | 69 | FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN, |
72 | FW_FEATURE_PSERIES_ALWAYS = 0, | 70 | FW_FEATURE_PSERIES_ALWAYS = 0, |
73 | FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 | | 71 | FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL, |
74 | FW_FEATURE_OPALv3, | ||
75 | FW_FEATURE_POWERNV_ALWAYS = 0, | 72 | FW_FEATURE_POWERNV_ALWAYS = 0, |
76 | FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, | 73 | FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, |
77 | FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, | 74 | FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, |
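With the obsolete OPALv2/OPALv3 bits gone, FW_FEATURE_POWERNV_POSSIBLE collapses to the single FW_FEATURE_OPAL flag, so platform code tests one bit. A hedged sketch of a call site (firmware_has_feature() is the existing accessor; the called setup function is a placeholder, not a real API):

    #include <asm/firmware.h>

    static void example_powernv_setup(void)
    {
            /* One feature bit now covers every supported OPAL revision. */
            if (firmware_has_feature(FW_FEATURE_OPAL))
                    setup_opal_console();   /* placeholder name */
    }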
diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h deleted file mode 100644 index bedbff891423..000000000000 --- a/arch/powerpc/include/asm/immap_qe.h +++ /dev/null | |||
@@ -1,491 +0,0 @@ | |||
1 | /* | ||
2 | * QUICC Engine (QE) Internal Memory Map. | ||
3 | * The Internal Memory Map for devices with QE on them. This | ||
4 | * is the superset of all QE devices (8360, etc.). | ||
5 | |||
6 | * Copyright (C) 2006. Freescale Semiconductor, Inc. All rights reserved. | ||
7 | * | ||
8 | * Authors: Shlomi Gridish <gridish@freescale.com> | ||
9 | * Li Yang <leoli@freescale.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License as published by the | ||
13 | * Free Software Foundation; either version 2 of the License, or (at your | ||
14 | * option) any later version. | ||
15 | */ | ||
16 | #ifndef _ASM_POWERPC_IMMAP_QE_H | ||
17 | #define _ASM_POWERPC_IMMAP_QE_H | ||
18 | #ifdef __KERNEL__ | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <asm/io.h> | ||
22 | |||
23 | #define QE_IMMAP_SIZE (1024 * 1024) /* 1MB from 1MB+IMMR */ | ||
24 | |||
25 | /* QE I-RAM */ | ||
26 | struct qe_iram { | ||
27 | __be32 iadd; /* I-RAM Address Register */ | ||
28 | __be32 idata; /* I-RAM Data Register */ | ||
29 | u8 res0[0x04]; | ||
30 | __be32 iready; /* I-RAM Ready Register */ | ||
31 | u8 res1[0x70]; | ||
32 | } __attribute__ ((packed)); | ||
33 | |||
34 | /* QE Interrupt Controller */ | ||
35 | struct qe_ic_regs { | ||
36 | __be32 qicr; | ||
37 | __be32 qivec; | ||
38 | __be32 qripnr; | ||
39 | __be32 qipnr; | ||
40 | __be32 qipxcc; | ||
41 | __be32 qipycc; | ||
42 | __be32 qipwcc; | ||
43 | __be32 qipzcc; | ||
44 | __be32 qimr; | ||
45 | __be32 qrimr; | ||
46 | __be32 qicnr; | ||
47 | u8 res0[0x4]; | ||
48 | __be32 qiprta; | ||
49 | __be32 qiprtb; | ||
50 | u8 res1[0x4]; | ||
51 | __be32 qricr; | ||
52 | u8 res2[0x20]; | ||
53 | __be32 qhivec; | ||
54 | u8 res3[0x1C]; | ||
55 | } __attribute__ ((packed)); | ||
56 | |||
57 | /* Communications Processor */ | ||
58 | struct cp_qe { | ||
59 | __be32 cecr; /* QE command register */ | ||
60 | __be32 ceccr; /* QE controller configuration register */ | ||
61 | __be32 cecdr; /* QE command data register */ | ||
62 | u8 res0[0xA]; | ||
63 | __be16 ceter; /* QE timer event register */ | ||
64 | u8 res1[0x2]; | ||
65 | __be16 cetmr; /* QE timers mask register */ | ||
66 | __be32 cetscr; /* QE time-stamp timer control register */ | ||
67 | __be32 cetsr1; /* QE time-stamp register 1 */ | ||
68 | __be32 cetsr2; /* QE time-stamp register 2 */ | ||
69 | u8 res2[0x8]; | ||
70 | __be32 cevter; /* QE virtual tasks event register */ | ||
71 | __be32 cevtmr; /* QE virtual tasks mask register */ | ||
72 | __be16 cercr; /* QE RAM control register */ | ||
73 | u8 res3[0x2]; | ||
74 | u8 res4[0x24]; | ||
75 | __be16 ceexe1; /* QE external request 1 event register */ | ||
76 | u8 res5[0x2]; | ||
77 | __be16 ceexm1; /* QE external request 1 mask register */ | ||
78 | u8 res6[0x2]; | ||
79 | __be16 ceexe2; /* QE external request 2 event register */ | ||
80 | u8 res7[0x2]; | ||
81 | __be16 ceexm2; /* QE external request 2 mask register */ | ||
82 | u8 res8[0x2]; | ||
83 | __be16 ceexe3; /* QE external request 3 event register */ | ||
84 | u8 res9[0x2]; | ||
85 | __be16 ceexm3; /* QE external request 3 mask register */ | ||
86 | u8 res10[0x2]; | ||
87 | __be16 ceexe4; /* QE external request 4 event register */ | ||
88 | u8 res11[0x2]; | ||
89 | __be16 ceexm4; /* QE external request 4 mask register */ | ||
90 | u8 res12[0x3A]; | ||
91 | __be32 ceurnr; /* QE microcode revision number register */ | ||
92 | u8 res13[0x244]; | ||
93 | } __attribute__ ((packed)); | ||
94 | |||
95 | /* QE Multiplexer */ | ||
96 | struct qe_mux { | ||
97 | __be32 cmxgcr; /* CMX general clock route register */ | ||
98 | __be32 cmxsi1cr_l; /* CMX SI1 clock route low register */ | ||
99 | __be32 cmxsi1cr_h; /* CMX SI1 clock route high register */ | ||
100 | __be32 cmxsi1syr; /* CMX SI1 SYNC route register */ | ||
101 | __be32 cmxucr[4]; /* CMX UCCx clock route registers */ | ||
102 | __be32 cmxupcr; /* CMX UPC clock route register */ | ||
103 | u8 res0[0x1C]; | ||
104 | } __attribute__ ((packed)); | ||
105 | |||
106 | /* QE Timers */ | ||
107 | struct qe_timers { | ||
108 | u8 gtcfr1; /* Timer 1 and Timer 2 global config register*/ | ||
109 | u8 res0[0x3]; | ||
110 | u8 gtcfr2; /* Timer 3 and timer 4 global config register*/ | ||
111 | u8 res1[0xB]; | ||
112 | __be16 gtmdr1; /* Timer 1 mode register */ | ||
113 | __be16 gtmdr2; /* Timer 2 mode register */ | ||
114 | __be16 gtrfr1; /* Timer 1 reference register */ | ||
115 | __be16 gtrfr2; /* Timer 2 reference register */ | ||
116 | __be16 gtcpr1; /* Timer 1 capture register */ | ||
117 | __be16 gtcpr2; /* Timer 2 capture register */ | ||
118 | __be16 gtcnr1; /* Timer 1 counter */ | ||
119 | __be16 gtcnr2; /* Timer 2 counter */ | ||
120 | __be16 gtmdr3; /* Timer 3 mode register */ | ||
121 | __be16 gtmdr4; /* Timer 4 mode register */ | ||
122 | __be16 gtrfr3; /* Timer 3 reference register */ | ||
123 | __be16 gtrfr4; /* Timer 4 reference register */ | ||
124 | __be16 gtcpr3; /* Timer 3 capture register */ | ||
125 | __be16 gtcpr4; /* Timer 4 capture register */ | ||
126 | __be16 gtcnr3; /* Timer 3 counter */ | ||
127 | __be16 gtcnr4; /* Timer 4 counter */ | ||
128 | __be16 gtevr1; /* Timer 1 event register */ | ||
129 | __be16 gtevr2; /* Timer 2 event register */ | ||
130 | __be16 gtevr3; /* Timer 3 event register */ | ||
131 | __be16 gtevr4; /* Timer 4 event register */ | ||
132 | __be16 gtps; /* Timer 1 prescale register */ | ||
133 | u8 res2[0x46]; | ||
134 | } __attribute__ ((packed)); | ||
135 | |||
136 | /* BRG */ | ||
137 | struct qe_brg { | ||
138 | __be32 brgc[16]; /* BRG configuration registers */ | ||
139 | u8 res0[0x40]; | ||
140 | } __attribute__ ((packed)); | ||
141 | |||
142 | /* SPI */ | ||
143 | struct spi { | ||
144 | u8 res0[0x20]; | ||
145 | __be32 spmode; /* SPI mode register */ | ||
146 | u8 res1[0x2]; | ||
147 | u8 spie; /* SPI event register */ | ||
148 | u8 res2[0x1]; | ||
149 | u8 res3[0x2]; | ||
150 | u8 spim; /* SPI mask register */ | ||
151 | u8 res4[0x1]; | ||
152 | u8 res5[0x1]; | ||
153 | u8 spcom; /* SPI command register */ | ||
154 | u8 res6[0x2]; | ||
155 | __be32 spitd; /* SPI transmit data register (cpu mode) */ | ||
156 | __be32 spird; /* SPI receive data register (cpu mode) */ | ||
157 | u8 res7[0x8]; | ||
158 | } __attribute__ ((packed)); | ||
159 | |||
160 | /* SI */ | ||
161 | struct si1 { | ||
162 | __be16 siamr1; /* SI1 TDMA mode register */ | ||
163 | __be16 sibmr1; /* SI1 TDMB mode register */ | ||
164 | __be16 sicmr1; /* SI1 TDMC mode register */ | ||
165 | __be16 sidmr1; /* SI1 TDMD mode register */ | ||
166 | u8 siglmr1_h; /* SI1 global mode register high */ | ||
167 | u8 res0[0x1]; | ||
168 | u8 sicmdr1_h; /* SI1 command register high */ | ||
169 | u8 res2[0x1]; | ||
170 | u8 sistr1_h; /* SI1 status register high */ | ||
171 | u8 res3[0x1]; | ||
172 | __be16 sirsr1_h; /* SI1 RAM shadow address register high */ | ||
173 | u8 sitarc1; /* SI1 RAM counter Tx TDMA */ | ||
174 | u8 sitbrc1; /* SI1 RAM counter Tx TDMB */ | ||
175 | u8 sitcrc1; /* SI1 RAM counter Tx TDMC */ | ||
176 | u8 sitdrc1; /* SI1 RAM counter Tx TDMD */ | ||
177 | u8 sirarc1; /* SI1 RAM counter Rx TDMA */ | ||
178 | u8 sirbrc1; /* SI1 RAM counter Rx TDMB */ | ||
179 | u8 sircrc1; /* SI1 RAM counter Rx TDMC */ | ||
180 | u8 sirdrc1; /* SI1 RAM counter Rx TDMD */ | ||
181 | u8 res4[0x8]; | ||
182 | __be16 siemr1; /* SI1 TDME mode register 16 bits */ | ||
183 | __be16 sifmr1; /* SI1 TDMF mode register 16 bits */ | ||
184 | __be16 sigmr1; /* SI1 TDMG mode register 16 bits */ | ||
185 | __be16 sihmr1; /* SI1 TDMH mode register 16 bits */ | ||
186 | u8 siglmg1_l; /* SI1 global mode register low 8 bits */ | ||
187 | u8 res5[0x1]; | ||
188 | u8 sicmdr1_l; /* SI1 command register low 8 bits */ | ||
189 | u8 res6[0x1]; | ||
190 | u8 sistr1_l; /* SI1 status register low 8 bits */ | ||
191 | u8 res7[0x1]; | ||
192 | __be16 sirsr1_l; /* SI1 RAM shadow address register low 16 bits*/ | ||
193 | u8 siterc1; /* SI1 RAM counter Tx TDME 8 bits */ | ||
194 | u8 sitfrc1; /* SI1 RAM counter Tx TDMF 8 bits */ | ||
195 | u8 sitgrc1; /* SI1 RAM counter Tx TDMG 8 bits */ | ||
196 | u8 sithrc1; /* SI1 RAM counter Tx TDMH 8 bits */ | ||
197 | u8 sirerc1; /* SI1 RAM counter Rx TDME 8 bits */ | ||
198 | u8 sirfrc1; /* SI1 RAM counter Rx TDMF 8 bits */ | ||
199 | u8 sirgrc1; /* SI1 RAM counter Rx TDMG 8 bits */ | ||
200 | u8 sirhrc1; /* SI1 RAM counter Rx TDMH 8 bits */ | ||
201 | u8 res8[0x8]; | ||
202 | __be32 siml1; /* SI1 multiframe limit register */ | ||
203 | u8 siedm1; /* SI1 extended diagnostic mode register */ | ||
204 | u8 res9[0xBB]; | ||
205 | } __attribute__ ((packed)); | ||
206 | |||
207 | /* SI Routing Tables */ | ||
208 | struct sir { | ||
209 | u8 tx[0x400]; | ||
210 | u8 rx[0x400]; | ||
211 | u8 res0[0x800]; | ||
212 | } __attribute__ ((packed)); | ||
213 | |||
214 | /* USB Controller */ | ||
215 | struct qe_usb_ctlr { | ||
216 | u8 usb_usmod; | ||
217 | u8 usb_usadr; | ||
218 | u8 usb_uscom; | ||
219 | u8 res1[1]; | ||
220 | __be16 usb_usep[4]; | ||
221 | u8 res2[4]; | ||
222 | __be16 usb_usber; | ||
223 | u8 res3[2]; | ||
224 | __be16 usb_usbmr; | ||
225 | u8 res4[1]; | ||
226 | u8 usb_usbs; | ||
227 | __be16 usb_ussft; | ||
228 | u8 res5[2]; | ||
229 | __be16 usb_usfrn; | ||
230 | u8 res6[0x22]; | ||
231 | } __attribute__ ((packed)); | ||
232 | |||
233 | /* MCC */ | ||
234 | struct qe_mcc { | ||
235 | __be32 mcce; /* MCC event register */ | ||
236 | __be32 mccm; /* MCC mask register */ | ||
237 | __be32 mccf; /* MCC configuration register */ | ||
238 | __be32 merl; /* MCC emergency request level register */ | ||
239 | u8 res0[0xF0]; | ||
240 | } __attribute__ ((packed)); | ||
241 | |||
242 | /* QE UCC Slow */ | ||
243 | struct ucc_slow { | ||
244 | __be32 gumr_l; /* UCCx general mode register (low) */ | ||
245 | __be32 gumr_h; /* UCCx general mode register (high) */ | ||
246 | __be16 upsmr; /* UCCx protocol-specific mode register */ | ||
247 | u8 res0[0x2]; | ||
248 | __be16 utodr; /* UCCx transmit on demand register */ | ||
249 | __be16 udsr; /* UCCx data synchronization register */ | ||
250 | __be16 ucce; /* UCCx event register */ | ||
251 | u8 res1[0x2]; | ||
252 | __be16 uccm; /* UCCx mask register */ | ||
253 | u8 res2[0x1]; | ||
254 | u8 uccs; /* UCCx status register */ | ||
255 | u8 res3[0x24]; | ||
256 | __be16 utpt; | ||
257 | u8 res4[0x52]; | ||
258 | u8 guemr; /* UCC general extended mode register */ | ||
259 | } __attribute__ ((packed)); | ||
260 | |||
261 | /* QE UCC Fast */ | ||
262 | struct ucc_fast { | ||
263 | __be32 gumr; /* UCCx general mode register */ | ||
264 | __be32 upsmr; /* UCCx protocol-specific mode register */ | ||
265 | __be16 utodr; /* UCCx transmit on demand register */ | ||
266 | u8 res0[0x2]; | ||
267 | __be16 udsr; /* UCCx data synchronization register */ | ||
268 | u8 res1[0x2]; | ||
269 | __be32 ucce; /* UCCx event register */ | ||
270 | __be32 uccm; /* UCCx mask register */ | ||
271 | u8 uccs; /* UCCx status register */ | ||
272 | u8 res2[0x7]; | ||
273 | __be32 urfb; /* UCC receive FIFO base */ | ||
274 | __be16 urfs; /* UCC receive FIFO size */ | ||
275 | u8 res3[0x2]; | ||
276 | __be16 urfet; /* UCC receive FIFO emergency threshold */ | ||
277 | __be16 urfset; /* UCC receive FIFO special emergency | ||
278 | threshold */ | ||
279 | __be32 utfb; /* UCC transmit FIFO base */ | ||
280 | __be16 utfs; /* UCC transmit FIFO size */ | ||
281 | u8 res4[0x2]; | ||
282 | __be16 utfet; /* UCC transmit FIFO emergency threshold */ | ||
283 | u8 res5[0x2]; | ||
284 | __be16 utftt; /* UCC transmit FIFO transmit threshold */ | ||
285 | u8 res6[0x2]; | ||
286 | __be16 utpt; /* UCC transmit polling timer */ | ||
287 | u8 res7[0x2]; | ||
288 | __be32 urtry; /* UCC retry counter register */ | ||
289 | u8 res8[0x4C]; | ||
290 | u8 guemr; /* UCC general extended mode register */ | ||
291 | } __attribute__ ((packed)); | ||
292 | |||
293 | struct ucc { | ||
294 | union { | ||
295 | struct ucc_slow slow; | ||
296 | struct ucc_fast fast; | ||
297 | u8 res[0x200]; /* UCC blocks are 512 bytes each */ | ||
298 | }; | ||
299 | } __attribute__ ((packed)); | ||
300 | |||
301 | /* MultiPHY UTOPIA POS Controllers (UPC) */ | ||
302 | struct upc { | ||
303 | __be32 upgcr; /* UTOPIA/POS general configuration register */ | ||
304 | __be32 uplpa; /* UTOPIA/POS last PHY address */ | ||
305 | __be32 uphec; /* ATM HEC register */ | ||
306 | __be32 upuc; /* UTOPIA/POS UCC configuration */ | ||
307 | __be32 updc1; /* UTOPIA/POS device 1 configuration */ | ||
308 | __be32 updc2; /* UTOPIA/POS device 2 configuration */ | ||
309 | __be32 updc3; /* UTOPIA/POS device 3 configuration */ | ||
310 | __be32 updc4; /* UTOPIA/POS device 4 configuration */ | ||
311 | __be32 upstpa; /* UTOPIA/POS STPA threshold */ | ||
312 | u8 res0[0xC]; | ||
313 | __be32 updrs1_h; /* UTOPIA/POS device 1 rate select */ | ||
314 | __be32 updrs1_l; /* UTOPIA/POS device 1 rate select */ | ||
315 | __be32 updrs2_h; /* UTOPIA/POS device 2 rate select */ | ||
316 | __be32 updrs2_l; /* UTOPIA/POS device 2 rate select */ | ||
317 | __be32 updrs3_h; /* UTOPIA/POS device 3 rate select */ | ||
318 | __be32 updrs3_l; /* UTOPIA/POS device 3 rate select */ | ||
319 | __be32 updrs4_h; /* UTOPIA/POS device 4 rate select */ | ||
320 | __be32 updrs4_l; /* UTOPIA/POS device 4 rate select */ | ||
321 | __be32 updrp1; /* UTOPIA/POS device 1 receive priority low */ | ||
322 | __be32 updrp2; /* UTOPIA/POS device 2 receive priority low */ | ||
323 | __be32 updrp3; /* UTOPIA/POS device 3 receive priority low */ | ||
324 | __be32 updrp4; /* UTOPIA/POS device 4 receive priority low */ | ||
325 | __be32 upde1; /* UTOPIA/POS device 1 event */ | ||
326 | __be32 upde2; /* UTOPIA/POS device 2 event */ | ||
327 | __be32 upde3; /* UTOPIA/POS device 3 event */ | ||
328 | __be32 upde4; /* UTOPIA/POS device 4 event */ | ||
329 | __be16 uprp1; | ||
330 | __be16 uprp2; | ||
331 | __be16 uprp3; | ||
332 | __be16 uprp4; | ||
333 | u8 res1[0x8]; | ||
334 | __be16 uptirr1_0; /* Device 1 transmit internal rate 0 */ | ||
335 | __be16 uptirr1_1; /* Device 1 transmit internal rate 1 */ | ||
336 | __be16 uptirr1_2; /* Device 1 transmit internal rate 2 */ | ||
337 | __be16 uptirr1_3; /* Device 1 transmit internal rate 3 */ | ||
338 | __be16 uptirr2_0; /* Device 2 transmit internal rate 0 */ | ||
339 | __be16 uptirr2_1; /* Device 2 transmit internal rate 1 */ | ||
340 | __be16 uptirr2_2; /* Device 2 transmit internal rate 2 */ | ||
341 | __be16 uptirr2_3; /* Device 2 transmit internal rate 3 */ | ||
342 | __be16 uptirr3_0; /* Device 3 transmit internal rate 0 */ | ||
343 | __be16 uptirr3_1; /* Device 3 transmit internal rate 1 */ | ||
344 | __be16 uptirr3_2; /* Device 3 transmit internal rate 2 */ | ||
345 | __be16 uptirr3_3; /* Device 3 transmit internal rate 3 */ | ||
346 | __be16 uptirr4_0; /* Device 4 transmit internal rate 0 */ | ||
347 | __be16 uptirr4_1; /* Device 4 transmit internal rate 1 */ | ||
348 | __be16 uptirr4_2; /* Device 4 transmit internal rate 2 */ | ||
349 | __be16 uptirr4_3; /* Device 4 transmit internal rate 3 */ | ||
350 | __be32 uper1; /* Device 1 port enable register */ | ||
351 | __be32 uper2; /* Device 2 port enable register */ | ||
352 | __be32 uper3; /* Device 3 port enable register */ | ||
353 | __be32 uper4; /* Device 4 port enable register */ | ||
354 | u8 res2[0x150]; | ||
355 | } __attribute__ ((packed)); | ||
356 | |||
357 | /* SDMA */ | ||
358 | struct sdma { | ||
359 | __be32 sdsr; /* Serial DMA status register */ | ||
360 | __be32 sdmr; /* Serial DMA mode register */ | ||
361 | __be32 sdtr1; /* SDMA system bus threshold register */ | ||
362 | __be32 sdtr2; /* SDMA secondary bus threshold register */ | ||
363 | __be32 sdhy1; /* SDMA system bus hysteresis register */ | ||
364 | __be32 sdhy2; /* SDMA secondary bus hysteresis register */ | ||
365 | __be32 sdta1; /* SDMA system bus address register */ | ||
366 | __be32 sdta2; /* SDMA secondary bus address register */ | ||
367 | __be32 sdtm1; /* SDMA system bus MSNUM register */ | ||
368 | __be32 sdtm2; /* SDMA secondary bus MSNUM register */ | ||
369 | u8 res0[0x10]; | ||
370 | __be32 sdaqr; /* SDMA address bus qualify register */ | ||
371 | __be32 sdaqmr; /* SDMA address bus qualify mask register */ | ||
372 | u8 res1[0x4]; | ||
373 | __be32 sdebcr; /* SDMA CAM entries base register */ | ||
374 | u8 res2[0x38]; | ||
375 | } __attribute__ ((packed)); | ||
376 | |||
377 | /* Debug Space */ | ||
378 | struct dbg { | ||
379 | __be32 bpdcr; /* Breakpoint debug command register */ | ||
380 | __be32 bpdsr; /* Breakpoint debug status register */ | ||
381 | __be32 bpdmr; /* Breakpoint debug mask register */ | ||
382 | __be32 bprmrr0; /* Breakpoint request mode risc register 0 */ | ||
383 | __be32 bprmrr1; /* Breakpoint request mode risc register 1 */ | ||
384 | u8 res0[0x8]; | ||
385 | __be32 bprmtr0; /* Breakpoint request mode trb register 0 */ | ||
386 | __be32 bprmtr1; /* Breakpoint request mode trb register 1 */ | ||
387 | u8 res1[0x8]; | ||
388 | __be32 bprmir; /* Breakpoint request mode immediate register */ | ||
389 | __be32 bprmsr; /* Breakpoint request mode serial register */ | ||
390 | __be32 bpemr; /* Breakpoint exit mode register */ | ||
391 | u8 res2[0x48]; | ||
392 | } __attribute__ ((packed)); | ||
393 | |||
394 | /* | ||
395 | * RISC Special Registers (Trap and Breakpoint). These are described in | ||
396 | * the QE Developer's Handbook. | ||
397 | */ | ||
398 | struct rsp { | ||
399 | __be32 tibcr[16]; /* Trap/instruction breakpoint control regs */ | ||
400 | u8 res0[64]; | ||
401 | __be32 ibcr0; | ||
402 | __be32 ibs0; | ||
403 | __be32 ibcnr0; | ||
404 | u8 res1[4]; | ||
405 | __be32 ibcr1; | ||
406 | __be32 ibs1; | ||
407 | __be32 ibcnr1; | ||
408 | __be32 npcr; | ||
409 | __be32 dbcr; | ||
410 | __be32 dbar; | ||
411 | __be32 dbamr; | ||
412 | __be32 dbsr; | ||
413 | __be32 dbcnr; | ||
414 | u8 res2[12]; | ||
415 | __be32 dbdr_h; | ||
416 | __be32 dbdr_l; | ||
417 | __be32 dbdmr_h; | ||
418 | __be32 dbdmr_l; | ||
419 | __be32 bsr; | ||
420 | __be32 bor; | ||
421 | __be32 bior; | ||
422 | u8 res3[4]; | ||
423 | __be32 iatr[4]; | ||
424 | __be32 eccr; /* Exception control configuration register */ | ||
425 | __be32 eicr; | ||
426 | u8 res4[0x100-0xf8]; | ||
427 | } __attribute__ ((packed)); | ||
428 | |||
429 | struct qe_immap { | ||
430 | struct qe_iram iram; /* I-RAM */ | ||
431 | struct qe_ic_regs ic; /* Interrupt Controller */ | ||
432 | struct cp_qe cp; /* Communications Processor */ | ||
433 | struct qe_mux qmx; /* QE Multiplexer */ | ||
434 | struct qe_timers qet; /* QE Timers */ | ||
435 | struct spi spi[0x2]; /* spi */ | ||
436 | struct qe_mcc mcc; /* mcc */ | ||
437 | struct qe_brg brg; /* brg */ | ||
438 | struct qe_usb_ctlr usb; /* USB */ | ||
439 | struct si1 si1; /* SI */ | ||
440 | u8 res11[0x800]; | ||
441 | struct sir sir; /* SI Routing Tables */ | ||
442 | struct ucc ucc1; /* ucc1 */ | ||
443 | struct ucc ucc3; /* ucc3 */ | ||
444 | struct ucc ucc5; /* ucc5 */ | ||
445 | struct ucc ucc7; /* ucc7 */ | ||
446 | u8 res12[0x600]; | ||
447 | struct upc upc1; /* MultiPHY UTOPIA POS Ctrlr 1*/ | ||
448 | struct ucc ucc2; /* ucc2 */ | ||
449 | struct ucc ucc4; /* ucc4 */ | ||
450 | struct ucc ucc6; /* ucc6 */ | ||
451 | struct ucc ucc8; /* ucc8 */ | ||
452 | u8 res13[0x600]; | ||
453 | struct upc upc2; /* MultiPHY UTOPIA POS Ctrlr 2*/ | ||
454 | struct sdma sdma; /* SDMA */ | ||
455 | struct dbg dbg; /* 0x104080 - 0x1040FF | ||
456 | Debug Space */ | ||
457 | struct rsp rsp[0x2]; /* 0x104100 - 0x1042FF | ||
458 | RISC Special Registers | ||
459 | (Trap and Breakpoint) */ | ||
460 | u8 res14[0x300]; /* 0x104300 - 0x1045FF */ | ||
461 | u8 res15[0x3A00]; /* 0x104600 - 0x107FFF */ | ||
462 | u8 res16[0x8000]; /* 0x108000 - 0x110000 */ | ||
463 | u8 muram[0xC000]; /* 0x110000 - 0x11C000 | ||
464 | Multi-user RAM */ | ||
465 | u8 res17[0x24000]; /* 0x11C000 - 0x140000 */ | ||
466 | u8 res18[0xC0000]; /* 0x140000 - 0x200000 */ | ||
467 | } __attribute__ ((packed)); | ||
468 | |||
469 | extern struct qe_immap __iomem *qe_immr; | ||
470 | extern phys_addr_t get_qe_base(void); | ||
471 | |||
472 | /* | ||
473 | * Returns the offset within the QE address space of the given pointer. | ||
474 | * | ||
475 | * Note that the QE does not support 36-bit physical addresses, so if | ||
476 | * get_qe_base() returns a number above 4GB, the caller will probably fail. | ||
477 | */ | ||
478 | static inline phys_addr_t immrbar_virt_to_phys(void *address) | ||
479 | { | ||
480 | void *q = (void *)qe_immr; | ||
481 | |||
482 | /* Is it a MURAM address? */ | ||
483 | if ((address >= q) && (address < (q + QE_IMMAP_SIZE))) | ||
484 | return get_qe_base() + (address - q); | ||
485 | |||
486 | /* It's an address returned by kmalloc */ | ||
487 | return virt_to_phys(address); | ||
488 | } | ||
489 | |||
490 | #endif /* __KERNEL__ */ | ||
491 | #endif /* _ASM_POWERPC_IMMAP_QE_H */ | ||
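The deleted header's immrbar_virt_to_phys() handled exactly two cases: a pointer inside the 1MB immap window is rebased onto get_qe_base(), and anything else is assumed to be ordinary kmalloc() memory and goes through virt_to_phys(). Per the merge summary the QE headers move out of arch/powerpc to be shared with arm; this hunk only shows the removal. A minimal usage sketch with a hypothetical buffer, assuming the relocated header is in scope:

    /* Hypothetical caller: a descriptor may sit in MURAM or in lowmem;
     * the helper picks the right translation either way. */
    static phys_addr_t qe_desc_bus_addr(void *desc)
    {
            return immrbar_virt_to_phys(desc);
    }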
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 5879fde56f3c..6c1297ec374c 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h | |||
@@ -385,6 +385,17 @@ static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr) | |||
385 | { | 385 | { |
386 | *(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v; | 386 | *(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v; |
387 | } | 387 | } |
388 | |||
389 | /* | ||
390 | * Real mode version of the above. stdcix is only supposed to be used | ||
391 | * in hypervisor real mode as per the architecture spec. | ||
392 | */ | ||
393 | static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr) | ||
394 | { | ||
395 | __asm__ __volatile__("stdcix %0,0,%1" | ||
396 | : : "r" (val), "r" (paddr) : "memory"); | ||
397 | } | ||
398 | |||
388 | #endif /* __powerpc64__ */ | 399 | #endif /* __powerpc64__ */ |
389 | 400 | ||
390 | /* | 401 | /* |
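The new __raw_rm_writeq() mirrors the __raw_writeq() directly above it but emits stdcix, the cache-inhibited indexed store that the architecture reserves for hypervisor real mode; like the other __raw_ accessors it performs no byte swapping and no barriers. A hedged sketch of a caller (the doorbell address and value are invented for illustration):

    #include <asm/io.h>   /* __raw_rm_writeq() on powerpc */

    /* Hypothetical real-mode path: with the MMU off, a normal store to a
     * cache-inhibited MMIO doorbell is not permitted; stdcix is. */
    static void example_rm_ring_doorbell(u64 doorbell_paddr)
    {
            __raw_rm_writeq(1ULL, (void __iomem *)doorbell_paddr);
    }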
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index ba3342bbdbda..7352d3f212df 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h | |||
@@ -21,7 +21,7 @@ | |||
21 | * need for various slices related matters. Note that this isn't the | 21 | * need for various slices related matters. Note that this isn't the |
22 | * complete pgtable.h but only a portion of it. | 22 | * complete pgtable.h but only a portion of it. |
23 | */ | 23 | */ |
24 | #include <asm/pgtable-ppc64.h> | 24 | #include <asm/book3s/64/pgtable.h> |
25 | #include <asm/bug.h> | 25 | #include <asm/bug.h> |
26 | #include <asm/processor.h> | 26 | #include <asm/processor.h> |
27 | 27 | ||
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 9c326565d498..c82cbf52d19e 100644 --- a/arch/powerpc/include/asm/pgtable-ppc32.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_POWERPC_PGTABLE_PPC32_H | 1 | #ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H |
2 | #define _ASM_POWERPC_PGTABLE_PPC32_H | 2 | #define _ASM_POWERPC_NOHASH_32_PGTABLE_H |
3 | 3 | ||
4 | #include <asm-generic/pgtable-nopmd.h> | 4 | #include <asm-generic/pgtable-nopmd.h> |
5 | 5 | ||
@@ -106,17 +106,15 @@ extern int icache_44x_need_flush; | |||
106 | */ | 106 | */ |
107 | 107 | ||
108 | #if defined(CONFIG_40x) | 108 | #if defined(CONFIG_40x) |
109 | #include <asm/pte-40x.h> | 109 | #include <asm/nohash/32/pte-40x.h> |
110 | #elif defined(CONFIG_44x) | 110 | #elif defined(CONFIG_44x) |
111 | #include <asm/pte-44x.h> | 111 | #include <asm/nohash/32/pte-44x.h> |
112 | #elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT) | 112 | #elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT) |
113 | #include <asm/pte-book3e.h> | 113 | #include <asm/nohash/pte-book3e.h> |
114 | #elif defined(CONFIG_FSL_BOOKE) | 114 | #elif defined(CONFIG_FSL_BOOKE) |
115 | #include <asm/pte-fsl-booke.h> | 115 | #include <asm/nohash/32/pte-fsl-booke.h> |
116 | #elif defined(CONFIG_8xx) | 116 | #elif defined(CONFIG_8xx) |
117 | #include <asm/pte-8xx.h> | 117 | #include <asm/nohash/32/pte-8xx.h> |
118 | #else /* CONFIG_6xx */ | ||
119 | #include <asm/pte-hash32.h> | ||
120 | #endif | 118 | #endif |
121 | 119 | ||
122 | /* And here we include common definitions */ | 120 | /* And here we include common definitions */ |
@@ -130,7 +128,12 @@ extern int icache_44x_need_flush; | |||
130 | #define pmd_none(pmd) (!pmd_val(pmd)) | 128 | #define pmd_none(pmd) (!pmd_val(pmd)) |
131 | #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) | 129 | #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) |
132 | #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) | 130 | #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) |
133 | #define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0) | 131 | static inline void pmd_clear(pmd_t *pmdp) |
132 | { | ||
133 | *pmdp = __pmd(0); | ||
134 | } | ||
135 | |||
136 | |||
134 | 137 | ||
135 | /* | 138 | /* |
136 | * When flushing the tlb entry for a page, we also need to flush the hash | 139 | * When flushing the tlb entry for a page, we also need to flush the hash |
@@ -337,4 +340,4 @@ extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, | |||
337 | 340 | ||
338 | #endif /* !__ASSEMBLY__ */ | 341 | #endif /* !__ASSEMBLY__ */ |
339 | 342 | ||
340 | #endif /* _ASM_POWERPC_PGTABLE_PPC32_H */ | 343 | #endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */ |
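The pmd_clear() change in this hunk is representative of the accessor conversions across the series: the old macro assigned through pmd_val(), which only compiles while pmd_val() expands to an lvalue and so pins down the representation of pmd_t, whereas the inline function builds a fresh value with the __pmd() constructor and type-checks its pointer argument. Side by side (both forms taken from the hunk):

    /* Old: writes through pmd_val(); requires it to be an lvalue. */
    #define pmd_clear(pmdp)  do { pmd_val(*(pmdp)) = 0; } while (0)

    /* New: representation-independent, works with or without
     * CONFIG_STRICT_MM_TYPECHECKS' struct-wrapped page table types. */
    static inline void pmd_clear(pmd_t *pmdp)
    {
            *pmdp = __pmd(0);
    }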
diff --git a/arch/powerpc/include/asm/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h index 486b1ef81338..9624ebdacc47 100644 --- a/arch/powerpc/include/asm/pte-40x.h +++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_POWERPC_PTE_40x_H | 1 | #ifndef _ASM_POWERPC_NOHASH_32_PTE_40x_H |
2 | #define _ASM_POWERPC_PTE_40x_H | 2 | #define _ASM_POWERPC_NOHASH_32_PTE_40x_H |
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | /* | 5 | /* |
@@ -61,4 +61,4 @@ | |||
61 | #define PTE_ATOMIC_UPDATES 1 | 61 | #define PTE_ATOMIC_UPDATES 1 |
62 | 62 | ||
63 | #endif /* __KERNEL__ */ | 63 | #endif /* __KERNEL__ */ |
64 | #endif /* _ASM_POWERPC_PTE_40x_H */ | 64 | #endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */ |
diff --git a/arch/powerpc/include/asm/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h index 36f75fab23f5..fdab41c654ef 100644 --- a/arch/powerpc/include/asm/pte-44x.h +++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_POWERPC_PTE_44x_H | 1 | #ifndef _ASM_POWERPC_NOHASH_32_PTE_44x_H |
2 | #define _ASM_POWERPC_PTE_44x_H | 2 | #define _ASM_POWERPC_NOHASH_32_PTE_44x_H |
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | /* | 5 | /* |
@@ -94,4 +94,4 @@ | |||
94 | 94 | ||
95 | 95 | ||
96 | #endif /* __KERNEL__ */ | 96 | #endif /* __KERNEL__ */ |
97 | #endif /* _ASM_POWERPC_PTE_44x_H */ | 97 | #endif /* _ASM_POWERPC_NOHASH_32_PTE_44x_H */ |
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h index a0e2ba960976..3742b1919661 100644 --- a/arch/powerpc/include/asm/pte-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_POWERPC_PTE_8xx_H | 1 | #ifndef _ASM_POWERPC_NOHASH_32_PTE_8xx_H |
2 | #define _ASM_POWERPC_PTE_8xx_H | 2 | #define _ASM_POWERPC_NOHASH_32_PTE_8xx_H |
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | /* | 5 | /* |
@@ -62,4 +62,4 @@ | |||
62 | _PAGE_HWWRITE | _PAGE_EXEC) | 62 | _PAGE_HWWRITE | _PAGE_EXEC) |
63 | 63 | ||
64 | #endif /* __KERNEL__ */ | 64 | #endif /* __KERNEL__ */ |
65 | #endif /* _ASM_POWERPC_PTE_8xx_H */ | 65 | #endif /* _ASM_POWERPC_NOHASH_32_PTE_8xx_H */ |
diff --git a/arch/powerpc/include/asm/pte-fsl-booke.h b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h index 9f5c3d04a1a3..5422d00c6145 100644 --- a/arch/powerpc/include/asm/pte-fsl-booke.h +++ b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_POWERPC_PTE_FSL_BOOKE_H | 1 | #ifndef _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H |
2 | #define _ASM_POWERPC_PTE_FSL_BOOKE_H | 2 | #define _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H |
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | /* PTE bit definitions for Freescale BookE SW loaded TLB MMU based | 5 | /* PTE bit definitions for Freescale BookE SW loaded TLB MMU based |
@@ -37,4 +37,4 @@ | |||
37 | #define PTE_WIMGE_SHIFT (6) | 37 | #define PTE_WIMGE_SHIFT (6) |
38 | 38 | ||
39 | #endif /* __KERNEL__ */ | 39 | #endif /* __KERNEL__ */ |
40 | #endif /* _ASM_POWERPC_PTE_FSL_BOOKE_H */ | 40 | #endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H */ |
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h index 132ee1d482c2..fc7d51753f81 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_POWERPC_PGTABLE_PPC64_4K_H | 1 | #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H |
2 | #define _ASM_POWERPC_PGTABLE_PPC64_4K_H | 2 | #define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H |
3 | /* | 3 | /* |
4 | * Entries per page directory level. The PTE level must use a 64b record | 4 | * Entries per page directory level. The PTE level must use a 64b record |
5 | * for each page table entry. The PMD and PGD level use a 32b record for | 5 | * for each page table entry. The PMD and PGD level use a 32b record for |
@@ -55,11 +55,15 @@ | |||
55 | #define pgd_none(pgd) (!pgd_val(pgd)) | 55 | #define pgd_none(pgd) (!pgd_val(pgd)) |
56 | #define pgd_bad(pgd) (pgd_val(pgd) == 0) | 56 | #define pgd_bad(pgd) (pgd_val(pgd) == 0) |
57 | #define pgd_present(pgd) (pgd_val(pgd) != 0) | 57 | #define pgd_present(pgd) (pgd_val(pgd) != 0) |
58 | #define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0) | ||
59 | #define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) | 58 | #define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) |
60 | 59 | ||
61 | #ifndef __ASSEMBLY__ | 60 | #ifndef __ASSEMBLY__ |
62 | 61 | ||
62 | static inline void pgd_clear(pgd_t *pgdp) | ||
63 | { | ||
64 | *pgdp = __pgd(0); | ||
65 | } | ||
66 | |||
63 | static inline pte_t pgd_pte(pgd_t pgd) | 67 | static inline pte_t pgd_pte(pgd_t pgd) |
64 | { | 68 | { |
65 | return __pte(pgd_val(pgd)); | 69 | return __pte(pgd_val(pgd)); |
@@ -85,4 +89,4 @@ extern struct page *pgd_page(pgd_t pgd); | |||
85 | #define remap_4k_pfn(vma, addr, pfn, prot) \ | 89 | #define remap_4k_pfn(vma, addr, pfn, prot) \ |
86 | remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) | 90 | remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) |
87 | 91 | ||
88 | #endif /* _ASM_POWERPC_PGTABLE_PPC64_4K_H */ | 92 | #endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H */ |
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h index 1de35bbd02a6..570fb30be21c 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_POWERPC_PGTABLE_PPC64_64K_H | 1 | #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H |
2 | #define _ASM_POWERPC_PGTABLE_PPC64_64K_H | 2 | #define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H |
3 | 3 | ||
4 | #include <asm-generic/pgtable-nopud.h> | 4 | #include <asm-generic/pgtable-nopud.h> |
5 | 5 | ||
@@ -9,8 +9,19 @@ | |||
9 | #define PUD_INDEX_SIZE 0 | 9 | #define PUD_INDEX_SIZE 0 |
10 | #define PGD_INDEX_SIZE 12 | 10 | #define PGD_INDEX_SIZE 12 |
11 | 11 | ||
12 | /* | ||
13 | * we support 32 fragments per PTE page of 64K size | ||
14 | */ | ||
15 | #define PTE_FRAG_NR 32 | ||
16 | /* | ||
17 | * We use a 2K PTE page fragment and another 2K for storing | ||
18 | * real_pte_t hash index | ||
19 | */ | ||
20 | #define PTE_FRAG_SIZE_SHIFT 11 | ||
21 | #define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT) | ||
22 | |||
12 | #ifndef __ASSEMBLY__ | 23 | #ifndef __ASSEMBLY__ |
13 | #define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE) | 24 | #define PTE_TABLE_SIZE PTE_FRAG_SIZE |
14 | #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) | 25 | #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) |
15 | #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) | 26 | #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) |
16 | #endif /* __ASSEMBLY__ */ | 27 | #endif /* __ASSEMBLY__ */ |
@@ -32,13 +43,15 @@ | |||
32 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | 43 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
33 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 44 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
34 | 45 | ||
35 | /* Bits to mask out from a PMD to get to the PTE page */ | 46 | /* |
36 | /* PMDs point to PTE table fragments which are 4K aligned. */ | 47 | * Bits to mask out from a PMD to get to the PTE page |
37 | #define PMD_MASKED_BITS 0xfff | 48 | * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned. |
49 | */ | ||
50 | #define PMD_MASKED_BITS (PTE_FRAG_SIZE - 1) | ||
38 | /* Bits to mask out from a PGD/PUD to get to the PMD page */ | 51 | /* Bits to mask out from a PGD/PUD to get to the PMD page */ |
39 | #define PUD_MASKED_BITS 0x1ff | 52 | #define PUD_MASKED_BITS 0x1ff |
40 | 53 | ||
41 | #define pgd_pte(pgd) (pud_pte(((pud_t){ pgd }))) | 54 | #define pgd_pte(pgd) (pud_pte(((pud_t){ pgd }))) |
42 | #define pte_pgd(pte) ((pgd_t)pte_pud(pte)) | 55 | #define pte_pgd(pte) ((pgd_t)pte_pud(pte)) |
43 | 56 | ||
44 | #endif /* _ASM_POWERPC_PGTABLE_PPC64_64K_H */ | 57 | #endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H */ |
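The fragment constants introduced here are mutually consistent: PTE_FRAG_SIZE is 1 << 11 = 2KiB, so PTE_FRAG_NR = 32 fragments tile one 64K page exactly; PMD_MASKED_BITS = PTE_FRAG_SIZE - 1 masks the within-fragment offset; and a 2K fragment holds a full 256-entry table of 8-byte PTEs (assuming the usual PTE_INDEX_SIZE of 8 for 64K pages, which this hunk does not show), which is why PTE_TABLE_SIZE can simply become PTE_FRAG_SIZE. A standalone check of the arithmetic:

    /* Consistency check for the constants in this hunk; PTE_INDEX_SIZE
     * is an assumed value, not part of the patch. */
    #define PTE_FRAG_SIZE_SHIFT 11
    #define PTE_FRAG_SIZE       (1UL << PTE_FRAG_SIZE_SHIFT)    /* 2048 */
    #define PTE_FRAG_NR         32
    #define PTE_INDEX_SIZE      8

    _Static_assert(PTE_FRAG_NR * PTE_FRAG_SIZE == (1UL << 16),
                   "32 x 2K fragments tile one 64K page");
    _Static_assert((8UL << PTE_INDEX_SIZE) == PTE_FRAG_SIZE,
                   "256 eight-byte PTEs fit one fragment");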
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 3245f2d96d4f..b9f734dd5b81 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h | |||
@@ -1,14 +1,14 @@ | |||
1 | #ifndef _ASM_POWERPC_PGTABLE_PPC64_H_ | 1 | #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_H |
2 | #define _ASM_POWERPC_PGTABLE_PPC64_H_ | 2 | #define _ASM_POWERPC_NOHASH_64_PGTABLE_H |
3 | /* | 3 | /* |
4 | * This file contains the functions and defines necessary to modify and use | 4 | * This file contains the functions and defines necessary to modify and use |
5 | * the ppc64 hashed page table. | 5 | * the ppc64 hashed page table. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifdef CONFIG_PPC_64K_PAGES | 8 | #ifdef CONFIG_PPC_64K_PAGES |
9 | #include <asm/pgtable-ppc64-64k.h> | 9 | #include <asm/nohash/64/pgtable-64k.h> |
10 | #else | 10 | #else |
11 | #include <asm/pgtable-ppc64-4k.h> | 11 | #include <asm/nohash/64/pgtable-4k.h> |
12 | #endif | 12 | #endif |
13 | #include <asm/barrier.h> | 13 | #include <asm/barrier.h> |
14 | 14 | ||
@@ -18,7 +18,7 @@ | |||
18 | * Size of EA range mapped by our pagetables. | 18 | * Size of EA range mapped by our pagetables. |
19 | */ | 19 | */ |
20 | #define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ | 20 | #define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ |
21 | PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) | 21 | PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) |
22 | #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE) | 22 | #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE) |
23 | 23 | ||
24 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 24 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
@@ -97,11 +97,7 @@ | |||
97 | /* | 97 | /* |
98 | * Include the PTE bits definitions | 98 | * Include the PTE bits definitions |
99 | */ | 99 | */ |
100 | #ifdef CONFIG_PPC_BOOK3S | 100 | #include <asm/nohash/pte-book3e.h> |
101 | #include <asm/pte-hash64.h> | ||
102 | #else | ||
103 | #include <asm/pte-book3e.h> | ||
104 | #endif | ||
105 | #include <asm/pte-common.h> | 101 | #include <asm/pte-common.h> |
106 | 102 | ||
107 | #ifdef CONFIG_PPC_MM_SLICES | 103 | #ifdef CONFIG_PPC_MM_SLICES |
@@ -110,59 +106,47 @@ | |||
110 | #endif /* CONFIG_PPC_MM_SLICES */ | 106 | #endif /* CONFIG_PPC_MM_SLICES */ |
111 | 107 | ||
112 | #ifndef __ASSEMBLY__ | 108 | #ifndef __ASSEMBLY__ |
113 | |||
114 | /* | ||
115 | * This is the default implementation of various PTE accessors, it's | ||
116 | * used in all cases except Book3S with 64K pages where we have a | ||
117 | * concept of sub-pages | ||
118 | */ | ||
119 | #ifndef __real_pte | ||
120 | |||
121 | #ifdef CONFIG_STRICT_MM_TYPECHECKS | ||
122 | #define __real_pte(e,p) ((real_pte_t){(e)}) | ||
123 | #define __rpte_to_pte(r) ((r).pte) | ||
124 | #else | ||
125 | #define __real_pte(e,p) (e) | ||
126 | #define __rpte_to_pte(r) (__pte(r)) | ||
127 | #endif | ||
128 | #define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> 12) | ||
129 | |||
130 | #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ | ||
131 | do { \ | ||
132 | index = 0; \ | ||
133 | shift = mmu_psize_defs[psize].shift; \ | ||
134 | |||
135 | #define pte_iterate_hashed_end() } while(0) | ||
136 | |||
137 | /* | ||
138 | * We expect this to be called only for user addresses or kernel virtual | ||
139 | * addresses other than the linear mapping. | ||
140 | */ | ||
141 | #define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K | ||
142 | |||
143 | #endif /* __real_pte */ | ||
144 | |||
145 | |||
146 | /* pte_clear moved to later in this file */ | 109 | /* pte_clear moved to later in this file */ |
147 | 110 | ||
148 | #define PMD_BAD_BITS (PTE_TABLE_SIZE-1) | 111 | #define PMD_BAD_BITS (PTE_TABLE_SIZE-1) |
149 | #define PUD_BAD_BITS (PMD_TABLE_SIZE-1) | 112 | #define PUD_BAD_BITS (PMD_TABLE_SIZE-1) |
150 | 113 | ||
151 | #define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval)) | 114 | static inline void pmd_set(pmd_t *pmdp, unsigned long val) |
115 | { | ||
116 | *pmdp = __pmd(val); | ||
117 | } | ||
118 | |||
119 | static inline void pmd_clear(pmd_t *pmdp) | ||
120 | { | ||
121 | *pmdp = __pmd(0); | ||
122 | } | ||
123 | |||
124 | static inline pte_t pmd_pte(pmd_t pmd) | ||
125 | { | ||
126 | return __pte(pmd_val(pmd)); | ||
127 | } | ||
128 | |||
152 | #define pmd_none(pmd) (!pmd_val(pmd)) | 129 | #define pmd_none(pmd) (!pmd_val(pmd)) |
153 | #define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ | 130 | #define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ |
154 | || (pmd_val(pmd) & PMD_BAD_BITS)) | 131 | || (pmd_val(pmd) & PMD_BAD_BITS)) |
155 | #define pmd_present(pmd) (!pmd_none(pmd)) | 132 | #define pmd_present(pmd) (!pmd_none(pmd)) |
156 | #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) | ||
157 | #define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) | 133 | #define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) |
158 | extern struct page *pmd_page(pmd_t pmd); | 134 | extern struct page *pmd_page(pmd_t pmd); |
159 | 135 | ||
160 | #define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval)) | 136 | static inline void pud_set(pud_t *pudp, unsigned long val) |
137 | { | ||
138 | *pudp = __pud(val); | ||
139 | } | ||
140 | |||
141 | static inline void pud_clear(pud_t *pudp) | ||
142 | { | ||
143 | *pudp = __pud(0); | ||
144 | } | ||
145 | |||
161 | #define pud_none(pud) (!pud_val(pud)) | 146 | #define pud_none(pud) (!pud_val(pud)) |
162 | #define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ | 147 | #define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ |
163 | || (pud_val(pud) & PUD_BAD_BITS)) | 148 | || (pud_val(pud) & PUD_BAD_BITS)) |
164 | #define pud_present(pud) (pud_val(pud) != 0) | 149 | #define pud_present(pud) (pud_val(pud) != 0) |
165 | #define pud_clear(pudp) (pud_val(*(pudp)) = 0) | ||
166 | #define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) | 150 | #define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) |
167 | 151 | ||
168 | extern struct page *pud_page(pud_t pud); | 152 | extern struct page *pud_page(pud_t pud); |
@@ -177,9 +161,13 @@ static inline pud_t pte_pud(pte_t pte) | |||
177 | return __pud(pte_val(pte)); | 161 | return __pud(pte_val(pte)); |
178 | } | 162 | } |
179 | #define pud_write(pud) pte_write(pud_pte(pud)) | 163 | #define pud_write(pud) pte_write(pud_pte(pud)) |
180 | #define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);}) | ||
181 | #define pgd_write(pgd) pte_write(pgd_pte(pgd)) | 164 | #define pgd_write(pgd) pte_write(pgd_pte(pgd)) |
182 | 165 | ||
166 | static inline void pgd_set(pgd_t *pgdp, unsigned long val) | ||
167 | { | ||
168 | *pgdp = __pgd(val); | ||
169 | } | ||
170 | |||
183 | /* | 171 | /* |
184 | * Find an entry in a page-table-directory. We combine the address region | 172 | * Find an entry in a page-table-directory. We combine the address region |
185 | * (the high order N bits) and the pgd portion of the address. | 173 | * (the high order N bits) and the pgd portion of the address. |
@@ -373,254 +361,4 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); | |||
373 | void pgtable_cache_init(void); | 361 | void pgtable_cache_init(void); |
374 | #endif /* __ASSEMBLY__ */ | 362 | #endif /* __ASSEMBLY__ */ |
375 | 363 | ||
376 | /* | 364 | #endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */ |
377 | * THP pages can't be special. So use the _PAGE_SPECIAL | ||
378 | */ | ||
379 | #define _PAGE_SPLITTING _PAGE_SPECIAL | ||
380 | |||
381 | /* | ||
382 | * We need to differentiate between explicit huge page and THP huge | ||
383 | * page, since THP huge page also need to track real subpage details | ||
384 | */ | ||
385 | #define _PAGE_THP_HUGE _PAGE_4K_PFN | ||
386 | |||
387 | /* | ||
388 | * set of bits not changed in pmd_modify. | ||
389 | */ | ||
390 | #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | \ | ||
391 | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \ | ||
392 | _PAGE_THP_HUGE) | ||
393 | |||
394 | #ifndef __ASSEMBLY__ | ||
395 | /* | ||
396 | * The linux hugepage PMD now include the pmd entries followed by the address | ||
397 | * to the stashed pgtable_t. The stashed pgtable_t contains the hpte bits. | ||
398 | * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000]. We use one byte per | ||
399 | * each HPTE entry. With 16MB hugepage and 64K HPTE we need 256 entries and | ||
400 | * with 4K HPTE we need 4096 entries. Both will fit in a 4K pgtable_t. | ||
401 | * | ||
402 | * The last three bits are intentionally left to zero. This memory location | ||
403 | * are also used as normal page PTE pointers. So if we have any pointers | ||
404 | * left around while we collapse a hugepage, we need to make sure | ||
405 | * _PAGE_PRESENT bit of that is zero when we look at them | ||
406 | */ | ||
407 | static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index) | ||
408 | { | ||
409 | return (hpte_slot_array[index] >> 3) & 0x1; | ||
410 | } | ||
411 | |||
412 | static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array, | ||
413 | int index) | ||
414 | { | ||
415 | return hpte_slot_array[index] >> 4; | ||
416 | } | ||
417 | |||
418 | static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array, | ||
419 | unsigned int index, unsigned int hidx) | ||
420 | { | ||
421 | hpte_slot_array[index] = hidx << 4 | 0x1 << 3; | ||
422 | } | ||
423 | |||
424 | struct page *realmode_pfn_to_page(unsigned long pfn); | ||
425 | |||
426 | static inline char *get_hpte_slot_array(pmd_t *pmdp) | ||
427 | { | ||
428 | /* | ||
429 | * The hpte hindex is stored in the pgtable whose address is in the | ||
430 | * second half of the PMD | ||
431 | * | ||
432 | * Order this load with the test for pmd_trans_huge in the caller | ||
433 | */ | ||
434 | smp_rmb(); | ||
435 | return *(char **)(pmdp + PTRS_PER_PMD); | ||
436 | |||
437 | |||
438 | } | ||
439 | |||
440 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
441 | extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, | ||
442 | pmd_t *pmdp, unsigned long old_pmd); | ||
443 | extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot); | ||
444 | extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot); | ||
445 | extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); | ||
446 | extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, | ||
447 | pmd_t *pmdp, pmd_t pmd); | ||
448 | extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, | ||
449 | pmd_t *pmd); | ||
450 | /* | ||
451 | * | ||
452 | * For core kernel code by design pmd_trans_huge is never run on any hugetlbfs | ||
453 | * page. The hugetlbfs page table walking and mangling paths are totally | ||
454 | * separated from the core VM paths and they're differentiated by | ||
455 | * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run. | ||
456 | * | ||
457 | * pmd_trans_huge() is defined as false at build time if | ||
458 | * CONFIG_TRANSPARENT_HUGEPAGE=n to optimize away code blocks at build | ||
459 | * time in such case. | ||
460 | * | ||
461 | * For ppc64 we need to differentiate explicit hugepages from THP, because | ||
462 | * for THP we also track the subpage details at the pmd level. We don't do | ||
463 | * that for explicit huge pages. | ||
464 | * | ||
465 | */ | ||
466 | static inline int pmd_trans_huge(pmd_t pmd) | ||
467 | { | ||
468 | /* | ||
469 | * leaf pte for huge page, bottom two bits != 00 | ||
470 | */ | ||
471 | return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE); | ||
472 | } | ||
473 | |||
474 | static inline int pmd_trans_splitting(pmd_t pmd) | ||
475 | { | ||
476 | if (pmd_trans_huge(pmd)) | ||
477 | return pmd_val(pmd) & _PAGE_SPLITTING; | ||
478 | return 0; | ||
479 | } | ||
480 | |||
481 | extern int has_transparent_hugepage(void); | ||
482 | #else | ||
483 | static inline void hpte_do_hugepage_flush(struct mm_struct *mm, | ||
484 | unsigned long addr, pmd_t *pmdp, | ||
485 | unsigned long old_pmd) | ||
486 | { | ||
487 | |||
488 | WARN(1, "%s called with THP disabled\n", __func__); | ||
489 | } | ||
490 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | ||
491 | |||
492 | static inline int pmd_large(pmd_t pmd) | ||
493 | { | ||
494 | /* | ||
495 | * leaf pte for huge page, bottom two bits != 00 | ||
496 | */ | ||
497 | return ((pmd_val(pmd) & 0x3) != 0x0); | ||
498 | } | ||
499 | |||
500 | static inline pte_t pmd_pte(pmd_t pmd) | ||
501 | { | ||
502 | return __pte(pmd_val(pmd)); | ||
503 | } | ||
504 | |||
505 | static inline pmd_t pte_pmd(pte_t pte) | ||
506 | { | ||
507 | return __pmd(pte_val(pte)); | ||
508 | } | ||
509 | |||
510 | static inline pte_t *pmdp_ptep(pmd_t *pmd) | ||
511 | { | ||
512 | return (pte_t *)pmd; | ||
513 | } | ||
514 | |||
515 | #define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd)) | ||
516 | #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) | ||
517 | #define pmd_young(pmd) pte_young(pmd_pte(pmd)) | ||
518 | #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) | ||
519 | #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) | ||
520 | #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) | ||
521 | #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) | ||
522 | #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) | ||
523 | |||
524 | #define __HAVE_ARCH_PMD_WRITE | ||
525 | #define pmd_write(pmd) pte_write(pmd_pte(pmd)) | ||
526 | |||
527 | static inline pmd_t pmd_mkhuge(pmd_t pmd) | ||
528 | { | ||
529 | /* Do nothing, mk_pmd() does this part. */ | ||
530 | return pmd; | ||
531 | } | ||
532 | |||
533 | static inline pmd_t pmd_mknotpresent(pmd_t pmd) | ||
534 | { | ||
535 | pmd_val(pmd) &= ~_PAGE_PRESENT; | ||
536 | return pmd; | ||
537 | } | ||
538 | |||
539 | static inline pmd_t pmd_mksplitting(pmd_t pmd) | ||
540 | { | ||
541 | pmd_val(pmd) |= _PAGE_SPLITTING; | ||
542 | return pmd; | ||
543 | } | ||
544 | |||
545 | #define __HAVE_ARCH_PMD_SAME | ||
546 | static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) | ||
547 | { | ||
548 | return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0); | ||
549 | } | ||
550 | |||
551 | #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS | ||
552 | extern int pmdp_set_access_flags(struct vm_area_struct *vma, | ||
553 | unsigned long address, pmd_t *pmdp, | ||
554 | pmd_t entry, int dirty); | ||
555 | |||
556 | extern unsigned long pmd_hugepage_update(struct mm_struct *mm, | ||
557 | unsigned long addr, | ||
558 | pmd_t *pmdp, | ||
559 | unsigned long clr, | ||
560 | unsigned long set); | ||
561 | |||
562 | static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, | ||
563 | unsigned long addr, pmd_t *pmdp) | ||
564 | { | ||
565 | unsigned long old; | ||
566 | |||
567 | if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) | ||
568 | return 0; | ||
569 | old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); | ||
570 | return ((old & _PAGE_ACCESSED) != 0); | ||
571 | } | ||
572 | |||
573 | #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG | ||
574 | extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, | ||
575 | unsigned long address, pmd_t *pmdp); | ||
576 | #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH | ||
577 | extern int pmdp_clear_flush_young(struct vm_area_struct *vma, | ||
578 | unsigned long address, pmd_t *pmdp); | ||
579 | |||
580 | #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR | ||
581 | extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, | ||
582 | unsigned long addr, pmd_t *pmdp); | ||
583 | |||
584 | #define __HAVE_ARCH_PMDP_SET_WRPROTECT | ||
585 | static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, | ||
586 | pmd_t *pmdp) | ||
587 | { | ||
588 | |||
589 | if ((pmd_val(*pmdp) & _PAGE_RW) == 0) | ||
590 | return; | ||
591 | |||
592 | pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0); | ||
593 | } | ||
594 | |||
595 | #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH | ||
596 | extern void pmdp_splitting_flush(struct vm_area_struct *vma, | ||
597 | unsigned long address, pmd_t *pmdp); | ||
598 | |||
599 | extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, | ||
600 | unsigned long address, pmd_t *pmdp); | ||
601 | #define pmdp_collapse_flush pmdp_collapse_flush | ||
602 | |||
603 | #define __HAVE_ARCH_PGTABLE_DEPOSIT | ||
604 | extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, | ||
605 | pgtable_t pgtable); | ||
606 | #define __HAVE_ARCH_PGTABLE_WITHDRAW | ||
607 | extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); | ||
608 | |||
609 | #define __HAVE_ARCH_PMDP_INVALIDATE | ||
610 | extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, | ||
611 | pmd_t *pmdp); | ||
612 | |||
613 | #define pmd_move_must_withdraw pmd_move_must_withdraw | ||
614 | struct spinlock; | ||
615 | static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, | ||
616 | struct spinlock *old_pmd_ptl) | ||
617 | { | ||
618 | /* | ||
619 | * Archs like ppc64 use the pgtable to store per-pmd | ||
620 | * specific information. So when we move the pmd, | ||
621 | * we should also withdraw and deposit the pgtable. | ||
622 | */ | ||
623 | return true; | ||
624 | } | ||
625 | #endif /* __ASSEMBLY__ */ | ||
626 | #endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */ | ||
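
For context, a minimal sketch of how the generic THP code is expected to pair pmd_move_must_withdraw() with the deposit/withdraw hooks above when a huge pmd is moved (modelled on mm/huge_memory.c; the function and parameter names are illustrative, not part of this diff):

	static void move_huge_pmd_sketch(struct mm_struct *mm,
					 spinlock_t *old_ptl, spinlock_t *new_ptl,
					 pmd_t *old_pmdp, pmd_t *new_pmdp)
	{
		if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
			/* The deposited page table travels with the pmd so a
			 * later split can still withdraw it at the new spot. */
			pgtable_t pgtable = pgtable_trans_huge_withdraw(mm, old_pmdp);
			pgtable_trans_huge_deposit(mm, new_pmdp, pgtable);
		}
	}
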
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h new file mode 100644 index 000000000000..1263c22d60d8 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/pgtable.h | |||
@@ -0,0 +1,252 @@ | |||
1 | #ifndef _ASM_POWERPC_NOHASH_PGTABLE_H | ||
2 | #define _ASM_POWERPC_NOHASH_PGTABLE_H | ||
3 | |||
4 | #if defined(CONFIG_PPC64) | ||
5 | #include <asm/nohash/64/pgtable.h> | ||
6 | #else | ||
7 | #include <asm/nohash/32/pgtable.h> | ||
8 | #endif | ||
9 | |||
10 | #ifndef __ASSEMBLY__ | ||
11 | |||
12 | /* Generic accessors to PTE bits */ | ||
13 | static inline int pte_write(pte_t pte) | ||
14 | { | ||
15 | return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; | ||
16 | } | ||
17 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } | ||
18 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | ||
19 | static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } | ||
20 | static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } | ||
21 | static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } | ||
22 | |||
23 | #ifdef CONFIG_NUMA_BALANCING | ||
24 | /* | ||
25 | * These work without NUMA balancing but the kernel does not care. See the | ||
26 | * comment in include/asm-generic/pgtable.h . On powerpc, this will only | ||
27 | * work for user pages and always return true for kernel pages. | ||
28 | */ | ||
29 | static inline int pte_protnone(pte_t pte) | ||
30 | { | ||
31 | return (pte_val(pte) & | ||
32 | (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT; | ||
33 | } | ||
34 | |||
35 | static inline int pmd_protnone(pmd_t pmd) | ||
36 | { | ||
37 | return pte_protnone(pmd_pte(pmd)); | ||
38 | } | ||
39 | #endif /* CONFIG_NUMA_BALANCING */ | ||
40 | |||
41 | static inline int pte_present(pte_t pte) | ||
42 | { | ||
43 | return pte_val(pte) & _PAGE_PRESENT; | ||
44 | } | ||
45 | |||
46 | /* Conversion functions: convert a page and protection to a page entry, | ||
47 | * and a page entry and page directory to the page they refer to. | ||
48 | * | ||
49 | * Even if PTEs can be unsigned long long, a PFN is always an unsigned | ||
50 | * long for now. | ||
51 | */ | ||
52 | static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) { | ||
53 | return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | | ||
54 | pgprot_val(pgprot)); } | ||
55 | static inline unsigned long pte_pfn(pte_t pte) { | ||
56 | return pte_val(pte) >> PTE_RPN_SHIFT; } | ||
57 | |||
58 | /* Generic modifiers for PTE bits */ | ||
59 | static inline pte_t pte_wrprotect(pte_t pte) | ||
60 | { | ||
61 | pte_basic_t ptev; | ||
62 | |||
63 | ptev = pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE); | ||
64 | ptev |= _PAGE_RO; | ||
65 | return __pte(ptev); | ||
66 | } | ||
67 | |||
68 | static inline pte_t pte_mkclean(pte_t pte) | ||
69 | { | ||
70 | return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE)); | ||
71 | } | ||
72 | |||
73 | static inline pte_t pte_mkold(pte_t pte) | ||
74 | { | ||
75 | return __pte(pte_val(pte) & ~_PAGE_ACCESSED); | ||
76 | } | ||
77 | |||
78 | static inline pte_t pte_mkwrite(pte_t pte) | ||
79 | { | ||
80 | pte_basic_t ptev; | ||
81 | |||
82 | ptev = pte_val(pte) & ~_PAGE_RO; | ||
83 | ptev |= _PAGE_RW; | ||
84 | return __pte(ptev); | ||
85 | } | ||
86 | |||
87 | static inline pte_t pte_mkdirty(pte_t pte) | ||
88 | { | ||
89 | return __pte(pte_val(pte) | _PAGE_DIRTY); | ||
90 | } | ||
91 | |||
92 | static inline pte_t pte_mkyoung(pte_t pte) | ||
93 | { | ||
94 | return __pte(pte_val(pte) | _PAGE_ACCESSED); | ||
95 | } | ||
96 | |||
97 | static inline pte_t pte_mkspecial(pte_t pte) | ||
98 | { | ||
99 | return __pte(pte_val(pte) | _PAGE_SPECIAL); | ||
100 | } | ||
101 | |||
102 | static inline pte_t pte_mkhuge(pte_t pte) | ||
103 | { | ||
104 | return pte; | ||
105 | } | ||
106 | |||
107 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
108 | { | ||
109 | return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); | ||
110 | } | ||
111 | |||
112 | /* Insert a PTE: the top-level function is out of line. It uses an inline | ||
113 | * low-level function in the respective pgtable-* files. | ||
114 | */ | ||
115 | extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, | ||
116 | pte_t pte); | ||
117 | |||
118 | /* This low-level function performs the actual PTE insertion. | ||
119 | * Setting the PTE depends on the MMU type and other factors. It's | ||
120 | * a horrible mess that I'm not going to try to clean up now, but | ||
121 | * I'm keeping it in one place rather than spread around. | ||
122 | */ | ||
123 | static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | ||
124 | pte_t *ptep, pte_t pte, int percpu) | ||
125 | { | ||
126 | #if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) | ||
127 | /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the | ||
128 | * helper pte_update() which does an atomic update. We need to do that | ||
129 | * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a | ||
130 | * per-CPU PTE such as a kmap_atomic, we do a simple update preserving | ||
131 | * the hash bits instead (ie, same as the non-SMP case) | ||
132 | */ | ||
133 | if (percpu) | ||
134 | *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | ||
135 | | (pte_val(pte) & ~_PAGE_HASHPTE)); | ||
136 | else | ||
137 | pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); | ||
138 | |||
139 | #elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) | ||
140 | /* Second case is 32-bit with 64-bit PTE. In this case, we | ||
141 | * can just store as long as we do the two halves in the right order | ||
142 | * with a barrier in between. This is possible because we take care, | ||
143 | * in the hash code, to pre-invalidate if the PTE was already hashed, | ||
144 | * which synchronizes us with any concurrent invalidation. | ||
145 | * In the percpu case, we also fall back to the simple update preserving | ||
146 | * the hash bits. | ||
147 | */ | ||
148 | if (percpu) { | ||
149 | *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | ||
150 | | (pte_val(pte) & ~_PAGE_HASHPTE)); | ||
151 | return; | ||
152 | } | ||
153 | #if _PAGE_HASHPTE != 0 | ||
154 | if (pte_val(*ptep) & _PAGE_HASHPTE) | ||
155 | flush_hash_entry(mm, ptep, addr); | ||
156 | #endif | ||
157 | __asm__ __volatile__("\ | ||
158 | stw%U0%X0 %2,%0\n\ | ||
159 | eieio\n\ | ||
160 | stw%U0%X0 %L2,%1" | ||
161 | : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) | ||
162 | : "r" (pte) : "memory"); | ||
163 | |||
164 | #elif defined(CONFIG_PPC_STD_MMU_32) | ||
165 | /* Third case is a 32-bit hash table in UP mode; we need to preserve | ||
166 | * the _PAGE_HASHPTE bit since we may not have invalidated the previous | ||
167 | * translation in the hash yet (done in a subsequent flush_tlb_xxx()), | ||
168 | * so we need to keep track that this PTE needs invalidating. | ||
169 | */ | ||
170 | *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | ||
171 | | (pte_val(pte) & ~_PAGE_HASHPTE)); | ||
172 | |||
173 | #else | ||
174 | /* Anything else just stores the PTE normally. That covers all 64-bit | ||
175 | * cases, and 32-bit non-hash with 32-bit PTEs. | ||
176 | */ | ||
177 | *ptep = pte; | ||
178 | |||
179 | #ifdef CONFIG_PPC_BOOK3E_64 | ||
180 | /* | ||
181 | * With hardware tablewalk, a sync is needed to ensure that | ||
182 | * subsequent accesses see the PTE we just wrote. Unlike userspace | ||
183 | * mappings, we can't tolerate spurious faults, so make sure | ||
184 | * the new PTE will be seen the first time. | ||
185 | */ | ||
186 | if (is_kernel_addr(addr)) | ||
187 | mb(); | ||
188 | #endif | ||
189 | #endif | ||
190 | } | ||
191 | |||
192 | |||
193 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | ||
194 | extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, | ||
195 | pte_t *ptep, pte_t entry, int dirty); | ||
196 | |||
197 | /* | ||
198 | * Macro to mark a page protection value as "uncacheable". | ||
199 | */ | ||
200 | |||
201 | #define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \ | ||
202 | _PAGE_WRITETHRU) | ||
203 | |||
204 | #define pgprot_noncached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ | ||
205 | _PAGE_NO_CACHE | _PAGE_GUARDED)) | ||
206 | |||
207 | #define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ | ||
208 | _PAGE_NO_CACHE)) | ||
209 | |||
210 | #define pgprot_cached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ | ||
211 | _PAGE_COHERENT)) | ||
212 | |||
213 | #define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ | ||
214 | _PAGE_COHERENT | _PAGE_WRITETHRU)) | ||
215 | |||
216 | #define pgprot_cached_noncoherent(prot) \ | ||
217 | (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL)) | ||
218 | |||
219 | #define pgprot_writecombine pgprot_noncached_wc | ||
220 | |||
221 | struct file; | ||
222 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
223 | unsigned long size, pgprot_t vma_prot); | ||
224 | #define __HAVE_PHYS_MEM_ACCESS_PROT | ||
225 | |||
226 | #ifdef CONFIG_HUGETLB_PAGE | ||
227 | static inline int hugepd_ok(hugepd_t hpd) | ||
228 | { | ||
229 | return (hpd.pd > 0); | ||
230 | } | ||
231 | |||
232 | static inline int pmd_huge(pmd_t pmd) | ||
233 | { | ||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | static inline int pud_huge(pud_t pud) | ||
238 | { | ||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | static inline int pgd_huge(pgd_t pgd) | ||
243 | { | ||
244 | return 0; | ||
245 | } | ||
246 | #define pgd_huge pgd_huge | ||
247 | |||
248 | #define is_hugepd(hpd) (hugepd_ok(hpd)) | ||
249 | #endif | ||
250 | |||
251 | #endif /* __ASSEMBLY__ */ | ||
252 | #endif | ||
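
The cache-control helpers above are typically consumed by driver mmap paths. A hedged sketch (the driver name and handler are hypothetical):

	#include <linux/mm.h>

	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* Device registers must be mapped cache-inhibited + guarded. */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}
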
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h index 8d8473278d91..e16807b78edf 100644 --- a/arch/powerpc/include/asm/pte-book3e.h +++ b/arch/powerpc/include/asm/nohash/pte-book3e.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _ASM_POWERPC_PTE_BOOK3E_H | 1 | #ifndef _ASM_POWERPC_NOHASH_PTE_BOOK3E_H |
2 | #define _ASM_POWERPC_PTE_BOOK3E_H | 2 | #define _ASM_POWERPC_NOHASH_PTE_BOOK3E_H |
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | /* PTE bit definitions for processors compliant to the Book3E | 5 | /* PTE bit definitions for processors compliant to the Book3E |
@@ -84,4 +84,4 @@ | |||
84 | #endif | 84 | #endif |
85 | 85 | ||
86 | #endif /* __KERNEL__ */ | 86 | #endif /* __KERNEL__ */ |
87 | #endif /* _ASM_POWERPC_PTE_FSL_BOOKE_H */ | 87 | #endif /* _ASM_POWERPC_NOHASH_PTE_BOOK3E_H */ |
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index 8374afed9d0a..f8faaaeeca1e 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h | |||
@@ -157,7 +157,8 @@ | |||
157 | #define OPAL_LEDS_GET_INDICATOR 114 | 157 | #define OPAL_LEDS_GET_INDICATOR 114 |
158 | #define OPAL_LEDS_SET_INDICATOR 115 | 158 | #define OPAL_LEDS_SET_INDICATOR 115 |
159 | #define OPAL_CEC_REBOOT2 116 | 159 | #define OPAL_CEC_REBOOT2 116 |
160 | #define OPAL_LAST 116 | 160 | #define OPAL_CONSOLE_FLUSH 117 |
161 | #define OPAL_LAST 117 | ||
161 | 162 | ||
162 | /* Device tree flags */ | 163 | /* Device tree flags */ |
163 | 164 | ||
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 800115910e43..07a99e638449 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h | |||
@@ -35,6 +35,7 @@ int64_t opal_console_read(int64_t term_number, __be64 *length, | |||
35 | uint8_t *buffer); | 35 | uint8_t *buffer); |
36 | int64_t opal_console_write_buffer_space(int64_t term_number, | 36 | int64_t opal_console_write_buffer_space(int64_t term_number, |
37 | __be64 *length); | 37 | __be64 *length); |
38 | int64_t opal_console_flush(int64_t term_number); | ||
38 | int64_t opal_rtc_read(__be32 *year_month_day, | 39 | int64_t opal_rtc_read(__be32 *year_month_day, |
39 | __be64 *hour_minute_second_millisecond); | 40 | __be64 *hour_minute_second_millisecond); |
40 | int64_t opal_rtc_write(uint32_t year_month_day, | 41 | int64_t opal_rtc_write(uint32_t year_month_day, |
@@ -262,6 +263,8 @@ extern int opal_resync_timebase(void); | |||
262 | 263 | ||
263 | extern void opal_lpc_init(void); | 264 | extern void opal_lpc_init(void); |
264 | 265 | ||
266 | extern void opal_kmsg_init(void); | ||
267 | |||
265 | extern int opal_event_request(unsigned int opal_event_nr); | 268 | extern int opal_event_request(unsigned int opal_event_nr); |
266 | 269 | ||
267 | struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr, | 270 | struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr, |
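
The new opal_console_flush() call and opal_kmsg_init() hook fit together roughly as below. This is a hedged sketch of the registration pattern only; the real driver (arch/powerpc/platforms/powernv/opal-kmsg.c) may check the OPAL token and return codes more carefully:

	#include <linux/init.h>
	#include <linux/kmsg_dump.h>
	#include <asm/opal.h>

	static void opal_kmsg_dump(struct kmsg_dumper *dumper,
				   enum kmsg_dump_reason reason)
	{
		/* Only force out buffered console data when we are dying. */
		if (reason != KMSG_DUMP_PANIC)
			return;
		opal_console_flush(0);	/* terminal 0: firmware console */
	}

	static struct kmsg_dumper opal_kmsg_dumper = {
		.dump = opal_kmsg_dump,
	};

	void __init opal_kmsg_init(void)
	{
		kmsg_dump_register(&opal_kmsg_dumper);
	}
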
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 70bd4381f8e6..546540b91095 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #ifdef CONFIG_PPC64 | 17 | #ifdef CONFIG_PPC64 |
18 | 18 | ||
19 | #include <linux/string.h> | ||
19 | #include <asm/types.h> | 20 | #include <asm/types.h> |
20 | #include <asm/lppaca.h> | 21 | #include <asm/lppaca.h> |
21 | #include <asm/mmu.h> | 22 | #include <asm/mmu.h> |
@@ -131,7 +132,16 @@ struct paca_struct { | |||
131 | struct tlb_core_data tcd; | 132 | struct tlb_core_data tcd; |
132 | #endif /* CONFIG_PPC_BOOK3E */ | 133 | #endif /* CONFIG_PPC_BOOK3E */ |
133 | 134 | ||
134 | mm_context_t context; | 135 | #ifdef CONFIG_PPC_BOOK3S |
136 | mm_context_id_t mm_ctx_id; | ||
137 | #ifdef CONFIG_PPC_MM_SLICES | ||
138 | u64 mm_ctx_low_slices_psize; | ||
139 | unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE]; | ||
140 | #else | ||
141 | u16 mm_ctx_user_psize; | ||
142 | u16 mm_ctx_sllp; | ||
143 | #endif | ||
144 | #endif | ||
135 | 145 | ||
136 | /* | 146 | /* |
137 | * then miscellaneous read-write fields | 147 | * then miscellaneous read-write fields |
@@ -194,6 +204,23 @@ struct paca_struct { | |||
194 | #endif | 204 | #endif |
195 | }; | 205 | }; |
196 | 206 | ||
207 | #ifdef CONFIG_PPC_BOOK3S | ||
208 | static inline void copy_mm_to_paca(mm_context_t *context) | ||
209 | { | ||
210 | get_paca()->mm_ctx_id = context->id; | ||
211 | #ifdef CONFIG_PPC_MM_SLICES | ||
212 | get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize; | ||
213 | memcpy(&get_paca()->mm_ctx_high_slices_psize, | ||
214 | &context->high_slices_psize, SLICE_ARRAY_SIZE); | ||
215 | #else | ||
216 | get_paca()->mm_ctx_user_psize = context->user_psize; | ||
217 | get_paca()->mm_ctx_sllp = context->sllp; | ||
218 | #endif | ||
219 | } | ||
220 | #else | ||
221 | static inline void copy_mm_to_paca(mm_context_t *context){} | ||
222 | #endif | ||
223 | |||
197 | extern struct paca_struct *paca; | 224 | extern struct paca_struct *paca; |
198 | extern void initialise_paca(struct paca_struct *new_paca, int cpu); | 225 | extern void initialise_paca(struct paca_struct *new_paca, int cpu); |
199 | extern void setup_paca(struct paca_struct *new_paca); | 226 | extern void setup_paca(struct paca_struct *new_paca); |
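
copy_mm_to_paca() is meant to be called from the context-switch path so the SLB and hash fault handlers can read the paca instead of chasing current->mm. A hedged sketch of the call site (the function name here is illustrative):

	static inline void switch_mmu_context_sketch(struct mm_struct *next)
	{
		copy_mm_to_paca(&next->context);
		/* ...followed by the usual SLB update for the new mm... */
	}
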
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 3140c19c448c..e34124f6fbf2 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h | |||
@@ -286,8 +286,11 @@ extern long long virt_phys_offset; | |||
286 | 286 | ||
287 | /* PTE level */ | 287 | /* PTE level */ |
288 | typedef struct { pte_basic_t pte; } pte_t; | 288 | typedef struct { pte_basic_t pte; } pte_t; |
289 | #define pte_val(x) ((x).pte) | ||
290 | #define __pte(x) ((pte_t) { (x) }) | 289 | #define __pte(x) ((pte_t) { (x) }) |
290 | static inline pte_basic_t pte_val(pte_t x) | ||
291 | { | ||
292 | return x.pte; | ||
293 | } | ||
291 | 294 | ||
292 | /* 64k pages additionally define a bigger "real PTE" type that gathers | 295 | /* 64k pages additionally define a bigger "real PTE" type that gathers |
293 | * the "second half" part of the PTE for pseudo 64k pages | 296 | * the "second half" part of the PTE for pseudo 64k pages |
@@ -301,21 +304,30 @@ typedef struct { pte_t pte; } real_pte_t; | |||
301 | /* PMD level */ | 304 | /* PMD level */ |
302 | #ifdef CONFIG_PPC64 | 305 | #ifdef CONFIG_PPC64 |
303 | typedef struct { unsigned long pmd; } pmd_t; | 306 | typedef struct { unsigned long pmd; } pmd_t; |
304 | #define pmd_val(x) ((x).pmd) | ||
305 | #define __pmd(x) ((pmd_t) { (x) }) | 307 | #define __pmd(x) ((pmd_t) { (x) }) |
308 | static inline unsigned long pmd_val(pmd_t x) | ||
309 | { | ||
310 | return x.pmd; | ||
311 | } | ||
306 | 312 | ||
307 | /* PUD level exists only on 4k pages */ | 313 | /* PUD level exists only on 4k pages */
308 | #ifndef CONFIG_PPC_64K_PAGES | 314 | #ifndef CONFIG_PPC_64K_PAGES |
309 | typedef struct { unsigned long pud; } pud_t; | 315 | typedef struct { unsigned long pud; } pud_t; |
310 | #define pud_val(x) ((x).pud) | ||
311 | #define __pud(x) ((pud_t) { (x) }) | 316 | #define __pud(x) ((pud_t) { (x) }) |
317 | static inline unsigned long pud_val(pud_t x) | ||
318 | { | ||
319 | return x.pud; | ||
320 | } | ||
312 | #endif /* !CONFIG_PPC_64K_PAGES */ | 321 | #endif /* !CONFIG_PPC_64K_PAGES */ |
313 | #endif /* CONFIG_PPC64 */ | 322 | #endif /* CONFIG_PPC64 */ |
314 | 323 | ||
315 | /* PGD level */ | 324 | /* PGD level */ |
316 | typedef struct { unsigned long pgd; } pgd_t; | 325 | typedef struct { unsigned long pgd; } pgd_t; |
317 | #define pgd_val(x) ((x).pgd) | ||
318 | #define __pgd(x) ((pgd_t) { (x) }) | 326 | #define __pgd(x) ((pgd_t) { (x) }) |
327 | static inline unsigned long pgd_val(pgd_t x) | ||
328 | { | ||
329 | return x.pgd; | ||
330 | } | ||
319 | 331 | ||
320 | /* Page protection bits */ | 332 | /* Page protection bits */ |
321 | typedef struct { unsigned long pgprot; } pgprot_t; | 333 | typedef struct { unsigned long pgprot; } pgprot_t; |
@@ -329,8 +341,11 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
329 | */ | 341 | */ |
330 | 342 | ||
331 | typedef pte_basic_t pte_t; | 343 | typedef pte_basic_t pte_t; |
332 | #define pte_val(x) (x) | ||
333 | #define __pte(x) (x) | 344 | #define __pte(x) (x) |
345 | static inline pte_basic_t pte_val(pte_t pte) | ||
346 | { | ||
347 | return pte; | ||
348 | } | ||
334 | 349 | ||
335 | #if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64) | 350 | #if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64) |
336 | typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; | 351 | typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; |
@@ -341,67 +356,42 @@ typedef pte_t real_pte_t; | |||
341 | 356 | ||
342 | #ifdef CONFIG_PPC64 | 357 | #ifdef CONFIG_PPC64 |
343 | typedef unsigned long pmd_t; | 358 | typedef unsigned long pmd_t; |
344 | #define pmd_val(x) (x) | ||
345 | #define __pmd(x) (x) | 359 | #define __pmd(x) (x) |
360 | static inline unsigned long pmd_val(pmd_t pmd) | ||
361 | { | ||
362 | return pmd; | ||
363 | } | ||
346 | 364 | ||
347 | #ifndef CONFIG_PPC_64K_PAGES | 365 | #ifndef CONFIG_PPC_64K_PAGES |
348 | typedef unsigned long pud_t; | 366 | typedef unsigned long pud_t; |
349 | #define pud_val(x) (x) | ||
350 | #define __pud(x) (x) | 367 | #define __pud(x) (x) |
368 | static inline unsigned long pud_val(pud_t pud) | ||
369 | { | ||
370 | return pud; | ||
371 | } | ||
351 | #endif /* !CONFIG_PPC_64K_PAGES */ | 372 | #endif /* !CONFIG_PPC_64K_PAGES */ |
352 | #endif /* CONFIG_PPC64 */ | 373 | #endif /* CONFIG_PPC64 */ |
353 | 374 | ||
354 | typedef unsigned long pgd_t; | 375 | typedef unsigned long pgd_t; |
355 | #define pgd_val(x) (x) | 376 | #define __pgd(x) (x) |
356 | #define pgprot_val(x) (x) | 377 | static inline unsigned long pgd_val(pgd_t pgd) |
378 | { | ||
379 | return pgd; | ||
380 | } | ||
357 | 381 | ||
358 | typedef unsigned long pgprot_t; | 382 | typedef unsigned long pgprot_t; |
359 | #define __pgd(x) (x) | 383 | #define pgprot_val(x) (x) |
360 | #define __pgprot(x) (x) | 384 | #define __pgprot(x) (x) |
361 | 385 | ||
362 | #endif | 386 | #endif |
363 | 387 | ||
364 | typedef struct { signed long pd; } hugepd_t; | 388 | typedef struct { signed long pd; } hugepd_t; |
365 | 389 | ||
366 | #ifdef CONFIG_HUGETLB_PAGE | 390 | #ifndef CONFIG_HUGETLB_PAGE |
367 | #ifdef CONFIG_PPC_BOOK3S_64 | 391 | #define is_hugepd(pdep) (0) |
368 | #ifdef CONFIG_PPC_64K_PAGES | 392 | #define pgd_huge(pgd) (0) |
369 | /* | ||
370 | * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't | ||
371 | * need to set up a hugepage directory for them. Our pte and page directory format | ||
372 | * enables us to do this. But to avoid errors when implementing new | ||
373 | * features, disable hugepd for 64K. We enable a debug version here, so we catch | ||
374 | * wrong usage. | ||
375 | */ | ||
376 | #ifdef CONFIG_DEBUG_VM | ||
377 | extern int hugepd_ok(hugepd_t hpd); | ||
378 | #else | ||
379 | #define hugepd_ok(x) (0) | ||
380 | #endif | ||
381 | #else | ||
382 | static inline int hugepd_ok(hugepd_t hpd) | ||
383 | { | ||
384 | /* | ||
385 | * hugepd pointer, bottom two bits == 00 and next 4 bits | ||
386 | * indicate size of table | ||
387 | */ | ||
388 | return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)); | ||
389 | } | ||
390 | #endif | ||
391 | #else | ||
392 | static inline int hugepd_ok(hugepd_t hpd) | ||
393 | { | ||
394 | return (hpd.pd > 0); | ||
395 | } | ||
396 | #endif | ||
397 | |||
398 | #define is_hugepd(hpd) (hugepd_ok(hpd)) | ||
399 | #define pgd_huge pgd_huge | ||
400 | int pgd_huge(pgd_t pgd); | ||
401 | #else /* CONFIG_HUGETLB_PAGE */ | ||
402 | #define is_hugepd(pdep) 0 | ||
403 | #define pgd_huge(pgd) 0 | ||
404 | #endif /* CONFIG_HUGETLB_PAGE */ | 393 | #endif /* CONFIG_HUGETLB_PAGE */ |
394 | |||
405 | #define __hugepd(x) ((hugepd_t) { (x) }) | 395 | #define __hugepd(x) ((hugepd_t) { (x) }) |
406 | 396 | ||
407 | struct page; | 397 | struct page; |
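
What the macro-to-inline conversions in this hunk buy is compile-time checking: the inline returns by value, so code can no longer assign through pte_val() and friends. A hypothetical illustration of the pattern that now becomes a hard error (the old pgalloc macros, converted in the hunks below, relied on it):

	static inline void pte_val_sketch(pte_t *ptep)
	{
		unsigned long v = pte_val(*ptep);	/* fine before and after */
		/* pte_val(*ptep) |= _PAGE_DIRTY; */	/* lvalue write: now a compile error */
		(void)v;
	}
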
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 37fc53587bb4..54843ca5fa2b 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h | |||
@@ -205,6 +205,7 @@ struct pci_dn { | |||
205 | 205 | ||
206 | int pci_ext_config_space; /* for pci devices */ | 206 | int pci_ext_config_space; /* for pci devices */ |
207 | 207 | ||
208 | struct pci_dev *pcidev; /* back-pointer to the pci device */ | ||
208 | #ifdef CONFIG_EEH | 209 | #ifdef CONFIG_EEH |
209 | struct eeh_dev *edev; /* eeh device */ | 210 | struct eeh_dev *edev; /* eeh device */ |
210 | #endif | 211 | #endif |
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 3453bd8dc18f..6f8065a7d487 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h | |||
@@ -149,4 +149,8 @@ extern void pcibios_setup_phb_io_space(struct pci_controller *hose); | |||
149 | extern void pcibios_scan_phb(struct pci_controller *hose); | 149 | extern void pcibios_scan_phb(struct pci_controller *hose); |
150 | 150 | ||
151 | #endif /* __KERNEL__ */ | 151 | #endif /* __KERNEL__ */ |
152 | |||
153 | extern struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev); | ||
154 | extern struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index); | ||
155 | |||
152 | #endif /* __ASM_POWERPC_PCI_H */ | 156 | #endif /* __ASM_POWERPC_PCI_H */ |
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h index 842846c1b711..76d6b9e0c8a9 100644 --- a/arch/powerpc/include/asm/pgalloc-32.h +++ b/arch/powerpc/include/asm/pgalloc-32.h | |||
@@ -21,16 +21,34 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | |||
21 | /* #define pgd_populate(mm, pmd, pte) BUG() */ | 21 | /* #define pgd_populate(mm, pmd, pte) BUG() */ |
22 | 22 | ||
23 | #ifndef CONFIG_BOOKE | 23 | #ifndef CONFIG_BOOKE |
24 | #define pmd_populate_kernel(mm, pmd, pte) \ | 24 | |
25 | (pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT) | 25 | static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, |
26 | #define pmd_populate(mm, pmd, pte) \ | 26 | pte_t *pte) |
27 | (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT) | 27 | { |
28 | *pmdp = __pmd(__pa(pte) | _PMD_PRESENT); | ||
29 | } | ||
30 | |||
31 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, | ||
32 | pgtable_t pte_page) | ||
33 | { | ||
34 | *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT); | ||
35 | } | ||
36 | |||
28 | #define pmd_pgtable(pmd) pmd_page(pmd) | 37 | #define pmd_pgtable(pmd) pmd_page(pmd) |
29 | #else | 38 | #else |
30 | #define pmd_populate_kernel(mm, pmd, pte) \ | 39 | |
31 | (pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT) | 40 | static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, |
32 | #define pmd_populate(mm, pmd, pte) \ | 41 | pte_t *pte) |
33 | (pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT) | 42 | { |
43 | *pmdp = __pmd((unsigned long)pte | _PMD_PRESENT); | ||
44 | } | ||
45 | |||
46 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, | ||
47 | pgtable_t pte_page) | ||
48 | { | ||
49 | *pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT); | ||
50 | } | ||
51 | |||
34 | #define pmd_pgtable(pmd) pmd_page(pmd) | 52 | #define pmd_pgtable(pmd) pmd_page(pmd) |
35 | #endif | 53 | #endif |
36 | 54 | ||
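
These pgalloc conversions are a prerequisite of the pmd_val() change above: the old macro bodies assigned through pmd_val() as an lvalue, which cannot work once pmd_val() returns by value. The replacement pattern, in miniature (illustrative standalone function, mirroring the hunk rather than adding anything new):

	static inline void populate_sketch(pmd_t *pmdp, pte_t *pte)
	{
		/* Old: pmd_val(*pmdp) = __pa(pte) | _PMD_PRESENT;   (lvalue macro) */
		/* New: build the value, then store through the pointer: */
		*pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
	}
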
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index 4b0be20fcbfd..69ef28a81733 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h | |||
@@ -53,7 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
53 | 53 | ||
54 | #ifndef CONFIG_PPC_64K_PAGES | 54 | #ifndef CONFIG_PPC_64K_PAGES |
55 | 55 | ||
56 | #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD) | 56 | #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, (unsigned long)PUD) |
57 | 57 | ||
58 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | 58 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) |
59 | { | 59 | { |
@@ -71,9 +71,18 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |||
71 | pud_set(pud, (unsigned long)pmd); | 71 | pud_set(pud, (unsigned long)pmd); |
72 | } | 72 | } |
73 | 73 | ||
74 | #define pmd_populate(mm, pmd, pte_page) \ | 74 | static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, |
75 | pmd_populate_kernel(mm, pmd, page_address(pte_page)) | 75 | pte_t *pte) |
76 | #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte)) | 76 | { |
77 | pmd_set(pmd, (unsigned long)pte); | ||
78 | } | ||
79 | |||
80 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | ||
81 | pgtable_t pte_page) | ||
82 | { | ||
83 | pmd_set(pmd, (unsigned long)page_address(pte_page)); | ||
84 | } | ||
85 | |||
77 | #define pmd_pgtable(pmd) pmd_page(pmd) | 86 | #define pmd_pgtable(pmd) pmd_page(pmd) |
78 | 87 | ||
79 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 88 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
@@ -154,16 +163,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, | |||
154 | } | 163 | } |
155 | 164 | ||
156 | #else /* if CONFIG_PPC_64K_PAGES */ | 165 | #else /* if CONFIG_PPC_64K_PAGES */ |
157 | /* | ||
158 | * we support 16 fragments per PTE page. | ||
159 | */ | ||
160 | #define PTE_FRAG_NR 16 | ||
161 | /* | ||
162 | * We use a 2K PTE page fragment and another 2K for storing | ||
163 | * real_pte_t hash index | ||
164 | */ | ||
165 | #define PTE_FRAG_SIZE_SHIFT 12 | ||
166 | #define PTE_FRAG_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t)) | ||
167 | 166 | ||
168 | extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int); | 167 | extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int); |
169 | extern void page_table_free(struct mm_struct *, unsigned long *, int); | 168 | extern void page_table_free(struct mm_struct *, unsigned long *, int); |
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index b64b4212b71f..ac9fb114e25d 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h | |||
@@ -1,6 +1,5 @@ | |||
1 | #ifndef _ASM_POWERPC_PGTABLE_H | 1 | #ifndef _ASM_POWERPC_PGTABLE_H |
2 | #define _ASM_POWERPC_PGTABLE_H | 2 | #define _ASM_POWERPC_PGTABLE_H |
3 | #ifdef __KERNEL__ | ||
4 | 3 | ||
5 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
6 | #include <linux/mmdebug.h> | 5 | #include <linux/mmdebug.h> |
@@ -13,210 +12,20 @@ struct mm_struct; | |||
13 | 12 | ||
14 | #endif /* !__ASSEMBLY__ */ | 13 | #endif /* !__ASSEMBLY__ */ |
15 | 14 | ||
16 | #if defined(CONFIG_PPC64) | 15 | #ifdef CONFIG_PPC_BOOK3S |
17 | # include <asm/pgtable-ppc64.h> | 16 | #include <asm/book3s/pgtable.h> |
18 | #else | 17 | #else |
19 | # include <asm/pgtable-ppc32.h> | 18 | #include <asm/nohash/pgtable.h> |
20 | #endif | 19 | #endif /* !CONFIG_PPC_BOOK3S */ |
21 | |||
22 | /* | ||
23 | * We save the slot number & secondary bit in the second half of the | ||
24 | * PTE page. We use the 8 bytes per each pte entry. | ||
25 | */ | ||
26 | #define PTE_PAGE_HIDX_OFFSET (PTRS_PER_PTE * 8) | ||
27 | 20 | ||
28 | #ifndef __ASSEMBLY__ | 21 | #ifndef __ASSEMBLY__ |
29 | 22 | ||
30 | #include <asm/tlbflush.h> | 23 | #include <asm/tlbflush.h> |
31 | 24 | ||
32 | /* Generic accessors to PTE bits */ | ||
33 | static inline int pte_write(pte_t pte) | ||
34 | { return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; } | ||
35 | static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } | ||
36 | static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | ||
37 | static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } | ||
38 | static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } | ||
39 | static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } | ||
40 | |||
41 | #ifdef CONFIG_NUMA_BALANCING | ||
42 | /* | ||
43 | * These work without NUMA balancing but the kernel does not care. See the | ||
44 | * comment in include/asm-generic/pgtable.h . On powerpc, this will only | ||
45 | * work for user pages and always return true for kernel pages. | ||
46 | */ | ||
47 | static inline int pte_protnone(pte_t pte) | ||
48 | { | ||
49 | return (pte_val(pte) & | ||
50 | (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT; | ||
51 | } | ||
52 | |||
53 | static inline int pmd_protnone(pmd_t pmd) | ||
54 | { | ||
55 | return pte_protnone(pmd_pte(pmd)); | ||
56 | } | ||
57 | #endif /* CONFIG_NUMA_BALANCING */ | ||
58 | |||
59 | static inline int pte_present(pte_t pte) | ||
60 | { | ||
61 | return pte_val(pte) & _PAGE_PRESENT; | ||
62 | } | ||
63 | |||
64 | /* Conversion functions: convert a page and protection to a page entry, | ||
65 | * and a page entry and page directory to the page they refer to. | ||
66 | * | ||
67 | * Even if PTEs can be unsigned long long, a PFN is always an unsigned | ||
68 | * long for now. | ||
69 | */ | ||
70 | static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) { | ||
71 | return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | | ||
72 | pgprot_val(pgprot)); } | ||
73 | static inline unsigned long pte_pfn(pte_t pte) { | ||
74 | return pte_val(pte) >> PTE_RPN_SHIFT; } | ||
75 | |||
76 | /* Keep these as a macros to avoid include dependency mess */ | 25 | /* Keep these as a macros to avoid include dependency mess */ |
77 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 26 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
78 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | 27 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) |
79 | 28 | ||
80 | /* Generic modifiers for PTE bits */ | ||
81 | static inline pte_t pte_wrprotect(pte_t pte) { | ||
82 | pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); | ||
83 | pte_val(pte) |= _PAGE_RO; return pte; } | ||
84 | static inline pte_t pte_mkclean(pte_t pte) { | ||
85 | pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; } | ||
86 | static inline pte_t pte_mkold(pte_t pte) { | ||
87 | pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } | ||
88 | static inline pte_t pte_mkwrite(pte_t pte) { | ||
89 | pte_val(pte) &= ~_PAGE_RO; | ||
90 | pte_val(pte) |= _PAGE_RW; return pte; } | ||
91 | static inline pte_t pte_mkdirty(pte_t pte) { | ||
92 | pte_val(pte) |= _PAGE_DIRTY; return pte; } | ||
93 | static inline pte_t pte_mkyoung(pte_t pte) { | ||
94 | pte_val(pte) |= _PAGE_ACCESSED; return pte; } | ||
95 | static inline pte_t pte_mkspecial(pte_t pte) { | ||
96 | pte_val(pte) |= _PAGE_SPECIAL; return pte; } | ||
97 | static inline pte_t pte_mkhuge(pte_t pte) { | ||
98 | return pte; } | ||
99 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
100 | { | ||
101 | pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); | ||
102 | return pte; | ||
103 | } | ||
104 | |||
105 | |||
106 | /* Insert a PTE: the top-level function is out of line. It uses an inline | ||
107 | * low-level function in the respective pgtable-* files. | ||
108 | */ | ||
109 | extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, | ||
110 | pte_t pte); | ||
111 | |||
112 | /* This low-level function performs the actual PTE insertion. | ||
113 | * Setting the PTE depends on the MMU type and other factors. It's | ||
114 | * a horrible mess that I'm not going to try to clean up now, but | ||
115 | * I'm keeping it in one place rather than spread around. | ||
116 | */ | ||
117 | static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | ||
118 | pte_t *ptep, pte_t pte, int percpu) | ||
119 | { | ||
120 | #if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT) | ||
121 | /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the | ||
122 | * helper pte_update() which does an atomic update. We need to do that | ||
123 | * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a | ||
124 | * per-CPU PTE such as a kmap_atomic, we do a simple update preserving | ||
125 | * the hash bits instead (ie, same as the non-SMP case) | ||
126 | */ | ||
127 | if (percpu) | ||
128 | *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | ||
129 | | (pte_val(pte) & ~_PAGE_HASHPTE)); | ||
130 | else | ||
131 | pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); | ||
132 | |||
133 | #elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) | ||
134 | /* Second case is 32-bit with 64-bit PTE. In this case, we | ||
135 | * can just store as long as we do the two halves in the right order | ||
136 | * with a barrier in between. This is possible because we take care, | ||
137 | * in the hash code, to pre-invalidate if the PTE was already hashed, | ||
138 | * which synchronizes us with any concurrent invalidation. | ||
139 | * In the percpu case, we also fall back to the simple update preserving | ||
140 | * the hash bits. | ||
141 | */ | ||
142 | if (percpu) { | ||
143 | *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | ||
144 | | (pte_val(pte) & ~_PAGE_HASHPTE)); | ||
145 | return; | ||
146 | } | ||
147 | #if _PAGE_HASHPTE != 0 | ||
148 | if (pte_val(*ptep) & _PAGE_HASHPTE) | ||
149 | flush_hash_entry(mm, ptep, addr); | ||
150 | #endif | ||
151 | __asm__ __volatile__("\ | ||
152 | stw%U0%X0 %2,%0\n\ | ||
153 | eieio\n\ | ||
154 | stw%U0%X0 %L2,%1" | ||
155 | : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) | ||
156 | : "r" (pte) : "memory"); | ||
157 | |||
158 | #elif defined(CONFIG_PPC_STD_MMU_32) | ||
159 | * Third case is a 32-bit hash table in UP mode; we need to preserve | ||
160 | * the _PAGE_HASHPTE bit since we may not have invalidated the previous | ||
161 | * translation in the hash yet (done in a subsequent flush_tlb_xxx()), | ||
162 | * so we need to keep track that this PTE needs invalidating. | ||
163 | */ | ||
164 | *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) | ||
165 | | (pte_val(pte) & ~_PAGE_HASHPTE)); | ||
166 | |||
167 | #else | ||
168 | /* Anything else just stores the PTE normally. That covers all 64-bit | ||
169 | * cases, and 32-bit non-hash with 32-bit PTEs. | ||
170 | */ | ||
171 | *ptep = pte; | ||
172 | |||
173 | #ifdef CONFIG_PPC_BOOK3E_64 | ||
174 | /* | ||
175 | * With hardware tablewalk, a sync is needed to ensure that | ||
176 | * subsequent accesses see the PTE we just wrote. Unlike userspace | ||
177 | * mappings, we can't tolerate spurious faults, so make sure | ||
178 | * the new PTE will be seen the first time. | ||
179 | */ | ||
180 | if (is_kernel_addr(addr)) | ||
181 | mb(); | ||
182 | #endif | ||
183 | #endif | ||
184 | } | ||
185 | |||
186 | |||
187 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | ||
188 | extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, | ||
189 | pte_t *ptep, pte_t entry, int dirty); | ||
190 | |||
191 | /* | ||
192 | * Macro to mark a page protection value as "uncacheable". | ||
193 | */ | ||
194 | |||
195 | #define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \ | ||
196 | _PAGE_WRITETHRU) | ||
197 | |||
198 | #define pgprot_noncached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ | ||
199 | _PAGE_NO_CACHE | _PAGE_GUARDED)) | ||
200 | |||
201 | #define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ | ||
202 | _PAGE_NO_CACHE)) | ||
203 | |||
204 | #define pgprot_cached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ | ||
205 | _PAGE_COHERENT)) | ||
206 | |||
207 | #define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ | ||
208 | _PAGE_COHERENT | _PAGE_WRITETHRU)) | ||
209 | |||
210 | #define pgprot_cached_noncoherent(prot) \ | ||
211 | (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL)) | ||
212 | |||
213 | #define pgprot_writecombine pgprot_noncached_wc | ||
214 | |||
215 | struct file; | ||
216 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
217 | unsigned long size, pgprot_t vma_prot); | ||
218 | #define __HAVE_PHYS_MEM_ACCESS_PROT | ||
219 | |||
220 | /* | 29 | /* |
221 | * ZERO_PAGE is a global shared page that is always zero: used | 30 | * ZERO_PAGE is a global shared page that is always zero: used |
222 | * for zero-mapped memory areas etc.. | 31 | * for zero-mapped memory areas etc.. |
@@ -271,5 +80,4 @@ static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, | |||
271 | } | 80 | } |
272 | #endif /* __ASSEMBLY__ */ | 81 | #endif /* __ASSEMBLY__ */ |
273 | 82 | ||
274 | #endif /* __KERNEL__ */ | ||
275 | #endif /* _ASM_POWERPC_PGTABLE_H */ | 83 | #endif /* _ASM_POWERPC_PGTABLE_H */ |
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 67859edbf8fd..1b394247afc2 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h | |||
@@ -202,6 +202,23 @@ static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex, | |||
202 | } | 202 | } |
203 | 203 | ||
204 | /* | 204 | /* |
205 | * ptes must be 8*sizeof(unsigned long) | ||
206 | */ | ||
207 | static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex, | ||
208 | unsigned long *ptes) | ||
209 | |||
210 | { | ||
211 | long rc; | ||
212 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | ||
213 | |||
214 | rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex); | ||
215 | |||
216 | memcpy(ptes, retbuf, 8*sizeof(unsigned long)); | ||
217 | |||
218 | return rc; | ||
219 | } | ||
220 | |||
221 | /* | ||
205 | * plpar_pte_read_4_raw can be called in real mode. | 222 | * plpar_pte_read_4_raw can be called in real mode. |
206 | * ptes must be 8*sizeof(unsigned long) | 223 | * ptes must be 8*sizeof(unsigned long) |
207 | */ | 224 | */ |
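
A hedged sketch of a plpar_pte_read_4() caller: H_READ with the H_READ_4 flag returns four consecutive HPTEs, i.e. four (v, r) doubleword pairs, which is why the buffer must hold 8 unsigned longs (the function name below is hypothetical):

	static void read_hpte_quad_sketch(unsigned long ptex)
	{
		unsigned long ptes[8];
		int i;

		if (plpar_pte_read_4(0, ptex, ptes) != H_SUCCESS)
			return;

		for (i = 0; i < 4; i++)
			pr_info("hpte %lu: v=%016lx r=%016lx\n", ptex + i,
				ptes[2 * i], ptes[2 * i + 1]);
	}
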
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index dd0fc18d8103..499d9f89435a 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h | |||
@@ -413,24 +413,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) | |||
413 | FTR_SECTION_ELSE_NESTED(848); \ | 413 | FTR_SECTION_ELSE_NESTED(848); \ |
414 | mtocrf (FXM), RS; \ | 414 | mtocrf (FXM), RS; \ |
415 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848) | 415 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848) |
416 | |||
417 | /* | ||
418 | * PPR restore macros used in entry_64.S | ||
419 | * Used for P7 or later processors | ||
420 | */ | ||
421 | #define HMT_MEDIUM_LOW_HAS_PPR \ | ||
422 | BEGIN_FTR_SECTION_NESTED(944) \ | ||
423 | HMT_MEDIUM_LOW; \ | ||
424 | END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,944) | ||
425 | |||
426 | #define SET_DEFAULT_THREAD_PPR(ra, rb) \ | ||
427 | BEGIN_FTR_SECTION_NESTED(945) \ | ||
428 | lis ra,INIT_PPR@highest; /* default ppr=3 */ \ | ||
429 | ld rb,PACACURRENT(r13); \ | ||
430 | sldi ra,ra,32; /* 11- 13 bits are used for ppr */ \ | ||
431 | std ra,TASKTHREADPPR(rb); \ | ||
432 | END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945) | ||
433 | |||
434 | #endif | 416 | #endif |
435 | 417 | ||
436 | /* | 418 | /* |
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 5afea361beaa..ac2330820b9a 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h | |||
@@ -88,12 +88,6 @@ struct task_struct; | |||
88 | void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp); | 88 | void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp); |
89 | void release_thread(struct task_struct *); | 89 | void release_thread(struct task_struct *); |
90 | 90 | ||
91 | /* Lazy FPU handling on uni-processor */ | ||
92 | extern struct task_struct *last_task_used_math; | ||
93 | extern struct task_struct *last_task_used_altivec; | ||
94 | extern struct task_struct *last_task_used_vsx; | ||
95 | extern struct task_struct *last_task_used_spe; | ||
96 | |||
97 | #ifdef CONFIG_PPC32 | 91 | #ifdef CONFIG_PPC32 |
98 | 92 | ||
99 | #if CONFIG_TASK_SIZE > CONFIG_KERNEL_START | 93 | #if CONFIG_TASK_SIZE > CONFIG_KERNEL_START |
@@ -294,6 +288,7 @@ struct thread_struct { | |||
294 | #endif | 288 | #endif |
295 | #ifdef CONFIG_PPC64 | 289 | #ifdef CONFIG_PPC64 |
296 | unsigned long dscr; | 290 | unsigned long dscr; |
291 | unsigned long fscr; | ||
297 | /* | 292 | /* |
298 | * This member element dscr_inherit indicates that the process | 293 | * This member element dscr_inherit indicates that the process |
299 | * has explicitly attempted and changed the DSCR register value | 294 | * has explicitly attempted and changed the DSCR register value |
@@ -385,8 +380,6 @@ extern int set_endian(struct task_struct *tsk, unsigned int val); | |||
385 | extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr); | 380 | extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr); |
386 | extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); | 381 | extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); |
387 | 382 | ||
388 | extern void fp_enable(void); | ||
389 | extern void vec_enable(void); | ||
390 | extern void load_fp_state(struct thread_fp_state *fp); | 383 | extern void load_fp_state(struct thread_fp_state *fp); |
391 | extern void store_fp_state(struct thread_fp_state *fp); | 384 | extern void store_fp_state(struct thread_fp_state *fp); |
392 | extern void load_vr_state(struct thread_vr_state *vr); | 385 | extern void load_vr_state(struct thread_vr_state *vr); |
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h index 71537a319fc8..1ec67b043065 100644 --- a/arch/powerpc/include/asm/pte-common.h +++ b/arch/powerpc/include/asm/pte-common.h | |||
@@ -40,6 +40,11 @@ | |||
40 | #else | 40 | #else |
41 | #define _PAGE_RW 0 | 41 | #define _PAGE_RW 0 |
42 | #endif | 42 | #endif |
43 | |||
44 | #ifndef _PAGE_PTE | ||
45 | #define _PAGE_PTE 0 | ||
46 | #endif | ||
47 | |||
43 | #ifndef _PMD_PRESENT_MASK | 48 | #ifndef _PMD_PRESENT_MASK |
44 | #define _PMD_PRESENT_MASK _PMD_PRESENT | 49 | #define _PMD_PRESENT_MASK _PMD_PRESENT |
45 | #endif | 50 | #endif |
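
Defaulting _PAGE_PTE to 0 here follows pte-common.h's usual pattern: optional PTE bits get a zero fallback so shared code can test them without #ifdefs, and the test constant-folds away on MMUs that lack the bit. A sketch (the helper name is made up):

	static inline int pte_has_pte_bit_sketch(pte_t pte)
	{
		/* Folds to "return 0" where _PAGE_PTE is 0. */
		return (pte_val(pte) & _PAGE_PTE) != 0;
	}
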
diff --git a/arch/powerpc/include/asm/pte-hash64-4k.h b/arch/powerpc/include/asm/pte-hash64-4k.h deleted file mode 100644 index c134e809aac3..000000000000 --- a/arch/powerpc/include/asm/pte-hash64-4k.h +++ /dev/null | |||
@@ -1,17 +0,0 @@ | |||
1 | /* To be included by pgtable-hash64.h only */ | ||
2 | |||
3 | /* PTE bits */ | ||
4 | #define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */ | ||
5 | #define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */ | ||
6 | #define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */ | ||
7 | #define _PAGE_F_SECOND _PAGE_SECONDARY | ||
8 | #define _PAGE_F_GIX _PAGE_GROUP_IX | ||
9 | #define _PAGE_SPECIAL 0x10000 /* software: special page */ | ||
10 | |||
11 | /* PTE flags to conserve for HPTE identification */ | ||
12 | #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \ | ||
13 | _PAGE_SECONDARY | _PAGE_GROUP_IX) | ||
14 | |||
15 | /* shift to put page number into pte */ | ||
16 | #define PTE_RPN_SHIFT (17) | ||
17 | |||
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h deleted file mode 100644 index 4f4ec2ab45c9..000000000000 --- a/arch/powerpc/include/asm/pte-hash64-64k.h +++ /dev/null | |||
@@ -1,102 +0,0 @@ | |||
1 | /* To be included by pgtable-hash64.h only */ | ||
2 | |||
3 | /* Additional PTE bits (don't change without checking asm in hash_low.S) */ | ||
4 | #define _PAGE_SPECIAL 0x00000400 /* software: special page */ | ||
5 | #define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */ | ||
6 | #define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */ | ||
7 | #define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */ | ||
8 | #define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */ | ||
9 | |||
10 | /* For 64K page, we don't have a separate _PAGE_HASHPTE bit. Instead, | ||
11 | * we set that to be the whole sub-bits mask. The C code will only | ||
12 | * test this, so a multi-bit mask will work. For combo pages, this | ||
13 | * is equivalent since, effectively, the old _PAGE_HASHPTE was an OR of | ||
14 | * all the sub bits. For real 64k pages, we now have the assembly set | ||
15 | * _PAGE_HPTE_SUB0 in addition to setting the HIDX bits which overlap | ||
16 | * that mask. This is fine as long as the HIDX bits are never set on | ||
17 | * a PTE that isn't hashed, which is the case today. | ||
18 | * | ||
19 | * One nit is the huge page C code, which does the hashing | ||
20 | * in C: we need to tell it which bit to use. | ||
21 | */ | ||
22 | #define _PAGE_HASHPTE _PAGE_HPTE_SUB | ||
23 | |||
24 | /* Note the full page bits must be in the same location as for normal | ||
25 | * 4k pages as the same assembly will be used to insert 64K pages | ||
26 | * whether the kernel has CONFIG_PPC_64K_PAGES or not | ||
27 | */ | ||
28 | #define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */ | ||
29 | #define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */ | ||
30 | |||
31 | /* PTE flags to conserve for HPTE identification */ | ||
32 | #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_COMBO) | ||
33 | |||
34 | /* Shift to put page number into pte. | ||
35 | * | ||
36 | * That gives us a max RPN of 34 bits, which means a max of 50 bits | ||
37 | * of addressable physical space, or 46 bits for the special 4k PFNs. | ||
38 | */ | ||
39 | #define PTE_RPN_SHIFT (30) | ||
40 | |||
41 | #ifndef __ASSEMBLY__ | ||
42 | |||
43 | /* | ||
44 | * With 64K pages on hash table, we have a special PTE format that | ||
45 | * uses a second "half" of the page table to encode sub-page information | ||
46 | * in order to deal with 64K made of 4K HW pages. Thus we override the | ||
47 | * generic accessors and iterators here | ||
48 | */ | ||
49 | #define __real_pte __real_pte | ||
50 | static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep) | ||
51 | { | ||
52 | real_pte_t rpte; | ||
53 | |||
54 | rpte.pte = pte; | ||
55 | rpte.hidx = 0; | ||
56 | if (pte_val(pte) & _PAGE_COMBO) { | ||
57 | /* | ||
58 | * Make sure we order the hidx load against the _PAGE_COMBO | ||
59 | * check. The store side ordering is done in __hash_page_4K | ||
60 | */ | ||
61 | smp_rmb(); | ||
62 | rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE)); | ||
63 | } | ||
64 | return rpte; | ||
65 | } | ||
66 | |||
67 | static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index) | ||
68 | { | ||
69 | if ((pte_val(rpte.pte) & _PAGE_COMBO)) | ||
70 | return (rpte.hidx >> (index<<2)) & 0xf; | ||
71 | return (pte_val(rpte.pte) >> 12) & 0xf; | ||
72 | } | ||
73 | |||
74 | #define __rpte_to_pte(r) ((r).pte) | ||
75 | #define __rpte_sub_valid(rpte, index) \ | ||
76 | (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index))) | ||
77 | |||
78 | /* Trick: we set __end to va + 64k, which happens to work for | ||
79 | * a 16M page as well, since we want only one iteration | ||
80 | */ | ||
81 | #define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \ | ||
82 | do { \ | ||
83 | unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \ | ||
84 | unsigned __split = (psize == MMU_PAGE_4K || \ | ||
85 | psize == MMU_PAGE_64K_AP); \ | ||
86 | shift = mmu_psize_defs[psize].shift; \ | ||
87 | for (index = 0; vpn < __end; index++, \ | ||
88 | vpn += (1L << (shift - VPN_SHIFT))) { \ | ||
89 | if (!__split || __rpte_sub_valid(rpte, index)) \ | ||
90 | do { | ||
91 | |||
92 | #define pte_iterate_hashed_end() } while(0); } } while(0) | ||
93 | |||
94 | #define pte_pagesize_index(mm, addr, pte) \ | ||
95 | (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K) | ||
96 | |||
97 | #define remap_4k_pfn(vma, addr, pfn, prot) \ | ||
98 | (WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? -EINVAL : \ | ||
99 | remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ | ||
100 | __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))) | ||
101 | |||
102 | #endif /* __ASSEMBLY__ */ | ||
diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h deleted file mode 100644 index ef612c160da7..000000000000 --- a/arch/powerpc/include/asm/pte-hash64.h +++ /dev/null | |||
@@ -1,54 +0,0 @@ | |||
1 | #ifndef _ASM_POWERPC_PTE_HASH64_H | ||
2 | #define _ASM_POWERPC_PTE_HASH64_H | ||
3 | #ifdef __KERNEL__ | ||
4 | |||
5 | /* | ||
6 | * Common bits between 4K and 64K pages in a linux-style PTE. | ||
7 | * These match the bits in the (hardware-defined) PowerPC PTE as closely | ||
8 | * as possible. Additional bits may be defined in pgtable-hash64-*.h | ||
9 | * | ||
10 | * Note: We only support user read/write permissions. The supervisor always | ||
11 | * has full read/write to pages above PAGE_OFFSET (pages below that | ||
12 | * always use the user access permissions). | ||
13 | * | ||
14 | * We could create a separate kernel read-only mode if we used the 3 PP bit | ||
15 | * combinations that newer processors provide, but we currently don't. | ||
16 | */ | ||
17 | #define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */ | ||
18 | #define _PAGE_USER 0x0002 /* matches one of the PP bits */ | ||
19 | #define _PAGE_BIT_SWAP_TYPE 2 | ||
20 | #define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ | ||
21 | #define _PAGE_GUARDED 0x0008 | ||
22 | /* We can derive Memory coherence from _PAGE_NO_CACHE */ | ||
23 | #define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */ | ||
24 | #define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */ | ||
25 | #define _PAGE_DIRTY 0x0080 /* C: page changed */ | ||
26 | #define _PAGE_ACCESSED 0x0100 /* R: page referenced */ | ||
27 | #define _PAGE_RW 0x0200 /* software: user write access allowed */ | ||
28 | #define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ | ||
29 | |||
30 | /* No separate kernel read-only */ | ||
31 | #define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */ | ||
32 | #define _PAGE_KERNEL_RO _PAGE_KERNEL_RW | ||
33 | |||
34 | /* Strong Access Ordering */ | ||
35 | #define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT) | ||
36 | |||
37 | /* No page size encoding in the linux PTE */ | ||
38 | #define _PAGE_PSIZE 0 | ||
39 | |||
40 | /* PTEIDX nibble */ | ||
41 | #define _PTEIDX_SECONDARY 0x8 | ||
42 | #define _PTEIDX_GROUP_IX 0x7 | ||
43 | |||
44 | /* Hash table based platforms need atomic updates of the linux PTE */ | ||
45 | #define PTE_ATOMIC_UPDATES 1 | ||
46 | |||
47 | #ifdef CONFIG_PPC_64K_PAGES | ||
48 | #include <asm/pte-hash64-64k.h> | ||
49 | #else | ||
50 | #include <asm/pte-hash64-4k.h> | ||
51 | #endif | ||
52 | |||
53 | #endif /* __KERNEL__ */ | ||
54 | #endif /* _ASM_POWERPC_PTE_HASH64_H */ | ||
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h deleted file mode 100644 index 32b9bfa0c9bd..000000000000 --- a/arch/powerpc/include/asm/qe.h +++ /dev/null | |||
@@ -1,740 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Authors: Shlomi Gridish <gridish@freescale.com> | ||
5 | * Li Yang <leoli@freescale.com> | ||
6 | * | ||
7 | * Description: | ||
8 | * QUICC Engine (QE) external definitions and structure. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | */ | ||
15 | #ifndef _ASM_POWERPC_QE_H | ||
16 | #define _ASM_POWERPC_QE_H | ||
17 | #ifdef __KERNEL__ | ||
18 | |||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/err.h> | ||
22 | #include <asm/cpm.h> | ||
23 | #include <asm/immap_qe.h> | ||
24 | |||
25 | #define QE_NUM_OF_SNUM 256 /* There are 256 serial numbers in QE */ | ||
26 | #define QE_NUM_OF_BRGS 16 | ||
27 | #define QE_NUM_OF_PORTS 1024 | ||
28 | |||
29 | /* Memory partitions | ||
30 | */ | ||
31 | #define MEM_PART_SYSTEM 0 | ||
32 | #define MEM_PART_SECONDARY 1 | ||
33 | #define MEM_PART_MURAM 2 | ||
34 | |||
35 | /* Clocks and BRGs */ | ||
36 | enum qe_clock { | ||
37 | QE_CLK_NONE = 0, | ||
38 | QE_BRG1, /* Baud Rate Generator 1 */ | ||
39 | QE_BRG2, /* Baud Rate Generator 2 */ | ||
40 | QE_BRG3, /* Baud Rate Generator 3 */ | ||
41 | QE_BRG4, /* Baud Rate Generator 4 */ | ||
42 | QE_BRG5, /* Baud Rate Generator 5 */ | ||
43 | QE_BRG6, /* Baud Rate Generator 6 */ | ||
44 | QE_BRG7, /* Baud Rate Generator 7 */ | ||
45 | QE_BRG8, /* Baud Rate Generator 8 */ | ||
46 | QE_BRG9, /* Baud Rate Generator 9 */ | ||
47 | QE_BRG10, /* Baud Rate Generator 10 */ | ||
48 | QE_BRG11, /* Baud Rate Generator 11 */ | ||
49 | QE_BRG12, /* Baud Rate Generator 12 */ | ||
50 | QE_BRG13, /* Baud Rate Generator 13 */ | ||
51 | QE_BRG14, /* Baud Rate Generator 14 */ | ||
52 | QE_BRG15, /* Baud Rate Generator 15 */ | ||
53 | QE_BRG16, /* Baud Rate Generator 16 */ | ||
54 | QE_CLK1, /* Clock 1 */ | ||
55 | QE_CLK2, /* Clock 2 */ | ||
56 | QE_CLK3, /* Clock 3 */ | ||
57 | QE_CLK4, /* Clock 4 */ | ||
58 | QE_CLK5, /* Clock 5 */ | ||
59 | QE_CLK6, /* Clock 6 */ | ||
60 | QE_CLK7, /* Clock 7 */ | ||
61 | QE_CLK8, /* Clock 8 */ | ||
62 | QE_CLK9, /* Clock 9 */ | ||
63 | QE_CLK10, /* Clock 10 */ | ||
64 | QE_CLK11, /* Clock 11 */ | ||
65 | QE_CLK12, /* Clock 12 */ | ||
66 | QE_CLK13, /* Clock 13 */ | ||
67 | QE_CLK14, /* Clock 14 */ | ||
68 | QE_CLK15, /* Clock 15 */ | ||
69 | QE_CLK16, /* Clock 16 */ | ||
70 | QE_CLK17, /* Clock 17 */ | ||
71 | QE_CLK18, /* Clock 18 */ | ||
72 | QE_CLK19, /* Clock 19 */ | ||
73 | QE_CLK20, /* Clock 20 */ | ||
74 | QE_CLK21, /* Clock 21 */ | ||
75 | QE_CLK22, /* Clock 22 */ | ||
76 | QE_CLK23, /* Clock 23 */ | ||
77 | QE_CLK24, /* Clock 24 */ | ||
78 | QE_CLK_DUMMY | ||
79 | }; | ||
80 | |||
81 | static inline bool qe_clock_is_brg(enum qe_clock clk) | ||
82 | { | ||
83 | return clk >= QE_BRG1 && clk <= QE_BRG16; | ||
84 | } | ||
85 | |||
86 | extern spinlock_t cmxgcr_lock; | ||
87 | |||
88 | /* Export QE common operations */ | ||
89 | #ifdef CONFIG_QUICC_ENGINE | ||
90 | extern void qe_reset(void); | ||
91 | #else | ||
92 | static inline void qe_reset(void) {} | ||
93 | #endif | ||
94 | |||
95 | /* QE PIO */ | ||
96 | #define QE_PIO_PINS 32 | ||
97 | |||
98 | struct qe_pio_regs { | ||
99 | __be32 cpodr; /* Open drain register */ | ||
100 | __be32 cpdata; /* Data register */ | ||
101 | __be32 cpdir1; /* Direction register */ | ||
102 | __be32 cpdir2; /* Direction register */ | ||
103 | __be32 cppar1; /* Pin assignment register */ | ||
104 | __be32 cppar2; /* Pin assignment register */ | ||
105 | #ifdef CONFIG_PPC_85xx | ||
106 | u8 pad[8]; | ||
107 | #endif | ||
108 | }; | ||
109 | |||
110 | #define QE_PIO_DIR_IN 2 | ||
111 | #define QE_PIO_DIR_OUT 1 | ||
112 | extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, | ||
113 | int dir, int open_drain, int assignment, | ||
114 | int has_irq); | ||
115 | #ifdef CONFIG_QUICC_ENGINE | ||
116 | extern int par_io_init(struct device_node *np); | ||
117 | extern int par_io_of_config(struct device_node *np); | ||
118 | extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, | ||
119 | int assignment, int has_irq); | ||
120 | extern int par_io_data_set(u8 port, u8 pin, u8 val); | ||
121 | #else | ||
122 | static inline int par_io_init(struct device_node *np) { return -ENOSYS; } | ||
123 | static inline int par_io_of_config(struct device_node *np) { return -ENOSYS; } | ||
124 | static inline int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, | ||
125 | int assignment, int has_irq) { return -ENOSYS; } | ||
126 | static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; } | ||
127 | #endif /* CONFIG_QUICC_ENGINE */ | ||
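A hedged usage sketch of the parallel-I/O API above, as board-setup code might call it; the port, pin and assignment values are purely illustrative:

	/* port 1, pin 3: output, not open-drain, assignment 0, no IRQ */
	if (par_io_config_pin(1, 3, QE_PIO_DIR_OUT, 0, 0, 0) == 0)
		par_io_data_set(1, 3, 1);	/* drive the pin high */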
128 | |||
129 | /* | ||
130 | * Pin multiplexing functions. | ||
131 | */ | ||
132 | struct qe_pin; | ||
133 | #ifdef CONFIG_QE_GPIO | ||
134 | extern struct qe_pin *qe_pin_request(struct device_node *np, int index); | ||
135 | extern void qe_pin_free(struct qe_pin *qe_pin); | ||
136 | extern void qe_pin_set_gpio(struct qe_pin *qe_pin); | ||
137 | extern void qe_pin_set_dedicated(struct qe_pin *pin); | ||
138 | #else | ||
139 | static inline struct qe_pin *qe_pin_request(struct device_node *np, int index) | ||
140 | { | ||
141 | return ERR_PTR(-ENOSYS); | ||
142 | } | ||
143 | static inline void qe_pin_free(struct qe_pin *qe_pin) {} | ||
144 | static inline void qe_pin_set_gpio(struct qe_pin *qe_pin) {} | ||
145 | static inline void qe_pin_set_dedicated(struct qe_pin *pin) {} | ||
146 | #endif /* CONFIG_QE_GPIO */ | ||
147 | |||
148 | #ifdef CONFIG_QUICC_ENGINE | ||
149 | int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input); | ||
150 | #else | ||
151 | static inline int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, | ||
152 | u32 cmd_input) | ||
153 | { | ||
154 | return -ENOSYS; | ||
155 | } | ||
156 | #endif /* CONFIG_QUICC_ENGINE */ | ||
157 | |||
158 | /* QE internal API */ | ||
159 | enum qe_clock qe_clock_source(const char *source); | ||
160 | unsigned int qe_get_brg_clk(void); | ||
161 | int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier); | ||
162 | int qe_get_snum(void); | ||
163 | void qe_put_snum(u8 snum); | ||
164 | unsigned int qe_get_num_of_risc(void); | ||
165 | unsigned int qe_get_num_of_snums(void); | ||
166 | |||
167 | static inline int qe_alive_during_sleep(void) | ||
168 | { | ||
169 | /* | ||
170 | * MPC8568E reference manual says: | ||
171 | * | ||
172 | * "...power down sequence waits for all I/O interfaces to become idle. | ||
173 | * In some applications this may happen eventually without actively | ||
174 | * shutting down interfaces, but most likely, software will have to | ||
175 | * take steps to shut down the eTSEC, QUICC Engine Block, and PCI | ||
176 | * interfaces before issuing the command (either the write to the core | ||
177 | * MSR[WE] as described above or writing to POWMGTCSR) to put the | ||
178 | * device into sleep state." | ||
179 | * | ||
180 | * MPC8569E reference manual has a similar paragraph. | ||
181 | */ | ||
182 | #ifdef CONFIG_PPC_85xx | ||
183 | return 0; | ||
184 | #else | ||
185 | return 1; | ||
186 | #endif | ||
187 | } | ||
188 | |||
189 | /* we actually use cpm_muram implementation, define this for convenience */ | ||
190 | #define qe_muram_init cpm_muram_init | ||
191 | #define qe_muram_alloc cpm_muram_alloc | ||
192 | #define qe_muram_alloc_fixed cpm_muram_alloc_fixed | ||
193 | #define qe_muram_free cpm_muram_free | ||
194 | #define qe_muram_addr cpm_muram_addr | ||
195 | #define qe_muram_offset cpm_muram_offset | ||
196 | |||
197 | /* Structure that defines QE firmware binary files. | ||
198 | * | ||
199 | * See Documentation/powerpc/qe_firmware.txt for a description of these | ||
200 | * fields. | ||
201 | */ | ||
202 | struct qe_firmware { | ||
203 | struct qe_header { | ||
204 | __be32 length; /* Length of the entire structure, in bytes */ | ||
205 | u8 magic[3]; /* Set to { 'Q', 'E', 'F' } */ | ||
206 | u8 version; /* Version of this layout. First ver is '1' */ | ||
207 | } header; | ||
208 | u8 id[62]; /* Null-terminated identifier string */ | ||
209 | u8 split; /* 0 = shared I-RAM, 1 = split I-RAM */ | ||
210 | u8 count; /* Number of microcode[] structures */ | ||
211 | struct { | ||
212 | __be16 model; /* The SOC model */ | ||
213 | u8 major; /* The SOC revision major */ | ||
214 | u8 minor; /* The SOC revision minor */ | ||
215 | } __attribute__ ((packed)) soc; | ||
216 | u8 padding[4]; /* Reserved, for alignment */ | ||
217 | __be64 extended_modes; /* Extended modes */ | ||
218 | __be32 vtraps[8]; /* Virtual trap addresses */ | ||
219 | u8 reserved[4]; /* Reserved, for future expansion */ | ||
220 | struct qe_microcode { | ||
221 | u8 id[32]; /* Null-terminated identifier */ | ||
222 | __be32 traps[16]; /* Trap addresses, 0 == ignore */ | ||
223 | __be32 eccr; /* The value for the ECCR register */ | ||
224 | __be32 iram_offset; /* Offset into I-RAM for the code */ | ||
225 | __be32 count; /* Number of 32-bit words of the code */ | ||
226 | __be32 code_offset; /* Offset of the actual microcode */ | ||
227 | u8 major; /* The microcode version major */ | ||
228 | u8 minor; /* The microcode version minor */ | ||
229 | u8 revision; /* The microcode version revision */ | ||
230 | u8 padding; /* Reserved, for alignment */ | ||
231 | u8 reserved[4]; /* Reserved, for future expansion */ | ||
232 | } __attribute__ ((packed)) microcode[1]; | ||
233 | /* All microcode binaries should be located here */ | ||
234 | /* CRC32 should be located here, after the microcode binaries */ | ||
235 | } __attribute__ ((packed)); | ||
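Given the layout above, a caller might sanity-check a blob before handing it to qe_upload_firmware(). A sketch under the assumption that only layout version 1 exists (the helper name is made up; CRC checking is omitted):

	static bool qe_firmware_looks_valid(const struct qe_firmware *fw, size_t size)
	{
		const struct qe_header *hdr = &fw->header;

		if (size < sizeof(*fw) || be32_to_cpu(hdr->length) != size)
			return false;
		if (hdr->magic[0] != 'Q' || hdr->magic[1] != 'E' || hdr->magic[2] != 'F')
			return false;
		return hdr->version == 1;	/* first defined layout version */
	}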
236 | |||
237 | struct qe_firmware_info { | ||
238 | char id[64]; /* Firmware name */ | ||
239 | u32 vtraps[8]; /* Virtual trap addresses */ | ||
240 | u64 extended_modes; /* Extended modes */ | ||
241 | }; | ||
242 | |||
243 | #ifdef CONFIG_QUICC_ENGINE | ||
244 | /* Upload a firmware to the QE */ | ||
245 | int qe_upload_firmware(const struct qe_firmware *firmware); | ||
246 | #else | ||
247 | static inline int qe_upload_firmware(const struct qe_firmware *firmware) | ||
248 | { | ||
249 | return -ENOSYS; | ||
250 | } | ||
251 | #endif /* CONFIG_QUICC_ENGINE */ | ||
252 | |||
253 | /* Obtain information on the uploaded firmware */ | ||
254 | struct qe_firmware_info *qe_get_firmware_info(void); | ||
255 | |||
256 | /* QE USB */ | ||
257 | int qe_usb_clock_set(enum qe_clock clk, int rate); | ||
258 | |||
259 | /* Buffer descriptors */ | ||
260 | struct qe_bd { | ||
261 | __be16 status; | ||
262 | __be16 length; | ||
263 | __be32 buf; | ||
264 | } __attribute__ ((packed)); | ||
265 | |||
266 | #define BD_STATUS_MASK 0xffff0000 | ||
267 | #define BD_LENGTH_MASK 0x0000ffff | ||
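The status and length fields of a BD share a single 32-bit word, split by the two masks above. Illustrative helpers (not from this header) for unpacking a BD word read as a u32:

	static inline u16 qe_bd_status(u32 bd_word)
	{
		return (bd_word & BD_STATUS_MASK) >> 16;	/* high half */
	}

	static inline u16 qe_bd_length(u32 bd_word)
	{
		return bd_word & BD_LENGTH_MASK;		/* low half */
	}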
268 | |||
269 | /* Alignment */ | ||
270 | #define QE_INTR_TABLE_ALIGN 16 /* ??? */ | ||
271 | #define QE_ALIGNMENT_OF_BD 8 | ||
272 | #define QE_ALIGNMENT_OF_PRAM 64 | ||
273 | |||
274 | /* RISC allocation */ | ||
275 | #define QE_RISC_ALLOCATION_RISC1 0x1 /* RISC 1 */ | ||
276 | #define QE_RISC_ALLOCATION_RISC2 0x2 /* RISC 2 */ | ||
277 | #define QE_RISC_ALLOCATION_RISC3 0x4 /* RISC 3 */ | ||
278 | #define QE_RISC_ALLOCATION_RISC4 0x8 /* RISC 4 */ | ||
279 | #define QE_RISC_ALLOCATION_RISC1_AND_RISC2 (QE_RISC_ALLOCATION_RISC1 | \ | ||
280 | QE_RISC_ALLOCATION_RISC2) | ||
281 | #define QE_RISC_ALLOCATION_FOUR_RISCS (QE_RISC_ALLOCATION_RISC1 | \ | ||
282 | QE_RISC_ALLOCATION_RISC2 | \ | ||
283 | QE_RISC_ALLOCATION_RISC3 | \ | ||
284 | QE_RISC_ALLOCATION_RISC4) | ||
285 | |||
286 | /* QE extended filtering Table Lookup Key Size */ | ||
287 | enum qe_fltr_tbl_lookup_key_size { | ||
288 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES | ||
289 | = 0x3f, /* LookupKey parsed by the Generate LookupKey | ||
290 | CMD is truncated to 8 bytes */ | ||
291 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES | ||
292 | = 0x5f, /* LookupKey parsed by the Generate LookupKey | ||
293 | CMD is truncated to 16 bytes */ | ||
294 | }; | ||
295 | |||
296 | /* QE FLTR extended filtering Largest External Table Lookup Key Size */ | ||
297 | enum qe_fltr_largest_external_tbl_lookup_key_size { | ||
298 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE | ||
299 | = 0x0,/* not used */ | ||
300 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES | ||
301 | = QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES, /* 8 bytes */ | ||
302 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES | ||
303 | = QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES, /* 16 bytes */ | ||
304 | }; | ||
305 | |||
306 | /* structure representing QE parameter RAM */ | ||
307 | struct qe_timer_tables { | ||
308 | u16 tm_base; /* QE timer table base address */ | ||
309 | u16 tm_ptr; /* QE timer table pointer */ | ||
310 | u16 r_tmr; /* QE timer mode register */ | ||
311 | u16 r_tmv; /* QE timer valid register */ | ||
312 | u32 tm_cmd; /* QE timer cmd register */ | ||
313 | u32 tm_cnt; /* QE timer internal cnt */ | ||
314 | } __attribute__ ((packed)); | ||
315 | |||
316 | #define QE_FLTR_TAD_SIZE 8 | ||
317 | |||
318 | /* QE extended filtering Termination Action Descriptor (TAD) */ | ||
319 | struct qe_fltr_tad { | ||
320 | u8 serialized[QE_FLTR_TAD_SIZE]; | ||
321 | } __attribute__ ((packed)); | ||
322 | |||
323 | /* Communication Direction */ | ||
324 | enum comm_dir { | ||
325 | COMM_DIR_NONE = 0, | ||
326 | COMM_DIR_RX = 1, | ||
327 | COMM_DIR_TX = 2, | ||
328 | COMM_DIR_RX_AND_TX = 3 | ||
329 | }; | ||
330 | |||
331 | /* QE CMXUCR Registers. | ||
332 | * There are two UCCs represented in each of the four CMXUCR registers. | ||
333 | * These values are for the UCC in the LSBs | ||
334 | */ | ||
335 | #define QE_CMXUCR_MII_ENET_MNG 0x00007000 | ||
336 | #define QE_CMXUCR_MII_ENET_MNG_SHIFT 12 | ||
337 | #define QE_CMXUCR_GRANT 0x00008000 | ||
338 | #define QE_CMXUCR_TSA 0x00004000 | ||
339 | #define QE_CMXUCR_BKPT 0x00000100 | ||
340 | #define QE_CMXUCR_TX_CLK_SRC_MASK 0x0000000F | ||
341 | |||
342 | /* QE CMXGCR Registers. | ||
343 | */ | ||
344 | #define QE_CMXGCR_MII_ENET_MNG 0x00007000 | ||
345 | #define QE_CMXGCR_MII_ENET_MNG_SHIFT 12 | ||
346 | #define QE_CMXGCR_USBCS 0x0000000f | ||
347 | #define QE_CMXGCR_USBCS_CLK3 0x1 | ||
348 | #define QE_CMXGCR_USBCS_CLK5 0x2 | ||
349 | #define QE_CMXGCR_USBCS_CLK7 0x3 | ||
350 | #define QE_CMXGCR_USBCS_CLK9 0x4 | ||
351 | #define QE_CMXGCR_USBCS_CLK13 0x5 | ||
352 | #define QE_CMXGCR_USBCS_CLK17 0x6 | ||
353 | #define QE_CMXGCR_USBCS_CLK19 0x7 | ||
354 | #define QE_CMXGCR_USBCS_CLK21 0x8 | ||
355 | #define QE_CMXGCR_USBCS_BRG9 0x9 | ||
356 | #define QE_CMXGCR_USBCS_BRG10 0xa | ||
357 | |||
358 | /* QE CECR Commands. | ||
359 | */ | ||
360 | #define QE_CR_FLG 0x00010000 | ||
361 | #define QE_RESET 0x80000000 | ||
362 | #define QE_INIT_TX_RX 0x00000000 | ||
363 | #define QE_INIT_RX 0x00000001 | ||
364 | #define QE_INIT_TX 0x00000002 | ||
365 | #define QE_ENTER_HUNT_MODE 0x00000003 | ||
366 | #define QE_STOP_TX 0x00000004 | ||
367 | #define QE_GRACEFUL_STOP_TX 0x00000005 | ||
368 | #define QE_RESTART_TX 0x00000006 | ||
369 | #define QE_CLOSE_RX_BD 0x00000007 | ||
370 | #define QE_SWITCH_COMMAND 0x00000007 | ||
371 | #define QE_SET_GROUP_ADDRESS 0x00000008 | ||
372 | #define QE_START_IDMA 0x00000009 | ||
373 | #define QE_MCC_STOP_RX 0x00000009 | ||
374 | #define QE_ATM_TRANSMIT 0x0000000a | ||
375 | #define QE_HPAC_CLEAR_ALL 0x0000000b | ||
376 | #define QE_GRACEFUL_STOP_RX 0x0000001a | ||
377 | #define QE_RESTART_RX 0x0000001b | ||
378 | #define QE_HPAC_SET_PRIORITY 0x0000010b | ||
379 | #define QE_HPAC_STOP_TX 0x0000020b | ||
380 | #define QE_HPAC_STOP_RX 0x0000030b | ||
381 | #define QE_HPAC_GRACEFUL_STOP_TX 0x0000040b | ||
382 | #define QE_HPAC_GRACEFUL_STOP_RX 0x0000050b | ||
383 | #define QE_HPAC_START_TX 0x0000060b | ||
384 | #define QE_HPAC_START_RX 0x0000070b | ||
385 | #define QE_USB_STOP_TX 0x0000000a | ||
386 | #define QE_USB_RESTART_TX 0x0000000c | ||
387 | #define QE_QMC_STOP_TX 0x0000000c | ||
388 | #define QE_QMC_STOP_RX 0x0000000d | ||
389 | #define QE_SS7_SU_FIL_RESET 0x0000000e | ||
390 | /* jonathbr added from here down for 83xx */ | ||
391 | #define QE_RESET_BCS 0x0000000a | ||
392 | #define QE_MCC_INIT_TX_RX_16 0x00000003 | ||
393 | #define QE_MCC_STOP_TX 0x00000004 | ||
394 | #define QE_MCC_INIT_TX_1 0x00000005 | ||
395 | #define QE_MCC_INIT_RX_1 0x00000006 | ||
396 | #define QE_MCC_RESET 0x00000007 | ||
397 | #define QE_SET_TIMER 0x00000008 | ||
398 | #define QE_RANDOM_NUMBER 0x0000000c | ||
399 | #define QE_ATM_MULTI_THREAD_INIT 0x00000011 | ||
400 | #define QE_ASSIGN_PAGE 0x00000012 | ||
401 | #define QE_ADD_REMOVE_HASH_ENTRY 0x00000013 | ||
402 | #define QE_START_FLOW_CONTROL 0x00000014 | ||
403 | #define QE_STOP_FLOW_CONTROL 0x00000015 | ||
404 | #define QE_ASSIGN_PAGE_TO_DEVICE 0x00000016 | ||
405 | |||
406 | #define QE_ASSIGN_RISC 0x00000010 | ||
407 | #define QE_CR_MCN_NORMAL_SHIFT 6 | ||
408 | #define QE_CR_MCN_USB_SHIFT 4 | ||
409 | #define QE_CR_MCN_RISC_ASSIGN_SHIFT 8 | ||
410 | #define QE_CR_SNUM_SHIFT 17 | ||
411 | |||
412 | /* QE CECR Sub Block - sub block of QE command. | ||
413 | */ | ||
414 | #define QE_CR_SUBBLOCK_INVALID 0x00000000 | ||
415 | #define QE_CR_SUBBLOCK_USB 0x03200000 | ||
416 | #define QE_CR_SUBBLOCK_UCCFAST1 0x02000000 | ||
417 | #define QE_CR_SUBBLOCK_UCCFAST2 0x02200000 | ||
418 | #define QE_CR_SUBBLOCK_UCCFAST3 0x02400000 | ||
419 | #define QE_CR_SUBBLOCK_UCCFAST4 0x02600000 | ||
420 | #define QE_CR_SUBBLOCK_UCCFAST5 0x02800000 | ||
421 | #define QE_CR_SUBBLOCK_UCCFAST6 0x02a00000 | ||
422 | #define QE_CR_SUBBLOCK_UCCFAST7 0x02c00000 | ||
423 | #define QE_CR_SUBBLOCK_UCCFAST8 0x02e00000 | ||
424 | #define QE_CR_SUBBLOCK_UCCSLOW1 0x00000000 | ||
425 | #define QE_CR_SUBBLOCK_UCCSLOW2 0x00200000 | ||
426 | #define QE_CR_SUBBLOCK_UCCSLOW3 0x00400000 | ||
427 | #define QE_CR_SUBBLOCK_UCCSLOW4 0x00600000 | ||
428 | #define QE_CR_SUBBLOCK_UCCSLOW5 0x00800000 | ||
429 | #define QE_CR_SUBBLOCK_UCCSLOW6 0x00a00000 | ||
430 | #define QE_CR_SUBBLOCK_UCCSLOW7 0x00c00000 | ||
431 | #define QE_CR_SUBBLOCK_UCCSLOW8 0x00e00000 | ||
432 | #define QE_CR_SUBBLOCK_MCC1 0x03800000 | ||
433 | #define QE_CR_SUBBLOCK_MCC2 0x03a00000 | ||
434 | #define QE_CR_SUBBLOCK_MCC3 0x03000000 | ||
435 | #define QE_CR_SUBBLOCK_IDMA1 0x02800000 | ||
436 | #define QE_CR_SUBBLOCK_IDMA2 0x02a00000 | ||
437 | #define QE_CR_SUBBLOCK_IDMA3 0x02c00000 | ||
438 | #define QE_CR_SUBBLOCK_IDMA4 0x02e00000 | ||
439 | #define QE_CR_SUBBLOCK_HPAC 0x01e00000 | ||
440 | #define QE_CR_SUBBLOCK_SPI1 0x01400000 | ||
441 | #define QE_CR_SUBBLOCK_SPI2 0x01600000 | ||
442 | #define QE_CR_SUBBLOCK_RAND 0x01c00000 | ||
443 | #define QE_CR_SUBBLOCK_TIMER 0x01e00000 | ||
444 | #define QE_CR_SUBBLOCK_GENERAL 0x03c00000 | ||
445 | |||
446 | /* QE CECR Protocol - For non-MCC, specifies mode for QE CECR command */ | ||
447 | #define QE_CR_PROTOCOL_UNSPECIFIED 0x00 /* For all other protocols */ | ||
448 | #define QE_CR_PROTOCOL_HDLC_TRANSPARENT 0x00 | ||
449 | #define QE_CR_PROTOCOL_QMC 0x02 | ||
450 | #define QE_CR_PROTOCOL_UART 0x04 | ||
451 | #define QE_CR_PROTOCOL_ATM_POS 0x0A | ||
452 | #define QE_CR_PROTOCOL_ETHERNET 0x0C | ||
453 | #define QE_CR_PROTOCOL_L2_SWITCH 0x0D | ||
454 | |||
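Tying the groups of constants above together: qe_issue_cmd() takes an opcode, a sub-block and a protocol, and drivers combine them along these lines (a hedged sketch; the choice of UCC and protocol is illustrative):

	/* gracefully stop transmit on fast UCC 1 running Ethernet */
	ret = qe_issue_cmd(QE_GRACEFUL_STOP_TX, QE_CR_SUBBLOCK_UCCFAST1,
			   QE_CR_PROTOCOL_ETHERNET, 0);	/* cmd_input unused here */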
455 | /* BRG configuration register */ | ||
456 | #define QE_BRGC_ENABLE 0x00010000 | ||
457 | #define QE_BRGC_DIVISOR_SHIFT 1 | ||
458 | #define QE_BRGC_DIVISOR_MAX 0xFFF | ||
459 | #define QE_BRGC_DIV16 1 | ||
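The BRGC fields above are normally programmed through qe_setbrg(); roughly, a divisor is derived from the BRG input clock and the desired rate, falling back to the /16 prescaler when it overflows the 12-bit field. A sketch of that arithmetic (illustrative, not the kernel's exact implementation):

	static u32 example_brgc(unsigned int brg_clk, unsigned int rate)
	{
		unsigned int div = brg_clk / rate;
		u32 val = QE_BRGC_ENABLE;

		if (div > QE_BRGC_DIVISOR_MAX + 1) {	/* 12-bit field overflow */
			div /= 16;
			val |= QE_BRGC_DIV16;
		}
		return val | ((div - 1) << QE_BRGC_DIVISOR_SHIFT);
	}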
460 | |||
461 | /* QE Timers registers */ | ||
462 | #define QE_GTCFR1_PCAS 0x80 | ||
463 | #define QE_GTCFR1_STP2 0x20 | ||
464 | #define QE_GTCFR1_RST2 0x10 | ||
465 | #define QE_GTCFR1_GM2 0x08 | ||
466 | #define QE_GTCFR1_GM1 0x04 | ||
467 | #define QE_GTCFR1_STP1 0x02 | ||
468 | #define QE_GTCFR1_RST1 0x01 | ||
469 | |||
470 | /* SDMA registers */ | ||
471 | #define QE_SDSR_BER1 0x02000000 | ||
472 | #define QE_SDSR_BER2 0x01000000 | ||
473 | |||
474 | #define QE_SDMR_GLB_1_MSK 0x80000000 | ||
475 | #define QE_SDMR_ADR_SEL 0x20000000 | ||
476 | #define QE_SDMR_BER1_MSK 0x02000000 | ||
477 | #define QE_SDMR_BER2_MSK 0x01000000 | ||
478 | #define QE_SDMR_EB1_MSK 0x00800000 | ||
479 | #define QE_SDMR_ER1_MSK 0x00080000 | ||
480 | #define QE_SDMR_ER2_MSK 0x00040000 | ||
481 | #define QE_SDMR_CEN_MASK 0x0000E000 | ||
482 | #define QE_SDMR_SBER_1 0x00000200 | ||
483 | #define QE_SDMR_SBER_2 0x00000200 | ||
484 | #define QE_SDMR_EB1_PR_MASK 0x000000C0 | ||
485 | #define QE_SDMR_ER1_PR 0x00000008 | ||
486 | |||
487 | #define QE_SDMR_CEN_SHIFT 13 | ||
488 | #define QE_SDMR_EB1_PR_SHIFT 6 | ||
489 | |||
490 | #define QE_SDTM_MSNUM_SHIFT 24 | ||
491 | |||
492 | #define QE_SDEBCR_BA_MASK 0x01FFFFFF | ||
493 | |||
494 | /* Communication Processor */ | ||
495 | #define QE_CP_CERCR_MEE 0x8000 /* Multi-user RAM ECC enable */ | ||
496 | #define QE_CP_CERCR_IEE 0x4000 /* Instruction RAM ECC enable */ | ||
497 | #define QE_CP_CERCR_CIR 0x0800 /* Common instruction RAM */ | ||
498 | |||
499 | /* I-RAM */ | ||
500 | #define QE_IRAM_IADD_AIE 0x80000000 /* Auto Increment Enable */ | ||
501 | #define QE_IRAM_IADD_BADDR 0x00080000 /* Base Address */ | ||
502 | #define QE_IRAM_READY 0x80000000 /* Ready */ | ||
503 | |||
504 | /* UPC */ | ||
505 | #define UPGCR_PROTOCOL 0x80000000 /* protocol ul2 or pl2 */ | ||
506 | #define UPGCR_TMS 0x40000000 /* Transmit master/slave mode */ | ||
507 | #define UPGCR_RMS 0x20000000 /* Receive master/slave mode */ | ||
508 | #define UPGCR_ADDR 0x10000000 /* Master MPHY Addr multiplexing */ | ||
509 | #define UPGCR_DIAG 0x01000000 /* Diagnostic mode */ | ||
510 | |||
511 | /* UCC GUEMR register */ | ||
512 | #define UCC_GUEMR_MODE_MASK_RX 0x02 | ||
513 | #define UCC_GUEMR_MODE_FAST_RX 0x02 | ||
514 | #define UCC_GUEMR_MODE_SLOW_RX 0x00 | ||
515 | #define UCC_GUEMR_MODE_MASK_TX 0x01 | ||
516 | #define UCC_GUEMR_MODE_FAST_TX 0x01 | ||
517 | #define UCC_GUEMR_MODE_SLOW_TX 0x00 | ||
518 | #define UCC_GUEMR_MODE_MASK (UCC_GUEMR_MODE_MASK_RX | UCC_GUEMR_MODE_MASK_TX) | ||
519 | #define UCC_GUEMR_SET_RESERVED3 0x10 /* Bit 3 in the guemr is reserved but | ||
520 | must be set to 1 */ | ||
521 | |||
522 | /* structure representing UCC SLOW parameter RAM */ | ||
523 | struct ucc_slow_pram { | ||
524 | __be16 rbase; /* RX BD base address */ | ||
525 | __be16 tbase; /* TX BD base address */ | ||
526 | u8 rbmr; /* RX bus mode register (same as CPM's RFCR) */ | ||
527 | u8 tbmr; /* TX bus mode register (same as CPM's TFCR) */ | ||
528 | __be16 mrblr; /* Rx buffer length */ | ||
529 | __be32 rstate; /* Rx internal state */ | ||
530 | __be32 rptr; /* Rx internal data pointer */ | ||
531 | __be16 rbptr; /* Rx BD pointer */ | ||
532 | __be16 rcount; /* Rx internal byte count */ | ||
533 | __be32 rtemp; /* Rx temp */ | ||
534 | __be32 tstate; /* Tx internal state */ | ||
535 | __be32 tptr; /* Tx internal data pointer */ | ||
536 | __be16 tbptr; /* Tx BD pointer */ | ||
537 | __be16 tcount; /* Tx byte count */ | ||
538 | __be32 ttemp; /* Tx temp */ | ||
539 | __be32 rcrc; /* temp receive CRC */ | ||
540 | __be32 tcrc; /* temp transmit CRC */ | ||
541 | } __attribute__ ((packed)); | ||
542 | |||
543 | /* General UCC SLOW Mode Register (GUMRH & GUMRL) */ | ||
544 | #define UCC_SLOW_GUMR_H_SAM_QMC 0x00000000 | ||
545 | #define UCC_SLOW_GUMR_H_SAM_SATM 0x00008000 | ||
546 | #define UCC_SLOW_GUMR_H_REVD 0x00002000 | ||
547 | #define UCC_SLOW_GUMR_H_TRX 0x00001000 | ||
548 | #define UCC_SLOW_GUMR_H_TTX 0x00000800 | ||
549 | #define UCC_SLOW_GUMR_H_CDP 0x00000400 | ||
550 | #define UCC_SLOW_GUMR_H_CTSP 0x00000200 | ||
551 | #define UCC_SLOW_GUMR_H_CDS 0x00000100 | ||
552 | #define UCC_SLOW_GUMR_H_CTSS 0x00000080 | ||
553 | #define UCC_SLOW_GUMR_H_TFL 0x00000040 | ||
554 | #define UCC_SLOW_GUMR_H_RFW 0x00000020 | ||
555 | #define UCC_SLOW_GUMR_H_TXSY 0x00000010 | ||
556 | #define UCC_SLOW_GUMR_H_4SYNC 0x00000004 | ||
557 | #define UCC_SLOW_GUMR_H_8SYNC 0x00000008 | ||
558 | #define UCC_SLOW_GUMR_H_16SYNC 0x0000000c | ||
559 | #define UCC_SLOW_GUMR_H_RTSM 0x00000002 | ||
560 | #define UCC_SLOW_GUMR_H_RSYN 0x00000001 | ||
561 | |||
562 | #define UCC_SLOW_GUMR_L_TCI 0x10000000 | ||
563 | #define UCC_SLOW_GUMR_L_RINV 0x02000000 | ||
564 | #define UCC_SLOW_GUMR_L_TINV 0x01000000 | ||
565 | #define UCC_SLOW_GUMR_L_TEND 0x00040000 | ||
566 | #define UCC_SLOW_GUMR_L_TDCR_MASK 0x00030000 | ||
567 | #define UCC_SLOW_GUMR_L_TDCR_32 0x00030000 | ||
568 | #define UCC_SLOW_GUMR_L_TDCR_16 0x00020000 | ||
569 | #define UCC_SLOW_GUMR_L_TDCR_8 0x00010000 | ||
570 | #define UCC_SLOW_GUMR_L_TDCR_1 0x00000000 | ||
571 | #define UCC_SLOW_GUMR_L_RDCR_MASK 0x0000c000 | ||
572 | #define UCC_SLOW_GUMR_L_RDCR_32 0x0000c000 | ||
573 | #define UCC_SLOW_GUMR_L_RDCR_16 0x00008000 | ||
574 | #define UCC_SLOW_GUMR_L_RDCR_8 0x00004000 | ||
575 | #define UCC_SLOW_GUMR_L_RDCR_1 0x00000000 | ||
576 | #define UCC_SLOW_GUMR_L_RENC_NRZI 0x00000800 | ||
577 | #define UCC_SLOW_GUMR_L_RENC_NRZ 0x00000000 | ||
578 | #define UCC_SLOW_GUMR_L_TENC_NRZI 0x00000100 | ||
579 | #define UCC_SLOW_GUMR_L_TENC_NRZ 0x00000000 | ||
580 | #define UCC_SLOW_GUMR_L_DIAG_MASK 0x000000c0 | ||
581 | #define UCC_SLOW_GUMR_L_DIAG_LE 0x000000c0 | ||
582 | #define UCC_SLOW_GUMR_L_DIAG_ECHO 0x00000080 | ||
583 | #define UCC_SLOW_GUMR_L_DIAG_LOOP 0x00000040 | ||
584 | #define UCC_SLOW_GUMR_L_DIAG_NORM 0x00000000 | ||
585 | #define UCC_SLOW_GUMR_L_ENR 0x00000020 | ||
586 | #define UCC_SLOW_GUMR_L_ENT 0x00000010 | ||
587 | #define UCC_SLOW_GUMR_L_MODE_MASK 0x0000000F | ||
588 | #define UCC_SLOW_GUMR_L_MODE_BISYNC 0x00000008 | ||
589 | #define UCC_SLOW_GUMR_L_MODE_AHDLC 0x00000006 | ||
590 | #define UCC_SLOW_GUMR_L_MODE_UART 0x00000004 | ||
591 | #define UCC_SLOW_GUMR_L_MODE_QMC 0x00000002 | ||
592 | |||
593 | /* General UCC FAST Mode Register */ | ||
594 | #define UCC_FAST_GUMR_TCI 0x20000000 | ||
595 | #define UCC_FAST_GUMR_TRX 0x10000000 | ||
596 | #define UCC_FAST_GUMR_TTX 0x08000000 | ||
597 | #define UCC_FAST_GUMR_CDP 0x04000000 | ||
598 | #define UCC_FAST_GUMR_CTSP 0x02000000 | ||
599 | #define UCC_FAST_GUMR_CDS 0x01000000 | ||
600 | #define UCC_FAST_GUMR_CTSS 0x00800000 | ||
601 | #define UCC_FAST_GUMR_TXSY 0x00020000 | ||
602 | #define UCC_FAST_GUMR_RSYN 0x00010000 | ||
603 | #define UCC_FAST_GUMR_RTSM 0x00002000 | ||
604 | #define UCC_FAST_GUMR_REVD 0x00000400 | ||
605 | #define UCC_FAST_GUMR_ENR 0x00000020 | ||
606 | #define UCC_FAST_GUMR_ENT 0x00000010 | ||
607 | |||
608 | /* UART Slow UCC Event Register (UCCE) */ | ||
609 | #define UCC_UART_UCCE_AB 0x0200 | ||
610 | #define UCC_UART_UCCE_IDLE 0x0100 | ||
611 | #define UCC_UART_UCCE_GRA 0x0080 | ||
612 | #define UCC_UART_UCCE_BRKE 0x0040 | ||
613 | #define UCC_UART_UCCE_BRKS 0x0020 | ||
614 | #define UCC_UART_UCCE_CCR 0x0008 | ||
615 | #define UCC_UART_UCCE_BSY 0x0004 | ||
616 | #define UCC_UART_UCCE_TX 0x0002 | ||
617 | #define UCC_UART_UCCE_RX 0x0001 | ||
618 | |||
619 | /* HDLC Slow UCC Event Register (UCCE) */ | ||
620 | #define UCC_HDLC_UCCE_GLR 0x1000 | ||
621 | #define UCC_HDLC_UCCE_GLT 0x0800 | ||
622 | #define UCC_HDLC_UCCE_IDLE 0x0100 | ||
623 | #define UCC_HDLC_UCCE_BRKE 0x0040 | ||
624 | #define UCC_HDLC_UCCE_BRKS 0x0020 | ||
625 | #define UCC_HDLC_UCCE_TXE 0x0010 | ||
626 | #define UCC_HDLC_UCCE_RXF 0x0008 | ||
627 | #define UCC_HDLC_UCCE_BSY 0x0004 | ||
628 | #define UCC_HDLC_UCCE_TXB 0x0002 | ||
629 | #define UCC_HDLC_UCCE_RXB 0x0001 | ||
630 | |||
631 | /* BISYNC Slow UCC Event Register (UCCE) */ | ||
632 | #define UCC_BISYNC_UCCE_GRA 0x0080 | ||
633 | #define UCC_BISYNC_UCCE_TXE 0x0010 | ||
634 | #define UCC_BISYNC_UCCE_RCH 0x0008 | ||
635 | #define UCC_BISYNC_UCCE_BSY 0x0004 | ||
636 | #define UCC_BISYNC_UCCE_TXB 0x0002 | ||
637 | #define UCC_BISYNC_UCCE_RXB 0x0001 | ||
638 | |||
639 | /* Gigabit Ethernet Fast UCC Event Register (UCCE) */ | ||
640 | #define UCC_GETH_UCCE_MPD 0x80000000 | ||
641 | #define UCC_GETH_UCCE_SCAR 0x40000000 | ||
642 | #define UCC_GETH_UCCE_GRA 0x20000000 | ||
643 | #define UCC_GETH_UCCE_CBPR 0x10000000 | ||
644 | #define UCC_GETH_UCCE_BSY 0x08000000 | ||
645 | #define UCC_GETH_UCCE_RXC 0x04000000 | ||
646 | #define UCC_GETH_UCCE_TXC 0x02000000 | ||
647 | #define UCC_GETH_UCCE_TXE 0x01000000 | ||
648 | #define UCC_GETH_UCCE_TXB7 0x00800000 | ||
649 | #define UCC_GETH_UCCE_TXB6 0x00400000 | ||
650 | #define UCC_GETH_UCCE_TXB5 0x00200000 | ||
651 | #define UCC_GETH_UCCE_TXB4 0x00100000 | ||
652 | #define UCC_GETH_UCCE_TXB3 0x00080000 | ||
653 | #define UCC_GETH_UCCE_TXB2 0x00040000 | ||
654 | #define UCC_GETH_UCCE_TXB1 0x00020000 | ||
655 | #define UCC_GETH_UCCE_TXB0 0x00010000 | ||
656 | #define UCC_GETH_UCCE_RXB7 0x00008000 | ||
657 | #define UCC_GETH_UCCE_RXB6 0x00004000 | ||
658 | #define UCC_GETH_UCCE_RXB5 0x00002000 | ||
659 | #define UCC_GETH_UCCE_RXB4 0x00001000 | ||
660 | #define UCC_GETH_UCCE_RXB3 0x00000800 | ||
661 | #define UCC_GETH_UCCE_RXB2 0x00000400 | ||
662 | #define UCC_GETH_UCCE_RXB1 0x00000200 | ||
663 | #define UCC_GETH_UCCE_RXB0 0x00000100 | ||
664 | #define UCC_GETH_UCCE_RXF7 0x00000080 | ||
665 | #define UCC_GETH_UCCE_RXF6 0x00000040 | ||
666 | #define UCC_GETH_UCCE_RXF5 0x00000020 | ||
667 | #define UCC_GETH_UCCE_RXF4 0x00000010 | ||
668 | #define UCC_GETH_UCCE_RXF3 0x00000008 | ||
669 | #define UCC_GETH_UCCE_RXF2 0x00000004 | ||
670 | #define UCC_GETH_UCCE_RXF1 0x00000002 | ||
671 | #define UCC_GETH_UCCE_RXF0 0x00000001 | ||
672 | |||
673 | /* UCC Protocol Specific Mode Register (UPSMR), when used for UART */ | ||
674 | #define UCC_UART_UPSMR_FLC 0x8000 | ||
675 | #define UCC_UART_UPSMR_SL 0x4000 | ||
676 | #define UCC_UART_UPSMR_CL_MASK 0x3000 | ||
677 | #define UCC_UART_UPSMR_CL_8 0x3000 | ||
678 | #define UCC_UART_UPSMR_CL_7 0x2000 | ||
679 | #define UCC_UART_UPSMR_CL_6 0x1000 | ||
680 | #define UCC_UART_UPSMR_CL_5 0x0000 | ||
681 | #define UCC_UART_UPSMR_UM_MASK 0x0c00 | ||
682 | #define UCC_UART_UPSMR_UM_NORMAL 0x0000 | ||
683 | #define UCC_UART_UPSMR_UM_MAN_MULTI 0x0400 | ||
684 | #define UCC_UART_UPSMR_UM_AUTO_MULTI 0x0c00 | ||
685 | #define UCC_UART_UPSMR_FRZ 0x0200 | ||
686 | #define UCC_UART_UPSMR_RZS 0x0100 | ||
687 | #define UCC_UART_UPSMR_SYN 0x0080 | ||
688 | #define UCC_UART_UPSMR_DRT 0x0040 | ||
689 | #define UCC_UART_UPSMR_PEN 0x0010 | ||
690 | #define UCC_UART_UPSMR_RPM_MASK 0x000c | ||
691 | #define UCC_UART_UPSMR_RPM_ODD 0x0000 | ||
692 | #define UCC_UART_UPSMR_RPM_LOW 0x0004 | ||
693 | #define UCC_UART_UPSMR_RPM_EVEN 0x0008 | ||
694 | #define UCC_UART_UPSMR_RPM_HIGH 0x000C | ||
695 | #define UCC_UART_UPSMR_TPM_MASK 0x0003 | ||
696 | #define UCC_UART_UPSMR_TPM_ODD 0x0000 | ||
697 | #define UCC_UART_UPSMR_TPM_LOW 0x0001 | ||
698 | #define UCC_UART_UPSMR_TPM_EVEN 0x0002 | ||
699 | #define UCC_UART_UPSMR_TPM_HIGH 0x0003 | ||
700 | |||
701 | /* UCC Protocol Specific Mode Register (UPSMR), when used for Ethernet */ | ||
702 | #define UCC_GETH_UPSMR_FTFE 0x80000000 | ||
703 | #define UCC_GETH_UPSMR_PTPE 0x40000000 | ||
704 | #define UCC_GETH_UPSMR_ECM 0x04000000 | ||
705 | #define UCC_GETH_UPSMR_HSE 0x02000000 | ||
706 | #define UCC_GETH_UPSMR_PRO 0x00400000 | ||
707 | #define UCC_GETH_UPSMR_CAP 0x00200000 | ||
708 | #define UCC_GETH_UPSMR_RSH 0x00100000 | ||
709 | #define UCC_GETH_UPSMR_RPM 0x00080000 | ||
710 | #define UCC_GETH_UPSMR_R10M 0x00040000 | ||
711 | #define UCC_GETH_UPSMR_RLPB 0x00020000 | ||
712 | #define UCC_GETH_UPSMR_TBIM 0x00010000 | ||
713 | #define UCC_GETH_UPSMR_RES1 0x00002000 | ||
714 | #define UCC_GETH_UPSMR_RMM 0x00001000 | ||
715 | #define UCC_GETH_UPSMR_CAM 0x00000400 | ||
716 | #define UCC_GETH_UPSMR_BRO 0x00000200 | ||
717 | #define UCC_GETH_UPSMR_SMM 0x00000080 | ||
718 | #define UCC_GETH_UPSMR_SGMM 0x00000020 | ||
719 | |||
720 | /* UCC Transmit On Demand Register (UTODR) */ | ||
721 | #define UCC_SLOW_TOD 0x8000 | ||
722 | #define UCC_FAST_TOD 0x8000 | ||
723 | |||
724 | /* UCC Bus Mode Register masks */ | ||
725 | /* Not to be confused with the Bundle Mode Register */ | ||
726 | #define UCC_BMR_GBL 0x20 | ||
727 | #define UCC_BMR_BO_BE 0x10 | ||
728 | #define UCC_BMR_CETM 0x04 | ||
729 | #define UCC_BMR_DTB 0x02 | ||
730 | #define UCC_BMR_BDB 0x01 | ||
731 | |||
732 | /* Function code masks */ | ||
733 | #define FC_GBL 0x20 | ||
734 | #define FC_DTB_LCL 0x02 | ||
735 | #define UCC_FAST_FUNCTION_CODE_GBL 0x20 | ||
736 | #define UCC_FAST_FUNCTION_CODE_DTB_LCL 0x02 | ||
737 | #define UCC_FAST_FUNCTION_CODE_BDB_LCL 0x01 | ||
738 | |||
739 | #endif /* __KERNEL__ */ | ||
740 | #endif /* _ASM_POWERPC_QE_H */ | ||
diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h
deleted file mode 100644
index 1e155ca6d33c..000000000000
--- a/arch/powerpc/include/asm/qe_ic.h
+++ /dev/null
@@ -1,139 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Authors: Shlomi Gridish <gridish@freescale.com> | ||
5 | * Li Yang <leoli@freescale.com> | ||
6 | * | ||
7 | * Description: | ||
8 | * QE IC external definitions and structure. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | */ | ||
15 | #ifndef _ASM_POWERPC_QE_IC_H | ||
16 | #define _ASM_POWERPC_QE_IC_H | ||
17 | |||
18 | #include <linux/irq.h> | ||
19 | |||
20 | struct device_node; | ||
21 | struct qe_ic; | ||
22 | |||
23 | #define NUM_OF_QE_IC_GROUPS 6 | ||
24 | |||
25 | /* Flags when we init the QE IC */ | ||
26 | #define QE_IC_SPREADMODE_GRP_W 0x00000001 | ||
27 | #define QE_IC_SPREADMODE_GRP_X 0x00000002 | ||
28 | #define QE_IC_SPREADMODE_GRP_Y 0x00000004 | ||
29 | #define QE_IC_SPREADMODE_GRP_Z 0x00000008 | ||
30 | #define QE_IC_SPREADMODE_GRP_RISCA 0x00000010 | ||
31 | #define QE_IC_SPREADMODE_GRP_RISCB 0x00000020 | ||
32 | |||
33 | #define QE_IC_LOW_SIGNAL 0x00000100 | ||
34 | #define QE_IC_HIGH_SIGNAL 0x00000200 | ||
35 | |||
36 | #define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000 | ||
37 | #define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000 | ||
38 | #define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000 | ||
39 | #define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000 | ||
40 | #define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000 | ||
41 | #define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000 | ||
42 | #define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000 | ||
43 | #define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000 | ||
44 | #define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000 | ||
45 | #define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000 | ||
46 | #define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000 | ||
47 | #define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000 | ||
48 | #define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12) | ||
49 | |||
50 | /* QE interrupt sources groups */ | ||
51 | enum qe_ic_grp_id { | ||
52 | QE_IC_GRP_W = 0, /* QE interrupt controller group W */ | ||
53 | QE_IC_GRP_X, /* QE interrupt controller group X */ | ||
54 | QE_IC_GRP_Y, /* QE interrupt controller group Y */ | ||
55 | QE_IC_GRP_Z, /* QE interrupt controller group Z */ | ||
56 | QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */ | ||
57 | QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */ | ||
58 | }; | ||
59 | |||
60 | #ifdef CONFIG_QUICC_ENGINE | ||
61 | void qe_ic_init(struct device_node *node, unsigned int flags, | ||
62 | void (*low_handler)(struct irq_desc *desc), | ||
63 | void (*high_handler)(struct irq_desc *desc)); | ||
64 | unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic); | ||
65 | unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic); | ||
66 | #else | ||
67 | static inline void qe_ic_init(struct device_node *node, unsigned int flags, | ||
68 | void (*low_handler)(struct irq_desc *desc), | ||
69 | void (*high_handler)(struct irq_desc *desc)) | ||
70 | {} | ||
71 | static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) | ||
72 | { return 0; } | ||
73 | static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) | ||
74 | { return 0; } | ||
75 | #endif /* CONFIG_QUICC_ENGINE */ | ||
76 | |||
77 | void qe_ic_set_highest_priority(unsigned int virq, int high); | ||
78 | int qe_ic_set_priority(unsigned int virq, unsigned int priority); | ||
79 | int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high); | ||
80 | |||
81 | static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc) | ||
82 | { | ||
83 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); | ||
84 | unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); | ||
85 | |||
86 | if (cascade_irq != NO_IRQ) | ||
87 | generic_handle_irq(cascade_irq); | ||
88 | } | ||
89 | |||
90 | static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc) | ||
91 | { | ||
92 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); | ||
93 | unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); | ||
94 | |||
95 | if (cascade_irq != NO_IRQ) | ||
96 | generic_handle_irq(cascade_irq); | ||
97 | } | ||
98 | |||
99 | static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc) | ||
100 | { | ||
101 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); | ||
102 | unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); | ||
103 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
104 | |||
105 | if (cascade_irq != NO_IRQ) | ||
106 | generic_handle_irq(cascade_irq); | ||
107 | |||
108 | chip->irq_eoi(&desc->irq_data); | ||
109 | } | ||
110 | |||
111 | static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc) | ||
112 | { | ||
113 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); | ||
114 | unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); | ||
115 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
116 | |||
117 | if (cascade_irq != NO_IRQ) | ||
118 | generic_handle_irq(cascade_irq); | ||
119 | |||
120 | chip->irq_eoi(&desc->irq_data); | ||
121 | } | ||
122 | |||
123 | static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc) | ||
124 | { | ||
125 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); | ||
126 | unsigned int cascade_irq; | ||
127 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
128 | |||
129 | cascade_irq = qe_ic_get_high_irq(qe_ic); | ||
130 | if (cascade_irq == NO_IRQ) | ||
131 | cascade_irq = qe_ic_get_low_irq(qe_ic); | ||
132 | |||
133 | if (cascade_irq != NO_IRQ) | ||
134 | generic_handle_irq(cascade_irq); | ||
135 | |||
136 | chip->irq_eoi(&desc->irq_data); | ||
137 | } | ||
138 | |||
139 | #endif /* _ASM_POWERPC_QE_IC_H */ | ||
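For context, platform setup code typically locates the QE IC node and hands a pair of the cascade handlers above to qe_ic_init(); a hedged sketch of that pattern (compatible string as used by existing device trees):

	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
	if (np) {
		qe_ic_init(np, 0, qe_ic_cascade_low_mpic, qe_ic_cascade_high_mpic);
		of_node_put(np);
	}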
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 2220f7a60def..c4cb2ffc624e 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1194,12 +1194,20 @@ | |||
1194 | #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \ | 1194 | #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \ |
1195 | : : "r" (v) : "memory") | 1195 | : : "r" (v) : "memory") |
1196 | #define mtmsr(v) __mtmsrd((v), 0) | 1196 | #define mtmsr(v) __mtmsrd((v), 0) |
1197 | #define __MTMSR "mtmsrd" | ||
1197 | #else | 1198 | #else |
1198 | #define mtmsr(v) asm volatile("mtmsr %0" : \ | 1199 | #define mtmsr(v) asm volatile("mtmsr %0" : \ |
1199 | : "r" ((unsigned long)(v)) \ | 1200 | : "r" ((unsigned long)(v)) \ |
1200 | : "memory") | 1201 | : "memory") |
1202 | #define __MTMSR "mtmsr" | ||
1201 | #endif | 1203 | #endif |
1202 | 1204 | ||
1205 | static inline void mtmsr_isync(unsigned long val) | ||
1206 | { | ||
1207 | asm volatile(__MTMSR " %0; " ASM_FTR_IFCLR("isync", "nop", %1) : : | ||
1208 | "r" (val), "i" (CPU_FTR_ARCH_206) : "memory"); | ||
1209 | } | ||
1210 | |||
1203 | #define mfspr(rn) ({unsigned long rval; \ | 1211 | #define mfspr(rn) ({unsigned long rval; \ |
1204 | asm volatile("mfspr %0," __stringify(rn) \ | 1212 | asm volatile("mfspr %0," __stringify(rn) \ |
1205 | : "=r" (rval)); rval;}) | 1213 | : "=r" (rval)); rval;}) |
@@ -1207,6 +1215,15 @@ | |||
1207 | : "r" ((unsigned long)(v)) \ | 1215 | : "r" ((unsigned long)(v)) \ |
1208 | : "memory") | 1216 | : "memory") |
1209 | 1217 | ||
1218 | extern void msr_check_and_set(unsigned long bits); | ||
1219 | extern bool strict_msr_control; | ||
1220 | extern void __msr_check_and_clear(unsigned long bits); | ||
1221 | static inline void msr_check_and_clear(unsigned long bits) | ||
1222 | { | ||
1223 | if (strict_msr_control) | ||
1224 | __msr_check_and_clear(bits); | ||
1225 | } | ||
1226 | |||
1210 | static inline unsigned long mfvtb (void) | 1227 | static inline unsigned long mfvtb (void) |
1211 | { | 1228 | { |
1212 | #ifdef CONFIG_PPC_BOOK3S_64 | 1229 | #ifdef CONFIG_PPC_BOOK3S_64 |
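A hedged sketch of how the new mtmsr_isync() is meant to be used: the ASM_FTR_IFCLR feature fixup patches the trailing isync to a nop on ISA 2.06 and later CPUs, where the mtmsrd alone is sufficient, so callers can write one sequence for all CPUs:

	unsigned long msr = mfmsr();

	mtmsr_isync(msr | MSR_FP);	/* enable FP use, synchronised */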
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index b77ef369c0f0..51400baa8d48 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -334,10 +334,11 @@ extern void (*rtas_flash_term_hook)(int); | |||
334 | 334 | ||
335 | extern struct rtas_t rtas; | 335 | extern struct rtas_t rtas; |
336 | 336 | ||
337 | extern void enter_rtas(unsigned long); | ||
338 | extern int rtas_token(const char *service); | 337 | extern int rtas_token(const char *service); |
339 | extern int rtas_service_present(const char *service); | 338 | extern int rtas_service_present(const char *service); |
340 | extern int rtas_call(int token, int, int, int *, ...); | 339 | extern int rtas_call(int token, int, int, int *, ...); |
340 | void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, | ||
341 | int nret, ...); | ||
341 | extern void rtas_restart(char *cmd); | 342 | extern void rtas_restart(char *cmd); |
342 | extern void rtas_power_off(void); | 343 | extern void rtas_power_off(void); |
343 | extern void rtas_halt(void); | 344 | extern void rtas_halt(void); |
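Unlike rtas_call(), the new rtas_call_unlocked() fills a caller-supplied rtas_args and does no locking of its own, so the caller must serialise against other RTAS users itself. A minimal sketch (token name illustrative; results come back via the args structure):

	struct rtas_args args;

	rtas_call_unlocked(&args, rtas_token("get-time-of-day"), 0, 8);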
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 15cca17cba4b..5b268b6be74c 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -4,6 +4,8 @@ | |||
4 | #ifndef _ASM_POWERPC_SWITCH_TO_H | 4 | #ifndef _ASM_POWERPC_SWITCH_TO_H |
5 | #define _ASM_POWERPC_SWITCH_TO_H | 5 | #define _ASM_POWERPC_SWITCH_TO_H |
6 | 6 | ||
7 | #include <asm/reg.h> | ||
8 | |||
7 | struct thread_struct; | 9 | struct thread_struct; |
8 | struct task_struct; | 10 | struct task_struct; |
9 | struct pt_regs; | 11 | struct pt_regs; |
@@ -12,74 +14,59 @@ extern struct task_struct *__switch_to(struct task_struct *, | |||
12 | struct task_struct *); | 14 | struct task_struct *); |
13 | #define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) | 15 | #define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) |
14 | 16 | ||
15 | struct thread_struct; | ||
16 | extern struct task_struct *_switch(struct thread_struct *prev, | 17 | extern struct task_struct *_switch(struct thread_struct *prev, |
17 | struct thread_struct *next); | 18 | struct thread_struct *next); |
18 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
19 | static inline void save_early_sprs(struct thread_struct *prev) | ||
20 | { | ||
21 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) | ||
22 | prev->tar = mfspr(SPRN_TAR); | ||
23 | if (cpu_has_feature(CPU_FTR_DSCR)) | ||
24 | prev->dscr = mfspr(SPRN_DSCR); | ||
25 | } | ||
26 | #else | ||
27 | static inline void save_early_sprs(struct thread_struct *prev) {} | ||
28 | #endif | ||
29 | 19 | ||
30 | extern void enable_kernel_fp(void); | ||
31 | extern void enable_kernel_altivec(void); | ||
32 | extern void enable_kernel_vsx(void); | ||
33 | extern int emulate_altivec(struct pt_regs *); | ||
34 | extern void __giveup_vsx(struct task_struct *); | ||
35 | extern void giveup_vsx(struct task_struct *); | ||
36 | extern void enable_kernel_spe(void); | ||
37 | extern void giveup_spe(struct task_struct *); | ||
38 | extern void load_up_spe(struct task_struct *); | ||
39 | extern void switch_booke_debug_regs(struct debug_reg *new_debug); | 20 | extern void switch_booke_debug_regs(struct debug_reg *new_debug); |
40 | 21 | ||
41 | #ifndef CONFIG_SMP | 22 | extern int emulate_altivec(struct pt_regs *); |
42 | extern void discard_lazy_cpu_state(void); | 23 | |
43 | #else | 24 | extern void flush_all_to_thread(struct task_struct *); |
44 | static inline void discard_lazy_cpu_state(void) | 25 | extern void giveup_all(struct task_struct *); |
45 | { | ||
46 | } | ||
47 | #endif | ||
48 | 26 | ||
49 | #ifdef CONFIG_PPC_FPU | 27 | #ifdef CONFIG_PPC_FPU |
28 | extern void enable_kernel_fp(void); | ||
50 | extern void flush_fp_to_thread(struct task_struct *); | 29 | extern void flush_fp_to_thread(struct task_struct *); |
51 | extern void giveup_fpu(struct task_struct *); | 30 | extern void giveup_fpu(struct task_struct *); |
31 | extern void __giveup_fpu(struct task_struct *); | ||
32 | static inline void disable_kernel_fp(void) | ||
33 | { | ||
34 | msr_check_and_clear(MSR_FP); | ||
35 | } | ||
52 | #else | 36 | #else |
53 | static inline void flush_fp_to_thread(struct task_struct *t) { } | 37 | static inline void flush_fp_to_thread(struct task_struct *t) { } |
54 | static inline void giveup_fpu(struct task_struct *t) { } | ||
55 | #endif | 38 | #endif |
56 | 39 | ||
57 | #ifdef CONFIG_ALTIVEC | 40 | #ifdef CONFIG_ALTIVEC |
41 | extern void enable_kernel_altivec(void); | ||
58 | extern void flush_altivec_to_thread(struct task_struct *); | 42 | extern void flush_altivec_to_thread(struct task_struct *); |
59 | extern void giveup_altivec(struct task_struct *); | 43 | extern void giveup_altivec(struct task_struct *); |
60 | extern void giveup_altivec_notask(void); | 44 | extern void __giveup_altivec(struct task_struct *); |
61 | #else | 45 | static inline void disable_kernel_altivec(void) |
62 | static inline void flush_altivec_to_thread(struct task_struct *t) | ||
63 | { | ||
64 | } | ||
65 | static inline void giveup_altivec(struct task_struct *t) | ||
66 | { | 46 | { |
47 | msr_check_and_clear(MSR_VEC); | ||
67 | } | 48 | } |
68 | #endif | 49 | #endif |
69 | 50 | ||
70 | #ifdef CONFIG_VSX | 51 | #ifdef CONFIG_VSX |
52 | extern void enable_kernel_vsx(void); | ||
71 | extern void flush_vsx_to_thread(struct task_struct *); | 53 | extern void flush_vsx_to_thread(struct task_struct *); |
72 | #else | 54 | extern void giveup_vsx(struct task_struct *); |
73 | static inline void flush_vsx_to_thread(struct task_struct *t) | 55 | extern void __giveup_vsx(struct task_struct *); |
56 | static inline void disable_kernel_vsx(void) | ||
74 | { | 57 | { |
58 | msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX); | ||
75 | } | 59 | } |
76 | #endif | 60 | #endif |
77 | 61 | ||
78 | #ifdef CONFIG_SPE | 62 | #ifdef CONFIG_SPE |
63 | extern void enable_kernel_spe(void); | ||
79 | extern void flush_spe_to_thread(struct task_struct *); | 64 | extern void flush_spe_to_thread(struct task_struct *); |
80 | #else | 65 | extern void giveup_spe(struct task_struct *); |
81 | static inline void flush_spe_to_thread(struct task_struct *t) | 66 | extern void __giveup_spe(struct task_struct *); |
67 | static inline void disable_kernel_spe(void) | ||
82 | { | 68 | { |
69 | msr_check_and_clear(MSR_SPE); | ||
83 | } | 70 | } |
84 | #endif | 71 | #endif |
85 | 72 | ||
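The reshuffle above pairs each enable_kernel_*() with a cheap disable_kernel_*() that only clears MSR bits when strict_msr_control is enabled. A hedged sketch of the intended bracket around in-kernel FP use, typically with preemption disabled:

	preempt_disable();
	enable_kernel_fp();
	/* ... code that uses floating-point registers ... */
	disable_kernel_fp();
	preempt_enable();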
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index e682a7143edb..c50868681f9e 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -44,7 +44,7 @@ static inline void isync(void) | |||
44 | MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup); | 44 | MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup); |
45 | #define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER) | 45 | #define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER) |
46 | #define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n" | 46 | #define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n" |
47 | #define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n" | 47 | #define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n" |
48 | #define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n" | 48 | #define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n" |
49 | #else | 49 | #else |
50 | #define PPC_ACQUIRE_BARRIER | 50 | #define PPC_ACQUIRE_BARRIER |
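The change above strengthens the entry barrier of value-returning atomics from lwsync to sync: lwsync does not order earlier stores against later loads, which is too weak for the fully ordered semantics these operations must provide. A sketch of the resulting instruction pattern for an atomic add-return (illustrative):

	/*
	 *	sync			# PPC_ATOMIC_ENTRY_BARRIER (was lwsync)
	 * 1:	lwarx	r9,0,r3
	 *	add	r9,r9,r4
	 *	stwcx.	r9,0,r3
	 *	bne-	1b
	 *	sync			# PPC_ATOMIC_EXIT_BARRIER
	 */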
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 10fc784a2ad4..2d7109a8d296 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -27,7 +27,6 @@ extern struct clock_event_device decrementer_clockevent; | |||
27 | 27 | ||
28 | struct rtc_time; | 28 | struct rtc_time; |
29 | extern void to_tm(int tim, struct rtc_time * tm); | 29 | extern void to_tm(int tim, struct rtc_time * tm); |
30 | extern void GregorianDay(struct rtc_time *tm); | ||
31 | extern void tick_broadcast_ipi_handler(void); | 30 | extern void tick_broadcast_ipi_handler(void); |
32 | 31 | ||
33 | extern void generic_calibrate_decr(void); | 32 | extern void generic_calibrate_decr(void); |
diff --git a/arch/powerpc/include/asm/ucc.h b/arch/powerpc/include/asm/ucc.h
deleted file mode 100644
index 6927ac26516e..000000000000
--- a/arch/powerpc/include/asm/ucc.h
+++ /dev/null
@@ -1,64 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Authors: Shlomi Gridish <gridish@freescale.com> | ||
5 | * Li Yang <leoli@freescale.com> | ||
6 | * | ||
7 | * Description: | ||
8 | * Internal header file for UCC unit routines. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | */ | ||
15 | #ifndef __UCC_H__ | ||
16 | #define __UCC_H__ | ||
17 | |||
18 | #include <asm/immap_qe.h> | ||
19 | #include <asm/qe.h> | ||
20 | |||
21 | #define STATISTICS | ||
22 | |||
23 | #define UCC_MAX_NUM 8 | ||
24 | |||
25 | /* Slow or fast type for UCCs. | ||
26 | */ | ||
27 | enum ucc_speed_type { | ||
28 | UCC_SPEED_TYPE_FAST = UCC_GUEMR_MODE_FAST_RX | UCC_GUEMR_MODE_FAST_TX, | ||
29 | UCC_SPEED_TYPE_SLOW = UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX | ||
30 | }; | ||
31 | |||
32 | /* ucc_set_type | ||
33 | * Sets UCC to slow or fast mode. | ||
34 | * | ||
35 | * ucc_num - (In) number of UCC (0-7). | ||
36 | * speed - (In) slow or fast mode for UCC. | ||
37 | */ | ||
38 | int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed); | ||
39 | |||
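A one-line usage sketch (the UCC number is illustrative; ucc_num is zero-based per the comment above):

	ret = ucc_set_type(1, UCC_SPEED_TYPE_FAST);	/* put UCC2 into fast mode */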
40 | int ucc_set_qe_mux_mii_mng(unsigned int ucc_num); | ||
41 | |||
42 | int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock, | ||
43 | enum comm_dir mode); | ||
44 | |||
45 | int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask); | ||
46 | |||
47 | /* QE MUX clock routing for UCC | ||
48 | */ | ||
49 | static inline int ucc_set_qe_mux_grant(unsigned int ucc_num, int set) | ||
50 | { | ||
51 | return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_GRANT); | ||
52 | } | ||
53 | |||
54 | static inline int ucc_set_qe_mux_tsa(unsigned int ucc_num, int set) | ||
55 | { | ||
56 | return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_TSA); | ||
57 | } | ||
58 | |||
59 | static inline int ucc_set_qe_mux_bkpt(unsigned int ucc_num, int set) | ||
60 | { | ||
61 | return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_BKPT); | ||
62 | } | ||
63 | |||
64 | #endif /* __UCC_H__ */ | ||
diff --git a/arch/powerpc/include/asm/ucc_fast.h b/arch/powerpc/include/asm/ucc_fast.h
deleted file mode 100644
index 72ea9bab07df..000000000000
--- a/arch/powerpc/include/asm/ucc_fast.h
+++ /dev/null
@@ -1,244 +0,0 @@ | |||
1 | /* | ||
2 | * Internal header file for UCC FAST unit routines. | ||
3 | * | ||
4 | * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. | ||
5 | * | ||
6 | * Authors: Shlomi Gridish <gridish@freescale.com> | ||
7 | * Li Yang <leoli@freescale.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the | ||
11 | * Free Software Foundation; either version 2 of the License, or (at your | ||
12 | * option) any later version. | ||
13 | */ | ||
14 | #ifndef __UCC_FAST_H__ | ||
15 | #define __UCC_FAST_H__ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | |||
19 | #include <asm/immap_qe.h> | ||
20 | #include <asm/qe.h> | ||
21 | |||
22 | #include <asm/ucc.h> | ||
23 | |||
24 | /* Receive BD's status */ | ||
25 | #define R_E 0x80000000 /* buffer empty */ | ||
26 | #define R_W 0x20000000 /* wrap bit */ | ||
27 | #define R_I 0x10000000 /* interrupt on reception */ | ||
28 | #define R_L 0x08000000 /* last */ | ||
29 | #define R_F 0x04000000 /* first */ | ||
30 | |||
31 | /* transmit BD's status */ | ||
32 | #define T_R 0x80000000 /* ready bit */ | ||
33 | #define T_W 0x20000000 /* wrap bit */ | ||
34 | #define T_I 0x10000000 /* interrupt on completion */ | ||
35 | #define T_L 0x08000000 /* last */ | ||
36 | |||
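The R_*/T_* flags above live in the high half of the 32-bit status+length word of a struct qe_bd (see qe.h). An illustrative completion check (the helper name is assumed):

	static bool example_bd_tx_done(struct qe_bd __iomem *bd)
	{
		/* T_R clear means the hardware is done with this descriptor */
		return !(in_be32((u32 __iomem *)bd) & T_R);
	}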
37 | /* Rx data buffers must be 4-byte aligned in most cases */ | ||
38 | #define UCC_FAST_RX_ALIGN 4 | ||
39 | #define UCC_FAST_MRBLR_ALIGNMENT 4 | ||
40 | #define UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT 8 | ||
41 | |||
42 | /* Sizes */ | ||
43 | #define UCC_FAST_URFS_MIN_VAL 0x88 | ||
44 | #define UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR 8 | ||
45 | |||
46 | /* ucc_fast_channel_protocol_mode - UCC FAST mode */ | ||
47 | enum ucc_fast_channel_protocol_mode { | ||
48 | UCC_FAST_PROTOCOL_MODE_HDLC = 0x00000000, | ||
49 | UCC_FAST_PROTOCOL_MODE_RESERVED01 = 0x00000001, | ||
50 | UCC_FAST_PROTOCOL_MODE_RESERVED_QMC = 0x00000002, | ||
51 | UCC_FAST_PROTOCOL_MODE_RESERVED02 = 0x00000003, | ||
52 | UCC_FAST_PROTOCOL_MODE_RESERVED_UART = 0x00000004, | ||
53 | UCC_FAST_PROTOCOL_MODE_RESERVED03 = 0x00000005, | ||
54 | UCC_FAST_PROTOCOL_MODE_RESERVED_EX_MAC_1 = 0x00000006, | ||
55 | UCC_FAST_PROTOCOL_MODE_RESERVED_EX_MAC_2 = 0x00000007, | ||
56 | UCC_FAST_PROTOCOL_MODE_RESERVED_BISYNC = 0x00000008, | ||
57 | UCC_FAST_PROTOCOL_MODE_RESERVED04 = 0x00000009, | ||
58 | UCC_FAST_PROTOCOL_MODE_ATM = 0x0000000A, | ||
59 | UCC_FAST_PROTOCOL_MODE_RESERVED05 = 0x0000000B, | ||
60 | UCC_FAST_PROTOCOL_MODE_ETHERNET = 0x0000000C, | ||
61 | UCC_FAST_PROTOCOL_MODE_RESERVED06 = 0x0000000D, | ||
62 | UCC_FAST_PROTOCOL_MODE_POS = 0x0000000E, | ||
63 | UCC_FAST_PROTOCOL_MODE_RESERVED07 = 0x0000000F | ||
64 | }; | ||
65 | |||
66 | /* ucc_fast_transparent_txrx - UCC Fast Transparent TX & RX */ | ||
67 | enum ucc_fast_transparent_txrx { | ||
68 | UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL = 0x00000000, | ||
69 | UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_TRANSPARENT = 0x18000000 | ||
70 | }; | ||
71 | |||
72 | /* UCC fast diagnostic mode */ | ||
73 | enum ucc_fast_diag_mode { | ||
74 | UCC_FAST_DIAGNOSTIC_NORMAL = 0x0, | ||
75 | UCC_FAST_DIAGNOSTIC_LOCAL_LOOP_BACK = 0x40000000, | ||
76 | UCC_FAST_DIAGNOSTIC_AUTO_ECHO = 0x80000000, | ||
77 | UCC_FAST_DIAGNOSTIC_LOOP_BACK_AND_ECHO = 0xC0000000 | ||
78 | }; | ||
79 | |||
80 | /* UCC fast Sync length (transparent mode only) */ | ||
81 | enum ucc_fast_sync_len { | ||
82 | UCC_FAST_SYNC_LEN_NOT_USED = 0x0, | ||
83 | UCC_FAST_SYNC_LEN_AUTOMATIC = 0x00004000, | ||
84 | UCC_FAST_SYNC_LEN_8_BIT = 0x00008000, | ||
85 | UCC_FAST_SYNC_LEN_16_BIT = 0x0000C000 | ||
86 | }; | ||
87 | |||
88 | /* UCC fast RTS mode */ | ||
89 | enum ucc_fast_ready_to_send { | ||
-	UCC_FAST_SEND_IDLES_BETWEEN_FRAMES = 0x00000000,
-	UCC_FAST_SEND_FLAGS_BETWEEN_FRAMES = 0x00002000
-};
-
-/* UCC fast receiver decoding mode */
-enum ucc_fast_rx_decoding_method {
-	UCC_FAST_RX_ENCODING_NRZ = 0x00000000,
-	UCC_FAST_RX_ENCODING_NRZI = 0x00000800,
-	UCC_FAST_RX_ENCODING_RESERVED0 = 0x00001000,
-	UCC_FAST_RX_ENCODING_RESERVED1 = 0x00001800
-};
-
-/* UCC fast transmitter encoding mode */
-enum ucc_fast_tx_encoding_method {
-	UCC_FAST_TX_ENCODING_NRZ = 0x00000000,
-	UCC_FAST_TX_ENCODING_NRZI = 0x00000100,
-	UCC_FAST_TX_ENCODING_RESERVED0 = 0x00000200,
-	UCC_FAST_TX_ENCODING_RESERVED1 = 0x00000300
-};
-
-/* UCC fast CRC length */
-enum ucc_fast_transparent_tcrc {
-	UCC_FAST_16_BIT_CRC = 0x00000000,
-	UCC_FAST_CRC_RESERVED0 = 0x00000040,
-	UCC_FAST_32_BIT_CRC = 0x00000080,
-	UCC_FAST_CRC_RESERVED1 = 0x000000C0
-};
-
-/* Fast UCC initialization structure */
-struct ucc_fast_info {
-	int ucc_num;
-	enum qe_clock rx_clock;
-	enum qe_clock tx_clock;
-	u32 regs;
-	int irq;
-	u32 uccm_mask;
-	int bd_mem_part;
-	int brkpt_support;
-	int grant_support;
-	int tsa;
-	int cdp;
-	int cds;
-	int ctsp;
-	int ctss;
-	int tci;
-	int txsy;
-	int rtsm;
-	int revd;
-	int rsyn;
-	u16 max_rx_buf_length;
-	u16 urfs;
-	u16 urfet;
-	u16 urfset;
-	u16 utfs;
-	u16 utfet;
-	u16 utftt;
-	u16 ufpt;
-	enum ucc_fast_channel_protocol_mode mode;
-	enum ucc_fast_transparent_txrx ttx_trx;
-	enum ucc_fast_tx_encoding_method tenc;
-	enum ucc_fast_rx_decoding_method renc;
-	enum ucc_fast_transparent_tcrc tcrc;
-	enum ucc_fast_sync_len synl;
-};
-
-struct ucc_fast_private {
-	struct ucc_fast_info *uf_info;
-	struct ucc_fast __iomem *uf_regs; /* a pointer to the UCC regs. */
-	u32 __iomem *p_ucce;	/* a pointer to the event register in memory. */
-	u32 __iomem *p_uccm;	/* a pointer to the mask register in memory. */
-#ifdef CONFIG_UGETH_TX_ON_DEMAND
-	u16 __iomem *p_utodr;	/* pointer to the transmit on demand register */
-#endif
-	int enabled_tx;		/* Whether channel is enabled for Tx (ENT) */
-	int enabled_rx;		/* Whether channel is enabled for Rx (ENR) */
-	int stopped_tx;		/* Whether channel has been stopped for Tx
-				   (STOP_TX, etc.) */
-	int stopped_rx;		/* Whether channel has been stopped for Rx */
-	u32 ucc_fast_tx_virtual_fifo_base_offset; /* pointer to base of Tx
-						     virtual fifo */
-	u32 ucc_fast_rx_virtual_fifo_base_offset; /* pointer to base of Rx
-						     virtual fifo */
-#ifdef STATISTICS
-	u32 tx_frames;		/* Transmitted frames counter. */
-	u32 rx_frames;		/* Received frames counter (only frames
-				   passed to application). */
-	u32 tx_discarded;	/* Discarded tx frames counter (frames that
-				   were discarded by the driver due to errors). */
-	u32 rx_discarded;	/* Discarded rx frames counter (frames that
-				   were discarded by the driver due to errors). */
-#endif /* STATISTICS */
-	u16 mrblr;		/* maximum receive buffer length */
-};
-
-/* ucc_fast_init
- * Initializes Fast UCC according to user provided parameters.
- *
- * uf_info  - (In) pointer to the fast UCC info structure.
- * uccf_ret - (Out) pointer to the fast UCC structure.
- */
-int ucc_fast_init(struct ucc_fast_info *uf_info, struct ucc_fast_private **uccf_ret);
-
-/* ucc_fast_free
- * Frees all resources for fast UCC.
- *
- * uccf - (In) pointer to the fast UCC structure.
- */
-void ucc_fast_free(struct ucc_fast_private *uccf);
-
-/* ucc_fast_enable
- * Enables a fast UCC port.
- * This routine enables Tx and/or Rx through the General UCC Mode Register.
- *
- * uccf - (In) pointer to the fast UCC structure.
- * mode - (In) TX, RX, or both.
- */
-void ucc_fast_enable(struct ucc_fast_private *uccf, enum comm_dir mode);
-
-/* ucc_fast_disable
- * Disables a fast UCC port.
- * This routine disables Tx and/or Rx through the General UCC Mode Register.
- *
- * uccf - (In) pointer to the fast UCC structure.
- * mode - (In) TX, RX, or both.
- */
-void ucc_fast_disable(struct ucc_fast_private *uccf, enum comm_dir mode);
-
-/* ucc_fast_irq
- * Handles interrupts on fast UCC.
- * Called from the general interrupt routine to handle interrupts on fast UCC.
- *
- * uccf - (In) pointer to the fast UCC structure.
- */
-void ucc_fast_irq(struct ucc_fast_private *uccf);
-
-/* ucc_fast_transmit_on_demand
- * Immediately forces a poll of the transmitter for data to be sent.
- * Typically, the hardware performs a periodic poll for data that the
- * transmit routine has set up to be transmitted. In cases where
- * this polling cycle is not soon enough, this optional routine can
- * be invoked to force a poll right away, instead. Proper use for
- * each transmission for which this functionality is desired is to
- * call the transmit routine and then this routine right after.
- *
- * uccf - (In) pointer to the fast UCC structure.
- */
-void ucc_fast_transmit_on_demand(struct ucc_fast_private *uccf);
-
-u32 ucc_fast_get_qe_cr_subblock(int uccf_num);
-
-void ucc_fast_dump_regs(struct ucc_fast_private *uccf);
-
-#endif /* __UCC_FAST_H__ */
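As a usage illustration (not part of this commit), a caller would drive the API above roughly as follows. COMM_DIR_RX_AND_TX comes from the QE headers, and example_queue_tx_frame() is a made-up stand-in for a real driver's transmit path; error handling is trimmed.

/* Minimal sketch, assuming a ucc_fast_info already filled from the
 * device tree. Illustrative names only. */
static void example_queue_tx_frame(struct ucc_fast_private *uccf);

static struct ucc_fast_private *uccf;

static int example_ucc_fast_bringup(struct ucc_fast_info *uf_info)
{
	int ret = ucc_fast_init(uf_info, &uccf);	/* allocate + program */

	if (ret)
		return ret;

	ucc_fast_enable(uccf, COMM_DIR_RX_AND_TX);	/* sets ENR|ENT */

	/* per the ucc_fast_transmit_on_demand comment: queue, then poll */
	example_queue_tx_frame(uccf);
	ucc_fast_transmit_on_demand(uccf);
	return 0;
}

static void example_ucc_fast_teardown(void)
{
	ucc_fast_disable(uccf, COMM_DIR_RX_AND_TX);
	ucc_fast_free(uccf);
}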
diff --git a/arch/powerpc/include/asm/ucc_slow.h b/arch/powerpc/include/asm/ucc_slow.h
deleted file mode 100644
index 233ef5fe5fde..000000000000
--- a/arch/powerpc/include/asm/ucc_slow.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors: Shlomi Gridish <gridish@freescale.com>
- *	    Li Yang <leoli@freescale.com>
- *
- * Description:
- * Internal header file for UCC SLOW unit routines.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#ifndef __UCC_SLOW_H__
-#define __UCC_SLOW_H__
-
-#include <linux/kernel.h>
-
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-
-#include <asm/ucc.h>
-
-/* transmit BD's status */
-#define T_R	0x80000000	/* ready bit */
-#define T_PAD	0x40000000	/* add pads to short frames */
-#define T_W	0x20000000	/* wrap bit */
-#define T_I	0x10000000	/* interrupt on completion */
-#define T_L	0x08000000	/* last */
-
-#define T_A	0x04000000	/* Address - the data transmitted as address
-				   chars */
-#define T_TC	0x04000000	/* transmit CRC */
-#define T_CM	0x02000000	/* continuous mode */
-#define T_DEF	0x02000000	/* collision on previous attempt to transmit */
-#define T_P	0x01000000	/* Preamble - send Preamble sequence before
-				   data */
-#define T_HB	0x01000000	/* heartbeat */
-#define T_NS	0x00800000	/* No Stop */
-#define T_LC	0x00800000	/* late collision */
-#define T_RL	0x00400000	/* retransmission limit */
-#define T_UN	0x00020000	/* underrun */
-#define T_CT	0x00010000	/* CTS lost */
-#define T_CSL	0x00010000	/* carrier sense lost */
-#define T_RC	0x003c0000	/* retry count */
-
-/* Receive BD's status */
-#define R_E	0x80000000	/* buffer empty */
-#define R_W	0x20000000	/* wrap bit */
-#define R_I	0x10000000	/* interrupt on reception */
-#define R_L	0x08000000	/* last */
-#define R_C	0x08000000	/* the last byte in this buffer is a cntl
-				   char */
-#define R_F	0x04000000	/* first */
-#define R_A	0x04000000	/* the first byte in this buffer is address
-				   byte */
-#define R_CM	0x02000000	/* continuous mode */
-#define R_ID	0x01000000	/* buffer close on reception of idles */
-#define R_M	0x01000000	/* Frame received because of promiscuous
-				   mode */
-#define R_AM	0x00800000	/* Address match */
-#define R_DE	0x00800000	/* DPLL error */
-#define R_LG	0x00200000	/* Frame length violation */
-#define R_BR	0x00200000	/* Break received */
-#define R_NO	0x00100000	/* Rx Non Octet Aligned Packet */
-#define R_FR	0x00100000	/* Framing Error (no stop bit) character
-				   received */
-#define R_PR	0x00080000	/* Parity Error character received */
-#define R_AB	0x00080000	/* Frame Aborted */
-#define R_SH	0x00080000	/* frame is too short */
-#define R_CR	0x00040000	/* CRC Error */
-#define R_OV	0x00020000	/* Overrun */
-#define R_CD	0x00010000	/* CD lost */
-#define R_CL	0x00010000	/* this frame is closed because of a
-				   collision */
-
-/* Rx Data buffer must be 4 bytes aligned in most cases. */
-#define UCC_SLOW_RX_ALIGN		4
-#define UCC_SLOW_MRBLR_ALIGNMENT	4
-#define UCC_SLOW_PRAM_SIZE		0x100
-#define ALIGNMENT_OF_UCC_SLOW_PRAM	64
-
-/* UCC Slow Channel Protocol Mode */
-enum ucc_slow_channel_protocol_mode {
-	UCC_SLOW_CHANNEL_PROTOCOL_MODE_QMC = 0x00000002,
-	UCC_SLOW_CHANNEL_PROTOCOL_MODE_UART = 0x00000004,
-	UCC_SLOW_CHANNEL_PROTOCOL_MODE_BISYNC = 0x00000008,
-};
-
-/* UCC Slow Transparent Transmit CRC (TCRC) */
-enum ucc_slow_transparent_tcrc {
-	/* 16-bit CCITT CRC (HDLC). (X16 + X12 + X5 + 1) */
-	UCC_SLOW_TRANSPARENT_TCRC_CCITT_CRC16 = 0x00000000,
-	/* CRC16 (BISYNC). (X16 + X15 + X2 + 1) */
-	UCC_SLOW_TRANSPARENT_TCRC_CRC16 = 0x00004000,
-	/* 32-bit CCITT CRC (Ethernet and HDLC) */
-	UCC_SLOW_TRANSPARENT_TCRC_CCITT_CRC32 = 0x00008000,
-};
-
-/* UCC Slow oversampling rate for transmitter (TDCR) */
-enum ucc_slow_tx_oversampling_rate {
-	/* 1x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_1 = 0x00000000,
-	/* 8x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_8 = 0x00010000,
-	/* 16x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_16 = 0x00020000,
-	/* 32x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_32 = 0x00030000,
-};
-
-/* UCC Slow Oversampling rate for receiver (RDCR) */
-enum ucc_slow_rx_oversampling_rate {
-	/* 1x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_1 = 0x00000000,
-	/* 8x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_8 = 0x00004000,
-	/* 16x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_16 = 0x00008000,
-	/* 32x clock mode */
-	UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_32 = 0x0000c000,
-};
-
-/* UCC Slow Transmitter encoding method (TENC) */
-enum ucc_slow_tx_encoding_method {
-	UCC_SLOW_TRANSMITTER_ENCODING_METHOD_TENC_NRZ = 0x00000000,
-	UCC_SLOW_TRANSMITTER_ENCODING_METHOD_TENC_NRZI = 0x00000100
-};
-
-/* UCC Slow Receiver decoding method (RENC) */
-enum ucc_slow_rx_decoding_method {
-	UCC_SLOW_RECEIVER_DECODING_METHOD_RENC_NRZ = 0x00000000,
-	UCC_SLOW_RECEIVER_DECODING_METHOD_RENC_NRZI = 0x00000800
-};
-
-/* UCC Slow Diagnostic mode (DIAG) */
-enum ucc_slow_diag_mode {
-	UCC_SLOW_DIAG_MODE_NORMAL = 0x00000000,
-	UCC_SLOW_DIAG_MODE_LOOPBACK = 0x00000040,
-	UCC_SLOW_DIAG_MODE_ECHO = 0x00000080,
-	UCC_SLOW_DIAG_MODE_LOOPBACK_ECHO = 0x000000c0
-};
-
-struct ucc_slow_info {
-	int ucc_num;
-	int protocol;			/* QE_CR_PROTOCOL_xxx */
-	enum qe_clock rx_clock;
-	enum qe_clock tx_clock;
-	phys_addr_t regs;
-	int irq;
-	u16 uccm_mask;
-	int data_mem_part;
-	int init_tx;
-	int init_rx;
-	u32 tx_bd_ring_len;
-	u32 rx_bd_ring_len;
-	int rx_interrupts;
-	int brkpt_support;
-	int grant_support;
-	int tsa;
-	int cdp;
-	int cds;
-	int ctsp;
-	int ctss;
-	int rinv;
-	int tinv;
-	int rtsm;
-	int rfw;
-	int tci;
-	int tend;
-	int tfl;
-	int txsy;
-	u16 max_rx_buf_length;
-	enum ucc_slow_transparent_tcrc tcrc;
-	enum ucc_slow_channel_protocol_mode mode;
-	enum ucc_slow_diag_mode diag;
-	enum ucc_slow_tx_oversampling_rate tdcr;
-	enum ucc_slow_rx_oversampling_rate rdcr;
-	enum ucc_slow_tx_encoding_method tenc;
-	enum ucc_slow_rx_decoding_method renc;
-};
-
-struct ucc_slow_private {
-	struct ucc_slow_info *us_info;
-	struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */
-	struct ucc_slow_pram *us_pram;	/* a pointer to the parameter RAM */
-	u32 us_pram_offset;
-	int enabled_tx;		/* Whether channel is enabled for Tx (ENT) */
-	int enabled_rx;		/* Whether channel is enabled for Rx (ENR) */
-	int stopped_tx;		/* Whether channel has been stopped for Tx
-				   (STOP_TX, etc.) */
-	int stopped_rx;		/* Whether channel has been stopped for Rx */
-	struct list_head confQ;	/* frames passed to chip waiting for tx */
-	u32 first_tx_bd_mask;	/* mask is used in Tx routine to save status
-				   and length for first BD in a frame */
-	u32 tx_base_offset;	/* first BD in Tx BD table offset (In MURAM) */
-	u32 rx_base_offset;	/* first BD in Rx BD table offset (In MURAM) */
-	struct qe_bd *confBd;	/* next BD for confirm after Tx */
-	struct qe_bd *tx_bd;	/* next BD for new Tx request */
-	struct qe_bd *rx_bd;	/* next BD to collect after Rx */
-	void *p_rx_frame;	/* accumulating receive frame */
-	u16 *p_ucce;		/* a pointer to the event register in memory */
-	u16 *p_uccm;		/* a pointer to the mask register in memory */
-	u16 saved_uccm;		/* a saved mask for the RX Interrupt bits */
-#ifdef STATISTICS
-	u32 tx_frames;		/* Transmitted frames counters */
-	u32 rx_frames;		/* Received frames counters (only frames
-				   passed to application) */
-	u32 rx_discarded;	/* Discarded frames counters (frames that
-				   were discarded by the driver due to
-				   errors) */
-#endif /* STATISTICS */
-};
-
-/* ucc_slow_init
- * Initializes Slow UCC according to provided parameters.
- *
- * us_info  - (In) pointer to the slow UCC info structure.
- * uccs_ret - (Out) pointer to the slow UCC structure.
- */
-int ucc_slow_init(struct ucc_slow_info *us_info, struct ucc_slow_private **uccs_ret);
-
-/* ucc_slow_free
- * Frees all resources for slow UCC.
- *
- * uccs - (In) pointer to the slow UCC structure.
- */
-void ucc_slow_free(struct ucc_slow_private *uccs);
-
-/* ucc_slow_enable
- * Enables a slow UCC port.
- * This routine enables Tx and/or Rx through the General UCC Mode Register.
- *
- * uccs - (In) pointer to the slow UCC structure.
- * mode - (In) TX, RX, or both.
- */
-void ucc_slow_enable(struct ucc_slow_private *uccs, enum comm_dir mode);
-
-/* ucc_slow_disable
- * Disables a slow UCC port.
- * This routine disables Tx and/or Rx through the General UCC Mode Register.
- *
- * uccs - (In) pointer to the slow UCC structure.
- * mode - (In) TX, RX, or both.
- */
-void ucc_slow_disable(struct ucc_slow_private *uccs, enum comm_dir mode);
-
-/* ucc_slow_graceful_stop_tx
- * Smoothly stops transmission on a specified slow UCC.
- *
- * uccs - (In) pointer to the slow UCC structure.
- */
-void ucc_slow_graceful_stop_tx(struct ucc_slow_private *uccs);
-
-/* ucc_slow_stop_tx
- * Stops transmission on a specified slow UCC.
- *
- * uccs - (In) pointer to the slow UCC structure.
- */
-void ucc_slow_stop_tx(struct ucc_slow_private *uccs);
-
-/* ucc_slow_restart_tx
- * Restarts transmitting on a specified slow UCC.
- *
- * uccs - (In) pointer to the slow UCC structure.
- */
-void ucc_slow_restart_tx(struct ucc_slow_private *uccs);
-
-u32 ucc_slow_get_qe_cr_subblock(int uccs_num);
-
-#endif /* __UCC_SLOW_H__ */
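For context (again not from this commit), the RxBD flags above are consumed by reading a descriptor's status and length halves as one big-endian 32-bit word, which is why the masks sit in the high half. A sketch of the usual ring walk, with the function name invented for illustration:

/* Hedged sketch of an RxBD ring drain, assuming struct qe_bd from the
 * QE immap headers and the in_be32()/out_be32() accessors. */
static void example_drain_rx_ring(struct qe_bd __iomem *ring)
{
	struct qe_bd __iomem *bd = ring;
	u32 bd_status = in_be32((u32 __iomem *)bd);

	while (!(bd_status & R_E)) {	/* R_E clear: the CPU owns this BD */
		if (bd_status & (R_OV | R_CR | R_SH))
			pr_debug("rx error, status %08x\n", bd_status);
		/* ...otherwise hand the buffer to the protocol layer... */

		/* give the BD back to the QE, preserving only the wrap bit */
		out_be32((u32 __iomem *)bd, (bd_status & R_W) | R_E | R_I);
		bd = (bd_status & R_W) ? ring : bd + 1;
		bd_status = in_be32((u32 __iomem *)bd);
	}
}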
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 4b6b8ace18e0..6a5ace5fa0c8 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,10 +12,9 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls	379
+#define NR_syscalls	379
 
 #define __NR__exit __NR_exit
-#define NR_syscalls	__NR_syscalls
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index b73a8199f161..1afe90ade595 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -41,7 +41,7 @@
 #include <linux/unistd.h>
 #include <linux/time.h>
 
-#define SYSCALL_MAP_SIZE      ((__NR_syscalls + 31) / 32)
+#define SYSCALL_MAP_SIZE      ((NR_syscalls + 31) / 32)
 
 /*
  * So here is the ppc64 backward compatible version
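The rename is mechanical; the map itself stays one bit per syscall, so with 379 syscalls it still occupies twelve 32-bit words. A standalone check of that arithmetic (illustrative only):

/* (379 + 31) / 32 == 410 / 32 == 12 in integer division. */
#define NR_syscalls		379
#define SYSCALL_MAP_SIZE	((NR_syscalls + 31) / 32)

_Static_assert(SYSCALL_MAP_SIZE == 12, "379 bits round up to 12 u32 words");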
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 43686043e297..8dde19962a5b 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -43,5 +43,7 @@
 #define PPC_FEATURE2_TAR		0x04000000
 #define PPC_FEATURE2_VEC_CRYPTO		0x02000000
 #define PPC_FEATURE2_HTM_NOSC		0x01000000
+#define PPC_FEATURE2_ARCH_3_00		0x00800000 /* ISA 3.00 */
+#define PPC_FEATURE2_HAS_IEEE128	0x00400000 /* VSX IEEE Binary Float 128-bit */
 
 #endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
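Userspace picks these bits up through the auxiliary vector; a minimal probe using glibc's getauxval() (illustrative, not from this commit):

#include <stdio.h>
#include <sys/auxv.h>

#define PPC_FEATURE2_ARCH_3_00		0x00800000
#define PPC_FEATURE2_HAS_IEEE128	0x00400000

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("ISA 3.00: %s\n",
	       (hwcap2 & PPC_FEATURE2_ARCH_3_00) ? "yes" : "no");
	printf("IEEE binary128 in VSX: %s\n",
	       (hwcap2 & PPC_FEATURE2_HAS_IEEE128) ? "yes" : "no");
	return 0;
}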
diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h
index 59dad113897b..c2d21d11c2d2 100644
--- a/arch/powerpc/include/uapi/asm/elf.h
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -295,6 +295,8 @@ do { \
 #define R_PPC64_TLSLD		108
 #define R_PPC64_TOCSAVE		109
 
+#define R_PPC64_ENTRY		118
+
 #define R_PPC64_REL16		249
 #define R_PPC64_REL16_LO	250
 #define R_PPC64_REL16_HI	251
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 86150fbb42c3..8e7cb8e2b21a 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -960,6 +960,7 @@ int fix_alignment(struct pt_regs *regs)
 		preempt_disable();
 		enable_kernel_fp();
 		cvt_df(&data.dd, (float *)&data.x32.low32);
+		disable_kernel_fp();
 		preempt_enable();
 #else
 		return 0;
@@ -1000,6 +1001,7 @@ int fix_alignment(struct pt_regs *regs)
 		preempt_disable();
 		enable_kernel_fp();
 		cvt_fd((float *)&data.x32.low32, &data.dd);
+		disable_kernel_fp();
 		preempt_enable();
 #else
 		return 0;
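Both hunks pair the existing enable_kernel_fp() with the new disable_kernel_fp(), so the facility is not left enabled once the alignment fixup is done with the FPU. A sketch of the bracket this series enforces (the function name is invented; the cvt_df()/cvt_fd() calls stand in for any in-kernel FP work):

/* Hedged sketch of the in-kernel FP usage pattern shown above. */
static void example_fp_section(void)
{
	preempt_disable();	/* FP state is per-CPU while in use */
	enable_kernel_fp();	/* claim the FPU for the kernel */

	/* ... floating-point work, e.g. cvt_df()/cvt_fd() ... */

	disable_kernel_fp();	/* new: hand the facility back explicitly */
	preempt_enable();
}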
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 221d584d089f..07cebc3514f3 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -185,14 +185,16 @@ int main(void)
 	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 	DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened));
-	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+#ifdef CONFIG_PPC_BOOK3S
+	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, mm_ctx_id));
 #ifdef CONFIG_PPC_MM_SLICES
 	DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
-					    context.low_slices_psize));
+					    mm_ctx_low_slices_psize));
 	DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
-					    context.high_slices_psize));
+					    mm_ctx_high_slices_psize));
 	DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
 #endif /* CONFIG_PPC_MM_SLICES */
+#endif
 
 #ifdef CONFIG_PPC_BOOK3E
 	DEFINE(PACAPGD, offsetof(struct paca_struct, pgd));
@@ -222,7 +224,7 @@ int main(void)
 #ifdef CONFIG_PPC_MM_SLICES
 	DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
 #else
-	DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
+	DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, mm_ctx_sllp));
 #endif /* CONFIG_PPC_MM_SLICES */
 	DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
 	DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
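These offsets now track the trimmed-down mm context fields copied into the paca rather than the full context structure. For reference, asm-offsets.c exists only to translate C struct layouts into constants that assembly can include; the DEFINE() used above works roughly like this (simplified from include/linux/kbuild.h):

/* The magic "->" marker is grepped out of the compiler's generated
 * assembly by the build and turned into #define lines in asm-offsets.h. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))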
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index a94f155db78e..0d525ce3717f 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -223,7 +223,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 
 	beq-	1f
 	ACCOUNT_CPU_USER_EXIT(r11, r12)
-	HMT_MEDIUM_LOW_HAS_PPR
+
+BEGIN_FTR_SECTION
+	HMT_MEDIUM_LOW
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
 	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
 1:	ld	r2,GPR2(r1)
 	ld	r1,GPR1(r1)
@@ -312,7 +316,13 @@ syscall_exit_work:
 	subi	r12,r12,TI_FLAGS
 
 4:	/* Anything else left to do? */
-	SET_DEFAULT_THREAD_PPR(r3, r10)	/* Set thread.ppr = 3 */
+BEGIN_FTR_SECTION
+	lis	r3,INIT_PPR@highest	/* Set thread.ppr = 3 */
+	ld	r10,PACACURRENT(r13)
+	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
+	std	r3,TASKTHREADPPR(r10)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
 	beq	ret_from_except_lite
 
@@ -452,43 +462,11 @@ _GLOBAL(_switch)
 	/* r3-r13 are caller saved -- Cort */
 	SAVE_8GPRS(14, r1)
 	SAVE_10GPRS(22, r1)
-	mflr	r20		/* Return to switch caller */
-	mfmsr	r22
-	li	r0, MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
-	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
-	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
-	std	r24,THREAD_VRSAVE(r3)
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-	and.	r0,r0,r22
-	beq+	1f
-	andc	r22,r22,r0
-	MTMSRD(r22)
-	isync
-1:	std	r20,_NIP(r1)
+	std	r0,_NIP(r1)	/* Return to switch caller */
 	mfcr	r23
 	std	r23,_CCR(r1)
 	std	r1,KSP(r3)	/* Set old stack pointer */
 
-#ifdef CONFIG_PPC_BOOK3S_64
-BEGIN_FTR_SECTION
-	/* Event based branch registers */
-	mfspr	r0, SPRN_BESCR
-	std	r0, THREAD_BESCR(r3)
-	mfspr	r0, SPRN_EBBHR
-	std	r0, THREAD_EBBHR(r3)
-	mfspr	r0, SPRN_EBBRR
-	std	r0, THREAD_EBBRR(r3)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-#endif
-
 #ifdef CONFIG_SMP
 	/* We need a sync somewhere here to make sure that if the
 	 * previous task gets rescheduled on another CPU, it sees all
@@ -576,47 +554,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	mr	r1,r8		/* start using new stack pointer */
 	std	r7,PACAKSAVE(r13)
 
-#ifdef CONFIG_PPC_BOOK3S_64
-BEGIN_FTR_SECTION
-	/* Event based branch registers */
-	ld	r0, THREAD_BESCR(r4)
-	mtspr	SPRN_BESCR, r0
-	ld	r0, THREAD_EBBHR(r4)
-	mtspr	SPRN_EBBHR, r0
-	ld	r0, THREAD_EBBRR(r4)
-	mtspr	SPRN_EBBRR, r0
-
-	ld	r0,THREAD_TAR(r4)
-	mtspr	SPRN_TAR,r0
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-#endif
-
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
-	ld	r0,THREAD_VRSAVE(r4)
-	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_PPC64
-BEGIN_FTR_SECTION
-	lwz	r6,THREAD_DSCR_INHERIT(r4)
-	ld	r0,THREAD_DSCR(r4)
-	cmpwi	r6,0
-	bne	1f
-	ld	r0,PACA_DSCR_DEFAULT(r13)
-1:
-BEGIN_FTR_SECTION_NESTED(70)
-	mfspr	r8, SPRN_FSCR
-	rldimi	r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
-	mtspr	SPRN_FSCR, r8
-END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
-	cmpd	r0,r25
-	beq	2f
-	mtspr	SPRN_DSCR,r0
-2:
-END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
-#endif
-
 	ld	r6,_CCR(r1)
 	mtcrf	0xFF,r6
 
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 0a0399c2af11..7716cebf4b8e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -96,7 +96,6 @@ __start_interrupts:
 
 	.globl system_reset_pSeries;
 system_reset_pSeries:
-	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)
 #ifdef CONFIG_PPC_P7_NAP
 BEGIN_FTR_SECTION
@@ -164,7 +163,6 @@ machine_check_pSeries_1:
 	 * some code path might still want to branch into the original
 	 * vector
 	 */
-	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)		/* save r13 */
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
@@ -199,7 +197,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
 	. = 0x300
 	.globl data_access_pSeries
 data_access_pSeries:
-	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
 				 KVMTEST, 0x300)
@@ -207,7 +204,6 @@ data_access_pSeries:
 	. = 0x380
 	.globl data_access_slb_pSeries
 data_access_slb_pSeries:
-	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXSLB)
 	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
@@ -234,15 +230,14 @@ data_access_slb_pSeries:
 	bctr
 #endif
 
-	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
+	STD_EXCEPTION_PSERIES(0x400, instruction_access)
 
 	. = 0x480
 	.globl instruction_access_slb_pSeries
 instruction_access_slb_pSeries:
-	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXSLB)
-	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
+	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x480)
 	std	r3,PACA_EXSLB+EX_R3(r13)
 	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
 #ifdef __DISABLED__
@@ -269,25 +264,24 @@
 	.globl hardware_interrupt_hv;
 hardware_interrupt_pSeries:
 hardware_interrupt_hv:
-	HMT_MEDIUM_PPR_DISCARD
 	BEGIN_FTR_SECTION
 		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
 					    EXC_HV, SOFTEN_TEST_HV)
 		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
 	FTR_SECTION_ELSE
 		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
-					    EXC_STD, SOFTEN_TEST_HV_201)
+					    EXC_STD, SOFTEN_TEST_PR)
 		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
 	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 
-	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
+	STD_EXCEPTION_PSERIES(0x600, alignment)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x600)
 
-	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
+	STD_EXCEPTION_PSERIES(0x700, program_check)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x700)
 
-	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
+	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x800)
 
 	. = 0x900
 	.globl decrementer_pSeries
@@ -297,10 +291,10 @@ decrementer_pSeries:
 	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
 
 	MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00)
 
-	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
+	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xb00)
 
 	. = 0xc00
 	.globl	system_call_pSeries
@@ -331,8 +325,8 @@ system_call_pSeries:
 	SYSCALL_PSERIES_3
 	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
 
-	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
+	STD_EXCEPTION_PSERIES(0xd00, single_step)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xd00)
 
 	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
 	 * out of line to handle them
@@ -407,13 +401,12 @@ hv_facility_unavailable_trampoline:
 	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
 #endif /* CONFIG_CBE_RAS */
 
-	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
-	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
+	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
 
 	. = 0x1500
 	.global denorm_exception_hv
 denorm_exception_hv:
-	HMT_MEDIUM_PPR_DISCARD
 	mtspr	SPRN_SPRG_HSCRATCH0,r13
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
@@ -435,8 +428,8 @@ denorm_exception_hv:
 	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
 #endif /* CONFIG_CBE_RAS */
 
-	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
+	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x1700)
 
 #ifdef CONFIG_CBE_RAS
 	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
@@ -527,7 +520,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 machine_check_pSeries:
 	.globl machine_check_fwnmi
 machine_check_fwnmi:
-	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)		/* save r13 */
 	EXCEPTION_PROLOG_0(PACA_EXMC)
 machine_check_pSeries_0:
@@ -536,9 +528,9 @@ machine_check_pSeries_0:
 	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
 	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
 	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
-	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x400)
+	KVM_HANDLER(PACA_EXSLB, EXC_STD, 0x480)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x900)
 	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
 
 #ifdef CONFIG_PPC_DENORMALISATION
@@ -621,13 +613,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 
 	/* moved from 0xf00 */
 	STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
 	STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20)
 	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf40)
 	STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
-	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
+	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf60)
 	STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
 	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
 
@@ -711,7 +703,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
 	.globl system_reset_fwnmi
 	.align 7
 system_reset_fwnmi:
-	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)		/* save r13 */
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
 				 NOTEST, 0x100)
@@ -1556,29 +1547,19 @@ do_hash_page:
 	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
 	bne	77f			/* then don't call hash_page now */
-	/*
-	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
-	 * accessing a userspace segment (even from the kernel). We assume
-	 * kernel addresses always have the high bit set.
-	 */
-	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
-	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
-	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
-	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
-	ori	r4,r4,1			/* add _PAGE_PRESENT */
-	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
 
 	/*
 	 * r3 contains the faulting address
-	 * r4 contains the required access permissions
+	 * r4 msr
 	 * r5 contains the trap number
 	 * r6 contains dsisr
 	 *
 	 * at return r3 = 0 for success, 1 for page fault, negative for error
 	 */
+	mr	r4,r12
 	ld	r6,_DSISR(r1)
-	bl	hash_page		/* build HPTE if possible */
-	cmpdi	r3,0			/* see if hash_page succeeded */
+	bl	__hash_page		/* build HPTE if possible */
+	cmpdi	r3,0			/* see if __hash_page succeeded */
 
 	/* Success */
 	beq	fast_exc_return_irq	/* Return from exception on success */
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 9ad236e5d2c9..2117eaca3d28 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -73,30 +73,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	MTFSF_L(fr0)
 	REST_32FPVSRS(0, R4, R7)
 
-	/* FP/VSX off again */
-	MTMSRD(r6)
-	SYNC
-
 	blr
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 /*
- * Enable use of the FPU, and VSX if possible, for the caller.
- */
-_GLOBAL(fp_enable)
-	mfmsr	r3
-	ori	r3,r3,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	oris	r3,r3,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
-	SYNC
-	MTMSRD(r3)
-	isync			/* (not necessary for arch 2.02 and later) */
-	blr
-
-/*
  * Load state from memory into FP registers including FPSCR.
  * Assumes the caller has enabled FP in the MSR.
  */
@@ -136,31 +116,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	SYNC
 	MTMSRD(r5)			/* enable use of fpu now */
 	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_fpu in switch_to.
- */
-#ifndef CONFIG_SMP
-	LOAD_REG_ADDRBASE(r3, last_task_used_math)
-	toreal(r3)
-	PPC_LL	r4,ADDROFF(last_task_used_math)(r3)
-	PPC_LCMPI	0,r4,0
-	beq	1f
-	toreal(r4)
-	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
-	addi	r10,r4,THREAD_FPSTATE
-	SAVE_32FPVSRS(0, R5, R10)
-	mffs	fr0
-	stfd	fr0,FPSTATE_FPSCR(r10)
-	PPC_LL	r5,PT_REGS(r4)
-	toreal(r5)
-	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r10,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r10		/* disable FP for previous task */
-	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
 	/* enable use of FP after return */
 #ifdef CONFIG_PPC32
 	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
@@ -179,36 +134,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	lfd	fr0,FPSTATE_FPSCR(r10)
 	MTFSF_L(fr0)
 	REST_32FPVSRS(0, R4, R10)
-#ifndef CONFIG_SMP
-	subi	r4,r5,THREAD
-	fromreal(r4)
-	PPC_STL	r4,ADDROFF(last_task_used_math)(r3)
-#endif /* CONFIG_SMP */
 	/* restore registers and return */
 	/* we haven't used ctr or xer or lr */
 	blr
 
 /*
- * giveup_fpu(tsk)
+ * __giveup_fpu(tsk)
  * Disable FP for the task given as the argument,
  * and save the floating-point registers in its thread_struct.
  * Enables the FPU for use in the kernel on return.
 */
-_GLOBAL(giveup_fpu)
-	mfmsr	r5
-	ori	r5,r5,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	oris	r5,r5,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
-	SYNC_601
-	ISYNC_601
-	MTMSRD(r5)			/* enable use of fpu now */
-	SYNC_601
-	isync
-	PPC_LCMPI	0,r3,0
-	beqlr-				/* if no previous owner, done */
+_GLOBAL(__giveup_fpu)
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	PPC_LL	r6,THREAD_FPSAVEAREA(r3)
 	PPC_LL	r5,PT_REGS(r3)
@@ -230,11 +166,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 	andc	r4,r4,r3		/* disable FP for previous task */
 	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	LOAD_REG_ADDRBASE(r4,last_task_used_math)
-	PPC_STL	r5,ADDROFF(last_task_used_math)(r4)
-#endif /* CONFIG_SMP */
 	blr
 
 /*
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index fffd1f96bb1d..f705171b924b 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -857,29 +857,6 @@ _GLOBAL(load_up_spe)
 	oris	r5,r5,MSR_SPE@h
 	mtmsr	r5			/* enable use of SPE now */
 	isync
-/*
- * For SMP, we don't do lazy SPE switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_spe in switch_to.
- */
-#ifndef CONFIG_SMP
-	lis	r3,last_task_used_spe@ha
-	lwz	r4,last_task_used_spe@l(r3)
-	cmpi	0,r4,0
-	beq	1f
-	addi	r4,r4,THREAD		/* want THREAD of last_task_used_spe */
-	SAVE_32EVRS(0,r10,r4,THREAD_EVR0)
-	evxor	evr10, evr10, evr10	/* clear out evr10 */
-	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
-	li	r5,THREAD_ACC
-	evstddx	evr10, r4, r5		/* save off accumulator */
-	lwz	r5,PT_REGS(r4)
-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	lis	r10,MSR_SPE@h
-	andc	r4,r4,r10		/* disable SPE for previous task */
-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* !CONFIG_SMP */
 	/* enable use of SPE after return */
 	oris	r9,r9,MSR_SPE@h
 	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
@@ -889,10 +866,6 @@ _GLOBAL(load_up_spe)
 	evlddx	evr4,r10,r5
 	evmra	evr4,evr4
 	REST_32EVRS(0,r10,r5,THREAD_EVR0)
-#ifndef CONFIG_SMP
-	subi	r4,r5,THREAD
-	stw	r4,last_task_used_spe@l(r3)
-#endif /* !CONFIG_SMP */
 	blr
 
 /*
@@ -1011,16 +984,10 @@ _GLOBAL(__setup_ehv_ivors)
 
 #ifdef CONFIG_SPE
 /*
- * extern void giveup_spe(struct task_struct *prev)
+ * extern void __giveup_spe(struct task_struct *prev)
 *
 */
-_GLOBAL(giveup_spe)
-	mfmsr	r5
-	oris	r5,r5,MSR_SPE@h
-	mtmsr	r5			/* enable use of SPE now */
-	isync
-	cmpi	0,r3,0
-	beqlr-				/* if no previous owner, done */
+_GLOBAL(__giveup_spe)
 	addi	r3,r3,THREAD		/* want THREAD of task */
 	lwz	r5,PT_REGS(r3)
 	cmpi	0,r5,0
@@ -1035,11 +1002,6 @@ _GLOBAL(giveup_spe)
 	andc	r4,r4,r3		/* disable SPE for previous task */
 	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	lis	r4,last_task_used_spe@ha
-	stw	r5,last_task_used_spe@l(r4)
-#endif /* !CONFIG_SMP */
 	blr
 #endif /* CONFIG_SPE */
 
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 112ccf497562..cf4fb5429cf1 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -89,13 +89,6 @@ _GLOBAL(power7_powersave_common)
 	std	r0,_LINK(r1)
 	std	r0,_NIP(r1)
 
-#ifndef CONFIG_SMP
-	/* Make sure FPU, VSX etc... are flushed as we may lose
-	 * state when going to nap mode
-	 */
-	bl	discard_lazy_cpu_state
-#endif /* CONFIG_SMP */
-
 	/* Hard disable interrupts */
 	mfmsr	r9
 	rldicl	r9,r9,48,1
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index ed3ab509faca..be8edd67f05b 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -743,6 +743,8 @@ relocate_new_kernel:
 	/* Check for 47x cores */
 	mfspr	r3,SPRN_PVR
 	srwi	r3,r3,16
+	cmplwi	cr0,r3,PVR_476FPE@h
+	beq	setup_map_47x
 	cmplwi	cr0,r3,PVR_476@h
 	beq	setup_map_47x
 	cmplwi	cr0,r3,PVR_476_ISS@h
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 68384514506b..59663af9315f 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -635,6 +635,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 			 */
 			break;
 
+		case R_PPC64_ENTRY:
+			/*
+			 * Optimize ELFv2 large code model entry point if
+			 * the TOC is within 2GB range of current location.
+			 */
+			value = my_r2(sechdrs, me) - (unsigned long)location;
+			if (value + 0x80008000 > 0xffffffff)
+				break;
+			/*
+			 * Check for the large code model prolog sequence:
+			 *	ld r2, ...(r12)
+			 *	add r2, r2, r12
+			 */
+			if ((((uint32_t *)location)[0] & ~0xfffc)
+			    != 0xe84c0000)
+				break;
+			if (((uint32_t *)location)[1] != 0x7c426214)
+				break;
+			/*
+			 * If found, replace it with:
+			 *	addis r2, r12, (.TOC.-func)@ha
+			 *	addi  r2, r12, (.TOC.-func)@l
+			 */
+			((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
+			((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
+			break;
+
 		case R_PPC64_REL16_HA:
 			/* Subtract location pointer */
 			value -= (unsigned long)location;
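The two replacement instructions materialise the 32-bit .TOC.-to-function offset as a high-adjusted/low pair; because the addi consumes the low half sign-extended, the high half must absorb a carry. A standalone check of the split (the two macros paraphrase the kernel's PPC_HA()/PPC_LO() helpers, so treat them as an assumption here):

#include <assert.h>
#include <stdint.h>

#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HA(v) ((((v) >> 16) + (((v) & 0x8000) ? 1 : 0)) & 0xffff)

int main(void)
{
	uint32_t value = 0x12348765;	/* arbitrary .TOC.-func offset */
	int32_t rebuilt = (int32_t)(PPC_HA(value) << 16) +
			  (int16_t)PPC_LO(value);

	/* addis r2,r12,@ha then addi r2,r12,@l reproduces the offset */
	assert((uint32_t)rebuilt == value);
	return 0;
}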
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 202963ee013a..41e1607e800c 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -19,13 +19,11 @@ EXPORT_SYMBOL(_mcount);
 #endif
 
 #ifdef CONFIG_PPC_FPU
-EXPORT_SYMBOL(giveup_fpu);
 EXPORT_SYMBOL(load_fp_state);
 EXPORT_SYMBOL(store_fp_state);
 #endif
 
 #ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL(giveup_altivec);
 EXPORT_SYMBOL(load_vr_state);
 EXPORT_SYMBOL(store_vr_state);
 #endif
@@ -34,10 +32,6 @@ EXPORT_SYMBOL(store_vr_state);
 EXPORT_SYMBOL_GPL(__giveup_vsx);
 #endif
 
-#ifdef CONFIG_SPE
-EXPORT_SYMBOL(giveup_spe);
-#endif
-
 #ifdef CONFIG_EPAPR_PARAVIRT
 EXPORT_SYMBOL(epapr_hypercall_start);
 #endif
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 646bf4d222c1..dccc87e8fee5 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -67,15 +67,8 @@ | |||
67 | 67 | ||
68 | extern unsigned long _get_SP(void); | 68 | extern unsigned long _get_SP(void); |
69 | 69 | ||
70 | #ifndef CONFIG_SMP | ||
71 | struct task_struct *last_task_used_math = NULL; | ||
72 | struct task_struct *last_task_used_altivec = NULL; | ||
73 | struct task_struct *last_task_used_vsx = NULL; | ||
74 | struct task_struct *last_task_used_spe = NULL; | ||
75 | #endif | ||
76 | |||
77 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 70 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
78 | void giveup_fpu_maybe_transactional(struct task_struct *tsk) | 71 | static void check_if_tm_restore_required(struct task_struct *tsk) |
79 | { | 72 | { |
80 | /* | 73 | /* |
81 | * If we are saving the current thread's registers, and the | 74 | * If we are saving the current thread's registers, and the |
@@ -89,34 +82,67 @@ void giveup_fpu_maybe_transactional(struct task_struct *tsk) | |||
89 | tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr; | 82 | tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr; |
90 | set_thread_flag(TIF_RESTORE_TM); | 83 | set_thread_flag(TIF_RESTORE_TM); |
91 | } | 84 | } |
85 | } | ||
86 | #else | ||
87 | static inline void check_if_tm_restore_required(struct task_struct *tsk) { } | ||
88 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | ||
89 | |||
90 | bool strict_msr_control; | ||
91 | EXPORT_SYMBOL(strict_msr_control); | ||
92 | |||
93 | static int __init enable_strict_msr_control(char *str) | ||
94 | { | ||
95 | strict_msr_control = true; | ||
96 | pr_info("Enabling strict facility control\n"); | ||
92 | 97 | ||
93 | giveup_fpu(tsk); | 98 | return 0; |
94 | } | 99 | } |
100 | early_param("ppc_strict_facility_enable", enable_strict_msr_control); | ||
95 | 101 | ||
96 | void giveup_altivec_maybe_transactional(struct task_struct *tsk) | 102 | void msr_check_and_set(unsigned long bits) |
97 | { | 103 | { |
98 | /* | 104 | unsigned long oldmsr = mfmsr(); |
99 | * If we are saving the current thread's registers, and the | 105 | unsigned long newmsr; |
100 | * thread is in a transactional state, set the TIF_RESTORE_TM | ||
101 | * bit so that we know to restore the registers before | ||
102 | * returning to userspace. | ||
103 | */ | ||
104 | if (tsk == current && tsk->thread.regs && | ||
105 | MSR_TM_ACTIVE(tsk->thread.regs->msr) && | ||
106 | !test_thread_flag(TIF_RESTORE_TM)) { | ||
107 | tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr; | ||
108 | set_thread_flag(TIF_RESTORE_TM); | ||
109 | } | ||
110 | 106 | ||
111 | giveup_altivec(tsk); | 107 | newmsr = oldmsr | bits; |
108 | |||
109 | #ifdef CONFIG_VSX | ||
110 | if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP)) | ||
111 | newmsr |= MSR_VSX; | ||
112 | #endif | ||
113 | |||
114 | if (oldmsr != newmsr) | ||
115 | mtmsr_isync(newmsr); | ||
112 | } | 116 | } |
113 | 117 | ||
114 | #else | 118 | void __msr_check_and_clear(unsigned long bits) |
115 | #define giveup_fpu_maybe_transactional(tsk) giveup_fpu(tsk) | 119 | { |
116 | #define giveup_altivec_maybe_transactional(tsk) giveup_altivec(tsk) | 120 | unsigned long oldmsr = mfmsr(); |
117 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | 121 | unsigned long newmsr; |
122 | |||
123 | newmsr = oldmsr & ~bits; | ||
124 | |||
125 | #ifdef CONFIG_VSX | ||
126 | if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP)) | ||
127 | newmsr &= ~MSR_VSX; | ||
128 | #endif | ||
129 | |||
130 | if (oldmsr != newmsr) | ||
131 | mtmsr_isync(newmsr); | ||
132 | } | ||
133 | EXPORT_SYMBOL(__msr_check_and_clear); | ||
118 | 134 | ||
119 | #ifdef CONFIG_PPC_FPU | 135 | #ifdef CONFIG_PPC_FPU |
136 | void giveup_fpu(struct task_struct *tsk) | ||
137 | { | ||
138 | check_if_tm_restore_required(tsk); | ||
139 | |||
140 | msr_check_and_set(MSR_FP); | ||
141 | __giveup_fpu(tsk); | ||
142 | msr_check_and_clear(MSR_FP); | ||
143 | } | ||
144 | EXPORT_SYMBOL(giveup_fpu); | ||
145 | |||
120 | /* | 146 | /* |
121 | * Make sure the floating-point register state in the | 147 | * Make sure the floating-point register state in the |
122 | * the thread_struct is up to date for task tsk. | 148 | * the thread_struct is up to date for task tsk. |
@@ -134,52 +160,56 @@ void flush_fp_to_thread(struct task_struct *tsk) | |||
134 | */ | 160 | */ |
135 | preempt_disable(); | 161 | preempt_disable(); |
136 | if (tsk->thread.regs->msr & MSR_FP) { | 162 | if (tsk->thread.regs->msr & MSR_FP) { |
137 | #ifdef CONFIG_SMP | ||
138 | /* | 163 | /* |
139 | * This should only ever be called for current or | 164 | * This should only ever be called for current or |
140 | * for a stopped child process. Since we save away | 165 | * for a stopped child process. Since we save away |
141 | * the FP register state on context switch on SMP, | 166 | * the FP register state on context switch, |
142 | * there is something wrong if a stopped child appears | 167 | * there is something wrong if a stopped child appears |
143 | * to still have its FP state in the CPU registers. | 168 | * to still have its FP state in the CPU registers. |
144 | */ | 169 | */ |
145 | BUG_ON(tsk != current); | 170 | BUG_ON(tsk != current); |
146 | #endif | 171 | giveup_fpu(tsk); |
147 | giveup_fpu_maybe_transactional(tsk); | ||
148 | } | 172 | } |
149 | preempt_enable(); | 173 | preempt_enable(); |
150 | } | 174 | } |
151 | } | 175 | } |
152 | EXPORT_SYMBOL_GPL(flush_fp_to_thread); | 176 | EXPORT_SYMBOL_GPL(flush_fp_to_thread); |
153 | #endif /* CONFIG_PPC_FPU */ | ||
154 | 177 | ||
155 | void enable_kernel_fp(void) | 178 | void enable_kernel_fp(void) |
156 | { | 179 | { |
157 | WARN_ON(preemptible()); | 180 | WARN_ON(preemptible()); |
158 | 181 | ||
159 | #ifdef CONFIG_SMP | 182 | msr_check_and_set(MSR_FP); |
160 | if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) | 183 | |
161 | giveup_fpu_maybe_transactional(current); | 184 | if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) { |
162 | else | 185 | check_if_tm_restore_required(current); |
163 | giveup_fpu(NULL); /* just enables FP for kernel */ | 186 | __giveup_fpu(current); |
164 | #else | 187 | } |
165 | giveup_fpu_maybe_transactional(last_task_used_math); | ||
166 | #endif /* CONFIG_SMP */ | ||
167 | } | 188 | } |
168 | EXPORT_SYMBOL(enable_kernel_fp); | 189 | EXPORT_SYMBOL(enable_kernel_fp); |
190 | #endif /* CONFIG_PPC_FPU */ | ||
169 | 191 | ||
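Note: a minimal sketch of the calling pattern these helpers assume, mirroring the disable_kernel_fp() calls added in the KVM hunks further down (the function name and body are illustrative, not from this patch):

        static void example_kernel_fp_section(void)
        {
                preempt_disable();      /* facility state is per-CPU */
                enable_kernel_fp();     /* sets MSR_FP, flushes any user FP state */
                /* ... issue FP/VSX instructions ... */
                disable_kernel_fp();    /* assumed msr_check_and_clear(MSR_FP) pair */
                preempt_enable();
        }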
170 | #ifdef CONFIG_ALTIVEC | 192 | #ifdef CONFIG_ALTIVEC |
193 | void giveup_altivec(struct task_struct *tsk) | ||
194 | { | ||
195 | check_if_tm_restore_required(tsk); | ||
196 | |||
197 | msr_check_and_set(MSR_VEC); | ||
198 | __giveup_altivec(tsk); | ||
199 | msr_check_and_clear(MSR_VEC); | ||
200 | } | ||
201 | EXPORT_SYMBOL(giveup_altivec); | ||
202 | |||
171 | void enable_kernel_altivec(void) | 203 | void enable_kernel_altivec(void) |
172 | { | 204 | { |
173 | WARN_ON(preemptible()); | 205 | WARN_ON(preemptible()); |
174 | 206 | ||
175 | #ifdef CONFIG_SMP | 207 | msr_check_and_set(MSR_VEC); |
176 | if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) | 208 | |
177 | giveup_altivec_maybe_transactional(current); | 209 | if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) { |
178 | else | 210 | check_if_tm_restore_required(current); |
179 | giveup_altivec_notask(); | 211 | __giveup_altivec(current); |
180 | #else | 212 | } |
181 | giveup_altivec_maybe_transactional(last_task_used_altivec); | ||
182 | #endif /* CONFIG_SMP */ | ||
183 | } | 213 | } |
184 | EXPORT_SYMBOL(enable_kernel_altivec); | 214 | EXPORT_SYMBOL(enable_kernel_altivec); |
185 | 215 | ||
@@ -192,10 +222,8 @@ void flush_altivec_to_thread(struct task_struct *tsk) | |||
192 | if (tsk->thread.regs) { | 222 | if (tsk->thread.regs) { |
193 | preempt_disable(); | 223 | preempt_disable(); |
194 | if (tsk->thread.regs->msr & MSR_VEC) { | 224 | if (tsk->thread.regs->msr & MSR_VEC) { |
195 | #ifdef CONFIG_SMP | ||
196 | BUG_ON(tsk != current); | 225 | BUG_ON(tsk != current); |
197 | #endif | 226 | giveup_altivec(tsk); |
198 | giveup_altivec_maybe_transactional(tsk); | ||
199 | } | 227 | } |
200 | preempt_enable(); | 228 | preempt_enable(); |
201 | } | 229 | } |
@@ -204,37 +232,43 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread); | |||
204 | #endif /* CONFIG_ALTIVEC */ | 232 | #endif /* CONFIG_ALTIVEC */ |
205 | 233 | ||
206 | #ifdef CONFIG_VSX | 234 | #ifdef CONFIG_VSX |
207 | void enable_kernel_vsx(void) | 235 | void giveup_vsx(struct task_struct *tsk) |
208 | { | 236 | { |
209 | WARN_ON(preemptible()); | 237 | check_if_tm_restore_required(tsk); |
210 | 238 | ||
211 | #ifdef CONFIG_SMP | 239 | msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX); |
212 | if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) | 240 | if (tsk->thread.regs->msr & MSR_FP) |
213 | giveup_vsx(current); | 241 | __giveup_fpu(tsk); |
214 | else | 242 | if (tsk->thread.regs->msr & MSR_VEC) |
215 | giveup_vsx(NULL); /* just enable vsx for kernel - force */ | 243 | __giveup_altivec(tsk); |
216 | #else | 244 | __giveup_vsx(tsk); |
217 | giveup_vsx(last_task_used_vsx); | 245 | msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX); |
218 | #endif /* CONFIG_SMP */ | ||
219 | } | 246 | } |
220 | EXPORT_SYMBOL(enable_kernel_vsx); | 247 | EXPORT_SYMBOL(giveup_vsx); |
221 | 248 | ||
222 | void giveup_vsx(struct task_struct *tsk) | 249 | void enable_kernel_vsx(void) |
223 | { | 250 | { |
224 | giveup_fpu_maybe_transactional(tsk); | 251 | WARN_ON(preemptible()); |
225 | giveup_altivec_maybe_transactional(tsk); | 252 | |
226 | __giveup_vsx(tsk); | 253 | msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX); |
254 | |||
255 | if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) { | ||
256 | check_if_tm_restore_required(current); | ||
257 | if (current->thread.regs->msr & MSR_FP) | ||
258 | __giveup_fpu(current); | ||
259 | if (current->thread.regs->msr & MSR_VEC) | ||
260 | __giveup_altivec(current); | ||
261 | __giveup_vsx(current); | ||
262 | } | ||
227 | } | 263 | } |
228 | EXPORT_SYMBOL(giveup_vsx); | 264 | EXPORT_SYMBOL(enable_kernel_vsx); |
229 | 265 | ||
230 | void flush_vsx_to_thread(struct task_struct *tsk) | 266 | void flush_vsx_to_thread(struct task_struct *tsk) |
231 | { | 267 | { |
232 | if (tsk->thread.regs) { | 268 | if (tsk->thread.regs) { |
233 | preempt_disable(); | 269 | preempt_disable(); |
234 | if (tsk->thread.regs->msr & MSR_VSX) { | 270 | if (tsk->thread.regs->msr & MSR_VSX) { |
235 | #ifdef CONFIG_SMP | ||
236 | BUG_ON(tsk != current); | 271 | BUG_ON(tsk != current); |
237 | #endif | ||
238 | giveup_vsx(tsk); | 272 | giveup_vsx(tsk); |
239 | } | 273 | } |
240 | preempt_enable(); | 274 | preempt_enable(); |
@@ -244,19 +278,26 @@ EXPORT_SYMBOL_GPL(flush_vsx_to_thread); | |||
244 | #endif /* CONFIG_VSX */ | 278 | #endif /* CONFIG_VSX */ |
245 | 279 | ||
246 | #ifdef CONFIG_SPE | 280 | #ifdef CONFIG_SPE |
281 | void giveup_spe(struct task_struct *tsk) | ||
282 | { | ||
283 | check_if_tm_restore_required(tsk); | ||
284 | |||
285 | msr_check_and_set(MSR_SPE); | ||
286 | __giveup_spe(tsk); | ||
287 | msr_check_and_clear(MSR_SPE); | ||
288 | } | ||
289 | EXPORT_SYMBOL(giveup_spe); | ||
247 | 290 | ||
248 | void enable_kernel_spe(void) | 291 | void enable_kernel_spe(void) |
249 | { | 292 | { |
250 | WARN_ON(preemptible()); | 293 | WARN_ON(preemptible()); |
251 | 294 | ||
252 | #ifdef CONFIG_SMP | 295 | msr_check_and_set(MSR_SPE); |
253 | if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) | 296 | |
254 | giveup_spe(current); | 297 | if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) { |
255 | else | 298 | check_if_tm_restore_required(current); |
256 | giveup_spe(NULL); /* just enable SPE for kernel - force */ | 299 | __giveup_spe(current); |
257 | #else | 300 | } |
258 | giveup_spe(last_task_used_spe); | ||
259 | #endif /* CONFIG_SMP */ ||
260 | } | 301 | } |
261 | EXPORT_SYMBOL(enable_kernel_spe); | 302 | EXPORT_SYMBOL(enable_kernel_spe); |
262 | 303 | ||
@@ -265,9 +306,7 @@ void flush_spe_to_thread(struct task_struct *tsk) | |||
265 | if (tsk->thread.regs) { | 306 | if (tsk->thread.regs) { |
266 | preempt_disable(); | 307 | preempt_disable(); |
267 | if (tsk->thread.regs->msr & MSR_SPE) { | 308 | if (tsk->thread.regs->msr & MSR_SPE) { |
268 | #ifdef CONFIG_SMP | ||
269 | BUG_ON(tsk != current); | 309 | BUG_ON(tsk != current); |
270 | #endif | ||
271 | tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); | 310 | tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); |
272 | giveup_spe(tsk); | 311 | giveup_spe(tsk); |
273 | } | 312 | } |
@@ -276,31 +315,81 @@ void flush_spe_to_thread(struct task_struct *tsk) | |||
276 | } | 315 | } |
277 | #endif /* CONFIG_SPE */ | 316 | #endif /* CONFIG_SPE */ |
278 | 317 | ||
279 | #ifndef CONFIG_SMP | 318 | static unsigned long msr_all_available; |
280 | /* | 319 | |
281 | * If we are doing lazy switching of CPU state (FP, altivec or SPE), | 320 | static int __init init_msr_all_available(void) |
282 | * and the current task has some state, discard it. | ||
283 | */ | ||
284 | void discard_lazy_cpu_state(void) | ||
285 | { | 321 | { |
286 | preempt_disable(); | 322 | #ifdef CONFIG_PPC_FPU |
287 | if (last_task_used_math == current) | 323 | msr_all_available |= MSR_FP; |
288 | last_task_used_math = NULL; | 324 | #endif |
289 | #ifdef CONFIG_ALTIVEC | 325 | #ifdef CONFIG_ALTIVEC |
290 | if (last_task_used_altivec == current) | 326 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
291 | last_task_used_altivec = NULL; | 327 | msr_all_available |= MSR_VEC; |
292 | #endif /* CONFIG_ALTIVEC */ | 328 | #endif |
293 | #ifdef CONFIG_VSX | 329 | #ifdef CONFIG_VSX |
294 | if (last_task_used_vsx == current) | 330 | if (cpu_has_feature(CPU_FTR_VSX)) |
295 | last_task_used_vsx = NULL; | 331 | msr_all_available |= MSR_VSX; |
296 | #endif /* CONFIG_VSX */ | 332 | #endif |
297 | #ifdef CONFIG_SPE | 333 | #ifdef CONFIG_SPE |
298 | if (last_task_used_spe == current) | 334 | if (cpu_has_feature(CPU_FTR_SPE)) |
299 | last_task_used_spe = NULL; | 335 | msr_all_available |= MSR_SPE; |
300 | #endif | 336 | #endif |
301 | preempt_enable(); | 337 | |
338 | return 0; | ||
339 | } | ||
340 | early_initcall(init_msr_all_available); | ||
341 | |||
342 | void giveup_all(struct task_struct *tsk) | ||
343 | { | ||
344 | unsigned long usermsr; | ||
345 | |||
346 | if (!tsk->thread.regs) | ||
347 | return; | ||
348 | |||
349 | usermsr = tsk->thread.regs->msr; | ||
350 | |||
351 | if ((usermsr & msr_all_available) == 0) | ||
352 | return; | ||
353 | |||
354 | msr_check_and_set(msr_all_available); | ||
355 | |||
356 | #ifdef CONFIG_PPC_FPU | ||
357 | if (usermsr & MSR_FP) | ||
358 | __giveup_fpu(tsk); | ||
359 | #endif | ||
360 | #ifdef CONFIG_ALTIVEC | ||
361 | if (usermsr & MSR_VEC) | ||
362 | __giveup_altivec(tsk); | ||
363 | #endif | ||
364 | #ifdef CONFIG_VSX | ||
365 | if (usermsr & MSR_VSX) | ||
366 | __giveup_vsx(tsk); | ||
367 | #endif | ||
368 | #ifdef CONFIG_SPE | ||
369 | if (usermsr & MSR_SPE) | ||
370 | __giveup_spe(tsk); | ||
371 | #endif | ||
372 | |||
373 | msr_check_and_clear(msr_all_available); | ||
374 | } | ||
375 | EXPORT_SYMBOL(giveup_all); | ||
376 | |||
377 | void flush_all_to_thread(struct task_struct *tsk) | ||
378 | { | ||
379 | if (tsk->thread.regs) { | ||
380 | preempt_disable(); | ||
381 | BUG_ON(tsk != current); | ||
382 | giveup_all(tsk); | ||
383 | |||
384 | #ifdef CONFIG_SPE | ||
385 | if (tsk->thread.regs->msr & MSR_SPE) | ||
386 | tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); | ||
387 | #endif | ||
388 | |||
389 | preempt_enable(); | ||
390 | } | ||
302 | } | 391 | } |
303 | #endif /* CONFIG_SMP */ | 392 | EXPORT_SYMBOL(flush_all_to_thread); |
304 | 393 | ||
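Note: with giveup_all() and flush_all_to_thread(), call sites that chained the per-facility flushes collapse to a single call, as the swsusp and KVM HV hunks below show; schematically:

        /* before */
        flush_fp_to_thread(current);
        flush_altivec_to_thread(current);
        flush_spe_to_thread(current);

        /* after */
        flush_all_to_thread(current);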
305 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 394 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
306 | void do_send_trap(struct pt_regs *regs, unsigned long address, | 395 | void do_send_trap(struct pt_regs *regs, unsigned long address, |
@@ -744,13 +833,15 @@ void restore_tm_state(struct pt_regs *regs) | |||
744 | msr_diff = current->thread.ckpt_regs.msr & ~regs->msr; | 833 | msr_diff = current->thread.ckpt_regs.msr & ~regs->msr; |
745 | msr_diff &= MSR_FP | MSR_VEC | MSR_VSX; | 834 | msr_diff &= MSR_FP | MSR_VEC | MSR_VSX; |
746 | if (msr_diff & MSR_FP) { | 835 | if (msr_diff & MSR_FP) { |
747 | fp_enable(); | 836 | msr_check_and_set(MSR_FP); |
748 | load_fp_state(¤t->thread.fp_state); | 837 | load_fp_state(¤t->thread.fp_state); |
838 | msr_check_and_clear(MSR_FP); | ||
749 | regs->msr |= current->thread.fpexc_mode; | 839 | regs->msr |= current->thread.fpexc_mode; |
750 | } | 840 | } |
751 | if (msr_diff & MSR_VEC) { | 841 | if (msr_diff & MSR_VEC) { |
752 | vec_enable(); | 842 | msr_check_and_set(MSR_VEC); |
753 | load_vr_state(¤t->thread.vr_state); | 843 | load_vr_state(¤t->thread.vr_state); |
844 | msr_check_and_clear(MSR_VEC); | ||
754 | } | 845 | } |
755 | regs->msr |= msr_diff; | 846 | regs->msr |= msr_diff; |
756 | } | 847 | } |
@@ -760,112 +851,87 @@ void restore_tm_state(struct pt_regs *regs) | |||
760 | #define __switch_to_tm(prev) | 851 | #define __switch_to_tm(prev) |
761 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | 852 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ |
762 | 853 | ||
763 | struct task_struct *__switch_to(struct task_struct *prev, | 854 | static inline void save_sprs(struct thread_struct *t) |
764 | struct task_struct *new) | ||
765 | { | 855 | { |
766 | struct thread_struct *new_thread, *old_thread; | 856 | #ifdef CONFIG_ALTIVEC |
767 | struct task_struct *last; | 857 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
768 | #ifdef CONFIG_PPC_BOOK3S_64 | 858 | t->vrsave = mfspr(SPRN_VRSAVE); |
769 | struct ppc64_tlb_batch *batch; | ||
770 | #endif | 859 | #endif |
860 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
861 | if (cpu_has_feature(CPU_FTR_DSCR)) | ||
862 | t->dscr = mfspr(SPRN_DSCR); | ||
771 | 863 | ||
772 | WARN_ON(!irqs_disabled()); | 864 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) { |
865 | t->bescr = mfspr(SPRN_BESCR); | ||
866 | t->ebbhr = mfspr(SPRN_EBBHR); | ||
867 | t->ebbrr = mfspr(SPRN_EBBRR); | ||
773 | 868 | ||
774 | /* Back up the TAR and DSCR across context switches. | 869 | t->fscr = mfspr(SPRN_FSCR); |
775 | * Note that the TAR is not available for use in the kernel. (To | ||
776 | * provide this, the TAR should be backed up/restored on exception | ||
777 | * entry/exit instead, and be in pt_regs. FIXME, this should be in | ||
778 | * pt_regs anyway (for debug).) | ||
779 | * Save the TAR and DSCR here before we do treclaim/trecheckpoint as | ||
780 | * these will change them. | ||
781 | */ | ||
782 | save_early_sprs(&prev->thread); | ||
783 | 870 | ||
784 | __switch_to_tm(prev); | 871 | /* |
872 | * Note that the TAR is not available for use in the kernel. | ||
873 | * (To provide this, the TAR should be backed up/restored on | ||
874 | * exception entry/exit instead, and be in pt_regs. FIXME, | ||
875 | * this should be in pt_regs anyway (for debug).) | ||
876 | */ | ||
877 | t->tar = mfspr(SPRN_TAR); | ||
878 | } | ||
879 | #endif | ||
880 | } | ||
785 | 881 | ||
786 | #ifdef CONFIG_SMP | 882 | static inline void restore_sprs(struct thread_struct *old_thread, |
787 | /* avoid complexity of lazy save/restore of fpu | 883 | struct thread_struct *new_thread) |
788 | * by just saving it every time we switch out if | 884 | { |
789 | * this task used the fpu during the last quantum. | ||
790 | * | ||
791 | * If it tries to use the fpu again, it'll trap and | ||
792 | * reload its fp regs. So we don't have to do a restore | ||
793 | * every switch, just a save. | ||
794 | * -- Cort | ||
795 | */ | ||
796 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) | ||
797 | giveup_fpu(prev); | ||
798 | #ifdef CONFIG_ALTIVEC | 885 | #ifdef CONFIG_ALTIVEC |
799 | /* | 886 | if (cpu_has_feature(CPU_FTR_ALTIVEC) && |
800 | * If the previous thread used altivec in the last quantum | 887 | old_thread->vrsave != new_thread->vrsave) |
801 | * (thus changing altivec regs) then save them. | 888 | mtspr(SPRN_VRSAVE, new_thread->vrsave); |
802 | * We used to check the VRSAVE register but not all apps | 889 | #endif |
803 | * set it, so we don't rely on it now (and in fact we need | 890 | #ifdef CONFIG_PPC_BOOK3S_64 |
804 | * to save & restore VSCR even if VRSAVE == 0). -- paulus | 891 | if (cpu_has_feature(CPU_FTR_DSCR)) { |
805 | * | 892 | u64 dscr = get_paca()->dscr_default; |
806 | * On SMP we always save/restore altivec regs just to avoid the | 893 | u64 fscr = old_thread->fscr & ~FSCR_DSCR; |
807 | * complexity of changing processors. | ||
808 | * -- Cort | ||
809 | */ | ||
810 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) | ||
811 | giveup_altivec(prev); | ||
812 | #endif /* CONFIG_ALTIVEC */ | ||
813 | #ifdef CONFIG_VSX | ||
814 | if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX)) | ||
815 | /* VMX and FPU registers are already saved here */ ||
816 | __giveup_vsx(prev); | ||
817 | #endif /* CONFIG_VSX */ | ||
818 | #ifdef CONFIG_SPE | ||
819 | /* | ||
820 | * If the previous thread used spe in the last quantum | ||
821 | * (thus changing spe regs) then save them. | ||
822 | * | ||
823 | * On SMP we always save/restore spe regs just to avoid the | ||
824 | * complexity of changing processors. | ||
825 | */ | ||
826 | if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))) | ||
827 | giveup_spe(prev); | ||
828 | #endif /* CONFIG_SPE */ | ||
829 | 894 | ||
830 | #else /* CONFIG_SMP */ | 895 | if (new_thread->dscr_inherit) { |
831 | #ifdef CONFIG_ALTIVEC | 896 | dscr = new_thread->dscr; |
832 | /* Avoid the trap. On smp this never happens since ||
833 | * we don't set last_task_used_altivec -- Cort | 898 | } |
834 | */ | ||
835 | if (new->thread.regs && last_task_used_altivec == new) | ||
836 | new->thread.regs->msr |= MSR_VEC; | ||
837 | #endif /* CONFIG_ALTIVEC */ | ||
838 | #ifdef CONFIG_VSX | ||
839 | if (new->thread.regs && last_task_used_vsx == new) | ||
840 | new->thread.regs->msr |= MSR_VSX; | ||
841 | #endif /* CONFIG_VSX */ | ||
842 | #ifdef CONFIG_SPE | ||
843 | /* Avoid the trap. On smp this never happens since ||
844 | * we don't set last_task_used_spe | ||
845 | */ | ||
846 | if (new->thread.regs && last_task_used_spe == new) | ||
847 | new->thread.regs->msr |= MSR_SPE; | ||
848 | #endif /* CONFIG_SPE */ | ||
849 | 899 | ||
850 | #endif /* CONFIG_SMP */ | 900 | if (old_thread->dscr != dscr) |
901 | mtspr(SPRN_DSCR, dscr); | ||
851 | 902 | ||
852 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 903 | if (old_thread->fscr != fscr) |
853 | switch_booke_debug_regs(&new->thread.debug); | 904 | mtspr(SPRN_FSCR, fscr); |
854 | #else | 905 | } |
855 | /* | 906 | |
856 | * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would | 907 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) { |
857 | * schedule DABR | 908 | if (old_thread->bescr != new_thread->bescr) |
858 | */ | 909 | mtspr(SPRN_BESCR, new_thread->bescr); |
859 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | 910 | if (old_thread->ebbhr != new_thread->ebbhr) |
860 | if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk), &new->thread.hw_brk))) | 911 | mtspr(SPRN_EBBHR, new_thread->ebbhr); |
861 | __set_breakpoint(&new->thread.hw_brk); | 912 | if (old_thread->ebbrr != new_thread->ebbrr) |
862 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 913 | mtspr(SPRN_EBBRR, new_thread->ebbrr); |
914 | |||
915 | if (old_thread->tar != new_thread->tar) | ||
916 | mtspr(SPRN_TAR, new_thread->tar); | ||
917 | } | ||
863 | #endif | 918 | #endif |
919 | } | ||
864 | 920 | ||
921 | struct task_struct *__switch_to(struct task_struct *prev, | ||
922 | struct task_struct *new) | ||
923 | { | ||
924 | struct thread_struct *new_thread, *old_thread; | ||
925 | struct task_struct *last; | ||
926 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
927 | struct ppc64_tlb_batch *batch; | ||
928 | #endif | ||
865 | 929 | ||
866 | new_thread = &new->thread; | 930 | new_thread = &new->thread; |
867 | old_thread = ¤t->thread; | 931 | old_thread = ¤t->thread; |
868 | 932 | ||
933 | WARN_ON(!irqs_disabled()); | ||
934 | |||
869 | #ifdef CONFIG_PPC64 | 935 | #ifdef CONFIG_PPC64 |
870 | /* | 936 | /* |
871 | * Collect processor utilization data per process | 937 | * Collect processor utilization data per process |
@@ -890,6 +956,30 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
890 | } | 956 | } |
891 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 957 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
892 | 958 | ||
959 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
960 | switch_booke_debug_regs(&new->thread.debug); | ||
961 | #else | ||
962 | /* | ||
963 | * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would | ||
964 | * schedule DABR | ||
965 | */ | ||
966 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | ||
967 | if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk), &new->thread.hw_brk))) | ||
968 | __set_breakpoint(&new->thread.hw_brk); | ||
969 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | ||
970 | #endif | ||
971 | |||
972 | /* | ||
973 | * We need to save SPRs before treclaim/trecheckpoint as these will | ||
974 | * change a number of them. | ||
975 | */ | ||
976 | save_sprs(&prev->thread); | ||
977 | |||
978 | __switch_to_tm(prev); | ||
979 | |||
980 | /* Save FPU, Altivec, VSX and SPE state */ | ||
981 | giveup_all(prev); | ||
982 | |||
893 | /* | 983 | /* |
894 | * We can't take a PMU exception inside _switch() since there is a | 984 | * We can't take a PMU exception inside _switch() since there is a |
895 | * window where the kernel stack SLB and the kernel stack are out | 985 | * window where the kernel stack SLB and the kernel stack are out |
@@ -899,6 +989,15 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
899 | 989 | ||
900 | tm_recheckpoint_new_task(new); | 990 | tm_recheckpoint_new_task(new); |
901 | 991 | ||
992 | /* | ||
993 | * Call restore_sprs() before calling _switch(). If we move it after | ||
994 | * _switch() then we miss out on calling it for new tasks. The reason | ||
995 | * for this is we manually create a stack frame for new tasks that | ||
996 | * directly returns through ret_from_fork() or | ||
997 | * ret_from_kernel_thread(). See copy_thread() for details. | ||
998 | */ | ||
999 | restore_sprs(old_thread, new_thread); | ||
1000 | |||
902 | last = _switch(old_thread, new_thread); | 1001 | last = _switch(old_thread, new_thread); |
903 | 1002 | ||
904 | #ifdef CONFIG_PPC_BOOK3S_64 | 1003 | #ifdef CONFIG_PPC_BOOK3S_64 |
@@ -952,10 +1051,12 @@ static void show_instructions(struct pt_regs *regs) | |||
952 | printk("\n"); | 1051 | printk("\n"); |
953 | } | 1052 | } |
954 | 1053 | ||
955 | static struct regbit { | 1054 | struct regbit { |
956 | unsigned long bit; | 1055 | unsigned long bit; |
957 | const char *name; | 1056 | const char *name; |
958 | } msr_bits[] = { | 1057 | }; |
1058 | |||
1059 | static struct regbit msr_bits[] = { | ||
959 | #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE) | 1060 | #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE) |
960 | {MSR_SF, "SF"}, | 1061 | {MSR_SF, "SF"}, |
961 | {MSR_HV, "HV"}, | 1062 | {MSR_HV, "HV"}, |
@@ -985,16 +1086,49 @@ static struct regbit { | |||
985 | {0, NULL} | 1086 | {0, NULL} |
986 | }; | 1087 | }; |
987 | 1088 | ||
988 | static void printbits(unsigned long val, struct regbit *bits) | 1089 | static void print_bits(unsigned long val, struct regbit *bits, const char *sep) |
989 | { | 1090 | { |
990 | const char *sep = ""; | 1091 | const char *s = ""; |
991 | 1092 | ||
992 | printk("<"); | ||
993 | for (; bits->bit; ++bits) | 1093 | for (; bits->bit; ++bits) |
994 | if (val & bits->bit) { | 1094 | if (val & bits->bit) { |
995 | printk("%s%s", sep, bits->name); | 1095 | printk("%s%s", s, bits->name); |
996 | sep = ","; | 1096 | s = sep; |
997 | } | 1097 | } |
1098 | } | ||
1099 | |||
1100 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
1101 | static struct regbit msr_tm_bits[] = { | ||
1102 | {MSR_TS_T, "T"}, | ||
1103 | {MSR_TS_S, "S"}, | ||
1104 | {MSR_TM, "E"}, | ||
1105 | {0, NULL} | ||
1106 | }; | ||
1107 | |||
1108 | static void print_tm_bits(unsigned long val) | ||
1109 | { | ||
1110 | /* | ||
1111 | * This only prints something if at least one of the TM bits is set. ||
1112 | * Inside the TM[], the output means: | ||
1113 | * E: Enabled (bit 32) | ||
1114 | * S: Suspended (bit 33) | ||
1115 | * T: Transactional (bit 34) | ||
1116 | */ | ||
1117 | if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) { | ||
1118 | printk(",TM["); | ||
1119 | print_bits(val, msr_tm_bits, ""); | ||
1120 | printk("]"); | ||
1121 | } | ||
1122 | } | ||
1123 | #else | ||
1124 | static void print_tm_bits(unsigned long val) {} | ||
1125 | #endif | ||
1126 | |||
1127 | static void print_msr_bits(unsigned long val) | ||
1128 | { | ||
1129 | printk("<"); | ||
1130 | print_bits(val, msr_bits, ","); | ||
1131 | print_tm_bits(val); | ||
998 | printk(">"); | 1132 | printk(">"); |
999 | } | 1133 | } |
1000 | 1134 | ||
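Note: for a task with TM enabled and a suspended transaction, the MSR line of an oops would now render roughly as follows (register value and surrounding bit list are illustrative placeholders, not taken from a real log):

        MSR: <value> <SF,FP,ME,IR,DR,RI,LE,TM[SE]>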
@@ -1019,7 +1153,7 @@ void show_regs(struct pt_regs * regs) | |||
1019 | printk("REGS: %p TRAP: %04lx %s (%s)\n", | 1153 | printk("REGS: %p TRAP: %04lx %s (%s)\n", |
1020 | regs, regs->trap, print_tainted(), init_utsname()->release); | 1154 | regs, regs->trap, print_tainted(), init_utsname()->release); |
1021 | printk("MSR: "REG" ", regs->msr); | 1155 | printk("MSR: "REG" ", regs->msr); |
1022 | printbits(regs->msr, msr_bits); | 1156 | print_msr_bits(regs->msr); |
1023 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); | 1157 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); |
1024 | trap = TRAP(regs); | 1158 | trap = TRAP(regs); |
1025 | if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) | 1159 | if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) |
@@ -1061,13 +1195,10 @@ void show_regs(struct pt_regs * regs) | |||
1061 | 1195 | ||
1062 | void exit_thread(void) | 1196 | void exit_thread(void) |
1063 | { | 1197 | { |
1064 | discard_lazy_cpu_state(); | ||
1065 | } | 1198 | } |
1066 | 1199 | ||
1067 | void flush_thread(void) | 1200 | void flush_thread(void) |
1068 | { | 1201 | { |
1069 | discard_lazy_cpu_state(); | ||
1070 | |||
1071 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 1202 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
1072 | flush_ptrace_hw_breakpoint(current); | 1203 | flush_ptrace_hw_breakpoint(current); |
1073 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ | 1204 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ |
@@ -1086,10 +1217,7 @@ release_thread(struct task_struct *t) | |||
1086 | */ | 1217 | */ |
1087 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) | 1218 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
1088 | { | 1219 | { |
1089 | flush_fp_to_thread(src); | 1220 | flush_all_to_thread(src); |
1090 | flush_altivec_to_thread(src); | ||
1091 | flush_vsx_to_thread(src); | ||
1092 | flush_spe_to_thread(src); | ||
1093 | /* | 1221 | /* |
1094 | * Flush TM state out so we can copy it. __switch_to_tm() does this | 1222 | * Flush TM state out so we can copy it. __switch_to_tm() does this |
1095 | * flush but it removes the checkpointed state from the current CPU and | 1223 | * flush but it removes the checkpointed state from the current CPU and |
@@ -1212,7 +1340,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
1212 | #ifdef CONFIG_PPC64 | 1340 | #ifdef CONFIG_PPC64 |
1213 | if (cpu_has_feature(CPU_FTR_DSCR)) { | 1341 | if (cpu_has_feature(CPU_FTR_DSCR)) { |
1214 | p->thread.dscr_inherit = current->thread.dscr_inherit; | 1342 | p->thread.dscr_inherit = current->thread.dscr_inherit; |
1215 | p->thread.dscr = current->thread.dscr; | 1343 | p->thread.dscr = mfspr(SPRN_DSCR); |
1216 | } | 1344 | } |
1217 | if (cpu_has_feature(CPU_FTR_HAS_PPR)) | 1345 | if (cpu_has_feature(CPU_FTR_HAS_PPR)) |
1218 | p->thread.ppr = INIT_PPR; | 1346 | p->thread.ppr = INIT_PPR; |
@@ -1305,7 +1433,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) | |||
1305 | regs->msr = MSR_USER32; | 1433 | regs->msr = MSR_USER32; |
1306 | } | 1434 | } |
1307 | #endif | 1435 | #endif |
1308 | discard_lazy_cpu_state(); | ||
1309 | #ifdef CONFIG_VSX | 1436 | #ifdef CONFIG_VSX |
1310 | current->thread.used_vsr = 0; | 1437 | current->thread.used_vsr = 0; |
1311 | #endif | 1438 | #endif |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 92dea8df6b26..da5192590c44 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -389,6 +389,7 @@ static void __init prom_printf(const char *format, ...) | |||
389 | break; | 389 | break; |
390 | } | 390 | } |
391 | } | 391 | } |
392 | va_end(args); | ||
392 | } | 393 | } |
393 | 394 | ||
394 | 395 | ||
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 737c0d0b53ac..30a03c03fe73 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -60,6 +60,7 @@ struct pt_regs_offset { | |||
60 | #define STR(s) #s /* convert to string */ | 60 | #define STR(s) #s /* convert to string */ |
61 | #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} | 61 | #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} |
62 | #define GPR_OFFSET_NAME(num) \ | 62 | #define GPR_OFFSET_NAME(num) \ |
63 | {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \ | ||
63 | {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])} | 64 | {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])} |
64 | #define REG_OFFSET_END {.name = NULL, .offset = 0} | 65 | #define REG_OFFSET_END {.name = NULL, .offset = 0} |
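Note: with the extra alias row, regs_query_register_offset() in this file resolves either spelling of a GPR name; a quick sketch (register choice illustrative):

        int off_gpr = regs_query_register_offset("gpr1");
        int off_r   = regs_query_register_offset("r1");
        /* both now return offsetof(struct pt_regs, gpr[1]) */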
65 | 66 | ||
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 5a753fae8265..28736ff27fea 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c | |||
@@ -44,6 +44,9 @@ | |||
44 | #include <asm/mmu.h> | 44 | #include <asm/mmu.h> |
45 | #include <asm/topology.h> | 45 | #include <asm/topology.h> |
46 | 46 | ||
47 | /* This is here deliberately so it's only used in this file */ | ||
48 | void enter_rtas(unsigned long); | ||
49 | |||
47 | struct rtas_t rtas = { | 50 | struct rtas_t rtas = { |
48 | .lock = __ARCH_SPIN_LOCK_UNLOCKED | 51 | .lock = __ARCH_SPIN_LOCK_UNLOCKED |
49 | }; | 52 | }; |
@@ -93,21 +96,13 @@ static void unlock_rtas(unsigned long flags) | |||
93 | */ | 96 | */ |
94 | static void call_rtas_display_status(unsigned char c) | 97 | static void call_rtas_display_status(unsigned char c) |
95 | { | 98 | { |
96 | struct rtas_args *args = &rtas.args; | ||
97 | unsigned long s; | 99 | unsigned long s; |
98 | 100 | ||
99 | if (!rtas.base) | 101 | if (!rtas.base) |
100 | return; | 102 | return; |
101 | s = lock_rtas(); | ||
102 | |||
103 | args->token = cpu_to_be32(10); | ||
104 | args->nargs = cpu_to_be32(1); | ||
105 | args->nret = cpu_to_be32(1); | ||
106 | args->rets = &(args->args[1]); | ||
107 | args->args[0] = cpu_to_be32(c); | ||
108 | |||
109 | enter_rtas(__pa(args)); | ||
110 | 103 | ||
104 | s = lock_rtas(); | ||
105 | rtas_call_unlocked(&rtas.args, 10, 1, 1, c); ||
111 | unlock_rtas(s); | 106 | unlock_rtas(s); |
112 | } | 107 | } |
113 | 108 | ||
@@ -418,6 +413,36 @@ static char *__fetch_rtas_last_error(char *altbuf) | |||
418 | #define get_errorlog_buffer() NULL | 413 | #define get_errorlog_buffer() NULL |
419 | #endif | 414 | #endif |
420 | 415 | ||
416 | |||
417 | static void | ||
418 | va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, | ||
419 | va_list list) | ||
420 | { | ||
421 | int i; | ||
422 | |||
423 | args->token = cpu_to_be32(token); | ||
424 | args->nargs = cpu_to_be32(nargs); | ||
425 | args->nret = cpu_to_be32(nret); | ||
426 | args->rets = &(args->args[nargs]); | ||
427 | |||
428 | for (i = 0; i < nargs; ++i) | ||
429 | args->args[i] = cpu_to_be32(va_arg(list, __u32)); | ||
430 | |||
431 | for (i = 0; i < nret; ++i) | ||
432 | args->rets[i] = 0; | ||
433 | |||
434 | enter_rtas(__pa(args)); | ||
435 | } | ||
436 | |||
437 | void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...) | ||
438 | { | ||
439 | va_list list; | ||
440 | |||
441 | va_start(list, nret); | ||
442 | va_rtas_call_unlocked(args, token, nargs, nret, list); | ||
443 | va_end(list); | ||
444 | } | ||
445 | |||
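Note: a hypothetical caller illustrating the point of the new entry point: a private rtas_args buffer can be filled and fired without taking the global RTAS lock (the function, token and argument are made up for the sketch):

        static int example_rtas_query(struct rtas_args *args, int token, u32 index)
        {
                rtas_call_unlocked(args, token, 1, 1, index);
                return be32_to_cpu(args->rets[0]);
        }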
421 | int rtas_call(int token, int nargs, int nret, int *outputs, ...) | 446 | int rtas_call(int token, int nargs, int nret, int *outputs, ...) |
422 | { | 447 | { |
423 | va_list list; | 448 | va_list list; |
@@ -431,22 +456,14 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...) | |||
431 | return -1; | 456 | return -1; |
432 | 457 | ||
433 | s = lock_rtas(); | 458 | s = lock_rtas(); |
459 | |||
460 | /* We use the global rtas args buffer */ | ||
434 | rtas_args = &rtas.args; | 461 | rtas_args = &rtas.args; |
435 | 462 | ||
436 | rtas_args->token = cpu_to_be32(token); | ||
437 | rtas_args->nargs = cpu_to_be32(nargs); | ||
438 | rtas_args->nret = cpu_to_be32(nret); | ||
439 | rtas_args->rets = &(rtas_args->args[nargs]); | ||
440 | va_start(list, outputs); | 463 | va_start(list, outputs); |
441 | for (i = 0; i < nargs; ++i) | 464 | va_rtas_call_unlocked(rtas_args, token, nargs, nret, list); |
442 | rtas_args->args[i] = cpu_to_be32(va_arg(list, __u32)); | ||
443 | va_end(list); | 465 | va_end(list); |
444 | 466 | ||
445 | for (i = 0; i < nret; ++i) | ||
446 | rtas_args->rets[i] = 0; | ||
447 | |||
448 | enter_rtas(__pa(rtas_args)); | ||
449 | |||
450 | /* A -1 return code indicates that the last command couldn't | 467 | /* A -1 return code indicates that the last command couldn't |
451 | be completed due to a hardware error. */ | 468 | be completed due to a hardware error. */ |
452 | if (be32_to_cpu(rtas_args->rets[0]) == -1) | 469 | if (be32_to_cpu(rtas_args->rets[0]) == -1) |
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index ef7c24e84a62..b6aa378aff63 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c | |||
@@ -458,7 +458,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, | |||
458 | * contains valid data | 458 | * contains valid data |
459 | */ | 459 | */ |
460 | if (current->thread.used_vsr && ctx_has_vsx_region) { | 460 | if (current->thread.used_vsr && ctx_has_vsx_region) { |
461 | __giveup_vsx(current); | 461 | flush_vsx_to_thread(current); |
462 | if (copy_vsx_to_user(&frame->mc_vsregs, current)) | 462 | if (copy_vsx_to_user(&frame->mc_vsregs, current)) |
463 | return 1; | 463 | return 1; |
464 | msr |= MSR_VSX; | 464 | msr |= MSR_VSX; |
@@ -606,7 +606,7 @@ static int save_tm_user_regs(struct pt_regs *regs, | |||
606 | * contains valid data | 606 | * contains valid data |
607 | */ | 607 | */ |
608 | if (current->thread.used_vsr) { | 608 | if (current->thread.used_vsr) { |
609 | __giveup_vsx(current); | 609 | flush_vsx_to_thread(current); |
610 | if (copy_vsx_to_user(&frame->mc_vsregs, current)) | 610 | if (copy_vsx_to_user(&frame->mc_vsregs, current)) |
611 | return 1; | 611 | return 1; |
612 | if (msr & MSR_VSX) { | 612 | if (msr & MSR_VSX) { |
@@ -687,15 +687,6 @@ static long restore_user_regs(struct pt_regs *regs, | |||
687 | if (sig) | 687 | if (sig) |
688 | regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); | 688 | regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); |
689 | 689 | ||
690 | /* | ||
691 | * Do this before updating the thread state in | ||
692 | * current->thread.fpr/vr/evr. That way, if we get preempted | ||
693 | * and another task grabs the FPU/Altivec/SPE, it won't be | ||
694 | * tempted to save the current CPU state into the thread_struct | ||
695 | * and corrupt what we are writing there. | ||
696 | */ | ||
697 | discard_lazy_cpu_state(); | ||
698 | |||
699 | #ifdef CONFIG_ALTIVEC | 690 | #ifdef CONFIG_ALTIVEC |
700 | /* | 691 | /* |
701 | * Force the process to reload the altivec registers from | 692 | * Force the process to reload the altivec registers from |
@@ -798,15 +789,6 @@ static long restore_tm_user_regs(struct pt_regs *regs, | |||
798 | /* Restore the previous little-endian mode */ | 789 | /* Restore the previous little-endian mode */ |
799 | regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); | 790 | regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); |
800 | 791 | ||
801 | /* | ||
802 | * Do this before updating the thread state in | ||
803 | * current->thread.fpr/vr/evr. That way, if we get preempted | ||
804 | * and another task grabs the FPU/Altivec/SPE, it won't be | ||
805 | * tempted to save the current CPU state into the thread_struct | ||
806 | * and corrupt what we are writing there. | ||
807 | */ | ||
808 | discard_lazy_cpu_state(); | ||
809 | |||
810 | #ifdef CONFIG_ALTIVEC | 792 | #ifdef CONFIG_ALTIVEC |
811 | regs->msr &= ~MSR_VEC; | 793 | regs->msr &= ~MSR_VEC; |
812 | if (msr & MSR_VEC) { | 794 | if (msr & MSR_VEC) { |
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index c676ecec0869..25520794aa37 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
@@ -147,7 +147,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, | |||
147 | * VMX data. | 147 | * VMX data. |
148 | */ | 148 | */ |
149 | if (current->thread.used_vsr && ctx_has_vsx_region) { | 149 | if (current->thread.used_vsr && ctx_has_vsx_region) { |
150 | __giveup_vsx(current); | 150 | flush_vsx_to_thread(current); |
151 | v_regs += ELF_NVRREG; | 151 | v_regs += ELF_NVRREG; |
152 | err |= copy_vsx_to_user(v_regs, current); | 152 | err |= copy_vsx_to_user(v_regs, current); |
153 | /* set MSR_VSX in the MSR value in the frame to | 153 | /* set MSR_VSX in the MSR value in the frame to |
@@ -270,7 +270,7 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, | |||
270 | * VMX data. | 270 | * VMX data. |
271 | */ | 271 | */ |
272 | if (current->thread.used_vsr) { | 272 | if (current->thread.used_vsr) { |
273 | __giveup_vsx(current); | 273 | flush_vsx_to_thread(current); |
274 | v_regs += ELF_NVRREG; | 274 | v_regs += ELF_NVRREG; |
275 | tm_v_regs += ELF_NVRREG; | 275 | tm_v_regs += ELF_NVRREG; |
276 | 276 | ||
@@ -350,15 +350,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig, | |||
350 | err |= __get_user(set->sig[0], &sc->oldmask); | 350 | err |= __get_user(set->sig[0], &sc->oldmask); |
351 | 351 | ||
352 | /* | 352 | /* |
353 | * Do this before updating the thread state in | ||
354 | * current->thread.fpr/vr. That way, if we get preempted | ||
355 | * and another task grabs the FPU/Altivec, it won't be | ||
356 | * tempted to save the current CPU state into the thread_struct | ||
357 | * and corrupt what we are writing there. | ||
358 | */ | ||
359 | discard_lazy_cpu_state(); | ||
360 | |||
361 | /* | ||
362 | * Force reload of FP/VEC. | 353 | * Force reload of FP/VEC. |
363 | * This has to be done before copying stuff into current->thread.fpr/vr | 354 | * This has to be done before copying stuff into current->thread.fpr/vr |
364 | * for the reasons explained in the previous comment. | 355 | * for the reasons explained in the previous comment. |
@@ -469,15 +460,6 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, | |||
469 | err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); | 460 | err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); |
470 | 461 | ||
471 | /* | 462 | /* |
472 | * Do this before updating the thread state in | ||
473 | * current->thread.fpr/vr. That way, if we get preempted | ||
474 | * and another task grabs the FPU/Altivec, it won't be | ||
475 | * tempted to save the current CPU state into the thread_struct | ||
476 | * and corrupt what we are writing there. | ||
477 | */ | ||
478 | discard_lazy_cpu_state(); | ||
479 | |||
480 | /* | ||
481 | * Force reload of FP/VEC. | 463 | * Force reload of FP/VEC. |
482 | * This has to be done before copying stuff into current->thread.fpr/vr | 464 | * This has to be done before copying stuff into current->thread.fpr/vr |
483 | * for the reasons explained in the previous comment. | 465 | * for the reasons explained in the previous comment. |
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index ea43a347a104..4f24606afc3f 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c | |||
@@ -61,3 +61,10 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | |||
61 | save_context_stack(trace, tsk->thread.ksp, tsk, 0); | 61 | save_context_stack(trace, tsk->thread.ksp, tsk, 0); |
62 | } | 62 | } |
63 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk); | 63 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk); |
64 | |||
65 | void | ||
66 | save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) | ||
67 | { | ||
68 | save_context_stack(trace, regs->gpr[1], current, 0); | ||
69 | } | ||
70 | EXPORT_SYMBOL_GPL(save_stack_trace_regs); | ||
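Note: a sketch of how generic code is expected to drive the new hook from an exception frame (buffer size illustrative):

        unsigned long entries[16];
        struct stack_trace trace = {
                .entries        = entries,
                .max_entries    = ARRAY_SIZE(entries),
        };

        save_stack_trace_regs(regs, &trace);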
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c index eae33e10b65f..6669b1752512 100644 --- a/arch/powerpc/kernel/swsusp.c +++ b/arch/powerpc/kernel/swsusp.c | |||
@@ -20,9 +20,7 @@ void save_processor_state(void) | |||
20 | * flush out all the special registers so we don't need | 20 | * flush out all the special registers so we don't need |
21 | * to save them in the snapshot | 21 | * to save them in the snapshot |
22 | */ | 22 | */ |
23 | flush_fp_to_thread(current); | 23 | flush_all_to_thread(current); |
24 | flush_altivec_to_thread(current); | ||
25 | flush_spe_to_thread(current); | ||
26 | 24 | ||
27 | #ifdef CONFIG_PPC64 | 25 | #ifdef CONFIG_PPC64 |
28 | hard_irq_disable(); | 26 | hard_irq_disable(); |
diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c index 2384129f5893..55323a620cfe 100644 --- a/arch/powerpc/kernel/systbl_chk.c +++ b/arch/powerpc/kernel/systbl_chk.c | |||
@@ -57,4 +57,4 @@ | |||
57 | 57 | ||
58 | START_TABLE | 58 | START_TABLE |
59 | #include <asm/systbl.h> | 59 | #include <asm/systbl.h> |
60 | END_TABLE __NR_syscalls | 60 | END_TABLE NR_syscalls |
diff --git a/arch/powerpc/kernel/systbl_chk.sh b/arch/powerpc/kernel/systbl_chk.sh index 19415e7674a5..31b6e7c358ca 100644 --- a/arch/powerpc/kernel/systbl_chk.sh +++ b/arch/powerpc/kernel/systbl_chk.sh | |||
@@ -16,7 +16,7 @@ awk 'BEGIN { num = -1; } # Ignore the beginning of the file | |||
16 | /^START_TABLE/ { num = 0; next; } | 16 | /^START_TABLE/ { num = 0; next; } |
17 | /^END_TABLE/ { | 17 | /^END_TABLE/ { |
18 | if (num != $2) { | 18 | if (num != $2) { |
19 | printf "__NR_syscalls (%s) is not one more than the last syscall (%s)\n", | 19 | printf "NR_syscalls (%s) is not one more than the last syscall (%s)\n", |
20 | $2, num - 1; | 20 | $2, num - 1; |
21 | exit(1); | 21 | exit(1); |
22 | } | 22 | } |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 1be1092c7204..81b0900a39ee 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -1002,38 +1002,6 @@ static int month_days[12] = { | |||
1002 | 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 | 1002 | 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 |
1003 | }; | 1003 | }; |
1004 | 1004 | ||
1005 | /* | ||
1006 | * This only works for the Gregorian calendar - i.e. after 1752 (in the UK) | ||
1007 | */ | ||
1008 | void GregorianDay(struct rtc_time * tm) | ||
1009 | { | ||
1010 | int leapsToDate; | ||
1011 | int lastYear; | ||
1012 | int day; | ||
1013 | int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; | ||
1014 | |||
1015 | lastYear = tm->tm_year - 1; | ||
1016 | |||
1017 | /* | ||
1018 | * Number of leap corrections to apply up to end of last year | ||
1019 | */ | ||
1020 | leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400; | ||
1021 | |||
1022 | /* | ||
1023 | * This year is a leap year if it is divisible by 4 except when it is | ||
1024 | * divisible by 100 unless it is divisible by 400 | ||
1025 | * | ||
1026 | * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was | ||
1027 | */ | ||
1028 | day = tm->tm_mon > 2 && leapyear(tm->tm_year); | ||
1029 | |||
1030 | day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] + | ||
1031 | tm->tm_mday; | ||
1032 | |||
1033 | tm->tm_wday = day % 7; | ||
1034 | } | ||
1035 | EXPORT_SYMBOL_GPL(GregorianDay); | ||
1036 | |||
1037 | void to_tm(int tim, struct rtc_time * tm) | 1005 | void to_tm(int tim, struct rtc_time * tm) |
1038 | { | 1006 | { |
1039 | register int i; | 1007 | register int i; |
@@ -1064,9 +1032,9 @@ void to_tm(int tim, struct rtc_time * tm) | |||
1064 | tm->tm_mday = day + 1; | 1032 | tm->tm_mday = day + 1; |
1065 | 1033 | ||
1066 | /* | 1034 | /* |
1067 | * Determine the day of week | 1035 | * No-one uses the day of the week. |
1068 | */ | 1036 | */ |
1069 | GregorianDay(tm); | 1037 | tm->tm_wday = -1; |
1070 | } | 1038 | } |
1071 | EXPORT_SYMBOL(to_tm); | 1039 | EXPORT_SYMBOL(to_tm); |
1072 | 1040 | ||
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 37de90f8a845..b6becc795bb5 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -1313,13 +1313,6 @@ void nonrecoverable_exception(struct pt_regs *regs) | |||
1313 | die("nonrecoverable exception", regs, SIGKILL); | 1313 | die("nonrecoverable exception", regs, SIGKILL); |
1314 | } | 1314 | } |
1315 | 1315 | ||
1316 | void trace_syscall(struct pt_regs *regs) | ||
1317 | { | ||
1318 | printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", | ||
1319 | current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0], | ||
1320 | regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); | ||
1321 | } | ||
1322 | |||
1323 | void kernel_fp_unavailable_exception(struct pt_regs *regs) | 1316 | void kernel_fp_unavailable_exception(struct pt_regs *regs) |
1324 | { | 1317 | { |
1325 | enum ctx_state prev_state = exception_enter(); | 1318 | enum ctx_state prev_state = exception_enter(); |
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index b457bfa28436..def1b8b5e6c1 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c | |||
@@ -671,7 +671,7 @@ static void __init vdso_setup_syscall_map(void) | |||
671 | extern unsigned long sys_ni_syscall; | 671 | extern unsigned long sys_ni_syscall; |
672 | 672 | ||
673 | 673 | ||
674 | for (i = 0; i < __NR_syscalls; i++) { | 674 | for (i = 0; i < NR_syscalls; i++) { |
675 | #ifdef CONFIG_PPC64 | 675 | #ifdef CONFIG_PPC64 |
676 | if (sys_call_table[i*2] != sys_ni_syscall) | 676 | if (sys_call_table[i*2] != sys_ni_syscall) |
677 | vdso_data->syscall_map_64[i >> 5] |= | 677 | vdso_data->syscall_map_64[i >> 5] |= |
diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S index 59cf5f452879..3745113fcc65 100644 --- a/arch/powerpc/kernel/vdso32/datapage.S +++ b/arch/powerpc/kernel/vdso32/datapage.S | |||
@@ -61,7 +61,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map) | |||
61 | addi r3,r3,CFG_SYSCALL_MAP32 | 61 | addi r3,r3,CFG_SYSCALL_MAP32 |
62 | cmpli cr0,r4,0 | 62 | cmpli cr0,r4,0 |
63 | beqlr | 63 | beqlr |
64 | li r0,__NR_syscalls | 64 | li r0,NR_syscalls |
65 | stw r0,0(r4) | 65 | stw r0,0(r4) |
66 | crclr cr0*4+so | 66 | crclr cr0*4+so |
67 | blr | 67 | blr |
diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S index 2f01c4a0d8a0..184a6ba7f283 100644 --- a/arch/powerpc/kernel/vdso64/datapage.S +++ b/arch/powerpc/kernel/vdso64/datapage.S | |||
@@ -62,7 +62,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map) | |||
62 | cmpli cr0,r4,0 | 62 | cmpli cr0,r4,0 |
63 | crclr cr0*4+so | 63 | crclr cr0*4+so |
64 | beqlr | 64 | beqlr |
65 | li r0,__NR_syscalls | 65 | li r0,NR_syscalls |
66 | stw r0,0(r4) | 66 | stw r0,0(r4) |
67 | blr | 67 | blr |
68 | .cfi_endproc | 68 | .cfi_endproc |
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index f5c80d567d8d..162d0f714941 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S | |||
@@ -29,24 +29,10 @@ _GLOBAL(do_load_up_transact_altivec) | |||
29 | addi r10,r3,THREAD_TRANSACT_VRSTATE | 29 | addi r10,r3,THREAD_TRANSACT_VRSTATE |
30 | REST_32VRS(0,r4,r10) | 30 | REST_32VRS(0,r4,r10) |
31 | 31 | ||
32 | /* Disable VEC again. */ | ||
33 | MTMSRD(r6) | ||
34 | isync | ||
35 | |||
36 | blr | 32 | blr |
37 | #endif | 33 | #endif |
38 | 34 | ||
39 | /* | 35 | /* |
40 | * Enable use of VMX/Altivec for the caller. | ||
41 | */ | ||
42 | _GLOBAL(vec_enable) | ||
43 | mfmsr r3 | ||
44 | oris r3,r3,MSR_VEC@h | ||
45 | MTMSRD(r3) | ||
46 | isync | ||
47 | blr | ||
48 | |||
49 | /* | ||
50 | * Load state from memory into VMX registers including VSCR. | 36 | * Load state from memory into VMX registers including VSCR. |
51 | * Assumes the caller has enabled VMX in the MSR. | 37 | * Assumes the caller has enabled VMX in the MSR. |
52 | */ | 38 | */ |
@@ -84,39 +70,6 @@ _GLOBAL(load_up_altivec) | |||
84 | MTMSRD(r5) /* enable use of AltiVec now */ | 70 | MTMSRD(r5) /* enable use of AltiVec now */ |
85 | isync | 71 | isync |
86 | 72 | ||
87 | /* | ||
88 | * For SMP, we don't do lazy VMX switching because it just gets too | ||
89 | * horrendously complex, especially when a task switches from one CPU | ||
90 | * to another. Instead we call giveup_altvec in switch_to. | ||
91 | * VRSAVE isn't dealt with here, that is done in the normal context | ||
92 | * switch code. Note that we could rely on vrsave value to eventually | ||
93 | * avoid saving all of the VREGs here... | ||
94 | */ | ||
95 | #ifndef CONFIG_SMP | ||
96 | LOAD_REG_ADDRBASE(r3, last_task_used_altivec) | ||
97 | toreal(r3) | ||
98 | PPC_LL r4,ADDROFF(last_task_used_altivec)(r3) | ||
99 | PPC_LCMPI 0,r4,0 | ||
100 | beq 1f | ||
101 | |||
102 | /* Save VMX state to last_task_used_altivec's THREAD struct */ | ||
103 | toreal(r4) | ||
104 | addi r4,r4,THREAD | ||
105 | addi r6,r4,THREAD_VRSTATE | ||
106 | SAVE_32VRS(0,r5,r6) | ||
107 | mfvscr v0 | ||
108 | li r10,VRSTATE_VSCR | ||
109 | stvx v0,r10,r6 | ||
110 | /* Disable VMX for last_task_used_altivec */ | ||
111 | PPC_LL r5,PT_REGS(r4) | ||
112 | toreal(r5) | ||
113 | PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
114 | lis r10,MSR_VEC@h | ||
115 | andc r4,r4,r10 | ||
116 | PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
117 | 1: | ||
118 | #endif /* CONFIG_SMP */ | ||
119 | |||
120 | /* Hack: if we get an altivec unavailable trap with VRSAVE | 73 | /* Hack: if we get an altivec unavailable trap with VRSAVE |
121 | * set to all zeros, we assume this is a broken application | 74 | * set to all zeros, we assume this is a broken application |
122 | * that fails to set it properly, and thus we switch it to | 75 | * that fails to set it properly, and thus we switch it to |
@@ -145,39 +98,15 @@ _GLOBAL(load_up_altivec) | |||
145 | lvx v0,r10,r6 | 98 | lvx v0,r10,r6 |
146 | mtvscr v0 | 99 | mtvscr v0 |
147 | REST_32VRS(0,r4,r6) | 100 | REST_32VRS(0,r4,r6) |
148 | #ifndef CONFIG_SMP | ||
149 | /* Update last_task_used_altivec to 'current' */ | ||
150 | subi r4,r5,THREAD /* Back to 'current' */ | ||
151 | fromreal(r4) | ||
152 | PPC_STL r4,ADDROFF(last_task_used_altivec)(r3) | ||
153 | #endif /* CONFIG_SMP */ | ||
154 | /* restore registers and return */ | 101 | /* restore registers and return */ |
155 | blr | 102 | blr |
156 | 103 | ||
157 | _GLOBAL(giveup_altivec_notask) | ||
158 | mfmsr r3 | ||
159 | andis. r4,r3,MSR_VEC@h | ||
160 | bnelr /* Already enabled? */ | ||
161 | oris r3,r3,MSR_VEC@h | ||
162 | SYNC | ||
163 | MTMSRD(r3) /* enable use of VMX now */ | ||
164 | isync | ||
165 | blr | ||
166 | |||
167 | /* | 104 | /* |
168 | * giveup_altivec(tsk) | 105 | * __giveup_altivec(tsk) |
169 | * Disable VMX for the task given as the argument, | 106 | * Disable VMX for the task given as the argument, |
170 | * and save the vector registers in its thread_struct. | 107 | * and save the vector registers in its thread_struct. |
171 | * Enables the VMX for use in the kernel on return. | ||
172 | */ | 108 | */ |
173 | _GLOBAL(giveup_altivec) | 109 | _GLOBAL(__giveup_altivec) |
174 | mfmsr r5 | ||
175 | oris r5,r5,MSR_VEC@h | ||
176 | SYNC | ||
177 | MTMSRD(r5) /* enable use of VMX now */ | ||
178 | isync | ||
179 | PPC_LCMPI 0,r3,0 | ||
180 | beqlr /* if no previous owner, done */ | ||
181 | addi r3,r3,THREAD /* want THREAD of task */ | 110 | addi r3,r3,THREAD /* want THREAD of task */ |
182 | PPC_LL r7,THREAD_VRSAVEAREA(r3) | 111 | PPC_LL r7,THREAD_VRSAVEAREA(r3) |
183 | PPC_LL r5,PT_REGS(r3) | 112 | PPC_LL r5,PT_REGS(r3) |
@@ -203,11 +132,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX) | |||
203 | andc r4,r4,r3 /* disable FP for previous task */ | 132 | andc r4,r4,r3 /* disable FP for previous task */ |
204 | PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) | 133 | PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) |
205 | 1: | 134 | 1: |
206 | #ifndef CONFIG_SMP | ||
207 | li r5,0 | ||
208 | LOAD_REG_ADDRBASE(r4,last_task_used_altivec) | ||
209 | PPC_STL r5,ADDROFF(last_task_used_altivec)(r4) | ||
210 | #endif /* CONFIG_SMP */ | ||
211 | blr | 135 | blr |
212 | 136 | ||
213 | #ifdef CONFIG_VSX | 137 | #ifdef CONFIG_VSX |
@@ -230,20 +154,6 @@ _GLOBAL(load_up_vsx) | |||
230 | andis. r5,r12,MSR_VEC@h | 154 | andis. r5,r12,MSR_VEC@h |
231 | beql+ load_up_altivec /* skip if already loaded */ | 155 | beql+ load_up_altivec /* skip if already loaded */ |
232 | 156 | ||
233 | #ifndef CONFIG_SMP | ||
234 | ld r3,last_task_used_vsx@got(r2) | ||
235 | ld r4,0(r3) | ||
236 | cmpdi 0,r4,0 | ||
237 | beq 1f | ||
238 | /* Disable VSX for last_task_used_vsx */ | ||
239 | addi r4,r4,THREAD | ||
240 | ld r5,PT_REGS(r4) | ||
241 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
242 | lis r6,MSR_VSX@h | ||
243 | andc r6,r4,r6 | ||
244 | std r6,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
245 | 1: | ||
246 | #endif /* CONFIG_SMP */ | ||
247 | ld r4,PACACURRENT(r13) | 157 | ld r4,PACACURRENT(r13) |
248 | addi r4,r4,THREAD /* Get THREAD */ | 158 | addi r4,r4,THREAD /* Get THREAD */ |
249 | li r6,1 | 159 | li r6,1 |
@@ -251,27 +161,14 @@ _GLOBAL(load_up_vsx) | |||
251 | /* enable use of VSX after return */ | 161 | /* enable use of VSX after return */ |
252 | oris r12,r12,MSR_VSX@h | 162 | oris r12,r12,MSR_VSX@h |
253 | std r12,_MSR(r1) | 163 | std r12,_MSR(r1) |
254 | #ifndef CONFIG_SMP | ||
255 | /* Update last_task_used_vsx to 'current' */ | ||
256 | ld r4,PACACURRENT(r13) | ||
257 | std r4,0(r3) | ||
258 | #endif /* CONFIG_SMP */ | ||
259 | b fast_exception_return | 164 | b fast_exception_return |
260 | 165 | ||
261 | /* | 166 | /* |
262 | * __giveup_vsx(tsk) | 167 | * __giveup_vsx(tsk) |
263 | * Disable VSX for the task given as the argument. | 168 | * Disable VSX for the task given as the argument. |
264 | * Does NOT save vsx registers. | 169 | * Does NOT save vsx registers. |
265 | * Enables the VSX for use in the kernel on return. | ||
266 | */ | 170 | */ |
267 | _GLOBAL(__giveup_vsx) | 171 | _GLOBAL(__giveup_vsx) |
268 | mfmsr r5 | ||
269 | oris r5,r5,MSR_VSX@h | ||
270 | mtmsrd r5 /* enable use of VSX now */ | ||
271 | isync | ||
272 | |||
273 | cmpdi 0,r3,0 | ||
274 | beqlr- /* if no previous owner, done */ | ||
275 | addi r3,r3,THREAD /* want THREAD of task */ | 172 | addi r3,r3,THREAD /* want THREAD of task */ |
276 | ld r5,PT_REGS(r3) | 173 | ld r5,PT_REGS(r3) |
277 | cmpdi 0,r5,0 | 174 | cmpdi 0,r5,0 |
@@ -281,11 +178,6 @@ _GLOBAL(__giveup_vsx) | |||
281 | andc r4,r4,r3 /* disable VSX for previous task */ | 178 | andc r4,r4,r3 /* disable VSX for previous task */ |
282 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | 179 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) |
283 | 1: | 180 | 1: |
284 | #ifndef CONFIG_SMP | ||
285 | li r5,0 | ||
286 | ld r4,last_task_used_vsx@got(r2) | ||
287 | std r5,0(r4) | ||
288 | #endif /* CONFIG_SMP */ | ||
289 | blr | 181 | blr |
290 | 182 | ||
291 | #endif /* CONFIG_VSX */ | 183 | #endif /* CONFIG_VSX */ |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 6b352691b8c9..cff207b72c46 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -2700,9 +2700,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
2700 | goto out; | 2700 | goto out; |
2701 | } | 2701 | } |
2702 | 2702 | ||
2703 | flush_fp_to_thread(current); | 2703 | flush_all_to_thread(current); |
2704 | flush_altivec_to_thread(current); | 2704 | |
2705 | flush_vsx_to_thread(current); | ||
2706 | vcpu->arch.wqp = &vcpu->arch.vcore->wq; | 2705 | vcpu->arch.wqp = &vcpu->arch.vcore->wq; |
2707 | vcpu->arch.pgdir = current->mm->pgd; | 2706 | vcpu->arch.pgdir = current->mm->pgd; |
2708 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; | 2707 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; |
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c index a759d9adb0b6..eab96cfe82fa 100644 --- a/arch/powerpc/kvm/book3s_paired_singles.c +++ b/arch/powerpc/kvm/book3s_paired_singles.c | |||
@@ -1265,6 +1265,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1265 | if (rcomp) | 1265 | if (rcomp) |
1266 | kvmppc_set_cr(vcpu, cr); | 1266 | kvmppc_set_cr(vcpu, cr); |
1267 | 1267 | ||
1268 | disable_kernel_fp(); | ||
1268 | preempt_enable(); | 1269 | preempt_enable(); |
1269 | 1270 | ||
1270 | return emulated; | 1271 | return emulated; |
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 70fb08da416d..95bceca8f40e 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -751,6 +751,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | |||
751 | preempt_disable(); | 751 | preempt_disable(); |
752 | enable_kernel_fp(); | 752 | enable_kernel_fp(); |
753 | load_fp_state(&vcpu->arch.fp); | 753 | load_fp_state(&vcpu->arch.fp); |
754 | disable_kernel_fp(); | ||
754 | t->fp_save_area = &vcpu->arch.fp; | 755 | t->fp_save_area = &vcpu->arch.fp; |
755 | preempt_enable(); | 756 | preempt_enable(); |
756 | } | 757 | } |
@@ -760,6 +761,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | |||
760 | preempt_disable(); | 761 | preempt_disable(); |
761 | enable_kernel_altivec(); | 762 | enable_kernel_altivec(); |
762 | load_vr_state(&vcpu->arch.vr); | 763 | load_vr_state(&vcpu->arch.vr); |
764 | disable_kernel_altivec(); | ||
763 | t->vr_save_area = &vcpu->arch.vr; | 765 | t->vr_save_area = &vcpu->arch.vr; |
764 | preempt_enable(); | 766 | preempt_enable(); |
765 | #endif | 767 | #endif |
@@ -788,6 +790,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) | |||
788 | preempt_disable(); | 790 | preempt_disable(); |
789 | enable_kernel_fp(); | 791 | enable_kernel_fp(); |
790 | load_fp_state(&vcpu->arch.fp); | 792 | load_fp_state(&vcpu->arch.fp); |
793 | disable_kernel_fp(); | ||
791 | preempt_enable(); | 794 | preempt_enable(); |
792 | } | 795 | } |
793 | #ifdef CONFIG_ALTIVEC | 796 | #ifdef CONFIG_ALTIVEC |
@@ -795,6 +798,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) | |||
795 | preempt_disable(); | 798 | preempt_disable(); |
796 | enable_kernel_altivec(); | 799 | enable_kernel_altivec(); |
797 | load_vr_state(&vcpu->arch.vr); | 800 | load_vr_state(&vcpu->arch.vr); |
801 | disable_kernel_altivec(); | ||
798 | preempt_enable(); | 802 | preempt_enable(); |
799 | } | 803 | } |
800 | #endif | 804 | #endif |
@@ -1486,21 +1490,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
1486 | goto out; | 1490 | goto out; |
1487 | /* interrupts now hard-disabled */ | 1491 | /* interrupts now hard-disabled */ |
1488 | 1492 | ||
1489 | /* Save FPU state in thread_struct */ | 1493 | /* Save FPU, Altivec and VSX state */ |
1490 | if (current->thread.regs->msr & MSR_FP) | 1494 | giveup_all(current); |
1491 | giveup_fpu(current); | ||
1492 | |||
1493 | #ifdef CONFIG_ALTIVEC | ||
1494 | /* Save Altivec state in thread_struct */ | ||
1495 | if (current->thread.regs->msr & MSR_VEC) | ||
1496 | giveup_altivec(current); | ||
1497 | #endif | ||
1498 | |||
1499 | #ifdef CONFIG_VSX | ||
1500 | /* Save VSX state in thread_struct */ | ||
1501 | if (current->thread.regs->msr & MSR_VSX) | ||
1502 | __giveup_vsx(current); | ||
1503 | #endif | ||
1504 | 1495 | ||
1505 | /* Preload FPU if it's enabled */ | 1496 | /* Preload FPU if it's enabled */ |
1506 | if (kvmppc_get_msr(vcpu) & MSR_FP) | 1497 | if (kvmppc_get_msr(vcpu) & MSR_FP) |
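The giveup_all() call stands in for the MSR-conditional, ifdef-guarded giveups deleted above. A sketch of what the removed lines did, folded into one helper; the helper name and exact structure are assumptions, only the per-unit logic is taken from the deleted code:

    static void giveup_all_sketch(struct task_struct *tsk)
    {
            unsigned long msr = tsk->thread.regs->msr;

            if (msr & MSR_FP)
                    giveup_fpu(tsk);        /* save FPU state in thread_struct */
    #ifdef CONFIG_ALTIVEC
            if (msr & MSR_VEC)
                    giveup_altivec(tsk);    /* save Altivec state */
    #endif
    #ifdef CONFIG_VSX
            if (msr & MSR_VSX)
                    __giveup_vsx(tsk);      /* save VSX state */
    #endif
    }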
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index fd5875179e5c..778ef86e187e 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -98,6 +98,7 @@ void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu) | |||
98 | preempt_disable(); | 98 | preempt_disable(); |
99 | enable_kernel_spe(); | 99 | enable_kernel_spe(); |
100 | kvmppc_save_guest_spe(vcpu); | 100 | kvmppc_save_guest_spe(vcpu); |
101 | disable_kernel_spe(); | ||
101 | vcpu->arch.shadow_msr &= ~MSR_SPE; | 102 | vcpu->arch.shadow_msr &= ~MSR_SPE; |
102 | preempt_enable(); | 103 | preempt_enable(); |
103 | } | 104 | } |
@@ -107,6 +108,7 @@ static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu) | |||
107 | preempt_disable(); | 108 | preempt_disable(); |
108 | enable_kernel_spe(); | 109 | enable_kernel_spe(); |
109 | kvmppc_load_guest_spe(vcpu); | 110 | kvmppc_load_guest_spe(vcpu); |
111 | disable_kernel_spe(); | ||
110 | vcpu->arch.shadow_msr |= MSR_SPE; | 112 | vcpu->arch.shadow_msr |= MSR_SPE; |
111 | preempt_enable(); | 113 | preempt_enable(); |
112 | } | 114 | } |
@@ -141,6 +143,7 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu) | |||
141 | if (!(current->thread.regs->msr & MSR_FP)) { | 143 | if (!(current->thread.regs->msr & MSR_FP)) { |
142 | enable_kernel_fp(); | 144 | enable_kernel_fp(); |
143 | load_fp_state(&vcpu->arch.fp); | 145 | load_fp_state(&vcpu->arch.fp); |
146 | disable_kernel_fp(); | ||
144 | current->thread.fp_save_area = &vcpu->arch.fp; | 147 | current->thread.fp_save_area = &vcpu->arch.fp; |
145 | current->thread.regs->msr |= MSR_FP; | 148 | current->thread.regs->msr |= MSR_FP; |
146 | } | 149 | } |
@@ -182,6 +185,7 @@ static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu) | |||
182 | if (!(current->thread.regs->msr & MSR_VEC)) { | 185 | if (!(current->thread.regs->msr & MSR_VEC)) { |
183 | enable_kernel_altivec(); | 186 | enable_kernel_altivec(); |
184 | load_vr_state(&vcpu->arch.vr); | 187 | load_vr_state(&vcpu->arch.vr); |
188 | disable_kernel_altivec(); | ||
185 | current->thread.vr_save_area = &vcpu->arch.vr; | 189 | current->thread.vr_save_area = &vcpu->arch.vr; |
186 | current->thread.regs->msr |= MSR_VEC; | 190 | current->thread.regs->msr |= MSR_VEC; |
187 | } | 191 | } |
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c index ac93a3bd2730..b27e030fc9f8 100644 --- a/arch/powerpc/lib/vmx-helper.c +++ b/arch/powerpc/lib/vmx-helper.c | |||
@@ -46,6 +46,7 @@ int enter_vmx_usercopy(void) | |||
46 | */ | 46 | */ |
47 | int exit_vmx_usercopy(void) | 47 | int exit_vmx_usercopy(void) |
48 | { | 48 | { |
49 | disable_kernel_altivec(); | ||
49 | pagefault_enable(); | 50 | pagefault_enable(); |
50 | preempt_enable(); | 51 | preempt_enable(); |
51 | return 0; | 52 | return 0; |
@@ -70,6 +71,7 @@ int enter_vmx_copy(void) | |||
70 | */ | 71 | */ |
71 | void *exit_vmx_copy(void *dest) | 72 | void *exit_vmx_copy(void *dest) |
72 | { | 73 | { |
74 | disable_kernel_altivec(); | ||
73 | preempt_enable(); | 75 | preempt_enable(); |
74 | return dest; | 76 | return dest; |
75 | } | 77 | } |
diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c index e905f7c2ea7b..07f49f1568e5 100644 --- a/arch/powerpc/lib/xor_vmx.c +++ b/arch/powerpc/lib/xor_vmx.c | |||
@@ -74,6 +74,7 @@ void xor_altivec_2(unsigned long bytes, unsigned long *v1_in, | |||
74 | v2 += 4; | 74 | v2 += 4; |
75 | } while (--lines > 0); | 75 | } while (--lines > 0); |
76 | 76 | ||
77 | disable_kernel_altivec(); | ||
77 | preempt_enable(); | 78 | preempt_enable(); |
78 | } | 79 | } |
79 | EXPORT_SYMBOL(xor_altivec_2); | 80 | EXPORT_SYMBOL(xor_altivec_2); |
@@ -102,6 +103,7 @@ void xor_altivec_3(unsigned long bytes, unsigned long *v1_in, | |||
102 | v3 += 4; | 103 | v3 += 4; |
103 | } while (--lines > 0); | 104 | } while (--lines > 0); |
104 | 105 | ||
106 | disable_kernel_altivec(); | ||
105 | preempt_enable(); | 107 | preempt_enable(); |
106 | } | 108 | } |
107 | EXPORT_SYMBOL(xor_altivec_3); | 109 | EXPORT_SYMBOL(xor_altivec_3); |
@@ -135,6 +137,7 @@ void xor_altivec_4(unsigned long bytes, unsigned long *v1_in, | |||
135 | v4 += 4; | 137 | v4 += 4; |
136 | } while (--lines > 0); | 138 | } while (--lines > 0); |
137 | 139 | ||
140 | disable_kernel_altivec(); | ||
138 | preempt_enable(); | 141 | preempt_enable(); |
139 | } | 142 | } |
140 | EXPORT_SYMBOL(xor_altivec_4); | 143 | EXPORT_SYMBOL(xor_altivec_4); |
@@ -172,6 +175,7 @@ void xor_altivec_5(unsigned long bytes, unsigned long *v1_in, | |||
172 | v5 += 4; | 175 | v5 += 4; |
173 | } while (--lines > 0); | 176 | } while (--lines > 0); |
174 | 177 | ||
178 | disable_kernel_altivec(); | ||
175 | preempt_enable(); | 179 | preempt_enable(); |
176 | } | 180 | } |
177 | EXPORT_SYMBOL(xor_altivec_5); | 181 | EXPORT_SYMBOL(xor_altivec_5); |
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c index 5810967511d4..31a5d42df8c9 100644 --- a/arch/powerpc/mm/40x_mmu.c +++ b/arch/powerpc/mm/40x_mmu.c | |||
@@ -110,10 +110,10 @@ unsigned long __init mmu_mapin_ram(unsigned long top) | |||
110 | unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_HWWRITE; | 110 | unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_HWWRITE; |
111 | 111 | ||
112 | pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v); | 112 | pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v); |
113 | pmd_val(*pmdp++) = val; | 113 | *pmdp++ = __pmd(val); |
114 | pmd_val(*pmdp++) = val; | 114 | *pmdp++ = __pmd(val); |
115 | pmd_val(*pmdp++) = val; | 115 | *pmdp++ = __pmd(val); |
116 | pmd_val(*pmdp++) = val; | 116 | *pmdp++ = __pmd(val); |
117 | 117 | ||
118 | v += LARGE_PAGE_SIZE_16M; | 118 | v += LARGE_PAGE_SIZE_16M; |
119 | p += LARGE_PAGE_SIZE_16M; | 119 | p += LARGE_PAGE_SIZE_16M; |
@@ -125,7 +125,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top) | |||
125 | unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_HWWRITE; | 125 | unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_HWWRITE; |
126 | 126 | ||
127 | pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v); | 127 | pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v); |
128 | pmd_val(*pmdp) = val; | 128 | *pmdp = __pmd(val); |
129 | 129 | ||
130 | v += LARGE_PAGE_SIZE_4M; | 130 | v += LARGE_PAGE_SIZE_4M; |
131 | p += LARGE_PAGE_SIZE_4M; | 131 | p += LARGE_PAGE_SIZE_4M; |
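The 40x_mmu.c change replaces pmd_val() lvalue assignment with __pmd() construction. The motivation: when strict MM typechecking makes pmd_t an opaque struct, pmd_val() is no longer assignable, while __pmd() builds the typed value either way. A self-contained sketch of the convention, simplified from the kernel's usual page-table type definitions:

    /* Opaque page-table entry type: pmd_val() reads, __pmd() constructs. */
    typedef struct { unsigned long pmd; } pmd_t;
    #define pmd_val(x)      ((x).pmd)
    #define __pmd(x)        ((pmd_t) { (x) })

    /* Usage matching the new hunk: *pmdp++ = __pmd(val); */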
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 3eb73a38220d..1ffeda85c086 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile | |||
@@ -14,10 +14,13 @@ obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \ | |||
14 | obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(CONFIG_WORD_SIZE)e.o | 14 | obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(CONFIG_WORD_SIZE)e.o |
15 | hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o | 15 | hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o |
16 | obj-$(CONFIG_PPC_STD_MMU_64) += hash_utils_64.o slb_low.o slb.o $(hash64-y) | 16 | obj-$(CONFIG_PPC_STD_MMU_64) += hash_utils_64.o slb_low.o slb.o $(hash64-y) |
17 | obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o | 17 | obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o |
18 | obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \ | 18 | obj-$(CONFIG_PPC_STD_MMU) += tlb_hash$(CONFIG_WORD_SIZE).o \ |
19 | tlb_hash$(CONFIG_WORD_SIZE).o \ | ||
20 | mmu_context_hash$(CONFIG_WORD_SIZE).o | 19 | mmu_context_hash$(CONFIG_WORD_SIZE).o |
20 | ifeq ($(CONFIG_PPC_STD_MMU_64),y) | ||
21 | obj-$(CONFIG_PPC_4K_PAGES) += hash64_4k.o | ||
22 | obj-$(CONFIG_PPC_64K_PAGES) += hash64_64k.o | ||
23 | endif | ||
21 | obj-$(CONFIG_PPC_ICSWX) += icswx.o | 24 | obj-$(CONFIG_PPC_ICSWX) += icswx.o |
22 | obj-$(CONFIG_PPC_ICSWX_PID) += icswx_pid.o | 25 | obj-$(CONFIG_PPC_ICSWX_PID) += icswx_pid.o |
23 | obj-$(CONFIG_40x) += 40x_mmu.o | 26 | obj-$(CONFIG_40x) += 40x_mmu.o |
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c new file mode 100644 index 000000000000..e7c04542ba62 --- /dev/null +++ b/arch/powerpc/mm/hash64_4k.c | |||
@@ -0,0 +1,123 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corporation, 2015 | ||
3 | * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of version 2 of the GNU Lesser General Public License | ||
7 | * as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/mm.h> | ||
16 | #include <asm/machdep.h> | ||
17 | #include <asm/mmu.h> | ||
18 | |||
19 | int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, | ||
20 | pte_t *ptep, unsigned long trap, unsigned long flags, | ||
21 | int ssize, int subpg_prot) | ||
22 | { | ||
23 | unsigned long hpte_group; | ||
24 | unsigned long rflags, pa; | ||
25 | unsigned long old_pte, new_pte; | ||
26 | unsigned long vpn, hash, slot; | ||
27 | unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift; | ||
28 | |||
29 | /* | ||
30 | * atomically mark the linux large page PTE busy and dirty | ||
31 | */ | ||
32 | do { | ||
33 | pte_t pte = READ_ONCE(*ptep); | ||
34 | |||
35 | old_pte = pte_val(pte); | ||
36 | /* If PTE busy, retry the access */ | ||
37 | if (unlikely(old_pte & _PAGE_BUSY)) | ||
38 | return 0; | ||
39 | /* If PTE permissions don't match, take page fault */ | ||
40 | if (unlikely(access & ~old_pte)) | ||
41 | return 1; | ||
42 | /* | ||
43 | * Try to lock the PTE, add ACCESSED and DIRTY if it was | ||
44 | * a write access. This is the 4K-pages-only path, so set | ||
45 | * _PAGE_HASHPTE rather than _PAGE_COMBO | ||
46 | */ | ||
47 | new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE; | ||
48 | if (access & _PAGE_RW) | ||
49 | new_pte |= _PAGE_DIRTY; | ||
50 | } while (old_pte != __cmpxchg_u64((unsigned long *)ptep, | ||
51 | old_pte, new_pte)); | ||
52 | /* | ||
53 | * PP bits. _PAGE_USER is already PP bit 0x2, so we only | ||
54 | * need to add in 0x1 if it's a read-only user page | ||
55 | */ | ||
56 | rflags = htab_convert_pte_flags(new_pte); | ||
57 | |||
58 | if (!cpu_has_feature(CPU_FTR_NOEXECUTE) && | ||
59 | !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) | ||
60 | rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap); | ||
61 | |||
62 | vpn = hpt_vpn(ea, vsid, ssize); | ||
63 | if (unlikely(old_pte & _PAGE_HASHPTE)) { | ||
64 | /* | ||
65 | * There MIGHT be an HPTE for this pte | ||
66 | */ | ||
67 | hash = hpt_hash(vpn, shift, ssize); | ||
68 | if (old_pte & _PAGE_F_SECOND) | ||
69 | hash = ~hash; | ||
70 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | ||
71 | slot += (old_pte & _PAGE_F_GIX) >> _PAGE_F_GIX_SHIFT; | ||
72 | |||
73 | if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_4K, | ||
74 | MMU_PAGE_4K, ssize, flags) == -1) | ||
75 | old_pte &= ~_PAGE_HPTEFLAGS; | ||
76 | } | ||
77 | |||
78 | if (likely(!(old_pte & _PAGE_HASHPTE))) { | ||
79 | |||
80 | pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; | ||
81 | hash = hpt_hash(vpn, shift, ssize); | ||
82 | |||
83 | repeat: | ||
84 | hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; | ||
85 | |||
86 | /* Insert into the hash table, primary slot */ | ||
87 | slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, | ||
88 | MMU_PAGE_4K, MMU_PAGE_4K, ssize); | ||
89 | /* | ||
90 | * Primary is full, try the secondary | ||
91 | */ | ||
92 | if (unlikely(slot == -1)) { | ||
93 | hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; | ||
94 | slot = ppc_md.hpte_insert(hpte_group, vpn, pa, | ||
95 | rflags, HPTE_V_SECONDARY, | ||
96 | MMU_PAGE_4K, MMU_PAGE_4K, ssize); | ||
97 | if (slot == -1) { | ||
98 | if (mftb() & 0x1) | ||
99 | hpte_group = ((hash & htab_hash_mask) * | ||
100 | HPTES_PER_GROUP) & ~0x7UL; | ||
101 | ppc_md.hpte_remove(hpte_group); | ||
102 | /* | ||
103 | * FIXME!! Should we retry the group from which we removed? | ||
104 | */ | ||
105 | goto repeat; | ||
106 | } | ||
107 | } | ||
108 | /* | ||
109 | * Hypervisor failure. Restore the old PTE and return -1, | ||
110 | * as the other __hash_page_* variants do | ||
111 | */ | ||
112 | if (unlikely(slot == -2)) { | ||
113 | *ptep = __pte(old_pte); | ||
114 | hash_failure_debug(ea, access, vsid, trap, ssize, | ||
115 | MMU_PAGE_4K, MMU_PAGE_4K, old_pte); | ||
116 | return -1; | ||
117 | } | ||
118 | new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE; | ||
119 | new_pte |= (slot << _PAGE_F_GIX_SHIFT) & (_PAGE_F_SECOND | _PAGE_F_GIX); | ||
120 | } | ||
121 | *ptep = __pte(new_pte & ~_PAGE_BUSY); | ||
122 | return 0; | ||
123 | } | ||
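For reference, the slot bookkeeping used throughout hash64_4k.c round-trips as follows: _PAGE_F_SECOND records whether the HPTE sits in the secondary hash group, and _PAGE_F_GIX its index within the group. Restated as a small helper, assuming kernel context; hpte_slot_of_pte() is an illustrative name, not a kernel function:

    static unsigned long hpte_slot_of_pte(unsigned long hash, unsigned long pte)
    {
            unsigned long slot;

            if (pte & _PAGE_F_SECOND)       /* HPTE is in the secondary group */
                    hash = ~hash;
            slot  = (hash & htab_hash_mask) * HPTES_PER_GROUP;
            slot += (pte & _PAGE_F_GIX) >> _PAGE_F_GIX_SHIFT;
            return slot;                    /* index passed to hpte_updatepp() */
    }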
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c new file mode 100644 index 000000000000..0762c1e08c88 --- /dev/null +++ b/arch/powerpc/mm/hash64_64k.c | |||
@@ -0,0 +1,322 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corporation, 2015 | ||
3 | * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of version 2 of the GNU Lesser General Public License | ||
7 | * as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/mm.h> | ||
16 | #include <asm/machdep.h> | ||
17 | #include <asm/mmu.h> | ||
18 | /* | ||
19 | * index ranges from 0 to 15 | ||
20 | */ | ||
21 | bool __rpte_sub_valid(real_pte_t rpte, unsigned long index) | ||
22 | { | ||
23 | unsigned long g_idx; | ||
24 | unsigned long ptev = pte_val(rpte.pte); | ||
25 | |||
26 | g_idx = (ptev & _PAGE_COMBO_VALID) >> _PAGE_F_GIX_SHIFT; | ||
27 | index = index >> 2; | ||
28 | if (g_idx & (0x1 << index)) | ||
29 | return true; | ||
30 | else | ||
31 | return false; | ||
32 | } | ||
33 | /* | ||
34 | * index ranges from 0 to 15 | ||
35 | */ | ||
36 | static unsigned long mark_subptegroup_valid(unsigned long ptev, unsigned long index) | ||
37 | { | ||
38 | unsigned long g_idx; | ||
39 | |||
40 | if (!(ptev & _PAGE_COMBO)) | ||
41 | return ptev; | ||
42 | index = index >> 2; | ||
43 | g_idx = 0x1 << index; | ||
44 | |||
45 | return ptev | (g_idx << _PAGE_F_GIX_SHIFT); | ||
46 | } | ||
47 | |||
48 | int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, | ||
49 | pte_t *ptep, unsigned long trap, unsigned long flags, | ||
50 | int ssize, int subpg_prot) | ||
51 | { | ||
52 | real_pte_t rpte; | ||
53 | unsigned long *hidxp; | ||
54 | unsigned long hpte_group; | ||
55 | unsigned int subpg_index; | ||
56 | unsigned long rflags, pa, hidx; | ||
57 | unsigned long old_pte, new_pte, subpg_pte; | ||
58 | unsigned long vpn, hash, slot; | ||
59 | unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift; | ||
60 | |||
61 | /* | ||
62 | * atomically mark the linux large page PTE busy and dirty | ||
63 | */ | ||
64 | do { | ||
65 | pte_t pte = READ_ONCE(*ptep); | ||
66 | |||
67 | old_pte = pte_val(pte); | ||
68 | /* If PTE busy, retry the access */ | ||
69 | if (unlikely(old_pte & _PAGE_BUSY)) | ||
70 | return 0; | ||
71 | /* If PTE permissions don't match, take page fault */ | ||
72 | if (unlikely(access & ~old_pte)) | ||
73 | return 1; | ||
74 | /* | ||
75 | * Try to lock the PTE, add ACCESSED and DIRTY if it was | ||
76 | * a write access. Since this is a 4K insert of a 64K | ||
77 | * base page, also add _PAGE_COMBO | ||
78 | */ | ||
79 | new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_COMBO; | ||
80 | if (access & _PAGE_RW) | ||
81 | new_pte |= _PAGE_DIRTY; | ||
82 | } while (old_pte != __cmpxchg_u64((unsigned long *)ptep, | ||
83 | old_pte, new_pte)); | ||
84 | /* | ||
85 | * Handle the subpage protection bits | ||
86 | */ | ||
87 | subpg_pte = new_pte & ~subpg_prot; | ||
88 | rflags = htab_convert_pte_flags(subpg_pte); | ||
89 | |||
90 | if (!cpu_has_feature(CPU_FTR_NOEXECUTE) && | ||
91 | !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) { | ||
92 | |||
93 | /* | ||
94 | * No CPU has hugepages but lacks no-execute, so we | ||
95 | * don't need to worry about that case | ||
96 | */ | ||
97 | rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap); | ||
98 | } | ||
99 | |||
100 | subpg_index = (ea & (PAGE_SIZE - 1)) >> shift; | ||
101 | vpn = hpt_vpn(ea, vsid, ssize); | ||
102 | rpte = __real_pte(__pte(old_pte), ptep); | ||
103 | /* | ||
104 | * None of the sub-4K pages is hashed | ||
105 | */ | ||
106 | if (!(old_pte & _PAGE_HASHPTE)) | ||
107 | goto htab_insert_hpte; | ||
108 | /* | ||
109 | * Check if the pte was already inserted into the hash table | ||
110 | * as a 64k HW page, and invalidate the 64k HPTE if so. | ||
111 | */ | ||
112 | if (!(old_pte & _PAGE_COMBO)) { | ||
113 | flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags); | ||
114 | old_pte &= ~_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND; | ||
115 | goto htab_insert_hpte; | ||
116 | } | ||
117 | /* | ||
118 | * Check whether the subpage is already valid, and update it | ||
119 | */ | ||
120 | if (__rpte_sub_valid(rpte, subpg_index)) { | ||
121 | int ret; | ||
122 | |||
123 | hash = hpt_hash(vpn, shift, ssize); | ||
124 | hidx = __rpte_to_hidx(rpte, subpg_index); | ||
125 | if (hidx & _PTEIDX_SECONDARY) | ||
126 | hash = ~hash; | ||
127 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | ||
128 | slot += hidx & _PTEIDX_GROUP_IX; | ||
129 | |||
130 | ret = ppc_md.hpte_updatepp(slot, rflags, vpn, | ||
131 | MMU_PAGE_4K, MMU_PAGE_4K, | ||
132 | ssize, flags); | ||
133 | /* | ||
134 | * If the update failed, typically because the HPTE wasn't | ||
135 | * really there, we try an insertion. | ||
136 | */ | ||
137 | if (ret == -1) | ||
138 | goto htab_insert_hpte; | ||
139 | |||
140 | *ptep = __pte(new_pte & ~_PAGE_BUSY); | ||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | htab_insert_hpte: | ||
145 | /* | ||
146 | * handle _PAGE_4K_PFN case | ||
147 | */ | ||
148 | if (old_pte & _PAGE_4K_PFN) { | ||
149 | /* | ||
150 | * All the sub-4K pages have the same | ||
151 | * physical address. | ||
152 | */ | ||
153 | pa = pte_pfn(__pte(old_pte)) << HW_PAGE_SHIFT; | ||
154 | } else { | ||
155 | pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; | ||
156 | pa += (subpg_index << shift); | ||
157 | } | ||
158 | hash = hpt_hash(vpn, shift, ssize); | ||
159 | repeat: | ||
160 | hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; | ||
161 | |||
162 | /* Insert into the hash table, primary slot */ | ||
163 | slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, | ||
164 | MMU_PAGE_4K, MMU_PAGE_4K, ssize); | ||
165 | /* | ||
166 | * Primary is full, try the secondary | ||
167 | */ | ||
168 | if (unlikely(slot == -1)) { | ||
169 | hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; | ||
170 | slot = ppc_md.hpte_insert(hpte_group, vpn, pa, | ||
171 | rflags, HPTE_V_SECONDARY, | ||
172 | MMU_PAGE_4K, MMU_PAGE_4K, ssize); | ||
173 | if (slot == -1) { | ||
174 | if (mftb() & 0x1) | ||
175 | hpte_group = ((hash & htab_hash_mask) * | ||
176 | HPTES_PER_GROUP) & ~0x7UL; | ||
177 | ppc_md.hpte_remove(hpte_group); | ||
178 | /* | ||
179 | * FIXME!! Should we retry the group from which we removed? | ||
180 | */ | ||
181 | goto repeat; | ||
182 | } | ||
183 | } | ||
184 | /* | ||
185 | * Hypervisor failure. Restore the old PTE and return -1, | ||
186 | * as the other __hash_page_* variants do | ||
187 | */ | ||
188 | if (unlikely(slot == -2)) { | ||
189 | *ptep = __pte(old_pte); | ||
190 | hash_failure_debug(ea, access, vsid, trap, ssize, | ||
191 | MMU_PAGE_4K, MMU_PAGE_4K, old_pte); | ||
192 | return -1; | ||
193 | } | ||
194 | /* | ||
195 | * Insert slot number & secondary bit in PTE second half, | ||
196 | * clear _PAGE_BUSY and set appropriate HPTE slot bit | ||
197 | * Since we have _PAGE_BUSY set on ptep, we can be sure | ||
198 | * nobody is updating hidx. | ||
199 | */ | ||
200 | hidxp = (unsigned long *)(ptep + PTRS_PER_PTE); | ||
201 | rpte.hidx &= ~(0xfUL << (subpg_index << 2)); | ||
202 | *hidxp = rpte.hidx | (slot << (subpg_index << 2)); | ||
203 | new_pte = mark_subptegroup_valid(new_pte, subpg_index); | ||
204 | new_pte |= _PAGE_HASHPTE; | ||
205 | /* | ||
206 | * see __real_pte() for the matching smp_rmb() | ||
207 | */ | ||
208 | smp_wmb(); | ||
209 | *ptep = __pte(new_pte & ~_PAGE_BUSY); | ||
210 | return 0; | ||
211 | } | ||
212 | |||
213 | int __hash_page_64K(unsigned long ea, unsigned long access, | ||
214 | unsigned long vsid, pte_t *ptep, unsigned long trap, | ||
215 | unsigned long flags, int ssize) | ||
216 | { | ||
217 | |||
218 | unsigned long hpte_group; | ||
219 | unsigned long rflags, pa; | ||
220 | unsigned long old_pte, new_pte; | ||
221 | unsigned long vpn, hash, slot; | ||
222 | unsigned long shift = mmu_psize_defs[MMU_PAGE_64K].shift; | ||
223 | |||
224 | /* | ||
225 | * atomically mark the linux large page PTE busy and dirty | ||
226 | */ | ||
227 | do { | ||
228 | pte_t pte = READ_ONCE(*ptep); | ||
229 | |||
230 | old_pte = pte_val(pte); | ||
231 | /* If PTE busy, retry the access */ | ||
232 | if (unlikely(old_pte & _PAGE_BUSY)) | ||
233 | return 0; | ||
234 | /* If PTE permissions don't match, take page fault */ | ||
235 | if (unlikely(access & ~old_pte)) | ||
236 | return 1; | ||
237 | /* | ||
238 | * Check if PTE has the cache-inhibit bit set | ||
239 | * If so, bail out and refault as a 4k page | ||
240 | */ | ||
241 | if (!mmu_has_feature(MMU_FTR_CI_LARGE_PAGE) && | ||
242 | unlikely(old_pte & _PAGE_NO_CACHE)) | ||
243 | return 0; | ||
244 | /* | ||
245 | * Try to lock the PTE, add ACCESSED and DIRTY if it was | ||
246 | * a write access. This is the native 64K path, so no | ||
247 | * subpage _PAGE_COMBO tracking is needed | ||
248 | */ | ||
249 | new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED; | ||
250 | if (access & _PAGE_RW) | ||
251 | new_pte |= _PAGE_DIRTY; | ||
252 | } while (old_pte != __cmpxchg_u64((unsigned long *)ptep, | ||
253 | old_pte, new_pte)); | ||
254 | |||
255 | rflags = htab_convert_pte_flags(new_pte); | ||
256 | |||
257 | if (!cpu_has_feature(CPU_FTR_NOEXECUTE) && | ||
258 | !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) | ||
259 | rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap); | ||
260 | |||
261 | vpn = hpt_vpn(ea, vsid, ssize); | ||
262 | if (unlikely(old_pte & _PAGE_HASHPTE)) { | ||
263 | /* | ||
264 | * There MIGHT be an HPTE for this pte | ||
265 | */ | ||
266 | hash = hpt_hash(vpn, shift, ssize); | ||
267 | if (old_pte & _PAGE_F_SECOND) | ||
268 | hash = ~hash; | ||
269 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | ||
270 | slot += (old_pte & _PAGE_F_GIX) >> _PAGE_F_GIX_SHIFT; | ||
271 | |||
272 | if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_64K, | ||
273 | MMU_PAGE_64K, ssize, flags) == -1) | ||
274 | old_pte &= ~_PAGE_HPTEFLAGS; | ||
275 | } | ||
276 | |||
277 | if (likely(!(old_pte & _PAGE_HASHPTE))) { | ||
278 | |||
279 | pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; | ||
280 | hash = hpt_hash(vpn, shift, ssize); | ||
281 | |||
282 | repeat: | ||
283 | hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; | ||
284 | |||
285 | /* Insert into the hash table, primary slot */ | ||
286 | slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, | ||
287 | MMU_PAGE_64K, MMU_PAGE_64K, ssize); | ||
288 | /* | ||
289 | * Primary is full, try the secondary | ||
290 | */ | ||
291 | if (unlikely(slot == -1)) { | ||
292 | hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; | ||
293 | slot = ppc_md.hpte_insert(hpte_group, vpn, pa, | ||
294 | rflags, HPTE_V_SECONDARY, | ||
295 | MMU_PAGE_64K, MMU_PAGE_64K, ssize); | ||
296 | if (slot == -1) { | ||
297 | if (mftb() & 0x1) | ||
298 | hpte_group = ((hash & htab_hash_mask) * | ||
299 | HPTES_PER_GROUP) & ~0x7UL; | ||
300 | ppc_md.hpte_remove(hpte_group); | ||
301 | /* | ||
302 | * FIXME!! Should we retry the group from which we removed? | ||
303 | */ | ||
304 | goto repeat; | ||
305 | } | ||
306 | } | ||
307 | /* | ||
308 | * Hypervisor failure. Restore the old PTE and return -1, | ||
309 | * as the other __hash_page_* variants do | ||
310 | */ | ||
311 | if (unlikely(slot == -2)) { | ||
312 | *ptep = __pte(old_pte); | ||
313 | hash_failure_debug(ea, access, vsid, trap, ssize, | ||
314 | MMU_PAGE_64K, MMU_PAGE_64K, old_pte); | ||
315 | return -1; | ||
316 | } | ||
317 | new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE; | ||
318 | new_pte |= (slot << _PAGE_F_GIX_SHIFT) & (_PAGE_F_SECOND | _PAGE_F_GIX); | ||
319 | } | ||
320 | *ptep = __pte(new_pte & ~_PAGE_BUSY); | ||
321 | return 0; | ||
322 | } | ||
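In hash64_64k.c the second half of the PTE packs one 4-bit hash-slot index per 4K subpage, sixteen in all, and the word is rewritten under _PAGE_BUSY so nobody else updates it concurrently. The update at the end of __hash_page_4K reduces to this self-contained helper; set_subpage_hidx() is an illustrative name, not kernel API:

    static unsigned long set_subpage_hidx(unsigned long hidx_word,
                                          unsigned int subpg_index,
                                          unsigned long slot)
    {
            unsigned int shift = subpg_index << 2;  /* 4 bits per subpage */

            hidx_word &= ~(0xfUL << shift);         /* clear the old slot */
            return hidx_word | (slot << shift);     /* record the new one */
    }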
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S deleted file mode 100644 index 3b49e3295901..000000000000 --- a/arch/powerpc/mm/hash_low_64.S +++ /dev/null | |||
@@ -1,1003 +0,0 @@ | |||
1 | /* | ||
2 | * ppc64 MMU hashtable management routines | ||
3 | * | ||
4 | * (c) Copyright IBM Corp. 2003, 2005 | ||
5 | * | ||
6 | * Maintained by: Benjamin Herrenschmidt | ||
7 | * <benh@kernel.crashing.org> | ||
8 | * | ||
9 | * This file is covered by the GNU Public Licence v2 as | ||
10 | * described in the kernel's COPYING file. | ||
11 | */ | ||
12 | |||
13 | #include <asm/reg.h> | ||
14 | #include <asm/pgtable.h> | ||
15 | #include <asm/mmu.h> | ||
16 | #include <asm/page.h> | ||
17 | #include <asm/types.h> | ||
18 | #include <asm/ppc_asm.h> | ||
19 | #include <asm/asm-offsets.h> | ||
20 | #include <asm/cputable.h> | ||
21 | |||
22 | .text | ||
23 | |||
24 | /* | ||
25 | * Stackframe: | ||
26 | * | ||
27 | * +-> Back chain (SP + 256) | ||
28 | * | General register save area (SP + 112) | ||
29 | * | Parameter save area (SP + 48) | ||
30 | * | TOC save area (SP + 40) | ||
31 | * | link editor doubleword (SP + 32) | ||
32 | * | compiler doubleword (SP + 24) | ||
33 | * | LR save area (SP + 16) | ||
34 | * | CR save area (SP + 8) | ||
35 | * SP ---> +-- Back chain (SP + 0) | ||
36 | */ | ||
37 | |||
38 | #ifndef CONFIG_PPC_64K_PAGES | ||
39 | |||
40 | /***************************************************************************** | ||
41 | * * | ||
42 | * 4K SW & 4K HW pages implementation * | ||
43 | * * | ||
44 | *****************************************************************************/ | ||
45 | |||
46 | |||
47 | /* | ||
48 | * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, | ||
49 | * pte_t *ptep, unsigned long trap, unsigned long flags, | ||
50 | * int ssize) | ||
51 | * | ||
52 | * Adds a 4K page to the hash table in a segment of 4K pages only | ||
53 | */ | ||
54 | |||
55 | _GLOBAL(__hash_page_4K) | ||
56 | mflr r0 | ||
57 | std r0,16(r1) | ||
58 | stdu r1,-STACKFRAMESIZE(r1) | ||
59 | /* Save all params that we need after a function call */ | ||
60 | std r6,STK_PARAM(R6)(r1) | ||
61 | std r8,STK_PARAM(R8)(r1) | ||
62 | std r9,STK_PARAM(R9)(r1) | ||
63 | |||
64 | /* Save non-volatile registers. | ||
65 | * r31 will hold "old PTE" | ||
66 | * r30 is "new PTE" | ||
67 | * r29 is vpn | ||
68 | * r28 is a hash value | ||
69 | * r27 is hashtab mask (maybe dynamic patched instead ?) | ||
70 | */ | ||
71 | std r27,STK_REG(R27)(r1) | ||
72 | std r28,STK_REG(R28)(r1) | ||
73 | std r29,STK_REG(R29)(r1) | ||
74 | std r30,STK_REG(R30)(r1) | ||
75 | std r31,STK_REG(R31)(r1) | ||
76 | |||
77 | /* Step 1: | ||
78 | * | ||
79 | * Check permissions, atomically mark the linux PTE busy | ||
80 | * and hashed. | ||
81 | */ | ||
82 | 1: | ||
83 | ldarx r31,0,r6 | ||
84 | /* Check access rights (access & ~(pte_val(*ptep))) */ | ||
85 | andc. r0,r4,r31 | ||
86 | bne- htab_wrong_access | ||
87 | /* Check if PTE is busy */ | ||
88 | andi. r0,r31,_PAGE_BUSY | ||
89 | /* If so, just bail out and refault if needed. Someone else | ||
90 | * is changing this PTE anyway and might hash it. | ||
91 | */ | ||
92 | bne- htab_bail_ok | ||
93 | |||
94 | /* Prepare new PTE value (turn access RW into DIRTY, then | ||
95 | * add BUSY,HASHPTE and ACCESSED) | ||
96 | */ | ||
97 | rlwinm r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */ | ||
98 | or r30,r30,r31 | ||
99 | ori r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE | ||
100 | /* Write the linux PTE atomically (setting busy) */ | ||
101 | stdcx. r30,0,r6 | ||
102 | bne- 1b | ||
103 | isync | ||
104 | |||
105 | /* Step 2: | ||
106 | * | ||
107 | * Insert/Update the HPTE in the hash table. At this point, | ||
108 | * r4 (access) is re-useable, we use it for the new HPTE flags | ||
109 | */ | ||
110 | |||
111 | BEGIN_FTR_SECTION | ||
112 | cmpdi r9,0 /* check segment size */ | ||
113 | bne 3f | ||
114 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | ||
115 | /* Calc vpn and put it in r29 */ | ||
116 | sldi r29,r5,SID_SHIFT - VPN_SHIFT | ||
117 | rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) | ||
118 | or r29,r28,r29 | ||
119 | /* | ||
120 | * Calculate hash value for primary slot and store it in r28 | ||
121 | * r3 = va, r5 = vsid | ||
122 | * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) | ||
123 | */ | ||
124 | rldicl r0,r3,64-12,48 | ||
125 | xor r28,r5,r0 /* hash */ | ||
126 | b 4f | ||
127 | |||
128 | 3: /* Calc vpn and put it in r29 */ | ||
129 | sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT | ||
130 | rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) | ||
131 | or r29,r28,r29 | ||
132 | |||
133 | /* | ||
134 | * calculate hash value for primary slot and | ||
135 | * store it in r28 for 1T segment | ||
136 | * r3 = va, r5 = vsid | ||
137 | */ | ||
138 | sldi r28,r5,25 /* vsid << 25 */ | ||
139 | /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ | ||
140 | rldicl r0,r3,64-12,36 | ||
141 | xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ | ||
142 | xor r28,r28,r0 /* hash */ | ||
143 | |||
144 | /* Convert linux PTE bits into HW equivalents */ | ||
145 | 4: andi. r3,r30,0x1fe /* Get basic set of flags */ | ||
146 | xori r3,r3,HPTE_R_N /* _PAGE_EXEC -> NOEXEC */ | ||
147 | rlwinm r0,r30,32-9+1,30,30 /* _PAGE_RW -> _PAGE_USER (r0) */ | ||
148 | rlwinm r4,r30,32-7+1,30,30 /* _PAGE_DIRTY -> _PAGE_USER (r4) */ | ||
149 | and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ | ||
150 | andc r0,r30,r0 /* r0 = pte & ~r0 */ | ||
151 | rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ | ||
152 | /* | ||
153 | * Always add "C" bit for perf. Memory coherence is always enabled | ||
154 | */ | ||
155 | ori r3,r3,HPTE_R_C | HPTE_R_M | ||
156 | |||
157 | /* We eventually do the icache sync here (maybe inline that | ||
158 | * code rather than call a C function...) | ||
159 | */ | ||
160 | BEGIN_FTR_SECTION | ||
161 | mr r4,r30 | ||
162 | mr r5,r7 | ||
163 | bl hash_page_do_lazy_icache | ||
164 | END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) | ||
165 | |||
166 | /* At this point, r3 contains new PP bits, save them in | ||
167 | * place of "access" in the param area (sic) | ||
168 | */ | ||
169 | std r3,STK_PARAM(R4)(r1) | ||
170 | |||
171 | /* Get htab_hash_mask */ | ||
172 | ld r4,htab_hash_mask@got(2) | ||
173 | ld r27,0(r4) /* htab_hash_mask -> r27 */ | ||
174 | |||
175 | /* Check if we may already be in the hashtable, in this case, we | ||
176 | * go to out-of-line code to try to modify the HPTE | ||
177 | */ | ||
178 | andi. r0,r31,_PAGE_HASHPTE | ||
179 | bne htab_modify_pte | ||
180 | |||
181 | htab_insert_pte: | ||
182 | /* Clear hpte bits in new pte (we also clear BUSY btw) and | ||
183 | * add _PAGE_HASHPTE | ||
184 | */ | ||
185 | lis r0,_PAGE_HPTEFLAGS@h | ||
186 | ori r0,r0,_PAGE_HPTEFLAGS@l | ||
187 | andc r30,r30,r0 | ||
188 | ori r30,r30,_PAGE_HASHPTE | ||
189 | |||
190 | /* physical address r5 */ | ||
191 | rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT | ||
192 | sldi r5,r5,PAGE_SHIFT | ||
193 | |||
194 | /* Calculate primary group hash */ | ||
195 | and r0,r28,r27 | ||
196 | rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */ | ||
197 | |||
198 | /* Call ppc_md.hpte_insert */ | ||
199 | ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ | ||
200 | mr r4,r29 /* Retrieve vpn */ | ||
201 | li r7,0 /* !bolted, !secondary */ | ||
202 | li r8,MMU_PAGE_4K /* page size */ | ||
203 | li r9,MMU_PAGE_4K /* actual page size */ | ||
204 | ld r10,STK_PARAM(R9)(r1) /* segment size */ | ||
205 | .globl htab_call_hpte_insert1 | ||
206 | htab_call_hpte_insert1: | ||
207 | bl . /* Patched by htab_finish_init() */ | ||
208 | cmpdi 0,r3,0 | ||
209 | bge htab_pte_insert_ok /* Insertion successful */ | ||
210 | cmpdi 0,r3,-2 /* Critical failure */ | ||
211 | beq- htab_pte_insert_failure | ||
212 | |||
213 | /* Now try secondary slot */ | ||
214 | |||
215 | /* physical address r5 */ | ||
216 | rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT | ||
217 | sldi r5,r5,PAGE_SHIFT | ||
218 | |||
219 | /* Calculate secondary group hash */ | ||
220 | andc r0,r27,r28 | ||
221 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ | ||
222 | |||
223 | /* Call ppc_md.hpte_insert */ | ||
224 | ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ | ||
225 | mr r4,r29 /* Retrieve vpn */ | ||
226 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ | ||
227 | li r8,MMU_PAGE_4K /* page size */ | ||
228 | li r9,MMU_PAGE_4K /* actual page size */ | ||
229 | ld r10,STK_PARAM(R9)(r1) /* segment size */ | ||
230 | .globl htab_call_hpte_insert2 | ||
231 | htab_call_hpte_insert2: | ||
232 | bl . /* Patched by htab_finish_init() */ | ||
233 | cmpdi 0,r3,0 | ||
234 | bge+ htab_pte_insert_ok /* Insertion successful */ | ||
235 | cmpdi 0,r3,-2 /* Critical failure */ | ||
236 | beq- htab_pte_insert_failure | ||
237 | |||
238 | /* Both are full, we need to evict something */ | ||
239 | mftb r0 | ||
240 | /* Pick a random group based on TB */ | ||
241 | andi. r0,r0,1 | ||
242 | mr r5,r28 | ||
243 | bne 2f | ||
244 | not r5,r5 | ||
245 | 2: and r0,r5,r27 | ||
246 | rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ | ||
247 | /* Call ppc_md.hpte_remove */ | ||
248 | .globl htab_call_hpte_remove | ||
249 | htab_call_hpte_remove: | ||
250 | bl . /* Patched by htab_finish_init() */ | ||
251 | |||
252 | /* Try all again */ | ||
253 | b htab_insert_pte | ||
254 | |||
255 | htab_bail_ok: | ||
256 | li r3,0 | ||
257 | b htab_bail | ||
258 | |||
259 | htab_pte_insert_ok: | ||
260 | /* Insert slot number & secondary bit in PTE */ | ||
261 | rldimi r30,r3,12,63-15 | ||
262 | |||
263 | /* Write out the PTE with a normal write | ||
264 | * (maybe add eieio may be good still ?) | ||
265 | */ | ||
266 | htab_write_out_pte: | ||
267 | ld r6,STK_PARAM(R6)(r1) | ||
268 | std r30,0(r6) | ||
269 | li r3, 0 | ||
270 | htab_bail: | ||
271 | ld r27,STK_REG(R27)(r1) | ||
272 | ld r28,STK_REG(R28)(r1) | ||
273 | ld r29,STK_REG(R29)(r1) | ||
274 | ld r30,STK_REG(R30)(r1) | ||
275 | ld r31,STK_REG(R31)(r1) | ||
276 | addi r1,r1,STACKFRAMESIZE | ||
277 | ld r0,16(r1) | ||
278 | mtlr r0 | ||
279 | blr | ||
280 | |||
281 | htab_modify_pte: | ||
282 | /* Keep PP bits in r4 and slot idx from the PTE around in r3 */ | ||
283 | mr r4,r3 | ||
284 | rlwinm r3,r31,32-12,29,31 | ||
285 | |||
286 | /* Secondary group? If yes, get an inverted hash value */ | ||
287 | mr r5,r28 | ||
288 | andi. r0,r31,_PAGE_SECONDARY | ||
289 | beq 1f | ||
290 | not r5,r5 | ||
291 | 1: | ||
292 | /* Calculate proper slot value for ppc_md.hpte_updatepp */ | ||
293 | and r0,r5,r27 | ||
294 | rldicr r0,r0,3,63-3 /* r0 = (hash & mask) << 3 */ | ||
295 | add r3,r0,r3 /* add slot idx */ | ||
296 | |||
297 | /* Call ppc_md.hpte_updatepp */ | ||
298 | mr r5,r29 /* vpn */ | ||
299 | li r6,MMU_PAGE_4K /* base page size */ | ||
300 | li r7,MMU_PAGE_4K /* actual page size */ | ||
301 | ld r8,STK_PARAM(R9)(r1) /* segment size */ | ||
302 | ld r9,STK_PARAM(R8)(r1) /* get "flags" param */ | ||
303 | .globl htab_call_hpte_updatepp | ||
304 | htab_call_hpte_updatepp: | ||
305 | bl . /* Patched by htab_finish_init() */ | ||
306 | |||
307 | /* If the update failed, typically because the HPTE wasn't | ||
308 | * really there, we try an insertion. | ||
309 | */ | ||
310 | cmpdi 0,r3,-1 | ||
311 | beq- htab_insert_pte | ||
312 | |||
313 | /* Clear the BUSY bit and Write out the PTE */ | ||
314 | li r0,_PAGE_BUSY | ||
315 | andc r30,r30,r0 | ||
316 | b htab_write_out_pte | ||
317 | |||
318 | htab_wrong_access: | ||
319 | /* Bail out clearing reservation */ | ||
320 | stdcx. r31,0,r6 | ||
321 | li r3,1 | ||
322 | b htab_bail | ||
323 | |||
324 | htab_pte_insert_failure: | ||
325 | /* Bail out restoring old PTE */ | ||
326 | ld r6,STK_PARAM(R6)(r1) | ||
327 | std r31,0(r6) | ||
328 | li r3,-1 | ||
329 | b htab_bail | ||
330 | |||
331 | |||
332 | #else /* CONFIG_PPC_64K_PAGES */ | ||
333 | |||
334 | |||
335 | /***************************************************************************** | ||
336 | * * | ||
337 | * 64K SW & 4K or 64K HW in a 4K segment pages implementation * | ||
338 | * * | ||
339 | *****************************************************************************/ | ||
340 | |||
341 | /* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, | ||
342 | * pte_t *ptep, unsigned long trap, unsigned long flags, | ||
343 | * int ssize, int subpg_prot) | ||
344 | */ | ||
345 | |||
346 | /* | ||
347 | * For now, we do NOT implement Admixed pages | ||
348 | */ | ||
349 | _GLOBAL(__hash_page_4K) | ||
350 | mflr r0 | ||
351 | std r0,16(r1) | ||
352 | stdu r1,-STACKFRAMESIZE(r1) | ||
353 | /* Save all params that we need after a function call */ | ||
354 | std r6,STK_PARAM(R6)(r1) | ||
355 | std r8,STK_PARAM(R8)(r1) | ||
356 | std r9,STK_PARAM(R9)(r1) | ||
357 | |||
358 | /* Save non-volatile registers. | ||
359 | * r31 will hold "old PTE" | ||
360 | * r30 is "new PTE" | ||
361 | * r29 is vpn | ||
362 | * r28 is a hash value | ||
363 | * r27 is hashtab mask (maybe dynamic patched instead ?) | ||
364 | * r26 is the hidx mask | ||
365 | * r25 is the index in combo page | ||
366 | */ | ||
367 | std r25,STK_REG(R25)(r1) | ||
368 | std r26,STK_REG(R26)(r1) | ||
369 | std r27,STK_REG(R27)(r1) | ||
370 | std r28,STK_REG(R28)(r1) | ||
371 | std r29,STK_REG(R29)(r1) | ||
372 | std r30,STK_REG(R30)(r1) | ||
373 | std r31,STK_REG(R31)(r1) | ||
374 | |||
375 | /* Step 1: | ||
376 | * | ||
377 | * Check permissions, atomically mark the linux PTE busy | ||
378 | * and hashed. | ||
379 | */ | ||
380 | 1: | ||
381 | ldarx r31,0,r6 | ||
382 | /* Check access rights (access & ~(pte_val(*ptep))) */ | ||
383 | andc. r0,r4,r31 | ||
384 | bne- htab_wrong_access | ||
385 | /* Check if PTE is busy */ | ||
386 | andi. r0,r31,_PAGE_BUSY | ||
387 | /* If so, just bail out and refault if needed. Someone else | ||
388 | * is changing this PTE anyway and might hash it. | ||
389 | */ | ||
390 | bne- htab_bail_ok | ||
391 | /* Prepare new PTE value (turn access RW into DIRTY, then | ||
392 | * add BUSY and ACCESSED) | ||
393 | */ | ||
394 | rlwinm r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */ | ||
395 | or r30,r30,r31 | ||
396 | ori r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | ||
397 | oris r30,r30,_PAGE_COMBO@h | ||
398 | /* Write the linux PTE atomically (setting busy) */ | ||
399 | stdcx. r30,0,r6 | ||
400 | bne- 1b | ||
401 | isync | ||
402 | |||
403 | /* Step 2: | ||
404 | * | ||
405 | * Insert/Update the HPTE in the hash table. At this point, | ||
406 | * r4 (access) is re-useable, we use it for the new HPTE flags | ||
407 | */ | ||
408 | |||
409 | /* Load the hidx index */ | ||
410 | rldicl r25,r3,64-12,60 | ||
411 | |||
412 | BEGIN_FTR_SECTION | ||
413 | cmpdi r9,0 /* check segment size */ | ||
414 | bne 3f | ||
415 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | ||
416 | /* Calc vpn and put it in r29 */ | ||
417 | sldi r29,r5,SID_SHIFT - VPN_SHIFT | ||
418 | /* | ||
419 | * clrldi r3,r3,64 - SID_SHIFT --> ea & 0xfffffff | ||
420 | * srdi r28,r3,VPN_SHIFT | ||
421 | */ | ||
422 | rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) | ||
423 | or r29,r28,r29 | ||
424 | /* | ||
425 | * Calculate hash value for primary slot and store it in r28 | ||
426 | * r3 = va, r5 = vsid | ||
427 | * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) | ||
428 | */ | ||
429 | rldicl r0,r3,64-12,48 | ||
430 | xor r28,r5,r0 /* hash */ | ||
431 | b 4f | ||
432 | |||
433 | 3: /* Calc vpn and put it in r29 */ | ||
434 | sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT | ||
435 | /* | ||
436 | * clrldi r3,r3,64 - SID_SHIFT_1T --> ea & 0xffffffffff | ||
437 | * srdi r28,r3,VPN_SHIFT | ||
438 | */ | ||
439 | rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) | ||
440 | or r29,r28,r29 | ||
441 | |||
442 | /* | ||
443 | * Calculate hash value for primary slot and | ||
444 | * store it in r28 for 1T segment | ||
445 | * r3 = va, r5 = vsid | ||
446 | */ | ||
447 | sldi r28,r5,25 /* vsid << 25 */ | ||
448 | /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ | ||
449 | rldicl r0,r3,64-12,36 | ||
450 | xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ | ||
451 | xor r28,r28,r0 /* hash */ | ||
452 | |||
453 | /* Convert linux PTE bits into HW equivalents */ | ||
454 | 4: | ||
455 | #ifdef CONFIG_PPC_SUBPAGE_PROT | ||
456 | andc r10,r30,r10 | ||
457 | andi. r3,r10,0x1fe /* Get basic set of flags */ | ||
458 | rlwinm r0,r10,32-9+1,30,30 /* _PAGE_RW -> _PAGE_USER (r0) */ | ||
459 | #else | ||
460 | andi. r3,r30,0x1fe /* Get basic set of flags */ | ||
461 | rlwinm r0,r30,32-9+1,30,30 /* _PAGE_RW -> _PAGE_USER (r0) */ | ||
462 | #endif | ||
463 | xori r3,r3,HPTE_R_N /* _PAGE_EXEC -> NOEXEC */ | ||
464 | rlwinm r4,r30,32-7+1,30,30 /* _PAGE_DIRTY -> _PAGE_USER (r4) */ | ||
465 | and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ | ||
466 | andc r0,r3,r0 /* r0 = pte & ~r0 */ | ||
467 | rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ | ||
468 | /* | ||
469 | * Always add "C" bit for perf. Memory coherence is always enabled | ||
470 | */ | ||
471 | ori r3,r3,HPTE_R_C | HPTE_R_M | ||
472 | |||
473 | /* We eventually do the icache sync here (maybe inline that | ||
474 | * code rather than call a C function...) | ||
475 | */ | ||
476 | BEGIN_FTR_SECTION | ||
477 | mr r4,r30 | ||
478 | mr r5,r7 | ||
479 | bl hash_page_do_lazy_icache | ||
480 | END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) | ||
481 | |||
482 | /* At this point, r3 contains new PP bits, save them in | ||
483 | * place of "access" in the param area (sic) | ||
484 | */ | ||
485 | std r3,STK_PARAM(R4)(r1) | ||
486 | |||
487 | /* Get htab_hash_mask */ | ||
488 | ld r4,htab_hash_mask@got(2) | ||
489 | ld r27,0(r4) /* htab_hash_mask -> r27 */ | ||
490 | |||
491 | /* Check if we may already be in the hashtable, in this case, we | ||
492 | * go to out-of-line code to try to modify the HPTE. We look for | ||
493 | * the bit at (1ul << (index + 32)) | ||
494 | */ | ||
495 | rldicl. r0,r31,64-12,48 | ||
496 | li r26,0 /* Default hidx */ | ||
497 | beq htab_insert_pte | ||
498 | |||
499 | /* | ||
500 | * Check if the pte was already inserted into the hash table | ||
501 | * as a 64k HW page, and invalidate the 64k HPTE if so. | ||
502 | */ | ||
503 | andis. r0,r31,_PAGE_COMBO@h | ||
504 | beq htab_inval_old_hpte | ||
505 | |||
506 | ld r6,STK_PARAM(R6)(r1) | ||
507 | ori r26,r6,PTE_PAGE_HIDX_OFFSET /* Load the hidx mask. */ | ||
508 | ld r26,0(r26) | ||
509 | addi r5,r25,36 /* Check actual HPTE_SUB bit, this */ | ||
510 | rldcr. r0,r31,r5,0 /* must match pgtable.h definition */ | ||
511 | bne htab_modify_pte | ||
512 | |||
513 | htab_insert_pte: | ||
514 | /* real page number in r5, PTE RPN value + index */ | ||
515 | andis. r0,r31,_PAGE_4K_PFN@h | ||
516 | srdi r5,r31,PTE_RPN_SHIFT | ||
517 | bne- htab_special_pfn | ||
518 | sldi r5,r5,PAGE_FACTOR | ||
519 | add r5,r5,r25 | ||
520 | htab_special_pfn: | ||
521 | sldi r5,r5,HW_PAGE_SHIFT | ||
522 | |||
523 | /* Calculate primary group hash */ | ||
524 | and r0,r28,r27 | ||
525 | rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ | ||
526 | |||
527 | /* Call ppc_md.hpte_insert */ | ||
528 | ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ | ||
529 | mr r4,r29 /* Retrieve vpn */ | ||
530 | li r7,0 /* !bolted, !secondary */ | ||
531 | li r8,MMU_PAGE_4K /* page size */ | ||
532 | li r9,MMU_PAGE_4K /* actual page size */ | ||
533 | ld r10,STK_PARAM(R9)(r1) /* segment size */ | ||
534 | .globl htab_call_hpte_insert1 | ||
535 | htab_call_hpte_insert1: | ||
536 | bl . /* patched by htab_finish_init() */ | ||
537 | cmpdi 0,r3,0 | ||
538 | bge htab_pte_insert_ok /* Insertion successful */ | ||
539 | cmpdi 0,r3,-2 /* Critical failure */ | ||
540 | beq- htab_pte_insert_failure | ||
541 | |||
542 | /* Now try secondary slot */ | ||
543 | |||
544 | /* real page number in r5, PTE RPN value + index */ | ||
545 | andis. r0,r31,_PAGE_4K_PFN@h | ||
546 | srdi r5,r31,PTE_RPN_SHIFT | ||
547 | bne- 3f | ||
548 | sldi r5,r5,PAGE_FACTOR | ||
549 | add r5,r5,r25 | ||
550 | 3: sldi r5,r5,HW_PAGE_SHIFT | ||
551 | |||
552 | /* Calculate secondary group hash */ | ||
553 | andc r0,r27,r28 | ||
554 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ | ||
555 | |||
556 | /* Call ppc_md.hpte_insert */ | ||
557 | ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ | ||
558 | mr r4,r29 /* Retrieve vpn */ | ||
559 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ | ||
560 | li r8,MMU_PAGE_4K /* page size */ | ||
561 | li r9,MMU_PAGE_4K /* actual page size */ | ||
562 | ld r10,STK_PARAM(R9)(r1) /* segment size */ | ||
563 | .globl htab_call_hpte_insert2 | ||
564 | htab_call_hpte_insert2: | ||
565 | bl . /* patched by htab_finish_init() */ | ||
566 | cmpdi 0,r3,0 | ||
567 | bge+ htab_pte_insert_ok /* Insertion successful */ | ||
568 | cmpdi 0,r3,-2 /* Critical failure */ | ||
569 | beq- htab_pte_insert_failure | ||
570 | |||
571 | /* Both are full, we need to evict something */ | ||
572 | mftb r0 | ||
573 | /* Pick a random group based on TB */ | ||
574 | andi. r0,r0,1 | ||
575 | mr r5,r28 | ||
576 | bne 2f | ||
577 | not r5,r5 | ||
578 | 2: and r0,r5,r27 | ||
579 | rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ | ||
580 | /* Call ppc_md.hpte_remove */ | ||
581 | .globl htab_call_hpte_remove | ||
582 | htab_call_hpte_remove: | ||
583 | bl . /* patched by htab_finish_init() */ | ||
584 | |||
585 | /* Try all again */ | ||
586 | b htab_insert_pte | ||
587 | |||
588 | /* | ||
589 | * Call out to C code to invalidate an 64k HW HPTE that is | ||
590 | * useless now that the segment has been switched to 4k pages. | ||
591 | */ | ||
592 | htab_inval_old_hpte: | ||
593 | mr r3,r29 /* vpn */ | ||
594 | mr r4,r31 /* PTE.pte */ | ||
595 | li r5,0 /* PTE.hidx */ | ||
596 | li r6,MMU_PAGE_64K /* psize */ | ||
597 | ld r7,STK_PARAM(R9)(r1) /* ssize */ | ||
598 | ld r8,STK_PARAM(R8)(r1) /* flags */ | ||
599 | bl flush_hash_page | ||
600 | /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */ | ||
601 | lis r0,_PAGE_HPTE_SUB@h | ||
602 | ori r0,r0,_PAGE_HPTE_SUB@l | ||
603 | andc r30,r30,r0 | ||
604 | b htab_insert_pte | ||
605 | |||
606 | htab_bail_ok: | ||
607 | li r3,0 | ||
608 | b htab_bail | ||
609 | |||
610 | htab_pte_insert_ok: | ||
611 | /* Insert slot number & secondary bit in PTE second half, | ||
612 | * clear _PAGE_BUSY and set the appropriate HPTE slot bit | ||
613 | */ | ||
614 | ld r6,STK_PARAM(R6)(r1) | ||
615 | li r0,_PAGE_BUSY | ||
616 | andc r30,r30,r0 | ||
617 | /* HPTE SUB bit */ | ||
618 | li r0,1 | ||
619 | subfic r5,r25,27 /* Must match bit position in */ | ||
620 | sld r0,r0,r5 /* pgtable.h */ | ||
621 | or r30,r30,r0 | ||
622 | /* hindx */ | ||
623 | sldi r5,r25,2 | ||
624 | sld r3,r3,r5 | ||
625 | li r4,0xf | ||
626 | sld r4,r4,r5 | ||
627 | andc r26,r26,r4 | ||
628 | or r26,r26,r3 | ||
629 | ori r5,r6,PTE_PAGE_HIDX_OFFSET | ||
630 | std r26,0(r5) | ||
631 | lwsync | ||
632 | std r30,0(r6) | ||
633 | li r3, 0 | ||
634 | htab_bail: | ||
635 | ld r25,STK_REG(R25)(r1) | ||
636 | ld r26,STK_REG(R26)(r1) | ||
637 | ld r27,STK_REG(R27)(r1) | ||
638 | ld r28,STK_REG(R28)(r1) | ||
639 | ld r29,STK_REG(R29)(r1) | ||
640 | ld r30,STK_REG(R30)(r1) | ||
641 | ld r31,STK_REG(R31)(r1) | ||
642 | addi r1,r1,STACKFRAMESIZE | ||
643 | ld r0,16(r1) | ||
644 | mtlr r0 | ||
645 | blr | ||
646 | |||
647 | htab_modify_pte: | ||
648 | /* Keep PP bits in r4 and slot idx from the PTE around in r3 */ | ||
649 | mr r4,r3 | ||
650 | sldi r5,r25,2 | ||
651 | srd r3,r26,r5 | ||
652 | |||
653 | /* Secondary group? If yes, get an inverted hash value */ | ||
654 | mr r5,r28 | ||
655 | andi. r0,r3,0x8 /* page secondary ? */ | ||
656 | beq 1f | ||
657 | not r5,r5 | ||
658 | 1: andi. r3,r3,0x7 /* extract idx alone */ | ||
659 | |||
660 | /* Calculate proper slot value for ppc_md.hpte_updatepp */ | ||
661 | and r0,r5,r27 | ||
662 | rldicr r0,r0,3,63-3 /* r0 = (hash & mask) << 3 */ | ||
663 | add r3,r0,r3 /* add slot idx */ | ||
664 | |||
665 | /* Call ppc_md.hpte_updatepp */ | ||
666 | mr r5,r29 /* vpn */ | ||
667 | li r6,MMU_PAGE_4K /* base page size */ | ||
668 | li r7,MMU_PAGE_4K /* actual page size */ | ||
669 | ld r8,STK_PARAM(R9)(r1) /* segment size */ | ||
670 | ld r9,STK_PARAM(R8)(r1) /* get "flags" param */ | ||
671 | .globl htab_call_hpte_updatepp | ||
672 | htab_call_hpte_updatepp: | ||
673 | bl . /* patched by htab_finish_init() */ | ||
674 | |||
675 | /* If the update failed, typically because the HPTE wasn't | ||
676 | * really there, we try an insertion. | ||
677 | */ | ||
678 | cmpdi 0,r3,-1 | ||
679 | beq- htab_insert_pte | ||
680 | |||
681 | /* Clear the BUSY bit and Write out the PTE */ | ||
682 | li r0,_PAGE_BUSY | ||
683 | andc r30,r30,r0 | ||
684 | ld r6,STK_PARAM(R6)(r1) | ||
685 | std r30,0(r6) | ||
686 | li r3,0 | ||
687 | b htab_bail | ||
688 | |||
689 | htab_wrong_access: | ||
690 | /* Bail out clearing reservation */ | ||
691 | stdcx. r31,0,r6 | ||
692 | li r3,1 | ||
693 | b htab_bail | ||
694 | |||
695 | htab_pte_insert_failure: | ||
696 | /* Bail out restoring old PTE */ | ||
697 | ld r6,STK_PARAM(R6)(r1) | ||
698 | std r31,0(r6) | ||
699 | li r3,-1 | ||
700 | b htab_bail | ||
701 | |||
702 | #endif /* CONFIG_PPC_64K_PAGES */ | ||
703 | |||
704 | #ifdef CONFIG_PPC_64K_PAGES | ||
705 | |||
706 | /***************************************************************************** | ||
707 | * * | ||
708 | * 64K SW & 64K HW in a 64K segment pages implementation * | ||
709 | * * | ||
710 | *****************************************************************************/ | ||
711 | |||
712 | _GLOBAL(__hash_page_64K) | ||
713 | mflr r0 | ||
714 | std r0,16(r1) | ||
715 | stdu r1,-STACKFRAMESIZE(r1) | ||
716 | /* Save all params that we need after a function call */ | ||
717 | std r6,STK_PARAM(R6)(r1) | ||
718 | std r8,STK_PARAM(R8)(r1) | ||
719 | std r9,STK_PARAM(R9)(r1) | ||
720 | |||
721 | /* Save non-volatile registers. | ||
722 | * r31 will hold "old PTE" | ||
723 | * r30 is "new PTE" | ||
724 | * r29 is vpn | ||
725 | * r28 is a hash value | ||
726 | * r27 is hashtab mask (maybe dynamic patched instead ?) | ||
727 | */ | ||
728 | std r27,STK_REG(R27)(r1) | ||
729 | std r28,STK_REG(R28)(r1) | ||
730 | std r29,STK_REG(R29)(r1) | ||
731 | std r30,STK_REG(R30)(r1) | ||
732 | std r31,STK_REG(R31)(r1) | ||
733 | |||
734 | /* Step 1: | ||
735 | * | ||
736 | * Check permissions, atomically mark the linux PTE busy | ||
737 | * and hashed. | ||
738 | */ | ||
739 | 1: | ||
740 | ldarx r31,0,r6 | ||
741 | /* Check access rights (access & ~(pte_val(*ptep))) */ | ||
742 | andc. r0,r4,r31 | ||
743 | bne- ht64_wrong_access | ||
744 | /* Check if PTE is busy */ | ||
745 | andi. r0,r31,_PAGE_BUSY | ||
746 | /* If so, just bail out and refault if needed. Someone else | ||
747 | * is changing this PTE anyway and might hash it. | ||
748 | */ | ||
749 | bne- ht64_bail_ok | ||
750 | BEGIN_FTR_SECTION | ||
751 | /* Check if PTE has the cache-inhibit bit set */ | ||
752 | andi. r0,r31,_PAGE_NO_CACHE | ||
753 | /* If so, bail out and refault as a 4k page */ | ||
754 | bne- ht64_bail_ok | ||
755 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_CI_LARGE_PAGE) | ||
756 | /* Prepare new PTE value (turn access RW into DIRTY, then | ||
757 | * add BUSY and ACCESSED) | ||
758 | */ | ||
759 | rlwinm r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */ | ||
760 | or r30,r30,r31 | ||
761 | ori r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | ||
762 | /* Write the linux PTE atomically (setting busy) */ | ||
763 | stdcx. r30,0,r6 | ||
764 | bne- 1b | ||
765 | isync | ||
766 | |||
767 | /* Step 2: | ||
768 | * | ||
769 | * Insert/Update the HPTE in the hash table. At this point, | ||
770 | * r4 (access) is re-useable, we use it for the new HPTE flags | ||
771 | */ | ||
772 | |||
773 | BEGIN_FTR_SECTION | ||
774 | cmpdi r9,0 /* check segment size */ | ||
775 | bne 3f | ||
776 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | ||
777 | /* Calc vpn and put it in r29 */ | ||
778 | sldi r29,r5,SID_SHIFT - VPN_SHIFT | ||
779 | rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) | ||
780 | or r29,r28,r29 | ||
781 | |||
782 | /* Calculate hash value for primary slot and store it in r28 | ||
783 | * r3 = va, r5 = vsid | ||
784 | * r0 = (va >> 16) & ((1ul << (28 - 16)) -1) | ||
785 | */ | ||
786 | rldicl r0,r3,64-16,52 | ||
787 | xor r28,r5,r0 /* hash */ | ||
788 | b 4f | ||
789 | |||
790 | 3: /* Calc vpn and put it in r29 */ | ||
791 | sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT | ||
792 | rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) | ||
793 | or r29,r28,r29 | ||
794 | /* | ||
795 | * calculate hash value for primary slot and | ||
796 | * store it in r28 for 1T segment | ||
797 | * r3 = va, r5 = vsid | ||
798 | */ | ||
799 | sldi r28,r5,25 /* vsid << 25 */ | ||
800 | /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */ | ||
801 | rldicl r0,r3,64-16,40 | ||
802 | xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ | ||
803 | xor r28,r28,r0 /* hash */ | ||
804 | |||
805 | /* Convert linux PTE bits into HW equivalents */ | ||
806 | 4: andi. r3,r30,0x1fe /* Get basic set of flags */ | ||
807 | xori r3,r3,HPTE_R_N /* _PAGE_EXEC -> NOEXEC */ | ||
808 | rlwinm r0,r30,32-9+1,30,30 /* _PAGE_RW -> _PAGE_USER (r0) */ | ||
809 | rlwinm r4,r30,32-7+1,30,30 /* _PAGE_DIRTY -> _PAGE_USER (r4) */ | ||
810 | and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ | ||
811 | andc r0,r30,r0 /* r0 = pte & ~r0 */ | ||
812 | rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ | ||
813 | /* | ||
814 | * Always add "C" bit for perf. Memory coherence is always enabled | ||
815 | */ | ||
816 | ori r3,r3,HPTE_R_C | HPTE_R_M | ||
817 | |||
818 | /* We eventually do the icache sync here (maybe inline that | ||
819 | * code rather than call a C function...) | ||
820 | */ | ||
821 | BEGIN_FTR_SECTION | ||
822 | mr r4,r30 | ||
823 | mr r5,r7 | ||
824 | bl hash_page_do_lazy_icache | ||
825 | END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) | ||
826 | |||
827 | /* At this point, r3 contains new PP bits, save them in | ||
828 | * place of "access" in the param area (sic) | ||
829 | */ | ||
830 | std r3,STK_PARAM(R4)(r1) | ||
831 | |||
832 | /* Get htab_hash_mask */ | ||
833 | ld r4,htab_hash_mask@got(2) | ||
834 | ld r27,0(r4) /* htab_hash_mask -> r27 */ | ||
835 | |||
836 | /* Check if we may already be in the hashtable; if so, we | ||
837 | * go to out-of-line code to try to modify the HPTE | ||
838 | */ | ||
839 | rldicl. r0,r31,64-12,48 | ||
840 | bne ht64_modify_pte | ||
841 | |||
842 | ht64_insert_pte: | ||
843 | /* Clear hpte bits in new pte (we also clear BUSY btw) and | ||
844 | * add _PAGE_HPTE_SUB0 | ||
845 | */ | ||
846 | lis r0,_PAGE_HPTEFLAGS@h | ||
847 | ori r0,r0,_PAGE_HPTEFLAGS@l | ||
848 | andc r30,r30,r0 | ||
849 | #ifdef CONFIG_PPC_64K_PAGES | ||
850 | oris r30,r30,_PAGE_HPTE_SUB0@h | ||
851 | #else | ||
852 | ori r30,r30,_PAGE_HASHPTE | ||
853 | #endif | ||
854 | /* Physical address in r5 */ | ||
855 | rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT | ||
856 | sldi r5,r5,PAGE_SHIFT | ||
857 | |||
858 | /* Calculate primary group hash */ | ||
859 | and r0,r28,r27 | ||
860 | rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ | ||
861 | |||
862 | /* Call ppc_md.hpte_insert */ | ||
863 | ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ | ||
864 | mr r4,r29 /* Retrieve vpn */ | ||
865 | li r7,0 /* !bolted, !secondary */ | ||
866 | li r8,MMU_PAGE_64K | ||
867 | li r9,MMU_PAGE_64K /* actual page size */ | ||
868 | ld r10,STK_PARAM(R9)(r1) /* segment size */ | ||
869 | .globl ht64_call_hpte_insert1 | ||
870 | ht64_call_hpte_insert1: | ||
871 | bl . /* patched by htab_finish_init() */ | ||
872 | cmpdi 0,r3,0 | ||
873 | bge ht64_pte_insert_ok /* Insertion successful */ | ||
874 | cmpdi 0,r3,-2 /* Critical failure */ | ||
875 | beq- ht64_pte_insert_failure | ||
876 | |||
877 | /* Now try secondary slot */ | ||
878 | |||
879 | /* Physical address in r5 */ | ||
880 | rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT | ||
881 | sldi r5,r5,PAGE_SHIFT | ||
882 | |||
883 | /* Calculate secondary group hash */ | ||
884 | andc r0,r27,r28 | ||
885 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ | ||
886 | |||
887 | /* Call ppc_md.hpte_insert */ | ||
888 | ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ | ||
889 | mr r4,r29 /* Retrieve vpn */ | ||
890 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ | ||
891 | li r8,MMU_PAGE_64K | ||
892 | li r9,MMU_PAGE_64K /* actual page size */ | ||
893 | ld r10,STK_PARAM(R9)(r1) /* segment size */ | ||
894 | .globl ht64_call_hpte_insert2 | ||
895 | ht64_call_hpte_insert2: | ||
896 | bl . /* patched by htab_finish_init() */ | ||
897 | cmpdi 0,r3,0 | ||
898 | bge+ ht64_pte_insert_ok /* Insertion successful */ | ||
899 | cmpdi 0,r3,-2 /* Critical failure */ | ||
900 | beq- ht64_pte_insert_failure | ||
901 | |||
902 | /* Both are full, we need to evict something */ | ||
903 | mftb r0 | ||
904 | /* Pick a random group based on TB */ | ||
905 | andi. r0,r0,1 | ||
906 | mr r5,r28 | ||
907 | bne 2f | ||
908 | not r5,r5 | ||
909 | 2: and r0,r5,r27 | ||
910 | rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ | ||
911 | /* Call ppc_md.hpte_remove */ | ||
912 | .globl ht64_call_hpte_remove | ||
913 | ht64_call_hpte_remove: | ||
914 | bl . /* patched by htab_finish_init() */ | ||
915 | |||
916 | /* Try all again */ | ||
917 | b ht64_insert_pte | ||
918 | |||
919 | ht64_bail_ok: | ||
920 | li r3,0 | ||
921 | b ht64_bail | ||
922 | |||
923 | ht64_pte_insert_ok: | ||
924 | /* Insert slot number & secondary bit in PTE */ | ||
925 | rldimi r30,r3,12,63-15 | ||
926 | |||
927 | /* Write out the PTE with a normal write | ||
928 | * (maybe adding an eieio would still be good?) | ||
929 | */ | ||
930 | ht64_write_out_pte: | ||
931 | ld r6,STK_PARAM(R6)(r1) | ||
932 | std r30,0(r6) | ||
933 | li r3, 0 | ||
934 | ht64_bail: | ||
935 | ld r27,STK_REG(R27)(r1) | ||
936 | ld r28,STK_REG(R28)(r1) | ||
937 | ld r29,STK_REG(R29)(r1) | ||
938 | ld r30,STK_REG(R30)(r1) | ||
939 | ld r31,STK_REG(R31)(r1) | ||
940 | addi r1,r1,STACKFRAMESIZE | ||
941 | ld r0,16(r1) | ||
942 | mtlr r0 | ||
943 | blr | ||
944 | |||
945 | ht64_modify_pte: | ||
946 | /* Keep PP bits in r4 and slot idx from the PTE around in r3 */ | ||
947 | mr r4,r3 | ||
948 | rlwinm r3,r31,32-12,29,31 | ||
949 | |||
950 | /* Secondary group? If yes, get an inverted hash value */ | ||
951 | mr r5,r28 | ||
952 | andi. r0,r31,_PAGE_F_SECOND | ||
953 | beq 1f | ||
954 | not r5,r5 | ||
955 | 1: | ||
956 | /* Calculate proper slot value for ppc_md.hpte_updatepp */ | ||
957 | and r0,r5,r27 | ||
958 | rldicr r0,r0,3,63-3 /* r0 = (hash & mask) << 3 */ | ||
959 | add r3,r0,r3 /* add slot idx */ | ||
960 | |||
961 | /* Call ppc_md.hpte_updatepp */ | ||
962 | mr r5,r29 /* vpn */ | ||
963 | li r6,MMU_PAGE_64K /* base page size */ | ||
964 | li r7,MMU_PAGE_64K /* actual page size */ | ||
965 | ld r8,STK_PARAM(R9)(r1) /* segment size */ | ||
966 | ld r9,STK_PARAM(R8)(r1) /* get "flags" param */ | ||
967 | .globl ht64_call_hpte_updatepp | ||
968 | ht64_call_hpte_updatepp: | ||
969 | bl . /* patched by htab_finish_init() */ | ||
970 | |||
971 | /* if we failed, typically because the HPTE wasn't really here, | ||
972 | * we try an insertion. | ||
973 | */ | ||
974 | cmpdi 0,r3,-1 | ||
975 | beq- ht64_insert_pte | ||
976 | |||
977 | /* Clear the BUSY bit and Write out the PTE */ | ||
978 | li r0,_PAGE_BUSY | ||
979 | andc r30,r30,r0 | ||
980 | b ht64_write_out_pte | ||
981 | |||
982 | ht64_wrong_access: | ||
983 | /* Bail out clearing reservation */ | ||
984 | stdcx. r31,0,r6 | ||
985 | li r3,1 | ||
986 | b ht64_bail | ||
987 | |||
988 | ht64_pte_insert_failure: | ||
989 | /* Bail out restoring old PTE */ | ||
990 | ld r6,STK_PARAM(R6)(r1) | ||
991 | std r31,0(r6) | ||
992 | li r3,-1 | ||
993 | b ht64_bail | ||
994 | |||
995 | |||
996 | #endif /* CONFIG_PPC_64K_PAGES */ | ||
997 | |||
998 | |||
999 | /***************************************************************************** | ||
1000 | * * | ||
1001 | * Huge pages implementation is in hugetlbpage.c * | ||
1002 | * * | ||
1003 | *****************************************************************************/ | ||
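For readers tracing the ht64 assembly above, a minimal C sketch of what Step 2 computes, with va and vsid standing in for r3 and r5. The shift constants are read off the sldi/rldicl instructions shown; the helper name is ours, not part of the patch.

	/* Sketch of the primary-hash computation in the assembly above. */
	static unsigned long ht64_hash_sketch(unsigned long va, unsigned long vsid,
					      int is_1T_segment)
	{
		if (!is_1T_segment)	/* 256M segment */
			return vsid ^ ((va >> 16) & ((1UL << (28 - 16)) - 1));
		/* 1T segment folds vsid << 25 in as well */
		return vsid ^ (vsid << 25) ^
			((va >> 16) & ((1UL << (40 - 16)) - 1));
	}

	/* Primary group offset:   (hash & htab_hash_mask) << 3  (8 HPTEs per group)
	 * Secondary group offset: (~hash & htab_hash_mask) << 3, per the andc above */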
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index c8822af10a58..8eaac81347fd 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c | |||
@@ -429,6 +429,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, | |||
429 | local_irq_restore(flags); | 429 | local_irq_restore(flags); |
430 | } | 430 | } |
431 | 431 | ||
432 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
432 | static void native_hugepage_invalidate(unsigned long vsid, | 433 | static void native_hugepage_invalidate(unsigned long vsid, |
433 | unsigned long addr, | 434 | unsigned long addr, |
434 | unsigned char *hpte_slot_array, | 435 | unsigned char *hpte_slot_array, |
@@ -482,6 +483,15 @@ static void native_hugepage_invalidate(unsigned long vsid, | |||
482 | } | 483 | } |
483 | local_irq_restore(flags); | 484 | local_irq_restore(flags); |
484 | } | 485 | } |
486 | #else | ||
487 | static void native_hugepage_invalidate(unsigned long vsid, | ||
488 | unsigned long addr, | ||
489 | unsigned char *hpte_slot_array, | ||
490 | int psize, int ssize, int local) | ||
491 | { | ||
492 | WARN(1, "%s called without THP support\n", __func__); | ||
493 | } | ||
494 | #endif | ||
485 | 495 | ||
486 | static inline int __hpte_actual_psize(unsigned int lp, int psize) | 496 | static inline int __hpte_actual_psize(unsigned int lp, int psize) |
487 | { | 497 | { |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 7f9616f7c479..ba59d5977f34 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -159,24 +159,41 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = { | |||
159 | }, | 159 | }, |
160 | }; | 160 | }; |
161 | 161 | ||
162 | static unsigned long htab_convert_pte_flags(unsigned long pteflags) | 162 | unsigned long htab_convert_pte_flags(unsigned long pteflags) |
163 | { | 163 | { |
164 | unsigned long rflags = pteflags & 0x1fa; | 164 | unsigned long rflags = 0; |
165 | 165 | ||
166 | /* _PAGE_EXEC -> NOEXEC */ | 166 | /* _PAGE_EXEC -> NOEXEC */ |
167 | if ((pteflags & _PAGE_EXEC) == 0) | 167 | if ((pteflags & _PAGE_EXEC) == 0) |
168 | rflags |= HPTE_R_N; | 168 | rflags |= HPTE_R_N; |
169 | 169 | /* | |
170 | /* PP bits. PAGE_USER is already PP bit 0x2, so we only | 170 | * PP bits: |
171 | need to add in 0x1 if it's a read-only user page | 171 | * Linux uses SLB key 0 for kernel and 1 for user. |
172 | * kernel areas are mapped by PP bits 00 | ||
173 | * and there is no kernel RO (_PAGE_KERNEL_RO). | ||
174 | * User areas are mapped by 0x2 and read-only ones by | ||
175 | * 0x3. | ||
172 | */ | 176 | */ |
173 | if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) && | 177 | if (pteflags & _PAGE_USER) { |
174 | (pteflags & _PAGE_DIRTY))) | 178 | rflags |= 0x2; |
175 | rflags |= 1; | 179 | if (!((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY))) |
180 | rflags |= 0x1; | ||
181 | } | ||
176 | /* | 182 | /* |
177 | * Always add "C" bit for perf. Memory coherence is always enabled | 183 | * Always add "C" bit for perf. Memory coherence is always enabled |
178 | */ | 184 | */ |
179 | return rflags | HPTE_R_C | HPTE_R_M; | 185 | rflags |= HPTE_R_C | HPTE_R_M; |
186 | /* | ||
187 | * Add in WIG bits | ||
188 | */ | ||
189 | if (pteflags & _PAGE_WRITETHRU) | ||
190 | rflags |= HPTE_R_W; | ||
191 | if (pteflags & _PAGE_NO_CACHE) | ||
192 | rflags |= HPTE_R_I; | ||
193 | if (pteflags & _PAGE_GUARDED) | ||
194 | rflags |= HPTE_R_G; | ||
195 | |||
196 | return rflags; | ||
180 | } | 197 | } |
181 | 198 | ||
182 | int htab_bolt_mapping(unsigned long vstart, unsigned long vend, | 199 | int htab_bolt_mapping(unsigned long vstart, unsigned long vend, |
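A worked example of the PP encoding produced by the rewritten htab_convert_pte_flags() above; the values follow directly from the hunk (an illustration, not part of the patch):

	/* kernel mapping (no _PAGE_USER)              -> PP = 0x0
	 * user page with _PAGE_RW and _PAGE_DIRTY     -> PP = 0x2 (read/write)
	 * user page read-only, or writable but clean  -> PP = 0x3 (read-only)
	 */
	unsigned long rflags = htab_convert_pte_flags(_PAGE_USER | _PAGE_EXEC);
	/* expect: (rflags & 0x3) == 0x3 and (rflags & HPTE_R_N) == 0,
	 * plus HPTE_R_C | HPTE_R_M, which are always ORed in */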
@@ -629,46 +646,6 @@ int remove_section_mapping(unsigned long start, unsigned long end) | |||
629 | } | 646 | } |
630 | #endif /* CONFIG_MEMORY_HOTPLUG */ | 647 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
631 | 648 | ||
632 | extern u32 htab_call_hpte_insert1[]; | ||
633 | extern u32 htab_call_hpte_insert2[]; | ||
634 | extern u32 htab_call_hpte_remove[]; | ||
635 | extern u32 htab_call_hpte_updatepp[]; | ||
636 | extern u32 ht64_call_hpte_insert1[]; | ||
637 | extern u32 ht64_call_hpte_insert2[]; | ||
638 | extern u32 ht64_call_hpte_remove[]; | ||
639 | extern u32 ht64_call_hpte_updatepp[]; | ||
640 | |||
641 | static void __init htab_finish_init(void) | ||
642 | { | ||
643 | #ifdef CONFIG_PPC_64K_PAGES | ||
644 | patch_branch(ht64_call_hpte_insert1, | ||
645 | ppc_function_entry(ppc_md.hpte_insert), | ||
646 | BRANCH_SET_LINK); | ||
647 | patch_branch(ht64_call_hpte_insert2, | ||
648 | ppc_function_entry(ppc_md.hpte_insert), | ||
649 | BRANCH_SET_LINK); | ||
650 | patch_branch(ht64_call_hpte_remove, | ||
651 | ppc_function_entry(ppc_md.hpte_remove), | ||
652 | BRANCH_SET_LINK); | ||
653 | patch_branch(ht64_call_hpte_updatepp, | ||
654 | ppc_function_entry(ppc_md.hpte_updatepp), | ||
655 | BRANCH_SET_LINK); | ||
656 | #endif /* CONFIG_PPC_64K_PAGES */ | ||
657 | |||
658 | patch_branch(htab_call_hpte_insert1, | ||
659 | ppc_function_entry(ppc_md.hpte_insert), | ||
660 | BRANCH_SET_LINK); | ||
661 | patch_branch(htab_call_hpte_insert2, | ||
662 | ppc_function_entry(ppc_md.hpte_insert), | ||
663 | BRANCH_SET_LINK); | ||
664 | patch_branch(htab_call_hpte_remove, | ||
665 | ppc_function_entry(ppc_md.hpte_remove), | ||
666 | BRANCH_SET_LINK); | ||
667 | patch_branch(htab_call_hpte_updatepp, | ||
668 | ppc_function_entry(ppc_md.hpte_updatepp), | ||
669 | BRANCH_SET_LINK); | ||
670 | } | ||
671 | |||
672 | static void __init htab_initialize(void) | 649 | static void __init htab_initialize(void) |
673 | { | 650 | { |
674 | unsigned long table; | 651 | unsigned long table; |
@@ -815,7 +792,6 @@ static void __init htab_initialize(void) | |||
815 | mmu_linear_psize, mmu_kernel_ssize)); | 792 | mmu_linear_psize, mmu_kernel_ssize)); |
816 | } | 793 | } |
817 | 794 | ||
818 | htab_finish_init(); | ||
819 | 795 | ||
820 | DBG(" <- htab_initialize()\n"); | 796 | DBG(" <- htab_initialize()\n"); |
821 | } | 797 | } |
@@ -877,11 +853,11 @@ static unsigned int get_paca_psize(unsigned long addr) | |||
877 | unsigned long index, mask_index; | 853 | unsigned long index, mask_index; |
878 | 854 | ||
879 | if (addr < SLICE_LOW_TOP) { | 855 | if (addr < SLICE_LOW_TOP) { |
880 | lpsizes = get_paca()->context.low_slices_psize; | 856 | lpsizes = get_paca()->mm_ctx_low_slices_psize; |
881 | index = GET_LOW_SLICE_INDEX(addr); | 857 | index = GET_LOW_SLICE_INDEX(addr); |
882 | return (lpsizes >> (index * 4)) & 0xF; | 858 | return (lpsizes >> (index * 4)) & 0xF; |
883 | } | 859 | } |
884 | hpsizes = get_paca()->context.high_slices_psize; | 860 | hpsizes = get_paca()->mm_ctx_high_slices_psize; |
885 | index = GET_HIGH_SLICE_INDEX(addr); | 861 | index = GET_HIGH_SLICE_INDEX(addr); |
886 | mask_index = index & 0x1; | 862 | mask_index = index & 0x1; |
887 | return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF; | 863 | return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF; |
@@ -890,7 +866,7 @@ static unsigned int get_paca_psize(unsigned long addr) | |||
890 | #else | 866 | #else |
891 | unsigned int get_paca_psize(unsigned long addr) | 867 | unsigned int get_paca_psize(unsigned long addr) |
892 | { | 868 | { |
893 | return get_paca()->context.user_psize; | 869 | return get_paca()->mm_ctx_user_psize; |
894 | } | 870 | } |
895 | #endif | 871 | #endif |
896 | 872 | ||
@@ -906,7 +882,8 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr) | |||
906 | slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K); | 882 | slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K); |
907 | copro_flush_all_slbs(mm); | 883 | copro_flush_all_slbs(mm); |
908 | if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) { | 884 | if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) { |
909 | get_paca()->context = mm->context; | 885 | |
886 | copy_mm_to_paca(&mm->context); | ||
910 | slb_flush_and_rebolt(); | 887 | slb_flush_and_rebolt(); |
911 | } | 888 | } |
912 | } | 889 | } |
@@ -973,7 +950,7 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm, | |||
973 | { | 950 | { |
974 | if (user_region) { | 951 | if (user_region) { |
975 | if (psize != get_paca_psize(ea)) { | 952 | if (psize != get_paca_psize(ea)) { |
976 | get_paca()->context = mm->context; | 953 | copy_mm_to_paca(&mm->context); |
977 | slb_flush_and_rebolt(); | 954 | slb_flush_and_rebolt(); |
978 | } | 955 | } |
979 | } else if (get_paca()->vmalloc_sllp != | 956 | } else if (get_paca()->vmalloc_sllp != |
@@ -1148,9 +1125,10 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, | |||
1148 | } | 1125 | } |
1149 | } | 1126 | } |
1150 | 1127 | ||
1128 | #endif /* CONFIG_PPC_64K_PAGES */ | ||
1129 | |||
1151 | if (current->mm == mm) | 1130 | if (current->mm == mm) |
1152 | check_paca_psize(ea, mm, psize, user_region); | 1131 | check_paca_psize(ea, mm, psize, user_region); |
1153 | #endif /* CONFIG_PPC_64K_PAGES */ | ||
1154 | 1132 | ||
1155 | #ifdef CONFIG_PPC_64K_PAGES | 1133 | #ifdef CONFIG_PPC_64K_PAGES |
1156 | if (psize == MMU_PAGE_64K) | 1134 | if (psize == MMU_PAGE_64K) |
@@ -1203,6 +1181,35 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap, | |||
1203 | } | 1181 | } |
1204 | EXPORT_SYMBOL_GPL(hash_page); | 1182 | EXPORT_SYMBOL_GPL(hash_page); |
1205 | 1183 | ||
1184 | int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap, | ||
1185 | unsigned long dsisr) | ||
1186 | { | ||
1187 | unsigned long access = _PAGE_PRESENT; | ||
1188 | unsigned long flags = 0; | ||
1189 | struct mm_struct *mm = current->mm; | ||
1190 | |||
1191 | if (REGION_ID(ea) == VMALLOC_REGION_ID) | ||
1192 | mm = &init_mm; | ||
1193 | |||
1194 | if (dsisr & DSISR_NOHPTE) | ||
1195 | flags |= HPTE_NOHPTE_UPDATE; | ||
1196 | |||
1197 | if (dsisr & DSISR_ISSTORE) | ||
1198 | access |= _PAGE_RW; | ||
1199 | /* | ||
1200 | * We need to set the _PAGE_USER bit if MSR_PR is set or if we are | ||
1201 | * accessing a userspace segment (even from the kernel). We assume | ||
1202 | * kernel addresses always have the high bit set. | ||
1203 | */ | ||
1204 | if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID)) | ||
1205 | access |= _PAGE_USER; | ||
1206 | |||
1207 | if (trap == 0x400) | ||
1208 | access |= _PAGE_EXEC; | ||
1209 | |||
1210 | return hash_page_mm(mm, ea, access, trap, flags); | ||
1211 | } | ||
1212 | |||
1206 | void hash_preload(struct mm_struct *mm, unsigned long ea, | 1213 | void hash_preload(struct mm_struct *mm, unsigned long ea, |
1207 | unsigned long access, unsigned long trap) | 1214 | unsigned long access, unsigned long trap) |
1208 | { | 1215 | { |
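Two hypothetical faults traced through the new __hash_page() above, to make the flag derivation concrete (both assume no other DSISR bits are set):

	/* 1. Instruction fault (trap == 0x400) taken with MSR_PR set:
	 *      access = _PAGE_PRESENT | _PAGE_USER | _PAGE_EXEC;
	 *
	 * 2. Userspace store fault with DSISR_ISSTORE | DSISR_NOHPTE:
	 *      access = _PAGE_PRESENT | _PAGE_RW | _PAGE_USER;
	 *      flags  = HPTE_NOHPTE_UPDATE;
	 *
	 * both end in hash_page_mm(mm, ea, access, trap, flags). */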
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c index 4d87122cf6a7..baf1301ded0c 100644 --- a/arch/powerpc/mm/hugepage-hash64.c +++ b/arch/powerpc/mm/hugepage-hash64.c | |||
@@ -54,18 +54,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, | |||
54 | new_pmd |= _PAGE_DIRTY; | 54 | new_pmd |= _PAGE_DIRTY; |
55 | } while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp, | 55 | } while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp, |
56 | old_pmd, new_pmd)); | 56 | old_pmd, new_pmd)); |
57 | /* | 57 | rflags = htab_convert_pte_flags(new_pmd); |
58 | * PP bits. _PAGE_USER is already PP bit 0x2, so we only | ||
59 | * need to add in 0x1 if it's a read-only user page | ||
60 | */ | ||
61 | rflags = new_pmd & _PAGE_USER; | ||
62 | if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) && | ||
63 | (new_pmd & _PAGE_DIRTY))) | ||
64 | rflags |= 0x1; | ||
65 | /* | ||
66 | * _PAGE_EXEC -> HW_NO_EXEC since it's inverted | ||
67 | */ | ||
68 | rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N); | ||
69 | 58 | ||
70 | #if 0 | 59 | #if 0 |
71 | if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) { | 60 | if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) { |
@@ -82,7 +71,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, | |||
82 | */ | 71 | */ |
83 | shift = mmu_psize_defs[psize].shift; | 72 | shift = mmu_psize_defs[psize].shift; |
84 | index = (ea & ~HPAGE_PMD_MASK) >> shift; | 73 | index = (ea & ~HPAGE_PMD_MASK) >> shift; |
85 | BUG_ON(index >= 4096); | 74 | BUG_ON(index >= PTE_FRAG_SIZE); |
86 | 75 | ||
87 | vpn = hpt_vpn(ea, vsid, ssize); | 76 | vpn = hpt_vpn(ea, vsid, ssize); |
88 | hpte_slot_array = get_hpte_slot_array(pmdp); | 77 | hpte_slot_array = get_hpte_slot_array(pmdp); |
@@ -131,13 +120,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, | |||
131 | pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; | 120 | pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; |
132 | new_pmd |= _PAGE_HASHPTE; | 121 | new_pmd |= _PAGE_HASHPTE; |
133 | 122 | ||
134 | /* Add in WIMG bits */ | ||
135 | rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | | ||
136 | _PAGE_GUARDED)); | ||
137 | /* | ||
138 | * enable the memory coherence always | ||
139 | */ | ||
140 | rflags |= HPTE_R_M; | ||
141 | repeat: | 123 | repeat: |
142 | hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; | 124 | hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; |
143 | 125 | ||
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c index ba47aaf33a4b..7e6d0880813f 100644 --- a/arch/powerpc/mm/hugetlbpage-book3e.c +++ b/arch/powerpc/mm/hugetlbpage-book3e.c | |||
@@ -51,6 +51,48 @@ static inline int mmu_get_tsize(int psize) | |||
51 | return mmu_psize_defs[psize].enc; | 51 | return mmu_psize_defs[psize].enc; |
52 | } | 52 | } |
53 | 53 | ||
54 | #if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_PPC64) | ||
55 | #include <asm/paca.h> | ||
56 | |||
57 | static inline void book3e_tlb_lock(void) | ||
58 | { | ||
59 | struct paca_struct *paca = get_paca(); | ||
60 | unsigned long tmp; | ||
61 | int token = smp_processor_id() + 1; | ||
62 | |||
63 | asm volatile("1: lbarx %0, 0, %1;" | ||
64 | "cmpwi %0, 0;" | ||
65 | "bne 2f;" | ||
66 | "stbcx. %2, 0, %1;" | ||
67 | "bne 1b;" | ||
68 | "b 3f;" | ||
69 | "2: lbzx %0, 0, %1;" | ||
70 | "cmpwi %0, 0;" | ||
71 | "bne 2b;" | ||
72 | "b 1b;" | ||
73 | "3:" | ||
74 | : "=&r" (tmp) | ||
75 | : "r" (&paca->tcd_ptr->lock), "r" (token) | ||
76 | : "memory"); | ||
77 | } | ||
78 | |||
79 | static inline void book3e_tlb_unlock(void) | ||
80 | { | ||
81 | struct paca_struct *paca = get_paca(); | ||
82 | |||
83 | isync(); | ||
84 | paca->tcd_ptr->lock = 0; | ||
85 | } | ||
86 | #else | ||
87 | static inline void book3e_tlb_lock(void) | ||
88 | { | ||
89 | } | ||
90 | |||
91 | static inline void book3e_tlb_unlock(void) | ||
92 | { | ||
93 | } | ||
94 | #endif | ||
95 | |||
54 | static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid) | 96 | static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid) |
55 | { | 97 | { |
56 | int found = 0; | 98 | int found = 0; |
@@ -109,7 +151,10 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, | |||
109 | */ | 151 | */ |
110 | local_irq_save(flags); | 152 | local_irq_save(flags); |
111 | 153 | ||
154 | book3e_tlb_lock(); | ||
155 | |||
112 | if (unlikely(book3e_tlb_exists(ea, mm->context.id))) { | 156 | if (unlikely(book3e_tlb_exists(ea, mm->context.id))) { |
157 | book3e_tlb_unlock(); | ||
113 | local_irq_restore(flags); | 158 | local_irq_restore(flags); |
114 | return; | 159 | return; |
115 | } | 160 | } |
@@ -141,6 +186,7 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, | |||
141 | 186 | ||
142 | asm volatile ("tlbwe"); | 187 | asm volatile ("tlbwe"); |
143 | 188 | ||
189 | book3e_tlb_unlock(); | ||
144 | local_irq_restore(flags); | 190 | local_irq_restore(flags); |
145 | } | 191 | } |
146 | 192 | ||
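The inline assembly in book3e_tlb_lock() above implements a byte-sized test-and-set lock where 0 means unlocked and the owner writes its CPU id + 1. A C rendering of the same protocol, with cmpxchg standing in for the lbarx/stbcx. reservation pair (a sketch only; the names are ours):

	static inline void book3e_tlb_lock_sketch(u8 *lock)
	{
		u8 token = smp_processor_id() + 1;

		for (;;) {
			if (cmpxchg(lock, 0, token) == 0)  /* lbarx/stbcx. pair */
				return;
			while (READ_ONCE(*lock) != 0)      /* plain lbzx spin */
				cpu_relax();
		}
	}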
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c index d94b1af53a93..e2138c7ae70f 100644 --- a/arch/powerpc/mm/hugetlbpage-hash64.c +++ b/arch/powerpc/mm/hugetlbpage-hash64.c | |||
@@ -59,10 +59,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, | |||
59 | new_pte |= _PAGE_DIRTY; | 59 | new_pte |= _PAGE_DIRTY; |
60 | } while(old_pte != __cmpxchg_u64((unsigned long *)ptep, | 60 | } while(old_pte != __cmpxchg_u64((unsigned long *)ptep, |
61 | old_pte, new_pte)); | 61 | old_pte, new_pte)); |
62 | rflags = htab_convert_pte_flags(new_pte); | ||
62 | 63 | ||
63 | rflags = 0x2 | (!(new_pte & _PAGE_RW)); | ||
64 | /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */ | ||
65 | rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N); | ||
66 | sz = ((1UL) << shift); | 64 | sz = ((1UL) << shift); |
67 | if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) | 65 | if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) |
68 | /* No CPU has hugepages but lacks no execute, so we | 66 | /* No CPU has hugepages but lacks no execute, so we |
@@ -91,18 +89,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, | |||
91 | pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; | 89 | pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; |
92 | 90 | ||
93 | /* clear HPTE slot information in new PTE */ | 91 |
94 | #ifdef CONFIG_PPC_64K_PAGES | ||
95 | new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0; | ||
96 | #else | ||
97 | new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE; | 92 | new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE; |
98 | #endif | ||
99 | /* Add in WIMG bits */ | ||
100 | rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | | ||
101 | _PAGE_COHERENT | _PAGE_GUARDED)); | ||
102 | /* | ||
103 | * enable the memory coherence always | ||
104 | */ | ||
105 | rflags |= HPTE_R_M; | ||
106 | 93 | ||
107 | slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0, | 94 | slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0, |
108 | mmu_psize, ssize); | 95 | mmu_psize, ssize); |
@@ -127,3 +114,21 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, | |||
127 | *ptep = __pte(new_pte & ~_PAGE_BUSY); | 114 | *ptep = __pte(new_pte & ~_PAGE_BUSY); |
128 | return 0; | 115 | return 0; |
129 | } | 116 | } |
117 | |||
118 | #if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_DEBUG_VM) | ||
119 | /* | ||
120 | * This enables us to catch the wrong page directory format | ||
121 | * Moved here so that we can use WARN() in the call. | ||
122 | */ | ||
123 | int hugepd_ok(hugepd_t hpd) | ||
124 | { | ||
125 | bool is_hugepd; | ||
126 | |||
127 | /* | ||
128 | * We should not find this format in page directory, warn otherwise. | ||
129 | */ | ||
130 | is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)); | ||
131 | WARN(is_hugepd, "Found wrong page directory format\n"); | ||
132 | return 0; | ||
133 | } | ||
134 | #endif | ||
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 9833fee493ec..61b8b7ccea4f 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
@@ -53,78 +53,6 @@ static unsigned nr_gpages; | |||
53 | 53 | ||
54 | #define hugepd_none(hpd) ((hpd).pd == 0) | 54 | #define hugepd_none(hpd) ((hpd).pd == 0) |
55 | 55 | ||
56 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
57 | /* | ||
58 | * At this point we do the placement change only for BOOK3S 64. This would | ||
59 | * possibly work on other subarchs. | ||
60 | */ | ||
61 | |||
62 | /* | ||
63 | * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have | ||
64 | * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD; | ||
65 | * | ||
66 | * Defined in such a way that we can optimize away code block at build time | ||
67 | * if CONFIG_HUGETLB_PAGE=n. | ||
68 | */ | ||
69 | int pmd_huge(pmd_t pmd) | ||
70 | { | ||
71 | /* | ||
72 | * leaf pte for huge page, bottom two bits != 00 | ||
73 | */ | ||
74 | return ((pmd_val(pmd) & 0x3) != 0x0); | ||
75 | } | ||
76 | |||
77 | int pud_huge(pud_t pud) | ||
78 | { | ||
79 | /* | ||
80 | * leaf pte for huge page, bottom two bits != 00 | ||
81 | */ | ||
82 | return ((pud_val(pud) & 0x3) != 0x0); | ||
83 | } | ||
84 | |||
85 | int pgd_huge(pgd_t pgd) | ||
86 | { | ||
87 | /* | ||
88 | * leaf pte for huge page, bottom two bits != 00 | ||
89 | */ | ||
90 | return ((pgd_val(pgd) & 0x3) != 0x0); | ||
91 | } | ||
92 | |||
93 | #if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_DEBUG_VM) | ||
94 | /* | ||
95 | * This enables us to catch the wrong page directory format | ||
96 | * Moved here so that we can use WARN() in the call. | ||
97 | */ | ||
98 | int hugepd_ok(hugepd_t hpd) | ||
99 | { | ||
100 | bool is_hugepd; | ||
101 | |||
102 | /* | ||
103 | * We should not find this format in page directory, warn otherwise. | ||
104 | */ | ||
105 | is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)); | ||
106 | WARN(is_hugepd, "Found wrong page directory format\n"); | ||
107 | return 0; | ||
108 | } | ||
109 | #endif | ||
110 | |||
111 | #else | ||
112 | int pmd_huge(pmd_t pmd) | ||
113 | { | ||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | int pud_huge(pud_t pud) | ||
118 | { | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | int pgd_huge(pgd_t pgd) | ||
123 | { | ||
124 | return 0; | ||
125 | } | ||
126 | #endif | ||
127 | |||
128 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | 56 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) |
129 | { | 57 | { |
130 | /* Only called for hugetlbfs pages, hence can ignore THP */ | 58 | /* Only called for hugetlbfs pages, hence can ignore THP */ |
@@ -966,8 +894,8 @@ void flush_dcache_icache_hugepage(struct page *page) | |||
966 | * We have 4 cases for pgds and pmds: | 894 | * We have 4 cases for pgds and pmds: |
967 | * (1) invalid (all zeroes) | 895 | * (1) invalid (all zeroes) |
968 | * (2) pointer to next table, as normal; bottom 6 bits == 0 | 896 | * (2) pointer to next table, as normal; bottom 6 bits == 0 |
969 | * (3) leaf pte for huge page, bottom two bits != 00 | 897 | * (3) leaf pte for huge page _PAGE_PTE set |
970 | * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table | 898 | * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table |
971 | * | 899 | * |
972 | * So long as we atomically load page table pointers we are safe against teardown, | 900 | * So long as we atomically load page table pointers we are safe against teardown, |
973 | * we can follow the address down to the page and take a ref on it. | 901 |
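Given the updated four-case comment, a classifier sketch; the tests follow the comment above and the hugepd_ok() hunk earlier in this diff, and the helper itself is illustrative only:

	static int classify_pgd_pmd_sketch(unsigned long val)
	{
		if (val == 0)
			return 1;	/* invalid */
		if (val & _PAGE_PTE)
			return 3;	/* leaf pte for a huge page */
		if (val & HUGEPD_SHIFT_MASK)
			return 4;	/* hugepd pointer, table size in bits [2..6] */
		return 2;		/* pointer to next table, bottom 6 bits clear */
	}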
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index d747dd7bc90b..379a6a90644b 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c | |||
@@ -87,11 +87,7 @@ static void pgd_ctor(void *addr) | |||
87 | 87 | ||
88 | static void pmd_ctor(void *addr) | 88 | static void pmd_ctor(void *addr) |
89 | { | 89 | { |
90 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
91 | memset(addr, 0, PMD_TABLE_SIZE * 2); | ||
92 | #else | ||
93 | memset(addr, 0, PMD_TABLE_SIZE); | 90 | memset(addr, 0, PMD_TABLE_SIZE); |
94 | #endif | ||
95 | } | 91 | } |
96 | 92 | ||
97 | struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE]; | 93 | struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE]; |
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 83dfcb55ffef..83dfd7925c72 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c | |||
@@ -179,6 +179,10 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, | |||
179 | */ | 179 | */ |
180 | VM_WARN_ON((pte_val(*ptep) & (_PAGE_PRESENT | _PAGE_USER)) == | 180 | VM_WARN_ON((pte_val(*ptep) & (_PAGE_PRESENT | _PAGE_USER)) == |
181 | (_PAGE_PRESENT | _PAGE_USER)); | 181 | (_PAGE_PRESENT | _PAGE_USER)); |
182 | /* | ||
183 | * Add the pte bit when trying to set a pte | ||
184 | */ | ||
185 | pte = __pte(pte_val(pte) | _PAGE_PTE); | ||
182 | 186 | ||
183 | /* Note: mm->context.id might not yet have been assigned as | 187 | /* Note: mm->context.id might not yet have been assigned as |
184 | * this context might not have been activated yet when this | 188 | * this context might not have been activated yet when this |
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index e92cb2146b18..ea6bc31debb0 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c | |||
@@ -359,7 +359,7 @@ struct page *pud_page(pud_t pud) | |||
359 | struct page *pmd_page(pmd_t pmd) | 359 | struct page *pmd_page(pmd_t pmd) |
360 | { | 360 | { |
361 | if (pmd_trans_huge(pmd) || pmd_huge(pmd)) | 361 | if (pmd_trans_huge(pmd) || pmd_huge(pmd)) |
362 | return pfn_to_page(pmd_pfn(pmd)); | 362 | return pte_page(pmd_pte(pmd)); |
363 | return virt_to_page(pmd_page_vaddr(pmd)); | 363 | return virt_to_page(pmd_page_vaddr(pmd)); |
364 | } | 364 | } |
365 | 365 | ||
@@ -625,7 +625,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, | |||
625 | "1: ldarx %0,0,%3\n\ | 625 | "1: ldarx %0,0,%3\n\ |
626 | andi. %1,%0,%6\n\ | 626 | andi. %1,%0,%6\n\ |
627 | bne- 1b \n\ | 627 | bne- 1b \n\ |
628 | ori %1,%0,%4 \n\ | 628 | oris %1,%0,%4@h \n\ |
629 | stdcx. %1,0,%3 \n\ | 629 | stdcx. %1,0,%3 \n\ |
630 | bne- 1b" | 630 | bne- 1b" |
631 | : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) | 631 | : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) |
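The ori -> oris change above is forced by the flag's new position: PowerPC or-immediate instructions only reach 16 bits at a time, so a bit in the upper half of the low word needs the shifted form:

	/* ori  rT,rS,UIMM : rT = rS | UIMM           -- bits 0..15
	 * oris rT,rS,UIMM : rT = rS | (UIMM << 16)   -- bits 16..31
	 * hence "oris %1,%0,%4@h" to set a flag that now lives above
	 * bit 15, still inside the ldarx/stdcx. retry loop. */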
@@ -759,22 +759,15 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, | |||
759 | 759 | ||
760 | static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot) | 760 | static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot) |
761 | { | 761 | { |
762 | pmd_val(pmd) |= pgprot_val(pgprot); | 762 | return __pmd(pmd_val(pmd) | pgprot_val(pgprot)); |
763 | return pmd; | ||
764 | } | 763 | } |
765 | 764 | ||
766 | pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot) | 765 | pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot) |
767 | { | 766 | { |
768 | pmd_t pmd; | 767 | unsigned long pmdv; |
769 | /* | 768 | |
770 | * For a valid pte, we would have _PAGE_PRESENT always | 769 | pmdv = pfn << PTE_RPN_SHIFT; |
771 | * set. We use this to check THP page at pmd level. | 770 | return pmd_set_protbits(__pmd(pmdv), pgprot); |
772 | * leaf pte for huge page, bottom two bits != 00 | ||
773 | */ | ||
774 | pmd_val(pmd) = pfn << PTE_RPN_SHIFT; | ||
775 | pmd_val(pmd) |= _PAGE_THP_HUGE; | ||
776 | pmd = pmd_set_protbits(pmd, pgprot); | ||
777 | return pmd; | ||
778 | } | 771 | } |
779 | 772 | ||
780 | pmd_t mk_pmd(struct page *page, pgprot_t pgprot) | 773 | pmd_t mk_pmd(struct page *page, pgprot_t pgprot) |
@@ -784,10 +777,11 @@ pmd_t mk_pmd(struct page *page, pgprot_t pgprot) | |||
784 | 777 | ||
785 | pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) | 778 | pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) |
786 | { | 779 | { |
780 | unsigned long pmdv; | ||
787 | 781 | ||
788 | pmd_val(pmd) &= _HPAGE_CHG_MASK; | 782 | pmdv = pmd_val(pmd); |
789 | pmd = pmd_set_protbits(pmd, newprot); | 783 | pmdv &= _HPAGE_CHG_MASK; |
790 | return pmd; | 784 | return pmd_set_protbits(__pmd(pmdv), newprot); |
791 | } | 785 | } |
792 | 786 | ||
793 | /* | 787 | /* |
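The pfn_pmd()/pmd_modify() rework above follows a single pattern: stop writing through pmd_val() as an lvalue and construct a fresh value with __pmd(). A before/after sketch of the idiom:

	/* before: mutate in place (requires pmd_val() to be an lvalue) */
	pmd_val(pmd) |= pgprot_val(pgprot);
	return pmd;

	/* after: build functionally -- also works when pmd_t is an opaque
	 * struct and pmd_val() is a pure accessor */
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));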
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 515730e499fe..825b6873391f 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c | |||
@@ -228,7 +228,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | |||
228 | asm volatile("slbie %0" : : "r" (slbie_data)); | 228 | asm volatile("slbie %0" : : "r" (slbie_data)); |
229 | 229 | ||
230 | get_paca()->slb_cache_ptr = 0; | 230 | get_paca()->slb_cache_ptr = 0; |
231 | get_paca()->context = mm->context; | 231 | copy_mm_to_paca(&mm->context); |
232 | 232 | ||
233 | /* | 233 | /* |
234 | * preload some userspace segments into the SLB. | 234 | * preload some userspace segments into the SLB. |
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 0f432a702870..42954f0b47ac 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c | |||
@@ -185,8 +185,7 @@ static void slice_flush_segments(void *parm) | |||
185 | if (mm != current->active_mm) | 185 | if (mm != current->active_mm) |
186 | return; | 186 | return; |
187 | 187 | ||
188 | /* update the paca copy of the context struct */ | 188 | copy_mm_to_paca(¤t->active_mm->context); |
189 | get_paca()->context = current->active_mm->context; | ||
190 | 189 | ||
191 | local_irq_save(flags); | 190 | local_irq_save(flags); |
192 | slb_flush_and_rebolt(); | 191 | slb_flush_and_rebolt(); |
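slb.c, slice.c and the hash_utils_64.c hunks earlier all trade the whole-struct copy get_paca()->context = mm->context for copy_mm_to_paca(). The helper's definition is not part of this excerpt; a hypothetical sketch, inferred only from the paca mm_ctx_* fields visible in get_paca_psize() above:

	/* Hypothetical -- copy only the fields the SLB/hash code reads */
	static inline void copy_mm_to_paca_sketch(mm_context_t *context)
	{
	#ifdef CONFIG_PPC_MM_SLICES
		get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize;
		memcpy(&get_paca()->mm_ctx_high_slices_psize,
		       &context->high_slices_psize,
		       sizeof(get_paca()->mm_ctx_high_slices_psize));
	#else
		get_paca()->mm_ctx_user_psize = context->user_psize;
	#endif
	}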
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c index bf4c4473abb9..4bc6bbbe9ada 100644 --- a/arch/powerpc/platforms/83xx/km83xx.c +++ b/arch/powerpc/platforms/83xx/km83xx.c | |||
@@ -37,8 +37,8 @@ | |||
37 | #include <asm/udbg.h> | 37 | #include <asm/udbg.h> |
38 | #include <sysdev/fsl_soc.h> | 38 | #include <sysdev/fsl_soc.h> |
39 | #include <sysdev/fsl_pci.h> | 39 | #include <sysdev/fsl_pci.h> |
40 | #include <asm/qe.h> | 40 | #include <soc/fsl/qe/qe.h> |
41 | #include <asm/qe_ic.h> | 41 | #include <soc/fsl/qe/qe_ic.h> |
42 | 42 | ||
43 | #include "mpc83xx.h" | 43 | #include "mpc83xx.h" |
44 | 44 | ||
@@ -136,8 +136,6 @@ static void __init mpc83xx_km_setup_arch(void) | |||
136 | mpc83xx_setup_pci(); | 136 | mpc83xx_setup_pci(); |
137 | 137 | ||
138 | #ifdef CONFIG_QUICC_ENGINE | 138 | #ifdef CONFIG_QUICC_ENGINE |
139 | qe_reset(); | ||
140 | |||
141 | np = of_find_node_by_name(NULL, "par_io"); | 139 | np = of_find_node_by_name(NULL, "par_io"); |
142 | if (np != NULL) { | 140 | if (np != NULL) { |
143 | par_io_init(np); | 141 | par_io_init(np); |
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c index ef9d01a049c1..7e923cad56cf 100644 --- a/arch/powerpc/platforms/83xx/misc.c +++ b/arch/powerpc/platforms/83xx/misc.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <asm/io.h> | 17 | #include <asm/io.h> |
18 | #include <asm/hw_irq.h> | 18 | #include <asm/hw_irq.h> |
19 | #include <asm/ipic.h> | 19 | #include <asm/ipic.h> |
20 | #include <asm/qe_ic.h> | 20 | #include <soc/fsl/qe/qe_ic.h> |
21 | #include <sysdev/fsl_soc.h> | 21 | #include <sysdev/fsl_soc.h> |
22 | #include <sysdev/fsl_pci.h> | 22 | #include <sysdev/fsl_pci.h> |
23 | 23 | ||
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c index 8d762203eeff..a973b2ae5df6 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c | |||
@@ -36,8 +36,8 @@ | |||
36 | #include <asm/udbg.h> | 36 | #include <asm/udbg.h> |
37 | #include <sysdev/fsl_soc.h> | 37 | #include <sysdev/fsl_soc.h> |
38 | #include <sysdev/fsl_pci.h> | 38 | #include <sysdev/fsl_pci.h> |
39 | #include <asm/qe.h> | 39 | #include <soc/fsl/qe/qe.h> |
40 | #include <asm/qe_ic.h> | 40 | #include <soc/fsl/qe/qe_ic.h> |
41 | 41 | ||
42 | #include "mpc83xx.h" | 42 | #include "mpc83xx.h" |
43 | 43 | ||
@@ -74,8 +74,6 @@ static void __init mpc832x_sys_setup_arch(void) | |||
74 | mpc83xx_setup_pci(); | 74 | mpc83xx_setup_pci(); |
75 | 75 | ||
76 | #ifdef CONFIG_QUICC_ENGINE | 76 | #ifdef CONFIG_QUICC_ENGINE |
77 | qe_reset(); | ||
78 | |||
79 | if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { | 77 | if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { |
80 | par_io_init(np); | 78 | par_io_init(np); |
81 | of_node_put(np); | 79 | of_node_put(np); |
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c index eff5baabc3fb..ea2b87d202ca 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c | |||
@@ -25,8 +25,8 @@ | |||
25 | #include <asm/time.h> | 25 | #include <asm/time.h> |
26 | #include <asm/ipic.h> | 26 | #include <asm/ipic.h> |
27 | #include <asm/udbg.h> | 27 | #include <asm/udbg.h> |
28 | #include <asm/qe.h> | 28 | #include <soc/fsl/qe/qe.h> |
29 | #include <asm/qe_ic.h> | 29 | #include <soc/fsl/qe/qe_ic.h> |
30 | #include <sysdev/fsl_soc.h> | 30 | #include <sysdev/fsl_soc.h> |
31 | #include <sysdev/fsl_pci.h> | 31 | #include <sysdev/fsl_pci.h> |
32 | 32 | ||
@@ -203,8 +203,6 @@ static void __init mpc832x_rdb_setup_arch(void) | |||
203 | mpc83xx_setup_pci(); | 203 | mpc83xx_setup_pci(); |
204 | 204 | ||
205 | #ifdef CONFIG_QUICC_ENGINE | 205 | #ifdef CONFIG_QUICC_ENGINE |
206 | qe_reset(); | ||
207 | |||
208 | if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { | 206 | if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { |
209 | par_io_init(np); | 207 | par_io_init(np); |
210 | of_node_put(np); | 208 | of_node_put(np); |
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c index 1a26d2f83401..dd70b85f56d4 100644 --- a/arch/powerpc/platforms/83xx/mpc836x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c | |||
@@ -44,8 +44,8 @@ | |||
44 | #include <sysdev/fsl_soc.h> | 44 | #include <sysdev/fsl_soc.h> |
45 | #include <sysdev/fsl_pci.h> | 45 | #include <sysdev/fsl_pci.h> |
46 | #include <sysdev/simple_gpio.h> | 46 | #include <sysdev/simple_gpio.h> |
47 | #include <asm/qe.h> | 47 | #include <soc/fsl/qe/qe.h> |
48 | #include <asm/qe_ic.h> | 48 | #include <soc/fsl/qe/qe_ic.h> |
49 | 49 | ||
50 | #include "mpc83xx.h" | 50 | #include "mpc83xx.h" |
51 | 51 | ||
@@ -82,8 +82,6 @@ static void __init mpc836x_mds_setup_arch(void) | |||
82 | mpc83xx_setup_pci(); | 82 | mpc83xx_setup_pci(); |
83 | 83 | ||
84 | #ifdef CONFIG_QUICC_ENGINE | 84 | #ifdef CONFIG_QUICC_ENGINE |
85 | qe_reset(); | ||
86 | |||
87 | if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { | 85 | if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { |
88 | par_io_init(np); | 86 | par_io_init(np); |
89 | of_node_put(np); | 87 | of_node_put(np); |
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c index b63b42d11d6c..4cd7153a6c88 100644 --- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c +++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c | |||
@@ -20,8 +20,8 @@ | |||
20 | #include <asm/time.h> | 20 | #include <asm/time.h> |
21 | #include <asm/ipic.h> | 21 | #include <asm/ipic.h> |
22 | #include <asm/udbg.h> | 22 | #include <asm/udbg.h> |
23 | #include <asm/qe.h> | 23 | #include <soc/fsl/qe/qe.h> |
24 | #include <asm/qe_ic.h> | 24 | #include <soc/fsl/qe/qe_ic.h> |
25 | #include <sysdev/fsl_soc.h> | 25 | #include <sysdev/fsl_soc.h> |
26 | #include <sysdev/fsl_pci.h> | 26 | #include <sysdev/fsl_pci.h> |
27 | 27 | ||
@@ -35,9 +35,6 @@ static void __init mpc836x_rdk_setup_arch(void) | |||
35 | ppc_md.progress("mpc836x_rdk_setup_arch()", 0); | 35 | ppc_md.progress("mpc836x_rdk_setup_arch()", 0); |
36 | 36 | ||
37 | mpc83xx_setup_pci(); | 37 | mpc83xx_setup_pci(); |
38 | #ifdef CONFIG_QUICC_ENGINE | ||
39 | qe_reset(); | ||
40 | #endif | ||
41 | } | 38 | } |
42 | 39 | ||
43 | /* | 40 | /* |
diff --git a/arch/powerpc/platforms/85xx/bsc913x_qds.c b/arch/powerpc/platforms/85xx/bsc913x_qds.c index f0927e58af25..dcfafd6b91ee 100644 --- a/arch/powerpc/platforms/85xx/bsc913x_qds.c +++ b/arch/powerpc/platforms/85xx/bsc913x_qds.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/pci.h> | 17 | #include <linux/pci.h> |
18 | #include <asm/mpic.h> | 18 | #include <asm/mpic.h> |
19 | #include <sysdev/fsl_soc.h> | 19 | #include <sysdev/fsl_soc.h> |
20 | #include <sysdev/fsl_pci.h> | ||
20 | #include <asm/udbg.h> | 21 | #include <asm/udbg.h> |
21 | 22 | ||
22 | #include "mpc85xx.h" | 23 | #include "mpc85xx.h" |
@@ -46,10 +47,12 @@ static void __init bsc913x_qds_setup_arch(void) | |||
46 | mpc85xx_smp_init(); | 47 | mpc85xx_smp_init(); |
47 | #endif | 48 | #endif |
48 | 49 | ||
50 | fsl_pci_assign_primary(); | ||
51 | |||
49 | pr_info("bsc913x board from Freescale Semiconductor\n"); | 52 | pr_info("bsc913x board from Freescale Semiconductor\n"); |
50 | } | 53 | } |
51 | 54 | ||
52 | machine_device_initcall(bsc9132_qds, mpc85xx_common_publish_devices); | 55 | machine_arch_initcall(bsc9132_qds, mpc85xx_common_publish_devices); |
53 | 56 | ||
54 | /* | 57 | /* |
55 | * Called very early, device-tree isn't unflattened | 58 | * Called very early, device-tree isn't unflattened |
@@ -67,6 +70,9 @@ define_machine(bsc9132_qds) { | |||
67 | .probe = bsc9132_qds_probe, | 70 | .probe = bsc9132_qds_probe, |
68 | .setup_arch = bsc913x_qds_setup_arch, | 71 | .setup_arch = bsc913x_qds_setup_arch, |
69 | .init_IRQ = bsc913x_qds_pic_init, | 72 | .init_IRQ = bsc913x_qds_pic_init, |
73 | #ifdef CONFIG_PCI | ||
74 | .pcibios_fixup_bus = fsl_pcibios_fixup_bus, | ||
75 | #endif | ||
70 | .get_irq = mpic_get_irq, | 76 | .get_irq = mpic_get_irq, |
71 | .restart = fsl_rstcr_restart, | 77 | .restart = fsl_rstcr_restart, |
72 | .calibrate_decr = generic_calibrate_decr, | 78 | .calibrate_decr = generic_calibrate_decr, |
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c index 23791de7b688..949f22c86e61 100644 --- a/arch/powerpc/platforms/85xx/common.c +++ b/arch/powerpc/platforms/85xx/common.c | |||
@@ -9,7 +9,7 @@ | |||
9 | #include <linux/of_irq.h> | 9 | #include <linux/of_irq.h> |
10 | #include <linux/of_platform.h> | 10 | #include <linux/of_platform.h> |
11 | 11 | ||
12 | #include <asm/qe.h> | 12 | #include <soc/fsl/qe/qe.h> |
13 | #include <sysdev/cpm2_pic.h> | 13 | #include <sysdev/cpm2_pic.h> |
14 | 14 | ||
15 | #include "mpc85xx.h" | 15 | #include "mpc85xx.h" |
@@ -105,7 +105,6 @@ void __init mpc85xx_qe_init(void) | |||
105 | return; | 105 | return; |
106 | } | 106 | } |
107 | 107 | ||
108 | qe_reset(); | ||
109 | of_node_put(np); | 108 | of_node_put(np); |
110 | 109 | ||
111 | } | 110 | } |
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c index 46d05c94add6..a2b0bc859de0 100644 --- a/arch/powerpc/platforms/85xx/corenet_generic.c +++ b/arch/powerpc/platforms/85xx/corenet_generic.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include <asm/udbg.h> | 27 | #include <asm/udbg.h> |
28 | #include <asm/mpic.h> | 28 | #include <asm/mpic.h> |
29 | #include <asm/ehv_pic.h> | 29 | #include <asm/ehv_pic.h> |
30 | #include <asm/qe_ic.h> | 30 | #include <soc/fsl/qe/qe_ic.h> |
31 | 31 | ||
32 | #include <linux/of_platform.h> | 32 | #include <linux/of_platform.h> |
33 | #include <sysdev/fsl_soc.h> | 33 | #include <sysdev/fsl_soc.h> |
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c index 7d12a19aa7ee..de72a5f464b1 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c | |||
@@ -36,17 +36,6 @@ | |||
36 | 36 | ||
37 | #include "mpc85xx.h" | 37 | #include "mpc85xx.h" |
38 | 38 | ||
39 | #ifdef CONFIG_PCI | ||
40 | static int mpc85xx_exclude_device(struct pci_controller *hose, | ||
41 | u_char bus, u_char devfn) | ||
42 | { | ||
43 | if (bus == 0 && PCI_SLOT(devfn) == 0) | ||
44 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
45 | else | ||
46 | return PCIBIOS_SUCCESSFUL; | ||
47 | } | ||
48 | #endif /* CONFIG_PCI */ | ||
49 | |||
50 | static void __init mpc85xx_ads_pic_init(void) | 39 | static void __init mpc85xx_ads_pic_init(void) |
51 | { | 40 | { |
52 | struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, | 41 | struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, |
@@ -145,10 +134,6 @@ static void __init mpc85xx_ads_setup_arch(void) | |||
145 | init_ioports(); | 134 | init_ioports(); |
146 | #endif | 135 | #endif |
147 | 136 | ||
148 | #ifdef CONFIG_PCI | ||
149 | ppc_md.pci_exclude_device = mpc85xx_exclude_device; | ||
150 | #endif | ||
151 | |||
152 | fsl_pci_assign_primary(); | 137 | fsl_pci_assign_primary(); |
153 | } | 138 | } |
154 | 139 | ||
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index f0be439ceaaa..f61cbe235581 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c | |||
@@ -48,8 +48,8 @@ | |||
48 | #include <sysdev/fsl_soc.h> | 48 | #include <sysdev/fsl_soc.h> |
49 | #include <sysdev/fsl_pci.h> | 49 | #include <sysdev/fsl_pci.h> |
50 | #include <sysdev/simple_gpio.h> | 50 | #include <sysdev/simple_gpio.h> |
51 | #include <asm/qe.h> | 51 | #include <soc/fsl/qe/qe.h> |
52 | #include <asm/qe_ic.h> | 52 | #include <soc/fsl/qe/qe_ic.h> |
53 | #include <asm/mpic.h> | 53 | #include <asm/mpic.h> |
54 | #include <asm/swiotlb.h> | 54 | #include <asm/swiotlb.h> |
55 | #include "smp.h" | 55 | #include "smp.h" |
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c index 50dcc00a0f5a..3f4dad133338 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c | |||
@@ -26,8 +26,8 @@ | |||
26 | #include <asm/prom.h> | 26 | #include <asm/prom.h> |
27 | #include <asm/udbg.h> | 27 | #include <asm/udbg.h> |
28 | #include <asm/mpic.h> | 28 | #include <asm/mpic.h> |
29 | #include <asm/qe.h> | 29 | #include <soc/fsl/qe/qe.h> |
30 | #include <asm/qe_ic.h> | 30 | #include <soc/fsl/qe/qe_ic.h> |
31 | 31 | ||
32 | #include <sysdev/fsl_soc.h> | 32 | #include <sysdev/fsl_soc.h> |
33 | #include <sysdev/fsl_pci.h> | 33 | #include <sysdev/fsl_pci.h> |
diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c index 892e613519cc..71bc255b4324 100644 --- a/arch/powerpc/platforms/85xx/twr_p102x.c +++ b/arch/powerpc/platforms/85xx/twr_p102x.c | |||
@@ -22,8 +22,8 @@ | |||
22 | #include <asm/pci-bridge.h> | 22 | #include <asm/pci-bridge.h> |
23 | #include <asm/udbg.h> | 23 | #include <asm/udbg.h> |
24 | #include <asm/mpic.h> | 24 | #include <asm/mpic.h> |
25 | #include <asm/qe.h> | 25 | #include <soc/fsl/qe/qe.h> |
26 | #include <asm/qe_ic.h> | 26 | #include <soc/fsl/qe/qe_ic.h> |
27 | 27 | ||
28 | #include <sysdev/fsl_soc.h> | 28 | #include <sysdev/fsl_soc.h> |
29 | #include <sysdev/fsl_pci.h> | 29 | #include <sysdev/fsl_pci.h> |
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index b7f9c408bf24..46a3533d3acb 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig | |||
@@ -272,17 +272,6 @@ config TAU_AVERAGE | |||
272 | 272 | ||
273 | If in doubt, say N here. | 273 | If in doubt, say N here. |
274 | 274 | ||
275 | config QUICC_ENGINE | ||
276 | bool "Freescale QUICC Engine (QE) Support" | ||
277 | depends on FSL_SOC && PPC32 | ||
278 | select PPC_LIB_RHEAP | ||
279 | select CRC32 | ||
280 | help | ||
281 | The QUICC Engine (QE) is a new generation of communications | ||
282 | coprocessors on Freescale embedded CPUs (akin to CPM in older chips). | ||
283 | Selecting this option means that you wish to build a kernel | ||
284 | for a machine with a QE coprocessor. | ||
285 | |||
286 | config QE_GPIO | 275 | config QE_GPIO |
287 | bool "QE GPIO support" | 276 | bool "QE GPIO support" |
288 | depends on QUICC_ENGINE | 277 | depends on QUICC_ENGINE |
@@ -295,7 +284,6 @@ config CPM2 | |||
295 | bool "Enable support for the CPM2 (Communications Processor Module)" | 284 | bool "Enable support for the CPM2 (Communications Processor Module)" |
296 | depends on (FSL_SOC_BOOKE && PPC32) || 8260 | 285 | depends on (FSL_SOC_BOOKE && PPC32) || 8260 |
297 | select CPM | 286 | select CPM |
298 | select PPC_LIB_RHEAP | ||
299 | select PPC_PCI_CHOICE | 287 | select PPC_PCI_CHOICE |
300 | select ARCH_REQUIRE_GPIOLIB | 288 | select ARCH_REQUIRE_GPIOLIB |
301 | help | 289 | help |
@@ -325,6 +313,7 @@ config FSL_ULI1575 | |||
325 | 313 | ||
326 | config CPM | 314 | config CPM |
327 | bool | 315 | bool |
316 | select GENERIC_ALLOCATOR | ||
328 | 317 | ||
329 | config OF_RTC | 318 | config OF_RTC |
330 | bool | 319 | bool |
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 429fc59d2a47..d9088f0b8fcc 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig | |||
@@ -33,11 +33,6 @@ config PPC_IBM_CELL_BLADE | |||
33 | select PPC_UDBG_16550 | 33 | select PPC_UDBG_16550 |
34 | select UDBG_RTAS_CONSOLE | 34 | select UDBG_RTAS_CONSOLE |
35 | 35 | ||
36 | config PPC_CELL_QPACE | ||
37 | bool "IBM Cell - QPACE" | ||
38 | depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN | ||
39 | select PPC_CELL_COMMON | ||
40 | |||
41 | config AXON_MSI | 36 | config AXON_MSI |
42 | bool | 37 | bool |
43 | depends on PPC_IBM_CELL_BLADE && PCI_MSI | 38 | depends on PPC_IBM_CELL_BLADE && PCI_MSI |
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile index 34699bddfddd..00464305763d 100644 --- a/arch/powerpc/platforms/cell/Makefile +++ b/arch/powerpc/platforms/cell/Makefile | |||
@@ -11,7 +11,6 @@ obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o | |||
11 | 11 | ||
12 | ifeq ($(CONFIG_SMP),y) | 12 | ifeq ($(CONFIG_SMP),y) |
13 | obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o | 13 | obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o |
14 | obj-$(CONFIG_PPC_CELL_QPACE) += smp.o | ||
15 | endif | 14 | endif |
16 | 15 | ||
17 | # needed only when building loadable spufs.ko | 16 | # needed only when building loadable spufs.ko |
@@ -26,6 +25,3 @@ obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \ | |||
26 | spufs/ | 25 | spufs/ |
27 | 26 | ||
28 | obj-$(CONFIG_AXON_MSI) += axon_msi.o | 27 | obj-$(CONFIG_AXON_MSI) += axon_msi.o |
29 | |||
30 | # qpace setup | ||
31 | obj-$(CONFIG_PPC_CELL_QPACE) += qpace_setup.o | ||
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c deleted file mode 100644 index d328140dc6f5..000000000000 --- a/arch/powerpc/platforms/cell/qpace_setup.c +++ /dev/null | |||
@@ -1,148 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/powerpc/platforms/cell/qpace_setup.c | ||
3 | * | ||
4 | * Copyright (C) 1995 Linus Torvalds | ||
5 | * Adapted from 'alpha' version by Gary Thomas | ||
6 | * Modified by Cort Dougan (cort@cs.nmt.edu) | ||
7 | * Modified by PPC64 Team, IBM Corp | ||
8 | * Modified by Cell Team, IBM Deutschland Entwicklung GmbH | ||
9 | * Modified by Benjamin Krill <ben@codiert.org>, IBM Corp. | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | */ | ||
16 | |||
17 | #include <linux/sched.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/export.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/irq.h> | ||
23 | #include <linux/console.h> | ||
24 | #include <linux/of_platform.h> | ||
25 | |||
26 | #include <asm/mmu.h> | ||
27 | #include <asm/processor.h> | ||
28 | #include <asm/io.h> | ||
29 | #include <asm/kexec.h> | ||
30 | #include <asm/pgtable.h> | ||
31 | #include <asm/prom.h> | ||
32 | #include <asm/rtas.h> | ||
33 | #include <asm/dma.h> | ||
34 | #include <asm/machdep.h> | ||
35 | #include <asm/time.h> | ||
36 | #include <asm/cputable.h> | ||
37 | #include <asm/irq.h> | ||
38 | #include <asm/spu.h> | ||
39 | #include <asm/spu_priv1.h> | ||
40 | #include <asm/udbg.h> | ||
41 | #include <asm/cell-regs.h> | ||
42 | |||
43 | #include "interrupt.h" | ||
44 | #include "pervasive.h" | ||
45 | #include "ras.h" | ||
46 | |||
47 | static void qpace_show_cpuinfo(struct seq_file *m) | ||
48 | { | ||
49 | struct device_node *root; | ||
50 | const char *model = ""; | ||
51 | |||
52 | root = of_find_node_by_path("/"); | ||
53 | if (root) | ||
54 | model = of_get_property(root, "model", NULL); | ||
55 | seq_printf(m, "machine\t\t: CHRP %s\n", model); | ||
56 | of_node_put(root); | ||
57 | } | ||
58 | |||
59 | static void qpace_progress(char *s, unsigned short hex) | ||
60 | { | ||
61 | printk("*** %04x : %s\n", hex, s ? s : ""); | ||
62 | } | ||
63 | |||
64 | static const struct of_device_id qpace_bus_ids[] __initconst = { | ||
65 | { .type = "soc", }, | ||
66 | { .compatible = "soc", }, | ||
67 | { .type = "spider", }, | ||
68 | { .type = "axon", }, | ||
69 | { .type = "plb5", }, | ||
70 | { .type = "plb4", }, | ||
71 | { .type = "opb", }, | ||
72 | { .type = "ebc", }, | ||
73 | {}, | ||
74 | }; | ||
75 | |||
76 | static int __init qpace_publish_devices(void) | ||
77 | { | ||
78 | int node; | ||
79 | |||
80 | /* Publish OF platform devices for southbridge IOs */ | ||
81 | of_platform_bus_probe(NULL, qpace_bus_ids, NULL); | ||
82 | |||
83 | /* There is no device for the MIC memory controller, thus we create | ||
84 | * a platform device for it to attach the EDAC driver to. | ||
85 | */ | ||
86 | for_each_online_node(node) { | ||
87 | if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL) | ||
88 | continue; | ||
89 | platform_device_register_simple("cbe-mic", node, NULL, 0); | ||
90 | } | ||
91 | |||
92 | return 0; | ||
93 | } | ||
94 | machine_subsys_initcall(qpace, qpace_publish_devices); | ||
95 | |||
96 | static void __init qpace_setup_arch(void) | ||
97 | { | ||
98 | #ifdef CONFIG_SPU_BASE | ||
99 | spu_priv1_ops = &spu_priv1_mmio_ops; | ||
100 | spu_management_ops = &spu_management_of_ops; | ||
101 | #endif | ||
102 | |||
103 | cbe_regs_init(); | ||
104 | |||
105 | #ifdef CONFIG_CBE_RAS | ||
106 | cbe_ras_init(); | ||
107 | #endif | ||
108 | |||
109 | #ifdef CONFIG_SMP | ||
110 | smp_init_cell(); | ||
111 | #endif | ||
112 | |||
113 | /* init to some ~sane value until calibrate_delay() runs */ | ||
114 | loops_per_jiffy = 50000000; | ||
115 | |||
116 | cbe_pervasive_init(); | ||
117 | #ifdef CONFIG_DUMMY_CONSOLE | ||
118 | conswitchp = &dummy_con; | ||
119 | #endif | ||
120 | } | ||
121 | |||
122 | static int __init qpace_probe(void) | ||
123 | { | ||
124 | unsigned long root = of_get_flat_dt_root(); | ||
125 | |||
126 | if (!of_flat_dt_is_compatible(root, "IBM,QPACE")) | ||
127 | return 0; | ||
128 | |||
129 | hpte_init_native(); | ||
130 | pm_power_off = rtas_power_off; | ||
131 | |||
132 | return 1; | ||
133 | } | ||
134 | |||
135 | define_machine(qpace) { | ||
136 | .name = "QPACE", | ||
137 | .probe = qpace_probe, | ||
138 | .setup_arch = qpace_setup_arch, | ||
139 | .show_cpuinfo = qpace_show_cpuinfo, | ||
140 | .restart = rtas_restart, | ||
141 | .halt = rtas_halt, | ||
142 | .get_boot_time = rtas_get_boot_time, | ||
143 | .get_rtc_time = rtas_get_rtc_time, | ||
144 | .set_rtc_time = rtas_set_rtc_time, | ||
145 | .calibrate_decr = generic_calibrate_decr, | ||
146 | .progress = qpace_progress, | ||
147 | .init_IRQ = iic_init_IRQ, | ||
148 | }; | ||
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c index 4ddf769a64e5..9f79004e6d6f 100644 --- a/arch/powerpc/platforms/cell/spufs/run.c +++ b/arch/powerpc/platforms/cell/spufs/run.c | |||
@@ -326,7 +326,7 @@ static int spu_process_callback(struct spu_context *ctx) | |||
326 | spu_ret = -ENOSYS; | 326 | spu_ret = -ENOSYS; |
327 | npc += 4; | 327 | npc += 4; |
328 | 328 | ||
329 | if (s.nr_ret < __NR_syscalls) { | 329 | if (s.nr_ret < NR_syscalls) { |
330 | spu_release(ctx); | 330 | spu_release(ctx); |
331 | /* do actual system call from here */ | 331 | /* do actual system call from here */ |
332 | spu_ret = spu_sys_callback(&s); | 332 | spu_ret = spu_sys_callback(&s); |
diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c index b4a369dac3a8..81799d70a1ee 100644 --- a/arch/powerpc/platforms/maple/time.c +++ b/arch/powerpc/platforms/maple/time.c | |||
@@ -77,7 +77,7 @@ void maple_get_rtc_time(struct rtc_time *tm) | |||
77 | if ((tm->tm_year + 1900) < 1970) | 77 | if ((tm->tm_year + 1900) < 1970) |
78 | tm->tm_year += 100; | 78 | tm->tm_year += 100; |
79 | 79 | ||
80 | GregorianDay(tm); | 80 | tm->tm_wday = -1; |
81 | } | 81 | } |
82 | 82 | ||
83 | int maple_set_rtc_time(struct rtc_time *tm) | 83 | int maple_set_rtc_time(struct rtc_time *tm) |
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c index 76f5013c35e5..c3c9bbb3573a 100644 --- a/arch/powerpc/platforms/powermac/bootx_init.c +++ b/arch/powerpc/platforms/powermac/bootx_init.c | |||
@@ -84,6 +84,7 @@ static void __init bootx_printf(const char *format, ...) | |||
84 | break; | 84 | break; |
85 | } | 85 | } |
86 | } | 86 | } |
87 | va_end(args); | ||
87 | } | 88 | } |
88 | #else /* CONFIG_BOOTX_TEXT */ | 89 | #else /* CONFIG_BOOTX_TEXT */ |
89 | static void __init bootx_printf(const char *format, ...) {} | 90 | static void __init bootx_printf(const char *format, ...) {} |
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 6f4f8b060def..981546345033 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c | |||
@@ -258,13 +258,14 @@ static unsigned int pmac_pic_get_irq(void) | |||
258 | #ifdef CONFIG_XMON | 258 | #ifdef CONFIG_XMON |
259 | static struct irqaction xmon_action = { | 259 | static struct irqaction xmon_action = { |
260 | .handler = xmon_irq, | 260 | .handler = xmon_irq, |
261 | .flags = 0, | 261 | .flags = IRQF_NO_THREAD, |
262 | .name = "NMI - XMON" | 262 | .name = "NMI - XMON" |
263 | }; | 263 | }; |
264 | #endif | 264 | #endif |
265 | 265 | ||
266 | static struct irqaction gatwick_cascade_action = { | 266 | static struct irqaction gatwick_cascade_action = { |
267 | .handler = gatwick_action, | 267 | .handler = gatwick_action, |
268 | .flags = IRQF_NO_THREAD, | ||
268 | .name = "cascade", | 269 | .name = "cascade", |
269 | }; | 270 | }; |
270 | 271 | ||
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index 1c8cdb6250e7..f1516b5ecec9 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile | |||
@@ -2,9 +2,10 @@ obj-y += setup.o opal-wrappers.o opal.o opal-async.o idle.o | |||
2 | obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o | 2 | obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o |
3 | obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o | 3 | obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o |
4 | obj-y += opal-msglog.o opal-hmi.o opal-power.o opal-irqchip.o | 4 | obj-y += opal-msglog.o opal-hmi.o opal-power.o opal-irqchip.o |
5 | obj-y += opal-kmsg.o | ||
5 | 6 | ||
6 | obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o | 7 | obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o |
7 | obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o | 8 | obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o npu-dma.o |
8 | obj-$(CONFIG_EEH) += eeh-powernv.o | 9 | obj-$(CONFIG_EEH) += eeh-powernv.o |
9 | obj-$(CONFIG_PPC_SCOM) += opal-xscom.o | 10 | obj-$(CONFIG_PPC_SCOM) += opal-xscom.o |
10 | obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o | 11 | obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o |
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index e1c90725522a..5f152b95ca0c 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c | |||
@@ -48,8 +48,8 @@ static int pnv_eeh_init(void) | |||
48 | struct pci_controller *hose; | 48 | struct pci_controller *hose; |
49 | struct pnv_phb *phb; | 49 | struct pnv_phb *phb; |
50 | 50 | ||
51 | if (!firmware_has_feature(FW_FEATURE_OPALv3)) { | 51 | if (!firmware_has_feature(FW_FEATURE_OPAL)) { |
52 | pr_warn("%s: OPALv3 is required !\n", | 52 | pr_warn("%s: OPAL is required !\n", |
53 | __func__); | 53 | __func__); |
54 | return -EINVAL; | 54 | return -EINVAL; |
55 | } | 55 | } |
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 59d735d2e5c0..15bfbcd5debc 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c | |||
@@ -242,7 +242,7 @@ static int __init pnv_init_idle_states(void) | |||
242 | if (cpuidle_disable != IDLE_NO_OVERRIDE) | 242 | if (cpuidle_disable != IDLE_NO_OVERRIDE) |
243 | goto out; | 243 | goto out; |
244 | 244 | ||
245 | if (!firmware_has_feature(FW_FEATURE_OPALv3)) | 245 | if (!firmware_has_feature(FW_FEATURE_OPAL)) |
246 | goto out; | 246 | goto out; |
247 | 247 | ||
248 | power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); | 248 | power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); |
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c new file mode 100644 index 000000000000..e85aa900f5c0 --- /dev/null +++ b/arch/powerpc/platforms/powernv/npu-dma.c | |||
@@ -0,0 +1,348 @@ | |||
1 | /* | ||
2 | * This file implements the DMA operations for NVLink devices. The NPU | ||
3 | * devices all point to the same iommu table as the parent PCI device. | ||
4 | * | ||
5 | * Copyright Alistair Popple, IBM Corporation 2015. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of version 2 of the GNU General Public | ||
9 | * License as published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/export.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/memblock.h> | ||
15 | |||
16 | #include <asm/iommu.h> | ||
17 | #include <asm/pnv-pci.h> | ||
18 | #include <asm/msi_bitmap.h> | ||
19 | #include <asm/opal.h> | ||
20 | |||
21 | #include "powernv.h" | ||
22 | #include "pci.h" | ||
23 | |||
24 | /* | ||
25 | * Other types of TCE cache invalidation are not functional in the | ||
26 | * hardware. | ||
27 | */ | ||
28 | #define TCE_KILL_INVAL_ALL PPC_BIT(0) | ||
29 | |||
30 | static struct pci_dev *get_pci_dev(struct device_node *dn) | ||
31 | { | ||
32 | return PCI_DN(dn)->pcidev; | ||
33 | } | ||
34 | |||
35 | /* Given an NPU device, get the associated GPU PCI device. */ | ||
36 | struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev) | ||
37 | { | ||
38 | struct device_node *dn; | ||
39 | struct pci_dev *gpdev; | ||
40 | |||
41 | /* Get the associated GPU PCI device */ | ||
42 | dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0); | ||
43 | if (!dn) | ||
44 | return NULL; | ||
45 | |||
46 | gpdev = get_pci_dev(dn); | ||
47 | of_node_put(dn); | ||
48 | |||
49 | return gpdev; | ||
50 | } | ||
51 | EXPORT_SYMBOL(pnv_pci_get_gpu_dev); | ||
52 | |||
53 | /* Given the real GPU PCI device, get a linked NPU device. */ | ||
54 | struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index) | ||
55 | { | ||
56 | struct device_node *dn; | ||
57 | struct pci_dev *npdev; | ||
58 | |||
59 | /* Get the associated NPU PCI device */ | ||
60 | dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index); | ||
61 | if (!dn) | ||
62 | return NULL; | ||
63 | |||
64 | npdev = get_pci_dev(dn); | ||
65 | of_node_put(dn); | ||
66 | |||
67 | return npdev; | ||
68 | } | ||
69 | EXPORT_SYMBOL(pnv_pci_get_npu_dev); | ||
70 | |||
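Both lookup helpers above follow the usual OF phandle pattern: of_parse_phandle() returns a device_node with its reference count raised, so every successful lookup is balanced by of_node_put() once the node has been used. A minimal sketch of that pattern, assuming a hypothetical lookup_linked_dev() helper and borrowing the PCI_DN() access that get_pci_dev() uses:

#include <linux/of.h>
#include <linux/pci.h>
#include <asm/pci-bridge.h>	/* PCI_DN(), powerpc only */

static struct pci_dev *lookup_linked_dev(struct device *dev,
					 const char *prop, int index)
{
	struct device_node *dn;
	struct pci_dev *linked;

	dn = of_parse_phandle(dev->of_node, prop, index);
	if (!dn)
		return NULL;		/* no such link in the device tree */

	linked = PCI_DN(dn)->pcidev;	/* borrow, as get_pci_dev() does */
	of_node_put(dn);		/* drop the lookup's reference */

	return linked;
}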
71 | #define NPU_DMA_OP_UNSUPPORTED() \ | ||
72 | dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \ | ||
73 | __func__) | ||
74 | |||
75 | static void *dma_npu_alloc(struct device *dev, size_t size, | ||
76 | dma_addr_t *dma_handle, gfp_t flag, | ||
77 | struct dma_attrs *attrs) | ||
78 | { | ||
79 | NPU_DMA_OP_UNSUPPORTED(); | ||
80 | return NULL; | ||
81 | } | ||
82 | |||
83 | static void dma_npu_free(struct device *dev, size_t size, | ||
84 | void *vaddr, dma_addr_t dma_handle, | ||
85 | struct dma_attrs *attrs) | ||
86 | { | ||
87 | NPU_DMA_OP_UNSUPPORTED(); | ||
88 | } | ||
89 | |||
90 | static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page, | ||
91 | unsigned long offset, size_t size, | ||
92 | enum dma_data_direction direction, | ||
93 | struct dma_attrs *attrs) | ||
94 | { | ||
95 | NPU_DMA_OP_UNSUPPORTED(); | ||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist, | ||
100 | int nelems, enum dma_data_direction direction, | ||
101 | struct dma_attrs *attrs) | ||
102 | { | ||
103 | NPU_DMA_OP_UNSUPPORTED(); | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static int dma_npu_dma_supported(struct device *dev, u64 mask) | ||
108 | { | ||
109 | NPU_DMA_OP_UNSUPPORTED(); | ||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static u64 dma_npu_get_required_mask(struct device *dev) | ||
114 | { | ||
115 | NPU_DMA_OP_UNSUPPORTED(); | ||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | struct dma_map_ops dma_npu_ops = { | ||
120 | .map_page = dma_npu_map_page, | ||
121 | .map_sg = dma_npu_map_sg, | ||
122 | .alloc = dma_npu_alloc, | ||
123 | .free = dma_npu_free, | ||
124 | .dma_supported = dma_npu_dma_supported, | ||
125 | .get_required_mask = dma_npu_get_required_mask, | ||
126 | }; | ||
127 | |||
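Every operation in the table above is wired to a stub that warns once and fails, since an NVLink device is never supposed to map memory itself; real DMA setup happens against the GPU's TCE table. A runnable userspace analogue of the warn-once stub-table pattern (all names invented):

#include <stdio.h>
#include <stdbool.h>

struct fake_ops {
	int (*map)(void *dev, void *buf, unsigned long len);
};

static int op_unsupported(void *dev, void *buf, unsigned long len)
{
	static bool warned;

	if (!warned) {			/* mimics dev_err_once() */
		fprintf(stderr, "operation unsupported for NVLink devices\n");
		warned = true;
	}
	return 0;			/* failure value, as map_page returns 0 */
}

static const struct fake_ops fake_dma_ops = { .map = op_unsupported };

int main(void)
{
	fake_dma_ops.map(NULL, NULL, 0);
	fake_dma_ops.map(NULL, NULL, 0);	/* second call stays silent */
	return 0;
}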
128 | /* | ||
129 | * Returns the PE associated with the PCI device of the given | ||
130 | * NPU. Also returns the linked GPU pci device if gpdev != NULL. | ||
131 | */ | ||
132 | static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe, | ||
133 | struct pci_dev **gpdev) | ||
134 | { | ||
135 | struct pnv_phb *phb; | ||
136 | struct pci_controller *hose; | ||
137 | struct pci_dev *pdev; | ||
138 | struct pnv_ioda_pe *pe; | ||
139 | struct pci_dn *pdn; | ||
140 | |||
141 | if (npe->flags & PNV_IODA_PE_PEER) { | ||
142 | pe = npe->peers[0]; | ||
143 | pdev = pe->pdev; | ||
144 | } else { | ||
145 | pdev = pnv_pci_get_gpu_dev(npe->pdev); | ||
146 | if (!pdev) | ||
147 | return NULL; | ||
148 | |||
149 | pdn = pci_get_pdn(pdev); | ||
150 | if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) | ||
151 | return NULL; | ||
152 | |||
153 | hose = pci_bus_to_host(pdev->bus); | ||
154 | phb = hose->private_data; | ||
155 | pe = &phb->ioda.pe_array[pdn->pe_number]; | ||
156 | } | ||
157 | |||
158 | if (gpdev) | ||
159 | *gpdev = pdev; | ||
160 | |||
161 | return pe; | ||
162 | } | ||
163 | |||
164 | void pnv_npu_tce_invalidate_entire(struct pnv_ioda_pe *npe) | ||
165 | { | ||
166 | struct pnv_phb *phb = npe->phb; | ||
167 | |||
168 | if (WARN_ON(phb->type != PNV_PHB_NPU || | ||
169 | !phb->ioda.tce_inval_reg || | ||
170 | !(npe->flags & PNV_IODA_PE_DEV))) | ||
171 | return; | ||
172 | |||
173 | mb(); /* Ensure previous TCE table stores are visible */ | ||
174 | __raw_writeq(cpu_to_be64(TCE_KILL_INVAL_ALL), | ||
175 | phb->ioda.tce_inval_reg); | ||
176 | } | ||
177 | |||
178 | void pnv_npu_tce_invalidate(struct pnv_ioda_pe *npe, | ||
179 | struct iommu_table *tbl, | ||
180 | unsigned long index, | ||
181 | unsigned long npages, | ||
182 | bool rm) | ||
183 | { | ||
184 | struct pnv_phb *phb = npe->phb; | ||
185 | |||
186 | /* We can only invalidate the whole cache on the NPU */ | ||
187 | unsigned long val = TCE_KILL_INVAL_ALL; | ||
188 | |||
189 | if (WARN_ON(phb->type != PNV_PHB_NPU || | ||
190 | !phb->ioda.tce_inval_reg || | ||
191 | !(npe->flags & PNV_IODA_PE_DEV))) | ||
192 | return; | ||
193 | |||
194 | mb(); /* Ensure previous TCE table stores are visible */ | ||
195 | if (rm) | ||
196 | __raw_rm_writeq(cpu_to_be64(val), | ||
197 | (__be64 __iomem *) phb->ioda.tce_inval_reg_phys); | ||
198 | else | ||
199 | __raw_writeq(cpu_to_be64(val), | ||
200 | phb->ioda.tce_inval_reg); | ||
201 | } | ||
202 | |||
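Both invalidation paths above obey the same ordering rule: TCE table updates are plain memory stores, so a full barrier must publish them before the MMIO write that tells the NPU to re-read the table. Distilled into one kernel-style helper (a sketch, reusing the TCE_KILL_INVAL_ALL definition from earlier in this file):

#include <asm/io.h>
#include <asm/byteorder.h>

static void tce_kill_all(__be64 __iomem *kill_reg)
{
	mb();	/* TCE table stores must be visible before the kill */
	__raw_writeq(cpu_to_be64(TCE_KILL_INVAL_ALL), kill_reg);
}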
203 | void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe) | ||
204 | { | ||
205 | struct pnv_ioda_pe *gpe; | ||
206 | struct pci_dev *gpdev; | ||
207 | int i, avail = -1; | ||
208 | |||
209 | if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV)) | ||
210 | return; | ||
211 | |||
212 | gpe = get_gpu_pci_dev_and_pe(npe, &gpdev); | ||
213 | if (!gpe) | ||
214 | return; | ||
215 | |||
216 | for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) { | ||
217 | /* Nothing to do if the PE is already connected. */ | ||
218 | if (gpe->peers[i] == npe) | ||
219 | return; | ||
220 | |||
221 | if (!gpe->peers[i]) | ||
222 | avail = i; | ||
223 | } | ||
224 | |||
225 | if (WARN_ON(avail < 0)) | ||
226 | return; | ||
227 | |||
228 | gpe->peers[avail] = npe; | ||
229 | gpe->flags |= PNV_IODA_PE_PEER; | ||
230 | |||
231 | /* | ||
232 | * We assume that the NPU devices only have a single peer PE | ||
233 | * (the GPU PCIe device PE). | ||
234 | */ | ||
235 | npe->peers[0] = gpe; | ||
236 | npe->flags |= PNV_IODA_PE_PEER; | ||
237 | } | ||
238 | |||
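The loop in pnv_npu_init_dma_pe() above makes a single pass that both detects an already-linked peer and remembers a free slot to use when none matches. The same scan in runnable form, with ints standing in for PE pointers (names made up):

#include <stdio.h>

#define MAX_PEERS 8	/* mirrors PNV_IODA_MAX_PEER_PES */

/* Return the slot 'val' occupies, inserting it if absent; -1 if full. */
static int link_peer(int peers[MAX_PEERS], int val)
{
	int i, avail = -1;

	for (i = 0; i < MAX_PEERS; i++) {
		if (peers[i] == val)
			return i;	/* already connected: nothing to do */
		if (!peers[i])
			avail = i;	/* remember a free slot */
	}
	if (avail < 0)
		return -1;		/* table full: the kernel WARNs here */
	peers[avail] = val;
	return avail;
}

int main(void)
{
	int peers[MAX_PEERS] = { 0 };

	printf("%d\n", link_peer(peers, 42));	/* inserted into a free slot */
	printf("%d\n", link_peer(peers, 42));	/* found again: same slot */
	return 0;
}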
239 | /* | ||
240 | * For the NPU we want to point the TCE table at the same table as the | ||
241 | * real PCI device. | ||
242 | */ | ||
243 | static void pnv_npu_disable_bypass(struct pnv_ioda_pe *npe) | ||
244 | { | ||
245 | struct pnv_phb *phb = npe->phb; | ||
246 | struct pci_dev *gpdev; | ||
247 | struct pnv_ioda_pe *gpe; | ||
248 | void *addr; | ||
249 | unsigned int size; | ||
250 | int64_t rc; | ||
251 | |||
252 | /* | ||
253 | * Find the associated PCI devices and get the dma window | ||
254 | * information from there. | ||
255 | */ | ||
256 | if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV)) | ||
257 | return; | ||
258 | |||
259 | gpe = get_gpu_pci_dev_and_pe(npe, &gpdev); | ||
260 | if (!gpe) | ||
261 | return; | ||
262 | |||
263 | addr = (void *)gpe->table_group.tables[0]->it_base; | ||
264 | size = gpe->table_group.tables[0]->it_size << 3; | ||
265 | rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number, | ||
266 | npe->pe_number, 1, __pa(addr), | ||
267 | size, 0x1000); | ||
268 | if (rc != OPAL_SUCCESS) | ||
269 | pr_warn("%s: Error %lld setting DMA window on PHB#%d-PE#%d\n", | ||
270 | __func__, rc, phb->hose->global_number, npe->pe_number); | ||
271 | |||
272 | /* | ||
273 | * We don't initialise npu_pe->tce32_table as we always use | ||
274 | * dma_npu_ops, whose callbacks are all unsupported stubs. | ||
275 | */ | ||
276 | set_dma_ops(&npe->pdev->dev, &dma_npu_ops); | ||
277 | } | ||
278 | |||
279 | /* | ||
280 | * Enable/disable bypass mode on the NPU. The NPU only supports one | ||
281 | * window per link, so bypass needs to be explicitly enabled or | ||
282 | * disabled. Unlike on a PHB3, bypass and non-bypass modes can't be | ||
283 | * active at the same time. | ||
284 | */ | ||
285 | int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe, bool enable) | ||
286 | { | ||
287 | struct pnv_phb *phb = npe->phb; | ||
288 | int64_t rc = 0; | ||
289 | |||
290 | if (phb->type != PNV_PHB_NPU || !npe->pdev) | ||
291 | return -EINVAL; | ||
292 | |||
293 | if (enable) { | ||
294 | /* Enable the bypass window */ | ||
295 | phys_addr_t top = memblock_end_of_DRAM(); | ||
296 | |||
297 | npe->tce_bypass_base = 0; | ||
298 | top = roundup_pow_of_two(top); | ||
299 | dev_info(&npe->pdev->dev, "Enabling bypass for PE %d\n", | ||
300 | npe->pe_number); | ||
301 | rc = opal_pci_map_pe_dma_window_real(phb->opal_id, | ||
302 | npe->pe_number, npe->pe_number, | ||
303 | npe->tce_bypass_base, top); | ||
304 | } else { | ||
305 | /* | ||
306 | * Disable the bypass window by replacing it with the | ||
307 | * TCE32 window. | ||
308 | */ | ||
309 | pnv_npu_disable_bypass(npe); | ||
310 | } | ||
311 | |||
312 | return rc; | ||
313 | } | ||
314 | |||
315 | int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask) | ||
316 | { | ||
317 | struct pci_controller *hose = pci_bus_to_host(npdev->bus); | ||
318 | struct pnv_phb *phb = hose->private_data; | ||
319 | struct pci_dn *pdn = pci_get_pdn(npdev); | ||
320 | struct pnv_ioda_pe *npe, *gpe; | ||
321 | struct pci_dev *gpdev; | ||
322 | uint64_t top; | ||
323 | bool bypass = false; | ||
324 | |||
325 | if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) | ||
326 | return -ENXIO; | ||
327 | |||
328 | /* We only do bypass if it's enabled on the linked device */ | ||
329 | npe = &phb->ioda.pe_array[pdn->pe_number]; | ||
330 | gpe = get_gpu_pci_dev_and_pe(npe, &gpdev); | ||
331 | if (!gpe) | ||
332 | return -ENODEV; | ||
333 | |||
334 | if (gpe->tce_bypass_enabled) { | ||
335 | top = gpe->tce_bypass_base + memblock_end_of_DRAM() - 1; | ||
336 | bypass = (dma_mask >= top); | ||
337 | } | ||
338 | |||
339 | if (bypass) | ||
340 | dev_info(&npdev->dev, "Using 64-bit DMA iommu bypass\n"); | ||
341 | else | ||
342 | dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n"); | ||
343 | |||
344 | pnv_npu_dma_set_bypass(npe, bypass); | ||
345 | *npdev->dev.dma_mask = dma_mask; | ||
346 | |||
347 | return 0; | ||
348 | } | ||
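pnv_npu_dma_set_mask() above reduces to one predicate: enable 64-bit bypass only when the device's DMA mask reaches the top of the linked GPU's bypass window. That decision in isolation, runnable (the DRAM size is an arbitrary assumption):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool use_bypass(uint64_t dma_mask, uint64_t bypass_base,
		       uint64_t end_of_dram)
{
	uint64_t top = bypass_base + end_of_dram - 1;

	return dma_mask >= top;
}

int main(void)
{
	uint64_t dram = 1ULL << 40;	/* assume 1 TiB of DRAM */

	printf("64-bit mask: %d\n", use_bypass(~0ULL, 0, dram));	 /* 1 */
	printf("32-bit mask: %d\n", use_bypass(0xffffffffULL, 0, dram)); /* 0 */
	return 0;
}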
diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c new file mode 100644 index 000000000000..6f1214d4de92 --- /dev/null +++ b/arch/powerpc/platforms/powernv/opal-kmsg.c | |||
@@ -0,0 +1,75 @@ | |||
1 | /* | ||
2 | * kmsg dumper that ensures the OPAL console fully flushes panic messages | ||
3 | * | ||
4 | * Author: Russell Currey <ruscur@russell.cc> | ||
5 | * | ||
6 | * Copyright 2015 IBM Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/kmsg_dump.h> | ||
15 | |||
16 | #include <asm/opal.h> | ||
17 | #include <asm/opal-api.h> | ||
18 | |||
19 | /* | ||
20 | * Console output is controlled by OPAL firmware. The kernel regularly calls | ||
21 | * OPAL_POLL_EVENTS, which flushes some console output. In a panic state, | ||
22 | * however, the kernel no longer calls OPAL_POLL_EVENTS and the panic message | ||
23 | * may not be completely printed. This function does not actually dump the | ||
24 | * message; it just ensures that OPAL completely flushes the console buffer. | ||
25 | */ | ||
26 | static void force_opal_console_flush(struct kmsg_dumper *dumper, | ||
27 | enum kmsg_dump_reason reason) | ||
28 | { | ||
29 | int i; | ||
30 | int64_t ret; | ||
31 | |||
32 | /* | ||
33 | * Outside of a panic context the pollers will continue to run, | ||
34 | * so we don't need to do any special flushing. | ||
35 | */ | ||
36 | if (reason != KMSG_DUMP_PANIC) | ||
37 | return; | ||
38 | |||
39 | if (opal_check_token(OPAL_CONSOLE_FLUSH)) { | ||
40 | ret = opal_console_flush(0); | ||
41 | |||
42 | if (ret == OPAL_UNSUPPORTED || ret == OPAL_PARAMETER) | ||
43 | return; | ||
44 | |||
45 | /* Incrementally flush until there's nothing left */ | ||
46 | while (opal_console_flush(0) != OPAL_SUCCESS); | ||
47 | } else { | ||
48 | /* | ||
49 | * If OPAL_CONSOLE_FLUSH is not implemented in the firmware, | ||
50 | * the console can still be flushed by calling the polling | ||
51 | * function enough times to flush the buffer. We don't know | ||
52 | * how much output still needs to be flushed, but we can be | ||
53 | * generous since the kernel is in panic and doesn't need | ||
54 | * to do much else. | ||
55 | */ | ||
56 | printk(KERN_NOTICE "opal: OPAL_CONSOLE_FLUSH missing.\n"); | ||
57 | for (i = 0; i < 1024; i++) { | ||
58 | opal_poll_events(NULL); | ||
59 | } | ||
60 | } | ||
61 | } | ||
62 | |||
63 | static struct kmsg_dumper opal_kmsg_dumper = { | ||
64 | .dump = force_opal_console_flush | ||
65 | }; | ||
66 | |||
67 | void __init opal_kmsg_init(void) | ||
68 | { | ||
69 | int rc; | ||
70 | |||
71 | /* Add our dumper to the list */ | ||
72 | rc = kmsg_dump_register(&opal_kmsg_dumper); | ||
73 | if (rc != 0) | ||
74 | pr_err("opal: kmsg_dump_register failed; returned %d\n", rc); | ||
75 | } | ||
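opal-kmsg.c above is a complete, minimal user of the kmsg_dump API: fill in a struct kmsg_dumper, register it, and do all the work in the .dump callback. The same skeleton stripped to its moving parts (module name and behaviour are placeholders; the callback runs in panic context and must not sleep):

#include <linux/module.h>
#include <linux/kmsg_dump.h>

static void demo_dump(struct kmsg_dumper *dumper,
		      enum kmsg_dump_reason reason)
{
	if (reason != KMSG_DUMP_PANIC)
		return;		/* outside panic the pollers still run */

	/* flush or persist console output here */
}

static struct kmsg_dumper demo_dumper = {
	.dump = demo_dump,
};

static int __init demo_init(void)
{
	return kmsg_dump_register(&demo_dumper);
}
module_init(demo_init);
MODULE_LICENSE("GPL");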
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c index 4ece8e40dd54..e315e704cca7 100644 --- a/arch/powerpc/platforms/powernv/opal-prd.c +++ b/arch/powerpc/platforms/powernv/opal-prd.c | |||
@@ -434,7 +434,6 @@ static const struct of_device_id opal_prd_match[] = { | |||
434 | static struct platform_driver opal_prd_driver = { | 434 | static struct platform_driver opal_prd_driver = { |
435 | .driver = { | 435 | .driver = { |
436 | .name = "opal-prd", | 436 | .name = "opal-prd", |
437 | .owner = THIS_MODULE, | ||
438 | .of_match_table = opal_prd_match, | 437 | .of_match_table = opal_prd_match, |
439 | }, | 438 | }, |
440 | .probe = opal_prd_probe, | 439 | .probe = opal_prd_probe, |
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c index 37dbee15769f..f8868864f373 100644 --- a/arch/powerpc/platforms/powernv/opal-rtc.c +++ b/arch/powerpc/platforms/powernv/opal-rtc.c | |||
@@ -31,8 +31,7 @@ static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm) | |||
31 | tm->tm_hour = bcd2bin((h_m_s_ms >> 56) & 0xff); | 31 | tm->tm_hour = bcd2bin((h_m_s_ms >> 56) & 0xff); |
32 | tm->tm_min = bcd2bin((h_m_s_ms >> 48) & 0xff); | 32 | tm->tm_min = bcd2bin((h_m_s_ms >> 48) & 0xff); |
33 | tm->tm_sec = bcd2bin((h_m_s_ms >> 40) & 0xff); | 33 | tm->tm_sec = bcd2bin((h_m_s_ms >> 40) & 0xff); |
34 | 34 | tm->tm_wday = -1; | |
35 | GregorianDay(tm); | ||
36 | } | 35 | } |
37 | 36 | ||
38 | unsigned long __init opal_get_boot_time(void) | 37 | unsigned long __init opal_get_boot_time(void) |
@@ -51,7 +50,7 @@ unsigned long __init opal_get_boot_time(void) | |||
51 | rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); | 50 | rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); |
52 | if (rc == OPAL_BUSY_EVENT) | 51 | if (rc == OPAL_BUSY_EVENT) |
53 | opal_poll_events(NULL); | 52 | opal_poll_events(NULL); |
54 | else | 53 | else if (rc == OPAL_BUSY) |
55 | mdelay(10); | 54 | mdelay(10); |
56 | } | 55 | } |
57 | if (rc != OPAL_SUCCESS) | 56 | if (rc != OPAL_SUCCESS) |
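Two separate fixes land in opal-rtc.c above: the broken GregorianDay() call is dropped in favour of tm_wday = -1 (leaving the weekday marked as unknown rather than miscomputed), and the retry loop now delays only on a plain OPAL_BUSY return, so a hard error exits the loop instead of burning a 10 ms mdelay() first. The corrected loop shape, runnable with userspace stand-ins:

#include <stdio.h>

enum { OPAL_SUCCESS, OPAL_BUSY, OPAL_BUSY_EVENT, OPAL_HARDWARE };

static int attempts;

static int fake_rtc_read(void)		/* succeeds on the third try */
{
	return ++attempts < 3 ? OPAL_BUSY : OPAL_SUCCESS;
}

static void poll_events(void) { }	/* stands in for opal_poll_events() */
static void delay_ms(int ms) { (void)ms; }	/* stands in for mdelay() */

int main(void)
{
	int rc = OPAL_BUSY;

	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = fake_rtc_read();
		if (rc == OPAL_BUSY_EVENT)
			poll_events();	/* flush the pending event */
		else if (rc == OPAL_BUSY)
			delay_ms(10);	/* back off, then retry */
	}
	printf("rc=%d after %d attempts\n", rc, attempts);
	return rc == OPAL_SUCCESS ? 0 : 1;
}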
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index b7a464fef7a7..e45b88a5d7e0 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S | |||
@@ -301,3 +301,4 @@ OPAL_CALL(opal_flash_erase, OPAL_FLASH_ERASE); | |||
301 | OPAL_CALL(opal_prd_msg, OPAL_PRD_MSG); | 301 | OPAL_CALL(opal_prd_msg, OPAL_PRD_MSG); |
302 | OPAL_CALL(opal_leds_get_ind, OPAL_LEDS_GET_INDICATOR); | 302 | OPAL_CALL(opal_leds_get_ind, OPAL_LEDS_GET_INDICATOR); |
303 | OPAL_CALL(opal_leds_set_ind, OPAL_LEDS_SET_INDICATOR); | 303 | OPAL_CALL(opal_leds_set_ind, OPAL_LEDS_SET_INDICATOR); |
304 | OPAL_CALL(opal_console_flush, OPAL_CONSOLE_FLUSH); | ||
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 7634d1c62299..d0ac535cf5d7 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c | |||
@@ -126,7 +126,7 @@ static const struct scom_controller opal_scom_controller = { | |||
126 | 126 | ||
127 | static int opal_xscom_init(void) | 127 | static int opal_xscom_init(void) |
128 | { | 128 | { |
129 | if (firmware_has_feature(FW_FEATURE_OPALv3)) | 129 | if (firmware_has_feature(FW_FEATURE_OPAL)) |
130 | scom_init(&opal_scom_controller); | 130 | scom_init(&opal_scom_controller); |
131 | return 0; | 131 | return 0; |
132 | } | 132 | } |
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 57cffb80bc36..4e0da5af94a1 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c | |||
@@ -98,16 +98,11 @@ int __init early_init_dt_scan_opal(unsigned long node, | |||
98 | pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n", | 98 | pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n", |
99 | opal.size, sizep, runtimesz); | 99 | opal.size, sizep, runtimesz); |
100 | 100 | ||
101 | powerpc_firmware_features |= FW_FEATURE_OPAL; | ||
102 | if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) { | 101 | if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) { |
103 | powerpc_firmware_features |= FW_FEATURE_OPALv2; | 102 | powerpc_firmware_features |= FW_FEATURE_OPAL; |
104 | powerpc_firmware_features |= FW_FEATURE_OPALv3; | 103 | pr_info("OPAL detected !\n"); |
105 | pr_info("OPAL V3 detected !\n"); | ||
106 | } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { | ||
107 | powerpc_firmware_features |= FW_FEATURE_OPALv2; | ||
108 | pr_info("OPAL V2 detected !\n"); | ||
109 | } else { | 104 | } else { |
110 | pr_info("OPAL V1 detected !\n"); | 105 | panic("OPAL != V3 detected, no longer supported.\n"); |
111 | } | 106 | } |
112 | 107 | ||
113 | /* Reinit all cores with the right endian */ | 108 | /* Reinit all cores with the right endian */ |
@@ -352,17 +347,15 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len) | |||
352 | * enough room and be done with it | 347 | * enough room and be done with it |
353 | */ | 348 | */ |
354 | spin_lock_irqsave(&opal_write_lock, flags); | 349 | spin_lock_irqsave(&opal_write_lock, flags); |
355 | if (firmware_has_feature(FW_FEATURE_OPALv2)) { | 350 | rc = opal_console_write_buffer_space(vtermno, &olen); |
356 | rc = opal_console_write_buffer_space(vtermno, &olen); | 351 | len = be64_to_cpu(olen); |
357 | len = be64_to_cpu(olen); | 352 | if (rc || len < total_len) { |
358 | if (rc || len < total_len) { | 353 | spin_unlock_irqrestore(&opal_write_lock, flags); |
359 | spin_unlock_irqrestore(&opal_write_lock, flags); | 354 | /* Closed -> drop characters */ |
360 | /* Closed -> drop characters */ | 355 | if (rc) |
361 | if (rc) | 356 | return total_len; |
362 | return total_len; | 357 | opal_poll_events(NULL); |
363 | opal_poll_events(NULL); | 358 | return -EAGAIN; |
364 | return -EAGAIN; | ||
365 | } | ||
366 | } | 359 | } |
367 | 360 | ||
368 | /* We still try to handle partial completions, though they | 361 | /* We still try to handle partial completions, though they |
@@ -555,7 +548,7 @@ bool opal_mce_check_early_recovery(struct pt_regs *regs) | |||
555 | goto out; | 548 | goto out; |
556 | 549 | ||
557 | if ((regs->nip >= opal.base) && | 550 | if ((regs->nip >= opal.base) && |
558 | (regs->nip <= (opal.base + opal.size))) | 551 | (regs->nip < (opal.base + opal.size))) |
559 | recover_addr = find_recovery_address(regs->nip); | 552 | recover_addr = find_recovery_address(regs->nip); |
560 | 553 | ||
561 | /* | 554 | /* |
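The opal_mce_check_early_recovery() change above is a classic off-by-one fix: [opal.base, opal.base + opal.size) is a half-open range, so the address one past the region must not match. The corrected check in isolation, runnable:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool in_region(uint64_t addr, uint64_t base, uint64_t size)
{
	/* base + size is the first byte *past* the region */
	return addr >= base && addr < base + size;
}

int main(void)
{
	printf("%d\n", in_region(0x1000, 0x1000, 0x100));	/* 1: first byte */
	printf("%d\n", in_region(0x10ff, 0x1000, 0x100));	/* 1: last byte */
	printf("%d\n", in_region(0x1100, 0x1000, 0x100));	/* 0: one past */
	return 0;
}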
@@ -696,10 +689,7 @@ static int __init opal_init(void) | |||
696 | } | 689 | } |
697 | 690 | ||
698 | /* Register OPAL consoles if any ports */ | 691 | /* Register OPAL consoles if any ports */ |
699 | if (firmware_has_feature(FW_FEATURE_OPALv2)) | 692 | consoles = of_find_node_by_path("/ibm,opal/consoles"); |
700 | consoles = of_find_node_by_path("/ibm,opal/consoles"); | ||
701 | else | ||
702 | consoles = of_node_get(opal_node); | ||
703 | if (consoles) { | 693 | if (consoles) { |
704 | for_each_child_of_node(consoles, np) { | 694 | for_each_child_of_node(consoles, np) { |
705 | if (strcmp(np->name, "serial")) | 695 | if (strcmp(np->name, "serial")) |
@@ -758,6 +748,9 @@ static int __init opal_init(void) | |||
758 | opal_pdev_init(opal_node, "ibm,opal-flash"); | 748 | opal_pdev_init(opal_node, "ibm,opal-flash"); |
759 | opal_pdev_init(opal_node, "ibm,opal-prd"); | 749 | opal_pdev_init(opal_node, "ibm,opal-prd"); |
760 | 750 | ||
751 | /* Initialise OPAL kmsg dumper for flushing console on panic */ | ||
752 | opal_kmsg_init(); | ||
753 | |||
761 | return 0; | 754 | return 0; |
762 | } | 755 | } |
763 | machine_subsys_initcall(powernv, opal_init); | 756 | machine_subsys_initcall(powernv, opal_init); |
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 414fd1a00fda..573ae1994097 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -116,16 +116,6 @@ static int __init iommu_setup(char *str) | |||
116 | } | 116 | } |
117 | early_param("iommu", iommu_setup); | 117 | early_param("iommu", iommu_setup); |
118 | 118 | ||
119 | /* | ||
120 | * stdcix is only supposed to be used in hypervisor real mode as per | ||
121 | * the architecture spec | ||
122 | */ | ||
123 | static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr) | ||
124 | { | ||
125 | __asm__ __volatile__("stdcix %0,0,%1" | ||
126 | : : "r" (val), "r" (paddr) : "memory"); | ||
127 | } | ||
128 | |||
129 | static inline bool pnv_pci_is_mem_pref_64(unsigned long flags) | 119 | static inline bool pnv_pci_is_mem_pref_64(unsigned long flags) |
130 | { | 120 | { |
131 | return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) == | 121 | return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) == |
@@ -344,7 +334,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb) | |||
344 | return; | 334 | return; |
345 | } | 335 | } |
346 | 336 | ||
347 | if (!firmware_has_feature(FW_FEATURE_OPALv3)) { | 337 | if (!firmware_has_feature(FW_FEATURE_OPAL)) { |
348 | pr_info(" Firmware too old to support M64 window\n"); | 338 | pr_info(" Firmware too old to support M64 window\n"); |
349 | return; | 339 | return; |
350 | } | 340 | } |
@@ -357,6 +347,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb) | |||
357 | } | 347 | } |
358 | 348 | ||
359 | res = &hose->mem_resources[1]; | 349 | res = &hose->mem_resources[1]; |
350 | res->name = dn->full_name; | ||
360 | res->start = of_translate_address(dn, r + 2); | 351 | res->start = of_translate_address(dn, r + 2); |
361 | res->end = res->start + of_read_number(r + 4, 2) - 1; | 352 | res->end = res->start + of_read_number(r + 4, 2) - 1; |
362 | res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH); | 353 | res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH); |
@@ -780,8 +771,12 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) | |||
780 | return -ENXIO; | 771 | return -ENXIO; |
781 | } | 772 | } |
782 | 773 | ||
783 | /* Configure PELTV */ | 774 | /* |
784 | pnv_ioda_set_peltv(phb, pe, true); | 775 | * Configure PELTV. NPUs don't have a PELTV table so skip |
776 | * configuration on them. | ||
777 | */ | ||
778 | if (phb->type != PNV_PHB_NPU) | ||
779 | pnv_ioda_set_peltv(phb, pe, true); | ||
785 | 780 | ||
786 | /* Setup reverse map */ | 781 | /* Setup reverse map */ |
787 | for (rid = pe->rid; rid < rid_end; rid++) | 782 | for (rid = pe->rid; rid < rid_end; rid++) |
@@ -924,7 +919,6 @@ static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset) | |||
924 | } | 919 | } |
925 | #endif /* CONFIG_PCI_IOV */ | 920 | #endif /* CONFIG_PCI_IOV */ |
926 | 921 | ||
927 | #if 0 | ||
928 | static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) | 922 | static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) |
929 | { | 923 | { |
930 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | 924 | struct pci_controller *hose = pci_bus_to_host(dev->bus); |
@@ -941,11 +935,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) | |||
941 | if (pdn->pe_number != IODA_INVALID_PE) | 935 | if (pdn->pe_number != IODA_INVALID_PE) |
942 | return NULL; | 936 | return NULL; |
943 | 937 | ||
944 | /* PE#0 has been pre-set */ | 938 | pe_num = pnv_ioda_alloc_pe(phb); |
945 | if (dev->bus->number == 0) | ||
946 | pe_num = 0; | ||
947 | else | ||
948 | pe_num = pnv_ioda_alloc_pe(phb); | ||
949 | if (pe_num == IODA_INVALID_PE) { | 939 | if (pe_num == IODA_INVALID_PE) { |
950 | pr_warning("%s: Not enough PE# available, disabling device\n", | 940 | pr_warning("%s: Not enough PE# available, disabling device\n", |
951 | pci_name(dev)); | 941 | pci_name(dev)); |
@@ -963,6 +953,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) | |||
963 | pci_dev_get(dev); | 953 | pci_dev_get(dev); |
964 | pdn->pcidev = dev; | 954 | pdn->pcidev = dev; |
965 | pdn->pe_number = pe_num; | 955 | pdn->pe_number = pe_num; |
956 | pe->flags = PNV_IODA_PE_DEV; | ||
966 | pe->pdev = dev; | 957 | pe->pdev = dev; |
967 | pe->pbus = NULL; | 958 | pe->pbus = NULL; |
968 | pe->tce32_seg = -1; | 959 | pe->tce32_seg = -1; |
@@ -993,7 +984,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) | |||
993 | 984 | ||
994 | return pe; | 985 | return pe; |
995 | } | 986 | } |
996 | #endif /* Useful for SRIOV case */ | ||
997 | 987 | ||
998 | static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) | 988 | static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) |
999 | { | 989 | { |
@@ -1007,6 +997,7 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) | |||
1007 | pci_name(dev)); | 997 | pci_name(dev)); |
1008 | continue; | 998 | continue; |
1009 | } | 999 | } |
1000 | pdn->pcidev = dev; | ||
1010 | pdn->pe_number = pe->pe_number; | 1001 | pdn->pe_number = pe->pe_number; |
1011 | pe->dma_weight += pnv_ioda_dma_weight(dev); | 1002 | pe->dma_weight += pnv_ioda_dma_weight(dev); |
1012 | if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) | 1003 | if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) |
@@ -1083,6 +1074,77 @@ static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all) | |||
1083 | pnv_ioda_link_pe_by_weight(phb, pe); | 1074 | pnv_ioda_link_pe_by_weight(phb, pe); |
1084 | } | 1075 | } |
1085 | 1076 | ||
1077 | static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev) | ||
1078 | { | ||
1079 | int pe_num, found_pe = false, rc; | ||
1080 | long rid; | ||
1081 | struct pnv_ioda_pe *pe; | ||
1082 | struct pci_dev *gpu_pdev; | ||
1083 | struct pci_dn *npu_pdn; | ||
1084 | struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus); | ||
1085 | struct pnv_phb *phb = hose->private_data; | ||
1086 | |||
1087 | /* | ||
1088 | * Due to a hardware erratum, PE#0 on the NPU is reserved for | ||
1089 | * error handling. This means we only have three PEs remaining | ||
1090 | * which need to be assigned to four links, implying some | ||
1091 | * links must share PEs. | ||
1092 | * | ||
1093 | * To achieve this we assign PEs such that NPUs linking the | ||
1094 | * same GPU get assigned the same PE. | ||
1095 | */ | ||
1096 | gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev); | ||
1097 | for (pe_num = 0; pe_num < phb->ioda.total_pe; pe_num++) { | ||
1098 | pe = &phb->ioda.pe_array[pe_num]; | ||
1099 | if (!pe->pdev) | ||
1100 | continue; | ||
1101 | |||
1102 | if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) { | ||
1103 | /* | ||
1104 | * This device has the same peer GPU so should | ||
1105 | * be assigned the same PE as the existing | ||
1106 | * peer NPU. | ||
1107 | */ | ||
1108 | dev_info(&npu_pdev->dev, | ||
1109 | "Associating to existing PE %d\n", pe_num); | ||
1110 | pci_dev_get(npu_pdev); | ||
1111 | npu_pdn = pci_get_pdn(npu_pdev); | ||
1112 | rid = npu_pdev->bus->number << 8 | npu_pdn->devfn; | ||
1113 | npu_pdn->pcidev = npu_pdev; | ||
1114 | npu_pdn->pe_number = pe_num; | ||
1115 | pe->dma_weight += pnv_ioda_dma_weight(npu_pdev); | ||
1116 | phb->ioda.pe_rmap[rid] = pe->pe_number; | ||
1117 | |||
1118 | /* Map the PE to this link */ | ||
1119 | rc = opal_pci_set_pe(phb->opal_id, pe_num, rid, | ||
1120 | OpalPciBusAll, | ||
1121 | OPAL_COMPARE_RID_DEVICE_NUMBER, | ||
1122 | OPAL_COMPARE_RID_FUNCTION_NUMBER, | ||
1123 | OPAL_MAP_PE); | ||
1124 | WARN_ON(rc != OPAL_SUCCESS); | ||
1125 | found_pe = true; | ||
1126 | break; | ||
1127 | } | ||
1128 | } | ||
1129 | |||
1130 | if (!found_pe) | ||
1131 | /* | ||
1132 | * Could not find an existing PE so allocate a new | ||
1133 | * one. | ||
1134 | */ | ||
1135 | return pnv_ioda_setup_dev_PE(npu_pdev); | ||
1136 | else | ||
1137 | return pe; | ||
1138 | } | ||
1139 | |||
1140 | static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus) | ||
1141 | { | ||
1142 | struct pci_dev *pdev; | ||
1143 | |||
1144 | list_for_each_entry(pdev, &bus->devices, bus_list) | ||
1145 | pnv_ioda_setup_npu_PE(pdev); | ||
1146 | } | ||
1147 | |||
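The comment in pnv_ioda_setup_npu_PE() above states the constraint: with PE#0 reserved by the erratum, three PEs must cover four links, so links that reach the same GPU share a PE. A toy model of that assignment (GPU ids and counts invented):

#include <stdio.h>

#define NPU_LINKS 4
#define MAX_GPUS 16

int main(void)
{
	int gpu_of_link[NPU_LINKS] = { 7, 7, 9, 9 };	/* two GPUs, two links each */
	int pe_of_gpu[MAX_GPUS], next_pe = 1;		/* PE#0 stays reserved */
	int link, gpu;

	for (gpu = 0; gpu < MAX_GPUS; gpu++)
		pe_of_gpu[gpu] = -1;

	for (link = 0; link < NPU_LINKS; link++) {
		gpu = gpu_of_link[link];
		if (pe_of_gpu[gpu] < 0)
			pe_of_gpu[gpu] = next_pe++;	/* first link to this GPU */
		printf("link %d -> PE#%d\n", link, pe_of_gpu[gpu]);
	}
	return 0;
}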
1086 | static void pnv_ioda_setup_PEs(struct pci_bus *bus) | 1148 | static void pnv_ioda_setup_PEs(struct pci_bus *bus) |
1087 | { | 1149 | { |
1088 | struct pci_dev *dev; | 1150 | struct pci_dev *dev; |
@@ -1119,7 +1181,17 @@ static void pnv_pci_ioda_setup_PEs(void) | |||
1119 | if (phb->reserve_m64_pe) | 1181 | if (phb->reserve_m64_pe) |
1120 | phb->reserve_m64_pe(hose->bus, NULL, true); | 1182 | phb->reserve_m64_pe(hose->bus, NULL, true); |
1121 | 1183 | ||
1122 | pnv_ioda_setup_PEs(hose->bus); | 1184 | /* |
1185 | * On NPU PHB, we expect separate PEs for individual PCI | ||
1186 | * functions. PCI bus-dependent PEs are required for the | ||
1187 | * remaining types of PHBs. | ||
1188 | */ | ||
1189 | if (phb->type == PNV_PHB_NPU) { | ||
1190 | /* PE#0 is needed for error reporting */ | ||
1191 | pnv_ioda_reserve_pe(phb, 0); | ||
1192 | pnv_ioda_setup_npu_PEs(hose->bus); | ||
1193 | } else | ||
1194 | pnv_ioda_setup_PEs(hose->bus); | ||
1123 | } | 1195 | } |
1124 | } | 1196 | } |
1125 | 1197 | ||
@@ -1578,6 +1650,8 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) | |||
1578 | struct pnv_ioda_pe *pe; | 1650 | struct pnv_ioda_pe *pe; |
1579 | uint64_t top; | 1651 | uint64_t top; |
1580 | bool bypass = false; | 1652 | bool bypass = false; |
1653 | struct pci_dev *linked_npu_dev; | ||
1654 | int i; | ||
1581 | 1655 | ||
1582 | if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) | 1656 | if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) |
1583 | return -ENODEV; | 1657 | return -ENODEV;
@@ -1596,6 +1670,18 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) | |||
1596 | set_dma_ops(&pdev->dev, &dma_iommu_ops); | 1670 | set_dma_ops(&pdev->dev, &dma_iommu_ops); |
1597 | } | 1671 | } |
1598 | *pdev->dev.dma_mask = dma_mask; | 1672 | *pdev->dev.dma_mask = dma_mask; |
1673 | |||
1674 | /* Update peer npu devices */ | ||
1675 | if (pe->flags & PNV_IODA_PE_PEER) | ||
1676 | for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) { | ||
1677 | if (!pe->peers[i]) | ||
1678 | continue; | ||
1679 | |||
1680 | linked_npu_dev = pe->peers[i]->pdev; | ||
1681 | if (dma_get_mask(&linked_npu_dev->dev) != dma_mask) | ||
1682 | dma_set_mask(&linked_npu_dev->dev, dma_mask); | ||
1683 | } | ||
1684 | |||
1599 | return 0; | 1685 | return 0; |
1600 | } | 1686 | } |
1601 | 1687 | ||
@@ -1740,12 +1826,23 @@ static inline void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_ioda_pe *pe) | |||
1740 | /* 01xb - invalidate TCEs that match the specified PE# */ | 1826 | /* 01xb - invalidate TCEs that match the specified PE# */ |
1741 | unsigned long val = (0x4ull << 60) | (pe->pe_number & 0xFF); | 1827 | unsigned long val = (0x4ull << 60) | (pe->pe_number & 0xFF); |
1742 | struct pnv_phb *phb = pe->phb; | 1828 | struct pnv_phb *phb = pe->phb; |
1829 | struct pnv_ioda_pe *npe; | ||
1830 | int i; | ||
1743 | 1831 | ||
1744 | if (!phb->ioda.tce_inval_reg) | 1832 | if (!phb->ioda.tce_inval_reg) |
1745 | return; | 1833 | return; |
1746 | 1834 | ||
1747 | mb(); /* Ensure above stores are visible */ | 1835 | mb(); /* Ensure above stores are visible */ |
1748 | __raw_writeq(cpu_to_be64(val), phb->ioda.tce_inval_reg); | 1836 | __raw_writeq(cpu_to_be64(val), phb->ioda.tce_inval_reg); |
1837 | |||
1838 | if (pe->flags & PNV_IODA_PE_PEER) | ||
1839 | for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) { | ||
1840 | npe = pe->peers[i]; | ||
1841 | if (!npe || npe->phb->type != PNV_PHB_NPU) | ||
1842 | continue; | ||
1843 | |||
1844 | pnv_npu_tce_invalidate_entire(npe); | ||
1845 | } | ||
1749 | } | 1846 | } |
1750 | 1847 | ||
1751 | static void pnv_pci_ioda2_do_tce_invalidate(unsigned pe_number, bool rm, | 1848 | static void pnv_pci_ioda2_do_tce_invalidate(unsigned pe_number, bool rm, |
@@ -1780,15 +1877,28 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, | |||
1780 | struct iommu_table_group_link *tgl; | 1877 | struct iommu_table_group_link *tgl; |
1781 | 1878 | ||
1782 | list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) { | 1879 | list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) { |
1880 | struct pnv_ioda_pe *npe; | ||
1783 | struct pnv_ioda_pe *pe = container_of(tgl->table_group, | 1881 | struct pnv_ioda_pe *pe = container_of(tgl->table_group, |
1784 | struct pnv_ioda_pe, table_group); | 1882 | struct pnv_ioda_pe, table_group); |
1785 | __be64 __iomem *invalidate = rm ? | 1883 | __be64 __iomem *invalidate = rm ? |
1786 | (__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys : | 1884 | (__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys : |
1787 | pe->phb->ioda.tce_inval_reg; | 1885 | pe->phb->ioda.tce_inval_reg; |
1886 | int i; | ||
1788 | 1887 | ||
1789 | pnv_pci_ioda2_do_tce_invalidate(pe->pe_number, rm, | 1888 | pnv_pci_ioda2_do_tce_invalidate(pe->pe_number, rm, |
1790 | invalidate, tbl->it_page_shift, | 1889 | invalidate, tbl->it_page_shift, |
1791 | index, npages); | 1890 | index, npages); |
1891 | |||
1892 | if (pe->flags & PNV_IODA_PE_PEER) | ||
1893 | /* Invalidate PEs using the same TCE table */ | ||
1894 | for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) { | ||
1895 | npe = pe->peers[i]; | ||
1896 | if (!npe || npe->phb->type != PNV_PHB_NPU) | ||
1897 | continue; | ||
1898 | |||
1899 | pnv_npu_tce_invalidate(npe, tbl, index, | ||
1900 | npages, rm); | ||
1901 | } | ||
1792 | } | 1902 | } |
1793 | } | 1903 | } |
1794 | 1904 | ||
@@ -2436,10 +2546,17 @@ static void pnv_ioda_setup_dma(struct pnv_phb *phb) | |||
2436 | pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n", | 2546 | pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n", |
2437 | pe->dma_weight, segs); | 2547 | pe->dma_weight, segs); |
2438 | pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs); | 2548 | pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs); |
2439 | } else { | 2549 | } else if (phb->type == PNV_PHB_IODA2) { |
2440 | pe_info(pe, "Assign DMA32 space\n"); | 2550 | pe_info(pe, "Assign DMA32 space\n"); |
2441 | segs = 0; | 2551 | segs = 0; |
2442 | pnv_pci_ioda2_setup_dma_pe(phb, pe); | 2552 | pnv_pci_ioda2_setup_dma_pe(phb, pe); |
2553 | } else if (phb->type == PNV_PHB_NPU) { | ||
2554 | /* | ||
2555 | * We initialise the DMA space for an NPU PHB | ||
2556 | * after setup of the PHB is complete as we | ||
2557 | * point the NPU TVT to the the same location | ||
2558 | * as the PHB3 TVT. | ||
2559 | */ | ||
2443 | } | 2560 | } |
2444 | 2561 | ||
2445 | remaining -= segs; | 2562 | remaining -= segs; |
@@ -2881,6 +2998,11 @@ static void pnv_pci_ioda_setup_seg(void) | |||
2881 | 2998 | ||
2882 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { | 2999 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { |
2883 | phb = hose->private_data; | 3000 | phb = hose->private_data; |
3001 | |||
3002 | /* NPU PHB does not support IO or MMIO segmentation */ | ||
3003 | if (phb->type == PNV_PHB_NPU) | ||
3004 | continue; | ||
3005 | |||
2884 | list_for_each_entry(pe, &phb->ioda.pe_list, list) { | 3006 | list_for_each_entry(pe, &phb->ioda.pe_list, list) { |
2885 | pnv_ioda_setup_pe_seg(hose, pe); | 3007 | pnv_ioda_setup_pe_seg(hose, pe); |
2886 | } | 3008 | } |
@@ -2920,6 +3042,27 @@ static void pnv_pci_ioda_create_dbgfs(void) | |||
2920 | #endif /* CONFIG_DEBUG_FS */ | 3042 | #endif /* CONFIG_DEBUG_FS */ |
2921 | } | 3043 | } |
2922 | 3044 | ||
3045 | static void pnv_npu_ioda_fixup(void) | ||
3046 | { | ||
3047 | bool enable_bypass; | ||
3048 | struct pci_controller *hose, *tmp; | ||
3049 | struct pnv_phb *phb; | ||
3050 | struct pnv_ioda_pe *pe; | ||
3051 | |||
3052 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { | ||
3053 | phb = hose->private_data; | ||
3054 | if (phb->type != PNV_PHB_NPU) | ||
3055 | continue; | ||
3056 | |||
3057 | list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) { | ||
3058 | enable_bypass = dma_get_mask(&pe->pdev->dev) == | ||
3059 | DMA_BIT_MASK(64); | ||
3060 | pnv_npu_init_dma_pe(pe); | ||
3061 | pnv_npu_dma_set_bypass(pe, enable_bypass); | ||
3062 | } | ||
3063 | } | ||
3064 | } | ||
3065 | |||
2923 | static void pnv_pci_ioda_fixup(void) | 3066 | static void pnv_pci_ioda_fixup(void) |
2924 | { | 3067 | { |
2925 | pnv_pci_ioda_setup_PEs(); | 3068 | pnv_pci_ioda_setup_PEs(); |
@@ -2932,6 +3075,9 @@ static void pnv_pci_ioda_fixup(void) | |||
2932 | eeh_init(); | 3075 | eeh_init(); |
2933 | eeh_addr_cache_build(); | 3076 | eeh_addr_cache_build(); |
2934 | #endif | 3077 | #endif |
3078 | |||
3079 | /* Link NPU IODA tables to their PCI devices. */ | ||
3080 | pnv_npu_ioda_fixup(); | ||
2935 | } | 3081 | } |
2936 | 3082 | ||
2937 | /* | 3083 | /* |
@@ -3046,6 +3192,19 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { | |||
3046 | .shutdown = pnv_pci_ioda_shutdown, | 3192 | .shutdown = pnv_pci_ioda_shutdown, |
3047 | }; | 3193 | }; |
3048 | 3194 | ||
3195 | static const struct pci_controller_ops pnv_npu_ioda_controller_ops = { | ||
3196 | .dma_dev_setup = pnv_pci_dma_dev_setup, | ||
3197 | #ifdef CONFIG_PCI_MSI | ||
3198 | .setup_msi_irqs = pnv_setup_msi_irqs, | ||
3199 | .teardown_msi_irqs = pnv_teardown_msi_irqs, | ||
3200 | #endif | ||
3201 | .enable_device_hook = pnv_pci_enable_device_hook, | ||
3202 | .window_alignment = pnv_pci_window_alignment, | ||
3203 | .reset_secondary_bus = pnv_pci_reset_secondary_bus, | ||
3204 | .dma_set_mask = pnv_npu_dma_set_mask, | ||
3205 | .shutdown = pnv_pci_ioda_shutdown, | ||
3206 | }; | ||
3207 | |||
3049 | static void __init pnv_pci_init_ioda_phb(struct device_node *np, | 3208 | static void __init pnv_pci_init_ioda_phb(struct device_node *np, |
3050 | u64 hub_id, int ioda_type) | 3209 | u64 hub_id, int ioda_type) |
3051 | { | 3210 | { |
@@ -3101,6 +3260,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, | |||
3101 | phb->model = PNV_PHB_MODEL_P7IOC; | 3260 | phb->model = PNV_PHB_MODEL_P7IOC; |
3102 | else if (of_device_is_compatible(np, "ibm,power8-pciex")) | 3261 | else if (of_device_is_compatible(np, "ibm,power8-pciex")) |
3103 | phb->model = PNV_PHB_MODEL_PHB3; | 3262 | phb->model = PNV_PHB_MODEL_PHB3; |
3263 | else if (of_device_is_compatible(np, "ibm,power8-npu-pciex")) | ||
3264 | phb->model = PNV_PHB_MODEL_NPU; | ||
3104 | else | 3265 | else |
3105 | phb->model = PNV_PHB_MODEL_UNKNOWN; | 3266 | phb->model = PNV_PHB_MODEL_UNKNOWN; |
3106 | 3267 | ||
@@ -3201,7 +3362,11 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, | |||
3201 | * the child P2P bridges) can form individual PE. | 3362 | * the child P2P bridges) can form individual PE. |
3202 | */ | 3363 | */ |
3203 | ppc_md.pcibios_fixup = pnv_pci_ioda_fixup; | 3364 | ppc_md.pcibios_fixup = pnv_pci_ioda_fixup; |
3204 | hose->controller_ops = pnv_pci_ioda_controller_ops; | 3365 | |
3366 | if (phb->type == PNV_PHB_NPU) | ||
3367 | hose->controller_ops = pnv_npu_ioda_controller_ops; | ||
3368 | else | ||
3369 | hose->controller_ops = pnv_pci_ioda_controller_ops; | ||
3205 | 3370 | ||
3206 | #ifdef CONFIG_PCI_IOV | 3371 | #ifdef CONFIG_PCI_IOV |
3207 | ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources; | 3372 | ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources; |
@@ -3236,6 +3401,11 @@ void __init pnv_pci_init_ioda2_phb(struct device_node *np) | |||
3236 | pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2); | 3401 | pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2); |
3237 | } | 3402 | } |
3238 | 3403 | ||
3404 | void __init pnv_pci_init_npu_phb(struct device_node *np) | ||
3405 | { | ||
3406 | pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU); | ||
3407 | } | ||
3408 | |||
3239 | void __init pnv_pci_init_ioda_hub(struct device_node *np) | 3409 | void __init pnv_pci_init_ioda_hub(struct device_node *np) |
3240 | { | 3410 | { |
3241 | struct device_node *phbn; | 3411 | struct device_node *phbn; |
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index f2dd77234240..2f55c86df703 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
@@ -1,8 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Support PCI/PCIe on PowerNV platforms | 2 | * Support PCI/PCIe on PowerNV platforms |
3 | * | 3 | * |
4 | * Currently supports only P5IOC2 | ||
5 | * | ||
6 | * Copyright 2011 Benjamin Herrenschmidt, IBM Corp. | 4 | * Copyright 2011 Benjamin Herrenschmidt, IBM Corp. |
7 | * | 5 | * |
8 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -807,6 +805,10 @@ void __init pnv_pci_init(void) | |||
807 | for_each_compatible_node(np, NULL, "ibm,ioda2-phb") | 805 | for_each_compatible_node(np, NULL, "ibm,ioda2-phb") |
808 | pnv_pci_init_ioda2_phb(np); | 806 | pnv_pci_init_ioda2_phb(np); |
809 | 807 | ||
808 | /* Look for NPU PHBs */ | ||
809 | for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb") | ||
810 | pnv_pci_init_npu_phb(np); | ||
811 | |||
810 | /* Setup the linkage between OF nodes and PHBs */ | 812 | /* Setup the linkage between OF nodes and PHBs */ |
811 | pci_devs_phb_init(); | 813 | pci_devs_phb_init(); |
812 | 814 | ||
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index c8ff50e90766..7f56313e8d72 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h | |||
@@ -7,6 +7,7 @@ enum pnv_phb_type { | |||
7 | PNV_PHB_P5IOC2 = 0, | 7 | PNV_PHB_P5IOC2 = 0, |
8 | PNV_PHB_IODA1 = 1, | 8 | PNV_PHB_IODA1 = 1, |
9 | PNV_PHB_IODA2 = 2, | 9 | PNV_PHB_IODA2 = 2, |
10 | PNV_PHB_NPU = 3, | ||
10 | }; | 11 | }; |
11 | 12 | ||
12 | /* Precise PHB model for error management */ | 13 | /* Precise PHB model for error management */ |
@@ -15,6 +16,7 @@ enum pnv_phb_model { | |||
15 | PNV_PHB_MODEL_P5IOC2, | 16 | PNV_PHB_MODEL_P5IOC2, |
16 | PNV_PHB_MODEL_P7IOC, | 17 | PNV_PHB_MODEL_P7IOC, |
17 | PNV_PHB_MODEL_PHB3, | 18 | PNV_PHB_MODEL_PHB3, |
19 | PNV_PHB_MODEL_NPU, | ||
18 | }; | 20 | }; |
19 | 21 | ||
20 | #define PNV_PCI_DIAG_BUF_SIZE 8192 | 22 | #define PNV_PCI_DIAG_BUF_SIZE 8192 |
@@ -24,6 +26,7 @@ enum pnv_phb_model { | |||
24 | #define PNV_IODA_PE_MASTER (1 << 3) /* Master PE in compound case */ | 26 | #define PNV_IODA_PE_MASTER (1 << 3) /* Master PE in compound case */ |
25 | #define PNV_IODA_PE_SLAVE (1 << 4) /* Slave PE in compound case */ | 27 | #define PNV_IODA_PE_SLAVE (1 << 4) /* Slave PE in compound case */ |
26 | #define PNV_IODA_PE_VF (1 << 5) /* PE for one VF */ | 28 | #define PNV_IODA_PE_VF (1 << 5) /* PE for one VF */ |
29 | #define PNV_IODA_PE_PEER (1 << 6) /* PE has peers */ | ||
27 | 30 | ||
28 | /* Data associated with a PE, including IOMMU tracking etc.. */ | 31 | /* Data associated with a PE, including IOMMU tracking etc.. */ |
29 | struct pnv_phb; | 32 | struct pnv_phb; |
@@ -31,6 +34,9 @@ struct pnv_ioda_pe { | |||
31 | unsigned long flags; | 34 | unsigned long flags; |
32 | struct pnv_phb *phb; | 35 | struct pnv_phb *phb; |
33 | 36 | ||
37 | #define PNV_IODA_MAX_PEER_PES 8 | ||
38 | struct pnv_ioda_pe *peers[PNV_IODA_MAX_PEER_PES]; | ||
39 | |||
34 | /* A PE can be associated with a single device or an | 40 | /* A PE can be associated with a single device or an |
35 | * entire bus (& children). In the former case, pdev | 41 | * entire bus (& children). In the former case, pdev |
36 | * is populated, in the later case, pbus is. | 42 | * is populated, in the later case, pbus is. |
@@ -229,6 +235,7 @@ extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl, | |||
229 | extern void pnv_pci_init_p5ioc2_hub(struct device_node *np); | 235 | extern void pnv_pci_init_p5ioc2_hub(struct device_node *np); |
230 | extern void pnv_pci_init_ioda_hub(struct device_node *np); | 236 | extern void pnv_pci_init_ioda_hub(struct device_node *np); |
231 | extern void pnv_pci_init_ioda2_phb(struct device_node *np); | 237 | extern void pnv_pci_init_ioda2_phb(struct device_node *np); |
238 | extern void pnv_pci_init_npu_phb(struct device_node *np); | ||
232 | extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, | 239 | extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, |
233 | __be64 *startp, __be64 *endp, bool rm); | 240 | __be64 *startp, __be64 *endp, bool rm); |
234 | extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev); | 241 | extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev); |
@@ -238,4 +245,16 @@ extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev); | |||
238 | extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type); | 245 | extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type); |
239 | extern void pnv_teardown_msi_irqs(struct pci_dev *pdev); | 246 | extern void pnv_teardown_msi_irqs(struct pci_dev *pdev); |
240 | 247 | ||
248 | /* Nvlink functions */ | ||
249 | extern void pnv_npu_tce_invalidate_entire(struct pnv_ioda_pe *npe); | ||
250 | extern void pnv_npu_tce_invalidate(struct pnv_ioda_pe *npe, | ||
251 | struct iommu_table *tbl, | ||
252 | unsigned long index, | ||
253 | unsigned long npages, | ||
254 | bool rm); | ||
255 | extern void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe); | ||
256 | extern void pnv_npu_setup_dma_pe(struct pnv_ioda_pe *npe); | ||
257 | extern int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe, bool enabled); | ||
258 | extern int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask); | ||
259 | |||
241 | #endif /* __POWERNV_PCI_H */ | 260 | #endif /* __POWERNV_PCI_H */ |
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index a9a8fa37a555..1acb0c72d923 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
@@ -90,12 +90,8 @@ static void pnv_show_cpuinfo(struct seq_file *m) | |||
90 | if (root) | 90 | if (root) |
91 | model = of_get_property(root, "model", NULL); | 91 | model = of_get_property(root, "model", NULL); |
92 | seq_printf(m, "machine\t\t: PowerNV %s\n", model); | 92 | seq_printf(m, "machine\t\t: PowerNV %s\n", model); |
93 | if (firmware_has_feature(FW_FEATURE_OPALv3)) | 93 | if (firmware_has_feature(FW_FEATURE_OPAL)) |
94 | seq_printf(m, "firmware\t: OPAL v3\n"); | 94 | seq_printf(m, "firmware\t: OPAL\n"); |
95 | else if (firmware_has_feature(FW_FEATURE_OPALv2)) | ||
96 | seq_printf(m, "firmware\t: OPAL v2\n"); | ||
97 | else if (firmware_has_feature(FW_FEATURE_OPAL)) | ||
98 | seq_printf(m, "firmware\t: OPAL v1\n"); | ||
99 | else | 95 | else |
100 | seq_printf(m, "firmware\t: BML\n"); | 96 | seq_printf(m, "firmware\t: BML\n"); |
101 | of_node_put(root); | 97 | of_node_put(root); |
@@ -224,9 +220,9 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) | |||
224 | { | 220 | { |
225 | xics_kexec_teardown_cpu(secondary); | 221 | xics_kexec_teardown_cpu(secondary); |
226 | 222 | ||
227 | /* On OPAL v3, we return all CPUs to firmware */ | 223 | /* On OPAL, we return all CPUs to firmware */ |
228 | 224 | ||
229 | if (!firmware_has_feature(FW_FEATURE_OPALv3)) | 225 | if (!firmware_has_feature(FW_FEATURE_OPAL)) |
230 | return; | 226 | return; |
231 | 227 | ||
232 | if (secondary) { | 228 | if (secondary) { |
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index ca264833ee64..ad7b1a3dbed0 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c | |||
@@ -61,14 +61,15 @@ static int pnv_smp_kick_cpu(int nr) | |||
61 | unsigned long start_here = | 61 | unsigned long start_here = |
62 | __pa(ppc_function_entry(generic_secondary_smp_init)); | 62 | __pa(ppc_function_entry(generic_secondary_smp_init)); |
63 | long rc; | 63 | long rc; |
64 | uint8_t status; | ||
64 | 65 | ||
65 | BUG_ON(nr < 0 || nr >= NR_CPUS); | 66 | BUG_ON(nr < 0 || nr >= NR_CPUS); |
66 | 67 | ||
67 | /* | 68 | /* |
68 | * If we already started or OPALv2 is not supported, we just | 69 | * If we already started or OPAL is not supported, we just |
69 | * kick the CPU via the PACA | 70 | * kick the CPU via the PACA |
70 | */ | 71 | */ |
71 | if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2)) | 72 | if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPAL)) |
72 | goto kick; | 73 | goto kick; |
73 | 74 | ||
74 | /* | 75 | /* |
@@ -77,55 +78,42 @@ static int pnv_smp_kick_cpu(int nr) | |||
77 | * first time. OPAL v3 allows us to query OPAL to know if it | 78 | * first time. OPAL v3 allows us to query OPAL to know if it |
78 | * has the CPUs, so we do that | 79 | * has the CPUs, so we do that |
79 | */ | 80 | */ |
80 | if (firmware_has_feature(FW_FEATURE_OPALv3)) { | 81 | rc = opal_query_cpu_status(pcpu, &status); |
81 | uint8_t status; | 82 | if (rc != OPAL_SUCCESS) { |
82 | 83 | pr_warn("OPAL Error %ld querying CPU %d state\n", rc, nr); | |
83 | rc = opal_query_cpu_status(pcpu, &status); | 84 | return -ENODEV; |
84 | if (rc != OPAL_SUCCESS) { | 85 | } |
85 | pr_warn("OPAL Error %ld querying CPU %d state\n", | ||
86 | rc, nr); | ||
87 | return -ENODEV; | ||
88 | } | ||
89 | 86 | ||
90 | /* | 87 | /* |
91 | * Already started, just kick it, probably coming from | 88 | * Already started, just kick it, probably coming from |
92 | * kexec and spinning | 89 | * kexec and spinning |
93 | */ | 90 | */ |
94 | if (status == OPAL_THREAD_STARTED) | 91 | if (status == OPAL_THREAD_STARTED) |
95 | goto kick; | 92 | goto kick; |
96 | 93 | ||
97 | /* | 94 | /* |
98 | * Available/inactive, let's kick it | 95 | * Available/inactive, let's kick it |
99 | */ | 96 | */ |
100 | if (status == OPAL_THREAD_INACTIVE) { | 97 | if (status == OPAL_THREAD_INACTIVE) { |
101 | pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", | 98 | pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu); |
102 | nr, pcpu); | 99 | rc = opal_start_cpu(pcpu, start_here); |
103 | rc = opal_start_cpu(pcpu, start_here); | 100 | if (rc != OPAL_SUCCESS) { |
104 | if (rc != OPAL_SUCCESS) { | 101 | pr_warn("OPAL Error %ld starting CPU %d\n", rc, nr); |
105 | pr_warn("OPAL Error %ld starting CPU %d\n", | ||
106 | rc, nr); | ||
107 | return -ENODEV; | ||
108 | } | ||
109 | } else { | ||
110 | /* | ||
111 | * An unavailable CPU (or any other unknown status) | ||
112 | * shouldn't be started. It should also | ||
113 | * not be in the possible map but currently it can | ||
114 | * happen | ||
115 | */ | ||
116 | pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable" | ||
117 | " (status %d)...\n", nr, pcpu, status); | ||
118 | return -ENODEV; | 102 | return -ENODEV; |
119 | } | 103 | } |
120 | } else { | 104 | } else { |
121 | /* | 105 | /* |
122 | * On OPAL v2, we just kick it and hope for the best, | 106 | * An unavailable CPU (or any other unknown status) |
123 | * we must not test the error from opal_start_cpu() or | 107 | * shouldn't be started. It should also |
124 | * we would fail to get CPUs from kexec. | 108 | * not be in the possible map but currently it can |
109 | * happen | ||
125 | */ | 110 | */ |
126 | opal_start_cpu(pcpu, start_here); | 111 | pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable" |
112 | " (status %d)...\n", nr, pcpu, status); | ||
113 | return -ENODEV; | ||
127 | } | 114 | } |
128 | kick: | 115 | |
116 | kick: | ||
129 | return smp_generic_kick_cpu(nr); | 117 | return smp_generic_kick_cpu(nr); |
130 | } | 118 | } |
131 | 119 | ||
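Note on the hunk above: with the OPAL v2 fallback gone, every secondary kick now goes through opal_query_cpu_status() first, and the returned status alone decides what happens next. A condensed restatement of that flow (illustrative only; every identifier is taken from the hunk itself):

	switch (status) {
	case OPAL_THREAD_STARTED:
		/* Already spinning, probably from kexec: just kick the PACA */
		break;
	case OPAL_THREAD_INACTIVE:
		/* Held in firmware: ask OPAL to start it at the kernel entry */
		rc = opal_start_cpu(pcpu, start_here);
		break;
	default:
		/* Unavailable or unknown status: refuse to start it */
		return -ENODEV;
	}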
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index f244dcb4f2cf..2b93ae8d557a 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/of.h> | 20 | #include <linux/of.h> |
21 | 21 | ||
22 | #include "of_helpers.h" | 22 | #include "of_helpers.h" |
23 | #include "offline_states.h" | ||
24 | #include "pseries.h" | 23 | #include "pseries.h" |
25 | 24 | ||
26 | #include <asm/prom.h> | 25 | #include <asm/prom.h> |
@@ -338,185 +337,6 @@ int dlpar_release_drc(u32 drc_index) | |||
338 | return 0; | 337 | return 0; |
339 | } | 338 | } |
340 | 339 | ||
341 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE | ||
342 | |||
343 | static int dlpar_online_cpu(struct device_node *dn) | ||
344 | { | ||
345 | int rc = 0; | ||
346 | unsigned int cpu; | ||
347 | int len, nthreads, i; | ||
348 | const __be32 *intserv; | ||
349 | u32 thread; | ||
350 | |||
351 | intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); | ||
352 | if (!intserv) | ||
353 | return -EINVAL; | ||
354 | |||
355 | nthreads = len / sizeof(u32); | ||
356 | |||
357 | cpu_maps_update_begin(); | ||
358 | for (i = 0; i < nthreads; i++) { | ||
359 | thread = be32_to_cpu(intserv[i]); | ||
360 | for_each_present_cpu(cpu) { | ||
361 | if (get_hard_smp_processor_id(cpu) != thread) | ||
362 | continue; | ||
363 | BUG_ON(get_cpu_current_state(cpu) | ||
364 | != CPU_STATE_OFFLINE); | ||
365 | cpu_maps_update_done(); | ||
366 | rc = device_online(get_cpu_device(cpu)); | ||
367 | if (rc) | ||
368 | goto out; | ||
369 | cpu_maps_update_begin(); | ||
370 | |||
371 | break; | ||
372 | } | ||
373 | if (cpu == num_possible_cpus()) | ||
374 | printk(KERN_WARNING "Could not find cpu to online " | ||
375 | "with physical id 0x%x\n", thread); | ||
376 | } | ||
377 | cpu_maps_update_done(); | ||
378 | |||
379 | out: | ||
380 | return rc; | ||
381 | |||
382 | } | ||
383 | |||
384 | static ssize_t dlpar_cpu_probe(const char *buf, size_t count) | ||
385 | { | ||
386 | struct device_node *dn, *parent; | ||
387 | u32 drc_index; | ||
388 | int rc; | ||
389 | |||
390 | rc = kstrtou32(buf, 0, &drc_index); | ||
391 | if (rc) | ||
392 | return -EINVAL; | ||
393 | |||
394 | rc = dlpar_acquire_drc(drc_index); | ||
395 | if (rc) | ||
396 | return -EINVAL; | ||
397 | |||
398 | parent = of_find_node_by_path("/cpus"); | ||
399 | if (!parent) | ||
400 | return -ENODEV; | ||
401 | |||
402 | dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent); | ||
403 | of_node_put(parent); | ||
404 | if (!dn) { | ||
405 | dlpar_release_drc(drc_index); | ||
406 | return -EINVAL; | ||
407 | } | ||
408 | |||
409 | rc = dlpar_attach_node(dn); | ||
410 | if (rc) { | ||
411 | dlpar_release_drc(drc_index); | ||
412 | dlpar_free_cc_nodes(dn); | ||
413 | return rc; | ||
414 | } | ||
415 | |||
416 | rc = dlpar_online_cpu(dn); | ||
417 | if (rc) | ||
418 | return rc; | ||
419 | |||
420 | return count; | ||
421 | } | ||
422 | |||
423 | static int dlpar_offline_cpu(struct device_node *dn) | ||
424 | { | ||
425 | int rc = 0; | ||
426 | unsigned int cpu; | ||
427 | int len, nthreads, i; | ||
428 | const __be32 *intserv; | ||
429 | u32 thread; | ||
430 | |||
431 | intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); | ||
432 | if (!intserv) | ||
433 | return -EINVAL; | ||
434 | |||
435 | nthreads = len / sizeof(u32); | ||
436 | |||
437 | cpu_maps_update_begin(); | ||
438 | for (i = 0; i < nthreads; i++) { | ||
439 | thread = be32_to_cpu(intserv[i]); | ||
440 | for_each_present_cpu(cpu) { | ||
441 | if (get_hard_smp_processor_id(cpu) != thread) | ||
442 | continue; | ||
443 | |||
444 | if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE) | ||
445 | break; | ||
446 | |||
447 | if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) { | ||
448 | set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); | ||
449 | cpu_maps_update_done(); | ||
450 | rc = device_offline(get_cpu_device(cpu)); | ||
451 | if (rc) | ||
452 | goto out; | ||
453 | cpu_maps_update_begin(); | ||
454 | break; | ||
455 | |||
456 | } | ||
457 | |||
458 | /* | ||
459 | * The cpu is in CPU_STATE_INACTIVE. | ||
460 | * Upgrade it's state to CPU_STATE_OFFLINE. | ||
461 | */ | ||
462 | set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); | ||
463 | BUG_ON(plpar_hcall_norets(H_PROD, thread) | ||
464 | != H_SUCCESS); | ||
465 | __cpu_die(cpu); | ||
466 | break; | ||
467 | } | ||
468 | if (cpu == num_possible_cpus()) | ||
469 | printk(KERN_WARNING "Could not find cpu to offline " | ||
470 | "with physical id 0x%x\n", thread); | ||
471 | } | ||
472 | cpu_maps_update_done(); | ||
473 | |||
474 | out: | ||
475 | return rc; | ||
476 | |||
477 | } | ||
478 | |||
479 | static ssize_t dlpar_cpu_release(const char *buf, size_t count) | ||
480 | { | ||
481 | struct device_node *dn; | ||
482 | u32 drc_index; | ||
483 | int rc; | ||
484 | |||
485 | dn = of_find_node_by_path(buf); | ||
486 | if (!dn) | ||
487 | return -EINVAL; | ||
488 | |||
489 | rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index); | ||
490 | if (rc) { | ||
491 | of_node_put(dn); | ||
492 | return -EINVAL; | ||
493 | } | ||
494 | |||
495 | rc = dlpar_offline_cpu(dn); | ||
496 | if (rc) { | ||
497 | of_node_put(dn); | ||
498 | return -EINVAL; | ||
499 | } | ||
500 | |||
501 | rc = dlpar_release_drc(drc_index); | ||
502 | if (rc) { | ||
503 | of_node_put(dn); | ||
504 | return rc; | ||
505 | } | ||
506 | |||
507 | rc = dlpar_detach_node(dn); | ||
508 | if (rc) { | ||
509 | dlpar_acquire_drc(drc_index); | ||
510 | return rc; | ||
511 | } | ||
512 | |||
513 | of_node_put(dn); | ||
514 | |||
515 | return count; | ||
516 | } | ||
517 | |||
518 | #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ | ||
519 | |||
520 | static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) | 340 | static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) |
521 | { | 341 | { |
522 | int rc; | 342 | int rc; |
@@ -536,6 +356,9 @@ static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) | |||
536 | case PSERIES_HP_ELOG_RESOURCE_MEM: | 356 | case PSERIES_HP_ELOG_RESOURCE_MEM: |
537 | rc = dlpar_memory(hp_elog); | 357 | rc = dlpar_memory(hp_elog); |
538 | break; | 358 | break; |
359 | case PSERIES_HP_ELOG_RESOURCE_CPU: | ||
360 | rc = dlpar_cpu(hp_elog); | ||
361 | break; | ||
539 | default: | 362 | default: |
540 | pr_warn_ratelimited("Invalid resource (%d) specified\n", | 363 | pr_warn_ratelimited("Invalid resource (%d) specified\n", |
541 | hp_elog->resource); | 364 | hp_elog->resource); |
@@ -565,6 +388,9 @@ static ssize_t dlpar_store(struct class *class, struct class_attribute *attr, | |||
565 | if (!strncmp(arg, "memory", 6)) { | 388 | if (!strncmp(arg, "memory", 6)) { |
566 | hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM; | 389 | hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM; |
567 | arg += strlen("memory "); | 390 | arg += strlen("memory "); |
391 | } else if (!strncmp(arg, "cpu", 3)) { | ||
392 | hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU; | ||
393 | arg += strlen("cpu "); | ||
568 | } else { | 394 | } else { |
569 | pr_err("Invalid resource specified: \"%s\"\n", buf); | 395 | pr_err("Invalid resource specified: \"%s\"\n", buf); |
570 | rc = -EINVAL; | 396 | rc = -EINVAL; |
@@ -624,16 +450,7 @@ static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store); | |||
624 | 450 | ||
625 | static int __init pseries_dlpar_init(void) | 451 | static int __init pseries_dlpar_init(void) |
626 | { | 452 | { |
627 | int rc; | 453 | return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr); |
628 | |||
629 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE | ||
630 | ppc_md.cpu_probe = dlpar_cpu_probe; | ||
631 | ppc_md.cpu_release = dlpar_cpu_release; | ||
632 | #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ | ||
633 | |||
634 | rc = sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr); | ||
635 | |||
636 | return rc; | ||
637 | } | 454 | } |
638 | machine_device_initcall(pseries, pseries_dlpar_init); | 455 | machine_device_initcall(pseries, pseries_dlpar_init); |
639 | 456 | ||
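With "cpu" now accepted by dlpar_store(), CPU hotplug requests flow through the same pseries_hp_errorlog path as memory. A rough illustration of how such a request is shaped before it reaches dlpar_cpu() — the struct fields and constants are the ones handled in the hunks above, while the wrapper function itself is hypothetical:

	/* Hypothetical caller: build a single-CPU hot-add request */
	static int example_hot_add_cpu(u32 drc_index)
	{
		struct pseries_hp_errorlog hp_elog = {
			.resource = PSERIES_HP_ELOG_RESOURCE_CPU,
			.action   = PSERIES_HP_ELOG_ACTION_ADD,
			.id_type  = PSERIES_HP_ELOG_ID_DRC_INDEX,
		};

		hp_elog._drc_u.drc_index = drc_index;

		/* dlpar_cpu() takes lock_device_hotplug() itself */
		return dlpar_cpu(&hp_elog);
	}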
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 62475440fd45..32274f72fe3f 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c | |||
@@ -18,12 +18,15 @@ | |||
18 | * 2 of the License, or (at your option) any later version. | 18 | * 2 of the License, or (at your option) any later version. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt | ||
22 | |||
21 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
22 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
23 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
24 | #include <linux/sched.h> /* for idle_task_exit */ | 26 | #include <linux/sched.h> /* for idle_task_exit */ |
25 | #include <linux/cpu.h> | 27 | #include <linux/cpu.h> |
26 | #include <linux/of.h> | 28 | #include <linux/of.h> |
29 | #include <linux/slab.h> | ||
27 | #include <asm/prom.h> | 30 | #include <asm/prom.h> |
28 | #include <asm/rtas.h> | 31 | #include <asm/rtas.h> |
29 | #include <asm/firmware.h> | 32 | #include <asm/firmware.h> |
@@ -32,6 +35,7 @@ | |||
32 | #include <asm/xics.h> | 35 | #include <asm/xics.h> |
33 | #include <asm/plpar_wrappers.h> | 36 | #include <asm/plpar_wrappers.h> |
34 | 37 | ||
38 | #include "pseries.h" | ||
35 | #include "offline_states.h" | 39 | #include "offline_states.h" |
36 | 40 | ||
37 | /* This version can't take the spinlock, because it never returns */ | 41 | /* This version can't take the spinlock, because it never returns */ |
@@ -88,13 +92,7 @@ void set_default_offline_state(int cpu) | |||
88 | 92 | ||
89 | static void rtas_stop_self(void) | 93 | static void rtas_stop_self(void) |
90 | { | 94 | { |
91 | static struct rtas_args args = { | 95 | static struct rtas_args args; |
92 | .nargs = 0, | ||
93 | .nret = cpu_to_be32(1), | ||
94 | .rets = &args.args[0], | ||
95 | }; | ||
96 | |||
97 | args.token = cpu_to_be32(rtas_stop_self_token); | ||
98 | 96 | ||
99 | local_irq_disable(); | 97 | local_irq_disable(); |
100 | 98 | ||
@@ -102,7 +100,8 @@ static void rtas_stop_self(void) | |||
102 | 100 | ||
103 | printk("cpu %u (hwid %u) Ready to die...\n", | 101 | printk("cpu %u (hwid %u) Ready to die...\n", |
104 | smp_processor_id(), hard_smp_processor_id()); | 102 | smp_processor_id(), hard_smp_processor_id()); |
105 | enter_rtas(__pa(&args)); | 103 | |
104 | rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL); | ||
106 | 105 | ||
107 | panic("Alas, I survived.\n"); | 106 | panic("Alas, I survived.\n"); |
108 | } | 107 | } |
@@ -339,6 +338,536 @@ static void pseries_remove_processor(struct device_node *np) | |||
339 | cpu_maps_update_done(); | 338 | cpu_maps_update_done(); |
340 | } | 339 | } |
341 | 340 | ||
341 | static int dlpar_online_cpu(struct device_node *dn) | ||
342 | { | ||
343 | int rc = 0; | ||
344 | unsigned int cpu; | ||
345 | int len, nthreads, i; | ||
346 | const __be32 *intserv; | ||
347 | u32 thread; | ||
348 | |||
349 | intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); | ||
350 | if (!intserv) | ||
351 | return -EINVAL; | ||
352 | |||
353 | nthreads = len / sizeof(u32); | ||
354 | |||
355 | cpu_maps_update_begin(); | ||
356 | for (i = 0; i < nthreads; i++) { | ||
357 | thread = be32_to_cpu(intserv[i]); | ||
358 | for_each_present_cpu(cpu) { | ||
359 | if (get_hard_smp_processor_id(cpu) != thread) | ||
360 | continue; | ||
361 | BUG_ON(get_cpu_current_state(cpu) | ||
362 | != CPU_STATE_OFFLINE); | ||
363 | cpu_maps_update_done(); | ||
364 | rc = device_online(get_cpu_device(cpu)); | ||
365 | if (rc) | ||
366 | goto out; | ||
367 | cpu_maps_update_begin(); | ||
368 | |||
369 | break; | ||
370 | } | ||
371 | if (cpu == num_possible_cpus()) | ||
372 | printk(KERN_WARNING "Could not find cpu to online " | ||
373 | "with physical id 0x%x\n", thread); | ||
374 | } | ||
375 | cpu_maps_update_done(); | ||
376 | |||
377 | out: | ||
378 | return rc; | ||
379 | |||
380 | } | ||
381 | |||
382 | static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index) | ||
383 | { | ||
384 | struct device_node *child = NULL; | ||
385 | u32 my_drc_index; | ||
386 | bool found; | ||
387 | int rc; | ||
388 | |||
389 | /* Assume cpu doesn't exist */ | ||
390 | found = false; | ||
391 | |||
392 | for_each_child_of_node(parent, child) { | ||
393 | rc = of_property_read_u32(child, "ibm,my-drc-index", | ||
394 | &my_drc_index); | ||
395 | if (rc) | ||
396 | continue; | ||
397 | |||
398 | if (my_drc_index == drc_index) { | ||
399 | of_node_put(child); | ||
400 | found = true; | ||
401 | break; | ||
402 | } | ||
403 | } | ||
404 | |||
405 | return found; | ||
406 | } | ||
407 | |||
408 | static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index) | ||
409 | { | ||
410 | bool found = false; | ||
411 | int rc, index; | ||
412 | |||
413 | index = 0; | ||
414 | while (!found) { | ||
415 | u32 drc; | ||
416 | |||
417 | rc = of_property_read_u32_index(parent, "ibm,drc-indexes", | ||
418 | index++, &drc); | ||
419 | if (rc) | ||
420 | break; | ||
421 | |||
422 | if (drc == drc_index) | ||
423 | found = true; | ||
424 | } | ||
425 | |||
426 | return found; | ||
427 | } | ||
428 | |||
429 | static ssize_t dlpar_cpu_add(u32 drc_index) | ||
430 | { | ||
431 | struct device_node *dn, *parent; | ||
432 | int rc, saved_rc; | ||
433 | |||
434 | pr_debug("Attempting to add CPU, drc index: %x\n", drc_index); | ||
435 | |||
436 | parent = of_find_node_by_path("/cpus"); | ||
437 | if (!parent) { | ||
438 | pr_warn("Failed to find CPU root node \"/cpus\"\n"); | ||
439 | return -ENODEV; | ||
440 | } | ||
441 | |||
442 | if (dlpar_cpu_exists(parent, drc_index)) { | ||
443 | of_node_put(parent); | ||
444 | pr_warn("CPU with drc index %x already exists\n", drc_index); | ||
445 | return -EINVAL; | ||
446 | } | ||
447 | |||
448 | if (!valid_cpu_drc_index(parent, drc_index)) { | ||
449 | of_node_put(parent); | ||
450 | pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index); | ||
451 | return -EINVAL; | ||
452 | } | ||
453 | |||
454 | rc = dlpar_acquire_drc(drc_index); | ||
455 | if (rc) { | ||
456 | pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n", | ||
457 | rc, drc_index); | ||
458 | of_node_put(parent); | ||
459 | return -EINVAL; | ||
460 | } | ||
461 | |||
462 | dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent); | ||
463 | of_node_put(parent); | ||
464 | if (!dn) { | ||
465 | pr_warn("Failed call to configure-connector, drc index: %x\n", | ||
466 | drc_index); | ||
467 | dlpar_release_drc(drc_index); | ||
468 | return -EINVAL; | ||
469 | } | ||
470 | |||
471 | rc = dlpar_attach_node(dn); | ||
472 | if (rc) { | ||
473 | saved_rc = rc; | ||
474 | pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n", | ||
475 | dn->name, rc, drc_index); | ||
476 | |||
477 | rc = dlpar_release_drc(drc_index); | ||
478 | if (!rc) | ||
479 | dlpar_free_cc_nodes(dn); | ||
480 | |||
481 | return saved_rc; | ||
482 | } | ||
483 | |||
484 | rc = dlpar_online_cpu(dn); | ||
485 | if (rc) { | ||
486 | saved_rc = rc; | ||
487 | pr_warn("Failed to online cpu %s, rc: %d, drc index: %x\n", | ||
488 | dn->name, rc, drc_index); | ||
489 | |||
490 | rc = dlpar_detach_node(dn); | ||
491 | if (!rc) | ||
492 | dlpar_release_drc(drc_index); | ||
493 | |||
494 | return saved_rc; | ||
495 | } | ||
496 | |||
497 | pr_debug("Successfully added CPU %s, drc index: %x\n", dn->name, | ||
498 | drc_index); | ||
499 | return rc; | ||
500 | } | ||
501 | |||
502 | static int dlpar_offline_cpu(struct device_node *dn) | ||
503 | { | ||
504 | int rc = 0; | ||
505 | unsigned int cpu; | ||
506 | int len, nthreads, i; | ||
507 | const __be32 *intserv; | ||
508 | u32 thread; | ||
509 | |||
510 | intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); | ||
511 | if (!intserv) | ||
512 | return -EINVAL; | ||
513 | |||
514 | nthreads = len / sizeof(u32); | ||
515 | |||
516 | cpu_maps_update_begin(); | ||
517 | for (i = 0; i < nthreads; i++) { | ||
518 | thread = be32_to_cpu(intserv[i]); | ||
519 | for_each_present_cpu(cpu) { | ||
520 | if (get_hard_smp_processor_id(cpu) != thread) | ||
521 | continue; | ||
522 | |||
523 | if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE) | ||
524 | break; | ||
525 | |||
526 | if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) { | ||
527 | set_preferred_offline_state(cpu, | ||
528 | CPU_STATE_OFFLINE); | ||
529 | cpu_maps_update_done(); | ||
530 | rc = device_offline(get_cpu_device(cpu)); | ||
531 | if (rc) | ||
532 | goto out; | ||
533 | cpu_maps_update_begin(); | ||
534 | break; | ||
535 | |||
536 | } | ||
537 | |||
538 | /* | ||
539 | * The cpu is in CPU_STATE_INACTIVE. | ||
540 | * Upgrade it's state to CPU_STATE_OFFLINE. | ||
541 | */ | ||
542 | set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); | ||
543 | BUG_ON(plpar_hcall_norets(H_PROD, thread) | ||
544 | != H_SUCCESS); | ||
545 | __cpu_die(cpu); | ||
546 | break; | ||
547 | } | ||
548 | if (cpu == num_possible_cpus()) | ||
549 | printk(KERN_WARNING "Could not find cpu to offline with physical id 0x%x\n", thread); | ||
550 | } | ||
551 | cpu_maps_update_done(); | ||
552 | |||
553 | out: | ||
554 | return rc; | ||
555 | |||
556 | } | ||
557 | |||
558 | static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index) | ||
559 | { | ||
560 | int rc; | ||
561 | |||
562 | pr_debug("Attemping to remove CPU %s, drc index: %x\n", | ||
563 | dn->name, drc_index); | ||
564 | |||
565 | rc = dlpar_offline_cpu(dn); | ||
566 | if (rc) { | ||
567 | pr_warn("Failed to offline CPU %s, rc: %d\n", dn->name, rc); | ||
568 | return -EINVAL; | ||
569 | } | ||
570 | |||
571 | rc = dlpar_release_drc(drc_index); | ||
572 | if (rc) { | ||
573 | pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n", | ||
574 | drc_index, dn->name, rc); | ||
575 | dlpar_online_cpu(dn); | ||
576 | return rc; | ||
577 | } | ||
578 | |||
579 | rc = dlpar_detach_node(dn); | ||
580 | if (rc) { | ||
581 | int saved_rc = rc; | ||
582 | |||
583 | pr_warn("Failed to detach CPU %s, rc: %d\n", dn->name, rc); | ||
584 | |||
585 | rc = dlpar_acquire_drc(drc_index); | ||
586 | if (!rc) | ||
587 | dlpar_online_cpu(dn); | ||
588 | |||
589 | return saved_rc; | ||
590 | } | ||
591 | |||
592 | pr_debug("Successfully removed CPU, drc index: %x\n", drc_index); | ||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | static struct device_node *cpu_drc_index_to_dn(u32 drc_index) | ||
597 | { | ||
598 | struct device_node *dn; | ||
599 | u32 my_index; | ||
600 | int rc; | ||
601 | |||
602 | for_each_node_by_type(dn, "cpu") { | ||
603 | rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index); | ||
604 | if (rc) | ||
605 | continue; | ||
606 | |||
607 | if (my_index == drc_index) | ||
608 | break; | ||
609 | } | ||
610 | |||
611 | return dn; | ||
612 | } | ||
613 | |||
614 | static int dlpar_cpu_remove_by_index(u32 drc_index) | ||
615 | { | ||
616 | struct device_node *dn; | ||
617 | int rc; | ||
618 | |||
619 | dn = cpu_drc_index_to_dn(drc_index); | ||
620 | if (!dn) { | ||
621 | pr_warn("Cannot find CPU (drc index %x) to remove\n", | ||
622 | drc_index); | ||
623 | return -ENODEV; | ||
624 | } | ||
625 | |||
626 | rc = dlpar_cpu_remove(dn, drc_index); | ||
627 | of_node_put(dn); | ||
628 | return rc; | ||
629 | } | ||
630 | |||
631 | static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove) | ||
632 | { | ||
633 | struct device_node *dn; | ||
634 | int cpus_found = 0; | ||
635 | int rc; | ||
636 | |||
637 | /* We want to find cpus_to_remove + 1 CPUs to ensure we do not | ||
638 | * remove the last CPU. | ||
639 | */ | ||
640 | for_each_node_by_type(dn, "cpu") { | ||
641 | cpus_found++; | ||
642 | |||
643 | if (cpus_found > cpus_to_remove) { | ||
644 | of_node_put(dn); | ||
645 | break; | ||
646 | } | ||
647 | |||
648 | /* Note that cpus_found is always 1 ahead of the index | ||
649 | * into the cpu_drcs array, so we use cpus_found - 1 | ||
650 | */ | ||
651 | rc = of_property_read_u32(dn, "ibm,my-drc-index", | ||
652 | &cpu_drcs[cpus_found - 1]); | ||
653 | if (rc) { | ||
654 | pr_warn("Error occurred getting drc-index for %s\n", | ||
655 | dn->name); | ||
656 | of_node_put(dn); | ||
657 | return -1; | ||
658 | } | ||
659 | } | ||
660 | |||
661 | if (cpus_found < cpus_to_remove) { | ||
662 | pr_warn("Failed to find enough CPUs (%d of %d) to remove\n", | ||
663 | cpus_found, cpus_to_remove); | ||
664 | } else if (cpus_found == cpus_to_remove) { | ||
665 | pr_warn("Cannot remove all CPUs\n"); | ||
666 | } | ||
667 | |||
668 | return cpus_found; | ||
669 | } | ||
670 | |||
671 | static int dlpar_cpu_remove_by_count(u32 cpus_to_remove) | ||
672 | { | ||
673 | u32 *cpu_drcs; | ||
674 | int cpus_found; | ||
675 | int cpus_removed = 0; | ||
676 | int i, rc; | ||
677 | |||
678 | pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove); | ||
679 | |||
680 | cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL); | ||
681 | if (!cpu_drcs) | ||
682 | return -EINVAL; | ||
683 | |||
684 | cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove); | ||
685 | if (cpus_found <= cpus_to_remove) { | ||
686 | kfree(cpu_drcs); | ||
687 | return -EINVAL; | ||
688 | } | ||
689 | |||
690 | for (i = 0; i < cpus_to_remove; i++) { | ||
691 | rc = dlpar_cpu_remove_by_index(cpu_drcs[i]); | ||
692 | if (rc) | ||
693 | break; | ||
694 | |||
695 | cpus_removed++; | ||
696 | } | ||
697 | |||
698 | if (cpus_removed != cpus_to_remove) { | ||
699 | pr_warn("CPU hot-remove failed, adding back removed CPUs\n"); | ||
700 | |||
701 | for (i = 0; i < cpus_removed; i++) | ||
702 | dlpar_cpu_add(cpu_drcs[i]); | ||
703 | |||
704 | rc = -EINVAL; | ||
705 | } else { | ||
706 | rc = 0; | ||
707 | } | ||
708 | |||
709 | kfree(cpu_drcs); | ||
710 | return rc; | ||
711 | } | ||
712 | |||
713 | static int find_dlpar_cpus_to_add(u32 *cpu_drcs, u32 cpus_to_add) | ||
714 | { | ||
715 | struct device_node *parent; | ||
716 | int cpus_found = 0; | ||
717 | int index, rc; | ||
718 | |||
719 | parent = of_find_node_by_path("/cpus"); | ||
720 | if (!parent) { | ||
721 | pr_warn("Could not find CPU root node in device tree\n"); | ||
722 | kfree(cpu_drcs); | ||
723 | return -1; | ||
724 | } | ||
725 | |||
726 | /* Search the ibm,drc-indexes array for possible CPU drcs to | ||
727 | * add. Note that the format of the ibm,drc-indexes array is | ||
728 | * the number of entries in the array followed by the array | ||
729 | * of drc values so we start looking at index = 1. | ||
730 | */ | ||
731 | index = 1; | ||
732 | while (cpus_found < cpus_to_add) { | ||
733 | u32 drc; | ||
734 | |||
735 | rc = of_property_read_u32_index(parent, "ibm,drc-indexes", | ||
736 | index++, &drc); | ||
737 | if (rc) | ||
738 | break; | ||
739 | |||
740 | if (dlpar_cpu_exists(parent, drc)) | ||
741 | continue; | ||
742 | |||
743 | cpu_drcs[cpus_found++] = drc; | ||
744 | } | ||
745 | |||
746 | of_node_put(parent); | ||
747 | return cpus_found; | ||
748 | } | ||
749 | |||
750 | static int dlpar_cpu_add_by_count(u32 cpus_to_add) | ||
751 | { | ||
752 | u32 *cpu_drcs; | ||
753 | int cpus_added = 0; | ||
754 | int cpus_found; | ||
755 | int i, rc; | ||
756 | |||
757 | pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add); | ||
758 | |||
759 | cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL); | ||
760 | if (!cpu_drcs) | ||
761 | return -EINVAL; | ||
762 | |||
763 | cpus_found = find_dlpar_cpus_to_add(cpu_drcs, cpus_to_add); | ||
764 | if (cpus_found < cpus_to_add) { | ||
765 | pr_warn("Failed to find enough CPUs (%d of %d) to add\n", | ||
766 | cpus_found, cpus_to_add); | ||
767 | kfree(cpu_drcs); | ||
768 | return -EINVAL; | ||
769 | } | ||
770 | |||
771 | for (i = 0; i < cpus_to_add; i++) { | ||
772 | rc = dlpar_cpu_add(cpu_drcs[i]); | ||
773 | if (rc) | ||
774 | break; | ||
775 | |||
776 | cpus_added++; | ||
777 | } | ||
778 | |||
779 | if (cpus_added < cpus_to_add) { | ||
780 | pr_warn("CPU hot-add failed, removing any added CPUs\n"); | ||
781 | |||
782 | for (i = 0; i < cpus_added; i++) | ||
783 | dlpar_cpu_remove_by_index(cpu_drcs[i]); | ||
784 | |||
785 | rc = -EINVAL; | ||
786 | } else { | ||
787 | rc = 0; | ||
788 | } | ||
789 | |||
790 | kfree(cpu_drcs); | ||
791 | return rc; | ||
792 | } | ||
793 | |||
794 | int dlpar_cpu(struct pseries_hp_errorlog *hp_elog) | ||
795 | { | ||
796 | u32 count, drc_index; | ||
797 | int rc; | ||
798 | |||
799 | count = hp_elog->_drc_u.drc_count; | ||
800 | drc_index = hp_elog->_drc_u.drc_index; | ||
801 | |||
802 | lock_device_hotplug(); | ||
803 | |||
804 | switch (hp_elog->action) { | ||
805 | case PSERIES_HP_ELOG_ACTION_REMOVE: | ||
806 | if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) | ||
807 | rc = dlpar_cpu_remove_by_count(count); | ||
808 | else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) | ||
809 | rc = dlpar_cpu_remove_by_index(drc_index); | ||
810 | else | ||
811 | rc = -EINVAL; | ||
812 | break; | ||
813 | case PSERIES_HP_ELOG_ACTION_ADD: | ||
814 | if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) | ||
815 | rc = dlpar_cpu_add_by_count(count); | ||
816 | else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) | ||
817 | rc = dlpar_cpu_add(drc_index); | ||
818 | else | ||
819 | rc = -EINVAL; | ||
820 | break; | ||
821 | default: | ||
822 | pr_err("Invalid action (%d) specified\n", hp_elog->action); | ||
823 | rc = -EINVAL; | ||
824 | break; | ||
825 | } | ||
826 | |||
827 | unlock_device_hotplug(); | ||
828 | return rc; | ||
829 | } | ||
830 | |||
831 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE | ||
832 | |||
833 | static ssize_t dlpar_cpu_probe(const char *buf, size_t count) | ||
834 | { | ||
835 | u32 drc_index; | ||
836 | int rc; | ||
837 | |||
838 | rc = kstrtou32(buf, 0, &drc_index); | ||
839 | if (rc) | ||
840 | return -EINVAL; | ||
841 | |||
842 | rc = dlpar_cpu_add(drc_index); | ||
843 | |||
844 | return rc ? rc : count; | ||
845 | } | ||
846 | |||
847 | static ssize_t dlpar_cpu_release(const char *buf, size_t count) | ||
848 | { | ||
849 | struct device_node *dn; | ||
850 | u32 drc_index; | ||
851 | int rc; | ||
852 | |||
853 | dn = of_find_node_by_path(buf); | ||
854 | if (!dn) | ||
855 | return -EINVAL; | ||
856 | |||
857 | rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index); | ||
858 | if (rc) { | ||
859 | of_node_put(dn); | ||
860 | return -EINVAL; | ||
861 | } | ||
862 | |||
863 | rc = dlpar_cpu_remove(dn, drc_index); | ||
864 | of_node_put(dn); | ||
865 | |||
866 | return rc ? rc : count; | ||
867 | } | ||
868 | |||
869 | #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ | ||
870 | |||
342 | static int pseries_smp_notifier(struct notifier_block *nb, | 871 | static int pseries_smp_notifier(struct notifier_block *nb, |
343 | unsigned long action, void *data) | 872 | unsigned long action, void *data) |
344 | { | 873 | { |
@@ -385,6 +914,11 @@ static int __init pseries_cpu_hotplug_init(void) | |||
385 | int cpu; | 914 | int cpu; |
386 | int qcss_tok; | 915 | int qcss_tok; |
387 | 916 | ||
917 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE | ||
918 | ppc_md.cpu_probe = dlpar_cpu_probe; | ||
919 | ppc_md.cpu_release = dlpar_cpu_release; | ||
920 | #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ | ||
921 | |||
388 | for_each_node_by_name(np, "interrupt-controller") { | 922 | for_each_node_by_name(np, "interrupt-controller") { |
389 | typep = of_get_property(np, "compatible", NULL); | 923 | typep = of_get_property(np, "compatible", NULL); |
390 | if (strstr(typep, "open-pic")) { | 924 | if (strstr(typep, "open-pic")) { |
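The comment in find_dlpar_cpus_to_add() above relies on the layout of the "ibm,drc-indexes" property: the first cell holds the number of entries and the drc values follow, which is why the scan starts at index 1. A minimal sketch of reading that layout (illustrative; the helper name is made up):

	/* Hypothetical helper: fetch the first real drc value, skipping
	 * the leading count cell of "ibm,drc-indexes".
	 */
	static u32 example_first_drc(struct device_node *parent)
	{
		u32 count = 0, drc = 0;

		if (of_property_read_u32_index(parent, "ibm,drc-indexes",
					       0, &count))
			return 0;	/* property missing */
		if (count)
			of_property_read_u32_index(parent, "ibm,drc-indexes",
						   1, &drc);
		return drc;
	}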
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index b7a67e3d2201..477290ad855e 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c | |||
@@ -315,48 +315,48 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot, | |||
315 | return 0; | 315 | return 0; |
316 | } | 316 | } |
317 | 317 | ||
318 | static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot) | 318 | static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group) |
319 | { | 319 | { |
320 | unsigned long dword0; | 320 | long lpar_rc; |
321 | unsigned long lpar_rc; | 321 | unsigned long i, j; |
322 | unsigned long dummy_word1; | 322 | struct { |
323 | unsigned long flags; | 323 | unsigned long pteh; |
324 | unsigned long ptel; | ||
325 | } ptes[4]; | ||
324 | 326 | ||
325 | /* Read 1 pte at a time */ | 327 | for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) { |
326 | /* Do not need RPN to logical page translation */ | ||
327 | /* No cross CEC PFT access */ | ||
328 | flags = 0; | ||
329 | 328 | ||
330 | lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1); | 329 | lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes); |
330 | if (lpar_rc != H_SUCCESS) | ||
331 | continue; | ||
331 | 332 | ||
332 | BUG_ON(lpar_rc != H_SUCCESS); | 333 | for (j = 0; j < 4; j++) { |
334 | if (HPTE_V_COMPARE(ptes[j].pteh, want_v) && | ||
335 | (ptes[j].pteh & HPTE_V_VALID)) | ||
336 | return i + j; | ||
337 | } | ||
338 | } | ||
333 | 339 | ||
334 | return dword0; | 340 | return -1; |
335 | } | 341 | } |
336 | 342 | ||
337 | static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize) | 343 | static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize) |
338 | { | 344 | { |
339 | unsigned long hash; | ||
340 | unsigned long i; | ||
341 | long slot; | 345 | long slot; |
342 | unsigned long want_v, hpte_v; | 346 | unsigned long hash; |
347 | unsigned long want_v; | ||
348 | unsigned long hpte_group; | ||
343 | 349 | ||
344 | hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); | 350 | hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); |
345 | want_v = hpte_encode_avpn(vpn, psize, ssize); | 351 | want_v = hpte_encode_avpn(vpn, psize, ssize); |
346 | 352 | ||
347 | /* Bolted entries are always in the primary group */ | 353 | /* Bolted entries are always in the primary group */ |
348 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | 354 | hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP; |
349 | for (i = 0; i < HPTES_PER_GROUP; i++) { | 355 | slot = __pSeries_lpar_hpte_find(want_v, hpte_group); |
350 | hpte_v = pSeries_lpar_hpte_getword0(slot); | 356 | if (slot < 0) |
351 | 357 | return -1; | |
352 | if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) | 358 | return hpte_group + slot; |
353 | /* HPTE matches */ | 359 | } |
354 | return slot; | ||
355 | ++slot; | ||
356 | } | ||
357 | |||
358 | return -1; | ||
359 | } | ||
360 | 360 | ||
361 | static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, | 361 | static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, |
362 | unsigned long ea, | 362 | unsigned long ea, |
@@ -396,6 +396,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, | |||
396 | BUG_ON(lpar_rc != H_SUCCESS); | 396 | BUG_ON(lpar_rc != H_SUCCESS); |
397 | } | 397 | } |
398 | 398 | ||
399 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
399 | /* | 400 | /* |
400 | * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need | 401 | * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need |
401 | * to make sure that we avoid bouncing the hypervisor tlbie lock. | 402 | * to make sure that we avoid bouncing the hypervisor tlbie lock. |
@@ -494,6 +495,15 @@ static void pSeries_lpar_hugepage_invalidate(unsigned long vsid, | |||
494 | __pSeries_lpar_hugepage_invalidate(slot_array, vpn_array, | 495 | __pSeries_lpar_hugepage_invalidate(slot_array, vpn_array, |
495 | index, psize, ssize); | 496 | index, psize, ssize); |
496 | } | 497 | } |
498 | #else | ||
499 | static void pSeries_lpar_hugepage_invalidate(unsigned long vsid, | ||
500 | unsigned long addr, | ||
501 | unsigned char *hpte_slot_array, | ||
502 | int psize, int ssize, int local) | ||
503 | { | ||
504 | WARN(1, "%s called without THP support\n", __func__); | ||
505 | } | ||
506 | #endif | ||
497 | 507 | ||
498 | static void pSeries_lpar_hpte_removebolted(unsigned long ea, | 508 | static void pSeries_lpar_hpte_removebolted(unsigned long ea, |
499 | int psize, int ssize) | 509 | int psize, int ssize) |
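The rewritten lookup above batches the hypervisor reads: plpar_pte_read_4() returns four HPTEs per H_READ call, so a full group of HPTES_PER_GROUP entries is scanned in a quarter of the hcalls the old one-entry-at-a-time loop needed. The helper returns an offset within the group and the caller composes the global slot, roughly:

	/* Slot composition as done in pSeries_lpar_hpte_find() above */
	unsigned long hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	long offset = __pSeries_lpar_hpte_find(want_v, hpte_group);
	long slot = (offset < 0) ? -1 : hpte_group + offset;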
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 8411c27293e4..7aa83f00ac62 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h | |||
@@ -73,6 +73,15 @@ static inline int dlpar_memory(struct pseries_hp_errorlog *hp_elog) | |||
73 | } | 73 | } |
74 | #endif | 74 | #endif |
75 | 75 | ||
76 | #ifdef CONFIG_HOTPLUG_CPU | ||
77 | int dlpar_cpu(struct pseries_hp_errorlog *hp_elog); | ||
78 | #else | ||
79 | static inline int dlpar_cpu(struct pseries_hp_errorlog *hp_elog) | ||
80 | { | ||
81 | return -EOPNOTSUPP; | ||
82 | } | ||
83 | #endif | ||
84 | |||
76 | /* PCI root bridge prepare function override for pseries */ | 85 | /* PCI root bridge prepare function override for pseries */ |
77 | struct pci_host_bridge; | 86 | struct pci_host_bridge; |
78 | int pseries_root_bridge_prepare(struct pci_host_bridge *bridge); | 87 | int pseries_root_bridge_prepare(struct pci_host_bridge *bridge); |
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 3b6647e574b6..9a3e27b863ce 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c | |||
@@ -40,6 +40,9 @@ static int ras_check_exception_token; | |||
40 | #define EPOW_SENSOR_TOKEN 9 | 40 | #define EPOW_SENSOR_TOKEN 9 |
41 | #define EPOW_SENSOR_INDEX 0 | 41 | #define EPOW_SENSOR_INDEX 0 |
42 | 42 | ||
43 | /* EPOW events counter variable */ | ||
44 | static int num_epow_events; | ||
45 | |||
43 | static irqreturn_t ras_epow_interrupt(int irq, void *dev_id); | 46 | static irqreturn_t ras_epow_interrupt(int irq, void *dev_id); |
44 | static irqreturn_t ras_error_interrupt(int irq, void *dev_id); | 47 | static irqreturn_t ras_error_interrupt(int irq, void *dev_id); |
45 | 48 | ||
@@ -82,32 +85,30 @@ static void handle_system_shutdown(char event_modifier) | |||
82 | { | 85 | { |
83 | switch (event_modifier) { | 86 | switch (event_modifier) { |
84 | case EPOW_SHUTDOWN_NORMAL: | 87 | case EPOW_SHUTDOWN_NORMAL: |
85 | pr_emerg("Firmware initiated power off"); | 88 | pr_emerg("Power off requested\n"); |
86 | orderly_poweroff(true); | 89 | orderly_poweroff(true); |
87 | break; | 90 | break; |
88 | 91 | ||
89 | case EPOW_SHUTDOWN_ON_UPS: | 92 | case EPOW_SHUTDOWN_ON_UPS: |
90 | pr_emerg("Loss of power reported by firmware, system is " | 93 | pr_emerg("Loss of system power detected. System is running on" |
91 | "running on UPS/battery"); | 94 | " UPS/battery. Check RTAS error log for details\n"); |
92 | pr_emerg("Check RTAS error log for details"); | ||
93 | orderly_poweroff(true); | 95 | orderly_poweroff(true); |
94 | break; | 96 | break; |
95 | 97 | ||
96 | case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS: | 98 | case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS: |
97 | pr_emerg("Loss of system critical functions reported by " | 99 | pr_emerg("Loss of system critical functions detected. Check" |
98 | "firmware"); | 100 | " RTAS error log for details\n"); |
99 | pr_emerg("Check RTAS error log for details"); | ||
100 | orderly_poweroff(true); | 101 | orderly_poweroff(true); |
101 | break; | 102 | break; |
102 | 103 | ||
103 | case EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH: | 104 | case EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH: |
104 | pr_emerg("Ambient temperature too high reported by firmware"); | 105 | pr_emerg("High ambient temperature detected. Check RTAS" |
105 | pr_emerg("Check RTAS error log for details"); | 106 | " error log for details\n"); |
106 | orderly_poweroff(true); | 107 | orderly_poweroff(true); |
107 | break; | 108 | break; |
108 | 109 | ||
109 | default: | 110 | default: |
110 | pr_err("Unknown power/cooling shutdown event (modifier %d)", | 111 | pr_err("Unknown power/cooling shutdown event (modifier = %d)\n", |
111 | event_modifier); | 112 | event_modifier); |
112 | } | 113 | } |
113 | } | 114 | } |
@@ -145,17 +146,20 @@ static void rtas_parse_epow_errlog(struct rtas_error_log *log) | |||
145 | 146 | ||
146 | switch (action_code) { | 147 | switch (action_code) { |
147 | case EPOW_RESET: | 148 | case EPOW_RESET: |
148 | pr_err("Non critical power or cooling issue cleared"); | 149 | if (num_epow_events) { |
150 | pr_info("Non critical power/cooling issue cleared\n"); | ||
151 | num_epow_events--; | ||
152 | } | ||
149 | break; | 153 | break; |
150 | 154 | ||
151 | case EPOW_WARN_COOLING: | 155 | case EPOW_WARN_COOLING: |
152 | pr_err("Non critical cooling issue reported by firmware"); | 156 | pr_info("Non-critical cooling issue detected. Check RTAS error" |
153 | pr_err("Check RTAS error log for details"); | 157 | " log for details\n"); |
154 | break; | 158 | break; |
155 | 159 | ||
156 | case EPOW_WARN_POWER: | 160 | case EPOW_WARN_POWER: |
157 | pr_err("Non critical power issue reported by firmware"); | 161 | pr_info("Non-critical power issue detected. Check RTAS error" |
158 | pr_err("Check RTAS error log for details"); | 162 | " log for details\n"); |
159 | break; | 163 | break; |
160 | 164 | ||
161 | case EPOW_SYSTEM_SHUTDOWN: | 165 | case EPOW_SYSTEM_SHUTDOWN: |
@@ -163,23 +167,27 @@ static void rtas_parse_epow_errlog(struct rtas_error_log *log) | |||
163 | break; | 167 | break; |
164 | 168 | ||
165 | case EPOW_SYSTEM_HALT: | 169 | case EPOW_SYSTEM_HALT: |
166 | pr_emerg("Firmware initiated power off"); | 170 | pr_emerg("Critical power/cooling issue detected. Check RTAS" |
171 | " error log for details. Powering off.\n"); | ||
167 | orderly_poweroff(true); | 172 | orderly_poweroff(true); |
168 | break; | 173 | break; |
169 | 174 | ||
170 | case EPOW_MAIN_ENCLOSURE: | 175 | case EPOW_MAIN_ENCLOSURE: |
171 | case EPOW_POWER_OFF: | 176 | case EPOW_POWER_OFF: |
172 | pr_emerg("Critical power/cooling issue reported by firmware"); | 177 | pr_emerg("System about to lose power. Check RTAS error log " |
173 | pr_emerg("Check RTAS error log for details"); | 178 | " for details. Powering off immediately.\n"); |
174 | pr_emerg("Immediate power off"); | ||
175 | emergency_sync(); | 179 | emergency_sync(); |
176 | kernel_power_off(); | 180 | kernel_power_off(); |
177 | break; | 181 | break; |
178 | 182 | ||
179 | default: | 183 | default: |
180 | pr_err("Unknown power/cooling event (action code %d)", | 184 | pr_err("Unknown power/cooling event (action code = %d)\n", |
181 | action_code); | 185 | action_code); |
182 | } | 186 | } |
187 | |||
188 | /* Increment epow events counter variable */ | ||
189 | if (action_code != EPOW_RESET) | ||
190 | num_epow_events++; | ||
183 | } | 191 | } |
184 | 192 | ||
185 | /* Handle environmental and power warning (EPOW) interrupts. */ | 193 | /* Handle environmental and power warning (EPOW) interrupts. */ |
@@ -249,13 +257,12 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) | |||
249 | log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal); | 257 | log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal); |
250 | 258 | ||
251 | if (fatal) { | 259 | if (fatal) { |
252 | pr_emerg("Fatal hardware error reported by firmware"); | 260 | pr_emerg("Fatal hardware error detected. Check RTAS error" |
253 | pr_emerg("Check RTAS error log for details"); | 261 | " log for details. Powering off immediately\n"); |
254 | pr_emerg("Immediate power off"); | ||
255 | emergency_sync(); | 262 | emergency_sync(); |
256 | kernel_power_off(); | 263 | kernel_power_off(); |
257 | } else { | 264 | } else { |
258 | pr_err("Recoverable hardware error reported by firmware"); | 265 | pr_err("Recoverable hardware error detected\n"); |
259 | } | 266 | } |
260 | 267 | ||
261 | spin_unlock(&ras_log_buf_lock); | 268 | spin_unlock(&ras_log_buf_lock); |
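The new num_epow_events counter gives the EPOW_RESET path a simple hysteresis: a "cleared" message is only printed (and the counter decremented) while a previous warning is outstanding, and every non-reset event arms one more clear. A condensed restatement of the logic added above (illustration only):

	if (action_code == EPOW_RESET) {
		if (num_epow_events) {	/* only report a clear after a warning */
			pr_info("Non critical power/cooling issue cleared\n");
			num_epow_events--;
		}
	} else {
		num_epow_events++;	/* any other EPOW event arms one clear */
	}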
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index 5b492a6438ff..bd6bd729969c 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile | |||
@@ -26,7 +26,6 @@ obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o | |||
26 | obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o | 26 | obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o |
27 | obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o | 27 | obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o |
28 | obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o | 28 | obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o |
29 | obj-$(CONFIG_QUICC_ENGINE) += qe_lib/ | ||
30 | mv64x60-$(CONFIG_PCI) += mv64x60_pci.o | 29 | mv64x60-$(CONFIG_PCI) += mv64x60_pci.o |
31 | obj-$(CONFIG_MV64X60) += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o \ | 30 | obj-$(CONFIG_MV64X60) += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o \ |
32 | mv64x60_udbg.o | 31 | mv64x60_udbg.o |
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index 7a399b4d60a0..c713b349d967 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c | |||
@@ -313,6 +313,7 @@ static const struct of_device_id axon_ram_device_id[] = { | |||
313 | }, | 313 | }, |
314 | {} | 314 | {} |
315 | }; | 315 | }; |
316 | MODULE_DEVICE_TABLE(of, axon_ram_device_id); | ||
316 | 317 | ||
317 | static struct platform_driver axon_ram_driver = { | 318 | static struct platform_driver axon_ram_driver = { |
318 | .probe = axon_ram_probe, | 319 | .probe = axon_ram_probe, |
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c index e00a5ee58fd7..9d32465eddb1 100644 --- a/arch/powerpc/sysdev/cpm_common.c +++ b/arch/powerpc/sysdev/cpm_common.c | |||
@@ -27,8 +27,8 @@ | |||
27 | 27 | ||
28 | #include <asm/udbg.h> | 28 | #include <asm/udbg.h> |
29 | #include <asm/io.h> | 29 | #include <asm/io.h> |
30 | #include <asm/rheap.h> | ||
31 | #include <asm/cpm.h> | 30 | #include <asm/cpm.h> |
31 | #include <soc/fsl/qe/qe.h> | ||
32 | 32 | ||
33 | #include <mm/mmu_decl.h> | 33 | #include <mm/mmu_decl.h> |
34 | 34 | ||
@@ -65,162 +65,6 @@ void __init udbg_init_cpm(void) | |||
65 | } | 65 | } |
66 | #endif | 66 | #endif |
67 | 67 | ||
68 | static spinlock_t cpm_muram_lock; | ||
69 | static rh_block_t cpm_boot_muram_rh_block[16]; | ||
70 | static rh_info_t cpm_muram_info; | ||
71 | static u8 __iomem *muram_vbase; | ||
72 | static phys_addr_t muram_pbase; | ||
73 | |||
74 | /* Max address size we deal with */ | ||
75 | #define OF_MAX_ADDR_CELLS 4 | ||
76 | |||
77 | int cpm_muram_init(void) | ||
78 | { | ||
79 | struct device_node *np; | ||
80 | struct resource r; | ||
81 | u32 zero[OF_MAX_ADDR_CELLS] = {}; | ||
82 | resource_size_t max = 0; | ||
83 | int i = 0; | ||
84 | int ret = 0; | ||
85 | |||
86 | if (muram_pbase) | ||
87 | return 0; | ||
88 | |||
89 | spin_lock_init(&cpm_muram_lock); | ||
90 | /* initialize the info header */ | ||
91 | rh_init(&cpm_muram_info, 1, | ||
92 | sizeof(cpm_boot_muram_rh_block) / | ||
93 | sizeof(cpm_boot_muram_rh_block[0]), | ||
94 | cpm_boot_muram_rh_block); | ||
95 | |||
96 | np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data"); | ||
97 | if (!np) { | ||
98 | /* try legacy bindings */ | ||
99 | np = of_find_node_by_name(NULL, "data-only"); | ||
100 | if (!np) { | ||
101 | printk(KERN_ERR "Cannot find CPM muram data node"); | ||
102 | ret = -ENODEV; | ||
103 | goto out; | ||
104 | } | ||
105 | } | ||
106 | |||
107 | muram_pbase = of_translate_address(np, zero); | ||
108 | if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) { | ||
109 | printk(KERN_ERR "Cannot translate zero through CPM muram node"); | ||
110 | ret = -ENODEV; | ||
111 | goto out; | ||
112 | } | ||
113 | |||
114 | while (of_address_to_resource(np, i++, &r) == 0) { | ||
115 | if (r.end > max) | ||
116 | max = r.end; | ||
117 | |||
118 | rh_attach_region(&cpm_muram_info, r.start - muram_pbase, | ||
119 | resource_size(&r)); | ||
120 | } | ||
121 | |||
122 | muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1); | ||
123 | if (!muram_vbase) { | ||
124 | printk(KERN_ERR "Cannot map CPM muram"); | ||
125 | ret = -ENOMEM; | ||
126 | } | ||
127 | |||
128 | out: | ||
129 | of_node_put(np); | ||
130 | return ret; | ||
131 | } | ||
132 | |||
133 | /** | ||
134 | * cpm_muram_alloc - allocate the requested size worth of multi-user ram | ||
135 | * @size: number of bytes to allocate | ||
136 | * @align: requested alignment, in bytes | ||
137 | * | ||
138 | * This function returns an offset into the muram area. | ||
139 | * Use cpm_dpram_addr() to get the virtual address of the area. | ||
140 | * Use cpm_muram_free() to free the allocation. | ||
141 | */ | ||
142 | unsigned long cpm_muram_alloc(unsigned long size, unsigned long align) | ||
143 | { | ||
144 | unsigned long start; | ||
145 | unsigned long flags; | ||
146 | |||
147 | spin_lock_irqsave(&cpm_muram_lock, flags); | ||
148 | cpm_muram_info.alignment = align; | ||
149 | start = rh_alloc(&cpm_muram_info, size, "commproc"); | ||
150 | if (!IS_ERR_VALUE(start)) | ||
151 | memset_io(cpm_muram_addr(start), 0, size); | ||
152 | spin_unlock_irqrestore(&cpm_muram_lock, flags); | ||
153 | |||
154 | return start; | ||
155 | } | ||
156 | EXPORT_SYMBOL(cpm_muram_alloc); | ||
157 | |||
158 | /** | ||
159 | * cpm_muram_free - free a chunk of multi-user ram | ||
160 | * @offset: The beginning of the chunk as returned by cpm_muram_alloc(). | ||
161 | */ | ||
162 | int cpm_muram_free(unsigned long offset) | ||
163 | { | ||
164 | int ret; | ||
165 | unsigned long flags; | ||
166 | |||
167 | spin_lock_irqsave(&cpm_muram_lock, flags); | ||
168 | ret = rh_free(&cpm_muram_info, offset); | ||
169 | spin_unlock_irqrestore(&cpm_muram_lock, flags); | ||
170 | |||
171 | return ret; | ||
172 | } | ||
173 | EXPORT_SYMBOL(cpm_muram_free); | ||
174 | |||
175 | /** | ||
176 | * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram | ||
177 | * @offset: the offset into the muram area to reserve | ||
178 | * @size: the number of bytes to reserve | ||
179 | * | ||
180 | * This function returns "start" on success, -ENOMEM on failure. | ||
181 | * Use cpm_dpram_addr() to get the virtual address of the area. | ||
182 | * Use cpm_muram_free() to free the allocation. | ||
183 | */ | ||
184 | unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size) | ||
185 | { | ||
186 | unsigned long start; | ||
187 | unsigned long flags; | ||
188 | |||
189 | spin_lock_irqsave(&cpm_muram_lock, flags); | ||
190 | cpm_muram_info.alignment = 1; | ||
191 | start = rh_alloc_fixed(&cpm_muram_info, offset, size, "commproc"); | ||
192 | spin_unlock_irqrestore(&cpm_muram_lock, flags); | ||
193 | |||
194 | return start; | ||
195 | } | ||
196 | EXPORT_SYMBOL(cpm_muram_alloc_fixed); | ||
197 | |||
198 | /** | ||
199 | * cpm_muram_addr - turn a muram offset into a virtual address | ||
200 | * @offset: muram offset to convert | ||
201 | */ | ||
202 | void __iomem *cpm_muram_addr(unsigned long offset) | ||
203 | { | ||
204 | return muram_vbase + offset; | ||
205 | } | ||
206 | EXPORT_SYMBOL(cpm_muram_addr); | ||
207 | |||
208 | unsigned long cpm_muram_offset(void __iomem *addr) | ||
209 | { | ||
210 | return addr - (void __iomem *)muram_vbase; | ||
211 | } | ||
212 | EXPORT_SYMBOL(cpm_muram_offset); | ||
213 | |||
214 | /** | ||
215 | * cpm_muram_dma - turn a muram virtual address into a DMA address | ||
216 | * @offset: virtual address from cpm_muram_addr() to convert | ||
217 | */ | ||
218 | dma_addr_t cpm_muram_dma(void __iomem *addr) | ||
219 | { | ||
220 | return muram_pbase + ((u8 __iomem *)addr - muram_vbase); | ||
221 | } | ||
222 | EXPORT_SYMBOL(cpm_muram_dma); | ||
223 | |||
224 | #if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO) | 68 | #if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO) |
225 | 69 | ||
226 | struct cpm2_ioports { | 70 | struct cpm2_ioports { |
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c index 38138cf8d33e..47f781059eeb 100644 --- a/arch/powerpc/sysdev/fsl_lbc.c +++ b/arch/powerpc/sysdev/fsl_lbc.c | |||
@@ -243,8 +243,6 @@ static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data) | |||
243 | if (status & LTESR_CS) | 243 | if (status & LTESR_CS) |
244 | dev_err(ctrl->dev, "Chip select error: " | 244 | dev_err(ctrl->dev, "Chip select error: " |
245 | "LTESR 0x%08X\n", status); | 245 | "LTESR 0x%08X\n", status); |
246 | if (status & LTESR_UPM) | ||
247 | ; | ||
248 | if (status & LTESR_FCT) { | 246 | if (status & LTESR_FCT) { |
249 | dev_err(ctrl->dev, "FCM command time-out: " | 247 | dev_err(ctrl->dev, "FCM command time-out: " |
250 | "LTESR 0x%08X\n", status); | 248 | "LTESR 0x%08X\n", status); |
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index a1ac80b3041a..c69e88e91459 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c | |||
@@ -218,6 +218,19 @@ static void setup_pci_atmu(struct pci_controller *hose) | |||
218 | */ | 218 | */ |
219 | setup_inbound = !is_kdump(); | 219 | setup_inbound = !is_kdump(); |
220 | 220 | ||
221 | if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) { | ||
222 | /* | ||
223 | * BSC9132 Rev1.0 has an issue where all the PEX inbound | ||
224 | * windows have implemented the default target value as 0xf | ||
225 | * for CCSR space. In all Freescale legacy devices the target | ||
226 | * of 0xf is reserved for local memory space. 9132 Rev1.0 | ||
227 | * now has local memory space mapped to target 0x0 instead of | ||
228 | * 0xf. Hence adding a workaround to remove the target 0xf | ||
229 | * defined for memory space from Inbound window attributes. | ||
230 | */ | ||
231 | piwar &= ~PIWAR_TGI_LOCAL; | ||
232 | } | ||
233 | |||
221 | if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { | 234 | if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { |
222 | if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) { | 235 | if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) { |
223 | win_idx = 2; | 236 | win_idx = 2; |
diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig deleted file mode 100644 index 3c251993bacd..000000000000 --- a/arch/powerpc/sysdev/qe_lib/Kconfig +++ /dev/null | |||
@@ -1,27 +0,0 @@ | |||
1 | # | ||
2 | # QE Communication options | ||
3 | # | ||
4 | |||
5 | config UCC_SLOW | ||
6 | bool | ||
7 | default y if SERIAL_QE | ||
8 | help | ||
9 | This option provides qe_lib support to UCC slow | ||
10 | protocols: UART, BISYNC, QMC | ||
11 | |||
12 | config UCC_FAST | ||
13 | bool | ||
14 | default y if UCC_GETH | ||
15 | help | ||
16 | This option provides qe_lib support to UCC fast | ||
17 | protocols: HDLC, Ethernet, ATM, transparent | ||
18 | |||
19 | config UCC | ||
20 | bool | ||
21 | default y if UCC_FAST || UCC_SLOW | ||
22 | |||
23 | config QE_USB | ||
24 | bool | ||
25 | default y if USB_FSL_QE | ||
26 | help | ||
27 | QE USB Controller support | ||
diff --git a/arch/powerpc/sysdev/qe_lib/Makefile b/arch/powerpc/sysdev/qe_lib/Makefile deleted file mode 100644 index f1855c185291..000000000000 --- a/arch/powerpc/sysdev/qe_lib/Makefile +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for the linux ppc-specific parts of QE | ||
3 | # | ||
4 | obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_ic.o qe_io.o | ||
5 | |||
6 | obj-$(CONFIG_UCC) += ucc.o | ||
7 | obj-$(CONFIG_UCC_SLOW) += ucc_slow.o | ||
8 | obj-$(CONFIG_UCC_FAST) += ucc_fast.o | ||
9 | obj-$(CONFIG_QE_USB) += usb.o | ||
10 | obj-$(CONFIG_QE_GPIO) += gpio.o | ||
diff --git a/arch/powerpc/sysdev/qe_lib/gpio.c b/arch/powerpc/sysdev/qe_lib/gpio.c deleted file mode 100644 index 521e67a49dc4..000000000000 --- a/arch/powerpc/sysdev/qe_lib/gpio.c +++ /dev/null | |||
@@ -1,317 +0,0 @@ | |||
1 | /* | ||
2 | * QUICC Engine GPIOs | ||
3 | * | ||
4 | * Copyright (c) MontaVista Software, Inc. 2008. | ||
5 | * | ||
6 | * Author: Anton Vorontsov <avorontsov@ru.mvista.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/err.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/of.h> | ||
20 | #include <linux/of_gpio.h> | ||
21 | #include <linux/gpio.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/export.h> | ||
24 | #include <asm/qe.h> | ||
25 | |||
26 | struct qe_gpio_chip { | ||
27 | struct of_mm_gpio_chip mm_gc; | ||
28 | spinlock_t lock; | ||
29 | |||
30 | unsigned long pin_flags[QE_PIO_PINS]; | ||
31 | #define QE_PIN_REQUESTED 0 | ||
32 | |||
33 | /* shadowed data register to clear/set bits safely */ | ||
34 | u32 cpdata; | ||
35 | |||
36 | /* saved_regs used to restore dedicated functions */ | ||
37 | struct qe_pio_regs saved_regs; | ||
38 | }; | ||
39 | |||
40 | static inline struct qe_gpio_chip * | ||
41 | to_qe_gpio_chip(struct of_mm_gpio_chip *mm_gc) | ||
42 | { | ||
43 | return container_of(mm_gc, struct qe_gpio_chip, mm_gc); | ||
44 | } | ||
45 | |||
46 | static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) | ||
47 | { | ||
48 | struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc); | ||
49 | struct qe_pio_regs __iomem *regs = mm_gc->regs; | ||
50 | |||
51 | qe_gc->cpdata = in_be32(®s->cpdata); | ||
52 | qe_gc->saved_regs.cpdata = qe_gc->cpdata; | ||
53 | qe_gc->saved_regs.cpdir1 = in_be32(®s->cpdir1); | ||
54 | qe_gc->saved_regs.cpdir2 = in_be32(®s->cpdir2); | ||
55 | qe_gc->saved_regs.cppar1 = in_be32(®s->cppar1); | ||
56 | qe_gc->saved_regs.cppar2 = in_be32(®s->cppar2); | ||
57 | qe_gc->saved_regs.cpodr = in_be32(®s->cpodr); | ||
58 | } | ||
59 | |||
60 | static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio) | ||
61 | { | ||
62 | struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); | ||
63 | struct qe_pio_regs __iomem *regs = mm_gc->regs; | ||
64 | u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio); | ||
65 | |||
66 | return in_be32(®s->cpdata) & pin_mask; | ||
67 | } | ||
68 | |||
69 | static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) | ||
70 | { | ||
71 | struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); | ||
72 | struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc); | ||
73 | struct qe_pio_regs __iomem *regs = mm_gc->regs; | ||
74 | unsigned long flags; | ||
75 | u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio); | ||
76 | |||
77 | spin_lock_irqsave(&qe_gc->lock, flags); | ||
78 | |||
79 | if (val) | ||
80 | qe_gc->cpdata |= pin_mask; | ||
81 | else | ||
82 | qe_gc->cpdata &= ~pin_mask; | ||
83 | |||
84 | out_be32(®s->cpdata, qe_gc->cpdata); | ||
85 | |||
86 | spin_unlock_irqrestore(&qe_gc->lock, flags); | ||
87 | } | ||
88 | |||
89 | static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) | ||
90 | { | ||
91 | struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); | ||
92 | struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc); | ||
93 | unsigned long flags; | ||
94 | |||
95 | spin_lock_irqsave(&qe_gc->lock, flags); | ||
96 | |||
97 | __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0); | ||
98 | |||
99 | spin_unlock_irqrestore(&qe_gc->lock, flags); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) | ||
105 | { | ||
106 | struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); | ||
107 | struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc); | ||
108 | unsigned long flags; | ||
109 | |||
110 | qe_gpio_set(gc, gpio, val); | ||
111 | |||
112 | spin_lock_irqsave(&qe_gc->lock, flags); | ||
113 | |||
114 | __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0); | ||
115 | |||
116 | spin_unlock_irqrestore(&qe_gc->lock, flags); | ||
117 | |||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | struct qe_pin { | ||
122 | /* | ||
123 | * The qe_gpio_chip name is unfortunate; we should change that to | ||
124 | * something like qe_pio_controller. Someday. | ||
125 | */ | ||
126 | struct qe_gpio_chip *controller; | ||
127 | int num; | ||
128 | }; | ||
129 | |||
130 | /** | ||
131 | * qe_pin_request - Request a QE pin | ||
132 | * @np: device node to get a pin from | ||
133 | * @index: index of a pin in the device tree | ||
134 | * Context: non-atomic | ||
135 | * | ||
136 | * This function returns a qe_pin so that you can use it with the rest of | ||
137 | * the QE Pin Multiplexing API. | ||
138 | */ | ||
139 | struct qe_pin *qe_pin_request(struct device_node *np, int index) | ||
140 | { | ||
141 | struct qe_pin *qe_pin; | ||
142 | struct gpio_chip *gc; | ||
143 | struct of_mm_gpio_chip *mm_gc; | ||
144 | struct qe_gpio_chip *qe_gc; | ||
145 | int err; | ||
146 | unsigned long flags; | ||
147 | |||
148 | qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL); | ||
149 | if (!qe_pin) { | ||
150 | pr_debug("%s: can't allocate memory\n", __func__); | ||
151 | return ERR_PTR(-ENOMEM); | ||
152 | } | ||
153 | |||
154 | err = of_get_gpio(np, index); | ||
155 | if (err < 0) | ||
156 | goto err0; | ||
157 | gc = gpio_to_chip(err); | ||
158 | if (WARN_ON(!gc)) | ||
159 | goto err0; | ||
160 | |||
161 | if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) { | ||
162 | pr_debug("%s: tried to get a non-qe pin\n", __func__); | ||
163 | err = -EINVAL; | ||
164 | goto err0; | ||
165 | } | ||
166 | |||
167 | mm_gc = to_of_mm_gpio_chip(gc); | ||
168 | qe_gc = to_qe_gpio_chip(mm_gc); | ||
169 | |||
170 | spin_lock_irqsave(&qe_gc->lock, flags); | ||
171 | |||
172 | err -= gc->base; | ||
173 | if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) { | ||
174 | qe_pin->controller = qe_gc; | ||
175 | qe_pin->num = err; | ||
176 | err = 0; | ||
177 | } else { | ||
178 | err = -EBUSY; | ||
179 | } | ||
180 | |||
181 | spin_unlock_irqrestore(&qe_gc->lock, flags); | ||
182 | |||
183 | if (!err) | ||
184 | return qe_pin; | ||
185 | err0: | ||
186 | kfree(qe_pin); | ||
187 | pr_debug("%s failed with status %d\n", __func__, err); | ||
188 | return ERR_PTR(err); | ||
189 | } | ||
190 | EXPORT_SYMBOL(qe_pin_request); | ||
191 | |||
192 | /** | ||
193 | * qe_pin_free - Free a pin | ||
194 | * @qe_pin: pointer to the qe_pin structure | ||
195 | * Context: any | ||
196 | * | ||
197 | * This function frees the qe_pin structure and makes a pin available | ||
198 | * for further qe_pin_request() calls. | ||
199 | */ | ||
200 | void qe_pin_free(struct qe_pin *qe_pin) | ||
201 | { | ||
202 | struct qe_gpio_chip *qe_gc = qe_pin->controller; | ||
203 | unsigned long flags; | ||
204 | const int pin = qe_pin->num; | ||
205 | |||
206 | spin_lock_irqsave(&qe_gc->lock, flags); | ||
207 | test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]); | ||
208 | spin_unlock_irqrestore(&qe_gc->lock, flags); | ||
209 | |||
210 | kfree(qe_pin); | ||
211 | } | ||
212 | EXPORT_SYMBOL(qe_pin_free); | ||
213 | |||
214 | /** | ||
215 | * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode | ||
216 | * @qe_pin: pointer to the qe_pin structure | ||
217 | * Context: any | ||
218 | * | ||
219 | * This function resets a pin to a dedicated peripheral function that | ||
220 | * has been set up by the firmware. | ||
221 | */ | ||
222 | void qe_pin_set_dedicated(struct qe_pin *qe_pin) | ||
223 | { | ||
224 | struct qe_gpio_chip *qe_gc = qe_pin->controller; | ||
225 | struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs; | ||
226 | struct qe_pio_regs *sregs = &qe_gc->saved_regs; | ||
227 | int pin = qe_pin->num; | ||
228 | u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1)); | ||
229 | u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2); | ||
230 | bool second_reg = pin > (QE_PIO_PINS / 2) - 1; | ||
231 | unsigned long flags; | ||
232 | |||
233 | spin_lock_irqsave(&qe_gc->lock, flags); | ||
234 | |||
235 | if (second_reg) { | ||
236 | clrsetbits_be32(®s->cpdir2, mask2, sregs->cpdir2 & mask2); | ||
237 | clrsetbits_be32(®s->cppar2, mask2, sregs->cppar2 & mask2); | ||
238 | } else { | ||
239 | clrsetbits_be32(®s->cpdir1, mask2, sregs->cpdir1 & mask2); | ||
240 | clrsetbits_be32(®s->cppar1, mask2, sregs->cppar1 & mask2); | ||
241 | } | ||
242 | |||
243 | if (sregs->cpdata & mask1) | ||
244 | qe_gc->cpdata |= mask1; | ||
245 | else | ||
246 | qe_gc->cpdata &= ~mask1; | ||
247 | |||
248 | out_be32(®s->cpdata, qe_gc->cpdata); | ||
249 | clrsetbits_be32(®s->cpodr, mask1, sregs->cpodr & mask1); | ||
250 | |||
251 | spin_unlock_irqrestore(&qe_gc->lock, flags); | ||
252 | } | ||
253 | EXPORT_SYMBOL(qe_pin_set_dedicated); | ||
254 | |||
255 | /** | ||
256 | * qe_pin_set_gpio - Set a pin to the GPIO mode | ||
257 | * @qe_pin: pointer to the qe_pin structure | ||
258 | * Context: any | ||
259 | * | ||
260 | * This function sets a pin to the GPIO mode. | ||
261 | */ | ||
262 | void qe_pin_set_gpio(struct qe_pin *qe_pin) | ||
263 | { | ||
264 | struct qe_gpio_chip *qe_gc = qe_pin->controller; | ||
265 | struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs; | ||
266 | unsigned long flags; | ||
267 | |||
268 | spin_lock_irqsave(&qe_gc->lock, flags); | ||
269 | |||
270 | /* Let's make it input by default, GPIO API is able to change that. */ | ||
271 | __par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0); | ||
272 | |||
273 | spin_unlock_irqrestore(&qe_gc->lock, flags); | ||
274 | } | ||
275 | EXPORT_SYMBOL(qe_pin_set_gpio); | ||
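
The four exported calls above make up the whole QE pin-multiplexing API. A minimal consumer sketch (hypothetical, not from this patch; it assumes the caller already holds a device node np with a suitable gpios property, and trims error paths after the request):

    struct qe_pin *pin;

    pin = qe_pin_request(np, 0);        /* pin 0 of np's gpios property */
    if (IS_ERR(pin))
            return PTR_ERR(pin);

    qe_pin_set_gpio(pin);               /* borrow the pin as a GPIO (input) */
    /* ... drive it through gpiolib ... */
    qe_pin_set_dedicated(pin);          /* restore the firmware-set function */
    qe_pin_free(pin);
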
276 | |||
277 | static int __init qe_add_gpiochips(void) | ||
278 | { | ||
279 | struct device_node *np; | ||
280 | |||
281 | for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") { | ||
282 | int ret; | ||
283 | struct qe_gpio_chip *qe_gc; | ||
284 | struct of_mm_gpio_chip *mm_gc; | ||
285 | struct gpio_chip *gc; | ||
286 | |||
287 | qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL); | ||
288 | if (!qe_gc) { | ||
289 | ret = -ENOMEM; | ||
290 | goto err; | ||
291 | } | ||
292 | |||
293 | spin_lock_init(&qe_gc->lock); | ||
294 | |||
295 | mm_gc = &qe_gc->mm_gc; | ||
296 | gc = &mm_gc->gc; | ||
297 | |||
298 | mm_gc->save_regs = qe_gpio_save_regs; | ||
299 | gc->ngpio = QE_PIO_PINS; | ||
300 | gc->direction_input = qe_gpio_dir_in; | ||
301 | gc->direction_output = qe_gpio_dir_out; | ||
302 | gc->get = qe_gpio_get; | ||
303 | gc->set = qe_gpio_set; | ||
304 | |||
305 | ret = of_mm_gpiochip_add(np, mm_gc); | ||
306 | if (ret) | ||
307 | goto err; | ||
308 | continue; | ||
309 | err: | ||
310 | pr_err("%s: registration failed with status %d\n", | ||
311 | np->full_name, ret); | ||
312 | kfree(qe_gc); | ||
313 | /* try others anyway */ | ||
314 | } | ||
315 | return 0; | ||
316 | } | ||
317 | arch_initcall(qe_add_gpiochips); | ||
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
deleted file mode 100644
index c2518cdb7ddb..000000000000
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ /dev/null
@@ -1,706 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2010 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Authors: Shlomi Gridish <gridish@freescale.com> | ||
5 | * Li Yang <leoli@freescale.com> | ||
6 | * Based on cpm2_common.c from Dan Malek (dmalek@jlc.net) | ||
7 | * | ||
8 | * Description: | ||
9 | * General Purpose functions for the global management of the | ||
10 | * QUICC Engine (QE). | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms of the GNU General Public License as published by the | ||
14 | * Free Software Foundation; either version 2 of the License, or (at your | ||
15 | * option) any later version. | ||
16 | */ | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/param.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/ioport.h> | ||
28 | #include <linux/crc32.h> | ||
29 | #include <linux/mod_devicetable.h> | ||
30 | #include <linux/of_platform.h> | ||
31 | #include <asm/irq.h> | ||
32 | #include <asm/page.h> | ||
33 | #include <asm/pgtable.h> | ||
34 | #include <asm/immap_qe.h> | ||
35 | #include <asm/qe.h> | ||
36 | #include <asm/prom.h> | ||
37 | #include <asm/rheap.h> | ||
38 | |||
39 | static void qe_snums_init(void); | ||
40 | static int qe_sdma_init(void); | ||
41 | |||
42 | static DEFINE_SPINLOCK(qe_lock); | ||
43 | DEFINE_SPINLOCK(cmxgcr_lock); | ||
44 | EXPORT_SYMBOL(cmxgcr_lock); | ||
45 | |||
46 | /* QE snum state */ | ||
47 | enum qe_snum_state { | ||
48 | QE_SNUM_STATE_USED, | ||
49 | QE_SNUM_STATE_FREE | ||
50 | }; | ||
51 | |||
52 | /* QE snum */ | ||
53 | struct qe_snum { | ||
54 | u8 num; | ||
55 | enum qe_snum_state state; | ||
56 | }; | ||
57 | |||
58 | /* We allocate this here because it is used almost exclusively for | ||
59 | * the communication processor devices. | ||
60 | */ | ||
61 | struct qe_immap __iomem *qe_immr; | ||
62 | EXPORT_SYMBOL(qe_immr); | ||
63 | |||
64 | static struct qe_snum snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */ | ||
65 | static unsigned int qe_num_of_snum; | ||
66 | |||
67 | static phys_addr_t qebase = -1; | ||
68 | |||
69 | phys_addr_t get_qe_base(void) | ||
70 | { | ||
71 | struct device_node *qe; | ||
72 | int size; | ||
73 | const u32 *prop; | ||
74 | |||
75 | if (qebase != -1) | ||
76 | return qebase; | ||
77 | |||
78 | qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); | ||
79 | if (!qe) { | ||
80 | qe = of_find_node_by_type(NULL, "qe"); | ||
81 | if (!qe) | ||
82 | return qebase; | ||
83 | } | ||
84 | |||
85 | prop = of_get_property(qe, "reg", &size); | ||
86 | if (prop && size >= sizeof(*prop)) | ||
87 | qebase = of_translate_address(qe, prop); | ||
88 | of_node_put(qe); | ||
89 | |||
90 | return qebase; | ||
91 | } | ||
92 | |||
93 | EXPORT_SYMBOL(get_qe_base); | ||
94 | |||
95 | void qe_reset(void) | ||
96 | { | ||
97 | if (qe_immr == NULL) | ||
98 | qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE); | ||
99 | |||
100 | qe_snums_init(); | ||
101 | |||
102 | qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID, | ||
103 | QE_CR_PROTOCOL_UNSPECIFIED, 0); | ||
104 | |||
105 | /* Reclaim the MURAM memory for our use. */ | ||
106 | qe_muram_init(); | ||
107 | |||
108 | if (qe_sdma_init()) | ||
109 | panic("sdma init failed!"); | ||
110 | } | ||
111 | |||
112 | int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input) | ||
113 | { | ||
114 | unsigned long flags; | ||
115 | u8 mcn_shift = 0, dev_shift = 0; | ||
116 | u32 ret; | ||
117 | |||
118 | spin_lock_irqsave(&qe_lock, flags); | ||
119 | if (cmd == QE_RESET) { | ||
120 | out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG)); | ||
121 | } else { | ||
122 | if (cmd == QE_ASSIGN_PAGE) { | ||
123 | /* Here device is the SNUM, not sub-block */ | ||
124 | dev_shift = QE_CR_SNUM_SHIFT; | ||
125 | } else if (cmd == QE_ASSIGN_RISC) { | ||
126 | /* Here device is the SNUM, and mcnProtocol is | ||
127 | * the e_QeCmdRiscAssignment value */ | ||
128 | dev_shift = QE_CR_SNUM_SHIFT; | ||
129 | mcn_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT; | ||
130 | } else { | ||
131 | if (device == QE_CR_SUBBLOCK_USB) | ||
132 | mcn_shift = QE_CR_MCN_USB_SHIFT; | ||
133 | else | ||
134 | mcn_shift = QE_CR_MCN_NORMAL_SHIFT; | ||
135 | } | ||
136 | |||
137 | out_be32(&qe_immr->cp.cecdr, cmd_input); | ||
138 | out_be32(&qe_immr->cp.cecr, | ||
139 | (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32) | ||
140 | mcn_protocol << mcn_shift)); | ||
141 | } | ||
142 | |||
143 | /* wait for the QE_CR_FLG to clear */ | ||
144 | ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0, | ||
145 | 100, 0); | ||
146 | /* On timeout (e.g. failure), the expression will be false (ret == 0), | ||
147 | otherwise it will be true (ret == 1). */ | ||
148 | spin_unlock_irqrestore(&qe_lock, flags); | ||
149 | |||
150 | return ret == 1; | ||
151 | } | ||
152 | EXPORT_SYMBOL(qe_issue_cmd); | ||
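
qe_issue_cmd() returns true only when QE_CR_FLG clears within the timeout, so callers should treat a false return as a timed-out command. A hedged usage sketch (the command and sub-block values are illustrative picks from asm/qe.h, not from this patch):

    /* Ask UCC fast 1 to stop transmitting; no extra command input. */
    if (!qe_issue_cmd(QE_GRACEFUL_STOP_TX, QE_CR_SUBBLOCK_UCCFAST1,
                      QE_CR_PROTOCOL_UNSPECIFIED, 0))
            pr_err("QE: graceful stop tx timed out\n");
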
153 | |||
154 | /* Set a baud rate generator. This needs lots of work. There are | ||
155 | * 16 BRGs, which can be connected to the QE channels or output | ||
156 | * as clocks. The BRGs are in two different blocks of internal | ||
157 | * memory mapped space. | ||
158 | * The BRG clock is the QE clock divided by 2. | ||
159 | * It was set up long ago during the initial boot phase and is | ||
160 | * given to us. | ||
161 | * Baud rate clocks are zero-based in the driver code (as that maps | ||
162 | * to port numbers). Documentation uses 1-based numbering. | ||
163 | */ | ||
164 | static unsigned int brg_clk = 0; | ||
165 | |||
166 | unsigned int qe_get_brg_clk(void) | ||
167 | { | ||
168 | struct device_node *qe; | ||
169 | int size; | ||
170 | const u32 *prop; | ||
171 | |||
172 | if (brg_clk) | ||
173 | return brg_clk; | ||
174 | |||
175 | qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); | ||
176 | if (!qe) { | ||
177 | qe = of_find_node_by_type(NULL, "qe"); | ||
178 | if (!qe) | ||
179 | return brg_clk; | ||
180 | } | ||
181 | |||
182 | prop = of_get_property(qe, "brg-frequency", &size); | ||
183 | if (prop && size == sizeof(*prop)) | ||
184 | brg_clk = *prop; | ||
185 | |||
186 | of_node_put(qe); | ||
187 | |||
188 | return brg_clk; | ||
189 | } | ||
190 | EXPORT_SYMBOL(qe_get_brg_clk); | ||
191 | |||
192 | /* Program the BRG to the given sampling rate and multiplier | ||
193 | * | ||
194 | * @brg: the BRG, QE_BRG1 - QE_BRG16 | ||
195 | * @rate: the desired sampling rate | ||
196 | * @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or | ||
197 | * GUMR_L[TDCR]. E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01, | ||
198 | * then 'multiplier' should be 8. | ||
199 | */ | ||
200 | int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier) | ||
201 | { | ||
202 | u32 divisor, tempval; | ||
203 | u32 div16 = 0; | ||
204 | |||
205 | if ((brg < QE_BRG1) || (brg > QE_BRG16)) | ||
206 | return -EINVAL; | ||
207 | |||
208 | divisor = qe_get_brg_clk() / (rate * multiplier); | ||
209 | |||
210 | if (divisor > QE_BRGC_DIVISOR_MAX + 1) { | ||
211 | div16 = QE_BRGC_DIV16; | ||
212 | divisor /= 16; | ||
213 | } | ||
214 | |||
215 | /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says | ||
216 | that the BRG divisor must be even if you're not using divide-by-16 | ||
217 | mode. */ | ||
218 | if (!div16 && (divisor & 1) && (divisor > 3)) | ||
219 | divisor++; | ||
220 | |||
221 | tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | | ||
222 | QE_BRGC_ENABLE | div16; | ||
223 | |||
224 | out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval); | ||
225 | |||
226 | return 0; | ||
227 | } | ||
228 | EXPORT_SYMBOL(qe_setbrg); | ||
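
As a worked example of the divisor arithmetic (the clock value is assumed for illustration, not taken from this patch): with a 100 MHz BRG clock, rate = 115200 and multiplier = 16 give

    u32 divisor = 100000000 / (115200 * 16);    /* = 54 */
    /* 54 <= QE_BRGC_DIVISOR_MAX + 1, so div16 stays 0; 54 is already
     * even, so the QE_General4 workaround does not fire, and the BRG
     * register is written with
     * ((54 - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE.
     */
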
229 | |||
230 | /* Convert a string to a QE clock source enum | ||
231 | * | ||
232 | * This function takes a string, typically from a property in the device | ||
233 | * tree, and returns the corresponding "enum qe_clock" value. | ||
234 | */ | ||
235 | enum qe_clock qe_clock_source(const char *source) | ||
236 | { | ||
237 | unsigned int i; | ||
238 | |||
239 | if (strcasecmp(source, "none") == 0) | ||
240 | return QE_CLK_NONE; | ||
241 | |||
242 | if (strncasecmp(source, "brg", 3) == 0) { | ||
243 | i = simple_strtoul(source + 3, NULL, 10); | ||
244 | if ((i >= 1) && (i <= 16)) | ||
245 | return (QE_BRG1 - 1) + i; | ||
246 | else | ||
247 | return QE_CLK_DUMMY; | ||
248 | } | ||
249 | |||
250 | if (strncasecmp(source, "clk", 3) == 0) { | ||
251 | i = simple_strtoul(source + 3, NULL, 10); | ||
252 | if ((i >= 1) && (i <= 24)) | ||
253 | return (QE_CLK1 - 1) + i; | ||
254 | else | ||
255 | return QE_CLK_DUMMY; | ||
256 | } | ||
257 | |||
258 | return QE_CLK_DUMMY; | ||
259 | } | ||
260 | EXPORT_SYMBOL(qe_clock_source); | ||
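
Illustrative mappings (the strings typically come from device-tree clock properties; the property name in the comment is an assumption, not from this patch):

    /* e.g. sprop = of_get_property(np, "rx-clock-name", NULL); */
    qe_clock_source("none");    /* -> QE_CLK_NONE */
    qe_clock_source("brg5");    /* -> QE_BRG5  ((QE_BRG1 - 1) + 5)  */
    qe_clock_source("clk21");   /* -> QE_CLK21 ((QE_CLK1 - 1) + 21) */
    qe_clock_source("brg99");   /* -> QE_CLK_DUMMY (out of range)   */
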
261 | |||
262 | /* Initialize SNUMs (thread serial numbers) according to | ||
263 | * QE Module Control chapter, SNUM table | ||
264 | */ | ||
265 | static void qe_snums_init(void) | ||
266 | { | ||
267 | int i; | ||
268 | static const u8 snum_init_76[] = { | ||
269 | 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D, | ||
270 | 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89, | ||
271 | 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9, | ||
272 | 0xD8, 0xD9, 0xE8, 0xE9, 0x44, 0x45, 0x4C, 0x4D, | ||
273 | 0x54, 0x55, 0x5C, 0x5D, 0x64, 0x65, 0x6C, 0x6D, | ||
274 | 0x74, 0x75, 0x7C, 0x7D, 0x84, 0x85, 0x8C, 0x8D, | ||
275 | 0x94, 0x95, 0x9C, 0x9D, 0xA4, 0xA5, 0xAC, 0xAD, | ||
276 | 0xB4, 0xB5, 0xBC, 0xBD, 0xC4, 0xC5, 0xCC, 0xCD, | ||
277 | 0xD4, 0xD5, 0xDC, 0xDD, 0xE4, 0xE5, 0xEC, 0xED, | ||
278 | 0xF4, 0xF5, 0xFC, 0xFD, | ||
279 | }; | ||
280 | static const u8 snum_init_46[] = { | ||
281 | 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D, | ||
282 | 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89, | ||
283 | 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9, | ||
284 | 0xD8, 0xD9, 0xE8, 0xE9, 0x08, 0x09, 0x18, 0x19, | ||
285 | 0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59, | ||
286 | 0x68, 0x69, 0x78, 0x79, 0x80, 0x81, | ||
287 | }; | ||
288 | static const u8 *snum_init; | ||
289 | |||
290 | qe_num_of_snum = qe_get_num_of_snums(); | ||
291 | |||
292 | if (qe_num_of_snum == 76) | ||
293 | snum_init = snum_init_76; | ||
294 | else | ||
295 | snum_init = snum_init_46; | ||
296 | |||
297 | for (i = 0; i < qe_num_of_snum; i++) { | ||
298 | snums[i].num = snum_init[i]; | ||
299 | snums[i].state = QE_SNUM_STATE_FREE; | ||
300 | } | ||
301 | } | ||
302 | |||
303 | int qe_get_snum(void) | ||
304 | { | ||
305 | unsigned long flags; | ||
306 | int snum = -EBUSY; | ||
307 | int i; | ||
308 | |||
309 | spin_lock_irqsave(&qe_lock, flags); | ||
310 | for (i = 0; i < qe_num_of_snum; i++) { | ||
311 | if (snums[i].state == QE_SNUM_STATE_FREE) { | ||
312 | snums[i].state = QE_SNUM_STATE_USED; | ||
313 | snum = snums[i].num; | ||
314 | break; | ||
315 | } | ||
316 | } | ||
317 | spin_unlock_irqrestore(&qe_lock, flags); | ||
318 | |||
319 | return snum; | ||
320 | } | ||
321 | EXPORT_SYMBOL(qe_get_snum); | ||
322 | |||
323 | void qe_put_snum(u8 snum) | ||
324 | { | ||
325 | int i; | ||
326 | |||
327 | for (i = 0; i < qe_num_of_snum; i++) { | ||
328 | if (snums[i].num == snum) { | ||
329 | snums[i].state = QE_SNUM_STATE_FREE; | ||
330 | break; | ||
331 | } | ||
332 | } | ||
333 | } | ||
334 | EXPORT_SYMBOL(qe_put_snum); | ||
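
A sketch of the intended allocate/release pairing (hypothetical caller, error handling trimmed): a driver takes a free thread serial number before programming a UCC and returns it on teardown:

    int snum = qe_get_snum();

    if (snum < 0)
            return snum;        /* -EBUSY: every SNUM is in use */
    /* ... write snum into the UCC/microcode setup ... */
    qe_put_snum((u8)snum);
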
335 | |||
336 | static int qe_sdma_init(void) | ||
337 | { | ||
338 | struct sdma __iomem *sdma = &qe_immr->sdma; | ||
339 | static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM; | ||
340 | |||
341 | if (!sdma) | ||
342 | return -ENODEV; | ||
343 | |||
344 | /* allocate 2 internal temporary buffers (512 bytes each) for | ||
345 | * the SDMA */ | ||
346 | if (IS_ERR_VALUE(sdma_buf_offset)) { | ||
347 | sdma_buf_offset = qe_muram_alloc(512 * 2, 4096); | ||
348 | if (IS_ERR_VALUE(sdma_buf_offset)) | ||
349 | return -ENOMEM; | ||
350 | } | ||
351 | |||
352 | out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK); | ||
353 | out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK | | ||
354 | (0x1 << QE_SDMR_CEN_SHIFT))); | ||
355 | |||
356 | return 0; | ||
357 | } | ||
358 | |||
359 | /* The maximum number of RISCs we support */ | ||
360 | #define MAX_QE_RISC 4 | ||
361 | |||
362 | /* Firmware information stored here for qe_get_firmware_info() */ | ||
363 | static struct qe_firmware_info qe_firmware_info; | ||
364 | |||
365 | /* | ||
366 | * Set to 1 if QE firmware has been uploaded, and therefore | ||
367 | * qe_firmware_info contains valid data. | ||
368 | */ | ||
369 | static int qe_firmware_uploaded; | ||
370 | |||
371 | /* | ||
372 | * Upload a QE microcode | ||
373 | * | ||
374 | * This function is a worker function for qe_upload_firmware(). It does | ||
375 | * the actual uploading of the microcode. | ||
376 | */ | ||
377 | static void qe_upload_microcode(const void *base, | ||
378 | const struct qe_microcode *ucode) | ||
379 | { | ||
380 | const __be32 *code = base + be32_to_cpu(ucode->code_offset); | ||
381 | unsigned int i; | ||
382 | |||
383 | if (ucode->major || ucode->minor || ucode->revision) | ||
384 | printk(KERN_INFO "qe-firmware: " | ||
385 | "uploading microcode '%s' version %u.%u.%u\n", | ||
386 | ucode->id, ucode->major, ucode->minor, ucode->revision); | ||
387 | else | ||
388 | printk(KERN_INFO "qe-firmware: " | ||
389 | "uploading microcode '%s'\n", ucode->id); | ||
390 | |||
391 | /* Use auto-increment */ | ||
392 | out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) | | ||
393 | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR); | ||
394 | |||
395 | for (i = 0; i < be32_to_cpu(ucode->count); i++) | ||
396 | out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i])); | ||
397 | |||
398 | /* Set I-RAM Ready Register */ | ||
399 | out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY)); | ||
400 | } | ||
401 | |||
402 | /* | ||
403 | * Upload a microcode to the I-RAM at a specific address. | ||
404 | * | ||
405 | * See Documentation/powerpc/qe_firmware.txt for information on QE microcode | ||
406 | * uploading. | ||
407 | * | ||
408 | * Currently, only version 1 is supported, so the 'version' field must be | ||
409 | * set to 1. | ||
410 | * | ||
411 | * The SOC model and revision are not validated, they are only displayed for | ||
412 | * informational purposes. | ||
413 | * | ||
414 | * 'calc_size' is the calculated size, in bytes, of the firmware structure and | ||
415 | * all of the microcode structures, minus the CRC. | ||
416 | * | ||
417 | * 'length' is the size that the structure says it is, including the CRC. | ||
418 | */ | ||
419 | int qe_upload_firmware(const struct qe_firmware *firmware) | ||
420 | { | ||
421 | unsigned int i; | ||
422 | unsigned int j; | ||
423 | u32 crc; | ||
424 | size_t calc_size = sizeof(struct qe_firmware); | ||
425 | size_t length; | ||
426 | const struct qe_header *hdr; | ||
427 | |||
428 | if (!firmware) { | ||
429 | printk(KERN_ERR "qe-firmware: invalid pointer\n"); | ||
430 | return -EINVAL; | ||
431 | } | ||
432 | |||
433 | hdr = &firmware->header; | ||
434 | length = be32_to_cpu(hdr->length); | ||
435 | |||
436 | /* Check the magic */ | ||
437 | if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') || | ||
438 | (hdr->magic[2] != 'F')) { | ||
439 | printk(KERN_ERR "qe-firmware: not a microcode\n"); | ||
440 | return -EPERM; | ||
441 | } | ||
442 | |||
443 | /* Check the version */ | ||
444 | if (hdr->version != 1) { | ||
445 | printk(KERN_ERR "qe-firmware: unsupported version\n"); | ||
446 | return -EPERM; | ||
447 | } | ||
448 | |||
449 | /* Validate some of the fields */ | ||
450 | if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) { | ||
451 | printk(KERN_ERR "qe-firmware: invalid data\n"); | ||
452 | return -EINVAL; | ||
453 | } | ||
454 | |||
455 | /* Validate the length and check if there's a CRC */ | ||
456 | calc_size += (firmware->count - 1) * sizeof(struct qe_microcode); | ||
457 | |||
458 | for (i = 0; i < firmware->count; i++) | ||
459 | /* | ||
460 | * For situations where the second RISC uses the same microcode | ||
461 | * as the first, the 'code_offset' and 'count' fields will be | ||
462 | * zero, so it's okay to add those. | ||
463 | */ | ||
464 | calc_size += sizeof(__be32) * | ||
465 | be32_to_cpu(firmware->microcode[i].count); | ||
466 | |||
467 | /* Validate the length */ | ||
468 | if (length != calc_size + sizeof(__be32)) { | ||
469 | printk(KERN_ERR "qe-firmware: invalid length\n"); | ||
470 | return -EPERM; | ||
471 | } | ||
472 | |||
473 | /* Validate the CRC */ | ||
474 | crc = be32_to_cpu(*(__be32 *)((void *)firmware + calc_size)); | ||
475 | if (crc != crc32(0, firmware, calc_size)) { | ||
476 | printk(KERN_ERR "qe-firmware: firmware CRC is invalid\n"); | ||
477 | return -EIO; | ||
478 | } | ||
479 | |||
480 | /* | ||
481 | * If the microcode calls for it, split the I-RAM. | ||
482 | */ | ||
483 | if (!firmware->split) | ||
484 | setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR); | ||
485 | |||
486 | if (firmware->soc.model) | ||
487 | printk(KERN_INFO | ||
488 | "qe-firmware: firmware '%s' for %u V%u.%u\n", | ||
489 | firmware->id, be16_to_cpu(firmware->soc.model), | ||
490 | firmware->soc.major, firmware->soc.minor); | ||
491 | else | ||
492 | printk(KERN_INFO "qe-firmware: firmware '%s'\n", | ||
493 | firmware->id); | ||
494 | |||
495 | /* | ||
496 | * The QE only supports one microcode per RISC, so clear out all the | ||
497 | * saved microcode information and put in the new. | ||
498 | */ | ||
499 | memset(&qe_firmware_info, 0, sizeof(qe_firmware_info)); | ||
500 | strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id)); | ||
501 | qe_firmware_info.extended_modes = firmware->extended_modes; | ||
502 | memcpy(qe_firmware_info.vtraps, firmware->vtraps, | ||
503 | sizeof(firmware->vtraps)); | ||
504 | |||
505 | /* Loop through each microcode. */ | ||
506 | for (i = 0; i < firmware->count; i++) { | ||
507 | const struct qe_microcode *ucode = &firmware->microcode[i]; | ||
508 | |||
509 | /* Upload a microcode if it's present */ | ||
510 | if (ucode->code_offset) | ||
511 | qe_upload_microcode(firmware, ucode); | ||
512 | |||
513 | /* Program the traps for this processor */ | ||
514 | for (j = 0; j < 16; j++) { | ||
515 | u32 trap = be32_to_cpu(ucode->traps[j]); | ||
516 | |||
517 | if (trap) | ||
518 | out_be32(&qe_immr->rsp[i].tibcr[j], trap); | ||
519 | } | ||
520 | |||
521 | /* Enable traps */ | ||
522 | out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr)); | ||
523 | } | ||
524 | |||
525 | qe_firmware_uploaded = 1; | ||
526 | |||
527 | return 0; | ||
528 | } | ||
529 | EXPORT_SYMBOL(qe_upload_firmware); | ||
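
A worked instance of the length check (the counts are assumed for illustration): for a blob with firmware->count = 2 whose microcodes carry 1000 and 0 words respectively,

    /* calc_size = sizeof(struct qe_firmware)
     *           + (2 - 1) * sizeof(struct qe_microcode)
     *           + (1000 + 0) * sizeof(__be32);
     * the header's 'length' must equal calc_size + sizeof(__be32)
     * (the trailing CRC word), and the CRC must match crc32() over
     * the first calc_size bytes, or the blob is rejected.
     */
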
530 | |||
531 | /* | ||
532 | * Get info on the currently-loaded firmware | ||
533 | * | ||
534 | * This function also checks the device tree to see if the boot loader has | ||
535 | * uploaded a firmware already. | ||
536 | */ | ||
537 | struct qe_firmware_info *qe_get_firmware_info(void) | ||
538 | { | ||
539 | static int initialized; | ||
540 | struct property *prop; | ||
541 | struct device_node *qe; | ||
542 | struct device_node *fw = NULL; | ||
543 | const char *sprop; | ||
544 | unsigned int i; | ||
545 | |||
546 | /* | ||
547 | * If we haven't checked yet, and a driver hasn't uploaded a firmware | ||
548 | * yet, then check the device tree for information. | ||
549 | */ | ||
550 | if (qe_firmware_uploaded) | ||
551 | return &qe_firmware_info; | ||
552 | |||
553 | if (initialized) | ||
554 | return NULL; | ||
555 | |||
556 | initialized = 1; | ||
557 | |||
558 | /* | ||
559 | * Newer device trees have an "fsl,qe" compatible property for the QE | ||
560 | * node, but we still need to support older device trees. | ||
561 | */ | ||
562 | qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); | ||
563 | if (!qe) { | ||
564 | qe = of_find_node_by_type(NULL, "qe"); | ||
565 | if (!qe) | ||
566 | return NULL; | ||
567 | } | ||
568 | |||
569 | /* Find the 'firmware' child node */ | ||
570 | for_each_child_of_node(qe, fw) { | ||
571 | if (strcmp(fw->name, "firmware") == 0) | ||
572 | break; | ||
573 | } | ||
574 | |||
575 | of_node_put(qe); | ||
576 | |||
577 | /* Did we find the 'firmware' node? */ | ||
578 | if (!fw) | ||
579 | return NULL; | ||
580 | |||
581 | qe_firmware_uploaded = 1; | ||
582 | |||
583 | /* Copy the data into qe_firmware_info */ | ||
584 | sprop = of_get_property(fw, "id", NULL); | ||
585 | if (sprop) | ||
586 | strlcpy(qe_firmware_info.id, sprop, | ||
587 | sizeof(qe_firmware_info.id)); | ||
588 | |||
589 | prop = of_find_property(fw, "extended-modes", NULL); | ||
590 | if (prop && (prop->length == sizeof(u64))) { | ||
591 | const u64 *iprop = prop->value; | ||
592 | |||
593 | qe_firmware_info.extended_modes = *iprop; | ||
594 | } | ||
595 | |||
596 | prop = of_find_property(fw, "virtual-traps", NULL); | ||
597 | if (prop && (prop->length == 32)) { | ||
598 | const u32 *iprop = prop->value; | ||
599 | |||
600 | for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++) | ||
601 | qe_firmware_info.vtraps[i] = iprop[i]; | ||
602 | } | ||
603 | |||
604 | of_node_put(fw); | ||
605 | |||
606 | return &qe_firmware_info; | ||
607 | } | ||
608 | EXPORT_SYMBOL(qe_get_firmware_info); | ||
609 | |||
610 | unsigned int qe_get_num_of_risc(void) | ||
611 | { | ||
612 | struct device_node *qe; | ||
613 | int size; | ||
614 | unsigned int num_of_risc = 0; | ||
615 | const u32 *prop; | ||
616 | |||
617 | qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); | ||
618 | if (!qe) { | ||
619 | /* Older device trees did not have an "fsl,qe" | ||
620 | * compatible property, so we need to look for | ||
621 | * the QE node by name. | ||
622 | */ | ||
623 | qe = of_find_node_by_type(NULL, "qe"); | ||
624 | if (!qe) | ||
625 | return num_of_risc; | ||
626 | } | ||
627 | |||
628 | prop = of_get_property(qe, "fsl,qe-num-riscs", &size); | ||
629 | if (prop && size == sizeof(*prop)) | ||
630 | num_of_risc = *prop; | ||
631 | |||
632 | of_node_put(qe); | ||
633 | |||
634 | return num_of_risc; | ||
635 | } | ||
636 | EXPORT_SYMBOL(qe_get_num_of_risc); | ||
637 | |||
638 | unsigned int qe_get_num_of_snums(void) | ||
639 | { | ||
640 | struct device_node *qe; | ||
641 | int size; | ||
642 | unsigned int num_of_snums; | ||
643 | const u32 *prop; | ||
644 | |||
645 | num_of_snums = 28; /* The default number of SNUMs for threads is 28 */ | ||
646 | qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); | ||
647 | if (!qe) { | ||
648 | /* Older device trees did not have an "fsl,qe" | ||
649 | * compatible property, so we need to look for | ||
650 | * the QE node by name. | ||
651 | */ | ||
652 | qe = of_find_node_by_type(NULL, "qe"); | ||
653 | if (!qe) | ||
654 | return num_of_snums; | ||
655 | } | ||
656 | |||
657 | prop = of_get_property(qe, "fsl,qe-num-snums", &size); | ||
658 | if (prop && size == sizeof(*prop)) { | ||
659 | num_of_snums = *prop; | ||
660 | if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) { | ||
661 | /* No QE ever has fewer than 28 SNUMs */ | ||
662 | pr_err("QE: number of snum is invalid\n"); | ||
663 | of_node_put(qe); | ||
664 | return -EINVAL; | ||
665 | } | ||
666 | } | ||
667 | |||
668 | of_node_put(qe); | ||
669 | |||
670 | return num_of_snums; | ||
671 | } | ||
672 | EXPORT_SYMBOL(qe_get_num_of_snums); | ||
673 | |||
674 | #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) | ||
675 | static int qe_resume(struct platform_device *ofdev) | ||
676 | { | ||
677 | if (!qe_alive_during_sleep()) | ||
678 | qe_reset(); | ||
679 | return 0; | ||
680 | } | ||
681 | |||
682 | static int qe_probe(struct platform_device *ofdev) | ||
683 | { | ||
684 | return 0; | ||
685 | } | ||
686 | |||
687 | static const struct of_device_id qe_ids[] = { | ||
688 | { .compatible = "fsl,qe", }, | ||
689 | { }, | ||
690 | }; | ||
691 | |||
692 | static struct platform_driver qe_driver = { | ||
693 | .driver = { | ||
694 | .name = "fsl-qe", | ||
695 | .of_match_table = qe_ids, | ||
696 | }, | ||
697 | .probe = qe_probe, | ||
698 | .resume = qe_resume, | ||
699 | }; | ||
700 | |||
701 | static int __init qe_drv_init(void) | ||
702 | { | ||
703 | return platform_driver_register(&qe_driver); | ||
704 | } | ||
705 | device_initcall(qe_drv_init); | ||
706 | #endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */ | ||
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
deleted file mode 100644
index ef36f16f9f6f..000000000000
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ /dev/null
@@ -1,502 +0,0 @@ | |||
1 | /* | ||
2 | * arch/powerpc/sysdev/qe_lib/qe_ic.c | ||
3 | * | ||
4 | * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. | ||
5 | * | ||
6 | * Author: Li Yang <leoli@freescale.com> | ||
7 | * Based on code from Shlomi Gridish <gridish@freescale.com> | ||
8 | * | ||
9 | * QUICC ENGINE Interrupt Controller | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License as published by the | ||
13 | * Free Software Foundation; either version 2 of the License, or (at your | ||
14 | * option) any later version. | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/reboot.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/stddef.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/signal.h> | ||
25 | #include <linux/device.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <asm/irq.h> | ||
28 | #include <asm/io.h> | ||
29 | #include <asm/prom.h> | ||
30 | #include <asm/qe_ic.h> | ||
31 | |||
32 | #include "qe_ic.h" | ||
33 | |||
34 | static DEFINE_RAW_SPINLOCK(qe_ic_lock); | ||
35 | |||
36 | static struct qe_ic_info qe_ic_info[] = { | ||
37 | [1] = { | ||
38 | .mask = 0x00008000, | ||
39 | .mask_reg = QEIC_CIMR, | ||
40 | .pri_code = 0, | ||
41 | .pri_reg = QEIC_CIPWCC, | ||
42 | }, | ||
43 | [2] = { | ||
44 | .mask = 0x00004000, | ||
45 | .mask_reg = QEIC_CIMR, | ||
46 | .pri_code = 1, | ||
47 | .pri_reg = QEIC_CIPWCC, | ||
48 | }, | ||
49 | [3] = { | ||
50 | .mask = 0x00002000, | ||
51 | .mask_reg = QEIC_CIMR, | ||
52 | .pri_code = 2, | ||
53 | .pri_reg = QEIC_CIPWCC, | ||
54 | }, | ||
55 | [10] = { | ||
56 | .mask = 0x00000040, | ||
57 | .mask_reg = QEIC_CIMR, | ||
58 | .pri_code = 1, | ||
59 | .pri_reg = QEIC_CIPZCC, | ||
60 | }, | ||
61 | [11] = { | ||
62 | .mask = 0x00000020, | ||
63 | .mask_reg = QEIC_CIMR, | ||
64 | .pri_code = 2, | ||
65 | .pri_reg = QEIC_CIPZCC, | ||
66 | }, | ||
67 | [12] = { | ||
68 | .mask = 0x00000010, | ||
69 | .mask_reg = QEIC_CIMR, | ||
70 | .pri_code = 3, | ||
71 | .pri_reg = QEIC_CIPZCC, | ||
72 | }, | ||
73 | [13] = { | ||
74 | .mask = 0x00000008, | ||
75 | .mask_reg = QEIC_CIMR, | ||
76 | .pri_code = 4, | ||
77 | .pri_reg = QEIC_CIPZCC, | ||
78 | }, | ||
79 | [14] = { | ||
80 | .mask = 0x00000004, | ||
81 | .mask_reg = QEIC_CIMR, | ||
82 | .pri_code = 5, | ||
83 | .pri_reg = QEIC_CIPZCC, | ||
84 | }, | ||
85 | [15] = { | ||
86 | .mask = 0x00000002, | ||
87 | .mask_reg = QEIC_CIMR, | ||
88 | .pri_code = 6, | ||
89 | .pri_reg = QEIC_CIPZCC, | ||
90 | }, | ||
91 | [20] = { | ||
92 | .mask = 0x10000000, | ||
93 | .mask_reg = QEIC_CRIMR, | ||
94 | .pri_code = 3, | ||
95 | .pri_reg = QEIC_CIPRTA, | ||
96 | }, | ||
97 | [25] = { | ||
98 | .mask = 0x00800000, | ||
99 | .mask_reg = QEIC_CRIMR, | ||
100 | .pri_code = 0, | ||
101 | .pri_reg = QEIC_CIPRTB, | ||
102 | }, | ||
103 | [26] = { | ||
104 | .mask = 0x00400000, | ||
105 | .mask_reg = QEIC_CRIMR, | ||
106 | .pri_code = 1, | ||
107 | .pri_reg = QEIC_CIPRTB, | ||
108 | }, | ||
109 | [27] = { | ||
110 | .mask = 0x00200000, | ||
111 | .mask_reg = QEIC_CRIMR, | ||
112 | .pri_code = 2, | ||
113 | .pri_reg = QEIC_CIPRTB, | ||
114 | }, | ||
115 | [28] = { | ||
116 | .mask = 0x00100000, | ||
117 | .mask_reg = QEIC_CRIMR, | ||
118 | .pri_code = 3, | ||
119 | .pri_reg = QEIC_CIPRTB, | ||
120 | }, | ||
121 | [32] = { | ||
122 | .mask = 0x80000000, | ||
123 | .mask_reg = QEIC_CIMR, | ||
124 | .pri_code = 0, | ||
125 | .pri_reg = QEIC_CIPXCC, | ||
126 | }, | ||
127 | [33] = { | ||
128 | .mask = 0x40000000, | ||
129 | .mask_reg = QEIC_CIMR, | ||
130 | .pri_code = 1, | ||
131 | .pri_reg = QEIC_CIPXCC, | ||
132 | }, | ||
133 | [34] = { | ||
134 | .mask = 0x20000000, | ||
135 | .mask_reg = QEIC_CIMR, | ||
136 | .pri_code = 2, | ||
137 | .pri_reg = QEIC_CIPXCC, | ||
138 | }, | ||
139 | [35] = { | ||
140 | .mask = 0x10000000, | ||
141 | .mask_reg = QEIC_CIMR, | ||
142 | .pri_code = 3, | ||
143 | .pri_reg = QEIC_CIPXCC, | ||
144 | }, | ||
145 | [36] = { | ||
146 | .mask = 0x08000000, | ||
147 | .mask_reg = QEIC_CIMR, | ||
148 | .pri_code = 4, | ||
149 | .pri_reg = QEIC_CIPXCC, | ||
150 | }, | ||
151 | [40] = { | ||
152 | .mask = 0x00800000, | ||
153 | .mask_reg = QEIC_CIMR, | ||
154 | .pri_code = 0, | ||
155 | .pri_reg = QEIC_CIPYCC, | ||
156 | }, | ||
157 | [41] = { | ||
158 | .mask = 0x00400000, | ||
159 | .mask_reg = QEIC_CIMR, | ||
160 | .pri_code = 1, | ||
161 | .pri_reg = QEIC_CIPYCC, | ||
162 | }, | ||
163 | [42] = { | ||
164 | .mask = 0x00200000, | ||
165 | .mask_reg = QEIC_CIMR, | ||
166 | .pri_code = 2, | ||
167 | .pri_reg = QEIC_CIPYCC, | ||
168 | }, | ||
169 | [43] = { | ||
170 | .mask = 0x00100000, | ||
171 | .mask_reg = QEIC_CIMR, | ||
172 | .pri_code = 3, | ||
173 | .pri_reg = QEIC_CIPYCC, | ||
174 | }, | ||
175 | }; | ||
176 | |||
177 | static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg) | ||
178 | { | ||
179 | return in_be32(base + (reg >> 2)); | ||
180 | } | ||
181 | |||
182 | static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg, | ||
183 | u32 value) | ||
184 | { | ||
185 | out_be32(base + (reg >> 2), value); | ||
186 | } | ||
187 | |||
188 | static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) | ||
189 | { | ||
190 | return irq_get_chip_data(virq); | ||
191 | } | ||
192 | |||
193 | static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d) | ||
194 | { | ||
195 | return irq_data_get_irq_chip_data(d); | ||
196 | } | ||
197 | |||
198 | static void qe_ic_unmask_irq(struct irq_data *d) | ||
199 | { | ||
200 | struct qe_ic *qe_ic = qe_ic_from_irq_data(d); | ||
201 | unsigned int src = irqd_to_hwirq(d); | ||
202 | unsigned long flags; | ||
203 | u32 temp; | ||
204 | |||
205 | raw_spin_lock_irqsave(&qe_ic_lock, flags); | ||
206 | |||
207 | temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); | ||
208 | qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, | ||
209 | temp | qe_ic_info[src].mask); | ||
210 | |||
211 | raw_spin_unlock_irqrestore(&qe_ic_lock, flags); | ||
212 | } | ||
213 | |||
214 | static void qe_ic_mask_irq(struct irq_data *d) | ||
215 | { | ||
216 | struct qe_ic *qe_ic = qe_ic_from_irq_data(d); | ||
217 | unsigned int src = irqd_to_hwirq(d); | ||
218 | unsigned long flags; | ||
219 | u32 temp; | ||
220 | |||
221 | raw_spin_lock_irqsave(&qe_ic_lock, flags); | ||
222 | |||
223 | temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); | ||
224 | qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, | ||
225 | temp & ~qe_ic_info[src].mask); | ||
226 | |||
227 | /* Flush the above write before enabling interrupts; otherwise, | ||
228 | * spurious interrupts will sometimes happen. To be 100% sure | ||
229 | * that the write has reached the device before interrupts are | ||
230 | * enabled, the mask register would have to be read back; however, | ||
231 | * this is not required for correctness, only to avoid wasting | ||
232 | * time on a large number of spurious interrupts. In testing, | ||
233 | * a sync reduced the observed spurious interrupts to zero. | ||
234 | */ | ||
235 | mb(); | ||
236 | |||
237 | raw_spin_unlock_irqrestore(&qe_ic_lock, flags); | ||
238 | } | ||
239 | |||
240 | static struct irq_chip qe_ic_irq_chip = { | ||
241 | .name = "QEIC", | ||
242 | .irq_unmask = qe_ic_unmask_irq, | ||
243 | .irq_mask = qe_ic_mask_irq, | ||
244 | .irq_mask_ack = qe_ic_mask_irq, | ||
245 | }; | ||
246 | |||
247 | static int qe_ic_host_match(struct irq_domain *h, struct device_node *node, | ||
248 | enum irq_domain_bus_token bus_token) | ||
249 | { | ||
250 | /* Exact match, unless qe_ic node is NULL */ | ||
251 | struct device_node *of_node = irq_domain_get_of_node(h); | ||
252 | return of_node == NULL || of_node == node; | ||
253 | } | ||
254 | |||
255 | static int qe_ic_host_map(struct irq_domain *h, unsigned int virq, | ||
256 | irq_hw_number_t hw) | ||
257 | { | ||
258 | struct qe_ic *qe_ic = h->host_data; | ||
259 | struct irq_chip *chip; | ||
260 | |||
261 | if (qe_ic_info[hw].mask == 0) { | ||
262 | printk(KERN_ERR "Can't map reserved IRQ\n"); | ||
263 | return -EINVAL; | ||
264 | } | ||
265 | /* Default chip */ | ||
266 | chip = &qe_ic->hc_irq; | ||
267 | |||
268 | irq_set_chip_data(virq, qe_ic); | ||
269 | irq_set_status_flags(virq, IRQ_LEVEL); | ||
270 | |||
271 | irq_set_chip_and_handler(virq, chip, handle_level_irq); | ||
272 | |||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | static const struct irq_domain_ops qe_ic_host_ops = { | ||
277 | .match = qe_ic_host_match, | ||
278 | .map = qe_ic_host_map, | ||
279 | .xlate = irq_domain_xlate_onetwocell, | ||
280 | }; | ||
281 | |||
282 | /* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ | ||
283 | unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) | ||
284 | { | ||
285 | int irq; | ||
286 | |||
287 | BUG_ON(qe_ic == NULL); | ||
288 | |||
289 | /* get the interrupt source vector. */ | ||
290 | irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26; | ||
291 | |||
292 | if (irq == 0) | ||
293 | return NO_IRQ; | ||
294 | |||
295 | return irq_linear_revmap(qe_ic->irqhost, irq); | ||
296 | } | ||
297 | |||
298 | /* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ | ||
299 | unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) | ||
300 | { | ||
301 | int irq; | ||
302 | |||
303 | BUG_ON(qe_ic == NULL); | ||
304 | |||
305 | /* get the interrupt source vector. */ | ||
306 | irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26; | ||
307 | |||
308 | if (irq == 0) | ||
309 | return NO_IRQ; | ||
310 | |||
311 | return irq_linear_revmap(qe_ic->irqhost, irq); | ||
312 | } | ||
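
These two getters are meant to be called from the board's chained handlers, which qe_ic_init() below wires up. A minimal low-IRQ cascade sketch (assumed shape; the in-tree helpers in asm/qe_ic.h follow the same pattern):

    static void qe_ic_cascade_low(struct irq_desc *desc)
    {
            struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
            unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);

            if (cascade_irq != NO_IRQ)
                    generic_handle_irq(cascade_irq);
    }
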
313 | |||
314 | void __init qe_ic_init(struct device_node *node, unsigned int flags, | ||
315 | void (*low_handler)(struct irq_desc *desc), | ||
316 | void (*high_handler)(struct irq_desc *desc)) | ||
317 | { | ||
318 | struct qe_ic *qe_ic; | ||
319 | struct resource res; | ||
320 | u32 temp = 0, ret, high_active = 0; | ||
321 | |||
322 | ret = of_address_to_resource(node, 0, &res); | ||
323 | if (ret) | ||
324 | return; | ||
325 | |||
326 | qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL); | ||
327 | if (qe_ic == NULL) | ||
328 | return; | ||
329 | |||
330 | qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS, | ||
331 | &qe_ic_host_ops, qe_ic); | ||
332 | if (qe_ic->irqhost == NULL) { | ||
333 | kfree(qe_ic); | ||
334 | return; | ||
335 | } | ||
336 | |||
337 | qe_ic->regs = ioremap(res.start, resource_size(&res)); | ||
338 | |||
339 | qe_ic->hc_irq = qe_ic_irq_chip; | ||
340 | |||
341 | qe_ic->virq_high = irq_of_parse_and_map(node, 0); | ||
342 | qe_ic->virq_low = irq_of_parse_and_map(node, 1); | ||
343 | |||
344 | if (qe_ic->virq_low == NO_IRQ) { | ||
345 | printk(KERN_ERR "Failed to map QE_IC low IRQ\n"); | ||
346 | kfree(qe_ic); | ||
347 | return; | ||
348 | } | ||
349 | |||
350 | /* default priority scheme is grouped. If spread mode is */ | ||
351 | /* required, configure cicr accordingly. */ | ||
352 | if (flags & QE_IC_SPREADMODE_GRP_W) | ||
353 | temp |= CICR_GWCC; | ||
354 | if (flags & QE_IC_SPREADMODE_GRP_X) | ||
355 | temp |= CICR_GXCC; | ||
356 | if (flags & QE_IC_SPREADMODE_GRP_Y) | ||
357 | temp |= CICR_GYCC; | ||
358 | if (flags & QE_IC_SPREADMODE_GRP_Z) | ||
359 | temp |= CICR_GZCC; | ||
360 | if (flags & QE_IC_SPREADMODE_GRP_RISCA) | ||
361 | temp |= CICR_GRTA; | ||
362 | if (flags & QE_IC_SPREADMODE_GRP_RISCB) | ||
363 | temp |= CICR_GRTB; | ||
364 | |||
365 | /* choose destination signal for highest priority interrupt */ | ||
366 | if (flags & QE_IC_HIGH_SIGNAL) { | ||
367 | temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT); | ||
368 | high_active = 1; | ||
369 | } | ||
370 | |||
371 | qe_ic_write(qe_ic->regs, QEIC_CICR, temp); | ||
372 | |||
373 | irq_set_handler_data(qe_ic->virq_low, qe_ic); | ||
374 | irq_set_chained_handler(qe_ic->virq_low, low_handler); | ||
375 | |||
376 | if (qe_ic->virq_high != NO_IRQ && | ||
377 | qe_ic->virq_high != qe_ic->virq_low) { | ||
378 | irq_set_handler_data(qe_ic->virq_high, qe_ic); | ||
379 | irq_set_chained_handler(qe_ic->virq_high, high_handler); | ||
380 | } | ||
381 | } | ||
382 | |||
383 | void qe_ic_set_highest_priority(unsigned int virq, int high) | ||
384 | { | ||
385 | struct qe_ic *qe_ic = qe_ic_from_irq(virq); | ||
386 | unsigned int src = virq_to_hw(virq); | ||
387 | u32 temp = 0; | ||
388 | |||
389 | temp = qe_ic_read(qe_ic->regs, QEIC_CICR); | ||
390 | |||
391 | temp &= ~CICR_HP_MASK; | ||
392 | temp |= src << CICR_HP_SHIFT; | ||
393 | |||
394 | temp &= ~CICR_HPIT_MASK; | ||
395 | temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT; | ||
396 | |||
397 | qe_ic_write(qe_ic->regs, QEIC_CICR, temp); | ||
398 | } | ||
399 | |||
400 | /* Set the priority level within its group, from 1 to 8 */ | ||
401 | int qe_ic_set_priority(unsigned int virq, unsigned int priority) | ||
402 | { | ||
403 | struct qe_ic *qe_ic = qe_ic_from_irq(virq); | ||
404 | unsigned int src = virq_to_hw(virq); | ||
405 | u32 temp; | ||
406 | |||
407 | if (priority > 8 || priority == 0) | ||
408 | return -EINVAL; | ||
409 | if (src > 127) | ||
410 | return -EINVAL; | ||
411 | if (qe_ic_info[src].pri_reg == 0) | ||
412 | return -EINVAL; | ||
413 | |||
414 | temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg); | ||
415 | |||
416 | if (priority < 4) { | ||
417 | temp &= ~(0x7 << (32 - priority * 3)); | ||
418 | temp |= qe_ic_info[src].pri_code << (32 - priority * 3); | ||
419 | } else { | ||
420 | temp &= ~(0x7 << (24 - priority * 3)); | ||
421 | temp |= qe_ic_info[src].pri_code << (24 - priority * 3); | ||
422 | } | ||
423 | |||
424 | qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp); | ||
425 | |||
426 | return 0; | ||
427 | } | ||
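
Worked example for the first branch (the values follow directly from the code above): priority = 2 selects the 3-bit field at shift 32 - 2 * 3 = 26, i.e. CIPCC_SHIFT_PRI1 in qe_ic.h, which is cleared and reloaded with the source's priority code:

    /* priority = 2:
     * temp &= ~(0x7 << 26);
     * temp |=  qe_ic_info[src].pri_code << 26;
     */
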
428 | |||
429 | /* Set a QE priority to use the high irq; only priorities 1-2 can use the high irq */ | ||
430 | int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high) | ||
431 | { | ||
432 | struct qe_ic *qe_ic = qe_ic_from_irq(virq); | ||
433 | unsigned int src = virq_to_hw(virq); | ||
434 | u32 temp, control_reg = QEIC_CICNR, shift = 0; | ||
435 | |||
436 | if (priority > 2 || priority == 0) | ||
437 | return -EINVAL; | ||
438 | |||
439 | switch (qe_ic_info[src].pri_reg) { | ||
440 | case QEIC_CIPZCC: | ||
441 | shift = CICNR_ZCC1T_SHIFT; | ||
442 | break; | ||
443 | case QEIC_CIPWCC: | ||
444 | shift = CICNR_WCC1T_SHIFT; | ||
445 | break; | ||
446 | case QEIC_CIPYCC: | ||
447 | shift = CICNR_YCC1T_SHIFT; | ||
448 | break; | ||
449 | case QEIC_CIPXCC: | ||
450 | shift = CICNR_XCC1T_SHIFT; | ||
451 | break; | ||
452 | case QEIC_CIPRTA: | ||
453 | shift = CRICR_RTA1T_SHIFT; | ||
454 | control_reg = QEIC_CRICR; | ||
455 | break; | ||
456 | case QEIC_CIPRTB: | ||
457 | shift = CRICR_RTB1T_SHIFT; | ||
458 | control_reg = QEIC_CRICR; | ||
459 | break; | ||
460 | default: | ||
461 | return -EINVAL; | ||
462 | } | ||
463 | |||
464 | shift += (2 - priority) * 2; | ||
465 | temp = qe_ic_read(qe_ic->regs, control_reg); | ||
466 | temp &= ~(SIGNAL_MASK << shift); | ||
467 | temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift; | ||
468 | qe_ic_write(qe_ic->regs, control_reg, temp); | ||
469 | |||
470 | return 0; | ||
471 | } | ||
472 | |||
473 | static struct bus_type qe_ic_subsys = { | ||
474 | .name = "qe_ic", | ||
475 | .dev_name = "qe_ic", | ||
476 | }; | ||
477 | |||
478 | static struct device device_qe_ic = { | ||
479 | .id = 0, | ||
480 | .bus = &qe_ic_subsys, | ||
481 | }; | ||
482 | |||
483 | static int __init init_qe_ic_sysfs(void) | ||
484 | { | ||
485 | int rc; | ||
486 | |||
487 | printk(KERN_DEBUG "Registering qe_ic with sysfs...\n"); | ||
488 | |||
489 | rc = subsys_system_register(&qe_ic_subsys, NULL); | ||
490 | if (rc) { | ||
491 | printk(KERN_ERR "Failed registering qe_ic sys class\n"); | ||
492 | return -ENODEV; | ||
493 | } | ||
494 | rc = device_register(&device_qe_ic); | ||
495 | if (rc) { | ||
496 | printk(KERN_ERR "Failed registering qe_ic sys device\n"); | ||
497 | return -ENODEV; | ||
498 | } | ||
499 | return 0; | ||
500 | } | ||
501 | |||
502 | subsys_initcall(init_qe_ic_sysfs); | ||
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.h b/arch/powerpc/sysdev/qe_lib/qe_ic.h
deleted file mode 100644
index efef7ab9b753..000000000000
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.h
+++ /dev/null
@@ -1,103 +0,0 @@ | |||
1 | /* | ||
2 | * arch/powerpc/sysdev/qe_lib/qe_ic.h | ||
3 | * | ||
4 | * QUICC ENGINE Interrupt Controller Header | ||
5 | * | ||
6 | * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. | ||
7 | * | ||
8 | * Author: Li Yang <leoli@freescale.com> | ||
9 | * Based on code from Shlomi Gridish <gridish@freescale.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License as published by the | ||
13 | * Free Software Foundation; either version 2 of the License, or (at your | ||
14 | * option) any later version. | ||
15 | */ | ||
16 | #ifndef _POWERPC_SYSDEV_QE_IC_H | ||
17 | #define _POWERPC_SYSDEV_QE_IC_H | ||
18 | |||
19 | #include <asm/qe_ic.h> | ||
20 | |||
21 | #define NR_QE_IC_INTS 64 | ||
22 | |||
23 | /* QE IC registers offset */ | ||
24 | #define QEIC_CICR 0x00 | ||
25 | #define QEIC_CIVEC 0x04 | ||
26 | #define QEIC_CRIPNR 0x08 | ||
27 | #define QEIC_CIPNR 0x0c | ||
28 | #define QEIC_CIPXCC 0x10 | ||
29 | #define QEIC_CIPYCC 0x14 | ||
30 | #define QEIC_CIPWCC 0x18 | ||
31 | #define QEIC_CIPZCC 0x1c | ||
32 | #define QEIC_CIMR 0x20 | ||
33 | #define QEIC_CRIMR 0x24 | ||
34 | #define QEIC_CICNR 0x28 | ||
35 | #define QEIC_CIPRTA 0x30 | ||
36 | #define QEIC_CIPRTB 0x34 | ||
37 | #define QEIC_CRICR 0x3c | ||
38 | #define QEIC_CHIVEC 0x60 | ||
39 | |||
40 | /* Interrupt priority registers */ | ||
41 | #define CIPCC_SHIFT_PRI0 29 | ||
42 | #define CIPCC_SHIFT_PRI1 26 | ||
43 | #define CIPCC_SHIFT_PRI2 23 | ||
44 | #define CIPCC_SHIFT_PRI3 20 | ||
45 | #define CIPCC_SHIFT_PRI4 13 | ||
46 | #define CIPCC_SHIFT_PRI5 10 | ||
47 | #define CIPCC_SHIFT_PRI6 7 | ||
48 | #define CIPCC_SHIFT_PRI7 4 | ||
49 | |||
50 | /* CICR priority modes */ | ||
51 | #define CICR_GWCC 0x00040000 | ||
52 | #define CICR_GXCC 0x00020000 | ||
53 | #define CICR_GYCC 0x00010000 | ||
54 | #define CICR_GZCC 0x00080000 | ||
55 | #define CICR_GRTA 0x00200000 | ||
56 | #define CICR_GRTB 0x00400000 | ||
57 | #define CICR_HPIT_SHIFT 8 | ||
58 | #define CICR_HPIT_MASK 0x00000300 | ||
59 | #define CICR_HP_SHIFT 24 | ||
60 | #define CICR_HP_MASK 0x3f000000 | ||
61 | |||
62 | /* CICNR */ | ||
63 | #define CICNR_WCC1T_SHIFT 20 | ||
64 | #define CICNR_ZCC1T_SHIFT 28 | ||
65 | #define CICNR_YCC1T_SHIFT 12 | ||
66 | #define CICNR_XCC1T_SHIFT 4 | ||
67 | |||
68 | /* CRICR */ | ||
69 | #define CRICR_RTA1T_SHIFT 20 | ||
70 | #define CRICR_RTB1T_SHIFT 28 | ||
71 | |||
72 | /* Signal indicator */ | ||
73 | #define SIGNAL_MASK 3 | ||
74 | #define SIGNAL_HIGH 2 | ||
75 | #define SIGNAL_LOW 0 | ||
76 | |||
77 | struct qe_ic { | ||
78 | /* Control registers offset */ | ||
79 | volatile u32 __iomem *regs; | ||
80 | |||
81 | /* The remapper for this QEIC */ | ||
82 | struct irq_domain *irqhost; | ||
83 | |||
84 | /* The "linux" controller struct */ | ||
85 | struct irq_chip hc_irq; | ||
86 | |||
87 | /* VIRQ numbers of QE high/low irqs */ | ||
88 | unsigned int virq_high; | ||
89 | unsigned int virq_low; | ||
90 | }; | ||
91 | |||
92 | /* | ||
93 | * QE interrupt controller internal structure | ||
94 | */ | ||
95 | struct qe_ic_info { | ||
96 | u32 mask; /* location of this source in the QIMR register. */ | ||
97 | u32 mask_reg; /* Mask register offset */ | ||
98 | u8 pri_code; /* for grouped interrupt sources - the interrupt | ||
99 | code as it appears in the group priority register */ | ||
100 | u32 pri_reg; /* Group priority register offset */ | ||
101 | }; | ||
102 | |||
103 | #endif /* _POWERPC_SYSDEV_QE_IC_H */ | ||
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
deleted file mode 100644
index 7ea0174f6d3d..000000000000
--- a/arch/powerpc/sysdev/qe_lib/qe_io.c
+++ /dev/null
@@ -1,192 +0,0 @@ | |||
1 | /* | ||
2 | * arch/powerpc/sysdev/qe_lib/qe_io.c | ||
3 | * | ||
4 | * QE Parallel I/O ports configuration routines | ||
5 | * | ||
6 | * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved. | ||
7 | * | ||
8 | * Author: Li Yang <LeoLi@freescale.com> | ||
9 | * Based on code from Shlomi Gridish <gridish@freescale.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License as published by the | ||
13 | * Free Software Foundation; either version 2 of the License, or (at your | ||
14 | * option) any later version. | ||
15 | */ | ||
16 | |||
17 | #include <linux/stddef.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/ioport.h> | ||
22 | |||
23 | #include <asm/io.h> | ||
24 | #include <asm/qe.h> | ||
25 | #include <asm/prom.h> | ||
26 | #include <sysdev/fsl_soc.h> | ||
27 | |||
28 | #undef DEBUG | ||
29 | |||
30 | static struct qe_pio_regs __iomem *par_io; | ||
31 | static int num_par_io_ports = 0; | ||
32 | |||
33 | int par_io_init(struct device_node *np) | ||
34 | { | ||
35 | struct resource res; | ||
36 | int ret; | ||
37 | const u32 *num_ports; | ||
38 | |||
39 | /* Map Parallel I/O ports registers */ | ||
40 | ret = of_address_to_resource(np, 0, &res); | ||
41 | if (ret) | ||
42 | return ret; | ||
43 | par_io = ioremap(res.start, resource_size(&res)); | ||
44 | |||
45 | num_ports = of_get_property(np, "num-ports", NULL); | ||
46 | if (num_ports) | ||
47 | num_par_io_ports = *num_ports; | ||
48 | |||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir, | ||
53 | int open_drain, int assignment, int has_irq) | ||
54 | { | ||
55 | u32 pin_mask1bit; | ||
56 | u32 pin_mask2bits; | ||
57 | u32 new_mask2bits; | ||
58 | u32 tmp_val; | ||
59 | |||
60 | /* calculate pin locations for the 1-bit and 2-bit fields */ | ||
61 | pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1))); | ||
62 | |||
63 | /* Set open drain, if required */ | ||
64 | tmp_val = in_be32(&par_io->cpodr); | ||
65 | if (open_drain) | ||
66 | out_be32(&par_io->cpodr, pin_mask1bit | tmp_val); | ||
67 | else | ||
68 | out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val); | ||
69 | |||
70 | /* define direction */ | ||
71 | tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ? | ||
72 | in_be32(&par_io->cpdir2) : | ||
73 | in_be32(&par_io->cpdir1); | ||
74 | |||
75 | /* get all bits mask for 2 bit per port */ | ||
76 | pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS - | ||
77 | (pin % (QE_PIO_PINS / 2) + 1) * 2)); | ||
78 | |||
79 | /* Get the final mask we need for the right definition */ | ||
80 | new_mask2bits = (u32) (dir << (QE_PIO_PINS - | ||
81 | (pin % (QE_PIO_PINS / 2) + 1) * 2)); | ||
82 | |||
83 | /* clear and set 2 bits mask */ | ||
84 | if (pin > (QE_PIO_PINS / 2) - 1) { | ||
85 | out_be32(&par_io->cpdir2, | ||
86 | ~pin_mask2bits & tmp_val); | ||
87 | tmp_val &= ~pin_mask2bits; | ||
88 | out_be32(&par_io->cpdir2, new_mask2bits | tmp_val); | ||
89 | } else { | ||
90 | out_be32(&par_io->cpdir1, | ||
91 | ~pin_mask2bits & tmp_val); | ||
92 | tmp_val &= ~pin_mask2bits; | ||
93 | out_be32(&par_io->cpdir1, new_mask2bits | tmp_val); | ||
94 | } | ||
95 | /* define pin assignment */ | ||
96 | tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ? | ||
97 | in_be32(&par_io->cppar2) : | ||
98 | in_be32(&par_io->cppar1); | ||
99 | |||
100 | new_mask2bits = (u32) (assignment << (QE_PIO_PINS - | ||
101 | (pin % (QE_PIO_PINS / 2) + 1) * 2)); | ||
102 | /* clear and set 2 bits mask */ | ||
103 | if (pin > (QE_PIO_PINS / 2) - 1) { | ||
104 | out_be32(&par_io->cppar2, | ||
105 | ~pin_mask2bits & tmp_val); | ||
106 | tmp_val &= ~pin_mask2bits; | ||
107 | out_be32(&par_io->cppar2, new_mask2bits | tmp_val); | ||
108 | } else { | ||
109 | out_be32(&par_io->cppar1, | ||
110 | ~pin_mask2bits & tmp_val); | ||
111 | tmp_val &= ~pin_mask2bits; | ||
112 | out_be32(&par_io->cppar1, new_mask2bits | tmp_val); | ||
113 | } | ||
114 | } | ||
115 | EXPORT_SYMBOL(__par_io_config_pin); | ||
116 | |||
117 | int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, | ||
118 | int assignment, int has_irq) | ||
119 | { | ||
120 | if (!par_io || port >= num_par_io_ports) | ||
121 | return -EINVAL; | ||
122 | |||
123 | __par_io_config_pin(&par_io[port], pin, dir, open_drain, assignment, | ||
124 | has_irq); | ||
125 | return 0; | ||
126 | } | ||
127 | EXPORT_SYMBOL(par_io_config_pin); | ||
128 | |||
129 | int par_io_data_set(u8 port, u8 pin, u8 val) | ||
130 | { | ||
131 | u32 pin_mask, tmp_val; | ||
132 | |||
133 | if (port >= num_par_io_ports) | ||
134 | return -EINVAL; | ||
135 | if (pin >= QE_PIO_PINS) | ||
136 | return -EINVAL; | ||
137 | /* calculate pin location */ | ||
138 | pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin)); | ||
139 | |||
140 | tmp_val = in_be32(&par_io[port].cpdata); | ||
141 | |||
142 | if (val == 0) /* clear */ | ||
143 | out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val); | ||
144 | else /* set */ | ||
145 | out_be32(&par_io[port].cpdata, pin_mask | tmp_val); | ||
146 | |||
147 | return 0; | ||
148 | } | ||
149 | EXPORT_SYMBOL(par_io_data_set); | ||
150 | |||
151 | int par_io_of_config(struct device_node *np) | ||
152 | { | ||
153 | struct device_node *pio; | ||
154 | const phandle *ph; | ||
155 | int pio_map_len; | ||
156 | const unsigned int *pio_map; | ||
157 | |||
158 | if (par_io == NULL) { | ||
159 | printk(KERN_ERR "par_io not initialized\n"); | ||
160 | return -1; | ||
161 | } | ||
162 | |||
163 | ph = of_get_property(np, "pio-handle", NULL); | ||
164 | if (ph == NULL) { | ||
165 | printk(KERN_ERR "pio-handle not available\n"); | ||
166 | return -1; | ||
167 | } | ||
168 | |||
169 | pio = of_find_node_by_phandle(*ph); | ||
170 | |||
171 | pio_map = of_get_property(pio, "pio-map", &pio_map_len); | ||
172 | if (pio_map == NULL) { | ||
173 | printk(KERN_ERR "pio-map is not set!\n"); | ||
174 | return -1; | ||
175 | } | ||
176 | pio_map_len /= sizeof(unsigned int); | ||
177 | if ((pio_map_len % 6) != 0) { | ||
178 | printk(KERN_ERR "pio-map format wrong!\n"); | ||
179 | return -1; | ||
180 | } | ||
181 | |||
182 | while (pio_map_len > 0) { | ||
183 | par_io_config_pin((u8) pio_map[0], (u8) pio_map[1], | ||
184 | (int) pio_map[2], (int) pio_map[3], | ||
185 | (int) pio_map[4], (int) pio_map[5]); | ||
186 | pio_map += 6; | ||
187 | pio_map_len -= 6; | ||
188 | } | ||
189 | of_node_put(pio); | ||
190 | return 0; | ||
191 | } | ||
192 | EXPORT_SYMBOL(par_io_of_config); | ||
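The bit arithmetic in __par_io_config_pin above is dense: pin 0 is the most significant bit of each 32-bit register, and the direction/assignment registers pack two bits per pin, sixteen pins per register. A throwaway user-space sketch (plain C; it assumes QE_PIO_PINS is 32 as in the QE headers, and uses an illustrative direction code) makes the mask placement concrete:

```c
#include <stdio.h>

#define QE_PIO_PINS 32

int main(void)
{
	unsigned int pin = 5, dir = 2;	/* illustrative values */

	/* 1-bit registers (cpodr, cpdata): pin 0 is the MSB */
	unsigned int mask1 = 1u << (QE_PIO_PINS - (pin + 1));

	/* 2-bit registers (cpdir, cppar): 16 pins each, 2 bits per pin */
	unsigned int slot  = pin % (QE_PIO_PINS / 2);
	unsigned int mask2 = 0x3u << (QE_PIO_PINS - (slot + 1) * 2);
	unsigned int val2  = dir  << (QE_PIO_PINS - (slot + 1) * 2);

	printf("pin %u: 1-bit mask %08x, 2-bit mask %08x, 2-bit value %08x\n",
	       pin, mask1, mask2, val2);	/* 04000000 00300000 00200000 */
	return 0;
}
```

Pins 0-15 select cpdir1/cppar1 and pins 16-31 select cpdir2/cppar2, which is what the pin > (QE_PIO_PINS / 2) - 1 test does; the six cells per pin that par_io_of_config reads from "pio-map" (port, pin, dir, open_drain, assignment, has_irq) feed straight into this arithmetic.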
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
deleted file mode 100644
index 621575b7e84a..000000000000
--- a/arch/powerpc/sysdev/qe_lib/ucc.c
+++ /dev/null
@@ -1,212 +0,0 @@
1 | /* | ||
2 | * arch/powerpc/sysdev/qe_lib/ucc.c | ||
3 | * | ||
4 | * QE UCC API Set - UCC specific routines implementations. | ||
5 | * | ||
6 | * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. | ||
7 | * | ||
8 | * Authors: Shlomi Gridish <gridish@freescale.com> | ||
9 | * Li Yang <leoli@freescale.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License as published by the | ||
13 | * Free Software Foundation; either version 2 of the License, or (at your | ||
14 | * option) any later version. | ||
15 | */ | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/stddef.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/export.h> | ||
21 | |||
22 | #include <asm/irq.h> | ||
23 | #include <asm/io.h> | ||
24 | #include <asm/immap_qe.h> | ||
25 | #include <asm/qe.h> | ||
26 | #include <asm/ucc.h> | ||
27 | |||
28 | int ucc_set_qe_mux_mii_mng(unsigned int ucc_num) | ||
29 | { | ||
30 | unsigned long flags; | ||
31 | |||
32 | if (ucc_num > UCC_MAX_NUM - 1) | ||
33 | return -EINVAL; | ||
34 | |||
35 | spin_lock_irqsave(&cmxgcr_lock, flags); | ||
36 | clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG, | ||
37 | ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT); | ||
38 | spin_unlock_irqrestore(&cmxgcr_lock, flags); | ||
39 | |||
40 | return 0; | ||
41 | } | ||
42 | EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng); | ||
43 | |||
44 | /* Configure the UCC to either Slow or Fast. | ||
45 | * | ||
46 | * A given UCC can be configured to support either "slow" devices (e.g. UART) | ||
47 | * or "fast" devices (e.g. Ethernet). | ||
48 | * | ||
49 | * 'ucc_num' is the UCC number, from 0 - 7. | ||
50 | * | ||
51 | * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit | ||
52 | * must always be set to 1. | ||
53 | */ | ||
54 | int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed) | ||
55 | { | ||
56 | u8 __iomem *guemr; | ||
57 | |||
58 | /* The GUEMR register is at the same location for both slow and fast | ||
59 | devices, so we just use uccX.slow.guemr. */ | ||
60 | switch (ucc_num) { | ||
61 | case 0: guemr = &qe_immr->ucc1.slow.guemr; | ||
62 | break; | ||
63 | case 1: guemr = &qe_immr->ucc2.slow.guemr; | ||
64 | break; | ||
65 | case 2: guemr = &qe_immr->ucc3.slow.guemr; | ||
66 | break; | ||
67 | case 3: guemr = &qe_immr->ucc4.slow.guemr; | ||
68 | break; | ||
69 | case 4: guemr = &qe_immr->ucc5.slow.guemr; | ||
70 | break; | ||
71 | case 5: guemr = &qe_immr->ucc6.slow.guemr; | ||
72 | break; | ||
73 | case 6: guemr = &qe_immr->ucc7.slow.guemr; | ||
74 | break; | ||
75 | case 7: guemr = &qe_immr->ucc8.slow.guemr; | ||
76 | break; | ||
77 | default: | ||
78 | return -EINVAL; | ||
79 | } | ||
80 | |||
81 | clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK, | ||
82 | UCC_GUEMR_SET_RESERVED3 | speed); | ||
83 | |||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr, | ||
88 | unsigned int *reg_num, unsigned int *shift) | ||
89 | { | ||
90 | unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3); | ||
91 | |||
92 | *reg_num = cmx + 1; | ||
93 | *cmxucr = &qe_immr->qmx.cmxucr[cmx]; | ||
94 | *shift = 16 - 8 * (ucc_num & 2); | ||
95 | } | ||
96 | |||
97 | int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask) | ||
98 | { | ||
99 | __be32 __iomem *cmxucr; | ||
100 | unsigned int reg_num; | ||
101 | unsigned int shift; | ||
102 | |||
103 | /* check if the UCC number is in range. */ | ||
104 | if (ucc_num > UCC_MAX_NUM - 1) | ||
105 | return -EINVAL; | ||
106 | |||
107 | get_cmxucr_reg(ucc_num, &cmxucr, ®_num, &shift); | ||
108 | |||
109 | if (set) | ||
110 | setbits32(cmxucr, mask << shift); | ||
111 | else | ||
112 | clrbits32(cmxucr, mask << shift); | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock, | ||
118 | enum comm_dir mode) | ||
119 | { | ||
120 | __be32 __iomem *cmxucr; | ||
121 | unsigned int reg_num; | ||
122 | unsigned int shift; | ||
123 | u32 clock_bits = 0; | ||
124 | |||
125 | /* check if the UCC number is in range. */ | ||
126 | if (ucc_num > UCC_MAX_NUM - 1) | ||
127 | return -EINVAL; | ||
128 | |||
129 | /* The communications direction must be RX or TX */ | ||
130 | if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX))) | ||
131 | return -EINVAL; | ||
132 | |||
133 | get_cmxucr_reg(ucc_num, &cmxucr, ®_num, &shift); | ||
134 | |||
135 | switch (reg_num) { | ||
136 | case 1: | ||
137 | switch (clock) { | ||
138 | case QE_BRG1: clock_bits = 1; break; | ||
139 | case QE_BRG2: clock_bits = 2; break; | ||
140 | case QE_BRG7: clock_bits = 3; break; | ||
141 | case QE_BRG8: clock_bits = 4; break; | ||
142 | case QE_CLK9: clock_bits = 5; break; | ||
143 | case QE_CLK10: clock_bits = 6; break; | ||
144 | case QE_CLK11: clock_bits = 7; break; | ||
145 | case QE_CLK12: clock_bits = 8; break; | ||
146 | case QE_CLK15: clock_bits = 9; break; | ||
147 | case QE_CLK16: clock_bits = 10; break; | ||
148 | default: break; | ||
149 | } | ||
150 | break; | ||
151 | case 2: | ||
152 | switch (clock) { | ||
153 | case QE_BRG5: clock_bits = 1; break; | ||
154 | case QE_BRG6: clock_bits = 2; break; | ||
155 | case QE_BRG7: clock_bits = 3; break; | ||
156 | case QE_BRG8: clock_bits = 4; break; | ||
157 | case QE_CLK13: clock_bits = 5; break; | ||
158 | case QE_CLK14: clock_bits = 6; break; | ||
159 | case QE_CLK19: clock_bits = 7; break; | ||
160 | case QE_CLK20: clock_bits = 8; break; | ||
161 | case QE_CLK15: clock_bits = 9; break; | ||
162 | case QE_CLK16: clock_bits = 10; break; | ||
163 | default: break; | ||
164 | } | ||
165 | break; | ||
166 | case 3: | ||
167 | switch (clock) { | ||
168 | case QE_BRG9: clock_bits = 1; break; | ||
169 | case QE_BRG10: clock_bits = 2; break; | ||
170 | case QE_BRG15: clock_bits = 3; break; | ||
171 | case QE_BRG16: clock_bits = 4; break; | ||
172 | case QE_CLK3: clock_bits = 5; break; | ||
173 | case QE_CLK4: clock_bits = 6; break; | ||
174 | case QE_CLK17: clock_bits = 7; break; | ||
175 | case QE_CLK18: clock_bits = 8; break; | ||
176 | case QE_CLK7: clock_bits = 9; break; | ||
177 | case QE_CLK8: clock_bits = 10; break; | ||
178 | case QE_CLK16: clock_bits = 11; break; | ||
179 | default: break; | ||
180 | } | ||
181 | break; | ||
182 | case 4: | ||
183 | switch (clock) { | ||
184 | case QE_BRG13: clock_bits = 1; break; | ||
185 | case QE_BRG14: clock_bits = 2; break; | ||
186 | case QE_BRG15: clock_bits = 3; break; | ||
187 | case QE_BRG16: clock_bits = 4; break; | ||
188 | case QE_CLK5: clock_bits = 5; break; | ||
189 | case QE_CLK6: clock_bits = 6; break; | ||
190 | case QE_CLK21: clock_bits = 7; break; | ||
191 | case QE_CLK22: clock_bits = 8; break; | ||
192 | case QE_CLK7: clock_bits = 9; break; | ||
193 | case QE_CLK8: clock_bits = 10; break; | ||
194 | case QE_CLK16: clock_bits = 11; break; | ||
195 | default: break; | ||
196 | } | ||
197 | break; | ||
198 | default: break; | ||
199 | } | ||
200 | |||
201 | /* Check for invalid combination of clock and UCC number */ | ||
202 | if (!clock_bits) | ||
203 | return -ENOENT; | ||
204 | |||
205 | if (mode == COMM_DIR_RX) | ||
206 | shift += 4; | ||
207 | |||
208 | clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, | ||
209 | clock_bits << shift); | ||
210 | |||
211 | return 0; | ||
212 | } | ||
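The packing in get_cmxucr_reg is terse: the eight UCCs share four CMXUCR registers, two UCCs per register, with each Rx clock field sitting four bits above the corresponding Tx field (hence the shift += 4 for COMM_DIR_RX). A small user-space loop over the same arithmetic (plain C, derived directly from the function above) prints the mapping:

```c
#include <stdio.h>

int main(void)
{
	unsigned int ucc_num;

	for (ucc_num = 0; ucc_num < 8; ucc_num++) {
		/* same expressions as get_cmxucr_reg() */
		unsigned int cmx   = ((ucc_num & 1) << 1) + (ucc_num > 3);
		unsigned int shift = 16 - 8 * (ucc_num & 2);

		printf("UCC%u -> CMXUCR%u, Tx clock field at bit %2u, Rx at bit %2u\n",
		       ucc_num + 1, cmx + 1, shift, shift + 4);
	}
	return 0;
}
```

So CMXUCR1 serves UCC1/UCC3, CMXUCR2 serves UCC5/UCC7, CMXUCR3 serves UCC2/UCC4 and CMXUCR4 serves UCC6/UCC8, matching the per-reg_num clock tables in ucc_set_qe_mux_rxtx above.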
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
deleted file mode 100644
index 65aaf15032ae..000000000000
--- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c
+++ /dev/null
@@ -1,363 +0,0 @@
1 | /* | ||
2 | * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Authors: Shlomi Gridish <gridish@freescale.com> | ||
5 | * Li Yang <leoli@freescale.com> | ||
6 | * | ||
7 | * Description: | ||
8 | * QE UCC Fast API Set - UCC Fast specific routines implementations. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | */ | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/stddef.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/export.h> | ||
22 | |||
23 | #include <asm/io.h> | ||
24 | #include <asm/immap_qe.h> | ||
25 | #include <asm/qe.h> | ||
26 | |||
27 | #include <asm/ucc.h> | ||
28 | #include <asm/ucc_fast.h> | ||
29 | |||
30 | void ucc_fast_dump_regs(struct ucc_fast_private * uccf) | ||
31 | { | ||
32 | printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num); | ||
33 | printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs); | ||
34 | |||
35 | printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n", | ||
36 | &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr)); | ||
37 | printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n", | ||
38 | &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr)); | ||
39 | printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n", | ||
40 | &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr)); | ||
41 | printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n", | ||
42 | &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr)); | ||
43 | printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n", | ||
44 | &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce)); | ||
45 | printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n", | ||
46 | &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm)); | ||
47 | printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n", | ||
48 | &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs)); | ||
49 | printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n", | ||
50 | &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb)); | ||
51 | printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n", | ||
52 | &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs)); | ||
53 | printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n", | ||
54 | &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet)); | ||
55 | printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n", | ||
56 | &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset)); | ||
57 | printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n", | ||
58 | &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb)); | ||
59 | printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n", | ||
60 | &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs)); | ||
61 | printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n", | ||
62 | &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet)); | ||
63 | printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n", | ||
64 | &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt)); | ||
65 | printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n", | ||
66 | &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt)); | ||
67 | printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n", | ||
68 | &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry)); | ||
69 | printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n", | ||
70 | &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr)); | ||
71 | } | ||
72 | EXPORT_SYMBOL(ucc_fast_dump_regs); | ||
73 | |||
74 | u32 ucc_fast_get_qe_cr_subblock(int uccf_num) | ||
75 | { | ||
76 | switch (uccf_num) { | ||
77 | case 0: return QE_CR_SUBBLOCK_UCCFAST1; | ||
78 | case 1: return QE_CR_SUBBLOCK_UCCFAST2; | ||
79 | case 2: return QE_CR_SUBBLOCK_UCCFAST3; | ||
80 | case 3: return QE_CR_SUBBLOCK_UCCFAST4; | ||
81 | case 4: return QE_CR_SUBBLOCK_UCCFAST5; | ||
82 | case 5: return QE_CR_SUBBLOCK_UCCFAST6; | ||
83 | case 6: return QE_CR_SUBBLOCK_UCCFAST7; | ||
84 | case 7: return QE_CR_SUBBLOCK_UCCFAST8; | ||
85 | default: return QE_CR_SUBBLOCK_INVALID; | ||
86 | } | ||
87 | } | ||
88 | EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock); | ||
89 | |||
90 | void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf) | ||
91 | { | ||
92 | out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD); | ||
93 | } | ||
94 | EXPORT_SYMBOL(ucc_fast_transmit_on_demand); | ||
95 | |||
96 | void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode) | ||
97 | { | ||
98 | struct ucc_fast __iomem *uf_regs; | ||
99 | u32 gumr; | ||
100 | |||
101 | uf_regs = uccf->uf_regs; | ||
102 | |||
103 | /* Enable reception and/or transmission on this UCC. */ | ||
104 | gumr = in_be32(&uf_regs->gumr); | ||
105 | if (mode & COMM_DIR_TX) { | ||
106 | gumr |= UCC_FAST_GUMR_ENT; | ||
107 | uccf->enabled_tx = 1; | ||
108 | } | ||
109 | if (mode & COMM_DIR_RX) { | ||
110 | gumr |= UCC_FAST_GUMR_ENR; | ||
111 | uccf->enabled_rx = 1; | ||
112 | } | ||
113 | out_be32(&uf_regs->gumr, gumr); | ||
114 | } | ||
115 | EXPORT_SYMBOL(ucc_fast_enable); | ||
116 | |||
117 | void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode) | ||
118 | { | ||
119 | struct ucc_fast __iomem *uf_regs; | ||
120 | u32 gumr; | ||
121 | |||
122 | uf_regs = uccf->uf_regs; | ||
123 | |||
124 | /* Disable reception and/or transmission on this UCC. */ | ||
125 | gumr = in_be32(&uf_regs->gumr); | ||
126 | if (mode & COMM_DIR_TX) { | ||
127 | gumr &= ~UCC_FAST_GUMR_ENT; | ||
128 | uccf->enabled_tx = 0; | ||
129 | } | ||
130 | if (mode & COMM_DIR_RX) { | ||
131 | gumr &= ~UCC_FAST_GUMR_ENR; | ||
132 | uccf->enabled_rx = 0; | ||
133 | } | ||
134 | out_be32(&uf_regs->gumr, gumr); | ||
135 | } | ||
136 | EXPORT_SYMBOL(ucc_fast_disable); | ||
137 | |||
138 | int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret) | ||
139 | { | ||
140 | struct ucc_fast_private *uccf; | ||
141 | struct ucc_fast __iomem *uf_regs; | ||
142 | u32 gumr; | ||
143 | int ret; | ||
144 | |||
145 | if (!uf_info) | ||
146 | return -EINVAL; | ||
147 | |||
148 | /* check if the UCC port number is in range. */ | ||
149 | if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) { | ||
150 | printk(KERN_ERR "%s: illegal UCC number\n", __func__); | ||
151 | return -EINVAL; | ||
152 | } | ||
153 | |||
154 | /* Check that 'max_rx_buf_length' is properly aligned (to 4 bytes). */ | ||
155 | if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) { | ||
156 | printk(KERN_ERR "%s: max_rx_buf_length not aligned\n", | ||
157 | __func__); | ||
158 | return -EINVAL; | ||
159 | } | ||
160 | |||
161 | /* Validate Virtual Fifo register values */ | ||
162 | if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) { | ||
163 | printk(KERN_ERR "%s: urfs is too small\n", __func__); | ||
164 | return -EINVAL; | ||
165 | } | ||
166 | |||
167 | if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { | ||
168 | printk(KERN_ERR "%s: urfs is not aligned\n", __func__); | ||
169 | return -EINVAL; | ||
170 | } | ||
171 | |||
172 | if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { | ||
173 | printk(KERN_ERR "%s: urfet is not aligned.\n", __func__); | ||
174 | return -EINVAL; | ||
175 | } | ||
176 | |||
177 | if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { | ||
178 | printk(KERN_ERR "%s: urfset is not aligned\n", __func__); | ||
179 | return -EINVAL; | ||
180 | } | ||
181 | |||
182 | if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { | ||
183 | printk(KERN_ERR "%s: utfs is not aligned\n", __func__); | ||
184 | return -EINVAL; | ||
185 | } | ||
186 | |||
187 | if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { | ||
188 | printk(KERN_ERR "%s: utfet is not aligned\n", __func__); | ||
189 | return -EINVAL; | ||
190 | } | ||
191 | |||
192 | if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { | ||
193 | printk(KERN_ERR "%s: utftt is not aligned\n", __func__); | ||
194 | return -EINVAL; | ||
195 | } | ||
196 | |||
197 | uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL); | ||
198 | if (!uccf) { | ||
199 | printk(KERN_ERR "%s: Cannot allocate private data\n", | ||
200 | __func__); | ||
201 | return -ENOMEM; | ||
202 | } | ||
203 | |||
204 | /* Fill fast UCC structure */ | ||
205 | uccf->uf_info = uf_info; | ||
206 | /* Map the physical base address of the UCC registers */ | ||
207 | uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast)); | ||
208 | if (uccf->uf_regs == NULL) { | ||
209 | printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__); | ||
210 | kfree(uccf); | ||
211 | return -ENOMEM; | ||
212 | } | ||
213 | |||
214 | uccf->enabled_tx = 0; | ||
215 | uccf->enabled_rx = 0; | ||
216 | uccf->stopped_tx = 0; | ||
217 | uccf->stopped_rx = 0; | ||
218 | uf_regs = uccf->uf_regs; | ||
219 | uccf->p_ucce = &uf_regs->ucce; | ||
220 | uccf->p_uccm = &uf_regs->uccm; | ||
221 | #ifdef CONFIG_UGETH_TX_ON_DEMAND | ||
222 | uccf->p_utodr = &uf_regs->utodr; | ||
223 | #endif | ||
224 | #ifdef STATISTICS | ||
225 | uccf->tx_frames = 0; | ||
226 | uccf->rx_frames = 0; | ||
227 | uccf->rx_discarded = 0; | ||
228 | #endif /* STATISTICS */ | ||
229 | |||
230 | /* Set UCC to fast type */ | ||
231 | ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST); | ||
232 | if (ret) { | ||
233 | printk(KERN_ERR "%s: cannot set UCC type\n", __func__); | ||
234 | ucc_fast_free(uccf); | ||
235 | return ret; | ||
236 | } | ||
237 | |||
238 | uccf->mrblr = uf_info->max_rx_buf_length; | ||
239 | |||
240 | /* Set GUMR */ | ||
241 | /* For more details see the hardware spec. */ | ||
242 | gumr = uf_info->ttx_trx; | ||
243 | if (uf_info->tci) | ||
244 | gumr |= UCC_FAST_GUMR_TCI; | ||
245 | if (uf_info->cdp) | ||
246 | gumr |= UCC_FAST_GUMR_CDP; | ||
247 | if (uf_info->ctsp) | ||
248 | gumr |= UCC_FAST_GUMR_CTSP; | ||
249 | if (uf_info->cds) | ||
250 | gumr |= UCC_FAST_GUMR_CDS; | ||
251 | if (uf_info->ctss) | ||
252 | gumr |= UCC_FAST_GUMR_CTSS; | ||
253 | if (uf_info->txsy) | ||
254 | gumr |= UCC_FAST_GUMR_TXSY; | ||
255 | if (uf_info->rsyn) | ||
256 | gumr |= UCC_FAST_GUMR_RSYN; | ||
257 | gumr |= uf_info->synl; | ||
258 | if (uf_info->rtsm) | ||
259 | gumr |= UCC_FAST_GUMR_RTSM; | ||
260 | gumr |= uf_info->renc; | ||
261 | if (uf_info->revd) | ||
262 | gumr |= UCC_FAST_GUMR_REVD; | ||
263 | gumr |= uf_info->tenc; | ||
264 | gumr |= uf_info->tcrc; | ||
265 | gumr |= uf_info->mode; | ||
266 | out_be32(&uf_regs->gumr, gumr); | ||
267 | |||
268 | /* Allocate memory for Tx Virtual Fifo */ | ||
269 | uccf->ucc_fast_tx_virtual_fifo_base_offset = | ||
270 | qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); | ||
271 | if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) { | ||
272 | printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n", | ||
273 | __func__); | ||
274 | uccf->ucc_fast_tx_virtual_fifo_base_offset = 0; | ||
275 | ucc_fast_free(uccf); | ||
276 | return -ENOMEM; | ||
277 | } | ||
278 | |||
279 | /* Allocate memory for Rx Virtual Fifo */ | ||
280 | uccf->ucc_fast_rx_virtual_fifo_base_offset = | ||
281 | qe_muram_alloc(uf_info->urfs + | ||
282 | UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR, | ||
283 | UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); | ||
284 | if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) { | ||
285 | printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n", | ||
286 | __func__); | ||
287 | uccf->ucc_fast_rx_virtual_fifo_base_offset = 0; | ||
288 | ucc_fast_free(uccf); | ||
289 | return -ENOMEM; | ||
290 | } | ||
291 | |||
292 | /* Set Virtual Fifo registers */ | ||
293 | out_be16(&uf_regs->urfs, uf_info->urfs); | ||
294 | out_be16(&uf_regs->urfet, uf_info->urfet); | ||
295 | out_be16(&uf_regs->urfset, uf_info->urfset); | ||
296 | out_be16(&uf_regs->utfs, uf_info->utfs); | ||
297 | out_be16(&uf_regs->utfet, uf_info->utfet); | ||
298 | out_be16(&uf_regs->utftt, uf_info->utftt); | ||
299 | /* utfb, urfb are offsets from MURAM base */ | ||
300 | out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset); | ||
301 | out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset); | ||
302 | |||
303 | /* Mux clocking */ | ||
304 | /* Grant Support */ | ||
305 | ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support); | ||
306 | /* Breakpoint Support */ | ||
307 | ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support); | ||
308 | /* Set Tsa or NMSI mode. */ | ||
309 | ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa); | ||
310 | /* If NMSI (not Tsa), set Tx and Rx clock. */ | ||
311 | if (!uf_info->tsa) { | ||
312 | /* Rx clock routing */ | ||
313 | if ((uf_info->rx_clock != QE_CLK_NONE) && | ||
314 | ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock, | ||
315 | COMM_DIR_RX)) { | ||
316 | printk(KERN_ERR "%s: illegal value for RX clock\n", | ||
317 | __func__); | ||
318 | ucc_fast_free(uccf); | ||
319 | return -EINVAL; | ||
320 | } | ||
321 | /* Tx clock routing */ | ||
322 | if ((uf_info->tx_clock != QE_CLK_NONE) && | ||
323 | ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock, | ||
324 | COMM_DIR_TX)) { | ||
325 | printk(KERN_ERR "%s: illegal value for TX clock\n", | ||
326 | __func__); | ||
327 | ucc_fast_free(uccf); | ||
328 | return -EINVAL; | ||
329 | } | ||
330 | } | ||
331 | |||
332 | /* Set interrupt mask register at UCC level. */ | ||
333 | out_be32(&uf_regs->uccm, uf_info->uccm_mask); | ||
334 | |||
335 | /* First, clear anything pending at UCC level, | ||
336 | * otherwise, old garbage may come through | ||
337 | * as soon as the dam is opened. */ | ||
338 | |||
339 | /* Writing '1' clears */ | ||
340 | out_be32(&uf_regs->ucce, 0xffffffff); | ||
341 | |||
342 | *uccf_ret = uccf; | ||
343 | return 0; | ||
344 | } | ||
345 | EXPORT_SYMBOL(ucc_fast_init); | ||
346 | |||
347 | void ucc_fast_free(struct ucc_fast_private * uccf) | ||
348 | { | ||
349 | if (!uccf) | ||
350 | return; | ||
351 | |||
352 | if (uccf->ucc_fast_tx_virtual_fifo_base_offset) | ||
353 | qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset); | ||
354 | |||
355 | if (uccf->ucc_fast_rx_virtual_fifo_base_offset) | ||
356 | qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset); | ||
357 | |||
358 | if (uccf->uf_regs) | ||
359 | iounmap(uccf->uf_regs); | ||
360 | |||
361 | kfree(uccf); | ||
362 | } | ||
363 | EXPORT_SYMBOL(ucc_fast_free); | ||
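For orientation, this is roughly how a driver consumes the API above. A hypothetical sketch, not code from this commit; a real user such as ucc_geth populates many more ucc_fast_info fields (regs, ucc_num, FIFO sizes, clock routing) before calling in:

```c
/* Hypothetical bring-up sketch; uf_info is assumed pre-populated. */
static int example_fast_bring_up(struct ucc_fast_info *uf_info)
{
	struct ucc_fast_private *uccf;
	int ret;

	/* maps the registers, programs GUMR and the virtual FIFOs */
	ret = ucc_fast_init(uf_info, &uccf);
	if (ret)
		return ret;

	/* init leaves both directions disabled; switch them on explicitly */
	ucc_fast_enable(uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* ... run traffic ... */

	ucc_fast_disable(uccf, COMM_DIR_RX | COMM_DIR_TX);
	ucc_fast_free(uccf);	/* releases MURAM FIFOs, unmaps registers */
	return 0;
}
```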
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
deleted file mode 100644
index 5f91628209eb..000000000000
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ /dev/null
@@ -1,374 +0,0 @@
1 | /* | ||
2 | * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Authors: Shlomi Gridish <gridish@freescale.com> | ||
5 | * Li Yang <leoli@freescale.com> | ||
6 | * | ||
7 | * Description: | ||
8 | * QE UCC Slow API Set - UCC Slow specific routines implementations. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | */ | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/stddef.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/export.h> | ||
22 | |||
23 | #include <asm/io.h> | ||
24 | #include <asm/immap_qe.h> | ||
25 | #include <asm/qe.h> | ||
26 | |||
27 | #include <asm/ucc.h> | ||
28 | #include <asm/ucc_slow.h> | ||
29 | |||
30 | u32 ucc_slow_get_qe_cr_subblock(int uccs_num) | ||
31 | { | ||
32 | switch (uccs_num) { | ||
33 | case 0: return QE_CR_SUBBLOCK_UCCSLOW1; | ||
34 | case 1: return QE_CR_SUBBLOCK_UCCSLOW2; | ||
35 | case 2: return QE_CR_SUBBLOCK_UCCSLOW3; | ||
36 | case 3: return QE_CR_SUBBLOCK_UCCSLOW4; | ||
37 | case 4: return QE_CR_SUBBLOCK_UCCSLOW5; | ||
38 | case 5: return QE_CR_SUBBLOCK_UCCSLOW6; | ||
39 | case 6: return QE_CR_SUBBLOCK_UCCSLOW7; | ||
40 | case 7: return QE_CR_SUBBLOCK_UCCSLOW8; | ||
41 | default: return QE_CR_SUBBLOCK_INVALID; | ||
42 | } | ||
43 | } | ||
44 | EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock); | ||
45 | |||
46 | void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs) | ||
47 | { | ||
48 | struct ucc_slow_info *us_info = uccs->us_info; | ||
49 | u32 id; | ||
50 | |||
51 | id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); | ||
52 | qe_issue_cmd(QE_GRACEFUL_STOP_TX, id, | ||
53 | QE_CR_PROTOCOL_UNSPECIFIED, 0); | ||
54 | } | ||
55 | EXPORT_SYMBOL(ucc_slow_graceful_stop_tx); | ||
56 | |||
57 | void ucc_slow_stop_tx(struct ucc_slow_private * uccs) | ||
58 | { | ||
59 | struct ucc_slow_info *us_info = uccs->us_info; | ||
60 | u32 id; | ||
61 | |||
62 | id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); | ||
63 | qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0); | ||
64 | } | ||
65 | EXPORT_SYMBOL(ucc_slow_stop_tx); | ||
66 | |||
67 | void ucc_slow_restart_tx(struct ucc_slow_private * uccs) | ||
68 | { | ||
69 | struct ucc_slow_info *us_info = uccs->us_info; | ||
70 | u32 id; | ||
71 | |||
72 | id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); | ||
73 | qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0); | ||
74 | } | ||
75 | EXPORT_SYMBOL(ucc_slow_restart_tx); | ||
76 | |||
77 | void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode) | ||
78 | { | ||
79 | struct ucc_slow *us_regs; | ||
80 | u32 gumr_l; | ||
81 | |||
82 | us_regs = uccs->us_regs; | ||
83 | |||
84 | /* Enable reception and/or transmission on this UCC. */ | ||
85 | gumr_l = in_be32(&us_regs->gumr_l); | ||
86 | if (mode & COMM_DIR_TX) { | ||
87 | gumr_l |= UCC_SLOW_GUMR_L_ENT; | ||
88 | uccs->enabled_tx = 1; | ||
89 | } | ||
90 | if (mode & COMM_DIR_RX) { | ||
91 | gumr_l |= UCC_SLOW_GUMR_L_ENR; | ||
92 | uccs->enabled_rx = 1; | ||
93 | } | ||
94 | out_be32(&us_regs->gumr_l, gumr_l); | ||
95 | } | ||
96 | EXPORT_SYMBOL(ucc_slow_enable); | ||
97 | |||
98 | void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode) | ||
99 | { | ||
100 | struct ucc_slow *us_regs; | ||
101 | u32 gumr_l; | ||
102 | |||
103 | us_regs = uccs->us_regs; | ||
104 | |||
105 | /* Disable reception and/or transmission on this UCC. */ | ||
106 | gumr_l = in_be32(&us_regs->gumr_l); | ||
107 | if (mode & COMM_DIR_TX) { | ||
108 | gumr_l &= ~UCC_SLOW_GUMR_L_ENT; | ||
109 | uccs->enabled_tx = 0; | ||
110 | } | ||
111 | if (mode & COMM_DIR_RX) { | ||
112 | gumr_l &= ~UCC_SLOW_GUMR_L_ENR; | ||
113 | uccs->enabled_rx = 0; | ||
114 | } | ||
115 | out_be32(&us_regs->gumr_l, gumr_l); | ||
116 | } | ||
117 | EXPORT_SYMBOL(ucc_slow_disable); | ||
118 | |||
119 | /* Initialize the UCC for Slow operations | ||
120 | * | ||
121 | * The caller should initialize the required fields of us_info first. | ||
122 | */ | ||
123 | int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret) | ||
124 | { | ||
125 | struct ucc_slow_private *uccs; | ||
126 | u32 i; | ||
127 | struct ucc_slow __iomem *us_regs; | ||
128 | u32 gumr; | ||
129 | struct qe_bd *bd; | ||
130 | u32 id; | ||
131 | u32 command; | ||
132 | int ret = 0; | ||
133 | |||
134 | if (!us_info) | ||
135 | return -EINVAL; | ||
136 | |||
137 | /* check if the UCC port number is in range. */ | ||
138 | if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) { | ||
139 | printk(KERN_ERR "%s: illegal UCC number\n", __func__); | ||
140 | return -EINVAL; | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * Set mrblr | ||
145 | * Check that 'max_rx_buf_length' is properly aligned (4), unless | ||
146 | * rfw is 1, meaning that the QE accepts one byte at a time, unlike the | ||
147 | * normal case in which it accepts 32 bits at a time. | ||
148 | */ | ||
149 | if ((!us_info->rfw) && | ||
150 | (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) { | ||
151 | printk(KERN_ERR "max_rx_buf_length not aligned.\n"); | ||
152 | return -EINVAL; | ||
153 | } | ||
154 | |||
155 | uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL); | ||
156 | if (!uccs) { | ||
157 | printk(KERN_ERR "%s: Cannot allocate private data\n", | ||
158 | __func__); | ||
159 | return -ENOMEM; | ||
160 | } | ||
161 | |||
162 | /* Fill slow UCC structure */ | ||
163 | uccs->us_info = us_info; | ||
164 | /* Map the physical base address of the UCC registers */ | ||
165 | uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow)); | ||
166 | if (uccs->us_regs == NULL) { | ||
167 | printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__); | ||
168 | kfree(uccs); | ||
169 | return -ENOMEM; | ||
170 | } | ||
171 | |||
172 | uccs->saved_uccm = 0; | ||
173 | uccs->p_rx_frame = 0; | ||
174 | us_regs = uccs->us_regs; | ||
175 | uccs->p_ucce = (u16 *) & (us_regs->ucce); | ||
176 | uccs->p_uccm = (u16 *) & (us_regs->uccm); | ||
177 | #ifdef STATISTICS | ||
178 | uccs->rx_frames = 0; | ||
179 | uccs->tx_frames = 0; | ||
180 | uccs->rx_discarded = 0; | ||
181 | #endif /* STATISTICS */ | ||
182 | |||
183 | /* Get PRAM base */ | ||
184 | uccs->us_pram_offset = | ||
185 | qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM); | ||
186 | if (IS_ERR_VALUE(uccs->us_pram_offset)) { | ||
187 | printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__); | ||
188 | ucc_slow_free(uccs); | ||
189 | return -ENOMEM; | ||
190 | } | ||
191 | id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); | ||
192 | qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol, | ||
193 | uccs->us_pram_offset); | ||
194 | |||
195 | uccs->us_pram = qe_muram_addr(uccs->us_pram_offset); | ||
196 | |||
197 | /* Set UCC to slow type */ | ||
198 | ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW); | ||
199 | if (ret) { | ||
200 | printk(KERN_ERR "%s: cannot set UCC type", __func__); | ||
201 | ucc_slow_free(uccs); | ||
202 | return ret; | ||
203 | } | ||
204 | |||
205 | out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length); | ||
206 | |||
207 | INIT_LIST_HEAD(&uccs->confQ); | ||
208 | |||
209 | /* Allocate BDs. */ | ||
210 | uccs->rx_base_offset = | ||
211 | qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd), | ||
212 | QE_ALIGNMENT_OF_BD); | ||
213 | if (IS_ERR_VALUE(uccs->rx_base_offset)) { | ||
214 | printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__, | ||
215 | us_info->rx_bd_ring_len); | ||
216 | uccs->rx_base_offset = 0; | ||
217 | ucc_slow_free(uccs); | ||
218 | return -ENOMEM; | ||
219 | } | ||
220 | |||
221 | uccs->tx_base_offset = | ||
222 | qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd), | ||
223 | QE_ALIGNMENT_OF_BD); | ||
224 | if (IS_ERR_VALUE(uccs->tx_base_offset)) { | ||
225 | printk(KERN_ERR "%s: cannot allocate TX BDs", __func__); | ||
226 | uccs->tx_base_offset = 0; | ||
227 | ucc_slow_free(uccs); | ||
228 | return -ENOMEM; | ||
229 | } | ||
230 | |||
231 | /* Init Tx bds */ | ||
232 | bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset); | ||
233 | for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) { | ||
234 | /* clear bd buffer */ | ||
235 | out_be32(&bd->buf, 0); | ||
236 | /* set bd status and length */ | ||
237 | out_be32((u32 *) bd, 0); | ||
238 | bd++; | ||
239 | } | ||
240 | /* for last BD set Wrap bit */ | ||
241 | out_be32(&bd->buf, 0); | ||
242 | out_be32((u32 *) bd, cpu_to_be32(T_W)); | ||
243 | |||
244 | /* Init Rx bds */ | ||
245 | bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset); | ||
246 | for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) { | ||
247 | /* set bd status and length */ | ||
248 | out_be32((u32*)bd, 0); | ||
249 | /* clear bd buffer */ | ||
250 | out_be32(&bd->buf, 0); | ||
251 | bd++; | ||
252 | } | ||
253 | /* for last BD set Wrap bit */ | ||
254 | out_be32((u32*)bd, cpu_to_be32(R_W)); | ||
255 | out_be32(&bd->buf, 0); | ||
256 | |||
257 | /* Set GUMR (For more details see the hardware spec.). */ | ||
258 | /* gumr_h */ | ||
259 | gumr = us_info->tcrc; | ||
260 | if (us_info->cdp) | ||
261 | gumr |= UCC_SLOW_GUMR_H_CDP; | ||
262 | if (us_info->ctsp) | ||
263 | gumr |= UCC_SLOW_GUMR_H_CTSP; | ||
264 | if (us_info->cds) | ||
265 | gumr |= UCC_SLOW_GUMR_H_CDS; | ||
266 | if (us_info->ctss) | ||
267 | gumr |= UCC_SLOW_GUMR_H_CTSS; | ||
268 | if (us_info->tfl) | ||
269 | gumr |= UCC_SLOW_GUMR_H_TFL; | ||
270 | if (us_info->rfw) | ||
271 | gumr |= UCC_SLOW_GUMR_H_RFW; | ||
272 | if (us_info->txsy) | ||
273 | gumr |= UCC_SLOW_GUMR_H_TXSY; | ||
274 | if (us_info->rtsm) | ||
275 | gumr |= UCC_SLOW_GUMR_H_RTSM; | ||
276 | out_be32(&us_regs->gumr_h, gumr); | ||
277 | |||
278 | /* gumr_l */ | ||
279 | gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc | | ||
280 | us_info->diag | us_info->mode; | ||
281 | if (us_info->tci) | ||
282 | gumr |= UCC_SLOW_GUMR_L_TCI; | ||
283 | if (us_info->rinv) | ||
284 | gumr |= UCC_SLOW_GUMR_L_RINV; | ||
285 | if (us_info->tinv) | ||
286 | gumr |= UCC_SLOW_GUMR_L_TINV; | ||
287 | if (us_info->tend) | ||
288 | gumr |= UCC_SLOW_GUMR_L_TEND; | ||
289 | out_be32(&us_regs->gumr_l, gumr); | ||
290 | |||
291 | /* Function code registers */ | ||
292 | |||
293 | /* if the data is in cacheable memory, the 'global' */ | ||
294 | /* in the function code should be set. */ | ||
295 | uccs->us_pram->tbmr = UCC_BMR_BO_BE; | ||
296 | uccs->us_pram->rbmr = UCC_BMR_BO_BE; | ||
297 | |||
298 | /* rbase, tbase are offsets from MURAM base */ | ||
299 | out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset); | ||
300 | out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset); | ||
301 | |||
302 | /* Mux clocking */ | ||
303 | /* Grant Support */ | ||
304 | ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support); | ||
305 | /* Breakpoint Support */ | ||
306 | ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support); | ||
307 | /* Set Tsa or NMSI mode. */ | ||
308 | ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa); | ||
309 | /* If NMSI (not Tsa), set Tx and Rx clock. */ | ||
310 | if (!us_info->tsa) { | ||
311 | /* Rx clock routing */ | ||
312 | if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock, | ||
313 | COMM_DIR_RX)) { | ||
314 | printk(KERN_ERR "%s: illegal value for RX clock\n", | ||
315 | __func__); | ||
316 | ucc_slow_free(uccs); | ||
317 | return -EINVAL; | ||
318 | } | ||
319 | /* Tx clock routing */ | ||
320 | if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock, | ||
321 | COMM_DIR_TX)) { | ||
322 | printk(KERN_ERR "%s: illegal value for TX clock\n", | ||
323 | __func__); | ||
324 | ucc_slow_free(uccs); | ||
325 | return -EINVAL; | ||
326 | } | ||
327 | } | ||
328 | |||
329 | /* Set interrupt mask register at UCC level. */ | ||
330 | out_be16(&us_regs->uccm, us_info->uccm_mask); | ||
331 | |||
332 | /* First, clear anything pending at UCC level, | ||
333 | * otherwise, old garbage may come through | ||
334 | * as soon as the dam is opened. */ | ||
335 | |||
336 | /* Writing '1' clears */ | ||
337 | out_be16(&us_regs->ucce, 0xffff); | ||
338 | |||
339 | /* Issue QE Init command */ | ||
340 | if (us_info->init_tx && us_info->init_rx) | ||
341 | command = QE_INIT_TX_RX; | ||
342 | else if (us_info->init_tx) | ||
343 | command = QE_INIT_TX; | ||
344 | else | ||
345 | command = QE_INIT_RX; /* We know at least one is TRUE */ | ||
346 | |||
347 | qe_issue_cmd(command, id, us_info->protocol, 0); | ||
348 | |||
349 | *uccs_ret = uccs; | ||
350 | return 0; | ||
351 | } | ||
352 | EXPORT_SYMBOL(ucc_slow_init); | ||
353 | |||
354 | void ucc_slow_free(struct ucc_slow_private * uccs) | ||
355 | { | ||
356 | if (!uccs) | ||
357 | return; | ||
358 | |||
359 | if (uccs->rx_base_offset) | ||
360 | qe_muram_free(uccs->rx_base_offset); | ||
361 | |||
362 | if (uccs->tx_base_offset) | ||
363 | qe_muram_free(uccs->tx_base_offset); | ||
364 | |||
365 | if (uccs->us_pram) | ||
366 | qe_muram_free(uccs->us_pram_offset); | ||
367 | |||
368 | if (uccs->us_regs) | ||
369 | iounmap(uccs->us_regs); | ||
370 | |||
371 | kfree(uccs); | ||
372 | } | ||
373 | EXPORT_SYMBOL(ucc_slow_free); | ||
374 | |||
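The slow-path lifecycle mirrors the fast one, with the extra QE init command at the end of ucc_slow_init. Again a hypothetical caller sketch, not code from this commit (a real user is the ucc_uart driver); note the code above assumes at least one of init_tx/init_rx is set:

```c
/* Hypothetical bring-up sketch; us_info is assumed pre-populated
 * (regs, ucc_num, BD ring lengths, protocol, clocks, ...).
 */
static int example_slow_bring_up(struct ucc_slow_info *us_info)
{
	struct ucc_slow_private *uccs;
	int ret;

	us_info->init_tx = 1;	/* ucc_slow_init assumes at least one is set */
	us_info->init_rx = 1;

	/* PRAM page, BD rings, GUMR, then the QE_INIT_TX_RX command */
	ret = ucc_slow_init(us_info, &uccs);
	if (ret)
		return ret;

	ucc_slow_enable(uccs, COMM_DIR_RX | COMM_DIR_TX);

	/* ... run traffic ... */

	ucc_slow_graceful_stop_tx(uccs);	/* drain Tx before tear-down */
	ucc_slow_disable(uccs, COMM_DIR_RX | COMM_DIR_TX);
	ucc_slow_free(uccs);
	return 0;
}
```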
diff --git a/arch/powerpc/sysdev/qe_lib/usb.c b/arch/powerpc/sysdev/qe_lib/usb.c
deleted file mode 100644
index 27f23bd15eb6..000000000000
--- a/arch/powerpc/sysdev/qe_lib/usb.c
+++ /dev/null
@@ -1,56 +0,0 @@
1 | /* | ||
2 | * QE USB routines | ||
3 | * | ||
4 | * Copyright 2006 Freescale Semiconductor, Inc. | ||
5 | * Shlomi Gridish <gridish@freescale.com> | ||
6 | * Jerry Huang <Chang-Ming.Huang@freescale.com> | ||
7 | * Copyright (c) MontaVista Software, Inc. 2008. | ||
8 | * Anton Vorontsov <avorontsov@ru.mvista.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/export.h> | ||
19 | #include <linux/io.h> | ||
20 | #include <asm/immap_qe.h> | ||
21 | #include <asm/qe.h> | ||
22 | |||
23 | int qe_usb_clock_set(enum qe_clock clk, int rate) | ||
24 | { | ||
25 | struct qe_mux __iomem *mux = &qe_immr->qmx; | ||
26 | unsigned long flags; | ||
27 | u32 val; | ||
28 | |||
29 | switch (clk) { | ||
30 | case QE_CLK3: val = QE_CMXGCR_USBCS_CLK3; break; | ||
31 | case QE_CLK5: val = QE_CMXGCR_USBCS_CLK5; break; | ||
32 | case QE_CLK7: val = QE_CMXGCR_USBCS_CLK7; break; | ||
33 | case QE_CLK9: val = QE_CMXGCR_USBCS_CLK9; break; | ||
34 | case QE_CLK13: val = QE_CMXGCR_USBCS_CLK13; break; | ||
35 | case QE_CLK17: val = QE_CMXGCR_USBCS_CLK17; break; | ||
36 | case QE_CLK19: val = QE_CMXGCR_USBCS_CLK19; break; | ||
37 | case QE_CLK21: val = QE_CMXGCR_USBCS_CLK21; break; | ||
38 | case QE_BRG9: val = QE_CMXGCR_USBCS_BRG9; break; | ||
39 | case QE_BRG10: val = QE_CMXGCR_USBCS_BRG10; break; | ||
40 | default: | ||
41 | pr_err("%s: requested unknown clock %d\n", __func__, clk); | ||
42 | return -EINVAL; | ||
43 | } | ||
44 | |||
45 | if (qe_clock_is_brg(clk)) | ||
46 | qe_setbrg(clk, rate, 1); | ||
47 | |||
48 | spin_lock_irqsave(&cmxgcr_lock, flags); | ||
49 | |||
50 | clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val); | ||
51 | |||
52 | spin_unlock_irqrestore(&cmxgcr_lock, flags); | ||
53 | |||
54 | return 0; | ||
55 | } | ||
56 | EXPORT_SYMBOL(qe_usb_clock_set); | ||
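Usage is a single call; a hypothetical sketch, not from this commit. Per the code above, a BRG source is reprogrammed to the requested rate via qe_setbrg(), while for a plain CLK input the rate argument does not affect the mux:

```c
/* Hypothetical caller sketch: use BRG9, programmed to 48 MHz, as the
 * QE USB controller's clock source.
 */
static int example_usb_clock(void)
{
	int ret = qe_usb_clock_set(QE_BRG9, 48000000);

	if (ret)
		pr_err("failed to route USB clock: %d\n", ret);
	return ret;
}
```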
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 786bf01691c9..07a8508cb7fa 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -320,6 +320,7 @@ static inline void disable_surveillance(void)
320 | #ifdef CONFIG_PPC_PSERIES | 320 | #ifdef CONFIG_PPC_PSERIES |
321 | /* Since this can't be a module, args should end up below 4GB. */ | 321 | /* Since this can't be a module, args should end up below 4GB. */ |
322 | static struct rtas_args args; | 322 | static struct rtas_args args; |
323 | int token; | ||
323 | 324 | ||
324 | /* | 325 | /* |
325 | * At this point we have got all the cpus we can into | 326 | * At this point we have got all the cpus we can into |
@@ -328,17 +329,12 @@ static inline void disable_surveillance(void)
328 | * If we did try to take rtas.lock there would be a | 329 | * If we did try to take rtas.lock there would be a |
329 | * real possibility of deadlock. | 330 | * real possibility of deadlock. |
330 | */ | 331 | */ |
331 | args.token = rtas_token("set-indicator"); | 332 | token = rtas_token("set-indicator"); |
332 | if (args.token == RTAS_UNKNOWN_SERVICE) | 333 | if (token == RTAS_UNKNOWN_SERVICE) |
333 | return; | 334 | return; |
334 | args.token = cpu_to_be32(args.token); | 335 | |
335 | args.nargs = cpu_to_be32(3); | 336 | rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0); |
336 | args.nret = cpu_to_be32(1); | 337 | |
337 | args.rets = &args.args[3]; | ||
338 | args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN); | ||
339 | args.args[1] = 0; | ||
340 | args.args[2] = 0; | ||
341 | enter_rtas(__pa(&args)); | ||
342 | #endif /* CONFIG_PPC_PSERIES */ | 338 | #endif /* CONFIG_PPC_PSERIES */ |
343 | } | 339 | } |
344 | 340 | ||
@@ -1522,6 +1518,8 @@ static void excprint(struct pt_regs *fp)
1522 | 1518 | ||
1523 | if (trap == 0x700) | 1519 | if (trap == 0x700) |
1524 | print_bug_trap(fp); | 1520 | print_bug_trap(fp); |
1521 | |||
1522 | printf(linux_banner); | ||
1525 | } | 1523 | } |
1526 | 1524 | ||
1527 | static void prregs(struct pt_regs *fp) | 1525 | static void prregs(struct pt_regs *fp) |
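Back in the disable_surveillance() hunk above, the conversion to rtas_call_unlocked() is behavior-preserving: the helper centralizes exactly the marshalling the deleted lines performed. Regrouped for readability (a sketch of the deleted sequence itself, not of the helper's source):

```c
/* What the removed open-coded sequence did: build the big-endian RTAS
 * call frame for "set-indicator" (3 inputs, 1 return word), point rets
 * past the inputs, then trap into RTAS -- all without taking rtas.lock,
 * which xmon must never do.
 */
args.token   = cpu_to_be32(token);		/* "set-indicator" token */
args.nargs   = cpu_to_be32(3);
args.nret    = cpu_to_be32(1);
args.rets    = &args.args[3];			/* returns follow inputs */
args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN);
args.args[1] = 0;				/* indicator index */
args.args[2] = 0;				/* new value: off */
enter_rtas(__pa(&args));
```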