author		Michael Ellerman <mpe@ellerman.id.au>	2016-12-15 23:05:38 -0500
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-12-15 23:05:38 -0500
commit		c6f6634721c871bfab4235e1cbcad208d3063798 (patch)
tree		9cc1d0307b9c5a3a84021419d5f80bea8bbfc49e
parent		ff45000fcb56b5b0f1a14a865d3541746d838a0a (diff)
parent		baae856ebdeeaefbadd4a02cdb54b7c2277ff4dd (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux into next
Freescale updates from Scott:
"Highlights include 8xx hugepage support, qbman fixes/cleanup, device
tree updates, and some misc cleanup."
47 files changed, 1040 insertions, 580 deletions
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index fbbad6446741..c65aff0edf56 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -158,4 +158,5 @@ ti,tsc2003 I2C Touch-Screen Controller | |||
158 | ti,tmp102 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface | 158 | ti,tmp102 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface |
159 | ti,tmp103 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface | 159 | ti,tmp103 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface |
160 | ti,tmp275 Digital Temperature Sensor | 160 | ti,tmp275 Digital Temperature Sensor |
161 | winbond,w83793 Winbond/Nuvoton H/W Monitor | ||
161 | winbond,wpct301 i2c trusted platform module (TPM) | 162 | winbond,wpct301 i2c trusted platform module (TPM) |
diff --git a/arch/powerpc/boot/dts/fsl/t1023rdb.dts b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
index 29757623e5ba..5ba6fbfca274 100644
--- a/arch/powerpc/boot/dts/fsl/t1023rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
@@ -41,6 +41,27 @@ | |||
41 | #size-cells = <2>; | 41 | #size-cells = <2>; |
42 | interrupt-parent = <&mpic>; | 42 | interrupt-parent = <&mpic>; |
43 | 43 | ||
44 | reserved-memory { | ||
45 | #address-cells = <2>; | ||
46 | #size-cells = <2>; | ||
47 | ranges; | ||
48 | |||
49 | bman_fbpr: bman-fbpr { | ||
50 | size = <0 0x1000000>; | ||
51 | alignment = <0 0x1000000>; | ||
52 | }; | ||
53 | |||
54 | qman_fqd: qman-fqd { | ||
55 | size = <0 0x400000>; | ||
56 | alignment = <0 0x400000>; | ||
57 | }; | ||
58 | |||
59 | qman_pfdr: qman-pfdr { | ||
60 | size = <0 0x2000000>; | ||
61 | alignment = <0 0x2000000>; | ||
62 | }; | ||
63 | }; | ||
64 | |||
44 | ifc: localbus@ffe124000 { | 65 | ifc: localbus@ffe124000 { |
45 | reg = <0xf 0xfe124000 0 0x2000>; | 66 | reg = <0xf 0xfe124000 0 0x2000>; |
46 | ranges = <0 0 0xf 0xe8000000 0x08000000 | 67 | ranges = <0 0 0xf 0xe8000000 0x08000000 |
@@ -72,6 +93,14 @@ | |||
72 | ranges = <0x00000000 0xf 0x00000000 0x01072000>; | 93 | ranges = <0x00000000 0xf 0x00000000 0x01072000>; |
73 | }; | 94 | }; |
74 | 95 | ||
96 | bportals: bman-portals@ff4000000 { | ||
97 | ranges = <0x0 0xf 0xf4000000 0x2000000>; | ||
98 | }; | ||
99 | |||
100 | qportals: qman-portals@ff6000000 { | ||
101 | ranges = <0x0 0xf 0xf6000000 0x2000000>; | ||
102 | }; | ||
103 | |||
75 | soc: soc@ffe000000 { | 104 | soc: soc@ffe000000 { |
76 | ranges = <0x00000000 0xf 0xfe000000 0x1000000>; | 105 | ranges = <0x00000000 0xf 0xfe000000 0x1000000>; |
77 | reg = <0xf 0xfe000000 0 0x00001000>; | 106 | reg = <0xf 0xfe000000 0 0x00001000>; |
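The reserved-memory node added here uses #address-cells = <2> and #size-cells = <2>, so each size and alignment property is a pair of 32-bit cells forming one 64-bit value: <0 0x1000000> is 16 MiB for bman-fbpr, <0 0x400000> is 4 MiB for qman-fqd, and <0 0x2000000> is 32 MiB for qman-pfdr. A minimal sketch of that cell arithmetic (illustrative only, not part of the patch; dt_cells64() is a made-up helper):

#include <stdint.h>
#include <stdio.h>

/* Combine a two-cell devicetree value (<hi lo>) into a 64-bit number. */
static uint64_t dt_cells64(uint32_t hi, uint32_t lo)
{
        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        printf("bman-fbpr: %llu MiB\n", (unsigned long long)(dt_cells64(0, 0x1000000) >> 20));
        printf("qman-fqd:  %llu MiB\n", (unsigned long long)(dt_cells64(0, 0x400000) >> 20));
        printf("qman-pfdr: %llu MiB\n", (unsigned long long)(dt_cells64(0, 0x2000000) >> 20));
        return 0;
}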
diff --git a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
index 6e0b4892a740..da2894c59479 100644
--- a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
@@ -34,6 +34,21 @@ | |||
34 | 34 | ||
35 | #include <dt-bindings/thermal/thermal.h> | 35 | #include <dt-bindings/thermal/thermal.h> |
36 | 36 | ||
37 | &bman_fbpr { | ||
38 | compatible = "fsl,bman-fbpr"; | ||
39 | alloc-ranges = <0 0 0x10000 0>; | ||
40 | }; | ||
41 | |||
42 | &qman_fqd { | ||
43 | compatible = "fsl,qman-fqd"; | ||
44 | alloc-ranges = <0 0 0x10000 0>; | ||
45 | }; | ||
46 | |||
47 | &qman_pfdr { | ||
48 | compatible = "fsl,qman-pfdr"; | ||
49 | alloc-ranges = <0 0 0x10000 0>; | ||
50 | }; | ||
51 | |||
37 | &ifc { | 52 | &ifc { |
38 | #address-cells = <2>; | 53 | #address-cells = <2>; |
39 | #size-cells = <1>; | 54 | #size-cells = <1>; |
@@ -180,6 +195,92 @@ | |||
180 | }; | 195 | }; |
181 | }; | 196 | }; |
182 | 197 | ||
198 | &bportals { | ||
199 | #address-cells = <0x1>; | ||
200 | #size-cells = <0x1>; | ||
201 | compatible = "simple-bus"; | ||
202 | |||
203 | bman-portal@0 { | ||
204 | cell-index = <0x0>; | ||
205 | compatible = "fsl,bman-portal"; | ||
206 | reg = <0x0 0x4000>, <0x1000000 0x1000>; | ||
207 | interrupts = <105 2 0 0>; | ||
208 | }; | ||
209 | bman-portal@4000 { | ||
210 | cell-index = <0x1>; | ||
211 | compatible = "fsl,bman-portal"; | ||
212 | reg = <0x4000 0x4000>, <0x1001000 0x1000>; | ||
213 | interrupts = <107 2 0 0>; | ||
214 | }; | ||
215 | bman-portal@8000 { | ||
216 | cell-index = <2>; | ||
217 | compatible = "fsl,bman-portal"; | ||
218 | reg = <0x8000 0x4000>, <0x1002000 0x1000>; | ||
219 | interrupts = <109 2 0 0>; | ||
220 | }; | ||
221 | bman-portal@c000 { | ||
222 | cell-index = <0x3>; | ||
223 | compatible = "fsl,bman-portal"; | ||
224 | reg = <0xc000 0x4000>, <0x1003000 0x1000>; | ||
225 | interrupts = <111 2 0 0>; | ||
226 | }; | ||
227 | bman-portal@10000 { | ||
228 | cell-index = <0x4>; | ||
229 | compatible = "fsl,bman-portal"; | ||
230 | reg = <0x10000 0x4000>, <0x1004000 0x1000>; | ||
231 | interrupts = <113 2 0 0>; | ||
232 | }; | ||
233 | bman-portal@14000 { | ||
234 | cell-index = <0x5>; | ||
235 | compatible = "fsl,bman-portal"; | ||
236 | reg = <0x14000 0x4000>, <0x1005000 0x1000>; | ||
237 | interrupts = <115 2 0 0>; | ||
238 | }; | ||
239 | }; | ||
240 | |||
241 | &qportals { | ||
242 | #address-cells = <0x1>; | ||
243 | #size-cells = <0x1>; | ||
244 | compatible = "simple-bus"; | ||
245 | |||
246 | qportal0: qman-portal@0 { | ||
247 | compatible = "fsl,qman-portal"; | ||
248 | reg = <0x0 0x4000>, <0x1000000 0x1000>; | ||
249 | interrupts = <104 0x2 0 0>; | ||
250 | cell-index = <0x0>; | ||
251 | }; | ||
252 | qportal1: qman-portal@4000 { | ||
253 | compatible = "fsl,qman-portal"; | ||
254 | reg = <0x4000 0x4000>, <0x1001000 0x1000>; | ||
255 | interrupts = <106 0x2 0 0>; | ||
256 | cell-index = <0x1>; | ||
257 | }; | ||
258 | qportal2: qman-portal@8000 { | ||
259 | compatible = "fsl,qman-portal"; | ||
260 | reg = <0x8000 0x4000>, <0x1002000 0x1000>; | ||
261 | interrupts = <108 0x2 0 0>; | ||
262 | cell-index = <0x2>; | ||
263 | }; | ||
264 | qportal3: qman-portal@c000 { | ||
265 | compatible = "fsl,qman-portal"; | ||
266 | reg = <0xc000 0x4000>, <0x1003000 0x1000>; | ||
267 | interrupts = <110 0x2 0 0>; | ||
268 | cell-index = <0x3>; | ||
269 | }; | ||
270 | qportal4: qman-portal@10000 { | ||
271 | compatible = "fsl,qman-portal"; | ||
272 | reg = <0x10000 0x4000>, <0x1004000 0x1000>; | ||
273 | interrupts = <112 0x2 0 0>; | ||
274 | cell-index = <0x4>; | ||
275 | }; | ||
276 | qportal5: qman-portal@14000 { | ||
277 | compatible = "fsl,qman-portal"; | ||
278 | reg = <0x14000 0x4000>, <0x1005000 0x1000>; | ||
279 | interrupts = <114 0x2 0 0>; | ||
280 | cell-index = <0x5>; | ||
281 | }; | ||
282 | }; | ||
283 | |||
183 | &soc { | 284 | &soc { |
184 | #address-cells = <1>; | 285 | #address-cells = <1>; |
185 | #size-cells = <1>; | 286 | #size-cells = <1>; |
@@ -413,6 +514,8 @@ | |||
413 | }; | 514 | }; |
414 | 515 | ||
415 | /include/ "qoriq-sec5.0-0.dtsi" | 516 | /include/ "qoriq-sec5.0-0.dtsi" |
517 | /include/ "qoriq-qman3.dtsi" | ||
518 | /include/ "qoriq-bman1.dtsi" | ||
416 | 519 | ||
417 | /include/ "qoriq-fman3l-0.dtsi" | 520 | /include/ "qoriq-fman3l-0.dtsi" |
418 | /include/ "qoriq-fman3-0-10g-0-best-effort.dtsi" | 521 | /include/ "qoriq-fman3-0-10g-0-best-effort.dtsi" |
diff --git a/arch/powerpc/boot/dts/fsl/t1024qds.dts b/arch/powerpc/boot/dts/fsl/t1024qds.dts
index 772143da367f..d6858b7cd93f 100644
--- a/arch/powerpc/boot/dts/fsl/t1024qds.dts
+++ b/arch/powerpc/boot/dts/fsl/t1024qds.dts
@@ -41,6 +41,27 @@ | |||
41 | #size-cells = <2>; | 41 | #size-cells = <2>; |
42 | interrupt-parent = <&mpic>; | 42 | interrupt-parent = <&mpic>; |
43 | 43 | ||
44 | reserved-memory { | ||
45 | #address-cells = <2>; | ||
46 | #size-cells = <2>; | ||
47 | ranges; | ||
48 | |||
49 | bman_fbpr: bman-fbpr { | ||
50 | size = <0 0x1000000>; | ||
51 | alignment = <0 0x1000000>; | ||
52 | }; | ||
53 | |||
54 | qman_fqd: qman-fqd { | ||
55 | size = <0 0x400000>; | ||
56 | alignment = <0 0x400000>; | ||
57 | }; | ||
58 | |||
59 | qman_pfdr: qman-pfdr { | ||
60 | size = <0 0x2000000>; | ||
61 | alignment = <0 0x2000000>; | ||
62 | }; | ||
63 | }; | ||
64 | |||
44 | ifc: localbus@ffe124000 { | 65 | ifc: localbus@ffe124000 { |
45 | reg = <0xf 0xfe124000 0 0x2000>; | 66 | reg = <0xf 0xfe124000 0 0x2000>; |
46 | ranges = <0 0 0xf 0xe8000000 0x08000000 | 67 | ranges = <0 0 0xf 0xe8000000 0x08000000 |
@@ -80,6 +101,14 @@ | |||
80 | ranges = <0x00000000 0xf 0x00000000 0x01072000>; | 101 | ranges = <0x00000000 0xf 0x00000000 0x01072000>; |
81 | }; | 102 | }; |
82 | 103 | ||
104 | bportals: bman-portals@ff4000000 { | ||
105 | ranges = <0x0 0xf 0xf4000000 0x2000000>; | ||
106 | }; | ||
107 | |||
108 | qportals: qman-portals@ff6000000 { | ||
109 | ranges = <0x0 0xf 0xf6000000 0x2000000>; | ||
110 | }; | ||
111 | |||
83 | soc: soc@ffe000000 { | 112 | soc: soc@ffe000000 { |
84 | ranges = <0x00000000 0xf 0xfe000000 0x1000000>; | 113 | ranges = <0x00000000 0xf 0xfe000000 0x1000000>; |
85 | reg = <0xf 0xfe000000 0 0x00001000>; | 114 | reg = <0xf 0xfe000000 0 0x00001000>; |
diff --git a/arch/powerpc/boot/dts/fsl/t1024rdb.dts b/arch/powerpc/boot/dts/fsl/t1024rdb.dts
index 302cdd22b4bb..73a645324bc1 100644
--- a/arch/powerpc/boot/dts/fsl/t1024rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1024rdb.dts
@@ -41,6 +41,31 @@ | |||
41 | #size-cells = <2>; | 41 | #size-cells = <2>; |
42 | interrupt-parent = <&mpic>; | 42 | interrupt-parent = <&mpic>; |
43 | 43 | ||
44 | aliases { | ||
45 | sg_2500_aqr105_phy4 = &sg_2500_aqr105_phy4; | ||
46 | }; | ||
47 | |||
48 | reserved-memory { | ||
49 | #address-cells = <2>; | ||
50 | #size-cells = <2>; | ||
51 | ranges; | ||
52 | |||
53 | bman_fbpr: bman-fbpr { | ||
54 | size = <0 0x1000000>; | ||
55 | alignment = <0 0x1000000>; | ||
56 | }; | ||
57 | |||
58 | qman_fqd: qman-fqd { | ||
59 | size = <0 0x400000>; | ||
60 | alignment = <0 0x400000>; | ||
61 | }; | ||
62 | |||
63 | qman_pfdr: qman-pfdr { | ||
64 | size = <0 0x2000000>; | ||
65 | alignment = <0 0x2000000>; | ||
66 | }; | ||
67 | }; | ||
68 | |||
44 | ifc: localbus@ffe124000 { | 69 | ifc: localbus@ffe124000 { |
45 | reg = <0xf 0xfe124000 0 0x2000>; | 70 | reg = <0xf 0xfe124000 0 0x2000>; |
46 | ranges = <0 0 0xf 0xe8000000 0x08000000 | 71 | ranges = <0 0 0xf 0xe8000000 0x08000000 |
@@ -82,6 +107,14 @@ | |||
82 | ranges = <0x00000000 0xf 0x00000000 0x01072000>; | 107 | ranges = <0x00000000 0xf 0x00000000 0x01072000>; |
83 | }; | 108 | }; |
84 | 109 | ||
110 | bportals: bman-portals@ff4000000 { | ||
111 | ranges = <0x0 0xf 0xf4000000 0x2000000>; | ||
112 | }; | ||
113 | |||
114 | qportals: qman-portals@ff6000000 { | ||
115 | ranges = <0x0 0xf 0xf6000000 0x2000000>; | ||
116 | }; | ||
117 | |||
85 | soc: soc@ffe000000 { | 118 | soc: soc@ffe000000 { |
86 | ranges = <0x00000000 0xf 0xfe000000 0x1000000>; | 119 | ranges = <0x00000000 0xf 0xfe000000 0x1000000>; |
87 | reg = <0xf 0xfe000000 0 0x00001000>; | 120 | reg = <0xf 0xfe000000 0 0x00001000>; |
diff --git a/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts b/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
index 2a5a90dd272e..fcd2aeb5b8ac 100644
--- a/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
@@ -48,6 +48,58 @@ | |||
48 | "fsl,deepsleep-cpld"; | 48 | "fsl,deepsleep-cpld"; |
49 | }; | 49 | }; |
50 | }; | 50 | }; |
51 | |||
52 | soc: soc@ffe000000 { | ||
53 | fman0: fman@400000 { | ||
54 | ethernet@e0000 { | ||
55 | phy-handle = <&phy_sgmii_0>; | ||
56 | phy-connection-type = "sgmii"; | ||
57 | }; | ||
58 | |||
59 | ethernet@e2000 { | ||
60 | phy-handle = <&phy_sgmii_1>; | ||
61 | phy-connection-type = "sgmii"; | ||
62 | }; | ||
63 | |||
64 | ethernet@e4000 { | ||
65 | phy-handle = <&phy_sgmii_2>; | ||
66 | phy-connection-type = "sgmii"; | ||
67 | }; | ||
68 | |||
69 | ethernet@e6000 { | ||
70 | phy-handle = <&phy_rgmii_0>; | ||
71 | phy-connection-type = "rgmii"; | ||
72 | }; | ||
73 | |||
74 | ethernet@e8000 { | ||
75 | phy-handle = <&phy_rgmii_1>; | ||
76 | phy-connection-type = "rgmii"; | ||
77 | }; | ||
78 | |||
79 | mdio0: mdio@fc000 { | ||
80 | phy_sgmii_0: ethernet-phy@02 { | ||
81 | reg = <0x02>; | ||
82 | }; | ||
83 | |||
84 | phy_sgmii_1: ethernet-phy@03 { | ||
85 | reg = <0x03>; | ||
86 | }; | ||
87 | |||
88 | phy_sgmii_2: ethernet-phy@01 { | ||
89 | reg = <0x01>; | ||
90 | }; | ||
91 | |||
92 | phy_rgmii_0: ethernet-phy@04 { | ||
93 | reg = <0x04>; | ||
94 | }; | ||
95 | |||
96 | phy_rgmii_1: ethernet-phy@05 { | ||
97 | reg = <0x05>; | ||
98 | }; | ||
99 | }; | ||
100 | }; | ||
101 | }; | ||
102 | |||
51 | }; | 103 | }; |
52 | 104 | ||
53 | #include "t1042si-post.dtsi" | 105 | #include "t1042si-post.dtsi" |
diff --git a/arch/powerpc/boot/dts/fsl/t4240rdb.dts b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
index cc0a264b8acb..8166c660712a 100644
--- a/arch/powerpc/boot/dts/fsl/t4240rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
@@ -125,6 +125,10 @@ | |||
125 | }; | 125 | }; |
126 | 126 | ||
127 | i2c@118000 { | 127 | i2c@118000 { |
128 | hwmon@2f { | ||
129 | compatible = "winbond,w83793"; | ||
130 | reg = <0x2f>; | ||
131 | }; | ||
128 | eeprom@52 { | 132 | eeprom@52 { |
129 | compatible = "at24,24c256"; | 133 | compatible = "at24,24c256"; |
130 | reg = <0x52>; | 134 | reg = <0x52>; |
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index 1a61e81ab0cd..cc49c95494da 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -44,6 +44,7 @@ CONFIG_FORCE_MAX_ZONEORDER=13 | |||
44 | CONFIG_FRAMEBUFFER_CONSOLE=y | 44 | CONFIG_FRAMEBUFFER_CONSOLE=y |
45 | CONFIG_FRAME_WARN=1024 | 45 | CONFIG_FRAME_WARN=1024 |
46 | CONFIG_FTL=y | 46 | CONFIG_FTL=y |
47 | CONFIG_GPIO_GENERIC_PLATFORM=y | ||
47 | CONFIG_HFS_FS=m | 48 | CONFIG_HFS_FS=m |
48 | CONFIG_HFSPLUS_FS=m | 49 | CONFIG_HFSPLUS_FS=m |
49 | CONFIG_HIGH_RES_TIMERS=y | 50 | CONFIG_HIGH_RES_TIMERS=y |
@@ -104,8 +105,13 @@ CONFIG_PACKET=y | |||
104 | CONFIG_PARTITION_ADVANCED=y | 105 | CONFIG_PARTITION_ADVANCED=y |
105 | CONFIG_PERF_EVENTS=y | 106 | CONFIG_PERF_EVENTS=y |
106 | CONFIG_POSIX_MQUEUE=y | 107 | CONFIG_POSIX_MQUEUE=y |
108 | CONFIG_POWER_SUPPLY=y | ||
109 | CONFIG_POWER_RESET=y | ||
110 | CONFIG_POWER_RESET_GPIO=y | ||
111 | CONFIG_POWER_RESET_GPIO_RESTART=y | ||
107 | CONFIG_QNX4FS_FS=m | 112 | CONFIG_QNX4FS_FS=m |
108 | CONFIG_RCU_TRACE=y | 113 | CONFIG_RCU_TRACE=y |
114 | CONFIG_RESET_CONTROLLER=y | ||
109 | CONFIG_ROOT_NFS=y | 115 | CONFIG_ROOT_NFS=y |
110 | CONFIG_SYSV_FS=m | 116 | CONFIG_SYSV_FS=m |
111 | CONFIG_SYSVIPC=y | 117 | CONFIG_SYSVIPC=y |
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 8e21bb492dca..d310546e5d9d 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -2,14 +2,42 @@ | |||
2 | #define _ASM_POWERPC_BOOK3S_32_PGALLOC_H | 2 | #define _ASM_POWERPC_BOOK3S_32_PGALLOC_H |
3 | 3 | ||
4 | #include <linux/threads.h> | 4 | #include <linux/threads.h> |
5 | #include <linux/slab.h> | ||
5 | 6 | ||
6 | /* For 32-bit, all levels of page tables are just drawn from get_free_page() */ | 7 | /* |
7 | #define MAX_PGTABLE_INDEX_SIZE 0 | 8 | * Functions that deal with pagetables that could be at any level of |
9 | * the table need to be passed an "index_size" so they know how to | ||
10 | * handle allocation. For PTE pages (which are linked to a struct | ||
11 | * page for now, and drawn from the main get_free_pages() pool), the | ||
12 | * allocation size will be (2^index_size * sizeof(pointer)) and | ||
13 | * allocations are drawn from the kmem_cache in PGT_CACHE(index_size). | ||
14 | * | ||
15 | * The maximum index size needs to be big enough to allow any | ||
16 | * pagetable sizes we need, but small enough to fit in the low bits of | ||
17 | * any page table pointer. In other words all pagetables, even tiny | ||
18 | * ones, must be aligned to allow at least enough low 0 bits to | ||
19 | * contain this value. This value is also used as a mask, so it must | ||
20 | * be one less than a power of two. | ||
21 | */ | ||
22 | #define MAX_PGTABLE_INDEX_SIZE 0xf | ||
8 | 23 | ||
9 | extern void __bad_pte(pmd_t *pmd); | 24 | extern void __bad_pte(pmd_t *pmd); |
10 | 25 | ||
11 | extern pgd_t *pgd_alloc(struct mm_struct *mm); | 26 | extern struct kmem_cache *pgtable_cache[]; |
12 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | 27 | #define PGT_CACHE(shift) ({ \ |
28 | BUG_ON(!(shift)); \ | ||
29 | pgtable_cache[(shift) - 1]; \ | ||
30 | }) | ||
31 | |||
32 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | ||
33 | { | ||
34 | return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL); | ||
35 | } | ||
36 | |||
37 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | ||
38 | { | ||
39 | kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); | ||
40 | } | ||
13 | 41 | ||
14 | /* | 42 | /* |
15 | * We don't have any real pmd's, and this code never triggers because | 43 | * We don't have any real pmd's, and this code never triggers because |
@@ -68,8 +96,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) | |||
68 | 96 | ||
69 | static inline void pgtable_free(void *table, unsigned index_size) | 97 | static inline void pgtable_free(void *table, unsigned index_size) |
70 | { | 98 | { |
71 | BUG_ON(index_size); /* 32-bit doesn't use this */ | 99 | if (!index_size) { |
72 | free_page((unsigned long)table); | 100 | free_page((unsigned long)table); |
101 | } else { | ||
102 | BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE); | ||
103 | kmem_cache_free(PGT_CACHE(index_size), table); | ||
104 | } | ||
73 | } | 105 | } |
74 | 106 | ||
75 | #define check_pgt_cache() do { } while (0) | 107 | #define check_pgt_cache() do { } while (0) |
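The new comment above notes that MAX_PGTABLE_INDEX_SIZE (0xf) is also used as a mask, so every page table must be aligned to at least 16 bytes to leave room for the index size in the low bits of the table pointer. A standalone sketch of that packing idea (assumption for illustration; pgtable_pack() and pgtable_unpack() are hypothetical helpers, not the kernel's code):

#include <assert.h>
#include <stdint.h>

#define MAX_PGTABLE_INDEX_SIZE 0xf      /* one less than a power of two */

/* Hypothetical helpers: stash index_size in the low bits of an aligned pointer. */
static uintptr_t pgtable_pack(void *table, unsigned int index_size)
{
        assert(((uintptr_t)table & MAX_PGTABLE_INDEX_SIZE) == 0);  /* needs 16-byte alignment */
        assert(index_size <= MAX_PGTABLE_INDEX_SIZE);
        return (uintptr_t)table | index_size;
}

static void *pgtable_unpack(uintptr_t packed, unsigned int *index_size)
{
        *index_size = packed & MAX_PGTABLE_INDEX_SIZE;             /* the value doubles as a mask */
        return (void *)(packed & ~(uintptr_t)MAX_PGTABLE_INDEX_SIZE);
}

int main(void)
{
        static char pgd[64] __attribute__((aligned(16)));          /* stand-in page table */
        unsigned int size;
        uintptr_t packed = pgtable_pack(pgd, 0xa);

        assert(pgtable_unpack(packed, &size) == (void *)pgd);
        assert(size == 0xa);
        return 0;
}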
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index dc58980f3ad9..012223638815 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -8,6 +8,23 @@ | |||
8 | /* And here we include common definitions */ | 8 | /* And here we include common definitions */ |
9 | #include <asm/pte-common.h> | 9 | #include <asm/pte-common.h> |
10 | 10 | ||
11 | #define PTE_INDEX_SIZE PTE_SHIFT | ||
12 | #define PMD_INDEX_SIZE 0 | ||
13 | #define PUD_INDEX_SIZE 0 | ||
14 | #define PGD_INDEX_SIZE (32 - PGDIR_SHIFT) | ||
15 | |||
16 | #define PMD_CACHE_INDEX PMD_INDEX_SIZE | ||
17 | |||
18 | #ifndef __ASSEMBLY__ | ||
19 | #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) | ||
20 | #define PMD_TABLE_SIZE 0 | ||
21 | #define PUD_TABLE_SIZE 0 | ||
22 | #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) | ||
23 | #endif /* __ASSEMBLY__ */ | ||
24 | |||
25 | #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) | ||
26 | #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) | ||
27 | |||
11 | /* | 28 | /* |
12 | * The normal case is that PTEs are 32-bits and we have a 1-page | 29 | * The normal case is that PTEs are 32-bits and we have a 1-page |
13 | * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus | 30 | * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus |
@@ -19,14 +36,10 @@ | |||
19 | * -Matt | 36 | * -Matt |
20 | */ | 37 | */ |
21 | /* PGDIR_SHIFT determines what a top-level page table entry can map */ | 38 | /* PGDIR_SHIFT determines what a top-level page table entry can map */ |
22 | #define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT) | 39 | #define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) |
23 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | 40 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
24 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 41 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
25 | 42 | ||
26 | #define PTRS_PER_PTE (1 << PTE_SHIFT) | ||
27 | #define PTRS_PER_PMD 1 | ||
28 | #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) | ||
29 | |||
30 | #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) | 43 | #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) |
31 | /* | 44 | /* |
32 | * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary | 45 | * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary |
@@ -82,12 +95,8 @@ | |||
82 | 95 | ||
83 | extern unsigned long ioremap_bot; | 96 | extern unsigned long ioremap_bot; |
84 | 97 | ||
85 | /* | 98 | /* Bits to mask out from a PGD to get to the PUD page */ |
86 | * entries per page directory level: our page-table tree is two-level, so | 99 | #define PGD_MASKED_BITS 0 |
87 | * we don't really have any PMD directory. | ||
88 | */ | ||
89 | #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT) | ||
90 | #define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT)) | ||
91 | 100 | ||
92 | #define pte_ERROR(e) \ | 101 | #define pte_ERROR(e) \ |
93 | pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \ | 102 | pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \ |
@@ -284,15 +293,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm, | |||
284 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) | 293 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) |
285 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) | 294 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) |
286 | 295 | ||
287 | #ifndef CONFIG_PPC_4K_PAGES | ||
288 | void pgtable_cache_init(void); | ||
289 | #else | ||
290 | /* | ||
291 | * No page table caches to initialise | ||
292 | */ | ||
293 | #define pgtable_cache_init() do { } while (0) | ||
294 | #endif | ||
295 | |||
296 | extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, | 296 | extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, |
297 | pmd_t **pmdp); | 297 | pmd_t **pmdp); |
298 | 298 | ||
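For the common 32-bit configuration with 4K pages, PTE_INDEX_SIZE (the old PTE_SHIFT) is 10, so the definitions above reproduce the layout described in the comment: a one-page 1024-entry pgdir whose entries each map 4 MiB worth of one-page 1024-entry PTE tables. A small worked check (sketch, assuming PTE_SHIFT = 10):

#include <assert.h>

#define PAGE_SHIFT      12
#define PTE_INDEX_SIZE  10                      /* PTE_SHIFT on 32-bit with 4K pages */
#define PGDIR_SHIFT     (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGD_INDEX_SIZE  (32 - PGDIR_SHIFT)

int main(void)
{
        assert(PGDIR_SHIFT == 22);              /* each pgd entry maps 4 MiB */
        assert((1 << PTE_INDEX_SIZE) == 1024);  /* PTRS_PER_PTE */
        assert((1 << PGD_INDEX_SIZE) == 1024);  /* PTRS_PER_PGD */
        return 0;
}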
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index bbea0040320a..6cfc5dbdae03 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -800,9 +800,6 @@ extern struct page *pgd_page(pgd_t pgd); | |||
800 | #define pgd_ERROR(e) \ | 800 | #define pgd_ERROR(e) \ |
801 | pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) | 801 | pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) |
802 | 802 | ||
803 | void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); | ||
804 | void pgtable_cache_init(void); | ||
805 | |||
806 | static inline int map_kernel_page(unsigned long ea, unsigned long pa, | 803 | static inline int map_kernel_page(unsigned long ea, unsigned long pa, |
807 | unsigned long flags) | 804 | unsigned long flags) |
808 | { | 805 | { |
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index c03e0a3dd4d8..ede215167d1a 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -51,12 +51,20 @@ static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma, | |||
51 | static inline pte_t *hugepd_page(hugepd_t hpd) | 51 | static inline pte_t *hugepd_page(hugepd_t hpd) |
52 | { | 52 | { |
53 | BUG_ON(!hugepd_ok(hpd)); | 53 | BUG_ON(!hugepd_ok(hpd)); |
54 | #ifdef CONFIG_PPC_8xx | ||
55 | return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK)); | ||
56 | #else | ||
54 | return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE); | 57 | return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE); |
58 | #endif | ||
55 | } | 59 | } |
56 | 60 | ||
57 | static inline unsigned int hugepd_shift(hugepd_t hpd) | 61 | static inline unsigned int hugepd_shift(hugepd_t hpd) |
58 | { | 62 | { |
63 | #ifdef CONFIG_PPC_8xx | ||
64 | return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17; | ||
65 | #else | ||
59 | return hpd.pd & HUGEPD_SHIFT_MASK; | 66 | return hpd.pd & HUGEPD_SHIFT_MASK; |
67 | #endif | ||
60 | } | 68 | } |
61 | 69 | ||
62 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 70 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
@@ -99,7 +107,15 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, | |||
99 | 107 | ||
100 | void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, | 108 | void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, |
101 | pte_t pte); | 109 | pte_t pte); |
110 | #ifdef CONFIG_PPC_8xx | ||
111 | static inline void flush_hugetlb_page(struct vm_area_struct *vma, | ||
112 | unsigned long vmaddr) | ||
113 | { | ||
114 | flush_tlb_page(vma, vmaddr); | ||
115 | } | ||
116 | #else | ||
102 | void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); | 117 | void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); |
118 | #endif | ||
103 | 119 | ||
104 | void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, | 120 | void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, |
105 | unsigned long end, unsigned long floor, | 121 | unsigned long end, unsigned long floor, |
@@ -205,7 +221,8 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, | |||
205 | * are reserved early in the boot process by memblock instead of via | 221 | * are reserved early in the boot process by memblock instead of via |
206 | * the .dts as on IBM platforms. | 222 | * the .dts as on IBM platforms. |
207 | */ | 223 | */ |
208 | #if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E) | 224 | #if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \ |
225 | defined(CONFIG_PPC_8xx)) | ||
209 | extern void __init reserve_hugetlb_gpages(void); | 226 | extern void __init reserve_hugetlb_gpages(void); |
210 | #else | 227 | #else |
211 | static inline void reserve_hugetlb_gpages(void) | 228 | static inline void reserve_hugetlb_gpages(void) |
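A quick worked check of the 8xx hugepd_shift() added above, using _PMD_PAGE_MASK and _PMD_PAGE_8M from pte-8xx.h plus the _PMD_PAGE_512K value introduced later in this series: ((pd & 0x000c) >> 1) + 17 yields 23 for 8M pages and 19 for 512k pages, matching PAGE_SHIFT_8M and PAGE_SHIFT_512K. As a compilable sketch (not kernel code):

#include <assert.h>

#define _PMD_PAGE_MASK  0x000c
#define _PMD_PAGE_8M    0x000c  /* existing pte-8xx.h value */
#define _PMD_PAGE_512K  0x0004  /* added by this series */

static unsigned int hugepd_shift_8xx(unsigned long pd)
{
        return ((pd & _PMD_PAGE_MASK) >> 1) + 17;
}

int main(void)
{
        assert(hugepd_shift_8xx(_PMD_PAGE_8M) == 23);   /* PAGE_SHIFT_8M */
        assert(hugepd_shift_8xx(_PMD_PAGE_512K) == 19); /* PAGE_SHIFT_512K */
        return 0;
}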
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 3e0e4927811c..798b5bf91427 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -172,6 +172,41 @@ typedef struct { | |||
172 | 172 | ||
173 | #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000) | 173 | #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000) |
174 | #define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE)) | 174 | #define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE)) |
175 | |||
176 | /* Page size definitions, common between 32 and 64-bit | ||
177 | * | ||
178 | * shift : is the "PAGE_SHIFT" value for that page size | ||
179 | * penc : is the pte encoding mask | ||
180 | * | ||
181 | */ | ||
182 | struct mmu_psize_def { | ||
183 | unsigned int shift; /* number of bits */ | ||
184 | unsigned int enc; /* PTE encoding */ | ||
185 | unsigned int ind; /* Corresponding indirect page size shift */ | ||
186 | unsigned int flags; | ||
187 | #define MMU_PAGE_SIZE_DIRECT 0x1 /* Supported as a direct size */ | ||
188 | #define MMU_PAGE_SIZE_INDIRECT 0x2 /* Supported as an indirect size */ | ||
189 | }; | ||
190 | |||
191 | extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; | ||
192 | |||
193 | static inline int shift_to_mmu_psize(unsigned int shift) | ||
194 | { | ||
195 | int psize; | ||
196 | |||
197 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) | ||
198 | if (mmu_psize_defs[psize].shift == shift) | ||
199 | return psize; | ||
200 | return -1; | ||
201 | } | ||
202 | |||
203 | static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) | ||
204 | { | ||
205 | if (mmu_psize_defs[mmu_psize].shift) | ||
206 | return mmu_psize_defs[mmu_psize].shift; | ||
207 | BUG(); | ||
208 | } | ||
209 | |||
175 | #endif /* !__ASSEMBLY__ */ | 210 | #endif /* !__ASSEMBLY__ */ |
176 | 211 | ||
177 | #if defined(CONFIG_PPC_4K_PAGES) | 212 | #if defined(CONFIG_PPC_4K_PAGES) |
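shift_to_mmu_psize() above is a linear scan of mmu_psize_defs[], so once an 8xx entry with shift 19 is registered, shift_to_mmu_psize(19) returns MMU_PAGE_512K (index 5 in the renumbering done in mmu.h below). A minimal sketch of the lookup with hypothetical table contents:

#include <assert.h>

#define MMU_PAGE_4K     0
#define MMU_PAGE_512K   5       /* new numbering from the mmu.h hunk below */
#define MMU_PAGE_8M     9
#define MMU_PAGE_COUNT  16

/* Hypothetical shifts for illustration; only the shift field matters here. */
static const unsigned int psize_shift[MMU_PAGE_COUNT] = {
        [MMU_PAGE_4K] = 12, [MMU_PAGE_512K] = 19, [MMU_PAGE_8M] = 23,
};

static int shift_to_mmu_psize(unsigned int shift)
{
        int psize;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
                if (psize_shift[psize] == shift)
                        return psize;
        return -1;
}

int main(void)
{
        assert(shift_to_mmu_psize(19) == MMU_PAGE_512K);
        assert(shift_to_mmu_psize(23) == MMU_PAGE_8M);
        return 0;
}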
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 060b40b1bc3d..09304d2bec03 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -269,19 +269,20 @@ static inline bool early_radix_enabled(void) | |||
269 | #define MMU_PAGE_64K 2 | 269 | #define MMU_PAGE_64K 2 |
270 | #define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */ | 270 | #define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */ |
271 | #define MMU_PAGE_256K 4 | 271 | #define MMU_PAGE_256K 4 |
272 | #define MMU_PAGE_1M 5 | 272 | #define MMU_PAGE_512K 5 |
273 | #define MMU_PAGE_2M 6 | 273 | #define MMU_PAGE_1M 6 |
274 | #define MMU_PAGE_4M 7 | 274 | #define MMU_PAGE_2M 7 |
275 | #define MMU_PAGE_8M 8 | 275 | #define MMU_PAGE_4M 8 |
276 | #define MMU_PAGE_16M 9 | 276 | #define MMU_PAGE_8M 9 |
277 | #define MMU_PAGE_64M 10 | 277 | #define MMU_PAGE_16M 10 |
278 | #define MMU_PAGE_256M 11 | 278 | #define MMU_PAGE_64M 11 |
279 | #define MMU_PAGE_1G 12 | 279 | #define MMU_PAGE_256M 12 |
280 | #define MMU_PAGE_16G 13 | 280 | #define MMU_PAGE_1G 13 |
281 | #define MMU_PAGE_64G 14 | 281 | #define MMU_PAGE_16G 14 |
282 | #define MMU_PAGE_64G 15 | ||
282 | 283 | ||
283 | /* N.B. we need to change the type of hpte_page_sizes if this gets to be > 16 */ | 284 | /* N.B. we need to change the type of hpte_page_sizes if this gets to be > 16 */ |
284 | #define MMU_PAGE_COUNT 15 | 285 | #define MMU_PAGE_COUNT 16 |
285 | 286 | ||
286 | #ifdef CONFIG_PPC_BOOK3S_64 | 287 | #ifdef CONFIG_PPC_BOOK3S_64 |
287 | #include <asm/book3s/64/mmu.h> | 288 | #include <asm/book3s/64/mmu.h> |
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 76d6b9e0c8a9..633139291a48 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -2,14 +2,42 @@ | |||
2 | #define _ASM_POWERPC_PGALLOC_32_H | 2 | #define _ASM_POWERPC_PGALLOC_32_H |
3 | 3 | ||
4 | #include <linux/threads.h> | 4 | #include <linux/threads.h> |
5 | #include <linux/slab.h> | ||
5 | 6 | ||
6 | /* For 32-bit, all levels of page tables are just drawn from get_free_page() */ | 7 | /* |
7 | #define MAX_PGTABLE_INDEX_SIZE 0 | 8 | * Functions that deal with pagetables that could be at any level of |
9 | * the table need to be passed an "index_size" so they know how to | ||
10 | * handle allocation. For PTE pages (which are linked to a struct | ||
11 | * page for now, and drawn from the main get_free_pages() pool), the | ||
12 | * allocation size will be (2^index_size * sizeof(pointer)) and | ||
13 | * allocations are drawn from the kmem_cache in PGT_CACHE(index_size). | ||
14 | * | ||
15 | * The maximum index size needs to be big enough to allow any | ||
16 | * pagetable sizes we need, but small enough to fit in the low bits of | ||
17 | * any page table pointer. In other words all pagetables, even tiny | ||
18 | * ones, must be aligned to allow at least enough low 0 bits to | ||
19 | * contain this value. This value is also used as a mask, so it must | ||
20 | * be one less than a power of two. | ||
21 | */ | ||
22 | #define MAX_PGTABLE_INDEX_SIZE 0xf | ||
8 | 23 | ||
9 | extern void __bad_pte(pmd_t *pmd); | 24 | extern void __bad_pte(pmd_t *pmd); |
10 | 25 | ||
11 | extern pgd_t *pgd_alloc(struct mm_struct *mm); | 26 | extern struct kmem_cache *pgtable_cache[]; |
12 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | 27 | #define PGT_CACHE(shift) ({ \ |
28 | BUG_ON(!(shift)); \ | ||
29 | pgtable_cache[(shift) - 1]; \ | ||
30 | }) | ||
31 | |||
32 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | ||
33 | { | ||
34 | return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL); | ||
35 | } | ||
36 | |||
37 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | ||
38 | { | ||
39 | kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); | ||
40 | } | ||
13 | 41 | ||
14 | /* | 42 | /* |
15 | * We don't have any real pmd's, and this code never triggers because | 43 | * We don't have any real pmd's, and this code never triggers because |
@@ -68,8 +96,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) | |||
68 | 96 | ||
69 | static inline void pgtable_free(void *table, unsigned index_size) | 97 | static inline void pgtable_free(void *table, unsigned index_size) |
70 | { | 98 | { |
71 | BUG_ON(index_size); /* 32-bit doesn't use this */ | 99 | if (!index_size) { |
72 | free_page((unsigned long)table); | 100 | free_page((unsigned long)table); |
101 | } else { | ||
102 | BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE); | ||
103 | kmem_cache_free(PGT_CACHE(index_size), table); | ||
104 | } | ||
73 | } | 105 | } |
74 | 106 | ||
75 | #define check_pgt_cache() do { } while (0) | 107 | #define check_pgt_cache() do { } while (0) |
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 65073fbc6707..ba9921bf202e 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -16,6 +16,23 @@ extern int icache_44x_need_flush; | |||
16 | 16 | ||
17 | #endif /* __ASSEMBLY__ */ | 17 | #endif /* __ASSEMBLY__ */ |
18 | 18 | ||
19 | #define PTE_INDEX_SIZE PTE_SHIFT | ||
20 | #define PMD_INDEX_SIZE 0 | ||
21 | #define PUD_INDEX_SIZE 0 | ||
22 | #define PGD_INDEX_SIZE (32 - PGDIR_SHIFT) | ||
23 | |||
24 | #define PMD_CACHE_INDEX PMD_INDEX_SIZE | ||
25 | |||
26 | #ifndef __ASSEMBLY__ | ||
27 | #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) | ||
28 | #define PMD_TABLE_SIZE 0 | ||
29 | #define PUD_TABLE_SIZE 0 | ||
30 | #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) | ||
31 | #endif /* __ASSEMBLY__ */ | ||
32 | |||
33 | #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) | ||
34 | #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) | ||
35 | |||
19 | /* | 36 | /* |
20 | * The normal case is that PTEs are 32-bits and we have a 1-page | 37 | * The normal case is that PTEs are 32-bits and we have a 1-page |
21 | * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus | 38 | * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus |
@@ -27,22 +44,12 @@ extern int icache_44x_need_flush; | |||
27 | * -Matt | 44 | * -Matt |
28 | */ | 45 | */ |
29 | /* PGDIR_SHIFT determines what a top-level page table entry can map */ | 46 | /* PGDIR_SHIFT determines what a top-level page table entry can map */ |
30 | #define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT) | 47 | #define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) |
31 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | 48 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
32 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 49 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
33 | 50 | ||
34 | /* | 51 | /* Bits to mask out from a PGD to get to the PUD page */ |
35 | * entries per page directory level: our page-table tree is two-level, so | 52 | #define PGD_MASKED_BITS 0 |
36 | * we don't really have any PMD directory. | ||
37 | */ | ||
38 | #ifndef __ASSEMBLY__ | ||
39 | #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT) | ||
40 | #define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT)) | ||
41 | #endif /* __ASSEMBLY__ */ | ||
42 | |||
43 | #define PTRS_PER_PTE (1 << PTE_SHIFT) | ||
44 | #define PTRS_PER_PMD 1 | ||
45 | #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) | ||
46 | 53 | ||
47 | #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) | 54 | #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) |
48 | #define FIRST_USER_ADDRESS 0UL | 55 | #define FIRST_USER_ADDRESS 0UL |
@@ -329,15 +336,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm, | |||
329 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) | 336 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) |
330 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) | 337 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) |
331 | 338 | ||
332 | #ifndef CONFIG_PPC_4K_PAGES | ||
333 | void pgtable_cache_init(void); | ||
334 | #else | ||
335 | /* | ||
336 | * No page table caches to initialise | ||
337 | */ | ||
338 | #define pgtable_cache_init() do { } while (0) | ||
339 | #endif | ||
340 | |||
341 | extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, | 339 | extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, |
342 | pmd_t **pmdp); | 340 | pmd_t **pmdp); |
343 | 341 | ||
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 3742b1919661..b4df2734c078 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -49,6 +49,7 @@ | |||
49 | #define _PMD_BAD 0x0ff0 | 49 | #define _PMD_BAD 0x0ff0 |
50 | #define _PMD_PAGE_MASK 0x000c | 50 | #define _PMD_PAGE_MASK 0x000c |
51 | #define _PMD_PAGE_8M 0x000c | 51 | #define _PMD_PAGE_8M 0x000c |
52 | #define _PMD_PAGE_512K 0x0004 | ||
52 | 53 | ||
53 | /* Until my rework is finished, 8xx still needs atomic PTE updates */ | 54 | /* Until my rework is finished, 8xx still needs atomic PTE updates */ |
54 | #define PTE_ATOMIC_UPDATES 1 | 55 | #define PTE_ATOMIC_UPDATES 1 |
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 53a41b06a7b9..c7f927e67d14 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -347,8 +347,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm, | |||
347 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) | 347 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) |
348 | #define __swp_entry_to_pte(x) __pte((x).val) | 348 | #define __swp_entry_to_pte(x) __pte((x).val) |
349 | 349 | ||
350 | void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); | ||
351 | void pgtable_cache_init(void); | ||
352 | extern int map_kernel_page(unsigned long ea, unsigned long pa, | 350 | extern int map_kernel_page(unsigned long ea, unsigned long pa, |
353 | unsigned long flags); | 351 | unsigned long flags); |
354 | extern int __meminit vmemmap_create_mapping(unsigned long start, | 352 | extern int __meminit vmemmap_create_mapping(unsigned long start, |
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 1263c22d60d8..172849727054 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -226,7 +226,11 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | |||
226 | #ifdef CONFIG_HUGETLB_PAGE | 226 | #ifdef CONFIG_HUGETLB_PAGE |
227 | static inline int hugepd_ok(hugepd_t hpd) | 227 | static inline int hugepd_ok(hugepd_t hpd) |
228 | { | 228 | { |
229 | #ifdef CONFIG_PPC_8xx | ||
230 | return ((hpd.pd & 0x4) != 0); | ||
231 | #else | ||
229 | return (hpd.pd > 0); | 232 | return (hpd.pd > 0); |
233 | #endif | ||
230 | } | 234 | } |
231 | 235 | ||
232 | static inline int pmd_huge(pmd_t pmd) | 236 | static inline int pmd_huge(pmd_t pmd) |
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 9bd87f269d6d..dd01212935ac 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -78,6 +78,8 @@ static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, | |||
78 | 78 | ||
79 | unsigned long vmalloc_to_phys(void *vmalloc_addr); | 79 | unsigned long vmalloc_to_phys(void *vmalloc_addr); |
80 | 80 | ||
81 | void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); | ||
82 | void pgtable_cache_init(void); | ||
81 | #endif /* __ASSEMBLY__ */ | 83 | #endif /* __ASSEMBLY__ */ |
82 | 84 | ||
83 | #endif /* _ASM_POWERPC_PGTABLE_H */ | 85 | #endif /* _ASM_POWERPC_PGTABLE_H */ |
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
index 0197e12f7d48..1f1636124a04 100644
--- a/arch/powerpc/include/asm/reg_8xx.h
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -4,7 +4,7 @@ | |||
4 | #ifndef _ASM_POWERPC_REG_8xx_H | 4 | #ifndef _ASM_POWERPC_REG_8xx_H |
5 | #define _ASM_POWERPC_REG_8xx_H | 5 | #define _ASM_POWERPC_REG_8xx_H |
6 | 6 | ||
7 | #include <asm/mmu-8xx.h> | 7 | #include <asm/mmu.h> |
8 | 8 | ||
9 | /* Cache control on the MPC8xx is provided through some additional | 9 | /* Cache control on the MPC8xx is provided through some additional |
10 | * special purpose registers. | 10 | * special purpose registers. |
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index fb133a163263..1a9c99d3e5d8 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -73,6 +73,9 @@ | |||
73 | #define RPN_PATTERN 0x00f0 | 73 | #define RPN_PATTERN 0x00f0 |
74 | #endif | 74 | #endif |
75 | 75 | ||
76 | #define PAGE_SHIFT_512K 19 | ||
77 | #define PAGE_SHIFT_8M 23 | ||
78 | |||
76 | __HEAD | 79 | __HEAD |
77 | _ENTRY(_stext); | 80 | _ENTRY(_stext); |
78 | _ENTRY(_start); | 81 | _ENTRY(_start); |
@@ -322,7 +325,7 @@ SystemCall: | |||
322 | #endif | 325 | #endif |
323 | 326 | ||
324 | InstructionTLBMiss: | 327 | InstructionTLBMiss: |
325 | #if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) | 328 | #if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE) |
326 | mtspr SPRN_SPRG_SCRATCH2, r3 | 329 | mtspr SPRN_SPRG_SCRATCH2, r3 |
327 | #endif | 330 | #endif |
328 | EXCEPTION_PROLOG_0 | 331 | EXCEPTION_PROLOG_0 |
@@ -332,10 +335,12 @@ InstructionTLBMiss: | |||
332 | */ | 335 | */ |
333 | mfspr r10, SPRN_SRR0 /* Get effective address of fault */ | 336 | mfspr r10, SPRN_SRR0 /* Get effective address of fault */ |
334 | INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10) | 337 | INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10) |
335 | #if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) | ||
336 | /* Only modules will cause ITLB Misses as we always | 338 | /* Only modules will cause ITLB Misses as we always |
337 | * pin the first 8MB of kernel memory */ | 339 | * pin the first 8MB of kernel memory */ |
340 | #if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE) | ||
338 | mfcr r3 | 341 | mfcr r3 |
342 | #endif | ||
343 | #if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) | ||
339 | IS_KERNEL(r11, r10) | 344 | IS_KERNEL(r11, r10) |
340 | #endif | 345 | #endif |
341 | mfspr r11, SPRN_M_TW /* Get level 1 table */ | 346 | mfspr r11, SPRN_M_TW /* Get level 1 table */ |
@@ -343,7 +348,6 @@ InstructionTLBMiss: | |||
343 | BRANCH_UNLESS_KERNEL(3f) | 348 | BRANCH_UNLESS_KERNEL(3f) |
344 | lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha | 349 | lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha |
345 | 3: | 350 | 3: |
346 | mtcr r3 | ||
347 | #endif | 351 | #endif |
348 | /* Insert level 1 index */ | 352 | /* Insert level 1 index */ |
349 | rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 | 353 | rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 |
@@ -351,14 +355,25 @@ InstructionTLBMiss: | |||
351 | 355 | ||
352 | /* Extract level 2 index */ | 356 | /* Extract level 2 index */ |
353 | rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 | 357 | rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 |
358 | #ifdef CONFIG_HUGETLB_PAGE | ||
359 | mtcr r11 | ||
360 | bt- 28, 10f /* bit 28 = Large page (8M) */ | ||
361 | bt- 29, 20f /* bit 29 = Large page (8M or 512k) */ | ||
362 | #endif | ||
354 | rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */ | 363 | rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */ |
355 | lwz r10, 0(r10) /* Get the pte */ | 364 | lwz r10, 0(r10) /* Get the pte */ |
356 | 365 | 4: | |
366 | #if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE) | ||
367 | mtcr r3 | ||
368 | #endif | ||
357 | /* Insert the APG into the TWC from the Linux PTE. */ | 369 | /* Insert the APG into the TWC from the Linux PTE. */ |
358 | rlwimi r11, r10, 0, 25, 26 | 370 | rlwimi r11, r10, 0, 25, 26 |
359 | /* Load the MI_TWC with the attributes for this "segment." */ | 371 | /* Load the MI_TWC with the attributes for this "segment." */ |
360 | MTSPR_CPU6(SPRN_MI_TWC, r11, r3) /* Set segment attributes */ | 372 | MTSPR_CPU6(SPRN_MI_TWC, r11, r3) /* Set segment attributes */ |
361 | 373 | ||
374 | #if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES) | ||
375 | rlwimi r10, r11, 1, MI_SPS16K | ||
376 | #endif | ||
362 | #ifdef CONFIG_SWAP | 377 | #ifdef CONFIG_SWAP |
363 | rlwinm r11, r10, 32-5, _PAGE_PRESENT | 378 | rlwinm r11, r10, 32-5, _PAGE_PRESENT |
364 | and r11, r11, r10 | 379 | and r11, r11, r10 |
@@ -371,16 +386,45 @@ InstructionTLBMiss: | |||
371 | * set. All other Linux PTE bits control the behavior | 386 | * set. All other Linux PTE bits control the behavior |
372 | * of the MMU. | 387 | * of the MMU. |
373 | */ | 388 | */ |
389 | #if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES) | ||
390 | rlwimi r10, r11, 0, 0x0ff0 /* Set 24-27, clear 20-23 */ | ||
391 | #else | ||
374 | rlwimi r10, r11, 0, 0x0ff8 /* Set 24-27, clear 20-23,28 */ | 392 | rlwimi r10, r11, 0, 0x0ff8 /* Set 24-27, clear 20-23,28 */ |
393 | #endif | ||
375 | MTSPR_CPU6(SPRN_MI_RPN, r10, r3) /* Update TLB entry */ | 394 | MTSPR_CPU6(SPRN_MI_RPN, r10, r3) /* Update TLB entry */ |
376 | 395 | ||
377 | /* Restore registers */ | 396 | /* Restore registers */ |
378 | #if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) | 397 | #if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE) |
379 | mfspr r3, SPRN_SPRG_SCRATCH2 | 398 | mfspr r3, SPRN_SPRG_SCRATCH2 |
380 | #endif | 399 | #endif |
381 | EXCEPTION_EPILOG_0 | 400 | EXCEPTION_EPILOG_0 |
382 | rfi | 401 | rfi |
383 | 402 | ||
403 | #ifdef CONFIG_HUGETLB_PAGE | ||
404 | 10: /* 8M pages */ | ||
405 | #ifdef CONFIG_PPC_16K_PAGES | ||
406 | /* Extract level 2 index */ | ||
407 | rlwinm r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29 | ||
408 | /* Add level 2 base */ | ||
409 | rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1 | ||
410 | #else | ||
411 | /* Level 2 base */ | ||
412 | rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK | ||
413 | #endif | ||
414 | lwz r10, 0(r10) /* Get the pte */ | ||
415 | rlwinm r11, r11, 0, 0xf | ||
416 | b 4b | ||
417 | |||
418 | 20: /* 512k pages */ | ||
419 | /* Extract level 2 index */ | ||
420 | rlwinm r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29 | ||
421 | /* Add level 2 base */ | ||
422 | rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1 | ||
423 | lwz r10, 0(r10) /* Get the pte */ | ||
424 | rlwinm r11, r11, 0, 0xf | ||
425 | b 4b | ||
426 | #endif | ||
427 | |||
384 | . = 0x1200 | 428 | . = 0x1200 |
385 | DataStoreTLBMiss: | 429 | DataStoreTLBMiss: |
386 | mtspr SPRN_SPRG_SCRATCH2, r3 | 430 | mtspr SPRN_SPRG_SCRATCH2, r3 |
@@ -407,7 +451,6 @@ _ENTRY(DTLBMiss_jmp) | |||
407 | #endif | 451 | #endif |
408 | blt cr7, DTLBMissLinear | 452 | blt cr7, DTLBMissLinear |
409 | 3: | 453 | 3: |
410 | mtcr r3 | ||
411 | mfspr r10, SPRN_MD_EPN | 454 | mfspr r10, SPRN_MD_EPN |
412 | 455 | ||
413 | /* Insert level 1 index */ | 456 | /* Insert level 1 index */ |
@@ -418,8 +461,15 @@ _ENTRY(DTLBMiss_jmp) | |||
418 | */ | 461 | */ |
419 | /* Extract level 2 index */ | 462 | /* Extract level 2 index */ |
420 | rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 | 463 | rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 |
464 | #ifdef CONFIG_HUGETLB_PAGE | ||
465 | mtcr r11 | ||
466 | bt- 28, 10f /* bit 28 = Large page (8M) */ | ||
467 | bt- 29, 20f /* bit 29 = Large page (8M or 512k) */ | ||
468 | #endif | ||
421 | rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */ | 469 | rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */ |
422 | lwz r10, 0(r10) /* Get the pte */ | 470 | lwz r10, 0(r10) /* Get the pte */ |
471 | 4: | ||
472 | mtcr r3 | ||
423 | 473 | ||
424 | /* Insert the Guarded flag and APG into the TWC from the Linux PTE. | 474 | /* Insert the Guarded flag and APG into the TWC from the Linux PTE. |
425 | * It is bit 26-27 of both the Linux PTE and the TWC (at least | 475 | * It is bit 26-27 of both the Linux PTE and the TWC (at least |
@@ -434,6 +484,11 @@ _ENTRY(DTLBMiss_jmp) | |||
434 | rlwimi r11, r10, 32-5, 30, 30 | 484 | rlwimi r11, r10, 32-5, 30, 30 |
435 | MTSPR_CPU6(SPRN_MD_TWC, r11, r3) | 485 | MTSPR_CPU6(SPRN_MD_TWC, r11, r3) |
436 | 486 | ||
487 | /* In 4k pages mode, SPS (bit 28) in RPN must match PS[1] (bit 29) | ||
488 | * In 16k pages mode, SPS is always 1 */ | ||
489 | #if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES) | ||
490 | rlwimi r10, r11, 1, MD_SPS16K | ||
491 | #endif | ||
437 | /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set. | 492 | /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set. |
438 | * We also need to know if the insn is a load/store, so: | 493 | * We also need to know if the insn is a load/store, so: |
439 | * Clear _PAGE_PRESENT and load that which will | 494 | * Clear _PAGE_PRESENT and load that which will |
@@ -455,7 +510,11 @@ _ENTRY(DTLBMiss_jmp) | |||
455 | * of the MMU. | 510 | * of the MMU. |
456 | */ | 511 | */ |
457 | li r11, RPN_PATTERN | 512 | li r11, RPN_PATTERN |
513 | #if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES) | ||
514 | rlwimi r10, r11, 0, 24, 27 /* Set 24-27 */ | ||
515 | #else | ||
458 | rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ | 516 | rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ |
517 | #endif | ||
459 | rlwimi r10, r11, 0, 20, 20 /* clear 20 */ | 518 | rlwimi r10, r11, 0, 20, 20 /* clear 20 */ |
460 | MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */ | 519 | MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */ |
461 | 520 | ||
@@ -465,6 +524,30 @@ _ENTRY(DTLBMiss_jmp) | |||
465 | EXCEPTION_EPILOG_0 | 524 | EXCEPTION_EPILOG_0 |
466 | rfi | 525 | rfi |
467 | 526 | ||
527 | #ifdef CONFIG_HUGETLB_PAGE | ||
528 | 10: /* 8M pages */ | ||
529 | /* Extract level 2 index */ | ||
530 | #ifdef CONFIG_PPC_16K_PAGES | ||
531 | rlwinm r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29 | ||
532 | /* Add level 2 base */ | ||
533 | rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1 | ||
534 | #else | ||
535 | /* Level 2 base */ | ||
536 | rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK | ||
537 | #endif | ||
538 | lwz r10, 0(r10) /* Get the pte */ | ||
539 | rlwinm r11, r11, 0, 0xf | ||
540 | b 4b | ||
541 | |||
542 | 20: /* 512k pages */ | ||
543 | /* Extract level 2 index */ | ||
544 | rlwinm r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29 | ||
545 | /* Add level 2 base */ | ||
546 | rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1 | ||
547 | lwz r10, 0(r10) /* Get the pte */ | ||
548 | rlwinm r11, r11, 0, 0xf | ||
549 | b 4b | ||
550 | #endif | ||
468 | 551 | ||
469 | /* This is an instruction TLB error on the MPC8xx. This could be due | 552 | /* This is an instruction TLB error on the MPC8xx. This could be due |
470 | * to many reasons, such as executing guarded memory or illegal instruction | 553 | * to many reasons, such as executing guarded memory or illegal instruction |
@@ -586,6 +669,9 @@ _ENTRY(FixupDAR_cmp) | |||
586 | /* Insert level 1 index */ | 669 | /* Insert level 1 index */ |
587 | 3: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 | 670 | 3: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 |
588 | lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */ | 671 | lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */ |
672 | mtcr r11 | ||
673 | bt 28,200f /* bit 28 = Large page (8M) */ | ||
674 | bt 29,202f /* bit 29 = Large page (8M or 512K) */ | ||
589 | rlwinm r11, r11,0,0,19 /* Extract page descriptor page address */ | 675 | rlwinm r11, r11,0,0,19 /* Extract page descriptor page address */ |
590 | /* Insert level 2 index */ | 676 | /* Insert level 2 index */ |
591 | rlwimi r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 | 677 | rlwimi r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 |
@@ -611,6 +697,27 @@ _ENTRY(FixupDAR_cmp) | |||
611 | 141: mfspr r10,SPRN_SPRG_SCRATCH2 | 697 | 141: mfspr r10,SPRN_SPRG_SCRATCH2 |
612 | b DARFixed /* Nope, go back to normal TLB processing */ | 698 | b DARFixed /* Nope, go back to normal TLB processing */ |
613 | 699 | ||
700 | /* concat physical page address(r11) and page offset(r10) */ | ||
701 | 200: | ||
702 | #ifdef CONFIG_PPC_16K_PAGES | ||
703 | rlwinm r11, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1 | ||
704 | rlwimi r11, r10, 32 - (PAGE_SHIFT_8M - 2), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29 | ||
705 | #else | ||
706 | rlwinm r11, r10, 0, ~HUGEPD_SHIFT_MASK | ||
707 | #endif | ||
708 | lwz r11, 0(r11) /* Get the pte */ | ||
709 | /* concat physical page address(r11) and page offset(r10) */ | ||
710 | rlwimi r11, r10, 0, 32 - PAGE_SHIFT_8M, 31 | ||
711 | b 201b | ||
712 | |||
713 | 202: | ||
714 | rlwinm r11, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1 | ||
715 | rlwimi r11, r10, 32 - (PAGE_SHIFT_512K - 2), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29 | ||
716 | lwz r11, 0(r11) /* Get the pte */ | ||
717 | /* concat physical page address(r11) and page offset(r10) */ | ||
718 | rlwimi r11, r10, 0, 32 - PAGE_SHIFT_512K, 31 | ||
719 | b 201b | ||
720 | |||
614 | 144: mfspr r10, SPRN_DSISR | 721 | 144: mfspr r10, SPRN_DSISR |
615 | rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */ | 722 | rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */ |
616 | mtspr SPRN_DSISR, r10 | 723 | mtspr SPRN_DSISR, r10 |
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 5784682d7b63..7414034df1c3 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -7,7 +7,8 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror | |||
7 | ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) | 7 | ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) |
8 | 8 | ||
9 | obj-y := fault.o mem.o pgtable.o mmap.o \ | 9 | obj-y := fault.o mem.o pgtable.o mmap.o \ |
10 | init_$(BITS).o pgtable_$(BITS).o | 10 | init_$(BITS).o pgtable_$(BITS).o \ |
11 | init-common.o | ||
11 | obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \ | 12 | obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \ |
12 | tlb_nohash_low.o | 13 | tlb_nohash_low.o |
13 | obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o | 14 | obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o |
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a5d3ecdabc44..289df38fb7e0 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -26,6 +26,8 @@ | |||
26 | #ifdef CONFIG_HUGETLB_PAGE | 26 | #ifdef CONFIG_HUGETLB_PAGE |
27 | 27 | ||
28 | #define PAGE_SHIFT_64K 16 | 28 | #define PAGE_SHIFT_64K 16 |
29 | #define PAGE_SHIFT_512K 19 | ||
30 | #define PAGE_SHIFT_8M 23 | ||
29 | #define PAGE_SHIFT_16M 24 | 31 | #define PAGE_SHIFT_16M 24 |
30 | #define PAGE_SHIFT_16G 34 | 32 | #define PAGE_SHIFT_16G 34 |
31 | 33 | ||
@@ -38,7 +40,7 @@ unsigned int HPAGE_SHIFT; | |||
38 | * implementations may have more than one gpage size, so we need multiple | 40 | * implementations may have more than one gpage size, so we need multiple |
39 | * arrays | 41 | * arrays |
40 | */ | 42 | */ |
41 | #ifdef CONFIG_PPC_FSL_BOOK3E | 43 | #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) |
42 | #define MAX_NUMBER_GPAGES 128 | 44 | #define MAX_NUMBER_GPAGES 128 |
43 | struct psize_gpages { | 45 | struct psize_gpages { |
44 | u64 gpage_list[MAX_NUMBER_GPAGES]; | 46 | u64 gpage_list[MAX_NUMBER_GPAGES]; |
@@ -64,14 +66,16 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, | |||
64 | { | 66 | { |
65 | struct kmem_cache *cachep; | 67 | struct kmem_cache *cachep; |
66 | pte_t *new; | 68 | pte_t *new; |
67 | |||
68 | #ifdef CONFIG_PPC_FSL_BOOK3E | ||
69 | int i; | 69 | int i; |
70 | int num_hugepd = 1 << (pshift - pdshift); | 70 | int num_hugepd; |
71 | cachep = hugepte_cache; | 71 | |
72 | #else | 72 | if (pshift >= pdshift) { |
73 | cachep = PGT_CACHE(pdshift - pshift); | 73 | cachep = hugepte_cache; |
74 | #endif | 74 | num_hugepd = 1 << (pshift - pdshift); |
75 | } else { | ||
76 | cachep = PGT_CACHE(pdshift - pshift); | ||
77 | num_hugepd = 1; | ||
78 | } | ||
75 | 79 | ||
76 | new = kmem_cache_zalloc(cachep, GFP_KERNEL); | 80 | new = kmem_cache_zalloc(cachep, GFP_KERNEL); |
77 | 81 | ||
@@ -89,7 +93,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, | |||
89 | smp_wmb(); | 93 | smp_wmb(); |
90 | 94 | ||
91 | spin_lock(&mm->page_table_lock); | 95 | spin_lock(&mm->page_table_lock); |
92 | #ifdef CONFIG_PPC_FSL_BOOK3E | 96 | |
93 | /* | 97 | /* |
94 | * We have multiple higher-level entries that point to the same | 98 | * We have multiple higher-level entries that point to the same |
95 | * actual pte location. Fill in each as we go and backtrack on error. | 99 | * actual pte location. Fill in each as we go and backtrack on error. |
@@ -100,8 +104,18 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, | |||
100 | if (unlikely(!hugepd_none(*hpdp))) | 104 | if (unlikely(!hugepd_none(*hpdp))) |
101 | break; | 105 | break; |
102 | else | 106 | else |
107 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
108 | hpdp->pd = __pa(new) | | ||
109 | (shift_to_mmu_psize(pshift) << 2); | ||
110 | #elif defined(CONFIG_PPC_8xx) | ||
111 | hpdp->pd = __pa(new) | | ||
112 | (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : | ||
113 | _PMD_PAGE_512K) | | ||
114 | _PMD_PRESENT; | ||
115 | #else | ||
103 | /* We use the old format for PPC_FSL_BOOK3E */ | 116 | /* We use the old format for PPC_FSL_BOOK3E */ |
104 | hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; | 117 | hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; |
118 | #endif | ||
105 | } | 119 | } |
106 | /* If we bailed from the for loop early, an error occurred, clean up */ | 120 | /* If we bailed from the for loop early, an error occurred, clean up */ |
107 | if (i < num_hugepd) { | 121 | if (i < num_hugepd) { |
@@ -109,17 +123,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, | |||
109 | hpdp->pd = 0; | 123 | hpdp->pd = 0; |
110 | kmem_cache_free(cachep, new); | 124 | kmem_cache_free(cachep, new); |
111 | } | 125 | } |
112 | #else | ||
113 | if (!hugepd_none(*hpdp)) | ||
114 | kmem_cache_free(cachep, new); | ||
115 | else { | ||
116 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
117 | hpdp->pd = __pa(new) | (shift_to_mmu_psize(pshift) << 2); | ||
118 | #else | ||
119 | hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; | ||
120 | #endif | ||
121 | } | ||
122 | #endif | ||
123 | spin_unlock(&mm->page_table_lock); | 126 | spin_unlock(&mm->page_table_lock); |
124 | return 0; | 127 | return 0; |
125 | } | 128 | } |
@@ -128,7 +131,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, | |||
128 | * These macros define how to determine which level of the page table holds | 131 | * These macros define how to determine which level of the page table holds |
129 | * the hpdp. | 132 | * the hpdp. |
130 | */ | 133 | */ |
131 | #ifdef CONFIG_PPC_FSL_BOOK3E | 134 | #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) |
132 | #define HUGEPD_PGD_SHIFT PGDIR_SHIFT | 135 | #define HUGEPD_PGD_SHIFT PGDIR_SHIFT |
133 | #define HUGEPD_PUD_SHIFT PUD_SHIFT | 136 | #define HUGEPD_PUD_SHIFT PUD_SHIFT |
134 | #else | 137 | #else |
@@ -136,7 +139,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, | |||
136 | #define HUGEPD_PUD_SHIFT PMD_SHIFT | 139 | #define HUGEPD_PUD_SHIFT PMD_SHIFT |
137 | #endif | 140 | #endif |
138 | 141 | ||
139 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
140 | /* | 142 | /* |
141 | * At this point we do the placement change only for BOOK3S 64. This would | 143 | * At this point we do the placement change only for BOOK3S 64. This would |
142 | * possibly work on other subarchs. | 144 | * possibly work on other subarchs. |
@@ -153,6 +155,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz | |||
153 | addr &= ~(sz-1); | 155 | addr &= ~(sz-1); |
154 | pg = pgd_offset(mm, addr); | 156 | pg = pgd_offset(mm, addr); |
155 | 157 | ||
158 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
156 | if (pshift == PGDIR_SHIFT) | 159 | if (pshift == PGDIR_SHIFT) |
157 | /* 16GB huge page */ | 160 | /* 16GB huge page */ |
158 | return (pte_t *) pg; | 161 | return (pte_t *) pg; |
@@ -178,32 +181,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz | |||
178 | hpdp = (hugepd_t *)pm; | 181 | hpdp = (hugepd_t *)pm; |
179 | } | 182 | } |
180 | } | 183 | } |
181 | if (!hpdp) | ||
182 | return NULL; | ||
183 | |||
184 | BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp)); | ||
185 | |||
186 | if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift)) | ||
187 | return NULL; | ||
188 | |||
189 | return hugepte_offset(*hpdp, addr, pdshift); | ||
190 | } | ||
191 | |||
192 | #else | 184 | #else |
193 | |||
194 | pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) | ||
195 | { | ||
196 | pgd_t *pg; | ||
197 | pud_t *pu; | ||
198 | pmd_t *pm; | ||
199 | hugepd_t *hpdp = NULL; | ||
200 | unsigned pshift = __ffs(sz); | ||
201 | unsigned pdshift = PGDIR_SHIFT; | ||
202 | |||
203 | addr &= ~(sz-1); | ||
204 | |||
205 | pg = pgd_offset(mm, addr); | ||
206 | |||
207 | if (pshift >= HUGEPD_PGD_SHIFT) { | 185 | if (pshift >= HUGEPD_PGD_SHIFT) { |
208 | hpdp = (hugepd_t *)pg; | 186 | hpdp = (hugepd_t *)pg; |
209 | } else { | 187 | } else { |
@@ -217,7 +195,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz | |||
217 | hpdp = (hugepd_t *)pm; | 195 | hpdp = (hugepd_t *)pm; |
218 | } | 196 | } |
219 | } | 197 | } |
220 | 198 | #endif | |
221 | if (!hpdp) | 199 | if (!hpdp) |
222 | return NULL; | 200 | return NULL; |
223 | 201 | ||
@@ -228,9 +206,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz | |||
228 | 206 | ||
229 | return hugepte_offset(*hpdp, addr, pdshift); | 207 | return hugepte_offset(*hpdp, addr, pdshift); |
230 | } | 208 | } |
231 | #endif | ||
232 | 209 | ||
233 | #ifdef CONFIG_PPC_FSL_BOOK3E | 210 | #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) |
234 | /* Build list of addresses of gigantic pages. This function is used in early | 211 | /* Build list of addresses of gigantic pages. This function is used in early |
235 | * boot before the buddy allocator is setup. | 212 | * boot before the buddy allocator is setup. |
236 | */ | 213 | */ |
@@ -310,7 +287,11 @@ static int __init do_gpage_early_setup(char *param, char *val, | |||
310 | npages = 0; | 287 | npages = 0; |
311 | if (npages > MAX_NUMBER_GPAGES) { | 288 | if (npages > MAX_NUMBER_GPAGES) { |
312 | pr_warn("MMU: %lu pages requested for page " | 289 | pr_warn("MMU: %lu pages requested for page " |
290 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | ||
313 | "size %llu KB, limiting to " | 291 | "size %llu KB, limiting to " |
292 | #else | ||
293 | "size %u KB, limiting to " | ||
294 | #endif | ||
314 | __stringify(MAX_NUMBER_GPAGES) "\n", | 295 | __stringify(MAX_NUMBER_GPAGES) "\n", |
315 | npages, size / 1024); | 296 | npages, size / 1024); |
316 | npages = MAX_NUMBER_GPAGES; | 297 | npages = MAX_NUMBER_GPAGES; |
@@ -392,7 +373,7 @@ int alloc_bootmem_huge_page(struct hstate *hstate) | |||
392 | } | 373 | } |
393 | #endif | 374 | #endif |
394 | 375 | ||
395 | #ifdef CONFIG_PPC_FSL_BOOK3E | 376 | #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) |
396 | #define HUGEPD_FREELIST_SIZE \ | 377 | #define HUGEPD_FREELIST_SIZE \ |
397 | ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)) | 378 | ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)) |
398 | 379 | ||
@@ -442,6 +423,8 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte) | |||
442 | } | 423 | } |
443 | put_cpu_var(hugepd_freelist_cur); | 424 | put_cpu_var(hugepd_freelist_cur); |
444 | } | 425 | } |
426 | #else | ||
427 | static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {} | ||
445 | #endif | 428 | #endif |
446 | 429 | ||
447 | static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift, | 430 | static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift, |
@@ -453,13 +436,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif | |||
453 | 436 | ||
454 | unsigned long pdmask = ~((1UL << pdshift) - 1); | 437 | unsigned long pdmask = ~((1UL << pdshift) - 1); |
455 | unsigned int num_hugepd = 1; | 438 | unsigned int num_hugepd = 1; |
439 | unsigned int shift = hugepd_shift(*hpdp); | ||
456 | 440 | ||
457 | #ifdef CONFIG_PPC_FSL_BOOK3E | ||
458 | /* Note: On fsl the hpdp may be the first of several */ | 441 | /* Note: On fsl the hpdp may be the first of several */ |
459 | num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift)); | 442 | if (shift > pdshift) |
460 | #else | 443 | num_hugepd = 1 << (shift - pdshift); |
461 | unsigned int shift = hugepd_shift(*hpdp); | ||
462 | #endif | ||
463 | 444 | ||
464 | start &= pdmask; | 445 | start &= pdmask; |
465 | if (start < floor) | 446 | if (start < floor) |
@@ -475,11 +456,10 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif | |||
475 | for (i = 0; i < num_hugepd; i++, hpdp++) | 456 | for (i = 0; i < num_hugepd; i++, hpdp++) |
476 | hpdp->pd = 0; | 457 | hpdp->pd = 0; |
477 | 458 | ||
478 | #ifdef CONFIG_PPC_FSL_BOOK3E | 459 | if (shift >= pdshift) |
479 | hugepd_free(tlb, hugepte); | 460 | hugepd_free(tlb, hugepte); |
480 | #else | 461 | else |
481 | pgtable_free_tlb(tlb, hugepte, pdshift - shift); | 462 | pgtable_free_tlb(tlb, hugepte, pdshift - shift); |
482 | #endif | ||
483 | } | 463 | } |
484 | 464 | ||
485 | static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, | 465 | static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, |
@@ -492,6 +472,8 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, | |||
492 | 472 | ||
493 | start = addr; | 473 | start = addr; |
494 | do { | 474 | do { |
475 | unsigned long more; | ||
476 | |||
495 | pmd = pmd_offset(pud, addr); | 477 | pmd = pmd_offset(pud, addr); |
496 | next = pmd_addr_end(addr, end); | 478 | next = pmd_addr_end(addr, end); |
497 | if (!is_hugepd(__hugepd(pmd_val(*pmd)))) { | 479 | if (!is_hugepd(__hugepd(pmd_val(*pmd)))) { |
@@ -502,15 +484,16 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, | |||
502 | WARN_ON(!pmd_none_or_clear_bad(pmd)); | 484 | WARN_ON(!pmd_none_or_clear_bad(pmd)); |
503 | continue; | 485 | continue; |
504 | } | 486 | } |
505 | #ifdef CONFIG_PPC_FSL_BOOK3E | ||
506 | /* | 487 | /* |
507 | * Increment next by the size of the huge mapping since | 488 | * Increment next by the size of the huge mapping since |
508 | * there may be more than one entry at this level for a | 489 | * there may be more than one entry at this level for a |
509 | * single hugepage, but all of them point to | 490 | * single hugepage, but all of them point to |
510 | * the same kmem cache that holds the hugepte. | 491 | * the same kmem cache that holds the hugepte. |
511 | */ | 492 | */ |
512 | next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd)); | 493 | more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd)); |
513 | #endif | 494 | if (more > next) |
495 | next = more; | ||
496 | |||
514 | free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, | 497 | free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT, |
515 | addr, next, floor, ceiling); | 498 | addr, next, floor, ceiling); |
516 | } while (addr = next, addr != end); | 499 | } while (addr = next, addr != end); |
@@ -550,15 +533,17 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, | |||
550 | hugetlb_free_pmd_range(tlb, pud, addr, next, floor, | 533 | hugetlb_free_pmd_range(tlb, pud, addr, next, floor, |
551 | ceiling); | 534 | ceiling); |
552 | } else { | 535 | } else { |
553 | #ifdef CONFIG_PPC_FSL_BOOK3E | 536 | unsigned long more; |
554 | /* | 537 | /* |
555 | * Increment next by the size of the huge mapping since | 538 | * Increment next by the size of the huge mapping since |
556 | * there may be more than one entry at this level for a | 539 | * there may be more than one entry at this level for a |
557 | * single hugepage, but all of them point to | 540 | * single hugepage, but all of them point to |
558 | * the same kmem cache that holds the hugepte. | 541 | * the same kmem cache that holds the hugepte. |
559 | */ | 542 | */ |
560 | next = addr + (1 << hugepd_shift(*(hugepd_t *)pud)); | 543 | more = addr + (1 << hugepd_shift(*(hugepd_t *)pud)); |
561 | #endif | 544 | if (more > next) |
545 | next = more; | ||
546 | |||
562 | free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT, | 547 | free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT, |
563 | addr, next, floor, ceiling); | 548 | addr, next, floor, ceiling); |
564 | } | 549 | } |
@@ -615,15 +600,17 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, | |||
615 | continue; | 600 | continue; |
616 | hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); | 601 | hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); |
617 | } else { | 602 | } else { |
618 | #ifdef CONFIG_PPC_FSL_BOOK3E | 603 | unsigned long more; |
619 | /* | 604 | /* |
620 | * Increment next by the size of the huge mapping since | 605 | * Increment next by the size of the huge mapping since |
621 | * there may be more than one entry at the pgd level | 606 | * there may be more than one entry at the pgd level |
622 | * for a single hugepage, but all of them point to the | 607 | * for a single hugepage, but all of them point to the |
623 | * same kmem cache that holds the hugepte. | 608 | * same kmem cache that holds the hugepte. |
624 | */ | 609 | */ |
625 | next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd)); | 610 | more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd)); |
626 | #endif | 611 | if (more > next) |
612 | next = more; | ||
613 | |||
627 | free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT, | 614 | free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT, |
628 | addr, next, floor, ceiling); | 615 | addr, next, floor, ceiling); |
629 | } | 616 | } |
@@ -753,12 +740,13 @@ static int __init add_huge_page_size(unsigned long long size) | |||
753 | 740 | ||
754 | /* Check that it is a page size supported by the hardware and | 741 | /* Check that it is a page size supported by the hardware and |
755 | * that it fits within pagetable and slice limits. */ | 742 | * that it fits within pagetable and slice limits. */ |
756 | #ifdef CONFIG_PPC_FSL_BOOK3E | 743 | if (size <= PAGE_SIZE) |
757 | if ((size < PAGE_SIZE) || !is_power_of_4(size)) | ||
758 | return -EINVAL; | 744 | return -EINVAL; |
759 | #else | 745 | #if defined(CONFIG_PPC_FSL_BOOK3E) |
760 | if (!is_power_of_2(size) | 746 | if (!is_power_of_4(size)) |
761 | || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT)) | 747 | return -EINVAL; |
748 | #elif !defined(CONFIG_PPC_8xx) | ||
749 | if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT)) | ||
762 | return -EINVAL; | 750 | return -EINVAL; |
763 | #endif | 751 | #endif |
764 | 752 | ||
@@ -791,53 +779,15 @@ static int __init hugepage_setup_sz(char *str) | |||
791 | } | 779 | } |
792 | __setup("hugepagesz=", hugepage_setup_sz); | 780 | __setup("hugepagesz=", hugepage_setup_sz); |
793 | 781 | ||
794 | #ifdef CONFIG_PPC_FSL_BOOK3E | ||
795 | struct kmem_cache *hugepte_cache; | 782 | struct kmem_cache *hugepte_cache; |
796 | static int __init hugetlbpage_init(void) | 783 | static int __init hugetlbpage_init(void) |
797 | { | 784 | { |
798 | int psize; | 785 | int psize; |
799 | 786 | ||
800 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { | 787 | #if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx) |
801 | unsigned shift; | ||
802 | |||
803 | if (!mmu_psize_defs[psize].shift) | ||
804 | continue; | ||
805 | |||
806 | shift = mmu_psize_to_shift(psize); | ||
807 | |||
808 | /* Don't treat normal page sizes as huge... */ | ||
809 | if (shift != PAGE_SHIFT) | ||
810 | if (add_huge_page_size(1ULL << shift) < 0) | ||
811 | continue; | ||
812 | } | ||
813 | |||
814 | /* | ||
815 | * Create a kmem cache for hugeptes. The bottom bits in the pte have | ||
816 | * size information encoded in them, so align them to allow this | ||
817 | */ | ||
818 | hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t), | ||
819 | HUGEPD_SHIFT_MASK + 1, 0, NULL); | ||
820 | if (hugepte_cache == NULL) | ||
821 | panic("%s: Unable to create kmem cache for hugeptes\n", | ||
822 | __func__); | ||
823 | |||
824 | /* Default hpage size = 4M */ | ||
825 | if (mmu_psize_defs[MMU_PAGE_4M].shift) | ||
826 | HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift; | ||
827 | else | ||
828 | panic("%s: Unable to set default huge page size\n", __func__); | ||
829 | |||
830 | |||
831 | return 0; | ||
832 | } | ||
833 | #else | ||
834 | static int __init hugetlbpage_init(void) | ||
835 | { | ||
836 | int psize; | ||
837 | |||
838 | if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE)) | 788 | if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE)) |
839 | return -ENODEV; | 789 | return -ENODEV; |
840 | 790 | #endif | |
841 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { | 791 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { |
842 | unsigned shift; | 792 | unsigned shift; |
843 | unsigned pdshift; | 793 | unsigned pdshift; |
@@ -850,9 +800,9 @@ static int __init hugetlbpage_init(void) | |||
850 | if (add_huge_page_size(1ULL << shift) < 0) | 800 | if (add_huge_page_size(1ULL << shift) < 0) |
851 | continue; | 801 | continue; |
852 | 802 | ||
853 | if (shift < PMD_SHIFT) | 803 | if (shift < HUGEPD_PUD_SHIFT) |
854 | pdshift = PMD_SHIFT; | 804 | pdshift = PMD_SHIFT; |
855 | else if (shift < PUD_SHIFT) | 805 | else if (shift < HUGEPD_PGD_SHIFT) |
856 | pdshift = PUD_SHIFT; | 806 | pdshift = PUD_SHIFT; |
857 | else | 807 | else |
858 | pdshift = PGDIR_SHIFT; | 808 | pdshift = PGDIR_SHIFT; |
@@ -860,14 +810,38 @@ static int __init hugetlbpage_init(void) | |||
860 | * if we have pdshift and shift value same, we don't | 810 | * if we have pdshift and shift value same, we don't |
861 | * use pgt cache for hugepd. | 811 | * use pgt cache for hugepd. |
862 | */ | 812 | */ |
863 | if (pdshift != shift) { | 813 | if (pdshift > shift) { |
864 | pgtable_cache_add(pdshift - shift, NULL); | 814 | pgtable_cache_add(pdshift - shift, NULL); |
865 | if (!PGT_CACHE(pdshift - shift)) | 815 | if (!PGT_CACHE(pdshift - shift)) |
866 | panic("hugetlbpage_init(): could not create " | 816 | panic("hugetlbpage_init(): could not create " |
867 | "pgtable cache for %d bit pagesize\n", shift); | 817 | "pgtable cache for %d bit pagesize\n", shift); |
868 | } | 818 | } |
819 | #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) | ||
820 | else if (!hugepte_cache) { | ||
821 | /* | ||
822 | * Create a kmem cache for hugeptes. The bottom bits in | ||
823 | * the pte have size information encoded in them, so | ||
824 | * align them to allow this | ||
825 | */ | ||
826 | hugepte_cache = kmem_cache_create("hugepte-cache", | ||
827 | sizeof(pte_t), | ||
828 | HUGEPD_SHIFT_MASK + 1, | ||
829 | 0, NULL); | ||
830 | if (hugepte_cache == NULL) | ||
831 | panic("%s: Unable to create kmem cache " | ||
832 | "for hugeptes\n", __func__); | ||
833 | |||
834 | } | ||
835 | #endif | ||
869 | } | 836 | } |
870 | 837 | ||
838 | #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) | ||
839 | /* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */ | ||
840 | if (mmu_psize_defs[MMU_PAGE_4M].shift) | ||
841 | HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift; | ||
842 | else if (mmu_psize_defs[MMU_PAGE_512K].shift) | ||
843 | HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift; | ||
844 | #else | ||
871 | /* Set default large page size. Currently, we pick 16M or 1M | 845 | /* Set default large page size. Currently, we pick 16M or 1M |
872 | * depending on what is available | 846 | * depending on what is available |
873 | */ | 847 | */ |
@@ -877,11 +851,13 @@ static int __init hugetlbpage_init(void) | |||
877 | HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift; | 851 | HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift; |
878 | else if (mmu_psize_defs[MMU_PAGE_2M].shift) | 852 | else if (mmu_psize_defs[MMU_PAGE_2M].shift) |
879 | HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift; | 853 | HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift; |
880 | 854 | #endif | |
855 | else | ||
856 | panic("%s: Unable to set default huge page size\n", __func__); | ||
881 | 857 | ||
882 | return 0; | 858 | return 0; |
883 | } | 859 | } |
884 | #endif | 860 | |
885 | arch_initcall(hugetlbpage_init); | 861 | arch_initcall(hugetlbpage_init); |
886 | 862 | ||
887 | void flush_dcache_icache_hugepage(struct page *page) | 863 | void flush_dcache_icache_hugepage(struct page *page) |
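The heart of the hugetlbpage.c rework is a single size comparison that replaces the per-subarch #ifdefs: when the huge page shift (pshift) is at least as large as the span covered by one directory entry (pdshift), several directory slots must point at the same hugepte, which is allocated from the dedicated hugepte_cache; otherwise a regular page-table cache is enough. A minimal sketch of that dispatch, assuming the helpers named in the hunks above (hugepte_cache, PGT_CACHE(), kmem_cache_zalloc()) behave as shown there; it is not the patched function itself.

/* Sketch only: mirrors the selection logic in __hugepte_alloc() above. */
static pte_t *alloc_hugepte(unsigned int pshift, unsigned int pdshift,
			    int *num_hugepd)
{
	struct kmem_cache *cachep;

	if (pshift >= pdshift) {
		/* one hugepte table backs 1 << (pshift - pdshift) slots */
		cachep = hugepte_cache;
		*num_hugepd = 1 << (pshift - pdshift);
	} else {
		/* hugepte table smaller than a directory span: normal cache */
		cachep = PGT_CACHE(pdshift - pshift);
		*num_hugepd = 1;
	}

	return kmem_cache_zalloc(cachep, GFP_KERNEL);
}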
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c new file mode 100644 index 000000000000..a175cd82ae8c --- /dev/null +++ b/arch/powerpc/mm/init-common.c | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | * PowerPC version | ||
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
4 | * | ||
5 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | ||
6 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
7 | * Copyright (C) 1996 Paul Mackerras | ||
8 | * | ||
9 | * Derived from "arch/i386/mm/init.c" | ||
10 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
11 | * | ||
12 | * Dave Engebretsen <engebret@us.ibm.com> | ||
13 | * Rework for PPC64 port. | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or | ||
16 | * modify it under the terms of the GNU General Public License | ||
17 | * as published by the Free Software Foundation; either version | ||
18 | * 2 of the License, or (at your option) any later version. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #undef DEBUG | ||
23 | |||
24 | #include <linux/string.h> | ||
25 | #include <asm/pgalloc.h> | ||
26 | #include <asm/pgtable.h> | ||
27 | |||
28 | static void pgd_ctor(void *addr) | ||
29 | { | ||
30 | memset(addr, 0, PGD_TABLE_SIZE); | ||
31 | } | ||
32 | |||
33 | static void pud_ctor(void *addr) | ||
34 | { | ||
35 | memset(addr, 0, PUD_TABLE_SIZE); | ||
36 | } | ||
37 | |||
38 | static void pmd_ctor(void *addr) | ||
39 | { | ||
40 | memset(addr, 0, PMD_TABLE_SIZE); | ||
41 | } | ||
42 | |||
43 | struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE]; | ||
44 | |||
45 | /* | ||
46 | * Create a kmem_cache() for pagetables. This is not used for PTE | ||
47 | * pages - they're linked to struct page, come from the normal free | ||
48 | * pages pool and have a different entry size (see real_pte_t) to | ||
49 | * everything else. Caches created by this function are used for all | ||
50 | * the higher level pagetables, and for hugepage pagetables. | ||
51 | */ | ||
52 | void pgtable_cache_add(unsigned shift, void (*ctor)(void *)) | ||
53 | { | ||
54 | char *name; | ||
55 | unsigned long table_size = sizeof(void *) << shift; | ||
56 | unsigned long align = table_size; | ||
57 | |||
58 | /* When batching pgtable pointers for RCU freeing, we store | ||
59 | * the index size in the low bits. Table alignment must be | ||
60 | * big enough to fit it. | ||
61 | * | ||
62 | * Likewise, hugepage pagetable pointers contain a (different) | ||
63 | * shift value in the low bits. All tables must be aligned so | ||
64 | * as to leave enough 0 bits in the address to contain it. */ | ||
65 | unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1, | ||
66 | HUGEPD_SHIFT_MASK + 1); | ||
67 | struct kmem_cache *new; | ||
68 | |||
69 | /* It would be nice if this was a BUILD_BUG_ON(), but at the | ||
70 | * moment, gcc doesn't seem to recognize is_power_of_2 as a | ||
71 | * constant expression, so so much for that. */ | ||
72 | BUG_ON(!is_power_of_2(minalign)); | ||
73 | BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE)); | ||
74 | |||
75 | if (PGT_CACHE(shift)) | ||
76 | return; /* Already have a cache of this size */ | ||
77 | |||
78 | align = max_t(unsigned long, align, minalign); | ||
79 | name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); | ||
80 | new = kmem_cache_create(name, table_size, align, 0, ctor); | ||
81 | kfree(name); | ||
82 | pgtable_cache[shift - 1] = new; | ||
83 | pr_debug("Allocated pgtable cache for order %d\n", shift); | ||
84 | } | ||
85 | |||
86 | |||
87 | void pgtable_cache_init(void) | ||
88 | { | ||
89 | pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor); | ||
90 | |||
91 | if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE)) | ||
92 | pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor); | ||
93 | /* | ||
94 | * In all current configs, when the PUD index exists it's the | ||
95 | * same size as either the pgd or pmd index except with THP enabled | ||
96 | * on book3s 64 | ||
97 | */ | ||
98 | if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) | ||
99 | pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor); | ||
100 | |||
101 | if (!PGT_CACHE(PGD_INDEX_SIZE)) | ||
102 | panic("Couldn't allocate pgd cache"); | ||
103 | if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE)) | ||
104 | panic("Couldn't allocate pmd pgtable caches"); | ||
105 | if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) | ||
106 | panic("Couldn't allocate pud pgtable caches"); | ||
107 | } | ||
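The minalign computation in pgtable_cache_add() exists because page-table and hugepage-table pointers get tagged in their low bits (an index size for RCU batch freeing, a shift value for hugepds), so every table must be aligned strongly enough to keep those bits free. A small stand-alone illustration of that packing, with made-up values; the real bounds come from MAX_PGTABLE_INDEX_SIZE and HUGEPD_SHIFT_MASK.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical: a table allocated with 4 KiB alignment */
	uintptr_t table = 0x10004000u;
	unsigned int index_size = 9;	/* e.g. a PMD index size */
	uintptr_t low_mask = 0xfffu;	/* bits guaranteed zero by alignment */

	/* tag the pointer, as the RCU-freeing path does with the index size */
	uintptr_t packed = table | index_size;

	printf("table %#lx\n", (unsigned long)(packed & ~low_mask));
	printf("index %lu\n",  (unsigned long)(packed &  low_mask));
	return 0;
}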
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 16ada1eb7e26..a000c3585390 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c | |||
@@ -80,83 +80,6 @@ EXPORT_SYMBOL_GPL(memstart_addr); | |||
80 | phys_addr_t kernstart_addr; | 80 | phys_addr_t kernstart_addr; |
81 | EXPORT_SYMBOL_GPL(kernstart_addr); | 81 | EXPORT_SYMBOL_GPL(kernstart_addr); |
82 | 82 | ||
83 | static void pgd_ctor(void *addr) | ||
84 | { | ||
85 | memset(addr, 0, PGD_TABLE_SIZE); | ||
86 | } | ||
87 | |||
88 | static void pud_ctor(void *addr) | ||
89 | { | ||
90 | memset(addr, 0, PUD_TABLE_SIZE); | ||
91 | } | ||
92 | |||
93 | static void pmd_ctor(void *addr) | ||
94 | { | ||
95 | memset(addr, 0, PMD_TABLE_SIZE); | ||
96 | } | ||
97 | |||
98 | struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE]; | ||
99 | |||
100 | /* | ||
101 | * Create a kmem_cache() for pagetables. This is not used for PTE | ||
102 | * pages - they're linked to struct page, come from the normal free | ||
103 | * pages pool and have a different entry size (see real_pte_t) to | ||
104 | * everything else. Caches created by this function are used for all | ||
105 | * the higher level pagetables, and for hugepage pagetables. | ||
106 | */ | ||
107 | void pgtable_cache_add(unsigned shift, void (*ctor)(void *)) | ||
108 | { | ||
109 | char *name; | ||
110 | unsigned long table_size = sizeof(void *) << shift; | ||
111 | unsigned long align = table_size; | ||
112 | |||
113 | /* When batching pgtable pointers for RCU freeing, we store | ||
114 | * the index size in the low bits. Table alignment must be | ||
115 | * big enough to fit it. | ||
116 | * | ||
117 | * Likewise, hugeapge pagetable pointers contain a (different) | ||
118 | * shift value in the low bits. All tables must be aligned so | ||
119 | * as to leave enough 0 bits in the address to contain it. */ | ||
120 | unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1, | ||
121 | HUGEPD_SHIFT_MASK + 1); | ||
122 | struct kmem_cache *new; | ||
123 | |||
124 | /* It would be nice if this was a BUILD_BUG_ON(), but at the | ||
125 | * moment, gcc doesn't seem to recognize is_power_of_2 as a | ||
126 | * constant expression, so so much for that. */ | ||
127 | BUG_ON(!is_power_of_2(minalign)); | ||
128 | BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE)); | ||
129 | |||
130 | if (PGT_CACHE(shift)) | ||
131 | return; /* Already have a cache of this size */ | ||
132 | |||
133 | align = max_t(unsigned long, align, minalign); | ||
134 | name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); | ||
135 | new = kmem_cache_create(name, table_size, align, 0, ctor); | ||
136 | kfree(name); | ||
137 | pgtable_cache[shift - 1] = new; | ||
138 | pr_debug("Allocated pgtable cache for order %d\n", shift); | ||
139 | } | ||
140 | |||
141 | |||
142 | void pgtable_cache_init(void) | ||
143 | { | ||
144 | pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor); | ||
145 | pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor); | ||
146 | /* | ||
147 | * In all current configs, when the PUD index exists it's the | ||
148 | * same size as either the pgd or pmd index except with THP enabled | ||
149 | * on book3s 64 | ||
150 | */ | ||
151 | if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) | ||
152 | pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor); | ||
153 | |||
154 | if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX)) | ||
155 | panic("Couldn't allocate pgtable caches"); | ||
156 | if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) | ||
157 | panic("Couldn't allocate pud pgtable caches"); | ||
158 | } | ||
159 | |||
160 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | 83 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
161 | /* | 84 | /* |
162 | * Given an address within the vmemmap, determine the pfn of the page that | 85 | * Given an address within the vmemmap, determine the pfn of the page that |
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 0ae0572bc239..a65c0b4c0669 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c | |||
@@ -42,43 +42,6 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */ | |||
42 | 42 | ||
43 | extern char etext[], _stext[], _sinittext[], _einittext[]; | 43 | extern char etext[], _stext[], _sinittext[], _einittext[]; |
44 | 44 | ||
45 | #define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT) | ||
46 | |||
47 | #ifndef CONFIG_PPC_4K_PAGES | ||
48 | static struct kmem_cache *pgtable_cache; | ||
49 | |||
50 | void pgtable_cache_init(void) | ||
51 | { | ||
52 | pgtable_cache = kmem_cache_create("PGDIR cache", 1 << PGDIR_ORDER, | ||
53 | 1 << PGDIR_ORDER, 0, NULL); | ||
54 | if (pgtable_cache == NULL) | ||
55 | panic("Couldn't allocate pgtable caches"); | ||
56 | } | ||
57 | #endif | ||
58 | |||
59 | pgd_t *pgd_alloc(struct mm_struct *mm) | ||
60 | { | ||
61 | pgd_t *ret; | ||
62 | |||
63 | /* pgdir take page or two with 4K pages and a page fraction otherwise */ | ||
64 | #ifndef CONFIG_PPC_4K_PAGES | ||
65 | ret = kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_ZERO); | ||
66 | #else | ||
67 | ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, | ||
68 | PGDIR_ORDER - PAGE_SHIFT); | ||
69 | #endif | ||
70 | return ret; | ||
71 | } | ||
72 | |||
73 | void pgd_free(struct mm_struct *mm, pgd_t *pgd) | ||
74 | { | ||
75 | #ifndef CONFIG_PPC_4K_PAGES | ||
76 | kmem_cache_free(pgtable_cache, (void *)pgd); | ||
77 | #else | ||
78 | free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT); | ||
79 | #endif | ||
80 | } | ||
81 | |||
82 | __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | 45 | __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) |
83 | { | 46 | { |
84 | pte_t *pte; | 47 | pte_t *pte; |
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index 050badc0ebd3..ba28fcb98597 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c | |||
@@ -53,7 +53,7 @@ | |||
53 | * other sizes not listed here. The .ind field is only used on MMUs that have | 53 | * other sizes not listed here. The .ind field is only used on MMUs that have |
54 | * indirect page table entries. | 54 | * indirect page table entries. |
55 | */ | 55 | */ |
56 | #ifdef CONFIG_PPC_BOOK3E_MMU | 56 | #if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx) |
57 | #ifdef CONFIG_PPC_FSL_BOOK3E | 57 | #ifdef CONFIG_PPC_FSL_BOOK3E |
58 | struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { | 58 | struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { |
59 | [MMU_PAGE_4K] = { | 59 | [MMU_PAGE_4K] = { |
@@ -85,6 +85,25 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { | |||
85 | .enc = BOOK3E_PAGESZ_1GB, | 85 | .enc = BOOK3E_PAGESZ_1GB, |
86 | }, | 86 | }, |
87 | }; | 87 | }; |
88 | #elif defined(CONFIG_PPC_8xx) | ||
89 | struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { | ||
90 | /* we only manage 4k and 16k pages as normal pages */ | ||
91 | #ifdef CONFIG_PPC_4K_PAGES | ||
92 | [MMU_PAGE_4K] = { | ||
93 | .shift = 12, | ||
94 | }, | ||
95 | #else | ||
96 | [MMU_PAGE_16K] = { | ||
97 | .shift = 14, | ||
98 | }, | ||
99 | #endif | ||
100 | [MMU_PAGE_512K] = { | ||
101 | .shift = 19, | ||
102 | }, | ||
103 | [MMU_PAGE_8M] = { | ||
104 | .shift = 23, | ||
105 | }, | ||
106 | }; | ||
88 | #else | 107 | #else |
89 | struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { | 108 | struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { |
90 | [MMU_PAGE_4K] = { | 109 | [MMU_PAGE_4K] = { |
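With 512K and 8M entries added to the 8xx table, generic code can find them the usual way: scan mmu_psize_defs[] for a matching .shift and skip entries whose shift is zero (unsupported on this MMU). A sketch of that lookup, in the spirit of the kernel's shift_to_mmu_psize(); simplified, not the exact implementation.

/* Sketch: map a page shift (e.g. 19 for 512K, 23 for 8M) to a psize index. */
static int example_shift_to_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; psize++)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;

	return -1;	/* size not supported by this MMU */
}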
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 24717d060008..08f92f6ed228 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c | |||
@@ -441,8 +441,4 @@ static struct platform_driver pmc_driver = { | |||
441 | .remove = pmc_remove | 441 | .remove = pmc_remove |
442 | }; | 442 | }; |
443 | 443 | ||
444 | static int pmc_init(void) | 444 | builtin_platform_driver(pmc_driver); |
445 | { | ||
446 | return platform_driver_register(&pmc_driver); | ||
447 | } | ||
448 | device_initcall(pmc_init); | ||
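builtin_platform_driver() is the stock helper for exactly this boilerplate; roughly speaking (details of the macro expansion elided), the one-liner stands in for the removed init function:

/* Approximately what builtin_platform_driver(pmc_driver) provides: */
static int __init pmc_driver_init(void)
{
	return platform_driver_register(&pmc_driver);
}
device_initcall(pmc_driver_init);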
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index 9dc1d28975b9..47b389dc4938 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig | |||
@@ -253,6 +253,8 @@ endif # PPC32 | |||
253 | config PPC_QEMU_E500 | 253 | config PPC_QEMU_E500 |
254 | bool "QEMU generic e500 platform" | 254 | bool "QEMU generic e500 platform" |
255 | select DEFAULT_UIMAGE | 255 | select DEFAULT_UIMAGE |
256 | select E500 | ||
257 | select PPC_E500MC if PPC64 | ||
256 | help | 258 | help |
257 | This option enables support for running as a QEMU guest using | 259 | This option enables support for running as a QEMU guest using |
258 | QEMU's generic e500 machine. This is not required if you're | 260 | QEMU's generic e500 machine. This is not required if you're |
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index 564d99bb2a26..80cbcb0ad9b1 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig | |||
@@ -130,6 +130,7 @@ config 8xx_CPU6 | |||
130 | 130 | ||
131 | config 8xx_CPU15 | 131 | config 8xx_CPU15 |
132 | bool "CPU15 Silicon Errata" | 132 | bool "CPU15 Silicon Errata" |
133 | depends on !HUGETLB_PAGE | ||
133 | default y | 134 | default y |
134 | help | 135 | help |
135 | This enables a workaround for erratum CPU15 on MPC8xx chips. | 136 | This enables a workaround for erratum CPU15 on MPC8xx chips. |
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index ca2da30ad2ab..6e89e5a8d4fb 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype | |||
@@ -34,6 +34,7 @@ config PPC_8xx | |||
34 | select FSL_SOC | 34 | select FSL_SOC |
35 | select 8xx | 35 | select 8xx |
36 | select PPC_LIB_RHEAP | 36 | select PPC_LIB_RHEAP |
37 | select SYS_SUPPORTS_HUGETLBFS | ||
37 | 38 | ||
38 | config 40x | 39 | config 40x |
39 | bool "AMCC 40x" | 40 | bool "AMCC 40x" |
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c index 424b67fdb57f..5340a483cf55 100644 --- a/arch/powerpc/sysdev/fsl_lbc.c +++ b/arch/powerpc/sysdev/fsl_lbc.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #include <asm/prom.h> | 31 | #include <asm/prom.h> |
32 | #include <asm/fsl_lbc.h> | 32 | #include <asm/fsl_lbc.h> |
33 | 33 | ||
34 | static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock); | 34 | static DEFINE_SPINLOCK(fsl_lbc_lock); |
35 | struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev; | 35 | struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev; |
36 | EXPORT_SYMBOL(fsl_lbc_ctrl_dev); | 36 | EXPORT_SYMBOL(fsl_lbc_ctrl_dev); |
37 | 37 | ||
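DEFINE_SPINLOCK() is the idiomatic way to declare a statically initialized lock; it bundles the declaration and the __SPIN_LOCK_UNLOCKED() initializer in one line. A short usage sketch with a hypothetical critical section, for reference only:

static DEFINE_SPINLOCK(fsl_lbc_lock);

static void example_locked_op(void)
{
	unsigned long flags;

	spin_lock_irqsave(&fsl_lbc_lock, flags);
	/* ... touch shared local-bus controller state ... */
	spin_unlock_irqrestore(&fsl_lbc_lock, flags);
}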
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c index 1d6fd7c59fe9..232225e7f863 100644 --- a/arch/powerpc/sysdev/fsl_pmc.c +++ b/arch/powerpc/sysdev/fsl_pmc.c | |||
@@ -85,8 +85,4 @@ static struct platform_driver pmc_driver = { | |||
85 | .probe = pmc_probe, | 85 | .probe = pmc_probe, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | static int __init pmc_init(void) | 88 | builtin_platform_driver(pmc_driver); |
89 | { | ||
90 | return platform_driver_register(&pmc_driver); | ||
91 | } | ||
92 | device_initcall(pmc_init); | ||
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index d93056eedcb0..19101f9cfcfc 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c | |||
@@ -77,13 +77,10 @@ phys_addr_t get_immrbase(void) | |||
77 | 77 | ||
78 | EXPORT_SYMBOL(get_immrbase); | 78 | EXPORT_SYMBOL(get_immrbase); |
79 | 79 | ||
80 | static u32 sysfreq = -1; | ||
81 | |||
82 | u32 fsl_get_sys_freq(void) | 80 | u32 fsl_get_sys_freq(void) |
83 | { | 81 | { |
82 | static u32 sysfreq = -1; | ||
84 | struct device_node *soc; | 83 | struct device_node *soc; |
85 | const u32 *prop; | ||
86 | int size; | ||
87 | 84 | ||
88 | if (sysfreq != -1) | 85 | if (sysfreq != -1) |
89 | return sysfreq; | 86 | return sysfreq; |
@@ -92,12 +89,9 @@ u32 fsl_get_sys_freq(void) | |||
92 | if (!soc) | 89 | if (!soc) |
93 | return -1; | 90 | return -1; |
94 | 91 | ||
95 | prop = of_get_property(soc, "clock-frequency", &size); | 92 | of_property_read_u32(soc, "clock-frequency", &sysfreq); |
96 | if (!prop || size != sizeof(*prop) || *prop == 0) | 93 | if (sysfreq == -1 || !sysfreq) |
97 | prop = of_get_property(soc, "bus-frequency", &size); | 94 | of_property_read_u32(soc, "bus-frequency", &sysfreq); |
98 | |||
99 | if (prop && size == sizeof(*prop)) | ||
100 | sysfreq = *prop; | ||
101 | 95 | ||
102 | of_node_put(soc); | 96 | of_node_put(soc); |
103 | return sysfreq; | 97 | return sysfreq; |
@@ -106,23 +100,17 @@ EXPORT_SYMBOL(fsl_get_sys_freq); | |||
106 | 100 | ||
107 | #if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx) | 101 | #if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx) |
108 | 102 | ||
109 | static u32 brgfreq = -1; | ||
110 | |||
111 | u32 get_brgfreq(void) | 103 | u32 get_brgfreq(void) |
112 | { | 104 | { |
105 | static u32 brgfreq = -1; | ||
113 | struct device_node *node; | 106 | struct device_node *node; |
114 | const unsigned int *prop; | ||
115 | int size; | ||
116 | 107 | ||
117 | if (brgfreq != -1) | 108 | if (brgfreq != -1) |
118 | return brgfreq; | 109 | return brgfreq; |
119 | 110 | ||
120 | node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg"); | 111 | node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg"); |
121 | if (node) { | 112 | if (node) { |
122 | prop = of_get_property(node, "clock-frequency", &size); | 113 | of_property_read_u32(node, "clock-frequency", &brgfreq); |
123 | if (prop && size == 4) | ||
124 | brgfreq = *prop; | ||
125 | |||
126 | of_node_put(node); | 114 | of_node_put(node); |
127 | return brgfreq; | 115 | return brgfreq; |
128 | } | 116 | } |
@@ -135,15 +123,11 @@ u32 get_brgfreq(void) | |||
135 | node = of_find_node_by_type(NULL, "qe"); | 123 | node = of_find_node_by_type(NULL, "qe"); |
136 | 124 | ||
137 | if (node) { | 125 | if (node) { |
138 | prop = of_get_property(node, "brg-frequency", &size); | 126 | of_property_read_u32(node, "brg-frequency", &brgfreq); |
139 | if (prop && size == 4) | 127 | if (brgfreq == -1 || !brgfreq) |
140 | brgfreq = *prop; | 128 | if (!of_property_read_u32(node, "bus-frequency", |
141 | 129 | &brgfreq)) | |
142 | if (brgfreq == -1 || brgfreq == 0) { | 130 | brgfreq /= 2; |
143 | prop = of_get_property(node, "bus-frequency", &size); | ||
144 | if (prop && size == 4) | ||
145 | brgfreq = *prop / 2; | ||
146 | } | ||
147 | of_node_put(node); | 131 | of_node_put(node); |
148 | } | 132 | } |
149 | 133 | ||
@@ -152,10 +136,9 @@ u32 get_brgfreq(void) | |||
152 | 136 | ||
153 | EXPORT_SYMBOL(get_brgfreq); | 137 | EXPORT_SYMBOL(get_brgfreq); |
154 | 138 | ||
155 | static u32 fs_baudrate = -1; | ||
156 | |||
157 | u32 get_baudrate(void) | 139 | u32 get_baudrate(void) |
158 | { | 140 | { |
141 | static u32 fs_baudrate = -1; | ||
159 | struct device_node *node; | 142 | struct device_node *node; |
160 | 143 | ||
161 | if (fs_baudrate != -1) | 144 | if (fs_baudrate != -1) |
@@ -163,12 +146,7 @@ u32 get_baudrate(void) | |||
163 | 146 | ||
164 | node = of_find_node_by_type(NULL, "serial"); | 147 | node = of_find_node_by_type(NULL, "serial"); |
165 | if (node) { | 148 | if (node) { |
166 | int size; | 149 | of_property_read_u32(node, "current-speed", &fs_baudrate); |
167 | const unsigned int *prop = of_get_property(node, | ||
168 | "current-speed", &size); | ||
169 | |||
170 | if (prop) | ||
171 | fs_baudrate = *prop; | ||
172 | of_node_put(node); | 150 | of_node_put(node); |
173 | } | 151 | } |
174 | 152 | ||
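The fsl_soc.c conversions lean on the fact that of_property_read_u32() returns an error and leaves its output untouched when the property is absent or malformed, so the -1 sentinel survives a failed read and the fallback property can be tried next. A condensed sketch of the pattern, simplified and using a hypothetical helper name; not the exact fsl_get_sys_freq().

/* Sketch: read "clock-frequency", falling back to "bus-frequency". */
static u32 example_read_freq(struct device_node *np)
{
	u32 freq = -1;

	of_property_read_u32(np, "clock-frequency", &freq);
	if (freq == -1 || !freq)
		of_property_read_u32(np, "bus-frequency", &freq);

	return freq;
}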
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c index ffa48fdbb1a9..a3d6d7cfa929 100644 --- a/drivers/soc/fsl/qbman/bman.c +++ b/drivers/soc/fsl/qbman/bman.c | |||
@@ -167,12 +167,12 @@ struct bm_portal { | |||
167 | /* Cache-inhibited register access. */ | 167 | /* Cache-inhibited register access. */ |
168 | static inline u32 bm_in(struct bm_portal *p, u32 offset) | 168 | static inline u32 bm_in(struct bm_portal *p, u32 offset) |
169 | { | 169 | { |
170 | return __raw_readl(p->addr.ci + offset); | 170 | return be32_to_cpu(__raw_readl(p->addr.ci + offset)); |
171 | } | 171 | } |
172 | 172 | ||
173 | static inline void bm_out(struct bm_portal *p, u32 offset, u32 val) | 173 | static inline void bm_out(struct bm_portal *p, u32 offset, u32 val) |
174 | { | 174 | { |
175 | __raw_writel(val, p->addr.ci + offset); | 175 | __raw_writel(cpu_to_be32(val), p->addr.ci + offset); |
176 | } | 176 | } |
177 | 177 | ||
178 | /* Cache Enabled Portal Access */ | 178 | /* Cache Enabled Portal Access */ |
@@ -188,7 +188,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset) | |||
188 | 188 | ||
189 | static inline u32 bm_ce_in(struct bm_portal *p, u32 offset) | 189 | static inline u32 bm_ce_in(struct bm_portal *p, u32 offset) |
190 | { | 190 | { |
191 | return __raw_readl(p->addr.ce + offset); | 191 | return be32_to_cpu(__raw_readl(p->addr.ce + offset)); |
192 | } | 192 | } |
193 | 193 | ||
194 | struct bman_portal { | 194 | struct bman_portal { |
@@ -391,7 +391,7 @@ static void bm_rcr_finish(struct bm_portal *portal) | |||
391 | 391 | ||
392 | i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1); | 392 | i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1); |
393 | if (i != rcr_ptr2idx(rcr->cursor)) | 393 | if (i != rcr_ptr2idx(rcr->cursor)) |
394 | pr_crit("losing uncommited RCR entries\n"); | 394 | pr_crit("losing uncommitted RCR entries\n"); |
395 | 395 | ||
396 | i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1); | 396 | i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1); |
397 | if (i != rcr->ci) | 397 | if (i != rcr->ci) |
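The bm_in()/bm_out() changes above make the byte order explicit: the QBMan portal registers are big-endian, while __raw_readl()/__raw_writel() are raw, native-endian accessors, so the swap is needed on little-endian cores and compiles to nothing on big-endian ones. The qm_in()/qm_out() change further down is the same idea. A sketch of the resulting accessor shape, assuming that register layout:

/* Sketch: endian-correct cache-inhibited portal register access. */
static inline u32 portal_read(void __iomem *ci, u32 offset)
{
	return be32_to_cpu(__raw_readl(ci + offset));
}

static inline void portal_write(void __iomem *ci, u32 offset, u32 val)
{
	__raw_writel(cpu_to_be32(val), ci + offset);
}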
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c index 9deb0524543f..a8e8389a6894 100644 --- a/drivers/soc/fsl/qbman/bman_ccsr.c +++ b/drivers/soc/fsl/qbman/bman_ccsr.c | |||
@@ -181,8 +181,7 @@ static int fsl_bman_probe(struct platform_device *pdev) | |||
181 | node->full_name); | 181 | node->full_name); |
182 | return -ENXIO; | 182 | return -ENXIO; |
183 | } | 183 | } |
184 | bm_ccsr_start = devm_ioremap(dev, res->start, | 184 | bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res)); |
185 | res->end - res->start + 1); | ||
186 | if (!bm_ccsr_start) | 185 | if (!bm_ccsr_start) |
187 | return -ENXIO; | 186 | return -ENXIO; |
188 | 187 | ||
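resource_size() is the canonical helper for this computation: it evaluates to res->end - res->start + 1 and avoids the easy-to-miss off-by-one. A usage sketch with a hypothetical probe body, simplified:

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	/* resource_size(res) == res->end - res->start + 1 */
	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENXIO;

	return 0;
}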
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c index 6579cc18811a..dd3f5d7617d2 100644 --- a/drivers/soc/fsl/qbman/bman_portal.c +++ b/drivers/soc/fsl/qbman/bman_portal.c | |||
@@ -146,15 +146,19 @@ static int bman_portal_probe(struct platform_device *pdev) | |||
146 | pcfg->irq = irq; | 146 | pcfg->irq = irq; |
147 | 147 | ||
148 | va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); | 148 | va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); |
149 | if (!va) | 149 | if (!va) { |
150 | dev_err(dev, "ioremap::CE failed\n"); | ||
150 | goto err_ioremap1; | 151 | goto err_ioremap1; |
152 | } | ||
151 | 153 | ||
152 | pcfg->addr_virt[DPAA_PORTAL_CE] = va; | 154 | pcfg->addr_virt[DPAA_PORTAL_CE] = va; |
153 | 155 | ||
154 | va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), | 156 | va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), |
155 | _PAGE_GUARDED | _PAGE_NO_CACHE); | 157 | _PAGE_GUARDED | _PAGE_NO_CACHE); |
156 | if (!va) | 158 | if (!va) { |
159 | dev_err(dev, "ioremap::CI failed\n"); | ||
157 | goto err_ioremap2; | 160 | goto err_ioremap2; |
161 | } | ||
158 | 162 | ||
159 | pcfg->addr_virt[DPAA_PORTAL_CI] = va; | 163 | pcfg->addr_virt[DPAA_PORTAL_CI] = va; |
160 | 164 | ||
@@ -170,8 +174,10 @@ static int bman_portal_probe(struct platform_device *pdev) | |||
170 | spin_unlock(&bman_lock); | 174 | spin_unlock(&bman_lock); |
171 | pcfg->cpu = cpu; | 175 | pcfg->cpu = cpu; |
172 | 176 | ||
173 | if (!init_pcfg(pcfg)) | 177 | if (!init_pcfg(pcfg)) { |
174 | goto err_ioremap2; | 178 | dev_err(dev, "portal init failed\n"); |
179 | goto err_portal_init; | ||
180 | } | ||
175 | 181 | ||
176 | /* clear irq affinity if assigned cpu is offline */ | 182 | /* clear irq affinity if assigned cpu is offline */ |
177 | if (!cpu_online(cpu)) | 183 | if (!cpu_online(cpu)) |
@@ -179,10 +185,11 @@ static int bman_portal_probe(struct platform_device *pdev) | |||
179 | 185 | ||
180 | return 0; | 186 | return 0; |
181 | 187 | ||
188 | err_portal_init: | ||
189 | iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]); | ||
182 | err_ioremap2: | 190 | err_ioremap2: |
183 | iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); | 191 | iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); |
184 | err_ioremap1: | 192 | err_ioremap1: |
185 | dev_err(dev, "ioremap failed\n"); | ||
186 | return -ENXIO; | 193 | return -ENXIO; |
187 | } | 194 | } |
188 | 195 | ||
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h index b63fd72295c6..2eaf3184f61d 100644 --- a/drivers/soc/fsl/qbman/dpaa_sys.h +++ b/drivers/soc/fsl/qbman/dpaa_sys.h | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/kthread.h> | 38 | #include <linux/kthread.h> |
39 | #include <linux/vmalloc.h> | 39 | #include <linux/vmalloc.h> |
40 | #include <linux/platform_device.h> | 40 | #include <linux/platform_device.h> |
41 | #include <linux/of.h> | ||
41 | #include <linux/of_reserved_mem.h> | 42 | #include <linux/of_reserved_mem.h> |
42 | #include <linux/prefetch.h> | 43 | #include <linux/prefetch.h> |
43 | #include <linux/genalloc.h> | 44 | #include <linux/genalloc.h> |
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 119054bc922b..6f509f68085e 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c | |||
@@ -140,10 +140,10 @@ enum qm_mr_cmode { /* matches QCSP_CFG::MM */ | |||
140 | struct qm_eqcr_entry { | 140 | struct qm_eqcr_entry { |
141 | u8 _ncw_verb; /* writes to this are non-coherent */ | 141 | u8 _ncw_verb; /* writes to this are non-coherent */ |
142 | u8 dca; | 142 | u8 dca; |
143 | u16 seqnum; | 143 | __be16 seqnum; |
144 | u32 orp; /* 24-bit */ | 144 | u8 __reserved[4]; |
145 | u32 fqid; /* 24-bit */ | 145 | __be32 fqid; /* 24-bit */ |
146 | u32 tag; | 146 | __be32 tag; |
147 | struct qm_fd fd; | 147 | struct qm_fd fd; |
148 | u8 __reserved3[32]; | 148 | u8 __reserved3[32]; |
149 | } __packed; | 149 | } __packed; |
@@ -183,41 +183,22 @@ struct qm_mr { | |||
183 | }; | 183 | }; |
184 | 184 | ||
185 | /* MC (Management Command) command */ | 185 | /* MC (Management Command) command */ |
186 | /* "Query FQ" */ | 186 | /* "FQ" command layout */ |
187 | struct qm_mcc_queryfq { | 187 | struct qm_mcc_fq { |
188 | u8 _ncw_verb; | 188 | u8 _ncw_verb; |
189 | u8 __reserved1[3]; | 189 | u8 __reserved1[3]; |
190 | u32 fqid; /* 24-bit */ | 190 | __be32 fqid; /* 24-bit */ |
191 | u8 __reserved2[56]; | 191 | u8 __reserved2[56]; |
192 | } __packed; | 192 | } __packed; |
193 | /* "Alter FQ State Commands " */ | ||
194 | struct qm_mcc_alterfq { | ||
195 | u8 _ncw_verb; | ||
196 | u8 __reserved1[3]; | ||
197 | u32 fqid; /* 24-bit */ | ||
198 | u8 __reserved2; | ||
199 | u8 count; /* number of consecutive FQID */ | ||
200 | u8 __reserved3[10]; | ||
201 | u32 context_b; /* frame queue context b */ | ||
202 | u8 __reserved4[40]; | ||
203 | } __packed; | ||
204 | 193 | ||
205 | /* "Query CGR" */ | 194 | /* "CGR" command layout */ |
206 | struct qm_mcc_querycgr { | 195 | struct qm_mcc_cgr { |
207 | u8 _ncw_verb; | 196 | u8 _ncw_verb; |
208 | u8 __reserved1[30]; | 197 | u8 __reserved1[30]; |
209 | u8 cgid; | 198 | u8 cgid; |
210 | u8 __reserved2[32]; | 199 | u8 __reserved2[32]; |
211 | }; | 200 | }; |
212 | 201 | ||
213 | struct qm_mcc_querywq { | ||
214 | u8 _ncw_verb; | ||
215 | u8 __reserved; | ||
216 | /* select channel if verb != QUERYWQ_DEDICATED */ | ||
217 | u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */ | ||
218 | u8 __reserved2[60]; | ||
219 | } __packed; | ||
220 | |||
221 | #define QM_MCC_VERB_VBIT 0x80 | 202 | #define QM_MCC_VERB_VBIT 0x80 |
222 | #define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */ | 203 | #define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */ |
223 | #define QM_MCC_VERB_INITFQ_PARKED 0x40 | 204 | #define QM_MCC_VERB_INITFQ_PARKED 0x40 |
@@ -243,12 +224,9 @@ union qm_mc_command { | |||
243 | u8 __reserved[63]; | 224 | u8 __reserved[63]; |
244 | }; | 225 | }; |
245 | struct qm_mcc_initfq initfq; | 226 | struct qm_mcc_initfq initfq; |
246 | struct qm_mcc_queryfq queryfq; | ||
247 | struct qm_mcc_alterfq alterfq; | ||
248 | struct qm_mcc_initcgr initcgr; | 227 | struct qm_mcc_initcgr initcgr; |
249 | struct qm_mcc_querycgr querycgr; | 228 | struct qm_mcc_fq fq; |
250 | struct qm_mcc_querywq querywq; | 229 | struct qm_mcc_cgr cgr; |
251 | struct qm_mcc_queryfq_np queryfq_np; | ||
252 | }; | 230 | }; |
253 | 231 | ||
254 | /* MC (Management Command) result */ | 232 | /* MC (Management Command) result */ |
@@ -343,12 +321,12 @@ struct qm_portal { | |||
343 | /* Cache-inhibited register access. */ | 321 | /* Cache-inhibited register access. */ |
344 | static inline u32 qm_in(struct qm_portal *p, u32 offset) | 322 | static inline u32 qm_in(struct qm_portal *p, u32 offset) |
345 | { | 323 | { |
346 | return __raw_readl(p->addr.ci + offset); | 324 | return be32_to_cpu(__raw_readl(p->addr.ci + offset)); |
347 | } | 325 | } |
348 | 326 | ||
349 | static inline void qm_out(struct qm_portal *p, u32 offset, u32 val) | 327 | static inline void qm_out(struct qm_portal *p, u32 offset, u32 val) |
350 | { | 328 | { |
351 | __raw_writel(val, p->addr.ci + offset); | 329 | __raw_writel(cpu_to_be32(val), p->addr.ci + offset); |
352 | } | 330 | } |
353 | 331 | ||
354 | /* Cache Enabled Portal Access */ | 332 | /* Cache Enabled Portal Access */ |
@@ -364,7 +342,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset) | |||
364 | 342 | ||
365 | static inline u32 qm_ce_in(struct qm_portal *p, u32 offset) | 343 | static inline u32 qm_ce_in(struct qm_portal *p, u32 offset) |
366 | { | 344 | { |
367 | return __raw_readl(p->addr.ce + offset); | 345 | return be32_to_cpu(__raw_readl(p->addr.ce + offset)); |
368 | } | 346 | } |
369 | 347 | ||
370 | /* --- EQCR API --- */ | 348 | /* --- EQCR API --- */ |
@@ -443,7 +421,7 @@ static inline void qm_eqcr_finish(struct qm_portal *portal) | |||
443 | 421 | ||
444 | DPAA_ASSERT(!eqcr->busy); | 422 | DPAA_ASSERT(!eqcr->busy); |
445 | if (pi != eqcr_ptr2idx(eqcr->cursor)) | 423 | if (pi != eqcr_ptr2idx(eqcr->cursor)) |
446 | pr_crit("losing uncommited EQCR entries\n"); | 424 | pr_crit("losing uncommitted EQCR entries\n"); |
447 | if (ci != eqcr->ci) | 425 | if (ci != eqcr->ci) |
448 | pr_crit("missing existing EQCR completions\n"); | 426 | pr_crit("missing existing EQCR completions\n"); |
449 | if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor)) | 427 | if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor)) |
@@ -492,8 +470,7 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal | |||
492 | static inline void eqcr_commit_checks(struct qm_eqcr *eqcr) | 470 | static inline void eqcr_commit_checks(struct qm_eqcr *eqcr) |
493 | { | 471 | { |
494 | DPAA_ASSERT(eqcr->busy); | 472 | DPAA_ASSERT(eqcr->busy); |
495 | DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); | 473 | DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK)); |
496 | DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); | ||
497 | DPAA_ASSERT(eqcr->available >= 1); | 474 | DPAA_ASSERT(eqcr->available >= 1); |
498 | } | 475 | } |
499 | 476 | ||
@@ -962,8 +939,6 @@ struct qman_portal { | |||
962 | u32 sdqcr; | 939 | u32 sdqcr; |
963 | /* probing time config params for cpu-affine portals */ | 940 | /* probing time config params for cpu-affine portals */ |
964 | const struct qm_portal_config *config; | 941 | const struct qm_portal_config *config; |
965 | /* needed for providing a non-NULL device to dma_map_***() */ | ||
966 | struct platform_device *pdev; | ||
967 | /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ | 942 | /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ |
968 | struct qman_cgrs *cgrs; | 943 | struct qman_cgrs *cgrs; |
969 | /* linked-list of CSCN handlers. */ | 944 | /* linked-list of CSCN handlers. */ |
@@ -1133,7 +1108,6 @@ static int qman_create_portal(struct qman_portal *portal, | |||
1133 | const struct qman_cgrs *cgrs) | 1108 | const struct qman_cgrs *cgrs) |
1134 | { | 1109 | { |
1135 | struct qm_portal *p; | 1110 | struct qm_portal *p; |
1136 | char buf[16]; | ||
1137 | int ret; | 1111 | int ret; |
1138 | u32 isdr; | 1112 | u32 isdr; |
1139 | 1113 | ||
@@ -1196,15 +1170,6 @@ static int qman_create_portal(struct qman_portal *portal, | |||
1196 | portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | | 1170 | portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | |
1197 | QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | | 1171 | QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | |
1198 | QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; | 1172 | QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; |
1199 | sprintf(buf, "qportal-%d", c->channel); | ||
1200 | portal->pdev = platform_device_alloc(buf, -1); | ||
1201 | if (!portal->pdev) | ||
1202 | goto fail_devalloc; | ||
1203 | if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) | ||
1204 | goto fail_devadd; | ||
1205 | ret = platform_device_add(portal->pdev); | ||
1206 | if (ret) | ||
1207 | goto fail_devadd; | ||
1208 | isdr = 0xffffffff; | 1173 | isdr = 0xffffffff; |
1209 | qm_out(p, QM_REG_ISDR, isdr); | 1174 | qm_out(p, QM_REG_ISDR, isdr); |
1210 | portal->irq_sources = 0; | 1175 | portal->irq_sources = 0; |
@@ -1239,8 +1204,8 @@ static int qman_create_portal(struct qman_portal *portal, | |||
1239 | /* special handling, drain just in case it's a few FQRNIs */ | 1204 | /* special handling, drain just in case it's a few FQRNIs */ |
1240 | const union qm_mr_entry *e = qm_mr_current(p); | 1205 | const union qm_mr_entry *e = qm_mr_current(p); |
1241 | 1206 | ||
1242 | dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x", | 1207 | dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n", |
1243 | e->verb, e->ern.rc, e->ern.fd.addr_lo); | 1208 | e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd)); |
1244 | goto fail_dqrr_mr_empty; | 1209 | goto fail_dqrr_mr_empty; |
1245 | } | 1210 | } |
1246 | /* Success */ | 1211 | /* Success */ |
@@ -1256,10 +1221,6 @@ fail_eqcr_empty: | |||
1256 | fail_affinity: | 1221 | fail_affinity: |
1257 | free_irq(c->irq, portal); | 1222 | free_irq(c->irq, portal); |
1258 | fail_irq: | 1223 | fail_irq: |
1259 | platform_device_del(portal->pdev); | ||
1260 | fail_devadd: | ||
1261 | platform_device_put(portal->pdev); | ||
1262 | fail_devalloc: | ||
1263 | kfree(portal->cgrs); | 1224 | kfree(portal->cgrs); |
1264 | fail_cgrs: | 1225 | fail_cgrs: |
1265 | qm_mc_finish(p); | 1226 | qm_mc_finish(p); |
@@ -1321,9 +1282,6 @@ static void qman_destroy_portal(struct qman_portal *qm) | |||
1321 | qm_dqrr_finish(&qm->p); | 1282 | qm_dqrr_finish(&qm->p); |
1322 | qm_eqcr_finish(&qm->p); | 1283 | qm_eqcr_finish(&qm->p); |
1323 | 1284 | ||
1324 | platform_device_del(qm->pdev); | ||
1325 | platform_device_put(qm->pdev); | ||
1326 | |||
1327 | qm->config = NULL; | 1285 | qm->config = NULL; |
1328 | } | 1286 | } |
1329 | 1287 | ||
@@ -1428,7 +1386,7 @@ static void qm_mr_process_task(struct work_struct *work) | |||
1428 | case QM_MR_VERB_FQRN: | 1386 | case QM_MR_VERB_FQRN: |
1429 | case QM_MR_VERB_FQRL: | 1387 | case QM_MR_VERB_FQRL: |
1430 | /* Lookup in the retirement table */ | 1388 | /* Lookup in the retirement table */ |
1431 | fq = fqid_to_fq(msg->fq.fqid); | 1389 | fq = fqid_to_fq(qm_fqid_get(&msg->fq)); |
1432 | if (WARN_ON(!fq)) | 1390 | if (WARN_ON(!fq)) |
1433 | break; | 1391 | break; |
1434 | fq_state_change(p, fq, msg, verb); | 1392 | fq_state_change(p, fq, msg, verb); |
@@ -1437,7 +1395,7 @@ static void qm_mr_process_task(struct work_struct *work) | |||
1437 | break; | 1395 | break; |
1438 | case QM_MR_VERB_FQPN: | 1396 | case QM_MR_VERB_FQPN: |
1439 | /* Parked */ | 1397 | /* Parked */ |
1440 | fq = tag_to_fq(msg->fq.contextB); | 1398 | fq = tag_to_fq(be32_to_cpu(msg->fq.context_b)); |
1441 | fq_state_change(p, fq, msg, verb); | 1399 | fq_state_change(p, fq, msg, verb); |
1442 | if (fq->cb.fqs) | 1400 | if (fq->cb.fqs) |
1443 | fq->cb.fqs(p, fq, msg); | 1401 | fq->cb.fqs(p, fq, msg); |
@@ -1451,7 +1409,7 @@ static void qm_mr_process_task(struct work_struct *work) | |||
1451 | } | 1409 | } |
1452 | } else { | 1410 | } else { |
1453 | /* Its a software ERN */ | 1411 | /* Its a software ERN */ |
1454 | fq = tag_to_fq(msg->ern.tag); | 1412 | fq = tag_to_fq(be32_to_cpu(msg->ern.tag)); |
1455 | fq->cb.ern(p, fq, msg); | 1413 | fq->cb.ern(p, fq, msg); |
1456 | } | 1414 | } |
1457 | num++; | 1415 | num++; |
@@ -1536,7 +1494,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p, | |||
1536 | 1494 | ||
1537 | if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { | 1495 | if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { |
1538 | /* | 1496 | /* |
1539 | * VDQCR: don't trust contextB as the FQ may have | 1497 | * VDQCR: don't trust context_b as the FQ may have |
1540 | * been configured for h/w consumption and we're | 1498 | * been configured for h/w consumption and we're |
1541 | * draining it post-retirement. | 1499 | * draining it post-retirement. |
1542 | */ | 1500 | */ |
@@ -1562,8 +1520,8 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p, | |||
1562 | if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) | 1520 | if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) |
1563 | clear_vdqcr(p, fq); | 1521 | clear_vdqcr(p, fq); |
1564 | } else { | 1522 | } else { |
1565 | /* SDQCR: contextB points to the FQ */ | 1523 | /* SDQCR: context_b points to the FQ */ |
1566 | fq = tag_to_fq(dq->contextB); | 1524 | fq = tag_to_fq(be32_to_cpu(dq->context_b)); |
1567 | /* Now let the callback do its stuff */ | 1525 | /* Now let the callback do its stuff */ |
1568 | res = fq->cb.dqrr(p, fq, dq); | 1526 | res = fq->cb.dqrr(p, fq, dq); |
1569 | /* | 1527 | /* |
@@ -1780,9 +1738,9 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) | |||
1780 | if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) | 1738 | if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) |
1781 | return -EINVAL; | 1739 | return -EINVAL; |
1782 | #endif | 1740 | #endif |
1783 | if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) { | 1741 | if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) { |
1784 | /* And can't be set at the same time as TDTHRESH */ | 1742 | /* And can't be set at the same time as TDTHRESH */ |
1785 | if (opts->we_mask & QM_INITFQ_WE_TDTHRESH) | 1743 | if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH) |
1786 | return -EINVAL; | 1744 | return -EINVAL; |
1787 | } | 1745 | } |
1788 | /* Issue an INITFQ_[PARKED|SCHED] management command */ | 1746 | /* Issue an INITFQ_[PARKED|SCHED] management command */ |
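The hunk above (and several below) replaces plain 16-bit tests of we_mask with be16_to_cpu()/cpu_to_be16(): the management-command words live in memory in big-endian byte order, so flags have to be tested in CPU order and written back swapped. A minimal sketch of the pattern, assuming the usual kernel byteorder helpers; the struct and flag names here are illustrative, not the driver's:

/* Sketch only: test and set a flag in a big-endian 16-bit descriptor word. */
struct example_desc {
        __be16 we_mask;
};

static bool example_flag_is_set(const struct example_desc *d, u16 flag)
{
        return be16_to_cpu(d->we_mask) & flag;
}

static void example_flag_set(struct example_desc *d, u16 flag)
{
        d->we_mask |= cpu_to_be16(flag);        /* OR commutes with the swap */
}

Setting a bit can stay a single OR of a pre-swapped constant, which is exactly what the initfq hunk below does; only comparisons against CPU-order constants need the explicit be16_to_cpu().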
@@ -1796,37 +1754,49 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) | |||
1796 | mcc = qm_mc_start(&p->p); | 1754 | mcc = qm_mc_start(&p->p); |
1797 | if (opts) | 1755 | if (opts) |
1798 | mcc->initfq = *opts; | 1756 | mcc->initfq = *opts; |
1799 | mcc->initfq.fqid = fq->fqid; | 1757 | qm_fqid_set(&mcc->fq, fq->fqid); |
1800 | mcc->initfq.count = 0; | 1758 | mcc->initfq.count = 0; |
1801 | /* | 1759 | /* |
1802 | * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a | 1760 | * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a |
1803 | * demux pointer. Otherwise, the caller-provided value is allowed to | 1761 | * demux pointer. Otherwise, the caller-provided value is allowed to |
1804 | * stand, don't overwrite it. | 1762 | * stand, don't overwrite it. |
1805 | */ | 1763 | */ |
1806 | if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { | 1764 | if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { |
1807 | dma_addr_t phys_fq; | 1765 | dma_addr_t phys_fq; |
1808 | 1766 | ||
1809 | mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB; | 1767 | mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB); |
1810 | mcc->initfq.fqd.context_b = fq_to_tag(fq); | 1768 | mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq)); |
1811 | /* | 1769 | /* |
1812 | * and the physical address - NB, if the user wasn't trying to | 1770 | * and the physical address - NB, if the user wasn't trying to |
1813 | * set CONTEXTA, clear the stashing settings. | 1771 | * set CONTEXTA, clear the stashing settings. |
1814 | */ | 1772 | */ |
1815 | if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) { | 1773 | if (!(be16_to_cpu(mcc->initfq.we_mask) & |
1816 | mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; | 1774 | QM_INITFQ_WE_CONTEXTA)) { |
1775 | mcc->initfq.we_mask |= | ||
1776 | cpu_to_be16(QM_INITFQ_WE_CONTEXTA); | ||
1817 | memset(&mcc->initfq.fqd.context_a, 0, | 1777 | memset(&mcc->initfq.fqd.context_a, 0, |
1818 | sizeof(mcc->initfq.fqd.context_a)); | 1778 | sizeof(mcc->initfq.fqd.context_a)); |
1819 | } else { | 1779 | } else { |
1820 | phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq), | 1780 | struct qman_portal *p = qman_dma_portal; |
1821 | DMA_TO_DEVICE); | 1781 | |
1782 | phys_fq = dma_map_single(p->config->dev, fq, | ||
1783 | sizeof(*fq), DMA_TO_DEVICE); | ||
1784 | if (dma_mapping_error(p->config->dev, phys_fq)) { | ||
1785 | dev_err(p->config->dev, "dma_mapping failed\n"); | ||
1786 | ret = -EIO; | ||
1787 | goto out; | ||
1788 | } | ||
1789 | |||
1822 | qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); | 1790 | qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); |
1823 | } | 1791 | } |
1824 | } | 1792 | } |
1825 | if (flags & QMAN_INITFQ_FLAG_LOCAL) { | 1793 | if (flags & QMAN_INITFQ_FLAG_LOCAL) { |
1826 | int wq = 0; | 1794 | int wq = 0; |
1827 | 1795 | ||
1828 | if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) { | 1796 | if (!(be16_to_cpu(mcc->initfq.we_mask) & |
1829 | mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ; | 1797 | QM_INITFQ_WE_DESTWQ)) { |
1798 | mcc->initfq.we_mask |= | ||
1799 | cpu_to_be16(QM_INITFQ_WE_DESTWQ); | ||
1830 | wq = 4; | 1800 | wq = 4; |
1831 | } | 1801 | } |
1832 | qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq); | 1802 | qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq); |
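With the per-portal dummy platform device gone from qman_create_portal(), the stashing branch above maps the FQ through the device behind qman_dma_portal (the first portal initialised, see the qman_portal.c change further down) and, unlike before, checks the result instead of handing a possibly bogus bus address to the FQD. A rough sketch of the check-before-use shape, with 'dev' and 'buf' as placeholders:

/* Sketch, not driver code: map a buffer for device reads and fail cleanly. */
dma_addr_t addr;

addr = dma_map_single(dev, buf, sizeof(*buf), DMA_TO_DEVICE);
if (dma_mapping_error(dev, addr)) {
        dev_err(dev, "dma_mapping failed\n");
        return -EIO;
}
/* only now is 'addr' safe to program into the hardware descriptor */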
@@ -1845,13 +1815,13 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) | |||
1845 | goto out; | 1815 | goto out; |
1846 | } | 1816 | } |
1847 | if (opts) { | 1817 | if (opts) { |
1848 | if (opts->we_mask & QM_INITFQ_WE_FQCTRL) { | 1818 | if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) { |
1849 | if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE) | 1819 | if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE) |
1850 | fq_set(fq, QMAN_FQ_STATE_CGR_EN); | 1820 | fq_set(fq, QMAN_FQ_STATE_CGR_EN); |
1851 | else | 1821 | else |
1852 | fq_clear(fq, QMAN_FQ_STATE_CGR_EN); | 1822 | fq_clear(fq, QMAN_FQ_STATE_CGR_EN); |
1853 | } | 1823 | } |
1854 | if (opts->we_mask & QM_INITFQ_WE_CGID) | 1824 | if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID) |
1855 | fq->cgr_groupid = opts->fqd.cgid; | 1825 | fq->cgr_groupid = opts->fqd.cgid; |
1856 | } | 1826 | } |
1857 | fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? | 1827 | fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? |
@@ -1884,7 +1854,7 @@ int qman_schedule_fq(struct qman_fq *fq) | |||
1884 | goto out; | 1854 | goto out; |
1885 | } | 1855 | } |
1886 | mcc = qm_mc_start(&p->p); | 1856 | mcc = qm_mc_start(&p->p); |
1887 | mcc->alterfq.fqid = fq->fqid; | 1857 | qm_fqid_set(&mcc->fq, fq->fqid); |
1888 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); | 1858 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); |
1889 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | 1859 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
1890 | dev_err(p->config->dev, "ALTER_SCHED timeout\n"); | 1860 | dev_err(p->config->dev, "ALTER_SCHED timeout\n"); |
@@ -1927,7 +1897,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags) | |||
1927 | goto out; | 1897 | goto out; |
1928 | } | 1898 | } |
1929 | mcc = qm_mc_start(&p->p); | 1899 | mcc = qm_mc_start(&p->p); |
1930 | mcc->alterfq.fqid = fq->fqid; | 1900 | qm_fqid_set(&mcc->fq, fq->fqid); |
1931 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); | 1901 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); |
1932 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | 1902 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
1933 | dev_crit(p->config->dev, "ALTER_RETIRE timeout\n"); | 1903 | dev_crit(p->config->dev, "ALTER_RETIRE timeout\n"); |
@@ -1970,8 +1940,8 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags) | |||
1970 | 1940 | ||
1971 | msg.verb = QM_MR_VERB_FQRNI; | 1941 | msg.verb = QM_MR_VERB_FQRNI; |
1972 | msg.fq.fqs = mcr->alterfq.fqs; | 1942 | msg.fq.fqs = mcr->alterfq.fqs; |
1973 | msg.fq.fqid = fq->fqid; | 1943 | qm_fqid_set(&msg.fq, fq->fqid); |
1974 | msg.fq.contextB = fq_to_tag(fq); | 1944 | msg.fq.context_b = cpu_to_be32(fq_to_tag(fq)); |
1975 | fq->cb.fqs(p, fq, &msg); | 1945 | fq->cb.fqs(p, fq, &msg); |
1976 | } | 1946 | } |
1977 | } else if (res == QM_MCR_RESULT_PENDING) { | 1947 | } else if (res == QM_MCR_RESULT_PENDING) { |
@@ -2006,7 +1976,7 @@ int qman_oos_fq(struct qman_fq *fq) | |||
2006 | goto out; | 1976 | goto out; |
2007 | } | 1977 | } |
2008 | mcc = qm_mc_start(&p->p); | 1978 | mcc = qm_mc_start(&p->p); |
2009 | mcc->alterfq.fqid = fq->fqid; | 1979 | qm_fqid_set(&mcc->fq, fq->fqid); |
2010 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); | 1980 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); |
2011 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | 1981 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
2012 | ret = -ETIMEDOUT; | 1982 | ret = -ETIMEDOUT; |
@@ -2032,7 +2002,7 @@ int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) | |||
2032 | int ret = 0; | 2002 | int ret = 0; |
2033 | 2003 | ||
2034 | mcc = qm_mc_start(&p->p); | 2004 | mcc = qm_mc_start(&p->p); |
2035 | mcc->queryfq.fqid = fq->fqid; | 2005 | qm_fqid_set(&mcc->fq, fq->fqid); |
2036 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); | 2006 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); |
2037 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | 2007 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
2038 | ret = -ETIMEDOUT; | 2008 | ret = -ETIMEDOUT; |
@@ -2058,7 +2028,7 @@ static int qman_query_fq_np(struct qman_fq *fq, | |||
2058 | int ret = 0; | 2028 | int ret = 0; |
2059 | 2029 | ||
2060 | mcc = qm_mc_start(&p->p); | 2030 | mcc = qm_mc_start(&p->p); |
2061 | mcc->queryfq.fqid = fq->fqid; | 2031 | qm_fqid_set(&mcc->fq, fq->fqid); |
2062 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); | 2032 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); |
2063 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | 2033 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
2064 | ret = -ETIMEDOUT; | 2034 | ret = -ETIMEDOUT; |
@@ -2086,7 +2056,7 @@ static int qman_query_cgr(struct qman_cgr *cgr, | |||
2086 | int ret = 0; | 2056 | int ret = 0; |
2087 | 2057 | ||
2088 | mcc = qm_mc_start(&p->p); | 2058 | mcc = qm_mc_start(&p->p); |
2089 | mcc->querycgr.cgid = cgr->cgrid; | 2059 | mcc->cgr.cgid = cgr->cgrid; |
2090 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); | 2060 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); |
2091 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | 2061 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
2092 | ret = -ETIMEDOUT; | 2062 | ret = -ETIMEDOUT; |
@@ -2239,8 +2209,8 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd) | |||
2239 | if (unlikely(!eq)) | 2209 | if (unlikely(!eq)) |
2240 | goto out; | 2210 | goto out; |
2241 | 2211 | ||
2242 | eq->fqid = fq->fqid; | 2212 | qm_fqid_set(eq, fq->fqid); |
2243 | eq->tag = fq_to_tag(fq); | 2213 | eq->tag = cpu_to_be32(fq_to_tag(fq)); |
2244 | eq->fd = *fd; | 2214 | eq->fd = *fd; |
2245 | 2215 | ||
2246 | qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE); | 2216 | qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE); |
@@ -2282,7 +2252,24 @@ out: | |||
2282 | } | 2252 | } |
2283 | 2253 | ||
2284 | #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0) | 2254 | #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0) |
2285 | #define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n)) | 2255 | |
2256 | /* congestion state change notification target update control */ | ||
2257 | static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val) | ||
2258 | { | ||
2259 | if (qman_ip_rev >= QMAN_REV30) | ||
2260 | cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi | | ||
2261 | QM_CGR_TARG_UDP_CTRL_WRITE_BIT); | ||
2262 | else | ||
2263 | cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi)); | ||
2264 | } | ||
2265 | |||
2266 | static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val) | ||
2267 | { | ||
2268 | if (qman_ip_rev >= QMAN_REV30) | ||
2269 | cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi); | ||
2270 | else | ||
2271 | cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi)); | ||
2272 | } | ||
2286 | 2273 | ||
2287 | static u8 qman_cgr_cpus[CGR_NUM]; | 2274 | static u8 qman_cgr_cpus[CGR_NUM]; |
2288 | 2275 | ||
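The two helpers added above centralise a revision split that used to be open-coded at every call site (compare the create/delete hunks below): from QMan 3.0 on, the congestion-state-change target is programmed as a portal index plus a write-enable bit in cscn_targ_upd_ctrl, while older revisions keep a 32-bit one-bit-per-portal mask in cscn_targ; the removed TARG_MASK() was BIT(31) >> portal-index. A small illustration of the pre-3.0 mask arithmetic only, assuming QM_CGR_TARG_PORTAL(n) is that same MSB-first bit (the macro is referenced by the helpers but not defined in this hunk):

/* Illustration: enable, then disable, CSCN delivery to software portal 3
 * on a pre-QMAN_REV30 part.  Portal 0 maps to bit 31, portal 3 to bit 28. */
u32 targ = 0;

targ |=  QM_CGR_TARG_PORTAL(3);         /* targ == 0x10000000 */
targ &= ~QM_CGR_TARG_PORTAL(3);         /* back to 0          */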
@@ -2305,7 +2292,6 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, | |||
2305 | struct qm_mcc_initcgr *opts) | 2292 | struct qm_mcc_initcgr *opts) |
2306 | { | 2293 | { |
2307 | struct qm_mcr_querycgr cgr_state; | 2294 | struct qm_mcr_querycgr cgr_state; |
2308 | struct qm_mcc_initcgr local_opts = {}; | ||
2309 | int ret; | 2295 | int ret; |
2310 | struct qman_portal *p; | 2296 | struct qman_portal *p; |
2311 | 2297 | ||
@@ -2327,22 +2313,18 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags, | |||
2327 | spin_lock(&p->cgr_lock); | 2313 | spin_lock(&p->cgr_lock); |
2328 | 2314 | ||
2329 | if (opts) { | 2315 | if (opts) { |
2316 | struct qm_mcc_initcgr local_opts = *opts; | ||
2317 | |||
2330 | ret = qman_query_cgr(cgr, &cgr_state); | 2318 | ret = qman_query_cgr(cgr, &cgr_state); |
2331 | if (ret) | 2319 | if (ret) |
2332 | goto out; | 2320 | goto out; |
2333 | if (opts) | 2321 | |
2334 | local_opts = *opts; | 2322 | qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p), |
2335 | if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) | 2323 | be32_to_cpu(cgr_state.cgr.cscn_targ)); |
2336 | local_opts.cgr.cscn_targ_upd_ctrl = | 2324 | local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG); |
2337 | QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p); | ||
2338 | else | ||
2339 | /* Overwrite TARG */ | ||
2340 | local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ | | ||
2341 | TARG_MASK(p); | ||
2342 | local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; | ||
2343 | 2325 | ||
2344 | /* send init if flags indicate so */ | 2326 | /* send init if flags indicate so */ |
2345 | if (opts && (flags & QMAN_CGR_FLAG_USE_INIT)) | 2327 | if (flags & QMAN_CGR_FLAG_USE_INIT) |
2346 | ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, | 2328 | ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, |
2347 | &local_opts); | 2329 | &local_opts); |
2348 | else | 2330 | else |
@@ -2405,13 +2387,11 @@ int qman_delete_cgr(struct qman_cgr *cgr) | |||
2405 | list_add(&cgr->node, &p->cgr_cbs); | 2387 | list_add(&cgr->node, &p->cgr_cbs); |
2406 | goto release_lock; | 2388 | goto release_lock; |
2407 | } | 2389 | } |
2408 | /* Overwrite TARG */ | 2390 | |
2409 | local_opts.we_mask = QM_CGR_WE_CSCN_TARG; | 2391 | local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG); |
2410 | if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) | 2392 | qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p), |
2411 | local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p); | 2393 | be32_to_cpu(cgr_state.cgr.cscn_targ)); |
2412 | else | 2394 | |
2413 | local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ & | ||
2414 | ~(TARG_MASK(p)); | ||
2415 | ret = qm_modify_cgr(cgr, 0, &local_opts); | 2395 | ret = qm_modify_cgr(cgr, 0, &local_opts); |
2416 | if (ret) | 2396 | if (ret) |
2417 | /* add back to the list */ | 2397 | /* add back to the list */ |
@@ -2501,7 +2481,7 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s, | |||
2501 | } while (wait && !dqrr); | 2481 | } while (wait && !dqrr); |
2502 | 2482 | ||
2503 | while (dqrr) { | 2483 | while (dqrr) { |
2504 | if (dqrr->fqid == fqid && (dqrr->stat & s)) | 2484 | if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s)) |
2505 | found = 1; | 2485 | found = 1; |
2506 | qm_dqrr_cdc_consume_1ptr(p, dqrr, 0); | 2486 | qm_dqrr_cdc_consume_1ptr(p, dqrr, 0); |
2507 | qm_dqrr_pvb_update(p); | 2487 | qm_dqrr_pvb_update(p); |
@@ -2537,7 +2517,7 @@ static int qman_shutdown_fq(u32 fqid) | |||
2537 | dev = p->config->dev; | 2517 | dev = p->config->dev; |
2538 | /* Determine the state of the FQID */ | 2518 | /* Determine the state of the FQID */ |
2539 | mcc = qm_mc_start(&p->p); | 2519 | mcc = qm_mc_start(&p->p); |
2540 | mcc->queryfq_np.fqid = fqid; | 2520 | qm_fqid_set(&mcc->fq, fqid); |
2541 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); | 2521 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); |
2542 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | 2522 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
2543 | dev_err(dev, "QUERYFQ_NP timeout\n"); | 2523 | dev_err(dev, "QUERYFQ_NP timeout\n"); |
@@ -2552,7 +2532,7 @@ static int qman_shutdown_fq(u32 fqid) | |||
2552 | 2532 | ||
2553 | /* Query which channel the FQ is using */ | 2533 | /* Query which channel the FQ is using */ |
2554 | mcc = qm_mc_start(&p->p); | 2534 | mcc = qm_mc_start(&p->p); |
2555 | mcc->queryfq.fqid = fqid; | 2535 | qm_fqid_set(&mcc->fq, fqid); |
2556 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); | 2536 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); |
2557 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | 2537 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
2558 | dev_err(dev, "QUERYFQ timeout\n"); | 2538 | dev_err(dev, "QUERYFQ timeout\n"); |
@@ -2572,7 +2552,7 @@ static int qman_shutdown_fq(u32 fqid) | |||
2572 | case QM_MCR_NP_STATE_PARKED: | 2552 | case QM_MCR_NP_STATE_PARKED: |
2573 | orl_empty = 0; | 2553 | orl_empty = 0; |
2574 | mcc = qm_mc_start(&p->p); | 2554 | mcc = qm_mc_start(&p->p); |
2575 | mcc->alterfq.fqid = fqid; | 2555 | qm_fqid_set(&mcc->fq, fqid); |
2576 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); | 2556 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); |
2577 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | 2557 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
2578 | dev_err(dev, "QUERYFQ_NP timeout\n"); | 2558 | dev_err(dev, "QUERYFQ_NP timeout\n"); |
@@ -2667,7 +2647,7 @@ static int qman_shutdown_fq(u32 fqid) | |||
2667 | cpu_relax(); | 2647 | cpu_relax(); |
2668 | } | 2648 | } |
2669 | mcc = qm_mc_start(&p->p); | 2649 | mcc = qm_mc_start(&p->p); |
2670 | mcc->alterfq.fqid = fqid; | 2650 | qm_fqid_set(&mcc->fq, fqid); |
2671 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); | 2651 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); |
2672 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | 2652 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
2673 | ret = -ETIMEDOUT; | 2653 | ret = -ETIMEDOUT; |
@@ -2687,7 +2667,7 @@ static int qman_shutdown_fq(u32 fqid) | |||
2687 | case QM_MCR_NP_STATE_RETIRED: | 2667 | case QM_MCR_NP_STATE_RETIRED: |
2688 | /* Send OOS Command */ | 2668 | /* Send OOS Command */ |
2689 | mcc = qm_mc_start(&p->p); | 2669 | mcc = qm_mc_start(&p->p); |
2690 | mcc->alterfq.fqid = fqid; | 2670 | qm_fqid_set(&mcc->fq, fqid); |
2691 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); | 2671 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); |
2692 | if (!qm_mc_result_timeout(&p->p, &mcr)) { | 2672 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
2693 | ret = -ETIMEDOUT; | 2673 | ret = -ETIMEDOUT; |
@@ -2722,6 +2702,7 @@ const struct qm_portal_config *qman_get_qm_portal_config( | |||
2722 | { | 2702 | { |
2723 | return portal->config; | 2703 | return portal->config; |
2724 | } | 2704 | } |
2705 | EXPORT_SYMBOL(qman_get_qm_portal_config); | ||
2725 | 2706 | ||
2726 | struct gen_pool *qm_fqalloc; /* FQID allocator */ | 2707 | struct gen_pool *qm_fqalloc; /* FQID allocator */ |
2727 | struct gen_pool *qm_qpalloc; /* pool-channel allocator */ | 2708 | struct gen_pool *qm_qpalloc; /* pool-channel allocator */ |
@@ -2789,15 +2770,18 @@ static int qpool_cleanup(u32 qp) | |||
2789 | struct qm_mcr_queryfq_np np; | 2770 | struct qm_mcr_queryfq_np np; |
2790 | 2771 | ||
2791 | err = qman_query_fq_np(&fq, &np); | 2772 | err = qman_query_fq_np(&fq, &np); |
2792 | if (err) | 2773 | if (err == -ERANGE) |
2793 | /* FQID range exceeded, found no problems */ | 2774 | /* FQID range exceeded, found no problems */ |
2794 | return 0; | 2775 | return 0; |
2776 | else if (WARN_ON(err)) | ||
2777 | return err; | ||
2778 | |||
2795 | if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { | 2779 | if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { |
2796 | struct qm_fqd fqd; | 2780 | struct qm_fqd fqd; |
2797 | 2781 | ||
2798 | err = qman_query_fq(&fq, &fqd); | 2782 | err = qman_query_fq(&fq, &fqd); |
2799 | if (WARN_ON(err)) | 2783 | if (WARN_ON(err)) |
2800 | return 0; | 2784 | return err; |
2801 | if (qm_fqd_get_chan(&fqd) == qp) { | 2785 | if (qm_fqd_get_chan(&fqd) == qp) { |
2802 | /* The channel is the FQ's target, clean it */ | 2786 | /* The channel is the FQ's target, clean it */ |
2803 | err = qman_shutdown_fq(fq.fqid); | 2787 | err = qman_shutdown_fq(fq.fqid); |
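qpool_cleanup() now treats -ERANGE from the query as the normal end-of-scan condition (the FQID walked past the configured range) and any other error as a real failure that is warned about and propagated, where the old code lumped both cases together. A condensed sketch of the loop shape; check_one_fq() is a placeholder for the per-FQ inspection done by the real cleanup helpers:

/* Sketch of the scan-until--ERANGE idiom used by the cleanup helpers. */
struct qman_fq fq = { .fqid = QM_FQID_RANGE_START };
int err;

for (;;) {
        struct qm_mcr_queryfq_np np;

        err = qman_query_fq_np(&fq, &np);
        if (err == -ERANGE)             /* past the last FQID: clean stop */
                return 0;
        else if (WARN_ON(err))          /* genuine failure: report it     */
                return err;

        err = check_one_fq(&fq, &np);   /* placeholder for the real work  */
        if (err)
                return err;
        fq.fqid++;
}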
@@ -2836,7 +2820,7 @@ static int cgr_cleanup(u32 cgrid) | |||
2836 | * error, looking for non-OOS FQDs whose CGR is the CGR being released | 2820 | * error, looking for non-OOS FQDs whose CGR is the CGR being released |
2837 | */ | 2821 | */ |
2838 | struct qman_fq fq = { | 2822 | struct qman_fq fq = { |
2839 | .fqid = 1 | 2823 | .fqid = QM_FQID_RANGE_START |
2840 | }; | 2824 | }; |
2841 | int err; | 2825 | int err; |
2842 | 2826 | ||
@@ -2844,16 +2828,19 @@ static int cgr_cleanup(u32 cgrid) | |||
2844 | struct qm_mcr_queryfq_np np; | 2828 | struct qm_mcr_queryfq_np np; |
2845 | 2829 | ||
2846 | err = qman_query_fq_np(&fq, &np); | 2830 | err = qman_query_fq_np(&fq, &np); |
2847 | if (err) | 2831 | if (err == -ERANGE) |
2848 | /* FQID range exceeded, found no problems */ | 2832 | /* FQID range exceeded, found no problems */ |
2849 | return 0; | 2833 | return 0; |
2834 | else if (WARN_ON(err)) | ||
2835 | return err; | ||
2836 | |||
2850 | if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { | 2837 | if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { |
2851 | struct qm_fqd fqd; | 2838 | struct qm_fqd fqd; |
2852 | 2839 | ||
2853 | err = qman_query_fq(&fq, &fqd); | 2840 | err = qman_query_fq(&fq, &fqd); |
2854 | if (WARN_ON(err)) | 2841 | if (WARN_ON(err)) |
2855 | return 0; | 2842 | return err; |
2856 | if ((fqd.fq_ctrl & QM_FQCTRL_CGE) && | 2843 | if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE && |
2857 | fqd.cgid == cgrid) { | 2844 | fqd.cgid == cgrid) { |
2858 | pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n", | 2845 | pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n", |
2859 | cgrid, fq.fqid); | 2846 | cgrid, fq.fqid); |
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c index 0cace9e0077e..f4e6e70de259 100644 --- a/drivers/soc/fsl/qbman/qman_ccsr.c +++ b/drivers/soc/fsl/qbman/qman_ccsr.c | |||
@@ -444,6 +444,9 @@ static int zero_priv_mem(struct device *dev, struct device_node *node, | |||
444 | /* map as cacheable, non-guarded */ | 444 | /* map as cacheable, non-guarded */ |
445 | void __iomem *tmpp = ioremap_prot(addr, sz, 0); | 445 | void __iomem *tmpp = ioremap_prot(addr, sz, 0); |
446 | 446 | ||
447 | if (!tmpp) | ||
448 | return -ENOMEM; | ||
449 | |||
447 | memset_io(tmpp, 0, sz); | 450 | memset_io(tmpp, 0, sz); |
448 | flush_dcache_range((unsigned long)tmpp, | 451 | flush_dcache_range((unsigned long)tmpp, |
449 | (unsigned long)tmpp + sz); | 452 | (unsigned long)tmpp + sz); |
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index 148614388fca..c9a9bcb1aea2 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c | |||
@@ -30,6 +30,9 @@ | |||
30 | 30 | ||
31 | #include "qman_priv.h" | 31 | #include "qman_priv.h" |
32 | 32 | ||
33 | struct qman_portal *qman_dma_portal; | ||
34 | EXPORT_SYMBOL(qman_dma_portal); | ||
35 | |||
33 | /* Enable portal interupts (as opposed to polling mode) */ | 36 | /* Enable portal interupts (as opposed to polling mode) */ |
34 | #define CONFIG_FSL_DPA_PIRQ_SLOW 1 | 37 | #define CONFIG_FSL_DPA_PIRQ_SLOW 1 |
35 | #define CONFIG_FSL_DPA_PIRQ_FAST 1 | 38 | #define CONFIG_FSL_DPA_PIRQ_FAST 1 |
@@ -150,6 +153,10 @@ static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg) | |||
150 | /* all assigned portals are initialized now */ | 153 | /* all assigned portals are initialized now */ |
151 | qman_init_cgr_all(); | 154 | qman_init_cgr_all(); |
152 | } | 155 | } |
156 | |||
157 | if (!qman_dma_portal) | ||
158 | qman_dma_portal = p; | ||
159 | |||
153 | spin_unlock(&qman_lock); | 160 | spin_unlock(&qman_lock); |
154 | 161 | ||
155 | dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu); | 162 | dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu); |
@@ -238,9 +245,9 @@ static int qman_portal_probe(struct platform_device *pdev) | |||
238 | struct device_node *node = dev->of_node; | 245 | struct device_node *node = dev->of_node; |
239 | struct qm_portal_config *pcfg; | 246 | struct qm_portal_config *pcfg; |
240 | struct resource *addr_phys[2]; | 247 | struct resource *addr_phys[2]; |
241 | const u32 *channel; | ||
242 | void __iomem *va; | 248 | void __iomem *va; |
243 | int irq, len, cpu; | 249 | int irq, cpu, err; |
250 | u32 val; | ||
244 | 251 | ||
245 | pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); | 252 | pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); |
246 | if (!pcfg) | 253 | if (!pcfg) |
@@ -264,13 +271,13 @@ static int qman_portal_probe(struct platform_device *pdev) | |||
264 | return -ENXIO; | 271 | return -ENXIO; |
265 | } | 272 | } |
266 | 273 | ||
267 | channel = of_get_property(node, "cell-index", &len); | 274 | err = of_property_read_u32(node, "cell-index", &val); |
268 | if (!channel || (len != 4)) { | 275 | if (err) { |
269 | dev_err(dev, "Can't get %s property 'cell-index'\n", | 276 | dev_err(dev, "Can't get %s property 'cell-index'\n", |
270 | node->full_name); | 277 | node->full_name); |
271 | return -ENXIO; | 278 | return err; |
272 | } | 279 | } |
273 | pcfg->channel = *channel; | 280 | pcfg->channel = val; |
274 | pcfg->cpu = -1; | 281 | pcfg->cpu = -1; |
275 | irq = platform_get_irq(pdev, 0); | 282 | irq = platform_get_irq(pdev, 0); |
276 | if (irq <= 0) { | 283 | if (irq <= 0) { |
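Switching "cell-index" to of_property_read_u32() drops the raw of_get_property() pointer-and-length dance: the helper checks that the property exists and has the right size, and converts from the device tree's big-endian storage (the old direct dereference only worked because the parsing host is big-endian powerpc). The same pattern for any u32 property, with a placeholder name:

/* Sketch: read one 32-bit DT property, propagating the OF error code. */
u32 val;
int err;

err = of_property_read_u32(node, "some-property", &val);
if (err) {
        dev_err(dev, "missing or malformed 'some-property'\n");
        return err;
}
/* 'val' is now in CPU byte order */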
@@ -280,15 +287,19 @@ static int qman_portal_probe(struct platform_device *pdev) | |||
280 | pcfg->irq = irq; | 287 | pcfg->irq = irq; |
281 | 288 | ||
282 | va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); | 289 | va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); |
283 | if (!va) | 290 | if (!va) { |
291 | dev_err(dev, "ioremap::CE failed\n"); | ||
284 | goto err_ioremap1; | 292 | goto err_ioremap1; |
293 | } | ||
285 | 294 | ||
286 | pcfg->addr_virt[DPAA_PORTAL_CE] = va; | 295 | pcfg->addr_virt[DPAA_PORTAL_CE] = va; |
287 | 296 | ||
288 | va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), | 297 | va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), |
289 | _PAGE_GUARDED | _PAGE_NO_CACHE); | 298 | _PAGE_GUARDED | _PAGE_NO_CACHE); |
290 | if (!va) | 299 | if (!va) { |
300 | dev_err(dev, "ioremap::CI failed\n"); | ||
291 | goto err_ioremap2; | 301 | goto err_ioremap2; |
302 | } | ||
292 | 303 | ||
293 | pcfg->addr_virt[DPAA_PORTAL_CI] = va; | 304 | pcfg->addr_virt[DPAA_PORTAL_CI] = va; |
294 | 305 | ||
@@ -306,8 +317,15 @@ static int qman_portal_probe(struct platform_device *pdev) | |||
306 | spin_unlock(&qman_lock); | 317 | spin_unlock(&qman_lock); |
307 | pcfg->cpu = cpu; | 318 | pcfg->cpu = cpu; |
308 | 319 | ||
309 | if (!init_pcfg(pcfg)) | 320 | if (dma_set_mask(dev, DMA_BIT_MASK(40))) { |
310 | goto err_ioremap2; | 321 | dev_err(dev, "dma_set_mask() failed\n"); |
322 | goto err_portal_init; | ||
323 | } | ||
324 | |||
325 | if (!init_pcfg(pcfg)) { | ||
326 | dev_err(dev, "portal init failed\n"); | ||
327 | goto err_portal_init; | ||
328 | } | ||
311 | 329 | ||
312 | /* clear irq affinity if assigned cpu is offline */ | 330 | /* clear irq affinity if assigned cpu is offline */ |
313 | if (!cpu_online(cpu)) | 331 | if (!cpu_online(cpu)) |
@@ -315,10 +333,11 @@ static int qman_portal_probe(struct platform_device *pdev) | |||
315 | 333 | ||
316 | return 0; | 334 | return 0; |
317 | 335 | ||
336 | err_portal_init: | ||
337 | iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]); | ||
318 | err_ioremap2: | 338 | err_ioremap2: |
319 | iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); | 339 | iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); |
320 | err_ioremap1: | 340 | err_ioremap1: |
321 | dev_err(dev, "ioremap failed\n"); | ||
322 | return -ENXIO; | 341 | return -ENXIO; |
323 | } | 342 | } |
324 | 343 | ||
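Two related probe changes end here: the 40-bit DMA mask that used to ride on the dummy per-portal platform device is now set on the real portal device before init_pcfg() runs, and the new err_portal_init label unwinds the second ioremap before falling through to the first, keeping teardown in reverse order of setup. The mask half of that, roughly:

/* Sketch: declare the device's 40-bit addressing limit before any
 * dma_map_*() is issued on its behalf; bail out through the unwind
 * labels on failure. */
if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
        dev_err(dev, "dma_set_mask() failed\n");
        goto err_portal_init;           /* unmaps CI, then CE */
}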
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h index 5cf821e623a9..53685b59718e 100644 --- a/drivers/soc/fsl/qbman/qman_priv.h +++ b/drivers/soc/fsl/qbman/qman_priv.h | |||
@@ -73,29 +73,23 @@ struct qm_mcr_querycgr { | |||
73 | struct __qm_mc_cgr cgr; /* CGR fields */ | 73 | struct __qm_mc_cgr cgr; /* CGR fields */ |
74 | u8 __reserved2[6]; | 74 | u8 __reserved2[6]; |
75 | u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */ | 75 | u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */ |
76 | u32 i_bcnt_lo; /* low 32-bits of 40-bit */ | 76 | __be32 i_bcnt_lo; /* low 32-bits of 40-bit */ |
77 | u8 __reserved3[3]; | 77 | u8 __reserved3[3]; |
78 | u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */ | 78 | u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */ |
79 | u32 a_bcnt_lo; /* low 32-bits of 40-bit */ | 79 | __be32 a_bcnt_lo; /* low 32-bits of 40-bit */ |
80 | u32 cscn_targ_swp[4]; | 80 | __be32 cscn_targ_swp[4]; |
81 | } __packed; | 81 | } __packed; |
82 | 82 | ||
83 | static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q) | 83 | static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q) |
84 | { | 84 | { |
85 | return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo; | 85 | return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo); |
86 | } | 86 | } |
87 | static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q) | 87 | static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q) |
88 | { | 88 | { |
89 | return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo; | 89 | return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo); |
90 | } | 90 | } |
91 | 91 | ||
92 | /* "Query FQ Non-Programmable Fields" */ | 92 | /* "Query FQ Non-Programmable Fields" */ |
93 | struct qm_mcc_queryfq_np { | ||
94 | u8 _ncw_verb; | ||
95 | u8 __reserved1[3]; | ||
96 | u32 fqid; /* 24-bit */ | ||
97 | u8 __reserved2[56]; | ||
98 | } __packed; | ||
99 | 93 | ||
100 | struct qm_mcr_queryfq_np { | 94 | struct qm_mcr_queryfq_np { |
101 | u8 verb; | 95 | u8 verb; |
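The querycgr response keeps its 40-bit byte counts split as an 8-bit high part and a now-__be32 low part, and the two accessors above splice them back together after a be32_to_cpu(). A worked example of the arithmetic, values invented:

/* Illustration of the 40-bit splice done by qm_mcr_querycgr_i_get64():
 * hi = 0x03, lo = 0x12345678 (CPU order)  ->  0x0312345678. */
static inline u64 make_40bit(u8 hi, __be32 lo_be)
{
        return ((u64)hi << 32) | be32_to_cpu(lo_be);
}
/* make_40bit(0x03, cpu_to_be32(0x12345678)) == 0x0312345678ULL */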
@@ -367,5 +361,6 @@ int qman_alloc_fq_table(u32 num_fqids); | |||
367 | #define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI) | 361 | #define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI) |
368 | 362 | ||
369 | extern struct qman_portal *affine_portals[NR_CPUS]; | 363 | extern struct qman_portal *affine_portals[NR_CPUS]; |
364 | extern struct qman_portal *qman_dma_portal; | ||
370 | const struct qm_portal_config *qman_get_qm_portal_config( | 365 | const struct qm_portal_config *qman_get_qm_portal_config( |
371 | struct qman_portal *portal); | 366 | struct qman_portal *portal); |
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c index 6880ff17f45e..2895d062cf51 100644 --- a/drivers/soc/fsl/qbman/qman_test_api.c +++ b/drivers/soc/fsl/qbman/qman_test_api.c | |||
@@ -65,7 +65,7 @@ static void fd_init(struct qm_fd *fd) | |||
65 | { | 65 | { |
66 | qm_fd_addr_set64(fd, 0xabdeadbeefLLU); | 66 | qm_fd_addr_set64(fd, 0xabdeadbeefLLU); |
67 | qm_fd_set_contig_big(fd, 0x0000ffff); | 67 | qm_fd_set_contig_big(fd, 0x0000ffff); |
68 | fd->cmd = 0xfeedf00d; | 68 | fd->cmd = cpu_to_be32(0xfeedf00d); |
69 | } | 69 | } |
70 | 70 | ||
71 | static void fd_inc(struct qm_fd *fd) | 71 | static void fd_inc(struct qm_fd *fd) |
@@ -86,26 +86,19 @@ static void fd_inc(struct qm_fd *fd) | |||
86 | len--; | 86 | len--; |
87 | qm_fd_set_param(fd, fmt, off, len); | 87 | qm_fd_set_param(fd, fmt, off, len); |
88 | 88 | ||
89 | fd->cmd++; | 89 | fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1); |
90 | } | 90 | } |
91 | 91 | ||
92 | /* The only part of the 'fd' we can't memcmp() is the ppid */ | 92 | /* The only part of the 'fd' we can't memcmp() is the ppid */ |
93 | static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b) | 93 | static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b) |
94 | { | 94 | { |
95 | int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1; | 95 | bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b); |
96 | 96 | ||
97 | if (!r) { | 97 | neq |= qm_fd_get_format(a) != qm_fd_get_format(b); |
98 | enum qm_fd_format fmt_a, fmt_b; | 98 | neq |= a->cfg != b->cfg; |
99 | neq |= a->cmd != b->cmd; | ||
99 | 100 | ||
100 | fmt_a = qm_fd_get_format(a); | 101 | return neq; |
101 | fmt_b = qm_fd_get_format(b); | ||
102 | r = fmt_a - fmt_b; | ||
103 | } | ||
104 | if (!r) | ||
105 | r = a->cfg - b->cfg; | ||
106 | if (!r) | ||
107 | r = a->cmd - b->cmd; | ||
108 | return r; | ||
109 | } | 102 | } |
110 | 103 | ||
111 | /* test */ | 104 | /* test */ |
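Renaming fd_cmp() to fd_neq() is more than cosmetic: the old version pretended to be a three-way compare built from subtractions, when the callers only ever asked "equal or not". The new form just ORs each mismatch into a bool; note that cfg and cmd are compared as raw big-endian words, which is fine for an equality test whatever the CPU byte order. The idiom, on a made-up pair type:

/* Sketch of the accumulate-inequality idiom used by fd_neq(). */
struct pair { u32 a; u32 b; };

static bool pair_neq(const struct pair *x, const struct pair *y)
{
        bool neq = x->a != y->a;

        neq |= x->b != y->b;
        return neq;
}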
@@ -217,12 +210,12 @@ static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p, | |||
217 | struct qman_fq *fq, | 210 | struct qman_fq *fq, |
218 | const struct qm_dqrr_entry *dq) | 211 | const struct qm_dqrr_entry *dq) |
219 | { | 212 | { |
220 | if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) { | 213 | if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) { |
221 | pr_err("BADNESS: dequeued frame doesn't match;\n"); | 214 | pr_err("BADNESS: dequeued frame doesn't match;\n"); |
222 | return qman_cb_dqrr_consume; | 215 | return qman_cb_dqrr_consume; |
223 | } | 216 | } |
224 | fd_inc(&fd_dq); | 217 | fd_inc(&fd_dq); |
225 | if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) { | 218 | if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) { |
226 | sdqcr_complete = 1; | 219 | sdqcr_complete = 1; |
227 | wake_up(&waitqueue); | 220 | wake_up(&waitqueue); |
228 | } | 221 | } |
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c index 43cf66ba42f5..e87b65403b67 100644 --- a/drivers/soc/fsl/qbman/qman_test_stash.c +++ b/drivers/soc/fsl/qbman/qman_test_stash.c | |||
@@ -175,7 +175,7 @@ static DEFINE_PER_CPU(struct hp_cpu, hp_cpus); | |||
175 | 175 | ||
176 | /* links together the hp_cpu structs, in first-come first-serve order. */ | 176 | /* links together the hp_cpu structs, in first-come first-serve order. */ |
177 | static LIST_HEAD(hp_cpu_list); | 177 | static LIST_HEAD(hp_cpu_list); |
178 | static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock); | 178 | static DEFINE_SPINLOCK(hp_lock); |
179 | 179 | ||
180 | static unsigned int hp_cpu_list_length; | 180 | static unsigned int hp_cpu_list_length; |
181 | 181 | ||
@@ -191,6 +191,9 @@ static void *__frame_ptr; | |||
191 | static u32 *frame_ptr; | 191 | static u32 *frame_ptr; |
192 | static dma_addr_t frame_dma; | 192 | static dma_addr_t frame_dma; |
193 | 193 | ||
194 | /* needed for dma_map*() */ | ||
195 | static const struct qm_portal_config *pcfg; | ||
196 | |||
194 | /* the main function waits on this */ | 197 | /* the main function waits on this */ |
195 | static DECLARE_WAIT_QUEUE_HEAD(queue); | 198 | static DECLARE_WAIT_QUEUE_HEAD(queue); |
196 | 199 | ||
@@ -210,16 +213,14 @@ static int allocate_frame_data(void) | |||
210 | { | 213 | { |
211 | u32 lfsr = HP_FIRST_WORD; | 214 | u32 lfsr = HP_FIRST_WORD; |
212 | int loop; | 215 | int loop; |
213 | struct platform_device *pdev = platform_device_alloc("foobar", -1); | ||
214 | 216 | ||
215 | if (!pdev) { | 217 | if (!qman_dma_portal) { |
216 | pr_crit("platform_device_alloc() failed"); | 218 | pr_crit("portal not available\n"); |
217 | return -EIO; | ||
218 | } | ||
219 | if (platform_device_add(pdev)) { | ||
220 | pr_crit("platform_device_add() failed"); | ||
221 | return -EIO; | 219 | return -EIO; |
222 | } | 220 | } |
221 | |||
222 | pcfg = qman_get_qm_portal_config(qman_dma_portal); | ||
223 | |||
223 | __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL); | 224 | __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL); |
224 | if (!__frame_ptr) | 225 | if (!__frame_ptr) |
225 | return -ENOMEM; | 226 | return -ENOMEM; |
@@ -229,15 +230,22 @@ static int allocate_frame_data(void) | |||
229 | frame_ptr[loop] = lfsr; | 230 | frame_ptr[loop] = lfsr; |
230 | lfsr = do_lfsr(lfsr); | 231 | lfsr = do_lfsr(lfsr); |
231 | } | 232 | } |
232 | frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS, | 233 | |
234 | frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS, | ||
233 | DMA_BIDIRECTIONAL); | 235 | DMA_BIDIRECTIONAL); |
234 | platform_device_del(pdev); | 236 | if (dma_mapping_error(pcfg->dev, frame_dma)) { |
235 | platform_device_put(pdev); | 237 | pr_crit("dma mapping failure\n"); |
238 | kfree(__frame_ptr); | ||
239 | return -EIO; | ||
240 | } | ||
241 | |||
236 | return 0; | 242 | return 0; |
237 | } | 243 | } |
238 | 244 | ||
239 | static void deallocate_frame_data(void) | 245 | static void deallocate_frame_data(void) |
240 | { | 246 | { |
247 | dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS, | ||
248 | DMA_BIDIRECTIONAL); | ||
241 | kfree(__frame_ptr); | 249 | kfree(__frame_ptr); |
242 | } | 250 | } |
243 | 251 | ||
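Same theme as the qman.c change: the test borrows the device behind qman_dma_portal for its streaming mapping, checks dma_mapping_error(), and deallocate_frame_data() finally gains the dma_unmap_single() that the old dummy-device version never issued. The pairing rule, sketched with placeholder names:

/* Sketch: a successful dma_map_single() must be undone with a
 * dma_unmap_single() using the same device, size and direction. */
dma = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, dma))
        return -EIO;
/* ... DMA traffic ... */
dma_unmap_single(dev, dma, len, DMA_BIDIRECTIONAL);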
@@ -249,7 +257,8 @@ static inline int process_frame_data(struct hp_handler *handler, | |||
249 | int loop; | 257 | int loop; |
250 | 258 | ||
251 | if (qm_fd_addr_get64(fd) != handler->addr) { | 259 | if (qm_fd_addr_get64(fd) != handler->addr) { |
252 | pr_crit("bad frame address"); | 260 | pr_crit("bad frame address, [%llX != %llX]\n", |
261 | qm_fd_addr_get64(fd), handler->addr); | ||
253 | return -EIO; | 262 | return -EIO; |
254 | } | 263 | } |
255 | for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { | 264 | for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { |
@@ -397,8 +406,9 @@ static int init_handler(void *h) | |||
397 | goto failed; | 406 | goto failed; |
398 | } | 407 | } |
399 | memset(&opts, 0, sizeof(opts)); | 408 | memset(&opts, 0, sizeof(opts)); |
400 | opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; | 409 | opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | |
401 | opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING; | 410 | QM_INITFQ_WE_CONTEXTA); |
411 | opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING); | ||
402 | qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL); | 412 | qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL); |
403 | err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED | | 413 | err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED | |
404 | QMAN_INITFQ_FLAG_LOCAL, &opts); | 414 | QMAN_INITFQ_FLAG_LOCAL, &opts); |
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index 2707a827261b..ade168f5328e 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c | |||
@@ -717,9 +717,5 @@ static struct platform_driver qe_driver = { | |||
717 | .resume = qe_resume, | 717 | .resume = qe_resume, |
718 | }; | 718 | }; |
719 | 719 | ||
720 | static int __init qe_drv_init(void) | 720 | builtin_platform_driver(qe_driver); |
721 | { | ||
722 | return platform_driver_register(&qe_driver); | ||
723 | } | ||
724 | device_initcall(qe_drv_init); | ||
725 | #endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */ | 721 | #endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */ |
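builtin_platform_driver() folds the hand-written initcall into one line. As far as I can tell it expands to roughly the code it replaces (a device_initcall that registers the driver), so this is a pure cleanup for a built-in, non-modular driver:

/* Approximate expansion of builtin_platform_driver(qe_driver): */
static int __init qe_driver_init(void)
{
        return platform_driver_register(&qe_driver);
}
device_initcall(qe_driver_init);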
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h index 37f3eb001a16..3d4df74a96de 100644 --- a/include/soc/fsl/qman.h +++ b/include/soc/fsl/qman.h | |||
@@ -244,11 +244,11 @@ static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg) | |||
244 | struct qm_dqrr_entry { | 244 | struct qm_dqrr_entry { |
245 | u8 verb; | 245 | u8 verb; |
246 | u8 stat; | 246 | u8 stat; |
247 | u16 seqnum; /* 15-bit */ | 247 | __be16 seqnum; /* 15-bit */ |
248 | u8 tok; | 248 | u8 tok; |
249 | u8 __reserved2[3]; | 249 | u8 __reserved2[3]; |
250 | u32 fqid; /* 24-bit */ | 250 | __be32 fqid; /* 24-bit */ |
251 | u32 contextB; | 251 | __be32 context_b; |
252 | struct qm_fd fd; | 252 | struct qm_fd fd; |
253 | u8 __reserved4[32]; | 253 | u8 __reserved4[32]; |
254 | } __packed; | 254 | } __packed; |
@@ -262,6 +262,11 @@ struct qm_dqrr_entry { | |||
262 | #define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */ | 262 | #define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */ |
263 | #define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/ | 263 | #define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/ |
264 | 264 | ||
265 | /* 'fqid' is a 24-bit field in every h/w descriptor */ | ||
266 | #define QM_FQID_MASK GENMASK(23, 0) | ||
267 | #define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK)) | ||
268 | #define qm_fqid_get(p) (be32_to_cpu((p)->fqid) & QM_FQID_MASK) | ||
269 | |||
265 | /* "ERN Message Response" */ | 270 | /* "ERN Message Response" */ |
266 | /* "FQ State Change Notification" */ | 271 | /* "FQ State Change Notification" */ |
267 | union qm_mr_entry { | 272 | union qm_mr_entry { |
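FQIDs occupy only the low 24 bits of their 32-bit descriptor word, so the new pair of macros both byte-swaps and masks (GENMASK(23, 0) is 0x00ffffff). What the pair guarantees, as a worked example against the qm_dqrr_entry layout above:

/* Worked example: store and read back a 24-bit FQID through a
 * big-endian descriptor word. */
struct qm_dqrr_entry e;

qm_fqid_set(&e, 0x12345678);            /* top byte silently dropped      */
/* e.fqid now holds cpu_to_be32(0x00345678)                               */
WARN_ON(qm_fqid_get(&e) != 0x345678);   /* reads back the masked 24 bits  */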
@@ -272,12 +277,11 @@ union qm_mr_entry { | |||
272 | struct { | 277 | struct { |
273 | u8 verb; | 278 | u8 verb; |
274 | u8 dca; | 279 | u8 dca; |
275 | u16 seqnum; | 280 | __be16 seqnum; |
276 | u8 rc; /* Rej Code: 8-bit */ | 281 | u8 rc; /* Rej Code: 8-bit */ |
277 | u8 orp_hi; /* ORP: 24-bit */ | 282 | u8 __reserved[3]; |
278 | u16 orp_lo; | 283 | __be32 fqid; /* 24-bit */ |
279 | u32 fqid; /* 24-bit */ | 284 | __be32 tag; |
280 | u32 tag; | ||
281 | struct qm_fd fd; | 285 | struct qm_fd fd; |
282 | u8 __reserved1[32]; | 286 | u8 __reserved1[32]; |
283 | } __packed ern; | 287 | } __packed ern; |
@@ -285,8 +289,8 @@ union qm_mr_entry { | |||
285 | u8 verb; | 289 | u8 verb; |
286 | u8 fqs; /* Frame Queue Status */ | 290 | u8 fqs; /* Frame Queue Status */ |
287 | u8 __reserved1[6]; | 291 | u8 __reserved1[6]; |
288 | u32 fqid; /* 24-bit */ | 292 | __be32 fqid; /* 24-bit */ |
289 | u32 contextB; | 293 | __be32 context_b; |
290 | u8 __reserved2[48]; | 294 | u8 __reserved2[48]; |
291 | } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */ | 295 | } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */ |
292 | }; | 296 | }; |
@@ -405,13 +409,13 @@ static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd) | |||
405 | 409 | ||
406 | static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr) | 410 | static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr) |
407 | { | 411 | { |
408 | fqd->context_a.context_hi = upper_32_bits(addr); | 412 | fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr)); |
409 | fqd->context_a.context_lo = lower_32_bits(addr); | 413 | fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr)); |
410 | } | 414 | } |
411 | 415 | ||
412 | static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr) | 416 | static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr) |
413 | { | 417 | { |
414 | fqd->context_a.hi = cpu_to_be16(upper_32_bits(addr)); | 418 | fqd->context_a.hi = cpu_to_be32(upper_32_bits(addr)); |
415 | fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr)); | 419 | fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr)); |
416 | } | 420 | } |
417 | 421 | ||
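The one-character fix in qm_fqd_context_a_set64() is the interesting part of this hunk: context_a.hi is a 32-bit field (both halves now take cpu_to_be32()), so swapping it with cpu_to_be16() both truncated the value and put the bytes in the wrong lanes. The stashing variant above keeps cpu_to_be16() because its context_hi really is 16 bits wide. A worked split of a 40-bit bus address, value invented:

/* Example: addr = 0x12_34567890 (40-bit bus address)
 *   upper_32_bits(addr) == 0x00000012  -> context_a.hi, cpu_to_be32()
 *   lower_32_bits(addr) == 0x34567890  -> context_a.lo, cpu_to_be32()  */
u64 addr = 0x1234567890ULL;
u32 hi = upper_32_bits(addr);   /* 0x12 */
u32 lo = lower_32_bits(addr);   /* 0x34567890 */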
@@ -521,7 +525,7 @@ static inline int qm_fqd_get_wq(const struct qm_fqd *fqd) | |||
521 | */ | 525 | */ |
522 | struct qm_cgr_wr_parm { | 526 | struct qm_cgr_wr_parm { |
523 | /* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */ | 527 | /* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */ |
524 | u32 word; | 528 | __be32 word; |
525 | }; | 529 | }; |
526 | /* | 530 | /* |
527 | * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding | 531 | * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding |
@@ -532,7 +536,7 @@ struct qm_cgr_wr_parm { | |||
532 | */ | 536 | */ |
533 | struct qm_cgr_cs_thres { | 537 | struct qm_cgr_cs_thres { |
534 | /* _res[13-15], TA[5-12], Tn[0-4] */ | 538 | /* _res[13-15], TA[5-12], Tn[0-4] */ |
535 | u16 word; | 539 | __be16 word; |
536 | }; | 540 | }; |
537 | /* | 541 | /* |
538 | * This identical structure of CGR fields is present in the "Init/Modify CGR" | 542 | * This identical structure of CGR fields is present in the "Init/Modify CGR" |
@@ -549,10 +553,10 @@ struct __qm_mc_cgr { | |||
549 | u8 cscn_en; /* boolean, use QM_CGR_EN */ | 553 | u8 cscn_en; /* boolean, use QM_CGR_EN */ |
550 | union { | 554 | union { |
551 | struct { | 555 | struct { |
552 | u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */ | 556 | __be16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */ |
553 | u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */ | 557 | __be16 cscn_targ_dcp_low; |
554 | }; | 558 | }; |
555 | u32 cscn_targ; /* use QM_CGR_TARG_* */ | 559 | __be32 cscn_targ; /* use QM_CGR_TARG_* */ |
556 | }; | 560 | }; |
557 | u8 cstd_en; /* boolean, use QM_CGR_EN */ | 561 | u8 cstd_en; /* boolean, use QM_CGR_EN */ |
558 | u8 cs; /* boolean, only used in query response */ | 562 | u8 cs; /* boolean, only used in query response */ |
@@ -568,7 +572,9 @@ struct __qm_mc_cgr { | |||
568 | /* Convert CGR thresholds to/from "cs_thres" format */ | 572 | /* Convert CGR thresholds to/from "cs_thres" format */ |
569 | static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th) | 573 | static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th) |
570 | { | 574 | { |
571 | return ((th->word >> 5) & 0xff) << (th->word & 0x1f); | 575 | int thres = be16_to_cpu(th->word); |
576 | |||
577 | return ((thres >> 5) & 0xff) << (thres & 0x1f); | ||
572 | } | 578 | } |
573 | 579 | ||
574 | static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val, | 580 | static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val, |
@@ -584,23 +590,23 @@ static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val, | |||
584 | if (roundup && oddbit) | 590 | if (roundup && oddbit) |
585 | val++; | 591 | val++; |
586 | } | 592 | } |
587 | th->word = ((val & 0xff) << 5) | (e & 0x1f); | 593 | th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f)); |
588 | return 0; | 594 | return 0; |
589 | } | 595 | } |
590 | 596 | ||
591 | /* "Initialize FQ" */ | 597 | /* "Initialize FQ" */ |
592 | struct qm_mcc_initfq { | 598 | struct qm_mcc_initfq { |
593 | u8 __reserved1[2]; | 599 | u8 __reserved1[2]; |
594 | u16 we_mask; /* Write Enable Mask */ | 600 | __be16 we_mask; /* Write Enable Mask */ |
595 | u32 fqid; /* 24-bit */ | 601 | __be32 fqid; /* 24-bit */ |
596 | u16 count; /* Initialises 'count+1' FQDs */ | 602 | __be16 count; /* Initialises 'count+1' FQDs */ |
597 | struct qm_fqd fqd; /* the FQD fields go here */ | 603 | struct qm_fqd fqd; /* the FQD fields go here */ |
598 | u8 __reserved2[30]; | 604 | u8 __reserved2[30]; |
599 | } __packed; | 605 | } __packed; |
600 | /* "Initialize/Modify CGR" */ | 606 | /* "Initialize/Modify CGR" */ |
601 | struct qm_mcc_initcgr { | 607 | struct qm_mcc_initcgr { |
602 | u8 __reserve1[2]; | 608 | u8 __reserve1[2]; |
603 | u16 we_mask; /* Write Enable Mask */ | 609 | __be16 we_mask; /* Write Enable Mask */ |
604 | struct __qm_mc_cgr cgr; /* CGR fields */ | 610 | struct __qm_mc_cgr cgr; /* CGR fields */ |
605 | u8 __reserved2[2]; | 611 | u8 __reserved2[2]; |
606 | u8 cgid; | 612 | u8 cgid; |
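Back in the CS_THRES helpers above, the word is now byte-swapped before unpacking, but the encoding itself is unchanged: an 8-bit mantissa TA in bits 5-12 and a 5-bit exponent Tn in bits 0-4, giving threshold = TA * 2^Tn. A worked example with invented values:

/* CS_THRES encoding handled by qm_cgr_cs_thres_get64()/_set64():
 *   TA = 0x40 (64), Tn = 10  ->  word = (0x40 << 5) | 10 = 0x80a
 *   threshold = 64 << 10 = 65536 (in whatever unit the CGR counts). */
u16 word = (0x40 << 5) | 10;    /* stored on the wire as cpu_to_be16(word) */
u64 thres = ((word >> 5) & 0xff) << (word & 0x1f);      /* == 65536 */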
@@ -654,7 +660,7 @@ struct qman_cgr; | |||
654 | /* | 660 | /* |
655 | * This enum, and the callback type that returns it, are used when handling | 661 | * This enum, and the callback type that returns it, are used when handling |
656 | * dequeued frames via DQRR. Note that for "null" callbacks registered with the | 662 | * dequeued frames via DQRR. Note that for "null" callbacks registered with the |
657 | * portal object (for handling dequeues that do not demux because contextB is | 663 | * portal object (for handling dequeues that do not demux because context_b is |
658 | * NULL), the return value *MUST* be qman_cb_dqrr_consume. | 664 | * NULL), the return value *MUST* be qman_cb_dqrr_consume. |
659 | */ | 665 | */ |
660 | enum qman_cb_dqrr_result { | 666 | enum qman_cb_dqrr_result { |
@@ -859,11 +865,11 @@ void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools); | |||
859 | * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to | 865 | * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to |
860 | * pre-existing frame-queues that aren't to be otherwise interfered with, it | 866 | * pre-existing frame-queues that aren't to be otherwise interfered with, it |
861 | * prevents all other modifications to the frame queue. The TO_DCPORTAL flag | 867 | * prevents all other modifications to the frame queue. The TO_DCPORTAL flag |
862 | * causes the driver to honour any contextB modifications requested in the | 868 | * causes the driver to honour any context_b modifications requested in the |
863 | * qm_init_fq() API, as this indicates the frame queue will be consumed by a | 869 | * qm_init_fq() API, as this indicates the frame queue will be consumed by a |
864 | * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by | 870 | * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by |
865 | * software portals, the contextB field is controlled by the driver and can't be | 871 | * software portals, the context_b field is controlled by the driver and can't |
866 | * modified by the caller. | 872 | * be modified by the caller. |
867 | */ | 873 | */ |
868 | int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq); | 874 | int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq); |
869 | 875 | ||